Merge git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux

Merge mainline to pick up c7513c2a2714 ("crypto/arm64: aes-ce-gcm -
add missing kernel_neon_begin/end pair").
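
(For context: on arm64, any kernel-mode use of the NEON register file must be
bracketed by kernel_neon_begin()/kernel_neon_end() so the task's user-space
FP/SIMD state is saved and restored. A minimal sketch of the pattern the
picked-up fix restores; the function name here is hypothetical, not the actual
aes-ce-gcm code:

	#include <asm/neon.h>
	#include <asm/simd.h>

	static void example_neon_transform(u8 *dst, const u8 *src, int blocks)
	{
		if (may_use_simd()) {
			kernel_neon_begin();
			/* the NEON assembly helper would be called here */
			kernel_neon_end();
		} else {
			/* scalar fallback for contexts where NEON is unusable */
		}
	}
)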
diff --git a/Documentation/crypto/api-samples.rst b/Documentation/crypto/api-samples.rst
index 006827e..0f6ca8b 100644
--- a/Documentation/crypto/api-samples.rst
+++ b/Documentation/crypto/api-samples.rst
@@ -162,7 +162,7 @@
         char *hash_alg_name = "sha1-padlock-nano";
         int ret;
 
-        alg = crypto_alloc_shash(hash_alg_name, CRYPTO_ALG_TYPE_SHASH, 0);
+        alg = crypto_alloc_shash(hash_alg_name, 0, 0);
         if (IS_ERR(alg)) {
                 pr_info("can't alloc alg %s\n", hash_alg_name);
                 return PTR_ERR(alg);
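
(The documentation fix above reflects that shash allocation now passes 0 for
both the type and mask arguments. A fuller, self-contained sketch of the same
calling convention, using "sha1" purely for illustration:

	#include <crypto/hash.h>

	static int example_sha1_digest(const u8 *data, unsigned int len,
				       u8 *out /* SHA1_DIGEST_SIZE bytes */)
	{
		struct crypto_shash *tfm;
		int ret;

		tfm = crypto_alloc_shash("sha1", 0, 0);	/* type = 0, mask = 0 */
		if (IS_ERR(tfm))
			return PTR_ERR(tfm);

		{
			SHASH_DESC_ON_STACK(desc, tfm);

			desc->tfm = tfm;
			desc->flags = 0;
			ret = crypto_shash_digest(desc, data, len, out);
			shash_desc_zero(desc);
		}

		crypto_free_shash(tfm);
		return ret;
	}
)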
diff --git a/Documentation/devicetree/bindings/crypto/inside-secure-safexcel.txt b/Documentation/devicetree/bindings/crypto/inside-secure-safexcel.txt
index 5dba55c..3bbf144 100644
--- a/Documentation/devicetree/bindings/crypto/inside-secure-safexcel.txt
+++ b/Documentation/devicetree/bindings/crypto/inside-secure-safexcel.txt
@@ -1,8 +1,9 @@
 Inside Secure SafeXcel cryptographic engine
 
 Required properties:
-- compatible: Should be "inside-secure,safexcel-eip197" or
-              "inside-secure,safexcel-eip97".
+- compatible: Should be "inside-secure,safexcel-eip197b",
+	      "inside-secure,safexcel-eip197d" or
+              "inside-secure,safexcel-eip97ies".
 - reg: Base physical address of the engine and length of memory mapped region.
 - interrupts: Interrupt numbers for the rings and engine.
 - interrupt-names: Should be "ring0", "ring1", "ring2", "ring3", "eip", "mem".
@@ -14,10 +15,18 @@
                name must be "core" for the first clock and "reg" for
                the second one.
 
+Backward compatibility:
+Two compatibles are kept for backward compatibility, but shouldn't be used for
+new submissions:
+- "inside-secure,safexcel-eip197" is equivalent to
+  "inside-secure,safexcel-eip197b".
+- "inside-secure,safexcel-eip97" is equivalent to
+  "inside-secure,safexcel-eip97ies".
+
 Example:
 
 	crypto: crypto@800000 {
-		compatible = "inside-secure,safexcel-eip197";
+		compatible = "inside-secure,safexcel-eip197b";
 		reg = <0x800000 0x200000>;
 		interrupts = <GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>,
 			     <GIC_SPI 54 IRQ_TYPE_LEVEL_HIGH>,
diff --git a/Documentation/devicetree/bindings/rng/qcom,prng.txt b/Documentation/devicetree/bindings/crypto/qcom,prng.txt
similarity index 73%
rename from Documentation/devicetree/bindings/rng/qcom,prng.txt
rename to Documentation/devicetree/bindings/crypto/qcom,prng.txt
index 8e5853c..7ee0e9e 100644
--- a/Documentation/devicetree/bindings/rng/qcom,prng.txt
+++ b/Documentation/devicetree/bindings/crypto/qcom,prng.txt
@@ -2,7 +2,9 @@
 
 Required properties:
 
-- compatible  : should be "qcom,prng"
+- compatible  : should be "qcom,prng" for 8916 and similar SoCs
+              : should be "qcom,prng-ee" for 8996 and later SoCs that use the
+                EE (Execution Environment) slice of the PRNG
 - reg         : specifies base physical address and size of the registers map
 - clocks      : phandle to clock-controller plus clock-specifier pair
 - clock-names : "core" clocks all registers, FIFO and circuits in PRNG IP block
diff --git a/MAINTAINERS b/MAINTAINERS
index 32fbc6f..7a68011 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -7350,7 +7350,7 @@
 R:	Tim Chen <tim.c.chen@linux.intel.com>
 L:	linux-crypto@vger.kernel.org
 S:	Supported
-F:	arch/x86/crypto/sha*-mb
+F:	arch/x86/crypto/sha*-mb/
 F:	crypto/mcryptd.c
 
 INTEL TELEMETRY DRIVER
diff --git a/arch/arm/crypto/ghash-ce-glue.c b/arch/arm/crypto/ghash-ce-glue.c
index d9bb52c..8930fc4 100644
--- a/arch/arm/crypto/ghash-ce-glue.c
+++ b/arch/arm/crypto/ghash-ce-glue.c
@@ -152,7 +152,7 @@
 		.cra_name	= "__ghash",
 		.cra_driver_name = "__driver-ghash-ce",
 		.cra_priority	= 0,
-		.cra_flags	= CRYPTO_ALG_TYPE_SHASH | CRYPTO_ALG_INTERNAL,
+		.cra_flags	= CRYPTO_ALG_INTERNAL,
 		.cra_blocksize	= GHASH_BLOCK_SIZE,
 		.cra_ctxsize	= sizeof(struct ghash_key),
 		.cra_module	= THIS_MODULE,
@@ -308,9 +308,8 @@
 		.cra_name	= "ghash",
 		.cra_driver_name = "ghash-ce",
 		.cra_priority	= 300,
-		.cra_flags	= CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC,
+		.cra_flags	= CRYPTO_ALG_ASYNC,
 		.cra_blocksize	= GHASH_BLOCK_SIZE,
-		.cra_type	= &crypto_ahash_type,
 		.cra_ctxsize	= sizeof(struct ghash_async_ctx),
 		.cra_module	= THIS_MODULE,
 		.cra_init	= ghash_async_init_tfm,
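
(The cra_flags/cra_type removals in this file, and in the many files below,
are safe because the hash registration helpers set these fields themselves.
Roughly, shash_prepare_alg() in crypto/shash.c does the following; this is a
paraphrase for context, not part of this diff:

	base->cra_type = &crypto_shash_type;
	base->cra_flags |= CRYPTO_ALG_TYPE_SHASH;

The ahash registration path does the same with crypto_ahash_type and
CRYPTO_ALG_TYPE_AHASH, which is why only the ASYNC/INTERNAL bits remain in
the ahash hunks.)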
diff --git a/arch/arm/crypto/sha1-ce-glue.c b/arch/arm/crypto/sha1-ce-glue.c
index 555f72b..b732522 100644
--- a/arch/arm/crypto/sha1-ce-glue.c
+++ b/arch/arm/crypto/sha1-ce-glue.c
@@ -75,7 +75,6 @@
 		.cra_name		= "sha1",
 		.cra_driver_name	= "sha1-ce",
 		.cra_priority		= 200,
-		.cra_flags		= CRYPTO_ALG_TYPE_SHASH,
 		.cra_blocksize		= SHA1_BLOCK_SIZE,
 		.cra_module		= THIS_MODULE,
 	}
diff --git a/arch/arm/crypto/sha1_glue.c b/arch/arm/crypto/sha1_glue.c
index 6fc73bf..98ab823 100644
--- a/arch/arm/crypto/sha1_glue.c
+++ b/arch/arm/crypto/sha1_glue.c
@@ -67,7 +67,6 @@
 		.cra_name	=	"sha1",
 		.cra_driver_name=	"sha1-asm",
 		.cra_priority	=	150,
-		.cra_flags	=	CRYPTO_ALG_TYPE_SHASH,
 		.cra_blocksize	=	SHA1_BLOCK_SIZE,
 		.cra_module	=	THIS_MODULE,
 	}
diff --git a/arch/arm/crypto/sha1_neon_glue.c b/arch/arm/crypto/sha1_neon_glue.c
index 4e22f12..d15e0ea 100644
--- a/arch/arm/crypto/sha1_neon_glue.c
+++ b/arch/arm/crypto/sha1_neon_glue.c
@@ -83,7 +83,6 @@
 		.cra_name		= "sha1",
 		.cra_driver_name	= "sha1-neon",
 		.cra_priority		= 250,
-		.cra_flags		= CRYPTO_ALG_TYPE_SHASH,
 		.cra_blocksize		= SHA1_BLOCK_SIZE,
 		.cra_module		= THIS_MODULE,
 	}
diff --git a/arch/arm/crypto/sha2-ce-glue.c b/arch/arm/crypto/sha2-ce-glue.c
index df4dcef..1211a5c 100644
--- a/arch/arm/crypto/sha2-ce-glue.c
+++ b/arch/arm/crypto/sha2-ce-glue.c
@@ -78,7 +78,6 @@
 		.cra_name		= "sha224",
 		.cra_driver_name	= "sha224-ce",
 		.cra_priority		= 300,
-		.cra_flags		= CRYPTO_ALG_TYPE_SHASH,
 		.cra_blocksize		= SHA256_BLOCK_SIZE,
 		.cra_module		= THIS_MODULE,
 	}
@@ -93,7 +92,6 @@
 		.cra_name		= "sha256",
 		.cra_driver_name	= "sha256-ce",
 		.cra_priority		= 300,
-		.cra_flags		= CRYPTO_ALG_TYPE_SHASH,
 		.cra_blocksize		= SHA256_BLOCK_SIZE,
 		.cra_module		= THIS_MODULE,
 	}
diff --git a/arch/arm/crypto/sha256_glue.c b/arch/arm/crypto/sha256_glue.c
index a84e869..bf8ccff 100644
--- a/arch/arm/crypto/sha256_glue.c
+++ b/arch/arm/crypto/sha256_glue.c
@@ -71,7 +71,6 @@
 		.cra_name	=	"sha256",
 		.cra_driver_name =	"sha256-asm",
 		.cra_priority	=	150,
-		.cra_flags	=	CRYPTO_ALG_TYPE_SHASH,
 		.cra_blocksize	=	SHA256_BLOCK_SIZE,
 		.cra_module	=	THIS_MODULE,
 	}
@@ -86,7 +85,6 @@
 		.cra_name	=	"sha224",
 		.cra_driver_name =	"sha224-asm",
 		.cra_priority	=	150,
-		.cra_flags	=	CRYPTO_ALG_TYPE_SHASH,
 		.cra_blocksize	=	SHA224_BLOCK_SIZE,
 		.cra_module	=	THIS_MODULE,
 	}
diff --git a/arch/arm/crypto/sha256_neon_glue.c b/arch/arm/crypto/sha256_neon_glue.c
index 39ccd65..9bbee56 100644
--- a/arch/arm/crypto/sha256_neon_glue.c
+++ b/arch/arm/crypto/sha256_neon_glue.c
@@ -79,7 +79,6 @@
 		.cra_name	=	"sha256",
 		.cra_driver_name =	"sha256-neon",
 		.cra_priority	=	250,
-		.cra_flags	=	CRYPTO_ALG_TYPE_SHASH,
 		.cra_blocksize	=	SHA256_BLOCK_SIZE,
 		.cra_module	=	THIS_MODULE,
 	}
@@ -94,7 +93,6 @@
 		.cra_name	=	"sha224",
 		.cra_driver_name =	"sha224-neon",
 		.cra_priority	=	250,
-		.cra_flags	=	CRYPTO_ALG_TYPE_SHASH,
 		.cra_blocksize	=	SHA224_BLOCK_SIZE,
 		.cra_module	=	THIS_MODULE,
 	}
diff --git a/arch/arm/crypto/sha512-glue.c b/arch/arm/crypto/sha512-glue.c
index 269a394..86540cd 100644
--- a/arch/arm/crypto/sha512-glue.c
+++ b/arch/arm/crypto/sha512-glue.c
@@ -63,7 +63,6 @@
 		.cra_name		= "sha384",
 		.cra_driver_name	= "sha384-arm",
 		.cra_priority		= 250,
-		.cra_flags		= CRYPTO_ALG_TYPE_SHASH,
 		.cra_blocksize		= SHA512_BLOCK_SIZE,
 		.cra_module		= THIS_MODULE,
 	}
@@ -78,7 +77,6 @@
 		.cra_name		= "sha512",
 		.cra_driver_name	= "sha512-arm",
 		.cra_priority		= 250,
-		.cra_flags		= CRYPTO_ALG_TYPE_SHASH,
 		.cra_blocksize		= SHA512_BLOCK_SIZE,
 		.cra_module		= THIS_MODULE,
 	}
diff --git a/arch/arm/crypto/sha512-neon-glue.c b/arch/arm/crypto/sha512-neon-glue.c
index 3269368..8a5642b 100644
--- a/arch/arm/crypto/sha512-neon-glue.c
+++ b/arch/arm/crypto/sha512-neon-glue.c
@@ -75,7 +75,6 @@
 		.cra_name		= "sha384",
 		.cra_driver_name	= "sha384-neon",
 		.cra_priority		= 300,
-		.cra_flags		= CRYPTO_ALG_TYPE_SHASH,
 		.cra_blocksize		= SHA384_BLOCK_SIZE,
 		.cra_module		= THIS_MODULE,
 
@@ -91,7 +90,6 @@
 		.cra_name		= "sha512",
 		.cra_driver_name	= "sha512-neon",
 		.cra_priority		= 300,
-		.cra_flags		= CRYPTO_ALG_TYPE_SHASH,
 		.cra_blocksize		= SHA512_BLOCK_SIZE,
 		.cra_module		= THIS_MODULE,
 	}
diff --git a/arch/arm64/crypto/aes-glue.c b/arch/arm64/crypto/aes-glue.c
index e3e5095..adcb83e 100644
--- a/arch/arm64/crypto/aes-glue.c
+++ b/arch/arm64/crypto/aes-glue.c
@@ -567,7 +567,6 @@
 	.base.cra_name		= "cmac(aes)",
 	.base.cra_driver_name	= "cmac-aes-" MODE,
 	.base.cra_priority	= PRIO,
-	.base.cra_flags		= CRYPTO_ALG_TYPE_SHASH,
 	.base.cra_blocksize	= AES_BLOCK_SIZE,
 	.base.cra_ctxsize	= sizeof(struct mac_tfm_ctx) +
 				  2 * AES_BLOCK_SIZE,
@@ -583,7 +582,6 @@
 	.base.cra_name		= "xcbc(aes)",
 	.base.cra_driver_name	= "xcbc-aes-" MODE,
 	.base.cra_priority	= PRIO,
-	.base.cra_flags		= CRYPTO_ALG_TYPE_SHASH,
 	.base.cra_blocksize	= AES_BLOCK_SIZE,
 	.base.cra_ctxsize	= sizeof(struct mac_tfm_ctx) +
 				  2 * AES_BLOCK_SIZE,
@@ -599,7 +597,6 @@
 	.base.cra_name		= "cbcmac(aes)",
 	.base.cra_driver_name	= "cbcmac-aes-" MODE,
 	.base.cra_priority	= PRIO,
-	.base.cra_flags		= CRYPTO_ALG_TYPE_SHASH,
 	.base.cra_blocksize	= 1,
 	.base.cra_ctxsize	= sizeof(struct mac_tfm_ctx),
 	.base.cra_module	= THIS_MODULE,
diff --git a/arch/arm64/crypto/ghash-ce-glue.c b/arch/arm64/crypto/ghash-ce-glue.c
index 8a10f1d..18b4d1b 100644
--- a/arch/arm64/crypto/ghash-ce-glue.c
+++ b/arch/arm64/crypto/ghash-ce-glue.c
@@ -204,7 +204,6 @@
 	.base.cra_name		= "ghash",
 	.base.cra_driver_name	= "ghash-ce",
 	.base.cra_priority	= 200,
-	.base.cra_flags		= CRYPTO_ALG_TYPE_SHASH,
 	.base.cra_blocksize	= GHASH_BLOCK_SIZE,
 	.base.cra_ctxsize	= sizeof(struct ghash_key),
 	.base.cra_module	= THIS_MODULE,
diff --git a/arch/arm64/crypto/sha1-ce-glue.c b/arch/arm64/crypto/sha1-ce-glue.c
index efbeb3e..17fac28 100644
--- a/arch/arm64/crypto/sha1-ce-glue.c
+++ b/arch/arm64/crypto/sha1-ce-glue.c
@@ -99,7 +99,6 @@
 		.cra_name		= "sha1",
 		.cra_driver_name	= "sha1-ce",
 		.cra_priority		= 200,
-		.cra_flags		= CRYPTO_ALG_TYPE_SHASH,
 		.cra_blocksize		= SHA1_BLOCK_SIZE,
 		.cra_module		= THIS_MODULE,
 	}
diff --git a/arch/arm64/crypto/sha2-ce-glue.c b/arch/arm64/crypto/sha2-ce-glue.c
index fd1ff2b..261f519 100644
--- a/arch/arm64/crypto/sha2-ce-glue.c
+++ b/arch/arm64/crypto/sha2-ce-glue.c
@@ -114,7 +114,6 @@
 		.cra_name		= "sha224",
 		.cra_driver_name	= "sha224-ce",
 		.cra_priority		= 200,
-		.cra_flags		= CRYPTO_ALG_TYPE_SHASH,
 		.cra_blocksize		= SHA256_BLOCK_SIZE,
 		.cra_module		= THIS_MODULE,
 	}
@@ -129,7 +128,6 @@
 		.cra_name		= "sha256",
 		.cra_driver_name	= "sha256-ce",
 		.cra_priority		= 200,
-		.cra_flags		= CRYPTO_ALG_TYPE_SHASH,
 		.cra_blocksize		= SHA256_BLOCK_SIZE,
 		.cra_module		= THIS_MODULE,
 	}
diff --git a/arch/arm64/crypto/sha256-glue.c b/arch/arm64/crypto/sha256-glue.c
index e8880cc..4aedeae 100644
--- a/arch/arm64/crypto/sha256-glue.c
+++ b/arch/arm64/crypto/sha256-glue.c
@@ -67,8 +67,7 @@
 	.descsize		= sizeof(struct sha256_state),
 	.base.cra_name		= "sha256",
 	.base.cra_driver_name	= "sha256-arm64",
-	.base.cra_priority	= 100,
-	.base.cra_flags		= CRYPTO_ALG_TYPE_SHASH,
+	.base.cra_priority	= 125,
 	.base.cra_blocksize	= SHA256_BLOCK_SIZE,
 	.base.cra_module	= THIS_MODULE,
 }, {
@@ -80,8 +79,7 @@
 	.descsize		= sizeof(struct sha256_state),
 	.base.cra_name		= "sha224",
 	.base.cra_driver_name	= "sha224-arm64",
-	.base.cra_priority	= 100,
-	.base.cra_flags		= CRYPTO_ALG_TYPE_SHASH,
+	.base.cra_priority	= 125,
 	.base.cra_blocksize	= SHA224_BLOCK_SIZE,
 	.base.cra_module	= THIS_MODULE,
 } };
@@ -153,7 +151,6 @@
 	.base.cra_name		= "sha256",
 	.base.cra_driver_name	= "sha256-arm64-neon",
 	.base.cra_priority	= 150,
-	.base.cra_flags		= CRYPTO_ALG_TYPE_SHASH,
 	.base.cra_blocksize	= SHA256_BLOCK_SIZE,
 	.base.cra_module	= THIS_MODULE,
 }, {
@@ -166,7 +163,6 @@
 	.base.cra_name		= "sha224",
 	.base.cra_driver_name	= "sha224-arm64-neon",
 	.base.cra_priority	= 150,
-	.base.cra_flags		= CRYPTO_ALG_TYPE_SHASH,
 	.base.cra_blocksize	= SHA224_BLOCK_SIZE,
 	.base.cra_module	= THIS_MODULE,
 } };
diff --git a/arch/arm64/crypto/sha3-ce-glue.c b/arch/arm64/crypto/sha3-ce-glue.c
index da8222e..a336fea 100644
--- a/arch/arm64/crypto/sha3-ce-glue.c
+++ b/arch/arm64/crypto/sha3-ce-glue.c
@@ -105,7 +105,6 @@
 	.descsize		= sizeof(struct sha3_state),
 	.base.cra_name		= "sha3-224",
 	.base.cra_driver_name	= "sha3-224-ce",
-	.base.cra_flags		= CRYPTO_ALG_TYPE_SHASH,
 	.base.cra_blocksize	= SHA3_224_BLOCK_SIZE,
 	.base.cra_module	= THIS_MODULE,
 	.base.cra_priority	= 200,
@@ -117,7 +116,6 @@
 	.descsize		= sizeof(struct sha3_state),
 	.base.cra_name		= "sha3-256",
 	.base.cra_driver_name	= "sha3-256-ce",
-	.base.cra_flags		= CRYPTO_ALG_TYPE_SHASH,
 	.base.cra_blocksize	= SHA3_256_BLOCK_SIZE,
 	.base.cra_module	= THIS_MODULE,
 	.base.cra_priority	= 200,
@@ -129,7 +127,6 @@
 	.descsize		= sizeof(struct sha3_state),
 	.base.cra_name		= "sha3-384",
 	.base.cra_driver_name	= "sha3-384-ce",
-	.base.cra_flags		= CRYPTO_ALG_TYPE_SHASH,
 	.base.cra_blocksize	= SHA3_384_BLOCK_SIZE,
 	.base.cra_module	= THIS_MODULE,
 	.base.cra_priority	= 200,
@@ -141,7 +138,6 @@
 	.descsize		= sizeof(struct sha3_state),
 	.base.cra_name		= "sha3-512",
 	.base.cra_driver_name	= "sha3-512-ce",
-	.base.cra_flags		= CRYPTO_ALG_TYPE_SHASH,
 	.base.cra_blocksize	= SHA3_512_BLOCK_SIZE,
 	.base.cra_module	= THIS_MODULE,
 	.base.cra_priority	= 200,
diff --git a/arch/arm64/crypto/sha512-ce-glue.c b/arch/arm64/crypto/sha512-ce-glue.c
index a77c863..f2c5f28 100644
--- a/arch/arm64/crypto/sha512-ce-glue.c
+++ b/arch/arm64/crypto/sha512-ce-glue.c
@@ -87,7 +87,6 @@
 	.base.cra_name		= "sha384",
 	.base.cra_driver_name	= "sha384-ce",
 	.base.cra_priority	= 200,
-	.base.cra_flags		= CRYPTO_ALG_TYPE_SHASH,
 	.base.cra_blocksize	= SHA512_BLOCK_SIZE,
 	.base.cra_module	= THIS_MODULE,
 }, {
@@ -100,7 +99,6 @@
 	.base.cra_name		= "sha512",
 	.base.cra_driver_name	= "sha512-ce",
 	.base.cra_priority	= 200,
-	.base.cra_flags		= CRYPTO_ALG_TYPE_SHASH,
 	.base.cra_blocksize	= SHA512_BLOCK_SIZE,
 	.base.cra_module	= THIS_MODULE,
 } };
diff --git a/arch/arm64/crypto/sha512-glue.c b/arch/arm64/crypto/sha512-glue.c
index 27db485..325b23b4 100644
--- a/arch/arm64/crypto/sha512-glue.c
+++ b/arch/arm64/crypto/sha512-glue.c
@@ -63,7 +63,6 @@
 	.base.cra_name		= "sha512",
 	.base.cra_driver_name	= "sha512-arm64",
 	.base.cra_priority	= 150,
-	.base.cra_flags		= CRYPTO_ALG_TYPE_SHASH,
 	.base.cra_blocksize	= SHA512_BLOCK_SIZE,
 	.base.cra_module	= THIS_MODULE,
 }, {
@@ -76,7 +75,6 @@
 	.base.cra_name		= "sha384",
 	.base.cra_driver_name	= "sha384-arm64",
 	.base.cra_priority	= 150,
-	.base.cra_flags		= CRYPTO_ALG_TYPE_SHASH,
 	.base.cra_blocksize	= SHA384_BLOCK_SIZE,
 	.base.cra_module	= THIS_MODULE,
 } };
diff --git a/arch/arm64/crypto/sm3-ce-glue.c b/arch/arm64/crypto/sm3-ce-glue.c
index 3b4948f..88938a2 100644
--- a/arch/arm64/crypto/sm3-ce-glue.c
+++ b/arch/arm64/crypto/sm3-ce-glue.c
@@ -72,7 +72,6 @@
 	.descsize		= sizeof(struct sm3_state),
 	.base.cra_name		= "sm3",
 	.base.cra_driver_name	= "sm3-ce",
-	.base.cra_flags		= CRYPTO_ALG_TYPE_SHASH,
 	.base.cra_blocksize	= SM3_BLOCK_SIZE,
 	.base.cra_module	= THIS_MODULE,
 	.base.cra_priority	= 200,
diff --git a/arch/mips/cavium-octeon/crypto/octeon-md5.c b/arch/mips/cavium-octeon/crypto/octeon-md5.c
index af4c712..d1ed066 100644
--- a/arch/mips/cavium-octeon/crypto/octeon-md5.c
+++ b/arch/mips/cavium-octeon/crypto/octeon-md5.c
@@ -182,7 +182,6 @@
 		.cra_name	=	"md5",
 		.cra_driver_name=	"octeon-md5",
 		.cra_priority	=	OCTEON_CR_OPCODE_PRIORITY,
-		.cra_flags	=	CRYPTO_ALG_TYPE_SHASH,
 		.cra_blocksize	=	MD5_HMAC_BLOCK_SIZE,
 		.cra_module	=	THIS_MODULE,
 	}
diff --git a/arch/mips/cavium-octeon/crypto/octeon-sha1.c b/arch/mips/cavium-octeon/crypto/octeon-sha1.c
index 2b74b5b..80d71e7 100644
--- a/arch/mips/cavium-octeon/crypto/octeon-sha1.c
+++ b/arch/mips/cavium-octeon/crypto/octeon-sha1.c
@@ -215,7 +215,6 @@
 		.cra_name	=	"sha1",
 		.cra_driver_name=	"octeon-sha1",
 		.cra_priority	=	OCTEON_CR_OPCODE_PRIORITY,
-		.cra_flags	=	CRYPTO_ALG_TYPE_SHASH,
 		.cra_blocksize	=	SHA1_BLOCK_SIZE,
 		.cra_module	=	THIS_MODULE,
 	}
diff --git a/arch/mips/cavium-octeon/crypto/octeon-sha256.c b/arch/mips/cavium-octeon/crypto/octeon-sha256.c
index 97e96fe..8b931e6 100644
--- a/arch/mips/cavium-octeon/crypto/octeon-sha256.c
+++ b/arch/mips/cavium-octeon/crypto/octeon-sha256.c
@@ -239,7 +239,6 @@
 		.cra_name	=	"sha256",
 		.cra_driver_name=	"octeon-sha256",
 		.cra_priority	=	OCTEON_CR_OPCODE_PRIORITY,
-		.cra_flags	=	CRYPTO_ALG_TYPE_SHASH,
 		.cra_blocksize	=	SHA256_BLOCK_SIZE,
 		.cra_module	=	THIS_MODULE,
 	}
@@ -252,7 +251,6 @@
 	.base		=	{
 		.cra_name	=	"sha224",
 		.cra_driver_name=	"octeon-sha224",
-		.cra_flags	=	CRYPTO_ALG_TYPE_SHASH,
 		.cra_blocksize	=	SHA224_BLOCK_SIZE,
 		.cra_module	=	THIS_MODULE,
 	}
diff --git a/arch/mips/cavium-octeon/crypto/octeon-sha512.c b/arch/mips/cavium-octeon/crypto/octeon-sha512.c
index d5fb3c6..6c95614 100644
--- a/arch/mips/cavium-octeon/crypto/octeon-sha512.c
+++ b/arch/mips/cavium-octeon/crypto/octeon-sha512.c
@@ -235,7 +235,6 @@
 		.cra_name	=	"sha512",
 		.cra_driver_name=	"octeon-sha512",
 		.cra_priority	=	OCTEON_CR_OPCODE_PRIORITY,
-		.cra_flags	=	CRYPTO_ALG_TYPE_SHASH,
 		.cra_blocksize	=	SHA512_BLOCK_SIZE,
 		.cra_module	=	THIS_MODULE,
 	}
@@ -249,7 +248,6 @@
 		.cra_name	=	"sha384",
 		.cra_driver_name=	"octeon-sha384",
 		.cra_priority	=	OCTEON_CR_OPCODE_PRIORITY,
-		.cra_flags	=	CRYPTO_ALG_TYPE_SHASH,
 		.cra_blocksize	=	SHA384_BLOCK_SIZE,
 		.cra_module	=	THIS_MODULE,
 	}
diff --git a/arch/powerpc/crypto/md5-glue.c b/arch/powerpc/crypto/md5-glue.c
index 9228967..7e44cec 100644
--- a/arch/powerpc/crypto/md5-glue.c
+++ b/arch/powerpc/crypto/md5-glue.c
@@ -139,7 +139,6 @@
 		.cra_name	=	"md5",
 		.cra_driver_name=	"md5-ppc",
 		.cra_priority	=	200,
-		.cra_flags	=	CRYPTO_ALG_TYPE_SHASH,
 		.cra_blocksize	=	MD5_HMAC_BLOCK_SIZE,
 		.cra_module	=	THIS_MODULE,
 	}
diff --git a/arch/powerpc/crypto/sha1-spe-glue.c b/arch/powerpc/crypto/sha1-spe-glue.c
index f9ebc38..9e1814d 100644
--- a/arch/powerpc/crypto/sha1-spe-glue.c
+++ b/arch/powerpc/crypto/sha1-spe-glue.c
@@ -185,7 +185,6 @@
 		.cra_name	=	"sha1",
 		.cra_driver_name=	"sha1-ppc-spe",
 		.cra_priority	=	300,
-		.cra_flags	=	CRYPTO_ALG_TYPE_SHASH,
 		.cra_blocksize	=	SHA1_BLOCK_SIZE,
 		.cra_module	=	THIS_MODULE,
 	}
diff --git a/arch/powerpc/crypto/sha1.c b/arch/powerpc/crypto/sha1.c
index c154ceb..3911d5c 100644
--- a/arch/powerpc/crypto/sha1.c
+++ b/arch/powerpc/crypto/sha1.c
@@ -132,7 +132,6 @@
 	.base		=	{
 		.cra_name	=	"sha1",
 		.cra_driver_name=	"sha1-powerpc",
-		.cra_flags	=	CRYPTO_ALG_TYPE_SHASH,
 		.cra_blocksize	=	SHA1_BLOCK_SIZE,
 		.cra_module	=	THIS_MODULE,
 	}
diff --git a/arch/powerpc/crypto/sha256-spe-glue.c b/arch/powerpc/crypto/sha256-spe-glue.c
index 718a079..6227888 100644
--- a/arch/powerpc/crypto/sha256-spe-glue.c
+++ b/arch/powerpc/crypto/sha256-spe-glue.c
@@ -231,7 +231,6 @@
 		.cra_name	=	"sha256",
 		.cra_driver_name=	"sha256-ppc-spe",
 		.cra_priority	=	300,
-		.cra_flags	=	CRYPTO_ALG_TYPE_SHASH,
 		.cra_blocksize	=	SHA256_BLOCK_SIZE,
 		.cra_module	=	THIS_MODULE,
 	}
@@ -248,7 +247,6 @@
 		.cra_name	=	"sha224",
 		.cra_driver_name=	"sha224-ppc-spe",
 		.cra_priority	=	300,
-		.cra_flags	=	CRYPTO_ALG_TYPE_SHASH,
 		.cra_blocksize	=	SHA224_BLOCK_SIZE,
 		.cra_module	=	THIS_MODULE,
 	}
diff --git a/arch/s390/crypto/aes_s390.c b/arch/s390/crypto/aes_s390.c
index ad47abd..c54cb26 100644
--- a/arch/s390/crypto/aes_s390.c
+++ b/arch/s390/crypto/aes_s390.c
@@ -1035,7 +1035,6 @@
 	.chunksize		= AES_BLOCK_SIZE,
 
 	.base			= {
-		.cra_flags		= CRYPTO_ALG_TYPE_AEAD,
 		.cra_blocksize		= 1,
 		.cra_ctxsize		= sizeof(struct s390_aes_ctx),
 		.cra_priority		= 900,
diff --git a/arch/s390/crypto/ghash_s390.c b/arch/s390/crypto/ghash_s390.c
index 3b7f96c..86aed30 100644
--- a/arch/s390/crypto/ghash_s390.c
+++ b/arch/s390/crypto/ghash_s390.c
@@ -128,7 +128,6 @@
 		.cra_name		= "ghash",
 		.cra_driver_name	= "ghash-s390",
 		.cra_priority		= 300,
-		.cra_flags		= CRYPTO_ALG_TYPE_SHASH,
 		.cra_blocksize		= GHASH_BLOCK_SIZE,
 		.cra_ctxsize		= sizeof(struct ghash_ctx),
 		.cra_module		= THIS_MODULE,
diff --git a/arch/s390/crypto/sha1_s390.c b/arch/s390/crypto/sha1_s390.c
index a00c17f..009572e 100644
--- a/arch/s390/crypto/sha1_s390.c
+++ b/arch/s390/crypto/sha1_s390.c
@@ -78,7 +78,6 @@
 		.cra_name	=	"sha1",
 		.cra_driver_name=	"sha1-s390",
 		.cra_priority	=	300,
-		.cra_flags	=	CRYPTO_ALG_TYPE_SHASH,
 		.cra_blocksize	=	SHA1_BLOCK_SIZE,
 		.cra_module	=	THIS_MODULE,
 	}
diff --git a/arch/s390/crypto/sha256_s390.c b/arch/s390/crypto/sha256_s390.c
index 944aa6b..62833a1 100644
--- a/arch/s390/crypto/sha256_s390.c
+++ b/arch/s390/crypto/sha256_s390.c
@@ -71,7 +71,6 @@
 		.cra_name	=	"sha256",
 		.cra_driver_name=	"sha256-s390",
 		.cra_priority	=	300,
-		.cra_flags	=	CRYPTO_ALG_TYPE_SHASH,
 		.cra_blocksize	=	SHA256_BLOCK_SIZE,
 		.cra_module	=	THIS_MODULE,
 	}
@@ -108,7 +107,6 @@
 		.cra_name	=	"sha224",
 		.cra_driver_name=	"sha224-s390",
 		.cra_priority	=	300,
-		.cra_flags	=	CRYPTO_ALG_TYPE_SHASH,
 		.cra_blocksize	=	SHA224_BLOCK_SIZE,
 		.cra_module	=	THIS_MODULE,
 	}
diff --git a/arch/s390/crypto/sha512_s390.c b/arch/s390/crypto/sha512_s390.c
index b17eded..be589c3 100644
--- a/arch/s390/crypto/sha512_s390.c
+++ b/arch/s390/crypto/sha512_s390.c
@@ -76,7 +76,6 @@
 		.cra_name	=	"sha512",
 		.cra_driver_name=	"sha512-s390",
 		.cra_priority	=	300,
-		.cra_flags	=	CRYPTO_ALG_TYPE_SHASH,
 		.cra_blocksize	=	SHA512_BLOCK_SIZE,
 		.cra_module	=	THIS_MODULE,
 	}
@@ -115,7 +114,6 @@
 		.cra_name	=	"sha384",
 		.cra_driver_name=	"sha384-s390",
 		.cra_priority	=	300,
-		.cra_flags	=	CRYPTO_ALG_TYPE_SHASH,
 		.cra_blocksize	=	SHA384_BLOCK_SIZE,
 		.cra_ctxsize	=	sizeof(struct s390_sha_ctx),
 		.cra_module	=	THIS_MODULE,
diff --git a/arch/sparc/crypto/md5_glue.c b/arch/sparc/crypto/md5_glue.c
index c9d2b92..bc9cc26 100644
--- a/arch/sparc/crypto/md5_glue.c
+++ b/arch/sparc/crypto/md5_glue.c
@@ -144,7 +144,6 @@
 		.cra_name	=	"md5",
 		.cra_driver_name=	"md5-sparc64",
 		.cra_priority	=	SPARC_CR_OPCODE_PRIORITY,
-		.cra_flags	=	CRYPTO_ALG_TYPE_SHASH,
 		.cra_blocksize	=	MD5_HMAC_BLOCK_SIZE,
 		.cra_module	=	THIS_MODULE,
 	}
diff --git a/arch/sparc/crypto/sha1_glue.c b/arch/sparc/crypto/sha1_glue.c
index 1b3e47a..4d6d7fa 100644
--- a/arch/sparc/crypto/sha1_glue.c
+++ b/arch/sparc/crypto/sha1_glue.c
@@ -139,7 +139,6 @@
 		.cra_name	=	"sha1",
 		.cra_driver_name=	"sha1-sparc64",
 		.cra_priority	=	SPARC_CR_OPCODE_PRIORITY,
-		.cra_flags	=	CRYPTO_ALG_TYPE_SHASH,
 		.cra_blocksize	=	SHA1_BLOCK_SIZE,
 		.cra_module	=	THIS_MODULE,
 	}
diff --git a/arch/sparc/crypto/sha256_glue.c b/arch/sparc/crypto/sha256_glue.c
index 285268c..54c4de2 100644
--- a/arch/sparc/crypto/sha256_glue.c
+++ b/arch/sparc/crypto/sha256_glue.c
@@ -169,7 +169,6 @@
 		.cra_name	=	"sha256",
 		.cra_driver_name=	"sha256-sparc64",
 		.cra_priority	=	SPARC_CR_OPCODE_PRIORITY,
-		.cra_flags	=	CRYPTO_ALG_TYPE_SHASH,
 		.cra_blocksize	=	SHA256_BLOCK_SIZE,
 		.cra_module	=	THIS_MODULE,
 	}
@@ -185,7 +184,6 @@
 		.cra_name	=	"sha224",
 		.cra_driver_name=	"sha224-sparc64",
 		.cra_priority	=	SPARC_CR_OPCODE_PRIORITY,
-		.cra_flags	=	CRYPTO_ALG_TYPE_SHASH,
 		.cra_blocksize	=	SHA224_BLOCK_SIZE,
 		.cra_module	=	THIS_MODULE,
 	}
diff --git a/arch/sparc/crypto/sha512_glue.c b/arch/sparc/crypto/sha512_glue.c
index 11eb36c..4c55e97 100644
--- a/arch/sparc/crypto/sha512_glue.c
+++ b/arch/sparc/crypto/sha512_glue.c
@@ -154,7 +154,6 @@
 		.cra_name	=	"sha512",
 		.cra_driver_name=	"sha512-sparc64",
 		.cra_priority	=	SPARC_CR_OPCODE_PRIORITY,
-		.cra_flags	=	CRYPTO_ALG_TYPE_SHASH,
 		.cra_blocksize	=	SHA512_BLOCK_SIZE,
 		.cra_module	=	THIS_MODULE,
 	}
@@ -170,7 +169,6 @@
 		.cra_name	=	"sha384",
 		.cra_driver_name=	"sha384-sparc64",
 		.cra_priority	=	SPARC_CR_OPCODE_PRIORITY,
-		.cra_flags	=	CRYPTO_ALG_TYPE_SHASH,
 		.cra_blocksize	=	SHA384_BLOCK_SIZE,
 		.cra_module	=	THIS_MODULE,
 	}
diff --git a/arch/x86/crypto/ghash-clmulni-intel_glue.c b/arch/x86/crypto/ghash-clmulni-intel_glue.c
index 2ddbe3a..3582ae8 100644
--- a/arch/x86/crypto/ghash-clmulni-intel_glue.c
+++ b/arch/x86/crypto/ghash-clmulni-intel_glue.c
@@ -154,8 +154,7 @@
 		.cra_name		= "__ghash",
 		.cra_driver_name	= "__ghash-pclmulqdqni",
 		.cra_priority		= 0,
-		.cra_flags		= CRYPTO_ALG_TYPE_SHASH |
-					  CRYPTO_ALG_INTERNAL,
+		.cra_flags		= CRYPTO_ALG_INTERNAL,
 		.cra_blocksize		= GHASH_BLOCK_SIZE,
 		.cra_ctxsize		= sizeof(struct ghash_ctx),
 		.cra_module		= THIS_MODULE,
@@ -315,9 +314,8 @@
 			.cra_driver_name	= "ghash-clmulni",
 			.cra_priority		= 400,
 			.cra_ctxsize		= sizeof(struct ghash_async_ctx),
-			.cra_flags		= CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC,
+			.cra_flags		= CRYPTO_ALG_ASYNC,
 			.cra_blocksize		= GHASH_BLOCK_SIZE,
-			.cra_type		= &crypto_ahash_type,
 			.cra_module		= THIS_MODULE,
 			.cra_init		= ghash_async_init_tfm,
 			.cra_exit		= ghash_async_exit_tfm,
diff --git a/arch/x86/crypto/poly1305_glue.c b/arch/x86/crypto/poly1305_glue.c
index 7903777..f012b7e 100644
--- a/arch/x86/crypto/poly1305_glue.c
+++ b/arch/x86/crypto/poly1305_glue.c
@@ -169,7 +169,6 @@
 		.cra_name		= "poly1305",
 		.cra_driver_name	= "poly1305-simd",
 		.cra_priority		= 300,
-		.cra_flags		= CRYPTO_ALG_TYPE_SHASH,
 		.cra_blocksize		= POLY1305_BLOCK_SIZE,
 		.cra_module		= THIS_MODULE,
 	},
diff --git a/arch/x86/crypto/sha1-mb/sha1_mb.c b/arch/x86/crypto/sha1-mb/sha1_mb.c
index e17655f..b938056 100644
--- a/arch/x86/crypto/sha1-mb/sha1_mb.c
+++ b/arch/x86/crypto/sha1-mb/sha1_mb.c
@@ -746,9 +746,8 @@
 			 * algo may not have completed before hashing thread
 			 * sleep
 			 */
-			.cra_flags	= CRYPTO_ALG_TYPE_AHASH |
-						CRYPTO_ALG_ASYNC |
-						CRYPTO_ALG_INTERNAL,
+			.cra_flags	= CRYPTO_ALG_ASYNC |
+					  CRYPTO_ALG_INTERNAL,
 			.cra_blocksize	= SHA1_BLOCK_SIZE,
 			.cra_module	= THIS_MODULE,
 			.cra_list	= LIST_HEAD_INIT
@@ -871,10 +870,16 @@
 		.base = {
 			.cra_name               = "sha1",
 			.cra_driver_name        = "sha1_mb",
-			.cra_priority           = 200,
-			.cra_flags              = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC,
+			/*
+			 * Low priority, since with few concurrent hash requests
+			 * this is extremely slow due to the flush delay.  Users
+			 * whose workloads would benefit from this can request
+			 * it explicitly by driver name, or can increase its
+			 * priority at runtime using NETLINK_CRYPTO.
+			 */
+			.cra_priority           = 50,
+			.cra_flags              = CRYPTO_ALG_ASYNC,
 			.cra_blocksize          = SHA1_BLOCK_SIZE,
-			.cra_type               = &crypto_ahash_type,
 			.cra_module             = THIS_MODULE,
 			.cra_list               = LIST_HEAD_INIT(sha1_mb_async_alg.halg.base.cra_list),
 			.cra_init               = sha1_mb_async_init_tfm,
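
(With the priority dropped below the generic and SHA-NI implementations, a
plain crypto_alloc_ahash("sha1", 0, 0) no longer resolves to the multibuffer
code. A usage sketch for callers who do want it, selecting by driver name:

	#include <crypto/hash.h>

	struct crypto_ahash *tfm;

	/* request the multibuffer implementation explicitly */
	tfm = crypto_alloc_ahash("sha1_mb", 0, 0);
	if (IS_ERR(tfm))
		/* fall back to plain "sha1" */;

Alternatively, as the comment notes, the priority can be raised at runtime
through the NETLINK_CRYPTO (crypto_user) interface.)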
diff --git a/arch/x86/crypto/sha1_ssse3_glue.c b/arch/x86/crypto/sha1_ssse3_glue.c
index fc61739..7391c7d 100644
--- a/arch/x86/crypto/sha1_ssse3_glue.c
+++ b/arch/x86/crypto/sha1_ssse3_glue.c
@@ -104,7 +104,6 @@
 		.cra_name	=	"sha1",
 		.cra_driver_name =	"sha1-ssse3",
 		.cra_priority	=	150,
-		.cra_flags	=	CRYPTO_ALG_TYPE_SHASH,
 		.cra_blocksize	=	SHA1_BLOCK_SIZE,
 		.cra_module	=	THIS_MODULE,
 	}
@@ -157,7 +156,6 @@
 		.cra_name	=	"sha1",
 		.cra_driver_name =	"sha1-avx",
 		.cra_priority	=	160,
-		.cra_flags	=	CRYPTO_ALG_TYPE_SHASH,
 		.cra_blocksize	=	SHA1_BLOCK_SIZE,
 		.cra_module	=	THIS_MODULE,
 	}
@@ -249,7 +247,6 @@
 		.cra_name	=	"sha1",
 		.cra_driver_name =	"sha1-avx2",
 		.cra_priority	=	170,
-		.cra_flags	=	CRYPTO_ALG_TYPE_SHASH,
 		.cra_blocksize	=	SHA1_BLOCK_SIZE,
 		.cra_module	=	THIS_MODULE,
 	}
@@ -307,7 +304,6 @@
 		.cra_name	=	"sha1",
 		.cra_driver_name =	"sha1-ni",
 		.cra_priority	=	250,
-		.cra_flags	=	CRYPTO_ALG_TYPE_SHASH,
 		.cra_blocksize	=	SHA1_BLOCK_SIZE,
 		.cra_module	=	THIS_MODULE,
 	}
diff --git a/arch/x86/crypto/sha256-mb/sha256_mb.c b/arch/x86/crypto/sha256-mb/sha256_mb.c
index 4c46ac1..97c5fc4 100644
--- a/arch/x86/crypto/sha256-mb/sha256_mb.c
+++ b/arch/x86/crypto/sha256-mb/sha256_mb.c
@@ -745,9 +745,8 @@
 			 * algo may not have completed before hashing thread
 			 * sleep
 			 */
-			.cra_flags	= CRYPTO_ALG_TYPE_AHASH |
-						CRYPTO_ALG_ASYNC |
-						CRYPTO_ALG_INTERNAL,
+			.cra_flags	= CRYPTO_ALG_ASYNC |
+					  CRYPTO_ALG_INTERNAL,
 			.cra_blocksize	= SHA256_BLOCK_SIZE,
 			.cra_module	= THIS_MODULE,
 			.cra_list	= LIST_HEAD_INIT
@@ -870,11 +869,16 @@
 		.base = {
 			.cra_name               = "sha256",
 			.cra_driver_name        = "sha256_mb",
-			.cra_priority           = 200,
-			.cra_flags              = CRYPTO_ALG_TYPE_AHASH |
-							CRYPTO_ALG_ASYNC,
+			/*
+			 * Low priority, since with few concurrent hash requests
+			 * this is extremely slow due to the flush delay.  Users
+			 * whose workloads would benefit from this can request
+			 * it explicitly by driver name, or can increase its
+			 * priority at runtime using NETLINK_CRYPTO.
+			 */
+			.cra_priority           = 50,
+			.cra_flags              = CRYPTO_ALG_ASYNC,
 			.cra_blocksize          = SHA256_BLOCK_SIZE,
-			.cra_type               = &crypto_ahash_type,
 			.cra_module             = THIS_MODULE,
 			.cra_list               = LIST_HEAD_INIT
 				(sha256_mb_async_alg.halg.base.cra_list),
diff --git a/arch/x86/crypto/sha256-mb/sha256_mb_mgr_flush_avx2.S b/arch/x86/crypto/sha256-mb/sha256_mb_mgr_flush_avx2.S
index 16c4ccb..d2364c5 100644
--- a/arch/x86/crypto/sha256-mb/sha256_mb_mgr_flush_avx2.S
+++ b/arch/x86/crypto/sha256-mb/sha256_mb_mgr_flush_avx2.S
@@ -265,7 +265,7 @@
 	vpinsrd	$1, _args_digest+1*32(state, idx, 4), %xmm0, %xmm0
 	vpinsrd	$2, _args_digest+2*32(state, idx, 4), %xmm0, %xmm0
 	vpinsrd	$3, _args_digest+3*32(state, idx, 4), %xmm0, %xmm0
-	vmovd   _args_digest(state , idx, 4) , %xmm0
+	vmovd	_args_digest+4*32(state, idx, 4), %xmm1
 	vpinsrd	$1, _args_digest+5*32(state, idx, 4), %xmm1, %xmm1
 	vpinsrd	$2, _args_digest+6*32(state, idx, 4), %xmm1, %xmm1
 	vpinsrd	$3, _args_digest+7*32(state, idx, 4), %xmm1, %xmm1
diff --git a/arch/x86/crypto/sha256_ssse3_glue.c b/arch/x86/crypto/sha256_ssse3_glue.c
index 9e79baf..773a873 100644
--- a/arch/x86/crypto/sha256_ssse3_glue.c
+++ b/arch/x86/crypto/sha256_ssse3_glue.c
@@ -109,7 +109,6 @@
 		.cra_name	=	"sha256",
 		.cra_driver_name =	"sha256-ssse3",
 		.cra_priority	=	150,
-		.cra_flags	=	CRYPTO_ALG_TYPE_SHASH,
 		.cra_blocksize	=	SHA256_BLOCK_SIZE,
 		.cra_module	=	THIS_MODULE,
 	}
@@ -124,7 +123,6 @@
 		.cra_name	=	"sha224",
 		.cra_driver_name =	"sha224-ssse3",
 		.cra_priority	=	150,
-		.cra_flags	=	CRYPTO_ALG_TYPE_SHASH,
 		.cra_blocksize	=	SHA224_BLOCK_SIZE,
 		.cra_module	=	THIS_MODULE,
 	}
@@ -177,7 +175,6 @@
 		.cra_name	=	"sha256",
 		.cra_driver_name =	"sha256-avx",
 		.cra_priority	=	160,
-		.cra_flags	=	CRYPTO_ALG_TYPE_SHASH,
 		.cra_blocksize	=	SHA256_BLOCK_SIZE,
 		.cra_module	=	THIS_MODULE,
 	}
@@ -192,7 +189,6 @@
 		.cra_name	=	"sha224",
 		.cra_driver_name =	"sha224-avx",
 		.cra_priority	=	160,
-		.cra_flags	=	CRYPTO_ALG_TYPE_SHASH,
 		.cra_blocksize	=	SHA224_BLOCK_SIZE,
 		.cra_module	=	THIS_MODULE,
 	}
@@ -261,7 +257,6 @@
 		.cra_name	=	"sha256",
 		.cra_driver_name =	"sha256-avx2",
 		.cra_priority	=	170,
-		.cra_flags	=	CRYPTO_ALG_TYPE_SHASH,
 		.cra_blocksize	=	SHA256_BLOCK_SIZE,
 		.cra_module	=	THIS_MODULE,
 	}
@@ -276,7 +271,6 @@
 		.cra_name	=	"sha224",
 		.cra_driver_name =	"sha224-avx2",
 		.cra_priority	=	170,
-		.cra_flags	=	CRYPTO_ALG_TYPE_SHASH,
 		.cra_blocksize	=	SHA224_BLOCK_SIZE,
 		.cra_module	=	THIS_MODULE,
 	}
@@ -343,7 +337,6 @@
 		.cra_name	=	"sha256",
 		.cra_driver_name =	"sha256-ni",
 		.cra_priority	=	250,
-		.cra_flags	=	CRYPTO_ALG_TYPE_SHASH,
 		.cra_blocksize	=	SHA256_BLOCK_SIZE,
 		.cra_module	=	THIS_MODULE,
 	}
@@ -358,7 +351,6 @@
 		.cra_name	=	"sha224",
 		.cra_driver_name =	"sha224-ni",
 		.cra_priority	=	250,
-		.cra_flags	=	CRYPTO_ALG_TYPE_SHASH,
 		.cra_blocksize	=	SHA224_BLOCK_SIZE,
 		.cra_module	=	THIS_MODULE,
 	}
diff --git a/arch/x86/crypto/sha512-mb/sha512_mb.c b/arch/x86/crypto/sha512-mb/sha512_mb.c
index 39e2bbd..26b8567 100644
--- a/arch/x86/crypto/sha512-mb/sha512_mb.c
+++ b/arch/x86/crypto/sha512-mb/sha512_mb.c
@@ -778,9 +778,8 @@
 			 * algo may not have completed before hashing thread
 			 * sleep
 			 */
-			.cra_flags	= CRYPTO_ALG_TYPE_AHASH |
-						CRYPTO_ALG_ASYNC |
-						CRYPTO_ALG_INTERNAL,
+			.cra_flags	= CRYPTO_ALG_ASYNC |
+					  CRYPTO_ALG_INTERNAL,
 			.cra_blocksize	= SHA512_BLOCK_SIZE,
 			.cra_module	= THIS_MODULE,
 			.cra_list	= LIST_HEAD_INIT
@@ -904,11 +903,16 @@
 		.base = {
 			.cra_name               = "sha512",
 			.cra_driver_name        = "sha512_mb",
-			.cra_priority           = 200,
-			.cra_flags              = CRYPTO_ALG_TYPE_AHASH |
-							CRYPTO_ALG_ASYNC,
+			/*
+			 * Low priority, since with few concurrent hash requests
+			 * this is extremely slow due to the flush delay.  Users
+			 * whose workloads would benefit from this can request
+			 * it explicitly by driver name, or can increase its
+			 * priority at runtime using NETLINK_CRYPTO.
+			 */
+			.cra_priority           = 50,
+			.cra_flags              = CRYPTO_ALG_ASYNC,
 			.cra_blocksize          = SHA512_BLOCK_SIZE,
-			.cra_type               = &crypto_ahash_type,
 			.cra_module             = THIS_MODULE,
 			.cra_list               = LIST_HEAD_INIT
 				(sha512_mb_async_alg.halg.base.cra_list),
diff --git a/arch/x86/crypto/sha512_ssse3_glue.c b/arch/x86/crypto/sha512_ssse3_glue.c
index 2b0e2a6..f1b811b 100644
--- a/arch/x86/crypto/sha512_ssse3_glue.c
+++ b/arch/x86/crypto/sha512_ssse3_glue.c
@@ -109,7 +109,6 @@
 		.cra_name	=	"sha512",
 		.cra_driver_name =	"sha512-ssse3",
 		.cra_priority	=	150,
-		.cra_flags	=	CRYPTO_ALG_TYPE_SHASH,
 		.cra_blocksize	=	SHA512_BLOCK_SIZE,
 		.cra_module	=	THIS_MODULE,
 	}
@@ -124,7 +123,6 @@
 		.cra_name	=	"sha384",
 		.cra_driver_name =	"sha384-ssse3",
 		.cra_priority	=	150,
-		.cra_flags	=	CRYPTO_ALG_TYPE_SHASH,
 		.cra_blocksize	=	SHA384_BLOCK_SIZE,
 		.cra_module	=	THIS_MODULE,
 	}
@@ -188,7 +186,6 @@
 		.cra_name	=	"sha512",
 		.cra_driver_name =	"sha512-avx",
 		.cra_priority	=	160,
-		.cra_flags	=	CRYPTO_ALG_TYPE_SHASH,
 		.cra_blocksize	=	SHA512_BLOCK_SIZE,
 		.cra_module	=	THIS_MODULE,
 	}
@@ -203,7 +200,6 @@
 		.cra_name	=	"sha384",
 		.cra_driver_name =	"sha384-avx",
 		.cra_priority	=	160,
-		.cra_flags	=	CRYPTO_ALG_TYPE_SHASH,
 		.cra_blocksize	=	SHA384_BLOCK_SIZE,
 		.cra_module	=	THIS_MODULE,
 	}
@@ -261,7 +257,6 @@
 		.cra_name	=	"sha512",
 		.cra_driver_name =	"sha512-avx2",
 		.cra_priority	=	170,
-		.cra_flags	=	CRYPTO_ALG_TYPE_SHASH,
 		.cra_blocksize	=	SHA512_BLOCK_SIZE,
 		.cra_module	=	THIS_MODULE,
 	}
@@ -276,7 +271,6 @@
 		.cra_name	=	"sha384",
 		.cra_driver_name =	"sha384-avx2",
 		.cra_priority	=	170,
-		.cra_flags	=	CRYPTO_ALG_TYPE_SHASH,
 		.cra_blocksize	=	SHA384_BLOCK_SIZE,
 		.cra_module	=	THIS_MODULE,
 	}
diff --git a/crypto/ablkcipher.c b/crypto/ablkcipher.c
index d880a48..1edb500 100644
--- a/crypto/ablkcipher.c
+++ b/crypto/ablkcipher.c
@@ -373,6 +373,7 @@
 	strncpy(rblkcipher.type, "ablkcipher", sizeof(rblkcipher.type));
 	strncpy(rblkcipher.geniv, alg->cra_ablkcipher.geniv ?: "<default>",
 		sizeof(rblkcipher.geniv));
+	rblkcipher.geniv[sizeof(rblkcipher.geniv) - 1] = '\0';
 
 	rblkcipher.blocksize = alg->cra_blocksize;
 	rblkcipher.min_keysize = alg->cra_ablkcipher.min_keysize;
@@ -447,6 +448,7 @@
 	strncpy(rblkcipher.type, "givcipher", sizeof(rblkcipher.type));
 	strncpy(rblkcipher.geniv, alg->cra_ablkcipher.geniv ?: "<built-in>",
 		sizeof(rblkcipher.geniv));
+	rblkcipher.geniv[sizeof(rblkcipher.geniv) - 1] = '\0';
 
 	rblkcipher.blocksize = alg->cra_blocksize;
 	rblkcipher.min_keysize = alg->cra_ablkcipher.min_keysize;
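
(This fix, repeated in blkcipher.c below, addresses the classic strncpy()
pitfall: when the source string is at least as long as the destination, no
terminating NUL is written, and the struct is then copied to user space with
an unterminated string. In miniature, with a hypothetical 8-byte buffer:

	char geniv[8];

	strncpy(geniv, "chainiv-longname", sizeof(geniv));
	/* geniv now holds 8 bytes and no terminating NUL */
	geniv[sizeof(geniv) - 1] = '\0';	/* the fix applied above */
)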
diff --git a/crypto/aegis128.c b/crypto/aegis128.c
index 3827130..c22f441 100644
--- a/crypto/aegis128.c
+++ b/crypto/aegis128.c
@@ -429,7 +429,6 @@
 	.chunksize = AEGIS_BLOCK_SIZE,
 
 	.base = {
-		.cra_flags = CRYPTO_ALG_TYPE_AEAD,
 		.cra_blocksize = 1,
 		.cra_ctxsize = sizeof(struct aegis_ctx),
 		.cra_alignmask = 0,
diff --git a/crypto/aegis128l.c b/crypto/aegis128l.c
index 0cc1a75..b6fb21e 100644
--- a/crypto/aegis128l.c
+++ b/crypto/aegis128l.c
@@ -121,7 +121,7 @@
 				(const union aegis_chunk *)src;
 
 		while (size >= AEGIS128L_CHUNK_SIZE) {
-                    crypto_aegis128l_update_a(state, src_chunk);
+			crypto_aegis128l_update_a(state, src_chunk);
 
 			size -= AEGIS128L_CHUNK_SIZE;
 			src_chunk += 1;
@@ -493,7 +493,6 @@
 	.chunksize = AEGIS128L_CHUNK_SIZE,
 
 	.base = {
-		.cra_flags = CRYPTO_ALG_TYPE_AEAD,
 		.cra_blocksize = 1,
 		.cra_ctxsize = sizeof(struct aegis_ctx),
 		.cra_alignmask = 0,
diff --git a/crypto/aegis256.c b/crypto/aegis256.c
index a489d74..11f0f8e 100644
--- a/crypto/aegis256.c
+++ b/crypto/aegis256.c
@@ -444,7 +444,6 @@
 	.chunksize = AEGIS_BLOCK_SIZE,
 
 	.base = {
-		.cra_flags = CRYPTO_ALG_TYPE_AEAD,
 		.cra_blocksize = 1,
 		.cra_ctxsize = sizeof(struct aegis_ctx),
 		.cra_alignmask = 0,
diff --git a/crypto/blkcipher.c b/crypto/blkcipher.c
index 01c0d4a..dd4dcab 100644
--- a/crypto/blkcipher.c
+++ b/crypto/blkcipher.c
@@ -512,6 +512,7 @@
 	strncpy(rblkcipher.type, "blkcipher", sizeof(rblkcipher.type));
 	strncpy(rblkcipher.geniv, alg->cra_blkcipher.geniv ?: "<default>",
 		sizeof(rblkcipher.geniv));
+	rblkcipher.geniv[sizeof(rblkcipher.geniv) - 1] = '\0';
 
 	rblkcipher.blocksize = alg->cra_blocksize;
 	rblkcipher.min_keysize = alg->cra_blkcipher.min_keysize;
diff --git a/crypto/crypto_null.c b/crypto/crypto_null.c
index 20ff2c7..0959b26 100644
--- a/crypto/crypto_null.c
+++ b/crypto/crypto_null.c
@@ -104,7 +104,6 @@
 	.final  		=	null_final,
 	.base			=	{
 		.cra_name		=	"digest_null",
-		.cra_flags		=	CRYPTO_ALG_TYPE_SHASH,
 		.cra_blocksize		=	NULL_BLOCK_SIZE,
 		.cra_module		=	THIS_MODULE,
 	}
diff --git a/crypto/dh.c b/crypto/dh.c
index 5659fe7..09a44de 100644
--- a/crypto/dh.c
+++ b/crypto/dh.c
@@ -16,14 +16,16 @@
 #include <linux/mpi.h>
 
 struct dh_ctx {
-	MPI p;
-	MPI g;
-	MPI xa;
+	MPI p;	/* Value is guaranteed to be set. */
+	MPI q;	/* Value is optional. */
+	MPI g;	/* Value is guaranteed to be set. */
+	MPI xa;	/* Value is guaranteed to be set. */
 };
 
 static void dh_clear_ctx(struct dh_ctx *ctx)
 {
 	mpi_free(ctx->p);
+	mpi_free(ctx->q);
 	mpi_free(ctx->g);
 	mpi_free(ctx->xa);
 	memset(ctx, 0, sizeof(*ctx));
@@ -60,6 +62,12 @@
 	if (!ctx->p)
 		return -EINVAL;
 
+	if (params->q && params->q_size) {
+		ctx->q = mpi_read_raw_data(params->q, params->q_size);
+		if (!ctx->q)
+			return -EINVAL;
+	}
+
 	ctx->g = mpi_read_raw_data(params->g, params->g_size);
 	if (!ctx->g)
 		return -EINVAL;
@@ -93,6 +101,55 @@
 	return -EINVAL;
 }
 
+/*
+ * SP800-56A public key verification:
+ *
+ * * If Q is provided as part of the domain parameters, a full validation
+ *   according to SP800-56A section 5.6.2.3.1 is performed.
+ *
+ * * If Q is not provided, a partial validation according to SP800-56A section
+ *   5.6.2.3.2 is performed.
+ */
+static int dh_is_pubkey_valid(struct dh_ctx *ctx, MPI y)
+{
+	if (unlikely(!ctx->p))
+		return -EINVAL;
+
+	/*
+	 * Step 1: Verify that 2 <= y <= p - 2.
+	 *
+	 * The upper limit check is actually y < p instead of y < p - 1
+	 * as the mpi_sub_ui function is not yet available.
+	 */
+	if (mpi_cmp_ui(y, 1) < 1 || mpi_cmp(y, ctx->p) >= 0)
+		return -EINVAL;
+
+	/* Step 2: Verify that 1 = y^q mod p */
+	if (ctx->q) {
+		MPI val = mpi_alloc(0);
+		int ret;
+
+		if (!val)
+			return -ENOMEM;
+
+		ret = mpi_powm(val, y, ctx->q, ctx->p);
+
+		if (ret) {
+			mpi_free(val);
+			return ret;
+		}
+
+		ret = mpi_cmp_ui(val, 1);
+
+		mpi_free(val);
+
+		if (ret != 0)
+			return -EINVAL;
+	}
+
+	return 0;
+}
+
 static int dh_compute_value(struct kpp_request *req)
 {
 	struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
@@ -115,6 +172,9 @@
 			ret = -EINVAL;
 			goto err_free_val;
 		}
+		ret = dh_is_pubkey_valid(ctx, base);
+		if (ret)
+			goto err_free_base;
 	} else {
 		base = ctx->g;
 	}
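
(In equation form, dh_is_pubkey_valid() above checks, with the upper bound
relaxed to $y < p$ as the in-code comment explains:

	\[ 2 \le y < p \qquad \text{and, when } q \text{ is supplied:} \qquad
	   y^{\,q} \equiv 1 \pmod{p} \]
)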
diff --git a/crypto/dh_helper.c b/crypto/dh_helper.c
index 24fdb2e..a7de3d9c 100644
--- a/crypto/dh_helper.c
+++ b/crypto/dh_helper.c
@@ -30,7 +30,7 @@
 
 static inline unsigned int dh_data_size(const struct dh *p)
 {
-	return p->key_size + p->p_size + p->g_size;
+	return p->key_size + p->p_size + p->q_size + p->g_size;
 }
 
 unsigned int crypto_dh_key_len(const struct dh *p)
@@ -56,9 +56,11 @@
 	ptr = dh_pack_data(ptr, &secret, sizeof(secret));
 	ptr = dh_pack_data(ptr, &params->key_size, sizeof(params->key_size));
 	ptr = dh_pack_data(ptr, &params->p_size, sizeof(params->p_size));
+	ptr = dh_pack_data(ptr, &params->q_size, sizeof(params->q_size));
 	ptr = dh_pack_data(ptr, &params->g_size, sizeof(params->g_size));
 	ptr = dh_pack_data(ptr, params->key, params->key_size);
 	ptr = dh_pack_data(ptr, params->p, params->p_size);
+	ptr = dh_pack_data(ptr, params->q, params->q_size);
 	dh_pack_data(ptr, params->g, params->g_size);
 
 	return 0;
@@ -79,6 +81,7 @@
 
 	ptr = dh_unpack_data(&params->key_size, ptr, sizeof(params->key_size));
 	ptr = dh_unpack_data(&params->p_size, ptr, sizeof(params->p_size));
+	ptr = dh_unpack_data(&params->q_size, ptr, sizeof(params->q_size));
 	ptr = dh_unpack_data(&params->g_size, ptr, sizeof(params->g_size));
 	if (secret.len != crypto_dh_key_len(params))
 		return -EINVAL;
@@ -88,7 +91,7 @@
 	 * some drivers assume otherwise.
 	 */
 	if (params->key_size > params->p_size ||
-	    params->g_size > params->p_size)
+	    params->g_size > params->p_size || params->q_size > params->p_size)
 		return -EINVAL;
 
 	/* Don't allocate memory. Set pointers to data within
@@ -96,7 +99,9 @@
 	 */
 	params->key = (void *)ptr;
 	params->p = (void *)(ptr + params->key_size);
-	params->g = (void *)(ptr + params->key_size + params->p_size);
+	params->q = (void *)(ptr + params->key_size + params->p_size);
+	params->g = (void *)(ptr + params->key_size + params->p_size +
+			     params->q_size);
 
 	/*
 	 * Don't permit 'p' to be 0.  It's not a prime number, and it's subject
@@ -106,6 +111,10 @@
 	if (memchr_inv(params->p, 0, params->p_size) == NULL)
 		return -EINVAL;
 
+	/* It is permissible to not provide Q. */
+	if (params->q_size == 0)
+		params->q = NULL;
+
 	return 0;
 }
 EXPORT_SYMBOL_GPL(crypto_dh_decode_key);
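
(For reference, the packed key blob that crypto_dh_encode_key() and
crypto_dh_decode_key() now agree on is laid out as:

	secret header | key_size | p_size | q_size | g_size | key | p | q | g

with q_size == 0 permitted, in which case decode sets params->q = NULL.)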
diff --git a/crypto/drbg.c b/crypto/drbg.c
index 466a112..ee302fd 100644
--- a/crypto/drbg.c
+++ b/crypto/drbg.c
@@ -1715,6 +1715,9 @@
 	drbg->outscratchpad = (u8 *)PTR_ALIGN(drbg->outscratchpadbuf,
 					      alignmask + 1);
 
+	sg_init_table(&drbg->sg_in, 1);
+	sg_init_table(&drbg->sg_out, 1);
+
 	return alignmask;
 }
 
@@ -1743,17 +1746,17 @@
 			      u8 *inbuf, u32 inlen,
 			      u8 *outbuf, u32 outlen)
 {
-	struct scatterlist sg_in, sg_out;
+	struct scatterlist *sg_in = &drbg->sg_in, *sg_out = &drbg->sg_out;
 	int ret;
 
-	sg_init_one(&sg_in, inbuf, inlen);
-	sg_init_one(&sg_out, drbg->outscratchpad, DRBG_OUTSCRATCHLEN);
+	sg_set_buf(sg_in, inbuf, inlen);
+	sg_set_buf(sg_out, drbg->outscratchpad, DRBG_OUTSCRATCHLEN);
 
 	while (outlen) {
 		u32 cryptlen = min3(inlen, outlen, (u32)DRBG_OUTSCRATCHLEN);
 
 		/* Output buffer may not be valid for SGL, use scratchpad */
-		skcipher_request_set_crypt(drbg->ctr_req, &sg_in, &sg_out,
+		skcipher_request_set_crypt(drbg->ctr_req, sg_in, sg_out,
 					   cryptlen, drbg->V);
 		ret = crypto_wait_req(crypto_skcipher_encrypt(drbg->ctr_req),
 					&drbg->ctr_wait);
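
(This hunk hoists the scatterlist setup out of the per-request path: the
one-time sg_init_table() happens at instantiation, and each request only
repoints the single entry with sg_set_buf(). The drbg->sg_in/sg_out members
are added to struct drbg_state in a part of the patch not shown here. The
pattern in isolation:

	struct scatterlist sg;

	sg_init_table(&sg, 1);		/* once, when the instance is set up */

	/* per request: just update the entry, no re-initialization needed */
	sg_set_buf(&sg, buf, len);
)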
diff --git a/crypto/ecc.c b/crypto/ecc.c
index 8155413..8facafd 100644
--- a/crypto/ecc.c
+++ b/crypto/ecc.c
@@ -1019,6 +1019,36 @@
 	return ret;
 }
 
+/* SP800-56A section 5.6.2.3.4 partial verification: ephemeral keys only */
+static int ecc_is_pubkey_valid_partial(const struct ecc_curve *curve,
+				       struct ecc_point *pk)
+{
+	u64 yy[ECC_MAX_DIGITS], xxx[ECC_MAX_DIGITS], w[ECC_MAX_DIGITS];
+
+	/* Check 1: Verify key is not the zero point. */
+	if (ecc_point_is_zero(pk))
+		return -EINVAL;
+
+	/* Check 2: Verify key is in the range [1, p-1]. */
+	if (vli_cmp(curve->p, pk->x, pk->ndigits) != 1)
+		return -EINVAL;
+	if (vli_cmp(curve->p, pk->y, pk->ndigits) != 1)
+		return -EINVAL;
+
+	/* Check 3: Verify that y^2 == (x^3 + a·x + b) mod p */
+	vli_mod_square_fast(yy, pk->y, curve->p, pk->ndigits); /* y^2 */
+	vli_mod_square_fast(xxx, pk->x, curve->p, pk->ndigits); /* x^2 */
+	vli_mod_mult_fast(xxx, xxx, pk->x, curve->p, pk->ndigits); /* x^3 */
+	vli_mod_mult_fast(w, curve->a, pk->x, curve->p, pk->ndigits); /* a·x */
+	vli_mod_add(w, w, curve->b, curve->p, pk->ndigits); /* a·x + b */
+	vli_mod_add(w, w, xxx, curve->p, pk->ndigits); /* x^3 + a·x + b */
+	if (vli_cmp(yy, w, pk->ndigits) != 0) /* Equation */
+		return -EINVAL;
+
+	return 0;
+
+}
+
 int crypto_ecdh_shared_secret(unsigned int curve_id, unsigned int ndigits,
 			      const u64 *private_key, const u64 *public_key,
 			      u64 *secret)
@@ -1046,16 +1076,20 @@
 		goto out;
 	}
 
+	ecc_swap_digits(public_key, pk->x, ndigits);
+	ecc_swap_digits(&public_key[ndigits], pk->y, ndigits);
+	ret = ecc_is_pubkey_valid_partial(curve, pk);
+	if (ret)
+		goto err_alloc_product;
+
+	ecc_swap_digits(private_key, priv, ndigits);
+
 	product = ecc_alloc_point(ndigits);
 	if (!product) {
 		ret = -ENOMEM;
 		goto err_alloc_product;
 	}
 
-	ecc_swap_digits(public_key, pk->x, ndigits);
-	ecc_swap_digits(&public_key[ndigits], pk->y, ndigits);
-	ecc_swap_digits(private_key, priv, ndigits);
-
 	ecc_point_mult(product, pk, priv, rand_z, curve->p, ndigits);
 
 	ecc_swap_digits(product->x, secret, ndigits);
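
(The partial validation above corresponds to SP800-56A section 5.6.2.3.4: for
a public point $Q = (x, y)$ on a curve $y^2 = x^3 + ax + b$ over $\mathbb{F}_p$,

	\[ Q \ne \mathcal{O}, \qquad 0 \le x, y < p, \qquad
	   y^2 \equiv x^3 + ax + b \pmod{p} \]

The subgroup-order check $nQ = \mathcal{O}$ is deliberately skipped, which is
what makes this the partial, ephemeral-keys-only variant.)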
diff --git a/crypto/ecc_curve_defs.h b/crypto/ecc_curve_defs.h
index b80f45d..336ab18 100644
--- a/crypto/ecc_curve_defs.h
+++ b/crypto/ecc_curve_defs.h
@@ -13,9 +13,11 @@
 	struct ecc_point g;
 	u64 *p;
 	u64 *n;
+	u64 *a;
+	u64 *b;
 };
 
-/* NIST P-192 */
+/* NIST P-192: a = p - 3 */
 static u64 nist_p192_g_x[] = { 0xF4FF0AFD82FF1012ull, 0x7CBF20EB43A18800ull,
 				0x188DA80EB03090F6ull };
 static u64 nist_p192_g_y[] = { 0x73F977A11E794811ull, 0x631011ED6B24CDD5ull,
@@ -24,6 +26,10 @@
 				0xFFFFFFFFFFFFFFFFull };
 static u64 nist_p192_n[] = { 0x146BC9B1B4D22831ull, 0xFFFFFFFF99DEF836ull,
 				0xFFFFFFFFFFFFFFFFull };
+static u64 nist_p192_a[] = { 0xFFFFFFFFFFFFFFFCull, 0xFFFFFFFFFFFFFFFEull,
+				0xFFFFFFFFFFFFFFFFull };
+static u64 nist_p192_b[] = { 0xFEB8DEECC146B9B1ull, 0x0FA7E9AB72243049ull,
+				0x64210519E59C80E7ull };
 static struct ecc_curve nist_p192 = {
 	.name = "nist_192",
 	.g = {
@@ -32,10 +38,12 @@
 		.ndigits = 3,
 	},
 	.p = nist_p192_p,
-	.n = nist_p192_n
+	.n = nist_p192_n,
+	.a = nist_p192_a,
+	.b = nist_p192_b
 };
 
-/* NIST P-256 */
+/* NIST P-256: a = p - 3 */
 static u64 nist_p256_g_x[] = { 0xF4A13945D898C296ull, 0x77037D812DEB33A0ull,
 				0xF8BCE6E563A440F2ull, 0x6B17D1F2E12C4247ull };
 static u64 nist_p256_g_y[] = { 0xCBB6406837BF51F5ull, 0x2BCE33576B315ECEull,
@@ -44,6 +52,10 @@
 				0x0000000000000000ull, 0xFFFFFFFF00000001ull };
 static u64 nist_p256_n[] = { 0xF3B9CAC2FC632551ull, 0xBCE6FAADA7179E84ull,
 				0xFFFFFFFFFFFFFFFFull, 0xFFFFFFFF00000000ull };
+static u64 nist_p256_a[] = { 0xFFFFFFFFFFFFFFFCull, 0x00000000FFFFFFFFull,
+				0x0000000000000000ull, 0xFFFFFFFF00000001ull };
+static u64 nist_p256_b[] = { 0x3BCE3C3E27D2604Bull, 0x651D06B0CC53B0F6ull,
+				0xB3EBBD55769886BCull, 0x5AC635D8AA3A93E7ull };
 static struct ecc_curve nist_p256 = {
 	.name = "nist_256",
 	.g = {
@@ -52,7 +64,9 @@
 		.ndigits = 4,
 	},
 	.p = nist_p256_p,
-	.n = nist_p256_n
+	.n = nist_p256_n,
+	.a = nist_p256_a,
+	.b = nist_p256_b
 };
 
 #endif
diff --git a/crypto/ghash-generic.c b/crypto/ghash-generic.c
index 1bffb3f..d9f192b 100644
--- a/crypto/ghash-generic.c
+++ b/crypto/ghash-generic.c
@@ -132,7 +132,6 @@
 		.cra_name		= "ghash",
 		.cra_driver_name	= "ghash-generic",
 		.cra_priority		= 100,
-		.cra_flags		= CRYPTO_ALG_TYPE_SHASH,
 		.cra_blocksize		= GHASH_BLOCK_SIZE,
 		.cra_ctxsize		= sizeof(struct ghash_ctx),
 		.cra_module		= THIS_MODULE,
diff --git a/crypto/md4.c b/crypto/md4.c
index 810fefb..9965ec4 100644
--- a/crypto/md4.c
+++ b/crypto/md4.c
@@ -217,7 +217,6 @@
 	.descsize	=	sizeof(struct md4_ctx),
 	.base		=	{
 		.cra_name	=	"md4",
-		.cra_flags	=	CRYPTO_ALG_TYPE_SHASH,
 		.cra_blocksize	=	MD4_HMAC_BLOCK_SIZE,
 		.cra_module	=	THIS_MODULE,
 	}
diff --git a/crypto/md5.c b/crypto/md5.c
index f776ef4..94dd781 100644
--- a/crypto/md5.c
+++ b/crypto/md5.c
@@ -229,7 +229,6 @@
 	.statesize	=	sizeof(struct md5_state),
 	.base		=	{
 		.cra_name	=	"md5",
-		.cra_flags	=	CRYPTO_ALG_TYPE_SHASH,
 		.cra_blocksize	=	MD5_HMAC_BLOCK_SIZE,
 		.cra_module	=	THIS_MODULE,
 	}
diff --git a/crypto/morus1280.c b/crypto/morus1280.c
index 6180b25..d057cf5 100644
--- a/crypto/morus1280.c
+++ b/crypto/morus1280.c
@@ -514,7 +514,6 @@
 	.chunksize = MORUS1280_BLOCK_SIZE,
 
 	.base = {
-		.cra_flags = CRYPTO_ALG_TYPE_AEAD,
 		.cra_blocksize = 1,
 		.cra_ctxsize = sizeof(struct morus1280_ctx),
 		.cra_alignmask = 0,
diff --git a/crypto/morus640.c b/crypto/morus640.c
index 5eede37..1ca76e5 100644
--- a/crypto/morus640.c
+++ b/crypto/morus640.c
@@ -511,7 +511,6 @@
 	.chunksize = MORUS640_BLOCK_SIZE,
 
 	.base = {
-		.cra_flags = CRYPTO_ALG_TYPE_AEAD,
 		.cra_blocksize = 1,
 		.cra_ctxsize = sizeof(struct morus640_ctx),
 		.cra_alignmask = 0,
diff --git a/crypto/poly1305_generic.c b/crypto/poly1305_generic.c
index b7a3a06..47d3a6b 100644
--- a/crypto/poly1305_generic.c
+++ b/crypto/poly1305_generic.c
@@ -279,7 +279,6 @@
 		.cra_name		= "poly1305",
 		.cra_driver_name	= "poly1305-generic",
 		.cra_priority		= 100,
-		.cra_flags		= CRYPTO_ALG_TYPE_SHASH,
 		.cra_blocksize		= POLY1305_BLOCK_SIZE,
 		.cra_module		= THIS_MODULE,
 	},
diff --git a/crypto/rmd128.c b/crypto/rmd128.c
index 40e053b..5f44722 100644
--- a/crypto/rmd128.c
+++ b/crypto/rmd128.c
@@ -303,7 +303,6 @@
 	.descsize	=	sizeof(struct rmd128_ctx),
 	.base		=	{
 		.cra_name	 =	"rmd128",
-		.cra_flags	 =	CRYPTO_ALG_TYPE_SHASH,
 		.cra_blocksize	 =	RMD128_BLOCK_SIZE,
 		.cra_module	 =	THIS_MODULE,
 	}
diff --git a/crypto/rmd160.c b/crypto/rmd160.c
index 5f3e6ea..7376453 100644
--- a/crypto/rmd160.c
+++ b/crypto/rmd160.c
@@ -347,7 +347,6 @@
 	.descsize	=	sizeof(struct rmd160_ctx),
 	.base		=	{
 		.cra_name	 =	"rmd160",
-		.cra_flags	 =	CRYPTO_ALG_TYPE_SHASH,
 		.cra_blocksize	 =	RMD160_BLOCK_SIZE,
 		.cra_module	 =	THIS_MODULE,
 	}
diff --git a/crypto/rmd256.c b/crypto/rmd256.c
index f50c025..0e9d306 100644
--- a/crypto/rmd256.c
+++ b/crypto/rmd256.c
@@ -49,7 +49,7 @@
 
 static void rmd256_transform(u32 *state, const __le32 *in)
 {
-	u32 aa, bb, cc, dd, aaa, bbb, ccc, ddd, tmp;
+	u32 aa, bb, cc, dd, aaa, bbb, ccc, ddd;
 
 	/* Initialize left lane */
 	aa = state[0];
@@ -100,7 +100,7 @@
 	ROUND(bbb, ccc, ddd, aaa, F4, KK1, in[12],  6);
 
 	/* Swap contents of "a" registers */
-	tmp = aa; aa = aaa; aaa = tmp;
+	swap(aa, aaa);
 
 	/* round 2: left lane */
 	ROUND(aa, bb, cc, dd, F2, K2, in[7],   7);
@@ -139,7 +139,7 @@
 	ROUND(bbb, ccc, ddd, aaa, F3, KK2, in[2],  11);
 
 	/* Swap contents of "b" registers */
-	tmp = bb; bb = bbb; bbb = tmp;
+	swap(bb, bbb);
 
 	/* round 3: left lane */
 	ROUND(aa, bb, cc, dd, F3, K3, in[3],  11);
@@ -178,7 +178,7 @@
 	ROUND(bbb, ccc, ddd, aaa, F2, KK3, in[13],  5);
 
 	/* Swap contents of "c" registers */
-	tmp = cc; cc = ccc; ccc = tmp;
+	swap(cc, ccc);
 
 	/* round 4: left lane */
 	ROUND(aa, bb, cc, dd, F4, K4, in[1],  11);
@@ -217,7 +217,7 @@
 	ROUND(bbb, ccc, ddd, aaa, F1, KK4, in[14],  8);
 
 	/* Swap contents of "d" registers */
-	tmp = dd; dd = ddd; ddd = tmp;
+	swap(dd, ddd);
 
 	/* combine results */
 	state[0] += aa;
@@ -322,7 +322,6 @@
 	.descsize	=	sizeof(struct rmd256_ctx),
 	.base		=	{
 		.cra_name	 =	"rmd256",
-		.cra_flags	 =	CRYPTO_ALG_TYPE_SHASH,
 		.cra_blocksize	 =	RMD256_BLOCK_SIZE,
 		.cra_module	 =	THIS_MODULE,
 	}
diff --git a/crypto/rmd320.c b/crypto/rmd320.c
index e1315e4..3ae1df5 100644
--- a/crypto/rmd320.c
+++ b/crypto/rmd320.c
@@ -53,7 +53,7 @@
 
 static void rmd320_transform(u32 *state, const __le32 *in)
 {
-	u32 aa, bb, cc, dd, ee, aaa, bbb, ccc, ddd, eee, tmp;
+	u32 aa, bb, cc, dd, ee, aaa, bbb, ccc, ddd, eee;
 
 	/* Initialize left lane */
 	aa = state[0];
@@ -106,7 +106,7 @@
 	ROUND(aaa, bbb, ccc, ddd, eee, F5, KK1, in[12],  6);
 
 	/* Swap contents of "a" registers */
-	tmp = aa; aa = aaa; aaa = tmp;
+	swap(aa, aaa);
 
 	/* round 2: left lane" */
 	ROUND(ee, aa, bb, cc, dd, F2, K2, in[7],   7);
@@ -145,7 +145,7 @@
 	ROUND(eee, aaa, bbb, ccc, ddd, F4, KK2, in[2],  11);
 
 	/* Swap contents of "b" registers */
-	tmp = bb; bb = bbb; bbb = tmp;
+	swap(bb, bbb);
 
 	/* round 3: left lane" */
 	ROUND(dd, ee, aa, bb, cc, F3, K3, in[3],  11);
@@ -184,7 +184,7 @@
 	ROUND(ddd, eee, aaa, bbb, ccc, F3, KK3, in[13],  5);
 
 	/* Swap contents of "c" registers */
-	tmp = cc; cc = ccc; ccc = tmp;
+	swap(cc, ccc);
 
 	/* round 4: left lane" */
 	ROUND(cc, dd, ee, aa, bb, F4, K4, in[1],  11);
@@ -223,7 +223,7 @@
 	ROUND(ccc, ddd, eee, aaa, bbb, F2, KK4, in[14],  8);
 
 	/* Swap contents of "d" registers */
-	tmp = dd; dd = ddd; ddd = tmp;
+	swap(dd, ddd);
 
 	/* round 5: left lane" */
 	ROUND(bb, cc, dd, ee, aa, F5, K5, in[4],   9);
@@ -262,7 +262,7 @@
 	ROUND(bbb, ccc, ddd, eee, aaa, F1, KK5, in[11], 11);
 
 	/* Swap contents of "e" registers */
-	tmp = ee; ee = eee; eee = tmp;
+	swap(ee, eee);
 
 	/* combine results */
 	state[0] += aa;
@@ -371,7 +371,6 @@
 	.descsize	=	sizeof(struct rmd320_ctx),
 	.base		=	{
 		.cra_name	 =	"rmd320",
-		.cra_flags	 =	CRYPTO_ALG_TYPE_SHASH,
 		.cra_blocksize	 =	RMD320_BLOCK_SIZE,
 		.cra_module	 =	THIS_MODULE,
 	}
diff --git a/crypto/sha1_generic.c b/crypto/sha1_generic.c
index 6877cbb..2af64ef 100644
--- a/crypto/sha1_generic.c
+++ b/crypto/sha1_generic.c
@@ -76,7 +76,7 @@
 	.base		=	{
 		.cra_name	=	"sha1",
 		.cra_driver_name=	"sha1-generic",
-		.cra_flags	=	CRYPTO_ALG_TYPE_SHASH,
+		.cra_priority	=	100,
 		.cra_blocksize	=	SHA1_BLOCK_SIZE,
 		.cra_module	=	THIS_MODULE,
 	}
diff --git a/crypto/sha256_generic.c b/crypto/sha256_generic.c
index 8f9c47e..1e5ba66 100644
--- a/crypto/sha256_generic.c
+++ b/crypto/sha256_generic.c
@@ -271,7 +271,7 @@
 	.base		=	{
 		.cra_name	=	"sha256",
 		.cra_driver_name=	"sha256-generic",
-		.cra_flags	=	CRYPTO_ALG_TYPE_SHASH,
+		.cra_priority	=	100,
 		.cra_blocksize	=	SHA256_BLOCK_SIZE,
 		.cra_module	=	THIS_MODULE,
 	}
@@ -285,7 +285,7 @@
 	.base		=	{
 		.cra_name	=	"sha224",
 		.cra_driver_name=	"sha224-generic",
-		.cra_flags	=	CRYPTO_ALG_TYPE_SHASH,
+		.cra_priority	=	100,
 		.cra_blocksize	=	SHA224_BLOCK_SIZE,
 		.cra_module	=	THIS_MODULE,
 	}
diff --git a/crypto/sha3_generic.c b/crypto/sha3_generic.c
index 7f6735d..7ed9836 100644
--- a/crypto/sha3_generic.c
+++ b/crypto/sha3_generic.c
@@ -250,7 +250,6 @@
 	.descsize		= sizeof(struct sha3_state),
 	.base.cra_name		= "sha3-224",
 	.base.cra_driver_name	= "sha3-224-generic",
-	.base.cra_flags		= CRYPTO_ALG_TYPE_SHASH,
 	.base.cra_blocksize	= SHA3_224_BLOCK_SIZE,
 	.base.cra_module	= THIS_MODULE,
 }, {
@@ -261,7 +260,6 @@
 	.descsize		= sizeof(struct sha3_state),
 	.base.cra_name		= "sha3-256",
 	.base.cra_driver_name	= "sha3-256-generic",
-	.base.cra_flags		= CRYPTO_ALG_TYPE_SHASH,
 	.base.cra_blocksize	= SHA3_256_BLOCK_SIZE,
 	.base.cra_module	= THIS_MODULE,
 }, {
@@ -272,7 +270,6 @@
 	.descsize		= sizeof(struct sha3_state),
 	.base.cra_name		= "sha3-384",
 	.base.cra_driver_name	= "sha3-384-generic",
-	.base.cra_flags		= CRYPTO_ALG_TYPE_SHASH,
 	.base.cra_blocksize	= SHA3_384_BLOCK_SIZE,
 	.base.cra_module	= THIS_MODULE,
 }, {
@@ -283,7 +280,6 @@
 	.descsize		= sizeof(struct sha3_state),
 	.base.cra_name		= "sha3-512",
 	.base.cra_driver_name	= "sha3-512-generic",
-	.base.cra_flags		= CRYPTO_ALG_TYPE_SHASH,
 	.base.cra_blocksize	= SHA3_512_BLOCK_SIZE,
 	.base.cra_module	= THIS_MODULE,
 } };
diff --git a/crypto/sha512_generic.c b/crypto/sha512_generic.c
index eba965d..4097cd5 100644
--- a/crypto/sha512_generic.c
+++ b/crypto/sha512_generic.c
@@ -23,6 +23,28 @@
 #include <asm/byteorder.h>
 #include <asm/unaligned.h>
 
+const u8 sha384_zero_message_hash[SHA384_DIGEST_SIZE] = {
+	0x38, 0xb0, 0x60, 0xa7, 0x51, 0xac, 0x96, 0x38,
+	0x4c, 0xd9, 0x32, 0x7e, 0xb1, 0xb1, 0xe3, 0x6a,
+	0x21, 0xfd, 0xb7, 0x11, 0x14, 0xbe, 0x07, 0x43,
+	0x4c, 0x0c, 0xc7, 0xbf, 0x63, 0xf6, 0xe1, 0xda,
+	0x27, 0x4e, 0xde, 0xbf, 0xe7, 0x6f, 0x65, 0xfb,
+	0xd5, 0x1a, 0xd2, 0xf1, 0x48, 0x98, 0xb9, 0x5b
+};
+EXPORT_SYMBOL_GPL(sha384_zero_message_hash);
+
+const u8 sha512_zero_message_hash[SHA512_DIGEST_SIZE] = {
+	0xcf, 0x83, 0xe1, 0x35, 0x7e, 0xef, 0xb8, 0xbd,
+	0xf1, 0x54, 0x28, 0x50, 0xd6, 0x6d, 0x80, 0x07,
+	0xd6, 0x20, 0xe4, 0x05, 0x0b, 0x57, 0x15, 0xdc,
+	0x83, 0xf4, 0xa9, 0x21, 0xd3, 0x6c, 0xe9, 0xce,
+	0x47, 0xd0, 0xd1, 0x3c, 0x5d, 0x85, 0xf2, 0xb0,
+	0xff, 0x83, 0x18, 0xd2, 0x87, 0x7e, 0xec, 0x2f,
+	0x63, 0xb9, 0x31, 0xbd, 0x47, 0x41, 0x7a, 0x81,
+	0xa5, 0x38, 0x32, 0x7a, 0xf9, 0x27, 0xda, 0x3e
+};
+EXPORT_SYMBOL_GPL(sha512_zero_message_hash);
+
 static inline u64 Ch(u64 x, u64 y, u64 z)
 {
         return z ^ (x & (y ^ z));
@@ -171,7 +193,7 @@
 	.base		=	{
 		.cra_name	=	"sha512",
 		.cra_driver_name =	"sha512-generic",
-		.cra_flags	=	CRYPTO_ALG_TYPE_SHASH,
+		.cra_priority	=	100,
 		.cra_blocksize	=	SHA512_BLOCK_SIZE,
 		.cra_module	=	THIS_MODULE,
 	}
@@ -185,7 +207,7 @@
 	.base		=	{
 		.cra_name	=	"sha384",
 		.cra_driver_name =	"sha384-generic",
-		.cra_flags	=	CRYPTO_ALG_TYPE_SHASH,
+		.cra_priority	=	100,
 		.cra_blocksize	=	SHA384_BLOCK_SIZE,
 		.cra_module	=	THIS_MODULE,
 	}
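
Like the existing sha1/sha256 constants, the exported zero-message digests let
a driver whose engine cannot process an empty input answer such requests
directly.  A hedged sketch of the intended use; the foo_* names are
hypothetical, the pattern mirrors existing sha1_zero_message_hash users:

	static int foo_sha512_digest(struct ahash_request *req)
	{
		/* hypothetical engine that cannot DMA a zero-length job */
		if (!req->nbytes) {
			memcpy(req->result, sha512_zero_message_hash,
			       SHA512_DIGEST_SIZE);
			return 0;
		}
		return foo_sha512_hw_digest(req);	/* hypothetical */
	}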
diff --git a/crypto/skcipher.c b/crypto/skcipher.c
index 0fe2a29..7d6a49f 100644
--- a/crypto/skcipher.c
+++ b/crypto/skcipher.c
@@ -387,7 +387,6 @@
 	}
 	return err;
 }
-EXPORT_SYMBOL_GPL(skcipher_walk_next);
 
 static int skcipher_copy_iv(struct skcipher_walk *walk)
 {
diff --git a/crypto/sm3_generic.c b/crypto/sm3_generic.c
index 9e823d9..9a5c60f 100644
--- a/crypto/sm3_generic.c
+++ b/crypto/sm3_generic.c
@@ -184,7 +184,6 @@
 	.base		=	{
 		.cra_name	 =	"sm3",
 		.cra_driver_name =	"sm3-generic",
-		.cra_flags	 =	CRYPTO_ALG_TYPE_SHASH,
 		.cra_blocksize	 =	SM3_BLOCK_SIZE,
 		.cra_module	 =	THIS_MODULE,
 	}
diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c
index d5bcdd9..078ec36 100644
--- a/crypto/tcrypt.c
+++ b/crypto/tcrypt.c
@@ -1939,7 +1939,7 @@
 		break;
 
 	case 109:
-		ret += tcrypt_test("vmac(aes)");
+		ret += tcrypt_test("vmac64(aes)");
 		break;
 
 	case 111:
diff --git a/crypto/testmgr.c b/crypto/testmgr.c
index 11e4535..a1d4224 100644
--- a/crypto/testmgr.c
+++ b/crypto/testmgr.c
@@ -259,9 +259,15 @@
 	return ret;
 }
 
+enum hash_test {
+	HASH_TEST_DIGEST,
+	HASH_TEST_FINAL,
+	HASH_TEST_FINUP
+};
+
 static int __test_hash(struct crypto_ahash *tfm,
 		       const struct hash_testvec *template, unsigned int tcount,
-		       bool use_digest, const int align_offset)
+		       enum hash_test test_type, const int align_offset)
 {
 	const char *algo = crypto_tfm_alg_driver_name(crypto_ahash_tfm(tfm));
 	size_t digest_size = crypto_ahash_digestsize(tfm);
@@ -332,14 +338,17 @@
 		}
 
 		ahash_request_set_crypt(req, sg, result, template[i].psize);
-		if (use_digest) {
+		switch (test_type) {
+		case HASH_TEST_DIGEST:
 			ret = crypto_wait_req(crypto_ahash_digest(req), &wait);
 			if (ret) {
 				pr_err("alg: hash: digest failed on test %d "
 				       "for %s: ret=%d\n", j, algo, -ret);
 				goto out;
 			}
-		} else {
+			break;
+
+		case HASH_TEST_FINAL:
 			memset(result, 1, digest_size);
 			ret = crypto_wait_req(crypto_ahash_init(req), &wait);
 			if (ret) {
@@ -371,6 +380,29 @@
 				       "for %s: ret=%d\n", j, algo, -ret);
 				goto out;
 			}
+			break;
+
+		case HASH_TEST_FINUP:
+			memset(result, 1, digest_size);
+			ret = crypto_wait_req(crypto_ahash_init(req), &wait);
+			if (ret) {
+				pr_err("alg: hash: init failed on test %d "
+				       "for %s: ret=%d\n", j, algo, -ret);
+				goto out;
+			}
+			ret = ahash_guard_result(result, 1, digest_size);
+			if (ret) {
+				pr_err("alg: hash: init failed on test %d "
+				       "for %s: used req->result\n", j, algo);
+				goto out;
+			}
+			ret = crypto_wait_req(crypto_ahash_finup(req), &wait);
+			if (ret) {
+				pr_err("alg: hash: final failed on test %d "
+				       "for %s: ret=%d\n", j, algo, -ret);
+				goto out;
+			}
+			break;
 		}
 
 		if (memcmp(result, template[i].digest,
@@ -383,6 +415,9 @@
 		}
 	}
 
+	/* the partial-update tests below run only for HASH_TEST_DIGEST (== 0) */
+	if (test_type)
+		goto out;
+
 	j = 0;
 	for (i = 0; i < tcount; i++) {
 		/* alignment tests are only done with continuous buffers */
@@ -540,24 +575,24 @@
 
 static int test_hash(struct crypto_ahash *tfm,
 		     const struct hash_testvec *template,
-		     unsigned int tcount, bool use_digest)
+		     unsigned int tcount, enum hash_test test_type)
 {
 	unsigned int alignmask;
 	int ret;
 
-	ret = __test_hash(tfm, template, tcount, use_digest, 0);
+	ret = __test_hash(tfm, template, tcount, test_type, 0);
 	if (ret)
 		return ret;
 
 	/* test unaligned buffers, check with one byte offset */
-	ret = __test_hash(tfm, template, tcount, use_digest, 1);
+	ret = __test_hash(tfm, template, tcount, test_type, 1);
 	if (ret)
 		return ret;
 
 	alignmask = crypto_tfm_alg_alignmask(&tfm->base);
 	if (alignmask) {
 		/* Check if alignment mask for tfm is correctly set. */
-		ret = __test_hash(tfm, template, tcount, use_digest,
+		ret = __test_hash(tfm, template, tcount, test_type,
 				  alignmask + 1);
 		if (ret)
 			return ret;
@@ -1803,9 +1838,11 @@
 		return PTR_ERR(tfm);
 	}
 
-	err = test_hash(tfm, template, tcount, true);
+	err = test_hash(tfm, template, tcount, HASH_TEST_DIGEST);
 	if (!err)
-		err = test_hash(tfm, template, tcount, false);
+		err = test_hash(tfm, template, tcount, HASH_TEST_FINAL);
+	if (!err)
+		err = test_hash(tfm, template, tcount, HASH_TEST_FINUP);
 	crypto_free_ahash(tfm);
 	return err;
 }
@@ -3478,10 +3515,10 @@
 			.hash = __VECS(tgr192_tv_template)
 		}
 	}, {
-		.alg = "vmac(aes)",
+		.alg = "vmac64(aes)",
 		.test = alg_test_hash,
 		.suite = {
-			.hash = __VECS(aes_vmac128_tv_template)
+			.hash = __VECS(vmac64_aes_tv_template)
 		}
 	}, {
 		.alg = "wp256",
diff --git a/crypto/testmgr.h b/crypto/testmgr.h
index b950aa2..759462d 100644
--- a/crypto/testmgr.h
+++ b/crypto/testmgr.h
@@ -644,12 +644,14 @@
 	"\x11\x02" /* len */
 	"\x00\x01\x00\x00" /* key_size */
 	"\x00\x01\x00\x00" /* p_size */
+	"\x00\x00\x00\x00" /* q_size */
 	"\x01\x00\x00\x00" /* g_size */
 #else
 	"\x00\x01" /* type */
 	"\x02\x11" /* len */
 	"\x00\x00\x01\x00" /* key_size */
 	"\x00\x00\x01\x00" /* p_size */
+	"\x00\x00\x00\x00" /* q_size */
 	"\x00\x00\x00\x01" /* g_size */
 #endif
 	/* xa */
@@ -751,12 +753,14 @@
 	"\x11\x02" /* len */
 	"\x00\x01\x00\x00" /* key_size */
 	"\x00\x01\x00\x00" /* p_size */
+	"\x00\x00\x00\x00" /* q_size */
 	"\x01\x00\x00\x00" /* g_size */
 #else
 	"\x00\x01" /* type */
 	"\x02\x11" /* len */
 	"\x00\x00\x01\x00" /* key_size */
 	"\x00\x00\x01\x00" /* p_size */
+	"\x00\x00\x00\x00" /* q_size */
 	"\x00\x00\x00\x01" /* g_size */
 #endif
 	/* xa */
@@ -4603,105 +4607,158 @@
 	}
 };
 
-static const char vmac_string1[128] = {'\x01', '\x01', '\x01', '\x01',
-				       '\x02', '\x03', '\x02', '\x02',
-				       '\x02', '\x04', '\x01', '\x07',
-				       '\x04', '\x01', '\x04', '\x03',};
-static const char vmac_string2[128] = {'a', 'b', 'c',};
-static const char vmac_string3[128] = {'a', 'b', 'c', 'a', 'b', 'c',
-				       'a', 'b', 'c', 'a', 'b', 'c',
-				       'a', 'b', 'c', 'a', 'b', 'c',
-				       'a', 'b', 'c', 'a', 'b', 'c',
-				       'a', 'b', 'c', 'a', 'b', 'c',
-				       'a', 'b', 'c', 'a', 'b', 'c',
-				       'a', 'b', 'c', 'a', 'b', 'c',
-				       'a', 'b', 'c', 'a', 'b', 'c',
-				      };
+static const char vmac64_string1[144] = {
+	'\0',   '\0',   '\0',   '\0',   '\0',   '\0',   '\0',   '\0',
+	'\0',   '\0',   '\0',   '\0',   '\0',   '\0',   '\0',   '\0',
+	'\x01', '\x01', '\x01', '\x01', '\x02', '\x03', '\x02', '\x02',
+	'\x02', '\x04', '\x01', '\x07', '\x04', '\x01', '\x04', '\x03',
+};
 
-static const char vmac_string4[17] = {'b', 'c', 'e', 'f',
-				      'i', 'j', 'l', 'm',
-				      'o', 'p', 'r', 's',
-				      't', 'u', 'w', 'x', 'z'};
+static const char vmac64_string2[144] = {
+	'\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0',
+	'\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0',
+	 'a',  'b',  'c',
+};
 
-static const char vmac_string5[127] = {'r', 'm', 'b', 't', 'c',
-				       'o', 'l', 'k', ']', '%',
-				       '9', '2', '7', '!', 'A'};
+static const char vmac64_string3[144] = {
+	'\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0',
+	'\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0',
+	 'a',  'b',  'c',  'a',  'b',  'c',  'a',  'b',
+	 'c',  'a',  'b',  'c',  'a',  'b',  'c',  'a',
+	 'b',  'c',  'a',  'b',  'c',  'a',  'b',  'c',
+	 'a',  'b',  'c',  'a',  'b',  'c',  'a',  'b',
+	 'c',  'a',  'b',  'c',  'a',  'b',  'c',  'a',
+	 'b',  'c',  'a',  'b',  'c',  'a',  'b',  'c',
+};
 
-static const char vmac_string6[129] = {'p', 't', '*', '7', 'l',
-				       'i', '!', '#', 'w', '0',
-				       'z', '/', '4', 'A', 'n'};
+static const char vmac64_string4[33] = {
+	'\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0',
+	'\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0',
+	 'b',  'c',  'e',  'f',  'i',  'j',  'l',  'm',
+	 'o',  'p',  'r',  's',  't',  'u',  'w',  'x',
+	 'z',
+};
 
-static const struct hash_testvec aes_vmac128_tv_template[] = {
-	{
+static const char vmac64_string5[143] = {
+	'\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0',
+	'\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0',
+	 'r',  'm',  'b',  't',  'c',  'o',  'l',  'k',
+	 ']',  '%',  '9',  '2',  '7',  '!',  'A',
+};
+
+static const char vmac64_string6[145] = {
+	'\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0',
+	'\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0',
+	 'p',  't',  '*',  '7',  'l',  'i',  '!',  '#',
+	 'w',  '0',  'z',  '/',  '4',  'A',  'n',
+};
+
+static const struct hash_testvec vmac64_aes_tv_template[] = {
+	{ /* draft-krovetz-vmac-01 test vector 1 */
+		.key	= "abcdefghijklmnop",
+		.ksize	= 16,
+		.plaintext = "\0\0\0\0\0\0\0\0bcdefghi",
+		.psize	= 16,
+		.digest	= "\x25\x76\xbe\x1c\x56\xd8\xb8\x1b",
+	}, { /* draft-krovetz-vmac-01 test vector 2 */
+		.key	= "abcdefghijklmnop",
+		.ksize	= 16,
+		.plaintext = "\0\0\0\0\0\0\0\0bcdefghiabc",
+		.psize	= 19,
+		.digest	= "\x2d\x37\x6c\xf5\xb1\x81\x3c\xe5",
+	}, { /* draft-krovetz-vmac-01 test vector 3 */
+		.key	= "abcdefghijklmnop",
+		.ksize	= 16,
+		.plaintext = "\0\0\0\0\0\0\0\0bcdefghi"
+			  "abcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabc",
+		.psize	= 64,
+		.digest	= "\xe8\x42\x1f\x61\xd5\x73\xd2\x98",
+	}, { /* draft-krovetz-vmac-01 test vector 4 */
+		.key	= "abcdefghijklmnop",
+		.ksize	= 16,
+		.plaintext = "\0\0\0\0\0\0\0\0bcdefghi"
+			  "abcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabc"
+			  "abcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabc"
+			  "abcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabc"
+			  "abcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabc"
+			  "abcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabc"
+			  "abcabcabcabcabcabcabcabcabcabcabcabcabcabcabc",
+		.psize	= 316,
+		.digest	= "\x44\x92\xdf\x6c\x5c\xac\x1b\xbe",
+		.tap	= { 1, 100, 200, 15 },
+		.np	= 4,
+	}, {
 		.key	= "\x00\x01\x02\x03\x04\x05\x06\x07"
 			  "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
-		.plaintext = NULL,
-		.digest	= "\x07\x58\x80\x35\x77\xa4\x7b\x54",
-		.psize	= 0,
 		.ksize	= 16,
+		.plaintext = "\x00\x00\x00\x00\x00\x00\x00\x00"
+			  "\x00\x00\x00\x00\x00\x00\x00\x00",
+		.psize	= 16,
+		.digest	= "\x54\x7b\xa4\x77\x35\x80\x58\x07",
 	}, {
-		.key    = "\x00\x01\x02\x03\x04\x05\x06\x07"
+		.key	= "\x00\x01\x02\x03\x04\x05\x06\x07"
 			  "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
-		.plaintext = vmac_string1,
-		.digest = "\xce\xf5\x3c\xd3\xae\x68\x8c\xa1",
-		.psize  = 128,
-		.ksize  = 16,
+		.ksize	= 16,
+		.plaintext = vmac64_string1,
+		.psize	= sizeof(vmac64_string1),
+		.digest	= "\xa1\x8c\x68\xae\xd3\x3c\xf5\xce",
 	}, {
-		.key    = "\x00\x01\x02\x03\x04\x05\x06\x07"
+		.key	= "\x00\x01\x02\x03\x04\x05\x06\x07"
 			  "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
-		.plaintext = vmac_string2,
-		.digest = "\xc9\x27\xb0\x73\x81\xbd\x14\x2d",
-		.psize  = 128,
-		.ksize  = 16,
+		.ksize	= 16,
+		.plaintext = vmac64_string2,
+		.psize	= sizeof(vmac64_string2),
+		.digest	= "\x2d\x14\xbd\x81\x73\xb0\x27\xc9",
 	}, {
-		.key    = "\x00\x01\x02\x03\x04\x05\x06\x07"
+		.key	= "\x00\x01\x02\x03\x04\x05\x06\x07"
 			  "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
-		.plaintext = vmac_string3,
-		.digest = "\x8d\x1a\x95\x8c\x98\x47\x0b\x19",
-		.psize  = 128,
-		.ksize  = 16,
+		.ksize	= 16,
+		.plaintext = vmac64_string3,
+		.psize	= sizeof(vmac64_string3),
+		.digest	= "\x19\x0b\x47\x98\x8c\x95\x1a\x8d",
 	}, {
 		.key	= "abcdefghijklmnop",
-		.plaintext = NULL,
-		.digest	= "\x3b\x89\xa1\x26\x9e\x55\x8f\x84",
-		.psize	= 0,
 		.ksize	= 16,
+		.plaintext = "\x00\x00\x00\x00\x00\x00\x00\x00"
+			  "\x00\x00\x00\x00\x00\x00\x00\x00",
+		.psize	= 16,
+		.digest	= "\x84\x8f\x55\x9e\x26\xa1\x89\x3b",
 	}, {
-		.key    = "abcdefghijklmnop",
-		.plaintext = vmac_string1,
-		.digest = "\xab\x5e\xab\xb0\xf6\x8d\x74\xc2",
-		.psize  = 128,
-		.ksize  = 16,
+		.key	= "abcdefghijklmnop",
+		.ksize	= 16,
+		.plaintext = vmac64_string1,
+		.psize	= sizeof(vmac64_string1),
+		.digest	= "\xc2\x74\x8d\xf6\xb0\xab\x5e\xab",
 	}, {
-		.key    = "abcdefghijklmnop",
-		.plaintext = vmac_string2,
-		.digest = "\x11\x15\x68\x42\x3d\x7b\x09\xdf",
-		.psize  = 128,
-		.ksize  = 16,
+		.key	= "abcdefghijklmnop",
+		.ksize	= 16,
+		.plaintext = vmac64_string2,
+		.psize	= sizeof(vmac64_string2),
+		.digest	= "\xdf\x09\x7b\x3d\x42\x68\x15\x11",
 	}, {
-		.key    = "abcdefghijklmnop",
-		.plaintext = vmac_string3,
-		.digest = "\x8b\x32\x8f\xe1\xed\x8f\xfa\xd4",
-		.psize  = 128,
-		.ksize  = 16,
+		.key	= "abcdefghijklmnop",
+		.ksize	= 16,
+		.plaintext = vmac64_string3,
+		.psize	= sizeof(vmac64_string3),
+		.digest	= "\xd4\xfa\x8f\xed\xe1\x8f\x32\x8b",
 	}, {
-		.key = "a09b5cd!f#07K\x00\x00\x00",
-		.plaintext = vmac_string4,
-		.digest = "\xab\xa5\x0f\xea\x42\x4e\xa1\x5f",
-		.psize = sizeof(vmac_string4),
-		.ksize = 16,
+		.key	= "a09b5cd!f#07K\x00\x00\x00",
+		.ksize	= 16,
+		.plaintext = vmac64_string4,
+		.psize	= sizeof(vmac64_string4),
+		.digest	= "\x5f\xa1\x4e\x42\xea\x0f\xa5\xab",
 	}, {
-		.key = "a09b5cd!f#07K\x00\x00\x00",
-		.plaintext = vmac_string5,
-		.digest = "\x25\x31\x98\xbc\x1d\xe8\x67\x60",
-		.psize = sizeof(vmac_string5),
-		.ksize = 16,
+		.key	= "a09b5cd!f#07K\x00\x00\x00",
+		.ksize	= 16,
+		.plaintext = vmac64_string5,
+		.psize	= sizeof(vmac64_string5),
+		.digest	= "\x60\x67\xe8\x1d\xbc\x98\x31\x25",
 	}, {
-		.key = "a09b5cd!f#07K\x00\x00\x00",
-		.plaintext = vmac_string6,
-		.digest = "\xc4\xae\x9b\x47\x95\x65\xeb\x41",
-		.psize = sizeof(vmac_string6),
-		.ksize = 16,
+		.key	= "a09b5cd!f#07K\x00\x00\x00",
+		.ksize	= 16,
+		.plaintext = vmac64_string6,
+		.psize	= sizeof(vmac64_string6),
+		.digest	= "\x41\xeb\x65\x95\x47\x9b\xae\xc4",
 	},
 };
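
The q_size words added to the DH test blobs track the packed key format parsed
by crypto_dh_decode_key().  As encoded by crypto/dh_helper.c, the blob layout
is roughly the following (a sketch, sizes in bytes):

	struct kpp_secret { unsigned short type, len; }	/* 2 + 2            */
	unsigned int key_size;				/* 4                */
	unsigned int p_size;				/* 4                */
	unsigned int q_size;				/* 4, newly encoded */
	unsigned int g_size;				/* 4                */
	u8 key[key_size], p[p_size], q[q_size], g[g_size];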
 
diff --git a/crypto/tgr192.c b/crypto/tgr192.c
index 321bc6f..022d3dd 100644
--- a/crypto/tgr192.c
+++ b/crypto/tgr192.c
@@ -636,7 +636,6 @@
 	.descsize	=	sizeof(struct tgr192_ctx),
 	.base		=	{
 		.cra_name	=	"tgr192",
-		.cra_flags	=	CRYPTO_ALG_TYPE_SHASH,
 		.cra_blocksize	=	TGR192_BLOCK_SIZE,
 		.cra_module	=	THIS_MODULE,
 	}
@@ -648,7 +647,6 @@
 	.descsize	=	sizeof(struct tgr192_ctx),
 	.base		=	{
 		.cra_name	=	"tgr160",
-		.cra_flags	=	CRYPTO_ALG_TYPE_SHASH,
 		.cra_blocksize	=	TGR192_BLOCK_SIZE,
 		.cra_module	=	THIS_MODULE,
 	}
@@ -660,7 +658,6 @@
 	.descsize	=	sizeof(struct tgr192_ctx),
 	.base		=	{
 		.cra_name	=	"tgr128",
-		.cra_flags	=	CRYPTO_ALG_TYPE_SHASH,
 		.cra_blocksize	=	TGR192_BLOCK_SIZE,
 		.cra_module	=	THIS_MODULE,
 	}
diff --git a/crypto/vmac.c b/crypto/vmac.c
index df76a81..5f436df 100644
--- a/crypto/vmac.c
+++ b/crypto/vmac.c
@@ -1,6 +1,10 @@
 /*
- * Modified to interface to the Linux kernel
+ * VMAC: Message Authentication Code using Universal Hashing
+ *
+ * Reference: https://tools.ietf.org/html/draft-krovetz-vmac-01
+ *
  * Copyright (c) 2009, Intel Corporation.
+ * Copyright (c) 2018, Google Inc.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -16,14 +20,15 @@
  * Place - Suite 330, Boston, MA 02111-1307 USA.
  */
 
-/* --------------------------------------------------------------------------
- * VMAC and VHASH Implementation by Ted Krovetz (tdk@acm.org) and Wei Dai.
- * This implementation is herby placed in the public domain.
- * The authors offers no warranty. Use at your own risk.
- * Please send bug reports to the authors.
- * Last modified: 17 APR 08, 1700 PDT
- * ----------------------------------------------------------------------- */
+/*
+ * Derived from:
+ *	VMAC and VHASH Implementation by Ted Krovetz (tdk@acm.org) and Wei Dai.
+ *	This implementation is hereby placed in the public domain.
+ *	The authors offer no warranty. Use at your own risk.
+ *	Last modified: 17 APR 08, 1700 PDT
+ */
 
+#include <asm/unaligned.h>
 #include <linux/init.h>
 #include <linux/types.h>
 #include <linux/crypto.h>
@@ -31,10 +36,42 @@
 #include <linux/scatterlist.h>
 #include <asm/byteorder.h>
 #include <crypto/scatterwalk.h>
-#include <crypto/vmac.h>
 #include <crypto/internal/hash.h>
 
 /*
+ * User definable settings.
+ */
+#define VMAC_TAG_LEN	64
+#define VMAC_KEY_SIZE	128	/* must be 128, 192 or 256 */
+#define VMAC_KEY_LEN	(VMAC_KEY_SIZE/8)
+#define VMAC_NHBYTES	128	/* must be 2^i, 3 < i < 13; standard = 128 */
+#define VMAC_NONCEBYTES	16
+
+/* per-transform (per-key) context */
+struct vmac_tfm_ctx {
+	struct crypto_cipher *cipher;
+	u64 nhkey[(VMAC_NHBYTES/8)+2*(VMAC_TAG_LEN/64-1)];
+	u64 polykey[2*VMAC_TAG_LEN/64];
+	u64 l3key[2*VMAC_TAG_LEN/64];
+};
+
+/* per-request context */
+struct vmac_desc_ctx {
+	union {
+		u8 partial[VMAC_NHBYTES];	/* partial block */
+		__le64 partial_words[VMAC_NHBYTES / 8];
+	};
+	unsigned int partial_size;	/* size of the partial block */
+	bool first_block_processed;
+	u64 polytmp[2*VMAC_TAG_LEN/64];	/* running total of L2-hash */
+	union {
+		u8 bytes[VMAC_NONCEBYTES];
+		__be64 pads[VMAC_NONCEBYTES / 8];
+	} nonce;
+	unsigned int nonce_size; /* nonce bytes filled so far */
+};
+
+/*
  * Constants and masks
  */
 #define UINT64_C(x) x##ULL
@@ -318,13 +355,6 @@
 	} while (0)
 #endif
 
-static void vhash_abort(struct vmac_ctx *ctx)
-{
-	ctx->polytmp[0] = ctx->polykey[0] ;
-	ctx->polytmp[1] = ctx->polykey[1] ;
-	ctx->first_block_processed = 0;
-}
-
 static u64 l3hash(u64 p1, u64 p2, u64 k1, u64 k2, u64 len)
 {
 	u64 rh, rl, t, z = 0;
@@ -364,280 +394,227 @@
 	return rl;
 }
 
-static void vhash_update(const unsigned char *m,
-			unsigned int mbytes, /* Pos multiple of VMAC_NHBYTES */
-			struct vmac_ctx *ctx)
+/* L1 and L2-hash one or more VMAC_NHBYTES-byte blocks */
+static void vhash_blocks(const struct vmac_tfm_ctx *tctx,
+			 struct vmac_desc_ctx *dctx,
+			 const __le64 *mptr, unsigned int blocks)
 {
-	u64 rh, rl, *mptr;
-	const u64 *kptr = (u64 *)ctx->nhkey;
-	int i;
-	u64 ch, cl;
-	u64 pkh = ctx->polykey[0];
-	u64 pkl = ctx->polykey[1];
+	const u64 *kptr = tctx->nhkey;
+	const u64 pkh = tctx->polykey[0];
+	const u64 pkl = tctx->polykey[1];
+	u64 ch = dctx->polytmp[0];
+	u64 cl = dctx->polytmp[1];
+	u64 rh, rl;
 
-	if (!mbytes)
-		return;
-
-	BUG_ON(mbytes % VMAC_NHBYTES);
-
-	mptr = (u64 *)m;
-	i = mbytes / VMAC_NHBYTES;  /* Must be non-zero */
-
-	ch = ctx->polytmp[0];
-	cl = ctx->polytmp[1];
-
-	if (!ctx->first_block_processed) {
-		ctx->first_block_processed = 1;
+	if (!dctx->first_block_processed) {
+		dctx->first_block_processed = true;
 		nh_vmac_nhbytes(mptr, kptr, VMAC_NHBYTES/8, rh, rl);
 		rh &= m62;
 		ADD128(ch, cl, rh, rl);
 		mptr += (VMAC_NHBYTES/sizeof(u64));
-		i--;
+		blocks--;
 	}
 
-	while (i--) {
+	while (blocks--) {
 		nh_vmac_nhbytes(mptr, kptr, VMAC_NHBYTES/8, rh, rl);
 		rh &= m62;
 		poly_step(ch, cl, pkh, pkl, rh, rl);
 		mptr += (VMAC_NHBYTES/sizeof(u64));
 	}
 
-	ctx->polytmp[0] = ch;
-	ctx->polytmp[1] = cl;
+	dctx->polytmp[0] = ch;
+	dctx->polytmp[1] = cl;
 }
 
-static u64 vhash(unsigned char m[], unsigned int mbytes,
-			u64 *tagl, struct vmac_ctx *ctx)
+static int vmac_setkey(struct crypto_shash *tfm,
+		       const u8 *key, unsigned int keylen)
 {
-	u64 rh, rl, *mptr;
-	const u64 *kptr = (u64 *)ctx->nhkey;
-	int i, remaining;
-	u64 ch, cl;
-	u64 pkh = ctx->polykey[0];
-	u64 pkl = ctx->polykey[1];
+	struct vmac_tfm_ctx *tctx = crypto_shash_ctx(tfm);
+	__be64 out[2];
+	u8 in[16] = { 0 };
+	unsigned int i;
+	int err;
 
-	mptr = (u64 *)m;
-	i = mbytes / VMAC_NHBYTES;
-	remaining = mbytes % VMAC_NHBYTES;
-
-	if (ctx->first_block_processed) {
-		ch = ctx->polytmp[0];
-		cl = ctx->polytmp[1];
-	} else if (i) {
-		nh_vmac_nhbytes(mptr, kptr, VMAC_NHBYTES/8, ch, cl);
-		ch &= m62;
-		ADD128(ch, cl, pkh, pkl);
-		mptr += (VMAC_NHBYTES/sizeof(u64));
-		i--;
-	} else if (remaining) {
-		nh_16(mptr, kptr, 2*((remaining+15)/16), ch, cl);
-		ch &= m62;
-		ADD128(ch, cl, pkh, pkl);
-		mptr += (VMAC_NHBYTES/sizeof(u64));
-		goto do_l3;
-	} else {/* Empty String */
-		ch = pkh; cl = pkl;
-		goto do_l3;
+	if (keylen != VMAC_KEY_LEN) {
+		crypto_shash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+		return -EINVAL;
 	}
 
-	while (i--) {
-		nh_vmac_nhbytes(mptr, kptr, VMAC_NHBYTES/8, rh, rl);
-		rh &= m62;
-		poly_step(ch, cl, pkh, pkl, rh, rl);
-		mptr += (VMAC_NHBYTES/sizeof(u64));
-	}
-	if (remaining) {
-		nh_16(mptr, kptr, 2*((remaining+15)/16), rh, rl);
-		rh &= m62;
-		poly_step(ch, cl, pkh, pkl, rh, rl);
-	}
-
-do_l3:
-	vhash_abort(ctx);
-	remaining *= 8;
-	return l3hash(ch, cl, ctx->l3key[0], ctx->l3key[1], remaining);
-}
-
-static u64 vmac(unsigned char m[], unsigned int mbytes,
-			const unsigned char n[16], u64 *tagl,
-			struct vmac_ctx_t *ctx)
-{
-	u64 *in_n, *out_p;
-	u64 p, h;
-	int i;
-
-	in_n = ctx->__vmac_ctx.cached_nonce;
-	out_p = ctx->__vmac_ctx.cached_aes;
-
-	i = n[15] & 1;
-	if ((*(u64 *)(n+8) != in_n[1]) || (*(u64 *)(n) != in_n[0])) {
-		in_n[0] = *(u64 *)(n);
-		in_n[1] = *(u64 *)(n+8);
-		((unsigned char *)in_n)[15] &= 0xFE;
-		crypto_cipher_encrypt_one(ctx->child,
-			(unsigned char *)out_p, (unsigned char *)in_n);
-
-		((unsigned char *)in_n)[15] |= (unsigned char)(1-i);
-	}
-	p = be64_to_cpup(out_p + i);
-	h = vhash(m, mbytes, (u64 *)0, &ctx->__vmac_ctx);
-	return le64_to_cpu(p + h);
-}
-
-static int vmac_set_key(unsigned char user_key[], struct vmac_ctx_t *ctx)
-{
-	u64 in[2] = {0}, out[2];
-	unsigned i;
-	int err = 0;
-
-	err = crypto_cipher_setkey(ctx->child, user_key, VMAC_KEY_LEN);
+	err = crypto_cipher_setkey(tctx->cipher, key, keylen);
 	if (err)
 		return err;
 
 	/* Fill nh key */
-	((unsigned char *)in)[0] = 0x80;
-	for (i = 0; i < sizeof(ctx->__vmac_ctx.nhkey)/8; i += 2) {
-		crypto_cipher_encrypt_one(ctx->child,
-			(unsigned char *)out, (unsigned char *)in);
-		ctx->__vmac_ctx.nhkey[i] = be64_to_cpup(out);
-		ctx->__vmac_ctx.nhkey[i+1] = be64_to_cpup(out+1);
-		((unsigned char *)in)[15] += 1;
+	in[0] = 0x80;
+	for (i = 0; i < ARRAY_SIZE(tctx->nhkey); i += 2) {
+		crypto_cipher_encrypt_one(tctx->cipher, (u8 *)out, in);
+		tctx->nhkey[i] = be64_to_cpu(out[0]);
+		tctx->nhkey[i+1] = be64_to_cpu(out[1]);
+		in[15]++;
 	}
 
 	/* Fill poly key */
-	((unsigned char *)in)[0] = 0xC0;
-	in[1] = 0;
-	for (i = 0; i < sizeof(ctx->__vmac_ctx.polykey)/8; i += 2) {
-		crypto_cipher_encrypt_one(ctx->child,
-			(unsigned char *)out, (unsigned char *)in);
-		ctx->__vmac_ctx.polytmp[i] =
-			ctx->__vmac_ctx.polykey[i] =
-				be64_to_cpup(out) & mpoly;
-		ctx->__vmac_ctx.polytmp[i+1] =
-			ctx->__vmac_ctx.polykey[i+1] =
-				be64_to_cpup(out+1) & mpoly;
-		((unsigned char *)in)[15] += 1;
+	in[0] = 0xC0;
+	in[15] = 0;
+	for (i = 0; i < ARRAY_SIZE(tctx->polykey); i += 2) {
+		crypto_cipher_encrypt_one(tctx->cipher, (u8 *)out, in);
+		tctx->polykey[i] = be64_to_cpu(out[0]) & mpoly;
+		tctx->polykey[i+1] = be64_to_cpu(out[1]) & mpoly;
+		in[15]++;
 	}
 
 	/* Fill ip key */
-	((unsigned char *)in)[0] = 0xE0;
-	in[1] = 0;
-	for (i = 0; i < sizeof(ctx->__vmac_ctx.l3key)/8; i += 2) {
+	in[0] = 0xE0;
+	in[15] = 0;
+	for (i = 0; i < ARRAY_SIZE(tctx->l3key); i += 2) {
 		do {
-			crypto_cipher_encrypt_one(ctx->child,
-				(unsigned char *)out, (unsigned char *)in);
-			ctx->__vmac_ctx.l3key[i] = be64_to_cpup(out);
-			ctx->__vmac_ctx.l3key[i+1] = be64_to_cpup(out+1);
-			((unsigned char *)in)[15] += 1;
-		} while (ctx->__vmac_ctx.l3key[i] >= p64
-			|| ctx->__vmac_ctx.l3key[i+1] >= p64);
+			crypto_cipher_encrypt_one(tctx->cipher, (u8 *)out, in);
+			tctx->l3key[i] = be64_to_cpu(out[0]);
+			tctx->l3key[i+1] = be64_to_cpu(out[1]);
+			in[15]++;
+		} while (tctx->l3key[i] >= p64 || tctx->l3key[i+1] >= p64);
 	}
 
-	/* Invalidate nonce/aes cache and reset other elements */
-	ctx->__vmac_ctx.cached_nonce[0] = (u64)-1; /* Ensure illegal nonce */
-	ctx->__vmac_ctx.cached_nonce[1] = (u64)0;  /* Ensure illegal nonce */
-	ctx->__vmac_ctx.first_block_processed = 0;
-
-	return err;
+	return 0;
 }
 
-static int vmac_setkey(struct crypto_shash *parent,
-		const u8 *key, unsigned int keylen)
+static int vmac_init(struct shash_desc *desc)
 {
-	struct vmac_ctx_t *ctx = crypto_shash_ctx(parent);
+	const struct vmac_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
+	struct vmac_desc_ctx *dctx = shash_desc_ctx(desc);
 
-	if (keylen != VMAC_KEY_LEN) {
-		crypto_shash_set_flags(parent, CRYPTO_TFM_RES_BAD_KEY_LEN);
+	dctx->partial_size = 0;
+	dctx->first_block_processed = false;
+	memcpy(dctx->polytmp, tctx->polykey, sizeof(dctx->polytmp));
+	dctx->nonce_size = 0;
+	return 0;
+}
+
+static int vmac_update(struct shash_desc *desc, const u8 *p, unsigned int len)
+{
+	const struct vmac_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
+	struct vmac_desc_ctx *dctx = shash_desc_ctx(desc);
+	unsigned int n;
+
+	/* Nonce is passed as first VMAC_NONCEBYTES bytes of data */
+	if (dctx->nonce_size < VMAC_NONCEBYTES) {
+		n = min(len, VMAC_NONCEBYTES - dctx->nonce_size);
+		memcpy(&dctx->nonce.bytes[dctx->nonce_size], p, n);
+		dctx->nonce_size += n;
+		p += n;
+		len -= n;
+	}
+
+	if (dctx->partial_size) {
+		n = min(len, VMAC_NHBYTES - dctx->partial_size);
+		memcpy(&dctx->partial[dctx->partial_size], p, n);
+		dctx->partial_size += n;
+		p += n;
+		len -= n;
+		if (dctx->partial_size == VMAC_NHBYTES) {
+			vhash_blocks(tctx, dctx, dctx->partial_words, 1);
+			dctx->partial_size = 0;
+		}
+	}
+
+	if (len >= VMAC_NHBYTES) {
+		n = round_down(len, VMAC_NHBYTES);
+		/* TODO: 'p' may be misaligned here */
+		vhash_blocks(tctx, dctx, (const __le64 *)p, n / VMAC_NHBYTES);
+		p += n;
+		len -= n;
+	}
+
+	if (len) {
+		memcpy(dctx->partial, p, len);
+		dctx->partial_size = len;
+	}
+
+	return 0;
+}
+
+static u64 vhash_final(const struct vmac_tfm_ctx *tctx,
+		       struct vmac_desc_ctx *dctx)
+{
+	unsigned int partial = dctx->partial_size;
+	u64 ch = dctx->polytmp[0];
+	u64 cl = dctx->polytmp[1];
+
+	/* L1 and L2-hash the final block if needed */
+	if (partial) {
+		/* Zero-pad to next 128-bit boundary */
+		unsigned int n = round_up(partial, 16);
+		u64 rh, rl;
+
+		memset(&dctx->partial[partial], 0, n - partial);
+		nh_16(dctx->partial_words, tctx->nhkey, n / 8, rh, rl);
+		rh &= m62;
+		if (dctx->first_block_processed)
+			poly_step(ch, cl, tctx->polykey[0], tctx->polykey[1],
+				  rh, rl);
+		else
+			ADD128(ch, cl, rh, rl);
+	}
+
+	/* L3-hash the 128-bit output of L2-hash */
+	return l3hash(ch, cl, tctx->l3key[0], tctx->l3key[1], partial * 8);
+}
+
+static int vmac_final(struct shash_desc *desc, u8 *out)
+{
+	const struct vmac_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
+	struct vmac_desc_ctx *dctx = shash_desc_ctx(desc);
+	int index;
+	u64 hash, pad;
+
+	if (dctx->nonce_size != VMAC_NONCEBYTES)
 		return -EINVAL;
-	}
 
-	return vmac_set_key((u8 *)key, ctx);
-}
-
-static int vmac_init(struct shash_desc *pdesc)
-{
-	return 0;
-}
-
-static int vmac_update(struct shash_desc *pdesc, const u8 *p,
-		unsigned int len)
-{
-	struct crypto_shash *parent = pdesc->tfm;
-	struct vmac_ctx_t *ctx = crypto_shash_ctx(parent);
-	int expand;
-	int min;
-
-	expand = VMAC_NHBYTES - ctx->partial_size > 0 ?
-			VMAC_NHBYTES - ctx->partial_size : 0;
-
-	min = len < expand ? len : expand;
-
-	memcpy(ctx->partial + ctx->partial_size, p, min);
-	ctx->partial_size += min;
-
-	if (len < expand)
-		return 0;
-
-	vhash_update(ctx->partial, VMAC_NHBYTES, &ctx->__vmac_ctx);
-	ctx->partial_size = 0;
-
-	len -= expand;
-	p += expand;
-
-	if (len % VMAC_NHBYTES) {
-		memcpy(ctx->partial, p + len - (len % VMAC_NHBYTES),
-			len % VMAC_NHBYTES);
-		ctx->partial_size = len % VMAC_NHBYTES;
-	}
-
-	vhash_update(p, len - len % VMAC_NHBYTES, &ctx->__vmac_ctx);
-
-	return 0;
-}
-
-static int vmac_final(struct shash_desc *pdesc, u8 *out)
-{
-	struct crypto_shash *parent = pdesc->tfm;
-	struct vmac_ctx_t *ctx = crypto_shash_ctx(parent);
-	vmac_t mac;
-	u8 nonce[16] = {};
-
-	/* vmac() ends up accessing outside the array bounds that
-	 * we specify.  In appears to access up to the next 2-word
-	 * boundary.  We'll just be uber cautious and zero the
-	 * unwritten bytes in the buffer.
+	/*
+	 * The VMAC specification requires a nonce at least 1 bit shorter than
+	 * the block cipher's block length, so we actually only accept a 127-bit
+	 * nonce.  We define the unused bit to be the first one and require that
+	 * it be 0, so the needed prepending of a 0 bit is implicit.
 	 */
-	if (ctx->partial_size) {
-		memset(ctx->partial + ctx->partial_size, 0,
-			VMAC_NHBYTES - ctx->partial_size);
-	}
-	mac = vmac(ctx->partial, ctx->partial_size, nonce, NULL, ctx);
-	memcpy(out, &mac, sizeof(vmac_t));
-	memzero_explicit(&mac, sizeof(vmac_t));
-	memset(&ctx->__vmac_ctx, 0, sizeof(struct vmac_ctx));
-	ctx->partial_size = 0;
+	if (dctx->nonce.bytes[0] & 0x80)
+		return -EINVAL;
+
+	/* Finish calculating the VHASH of the message */
+	hash = vhash_final(tctx, dctx);
+
+	/* Generate pseudorandom pad by encrypting the nonce */
+	BUILD_BUG_ON(VMAC_NONCEBYTES != 2 * (VMAC_TAG_LEN / 8));
+	index = dctx->nonce.bytes[VMAC_NONCEBYTES - 1] & 1;
+	dctx->nonce.bytes[VMAC_NONCEBYTES - 1] &= ~1;
+	crypto_cipher_encrypt_one(tctx->cipher, dctx->nonce.bytes,
+				  dctx->nonce.bytes);
+	pad = be64_to_cpu(dctx->nonce.pads[index]);
+
+	/* The VMAC is the sum of VHASH and the pseudorandom pad */
+	put_unaligned_be64(hash + pad, out);
 	return 0;
 }
 
 static int vmac_init_tfm(struct crypto_tfm *tfm)
 {
-	struct crypto_cipher *cipher;
-	struct crypto_instance *inst = (void *)tfm->__crt_alg;
+	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
 	struct crypto_spawn *spawn = crypto_instance_ctx(inst);
-	struct vmac_ctx_t *ctx = crypto_tfm_ctx(tfm);
+	struct vmac_tfm_ctx *tctx = crypto_tfm_ctx(tfm);
+	struct crypto_cipher *cipher;
 
 	cipher = crypto_spawn_cipher(spawn);
 	if (IS_ERR(cipher))
 		return PTR_ERR(cipher);
 
-	ctx->child = cipher;
+	tctx->cipher = cipher;
 	return 0;
 }
 
 static void vmac_exit_tfm(struct crypto_tfm *tfm)
 {
-	struct vmac_ctx_t *ctx = crypto_tfm_ctx(tfm);
-	crypto_free_cipher(ctx->child);
+	struct vmac_tfm_ctx *tctx = crypto_tfm_ctx(tfm);
+
+	crypto_free_cipher(tctx->cipher);
 }
 
 static int vmac_create(struct crypto_template *tmpl, struct rtattr **tb)
@@ -655,7 +632,11 @@
 	if (IS_ERR(alg))
 		return PTR_ERR(alg);
 
-	inst = shash_alloc_instance("vmac", alg);
+	err = -EINVAL;
+	if (alg->cra_blocksize != VMAC_NONCEBYTES)
+		goto out_put_alg;
+
+	inst = shash_alloc_instance(tmpl->name, alg);
 	err = PTR_ERR(inst);
 	if (IS_ERR(inst))
 		goto out_put_alg;
@@ -670,11 +651,12 @@
 	inst->alg.base.cra_blocksize = alg->cra_blocksize;
 	inst->alg.base.cra_alignmask = alg->cra_alignmask;
 
-	inst->alg.digestsize = sizeof(vmac_t);
-	inst->alg.base.cra_ctxsize = sizeof(struct vmac_ctx_t);
+	inst->alg.base.cra_ctxsize = sizeof(struct vmac_tfm_ctx);
 	inst->alg.base.cra_init = vmac_init_tfm;
 	inst->alg.base.cra_exit = vmac_exit_tfm;
 
+	inst->alg.descsize = sizeof(struct vmac_desc_ctx);
+	inst->alg.digestsize = VMAC_TAG_LEN / 8;
 	inst->alg.init = vmac_init;
 	inst->alg.update = vmac_update;
 	inst->alg.final = vmac_final;
@@ -691,8 +673,8 @@
 	return err;
 }
 
-static struct crypto_template vmac_tmpl = {
-	.name = "vmac",
+static struct crypto_template vmac64_tmpl = {
+	.name = "vmac64",
 	.create = vmac_create,
 	.free = shash_free_instance,
 	.module = THIS_MODULE,
@@ -700,12 +682,12 @@
 
 static int __init vmac_module_init(void)
 {
-	return crypto_register_template(&vmac_tmpl);
+	return crypto_register_template(&vmac64_tmpl);
 }
 
 static void __exit vmac_module_exit(void)
 {
-	crypto_unregister_template(&vmac_tmpl);
+	crypto_unregister_template(&vmac64_tmpl);
 }
 
 module_init(vmac_module_init);
@@ -713,4 +695,4 @@
 
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("VMAC hash algorithm");
-MODULE_ALIAS_CRYPTO("vmac");
+MODULE_ALIAS_CRYPTO("vmac64");
diff --git a/crypto/wp512.c b/crypto/wp512.c
index 7ee5a04..149e577 100644
--- a/crypto/wp512.c
+++ b/crypto/wp512.c
@@ -1127,7 +1127,6 @@
 	.descsize	=	sizeof(struct wp512_ctx),
 	.base		=	{
 		.cra_name	=	"wp512",
-		.cra_flags	=	CRYPTO_ALG_TYPE_SHASH,
 		.cra_blocksize	=	WP512_BLOCK_SIZE,
 		.cra_module	=	THIS_MODULE,
 	}
@@ -1139,7 +1138,6 @@
 	.descsize	=	sizeof(struct wp512_ctx),
 	.base		=	{
 		.cra_name	=	"wp384",
-		.cra_flags	=	CRYPTO_ALG_TYPE_SHASH,
 		.cra_blocksize	=	WP512_BLOCK_SIZE,
 		.cra_module	=	THIS_MODULE,
 	}
@@ -1151,7 +1149,6 @@
 	.descsize	=	sizeof(struct wp512_ctx),
 	.base		=	{
 		.cra_name	=	"wp256",
-		.cra_flags	=	CRYPTO_ALG_TYPE_SHASH,
 		.cra_blocksize	=	WP512_BLOCK_SIZE,
 		.cra_module	=	THIS_MODULE,
 	}
diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig
index c34b257..dac895d 100644
--- a/drivers/char/hw_random/Kconfig
+++ b/drivers/char/hw_random/Kconfig
@@ -307,19 +307,6 @@
 
 	  If unsure, say Y.
 
-config HW_RANDOM_MSM
-	tristate "Qualcomm SoCs Random Number Generator support"
-	depends on HW_RANDOM && ARCH_QCOM
-	default HW_RANDOM
-	---help---
-	  This driver provides kernel-side support for the Random Number
-	  Generator hardware found on Qualcomm SoCs.
-
-	  To compile this driver as a module, choose M here. the
-	  module will be called msm-rng.
-
-	  If unsure, say Y.
-
 config HW_RANDOM_ST
 	tristate "ST Microelectronics HW Random Number Generator support"
 	depends on HW_RANDOM && ARCH_STI
diff --git a/drivers/char/hw_random/Makefile b/drivers/char/hw_random/Makefile
index 533e913..e35ec3c 100644
--- a/drivers/char/hw_random/Makefile
+++ b/drivers/char/hw_random/Makefile
@@ -29,7 +29,6 @@
 obj-$(CONFIG_HW_RANDOM_HISI)	+= hisi-rng.o
 obj-$(CONFIG_HW_RANDOM_BCM2835) += bcm2835-rng.o
 obj-$(CONFIG_HW_RANDOM_IPROC_RNG200) += iproc-rng200.o
-obj-$(CONFIG_HW_RANDOM_MSM) += msm-rng.o
 obj-$(CONFIG_HW_RANDOM_ST) += st-rng.o
 obj-$(CONFIG_HW_RANDOM_XGENE) += xgene-rng.o
 obj-$(CONFIG_HW_RANDOM_STM32) += stm32-rng.o
diff --git a/drivers/char/hw_random/msm-rng.c b/drivers/char/hw_random/msm-rng.c
deleted file mode 100644
index 841fee8..0000000
--- a/drivers/char/hw_random/msm-rng.c
+++ /dev/null
@@ -1,183 +0,0 @@
-/*
- * Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- */
-#include <linux/clk.h>
-#include <linux/err.h>
-#include <linux/hw_random.h>
-#include <linux/io.h>
-#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/platform_device.h>
-
-/* Device specific register offsets */
-#define PRNG_DATA_OUT		0x0000
-#define PRNG_STATUS		0x0004
-#define PRNG_LFSR_CFG		0x0100
-#define PRNG_CONFIG		0x0104
-
-/* Device specific register masks and config values */
-#define PRNG_LFSR_CFG_MASK	0x0000ffff
-#define PRNG_LFSR_CFG_CLOCKS	0x0000dddd
-#define PRNG_CONFIG_HW_ENABLE	BIT(1)
-#define PRNG_STATUS_DATA_AVAIL	BIT(0)
-
-#define MAX_HW_FIFO_DEPTH	16
-#define MAX_HW_FIFO_SIZE	(MAX_HW_FIFO_DEPTH * 4)
-#define WORD_SZ			4
-
-struct msm_rng {
-	void __iomem *base;
-	struct clk *clk;
-	struct hwrng hwrng;
-};
-
-#define to_msm_rng(p)	container_of(p, struct msm_rng, hwrng)
-
-static int msm_rng_enable(struct hwrng *hwrng, int enable)
-{
-	struct msm_rng *rng = to_msm_rng(hwrng);
-	u32 val;
-	int ret;
-
-	ret = clk_prepare_enable(rng->clk);
-	if (ret)
-		return ret;
-
-	if (enable) {
-		/* Enable PRNG only if it is not already enabled */
-		val = readl_relaxed(rng->base + PRNG_CONFIG);
-		if (val & PRNG_CONFIG_HW_ENABLE)
-			goto already_enabled;
-
-		val = readl_relaxed(rng->base + PRNG_LFSR_CFG);
-		val &= ~PRNG_LFSR_CFG_MASK;
-		val |= PRNG_LFSR_CFG_CLOCKS;
-		writel(val, rng->base + PRNG_LFSR_CFG);
-
-		val = readl_relaxed(rng->base + PRNG_CONFIG);
-		val |= PRNG_CONFIG_HW_ENABLE;
-		writel(val, rng->base + PRNG_CONFIG);
-	} else {
-		val = readl_relaxed(rng->base + PRNG_CONFIG);
-		val &= ~PRNG_CONFIG_HW_ENABLE;
-		writel(val, rng->base + PRNG_CONFIG);
-	}
-
-already_enabled:
-	clk_disable_unprepare(rng->clk);
-	return 0;
-}
-
-static int msm_rng_read(struct hwrng *hwrng, void *data, size_t max, bool wait)
-{
-	struct msm_rng *rng = to_msm_rng(hwrng);
-	size_t currsize = 0;
-	u32 *retdata = data;
-	size_t maxsize;
-	int ret;
-	u32 val;
-
-	/* calculate max size bytes to transfer back to caller */
-	maxsize = min_t(size_t, MAX_HW_FIFO_SIZE, max);
-
-	ret = clk_prepare_enable(rng->clk);
-	if (ret)
-		return ret;
-
-	/* read random data from hardware */
-	do {
-		val = readl_relaxed(rng->base + PRNG_STATUS);
-		if (!(val & PRNG_STATUS_DATA_AVAIL))
-			break;
-
-		val = readl_relaxed(rng->base + PRNG_DATA_OUT);
-		if (!val)
-			break;
-
-		*retdata++ = val;
-		currsize += WORD_SZ;
-
-		/* make sure we stay on 32bit boundary */
-		if ((maxsize - currsize) < WORD_SZ)
-			break;
-	} while (currsize < maxsize);
-
-	clk_disable_unprepare(rng->clk);
-
-	return currsize;
-}
-
-static int msm_rng_init(struct hwrng *hwrng)
-{
-	return msm_rng_enable(hwrng, 1);
-}
-
-static void msm_rng_cleanup(struct hwrng *hwrng)
-{
-	msm_rng_enable(hwrng, 0);
-}
-
-static int msm_rng_probe(struct platform_device *pdev)
-{
-	struct resource *res;
-	struct msm_rng *rng;
-	int ret;
-
-	rng = devm_kzalloc(&pdev->dev, sizeof(*rng), GFP_KERNEL);
-	if (!rng)
-		return -ENOMEM;
-
-	platform_set_drvdata(pdev, rng);
-
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	rng->base = devm_ioremap_resource(&pdev->dev, res);
-	if (IS_ERR(rng->base))
-		return PTR_ERR(rng->base);
-
-	rng->clk = devm_clk_get(&pdev->dev, "core");
-	if (IS_ERR(rng->clk))
-		return PTR_ERR(rng->clk);
-
-	rng->hwrng.name = KBUILD_MODNAME,
-	rng->hwrng.init = msm_rng_init,
-	rng->hwrng.cleanup = msm_rng_cleanup,
-	rng->hwrng.read = msm_rng_read,
-
-	ret = devm_hwrng_register(&pdev->dev, &rng->hwrng);
-	if (ret) {
-		dev_err(&pdev->dev, "failed to register hwrng\n");
-		return ret;
-	}
-
-	return 0;
-}
-
-static const struct of_device_id msm_rng_of_match[] = {
-	{ .compatible = "qcom,prng", },
-	{}
-};
-MODULE_DEVICE_TABLE(of, msm_rng_of_match);
-
-static struct platform_driver msm_rng_driver = {
-	.probe = msm_rng_probe,
-	.driver = {
-		.name = KBUILD_MODNAME,
-		.of_match_table = of_match_ptr(msm_rng_of_match),
-	}
-};
-module_platform_driver(msm_rng_driver);
-
-MODULE_ALIAS("platform:" KBUILD_MODNAME);
-MODULE_AUTHOR("The Linux Foundation");
-MODULE_DESCRIPTION("Qualcomm MSM random number generator driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 43cccf6..2bf799b 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -585,6 +585,17 @@
 	  hardware. To compile this driver as a module, choose M here. The
 	  module will be called qcrypto.
 
+config CRYPTO_DEV_QCOM_RNG
+	tristate "Qualcomm Random Number Generator Driver"
+	depends on ARCH_QCOM || COMPILE_TEST
+	select CRYPTO_RNG
+	help
+	  This driver provides support for the Random Number
+	  Generator hardware found on Qualcomm SoCs.
+
+	  To compile this driver as a module, choose M here. The
+	  module will be called qcom-rng. If unsure, say N.
+
 config CRYPTO_DEV_VMX
 	bool "Support for VMX cryptographic acceleration instructions"
 	depends on PPC64 && VSX
@@ -689,8 +700,10 @@
 	select CRYPTO_AES
 	select CRYPTO_AUTHENC
 	select CRYPTO_BLKCIPHER
+	select CRYPTO_DES
 	select CRYPTO_HASH
 	select CRYPTO_HMAC
+	select CRYPTO_MD5
 	select CRYPTO_SHA1
 	select CRYPTO_SHA256
 	select CRYPTO_SHA512
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index 7ae87b4..3602875 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -33,6 +33,7 @@
 obj-$(CONFIG_CRYPTO_DEV_PPC4XX) += amcc/
 obj-$(CONFIG_CRYPTO_DEV_QAT) += qat/
 obj-$(CONFIG_CRYPTO_DEV_QCE) += qce/
+obj-$(CONFIG_CRYPTO_DEV_QCOM_RNG) += qcom-rng.o
 obj-$(CONFIG_CRYPTO_DEV_ROCKCHIP) += rockchip/
 obj-$(CONFIG_CRYPTO_DEV_S5P) += s5p-sss.o
 obj-$(CONFIG_CRYPTO_DEV_SAHARA) += sahara.o
diff --git a/drivers/crypto/amcc/crypto4xx_core.c b/drivers/crypto/amcc/crypto4xx_core.c
index 05981cc..6eaec9b 100644
--- a/drivers/crypto/amcc/crypto4xx_core.c
+++ b/drivers/crypto/amcc/crypto4xx_core.c
@@ -1132,8 +1132,7 @@
 			.cra_name = "cbc(aes)",
 			.cra_driver_name = "cbc-aes-ppc4xx",
 			.cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
-			.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
-				CRYPTO_ALG_ASYNC |
+			.cra_flags = CRYPTO_ALG_ASYNC |
 				CRYPTO_ALG_KERN_DRIVER_ONLY,
 			.cra_blocksize = AES_BLOCK_SIZE,
 			.cra_ctxsize = sizeof(struct crypto4xx_ctx),
@@ -1153,8 +1152,7 @@
 			.cra_name = "cfb(aes)",
 			.cra_driver_name = "cfb-aes-ppc4xx",
 			.cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
-			.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
-				CRYPTO_ALG_ASYNC |
+			.cra_flags = CRYPTO_ALG_ASYNC |
 				CRYPTO_ALG_KERN_DRIVER_ONLY,
 			.cra_blocksize = AES_BLOCK_SIZE,
 			.cra_ctxsize = sizeof(struct crypto4xx_ctx),
@@ -1174,8 +1172,7 @@
 			.cra_name = "ctr(aes)",
 			.cra_driver_name = "ctr-aes-ppc4xx",
 			.cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
-			.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
-				CRYPTO_ALG_NEED_FALLBACK |
+			.cra_flags = CRYPTO_ALG_NEED_FALLBACK |
 				CRYPTO_ALG_ASYNC |
 				CRYPTO_ALG_KERN_DRIVER_ONLY,
 			.cra_blocksize = AES_BLOCK_SIZE,
@@ -1196,8 +1193,7 @@
 			.cra_name = "rfc3686(ctr(aes))",
 			.cra_driver_name = "rfc3686-ctr-aes-ppc4xx",
 			.cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
-			.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
-				CRYPTO_ALG_ASYNC |
+			.cra_flags = CRYPTO_ALG_ASYNC |
 				CRYPTO_ALG_KERN_DRIVER_ONLY,
 			.cra_blocksize = AES_BLOCK_SIZE,
 			.cra_ctxsize = sizeof(struct crypto4xx_ctx),
@@ -1217,8 +1213,7 @@
 			.cra_name = "ecb(aes)",
 			.cra_driver_name = "ecb-aes-ppc4xx",
 			.cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
-			.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
-				CRYPTO_ALG_ASYNC |
+			.cra_flags = CRYPTO_ALG_ASYNC |
 				CRYPTO_ALG_KERN_DRIVER_ONLY,
 			.cra_blocksize = AES_BLOCK_SIZE,
 			.cra_ctxsize = sizeof(struct crypto4xx_ctx),
@@ -1237,8 +1232,7 @@
 			.cra_name = "ofb(aes)",
 			.cra_driver_name = "ofb-aes-ppc4xx",
 			.cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
-			.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
-				CRYPTO_ALG_ASYNC |
+			.cra_flags = CRYPTO_ALG_ASYNC |
 				CRYPTO_ALG_KERN_DRIVER_ONLY,
 			.cra_blocksize = AES_BLOCK_SIZE,
 			.cra_ctxsize = sizeof(struct crypto4xx_ctx),
diff --git a/drivers/crypto/atmel-ecc.c b/drivers/crypto/atmel-ecc.c
index e66f18a0..74f083f 100644
--- a/drivers/crypto/atmel-ecc.c
+++ b/drivers/crypto/atmel-ecc.c
@@ -186,7 +186,10 @@
 	 * always be the same. Use a macro for the key size to avoid unnecessary
 	 * computations.
 	 */
-	copied = sg_copy_to_buffer(pubkey, 1, cmd->data, ATMEL_ECC_PUBKEY_SIZE);
+	copied = sg_copy_to_buffer(pubkey,
+				   sg_nents_for_len(pubkey,
+						    ATMEL_ECC_PUBKEY_SIZE),
+				   cmd->data, ATMEL_ECC_PUBKEY_SIZE);
 	if (copied != ATMEL_ECC_PUBKEY_SIZE)
 		return -EINVAL;
 
@@ -268,15 +271,17 @@
 	struct kpp_request *req = areq;
 	struct atmel_ecdh_ctx *ctx = work_data->ctx;
 	struct atmel_ecc_cmd *cmd = &work_data->cmd;
-	size_t copied;
-	size_t n_sz = ctx->n_sz;
+	size_t copied, n_sz;
 
 	if (status)
 		goto free_work_data;
 
+	/* might want less than we've got */
+	n_sz = min_t(size_t, ctx->n_sz, req->dst_len);
+
 	/* copy the shared secret */
-	copied = sg_copy_from_buffer(req->dst, 1, &cmd->data[RSP_DATA_IDX],
-				     n_sz);
+	copied = sg_copy_from_buffer(req->dst, sg_nents_for_len(req->dst, n_sz),
+				     &cmd->data[RSP_DATA_IDX], n_sz);
 	if (copied != n_sz)
 		status = -EINVAL;
 
@@ -440,7 +445,7 @@
 {
 	struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
 	struct atmel_ecdh_ctx *ctx = kpp_tfm_ctx(tfm);
-	size_t copied;
+	size_t copied, nbytes;
 	int ret = 0;
 
 	if (ctx->do_fallback) {
@@ -448,10 +453,14 @@
 		return crypto_kpp_generate_public_key(req);
 	}
 
+	/* might want less than we've got */
+	nbytes = min_t(size_t, ATMEL_ECC_PUBKEY_SIZE, req->dst_len);
+
 	/* public key was saved at private key generation */
-	copied = sg_copy_from_buffer(req->dst, 1, ctx->public_key,
-				     ATMEL_ECC_PUBKEY_SIZE);
-	if (copied != ATMEL_ECC_PUBKEY_SIZE)
+	copied = sg_copy_from_buffer(req->dst,
+				     sg_nents_for_len(req->dst, nbytes),
+				     ctx->public_key, nbytes);
+	if (copied != nbytes)
 		ret = -EINVAL;
 
 	return ret;
@@ -470,6 +479,10 @@
 		return crypto_kpp_compute_shared_secret(req);
 	}
 
+	/* a public key is a single curve point, i.e. exactly two coordinates */
+	if (req->src_len != ATMEL_ECC_PUBKEY_SIZE)
+		return -EINVAL;
+
 	gfp = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? GFP_KERNEL :
 							     GFP_ATOMIC;
 
@@ -554,10 +567,6 @@
 	}
 
 	crypto_kpp_set_flags(fallback, crypto_kpp_get_flags(tfm));
-
-	dev_info(&ctx->client->dev, "Using '%s' as fallback implementation.\n",
-		 crypto_tfm_alg_driver_name(crypto_kpp_tfm(fallback)));
-
 	ctx->fallback = fallback;
 
 	return 0;
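
Both copies previously assumed the destination scatterlist was a single entry;
sg_nents_for_len() makes the nent count match the bytes actually copied.  The
general pattern, sketched without the atmel-specific context:

	int nents = sg_nents_for_len(req->dst, n);	/* entries covering n bytes */

	if (nents < 0)
		return nents;		/* list shorter than n bytes */
	if (sg_copy_from_buffer(req->dst, nents, buf, n) != n)
		return -EINVAL;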
diff --git a/drivers/crypto/atmel-sha.c b/drivers/crypto/atmel-sha.c
index 4d43081..8a19df2 100644
--- a/drivers/crypto/atmel-sha.c
+++ b/drivers/crypto/atmel-sha.c
@@ -2316,9 +2316,7 @@
 		goto error;
 	}
 
-	tfm = crypto_alloc_ahash(name,
-				 CRYPTO_ALG_TYPE_AHASH,
-				 CRYPTO_ALG_TYPE_AHASH_MASK);
+	tfm = crypto_alloc_ahash(name, 0, 0);
 	if (IS_ERR(tfm)) {
 		err = PTR_ERR(tfm);
 		goto error;
diff --git a/drivers/crypto/axis/artpec6_crypto.c b/drivers/crypto/axis/artpec6_crypto.c
index 0fb8bbf..7f07a50 100644
--- a/drivers/crypto/axis/artpec6_crypto.c
+++ b/drivers/crypto/axis/artpec6_crypto.c
@@ -2704,7 +2704,7 @@
 			.cra_name = "sha1",
 			.cra_driver_name = "artpec-sha1",
 			.cra_priority = 300,
-			.cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC,
+			.cra_flags = CRYPTO_ALG_ASYNC,
 			.cra_blocksize = SHA1_BLOCK_SIZE,
 			.cra_ctxsize = sizeof(struct artpec6_hashalg_context),
 			.cra_alignmask = 3,
@@ -2727,7 +2727,7 @@
 			.cra_name = "sha256",
 			.cra_driver_name = "artpec-sha256",
 			.cra_priority = 300,
-			.cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC,
+			.cra_flags = CRYPTO_ALG_ASYNC,
 			.cra_blocksize = SHA256_BLOCK_SIZE,
 			.cra_ctxsize = sizeof(struct artpec6_hashalg_context),
 			.cra_alignmask = 3,
@@ -2751,7 +2751,7 @@
 			.cra_name = "hmac(sha256)",
 			.cra_driver_name = "artpec-hmac-sha256",
 			.cra_priority = 300,
-			.cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC,
+			.cra_flags = CRYPTO_ALG_ASYNC,
 			.cra_blocksize = SHA256_BLOCK_SIZE,
 			.cra_ctxsize = sizeof(struct artpec6_hashalg_context),
 			.cra_alignmask = 3,
@@ -2777,7 +2777,7 @@
 			.cra_name = "sha384",
 			.cra_driver_name = "artpec-sha384",
 			.cra_priority = 300,
-			.cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC,
+			.cra_flags = CRYPTO_ALG_ASYNC,
 			.cra_blocksize = SHA384_BLOCK_SIZE,
 			.cra_ctxsize = sizeof(struct artpec6_hashalg_context),
 			.cra_alignmask = 3,
@@ -2801,7 +2801,7 @@
 			.cra_name = "hmac(sha384)",
 			.cra_driver_name = "artpec-hmac-sha384",
 			.cra_priority = 300,
-			.cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC,
+			.cra_flags = CRYPTO_ALG_ASYNC,
 			.cra_blocksize = SHA384_BLOCK_SIZE,
 			.cra_ctxsize = sizeof(struct artpec6_hashalg_context),
 			.cra_alignmask = 3,
@@ -2824,7 +2824,7 @@
 			.cra_name = "sha512",
 			.cra_driver_name = "artpec-sha512",
 			.cra_priority = 300,
-			.cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC,
+			.cra_flags = CRYPTO_ALG_ASYNC,
 			.cra_blocksize = SHA512_BLOCK_SIZE,
 			.cra_ctxsize = sizeof(struct artpec6_hashalg_context),
 			.cra_alignmask = 3,
@@ -2848,7 +2848,7 @@
 			.cra_name = "hmac(sha512)",
 			.cra_driver_name = "artpec-hmac-sha512",
 			.cra_priority = 300,
-			.cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC,
+			.cra_flags = CRYPTO_ALG_ASYNC,
 			.cra_blocksize = SHA512_BLOCK_SIZE,
 			.cra_ctxsize = sizeof(struct artpec6_hashalg_context),
 			.cra_alignmask = 3,
@@ -2867,8 +2867,7 @@
 			.cra_name = "ecb(aes)",
 			.cra_driver_name = "artpec6-ecb-aes",
 			.cra_priority = 300,
-			.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
-				     CRYPTO_ALG_ASYNC,
+			.cra_flags = CRYPTO_ALG_ASYNC,
 			.cra_blocksize = AES_BLOCK_SIZE,
 			.cra_ctxsize = sizeof(struct artpec6_cryptotfm_context),
 			.cra_alignmask = 3,
@@ -2888,8 +2887,7 @@
 			.cra_name = "ctr(aes)",
 			.cra_driver_name = "artpec6-ctr-aes",
 			.cra_priority = 300,
-			.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
-				     CRYPTO_ALG_ASYNC |
+			.cra_flags = CRYPTO_ALG_ASYNC |
 				     CRYPTO_ALG_NEED_FALLBACK,
 			.cra_blocksize = 1,
 			.cra_ctxsize = sizeof(struct artpec6_cryptotfm_context),
@@ -2911,8 +2909,7 @@
 			.cra_name = "cbc(aes)",
 			.cra_driver_name = "artpec6-cbc-aes",
 			.cra_priority = 300,
-			.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
-				     CRYPTO_ALG_ASYNC,
+			.cra_flags = CRYPTO_ALG_ASYNC,
 			.cra_blocksize = AES_BLOCK_SIZE,
 			.cra_ctxsize = sizeof(struct artpec6_cryptotfm_context),
 			.cra_alignmask = 3,
@@ -2933,8 +2930,7 @@
 			.cra_name = "xts(aes)",
 			.cra_driver_name = "artpec6-xts-aes",
 			.cra_priority = 300,
-			.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
-				     CRYPTO_ALG_ASYNC,
+			.cra_flags = CRYPTO_ALG_ASYNC,
 			.cra_blocksize = 1,
 			.cra_ctxsize = sizeof(struct artpec6_cryptotfm_context),
 			.cra_alignmask = 3,
@@ -2964,7 +2960,7 @@
 			.cra_name = "gcm(aes)",
 			.cra_driver_name = "artpec-gcm-aes",
 			.cra_priority = 300,
-			.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC |
+			.cra_flags = CRYPTO_ALG_ASYNC |
 				     CRYPTO_ALG_KERN_DRIVER_ONLY,
 			.cra_blocksize = 1,
 			.cra_ctxsize = sizeof(struct artpec6_cryptotfm_context),
diff --git a/drivers/crypto/bcm/cipher.c b/drivers/crypto/bcm/cipher.c
index 309c67c..2d1f1db 100644
--- a/drivers/crypto/bcm/cipher.c
+++ b/drivers/crypto/bcm/cipher.c
@@ -3914,8 +3914,7 @@
 				    .cra_name = "md5",
 				    .cra_driver_name = "md5-iproc",
 				    .cra_blocksize = MD5_BLOCK_WORDS * 4,
-				    .cra_flags = CRYPTO_ALG_TYPE_AHASH |
-					     CRYPTO_ALG_ASYNC,
+				    .cra_flags = CRYPTO_ALG_ASYNC,
 				}
 		      },
 	 .cipher_info = {
@@ -4649,8 +4648,7 @@
 	hash->halg.base.cra_ctxsize = sizeof(struct iproc_ctx_s);
 	hash->halg.base.cra_init = ahash_cra_init;
 	hash->halg.base.cra_exit = generic_cra_exit;
-	hash->halg.base.cra_type = &crypto_ahash_type;
-	hash->halg.base.cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC;
+	hash->halg.base.cra_flags = CRYPTO_ALG_ASYNC;
 	hash->halg.statesize = sizeof(struct spu_hash_export_s);
 
 	if (driver_alg->auth_info.mode != HASH_MODE_HMAC) {
@@ -4691,7 +4689,7 @@
 	aead->base.cra_ctxsize = sizeof(struct iproc_ctx_s);
 	INIT_LIST_HEAD(&aead->base.cra_list);
 
-	aead->base.cra_flags |= CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC;
+	aead->base.cra_flags |= CRYPTO_ALG_ASYNC;
 	/* setkey set in alg initialization */
 	aead->setauthsize = aead_setauthsize;
 	aead->encrypt = aead_encrypt;
diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c
index 0beb281..43975ab 100644
--- a/drivers/crypto/caam/caamhash.c
+++ b/drivers/crypto/caam/caamhash.c
@@ -1846,8 +1846,7 @@
 	alg->cra_priority = CAAM_CRA_PRIORITY;
 	alg->cra_blocksize = template->blocksize;
 	alg->cra_alignmask = 0;
-	alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_TYPE_AHASH;
-	alg->cra_type = &crypto_ahash_type;
+	alg->cra_flags = CRYPTO_ALG_ASYNC;
 
 	t_alg->alg_type = template->alg_type;
 
diff --git a/drivers/crypto/cavium/cpt/cptvf_algs.c b/drivers/crypto/cavium/cpt/cptvf_algs.c
index df21d99..600336d 100644
--- a/drivers/crypto/cavium/cpt/cptvf_algs.c
+++ b/drivers/crypto/cavium/cpt/cptvf_algs.c
@@ -351,7 +351,7 @@
 	return 0;
 }
 
-struct crypto_alg algs[] = { {
+static struct crypto_alg algs[] = { {
 	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
 	.cra_blocksize = AES_BLOCK_SIZE,
 	.cra_ctxsize = sizeof(struct cvm_enc_ctx),
diff --git a/drivers/crypto/ccp/ccp-crypto-aes-cmac.c b/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
index 26687f31..3c6fe57 100644
--- a/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
+++ b/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
@@ -399,13 +399,12 @@
 	base = &halg->base;
 	snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "cmac(aes)");
 	snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, "cmac-aes-ccp");
-	base->cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC |
+	base->cra_flags = CRYPTO_ALG_ASYNC |
 			  CRYPTO_ALG_KERN_DRIVER_ONLY |
 			  CRYPTO_ALG_NEED_FALLBACK;
 	base->cra_blocksize = AES_BLOCK_SIZE;
 	base->cra_ctxsize = sizeof(struct ccp_ctx);
 	base->cra_priority = CCP_CRA_PRIORITY;
-	base->cra_type = &crypto_ahash_type;
 	base->cra_init = ccp_aes_cmac_cra_init;
 	base->cra_exit = ccp_aes_cmac_cra_exit;
 	base->cra_module = THIS_MODULE;
diff --git a/drivers/crypto/ccp/ccp-crypto-sha.c b/drivers/crypto/ccp/ccp-crypto-sha.c
index 871c962..2ca64bb 100644
--- a/drivers/crypto/ccp/ccp-crypto-sha.c
+++ b/drivers/crypto/ccp/ccp-crypto-sha.c
@@ -497,13 +497,12 @@
 	snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name);
 	snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
 		 def->drv_name);
-	base->cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC |
+	base->cra_flags = CRYPTO_ALG_ASYNC |
 			  CRYPTO_ALG_KERN_DRIVER_ONLY |
 			  CRYPTO_ALG_NEED_FALLBACK;
 	base->cra_blocksize = def->block_size;
 	base->cra_ctxsize = sizeof(struct ccp_ctx);
 	base->cra_priority = CCP_CRA_PRIORITY;
-	base->cra_type = &crypto_ahash_type;
 	base->cra_init = ccp_sha_cra_init;
 	base->cra_exit = ccp_sha_cra_exit;
 	base->cra_module = THIS_MODULE;
diff --git a/drivers/crypto/ccp/psp-dev.c b/drivers/crypto/ccp/psp-dev.c
index ff478d8..9b59638 100644
--- a/drivers/crypto/ccp/psp-dev.c
+++ b/drivers/crypto/ccp/psp-dev.c
@@ -62,14 +62,14 @@
 	int reg;
 
 	/* Read the interrupt status: */
-	status = ioread32(psp->io_regs + PSP_P2CMSG_INTSTS);
+	status = ioread32(psp->io_regs + psp->vdata->intsts_reg);
 
 	/* Check if it is command completion: */
-	if (!(status & BIT(PSP_CMD_COMPLETE_REG)))
+	if (!(status & PSP_CMD_COMPLETE))
 		goto done;
 
 	/* Check if it is SEV command completion: */
-	reg = ioread32(psp->io_regs + PSP_CMDRESP);
+	reg = ioread32(psp->io_regs + psp->vdata->cmdresp_reg);
 	if (reg & PSP_CMDRESP_RESP) {
 		psp->sev_int_rcvd = 1;
 		wake_up(&psp->sev_int_queue);
@@ -77,17 +77,15 @@
 
 done:
 	/* Clear the interrupt status by writing the same value we read. */
-	iowrite32(status, psp->io_regs + PSP_P2CMSG_INTSTS);
+	iowrite32(status, psp->io_regs + psp->vdata->intsts_reg);
 
 	return IRQ_HANDLED;
 }
 
 static void sev_wait_cmd_ioc(struct psp_device *psp, unsigned int *reg)
 {
-	psp->sev_int_rcvd = 0;
-
 	wait_event(psp->sev_int_queue, psp->sev_int_rcvd);
-	*reg = ioread32(psp->io_regs + PSP_CMDRESP);
+	*reg = ioread32(psp->io_regs + psp->vdata->cmdresp_reg);
 }
 
 static int sev_cmd_buffer_len(int cmd)
@@ -145,13 +143,15 @@
 	print_hex_dump_debug("(in):  ", DUMP_PREFIX_OFFSET, 16, 2, data,
 			     sev_cmd_buffer_len(cmd), false);
 
-	iowrite32(phys_lsb, psp->io_regs + PSP_CMDBUFF_ADDR_LO);
-	iowrite32(phys_msb, psp->io_regs + PSP_CMDBUFF_ADDR_HI);
+	iowrite32(phys_lsb, psp->io_regs + psp->vdata->cmdbuff_addr_lo_reg);
+	iowrite32(phys_msb, psp->io_regs + psp->vdata->cmdbuff_addr_hi_reg);
+
+	/* clear before issuing the command so a fast completion isn't lost */
+	psp->sev_int_rcvd = 0;
 
 	reg = cmd;
 	reg <<= PSP_CMDRESP_CMD_SHIFT;
 	reg |= PSP_CMDRESP_IOC;
-	iowrite32(reg, psp->io_regs + PSP_CMDRESP);
+	iowrite32(reg, psp->io_regs + psp->vdata->cmdresp_reg);
 
 	/* wait for command completion */
 	sev_wait_cmd_ioc(psp, &reg);
@@ -789,7 +789,7 @@
 static int sev_init(struct psp_device *psp)
 {
 	/* Check if device supports SEV feature */
-	if (!(ioread32(psp->io_regs + PSP_FEATURE_REG) & 1)) {
+	if (!(ioread32(psp->io_regs + psp->vdata->feature_reg) & 1)) {
 		dev_dbg(psp->dev, "device does not support SEV\n");
 		return 1;
 	}
@@ -817,11 +817,11 @@
 		goto e_err;
 	}
 
-	psp->io_regs = sp->io_map + psp->vdata->offset;
+	psp->io_regs = sp->io_map;
 
 	/* Disable and clear interrupts until ready */
-	iowrite32(0, psp->io_regs + PSP_P2CMSG_INTEN);
-	iowrite32(-1, psp->io_regs + PSP_P2CMSG_INTSTS);
+	iowrite32(0, psp->io_regs + psp->vdata->inten_reg);
+	iowrite32(-1, psp->io_regs + psp->vdata->intsts_reg);
 
 	/* Request an irq */
 	ret = sp_request_psp_irq(psp->sp, psp_irq_handler, psp->name, psp);
@@ -838,7 +838,9 @@
 		sp->set_psp_master_device(sp);
 
 	/* Enable interrupt */
-	iowrite32(-1, psp->io_regs + PSP_P2CMSG_INTEN);
+	iowrite32(-1, psp->io_regs + psp->vdata->inten_reg);
+
+	dev_notice(dev, "psp enabled\n");
 
 	return 0;
 
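[ Two fixes are folded into the psp-dev.c hunks above. Besides moving the
  register offsets behind psp->vdata (see sp-pci.c below), the clearing of
  sev_int_rcvd moves out of sev_wait_cmd_ioc() and into the submission
  path, before the command register is written. The old order could lose a
  completion: if the interrupt fired and set the flag before the waiter
  zeroed it, wait_event() would sleep forever. The pattern, reduced to its
  essentials with hypothetical names:

    #include <linux/io.h>
    #include <linux/wait.h>

    struct example_dev {
            void __iomem *regs;
            wait_queue_head_t waitq;
            int done;
    };

    static void example_cmd(struct example_dev *dev, u32 cmd)
    {
            dev->done = 0;                     /* arm the flag first... */
            iowrite32(cmd, dev->regs + 0x80);  /* ...then kick the device */
            wait_event(dev->waitq, dev->done); /* IRQ sets done + wake_up() */
    }
]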
diff --git a/drivers/crypto/ccp/psp-dev.h b/drivers/crypto/ccp/psp-dev.h
index c7e9098a..8b53a96 100644
--- a/drivers/crypto/ccp/psp-dev.h
+++ b/drivers/crypto/ccp/psp-dev.h
@@ -30,24 +30,7 @@
 
 #include "sp-dev.h"
 
-#define PSP_C2PMSG(_num)		((_num) << 2)
-#define PSP_CMDRESP			PSP_C2PMSG(32)
-#define PSP_CMDBUFF_ADDR_LO		PSP_C2PMSG(56)
-#define PSP_CMDBUFF_ADDR_HI             PSP_C2PMSG(57)
-#define PSP_FEATURE_REG			PSP_C2PMSG(63)
-
-#define PSP_P2CMSG(_num)		((_num) << 2)
-#define PSP_CMD_COMPLETE_REG		1
-#define PSP_CMD_COMPLETE		PSP_P2CMSG(PSP_CMD_COMPLETE_REG)
-
-#define PSP_P2CMSG_INTEN		0x0110
-#define PSP_P2CMSG_INTSTS		0x0114
-
-#define PSP_C2PMSG_ATTR_0		0x0118
-#define PSP_C2PMSG_ATTR_1		0x011c
-#define PSP_C2PMSG_ATTR_2		0x0120
-#define PSP_C2PMSG_ATTR_3		0x0124
-#define PSP_P2CMSG_ATTR_0		0x0128
+#define PSP_CMD_COMPLETE		BIT(1)
 
 #define PSP_CMDRESP_CMD_SHIFT		16
 #define PSP_CMDRESP_IOC			BIT(0)
diff --git a/drivers/crypto/ccp/sp-dev.h b/drivers/crypto/ccp/sp-dev.h
index acb197b..14398ca 100644
--- a/drivers/crypto/ccp/sp-dev.h
+++ b/drivers/crypto/ccp/sp-dev.h
@@ -44,7 +44,12 @@
 };
 
 struct psp_vdata {
-	const unsigned int offset;
+	const unsigned int cmdresp_reg;
+	const unsigned int cmdbuff_addr_lo_reg;
+	const unsigned int cmdbuff_addr_hi_reg;
+	const unsigned int feature_reg;
+	const unsigned int inten_reg;
+	const unsigned int intsts_reg;
 };
 
 /* Structure to hold SP device data */
diff --git a/drivers/crypto/ccp/sp-pci.c b/drivers/crypto/ccp/sp-pci.c
index f5f43c5..7da93e9 100644
--- a/drivers/crypto/ccp/sp-pci.c
+++ b/drivers/crypto/ccp/sp-pci.c
@@ -269,38 +269,62 @@
 #endif
 
 #ifdef CONFIG_CRYPTO_DEV_SP_PSP
-static const struct psp_vdata psp_entry = {
-	.offset = 0x10500,
+static const struct psp_vdata pspv1 = {
+	.cmdresp_reg		= 0x10580,
+	.cmdbuff_addr_lo_reg	= 0x105e0,
+	.cmdbuff_addr_hi_reg	= 0x105e4,
+	.feature_reg		= 0x105fc,
+	.inten_reg		= 0x10610,
+	.intsts_reg		= 0x10614,
+};
+
+static const struct psp_vdata pspv2 = {
+	.cmdresp_reg		= 0x10980,
+	.cmdbuff_addr_lo_reg	= 0x109e0,
+	.cmdbuff_addr_hi_reg	= 0x109e4,
+	.feature_reg		= 0x109fc,
+	.inten_reg		= 0x10690,
+	.intsts_reg		= 0x10694,
 };
 #endif
 
 static const struct sp_dev_vdata dev_vdata[] = {
-	{
+	{	/* 0 */
 		.bar = 2,
 #ifdef CONFIG_CRYPTO_DEV_SP_CCP
 		.ccp_vdata = &ccpv3,
 #endif
 	},
-	{
+	{	/* 1 */
 		.bar = 2,
 #ifdef CONFIG_CRYPTO_DEV_SP_CCP
 		.ccp_vdata = &ccpv5a,
 #endif
 #ifdef CONFIG_CRYPTO_DEV_SP_PSP
-		.psp_vdata = &psp_entry
+		.psp_vdata = &pspv1,
 #endif
 	},
-	{
+	{	/* 2 */
 		.bar = 2,
 #ifdef CONFIG_CRYPTO_DEV_SP_CCP
 		.ccp_vdata = &ccpv5b,
 #endif
 	},
+	{	/* 3 */
+		.bar = 2,
+#ifdef CONFIG_CRYPTO_DEV_SP_CCP
+		.ccp_vdata = &ccpv5a,
+#endif
+#ifdef CONFIG_CRYPTO_DEV_SP_PSP
+		.psp_vdata = &pspv2,
+#endif
+	},
 };
 static const struct pci_device_id sp_pci_table[] = {
 	{ PCI_VDEVICE(AMD, 0x1537), (kernel_ulong_t)&dev_vdata[0] },
 	{ PCI_VDEVICE(AMD, 0x1456), (kernel_ulong_t)&dev_vdata[1] },
 	{ PCI_VDEVICE(AMD, 0x1468), (kernel_ulong_t)&dev_vdata[2] },
+	{ PCI_VDEVICE(AMD, 0x1486), (kernel_ulong_t)&dev_vdata[3] },
 	/* Last entry must be zero */
 	{ 0, }
 };
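[ With the register map carried in struct psp_vdata instead of compile-time
  constants, supporting a further PSP revision is purely additive. A
  hypothetical sketch; the offsets and device ID below are placeholders,
  not real hardware:

    static const struct psp_vdata pspv3 = {
            .cmdresp_reg            = 0x10a80,      /* placeholder offsets */
            .cmdbuff_addr_lo_reg    = 0x10ae0,
            .cmdbuff_addr_hi_reg    = 0x10ae4,
            .feature_reg            = 0x10afc,
            .inten_reg              = 0x10790,
            .intsts_reg             = 0x10794,
    };

  plus a dev_vdata[] slot pointing at &pspv3 and one more sp_pci_table[]
  line such as { PCI_VDEVICE(AMD, 0xffff), (kernel_ulong_t)&dev_vdata[4] };
  psp-dev.c itself needs no change. ]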
diff --git a/drivers/crypto/ccree/cc_cipher.c b/drivers/crypto/ccree/cc_cipher.c
index d2810c1..1100185 100644
--- a/drivers/crypto/ccree/cc_cipher.c
+++ b/drivers/crypto/ccree/cc_cipher.c
@@ -593,34 +593,82 @@
 	}
 }
 
+/*
+ * Update a CTR-AES 128-bit counter
+ */
+static void cc_update_ctr(u8 *ctr, unsigned int increment)
+{
+	if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) ||
+	    IS_ALIGNED((unsigned long)ctr, 8)) {
+
+		__be64 *high_be = (__be64 *)ctr;
+		__be64 *low_be = high_be + 1;
+		u64 orig_low = __be64_to_cpu(*low_be);
+		u64 new_low = orig_low + (u64)increment;
+
+		*low_be = __cpu_to_be64(new_low);
+
+		if (new_low < orig_low)
+			*high_be = __cpu_to_be64(__be64_to_cpu(*high_be) + 1);
+	} else {
+		u8 *pos = (ctr + AES_BLOCK_SIZE);
+		u8 val;
+		unsigned int size;
+
+		for (; increment; increment--)
+			for (size = AES_BLOCK_SIZE; size; size--) {
+				val = *--pos + 1;
+				*pos = val;
+				if (val)
+					break;
+			}
+	}
+}
+
 static void cc_cipher_complete(struct device *dev, void *cc_req, int err)
 {
 	struct skcipher_request *req = (struct skcipher_request *)cc_req;
 	struct scatterlist *dst = req->dst;
 	struct scatterlist *src = req->src;
 	struct cipher_req_ctx *req_ctx = skcipher_request_ctx(req);
-	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
-	unsigned int ivsize = crypto_skcipher_ivsize(tfm);
+	struct crypto_skcipher *sk_tfm = crypto_skcipher_reqtfm(req);
+	struct crypto_tfm *tfm = crypto_skcipher_tfm(sk_tfm);
+	struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
+	unsigned int ivsize = crypto_skcipher_ivsize(sk_tfm);
+	unsigned int len;
+
+	switch (ctx_p->cipher_mode) {
+	case DRV_CIPHER_CBC:
+		/*
+		 * The crypto API expects us to set the req->iv to the last
+		 * ciphertext block. For encrypt, simply copy from the result.
+		 * For decrypt, we must copy from a saved buffer since this
+		 * could be an in-place decryption operation and the src is
+		 * lost by this point.
+		 */
+		if (req_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) {
+			memcpy(req->iv, req_ctx->backup_info, ivsize);
+			kzfree(req_ctx->backup_info);
+		} else if (!err) {
+			len = req->cryptlen - ivsize;
+			scatterwalk_map_and_copy(req->iv, req->dst, len,
+						 ivsize, 0);
+		}
+		break;
+
+	case DRV_CIPHER_CTR:
+		/* Compute the counter of the last block */
+		len = ALIGN(req->cryptlen, AES_BLOCK_SIZE) / AES_BLOCK_SIZE;
+		cc_update_ctr((u8 *)req->iv, len);
+		break;
+
+	default:
+		break;
+	}
 
 	cc_unmap_cipher_request(dev, req_ctx, ivsize, src, dst);
 	kzfree(req_ctx->iv);
 
-	/*
-	 * The crypto API expects us to set the req->iv to the last
-	 * ciphertext block. For encrypt, simply copy from the result.
-	 * For decrypt, we must copy from a saved buffer since this
-	 * could be an in-place decryption operation and the src is
-	 * lost by this point.
-	 */
-	if (req_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT)  {
-		memcpy(req->iv, req_ctx->backup_info, ivsize);
-		kzfree(req_ctx->backup_info);
-	} else if (!err) {
-		scatterwalk_map_and_copy(req->iv, req->dst,
-					 (req->cryptlen - ivsize),
-					 ivsize, 0);
-	}
-
 	skcipher_request_complete(req, err);
 }
 
@@ -639,7 +687,7 @@
 	struct device *dev = drvdata_to_dev(ctx_p->drvdata);
 	struct cc_hw_desc desc[MAX_ABLKCIPHER_SEQ_LEN];
 	struct cc_crypto_req cc_req = {};
-	int rc, cts_restore_flag = 0;
+	int rc;
 	unsigned int seq_len = 0;
 	gfp_t flags = cc_gfp_flags(&req->base);
 
@@ -671,23 +719,10 @@
 		goto exit_process;
 	}
 
-	/*For CTS in case of data size aligned to 16 use CBC mode*/
-	if (((nbytes % AES_BLOCK_SIZE) == 0) &&
-	    ctx_p->cipher_mode == DRV_CIPHER_CBC_CTS) {
-		ctx_p->cipher_mode = DRV_CIPHER_CBC;
-		cts_restore_flag = 1;
-	}
-
 	/* Setup request structure */
 	cc_req.user_cb = (void *)cc_cipher_complete;
 	cc_req.user_arg = (void *)req;
 
-#ifdef ENABLE_CYCLE_COUNT
-	cc_req.op_type = (direction == DRV_CRYPTO_DIRECTION_DECRYPT) ?
-		STAT_OP_TYPE_DECODE : STAT_OP_TYPE_ENCODE;
-
-#endif
-
 	/* Setup request context */
 	req_ctx->gen_ctx.op_type = direction;
 
@@ -728,9 +763,6 @@
 	}
 
 exit_process:
-	if (cts_restore_flag)
-		ctx_p->cipher_mode = DRV_CIPHER_CBC_CTS;
-
 	if (rc != -EINPROGRESS && rc != -EBUSY) {
 		kzfree(req_ctx->backup_info);
 		kzfree(req_ctx->iv);
@@ -752,20 +784,29 @@
 static int cc_cipher_decrypt(struct skcipher_request *req)
 {
 	struct crypto_skcipher *sk_tfm = crypto_skcipher_reqtfm(req);
+	struct crypto_tfm *tfm = crypto_skcipher_tfm(sk_tfm);
+	struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
 	struct cipher_req_ctx *req_ctx = skcipher_request_ctx(req);
 	unsigned int ivsize = crypto_skcipher_ivsize(sk_tfm);
 	gfp_t flags = cc_gfp_flags(&req->base);
+	unsigned int len;
 
-	/*
-	 * Allocate and save the last IV sized bytes of the source, which will
-	 * be lost in case of in-place decryption and might be needed for CTS.
-	 */
-	req_ctx->backup_info = kmalloc(ivsize, flags);
-	if (!req_ctx->backup_info)
-		return -ENOMEM;
+	if (ctx_p->cipher_mode == DRV_CIPHER_CBC) {
 
-	scatterwalk_map_and_copy(req_ctx->backup_info, req->src,
-				 (req->cryptlen - ivsize), ivsize, 0);
+		/* Allocate and save the last IV-sized chunk of the source,
+		 * which would otherwise be lost by an in-place decryption.
+		 */
+		req_ctx->backup_info = kzalloc(ivsize, flags);
+		if (!req_ctx->backup_info)
+			return -ENOMEM;
+
+		len = req->cryptlen - ivsize;
+		scatterwalk_map_and_copy(req_ctx->backup_info, req->src, len,
+					 ivsize, 0);
+	} else {
+		req_ctx->backup_info = NULL;
+	}
+
 	req_ctx->is_giv = false;
 
 	return cc_cipher_process(req, DRV_CRYPTO_DIRECTION_DECRYPT);
@@ -975,8 +1016,8 @@
 		.min_hw_rev = CC_HW_REV_712,
 	},
 	{
-		.name = "cts1(cbc(paes))",
-		.driver_name = "cts1-cbc-paes-ccree",
+		.name = "cts(cbc(paes))",
+		.driver_name = "cts-cbc-paes-ccree",
 		.blocksize = AES_BLOCK_SIZE,
 		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
 		.template_skcipher = {
@@ -1210,8 +1251,8 @@
 		.min_hw_rev = CC_HW_REV_630,
 	},
 	{
-		.name = "cts1(cbc(aes))",
-		.driver_name = "cts1-cbc-aes-ccree",
+		.name = "cts(cbc(aes))",
+		.driver_name = "cts-cbc-aes-ccree",
 		.blocksize = AES_BLOCK_SIZE,
 		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
 		.template_skcipher = {
@@ -1338,8 +1379,7 @@
 
 	alg->base.cra_init = cc_cipher_init;
 	alg->base.cra_exit = cc_cipher_exit;
-	alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY |
-				CRYPTO_ALG_TYPE_SKCIPHER;
+	alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
 
 	t_alg->cipher_mode = tmpl->cipher_mode;
 	t_alg->flow_mode = tmpl->flow_mode;
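[ cc_update_ctr() above has two paths: when the counter buffer is 8-byte
  aligned (or the platform handles unaligned accesses efficiently) it
  treats the 128-bit big-endian value as two 64-bit halves and detects
  carry-out of the low half with the unsigned-wrap test new_low < orig_low;
  otherwise it increments byte by byte from the end. The completion handler
  feeds it the number of blocks consumed, i.e. cryptlen rounded up to a
  whole number of AES blocks, so chained requests resume from the right
  counter. The fallback's carry logic can be checked in isolation; this is
  a standalone user-space sketch, not driver code:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Byte-wise equivalent of the driver's slow path: add 'inc' to a
     * 16-byte big-endian counter. */
    static void ctr_add(uint8_t ctr[16], unsigned int inc)
    {
            for (; inc; inc--) {
                    int i;

                    for (i = 15; i >= 0; i--)
                            if (++ctr[i])   /* stop once a byte doesn't wrap */
                                    break;
            }
    }

    int main(void)
    {
            uint8_t ctr[16];

            memset(ctr, 0, 16);
            memset(ctr + 8, 0xff, 8);   /* low 64 bits all ones */
            ctr_add(ctr, 1);            /* must carry into the high half */
            printf("%02x %02x\n", ctr[7], ctr[15]);   /* prints "01 00" */
            return 0;
    }
]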
diff --git a/drivers/crypto/ccree/cc_driver.c b/drivers/crypto/ccree/cc_driver.c
index bd974fe..1ff229c 100644
--- a/drivers/crypto/ccree/cc_driver.c
+++ b/drivers/crypto/ccree/cc_driver.c
@@ -131,8 +131,8 @@
 	}
 
 	if (irr) {
-		dev_dbg(dev, "IRR includes unknown cause bits (0x%08X)\n",
-			irr);
+		dev_dbg_ratelimited(dev, "IRR includes unknown cause bits (0x%08X)\n",
+				    irr);
 		/* Just warning */
 	}
 
diff --git a/drivers/crypto/ccree/cc_hash.c b/drivers/crypto/ccree/cc_hash.c
index 96ff777..b931330 100644
--- a/drivers/crypto/ccree/cc_hash.c
+++ b/drivers/crypto/ccree/cc_hash.c
@@ -602,7 +602,7 @@
 	return rc;
 }
 
-static int cc_hash_finup(struct ahash_request *req)
+static int cc_do_finup(struct ahash_request *req, bool update)
 {
 	struct ahash_req_ctx *state = ahash_request_ctx(req);
 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
@@ -619,15 +619,15 @@
 	int rc;
 	gfp_t flags = cc_gfp_flags(&req->base);
 
-	dev_dbg(dev, "===== %s-finup (%d) ====\n", is_hmac ? "hmac" : "hash",
-		nbytes);
+	dev_dbg(dev, "===== %s-%s (%d) ====\n", is_hmac ? "hmac" : "hash",
+		update ? "finup" : "final", nbytes);
 
 	if (cc_map_req(dev, state, ctx)) {
 		dev_err(dev, "map_ahash_source() failed\n");
 		return -EINVAL;
 	}
 
-	if (cc_map_hash_request_final(ctx->drvdata, state, src, nbytes, 1,
+	if (cc_map_hash_request_final(ctx->drvdata, state, src, nbytes, update,
 				      flags)) {
 		dev_err(dev, "map_ahash_request_final() failed\n");
 		cc_unmap_req(dev, state, ctx);
@@ -646,67 +646,7 @@
 
 	idx = cc_restore_hash(desc, ctx, state, idx);
 
-	if (is_hmac)
-		idx = cc_fin_hmac(desc, req, idx);
-
-	idx = cc_fin_result(desc, req, idx);
-
-	rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
-	if (rc != -EINPROGRESS && rc != -EBUSY) {
-		dev_err(dev, "send_request() failed (rc=%d)\n", rc);
-		cc_unmap_hash_request(dev, state, src, true);
-		cc_unmap_result(dev, state, digestsize, result);
-		cc_unmap_req(dev, state, ctx);
-	}
-	return rc;
-}
-
-static int cc_hash_final(struct ahash_request *req)
-{
-	struct ahash_req_ctx *state = ahash_request_ctx(req);
-	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
-	struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
-	u32 digestsize = crypto_ahash_digestsize(tfm);
-	struct scatterlist *src = req->src;
-	unsigned int nbytes = req->nbytes;
-	u8 *result = req->result;
-	struct device *dev = drvdata_to_dev(ctx->drvdata);
-	bool is_hmac = ctx->is_hmac;
-	struct cc_crypto_req cc_req = {};
-	struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
-	unsigned int idx = 0;
-	int rc;
-	gfp_t flags = cc_gfp_flags(&req->base);
-
-	dev_dbg(dev, "===== %s-final (%d) ====\n", is_hmac ? "hmac" : "hash",
-		nbytes);
-
-	if (cc_map_req(dev, state, ctx)) {
-		dev_err(dev, "map_ahash_source() failed\n");
-		return -EINVAL;
-	}
-
-	if (cc_map_hash_request_final(ctx->drvdata, state, src, nbytes, 0,
-				      flags)) {
-		dev_err(dev, "map_ahash_request_final() failed\n");
-		cc_unmap_req(dev, state, ctx);
-		return -ENOMEM;
-	}
-
-	if (cc_map_result(dev, state, digestsize)) {
-		dev_err(dev, "map_ahash_digest() failed\n");
-		cc_unmap_hash_request(dev, state, src, true);
-		cc_unmap_req(dev, state, ctx);
-		return -ENOMEM;
-	}
-
-	/* Setup request structure */
-	cc_req.user_cb = cc_hash_complete;
-	cc_req.user_arg = req;
-
-	idx = cc_restore_hash(desc, ctx, state, idx);
-
-	/* "DO-PAD" must be enabled only when writing current length to HW */
+	/* Pad the hash */
 	hw_desc_init(&desc[idx]);
 	set_cipher_do(&desc[idx], DO_PAD);
 	set_cipher_mode(&desc[idx], ctx->hw_mode);
@@ -731,6 +671,16 @@
 	return rc;
 }
 
+static int cc_hash_finup(struct ahash_request *req)
+{
+	return cc_do_finup(req, true);
+}
+
+static int cc_hash_final(struct ahash_request *req)
+{
+	return cc_do_finup(req, false);
+}
+
 static int cc_hash_init(struct ahash_request *req)
 {
 	struct ahash_req_ctx *state = ahash_request_ctx(req);
@@ -1813,9 +1764,7 @@
 	alg->cra_exit = cc_cra_exit;
 
 	alg->cra_init = cc_cra_init;
-	alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_TYPE_AHASH |
-			CRYPTO_ALG_KERN_DRIVER_ONLY;
-	alg->cra_type = &crypto_ahash_type;
+	alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
 
 	t_crypto_alg->hash_mode = template->hash_mode;
 	t_crypto_alg->hw_mode = template->hw_mode;
diff --git a/drivers/crypto/chelsio/chcr_algo.c b/drivers/crypto/chelsio/chcr_algo.c
index b916c4e..5c539af 100644
--- a/drivers/crypto/chelsio/chcr_algo.c
+++ b/drivers/crypto/chelsio/chcr_algo.c
@@ -4203,7 +4203,6 @@
 #define SZ_AHASH_CTX sizeof(struct chcr_context)
 #define SZ_AHASH_H_CTX (sizeof(struct chcr_context) + sizeof(struct hmac_ctx))
 #define SZ_AHASH_REQ_CTX sizeof(struct chcr_ahash_req_ctx)
-#define AHASH_CRA_FLAGS (CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC)
 
 /*
  *	chcr_register_alg - Register crypto algorithms with kernel framework.
@@ -4237,8 +4236,7 @@
 			break;
 		case CRYPTO_ALG_TYPE_AEAD:
 			driver_algs[i].alg.aead.base.cra_flags =
-				CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC |
-				CRYPTO_ALG_NEED_FALLBACK;
+				CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK;
 			driver_algs[i].alg.aead.encrypt = chcr_aead_encrypt;
 			driver_algs[i].alg.aead.decrypt = chcr_aead_decrypt;
 			driver_algs[i].alg.aead.init = chcr_aead_cra_init;
@@ -4258,10 +4256,9 @@
 			a_hash->halg.statesize = SZ_AHASH_REQ_CTX;
 			a_hash->halg.base.cra_priority = CHCR_CRA_PRIORITY;
 			a_hash->halg.base.cra_module = THIS_MODULE;
-			a_hash->halg.base.cra_flags = AHASH_CRA_FLAGS;
+			a_hash->halg.base.cra_flags = CRYPTO_ALG_ASYNC;
 			a_hash->halg.base.cra_alignmask = 0;
 			a_hash->halg.base.cra_exit = NULL;
-			a_hash->halg.base.cra_type = &crypto_ahash_type;
 
 			if (driver_algs[i].type == CRYPTO_ALG_TYPE_HMAC) {
 				a_hash->halg.base.cra_init = chcr_hmac_cra_init;
diff --git a/drivers/crypto/chelsio/chtls/chtls_hw.c b/drivers/crypto/chelsio/chtls/chtls_hw.c
index 55d5014..4909607 100644
--- a/drivers/crypto/chelsio/chtls/chtls_hw.c
+++ b/drivers/crypto/chelsio/chtls/chtls_hw.c
@@ -97,7 +97,7 @@
 int chtls_set_tcb_tflag(struct sock *sk, unsigned int bit_pos, int val)
 {
 	return chtls_set_tcb_field(sk, 1, 1ULL << bit_pos,
-				   val << bit_pos);
+				   (u64)val << bit_pos);
 }
 
 static int chtls_set_tcb_keyid(struct sock *sk, int keyid)
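[ The chtls one-liner above is a classic integer-promotion fix: val is an
  int, so val << bit_pos is computed in 32 bits before being widened to the
  function's u64 parameter; note the mask in the same call is already built
  with 1ULL, a hint that bit_pos may exceed 31 within the 64-bit TCB word.
  Casting first makes the whole shift 64-bit. Standalone illustration (the
  bit-31 case is formally undefined in C; the sign extension shown is what
  typical compilers produce):

    #include <inttypes.h>
    #include <stdio.h>

    int main(void)
    {
            int val = 1;

            uint64_t bad  = val << 31;            /* 32-bit shift, widened */
            uint64_t good = (uint64_t)val << 31;  /* 64-bit shift */

            printf("bad  = %#" PRIx64 "\n", bad);   /* 0xffffffff80000000 */
            printf("good = %#" PRIx64 "\n", good);  /* 0x80000000 */
            return 0;
    }
]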
diff --git a/drivers/crypto/inside-secure/safexcel.c b/drivers/crypto/inside-secure/safexcel.c
index 4e86f86..7e71043 100644
--- a/drivers/crypto/inside-secure/safexcel.c
+++ b/drivers/crypto/inside-secure/safexcel.c
@@ -1,11 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Copyright (C) 2017 Marvell
  *
  * Antoine Tenart <antoine.tenart@free-electrons.com>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
  */
 
 #include <linux/clk.h>
@@ -33,7 +30,19 @@
 static void eip197_trc_cache_init(struct safexcel_crypto_priv *priv)
 {
 	u32 val, htable_offset;
-	int i;
+	int i, cs_rc_max, cs_ht_wc, cs_trc_rec_wc, cs_trc_lg_rec_wc;
+
+	if (priv->version == EIP197B) {
+		cs_rc_max = EIP197B_CS_RC_MAX;
+		cs_ht_wc = EIP197B_CS_HT_WC;
+		cs_trc_rec_wc = EIP197B_CS_TRC_REC_WC;
+		cs_trc_lg_rec_wc = EIP197B_CS_TRC_LG_REC_WC;
+	} else {
+		cs_rc_max = EIP197D_CS_RC_MAX;
+		cs_ht_wc = EIP197D_CS_HT_WC;
+		cs_trc_rec_wc = EIP197D_CS_TRC_REC_WC;
+		cs_trc_lg_rec_wc = EIP197D_CS_TRC_LG_REC_WC;
+	}
 
 	/* Enable the record cache memory access */
 	val = readl(priv->base + EIP197_CS_RAM_CTRL);
@@ -54,7 +63,7 @@
 	writel(val, priv->base + EIP197_TRC_PARAMS);
 
 	/* Clear all records */
-	for (i = 0; i < EIP197_CS_RC_MAX; i++) {
+	for (i = 0; i < cs_rc_max; i++) {
 		u32 val, offset = EIP197_CLASSIFICATION_RAMS + i * EIP197_CS_RC_SIZE;
 
 		writel(EIP197_CS_RC_NEXT(EIP197_RC_NULL) |
@@ -64,14 +73,14 @@
 		val = EIP197_CS_RC_NEXT(i+1) | EIP197_CS_RC_PREV(i-1);
 		if (i == 0)
 			val |= EIP197_CS_RC_PREV(EIP197_RC_NULL);
-		else if (i == EIP197_CS_RC_MAX - 1)
+		else if (i == cs_rc_max - 1)
 			val |= EIP197_CS_RC_NEXT(EIP197_RC_NULL);
 		writel(val, priv->base + offset + sizeof(u32));
 	}
 
 	/* Clear the hash table entries */
-	htable_offset = EIP197_CS_RC_MAX * EIP197_CS_RC_SIZE;
-	for (i = 0; i < 64; i++)
+	htable_offset = cs_rc_max * EIP197_CS_RC_SIZE;
+	for (i = 0; i < cs_ht_wc; i++)
 		writel(GENMASK(29, 0),
 		       priv->base + EIP197_CLASSIFICATION_RAMS + htable_offset + i * sizeof(u32));
 
@@ -82,23 +91,23 @@
 
 	/* Write head and tail pointers of the record free chain */
 	val = EIP197_TRC_FREECHAIN_HEAD_PTR(0) |
-	      EIP197_TRC_FREECHAIN_TAIL_PTR(EIP197_CS_RC_MAX - 1);
+	      EIP197_TRC_FREECHAIN_TAIL_PTR(cs_rc_max - 1);
 	writel(val, priv->base + EIP197_TRC_FREECHAIN);
 
 	/* Configure the record cache #1 */
-	val = EIP197_TRC_PARAMS2_RC_SZ_SMALL(EIP197_CS_TRC_REC_WC) |
-	      EIP197_TRC_PARAMS2_HTABLE_PTR(EIP197_CS_RC_MAX);
+	val = EIP197_TRC_PARAMS2_RC_SZ_SMALL(cs_trc_rec_wc) |
+	      EIP197_TRC_PARAMS2_HTABLE_PTR(cs_rc_max);
 	writel(val, priv->base + EIP197_TRC_PARAMS2);
 
 	/* Configure the record cache #2 */
-	val = EIP197_TRC_PARAMS_RC_SZ_LARGE(EIP197_CS_TRC_LG_REC_WC) |
+	val = EIP197_TRC_PARAMS_RC_SZ_LARGE(cs_trc_lg_rec_wc) |
 	      EIP197_TRC_PARAMS_BLK_TIMER_SPEED(1) |
 	      EIP197_TRC_PARAMS_HTABLE_SZ(2);
 	writel(val, priv->base + EIP197_TRC_PARAMS);
 }
 
 static void eip197_write_firmware(struct safexcel_crypto_priv *priv,
-				  const struct firmware *fw, u32 ctrl,
+				  const struct firmware *fw, int pe, u32 ctrl,
 				  u32 prog_en)
 {
 	const u32 *data = (const u32 *)fw->data;
@@ -112,7 +121,7 @@
 	       EIP197_PE(priv) + ctrl);
 
 	/* Enable access to the program memory */
-	writel(prog_en, EIP197_PE(priv) + EIP197_PE_ICE_RAM_CTRL);
+	writel(prog_en, EIP197_PE(priv) + EIP197_PE_ICE_RAM_CTRL(pe));
 
 	/* Write the firmware */
 	for (i = 0; i < fw->size / sizeof(u32); i++)
@@ -120,7 +129,7 @@
 		       priv->base + EIP197_CLASSIFICATION_RAMS + i * sizeof(u32));
 
 	/* Disable access to the program memory */
-	writel(0, EIP197_PE(priv) + EIP197_PE_ICE_RAM_CTRL);
+	writel(0, EIP197_PE(priv) + EIP197_PE_ICE_RAM_CTRL(pe));
 
 	/* Release engine from reset */
 	val = readl(EIP197_PE(priv) + ctrl);
@@ -132,35 +141,62 @@
 {
 	const char *fw_name[] = {"ifpp.bin", "ipue.bin"};
 	const struct firmware *fw[FW_NB];
-	int i, j, ret = 0;
+	char fw_path[31], *dir = NULL;
+	int i, j, ret = 0, pe;
 	u32 val;
 
+	switch (priv->version) {
+	case EIP197B:
+		dir = "eip197b";
+		break;
+	case EIP197D:
+		dir = "eip197d";
+		break;
+	default:
+		/* No firmware is required */
+		return 0;
+	}
+
 	for (i = 0; i < FW_NB; i++) {
-		ret = request_firmware(&fw[i], fw_name[i], priv->dev);
+		snprintf(fw_path, sizeof(fw_path), "inside-secure/%s/%s", dir, fw_name[i]);
+		ret = request_firmware(&fw[i], fw_path, priv->dev);
 		if (ret) {
-			dev_err(priv->dev,
-				"Failed to request firmware %s (%d)\n",
-				fw_name[i], ret);
-			goto release_fw;
+			if (priv->version != EIP197B)
+				goto release_fw;
+
+			/* Fallback to the old firmware location for the
+			 * EIP197b.
+			 */
+			ret = request_firmware(&fw[i], fw_name[i], priv->dev);
+			if (ret) {
+				dev_err(priv->dev,
+					"Failed to request firmware %s (%d)\n",
+					fw_name[i], ret);
+				goto release_fw;
+			}
 		}
-	 }
+	}
 
-	/* Clear the scratchpad memory */
-	val = readl(EIP197_PE(priv) + EIP197_PE_ICE_SCRATCH_CTRL);
-	val |= EIP197_PE_ICE_SCRATCH_CTRL_CHANGE_TIMER |
-	       EIP197_PE_ICE_SCRATCH_CTRL_TIMER_EN |
-	       EIP197_PE_ICE_SCRATCH_CTRL_SCRATCH_ACCESS |
-	       EIP197_PE_ICE_SCRATCH_CTRL_CHANGE_ACCESS;
-	writel(val, EIP197_PE(priv) + EIP197_PE_ICE_SCRATCH_CTRL);
+	for (pe = 0; pe < priv->config.pes; pe++) {
+		/* Clear the scratchpad memory */
+		val = readl(EIP197_PE(priv) + EIP197_PE_ICE_SCRATCH_CTRL(pe));
+		val |= EIP197_PE_ICE_SCRATCH_CTRL_CHANGE_TIMER |
+		       EIP197_PE_ICE_SCRATCH_CTRL_TIMER_EN |
+		       EIP197_PE_ICE_SCRATCH_CTRL_SCRATCH_ACCESS |
+		       EIP197_PE_ICE_SCRATCH_CTRL_CHANGE_ACCESS;
+		writel(val, EIP197_PE(priv) + EIP197_PE_ICE_SCRATCH_CTRL(pe));
 
-	memset_io(EIP197_PE(priv) + EIP197_PE_ICE_SCRATCH_RAM, 0,
-		  EIP197_NUM_OF_SCRATCH_BLOCKS * sizeof(u32));
+		memset_io(EIP197_PE(priv) + EIP197_PE_ICE_SCRATCH_RAM(pe), 0,
+			  EIP197_NUM_OF_SCRATCH_BLOCKS * sizeof(u32));
 
-	eip197_write_firmware(priv, fw[FW_IFPP], EIP197_PE_ICE_FPP_CTRL,
-			      EIP197_PE_ICE_RAM_CTRL_FPP_PROG_EN);
+		eip197_write_firmware(priv, fw[FW_IFPP], pe,
+				      EIP197_PE_ICE_FPP_CTRL(pe),
+				      EIP197_PE_ICE_RAM_CTRL_FPP_PROG_EN);
 
-	eip197_write_firmware(priv, fw[FW_IPUE], EIP197_PE_ICE_PUE_CTRL,
-			      EIP197_PE_ICE_RAM_CTRL_PUE_PROG_EN);
+		eip197_write_firmware(priv, fw[FW_IPUE], pe,
+				      EIP197_PE_ICE_PUE_CTRL(pe),
+				      EIP197_PE_ICE_RAM_CTRL_PUE_PROG_EN);
+	}
 
 release_fw:
 	for (j = 0; j < i; j++)
@@ -256,7 +292,7 @@
 static int safexcel_hw_init(struct safexcel_crypto_priv *priv)
 {
 	u32 version, val;
-	int i, ret;
+	int i, ret, pe;
 
 	/* Determine endianess and configure byte swap */
 	version = readl(EIP197_HIA_AIC(priv) + EIP197_HIA_VERSION);
@@ -267,6 +303,10 @@
 	else if (((version >> 16) & 0xffff) == EIP197_HIA_VERSION_LE)
 		val |= (EIP197_MST_CTRL_NO_BYTE_SWAP >> 24);
 
+	/* For the EIP197, set the maximum number of TX commands to 2^5 = 32 */
+	if (priv->version == EIP197B || priv->version == EIP197D)
+		val |= EIP197_MST_CTRL_TX_MAX_CMD(5);
+
 	writel(val, EIP197_HIA_AIC(priv) + EIP197_HIA_MST_CTRL);
 
 	/* Configure wr/rd cache values */
@@ -282,82 +322,94 @@
 	/* Clear any pending interrupt */
 	writel(GENMASK(31, 0), EIP197_HIA_AIC_G(priv) + EIP197_HIA_AIC_G_ACK);
 
-	/* Data Fetch Engine configuration */
-
-	/* Reset all DFE threads */
-	writel(EIP197_DxE_THR_CTRL_RESET_PE,
-	       EIP197_HIA_DFE_THR(priv) + EIP197_HIA_DFE_THR_CTRL);
-
-	if (priv->version == EIP197) {
-		/* Reset HIA input interface arbiter */
-		writel(EIP197_HIA_RA_PE_CTRL_RESET,
-		       EIP197_HIA_AIC(priv) + EIP197_HIA_RA_PE_CTRL);
-	}
-
-	/* DMA transfer size to use */
-	val = EIP197_HIA_DFE_CFG_DIS_DEBUG;
-	val |= EIP197_HIA_DxE_CFG_MIN_DATA_SIZE(5) | EIP197_HIA_DxE_CFG_MAX_DATA_SIZE(9);
-	val |= EIP197_HIA_DxE_CFG_MIN_CTRL_SIZE(5) | EIP197_HIA_DxE_CFG_MAX_CTRL_SIZE(7);
-	val |= EIP197_HIA_DxE_CFG_DATA_CACHE_CTRL(RD_CACHE_3BITS);
-	val |= EIP197_HIA_DxE_CFG_CTRL_CACHE_CTRL(RD_CACHE_3BITS);
-	writel(val, EIP197_HIA_DFE(priv) + EIP197_HIA_DFE_CFG);
-
-	/* Leave the DFE threads reset state */
-	writel(0, EIP197_HIA_DFE_THR(priv) + EIP197_HIA_DFE_THR_CTRL);
-
-	/* Configure the procesing engine thresholds */
-	writel(EIP197_PE_IN_xBUF_THRES_MIN(5) | EIP197_PE_IN_xBUF_THRES_MAX(9),
-	       EIP197_PE(priv) + EIP197_PE_IN_DBUF_THRES);
-	writel(EIP197_PE_IN_xBUF_THRES_MIN(5) | EIP197_PE_IN_xBUF_THRES_MAX(7),
-	       EIP197_PE(priv) + EIP197_PE_IN_TBUF_THRES);
-
-	if (priv->version == EIP197) {
-		/* enable HIA input interface arbiter and rings */
-		writel(EIP197_HIA_RA_PE_CTRL_EN |
-		       GENMASK(priv->config.rings - 1, 0),
-		       EIP197_HIA_AIC(priv) + EIP197_HIA_RA_PE_CTRL);
-	}
-
-	/* Data Store Engine configuration */
-
-	/* Reset all DSE threads */
-	writel(EIP197_DxE_THR_CTRL_RESET_PE,
-	       EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_CTRL);
-
-	/* Wait for all DSE threads to complete */
-	while ((readl(EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_STAT) &
-		GENMASK(15, 12)) != GENMASK(15, 12))
-		;
-
-	/* DMA transfer size to use */
-	val = EIP197_HIA_DSE_CFG_DIS_DEBUG;
-	val |= EIP197_HIA_DxE_CFG_MIN_DATA_SIZE(7) | EIP197_HIA_DxE_CFG_MAX_DATA_SIZE(8);
-	val |= EIP197_HIA_DxE_CFG_DATA_CACHE_CTRL(WR_CACHE_3BITS);
-	val |= EIP197_HIA_DSE_CFG_ALWAYS_BUFFERABLE;
-	/* FIXME: instability issues can occur for EIP97 but disabling it impact
-	 * performances.
-	 */
-	if (priv->version == EIP197)
-		val |= EIP197_HIA_DSE_CFG_EN_SINGLE_WR;
-	writel(val, EIP197_HIA_DSE(priv) + EIP197_HIA_DSE_CFG);
-
-	/* Leave the DSE threads reset state */
-	writel(0, EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_CTRL);
-
-	/* Configure the procesing engine thresholds */
-	writel(EIP197_PE_OUT_DBUF_THRES_MIN(7) | EIP197_PE_OUT_DBUF_THRES_MAX(8),
-	       EIP197_PE(priv) + EIP197_PE_OUT_DBUF_THRES);
-
 	/* Processing Engine configuration */
+	for (pe = 0; pe < priv->config.pes; pe++) {
+		/* Data Fetch Engine configuration */
 
-	/* H/W capabilities selection */
-	val = EIP197_FUNCTION_RSVD;
-	val |= EIP197_PROTOCOL_ENCRYPT_ONLY | EIP197_PROTOCOL_HASH_ONLY;
-	val |= EIP197_PROTOCOL_ENCRYPT_HASH | EIP197_PROTOCOL_HASH_DECRYPT;
-	val |= EIP197_ALG_AES_ECB | EIP197_ALG_AES_CBC;
-	val |= EIP197_ALG_SHA1 | EIP197_ALG_HMAC_SHA1;
-	val |= EIP197_ALG_SHA2 | EIP197_ALG_HMAC_SHA2;
-	writel(val, EIP197_PE(priv) + EIP197_PE_EIP96_FUNCTION_EN);
+		/* Reset all DFE threads */
+		writel(EIP197_DxE_THR_CTRL_RESET_PE,
+		       EIP197_HIA_DFE_THR(priv) + EIP197_HIA_DFE_THR_CTRL(pe));
+
+		if (priv->version == EIP197B || priv->version == EIP197D) {
+			/* Reset HIA input interface arbiter */
+			writel(EIP197_HIA_RA_PE_CTRL_RESET,
+			       EIP197_HIA_AIC(priv) + EIP197_HIA_RA_PE_CTRL(pe));
+		}
+
+		/* DMA transfer size to use */
+		val = EIP197_HIA_DFE_CFG_DIS_DEBUG;
+		val |= EIP197_HIA_DxE_CFG_MIN_DATA_SIZE(6) |
+		       EIP197_HIA_DxE_CFG_MAX_DATA_SIZE(9);
+		val |= EIP197_HIA_DxE_CFG_MIN_CTRL_SIZE(6) |
+		       EIP197_HIA_DxE_CFG_MAX_CTRL_SIZE(7);
+		val |= EIP197_HIA_DxE_CFG_DATA_CACHE_CTRL(RD_CACHE_3BITS);
+		val |= EIP197_HIA_DxE_CFG_CTRL_CACHE_CTRL(RD_CACHE_3BITS);
+		writel(val, EIP197_HIA_DFE(priv) + EIP197_HIA_DFE_CFG(pe));
+
+		/* Leave the DFE threads reset state */
+		writel(0, EIP197_HIA_DFE_THR(priv) + EIP197_HIA_DFE_THR_CTRL(pe));
+
+		/* Configure the processing engine thresholds */
+		writel(EIP197_PE_IN_xBUF_THRES_MIN(6) |
+		       EIP197_PE_IN_xBUF_THRES_MAX(9),
+		       EIP197_PE(priv) + EIP197_PE_IN_DBUF_THRES(pe));
+		writel(EIP197_PE_IN_xBUF_THRES_MIN(6) |
+		       EIP197_PE_IN_xBUF_THRES_MAX(7),
+		       EIP197_PE(priv) + EIP197_PE_IN_TBUF_THRES(pe));
+
+		if (priv->version == EIP197B || priv->version == EIP197D) {
+			/* enable HIA input interface arbiter and rings */
+			writel(EIP197_HIA_RA_PE_CTRL_EN |
+			       GENMASK(priv->config.rings - 1, 0),
+			       EIP197_HIA_AIC(priv) + EIP197_HIA_RA_PE_CTRL(pe));
+		}
+
+		/* Data Store Engine configuration */
+
+		/* Reset all DSE threads */
+		writel(EIP197_DxE_THR_CTRL_RESET_PE,
+		       EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_CTRL(pe));
+
+		/* Wait for all DSE threads to complete */
+		while ((readl(EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_STAT(pe)) &
+			GENMASK(15, 12)) != GENMASK(15, 12))
+			;
+
+		/* DMA transfer size to use */
+		val = EIP197_HIA_DSE_CFG_DIS_DEBUG;
+		val |= EIP197_HIA_DxE_CFG_MIN_DATA_SIZE(7) |
+		       EIP197_HIA_DxE_CFG_MAX_DATA_SIZE(8);
+		val |= EIP197_HIA_DxE_CFG_DATA_CACHE_CTRL(WR_CACHE_3BITS);
+		val |= EIP197_HIA_DSE_CFG_ALWAYS_BUFFERABLE;
+		/* FIXME: instability issues can occur for the EIP97, but
+		 * disabling it impacts performance.
+		 */
+		if (priv->version == EIP197B || priv->version == EIP197D)
+			val |= EIP197_HIA_DSE_CFG_EN_SINGLE_WR;
+		writel(val, EIP197_HIA_DSE(priv) + EIP197_HIA_DSE_CFG(pe));
+
+		/* Leave the DSE threads reset state */
+		writel(0, EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_CTRL(pe));
+
+		/* Configure the processing engine thresholds */
+		writel(EIP197_PE_OUT_DBUF_THRES_MIN(7) |
+		       EIP197_PE_OUT_DBUF_THRES_MAX(8),
+		       EIP197_PE(priv) + EIP197_PE_OUT_DBUF_THRES(pe));
+
+		/* Processing Engine configuration */
+
+		/* H/W capabilities selection */
+		val = EIP197_FUNCTION_RSVD;
+		val |= EIP197_PROTOCOL_ENCRYPT_ONLY | EIP197_PROTOCOL_HASH_ONLY;
+		val |= EIP197_PROTOCOL_ENCRYPT_HASH | EIP197_PROTOCOL_HASH_DECRYPT;
+		val |= EIP197_ALG_DES_ECB | EIP197_ALG_DES_CBC;
+		val |= EIP197_ALG_3DES_ECB | EIP197_ALG_3DES_CBC;
+		val |= EIP197_ALG_AES_ECB | EIP197_ALG_AES_CBC;
+		val |= EIP197_ALG_MD5 | EIP197_ALG_HMAC_MD5;
+		val |= EIP197_ALG_SHA1 | EIP197_ALG_HMAC_SHA1;
+		val |= EIP197_ALG_SHA2 | EIP197_ALG_HMAC_SHA2;
+		writel(val, EIP197_PE(priv) + EIP197_PE_EIP96_FUNCTION_EN(pe));
+	}
 
 	/* Command Descriptor Rings prepare */
 	for (i = 0; i < priv->config.rings; i++) {
@@ -408,18 +460,20 @@
 		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_SIZE);
 	}
 
-	/* Enable command descriptor rings */
-	writel(EIP197_DxE_THR_CTRL_EN | GENMASK(priv->config.rings - 1, 0),
-	       EIP197_HIA_DFE_THR(priv) + EIP197_HIA_DFE_THR_CTRL);
+	for (pe = 0; pe < priv->config.pes; pe++) {
+		/* Enable command descriptor rings */
+		writel(EIP197_DxE_THR_CTRL_EN | GENMASK(priv->config.rings - 1, 0),
+		       EIP197_HIA_DFE_THR(priv) + EIP197_HIA_DFE_THR_CTRL(pe));
 
-	/* Enable result descriptor rings */
-	writel(EIP197_DxE_THR_CTRL_EN | GENMASK(priv->config.rings - 1, 0),
-	       EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_CTRL);
+		/* Enable result descriptor rings */
+		writel(EIP197_DxE_THR_CTRL_EN | GENMASK(priv->config.rings - 1, 0),
+		       EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_CTRL(pe));
+	}
 
 	/* Clear any HIA interrupt */
 	writel(GENMASK(30, 20), EIP197_HIA_AIC_G(priv) + EIP197_HIA_AIC_G_ACK);
 
-	if (priv->version == EIP197) {
+	if (priv->version == EIP197B || priv->version == EIP197D) {
 		eip197_trc_cache_init(priv);
 
 		ret = eip197_load_firmwares(priv);
@@ -452,7 +506,6 @@
 {
 	struct crypto_async_request *req, *backlog;
 	struct safexcel_context *ctx;
-	struct safexcel_request *request;
 	int ret, nreq = 0, cdesc = 0, rdesc = 0, commands, results;
 
 	/* If a request wasn't properly dequeued because of a lack of resources,
@@ -476,16 +529,10 @@
 		}
 
 handle_req:
-		request = kzalloc(sizeof(*request), EIP197_GFP_FLAGS(*req));
-		if (!request)
-			goto request_failed;
-
 		ctx = crypto_tfm_ctx(req->tfm);
-		ret = ctx->send(req, ring, request, &commands, &results);
-		if (ret) {
-			kfree(request);
+		ret = ctx->send(req, ring, &commands, &results);
+		if (ret)
 			goto request_failed;
-		}
 
 		if (backlog)
 			backlog->complete(backlog, -EINPROGRESS);
@@ -494,14 +541,8 @@
 		 * to the engine because the input data was cached, continue to
 		 * dequeue other requests as this is valid and not an error.
 		 */
-		if (!commands && !results) {
-			kfree(request);
+		if (!commands && !results)
 			continue;
-		}
-
-		spin_lock_bh(&priv->ring[ring].egress_lock);
-		list_add_tail(&request->list, &priv->ring[ring].list);
-		spin_unlock_bh(&priv->ring[ring].egress_lock);
 
 		cdesc += commands;
 		rdesc += results;
@@ -519,7 +560,7 @@
 	if (!nreq)
 		return;
 
-	spin_lock_bh(&priv->ring[ring].egress_lock);
+	spin_lock_bh(&priv->ring[ring].lock);
 
 	priv->ring[ring].requests += nreq;
 
@@ -528,7 +569,7 @@
 		priv->ring[ring].busy = true;
 	}
 
-	spin_unlock_bh(&priv->ring[ring].egress_lock);
+	spin_unlock_bh(&priv->ring[ring].lock);
 
 	/* let the RDR know we have pending descriptors */
 	writel((rdesc * priv->config.rd_offset) << 2,
@@ -560,6 +601,24 @@
 	return -EINVAL;
 }
 
+inline void safexcel_rdr_req_set(struct safexcel_crypto_priv *priv,
+				 int ring,
+				 struct safexcel_result_desc *rdesc,
+				 struct crypto_async_request *req)
+{
+	int i = safexcel_ring_rdr_rdesc_index(priv, ring, rdesc);
+
+	priv->ring[ring].rdr_req[i] = req;
+}
+
+inline struct crypto_async_request *
+safexcel_rdr_req_get(struct safexcel_crypto_priv *priv, int ring)
+{
+	int i = safexcel_ring_first_rdr_index(priv, ring);
+
+	return priv->ring[ring].rdr_req[i];
+}
+
 void safexcel_complete(struct safexcel_crypto_priv *priv, int ring)
 {
 	struct safexcel_command_desc *cdesc;
@@ -588,21 +647,16 @@
 
 int safexcel_invalidate_cache(struct crypto_async_request *async,
 			      struct safexcel_crypto_priv *priv,
-			      dma_addr_t ctxr_dma, int ring,
-			      struct safexcel_request *request)
+			      dma_addr_t ctxr_dma, int ring)
 {
 	struct safexcel_command_desc *cdesc;
 	struct safexcel_result_desc *rdesc;
 	int ret = 0;
 
-	spin_lock_bh(&priv->ring[ring].egress_lock);
-
 	/* Prepare command descriptor */
 	cdesc = safexcel_add_cdesc(priv, ring, true, true, 0, 0, 0, ctxr_dma);
-	if (IS_ERR(cdesc)) {
-		ret = PTR_ERR(cdesc);
-		goto unlock;
-	}
+	if (IS_ERR(cdesc))
+		return PTR_ERR(cdesc);
 
 	cdesc->control_data.type = EIP197_TYPE_EXTENDED;
 	cdesc->control_data.options = 0;
@@ -617,21 +671,20 @@
 		goto cdesc_rollback;
 	}
 
-	request->req = async;
-	goto unlock;
+	safexcel_rdr_req_set(priv, ring, rdesc, async);
+
+	return ret;
 
 cdesc_rollback:
 	safexcel_ring_rollback_wptr(priv, &priv->ring[ring].cdr);
 
-unlock:
-	spin_unlock_bh(&priv->ring[ring].egress_lock);
 	return ret;
 }
 
 static inline void safexcel_handle_result_descriptor(struct safexcel_crypto_priv *priv,
 						     int ring)
 {
-	struct safexcel_request *sreq;
+	struct crypto_async_request *req;
 	struct safexcel_context *ctx;
 	int ret, i, nreq, ndesc, tot_descs, handled = 0;
 	bool should_complete;
@@ -646,28 +699,22 @@
 		goto requests_left;
 
 	for (i = 0; i < nreq; i++) {
-		spin_lock_bh(&priv->ring[ring].egress_lock);
-		sreq = list_first_entry(&priv->ring[ring].list,
-					struct safexcel_request, list);
-		list_del(&sreq->list);
-		spin_unlock_bh(&priv->ring[ring].egress_lock);
+		req = safexcel_rdr_req_get(priv, ring);
 
-		ctx = crypto_tfm_ctx(sreq->req->tfm);
-		ndesc = ctx->handle_result(priv, ring, sreq->req,
+		ctx = crypto_tfm_ctx(req->tfm);
+		ndesc = ctx->handle_result(priv, ring, req,
 					   &should_complete, &ret);
 		if (ndesc < 0) {
-			kfree(sreq);
 			dev_err(priv->dev, "failed to handle result (%d)", ndesc);
 			goto acknowledge;
 		}
 
 		if (should_complete) {
 			local_bh_disable();
-			sreq->req->complete(sreq->req, ret);
+			req->complete(req, ret);
 			local_bh_enable();
 		}
 
-		kfree(sreq);
 		tot_descs += ndesc;
 		handled++;
 	}
@@ -686,7 +733,7 @@
 		goto handle_results;
 
 requests_left:
-	spin_lock_bh(&priv->ring[ring].egress_lock);
+	spin_lock_bh(&priv->ring[ring].lock);
 
 	priv->ring[ring].requests -= handled;
 	safexcel_try_push_requests(priv, ring);
@@ -694,7 +741,7 @@
 	if (!priv->ring[ring].requests)
 		priv->ring[ring].busy = false;
 
-	spin_unlock_bh(&priv->ring[ring].egress_lock);
+	spin_unlock_bh(&priv->ring[ring].lock);
 }
 
 static void safexcel_dequeue_work(struct work_struct *work)
@@ -785,17 +832,29 @@
 }
 
 static struct safexcel_alg_template *safexcel_algs[] = {
+	&safexcel_alg_ecb_des,
+	&safexcel_alg_cbc_des,
+	&safexcel_alg_ecb_des3_ede,
+	&safexcel_alg_cbc_des3_ede,
 	&safexcel_alg_ecb_aes,
 	&safexcel_alg_cbc_aes,
+	&safexcel_alg_md5,
 	&safexcel_alg_sha1,
 	&safexcel_alg_sha224,
 	&safexcel_alg_sha256,
+	&safexcel_alg_sha384,
+	&safexcel_alg_sha512,
+	&safexcel_alg_hmac_md5,
 	&safexcel_alg_hmac_sha1,
 	&safexcel_alg_hmac_sha224,
 	&safexcel_alg_hmac_sha256,
+	&safexcel_alg_hmac_sha384,
+	&safexcel_alg_hmac_sha512,
 	&safexcel_alg_authenc_hmac_sha1_cbc_aes,
 	&safexcel_alg_authenc_hmac_sha224_cbc_aes,
 	&safexcel_alg_authenc_hmac_sha256_cbc_aes,
+	&safexcel_alg_authenc_hmac_sha384_cbc_aes,
+	&safexcel_alg_authenc_hmac_sha512_cbc_aes,
 };
 
 static int safexcel_register_algorithms(struct safexcel_crypto_priv *priv)
@@ -805,6 +864,9 @@
 	for (i = 0; i < ARRAY_SIZE(safexcel_algs); i++) {
 		safexcel_algs[i]->priv = priv;
 
+		if (!(safexcel_algs[i]->engines & priv->version))
+			continue;
+
 		if (safexcel_algs[i]->type == SAFEXCEL_ALG_TYPE_SKCIPHER)
 			ret = crypto_register_skcipher(&safexcel_algs[i]->alg.skcipher);
 		else if (safexcel_algs[i]->type == SAFEXCEL_ALG_TYPE_AEAD)
@@ -820,6 +882,9 @@
 
 fail:
 	for (j = 0; j < i; j++) {
+		if (!(safexcel_algs[j]->engines & priv->version))
+			continue;
+
 		if (safexcel_algs[j]->type == SAFEXCEL_ALG_TYPE_SKCIPHER)
 			crypto_unregister_skcipher(&safexcel_algs[j]->alg.skcipher);
 		else if (safexcel_algs[j]->type == SAFEXCEL_ALG_TYPE_AEAD)
@@ -836,6 +901,9 @@
 	int i;
 
 	for (i = 0; i < ARRAY_SIZE(safexcel_algs); i++) {
+		if (!(safexcel_algs[i]->engines & priv->version))
+			continue;
+
 		if (safexcel_algs[i]->type == SAFEXCEL_ALG_TYPE_SKCIPHER)
 			crypto_unregister_skcipher(&safexcel_algs[i]->alg.skcipher);
 		else if (safexcel_algs[i]->type == SAFEXCEL_ALG_TYPE_AEAD)
@@ -847,9 +915,21 @@
 
 static void safexcel_configure(struct safexcel_crypto_priv *priv)
 {
-	u32 val, mask;
+	u32 val, mask = 0;
 
 	val = readl(EIP197_HIA_AIC_G(priv) + EIP197_HIA_OPTIONS);
+
+	/* Read number of PEs from the engine */
+	switch (priv->version) {
+	case EIP197B:
+	case EIP197D:
+		mask = EIP197_N_PES_MASK;
+		break;
+	default:
+		mask = EIP97_N_PES_MASK;
+	}
+	priv->config.pes = (val >> EIP197_N_PES_OFFSET) & mask;
+
 	val = (val & GENMASK(27, 25)) >> 25;
 	mask = BIT(val) - 1;
 
@@ -867,7 +947,9 @@
 {
 	struct safexcel_register_offsets *offsets = &priv->offsets;
 
-	if (priv->version == EIP197) {
+	switch (priv->version) {
+	case EIP197B:
+	case EIP197D:
 		offsets->hia_aic	= EIP197_HIA_AIC_BASE;
 		offsets->hia_aic_g	= EIP197_HIA_AIC_G_BASE;
 		offsets->hia_aic_r	= EIP197_HIA_AIC_R_BASE;
@@ -878,7 +960,8 @@
 		offsets->hia_dse_thr	= EIP197_HIA_DSE_THR_BASE;
 		offsets->hia_gen_cfg	= EIP197_HIA_GEN_CFG_BASE;
 		offsets->pe		= EIP197_PE_BASE;
-	} else {
+		break;
+	case EIP97IES:
 		offsets->hia_aic	= EIP97_HIA_AIC_BASE;
 		offsets->hia_aic_g	= EIP97_HIA_AIC_G_BASE;
 		offsets->hia_aic_r	= EIP97_HIA_AIC_R_BASE;
@@ -889,6 +972,7 @@
 		offsets->hia_dse_thr	= EIP97_HIA_DSE_THR_BASE;
 		offsets->hia_gen_cfg	= EIP97_HIA_GEN_CFG_BASE;
 		offsets->pe		= EIP97_PE_BASE;
+		break;
 	}
 }
 
@@ -906,6 +990,9 @@
 	priv->dev = dev;
 	priv->version = (enum safexcel_eip_version)of_device_get_match_data(dev);
 
+	if (priv->version == EIP197B || priv->version == EIP197D)
+		priv->flags |= EIP197_TRC_CACHE;
+
 	safexcel_init_register_offsets(priv);
 
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -957,6 +1044,13 @@
 
 	safexcel_configure(priv);
 
+	priv->ring = devm_kcalloc(dev, priv->config.rings, sizeof(*priv->ring),
+				  GFP_KERNEL);
+	if (!priv->ring) {
+		ret = -ENOMEM;
+		goto err_reg_clk;
+	}
+
 	for (i = 0; i < priv->config.rings; i++) {
 		char irq_name[6] = {0}; /* "ringX\0" */
 		char wq_name[9] = {0}; /* "wq_ringX\0" */
@@ -969,6 +1063,14 @@
 		if (ret)
 			goto err_reg_clk;
 
+		priv->ring[i].rdr_req = devm_kcalloc(dev,
+			EIP197_DEFAULT_RING_SIZE, sizeof(*priv->ring[i].rdr_req),
+			GFP_KERNEL);
+		if (!priv->ring[i].rdr_req) {
+			ret = -ENOMEM;
+			goto err_reg_clk;
+		}
+
 		ring_irq = devm_kzalloc(dev, sizeof(*ring_irq), GFP_KERNEL);
 		if (!ring_irq) {
 			ret = -ENOMEM;
@@ -1004,9 +1106,7 @@
 		crypto_init_queue(&priv->ring[i].queue,
 				  EIP197_DEFAULT_RING_SIZE);
 
-		INIT_LIST_HEAD(&priv->ring[i].list);
 		spin_lock_init(&priv->ring[i].lock);
-		spin_lock_init(&priv->ring[i].egress_lock);
 		spin_lock_init(&priv->ring[i].queue_lock);
 	}
 
@@ -1034,6 +1134,24 @@
 	return ret;
 }
 
+static void safexcel_hw_reset_rings(struct safexcel_crypto_priv *priv)
+{
+	int i;
+
+	for (i = 0; i < priv->config.rings; i++) {
+		/* clear any pending interrupt */
+		writel(GENMASK(5, 0), EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_STAT);
+		writel(GENMASK(7, 0), EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_STAT);
+
+		/* Reset the CDR base address */
+		writel(0, EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_LO);
+		writel(0, EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_HI);
+
+		/* Reset the RDR base address */
+		writel(0, EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_LO);
+		writel(0, EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_HI);
+	}
+}
 
 static int safexcel_remove(struct platform_device *pdev)
 {
@@ -1041,6 +1159,8 @@
 	int i;
 
 	safexcel_unregister_algorithms(priv);
+	safexcel_hw_reset_rings(priv);
+
 	clk_disable_unprepare(priv->clk);
 
 	for (i = 0; i < priv->config.rings; i++)
@@ -1051,12 +1171,26 @@
 
 static const struct of_device_id safexcel_of_match_table[] = {
 	{
-		.compatible = "inside-secure,safexcel-eip97",
-		.data = (void *)EIP97,
+		.compatible = "inside-secure,safexcel-eip97ies",
+		.data = (void *)EIP97IES,
 	},
 	{
+		.compatible = "inside-secure,safexcel-eip197b",
+		.data = (void *)EIP197B,
+	},
+	{
+		.compatible = "inside-secure,safexcel-eip197d",
+		.data = (void *)EIP197D,
+	},
+	{
+		/* Deprecated. Kept for backward compatibility. */
+		.compatible = "inside-secure,safexcel-eip97",
+		.data = (void *)EIP97IES,
+	},
+	{
+		/* Deprecated. Kept for backward compatibility. */
 		.compatible = "inside-secure,safexcel-eip197",
-		.data = (void *)EIP197,
+		.data = (void *)EIP197B,
 	},
 	{},
 };
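[ The biggest behavioural change in safexcel.c is not the new compatibles
  but the removal of struct safexcel_request: in-flight requests are now
  remembered in a flat per-ring array (rdr_req) indexed by result-descriptor
  slot, so the completion path replaces a kzalloc/list_add_tail/list_del/
  kfree cycle under egress_lock with one array store and one array load.
  The idea, stripped of the driver specifics (hypothetical sketch):

    #include <linux/crypto.h>

    #define RING_SIZE 400   /* stand-in for EIP197_DEFAULT_RING_SIZE */

    static struct crypto_async_request *rdr_req[RING_SIZE];

    /* submit path: the descriptor's slot in the ring is the key */
    static void req_set(int slot, struct crypto_async_request *req)
    {
            rdr_req[slot] = req;
    }

    /* completion path: read it back from the oldest in-flight slot */
    static struct crypto_async_request *req_get(int slot)
    {
            return rdr_req[slot];
    }
]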
diff --git a/drivers/crypto/inside-secure/safexcel.h b/drivers/crypto/inside-secure/safexcel.h
index 8b3ee9b..65624a8 100644
--- a/drivers/crypto/inside-secure/safexcel.h
+++ b/drivers/crypto/inside-secure/safexcel.h
@@ -1,11 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /*
  * Copyright (C) 2017 Marvell
  *
  * Antoine Tenart <antoine.tenart@free-electrons.com>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
  */
 
 #ifndef __SAFEXCEL_H__
@@ -95,13 +92,13 @@
 #define EIP197_HIA_xDR_STAT			0x003c
 
 /* register offsets */
-#define EIP197_HIA_DFE_CFG			0x0000
-#define EIP197_HIA_DFE_THR_CTRL			0x0000
-#define EIP197_HIA_DFE_THR_STAT			0x0004
-#define EIP197_HIA_DSE_CFG			0x0000
-#define EIP197_HIA_DSE_THR_CTRL			0x0000
-#define EIP197_HIA_DSE_THR_STAT			0x0004
-#define EIP197_HIA_RA_PE_CTRL			0x0010
+#define EIP197_HIA_DFE_CFG(n)			(0x0000 + (128 * (n)))
+#define EIP197_HIA_DFE_THR_CTRL(n)		(0x0000 + (128 * (n)))
+#define EIP197_HIA_DFE_THR_STAT(n)		(0x0004 + (128 * (n)))
+#define EIP197_HIA_DSE_CFG(n)			(0x0000 + (128 * (n)))
+#define EIP197_HIA_DSE_THR_CTRL(n)		(0x0000 + (128 * (n)))
+#define EIP197_HIA_DSE_THR_STAT(n)		(0x0004 + (128 * (n)))
+#define EIP197_HIA_RA_PE_CTRL(n)		(0x0010 + (8   * (n)))
 #define EIP197_HIA_RA_PE_STAT			0x0014
 #define EIP197_HIA_AIC_R_OFF(r)			((r) * 0x1000)
 #define EIP197_HIA_AIC_R_ENABLE_CTRL(r)		(0xe008 - EIP197_HIA_AIC_R_OFF(r))
@@ -114,18 +111,18 @@
 #define EIP197_HIA_MST_CTRL			0xfff4
 #define EIP197_HIA_OPTIONS			0xfff8
 #define EIP197_HIA_VERSION			0xfffc
-#define EIP197_PE_IN_DBUF_THRES			0x0000
-#define EIP197_PE_IN_TBUF_THRES			0x0100
-#define EIP197_PE_ICE_SCRATCH_RAM		0x0800
-#define EIP197_PE_ICE_PUE_CTRL			0x0c80
-#define EIP197_PE_ICE_SCRATCH_CTRL		0x0d04
-#define EIP197_PE_ICE_FPP_CTRL			0x0d80
-#define EIP197_PE_ICE_RAM_CTRL			0x0ff0
-#define EIP197_PE_EIP96_FUNCTION_EN		0x1004
-#define EIP197_PE_EIP96_CONTEXT_CTRL		0x1008
-#define EIP197_PE_EIP96_CONTEXT_STAT		0x100c
-#define EIP197_PE_OUT_DBUF_THRES		0x1c00
-#define EIP197_PE_OUT_TBUF_THRES		0x1d00
+#define EIP197_PE_IN_DBUF_THRES(n)		(0x0000 + (0x2000 * (n)))
+#define EIP197_PE_IN_TBUF_THRES(n)		(0x0100 + (0x2000 * (n)))
+#define EIP197_PE_ICE_SCRATCH_RAM(n)		(0x0800 + (0x2000 * (n)))
+#define EIP197_PE_ICE_PUE_CTRL(n)		(0x0c80 + (0x2000 * (n)))
+#define EIP197_PE_ICE_SCRATCH_CTRL(n)		(0x0d04 + (0x2000 * (n)))
+#define EIP197_PE_ICE_FPP_CTRL(n)		(0x0d80 + (0x2000 * (n)))
+#define EIP197_PE_ICE_RAM_CTRL(n)		(0x0ff0 + (0x2000 * (n)))
+#define EIP197_PE_EIP96_FUNCTION_EN(n)		(0x1004 + (0x2000 * (n)))
+#define EIP197_PE_EIP96_CONTEXT_CTRL(n)		(0x1008 + (0x2000 * (n)))
+#define EIP197_PE_EIP96_CONTEXT_STAT(n)		(0x100c + (0x2000 * (n)))
+#define EIP197_PE_OUT_DBUF_THRES(n)		(0x1c00 + (0x2000 * (n)))
+#define EIP197_PE_OUT_TBUF_THRES(n)		(0x1d00 + (0x2000 * (n)))
 #define EIP197_MST_CTRL				0xfff4
 
 /* EIP197-specific registers, no indirection */
@@ -184,6 +181,11 @@
 #define EIP197_HIA_RA_PE_CTRL_RESET		BIT(31)
 #define EIP197_HIA_RA_PE_CTRL_EN		BIT(30)
 
+/* EIP197_HIA_OPTIONS */
+#define EIP197_N_PES_OFFSET			4
+#define EIP197_N_PES_MASK			GENMASK(4, 0)
+#define EIP97_N_PES_MASK			GENMASK(2, 0)
+
 /* EIP197_HIA_AIC_R_ENABLE_CTRL */
 #define EIP197_CDR_IRQ(n)			BIT((n) * 2)
 #define EIP197_RDR_IRQ(n)			BIT((n) * 2 + 1)
@@ -217,6 +219,7 @@
 #define WR_CACHE_4BITS				(WR_CACHE_3BITS << 1 | BIT(0))
 #define EIP197_MST_CTRL_RD_CACHE(n)		(((n) & 0xf) << 0)
 #define EIP197_MST_CTRL_WD_CACHE(n)		(((n) & 0xf) << 4)
+#define EIP197_MST_CTRL_TX_MAX_CMD(n)		(((n) & 0xf) << 20)
 #define EIP197_MST_CTRL_BYTE_SWAP		BIT(24)
 #define EIP197_MST_CTRL_NO_BYTE_SWAP		BIT(25)
 
@@ -287,7 +290,7 @@
 	u32 control0;
 	u32 control1;
 
-	__le32 data[24];
+	__le32 data[40];
 } __packed;
 
 /* control0 */
@@ -305,14 +308,19 @@
 #define CONTEXT_CONTROL_NO_FINISH_HASH		BIT(5)
 #define CONTEXT_CONTROL_SIZE(n)			((n) << 8)
 #define CONTEXT_CONTROL_KEY_EN			BIT(16)
+#define CONTEXT_CONTROL_CRYPTO_ALG_DES		(0x0 << 17)
+#define CONTEXT_CONTROL_CRYPTO_ALG_3DES		(0x2 << 17)
 #define CONTEXT_CONTROL_CRYPTO_ALG_AES128	(0x5 << 17)
 #define CONTEXT_CONTROL_CRYPTO_ALG_AES192	(0x6 << 17)
 #define CONTEXT_CONTROL_CRYPTO_ALG_AES256	(0x7 << 17)
 #define CONTEXT_CONTROL_DIGEST_PRECOMPUTED	(0x1 << 21)
 #define CONTEXT_CONTROL_DIGEST_HMAC		(0x3 << 21)
+#define CONTEXT_CONTROL_CRYPTO_ALG_MD5		(0x0 << 23)
 #define CONTEXT_CONTROL_CRYPTO_ALG_SHA1		(0x2 << 23)
 #define CONTEXT_CONTROL_CRYPTO_ALG_SHA224	(0x4 << 23)
 #define CONTEXT_CONTROL_CRYPTO_ALG_SHA256	(0x3 << 23)
+#define CONTEXT_CONTROL_CRYPTO_ALG_SHA384	(0x6 << 23)
+#define CONTEXT_CONTROL_CRYPTO_ALG_SHA512	(0x5 << 23)
 #define CONTEXT_CONTROL_INV_FR			(0x5 << 24)
 #define CONTEXT_CONTROL_INV_TR			(0x6 << 24)
 
@@ -327,6 +335,11 @@
 #define CONTEXT_CONTROL_COUNTER_MODE		BIT(10)
 #define CONTEXT_CONTROL_HASH_STORE		BIT(19)
 
+/* The hash counter given to the engine in the context has a granularity of
+ * 64 bits.
+ */
+#define EIP197_COUNTER_BLOCK_SIZE		64
+
 /* EIP197_CS_RAM_CTRL */
 #define EIP197_TRC_ENABLE_0			BIT(4)
 #define EIP197_TRC_ENABLE_1			BIT(5)
@@ -349,13 +362,18 @@
 #define EIP197_TRC_PARAMS2_RC_SZ_SMALL(n)	((n) << 18)
 
 /* Cache helpers */
-#define EIP197_CS_RC_MAX			52
+#define EIP197B_CS_RC_MAX			52
+#define EIP197D_CS_RC_MAX			96
 #define EIP197_CS_RC_SIZE			(4 * sizeof(u32))
 #define EIP197_CS_RC_NEXT(x)			(x)
 #define EIP197_CS_RC_PREV(x)			((x) << 10)
 #define EIP197_RC_NULL				0x3ff
-#define EIP197_CS_TRC_REC_WC			59
-#define EIP197_CS_TRC_LG_REC_WC			73
+#define EIP197B_CS_TRC_REC_WC			59
+#define EIP197D_CS_TRC_REC_WC			64
+#define EIP197B_CS_TRC_LG_REC_WC		73
+#define EIP197D_CS_TRC_LG_REC_WC		80
+#define EIP197B_CS_HT_WC			64
+#define EIP197D_CS_HT_WC			256
 
 /* Result data */
 struct result_data_desc {
@@ -450,6 +469,7 @@
 #define EIP197_OPTION_MAGIC_VALUE	BIT(0)
 #define EIP197_OPTION_64BIT_CTX		BIT(1)
 #define EIP197_OPTION_CTX_CTRL_IN_CMD	BIT(8)
+#define EIP197_OPTION_2_TOKEN_IV_CMD	GENMASK(11, 10)
 #define EIP197_OPTION_4_TOKEN_IV_CMD	GENMASK(11, 9)
 
 #define EIP197_TYPE_EXTENDED		0x3
@@ -480,7 +500,7 @@
 	FW_NB
 };
 
-struct safexcel_ring {
+struct safexcel_desc_ring {
 	void *base;
 	void *base_end;
 	dma_addr_t base_dma;
@@ -489,8 +509,7 @@
 	void *write;
 	void *read;
 
-	/* number of elements used in the ring */
-	unsigned nr;
+	/* descriptor element offset */
 	unsigned offset;
 };
 
@@ -500,12 +519,8 @@
 	SAFEXCEL_ALG_TYPE_AHASH,
 };
 
-struct safexcel_request {
-	struct list_head list;
-	struct crypto_async_request *req;
-};
-
 struct safexcel_config {
+	u32 pes;
 	u32 rings;
 
 	u32 cd_size;
@@ -521,9 +536,40 @@
 	int ring;
 };
 
+struct safexcel_ring {
+	spinlock_t lock;
+
+	struct workqueue_struct *workqueue;
+	struct safexcel_work_data work_data;
+
+	/* command/result rings */
+	struct safexcel_desc_ring cdr;
+	struct safexcel_desc_ring rdr;
+
+	/* result ring crypto API request */
+	struct crypto_async_request **rdr_req;
+
+	/* queue */
+	struct crypto_queue queue;
+	spinlock_t queue_lock;
+
+	/* Number of requests in the engine. */
+	int requests;
+
+	/* The ring is currently handling at least one request */
+	bool busy;
+
+	/* Store for the current request when bailing out of the dequeueing
+	 * function because not enough resources are available.
+	 */
+	struct crypto_async_request *req;
+	struct crypto_async_request *backlog;
+};
+
 enum safexcel_eip_version {
-	EIP97,
-	EIP197,
+	EIP97IES = BIT(0),
+	EIP197B  = BIT(1),
+	EIP197D  = BIT(2),
 };
 
 struct safexcel_register_offsets {
@@ -539,6 +585,10 @@
 	u32 pe;
 };
 
+enum safexcel_flags {
+	EIP197_TRC_CACHE = BIT(0),
+};
+
 struct safexcel_crypto_priv {
 	void __iomem *base;
 	struct device *dev;
@@ -548,46 +598,19 @@
 
 	enum safexcel_eip_version version;
 	struct safexcel_register_offsets offsets;
+	u32 flags;
 
 	/* context DMA pool */
 	struct dma_pool *context_pool;
 
 	atomic_t ring_used;
 
-	struct {
-		spinlock_t lock;
-		spinlock_t egress_lock;
-
-		struct list_head list;
-		struct workqueue_struct *workqueue;
-		struct safexcel_work_data work_data;
-
-		/* command/result rings */
-		struct safexcel_ring cdr;
-		struct safexcel_ring rdr;
-
-		/* queue */
-		struct crypto_queue queue;
-		spinlock_t queue_lock;
-
-		/* Number of requests in the engine. */
-		int requests;
-
-		/* The ring is currently handling at least one request */
-		bool busy;
-
-		/* Store for current requests when bailing out of the dequeueing
-		 * function when no enough resources are available.
-		 */
-		struct crypto_async_request *req;
-		struct crypto_async_request *backlog;
-	} ring[EIP197_MAX_RINGS];
+	struct safexcel_ring *ring;
 };
 
 struct safexcel_context {
 	int (*send)(struct crypto_async_request *req, int ring,
-		    struct safexcel_request *request, int *commands,
-		    int *results);
+		    int *commands, int *results);
 	int (*handle_result)(struct safexcel_crypto_priv *priv, int ring,
 			     struct crypto_async_request *req, bool *complete,
 			     int *ret);
@@ -600,13 +623,13 @@
 };
 
 struct safexcel_ahash_export_state {
-	u64 len;
-	u64 processed;
+	u64 len[2];
+	u64 processed[2];
 
 	u32 digest;
 
-	u32 state[SHA256_DIGEST_SIZE / sizeof(u32)];
-	u8 cache[SHA256_BLOCK_SIZE];
+	u32 state[SHA512_DIGEST_SIZE / sizeof(u32)];
+	u8 cache[SHA512_BLOCK_SIZE];
 };
 
 /*
@@ -617,6 +640,7 @@
 struct safexcel_alg_template {
 	struct safexcel_crypto_priv *priv;
 	enum safexcel_alg_type type;
+	u32 engines;
 	union {
 		struct skcipher_alg skcipher;
 		struct aead_alg aead;
@@ -635,16 +659,16 @@
 void safexcel_complete(struct safexcel_crypto_priv *priv, int ring);
 int safexcel_invalidate_cache(struct crypto_async_request *async,
 			      struct safexcel_crypto_priv *priv,
-			      dma_addr_t ctxr_dma, int ring,
-			      struct safexcel_request *request);
+			      dma_addr_t ctxr_dma, int ring);
 int safexcel_init_ring_descriptors(struct safexcel_crypto_priv *priv,
-				   struct safexcel_ring *cdr,
-				   struct safexcel_ring *rdr);
+				   struct safexcel_desc_ring *cdr,
+				   struct safexcel_desc_ring *rdr);
 int safexcel_select_ring(struct safexcel_crypto_priv *priv);
 void *safexcel_ring_next_rptr(struct safexcel_crypto_priv *priv,
-			      struct safexcel_ring *ring);
+			      struct safexcel_desc_ring *ring);
+void *safexcel_ring_first_rptr(struct safexcel_crypto_priv *priv, int ring);
 void safexcel_ring_rollback_wptr(struct safexcel_crypto_priv *priv,
-				 struct safexcel_ring *ring);
+				 struct safexcel_desc_ring *ring);
 struct safexcel_command_desc *safexcel_add_cdesc(struct safexcel_crypto_priv *priv,
 						 int ring_id,
 						 bool first, bool last,
@@ -655,21 +679,44 @@
 						 int ring_id,
 						bool first, bool last,
 						dma_addr_t data, u32 len);
+int safexcel_ring_first_rdr_index(struct safexcel_crypto_priv *priv,
+				  int ring);
+int safexcel_ring_rdr_rdesc_index(struct safexcel_crypto_priv *priv,
+				  int ring,
+				  struct safexcel_result_desc *rdesc);
+void safexcel_rdr_req_set(struct safexcel_crypto_priv *priv,
+			  int ring,
+			  struct safexcel_result_desc *rdesc,
+			  struct crypto_async_request *req);
+inline struct crypto_async_request *
+safexcel_rdr_req_get(struct safexcel_crypto_priv *priv, int ring);
 void safexcel_inv_complete(struct crypto_async_request *req, int error);
 int safexcel_hmac_setkey(const char *alg, const u8 *key, unsigned int keylen,
 			 void *istate, void *ostate);
 
 /* available algorithms */
+extern struct safexcel_alg_template safexcel_alg_ecb_des;
+extern struct safexcel_alg_template safexcel_alg_cbc_des;
+extern struct safexcel_alg_template safexcel_alg_ecb_des3_ede;
+extern struct safexcel_alg_template safexcel_alg_cbc_des3_ede;
 extern struct safexcel_alg_template safexcel_alg_ecb_aes;
 extern struct safexcel_alg_template safexcel_alg_cbc_aes;
+extern struct safexcel_alg_template safexcel_alg_md5;
 extern struct safexcel_alg_template safexcel_alg_sha1;
 extern struct safexcel_alg_template safexcel_alg_sha224;
 extern struct safexcel_alg_template safexcel_alg_sha256;
+extern struct safexcel_alg_template safexcel_alg_sha384;
+extern struct safexcel_alg_template safexcel_alg_sha512;
+extern struct safexcel_alg_template safexcel_alg_hmac_md5;
 extern struct safexcel_alg_template safexcel_alg_hmac_sha1;
 extern struct safexcel_alg_template safexcel_alg_hmac_sha224;
 extern struct safexcel_alg_template safexcel_alg_hmac_sha256;
+extern struct safexcel_alg_template safexcel_alg_hmac_sha384;
+extern struct safexcel_alg_template safexcel_alg_hmac_sha512;
 extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha1_cbc_aes;
 extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha224_cbc_aes;
 extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha256_cbc_aes;
+extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha384_cbc_aes;
+extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha512_cbc_aes;
 
 #endif
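[ Most of the macro churn in this header is one pattern applied many times:
  the EIP197d exposes several processing engines, and each engine's
  registers repeat at a fixed stride (0x2000 bytes for the PE window, 128
  bytes for the per-PE HIA thread-control registers), so the old scalar
  offsets become functions of the engine index. The arithmetic, checked
  standalone against the header's own definitions:

    #include <stdio.h>

    #define EIP197_PE_ICE_RAM_CTRL(n)   (0x0ff0 + (0x2000 * (n)))
    #define EIP197_HIA_DFE_THR_CTRL(n)  (0x0000 + (128 * (n)))

    int main(void)
    {
            printf("ICE_RAM_CTRL: pe0=%#x pe1=%#x\n",
                   EIP197_PE_ICE_RAM_CTRL(0), EIP197_PE_ICE_RAM_CTRL(1));
            printf("DFE_THR_CTRL: pe0=%#x pe1=%#x\n",
                   EIP197_HIA_DFE_THR_CTRL(0), EIP197_HIA_DFE_THR_CTRL(1));
            return 0;   /* prints 0xff0 / 0x2ff0, then 0 / 0x80 */
    }
]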
diff --git a/drivers/crypto/inside-secure/safexcel_cipher.c b/drivers/crypto/inside-secure/safexcel_cipher.c
index 6bb60fd..3aef1d4 100644
--- a/drivers/crypto/inside-secure/safexcel_cipher.c
+++ b/drivers/crypto/inside-secure/safexcel_cipher.c
@@ -1,11 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Copyright (C) 2017 Marvell
  *
  * Antoine Tenart <antoine.tenart@free-electrons.com>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
  */
 
 #include <linux/device.h>
@@ -15,6 +12,7 @@
 #include <crypto/aead.h>
 #include <crypto/aes.h>
 #include <crypto/authenc.h>
+#include <crypto/des.h>
 #include <crypto/sha.h>
 #include <crypto/skcipher.h>
 #include <crypto/internal/aead.h>
@@ -27,21 +25,28 @@
 	SAFEXCEL_DECRYPT,
 };
 
+enum safexcel_cipher_alg {
+	SAFEXCEL_DES,
+	SAFEXCEL_3DES,
+	SAFEXCEL_AES,
+};
+
 struct safexcel_cipher_ctx {
 	struct safexcel_context base;
 	struct safexcel_crypto_priv *priv;
 
 	u32 mode;
+	enum safexcel_cipher_alg alg;
 	bool aead;
 
 	__le32 key[8];
 	unsigned int key_len;
 
 	/* All the below is AEAD specific */
-	u32 alg;
+	u32 hash_alg;
 	u32 state_sz;
-	u32 ipad[SHA256_DIGEST_SIZE / sizeof(u32)];
-	u32 opad[SHA256_DIGEST_SIZE / sizeof(u32)];
+	u32 ipad[SHA512_DIGEST_SIZE / sizeof(u32)];
+	u32 opad[SHA512_DIGEST_SIZE / sizeof(u32)];
 };
 
 struct safexcel_cipher_req {
@@ -57,10 +62,24 @@
 	unsigned offset = 0;
 
 	if (ctx->mode == CONTEXT_CONTROL_CRYPTO_MODE_CBC) {
-		offset = AES_BLOCK_SIZE / sizeof(u32);
-		memcpy(cdesc->control_data.token, iv, AES_BLOCK_SIZE);
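+		/* The IV occupies one cipher block; DES/3DES use the two-word
+		 * token option while AES needs the four-word one.
+		 */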
+		switch (ctx->alg) {
+		case SAFEXCEL_DES:
+			offset = DES_BLOCK_SIZE / sizeof(u32);
+			memcpy(cdesc->control_data.token, iv, DES_BLOCK_SIZE);
+			cdesc->control_data.options |= EIP197_OPTION_2_TOKEN_IV_CMD;
+			break;
+		case SAFEXCEL_3DES:
+			offset = DES3_EDE_BLOCK_SIZE / sizeof(u32);
+			memcpy(cdesc->control_data.token, iv, DES3_EDE_BLOCK_SIZE);
+			cdesc->control_data.options |= EIP197_OPTION_2_TOKEN_IV_CMD;
+			break;
 
-		cdesc->control_data.options |= EIP197_OPTION_4_TOKEN_IV_CMD;
+		case SAFEXCEL_AES:
+			offset = AES_BLOCK_SIZE / sizeof(u32);
+			memcpy(cdesc->control_data.token, iv, AES_BLOCK_SIZE);
+			cdesc->control_data.options |= EIP197_OPTION_4_TOKEN_IV_CMD;
+			break;
+		}
 	}
 
 	token = (struct safexcel_token *)(cdesc->control_data.token + offset);
@@ -145,7 +164,7 @@
 		return ret;
 	}
 
-	if (priv->version == EIP197 && ctx->base.ctxr_dma) {
+	if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr_dma) {
 		for (i = 0; i < len / sizeof(u32); i++) {
 			if (ctx->key[i] != cpu_to_le32(aes.key_enc[i])) {
 				ctx->base.needs_inv = true;
@@ -179,12 +198,12 @@
 		goto badkey;
 
 	/* Encryption key */
-	if (priv->version == EIP197 && ctx->base.ctxr_dma &&
+	if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr_dma &&
 	    memcmp(ctx->key, keys.enckey, keys.enckeylen))
 		ctx->base.needs_inv = true;
 
 	/* Auth key */
-	switch (ctx->alg) {
+	switch (ctx->hash_alg) {
 	case CONTEXT_CONTROL_CRYPTO_ALG_SHA1:
 		if (safexcel_hmac_setkey("safexcel-sha1", keys.authkey,
 					 keys.authkeylen, &istate, &ostate))
@@ -200,6 +219,16 @@
 					 keys.authkeylen, &istate, &ostate))
 			goto badkey;
 		break;
+	case CONTEXT_CONTROL_CRYPTO_ALG_SHA384:
+		if (safexcel_hmac_setkey("safexcel-sha384", keys.authkey,
+					 keys.authkeylen, &istate, &ostate))
+			goto badkey;
+		break;
+	case CONTEXT_CONTROL_CRYPTO_ALG_SHA512:
+		if (safexcel_hmac_setkey("safexcel-sha512", keys.authkey,
+					 keys.authkeylen, &istate, &ostate))
+			goto badkey;
+		break;
 	default:
 		dev_err(priv->dev, "aead: unsupported hash algorithm\n");
 		goto badkey;
@@ -208,7 +237,7 @@
 	crypto_aead_set_flags(ctfm, crypto_aead_get_flags(ctfm) &
 				    CRYPTO_TFM_RES_MASK);
 
-	if (priv->version == EIP197 && ctx->base.ctxr_dma &&
+	if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr_dma &&
 	    (memcmp(ctx->ipad, istate.state, ctx->state_sz) ||
 	     memcmp(ctx->opad, ostate.state, ctx->state_sz)))
 		ctx->base.needs_inv = true;
@@ -258,22 +287,28 @@
 
 	if (ctx->aead)
 		cdesc->control_data.control0 |= CONTEXT_CONTROL_DIGEST_HMAC |
-						ctx->alg;
+						ctx->hash_alg;
 
-	switch (ctx->key_len) {
-	case AES_KEYSIZE_128:
-		cdesc->control_data.control0 |= CONTEXT_CONTROL_CRYPTO_ALG_AES128;
-		break;
-	case AES_KEYSIZE_192:
-		cdesc->control_data.control0 |= CONTEXT_CONTROL_CRYPTO_ALG_AES192;
-		break;
-	case AES_KEYSIZE_256:
-		cdesc->control_data.control0 |= CONTEXT_CONTROL_CRYPTO_ALG_AES256;
-		break;
-	default:
-		dev_err(priv->dev, "aes keysize not supported: %u\n",
-			ctx->key_len);
-		return -EINVAL;
+	if (ctx->alg == SAFEXCEL_DES) {
+		cdesc->control_data.control0 |= CONTEXT_CONTROL_CRYPTO_ALG_DES;
+	} else if (ctx->alg == SAFEXCEL_3DES) {
+		cdesc->control_data.control0 |= CONTEXT_CONTROL_CRYPTO_ALG_3DES;
+	} else if (ctx->alg == SAFEXCEL_AES) {
+		switch (ctx->key_len) {
+		case AES_KEYSIZE_128:
+			cdesc->control_data.control0 |= CONTEXT_CONTROL_CRYPTO_ALG_AES128;
+			break;
+		case AES_KEYSIZE_192:
+			cdesc->control_data.control0 |= CONTEXT_CONTROL_CRYPTO_ALG_AES192;
+			break;
+		case AES_KEYSIZE_256:
+			cdesc->control_data.control0 |= CONTEXT_CONTROL_CRYPTO_ALG_AES256;
+			break;
+		default:
+			dev_err(priv->dev, "aes keysize not supported: %u\n",
+				ctx->key_len);
+			return -EINVAL;
+		}
 	}
 
 	ctrl_size = ctx->key_len / sizeof(u32);
@@ -298,7 +333,6 @@
 
 	*ret = 0;
 
-	spin_lock_bh(&priv->ring[ring].egress_lock);
 	do {
 		rdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].rdr);
 		if (IS_ERR(rdesc)) {
@@ -315,7 +349,6 @@
 	} while (!rdesc->last_seg);
 
 	safexcel_complete(priv, ring);
-	spin_unlock_bh(&priv->ring[ring].egress_lock);
 
 	if (src == dst) {
 		dma_unmap_sg(priv->dev, src,
@@ -335,8 +368,7 @@
 	return ndesc;
 }
 
-static int safexcel_aes_send(struct crypto_async_request *base, int ring,
-			     struct safexcel_request *request,
+static int safexcel_send_req(struct crypto_async_request *base, int ring,
 			     struct safexcel_cipher_req *sreq,
 			     struct scatterlist *src, struct scatterlist *dst,
 			     unsigned int cryptlen, unsigned int assoclen,
@@ -346,7 +378,7 @@
 	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(base->tfm);
 	struct safexcel_crypto_priv *priv = ctx->priv;
 	struct safexcel_command_desc *cdesc;
-	struct safexcel_result_desc *rdesc;
+	struct safexcel_result_desc *rdesc, *first_rdesc = NULL;
 	struct scatterlist *sg;
 	unsigned int totlen = cryptlen + assoclen;
 	int nr_src, nr_dst, n_cdesc = 0, n_rdesc = 0, queued = totlen;
@@ -386,8 +418,6 @@
 		       ctx->opad, ctx->state_sz);
 	}
 
-	spin_lock_bh(&priv->ring[ring].egress_lock);
-
 	/* command descriptors */
 	for_each_sg(src, sg, nr_src, i) {
 		int len = sg_dma_len(sg);
@@ -434,12 +464,12 @@
 			ret = PTR_ERR(rdesc);
 			goto rdesc_rollback;
 		}
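+		/* The request is later attached to the first result
+		 * descriptor of this transfer.
+		 */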
+		if (first)
+			first_rdesc = rdesc;
 		n_rdesc++;
 	}
 
-	spin_unlock_bh(&priv->ring[ring].egress_lock);
-
-	request->req = base;
+	safexcel_rdr_req_set(priv, ring, first_rdesc, base);
 
 	*commands = n_cdesc;
 	*results = n_rdesc;
@@ -452,8 +482,6 @@
 	for (i = 0; i < n_cdesc; i++)
 		safexcel_ring_rollback_wptr(priv, &priv->ring[ring].cdr);
 
-	spin_unlock_bh(&priv->ring[ring].egress_lock);
-
 	if (src == dst) {
 		dma_unmap_sg(priv->dev, src,
 			     sg_nents_for_len(src, totlen),
@@ -481,7 +509,6 @@
 
 	*ret = 0;
 
-	spin_lock_bh(&priv->ring[ring].egress_lock);
 	do {
 		rdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].rdr);
 		if (IS_ERR(rdesc)) {
@@ -491,17 +518,13 @@
 			break;
 		}
 
-		if (rdesc->result_data.error_code) {
-			dev_err(priv->dev, "cipher: invalidate: result descriptor error (%d)\n",
-				rdesc->result_data.error_code);
-			*ret = -EIO;
-		}
+		if (likely(!*ret))
+			*ret = safexcel_rdesc_check_errors(priv, rdesc);
 
 		ndesc++;
 	} while (!rdesc->last_seg);
 
 	safexcel_complete(priv, ring);
-	spin_unlock_bh(&priv->ring[ring].egress_lock);
 
 	if (ctx->base.exit_inv) {
 		dma_pool_free(priv->context_pool, ctx->base.ctxr,
@@ -577,15 +600,13 @@
 }
 
 static int safexcel_cipher_send_inv(struct crypto_async_request *base,
-				    int ring, struct safexcel_request *request,
-				    int *commands, int *results)
+				    int ring, int *commands, int *results)
 {
 	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(base->tfm);
 	struct safexcel_crypto_priv *priv = ctx->priv;
 	int ret;
 
-	ret = safexcel_invalidate_cache(base, priv, ctx->base.ctxr_dma, ring,
-					request);
+	ret = safexcel_invalidate_cache(base, priv, ctx->base.ctxr_dma, ring);
 	if (unlikely(ret))
 		return ret;
 
@@ -596,7 +617,6 @@
 }
 
 static int safexcel_skcipher_send(struct crypto_async_request *async, int ring,
-				  struct safexcel_request *request,
 				  int *commands, int *results)
 {
 	struct skcipher_request *req = skcipher_request_cast(async);
@@ -605,21 +625,19 @@
 	struct safexcel_crypto_priv *priv = ctx->priv;
 	int ret;
 
-	BUG_ON(priv->version == EIP97 && sreq->needs_inv);
+	BUG_ON(!(priv->flags & EIP197_TRC_CACHE) && sreq->needs_inv);
 
 	if (sreq->needs_inv)
-		ret = safexcel_cipher_send_inv(async, ring, request, commands,
-					       results);
+		ret = safexcel_cipher_send_inv(async, ring, commands, results);
 	else
-		ret = safexcel_aes_send(async, ring, request, sreq, req->src,
+		ret = safexcel_send_req(async, ring, sreq, req->src,
 					req->dst, req->cryptlen, 0, 0, req->iv,
 					commands, results);
 	return ret;
 }
 
 static int safexcel_aead_send(struct crypto_async_request *async, int ring,
-			      struct safexcel_request *request, int *commands,
-			      int *results)
+			      int *commands, int *results)
 {
 	struct aead_request *req = aead_request_cast(async);
 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
@@ -628,14 +646,13 @@
 	struct safexcel_crypto_priv *priv = ctx->priv;
 	int ret;
 
-	BUG_ON(priv->version == EIP97 && sreq->needs_inv);
+	BUG_ON(!(priv->flags & EIP197_TRC_CACHE) && sreq->needs_inv);
 
 	if (sreq->needs_inv)
-		ret = safexcel_cipher_send_inv(async, ring, request, commands,
-					       results);
+		ret = safexcel_cipher_send_inv(async, ring, commands, results);
 	else
-		ret = safexcel_aes_send(async, ring, request, sreq, req->src,
-					req->dst, req->cryptlen, req->assoclen,
+		ret = safexcel_send_req(async, ring, sreq, req->src, req->dst,
+					req->cryptlen, req->assoclen,
 					crypto_aead_authsize(tfm), req->iv,
 					commands, results);
 	return ret;
@@ -705,9 +722,10 @@
 	return safexcel_cipher_exit_inv(tfm, &req->base, sreq, &result);
 }
 
-static int safexcel_aes(struct crypto_async_request *base,
+static int safexcel_queue_req(struct crypto_async_request *base,
 			struct safexcel_cipher_req *sreq,
-			enum safexcel_cipher_direction dir, u32 mode)
+			enum safexcel_cipher_direction dir, u32 mode,
+			enum safexcel_cipher_alg alg)
 {
 	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(base->tfm);
 	struct safexcel_crypto_priv *priv = ctx->priv;
@@ -715,10 +733,11 @@
 
 	sreq->needs_inv = false;
 	sreq->direction = dir;
+	ctx->alg = alg;
 	ctx->mode = mode;
 
 	if (ctx->base.ctxr) {
-		if (priv->version == EIP197 && ctx->base.needs_inv) {
+		if (priv->flags & EIP197_TRC_CACHE && ctx->base.needs_inv) {
 			sreq->needs_inv = true;
 			ctx->base.needs_inv = false;
 		}
@@ -745,14 +764,16 @@
 
 static int safexcel_ecb_aes_encrypt(struct skcipher_request *req)
 {
-	return safexcel_aes(&req->base, skcipher_request_ctx(req),
-			    SAFEXCEL_ENCRYPT, CONTEXT_CONTROL_CRYPTO_MODE_ECB);
+	return safexcel_queue_req(&req->base, skcipher_request_ctx(req),
+			SAFEXCEL_ENCRYPT, CONTEXT_CONTROL_CRYPTO_MODE_ECB,
+			SAFEXCEL_AES);
 }
 
 static int safexcel_ecb_aes_decrypt(struct skcipher_request *req)
 {
-	return safexcel_aes(&req->base, skcipher_request_ctx(req),
-			    SAFEXCEL_DECRYPT, CONTEXT_CONTROL_CRYPTO_MODE_ECB);
+	return safexcel_queue_req(&req->base, skcipher_request_ctx(req),
+			SAFEXCEL_DECRYPT, CONTEXT_CONTROL_CRYPTO_MODE_ECB,
+			SAFEXCEL_AES);
 }
 
 static int safexcel_skcipher_cra_init(struct crypto_tfm *tfm)
@@ -795,7 +816,7 @@
 	if (safexcel_cipher_cra_exit(tfm))
 		return;
 
-	if (priv->version == EIP197) {
+	if (priv->flags & EIP197_TRC_CACHE) {
 		ret = safexcel_skcipher_exit_inv(tfm);
 		if (ret)
 			dev_warn(priv->dev, "skcipher: invalidation error %d\n",
@@ -815,7 +836,7 @@
 	if (safexcel_cipher_cra_exit(tfm))
 		return;
 
-	if (priv->version == EIP197) {
+	if (priv->flags & EIP197_TRC_CACHE) {
 		ret = safexcel_aead_exit_inv(tfm);
 		if (ret)
 			dev_warn(priv->dev, "aead: invalidation error %d\n",
@@ -828,6 +849,7 @@
 
 struct safexcel_alg_template safexcel_alg_ecb_aes = {
 	.type = SAFEXCEL_ALG_TYPE_SKCIPHER,
+	.engines = EIP97IES | EIP197B | EIP197D,
 	.alg.skcipher = {
 		.setkey = safexcel_skcipher_aes_setkey,
 		.encrypt = safexcel_ecb_aes_encrypt,
@@ -838,7 +860,7 @@
 			.cra_name = "ecb(aes)",
 			.cra_driver_name = "safexcel-ecb-aes",
 			.cra_priority = 300,
-			.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_ASYNC |
+			.cra_flags = CRYPTO_ALG_ASYNC |
 				     CRYPTO_ALG_KERN_DRIVER_ONLY,
 			.cra_blocksize = AES_BLOCK_SIZE,
 			.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
@@ -852,18 +874,21 @@
 
 static int safexcel_cbc_aes_encrypt(struct skcipher_request *req)
 {
-	return safexcel_aes(&req->base, skcipher_request_ctx(req),
-			    SAFEXCEL_ENCRYPT, CONTEXT_CONTROL_CRYPTO_MODE_CBC);
+	return safexcel_queue_req(&req->base, skcipher_request_ctx(req),
+			SAFEXCEL_ENCRYPT, CONTEXT_CONTROL_CRYPTO_MODE_CBC,
+			SAFEXCEL_AES);
 }
 
 static int safexcel_cbc_aes_decrypt(struct skcipher_request *req)
 {
-	return safexcel_aes(&req->base, skcipher_request_ctx(req),
-			    SAFEXCEL_DECRYPT, CONTEXT_CONTROL_CRYPTO_MODE_CBC);
+	return safexcel_queue_req(&req->base, skcipher_request_ctx(req),
+			SAFEXCEL_DECRYPT, CONTEXT_CONTROL_CRYPTO_MODE_CBC,
+			SAFEXCEL_AES);
 }
 
 struct safexcel_alg_template safexcel_alg_cbc_aes = {
 	.type = SAFEXCEL_ALG_TYPE_SKCIPHER,
+	.engines = EIP97IES | EIP197B | EIP197D,
 	.alg.skcipher = {
 		.setkey = safexcel_skcipher_aes_setkey,
 		.encrypt = safexcel_cbc_aes_encrypt,
@@ -875,7 +900,7 @@
 			.cra_name = "cbc(aes)",
 			.cra_driver_name = "safexcel-cbc-aes",
 			.cra_priority = 300,
-			.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_ASYNC |
+			.cra_flags = CRYPTO_ALG_ASYNC |
 				     CRYPTO_ALG_KERN_DRIVER_ONLY,
 			.cra_blocksize = AES_BLOCK_SIZE,
 			.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
@@ -887,20 +912,234 @@
 	},
 };
 
+static int safexcel_cbc_des_encrypt(struct skcipher_request *req)
+{
+	return safexcel_queue_req(&req->base, skcipher_request_ctx(req),
+			SAFEXCEL_ENCRYPT, CONTEXT_CONTROL_CRYPTO_MODE_CBC,
+			SAFEXCEL_DES);
+}
+
+static int safexcel_cbc_des_decrypt(struct skcipher_request *req)
+{
+	return safexcel_queue_req(&req->base, skcipher_request_ctx(req),
+			SAFEXCEL_DECRYPT, CONTEXT_CONTROL_CRYPTO_MODE_CBC,
+			SAFEXCEL_DES);
+}
+
+static int safexcel_des_setkey(struct crypto_skcipher *ctfm, const u8 *key,
+			       unsigned int len)
+{
+	struct crypto_tfm *tfm = crypto_skcipher_tfm(ctfm);
+	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+	u32 tmp[DES_EXPKEY_WORDS];
+	int ret;
+
+	if (len != DES_KEY_SIZE) {
+		crypto_skcipher_set_flags(ctfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+		return -EINVAL;
+	}
+
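+	/* Reject weak keys if the transformation asked for that check:
+	 * des_ekey() returns 0 for a weak key.
+	 */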
+	ret = des_ekey(tmp, key);
+	if (!ret && (tfm->crt_flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
+		tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY;
+		return -EINVAL;
+	}
+
+	/* if the context exists and the key changed, it needs to be invalidated */
+	if (ctx->base.ctxr_dma)
+		if (memcmp(ctx->key, key, len))
+			ctx->base.needs_inv = true;
+
+	memcpy(ctx->key, key, len);
+	ctx->key_len = len;
+
+	return 0;
+}
+
+struct safexcel_alg_template safexcel_alg_cbc_des = {
+	.type = SAFEXCEL_ALG_TYPE_SKCIPHER,
+	.engines = EIP97IES | EIP197B | EIP197D,
+	.alg.skcipher = {
+		.setkey = safexcel_des_setkey,
+		.encrypt = safexcel_cbc_des_encrypt,
+		.decrypt = safexcel_cbc_des_decrypt,
+		.min_keysize = DES_KEY_SIZE,
+		.max_keysize = DES_KEY_SIZE,
+		.ivsize = DES_BLOCK_SIZE,
+		.base = {
+			.cra_name = "cbc(des)",
+			.cra_driver_name = "safexcel-cbc-des",
+			.cra_priority = 300,
+			.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_ASYNC |
+				     CRYPTO_ALG_KERN_DRIVER_ONLY,
+			.cra_blocksize = DES_BLOCK_SIZE,
+			.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
+			.cra_alignmask = 0,
+			.cra_init = safexcel_skcipher_cra_init,
+			.cra_exit = safexcel_skcipher_cra_exit,
+			.cra_module = THIS_MODULE,
+		},
+	},
+};
+
+static int safexcel_ecb_des_encrypt(struct skcipher_request *req)
+{
+	return safexcel_queue_req(&req->base, skcipher_request_ctx(req),
+			SAFEXCEL_ENCRYPT, CONTEXT_CONTROL_CRYPTO_MODE_ECB,
+			SAFEXCEL_DES);
+}
+
+static int safexcel_ecb_des_decrypt(struct skcipher_request *req)
+{
+	return safexcel_queue_req(&req->base, skcipher_request_ctx(req),
+			SAFEXCEL_DECRYPT, CONTEXT_CONTROL_CRYPTO_MODE_ECB,
+			SAFEXCEL_DES);
+}
+
+struct safexcel_alg_template safexcel_alg_ecb_des = {
+	.type = SAFEXCEL_ALG_TYPE_SKCIPHER,
+	.engines = EIP97IES | EIP197B | EIP197D,
+	.alg.skcipher = {
+		.setkey = safexcel_des_setkey,
+		.encrypt = safexcel_ecb_des_encrypt,
+		.decrypt = safexcel_ecb_des_decrypt,
+		.min_keysize = DES_KEY_SIZE,
+		.max_keysize = DES_KEY_SIZE,
+		.ivsize = DES_BLOCK_SIZE,
+		.base = {
+			.cra_name = "ecb(des)",
+			.cra_driver_name = "safexcel-ecb-des",
+			.cra_priority = 300,
+			.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_ASYNC |
+				     CRYPTO_ALG_KERN_DRIVER_ONLY,
+			.cra_blocksize = DES_BLOCK_SIZE,
+			.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
+			.cra_alignmask = 0,
+			.cra_init = safexcel_skcipher_cra_init,
+			.cra_exit = safexcel_skcipher_cra_exit,
+			.cra_module = THIS_MODULE,
+		},
+	},
+};
+
+static int safexcel_cbc_des3_ede_encrypt(struct skcipher_request *req)
+{
+	return safexcel_queue_req(&req->base, skcipher_request_ctx(req),
+			SAFEXCEL_ENCRYPT, CONTEXT_CONTROL_CRYPTO_MODE_CBC,
+			SAFEXCEL_3DES);
+}
+
+static int safexcel_cbc_des3_ede_decrypt(struct skcipher_request *req)
+{
+	return safexcel_queue_req(&req->base, skcipher_request_ctx(req),
+			SAFEXCEL_DECRYPT, CONTEXT_CONTROL_CRYPTO_MODE_CBC,
+			SAFEXCEL_3DES);
+}
+
+static int safexcel_des3_ede_setkey(struct crypto_skcipher *ctfm,
+				   const u8 *key, unsigned int len)
+{
+	struct crypto_tfm *tfm = crypto_skcipher_tfm(ctfm);
+	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	if (len != DES3_EDE_KEY_SIZE) {
+		crypto_skcipher_set_flags(ctfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+		return -EINVAL;
+	}
+
+	/* if the context exists and the key changed, it needs to be invalidated */
+	if (ctx->base.ctxr_dma) {
+		if (memcmp(ctx->key, key, len))
+			ctx->base.needs_inv = true;
+	}
+
+	memcpy(ctx->key, key, len);
+
+	ctx->key_len = len;
+
+	return 0;
+}
+
+struct safexcel_alg_template safexcel_alg_cbc_des3_ede = {
+	.type = SAFEXCEL_ALG_TYPE_SKCIPHER,
+	.engines = EIP97IES | EIP197B | EIP197D,
+	.alg.skcipher = {
+		.setkey = safexcel_des3_ede_setkey,
+		.encrypt = safexcel_cbc_des3_ede_encrypt,
+		.decrypt = safexcel_cbc_des3_ede_decrypt,
+		.min_keysize = DES3_EDE_KEY_SIZE,
+		.max_keysize = DES3_EDE_KEY_SIZE,
+		.ivsize = DES3_EDE_BLOCK_SIZE,
+		.base = {
+			.cra_name = "cbc(des3_ede)",
+			.cra_driver_name = "safexcel-cbc-des3_ede",
+			.cra_priority = 300,
+			.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_ASYNC |
+				     CRYPTO_ALG_KERN_DRIVER_ONLY,
+			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+			.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
+			.cra_alignmask = 0,
+			.cra_init = safexcel_skcipher_cra_init,
+			.cra_exit = safexcel_skcipher_cra_exit,
+			.cra_module = THIS_MODULE,
+		},
+	},
+};
+
+static int safexcel_ecb_des3_ede_encrypt(struct skcipher_request *req)
+{
+	return safexcel_queue_req(&req->base, skcipher_request_ctx(req),
+			SAFEXCEL_ENCRYPT, CONTEXT_CONTROL_CRYPTO_MODE_ECB,
+			SAFEXCEL_3DES);
+}
+
+static int safexcel_ecb_des3_ede_decrypt(struct skcipher_request *req)
+{
+	return safexcel_queue_req(&req->base, skcipher_request_ctx(req),
+			SAFEXCEL_DECRYPT, CONTEXT_CONTROL_CRYPTO_MODE_ECB,
+			SAFEXCEL_3DES);
+}
+
+struct safexcel_alg_template safexcel_alg_ecb_des3_ede = {
+	.type = SAFEXCEL_ALG_TYPE_SKCIPHER,
+	.engines = EIP97IES | EIP197B | EIP197D,
+	.alg.skcipher = {
+		.setkey = safexcel_des3_ede_setkey,
+		.encrypt = safexcel_ecb_des3_ede_encrypt,
+		.decrypt = safexcel_ecb_des3_ede_decrypt,
+		.min_keysize = DES3_EDE_KEY_SIZE,
+		.max_keysize = DES3_EDE_KEY_SIZE,
+		.ivsize = DES3_EDE_BLOCK_SIZE,
+		.base = {
+			.cra_name = "ecb(des3_ede)",
+			.cra_driver_name = "safexcel-ecb-des3_ede",
+			.cra_priority = 300,
+			.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_ASYNC |
+				     CRYPTO_ALG_KERN_DRIVER_ONLY,
+			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+			.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
+			.cra_alignmask = 0,
+			.cra_init = safexcel_skcipher_cra_init,
+			.cra_exit = safexcel_skcipher_cra_exit,
+			.cra_module = THIS_MODULE,
+		},
+	},
+};
+
 static int safexcel_aead_encrypt(struct aead_request *req)
 {
 	struct safexcel_cipher_req *creq = aead_request_ctx(req);
 
-	return safexcel_aes(&req->base, creq, SAFEXCEL_ENCRYPT,
-			    CONTEXT_CONTROL_CRYPTO_MODE_CBC);
+	return safexcel_queue_req(&req->base, creq, SAFEXCEL_ENCRYPT,
+			CONTEXT_CONTROL_CRYPTO_MODE_CBC, SAFEXCEL_AES);
 }
 
 static int safexcel_aead_decrypt(struct aead_request *req)
 {
 	struct safexcel_cipher_req *creq = aead_request_ctx(req);
 
-	return safexcel_aes(&req->base, creq, SAFEXCEL_DECRYPT,
-			    CONTEXT_CONTROL_CRYPTO_MODE_CBC);
+	return safexcel_queue_req(&req->base, creq, SAFEXCEL_DECRYPT,
+			CONTEXT_CONTROL_CRYPTO_MODE_CBC, SAFEXCEL_AES);
 }
 
 static int safexcel_aead_cra_init(struct crypto_tfm *tfm)
@@ -926,13 +1165,14 @@
 	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
 
 	safexcel_aead_cra_init(tfm);
-	ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA1;
+	ctx->hash_alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA1;
 	ctx->state_sz = SHA1_DIGEST_SIZE;
 	return 0;
 }
 
 struct safexcel_alg_template safexcel_alg_authenc_hmac_sha1_cbc_aes = {
 	.type = SAFEXCEL_ALG_TYPE_AEAD,
+	.engines = EIP97IES | EIP197B | EIP197D,
 	.alg.aead = {
 		.setkey = safexcel_aead_aes_setkey,
 		.encrypt = safexcel_aead_encrypt,
@@ -943,7 +1183,7 @@
 			.cra_name = "authenc(hmac(sha1),cbc(aes))",
 			.cra_driver_name = "safexcel-authenc-hmac-sha1-cbc-aes",
 			.cra_priority = 300,
-			.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC |
+			.cra_flags = CRYPTO_ALG_ASYNC |
 				     CRYPTO_ALG_KERN_DRIVER_ONLY,
 			.cra_blocksize = AES_BLOCK_SIZE,
 			.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
@@ -960,13 +1200,14 @@
 	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
 
 	safexcel_aead_cra_init(tfm);
-	ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA256;
+	ctx->hash_alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA256;
 	ctx->state_sz = SHA256_DIGEST_SIZE;
 	return 0;
 }
 
 struct safexcel_alg_template safexcel_alg_authenc_hmac_sha256_cbc_aes = {
 	.type = SAFEXCEL_ALG_TYPE_AEAD,
+	.engines = EIP97IES | EIP197B | EIP197D,
 	.alg.aead = {
 		.setkey = safexcel_aead_aes_setkey,
 		.encrypt = safexcel_aead_encrypt,
@@ -977,7 +1218,7 @@
 			.cra_name = "authenc(hmac(sha256),cbc(aes))",
 			.cra_driver_name = "safexcel-authenc-hmac-sha256-cbc-aes",
 			.cra_priority = 300,
-			.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC |
+			.cra_flags = CRYPTO_ALG_ASYNC |
 				     CRYPTO_ALG_KERN_DRIVER_ONLY,
 			.cra_blocksize = AES_BLOCK_SIZE,
 			.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
@@ -994,13 +1235,14 @@
 	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
 
 	safexcel_aead_cra_init(tfm);
-	ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA224;
+	ctx->hash_alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA224;
 	ctx->state_sz = SHA256_DIGEST_SIZE;
 	return 0;
 }
 
 struct safexcel_alg_template safexcel_alg_authenc_hmac_sha224_cbc_aes = {
 	.type = SAFEXCEL_ALG_TYPE_AEAD,
+	.engines = EIP97IES | EIP197B | EIP197D,
 	.alg.aead = {
 		.setkey = safexcel_aead_aes_setkey,
 		.encrypt = safexcel_aead_encrypt,
@@ -1011,7 +1253,7 @@
 			.cra_name = "authenc(hmac(sha224),cbc(aes))",
 			.cra_driver_name = "safexcel-authenc-hmac-sha224-cbc-aes",
 			.cra_priority = 300,
-			.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC |
+			.cra_flags = CRYPTO_ALG_ASYNC |
 				     CRYPTO_ALG_KERN_DRIVER_ONLY,
 			.cra_blocksize = AES_BLOCK_SIZE,
 			.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
@@ -1022,3 +1264,73 @@
 		},
 	},
 };
+
+static int safexcel_aead_sha512_cra_init(struct crypto_tfm *tfm)
+{
+	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	safexcel_aead_cra_init(tfm);
+	ctx->hash_alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA512;
+	ctx->state_sz = SHA512_DIGEST_SIZE;
+	return 0;
+}
+
+struct safexcel_alg_template safexcel_alg_authenc_hmac_sha512_cbc_aes = {
+	.type = SAFEXCEL_ALG_TYPE_AEAD,
+	.engines = EIP97IES | EIP197B | EIP197D,
+	.alg.aead = {
+		.setkey = safexcel_aead_aes_setkey,
+		.encrypt = safexcel_aead_encrypt,
+		.decrypt = safexcel_aead_decrypt,
+		.ivsize = AES_BLOCK_SIZE,
+		.maxauthsize = SHA512_DIGEST_SIZE,
+		.base = {
+			.cra_name = "authenc(hmac(sha512),cbc(aes))",
+			.cra_driver_name = "safexcel-authenc-hmac-sha512-cbc-aes",
+			.cra_priority = 300,
+			.cra_flags = CRYPTO_ALG_ASYNC |
+				     CRYPTO_ALG_KERN_DRIVER_ONLY,
+			.cra_blocksize = AES_BLOCK_SIZE,
+			.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
+			.cra_alignmask = 0,
+			.cra_init = safexcel_aead_sha512_cra_init,
+			.cra_exit = safexcel_aead_cra_exit,
+			.cra_module = THIS_MODULE,
+		},
+	},
+};
+
+static int safexcel_aead_sha384_cra_init(struct crypto_tfm *tfm)
+{
+	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	safexcel_aead_cra_init(tfm);
+	ctx->hash_alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA384;
+	ctx->state_sz = SHA512_DIGEST_SIZE;
+	return 0;
+}
+
+struct safexcel_alg_template safexcel_alg_authenc_hmac_sha384_cbc_aes = {
+	.type = SAFEXCEL_ALG_TYPE_AEAD,
+	.engines = EIP97IES | EIP197B | EIP197D,
+	.alg.aead = {
+		.setkey = safexcel_aead_aes_setkey,
+		.encrypt = safexcel_aead_encrypt,
+		.decrypt = safexcel_aead_decrypt,
+		.ivsize = AES_BLOCK_SIZE,
+		.maxauthsize = SHA384_DIGEST_SIZE,
+		.base = {
+			.cra_name = "authenc(hmac(sha384),cbc(aes))",
+			.cra_driver_name = "safexcel-authenc-hmac-sha384-cbc-aes",
+			.cra_priority = 300,
+			.cra_flags = CRYPTO_ALG_ASYNC |
+				     CRYPTO_ALG_KERN_DRIVER_ONLY,
+			.cra_blocksize = AES_BLOCK_SIZE,
+			.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
+			.cra_alignmask = 0,
+			.cra_init = safexcel_aead_sha384_cra_init,
+			.cra_exit = safexcel_aead_cra_exit,
+			.cra_module = THIS_MODULE,
+		},
+	},
+};
diff --git a/drivers/crypto/inside-secure/safexcel_hash.c b/drivers/crypto/inside-secure/safexcel_hash.c
index c77b0e1..ac9282c 100644
--- a/drivers/crypto/inside-secure/safexcel_hash.c
+++ b/drivers/crypto/inside-secure/safexcel_hash.c
@@ -1,14 +1,12 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Copyright (C) 2017 Marvell
  *
  * Antoine Tenart <antoine.tenart@free-electrons.com>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
  */
 
 #include <crypto/hmac.h>
+#include <crypto/md5.h>
 #include <crypto/sha.h>
 #include <linux/device.h>
 #include <linux/dma-mapping.h>
@@ -22,8 +20,8 @@
 
 	u32 alg;
 
-	u32 ipad[SHA256_DIGEST_SIZE / sizeof(u32)];
-	u32 opad[SHA256_DIGEST_SIZE / sizeof(u32)];
+	u32 ipad[SHA512_DIGEST_SIZE / sizeof(u32)];
+	u32 opad[SHA512_DIGEST_SIZE / sizeof(u32)];
 };
 
 struct safexcel_ahash_req {
@@ -38,18 +36,26 @@
 	u32 digest;
 
 	u8 state_sz;    /* expected state size, only set once */
-	u32 state[SHA256_DIGEST_SIZE / sizeof(u32)] __aligned(sizeof(u32));
+	u32 state[SHA512_DIGEST_SIZE / sizeof(u32)] __aligned(sizeof(u32));
 
-	u64 len;
-	u64 processed;
+	u64 len[2];
+	u64 processed[2];
 
-	u8 cache[SHA256_BLOCK_SIZE] __aligned(sizeof(u32));
+	u8 cache[SHA512_BLOCK_SIZE] __aligned(sizeof(u32));
 	dma_addr_t cache_dma;
 	unsigned int cache_sz;
 
-	u8 cache_next[SHA256_BLOCK_SIZE] __aligned(sizeof(u32));
+	u8 cache_next[SHA512_BLOCK_SIZE] __aligned(sizeof(u32));
 };
 
+static inline u64 safexcel_queued_len(struct safexcel_ahash_req *req)
+{
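+	/* len and processed are double-word byte counters, with index 0
+	 * holding the low word. Return how many bytes were queued but not
+	 * yet processed.
+	 */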
+	if (req->len[1] > req->processed[1])
+		return 0xffffffff - (req->len[0] - req->processed[0]);
+
+	return req->len[0] - req->processed[0];
+}
+
 static void safexcel_hash_token(struct safexcel_command_desc *cdesc,
 				u32 input_length, u32 result_length)
 {
@@ -72,9 +78,9 @@
 static void safexcel_context_control(struct safexcel_ahash_ctx *ctx,
 				     struct safexcel_ahash_req *req,
 				     struct safexcel_command_desc *cdesc,
-				     unsigned int digestsize,
-				     unsigned int blocksize)
+				     unsigned int digestsize)
 {
+	struct safexcel_crypto_priv *priv = ctx->priv;
 	int i;
 
 	cdesc->control_data.control0 |= CONTEXT_CONTROL_TYPE_HASH_OUT;
@@ -82,12 +88,17 @@
 	cdesc->control_data.control0 |= req->digest;
 
 	if (req->digest == CONTEXT_CONTROL_DIGEST_PRECOMPUTED) {
-		if (req->processed) {
-			if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA1)
+		if (req->processed[0] || req->processed[1]) {
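+			/* The context size is given in 32-bit words: the
+			 * digest state plus one word for the block counter
+			 * (e.g. MD5: 4 + 1 = 5, SHA-512: 16 + 1 = 17).
+			 */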
+			if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_MD5)
+				cdesc->control_data.control0 |= CONTEXT_CONTROL_SIZE(5);
+			else if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA1)
 				cdesc->control_data.control0 |= CONTEXT_CONTROL_SIZE(6);
 			else if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA224 ||
 				 ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA256)
 				cdesc->control_data.control0 |= CONTEXT_CONTROL_SIZE(9);
+			else if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA384 ||
+				 ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA512)
+				cdesc->control_data.control0 |= CONTEXT_CONTROL_SIZE(17);
 
 			cdesc->control_data.control1 |= CONTEXT_CONTROL_DIGEST_CNT;
 		} else {
@@ -102,12 +113,28 @@
 		 * fields. Do this now as we need it to setup the first command
 		 * descriptor.
 		 */
-		if (req->processed) {
+		if (req->processed[0] || req->processed[1]) {
 			for (i = 0; i < digestsize / sizeof(u32); i++)
 				ctx->base.ctxr->data[i] = cpu_to_le32(req->state[i]);
 
-			if (req->finish)
-				ctx->base.ctxr->data[i] = cpu_to_le32(req->processed / blocksize);
+			if (req->finish) {
+				u64 count = req->processed[0] / EIP197_COUNTER_BLOCK_SIZE;
+				count += ((0xffffffff / EIP197_COUNTER_BLOCK_SIZE) *
+					  req->processed[1]);
+
+				/* This is a hardware limitation, as the
+				 * counter must fit into a u32. This represents
+				 * a fairly big amount of input data, so we
+				 * shouldn't see this.
+				 */
+				if (unlikely(count & 0xffff0000)) {
+					dev_warn(priv->dev,
+						 "Input data is too big\n");
+					return;
+				}
+
+				ctx->base.ctxr->data[i] = cpu_to_le32(count);
+			}
 		}
 	} else if (req->digest == CONTEXT_CONTROL_DIGEST_HMAC) {
 		cdesc->control_data.control0 |= CONTEXT_CONTROL_SIZE(2 * req->state_sz / sizeof(u32));
@@ -126,11 +153,10 @@
 	struct ahash_request *areq = ahash_request_cast(async);
 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
 	struct safexcel_ahash_req *sreq = ahash_request_ctx(areq);
-	int cache_len;
+	u64 cache_len;
 
 	*ret = 0;
 
-	spin_lock_bh(&priv->ring[ring].egress_lock);
 	rdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].rdr);
 	if (IS_ERR(rdesc)) {
 		dev_err(priv->dev,
@@ -141,7 +167,6 @@
 	}
 
 	safexcel_complete(priv, ring);
-	spin_unlock_bh(&priv->ring[ring].egress_lock);
 
 	if (sreq->nents) {
 		dma_unmap_sg(priv->dev, areq->src, sreq->nents, DMA_TO_DEVICE);
@@ -164,7 +189,7 @@
 		memcpy(areq->result, sreq->state,
 		       crypto_ahash_digestsize(ahash));
 
-	cache_len = sreq->len - sreq->processed;
+	cache_len = safexcel_queued_len(sreq);
 	if (cache_len)
 		memcpy(sreq->cache, sreq->cache_next, cache_len);
 
@@ -174,7 +199,6 @@
 }
 
 static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring,
-				   struct safexcel_request *request,
 				   int *commands, int *results)
 {
 	struct ahash_request *areq = ahash_request_cast(async);
@@ -185,9 +209,10 @@
 	struct safexcel_command_desc *cdesc, *first_cdesc = NULL;
 	struct safexcel_result_desc *rdesc;
 	struct scatterlist *sg;
-	int i, queued, len, cache_len, extra, n_cdesc = 0, ret = 0;
+	int i, extra, n_cdesc = 0, ret = 0;
+	u64 queued, len, cache_len;
 
-	queued = len = req->len - req->processed;
+	queued = len = safexcel_queued_len(req);
 	if (queued <= crypto_ahash_blocksize(ahash))
 		cache_len = queued;
 	else
@@ -220,16 +245,12 @@
 		}
 	}
 
-	spin_lock_bh(&priv->ring[ring].egress_lock);
-
 	/* Add a command descriptor for the cached data, if any */
 	if (cache_len) {
 		req->cache_dma = dma_map_single(priv->dev, req->cache,
 						cache_len, DMA_TO_DEVICE);
-		if (dma_mapping_error(priv->dev, req->cache_dma)) {
-			spin_unlock_bh(&priv->ring[ring].egress_lock);
+		if (dma_mapping_error(priv->dev, req->cache_dma))
 			return -EINVAL;
-		}
 
 		req->cache_sz = cache_len;
 		first_cdesc = safexcel_add_cdesc(priv, ring, 1,
@@ -260,7 +281,7 @@
 		int sglen = sg_dma_len(sg);
 
 		/* Do not overflow the request */
-		if (queued - sglen < 0)
+		if (queued < sglen)
 			sglen = queued;
 
 		cdesc = safexcel_add_cdesc(priv, ring, !n_cdesc,
@@ -282,8 +303,7 @@
 
 send_command:
 	/* Setup the context options */
-	safexcel_context_control(ctx, req, first_cdesc, req->state_sz,
-				 crypto_ahash_blocksize(ahash));
+	safexcel_context_control(ctx, req, first_cdesc, req->state_sz);
 
 	/* Add the token */
 	safexcel_hash_token(first_cdesc, len, req->state_sz);
@@ -303,10 +323,11 @@
 		goto unmap_result;
 	}
 
-	spin_unlock_bh(&priv->ring[ring].egress_lock);
+	safexcel_rdr_req_set(priv, ring, rdesc, &areq->base);
 
-	req->processed += len;
-	request->req = &areq->base;
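+	/* Update the processed byte count, carrying into the high word when
+	 * the low one overflows.
+	 */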
+	req->processed[0] += len;
+	if (req->processed[0] < len)
+		req->processed[1]++;
 
 	*commands = n_cdesc;
 	*results = 1;
@@ -327,7 +348,6 @@
 		req->cache_sz = 0;
 	}
 
-	spin_unlock_bh(&priv->ring[ring].egress_lock);
 	return ret;
 }
 
@@ -335,16 +355,18 @@
 {
 	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
 	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
-	struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
 	unsigned int state_w_sz = req->state_sz / sizeof(u32);
+	u64 processed;
 	int i;
 
+	processed = req->processed[0] / EIP197_COUNTER_BLOCK_SIZE;
+	processed += (0xffffffff / EIP197_COUNTER_BLOCK_SIZE) * req->processed[1];
+
 	for (i = 0; i < state_w_sz; i++)
 		if (ctx->base.ctxr->data[i] != cpu_to_le32(req->state[i]))
 			return true;
 
-	if (ctx->base.ctxr->data[state_w_sz] !=
-	    cpu_to_le32(req->processed / crypto_ahash_blocksize(ahash)))
+	if (ctx->base.ctxr->data[state_w_sz] != cpu_to_le32(processed))
 		return true;
 
 	return false;
@@ -363,21 +385,16 @@
 
 	*ret = 0;
 
-	spin_lock_bh(&priv->ring[ring].egress_lock);
 	rdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].rdr);
 	if (IS_ERR(rdesc)) {
 		dev_err(priv->dev,
 			"hash: invalidate: could not retrieve the result descriptor\n");
 		*ret = PTR_ERR(rdesc);
-	} else if (rdesc->result_data.error_code) {
-		dev_err(priv->dev,
-			"hash: invalidate: result descriptor error (%d)\n",
-			rdesc->result_data.error_code);
-		*ret = -EINVAL;
+	} else {
+		*ret = safexcel_rdesc_check_errors(priv, rdesc);
 	}
 
 	safexcel_complete(priv, ring);
-	spin_unlock_bh(&priv->ring[ring].egress_lock);
 
 	if (ctx->base.exit_inv) {
 		dma_pool_free(priv->context_pool, ctx->base.ctxr,
@@ -413,7 +430,7 @@
 	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
 	int err;
 
-	BUG_ON(priv->version == EIP97 && req->needs_inv);
+	BUG_ON(!(priv->flags & EIP197_TRC_CACHE) && req->needs_inv);
 
 	if (req->needs_inv) {
 		req->needs_inv = false;
@@ -428,15 +445,14 @@
 }
 
 static int safexcel_ahash_send_inv(struct crypto_async_request *async,
-				   int ring, struct safexcel_request *request,
-				   int *commands, int *results)
+				   int ring, int *commands, int *results)
 {
 	struct ahash_request *areq = ahash_request_cast(async);
 	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
 	int ret;
 
 	ret = safexcel_invalidate_cache(async, ctx->priv,
-					ctx->base.ctxr_dma, ring, request);
+					ctx->base.ctxr_dma, ring);
 	if (unlikely(ret))
 		return ret;
 
@@ -447,19 +463,17 @@
 }
 
 static int safexcel_ahash_send(struct crypto_async_request *async,
-			       int ring, struct safexcel_request *request,
-			       int *commands, int *results)
+			       int ring, int *commands, int *results)
 {
 	struct ahash_request *areq = ahash_request_cast(async);
 	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
 	int ret;
 
 	if (req->needs_inv)
-		ret = safexcel_ahash_send_inv(async, ring, request,
-					      commands, results);
+		ret = safexcel_ahash_send_inv(async, ring, commands, results);
 	else
-		ret = safexcel_ahash_send_req(async, ring, request,
-					      commands, results);
+		ret = safexcel_ahash_send_req(async, ring, commands, results);
+
 	return ret;
 }
 
@@ -509,17 +523,17 @@
 {
 	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
-	int queued, cache_len;
+	u64 queued, cache_len;
 
-	/* cache_len: everyting accepted by the driver but not sent yet,
-	 * tot sz handled by update() - last req sz - tot sz handled by send()
-	 */
-	cache_len = req->len - areq->nbytes - req->processed;
 	/* queued: everything accepted by the driver which will be handled by
 	 * the next send() calls.
 	 * tot sz handled by update() - tot sz handled by send()
 	 */
-	queued = req->len - req->processed;
+	queued = safexcel_queued_len(req);
+	/* cache_len: everything accepted by the driver but not sent yet,
+	 * tot sz handled by update() - last req sz - tot sz handled by send()
+	 */
+	cache_len = queued - areq->nbytes;
 
 	/*
 	 * In case there isn't enough bytes to proceed (less than a
@@ -546,8 +560,8 @@
 	req->needs_inv = false;
 
 	if (ctx->base.ctxr) {
-		if (priv->version == EIP197 &&
-		    !ctx->base.needs_inv && req->processed &&
+		if (priv->flags & EIP197_TRC_CACHE && !ctx->base.needs_inv &&
+		    (req->processed[0] || req->processed[1]) &&
 		    req->digest == CONTEXT_CONTROL_DIGEST_PRECOMPUTED)
 			/* We're still setting needs_inv here, even though it is
 			 * cleared right away, because the needs_inv flag can be
@@ -590,7 +604,9 @@
 	if (!areq->nbytes)
 		return 0;
 
-	req->len += areq->nbytes;
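+	/* Add to the total length, carrying into the high word on overflow */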
+	req->len[0] += areq->nbytes;
+	if (req->len[0] < areq->nbytes)
+		req->len[1]++;
 
 	safexcel_ahash_cache(areq);
 
@@ -605,7 +621,7 @@
 		return safexcel_ahash_enqueue(areq);
 
 	if (!req->last_req &&
-	    req->len - req->processed > crypto_ahash_blocksize(ahash))
+	    safexcel_queued_len(req) > crypto_ahash_blocksize(ahash))
 		return safexcel_ahash_enqueue(areq);
 
 	return 0;
@@ -620,8 +636,11 @@
 	req->finish = true;
 
 	/* If we have an overall 0 length request */
-	if (!(req->len + areq->nbytes)) {
-		if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA1)
+	if (!req->len[0] && !req->len[1] && !areq->nbytes) {
+		if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_MD5)
+			memcpy(areq->result, md5_zero_message_hash,
+			       MD5_DIGEST_SIZE);
+		else if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA1)
 			memcpy(areq->result, sha1_zero_message_hash,
 			       SHA1_DIGEST_SIZE);
 		else if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA224)
@@ -630,6 +649,12 @@
 		else if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA256)
 			memcpy(areq->result, sha256_zero_message_hash,
 			       SHA256_DIGEST_SIZE);
+		else if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA384)
+			memcpy(areq->result, sha384_zero_message_hash,
+			       SHA384_DIGEST_SIZE);
+		else if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA512)
+			memcpy(areq->result, sha512_zero_message_hash,
+			       SHA512_DIGEST_SIZE);
 
 		return 0;
 	}
@@ -654,8 +679,10 @@
 	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
 	struct safexcel_ahash_export_state *export = out;
 
-	export->len = req->len;
-	export->processed = req->processed;
+	export->len[0] = req->len[0];
+	export->len[1] = req->len[1];
+	export->processed[0] = req->processed[0];
+	export->processed[1] = req->processed[1];
 
 	export->digest = req->digest;
 
@@ -676,8 +703,10 @@
 	if (ret)
 		return ret;
 
-	req->len = export->len;
-	req->processed = export->processed;
+	req->len[0] = export->len[0];
+	req->len[1] = export->len[1];
+	req->processed[0] = export->processed[0];
+	req->processed[1] = export->processed[1];
 
 	req->digest = export->digest;
 
@@ -743,7 +772,7 @@
 	if (!ctx->base.ctxr)
 		return;
 
-	if (priv->version == EIP197) {
+	if (priv->flags & EIP197_TRC_CACHE) {
 		ret = safexcel_ahash_exit_inv(tfm);
 		if (ret)
 			dev_warn(priv->dev, "hash: invalidation error %d\n", ret);
@@ -755,6 +784,7 @@
 
 struct safexcel_alg_template safexcel_alg_sha1 = {
 	.type = SAFEXCEL_ALG_TYPE_AHASH,
+	.engines = EIP97IES | EIP197B | EIP197D,
 	.alg.ahash = {
 		.init = safexcel_sha1_init,
 		.update = safexcel_ahash_update,
@@ -908,8 +938,7 @@
 	u8 *ipad, *opad;
 	int ret;
 
-	tfm = crypto_alloc_ahash(alg, CRYPTO_ALG_TYPE_AHASH,
-				 CRYPTO_ALG_TYPE_AHASH_MASK);
+	tfm = crypto_alloc_ahash(alg, 0, 0);
 	if (IS_ERR(tfm))
 		return PTR_ERR(tfm);
 
@@ -963,7 +992,7 @@
 	if (ret)
 		return ret;
 
-	if (priv->version == EIP197 && ctx->base.ctxr) {
+	if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr) {
 		for (i = 0; i < state_sz / sizeof(u32); i++) {
 			if (ctx->ipad[i] != le32_to_cpu(istate.state[i]) ||
 			    ctx->opad[i] != le32_to_cpu(ostate.state[i])) {
@@ -988,6 +1017,7 @@
 
 struct safexcel_alg_template safexcel_alg_hmac_sha1 = {
 	.type = SAFEXCEL_ALG_TYPE_AHASH,
+	.engines = EIP97IES | EIP197B | EIP197D,
 	.alg.ahash = {
 		.init = safexcel_hmac_sha1_init,
 		.update = safexcel_ahash_update,
@@ -1051,6 +1081,7 @@
 
 struct safexcel_alg_template safexcel_alg_sha256 = {
 	.type = SAFEXCEL_ALG_TYPE_AHASH,
+	.engines = EIP97IES | EIP197B | EIP197D,
 	.alg.ahash = {
 		.init = safexcel_sha256_init,
 		.update = safexcel_ahash_update,
@@ -1113,6 +1144,7 @@
 
 struct safexcel_alg_template safexcel_alg_sha224 = {
 	.type = SAFEXCEL_ALG_TYPE_AHASH,
+	.engines = EIP97IES | EIP197B | EIP197D,
 	.alg.ahash = {
 		.init = safexcel_sha224_init,
 		.update = safexcel_ahash_update,
@@ -1168,6 +1200,7 @@
 
 struct safexcel_alg_template safexcel_alg_hmac_sha224 = {
 	.type = SAFEXCEL_ALG_TYPE_AHASH,
+	.engines = EIP97IES | EIP197B | EIP197D,
 	.alg.ahash = {
 		.init = safexcel_hmac_sha224_init,
 		.update = safexcel_ahash_update,
@@ -1224,6 +1257,7 @@
 
 struct safexcel_alg_template safexcel_alg_hmac_sha256 = {
 	.type = SAFEXCEL_ALG_TYPE_AHASH,
+	.engines = EIP97IES | EIP197B | EIP197D,
 	.alg.ahash = {
 		.init = safexcel_hmac_sha256_init,
 		.update = safexcel_ahash_update,
@@ -1251,3 +1285,375 @@
 		},
 	},
 };
+
+static int safexcel_sha512_init(struct ahash_request *areq)
+{
+	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
+	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
+
+	memset(req, 0, sizeof(*req));
+
+	req->state[0] = lower_32_bits(SHA512_H0);
+	req->state[1] = upper_32_bits(SHA512_H0);
+	req->state[2] = lower_32_bits(SHA512_H1);
+	req->state[3] = upper_32_bits(SHA512_H1);
+	req->state[4] = lower_32_bits(SHA512_H2);
+	req->state[5] = upper_32_bits(SHA512_H2);
+	req->state[6] = lower_32_bits(SHA512_H3);
+	req->state[7] = upper_32_bits(SHA512_H3);
+	req->state[8] = lower_32_bits(SHA512_H4);
+	req->state[9] = upper_32_bits(SHA512_H4);
+	req->state[10] = lower_32_bits(SHA512_H5);
+	req->state[11] = upper_32_bits(SHA512_H5);
+	req->state[12] = lower_32_bits(SHA512_H6);
+	req->state[13] = upper_32_bits(SHA512_H6);
+	req->state[14] = lower_32_bits(SHA512_H7);
+	req->state[15] = upper_32_bits(SHA512_H7);
+
+	ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA512;
+	req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
+	req->state_sz = SHA512_DIGEST_SIZE;
+
+	return 0;
+}
+
+static int safexcel_sha512_digest(struct ahash_request *areq)
+{
+	int ret = safexcel_sha512_init(areq);
+
+	if (ret)
+		return ret;
+
+	return safexcel_ahash_finup(areq);
+}
+
+struct safexcel_alg_template safexcel_alg_sha512 = {
+	.type = SAFEXCEL_ALG_TYPE_AHASH,
+	.engines = EIP97IES | EIP197B | EIP197D,
+	.alg.ahash = {
+		.init = safexcel_sha512_init,
+		.update = safexcel_ahash_update,
+		.final = safexcel_ahash_final,
+		.finup = safexcel_ahash_finup,
+		.digest = safexcel_sha512_digest,
+		.export = safexcel_ahash_export,
+		.import = safexcel_ahash_import,
+		.halg = {
+			.digestsize = SHA512_DIGEST_SIZE,
+			.statesize = sizeof(struct safexcel_ahash_export_state),
+			.base = {
+				.cra_name = "sha512",
+				.cra_driver_name = "safexcel-sha512",
+				.cra_priority = 300,
+				.cra_flags = CRYPTO_ALG_ASYNC |
+					     CRYPTO_ALG_KERN_DRIVER_ONLY,
+				.cra_blocksize = SHA512_BLOCK_SIZE,
+				.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
+				.cra_init = safexcel_ahash_cra_init,
+				.cra_exit = safexcel_ahash_cra_exit,
+				.cra_module = THIS_MODULE,
+			},
+		},
+	},
+};
+
+static int safexcel_sha384_init(struct ahash_request *areq)
+{
+	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
+	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
+
+	memset(req, 0, sizeof(*req));
+
+	req->state[0] = lower_32_bits(SHA384_H0);
+	req->state[1] = upper_32_bits(SHA384_H0);
+	req->state[2] = lower_32_bits(SHA384_H1);
+	req->state[3] = upper_32_bits(SHA384_H1);
+	req->state[4] = lower_32_bits(SHA384_H2);
+	req->state[5] = upper_32_bits(SHA384_H2);
+	req->state[6] = lower_32_bits(SHA384_H3);
+	req->state[7] = upper_32_bits(SHA384_H3);
+	req->state[8] = lower_32_bits(SHA384_H4);
+	req->state[9] = upper_32_bits(SHA384_H4);
+	req->state[10] = lower_32_bits(SHA384_H5);
+	req->state[11] = upper_32_bits(SHA384_H5);
+	req->state[12] = lower_32_bits(SHA384_H6);
+	req->state[13] = upper_32_bits(SHA384_H6);
+	req->state[14] = lower_32_bits(SHA384_H7);
+	req->state[15] = upper_32_bits(SHA384_H7);
+
+	ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA384;
+	req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
+	req->state_sz = SHA512_DIGEST_SIZE;
+
+	return 0;
+}
+
+static int safexcel_sha384_digest(struct ahash_request *areq)
+{
+	int ret = safexcel_sha384_init(areq);
+
+	if (ret)
+		return ret;
+
+	return safexcel_ahash_finup(areq);
+}
+
+struct safexcel_alg_template safexcel_alg_sha384 = {
+	.type = SAFEXCEL_ALG_TYPE_AHASH,
+	.engines = EIP97IES | EIP197B | EIP197D,
+	.alg.ahash = {
+		.init = safexcel_sha384_init,
+		.update = safexcel_ahash_update,
+		.final = safexcel_ahash_final,
+		.finup = safexcel_ahash_finup,
+		.digest = safexcel_sha384_digest,
+		.export = safexcel_ahash_export,
+		.import = safexcel_ahash_import,
+		.halg = {
+			.digestsize = SHA384_DIGEST_SIZE,
+			.statesize = sizeof(struct safexcel_ahash_export_state),
+			.base = {
+				.cra_name = "sha384",
+				.cra_driver_name = "safexcel-sha384",
+				.cra_priority = 300,
+				.cra_flags = CRYPTO_ALG_ASYNC |
+					     CRYPTO_ALG_KERN_DRIVER_ONLY,
+				.cra_blocksize = SHA384_BLOCK_SIZE,
+				.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
+				.cra_init = safexcel_ahash_cra_init,
+				.cra_exit = safexcel_ahash_cra_exit,
+				.cra_module = THIS_MODULE,
+			},
+		},
+	},
+};
+
+static int safexcel_hmac_sha512_setkey(struct crypto_ahash *tfm, const u8 *key,
+				       unsigned int keylen)
+{
+	return safexcel_hmac_alg_setkey(tfm, key, keylen, "safexcel-sha512",
+					SHA512_DIGEST_SIZE);
+}
+
+static int safexcel_hmac_sha512_init(struct ahash_request *areq)
+{
+	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
+
+	safexcel_sha512_init(areq);
+	req->digest = CONTEXT_CONTROL_DIGEST_HMAC;
+	return 0;
+}
+
+static int safexcel_hmac_sha512_digest(struct ahash_request *areq)
+{
+	int ret = safexcel_hmac_sha512_init(areq);
+
+	if (ret)
+		return ret;
+
+	return safexcel_ahash_finup(areq);
+}
+
+struct safexcel_alg_template safexcel_alg_hmac_sha512 = {
+	.type = SAFEXCEL_ALG_TYPE_AHASH,
+	.engines = EIP97IES | EIP197B | EIP197D,
+	.alg.ahash = {
+		.init = safexcel_hmac_sha512_init,
+		.update = safexcel_ahash_update,
+		.final = safexcel_ahash_final,
+		.finup = safexcel_ahash_finup,
+		.digest = safexcel_hmac_sha512_digest,
+		.setkey = safexcel_hmac_sha512_setkey,
+		.export = safexcel_ahash_export,
+		.import = safexcel_ahash_import,
+		.halg = {
+			.digestsize = SHA512_DIGEST_SIZE,
+			.statesize = sizeof(struct safexcel_ahash_export_state),
+			.base = {
+				.cra_name = "hmac(sha512)",
+				.cra_driver_name = "safexcel-hmac-sha512",
+				.cra_priority = 300,
+				.cra_flags = CRYPTO_ALG_ASYNC |
+					     CRYPTO_ALG_KERN_DRIVER_ONLY,
+				.cra_blocksize = SHA512_BLOCK_SIZE,
+				.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
+				.cra_init = safexcel_ahash_cra_init,
+				.cra_exit = safexcel_ahash_cra_exit,
+				.cra_module = THIS_MODULE,
+			},
+		},
+	},
+};
+
+static int safexcel_hmac_sha384_setkey(struct crypto_ahash *tfm, const u8 *key,
+				       unsigned int keylen)
+{
+	return safexcel_hmac_alg_setkey(tfm, key, keylen, "safexcel-sha384",
+					SHA512_DIGEST_SIZE);
+}
+
+static int safexcel_hmac_sha384_init(struct ahash_request *areq)
+{
+	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
+
+	safexcel_sha384_init(areq);
+	req->digest = CONTEXT_CONTROL_DIGEST_HMAC;
+	return 0;
+}
+
+static int safexcel_hmac_sha384_digest(struct ahash_request *areq)
+{
+	int ret = safexcel_hmac_sha384_init(areq);
+
+	if (ret)
+		return ret;
+
+	return safexcel_ahash_finup(areq);
+}
+
+struct safexcel_alg_template safexcel_alg_hmac_sha384 = {
+	.type = SAFEXCEL_ALG_TYPE_AHASH,
+	.engines = EIP97IES | EIP197B | EIP197D,
+	.alg.ahash = {
+		.init = safexcel_hmac_sha384_init,
+		.update = safexcel_ahash_update,
+		.final = safexcel_ahash_final,
+		.finup = safexcel_ahash_finup,
+		.digest = safexcel_hmac_sha384_digest,
+		.setkey = safexcel_hmac_sha384_setkey,
+		.export = safexcel_ahash_export,
+		.import = safexcel_ahash_import,
+		.halg = {
+			.digestsize = SHA384_DIGEST_SIZE,
+			.statesize = sizeof(struct safexcel_ahash_export_state),
+			.base = {
+				.cra_name = "hmac(sha384)",
+				.cra_driver_name = "safexcel-hmac-sha384",
+				.cra_priority = 300,
+				.cra_flags = CRYPTO_ALG_ASYNC |
+					     CRYPTO_ALG_KERN_DRIVER_ONLY,
+				.cra_blocksize = SHA384_BLOCK_SIZE,
+				.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
+				.cra_init = safexcel_ahash_cra_init,
+				.cra_exit = safexcel_ahash_cra_exit,
+				.cra_module = THIS_MODULE,
+			},
+		},
+	},
+};
+
+static int safexcel_md5_init(struct ahash_request *areq)
+{
+	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
+	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
+
+	memset(req, 0, sizeof(*req));
+
+	req->state[0] = MD5_H0;
+	req->state[1] = MD5_H1;
+	req->state[2] = MD5_H2;
+	req->state[3] = MD5_H3;
+
+	ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_MD5;
+	req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
+	req->state_sz = MD5_DIGEST_SIZE;
+
+	return 0;
+}
+
+static int safexcel_md5_digest(struct ahash_request *areq)
+{
+	int ret = safexcel_md5_init(areq);
+
+	if (ret)
+		return ret;
+
+	return safexcel_ahash_finup(areq);
+}
+
+struct safexcel_alg_template safexcel_alg_md5 = {
+	.type = SAFEXCEL_ALG_TYPE_AHASH,
+	.engines = EIP97IES | EIP197B | EIP197D,
+	.alg.ahash = {
+		.init = safexcel_md5_init,
+		.update = safexcel_ahash_update,
+		.final = safexcel_ahash_final,
+		.finup = safexcel_ahash_finup,
+		.digest = safexcel_md5_digest,
+		.export = safexcel_ahash_export,
+		.import = safexcel_ahash_import,
+		.halg = {
+			.digestsize = MD5_DIGEST_SIZE,
+			.statesize = sizeof(struct safexcel_ahash_export_state),
+			.base = {
+				.cra_name = "md5",
+				.cra_driver_name = "safexcel-md5",
+				.cra_priority = 300,
+				.cra_flags = CRYPTO_ALG_ASYNC |
+					     CRYPTO_ALG_KERN_DRIVER_ONLY,
+				.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
+				.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
+				.cra_init = safexcel_ahash_cra_init,
+				.cra_exit = safexcel_ahash_cra_exit,
+				.cra_module = THIS_MODULE,
+			},
+		},
+	},
+};
+
+static int safexcel_hmac_md5_init(struct ahash_request *areq)
+{
+	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
+
+	safexcel_md5_init(areq);
+	req->digest = CONTEXT_CONTROL_DIGEST_HMAC;
+	return 0;
+}
+
+static int safexcel_hmac_md5_setkey(struct crypto_ahash *tfm, const u8 *key,
+				     unsigned int keylen)
+{
+	return safexcel_hmac_alg_setkey(tfm, key, keylen, "safexcel-md5",
+					MD5_DIGEST_SIZE);
+}
+
+static int safexcel_hmac_md5_digest(struct ahash_request *areq)
+{
+	int ret = safexcel_hmac_md5_init(areq);
+
+	if (ret)
+		return ret;
+
+	return safexcel_ahash_finup(areq);
+}
+
+struct safexcel_alg_template safexcel_alg_hmac_md5 = {
+	.type = SAFEXCEL_ALG_TYPE_AHASH,
+	.engines = EIP97IES | EIP197B | EIP197D,
+	.alg.ahash = {
+		.init = safexcel_hmac_md5_init,
+		.update = safexcel_ahash_update,
+		.final = safexcel_ahash_final,
+		.finup = safexcel_ahash_finup,
+		.digest = safexcel_hmac_md5_digest,
+		.setkey = safexcel_hmac_md5_setkey,
+		.export = safexcel_ahash_export,
+		.import = safexcel_ahash_import,
+		.halg = {
+			.digestsize = MD5_DIGEST_SIZE,
+			.statesize = sizeof(struct safexcel_ahash_export_state),
+			.base = {
+				.cra_name = "hmac(md5)",
+				.cra_driver_name = "safexcel-hmac-md5",
+				.cra_priority = 300,
+				.cra_flags = CRYPTO_ALG_ASYNC |
+					     CRYPTO_ALG_KERN_DRIVER_ONLY,
+				.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
+				.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
+				.cra_init = safexcel_ahash_cra_init,
+				.cra_exit = safexcel_ahash_cra_exit,
+				.cra_module = THIS_MODULE,
+			},
+		},
+	},
+};
diff --git a/drivers/crypto/inside-secure/safexcel_ring.c b/drivers/crypto/inside-secure/safexcel_ring.c
index c9d2a87..eb75fa6 100644
--- a/drivers/crypto/inside-secure/safexcel_ring.c
+++ b/drivers/crypto/inside-secure/safexcel_ring.c
@@ -1,11 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Copyright (C) 2017 Marvell
  *
  * Antoine Tenart <antoine.tenart@free-electrons.com>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
  */
 
 #include <linux/dma-mapping.h>
@@ -14,8 +11,8 @@
 #include "safexcel.h"
 
 int safexcel_init_ring_descriptors(struct safexcel_crypto_priv *priv,
-				   struct safexcel_ring *cdr,
-				   struct safexcel_ring *rdr)
+				   struct safexcel_desc_ring *cdr,
+				   struct safexcel_desc_ring *rdr)
 {
 	cdr->offset = sizeof(u32) * priv->config.cd_offset;
 	cdr->base = dmam_alloc_coherent(priv->dev,
@@ -24,7 +21,7 @@
 	if (!cdr->base)
 		return -ENOMEM;
 	cdr->write = cdr->base;
-	cdr->base_end = cdr->base + cdr->offset * EIP197_DEFAULT_RING_SIZE;
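+	/* base_end points at the last descriptor slot of the ring, so the
+	 * write and read pointers can be compared against it directly.
+	 */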
+	cdr->base_end = cdr->base + cdr->offset * (EIP197_DEFAULT_RING_SIZE - 1);
 	cdr->read = cdr->base;
 
 	rdr->offset = sizeof(u32) * priv->config.rd_offset;
@@ -34,7 +31,7 @@
 	if (!rdr->base)
 		return -ENOMEM;
 	rdr->write = rdr->base;
-	rdr->base_end = rdr->base + rdr->offset * EIP197_DEFAULT_RING_SIZE;
+	rdr->base_end = rdr->base + rdr->offset * (EIP197_DEFAULT_RING_SIZE - 1);
 	rdr->read = rdr->base;
 
 	return 0;
@@ -46,49 +43,73 @@
 }
 
 static void *safexcel_ring_next_wptr(struct safexcel_crypto_priv *priv,
-				     struct safexcel_ring *ring)
+				     struct safexcel_desc_ring *ring)
 {
 	void *ptr = ring->write;
 
-	if (ring->nr == EIP197_DEFAULT_RING_SIZE - 1)
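+	/* The ring is full when the write pointer sits right behind the read
+	 * pointer, taking the wrap-around at base_end into account.
+	 */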
+	if ((ring->write == ring->read - ring->offset) ||
+	    (ring->read == ring->base && ring->write == ring->base_end))
 		return ERR_PTR(-ENOMEM);
 
-	ring->write += ring->offset;
 	if (ring->write == ring->base_end)
 		ring->write = ring->base;
+	else
+		ring->write += ring->offset;
 
-	ring->nr++;
 	return ptr;
 }
 
 void *safexcel_ring_next_rptr(struct safexcel_crypto_priv *priv,
-			      struct safexcel_ring *ring)
+			      struct safexcel_desc_ring *ring)
 {
 	void *ptr = ring->read;
 
-	if (!ring->nr)
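+	/* An empty ring has equal read and write pointers */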
+	if (ring->write == ring->read)
 		return ERR_PTR(-ENOENT);
 
-	ring->read += ring->offset;
 	if (ring->read == ring->base_end)
 		ring->read = ring->base;
+	else
+		ring->read += ring->offset;
 
-	ring->nr--;
 	return ptr;
 }
 
-void safexcel_ring_rollback_wptr(struct safexcel_crypto_priv *priv,
-				 struct safexcel_ring *ring)
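+/* Helpers converting ring pointers and result descriptors to indices, used
+ * to associate a result descriptor with the request that produced it.
+ */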
+inline void *safexcel_ring_curr_rptr(struct safexcel_crypto_priv *priv,
+				     int ring)
 {
-	if (!ring->nr)
+	struct safexcel_desc_ring *rdr = &priv->ring[ring].rdr;
+
+	return rdr->read;
+}
+
+inline int safexcel_ring_first_rdr_index(struct safexcel_crypto_priv *priv,
+					 int ring)
+{
+	struct safexcel_desc_ring *rdr = &priv->ring[ring].rdr;
+
+	return (rdr->read - rdr->base) / rdr->offset;
+}
+
+inline int safexcel_ring_rdr_rdesc_index(struct safexcel_crypto_priv *priv,
+					 int ring,
+					 struct safexcel_result_desc *rdesc)
+{
+	struct safexcel_desc_ring *rdr = &priv->ring[ring].rdr;
+
+	return ((void *)rdesc - rdr->base) / rdr->offset;
+}
+
+void safexcel_ring_rollback_wptr(struct safexcel_crypto_priv *priv,
+				 struct safexcel_desc_ring *ring)
+{
+	if (ring->write == ring->read)
 		return;
 
 	if (ring->write == ring->base)
-		ring->write = ring->base_end - ring->offset;
+		ring->write = ring->base_end;
 	else
 		ring->write -= ring->offset;
-
-	ring->nr--;
 }
 
 struct safexcel_command_desc *safexcel_add_cdesc(struct safexcel_crypto_priv *priv,
diff --git a/drivers/crypto/marvell/hash.c b/drivers/crypto/marvell/hash.c
index e34d80b..99ff54c 100644
--- a/drivers/crypto/marvell/hash.c
+++ b/drivers/crypto/marvell/hash.c
@@ -1183,8 +1183,7 @@
 	u8 *opad;
 	int ret;
 
-	tfm = crypto_alloc_ahash(hash_alg_name, CRYPTO_ALG_TYPE_AHASH,
-				 CRYPTO_ALG_TYPE_AHASH_MASK);
+	tfm = crypto_alloc_ahash(hash_alg_name, 0, 0);
 	if (IS_ERR(tfm))
 		return PTR_ERR(tfm);
 
diff --git a/drivers/crypto/n2_core.c b/drivers/crypto/n2_core.c
index ab6235b..55f34cf 100644
--- a/drivers/crypto/n2_core.c
+++ b/drivers/crypto/n2_core.c
@@ -1487,8 +1487,7 @@
 	snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "%s", tmpl->name);
 	snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s-n2", tmpl->name);
 	base->cra_priority = N2_CRA_PRIORITY;
-	base->cra_flags = CRYPTO_ALG_TYPE_AHASH |
-			  CRYPTO_ALG_KERN_DRIVER_ONLY |
+	base->cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
 			  CRYPTO_ALG_NEED_FALLBACK;
 	base->cra_blocksize = tmpl->block_size;
 	base->cra_ctxsize = sizeof(struct n2_hash_ctx);
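A pattern repeated throughout this merge: the CRYPTO_ALG_TYPE_* bits
are removed from cra_flags and from crypto_alloc_*() calls, since the
algorithm type is already implied by the registration and allocation
API being used. A hedged sketch of the calling convention after the
cleanup ("sha256" is only an example name):

	#include <crypto/hash.h>
	#include <linux/err.h>

	static int example_alloc_fallback(void)
	{
		struct crypto_ahash *tfm;

		/* type and mask are simply 0 now: crypto_alloc_ahash()
		 * already restricts the lookup to ahash algorithms. */
		tfm = crypto_alloc_ahash("sha256", 0, 0);
		if (IS_ERR(tfm))
			return PTR_ERR(tfm);

		crypto_free_ahash(tfm);
		return 0;
	}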
diff --git a/drivers/crypto/nx/nx-aes-xcbc.c b/drivers/crypto/nx/nx-aes-xcbc.c
index c2f7d4b..ad3358e 100644
--- a/drivers/crypto/nx/nx-aes-xcbc.c
+++ b/drivers/crypto/nx/nx-aes-xcbc.c
@@ -386,7 +386,6 @@
 		.cra_name        = "xcbc(aes)",
 		.cra_driver_name = "xcbc-aes-nx",
 		.cra_priority    = 300,
-		.cra_flags       = CRYPTO_ALG_TYPE_SHASH,
 		.cra_blocksize   = AES_BLOCK_SIZE,
 		.cra_module      = THIS_MODULE,
 		.cra_ctxsize     = sizeof(struct nx_crypto_ctx),
diff --git a/drivers/crypto/nx/nx-sha256.c b/drivers/crypto/nx/nx-sha256.c
index becb738..a6764af 100644
--- a/drivers/crypto/nx/nx-sha256.c
+++ b/drivers/crypto/nx/nx-sha256.c
@@ -288,7 +288,6 @@
 		.cra_name        = "sha256",
 		.cra_driver_name = "sha256-nx",
 		.cra_priority    = 300,
-		.cra_flags       = CRYPTO_ALG_TYPE_SHASH,
 		.cra_blocksize   = SHA256_BLOCK_SIZE,
 		.cra_module      = THIS_MODULE,
 		.cra_ctxsize     = sizeof(struct nx_crypto_ctx),
diff --git a/drivers/crypto/nx/nx-sha512.c b/drivers/crypto/nx/nx-sha512.c
index b6e183d..92956bc 100644
--- a/drivers/crypto/nx/nx-sha512.c
+++ b/drivers/crypto/nx/nx-sha512.c
@@ -294,7 +294,6 @@
 		.cra_name        = "sha512",
 		.cra_driver_name = "sha512-nx",
 		.cra_priority    = 300,
-		.cra_flags       = CRYPTO_ALG_TYPE_SHASH,
 		.cra_blocksize   = SHA512_BLOCK_SIZE,
 		.cra_module      = THIS_MODULE,
 		.cra_ctxsize     = sizeof(struct nx_crypto_ctx),
diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c
index d1a1c74..0641185 100644
--- a/drivers/crypto/omap-sham.c
+++ b/drivers/crypto/omap-sham.c
@@ -1464,8 +1464,7 @@
 		.cra_name		= "sha1",
 		.cra_driver_name	= "omap-sha1",
 		.cra_priority		= 400,
-		.cra_flags		= CRYPTO_ALG_TYPE_AHASH |
-						CRYPTO_ALG_KERN_DRIVER_ONLY |
+		.cra_flags		= CRYPTO_ALG_KERN_DRIVER_ONLY |
 						CRYPTO_ALG_ASYNC |
 						CRYPTO_ALG_NEED_FALLBACK,
 		.cra_blocksize		= SHA1_BLOCK_SIZE,
@@ -1487,8 +1486,7 @@
 		.cra_name		= "md5",
 		.cra_driver_name	= "omap-md5",
 		.cra_priority		= 400,
-		.cra_flags		= CRYPTO_ALG_TYPE_AHASH |
-						CRYPTO_ALG_KERN_DRIVER_ONLY |
+		.cra_flags		= CRYPTO_ALG_KERN_DRIVER_ONLY |
 						CRYPTO_ALG_ASYNC |
 						CRYPTO_ALG_NEED_FALLBACK,
 		.cra_blocksize		= SHA1_BLOCK_SIZE,
@@ -1511,8 +1509,7 @@
 		.cra_name		= "hmac(sha1)",
 		.cra_driver_name	= "omap-hmac-sha1",
 		.cra_priority		= 400,
-		.cra_flags		= CRYPTO_ALG_TYPE_AHASH |
-						CRYPTO_ALG_KERN_DRIVER_ONLY |
+		.cra_flags		= CRYPTO_ALG_KERN_DRIVER_ONLY |
 						CRYPTO_ALG_ASYNC |
 						CRYPTO_ALG_NEED_FALLBACK,
 		.cra_blocksize		= SHA1_BLOCK_SIZE,
@@ -1536,8 +1533,7 @@
 		.cra_name		= "hmac(md5)",
 		.cra_driver_name	= "omap-hmac-md5",
 		.cra_priority		= 400,
-		.cra_flags		= CRYPTO_ALG_TYPE_AHASH |
-						CRYPTO_ALG_KERN_DRIVER_ONLY |
+		.cra_flags		= CRYPTO_ALG_KERN_DRIVER_ONLY |
 						CRYPTO_ALG_ASYNC |
 						CRYPTO_ALG_NEED_FALLBACK,
 		.cra_blocksize		= SHA1_BLOCK_SIZE,
@@ -1564,8 +1560,7 @@
 		.cra_name		= "sha224",
 		.cra_driver_name	= "omap-sha224",
 		.cra_priority		= 400,
-		.cra_flags		= CRYPTO_ALG_TYPE_AHASH |
-						CRYPTO_ALG_ASYNC |
+		.cra_flags		= CRYPTO_ALG_ASYNC |
 						CRYPTO_ALG_NEED_FALLBACK,
 		.cra_blocksize		= SHA224_BLOCK_SIZE,
 		.cra_ctxsize		= sizeof(struct omap_sham_ctx),
@@ -1586,8 +1581,7 @@
 		.cra_name		= "sha256",
 		.cra_driver_name	= "omap-sha256",
 		.cra_priority		= 400,
-		.cra_flags		= CRYPTO_ALG_TYPE_AHASH |
-						CRYPTO_ALG_ASYNC |
+		.cra_flags		= CRYPTO_ALG_ASYNC |
 						CRYPTO_ALG_NEED_FALLBACK,
 		.cra_blocksize		= SHA256_BLOCK_SIZE,
 		.cra_ctxsize		= sizeof(struct omap_sham_ctx),
@@ -1609,8 +1603,7 @@
 		.cra_name		= "hmac(sha224)",
 		.cra_driver_name	= "omap-hmac-sha224",
 		.cra_priority		= 400,
-		.cra_flags		= CRYPTO_ALG_TYPE_AHASH |
-						CRYPTO_ALG_ASYNC |
+		.cra_flags		= CRYPTO_ALG_ASYNC |
 						CRYPTO_ALG_NEED_FALLBACK,
 		.cra_blocksize		= SHA224_BLOCK_SIZE,
 		.cra_ctxsize		= sizeof(struct omap_sham_ctx) +
@@ -1633,8 +1626,7 @@
 		.cra_name		= "hmac(sha256)",
 		.cra_driver_name	= "omap-hmac-sha256",
 		.cra_priority		= 400,
-		.cra_flags		= CRYPTO_ALG_TYPE_AHASH |
-						CRYPTO_ALG_ASYNC |
+		.cra_flags		= CRYPTO_ALG_ASYNC |
 						CRYPTO_ALG_NEED_FALLBACK,
 		.cra_blocksize		= SHA256_BLOCK_SIZE,
 		.cra_ctxsize		= sizeof(struct omap_sham_ctx) +
@@ -1659,8 +1651,7 @@
 		.cra_name		= "sha384",
 		.cra_driver_name	= "omap-sha384",
 		.cra_priority		= 400,
-		.cra_flags		= CRYPTO_ALG_TYPE_AHASH |
-						CRYPTO_ALG_ASYNC |
+		.cra_flags		= CRYPTO_ALG_ASYNC |
 						CRYPTO_ALG_NEED_FALLBACK,
 		.cra_blocksize		= SHA384_BLOCK_SIZE,
 		.cra_ctxsize		= sizeof(struct omap_sham_ctx),
@@ -1681,8 +1672,7 @@
 		.cra_name		= "sha512",
 		.cra_driver_name	= "omap-sha512",
 		.cra_priority		= 400,
-		.cra_flags		= CRYPTO_ALG_TYPE_AHASH |
-						CRYPTO_ALG_ASYNC |
+		.cra_flags		= CRYPTO_ALG_ASYNC |
 						CRYPTO_ALG_NEED_FALLBACK,
 		.cra_blocksize		= SHA512_BLOCK_SIZE,
 		.cra_ctxsize		= sizeof(struct omap_sham_ctx),
@@ -1704,8 +1694,7 @@
 		.cra_name		= "hmac(sha384)",
 		.cra_driver_name	= "omap-hmac-sha384",
 		.cra_priority		= 400,
-		.cra_flags		= CRYPTO_ALG_TYPE_AHASH |
-						CRYPTO_ALG_ASYNC |
+		.cra_flags		= CRYPTO_ALG_ASYNC |
 						CRYPTO_ALG_NEED_FALLBACK,
 		.cra_blocksize		= SHA384_BLOCK_SIZE,
 		.cra_ctxsize		= sizeof(struct omap_sham_ctx) +
@@ -1728,8 +1717,7 @@
 		.cra_name		= "hmac(sha512)",
 		.cra_driver_name	= "omap-hmac-sha512",
 		.cra_priority		= 400,
-		.cra_flags		= CRYPTO_ALG_TYPE_AHASH |
-						CRYPTO_ALG_ASYNC |
+		.cra_flags		= CRYPTO_ALG_ASYNC |
 						CRYPTO_ALG_NEED_FALLBACK,
 		.cra_blocksize		= SHA512_BLOCK_SIZE,
 		.cra_ctxsize		= sizeof(struct omap_sham_ctx) +
diff --git a/drivers/crypto/padlock-sha.c b/drivers/crypto/padlock-sha.c
index d32c793..21e5cae 100644
--- a/drivers/crypto/padlock-sha.c
+++ b/drivers/crypto/padlock-sha.c
@@ -247,8 +247,7 @@
 		.cra_name		=	"sha1",
 		.cra_driver_name	=	"sha1-padlock",
 		.cra_priority		=	PADLOCK_CRA_PRIORITY,
-		.cra_flags		=	CRYPTO_ALG_TYPE_SHASH |
-						CRYPTO_ALG_NEED_FALLBACK,
+		.cra_flags		=	CRYPTO_ALG_NEED_FALLBACK,
 		.cra_blocksize		=	SHA1_BLOCK_SIZE,
 		.cra_ctxsize		=	sizeof(struct padlock_sha_ctx),
 		.cra_module		=	THIS_MODULE,
@@ -271,8 +270,7 @@
 		.cra_name		=	"sha256",
 		.cra_driver_name	=	"sha256-padlock",
 		.cra_priority		=	PADLOCK_CRA_PRIORITY,
-		.cra_flags		=	CRYPTO_ALG_TYPE_SHASH |
-						CRYPTO_ALG_NEED_FALLBACK,
+		.cra_flags		=	CRYPTO_ALG_NEED_FALLBACK,
 		.cra_blocksize		=	SHA256_BLOCK_SIZE,
 		.cra_ctxsize		=	sizeof(struct padlock_sha_ctx),
 		.cra_module		=	THIS_MODULE,
@@ -484,7 +482,6 @@
 		.cra_name		=	"sha1",
 		.cra_driver_name	=	"sha1-padlock-nano",
 		.cra_priority		=	PADLOCK_CRA_PRIORITY,
-		.cra_flags		=	CRYPTO_ALG_TYPE_SHASH,
 		.cra_blocksize		=	SHA1_BLOCK_SIZE,
 		.cra_module		=	THIS_MODULE,
 	}
@@ -503,7 +500,6 @@
 		.cra_name		=	"sha256",
 		.cra_driver_name	=	"sha256-padlock-nano",
 		.cra_priority		=	PADLOCK_CRA_PRIORITY,
-		.cra_flags		=	CRYPTO_ALG_TYPE_SHASH,
 		.cra_blocksize		=	SHA256_BLOCK_SIZE,
 		.cra_module		=	THIS_MODULE,
 	}
diff --git a/drivers/crypto/qce/sha.c b/drivers/crypto/qce/sha.c
index 53227d7..d8a5db1 100644
--- a/drivers/crypto/qce/sha.c
+++ b/drivers/crypto/qce/sha.c
@@ -378,8 +378,7 @@
 	else
 		return -EINVAL;
 
-	ahash_tfm = crypto_alloc_ahash(alg_name, CRYPTO_ALG_TYPE_AHASH,
-				       CRYPTO_ALG_TYPE_AHASH_MASK);
+	ahash_tfm = crypto_alloc_ahash(alg_name, 0, 0);
 	if (IS_ERR(ahash_tfm))
 		return PTR_ERR(ahash_tfm);
 
diff --git a/drivers/crypto/qcom-rng.c b/drivers/crypto/qcom-rng.c
new file mode 100644
index 0000000..e54249c
--- /dev/null
+++ b/drivers/crypto/qcom-rng.c
@@ -0,0 +1,229 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2017-18 Linaro Limited
+//
+// Based on msm-rng.c and downstream driver
+
+#include <crypto/internal/rng.h>
+#include <linux/acpi.h>
+#include <linux/clk.h>
+#include <linux/crypto.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+/* Device specific register offsets */
+#define PRNG_DATA_OUT		0x0000
+#define PRNG_STATUS		0x0004
+#define PRNG_LFSR_CFG		0x0100
+#define PRNG_CONFIG		0x0104
+
+/* Device specific register masks and config values */
+#define PRNG_LFSR_CFG_MASK	0x0000ffff
+#define PRNG_LFSR_CFG_CLOCKS	0x0000dddd
+#define PRNG_CONFIG_HW_ENABLE	BIT(1)
+#define PRNG_STATUS_DATA_AVAIL	BIT(0)
+
+#define WORD_SZ			4
+
+struct qcom_rng {
+	struct mutex lock;
+	void __iomem *base;
+	struct clk *clk;
+	unsigned int skip_init;
+};
+
+struct qcom_rng_ctx {
+	struct qcom_rng *rng;
+};
+
+static struct qcom_rng *qcom_rng_dev;
+
+static int qcom_rng_read(struct qcom_rng *rng, u8 *data, unsigned int max)
+{
+	unsigned int currsize = 0;
+	u32 val;
+
+	/* read random data from hardware */
+	do {
+		val = readl_relaxed(rng->base + PRNG_STATUS);
+		if (!(val & PRNG_STATUS_DATA_AVAIL))
+			break;
+
+		val = readl_relaxed(rng->base + PRNG_DATA_OUT);
+		if (!val)
+			break;
+
+		if ((max - currsize) >= WORD_SZ) {
+			memcpy(data, &val, WORD_SZ);
+			data += WORD_SZ;
+			currsize += WORD_SZ;
+		} else {
+			/* copy only remaining bytes */
+			memcpy(data, &val, max - currsize);
+			break;
+		}
+	} while (currsize < max);
+
+	return currsize;
+}
+
+static int qcom_rng_generate(struct crypto_rng *tfm,
+			     const u8 *src, unsigned int slen,
+			     u8 *dstn, unsigned int dlen)
+{
+	struct qcom_rng_ctx *ctx = crypto_rng_ctx(tfm);
+	struct qcom_rng *rng = ctx->rng;
+	int ret;
+
+	ret = clk_prepare_enable(rng->clk);
+	if (ret)
+		return ret;
+
+	mutex_lock(&rng->lock);
+
+	ret = qcom_rng_read(rng, dstn, dlen);
+
+	mutex_unlock(&rng->lock);
+	clk_disable_unprepare(rng->clk);
+
+	return 0;
+}
+
+static int qcom_rng_seed(struct crypto_rng *tfm, const u8 *seed,
+			 unsigned int slen)
+{
+	return 0;
+}
+
+static int qcom_rng_enable(struct qcom_rng *rng)
+{
+	u32 val;
+	int ret;
+
+	ret = clk_prepare_enable(rng->clk);
+	if (ret)
+		return ret;
+
+	/* Enable PRNG only if it is not already enabled */
+	val = readl_relaxed(rng->base + PRNG_CONFIG);
+	if (val & PRNG_CONFIG_HW_ENABLE)
+		goto already_enabled;
+
+	val = readl_relaxed(rng->base + PRNG_LFSR_CFG);
+	val &= ~PRNG_LFSR_CFG_MASK;
+	val |= PRNG_LFSR_CFG_CLOCKS;
+	writel(val, rng->base + PRNG_LFSR_CFG);
+
+	val = readl_relaxed(rng->base + PRNG_CONFIG);
+	val |= PRNG_CONFIG_HW_ENABLE;
+	writel(val, rng->base + PRNG_CONFIG);
+
+already_enabled:
+	clk_disable_unprepare(rng->clk);
+
+	return 0;
+}
+
+static int qcom_rng_init(struct crypto_tfm *tfm)
+{
+	struct qcom_rng_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	ctx->rng = qcom_rng_dev;
+
+	if (!ctx->rng->skip_init)
+		return qcom_rng_enable(ctx->rng);
+
+	return 0;
+}
+
+static struct rng_alg qcom_rng_alg = {
+	.generate	= qcom_rng_generate,
+	.seed		= qcom_rng_seed,
+	.seedsize	= 0,
+	.base		= {
+		.cra_name		= "stdrng",
+		.cra_driver_name	= "qcom-rng",
+		.cra_flags		= CRYPTO_ALG_TYPE_RNG,
+		.cra_priority		= 300,
+		.cra_ctxsize		= sizeof(struct qcom_rng_ctx),
+		.cra_module		= THIS_MODULE,
+		.cra_init		= qcom_rng_init,
+	}
+};
+
+static int qcom_rng_probe(struct platform_device *pdev)
+{
+	struct resource *res;
+	struct qcom_rng *rng;
+	int ret;
+
+	rng = devm_kzalloc(&pdev->dev, sizeof(*rng), GFP_KERNEL);
+	if (!rng)
+		return -ENOMEM;
+
+	platform_set_drvdata(pdev, rng);
+	mutex_init(&rng->lock);
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	rng->base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(rng->base))
+		return PTR_ERR(rng->base);
+
+	/* ACPI systems have clk already on, so skip clk_get */
+	if (!has_acpi_companion(&pdev->dev)) {
+		rng->clk = devm_clk_get(&pdev->dev, "core");
+		if (IS_ERR(rng->clk))
+			return PTR_ERR(rng->clk);
+	}
+
+	rng->skip_init = (unsigned long)device_get_match_data(&pdev->dev);
+
+	qcom_rng_dev = rng;
+	ret = crypto_register_rng(&qcom_rng_alg);
+	if (ret) {
+		dev_err(&pdev->dev, "Register crypto rng failed: %d\n", ret);
+		qcom_rng_dev = NULL;
+	}
+
+	return ret;
+}
+
+static int qcom_rng_remove(struct platform_device *pdev)
+{
+	crypto_unregister_rng(&qcom_rng_alg);
+
+	qcom_rng_dev = NULL;
+
+	return 0;
+}
+
+#if IS_ENABLED(CONFIG_ACPI)
+static const struct acpi_device_id qcom_rng_acpi_match[] = {
+	{ .id = "QCOM8160", .driver_data = 1 },
+	{}
+};
+MODULE_DEVICE_TABLE(acpi, qcom_rng_acpi_match);
+#endif
+
+static const struct of_device_id qcom_rng_of_match[] = {
+	{ .compatible = "qcom,prng", .data = (void *)0},
+	{ .compatible = "qcom,prng-ee", .data = (void *)1},
+	{}
+};
+MODULE_DEVICE_TABLE(of, qcom_rng_of_match);
+
+static struct platform_driver qcom_rng_driver = {
+	.probe = qcom_rng_probe,
+	.remove =  qcom_rng_remove,
+	.driver = {
+		.name = KBUILD_MODNAME,
+		.of_match_table = of_match_ptr(qcom_rng_of_match),
+		.acpi_match_table = ACPI_PTR(qcom_rng_acpi_match),
+	}
+};
+module_platform_driver(qcom_rng_driver);
+
+MODULE_ALIAS("platform:" KBUILD_MODNAME);
+MODULE_DESCRIPTION("Qualcomm random number generator driver");
+MODULE_LICENSE("GPL v2");
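For context, the new driver registers under the generic "stdrng" name,
so consumers reach it through the crypto RNG API rather than a
driver-specific interface. A consumer-side sketch (not part of the
patch; which stdrng backend wins is decided by cra_priority):

	#include <crypto/rng.h>
	#include <linux/err.h>

	static int example_read_rng(u8 *buf, unsigned int len)
	{
		struct crypto_rng *rng;
		int ret;

		rng = crypto_alloc_rng("stdrng", 0, 0);
		if (IS_ERR(rng))
			return PTR_ERR(rng);

		ret = crypto_rng_get_bytes(rng, buf, len);
		crypto_free_rng(rng);
		return ret;
	}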
diff --git a/drivers/crypto/s5p-sss.c b/drivers/crypto/s5p-sss.c
index bf71630..faa2820 100644
--- a/drivers/crypto/s5p-sss.c
+++ b/drivers/crypto/s5p-sss.c
@@ -1765,8 +1765,7 @@
 		.cra_name		= "sha1",
 		.cra_driver_name	= "exynos-sha1",
 		.cra_priority		= 100,
-		.cra_flags		= CRYPTO_ALG_TYPE_AHASH |
-					  CRYPTO_ALG_KERN_DRIVER_ONLY |
+		.cra_flags		= CRYPTO_ALG_KERN_DRIVER_ONLY |
 					  CRYPTO_ALG_ASYNC |
 					  CRYPTO_ALG_NEED_FALLBACK,
 		.cra_blocksize		= HASH_BLOCK_SIZE,
@@ -1791,8 +1790,7 @@
 		.cra_name		= "md5",
 		.cra_driver_name	= "exynos-md5",
 		.cra_priority		= 100,
-		.cra_flags		= CRYPTO_ALG_TYPE_AHASH |
-					  CRYPTO_ALG_KERN_DRIVER_ONLY |
+		.cra_flags		= CRYPTO_ALG_KERN_DRIVER_ONLY |
 					  CRYPTO_ALG_ASYNC |
 					  CRYPTO_ALG_NEED_FALLBACK,
 		.cra_blocksize		= HASH_BLOCK_SIZE,
@@ -1817,8 +1815,7 @@
 		.cra_name		= "sha256",
 		.cra_driver_name	= "exynos-sha256",
 		.cra_priority		= 100,
-		.cra_flags		= CRYPTO_ALG_TYPE_AHASH |
-					  CRYPTO_ALG_KERN_DRIVER_ONLY |
+		.cra_flags		= CRYPTO_ALG_KERN_DRIVER_ONLY |
 					  CRYPTO_ALG_ASYNC |
 					  CRYPTO_ALG_NEED_FALLBACK,
 		.cra_blocksize		= HASH_BLOCK_SIZE,
diff --git a/drivers/crypto/sahara.c b/drivers/crypto/sahara.c
index 0f2245e..e7540a5 100644
--- a/drivers/crypto/sahara.c
+++ b/drivers/crypto/sahara.c
@@ -1253,8 +1253,7 @@
 		.cra_name		= "sha1",
 		.cra_driver_name	= "sahara-sha1",
 		.cra_priority		= 300,
-		.cra_flags		= CRYPTO_ALG_TYPE_AHASH |
-						CRYPTO_ALG_ASYNC |
+		.cra_flags		= CRYPTO_ALG_ASYNC |
 						CRYPTO_ALG_NEED_FALLBACK,
 		.cra_blocksize		= SHA1_BLOCK_SIZE,
 		.cra_ctxsize		= sizeof(struct sahara_ctx),
@@ -1280,8 +1279,7 @@
 		.cra_name		= "sha256",
 		.cra_driver_name	= "sahara-sha256",
 		.cra_priority		= 300,
-		.cra_flags		= CRYPTO_ALG_TYPE_AHASH |
-						CRYPTO_ALG_ASYNC |
+		.cra_flags		= CRYPTO_ALG_ASYNC |
 						CRYPTO_ALG_NEED_FALLBACK,
 		.cra_blocksize		= SHA256_BLOCK_SIZE,
 		.cra_ctxsize		= sizeof(struct sahara_ctx),
@@ -1351,7 +1349,7 @@
 
 err_sha_v3_algs:
 	for (j = 0; j < k; j++)
-		crypto_unregister_ahash(&sha_v4_algs[j]);
+		crypto_unregister_ahash(&sha_v3_algs[j]);
 
 err_aes_algs:
 	for (j = 0; j < i; j++)
@@ -1367,7 +1365,7 @@
 	for (i = 0; i < ARRAY_SIZE(aes_algs); i++)
 		crypto_unregister_alg(&aes_algs[i]);
 
-	for (i = 0; i < ARRAY_SIZE(sha_v4_algs); i++)
+	for (i = 0; i < ARRAY_SIZE(sha_v3_algs); i++)
 		crypto_unregister_ahash(&sha_v3_algs[i]);
 
 	if (dev->version > SAHARA_VERSION_3)
diff --git a/drivers/crypto/stm32/stm32-cryp.c b/drivers/crypto/stm32/stm32-cryp.c
index c5d3efc..23b0b7b 100644
--- a/drivers/crypto/stm32/stm32-cryp.c
+++ b/drivers/crypto/stm32/stm32-cryp.c
@@ -11,6 +11,7 @@
 #include <linux/module.h>
 #include <linux/of_device.h>
 #include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
 #include <linux/reset.h>
 
 #include <crypto/aes.h>
@@ -105,6 +106,7 @@
 #define GCM_CTR_INIT            2
 #define _walked_in              (cryp->in_walk.offset - cryp->in_sg->offset)
 #define _walked_out             (cryp->out_walk.offset - cryp->out_sg->offset)
+#define CRYP_AUTOSUSPEND_DELAY	50
 
 struct stm32_cryp_caps {
 	bool                    swap_final;
@@ -519,6 +521,8 @@
 	int ret;
 	u32 cfg, hw_mode;
 
+	pm_runtime_get_sync(cryp->dev);
+
 	/* Disable interrupt */
 	stm32_cryp_write(cryp, CRYP_IMSCR, 0);
 
@@ -638,6 +642,9 @@
 		free_pages((unsigned long)buf_out, pages);
 	}
 
+	pm_runtime_mark_last_busy(cryp->dev);
+	pm_runtime_put_autosuspend(cryp->dev);
+
 	if (is_gcm(cryp) || is_ccm(cryp)) {
 		crypto_finalize_aead_request(cryp->engine, cryp->areq, err);
 		cryp->areq = NULL;
@@ -1969,6 +1976,13 @@
 		return ret;
 	}
 
+	pm_runtime_set_autosuspend_delay(dev, CRYP_AUTOSUSPEND_DELAY);
+	pm_runtime_use_autosuspend(dev);
+
+	pm_runtime_get_noresume(dev);
+	pm_runtime_set_active(dev);
+	pm_runtime_enable(dev);
+
 	rst = devm_reset_control_get(dev, NULL);
 	if (!IS_ERR(rst)) {
 		reset_control_assert(rst);
@@ -2008,6 +2022,8 @@
 
 	dev_info(dev, "Initialized\n");
 
+	pm_runtime_put_sync(dev);
+
 	return 0;
 
 err_aead_algs:
@@ -2020,6 +2036,11 @@
 	list_del(&cryp->list);
 	spin_unlock(&cryp_list.lock);
 
+	pm_runtime_disable(dev);
+	pm_runtime_put_noidle(dev);
+
 	clk_disable_unprepare(cryp->clk);
 
 	return ret;
@@ -2028,10 +2049,15 @@
 static int stm32_cryp_remove(struct platform_device *pdev)
 {
 	struct stm32_cryp *cryp = platform_get_drvdata(pdev);
+	int ret;
 
 	if (!cryp)
 		return -ENODEV;
 
+	ret = pm_runtime_get_sync(cryp->dev);
+	if (ret < 0)
+		return ret;
+
 	crypto_unregister_aeads(aead_algs, ARRAY_SIZE(aead_algs));
 	crypto_unregister_algs(crypto_algs, ARRAY_SIZE(crypto_algs));
 
@@ -2041,16 +2067,52 @@
 	list_del(&cryp->list);
 	spin_unlock(&cryp_list.lock);
 
+	pm_runtime_disable(cryp->dev);
+	pm_runtime_put_noidle(cryp->dev);
+
 	clk_disable_unprepare(cryp->clk);
 
 	return 0;
 }
 
+#ifdef CONFIG_PM
+static int stm32_cryp_runtime_suspend(struct device *dev)
+{
+	struct stm32_cryp *cryp = dev_get_drvdata(dev);
+
+	clk_disable_unprepare(cryp->clk);
+
+	return 0;
+}
+
+static int stm32_cryp_runtime_resume(struct device *dev)
+{
+	struct stm32_cryp *cryp = dev_get_drvdata(dev);
+	int ret;
+
+	ret = clk_prepare_enable(cryp->clk);
+	if (ret) {
+		dev_err(cryp->dev, "Failed to prepare_enable clock\n");
+		return ret;
+	}
+
+	return 0;
+}
+#endif
+
+static const struct dev_pm_ops stm32_cryp_pm_ops = {
+	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+				pm_runtime_force_resume)
+	SET_RUNTIME_PM_OPS(stm32_cryp_runtime_suspend,
+			   stm32_cryp_runtime_resume, NULL)
+};
+
 static struct platform_driver stm32_cryp_driver = {
 	.probe  = stm32_cryp_probe,
 	.remove = stm32_cryp_remove,
 	.driver = {
 		.name           = DRIVER_NAME,
+		.pm		= &stm32_cryp_pm_ops,
 		.of_match_table = stm32_dt_ids,
 	},
 };
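The stm32-cryp, stm32-hash and stm32_crc32 hunks in this merge all
adopt the same runtime PM shape: probe takes a noresume reference,
marks the device active and enables runtime PM before registering, and
every hardware access is bracketed by a get/put pair with autosuspend
so back-to-back requests do not bounce the clock. A condensed sketch
of that access pattern (illustrative, not a drop-in for any of the
three drivers):

	#include <linux/pm_runtime.h>

	static void example_hw_access(struct device *dev)
	{
		/* Resume the device (the runtime_resume callback
		 * re-enables the clock) before touching registers. */
		pm_runtime_get_sync(dev);

		/* ... MMIO register accesses go here ... */

		/* Defer the actual suspend by the autosuspend delay. */
		pm_runtime_mark_last_busy(dev);
		pm_runtime_put_autosuspend(dev);
	}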
diff --git a/drivers/crypto/stm32/stm32-hash.c b/drivers/crypto/stm32/stm32-hash.c
index cdc96f1..590d735 100644
--- a/drivers/crypto/stm32/stm32-hash.c
+++ b/drivers/crypto/stm32/stm32-hash.c
@@ -31,6 +31,7 @@
 #include <linux/module.h>
 #include <linux/of_device.h>
 #include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
 #include <linux/reset.h>
 
 #include <crypto/engine.h>
@@ -121,6 +122,8 @@
 #define HASH_QUEUE_LENGTH		16
 #define HASH_DMA_THRESHOLD		50
 
+#define HASH_AUTOSUSPEND_DELAY		50
+
 struct stm32_hash_ctx {
 	struct crypto_engine_ctx enginectx;
 	struct stm32_hash_dev	*hdev;
@@ -814,12 +817,17 @@
 		rctx->flags |= HASH_FLAGS_ERRORS;
 	}
 
+	pm_runtime_mark_last_busy(hdev->dev);
+	pm_runtime_put_autosuspend(hdev->dev);
+
 	crypto_finalize_hash_request(hdev->engine, req, err);
 }
 
 static int stm32_hash_hw_init(struct stm32_hash_dev *hdev,
 			      struct stm32_hash_request_ctx *rctx)
 {
+	pm_runtime_get_sync(hdev->dev);
+
 	if (!(HASH_FLAGS_INIT & hdev->flags)) {
 		stm32_hash_write(hdev, HASH_CR, HASH_CR_INIT);
 		stm32_hash_write(hdev, HASH_STR, 0);
@@ -967,6 +975,8 @@
 	u32 *preg;
 	unsigned int i;
 
+	pm_runtime_get_sync(hdev->dev);
+
 	while (!(stm32_hash_read(hdev, HASH_SR) & HASH_SR_DATA_INPUT_READY))
 		cpu_relax();
 
@@ -982,6 +992,9 @@
 	for (i = 0; i < HASH_CSR_REGISTER_NUMBER; i++)
 		*preg++ = stm32_hash_read(hdev, HASH_CSR(i));
 
+	pm_runtime_mark_last_busy(hdev->dev);
+	pm_runtime_put_autosuspend(hdev->dev);
+
 	memcpy(out, rctx, sizeof(*rctx));
 
 	return 0;
@@ -1000,6 +1013,8 @@
 
 	preg = rctx->hw_context;
 
+	pm_runtime_get_sync(hdev->dev);
+
 	stm32_hash_write(hdev, HASH_IMR, *preg++);
 	stm32_hash_write(hdev, HASH_STR, *preg++);
 	stm32_hash_write(hdev, HASH_CR, *preg);
@@ -1009,6 +1024,9 @@
 	for (i = 0; i < HASH_CSR_REGISTER_NUMBER; i++)
 		stm32_hash_write(hdev, HASH_CSR(i), *preg++);
 
+	pm_runtime_mark_last_busy(hdev->dev);
+	pm_runtime_put_autosuspend(hdev->dev);
+
 	kfree(rctx->hw_context);
 
 	return 0;
@@ -1132,8 +1150,7 @@
 				.cra_name = "md5",
 				.cra_driver_name = "stm32-md5",
 				.cra_priority = 200,
-				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
-					CRYPTO_ALG_ASYNC |
+				.cra_flags = CRYPTO_ALG_ASYNC |
 					CRYPTO_ALG_KERN_DRIVER_ONLY,
 				.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
 				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
@@ -1159,8 +1176,7 @@
 				.cra_name = "hmac(md5)",
 				.cra_driver_name = "stm32-hmac-md5",
 				.cra_priority = 200,
-				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
-					CRYPTO_ALG_ASYNC |
+				.cra_flags = CRYPTO_ALG_ASYNC |
 					CRYPTO_ALG_KERN_DRIVER_ONLY,
 				.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
 				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
@@ -1185,8 +1201,7 @@
 				.cra_name = "sha1",
 				.cra_driver_name = "stm32-sha1",
 				.cra_priority = 200,
-				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
-					CRYPTO_ALG_ASYNC |
+				.cra_flags = CRYPTO_ALG_ASYNC |
 					CRYPTO_ALG_KERN_DRIVER_ONLY,
 				.cra_blocksize = SHA1_BLOCK_SIZE,
 				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
@@ -1212,8 +1227,7 @@
 				.cra_name = "hmac(sha1)",
 				.cra_driver_name = "stm32-hmac-sha1",
 				.cra_priority = 200,
-				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
-					CRYPTO_ALG_ASYNC |
+				.cra_flags = CRYPTO_ALG_ASYNC |
 					CRYPTO_ALG_KERN_DRIVER_ONLY,
 				.cra_blocksize = SHA1_BLOCK_SIZE,
 				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
@@ -1241,8 +1255,7 @@
 				.cra_name = "sha224",
 				.cra_driver_name = "stm32-sha224",
 				.cra_priority = 200,
-				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
-					CRYPTO_ALG_ASYNC |
+				.cra_flags = CRYPTO_ALG_ASYNC |
 					CRYPTO_ALG_KERN_DRIVER_ONLY,
 				.cra_blocksize = SHA224_BLOCK_SIZE,
 				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
@@ -1268,8 +1281,7 @@
 				.cra_name = "hmac(sha224)",
 				.cra_driver_name = "stm32-hmac-sha224",
 				.cra_priority = 200,
-				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
-					CRYPTO_ALG_ASYNC |
+				.cra_flags = CRYPTO_ALG_ASYNC |
 					CRYPTO_ALG_KERN_DRIVER_ONLY,
 				.cra_blocksize = SHA224_BLOCK_SIZE,
 				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
@@ -1294,8 +1306,7 @@
 				.cra_name = "sha256",
 				.cra_driver_name = "stm32-sha256",
 				.cra_priority = 200,
-				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
-					CRYPTO_ALG_ASYNC |
+				.cra_flags = CRYPTO_ALG_ASYNC |
 					CRYPTO_ALG_KERN_DRIVER_ONLY,
 				.cra_blocksize = SHA256_BLOCK_SIZE,
 				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
@@ -1321,8 +1332,7 @@
 				.cra_name = "hmac(sha256)",
 				.cra_driver_name = "stm32-hmac-sha256",
 				.cra_priority = 200,
-				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
-					CRYPTO_ALG_ASYNC |
+				.cra_flags = CRYPTO_ALG_ASYNC |
 					CRYPTO_ALG_KERN_DRIVER_ONLY,
 				.cra_blocksize = SHA256_BLOCK_SIZE,
 				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
@@ -1482,6 +1492,13 @@
 		return ret;
 	}
 
+	pm_runtime_set_autosuspend_delay(dev, HASH_AUTOSUSPEND_DELAY);
+	pm_runtime_use_autosuspend(dev);
+
+	pm_runtime_get_noresume(dev);
+	pm_runtime_set_active(dev);
+	pm_runtime_enable(dev);
+
 	hdev->rst = devm_reset_control_get(&pdev->dev, NULL);
 	if (!IS_ERR(hdev->rst)) {
 		reset_control_assert(hdev->rst);
@@ -1522,6 +1539,8 @@
 	dev_info(dev, "Init HASH done HW ver %x DMA mode %u\n",
 		 stm32_hash_read(hdev, HASH_VER), hdev->dma_mode);
 
+	pm_runtime_put_sync(dev);
+
 	return 0;
 
 err_algs:
@@ -1535,6 +1554,9 @@
 	if (hdev->dma_lch)
 		dma_release_channel(hdev->dma_lch);
 
+	pm_runtime_disable(dev);
+	pm_runtime_put_noidle(dev);
+
 	clk_disable_unprepare(hdev->clk);
 
 	return ret;
@@ -1543,11 +1565,16 @@
 static int stm32_hash_remove(struct platform_device *pdev)
 {
 	static struct stm32_hash_dev *hdev;
+	int ret;
 
 	hdev = platform_get_drvdata(pdev);
 	if (!hdev)
 		return -ENODEV;
 
+	ret = pm_runtime_get_sync(hdev->dev);
+	if (ret < 0)
+		return ret;
+
 	stm32_hash_unregister_algs(hdev);
 
 	crypto_engine_exit(hdev->engine);
@@ -1559,16 +1586,52 @@
 	if (hdev->dma_lch)
 		dma_release_channel(hdev->dma_lch);
 
+	pm_runtime_disable(hdev->dev);
+	pm_runtime_put_noidle(hdev->dev);
+
 	clk_disable_unprepare(hdev->clk);
 
 	return 0;
 }
 
+#ifdef CONFIG_PM
+static int stm32_hash_runtime_suspend(struct device *dev)
+{
+	struct stm32_hash_dev *hdev = dev_get_drvdata(dev);
+
+	clk_disable_unprepare(hdev->clk);
+
+	return 0;
+}
+
+static int stm32_hash_runtime_resume(struct device *dev)
+{
+	struct stm32_hash_dev *hdev = dev_get_drvdata(dev);
+	int ret;
+
+	ret = clk_prepare_enable(hdev->clk);
+	if (ret) {
+		dev_err(hdev->dev, "Failed to prepare_enable clock\n");
+		return ret;
+	}
+
+	return 0;
+}
+#endif
+
+static const struct dev_pm_ops stm32_hash_pm_ops = {
+	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+				pm_runtime_force_resume)
+	SET_RUNTIME_PM_OPS(stm32_hash_runtime_suspend,
+			   stm32_hash_runtime_resume, NULL)
+};
+
 static struct platform_driver stm32_hash_driver = {
 	.probe		= stm32_hash_probe,
 	.remove		= stm32_hash_remove,
 	.driver		= {
 		.name	= "stm32-hash",
+		.pm = &stm32_hash_pm_ops,
 		.of_match_table	= stm32_hash_of_match,
 	}
 };
diff --git a/drivers/crypto/stm32/stm32_crc32.c b/drivers/crypto/stm32/stm32_crc32.c
index 8f09b84..5f3242a 100644
--- a/drivers/crypto/stm32/stm32_crc32.c
+++ b/drivers/crypto/stm32/stm32_crc32.c
@@ -6,8 +6,10 @@
 
 #include <linux/bitrev.h>
 #include <linux/clk.h>
+#include <linux/crc32poly.h>
 #include <linux/module.h>
 #include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
 
 #include <crypto/internal/hash.h>
 
@@ -28,9 +30,7 @@
 #define CRC_CR_REVERSE          (BIT(7) | BIT(6) | BIT(5))
 #define CRC_INIT_DEFAULT        0xFFFFFFFF
 
-/* Polynomial reversed */
-#define POLY_CRC32              0xEDB88320
-#define POLY_CRC32C             0x82F63B78
+#define CRC_AUTOSUSPEND_DELAY	50
 
 struct stm32_crc {
 	struct list_head list;
@@ -66,7 +66,7 @@
 	struct stm32_crc_ctx *mctx = crypto_tfm_ctx(tfm);
 
 	mctx->key = CRC_INIT_DEFAULT;
-	mctx->poly = POLY_CRC32;
+	mctx->poly = CRC32_POLY_LE;
 	return 0;
 }
 
@@ -75,7 +75,7 @@
 	struct stm32_crc_ctx *mctx = crypto_tfm_ctx(tfm);
 
 	mctx->key = CRC_INIT_DEFAULT;
-	mctx->poly = POLY_CRC32C;
+	mctx->poly = CRC32C_POLY_LE;
 	return 0;
 }
 
@@ -106,6 +106,8 @@
 	}
 	spin_unlock_bh(&crc_list.lock);
 
+	pm_runtime_get_sync(ctx->crc->dev);
+
 	/* Reset, set key, poly and configure in bit reverse mode */
 	writel_relaxed(bitrev32(mctx->key), ctx->crc->regs + CRC_INIT);
 	writel_relaxed(bitrev32(mctx->poly), ctx->crc->regs + CRC_POL);
@@ -115,6 +117,9 @@
 	ctx->partial = readl_relaxed(ctx->crc->regs + CRC_DR);
 	ctx->crc->nb_pending_bytes = 0;
 
+	pm_runtime_mark_last_busy(ctx->crc->dev);
+	pm_runtime_put_autosuspend(ctx->crc->dev);
+
 	return 0;
 }
 
@@ -126,6 +131,8 @@
 	u32 *d32;
 	unsigned int i;
 
+	pm_runtime_get_sync(crc->dev);
+
 	if (unlikely(crc->nb_pending_bytes)) {
 		while (crc->nb_pending_bytes != sizeof(u32) && length) {
 			/* Fill in pending data */
@@ -149,6 +156,9 @@
 	/* Store partial result */
 	ctx->partial = readl_relaxed(crc->regs + CRC_DR);
 
+	pm_runtime_mark_last_busy(crc->dev);
+	pm_runtime_put_autosuspend(crc->dev);
+
 	/* Check for pending data (non 32 bits) */
 	length &= 3;
 	if (likely(!length))
@@ -174,7 +184,7 @@
 	struct stm32_crc_ctx *mctx = crypto_shash_ctx(desc->tfm);
 
 	/* Send computed CRC */
-	put_unaligned_le32(mctx->poly == POLY_CRC32C ?
+	put_unaligned_le32(mctx->poly == CRC32C_POLY_LE ?
 			   ~ctx->partial : ctx->partial, out);
 
 	return 0;
@@ -272,6 +282,13 @@
 		return ret;
 	}
 
+	pm_runtime_set_autosuspend_delay(dev, CRC_AUTOSUSPEND_DELAY);
+	pm_runtime_use_autosuspend(dev);
+
+	pm_runtime_get_noresume(dev);
+	pm_runtime_set_active(dev);
+	pm_runtime_enable(dev);
+
 	platform_set_drvdata(pdev, crc);
 
 	spin_lock(&crc_list.lock);
@@ -287,12 +304,18 @@
 
 	dev_info(dev, "Initialized\n");
 
+	pm_runtime_put_sync(dev);
+
 	return 0;
 }
 
 static int stm32_crc_remove(struct platform_device *pdev)
 {
 	struct stm32_crc *crc = platform_get_drvdata(pdev);
+	int ret = pm_runtime_get_sync(crc->dev);
+
+	if (ret < 0)
+		return ret;
 
 	spin_lock(&crc_list.lock);
 	list_del(&crc->list);
@@ -300,11 +323,46 @@
 
 	crypto_unregister_shashes(algs, ARRAY_SIZE(algs));
 
+	pm_runtime_disable(crc->dev);
+	pm_runtime_put_noidle(crc->dev);
+
 	clk_disable_unprepare(crc->clk);
 
 	return 0;
 }
 
+#ifdef CONFIG_PM
+static int stm32_crc_runtime_suspend(struct device *dev)
+{
+	struct stm32_crc *crc = dev_get_drvdata(dev);
+
+	clk_disable_unprepare(crc->clk);
+
+	return 0;
+}
+
+static int stm32_crc_runtime_resume(struct device *dev)
+{
+	struct stm32_crc *crc = dev_get_drvdata(dev);
+	int ret;
+
+	ret = clk_prepare_enable(crc->clk);
+	if (ret) {
+		dev_err(crc->dev, "Failed to prepare_enable clock\n");
+		return ret;
+	}
+
+	return 0;
+}
+#endif
+
+static const struct dev_pm_ops stm32_crc_pm_ops = {
+	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+				pm_runtime_force_resume)
+	SET_RUNTIME_PM_OPS(stm32_crc_runtime_suspend,
+			   stm32_crc_runtime_resume, NULL)
+};
+
 static const struct of_device_id stm32_dt_ids[] = {
 	{ .compatible = "st,stm32f7-crc", },
 	{},
@@ -316,6 +374,7 @@
 	.remove = stm32_crc_remove,
 	.driver = {
 		.name           = DRIVER_NAME,
+		.pm		= &stm32_crc_pm_ops,
 		.of_match_table = stm32_dt_ids,
 	},
 };
diff --git a/drivers/crypto/sunxi-ss/sun4i-ss-core.c b/drivers/crypto/sunxi-ss/sun4i-ss-core.c
index a81d89b..89adf9e 100644
--- a/drivers/crypto/sunxi-ss/sun4i-ss-core.c
+++ b/drivers/crypto/sunxi-ss/sun4i-ss-core.c
@@ -45,11 +45,9 @@
 				.cra_driver_name = "md5-sun4i-ss",
 				.cra_priority = 300,
 				.cra_alignmask = 3,
-				.cra_flags = CRYPTO_ALG_TYPE_AHASH,
 				.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
 				.cra_ctxsize = sizeof(struct sun4i_req_ctx),
 				.cra_module = THIS_MODULE,
-				.cra_type = &crypto_ahash_type,
 				.cra_init = sun4i_hash_crainit
 			}
 		}
@@ -73,11 +71,9 @@
 				.cra_driver_name = "sha1-sun4i-ss",
 				.cra_priority = 300,
 				.cra_alignmask = 3,
-				.cra_flags = CRYPTO_ALG_TYPE_AHASH,
 				.cra_blocksize = SHA1_BLOCK_SIZE,
 				.cra_ctxsize = sizeof(struct sun4i_req_ctx),
 				.cra_module = THIS_MODULE,
-				.cra_type = &crypto_ahash_type,
 				.cra_init = sun4i_hash_crainit
 			}
 		}
@@ -96,8 +92,7 @@
 			.cra_driver_name = "cbc-aes-sun4i-ss",
 			.cra_priority = 300,
 			.cra_blocksize = AES_BLOCK_SIZE,
-			.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
-				     CRYPTO_ALG_KERN_DRIVER_ONLY,
+			.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY,
 			.cra_ctxsize = sizeof(struct sun4i_tfm_ctx),
 			.cra_module = THIS_MODULE,
 			.cra_alignmask = 3,
@@ -118,8 +113,7 @@
 			.cra_driver_name = "ecb-aes-sun4i-ss",
 			.cra_priority = 300,
 			.cra_blocksize = AES_BLOCK_SIZE,
-			.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
-				     CRYPTO_ALG_KERN_DRIVER_ONLY,
+			.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY,
 			.cra_ctxsize = sizeof(struct sun4i_tfm_ctx),
 			.cra_module = THIS_MODULE,
 			.cra_alignmask = 3,
@@ -140,8 +134,7 @@
 			.cra_driver_name = "cbc-des-sun4i-ss",
 			.cra_priority = 300,
 			.cra_blocksize = DES_BLOCK_SIZE,
-			.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
-				     CRYPTO_ALG_KERN_DRIVER_ONLY,
+			.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY,
 			.cra_ctxsize = sizeof(struct sun4i_req_ctx),
 			.cra_module = THIS_MODULE,
 			.cra_alignmask = 3,
@@ -161,8 +154,7 @@
 			.cra_driver_name = "ecb-des-sun4i-ss",
 			.cra_priority = 300,
 			.cra_blocksize = DES_BLOCK_SIZE,
-			.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
-				     CRYPTO_ALG_KERN_DRIVER_ONLY,
+			.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY,
 			.cra_ctxsize = sizeof(struct sun4i_req_ctx),
 			.cra_module = THIS_MODULE,
 			.cra_alignmask = 3,
@@ -183,8 +175,7 @@
 			.cra_driver_name = "cbc-des3-sun4i-ss",
 			.cra_priority = 300,
 			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
-			.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
-				     CRYPTO_ALG_KERN_DRIVER_ONLY,
+			.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY,
 			.cra_ctxsize = sizeof(struct sun4i_req_ctx),
 			.cra_module = THIS_MODULE,
 			.cra_alignmask = 3,
@@ -205,7 +196,6 @@
 			.cra_driver_name = "ecb-des3-sun4i-ss",
 			.cra_priority = 300,
 			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
-			.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER,
 			.cra_ctxsize = sizeof(struct sun4i_req_ctx),
 			.cra_module = THIS_MODULE,
 			.cra_alignmask = 3,
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index cf14f09..6988012 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -2822,8 +2822,7 @@
 				.cra_name = "md5",
 				.cra_driver_name = "md5-talitos",
 				.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
-				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
-					     CRYPTO_ALG_ASYNC,
+				.cra_flags = CRYPTO_ALG_ASYNC,
 			}
 		},
 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
@@ -2838,8 +2837,7 @@
 				.cra_name = "sha1",
 				.cra_driver_name = "sha1-talitos",
 				.cra_blocksize = SHA1_BLOCK_SIZE,
-				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
-					     CRYPTO_ALG_ASYNC,
+				.cra_flags = CRYPTO_ALG_ASYNC,
 			}
 		},
 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
@@ -2854,8 +2852,7 @@
 				.cra_name = "sha224",
 				.cra_driver_name = "sha224-talitos",
 				.cra_blocksize = SHA224_BLOCK_SIZE,
-				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
-					     CRYPTO_ALG_ASYNC,
+				.cra_flags = CRYPTO_ALG_ASYNC,
 			}
 		},
 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
@@ -2870,8 +2867,7 @@
 				.cra_name = "sha256",
 				.cra_driver_name = "sha256-talitos",
 				.cra_blocksize = SHA256_BLOCK_SIZE,
-				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
-					     CRYPTO_ALG_ASYNC,
+				.cra_flags = CRYPTO_ALG_ASYNC,
 			}
 		},
 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
@@ -2886,8 +2882,7 @@
 				.cra_name = "sha384",
 				.cra_driver_name = "sha384-talitos",
 				.cra_blocksize = SHA384_BLOCK_SIZE,
-				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
-					     CRYPTO_ALG_ASYNC,
+				.cra_flags = CRYPTO_ALG_ASYNC,
 			}
 		},
 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
@@ -2902,8 +2897,7 @@
 				.cra_name = "sha512",
 				.cra_driver_name = "sha512-talitos",
 				.cra_blocksize = SHA512_BLOCK_SIZE,
-				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
-					     CRYPTO_ALG_ASYNC,
+				.cra_flags = CRYPTO_ALG_ASYNC,
 			}
 		},
 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
@@ -2918,8 +2912,7 @@
 				.cra_name = "hmac(md5)",
 				.cra_driver_name = "hmac-md5-talitos",
 				.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
-				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
-					     CRYPTO_ALG_ASYNC,
+				.cra_flags = CRYPTO_ALG_ASYNC,
 			}
 		},
 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
@@ -2934,8 +2927,7 @@
 				.cra_name = "hmac(sha1)",
 				.cra_driver_name = "hmac-sha1-talitos",
 				.cra_blocksize = SHA1_BLOCK_SIZE,
-				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
-					     CRYPTO_ALG_ASYNC,
+				.cra_flags = CRYPTO_ALG_ASYNC,
 			}
 		},
 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
@@ -2950,8 +2942,7 @@
 				.cra_name = "hmac(sha224)",
 				.cra_driver_name = "hmac-sha224-talitos",
 				.cra_blocksize = SHA224_BLOCK_SIZE,
-				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
-					     CRYPTO_ALG_ASYNC,
+				.cra_flags = CRYPTO_ALG_ASYNC,
 			}
 		},
 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
@@ -2966,8 +2957,7 @@
 				.cra_name = "hmac(sha256)",
 				.cra_driver_name = "hmac-sha256-talitos",
 				.cra_blocksize = SHA256_BLOCK_SIZE,
-				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
-					     CRYPTO_ALG_ASYNC,
+				.cra_flags = CRYPTO_ALG_ASYNC,
 			}
 		},
 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
@@ -2982,8 +2972,7 @@
 				.cra_name = "hmac(sha384)",
 				.cra_driver_name = "hmac-sha384-talitos",
 				.cra_blocksize = SHA384_BLOCK_SIZE,
-				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
-					     CRYPTO_ALG_ASYNC,
+				.cra_flags = CRYPTO_ALG_ASYNC,
 			}
 		},
 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
@@ -2998,8 +2987,7 @@
 				.cra_name = "hmac(sha512)",
 				.cra_driver_name = "hmac-sha512-talitos",
 				.cra_blocksize = SHA512_BLOCK_SIZE,
-				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
-					     CRYPTO_ALG_ASYNC,
+				.cra_flags = CRYPTO_ALG_ASYNC,
 			}
 		},
 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
@@ -3186,7 +3174,6 @@
 		alg = &t_alg->algt.alg.hash.halg.base;
 		alg->cra_init = talitos_cra_init_ahash;
 		alg->cra_exit = talitos_cra_exit;
-		alg->cra_type = &crypto_ahash_type;
 		t_alg->algt.alg.hash.init = ahash_init;
 		t_alg->algt.alg.hash.update = ahash_update;
 		t_alg->algt.alg.hash.final = ahash_final;
diff --git a/drivers/crypto/ux500/hash/hash_core.c b/drivers/crypto/ux500/hash/hash_core.c
index 2d0a677..daf4fed 100644
--- a/drivers/crypto/ux500/hash/hash_core.c
+++ b/drivers/crypto/ux500/hash/hash_core.c
@@ -1524,8 +1524,7 @@
 			.halg.base = {
 				.cra_name = "sha1",
 				.cra_driver_name = "sha1-ux500",
-				.cra_flags = (CRYPTO_ALG_TYPE_AHASH |
-					      CRYPTO_ALG_ASYNC),
+				.cra_flags = CRYPTO_ALG_ASYNC,
 				.cra_blocksize = SHA1_BLOCK_SIZE,
 				.cra_ctxsize = sizeof(struct hash_ctx),
 				.cra_init = hash_cra_init,
@@ -1548,11 +1547,9 @@
 			.halg.base = {
 				.cra_name = "sha256",
 				.cra_driver_name = "sha256-ux500",
-				.cra_flags = (CRYPTO_ALG_TYPE_AHASH |
-					      CRYPTO_ALG_ASYNC),
+				.cra_flags = CRYPTO_ALG_ASYNC,
 				.cra_blocksize = SHA256_BLOCK_SIZE,
 				.cra_ctxsize = sizeof(struct hash_ctx),
-				.cra_type = &crypto_ahash_type,
 				.cra_init = hash_cra_init,
 				.cra_module = THIS_MODULE,
 			}
@@ -1574,11 +1571,9 @@
 			.halg.base = {
 				.cra_name = "hmac(sha1)",
 				.cra_driver_name = "hmac-sha1-ux500",
-				.cra_flags = (CRYPTO_ALG_TYPE_AHASH |
-					      CRYPTO_ALG_ASYNC),
+				.cra_flags = CRYPTO_ALG_ASYNC,
 				.cra_blocksize = SHA1_BLOCK_SIZE,
 				.cra_ctxsize = sizeof(struct hash_ctx),
-				.cra_type = &crypto_ahash_type,
 				.cra_init = hash_cra_init,
 				.cra_module = THIS_MODULE,
 			}
@@ -1600,11 +1595,9 @@
 			.halg.base = {
 				.cra_name = "hmac(sha256)",
 				.cra_driver_name = "hmac-sha256-ux500",
-				.cra_flags = (CRYPTO_ALG_TYPE_AHASH |
-					      CRYPTO_ALG_ASYNC),
+				.cra_flags = CRYPTO_ALG_ASYNC,
 				.cra_blocksize = SHA256_BLOCK_SIZE,
 				.cra_ctxsize = sizeof(struct hash_ctx),
-				.cra_type = &crypto_ahash_type,
 				.cra_init = hash_cra_init,
 				.cra_module = THIS_MODULE,
 			}
diff --git a/drivers/crypto/virtio/virtio_crypto_algs.c b/drivers/crypto/virtio/virtio_crypto_algs.c
index af6a908..7a104f6 100644
--- a/drivers/crypto/virtio/virtio_crypto_algs.c
+++ b/drivers/crypto/virtio/virtio_crypto_algs.c
@@ -49,12 +49,18 @@
 	bool encrypt;
 };
 
+struct virtio_crypto_algo {
+	uint32_t algonum;
+	uint32_t service;
+	unsigned int active_devs;
+	struct crypto_alg algo;
+};
+
 /*
  * The algs_lock protects the per-algorithm active_devs counts below
  * and crypto algorithms registration.
  */
 static DEFINE_MUTEX(algs_lock);
-static unsigned int virtio_crypto_active_devs;
 static void virtio_crypto_ablkcipher_finalize_req(
 	struct virtio_crypto_sym_request *vc_sym_req,
 	struct ablkcipher_request *req,
@@ -312,15 +318,21 @@
 					 unsigned int keylen)
 {
 	struct virtio_crypto_ablkcipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
+	uint32_t alg;
 	int ret;
 
+	ret = virtio_crypto_alg_validate_key(keylen, &alg);
+	if (ret)
+		return ret;
+
 	if (!ctx->vcrypto) {
 		/* New key */
 		int node = virtio_crypto_get_current_node();
 		struct virtio_crypto *vcrypto =
-				      virtcrypto_get_dev_node(node);
+				      virtcrypto_get_dev_node(node,
+				      VIRTIO_CRYPTO_SERVICE_CIPHER, alg);
 		if (!vcrypto) {
-			pr_err("virtio_crypto: Could not find a virtio device in the system\n");
+			pr_err("virtio_crypto: Could not find a virtio device in the system that supports the requested algorithm\n");
 			return -ENODEV;
 		}
 
@@ -571,57 +583,85 @@
 	virtcrypto_clear_request(&vc_sym_req->base);
 }
 
-static struct crypto_alg virtio_crypto_algs[] = { {
-	.cra_name = "cbc(aes)",
-	.cra_driver_name = "virtio_crypto_aes_cbc",
-	.cra_priority = 150,
-	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
-	.cra_blocksize = AES_BLOCK_SIZE,
-	.cra_ctxsize  = sizeof(struct virtio_crypto_ablkcipher_ctx),
-	.cra_alignmask = 0,
-	.cra_module = THIS_MODULE,
-	.cra_type = &crypto_ablkcipher_type,
-	.cra_init = virtio_crypto_ablkcipher_init,
-	.cra_exit = virtio_crypto_ablkcipher_exit,
-	.cra_u = {
-	   .ablkcipher = {
-			.setkey = virtio_crypto_ablkcipher_setkey,
-			.decrypt = virtio_crypto_ablkcipher_decrypt,
-			.encrypt = virtio_crypto_ablkcipher_encrypt,
-			.min_keysize = AES_MIN_KEY_SIZE,
-			.max_keysize = AES_MAX_KEY_SIZE,
-			.ivsize = AES_BLOCK_SIZE,
+static struct virtio_crypto_algo virtio_crypto_algs[] = { {
+	.algonum = VIRTIO_CRYPTO_CIPHER_AES_CBC,
+	.service = VIRTIO_CRYPTO_SERVICE_CIPHER,
+	.algo = {
+		.cra_name = "cbc(aes)",
+		.cra_driver_name = "virtio_crypto_aes_cbc",
+		.cra_priority = 150,
+		.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+		.cra_blocksize = AES_BLOCK_SIZE,
+		.cra_ctxsize  = sizeof(struct virtio_crypto_ablkcipher_ctx),
+		.cra_alignmask = 0,
+		.cra_module = THIS_MODULE,
+		.cra_type = &crypto_ablkcipher_type,
+		.cra_init = virtio_crypto_ablkcipher_init,
+		.cra_exit = virtio_crypto_ablkcipher_exit,
+		.cra_u = {
+			.ablkcipher = {
+				.setkey = virtio_crypto_ablkcipher_setkey,
+				.decrypt = virtio_crypto_ablkcipher_decrypt,
+				.encrypt = virtio_crypto_ablkcipher_encrypt,
+				.min_keysize = AES_MIN_KEY_SIZE,
+				.max_keysize = AES_MAX_KEY_SIZE,
+				.ivsize = AES_BLOCK_SIZE,
+			},
 		},
 	},
 } };
 
-int virtio_crypto_algs_register(void)
+int virtio_crypto_algs_register(struct virtio_crypto *vcrypto)
 {
 	int ret = 0;
+	int i = 0;
 
 	mutex_lock(&algs_lock);
-	if (++virtio_crypto_active_devs != 1)
-		goto unlock;
 
-	ret = crypto_register_algs(virtio_crypto_algs,
-			ARRAY_SIZE(virtio_crypto_algs));
-	if (ret)
-		virtio_crypto_active_devs--;
+	for (i = 0; i < ARRAY_SIZE(virtio_crypto_algs); i++) {
+
+		uint32_t service = virtio_crypto_algs[i].service;
+		uint32_t algonum = virtio_crypto_algs[i].algonum;
+
+		if (!virtcrypto_algo_is_supported(vcrypto, service, algonum))
+			continue;
+
+		if (virtio_crypto_algs[i].active_devs == 0) {
+			ret = crypto_register_alg(&virtio_crypto_algs[i].algo);
+			if (ret)
+				goto unlock;
+		}
+
+		virtio_crypto_algs[i].active_devs++;
+		dev_info(&vcrypto->vdev->dev, "Registered algo %s\n",
+			 virtio_crypto_algs[i].algo.cra_name);
+	}
 
 unlock:
 	mutex_unlock(&algs_lock);
 	return ret;
 }
 
-void virtio_crypto_algs_unregister(void)
+void virtio_crypto_algs_unregister(struct virtio_crypto *vcrypto)
 {
+	int i = 0;
+
 	mutex_lock(&algs_lock);
-	if (--virtio_crypto_active_devs != 0)
-		goto unlock;
 
-	crypto_unregister_algs(virtio_crypto_algs,
-			ARRAY_SIZE(virtio_crypto_algs));
+	for (i = 0; i < ARRAY_SIZE(virtio_crypto_algs); i++) {
 
-unlock:
+		uint32_t service = virtio_crypto_algs[i].service;
+		uint32_t algonum = virtio_crypto_algs[i].algonum;
+
+		if (virtio_crypto_algs[i].active_devs == 0 ||
+		    !virtcrypto_algo_is_supported(vcrypto, service, algonum))
+			continue;
+
+		if (virtio_crypto_algs[i].active_devs == 1)
+			crypto_unregister_alg(&virtio_crypto_algs[i].algo);
+
+		virtio_crypto_algs[i].active_devs--;
+	}
+
 	mutex_unlock(&algs_lock);
 }
diff --git a/drivers/crypto/virtio/virtio_crypto_common.h b/drivers/crypto/virtio/virtio_crypto_common.h
index 66501a5..63ef7f7 100644
--- a/drivers/crypto/virtio/virtio_crypto_common.h
+++ b/drivers/crypto/virtio/virtio_crypto_common.h
@@ -55,6 +55,20 @@
 	/* Number of queue currently used by the driver */
 	u32 curr_queue;
 
+	/*
+	 * Specifies the mask of services that the device supports,
+	 * see VIRTIO_CRYPTO_SERVICE_*
+	 */
+	u32 crypto_services;
+
+	/* Detailed algorithms mask */
+	u32 cipher_algo_l;
+	u32 cipher_algo_h;
+	u32 hash_algo;
+	u32 mac_algo_l;
+	u32 mac_algo_h;
+	u32 aead_algo;
+
 	/* Maximum length of cipher key */
 	u32 max_cipher_key_len;
 	/* Maximum length of authenticated key */
@@ -102,7 +116,12 @@
 int virtcrypto_dev_get(struct virtio_crypto *vcrypto_dev);
 void virtcrypto_dev_put(struct virtio_crypto *vcrypto_dev);
 int virtcrypto_dev_started(struct virtio_crypto *vcrypto_dev);
-struct virtio_crypto *virtcrypto_get_dev_node(int node);
+bool virtcrypto_algo_is_supported(struct virtio_crypto *vcrypto_dev,
+				  uint32_t service,
+				  uint32_t algo);
+struct virtio_crypto *virtcrypto_get_dev_node(int node,
+					      uint32_t service,
+					      uint32_t algo);
 int virtcrypto_dev_start(struct virtio_crypto *vcrypto);
 void virtcrypto_dev_stop(struct virtio_crypto *vcrypto);
 int virtio_crypto_ablkcipher_crypt_req(
@@ -122,7 +141,7 @@
 	return node;
 }
 
-int virtio_crypto_algs_register(void);
-void virtio_crypto_algs_unregister(void);
+int virtio_crypto_algs_register(struct virtio_crypto *vcrypto);
+void virtio_crypto_algs_unregister(struct virtio_crypto *vcrypto);
 
 #endif /* _VIRTIO_CRYPTO_COMMON_H */
diff --git a/drivers/crypto/virtio/virtio_crypto_core.c b/drivers/crypto/virtio/virtio_crypto_core.c
index 8332698..8f745f2 100644
--- a/drivers/crypto/virtio/virtio_crypto_core.c
+++ b/drivers/crypto/virtio/virtio_crypto_core.c
@@ -303,6 +303,13 @@
 	u32 max_data_queues = 0, max_cipher_key_len = 0;
 	u32 max_auth_key_len = 0;
 	u64 max_size = 0;
+	u32 cipher_algo_l = 0;
+	u32 cipher_algo_h = 0;
+	u32 hash_algo = 0;
+	u32 mac_algo_l = 0;
+	u32 mac_algo_h = 0;
+	u32 aead_algo = 0;
+	u32 crypto_services = 0;
 
 	if (!virtio_has_feature(vdev, VIRTIO_F_VERSION_1))
 		return -ENODEV;
@@ -339,6 +346,20 @@
 		max_auth_key_len, &max_auth_key_len);
 	virtio_cread(vdev, struct virtio_crypto_config,
 		max_size, &max_size);
+	virtio_cread(vdev, struct virtio_crypto_config,
+		crypto_services, &crypto_services);
+	virtio_cread(vdev, struct virtio_crypto_config,
+		cipher_algo_l, &cipher_algo_l);
+	virtio_cread(vdev, struct virtio_crypto_config,
+		cipher_algo_h, &cipher_algo_h);
+	virtio_cread(vdev, struct virtio_crypto_config,
+		hash_algo, &hash_algo);
+	virtio_cread(vdev, struct virtio_crypto_config,
+		mac_algo_l, &mac_algo_l);
+	virtio_cread(vdev, struct virtio_crypto_config,
+		mac_algo_h, &mac_algo_h);
+	virtio_cread(vdev, struct virtio_crypto_config,
+		aead_algo, &aead_algo);
 
 	/* Add virtio crypto device to global table */
 	err = virtcrypto_devmgr_add_dev(vcrypto);
@@ -358,6 +379,14 @@
 	vcrypto->max_cipher_key_len = max_cipher_key_len;
 	vcrypto->max_auth_key_len = max_auth_key_len;
 	vcrypto->max_size = max_size;
+	vcrypto->crypto_services = crypto_services;
+	vcrypto->cipher_algo_l = cipher_algo_l;
+	vcrypto->cipher_algo_h = cipher_algo_h;
+	vcrypto->mac_algo_l = mac_algo_l;
+	vcrypto->mac_algo_h = mac_algo_h;
+	vcrypto->hash_algo = hash_algo;
+	vcrypto->aead_algo = aead_algo;
 
 	dev_info(&vdev->dev,
 		"max_queues: %u, max_cipher_key_len: %u, max_auth_key_len: %u, max_size 0x%llx\n",
diff --git a/drivers/crypto/virtio/virtio_crypto_mgr.c b/drivers/crypto/virtio/virtio_crypto_mgr.c
index a69ff71..d70de3a 100644
--- a/drivers/crypto/virtio/virtio_crypto_mgr.c
+++ b/drivers/crypto/virtio/virtio_crypto_mgr.c
@@ -181,14 +181,20 @@
 /*
  * virtcrypto_get_dev_node() - Get vcrypto_dev on the node.
  * @node:  Node id the driver works.
+ * @service: Crypto service that needs to be supported by the dev
+ * @algo: The algorithm number that needs to be supported by the dev
  *
- * Function returns the virtio crypto device used fewest on the node.
+ * Function returns the least-used virtio crypto device on the node
+ * that supports the given crypto service and algorithm.
  *
  * To be used by virtio crypto device specific drivers.
  *
  * Return: pointer to vcrypto_dev or NULL if not found.
  */
-struct virtio_crypto *virtcrypto_get_dev_node(int node)
+struct virtio_crypto *virtcrypto_get_dev_node(int node, uint32_t service,
+					      uint32_t algo)
 {
 	struct virtio_crypto *vcrypto_dev = NULL, *tmp_dev;
 	unsigned long best = ~0;
@@ -199,7 +205,8 @@
 
 		if ((node == dev_to_node(&tmp_dev->vdev->dev) ||
 		     dev_to_node(&tmp_dev->vdev->dev) < 0) &&
-		    virtcrypto_dev_started(tmp_dev)) {
+		    virtcrypto_dev_started(tmp_dev) &&
+		    virtcrypto_algo_is_supported(tmp_dev, service, algo)) {
 			ctr = atomic_read(&tmp_dev->ref_count);
 			if (best > ctr) {
 				vcrypto_dev = tmp_dev;
@@ -214,7 +221,9 @@
 		/* Get any started device */
 		list_for_each_entry(tmp_dev,
 				virtcrypto_devmgr_get_head(), list) {
-			if (virtcrypto_dev_started(tmp_dev)) {
+			if (virtcrypto_dev_started(tmp_dev) &&
+			    virtcrypto_algo_is_supported(tmp_dev,
+			    service, algo)) {
 				vcrypto_dev = tmp_dev;
 				break;
 			}
@@ -240,7 +249,7 @@
  */
 int virtcrypto_dev_start(struct virtio_crypto *vcrypto)
 {
-	if (virtio_crypto_algs_register()) {
+	if (virtio_crypto_algs_register(vcrypto)) {
 		pr_err("virtio_crypto: Failed to register crypto algs\n");
 		return -EFAULT;
 	}
@@ -260,5 +269,65 @@
  */
 void virtcrypto_dev_stop(struct virtio_crypto *vcrypto)
 {
-	virtio_crypto_algs_unregister();
+	virtio_crypto_algs_unregister(vcrypto);
+}
+
+/*
+ * virtcrypto_algo_is_supported()
+ * @vcrypto: Pointer to virtio crypto device.
+ * @service: The bit number of the service to validate.
+ *	     See VIRTIO_CRYPTO_SERVICE_*
+ * @algo: The bit number of the algorithm to validate.
+ *
+ * Validate whether the virtio crypto device supports a service and
+ * algorithm.
+ *
+ * Return: true if the device supports the service and algorithm.
+ */
+bool virtcrypto_algo_is_supported(struct virtio_crypto *vcrypto,
+				  uint32_t service,
+				  uint32_t algo)
+{
+	uint32_t service_mask = 1u << service;
+	uint32_t algo_mask = 0;
+	bool low = true;
+
+	if (algo > 31) {
+		algo -= 32;
+		low = false;
+	}
+
+	if (!(vcrypto->crypto_services & service_mask))
+		return false;
+
+	switch (service) {
+	case VIRTIO_CRYPTO_SERVICE_CIPHER:
+		if (low)
+			algo_mask = vcrypto->cipher_algo_l;
+		else
+			algo_mask = vcrypto->cipher_algo_h;
+		break;
+
+	case VIRTIO_CRYPTO_SERVICE_HASH:
+		algo_mask = vcrypto->hash_algo;
+		break;
+
+	case VIRTIO_CRYPTO_SERVICE_MAC:
+		if (low)
+			algo_mask = vcrypto->mac_algo_l;
+		else
+			algo_mask = vcrypto->mac_algo_h;
+		break;
+
+	case VIRTIO_CRYPTO_SERVICE_AEAD:
+		algo_mask = vcrypto->aead_algo;
+		break;
+	}
+
+	if (!(algo_mask & (1u << algo)))
+		return false;
+
+	return true;
 }
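A short caller-side sketch of the new capability check, mirroring what
the ablkcipher setkey path above does (hypothetical helper; the
VIRTIO_CRYPTO_* constants come from the virtio_crypto uapi header):

	#include <linux/errno.h>

	static int example_check_aes_cbc(struct virtio_crypto *vcrypto)
	{
		if (!virtcrypto_algo_is_supported(vcrypto,
						  VIRTIO_CRYPTO_SERVICE_CIPHER,
						  VIRTIO_CRYPTO_CIPHER_AES_CBC))
			return -ENODEV;

		return 0;
	}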
diff --git a/drivers/crypto/vmx/ghash.c b/drivers/crypto/vmx/ghash.c
index 1c4b5b8..dd8b871 100644
--- a/drivers/crypto/vmx/ghash.c
+++ b/drivers/crypto/vmx/ghash.c
@@ -215,7 +215,7 @@
 		 .cra_name = "ghash",
 		 .cra_driver_name = "p8_ghash",
 		 .cra_priority = 1000,
-		 .cra_flags = CRYPTO_ALG_TYPE_SHASH | CRYPTO_ALG_NEED_FALLBACK,
+		 .cra_flags = CRYPTO_ALG_NEED_FALLBACK,
 		 .cra_blocksize = GHASH_BLOCK_SIZE,
 		 .cra_ctxsize = sizeof(struct p8_ghash_ctx),
 		 .cra_module = THIS_MODULE,
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
index e107e18..1e929a1 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
@@ -119,6 +119,7 @@
 #include <linux/clk.h>
 #include <linux/bitrev.h>
 #include <linux/crc32.h>
+#include <linux/crc32poly.h>
 
 #include "xgbe.h"
 #include "xgbe-common.h"
@@ -887,7 +888,6 @@
 
 static u32 xgbe_vid_crc32_le(__le16 vid_le)
 {
-	u32 poly = 0xedb88320;	/* CRCPOLY_LE */
 	u32 crc = ~0;
 	u32 temp = 0;
 	unsigned char *data = (unsigned char *)&vid_le;
@@ -904,7 +904,7 @@
 		data_byte >>= 1;
 
 		if (temp)
-			crc ^= poly;
+			crc ^= CRC32_POLY_LE;
 	}
 
 	return crc;
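The CRC hunks in this merge replace open-coded 0x04c11db7 and
0xedb88320 constants with CRC32_POLY_BE and CRC32_POLY_LE from
<linux/crc32poly.h>. The two values are the same CRC-32 polynomial in
opposite bit orders, which a one-line check makes explicit (sketch
only):

	#include <linux/bitrev.h>
	#include <linux/crc32poly.h>

	/* bitrev32(0x04c11db7) == 0xedb88320 */
	static bool example_poly_sanity(void)
	{
		return bitrev32(CRC32_POLY_BE) == CRC32_POLY_LE;
	}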
diff --git a/drivers/net/ethernet/apple/bmac.c b/drivers/net/ethernet/apple/bmac.c
index 5a655d2..024998d 100644
--- a/drivers/net/ethernet/apple/bmac.c
+++ b/drivers/net/ethernet/apple/bmac.c
@@ -19,6 +19,7 @@
 #include <linux/init.h>
 #include <linux/spinlock.h>
 #include <linux/crc32.h>
+#include <linux/crc32poly.h>
 #include <linux/bitrev.h>
 #include <linux/ethtool.h>
 #include <linux/slab.h>
@@ -37,11 +38,6 @@
 #define trunc_page(x)	((void *)(((unsigned long)(x)) & ~((unsigned long)(PAGE_SIZE - 1))))
 #define round_page(x)	trunc_page(((unsigned long)(x)) + ((unsigned long)(PAGE_SIZE - 1)))
 
-/*
- * CRC polynomial - used in working out multicast filter bits.
- */
-#define ENET_CRCPOLY 0x04c11db7
-
 /* switch to use multicast code lifted from sunhme driver */
 #define SUNHME_MULTICAST
 
@@ -838,7 +834,7 @@
 		next = next >> 1;
 
 		/* do the XOR */
-		if (high_crc_set ^ low_data_set) cur = cur ^ ENET_CRCPOLY;
+		if (high_crc_set ^ low_data_set) cur = cur ^ CRC32_POLY_BE;
 	}
 	return cur;
 }
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index aa1374d..b7fb640 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -54,6 +54,7 @@
 #include <linux/ssb/ssb_driver_gige.h>
 #include <linux/hwmon.h>
 #include <linux/hwmon-sysfs.h>
+#include <linux/crc32poly.h>
 
 #include <net/checksum.h>
 #include <net/ip.h>
@@ -9720,7 +9721,7 @@
 			reg >>= 1;
 
 			if (tmp)
-				reg ^= 0xedb88320;
+				reg ^= CRC32_POLY_LE;
 		}
 	}
 
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index c729665..a7cb378 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -48,6 +48,7 @@
 #include <linux/io.h>
 #include <linux/irq.h>
 #include <linux/clk.h>
+#include <linux/crc32poly.h>
 #include <linux/platform_device.h>
 #include <linux/mdio.h>
 #include <linux/phy.h>
@@ -2949,7 +2950,6 @@
  */
 
 #define FEC_HASH_BITS	6		/* #bits in hash */
-#define CRC32_POLY	0xEDB88320
 
 static void set_multicast_list(struct net_device *ndev)
 {
@@ -2989,7 +2989,7 @@
 			data = ha->addr[i];
 			for (bit = 0; bit < 8; bit++, data >>= 1) {
 				crc = (crc >> 1) ^
-				(((crc ^ data) & 1) ? CRC32_POLY : 0);
+				(((crc ^ data) & 1) ? CRC32_POLY_LE : 0);
 			}
 		}
 
diff --git a/drivers/net/ethernet/freescale/fs_enet/fec.h b/drivers/net/ethernet/freescale/fs_enet/fec.h
index 7832db7..1dbee5d 100644
--- a/drivers/net/ethernet/freescale/fs_enet/fec.h
+++ b/drivers/net/ethernet/freescale/fs_enet/fec.h
@@ -2,9 +2,6 @@
 #ifndef FS_ENET_FEC_H
 #define FS_ENET_FEC_H
 
-/* CRC polynomium used by the FEC for the multicast group filtering */
-#define FEC_CRC_POLY   0x04C11DB7
-
 #define FEC_MAX_MULTICAST_ADDRS	64
 
 /* Interrupt events/masks.
diff --git a/drivers/net/ethernet/freescale/fs_enet/mac-fec.c b/drivers/net/ethernet/freescale/fs_enet/mac-fec.c
index 1fc27c9..05093e5 100644
--- a/drivers/net/ethernet/freescale/fs_enet/mac-fec.c
+++ b/drivers/net/ethernet/freescale/fs_enet/mac-fec.c
@@ -18,6 +18,7 @@
 #include <linux/string.h>
 #include <linux/ptrace.h>
 #include <linux/errno.h>
+#include <linux/crc32poly.h>
 #include <linux/ioport.h>
 #include <linux/interrupt.h>
 #include <linux/delay.h>
@@ -187,7 +188,7 @@
 			msb = crc >> 31;
 			crc <<= 1;
 			if (msb ^ (byte & 0x1))
-				crc ^= FEC_CRC_POLY;
+				crc ^= CRC32_POLY_BE;
 			byte >>= 1;
 		}
 	}
diff --git a/drivers/net/ethernet/micrel/ks8851_mll.c b/drivers/net/ethernet/micrel/ks8851_mll.c
index f3e9dd4..0e9719f 100644
--- a/drivers/net/ethernet/micrel/ks8851_mll.c
+++ b/drivers/net/ethernet/micrel/ks8851_mll.c
@@ -30,6 +30,7 @@
 #include <linux/ethtool.h>
 #include <linux/cache.h>
 #include <linux/crc32.h>
+#include <linux/crc32poly.h>
 #include <linux/mii.h>
 #include <linux/platform_device.h>
 #include <linux/delay.h>
@@ -1078,7 +1079,7 @@
 
 }  /* ks_stop_rx */
 
-static unsigned long const ethernet_polynomial = 0x04c11db7U;
+static unsigned long const ethernet_polynomial = CRC32_POLY_BE;
 
 static unsigned long ether_gen_crc(int length, u8 *data)
 {
diff --git a/drivers/net/ethernet/synopsys/dwc-xlgmac-hw.c b/drivers/net/ethernet/synopsys/dwc-xlgmac-hw.c
index 458a784..99d86e3 100644
--- a/drivers/net/ethernet/synopsys/dwc-xlgmac-hw.c
+++ b/drivers/net/ethernet/synopsys/dwc-xlgmac-hw.c
@@ -20,6 +20,7 @@
 #include <linux/clk.h>
 #include <linux/bitrev.h>
 #include <linux/crc32.h>
+#include <linux/crc32poly.h>
 #include <linux/dcbnl.h>
 
 #include "dwc-xlgmac.h"
@@ -193,7 +194,6 @@
 {
 	unsigned char *data = (unsigned char *)&vid_le;
 	unsigned char data_byte = 0;
-	u32 poly = 0xedb88320;
 	u32 crc = ~0;
 	u32 temp = 0;
 	int i, bits;
@@ -208,7 +208,7 @@
 		data_byte >>= 1;
 
 		if (temp)
-			crc ^= poly;
+			crc ^= CRC32_POLY_LE;
 	}
 
 	return crc;
diff --git a/drivers/staging/rtl8712/rtl871x_security.c b/drivers/staging/rtl8712/rtl871x_security.c
index 7bc74d7..1075eac 100644
--- a/drivers/staging/rtl8712/rtl871x_security.c
+++ b/drivers/staging/rtl8712/rtl871x_security.c
@@ -40,6 +40,7 @@
 #include <linux/uaccess.h>
 #include <asm/byteorder.h>
 #include <linux/atomic.h>
+#include <linux/crc32poly.h>
 #include <linux/semaphore.h>
 
 #include "osdep_service.h"
@@ -49,8 +50,6 @@
 
 /* =====WEP related===== */
 
-#define CRC32_POLY 0x04c11db7
-
 struct arc4context {
 	u32 x;
 	u32 y;
@@ -135,7 +134,7 @@
 	for (i = 0; i < 256; ++i) {
 		k = crc32_reverseBit((u8)i);
 		for (c = ((u32)k) << 24, j = 8; j > 0; --j)
-			c = c & 0x80000000 ? (c << 1) ^ CRC32_POLY : (c << 1);
+			c = c & 0x80000000 ? (c << 1) ^ CRC32_POLY_BE : (c << 1);
 		p1 = (u8 *)&crc32_table[i];
 		p1[0] = crc32_reverseBit(p[3]);
 		p1[1] = crc32_reverseBit(p[2]);
diff --git a/drivers/staging/rtl8723bs/core/rtw_security.c b/drivers/staging/rtl8723bs/core/rtw_security.c
index 612277a..6c8ac9e 100644
--- a/drivers/staging/rtl8723bs/core/rtw_security.c
+++ b/drivers/staging/rtl8723bs/core/rtw_security.c
@@ -6,6 +6,7 @@
  ******************************************************************************/
 #define  _RTW_SECURITY_C_
 
+#include <linux/crc32poly.h>
 #include <drv_types.h>
 #include <rtw_debug.h>
 
@@ -87,8 +88,6 @@
 
 /* WEP related ===== */
 
-#define CRC32_POLY 0x04c11db7
-
 struct arc4context {
 	u32 x;
 	u32 y;
@@ -178,7 +177,7 @@
 		for (i = 0; i < 256; ++i) {
 			k = crc32_reverseBit((u8)i);
 			for (c = ((u32)k) << 24, j = 8; j > 0; --j) {
-				c = c & 0x80000000 ? (c << 1) ^ CRC32_POLY : (c << 1);
+				c = c & 0x80000000 ? (c << 1) ^ CRC32_POLY_BE : (c << 1);
 			}
 			p1 = (u8 *)&crc32_table[i];
 
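Both staging drivers fill their WEP crc32_table the same way: bit-reverse the index, clock eight MSB-first steps with CRC32_POLY_BE, then bit-reverse the entry. With the kernel's bitrev helpers the whole init collapses to something like this sketch (wep_crc32_init is hypothetical; bitrev32() performs the byte-and-bit reversal the drivers spell out with crc32_reverseBit):

#include <linux/bitrev.h>	/* bitrev8(), bitrev32() */
#include <linux/crc32poly.h>
#include <linux/types.h>

static u32 crc32_table[256];

/* Sketch: MSB-first table over CRC32_POLY_BE with bit-reversed
 * indices and entries, which yields the usual reflected CRC-32 table.
 */
static void wep_crc32_init(void)
{
	int i, j;

	for (i = 0; i < 256; i++) {
		u32 c = (u32)bitrev8(i) << 24;

		for (j = 0; j < 8; j++)
			c = (c & 0x80000000) ? (c << 1) ^ CRC32_POLY_BE
					     : (c << 1);
		crc32_table[i] = bitrev32(c);
	}
}
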
diff --git a/drivers/staging/skein/skein_generic.c b/drivers/staging/skein/skein_generic.c
index 11f5e53..c31fc64 100644
--- a/drivers/staging/skein/skein_generic.c
+++ b/drivers/staging/skein/skein_generic.c
@@ -137,7 +137,6 @@
 	.base		=	{
 		.cra_name		=	"skein256",
 		.cra_driver_name	=	"skein",
-		.cra_flags		=	CRYPTO_ALG_TYPE_SHASH,
 		.cra_blocksize		=	SKEIN_256_BLOCK_BYTES,
 		.cra_module		=	THIS_MODULE,
 	}
@@ -155,7 +154,6 @@
 	.base		=	{
 		.cra_name		=	"skein512",
 		.cra_driver_name	=	"skein",
-		.cra_flags		=	CRYPTO_ALG_TYPE_SHASH,
 		.cra_blocksize		=	SKEIN_512_BLOCK_BYTES,
 		.cra_module		=	THIS_MODULE,
 	}
@@ -173,7 +171,6 @@
 	.base		=	{
 		.cra_name		=	"skein1024",
 		.cra_driver_name	=	"skein",
-		.cra_flags		=	CRYPTO_ALG_TYPE_SHASH,
 		.cra_blocksize		=	SKEIN_1024_BLOCK_BYTES,
 		.cra_module		=	THIS_MODULE,
 	}
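The dropped flag was never needed: the algorithm type is implied by registering through the shash API, and cra_flags is meant for properties such as CRYPTO_ALG_ASYNC. A minimal sketch of a base left without a type flag (callbacks and sizes elided; names hypothetical):

#include <crypto/hash.h>	/* struct shash_alg */
#include <linux/module.h>	/* THIS_MODULE */

/* Sketch: no CRYPTO_ALG_TYPE_* in cra_flags; crypto_register_shash()
 * marks the type itself.  .init/.update/.final etc. omitted here.
 */
static struct shash_alg example_alg = {
	.base		=	{
		.cra_name		=	"example",
		.cra_driver_name	=	"example-generic",
		.cra_blocksize		=	64,
		.cra_module		=	THIS_MODULE,
	}
};
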
diff --git a/include/crypto/dh.h b/include/crypto/dh.h
index 71e1bb2..7e0dad9 100644
--- a/include/crypto/dh.h
+++ b/include/crypto/dh.h
@@ -29,17 +29,21 @@
  *
  * @key:	Private DH key
  * @p:		Diffie-Hellman parameter P
+ * @q:		Diffie-Hellman parameter Q
  * @g:		Diffie-Hellman generator G
  * @key_size:	Size of the private DH key
  * @p_size:	Size of DH parameter P
+ * @q_size:	Size of DH parameter Q
  * @g_size:	Size of DH generator G
  */
 struct dh {
 	void *key;
 	void *p;
+	void *q;
 	void *g;
 	unsigned int key_size;
 	unsigned int p_size;
+	unsigned int q_size;
 	unsigned int g_size;
 };
 
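With q in place, a caller can pass the subgroup order through the kpp layer (q/q_size may stay NULL/0 when unused). A hedged sketch of packing the parameters with this header's existing helpers; example_encode_dh is a hypothetical wrapper:

#include <crypto/dh.h>
#include <linux/errno.h>
#include <linux/slab.h>

/* Sketch: pack DH params (with optional Q) into the kpp wire format.
 * p/q/g/key point at big-endian integers supplied by the caller.
 */
static int example_encode_dh(void **buf, unsigned int *len,
			     const struct dh *params)
{
	*len = crypto_dh_key_len(params);
	*buf = kmalloc(*len, GFP_KERNEL);
	if (!*buf)
		return -ENOMEM;

	return crypto_dh_encode_key(*buf, *len, params);
}
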
diff --git a/include/crypto/drbg.h b/include/crypto/drbg.h
index 8f94110..54b9f5d3 100644
--- a/include/crypto/drbg.h
+++ b/include/crypto/drbg.h
@@ -127,6 +127,7 @@
 	__u8 *outscratchpadbuf;			/* CTR mode output scratchpad */
         __u8 *outscratchpad;			/* CTR mode aligned outbuf */
 	struct crypto_wait ctr_wait;		/* CTR mode async wait obj */
+	struct scatterlist sg_in, sg_out;	/* CTR mode SGLs */
 
 	bool seeded;		/* DRBG fully seeded? */
 	bool pr;		/* Prediction resistance enabled? */
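Embedding the two SGLs in drbg_state lets the CTR path re-point them at each request's buffers instead of building scatterlists on the stack. Roughly (buffer names are illustrative, not the in-tree code):

#include <crypto/drbg.h>
#include <linux/scatterlist.h>

/* Sketch: re-point the preallocated SGLs at this request's buffers. */
static void example_drbg_sg_setup(struct drbg_state *drbg,
				  u8 *inbuf, u32 inlen, u32 outlen)
{
	sg_init_one(&drbg->sg_in, inbuf, inlen);
	sg_init_one(&drbg->sg_out, drbg->outscratchpad, outlen);
}
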
diff --git a/include/crypto/sha.h b/include/crypto/sha.h
index 0555b57..8a46202 100644
--- a/include/crypto/sha.h
+++ b/include/crypto/sha.h
@@ -71,6 +71,10 @@
 
 extern const u8 sha256_zero_message_hash[SHA256_DIGEST_SIZE];
 
+extern const u8 sha384_zero_message_hash[SHA384_DIGEST_SIZE];
+
+extern const u8 sha512_zero_message_hash[SHA512_DIGEST_SIZE];
+
 struct sha1_state {
 	u32 state[SHA1_DIGEST_SIZE / 4];
 	u64 count;
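The empty-message digests let offload drivers complete zero-length requests without a round trip to the engine, matching the existing sha1/sha256 exports. A sketch of the usual pattern (example_zero_len_final is hypothetical):

#include <crypto/sha.h>
#include <linux/errno.h>
#include <linux/string.h>

/* Sketch: serve a zero-length digest from the precomputed constant;
 * anything else is left to the hardware path (placeholder return).
 */
static int example_zero_len_final(unsigned int total, u8 *out)
{
	if (total)
		return -EINPROGRESS;	/* let the engine handle it */
	memcpy(out, sha512_zero_message_hash, SHA512_DIGEST_SIZE);
	return 0;
}
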
diff --git a/include/crypto/vmac.h b/include/crypto/vmac.h
deleted file mode 100644
index 6b700c7..0000000
--- a/include/crypto/vmac.h
+++ /dev/null
@@ -1,63 +0,0 @@
-/*
- * Modified to interface to the Linux kernel
- * Copyright (c) 2009, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
- * Place - Suite 330, Boston, MA 02111-1307 USA.
- */
-
-#ifndef __CRYPTO_VMAC_H
-#define __CRYPTO_VMAC_H
-
-/* --------------------------------------------------------------------------
- * VMAC and VHASH Implementation by Ted Krovetz (tdk@acm.org) and Wei Dai.
- * This implementation is herby placed in the public domain.
- * The authors offers no warranty. Use at your own risk.
- * Please send bug reports to the authors.
- * Last modified: 17 APR 08, 1700 PDT
- * ----------------------------------------------------------------------- */
-
-/*
- * User definable settings.
- */
-#define VMAC_TAG_LEN	64
-#define VMAC_KEY_SIZE	128/* Must be 128, 192 or 256			*/
-#define VMAC_KEY_LEN	(VMAC_KEY_SIZE/8)
-#define VMAC_NHBYTES	128/* Must 2^i for any 3 < i < 13 Standard = 128*/
-
-/*
- * This implementation uses u32 and u64 as names for unsigned 32-
- * and 64-bit integer types. These are defined in C99 stdint.h. The
- * following may need adaptation if you are not running a C99 or
- * Microsoft C environment.
- */
-struct vmac_ctx {
-	u64 nhkey[(VMAC_NHBYTES/8)+2*(VMAC_TAG_LEN/64-1)];
-	u64 polykey[2*VMAC_TAG_LEN/64];
-	u64 l3key[2*VMAC_TAG_LEN/64];
-	u64 polytmp[2*VMAC_TAG_LEN/64];
-	u64 cached_nonce[2];
-	u64 cached_aes[2];
-	int first_block_processed;
-};
-
-typedef u64 vmac_t;
-
-struct vmac_ctx_t {
-	struct crypto_cipher *child;
-	struct vmac_ctx __vmac_ctx;
-	u8 partial[VMAC_NHBYTES];	/* partial block */
-	int partial_size;		/* size of the partial block */
-};
-
-#endif /* __CRYPTO_VMAC_H */
diff --git a/include/linux/crc32poly.h b/include/linux/crc32poly.h
new file mode 100644
index 0000000..62c4b77
--- /dev/null
+++ b/include/linux/crc32poly.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_CRC32_POLY_H
+#define _LINUX_CRC32_POLY_H
+
+/*
+ * There are multiple 16-bit CRC polynomials in common use, but this is
+ * *the* standard CRC-32 polynomial, first popularized by Ethernet.
+ * x^32+x^26+x^23+x^22+x^16+x^12+x^11+x^10+x^8+x^7+x^5+x^4+x^2+x^1+x^0
+ */
+#define CRC32_POLY_LE 0xedb88320
+#define CRC32_POLY_BE 0x04c11db7
+
+/*
+ * This is the CRC32c polynomial, as outlined by Castagnoli.
+ * x^32+x^28+x^27+x^26+x^25+x^23+x^22+x^20+x^19+x^18+x^14+x^13+x^11+x^10+x^9+
+ * x^8+x^6+x^0
+ */
+#define CRC32C_POLY_LE 0x82F63B78
+
+#endif /* _LINUX_CRC32_POLY_H */
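The two defines are one polynomial in its two bit orders: CRC32_POLY_BE is the bit-reversal of CRC32_POLY_LE. A runtime self-check sketch (crc32poly_selfcheck is hypothetical):

#include <linux/bitrev.h>
#include <linux/bug.h>
#include <linux/crc32poly.h>

/* Sketch: the BE constant is the bit-reversed LE constant,
 * bitrev32(0xedb88320) == 0x04c11db7.
 */
static void crc32poly_selfcheck(void)
{
	WARN_ON(bitrev32(CRC32_POLY_LE) != CRC32_POLY_BE);
}
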
diff --git a/lib/crc32.c b/lib/crc32.c
index 2ef20fe..a6c9afa 100644
--- a/lib/crc32.c
+++ b/lib/crc32.c
@@ -27,6 +27,7 @@
 /* see: Documentation/crc32.txt for a description of algorithms */
 
 #include <linux/crc32.h>
+#include <linux/crc32poly.h>
 #include <linux/module.h>
 #include <linux/types.h>
 #include <linux/sched.h>
@@ -184,7 +185,7 @@
 #if CRC_LE_BITS == 1
 u32 __pure crc32_le(u32 crc, unsigned char const *p, size_t len)
 {
-	return crc32_le_generic(crc, p, len, NULL, CRCPOLY_LE);
+	return crc32_le_generic(crc, p, len, NULL, CRC32_POLY_LE);
 }
 u32 __pure __crc32c_le(u32 crc, unsigned char const *p, size_t len)
 {
@@ -194,7 +195,7 @@
 u32 __pure crc32_le(u32 crc, unsigned char const *p, size_t len)
 {
 	return crc32_le_generic(crc, p, len,
-			(const u32 (*)[256])crc32table_le, CRCPOLY_LE);
+			(const u32 (*)[256])crc32table_le, CRC32_POLY_LE);
 }
 u32 __pure __crc32c_le(u32 crc, unsigned char const *p, size_t len)
 {
@@ -268,7 +269,7 @@
 
 u32 __attribute_const__ crc32_le_shift(u32 crc, size_t len)
 {
-	return crc32_generic_shift(crc, len, CRCPOLY_LE);
+	return crc32_generic_shift(crc, len, CRC32_POLY_LE);
 }
 
 u32 __attribute_const__ __crc32c_le_shift(u32 crc, size_t len)
@@ -330,13 +331,13 @@
 #if CRC_LE_BITS == 1
 u32 __pure crc32_be(u32 crc, unsigned char const *p, size_t len)
 {
-	return crc32_be_generic(crc, p, len, NULL, CRCPOLY_BE);
+	return crc32_be_generic(crc, p, len, NULL, CRC32_POLY_BE);
 }
 #else
 u32 __pure crc32_be(u32 crc, unsigned char const *p, size_t len)
 {
 	return crc32_be_generic(crc, p, len,
-			(const u32 (*)[256])crc32table_be, CRCPOLY_BE);
+			(const u32 (*)[256])crc32table_be, CRC32_POLY_BE);
 }
 #endif
 EXPORT_SYMBOL(crc32_be);
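Callers that just need the standard Ethernet CRC shouldn't open-code either constant; the exported helpers already select the configured table-driven or bitwise path. For example (note that ether_crc_le() in <linux/crc32.h> is the related non-inverted variant used for multicast hashing):

#include <linux/crc32.h>

/* Standard reflected CRC-32: seed with all-ones, invert at the end. */
static u32 example_ether_crc(const u8 *buf, size_t len)
{
	return ~crc32_le(~0u, buf, len);
}
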
diff --git a/lib/crc32defs.h b/lib/crc32defs.h
index cb275a2..0c8fb59 100644
--- a/lib/crc32defs.h
+++ b/lib/crc32defs.h
@@ -1,18 +1,4 @@
 /* SPDX-License-Identifier: GPL-2.0 */
-/*
- * There are multiple 16-bit CRC polynomials in common use, but this is
- * *the* standard CRC-32 polynomial, first popularized by Ethernet.
- * x^32+x^26+x^23+x^22+x^16+x^12+x^11+x^10+x^8+x^7+x^5+x^4+x^2+x^1+x^0
- */
-#define CRCPOLY_LE 0xedb88320
-#define CRCPOLY_BE 0x04c11db7
-
-/*
- * This is the CRC32c polynomial, as outlined by Castagnoli.
- * x^32+x^28+x^27+x^26+x^25+x^23+x^22+x^20+x^19+x^18+x^14+x^13+x^11+x^10+x^9+
- * x^8+x^6+x^0
- */
-#define CRC32C_POLY_LE 0x82F63B78
 
 /* Try to choose an implementation variant via Kconfig */
 #ifdef CONFIG_CRC32_SLICEBY8
diff --git a/lib/decompress_bunzip2.c b/lib/decompress_bunzip2.c
index 0234361..7c4932e 100644
--- a/lib/decompress_bunzip2.c
+++ b/lib/decompress_bunzip2.c
@@ -51,6 +51,7 @@
 #endif /* STATIC */
 
 #include <linux/decompress/mm.h>
+#include <linux/crc32poly.h>
 
 #ifndef INT_MAX
 #define INT_MAX 0x7fffffff
@@ -654,7 +655,7 @@
 	for (i = 0; i < 256; i++) {
 		c = i << 24;
 		for (j = 8; j; j--)
-			c = c&0x80000000 ? (c << 1)^0x04c11db7 : (c << 1);
+			c = c&0x80000000 ? (c << 1)^CRC32_POLY_BE : (c << 1);
 		bd->crc32Table[i] = c;
 	}
 
diff --git a/lib/gen_crc32table.c b/lib/gen_crc32table.c
index 8f26660..f755b99 100644
--- a/lib/gen_crc32table.c
+++ b/lib/gen_crc32table.c
@@ -1,5 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0
 #include <stdio.h>
+#include "../include/linux/crc32poly.h"
 #include "../include/generated/autoconf.h"
 #include "crc32defs.h"
 #include <inttypes.h>
@@ -57,7 +58,7 @@
 
 static void crc32init_le(void)
 {
-	crc32init_le_generic(CRCPOLY_LE, crc32table_le);
+	crc32init_le_generic(CRC32_POLY_LE, crc32table_le);
 }
 
 static void crc32cinit_le(void)
@@ -76,7 +77,7 @@
 	crc32table_be[0][0] = 0;
 
 	for (i = 1; i < BE_TABLE_SIZE; i <<= 1) {
-		crc = (crc << 1) ^ ((crc & 0x80000000) ? CRCPOLY_BE : 0);
+		crc = (crc << 1) ^ ((crc & 0x80000000) ? CRC32_POLY_BE : 0);
 		for (j = 0; j < i; j++)
 			crc32table_be[0][i + j] = crc ^ crc32table_be[0][j];
 	}
diff --git a/lib/xz/xz_crc32.c b/lib/xz/xz_crc32.c
index 34532d1..25a5d87 100644
--- a/lib/xz/xz_crc32.c
+++ b/lib/xz/xz_crc32.c
@@ -15,6 +15,7 @@
  * but they are bigger and use more memory for the lookup table.
  */
 
+#include <linux/crc32poly.h>
 #include "xz_private.h"
 
 /*
@@ -29,7 +30,7 @@
 
 XZ_EXTERN void xz_crc32_init(void)
 {
-	const uint32_t poly = 0xEDB88320;
+	const uint32_t poly = CRC32_POLY_LE;
 
 	uint32_t i;
 	uint32_t j;
diff --git a/security/keys/dh.c b/security/keys/dh.c
index b203f77..711e89d 100644
--- a/security/keys/dh.c
+++ b/security/keys/dh.c
@@ -317,7 +317,7 @@
 	if (ret)
 		goto out3;
 
-	tfm = crypto_alloc_kpp("dh", CRYPTO_ALG_TYPE_KPP, 0);
+	tfm = crypto_alloc_kpp("dh", 0, 0);
 	if (IS_ERR(tfm)) {
 		ret = PTR_ERR(tfm);
 		goto out3;
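crypto_alloc_kpp() takes (name, type, mask); the kpp type is already implied by the function itself, so the type/mask pair can simply be 0/0, meaning no extra constraints on the "dh" implementation chosen. A sketch of the corrected call pattern (example_dh_init is hypothetical):

#include <crypto/kpp.h>
#include <linux/err.h>

/* Sketch: allocate a DH transform, propagating the ERR_PTR code. */
static int example_dh_init(struct crypto_kpp **out)
{
	struct crypto_kpp *tfm = crypto_alloc_kpp("dh", 0, 0);

	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	*out = tfm;
	return 0;
}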