crypto: more robust crypto_memneq

Disabling compiler optimizations can be fragile, since a future
compiler release could add a new optimization to -O0 or -Os that breaks
the assumptions the code is making.

Instead of disabling compiler optimizations, use a dummy inline
assembly statement (based on RELOC_HIDE) to block the problematic kinds
of optimization, while still allowing other optimizations to be applied
to the code.
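
For reference, a minimal sketch of what such a RELOC_HIDE-style dummy
asm could look like (the actual OPTIMIZER_HIDE_VAR macro is introduced
in the compiler headers and is not part of the memneq.c hunk below;
this sketch only illustrates the idea):

    /*
     * Empty asm that takes the variable as both input and output: it
     * emits no instructions, but forces the compiler to materialize
     * the value before it and to reload it afterwards.
     */
    #define OPTIMIZER_HIDE_VAR(var) __asm__ ("" : "=r" (var) : "0" (var))

A compiler without gcc-style extended asm would presumably have to fall
back to a full barrier(), which is stronger than necessary but still
correct.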

The dummy inline assembly statement is added after every OR, and has
the accumulator variable as both its input and its output. The compiler
has to assume that the dummy inline assembly could both depend on the
accumulator variable and change it, so it must compute the value fully
before the inline assembly and cannot assume anything about the value
after it.
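
As a standalone illustration of the pattern (a hypothetical userspace
sketch, not the kernel code below; hide() plays the role of
OPTIMIZER_HIDE_VAR):

    #include <stddef.h>

    static inline unsigned long hide(unsigned long v)
    {
        /* Empty asm: v must be computed before it, and its value is
         * treated as unknown afterwards. */
        __asm__ ("" : "=r" (v) : "0" (v));
        return v;
    }

    /* Returns zero iff the two buffers are equal, touching every byte. */
    unsigned long memneq_demo(const unsigned char *a,
                              const unsigned char *b, size_t n)
    {
        unsigned long neq = 0;

        while (n--) {
            neq |= *a++ ^ *b++;
            /* Without this, the compiler may rewrite the OR chain into
             * an early-exit comparison. */
            neq = hide(neq);
        }
        return neq;
    }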

This change should be enough to make crypto_memneq work correctly
(with data-independent timing) even if it is inlined at its call sites.
That inlining can be done later in a follow-up patch.

Compile-tested on x86_64.

Signed-off-by: Cesar Eduardo Barros <cesarb@cesarb.eti.br>
Acked-by: Daniel Borkmann <dborkman@redhat.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
diff --git a/crypto/memneq.c b/crypto/memneq.c
index cd01622..570f6f3 100644
--- a/crypto/memneq.c
+++ b/crypto/memneq.c
@@ -72,6 +72,7 @@
 #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
 	while (size >= sizeof(unsigned long)) {
 		neq |= *(unsigned long *)a ^ *(unsigned long *)b;
+		OPTIMIZER_HIDE_VAR(neq);
 		a += sizeof(unsigned long);
 		b += sizeof(unsigned long);
 		size -= sizeof(unsigned long);
@@ -79,6 +80,7 @@
 #endif /* CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS */
 	while (size > 0) {
 		neq |= *(unsigned char *)a ^ *(unsigned char *)b;
+		OPTIMIZER_HIDE_VAR(neq);
 		a += 1;
 		b += 1;
 		size -= 1;
@@ -89,33 +91,60 @@
 /* Loop-free fast-path for frequently used 16-byte size */
 static inline unsigned long __crypto_memneq_16(const void *a, const void *b)
 {
+	unsigned long neq = 0;
+
 #ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
-	if (sizeof(unsigned long) == 8)
-		return ((*(unsigned long *)(a)   ^ *(unsigned long *)(b))
-		      | (*(unsigned long *)(a+8) ^ *(unsigned long *)(b+8)));
-	else if (sizeof(unsigned int) == 4)
-		return ((*(unsigned int *)(a)    ^ *(unsigned int *)(b))
-                      | (*(unsigned int *)(a+4)  ^ *(unsigned int *)(b+4))
-		      | (*(unsigned int *)(a+8)  ^ *(unsigned int *)(b+8))
-	              | (*(unsigned int *)(a+12) ^ *(unsigned int *)(b+12)));
-	else
+	if (sizeof(unsigned long) == 8) {
+		neq |= *(unsigned long *)(a)   ^ *(unsigned long *)(b);
+		OPTIMIZER_HIDE_VAR(neq);
+		neq |= *(unsigned long *)(a+8) ^ *(unsigned long *)(b+8);
+		OPTIMIZER_HIDE_VAR(neq);
+	} else if (sizeof(unsigned int) == 4) {
+		neq |= *(unsigned int *)(a)    ^ *(unsigned int *)(b);
+		OPTIMIZER_HIDE_VAR(neq);
+		neq |= *(unsigned int *)(a+4)  ^ *(unsigned int *)(b+4);
+		OPTIMIZER_HIDE_VAR(neq);
+		neq |= *(unsigned int *)(a+8)  ^ *(unsigned int *)(b+8);
+		OPTIMIZER_HIDE_VAR(neq);
+		neq |= *(unsigned int *)(a+12) ^ *(unsigned int *)(b+12);
+		OPTIMIZER_HIDE_VAR(neq);
+	} else {
 #endif /* CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS */
-		return ((*(unsigned char *)(a)    ^ *(unsigned char *)(b))
-		      | (*(unsigned char *)(a+1)  ^ *(unsigned char *)(b+1))
-		      | (*(unsigned char *)(a+2)  ^ *(unsigned char *)(b+2))
-		      | (*(unsigned char *)(a+3)  ^ *(unsigned char *)(b+3))
-		      | (*(unsigned char *)(a+4)  ^ *(unsigned char *)(b+4))
-		      | (*(unsigned char *)(a+5)  ^ *(unsigned char *)(b+5))
-		      | (*(unsigned char *)(a+6)  ^ *(unsigned char *)(b+6))
-		      | (*(unsigned char *)(a+7)  ^ *(unsigned char *)(b+7))
-		      | (*(unsigned char *)(a+8)  ^ *(unsigned char *)(b+8))
-		      | (*(unsigned char *)(a+9)  ^ *(unsigned char *)(b+9))
-		      | (*(unsigned char *)(a+10) ^ *(unsigned char *)(b+10))
-		      | (*(unsigned char *)(a+11) ^ *(unsigned char *)(b+11))
-		      | (*(unsigned char *)(a+12) ^ *(unsigned char *)(b+12))
-		      | (*(unsigned char *)(a+13) ^ *(unsigned char *)(b+13))
-		      | (*(unsigned char *)(a+14) ^ *(unsigned char *)(b+14))
-		      | (*(unsigned char *)(a+15) ^ *(unsigned char *)(b+15)));
+		neq |= *(unsigned char *)(a)    ^ *(unsigned char *)(b);
+		OPTIMIZER_HIDE_VAR(neq);
+		neq |= *(unsigned char *)(a+1)  ^ *(unsigned char *)(b+1);
+		OPTIMIZER_HIDE_VAR(neq);
+		neq |= *(unsigned char *)(a+2)  ^ *(unsigned char *)(b+2);
+		OPTIMIZER_HIDE_VAR(neq);
+		neq |= *(unsigned char *)(a+3)  ^ *(unsigned char *)(b+3);
+		OPTIMIZER_HIDE_VAR(neq);
+		neq |= *(unsigned char *)(a+4)  ^ *(unsigned char *)(b+4);
+		OPTIMIZER_HIDE_VAR(neq);
+		neq |= *(unsigned char *)(a+5)  ^ *(unsigned char *)(b+5);
+		OPTIMIZER_HIDE_VAR(neq);
+		neq |= *(unsigned char *)(a+6)  ^ *(unsigned char *)(b+6);
+		OPTIMIZER_HIDE_VAR(neq);
+		neq |= *(unsigned char *)(a+7)  ^ *(unsigned char *)(b+7);
+		OPTIMIZER_HIDE_VAR(neq);
+		neq |= *(unsigned char *)(a+8)  ^ *(unsigned char *)(b+8);
+		OPTIMIZER_HIDE_VAR(neq);
+		neq |= *(unsigned char *)(a+9)  ^ *(unsigned char *)(b+9);
+		OPTIMIZER_HIDE_VAR(neq);
+		neq |= *(unsigned char *)(a+10) ^ *(unsigned char *)(b+10);
+		OPTIMIZER_HIDE_VAR(neq);
+		neq |= *(unsigned char *)(a+11) ^ *(unsigned char *)(b+11);
+		OPTIMIZER_HIDE_VAR(neq);
+		neq |= *(unsigned char *)(a+12) ^ *(unsigned char *)(b+12);
+		OPTIMIZER_HIDE_VAR(neq);
+		neq |= *(unsigned char *)(a+13) ^ *(unsigned char *)(b+13);
+		OPTIMIZER_HIDE_VAR(neq);
+		neq |= *(unsigned char *)(a+14) ^ *(unsigned char *)(b+14);
+		OPTIMIZER_HIDE_VAR(neq);
+		neq |= *(unsigned char *)(a+15) ^ *(unsigned char *)(b+15);
+		OPTIMIZER_HIDE_VAR(neq);
+	}
+
+	return neq;
 }
 
 /* Compare two areas of memory without leaking timing information,