It's not necessary to do rounding for alloca operations when the requested
alignment is equal to the stack alignment.
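For context, a minimal standalone sketch of the rule described above (the
function name and signature here are hypothetical illustrations, not the
actual codegen code touched by this commit):

    #include <cassert>

    // An alloca whose requested alignment is less than OR EQUAL TO the
    // target's stack alignment is already satisfied by the ABI, so the
    // stack pointer needs no extra rounding for it.
    static bool needsStackRealignment(unsigned RequestedAlign,
                                      unsigned StackAlign) {
      return RequestedAlign > StackAlign; // '>' not '>=': equal is free
    }

    int main() {
      assert(!needsStackRealignment(8, 16));  // smaller: no rounding
      assert(!needsStackRealignment(16, 16)); // equal: no rounding
      assert(needsStackRealignment(32, 16));  // larger: must realign
      return 0;
    }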


git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@40004 91177308-0d34-0410-b5e6-96231b3b80d8
diff --git a/test/Transforms/SimplifyLibCalls/memcmp.ll b/test/Transforms/SimplifyLibCalls/memcmp.ll
new file mode 100644
index 0000000..bea9eed
--- /dev/null
+++ b/test/Transforms/SimplifyLibCalls/memcmp.ll
@@ -0,0 +1,28 @@
+; Test that the memcmpOptimizer works correctly
+; RUN: llvm-upgrade < %s | llvm-as | opt -simplify-libcalls | llvm-dis | \
+; RUN:   not grep {call.*memcmp}
+; RUN: llvm-upgrade < %s | llvm-as | opt -simplify-libcalls -disable-output
+
+declare int %memcmp(sbyte*,sbyte*,int)
+%h = constant [2 x sbyte] c"h\00"
+%hel = constant [4 x sbyte] c"hel\00"
+%hello_u = constant [8 x sbyte] c"hello_u\00"
+
+implementation
+
+void %test(sbyte *%P, sbyte *%Q, int %N, int* %IP, bool *%BP) {
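+  ; memcmp of a buffer with itself is known to be 0 regardless of length.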
+  %A = call int %memcmp(sbyte *%P, sbyte* %P, int %N)
+  volatile store int %A, int* %IP
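+  ; memcmp with a length of 0 is known to be 0.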
+  %B = call int %memcmp(sbyte *%P, sbyte* %Q, int 0)
+  volatile store int %B, int* %IP
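+  ; A length-1 memcmp can be lowered to a byte load of each operand.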
+  %C = call int %memcmp(sbyte *%P, sbyte* %Q, int 1)
+  volatile store int %C, int* %IP
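+  ; A length-2 memcmp used only in an equality test against 0 can also be lowered to loads and compares.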
+  %D = call int %memcmp(sbyte *%P, sbyte* %Q, int 2)
+  %E = seteq int %D, 0
+  volatile store bool %E, bool* %BP
+  ret void
+}