It's not necessary to do rounding for alloca operations when the requested
alignment is equal to the stack alignment.
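
A minimal sketch of the principle, in the spirit of the codegen logic
(the names roundUp, allocaSize, and stackAlign below are hypothetical,
not the actual variables in the source):

    // Round 'size' up to a multiple of 'align'. Assumes 'align' is a
    // power of two, as stack alignments are.
    static unsigned roundUp(unsigned size, unsigned align) {
      return (size + align - 1) & ~(align - 1);
    }

    unsigned allocaSize(unsigned size, unsigned align, unsigned stackAlign) {
      // Rounding is only needed when the requested alignment is
      // stricter than what the stack already guarantees; when
      // align == stackAlign, the stack pointer is aligned enough.
      if (align > stackAlign)
        return roundUp(size, align);
      return size;
    }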


git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@40004 91177308-0d34-0410-b5e6-96231b3b80d8
diff --git a/test/CodeGen/X86/shift-double.llx b/test/CodeGen/X86/shift-double.llx
new file mode 100644
index 0000000..760e490
--- /dev/null
+++ b/test/CodeGen/X86/shift-double.llx
@@ -0,0 +1,35 @@
+; RUN: llvm-upgrade < %s | llvm-as | llc -march=x86 -x86-asm-syntax=intel | \
+; RUN:   grep {sh\[lr\]d} | wc -l | grep 5
+
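+; The RUN line expects five sh[lr]d matches in total: one double-shift
+; instruction from each of the five functions below.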
+long %test1(long %X, ubyte %C) {
+	%Y = shl long %X, ubyte %C
+	ret long %Y
+}
+long %test2(long %X, ubyte %C) {
+	%Y = shr long %X, ubyte %C
+	ret long %Y
+}
+ulong %test3(ulong %X, ubyte %C) {
+	%Y = shr ulong %X, ubyte %C
+	ret ulong %Y
+}
+
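+; (A << C) | (B >> (32-C)) is the classic 32-bit double-shift (shld) idiom.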
+uint %test4(uint %A, uint %B, ubyte %C) {
+	%X = shl uint %A, ubyte %C
+	%Cv = sub ubyte 32, %C
+	%Y = shr uint %B, ubyte %Cv
+	%Z = or uint %Y, %X
+	ret uint %Z
+}
+
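+; The same idiom on 16-bit values should use the 16-bit shld form.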
+ushort %test5(ushort %A, ushort %B, ubyte %C) {
+	%X = shl ushort %A, ubyte %C
+	%Cv = sub ubyte 16, %C
+	%Y = shr ushort %B, ubyte %Cv
+	%Z = or ushort %Y, %X
+	ret ushort %Z
+}