; RUN: llvm-as < %s | llc -march=arm -mattr=+neon > %t
; RUN: grep {vmax\\.s8} %t | count 2
; RUN: grep {vmax\\.s16} %t | count 2
; RUN: grep {vmax\\.s32} %t | count 2
; RUN: grep {vmax\\.u8} %t | count 2
; RUN: grep {vmax\\.u16} %t | count 2
; RUN: grep {vmax\\.u32} %t | count 2
; RUN: grep {vmax\\.f32} %t | count 2
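; Each pattern is expected twice in the output: once for the 64-bit
; (D-register) form and once for the 128-bit (Q-register) form below.

; VMAX.s8: signed maximum of <8 x i8> (D registers).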
define <8 x i8> @vmaxs8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
	%tmp1 = load <8 x i8>* %A
	%tmp2 = load <8 x i8>* %B
	%tmp3 = call <8 x i8> @llvm.arm.neon.vmaxs.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
	ret <8 x i8> %tmp3
}
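; VMAX.s16: signed maximum of <4 x i16> (D registers).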
define <4 x i16> @vmaxs16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
	%tmp1 = load <4 x i16>* %A
	%tmp2 = load <4 x i16>* %B
	%tmp3 = call <4 x i16> @llvm.arm.neon.vmaxs.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
	ret <4 x i16> %tmp3
}
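; VMAX.s32: signed maximum of <2 x i32> (D registers).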
define <2 x i32> @vmaxs32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
	%tmp1 = load <2 x i32>* %A
	%tmp2 = load <2 x i32>* %B
	%tmp3 = call <2 x i32> @llvm.arm.neon.vmaxs.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
	ret <2 x i32> %tmp3
}
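; VMAX.u8: unsigned maximum of <8 x i8> (D registers).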
define <8 x i8> @vmaxu8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
	%tmp1 = load <8 x i8>* %A
	%tmp2 = load <8 x i8>* %B
	%tmp3 = call <8 x i8> @llvm.arm.neon.vmaxu.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
	ret <8 x i8> %tmp3
}
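; VMAX.u16: unsigned maximum of <4 x i16> (D registers).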
define <4 x i16> @vmaxu16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
	%tmp1 = load <4 x i16>* %A
	%tmp2 = load <4 x i16>* %B
	%tmp3 = call <4 x i16> @llvm.arm.neon.vmaxu.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
	ret <4 x i16> %tmp3
}
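; VMAX.u32: unsigned maximum of <2 x i32> (D registers).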
define <2 x i32> @vmaxu32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
	%tmp1 = load <2 x i32>* %A
	%tmp2 = load <2 x i32>* %B
	%tmp3 = call <2 x i32> @llvm.arm.neon.vmaxu.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
	ret <2 x i32> %tmp3
}
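; VMAX.f32: floating-point maximum of <2 x float> (D registers).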
define <2 x float> @vmaxf32(<2 x float>* %A, <2 x float>* %B) nounwind {
	%tmp1 = load <2 x float>* %A
	%tmp2 = load <2 x float>* %B
	%tmp3 = call <2 x float> @llvm.arm.neon.vmaxf.v2f32(<2 x float> %tmp1, <2 x float> %tmp2)
	ret <2 x float> %tmp3
}
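; 128-bit (Q-register) versions of the same operations follow.

; VMAX.s8: signed maximum of <16 x i8> (Q registers).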
define <16 x i8> @vmaxQs8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
	%tmp1 = load <16 x i8>* %A
	%tmp2 = load <16 x i8>* %B
	%tmp3 = call <16 x i8> @llvm.arm.neon.vmaxs.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
	ret <16 x i8> %tmp3
}
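; VMAX.s16: signed maximum of <8 x i16> (Q registers).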
define <8 x i16> @vmaxQs16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
	%tmp1 = load <8 x i16>* %A
	%tmp2 = load <8 x i16>* %B
	%tmp3 = call <8 x i16> @llvm.arm.neon.vmaxs.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
	ret <8 x i16> %tmp3
}
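; VMAX.s32: signed maximum of <4 x i32> (Q registers).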
define <4 x i32> @vmaxQs32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
	%tmp1 = load <4 x i32>* %A
	%tmp2 = load <4 x i32>* %B
	%tmp3 = call <4 x i32> @llvm.arm.neon.vmaxs.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
	ret <4 x i32> %tmp3
}
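; VMAX.u8: unsigned maximum of <16 x i8> (Q registers).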
define <16 x i8> @vmaxQu8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
	%tmp1 = load <16 x i8>* %A
	%tmp2 = load <16 x i8>* %B
	%tmp3 = call <16 x i8> @llvm.arm.neon.vmaxu.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
	ret <16 x i8> %tmp3
}
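; VMAX.u16: unsigned maximum of <8 x i16> (Q registers).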
define <8 x i16> @vmaxQu16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
	%tmp1 = load <8 x i16>* %A
	%tmp2 = load <8 x i16>* %B
	%tmp3 = call <8 x i16> @llvm.arm.neon.vmaxu.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
	ret <8 x i16> %tmp3
}
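; VMAX.u32: unsigned maximum of <4 x i32> (Q registers).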
define <4 x i32> @vmaxQu32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
	%tmp1 = load <4 x i32>* %A
	%tmp2 = load <4 x i32>* %B
	%tmp3 = call <4 x i32> @llvm.arm.neon.vmaxu.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
	ret <4 x i32> %tmp3
}
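; VMAX.f32: floating-point maximum of <4 x float> (Q registers).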
define <4 x float> @vmaxQf32(<4 x float>* %A, <4 x float>* %B) nounwind {
	%tmp1 = load <4 x float>* %A
	%tmp2 = load <4 x float>* %B
	%tmp3 = call <4 x float> @llvm.arm.neon.vmaxf.v4f32(<4 x float> %tmp1, <4 x float> %tmp2)
	ret <4 x float> %tmp3
}
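; Intrinsic declarations. The signed (vmaxs), unsigned (vmaxu), and
; float (vmaxf) flavors, together with the vector type, determine
; which VMAX encoding llc selects.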
declare <8 x i8>  @llvm.arm.neon.vmaxs.v8i8(<8 x i8>, <8 x i8>) nounwind readnone
declare <4 x i16> @llvm.arm.neon.vmaxs.v4i16(<4 x i16>, <4 x i16>) nounwind readnone
declare <2 x i32> @llvm.arm.neon.vmaxs.v2i32(<2 x i32>, <2 x i32>) nounwind readnone

declare <8 x i8>  @llvm.arm.neon.vmaxu.v8i8(<8 x i8>, <8 x i8>) nounwind readnone
declare <4 x i16> @llvm.arm.neon.vmaxu.v4i16(<4 x i16>, <4 x i16>) nounwind readnone
declare <2 x i32> @llvm.arm.neon.vmaxu.v2i32(<2 x i32>, <2 x i32>) nounwind readnone

declare <2 x float> @llvm.arm.neon.vmaxf.v2f32(<2 x float>, <2 x float>) nounwind readnone

declare <16 x i8> @llvm.arm.neon.vmaxs.v16i8(<16 x i8>, <16 x i8>) nounwind readnone
declare <8 x i16> @llvm.arm.neon.vmaxs.v8i16(<8 x i16>, <8 x i16>) nounwind readnone
declare <4 x i32> @llvm.arm.neon.vmaxs.v4i32(<4 x i32>, <4 x i32>) nounwind readnone

declare <16 x i8> @llvm.arm.neon.vmaxu.v16i8(<16 x i8>, <16 x i8>) nounwind readnone
declare <8 x i16> @llvm.arm.neon.vmaxu.v8i16(<8 x i16>, <8 x i16>) nounwind readnone
declare <4 x i32> @llvm.arm.neon.vmaxu.v4i32(<4 x i32>, <4 x i32>) nounwind readnone

declare <4 x float> @llvm.arm.neon.vmaxf.v4f32(<4 x float>, <4 x float>) nounwind readnone