; RUN: llc < %s -mtriple=armv8 -mattr=+crypto | FileCheck %s

; Chain the four ARMv8 AES instructions (decrypt round, encrypt round,
; inverse mix-columns, mix-columns) so each result feeds the next, and
; check that llc selects the NEON q-register crypto forms in order.
define arm_aapcs_vfpcc <16 x i8> @test_aesde(<16 x i8>* %a, <16 x i8> *%b) {
  %state = load <16 x i8>* %a
  %key = load <16 x i8>* %b
  %dec = call <16 x i8> @llvm.arm.neon.aesd.v16i8(<16 x i8> %state, <16 x i8> %key)
; CHECK: aesd.8 q{{[0-9]+}}, q{{[0-9]+}}
  %enc = call <16 x i8> @llvm.arm.neon.aese.v16i8(<16 x i8> %dec, <16 x i8> %key)
; CHECK: aese.8 q{{[0-9]+}}, q{{[0-9]+}}
  %imc = call <16 x i8> @llvm.arm.neon.aesimc.v16i8(<16 x i8> %enc)
; CHECK: aesimc.8 q{{[0-9]+}}, q{{[0-9]+}}
  %mc = call <16 x i8> @llvm.arm.neon.aesmc.v16i8(<16 x i8> %imc)
; CHECK: aesmc.8 q{{[0-9]+}}, q{{[0-9]+}}
  ret <16 x i8> %mc
}
; Exercise every SHA-1 and SHA-256 intrinsic in one dependency chain so
; FileCheck can verify each maps to its q-register instruction in order.
define arm_aapcs_vfpcc <4 x i32> @test_sha(<4 x i32> *%a, <4 x i32> *%b, <4 x i32> *%c) {
  %va = load <4 x i32>* %a
  %vb = load <4 x i32>* %b
  %vc = load <4 x i32>* %c
  %h = call <4 x i32> @llvm.arm.neon.sha1h.v4i32(<4 x i32> %va)
; CHECK: sha1h.32 q{{[0-9]+}}, q{{[0-9]+}}
  %s1c = call <4 x i32> @llvm.arm.neon.sha1c.v4i32(<4 x i32> %vb, <4 x i32> %vc, <4 x i32> %h)
; CHECK: sha1c.32 q{{[0-9]+}}, q{{[0-9]+}}, q{{[0-9]+}}
  %s1m = call <4 x i32> @llvm.arm.neon.sha1m.v4i32(<4 x i32> %s1c, <4 x i32> %vc, <4 x i32> %h)
; CHECK: sha1m.32 q{{[0-9]+}}, q{{[0-9]+}}, q{{[0-9]+}}
  %s1p = call <4 x i32> @llvm.arm.neon.sha1p.v4i32(<4 x i32> %s1m, <4 x i32> %vc, <4 x i32> %h)
; CHECK: sha1p.32 q{{[0-9]+}}, q{{[0-9]+}}, q{{[0-9]+}}
  %su0 = call <4 x i32> @llvm.arm.neon.sha1su0.v4i32(<4 x i32> %s1p, <4 x i32> %vc, <4 x i32> %h)
; CHECK: sha1su0.32 q{{[0-9]+}}, q{{[0-9]+}}, q{{[0-9]+}}
  %su1 = call <4 x i32> @llvm.arm.neon.sha1su1.v4i32(<4 x i32> %su0, <4 x i32> %h)
; CHECK: sha1su1.32 q{{[0-9]+}}, q{{[0-9]+}}
  %h256 = call <4 x i32> @llvm.arm.neon.sha256h.v4i32(<4 x i32> %su1, <4 x i32> %vc, <4 x i32> %h)
; CHECK: sha256h.32 q{{[0-9]+}}, q{{[0-9]+}}, q{{[0-9]+}}
  %h256b = call <4 x i32> @llvm.arm.neon.sha256h2.v4i32(<4 x i32> %h256, <4 x i32> %vc, <4 x i32> %h)
; CHECK: sha256h2.32 q{{[0-9]+}}, q{{[0-9]+}}, q{{[0-9]+}}
  %s256u1 = call <4 x i32> @llvm.arm.neon.sha256su1.v4i32(<4 x i32> %h256b, <4 x i32> %vc, <4 x i32> %h)
; CHECK: sha256su1.32 q{{[0-9]+}}, q{{[0-9]+}}, q{{[0-9]+}}
  %s256u0 = call <4 x i32> @llvm.arm.neon.sha256su0.v4i32(<4 x i32> %s256u1, <4 x i32> %vc)
; CHECK: sha256su0.32 q{{[0-9]+}}, q{{[0-9]+}}
  ret <4 x i32> %s256u0
}
; Declarations for the ARM NEON crypto intrinsics exercised by the
; functions above (AES round/mix-columns ops and the SHA-1/SHA-256
; hash-update and schedule-update ops).
declare <16 x i8> @llvm.arm.neon.aesd.v16i8(<16 x i8>, <16 x i8>)
declare <16 x i8> @llvm.arm.neon.aese.v16i8(<16 x i8>, <16 x i8>)
declare <16 x i8> @llvm.arm.neon.aesimc.v16i8(<16 x i8>)
declare <16 x i8> @llvm.arm.neon.aesmc.v16i8(<16 x i8>)
declare <4 x i32> @llvm.arm.neon.sha1h.v4i32(<4 x i32>)
declare <4 x i32> @llvm.arm.neon.sha1c.v4i32(<4 x i32>, <4 x i32>, <4 x i32>)
declare <4 x i32> @llvm.arm.neon.sha1m.v4i32(<4 x i32>, <4 x i32>, <4 x i32>)
declare <4 x i32> @llvm.arm.neon.sha1p.v4i32(<4 x i32>, <4 x i32>, <4 x i32>)
declare <4 x i32> @llvm.arm.neon.sha1su0.v4i32(<4 x i32>, <4 x i32>, <4 x i32>)
declare <4 x i32> @llvm.arm.neon.sha256h.v4i32(<4 x i32>, <4 x i32>, <4 x i32>)
declare <4 x i32> @llvm.arm.neon.sha256h2.v4i32(<4 x i32>, <4 x i32>, <4 x i32>)
declare <4 x i32> @llvm.arm.neon.sha256su1.v4i32(<4 x i32>, <4 x i32>, <4 x i32>)
declare <4 x i32> @llvm.arm.neon.sha256su0.v4i32(<4 x i32>, <4 x i32>)
declare <4 x i32> @llvm.arm.neon.sha1su1.v4i32(<4 x i32>, <4 x i32>)