Avoid using i64 types for vld1q_lane/vst1q_lane intrinsics.

The backend has to legalize i64 types by splitting them into two 32-bit pieces,
which leads to poor-quality code.  If we instead emit IR for these intrinsics
using one-element vector types, which can live in Neon vector registers without
being split up, the generated code is much better.  Radar 11998303.
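
For example, for the vst1q_lane_u64 case in the test added below, the lane
store should now come out roughly as follows (a sketch; the value names are
illustrative, and the trailing i32 operand of the intrinsic is the alignment):

  %lane = shufflevector <2 x i64> %q, <2 x i64> undef, <1 x i32> <i32 1>
  call void @llvm.arm.neon.vst1.v1i64(i8* %ptr, <1 x i64> %lane, i32 1)

rather than an extractelement to a scalar i64 followed by a plain i64 store,
which the backend would have to split into two 32-bit operations.  The
vld1q_lane_u64 case is the mirror image: load a <1 x i64> with
@llvm.arm.neon.vld1.v1i64 and shuffle it together with the retained lane,
instead of inserting a scalar i64 into the vector.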

git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@161879 91177308-0d34-0410-b5e6-96231b3b80d8
diff --git a/test/CodeGen/arm-neon-misc.c b/test/CodeGen/arm-neon-misc.c
new file mode 100644
index 0000000..56ce316
--- /dev/null
+++ b/test/CodeGen/arm-neon-misc.c
@@ -0,0 +1,34 @@
+// REQUIRES: arm-registered-target
+// RUN: %clang_cc1 -triple thumbv7-apple-darwin \
+// RUN:   -target-abi apcs-gnu \
+// RUN:   -target-cpu cortex-a8 \
+// RUN:   -mfloat-abi soft \
+// RUN:   -target-feature +soft-float-abi \
+// RUN:   -ffreestanding \
+// RUN:   -emit-llvm -w -o - %s | FileCheck %s
+
+#include <arm_neon.h>
+
+// Radar 11998303: Avoid i64 types for the vld1q_lane and vst1q_lane Neon
+// intrinsics on <2 x i64> vectors; scalar i64 produces poor backend code.
+void t1(uint64_t *src, uint8_t *dst) {
+// CHECK: @t1
+  uint64x2_t q = vld1q_u64(src);
+// CHECK: call <2 x i64> @llvm.arm.neon.vld1.v2i64
+  vst1q_lane_u64(dst, q, 1);
+// CHECK: bitcast <16 x i8> %{{.*}} to <2 x i64>
+// CHECK: shufflevector <2 x i64>
+// CHECK: call void @llvm.arm.neon.vst1.v1i64
+}
+
+void t2(uint64_t *src1, uint8_t *src2, uint64x2_t *dst) {
+// CHECK: @t2
+  uint64x2_t q = vld1q_u64(src1);
+// CHECK: call <2 x i64> @llvm.arm.neon.vld1.v2i64
+  q = vld1q_lane_u64(src2, q, 0);
+// CHECK: shufflevector <2 x i64>
+// CHECK: call <1 x i64> @llvm.arm.neon.vld1.v1i64
+// CHECK: shufflevector <1 x i64>
+  *dst = q;
+// CHECK: store <2 x i64>
+}