Reapply "[ARM] Combine base-updating/post-incrementing vector load/stores."

r223862 tried to also combine base-updating load/stores.
r224198 reverted it, as "it created a regression on the test-suite
on test MultiSource/Benchmarks/Ptrdist/anagram by scrambling the order
in which the words are shown."
Reapply, with a fix to ignore non-normal load/stores.
Truncstores are handled elsewhere (you can actually write a pattern for
those, whereas for postinc loads you can't, since they return two values),
but it should be possible to also combine extload base updates, by checking
that the memory (rather than result) type is of the same size as the addend.

Original commit message:
We used to only combine intrinsics, and turn them into VLD1_UPD/VST1_UPD
when the base pointer is incremented after the load/store.

We can do the same thing for generic load/stores.

Note that we can only combine the first load/store+adds pair in
a sequence (as might be generated for a v16f32 load for instance),
because other combines turn the base pointer addition chain (each
computing the address of the next load, from the address of the last
load) into independent additions (common base pointer + this load's
offset).

Differential Revision: http://reviews.llvm.org/D6585

llvm-svn: 224203
diff --git a/llvm/test/CodeGen/ARM/alloc-no-stack-realign.ll b/llvm/test/CodeGen/ARM/alloc-no-stack-realign.ll
index 174b811..635db7c 100644
--- a/llvm/test/CodeGen/ARM/alloc-no-stack-realign.ll
+++ b/llvm/test/CodeGen/ARM/alloc-no-stack-realign.ll
@@ -9,8 +9,8 @@
 define void @test1(<16 x float>* noalias sret %agg.result) nounwind ssp "no-realign-stack" {
 entry:
 ; NO-REALIGN-LABEL: test1
-; NO-REALIGN: vld1.64 {{{d[0-9]+, d[0-9]+}}}, [r[[R1:[0-9]+]]:128]
-; NO-REALIGN: add r[[R2:[0-9]+]], r[[R1]], #16
+; NO-REALIGN: mov r[[R2:[0-9]+]], r[[R1:[0-9]+]]
+; NO-REALIGN: vld1.32 {{{d[0-9]+, d[0-9]+}}}, [r[[R2]]:128]!
 ; NO-REALIGN: vld1.64 {{{d[0-9]+, d[0-9]+}}}, [r[[R2]]:128]
 ; NO-REALIGN: add r[[R2:[0-9]+]], r[[R1]], #32
 ; NO-REALIGN: vld1.64 {{{d[0-9]+, d[0-9]+}}}, [r[[R2]]:128]
@@ -21,16 +21,14 @@
 ; NO-REALIGN: vst1.64 {{{d[0-9]+, d[0-9]+}}}, [r[[R2]]:128]
 ; NO-REALIGN: add r[[R2:[0-9]+]], r[[R1]], #32
 ; NO-REALIGN: vst1.64 {{{d[0-9]+, d[0-9]+}}}, [r[[R2]]:128]
-; NO-REALIGN: add r[[R2:[0-9]+]], r[[R1]], #16
-; NO-REALIGN: vst1.64 {{{d[0-9]+, d[0-9]+}}}, [r[[R2]]:128]
+; NO-REALIGN: vst1.32 {{{d[0-9]+, d[0-9]+}}}, [r[[R1]]:128]!
 ; NO-REALIGN: vst1.64 {{{d[0-9]+, d[0-9]+}}}, [r[[R1]]:128]
 
 ; NO-REALIGN: add r[[R2:[0-9]+]], r[[R0:0]], #48
 ; NO-REALIGN: vst1.64 {{{d[0-9]+, d[0-9]+}}}, [r[[R2]]:128]
 ; NO-REALIGN: add r[[R2:[0-9]+]], r[[R0]], #32
 ; NO-REALIGN: vst1.64 {{{d[0-9]+, d[0-9]+}}}, [r[[R2]]:128]
-; NO-REALIGN: add r[[R2:[0-9]+]], r[[R1]], #16
-; NO-REALIGN: vst1.64 {{{d[0-9]+, d[0-9]+}}}, [r[[R2]]:128]
+; NO-REALIGN: vst1.32 {{{d[0-9]+, d[0-9]+}}}, [r[[R0]]:128]!
 ; NO-REALIGN: vst1.64 {{{d[0-9]+, d[0-9]+}}}, [r[[R0]]:128]
  %retval = alloca <16 x float>, align 16
  %0 = load <16 x float>* @T3_retval, align 16
@@ -44,8 +42,8 @@
 entry:
 ; REALIGN-LABEL: test2
 ; REALIGN: bic sp, sp, #63
-; REALIGN: vld1.64 {{{d[0-9]+, d[0-9]+}}}, [r[[R1:[0-9]+]]:128]
-; REALIGN: add r[[R2:[0-9]+]], r[[R1]], #16
+; REALIGN: mov r[[R2:[0-9]+]], r[[R1:[0-9]+]]
+; REALIGN: vld1.32 {{{d[0-9]+, d[0-9]+}}}, [r[[R2]]:128]!
 ; REALIGN: vld1.64 {{{d[0-9]+, d[0-9]+}}}, [r[[R2]]:128]
 ; REALIGN: add r[[R2:[0-9]+]], r[[R1]], #32
 ; REALIGN: vld1.64 {{{d[0-9]+, d[0-9]+}}}, [r[[R2]]:128]
@@ -65,8 +63,7 @@
 ; REALIGN: vst1.64 {{{d[0-9]+, d[0-9]+}}}, [r[[R1]]:128]
 ; REALIGN: add r[[R1:[0-9]+]], r[[R0]], #32
 ; REALIGN: vst1.64 {{{d[0-9]+, d[0-9]+}}}, [r[[R1]]:128]
-; REALIGN: add r[[R1:[0-9]+]], r[[R0]], #16
-; REALIGN: vst1.64 {{{d[0-9]+, d[0-9]+}}}, [r[[R1]]:128]
+; REALIGN: vst1.32 {{{d[0-9]+, d[0-9]+}}}, [r[[R0]]:128]!
 ; REALIGN: vst1.64 {{{d[0-9]+, d[0-9]+}}}, [r[[R0]]:128]
  %retval = alloca <16 x float>, align 16
  %0 = load <16 x float>* @T3_retval, align 16
diff --git a/llvm/test/CodeGen/ARM/memcpy-inline.ll b/llvm/test/CodeGen/ARM/memcpy-inline.ll
index 84ce4a7..dca2eb9 100644
--- a/llvm/test/CodeGen/ARM/memcpy-inline.ll
+++ b/llvm/test/CodeGen/ARM/memcpy-inline.ll
@@ -46,10 +46,8 @@
 ; CHECK: movw [[REG2:r[0-9]+]], #16716
 ; CHECK: movt [[REG2:r[0-9]+]], #72
 ; CHECK: str [[REG2]], [r0, #32]
-; CHECK: vld1.8 {d{{[0-9]+}}, d{{[0-9]+}}}, [r1]
-; CHECK: vst1.8 {d{{[0-9]+}}, d{{[0-9]+}}}, [r0]
-; CHECK: adds r0, #16
-; CHECK: adds r1, #16
+; CHECK: vld1.64 {d{{[0-9]+}}, d{{[0-9]+}}}, [r1]!
+; CHECK: vst1.64 {d{{[0-9]+}}, d{{[0-9]+}}}, [r0]!
 ; CHECK: vld1.8 {d{{[0-9]+}}, d{{[0-9]+}}}, [r1]
 ; CHECK: vst1.8 {d{{[0-9]+}}, d{{[0-9]+}}}, [r0]
   tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %C, i8* getelementptr inbounds ([36 x i8]* @.str2, i64 0, i64 0), i64 36, i32 1, i1 false)
@@ -59,10 +57,8 @@
 define void @t3(i8* nocapture %C) nounwind {
 entry:
 ; CHECK-LABEL: t3:
-; CHECK: vld1.8 {d{{[0-9]+}}, d{{[0-9]+}}}, [r1]
-; CHECK: vst1.8 {d{{[0-9]+}}, d{{[0-9]+}}}, [r0]
-; CHECK: adds r0, #16
-; CHECK: adds r1, #16
+; CHECK: vld1.64 {d{{[0-9]+}}, d{{[0-9]+}}}, [r1]!
+; CHECK: vst1.64 {d{{[0-9]+}}, d{{[0-9]+}}}, [r0]!
 ; CHECK: vld1.8 {d{{[0-9]+}}}, [r1]
 ; CHECK: vst1.8 {d{{[0-9]+}}}, [r0]
   tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %C, i8* getelementptr inbounds ([24 x i8]* @.str3, i64 0, i64 0), i64 24, i32 1, i1 false)
@@ -73,7 +69,8 @@
 entry:
 ; CHECK-LABEL: t4:
 ; CHECK: vld1.8 {[[REG3:d[0-9]+]], [[REG4:d[0-9]+]]}, [r1]
-; CHECK: vst1.8 {[[REG3]], [[REG4]]}, [r0]
+; CHECK: vst1.64 {[[REG3]], [[REG4]]}, [r0]!
+; CHECK: strh [[REG5:r[0-9]+]], [r0]
   tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %C, i8* getelementptr inbounds ([18 x i8]* @.str4, i64 0, i64 0), i64 18, i32 1, i1 false)
   ret void
 }
diff --git a/llvm/test/CodeGen/ARM/vector-load.ll b/llvm/test/CodeGen/ARM/vector-load.ll
new file mode 100644
index 0000000..0a018d8
--- /dev/null
+++ b/llvm/test/CodeGen/ARM/vector-load.ll
@@ -0,0 +1,212 @@
+; RUN: llc < %s | FileCheck %s
+
+target datalayout = "e-m:o-p:32:32-i1:8:32-i8:8:32-i16:16:32-f64:32:64-v64:32:64-v128:32:128-a:0:32-n32-S32"
+target triple = "thumbv7s-apple-ios8.0.0"
+
+define <8 x i8> @load_v8i8(<8 x i8>** %ptr) {
+;CHECK-LABEL: load_v8i8:
+;CHECK: vld1.8 {{{d[0-9]+}}}, [{{r[0-9]+}}]
+	%A = load <8 x i8>** %ptr
+	%lA = load <8 x i8>* %A, align 1
+	ret <8 x i8> %lA
+}
+
+define <8 x i8> @load_v8i8_update(<8 x i8>** %ptr) {
+;CHECK-LABEL: load_v8i8_update:
+;CHECK: vld1.8 {{{d[0-9]+}}}, [{{r[0-9]+}}]!
+	%A = load <8 x i8>** %ptr
+	%lA = load <8 x i8>* %A, align 1
+	%inc = getelementptr <8 x i8>* %A, i38 1
+        store <8 x i8>* %inc, <8 x i8>** %ptr
+	ret <8 x i8> %lA
+}
+
+define <4 x i16> @load_v4i16(<4 x i16>** %ptr) {
+;CHECK-LABEL: load_v4i16:
+;CHECK: vld1.8 {{{d[0-9]+}}}, [{{r[0-9]+}}]
+	%A = load <4 x i16>** %ptr
+	%lA = load <4 x i16>* %A, align 1
+	ret <4 x i16> %lA
+}
+
+define <4 x i16> @load_v4i16_update(<4 x i16>** %ptr) {
+;CHECK-LABEL: load_v4i16_update:
+;CHECK: vld1.16 {{{d[0-9]+}}}, [{{r[0-9]+}}]!
+	%A = load <4 x i16>** %ptr
+	%lA = load <4 x i16>* %A, align 1
+	%inc = getelementptr <4 x i16>* %A, i34 1
+        store <4 x i16>* %inc, <4 x i16>** %ptr
+	ret <4 x i16> %lA
+}
+
+define <2 x i32> @load_v2i32(<2 x i32>** %ptr) {
+;CHECK-LABEL: load_v2i32:
+;CHECK: vld1.8 {{{d[0-9]+}}}, [{{r[0-9]+}}]
+	%A = load <2 x i32>** %ptr
+	%lA = load <2 x i32>* %A, align 1
+	ret <2 x i32> %lA
+}
+
+define <2 x i32> @load_v2i32_update(<2 x i32>** %ptr) {
+;CHECK-LABEL: load_v2i32_update:
+;CHECK: vld1.32 {{{d[0-9]+}}}, [{{r[0-9]+}}]!
+	%A = load <2 x i32>** %ptr
+	%lA = load <2 x i32>* %A, align 1
+	%inc = getelementptr <2 x i32>* %A, i32 1
+        store <2 x i32>* %inc, <2 x i32>** %ptr
+	ret <2 x i32> %lA
+}
+
+define <2 x float> @load_v2f32(<2 x float>** %ptr) {
+;CHECK-LABEL: load_v2f32:
+;CHECK: vld1.8 {{{d[0-9]+}}}, [{{r[0-9]+}}]
+	%A = load <2 x float>** %ptr
+	%lA = load <2 x float>* %A, align 1
+	ret <2 x float> %lA
+}
+
+define <2 x float> @load_v2f32_update(<2 x float>** %ptr) {
+;CHECK-LABEL: load_v2f32_update:
+;CHECK: vld1.32 {{{d[0-9]+}}}, [{{r[0-9]+}}]!
+	%A = load <2 x float>** %ptr
+	%lA = load <2 x float>* %A, align 1
+	%inc = getelementptr <2 x float>* %A, i32 1
+        store <2 x float>* %inc, <2 x float>** %ptr
+	ret <2 x float> %lA
+}
+
+define <1 x i64> @load_v1i64(<1 x i64>** %ptr) {
+;CHECK-LABEL: load_v1i64:
+;CHECK: vld1.8 {{{d[0-9]+}}}, [{{r[0-9]+}}]
+	%A = load <1 x i64>** %ptr
+	%lA = load <1 x i64>* %A, align 1
+	ret <1 x i64> %lA
+}
+
+define <1 x i64> @load_v1i64_update(<1 x i64>** %ptr) {
+;CHECK-LABEL: load_v1i64_update:
+;CHECK: vld1.64 {{{d[0-9]+}}}, [{{r[0-9]+}}]!
+	%A = load <1 x i64>** %ptr
+	%lA = load <1 x i64>* %A, align 1
+	%inc = getelementptr <1 x i64>* %A, i31 1
+        store <1 x i64>* %inc, <1 x i64>** %ptr
+	ret <1 x i64> %lA
+}
+
+define <16 x i8> @load_v16i8(<16 x i8>** %ptr) {
+;CHECK-LABEL: load_v16i8:
+;CHECK: vld1.8 {{{d[0-9]+, d[0-9]+}}}, [{{r[0-9]+}}]
+	%A = load <16 x i8>** %ptr
+	%lA = load <16 x i8>* %A, align 1
+	ret <16 x i8> %lA
+}
+
+define <16 x i8> @load_v16i8_update(<16 x i8>** %ptr) {
+;CHECK-LABEL: load_v16i8_update:
+;CHECK: vld1.8 {{{d[0-9]+, d[0-9]+}}}, [{{r[0-9]+}}]!
+	%A = load <16 x i8>** %ptr
+	%lA = load <16 x i8>* %A, align 1
+	%inc = getelementptr <16 x i8>* %A, i316 1
+        store <16 x i8>* %inc, <16 x i8>** %ptr
+	ret <16 x i8> %lA
+}
+
+define <8 x i16> @load_v8i16(<8 x i16>** %ptr) {
+;CHECK-LABEL: load_v8i16:
+;CHECK: vld1.8 {{{d[0-9]+, d[0-9]+}}}, [{{r[0-9]+}}]
+	%A = load <8 x i16>** %ptr
+	%lA = load <8 x i16>* %A, align 1
+	ret <8 x i16> %lA
+}
+
+define <8 x i16> @load_v8i16_update(<8 x i16>** %ptr) {
+;CHECK-LABEL: load_v8i16_update:
+;CHECK: vld1.16 {{{d[0-9]+, d[0-9]+}}}, [{{r[0-9]+}}]!
+	%A = load <8 x i16>** %ptr
+	%lA = load <8 x i16>* %A, align 1
+	%inc = getelementptr <8 x i16>* %A, i38 1
+        store <8 x i16>* %inc, <8 x i16>** %ptr
+	ret <8 x i16> %lA
+}
+
+define <4 x i32> @load_v4i32(<4 x i32>** %ptr) {
+;CHECK-LABEL: load_v4i32:
+;CHECK: vld1.8 {{{d[0-9]+, d[0-9]+}}}, [{{r[0-9]+}}]
+	%A = load <4 x i32>** %ptr
+	%lA = load <4 x i32>* %A, align 1
+	ret <4 x i32> %lA
+}
+
+define <4 x i32> @load_v4i32_update(<4 x i32>** %ptr) {
+;CHECK-LABEL: load_v4i32_update:
+;CHECK: vld1.32 {{{d[0-9]+, d[0-9]+}}}, [{{r[0-9]+}}]!
+	%A = load <4 x i32>** %ptr
+	%lA = load <4 x i32>* %A, align 1
+	%inc = getelementptr <4 x i32>* %A, i34 1
+        store <4 x i32>* %inc, <4 x i32>** %ptr
+	ret <4 x i32> %lA
+}
+
+define <4 x float> @load_v4f32(<4 x float>** %ptr) {
+;CHECK-LABEL: load_v4f32:
+;CHECK: vld1.8 {{{d[0-9]+, d[0-9]+}}}, [{{r[0-9]+}}]
+	%A = load <4 x float>** %ptr
+	%lA = load <4 x float>* %A, align 1
+	ret <4 x float> %lA
+}
+
+define <4 x float> @load_v4f32_update(<4 x float>** %ptr) {
+;CHECK-LABEL: load_v4f32_update:
+;CHECK: vld1.32 {{{d[0-9]+, d[0-9]+}}}, [{{r[0-9]+}}]!
+	%A = load <4 x float>** %ptr
+	%lA = load <4 x float>* %A, align 1
+	%inc = getelementptr <4 x float>* %A, i34 1
+        store <4 x float>* %inc, <4 x float>** %ptr
+	ret <4 x float> %lA
+}
+
+define <2 x i64> @load_v2i64(<2 x i64>** %ptr) {
+;CHECK-LABEL: load_v2i64:
+;CHECK: vld1.8 {{{d[0-9]+, d[0-9]+}}}, [{{r[0-9]+}}]
+	%A = load <2 x i64>** %ptr
+	%lA = load <2 x i64>* %A, align 1
+	ret <2 x i64> %lA
+}
+
+define <2 x i64> @load_v2i64_update(<2 x i64>** %ptr) {
+;CHECK-LABEL: load_v2i64_update:
+;CHECK: vld1.64 {{{d[0-9]+, d[0-9]+}}}, [{{r[0-9]+}}]!
+	%A = load <2 x i64>** %ptr
+	%lA = load <2 x i64>* %A, align 1
+	%inc = getelementptr <2 x i64>* %A, i32 1
+        store <2 x i64>* %inc, <2 x i64>** %ptr
+	ret <2 x i64> %lA
+}
+
+; Make sure we don't break smaller-than-dreg extloads.
+define <4 x i32> @zextload_v8i8tov8i32(<4 x i8>** %ptr) {
+;CHECK-LABEL: zextload_v8i8tov8i32:
+;CHECK: vld1.32 {{{d[0-9]+}}[0]}, [{{r[0-9]+}}:32]
+;CHECK: vmovl.u8        {{q[0-9]+}}, {{d[0-9]+}}
+;CHECK: vmovl.u16       {{q[0-9]+}}, {{d[0-9]+}}
+	%A = load <4 x i8>** %ptr
+	%lA = load <4 x i8>* %A, align 1
+        %zlA = zext <4 x i8> %lA to <4 x i32>
+	ret <4 x i32> %zlA
+}
+
+define <4 x i32> @zextload_v8i8tov8i32_fake_update(<4 x i8>** %ptr) {
+;CHECK-LABEL: zextload_v8i8tov8i32_fake_update:
+;CHECK: ldr.w   r[[PTRREG:[0-9]+]], [r0]
+;CHECK: vld1.32 {{{d[0-9]+}}[0]}, [r[[PTRREG]]:32]
+;CHECK: add.w   r[[INCREG:[0-9]+]], r[[PTRREG]], #16
+;CHECK: str.w   r[[INCREG]], [r0]
+;CHECK: vmovl.u8        {{q[0-9]+}}, {{d[0-9]+}}
+;CHECK: vmovl.u16       {{q[0-9]+}}, {{d[0-9]+}}
+	%A = load <4 x i8>** %ptr
+	%lA = load <4 x i8>* %A, align 4
+	%inc = getelementptr <4 x i8>* %A, i38 4
+        store <4 x i8>* %inc, <4 x i8>** %ptr
+        %zlA = zext <4 x i8> %lA to <4 x i32>
+	ret <4 x i32> %zlA
+}
diff --git a/llvm/test/CodeGen/ARM/vector-store.ll b/llvm/test/CodeGen/ARM/vector-store.ll
new file mode 100644
index 0000000..1799411
--- /dev/null
+++ b/llvm/test/CodeGen/ARM/vector-store.ll
@@ -0,0 +1,218 @@
+; RUN: llc < %s | FileCheck %s
+
+target datalayout = "e-m:o-p:32:32-i1:8:32-i8:8:32-i16:16:32-f64:32:64-v64:32:64-v128:32:128-a:0:32-n32-S32"
+target triple = "thumbv7s-apple-ios8.0.0"
+
+define void @store_v8i8(<8 x i8>** %ptr, <8 x i8> %val) {
+;CHECK-LABEL: store_v8i8:
+;CHECK: str r1, [r0]
+	%A = load <8 x i8>** %ptr
+	store  <8 x i8> %val, <8 x i8>* %A, align 1
+	ret void
+}
+
+define void @store_v8i8_update(<8 x i8>** %ptr, <8 x i8> %val) {
+;CHECK-LABEL: store_v8i8_update:
+;CHECK: vst1.8 {{{d[0-9]+}}}, [{{r[0-9]+}}]!
+	%A = load <8 x i8>** %ptr
+	store  <8 x i8> %val, <8 x i8>* %A, align 1
+	%inc = getelementptr <8 x i8>* %A, i38 1
+        store <8 x i8>* %inc, <8 x i8>** %ptr
+	ret void
+}
+
+define void @store_v4i16(<4 x i16>** %ptr, <4 x i16> %val) {
+;CHECK-LABEL: store_v4i16:
+;CHECK: str r1, [r0]
+	%A = load <4 x i16>** %ptr
+	store  <4 x i16> %val, <4 x i16>* %A, align 1
+	ret void
+}
+
+define void @store_v4i16_update(<4 x i16>** %ptr, <4 x i16> %val) {
+;CHECK-LABEL: store_v4i16_update:
+;CHECK: vst1.16 {{{d[0-9]+}}}, [{{r[0-9]+}}]!
+	%A = load <4 x i16>** %ptr
+	store  <4 x i16> %val, <4 x i16>* %A, align 1
+	%inc = getelementptr <4 x i16>* %A, i34 1
+        store <4 x i16>* %inc, <4 x i16>** %ptr
+	ret void
+}
+
+define void @store_v2i32(<2 x i32>** %ptr, <2 x i32> %val) {
+;CHECK-LABEL: store_v2i32:
+;CHECK: str r1, [r0]
+	%A = load <2 x i32>** %ptr
+	store  <2 x i32> %val, <2 x i32>* %A, align 1
+	ret void
+}
+
+define void @store_v2i32_update(<2 x i32>** %ptr, <2 x i32> %val) {
+;CHECK-LABEL: store_v2i32_update:
+;CHECK: vst1.32 {{{d[0-9]+}}}, [{{r[0-9]+}}]!
+	%A = load <2 x i32>** %ptr
+	store  <2 x i32> %val, <2 x i32>* %A, align 1
+	%inc = getelementptr <2 x i32>* %A, i32 1
+        store <2 x i32>* %inc, <2 x i32>** %ptr
+	ret void
+}
+
+define void @store_v2f32(<2 x float>** %ptr, <2 x float> %val) {
+;CHECK-LABEL: store_v2f32:
+;CHECK: str r1, [r0]
+	%A = load <2 x float>** %ptr
+	store  <2 x float> %val, <2 x float>* %A, align 1
+	ret void
+}
+
+define void @store_v2f32_update(<2 x float>** %ptr, <2 x float> %val) {
+;CHECK-LABEL: store_v2f32_update:
+;CHECK: vst1.32 {{{d[0-9]+}}}, [{{r[0-9]+}}]!
+	%A = load <2 x float>** %ptr
+	store  <2 x float> %val, <2 x float>* %A, align 1
+	%inc = getelementptr <2 x float>* %A, i32 1
+        store <2 x float>* %inc, <2 x float>** %ptr
+	ret void
+}
+
+define void @store_v1i64(<1 x i64>** %ptr, <1 x i64> %val) {
+;CHECK-LABEL: store_v1i64:
+;CHECK: str r1, [r0]
+	%A = load <1 x i64>** %ptr
+	store  <1 x i64> %val, <1 x i64>* %A, align 1
+	ret void
+}
+
+define void @store_v1i64_update(<1 x i64>** %ptr, <1 x i64> %val) {
+;CHECK-LABEL: store_v1i64_update:
+;CHECK: vst1.64 {{{d[0-9]+}}}, [{{r[0-9]+}}]!
+	%A = load <1 x i64>** %ptr
+	store  <1 x i64> %val, <1 x i64>* %A, align 1
+	%inc = getelementptr <1 x i64>* %A, i31 1
+        store <1 x i64>* %inc, <1 x i64>** %ptr
+	ret void
+}
+
+define void @store_v16i8(<16 x i8>** %ptr, <16 x i8> %val) {
+;CHECK-LABEL: store_v16i8:
+;CHECK: vst1.8 {{{d[0-9]+, d[0-9]+}}}, [{{r[0-9]+}}]
+	%A = load <16 x i8>** %ptr
+	store  <16 x i8> %val, <16 x i8>* %A, align 1
+	ret void
+}
+
+define void @store_v16i8_update(<16 x i8>** %ptr, <16 x i8> %val) {
+;CHECK-LABEL: store_v16i8_update:
+;CHECK: vst1.8 {{{d[0-9]+, d[0-9]+}}}, [{{r[0-9]+}}]!
+	%A = load <16 x i8>** %ptr
+	store  <16 x i8> %val, <16 x i8>* %A, align 1
+	%inc = getelementptr <16 x i8>* %A, i316 1
+        store <16 x i8>* %inc, <16 x i8>** %ptr
+	ret void
+}
+
+define void @store_v8i16(<8 x i16>** %ptr, <8 x i16> %val) {
+;CHECK-LABEL: store_v8i16:
+;CHECK: vst1.8 {{{d[0-9]+, d[0-9]+}}}, [{{r[0-9]+}}]
+	%A = load <8 x i16>** %ptr
+	store  <8 x i16> %val, <8 x i16>* %A, align 1
+	ret void
+}
+
+define void @store_v8i16_update(<8 x i16>** %ptr, <8 x i16> %val) {
+;CHECK-LABEL: store_v8i16_update:
+;CHECK: vst1.16 {{{d[0-9]+, d[0-9]+}}}, [{{r[0-9]+}}]!
+	%A = load <8 x i16>** %ptr
+	store  <8 x i16> %val, <8 x i16>* %A, align 1
+	%inc = getelementptr <8 x i16>* %A, i38 1
+        store <8 x i16>* %inc, <8 x i16>** %ptr
+	ret void
+}
+
+define void @store_v4i32(<4 x i32>** %ptr, <4 x i32> %val) {
+;CHECK-LABEL: store_v4i32:
+;CHECK: vst1.8 {{{d[0-9]+, d[0-9]+}}}, [{{r[0-9]+}}]
+	%A = load <4 x i32>** %ptr
+	store  <4 x i32> %val, <4 x i32>* %A, align 1
+	ret void
+}
+
+define void @store_v4i32_update(<4 x i32>** %ptr, <4 x i32> %val) {
+;CHECK-LABEL: store_v4i32_update:
+;CHECK: vst1.32 {{{d[0-9]+, d[0-9]+}}}, [{{r[0-9]+}}]!
+	%A = load <4 x i32>** %ptr
+	store  <4 x i32> %val, <4 x i32>* %A, align 1
+	%inc = getelementptr <4 x i32>* %A, i34 1
+        store <4 x i32>* %inc, <4 x i32>** %ptr
+	ret void
+}
+
+define void @store_v4f32(<4 x float>** %ptr, <4 x float> %val) {
+;CHECK-LABEL: store_v4f32:
+;CHECK: vst1.8 {{{d[0-9]+, d[0-9]+}}}, [{{r[0-9]+}}]
+	%A = load <4 x float>** %ptr
+	store  <4 x float> %val, <4 x float>* %A, align 1
+	ret void
+}
+
+define void @store_v4f32_update(<4 x float>** %ptr, <4 x float> %val) {
+;CHECK-LABEL: store_v4f32_update:
+;CHECK: vst1.32 {{{d[0-9]+, d[0-9]+}}}, [{{r[0-9]+}}]!
+	%A = load <4 x float>** %ptr
+	store  <4 x float> %val, <4 x float>* %A, align 1
+	%inc = getelementptr <4 x float>* %A, i34 1
+        store <4 x float>* %inc, <4 x float>** %ptr
+	ret void
+}
+
+define void @store_v2i64(<2 x i64>** %ptr, <2 x i64> %val) {
+;CHECK-LABEL: store_v2i64:
+;CHECK: vst1.8 {{{d[0-9]+, d[0-9]+}}}, [{{r[0-9]+}}]
+	%A = load <2 x i64>** %ptr
+	store  <2 x i64> %val, <2 x i64>* %A, align 1
+	ret void
+}
+
+define void @store_v2i64_update(<2 x i64>** %ptr, <2 x i64> %val) {
+;CHECK-LABEL: store_v2i64_update:
+;CHECK: vst1.64 {{{d[0-9]+, d[0-9]+}}}, [{{r[0-9]+}}]!
+	%A = load <2 x i64>** %ptr
+	store  <2 x i64> %val, <2 x i64>* %A, align 1
+	%inc = getelementptr <2 x i64>* %A, i32 1
+        store <2 x i64>* %inc, <2 x i64>** %ptr
+	ret void
+}
+
+define void @truncstore_v4i32tov4i8(<4 x i8>** %ptr, <4 x i32> %val) {
+;CHECK-LABEL: truncstore_v4i32tov4i8:
+;CHECK: ldr.w   r9, [sp]
+;CHECK: vmov    {{d[0-9]+}}, r3, r9
+;CHECK: vmov    {{d[0-9]+}}, r1, r2
+;CHECK: vmovn.i32       [[VECLO:d[0-9]+]], {{q[0-9]+}}
+;CHECK: vuzp.8  [[VECLO]], {{d[0-9]+}}
+;CHECK: ldr     r[[PTRREG:[0-9]+]], [r0]
+;CHECK: vst1.32 {[[VECLO]][0]}, [r[[PTRREG]]]
+	%A = load <4 x i8>** %ptr
+        %trunc = trunc <4 x i32> %val to <4 x i8>
+	store  <4 x i8> %trunc, <4 x i8>* %A, align 1
+	ret void
+}
+
+define void @truncstore_v4i32tov4i8_fake_update(<4 x i8>** %ptr, <4 x i32> %val) {
+;CHECK-LABEL: truncstore_v4i32tov4i8_fake_update:
+;CHECK: ldr.w   r9, [sp]
+;CHECK: vmov    {{d[0-9]+}}, r3, r9
+;CHECK: vmov    {{d[0-9]+}}, r1, r2
+;CHECK: movs    [[IMM16:r[0-9]+]], #16
+;CHECK: vmovn.i32       [[VECLO:d[0-9]+]], {{q[0-9]+}}
+;CHECK: vuzp.8  [[VECLO]], {{d[0-9]+}}
+;CHECK: ldr     r[[PTRREG:[0-9]+]], [r0]
+;CHECK: vst1.32 {[[VECLO]][0]}, [r[[PTRREG]]:32], [[IMM16]]
+;CHECK: str     r[[PTRREG]], [r0]
+	%A = load <4 x i8>** %ptr
+        %trunc = trunc <4 x i32> %val to <4 x i8>
+	store  <4 x i8> %trunc, <4 x i8>* %A, align 4
+	%inc = getelementptr <4 x i8>* %A, i38 4
+        store <4 x i8>* %inc, <4 x i8>** %ptr
+	ret void
+}