Lower Neon VLD* intrinsics to custom DAG nodes, and manually allocate the
results to fixed registers.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@78025 91177308-0d34-0410-b5e6-96231b3b80d8
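The LowerINTRINSIC_W_CHAIN hook declared below is the entry point where chained intrinsics such as llvm.arm.neon.vld2/vld3/vld4 get rewritten into the ARMISD::VLD2D/VLD3D/VLD4D nodes added to the NodeType enum. The following is a minimal sketch of what such a lowering helper could look like; the helper name, operand positions, and exact SelectionDAG calls are assumptions for illustration, not the code this commit actually adds:

// Illustrative sketch only -- not part of this patch.  A helper like this
// would sit in lib/Target/ARM/ARMISelLowering.cpp next to the other
// Lower* routines and be called from LowerINTRINSIC_W_CHAIN.
static SDValue LowerNeonVLDIntrinsic(SDValue Op, SelectionDAG &DAG,
                                     unsigned Opcode, unsigned NumVecs) {
  SDNode *Node = Op.getNode();
  DebugLoc dl = Node->getDebugLoc();
  MVT VT = Node->getValueType(0);        // type of each loaded vector

  // For an INTRINSIC_W_CHAIN node: operand 0 is the chain, operand 1 the
  // intrinsic ID, and operand 2 the address being loaded from.
  SDValue Ops[] = { Node->getOperand(0), Node->getOperand(2) };

  // The custom node produces NumVecs result vectors plus an output chain.
  std::vector<MVT> ResultTys(NumVecs, VT);
  ResultTys.push_back(MVT::Other);
  SDVTList VTs = DAG.getVTList(&ResultTys[0], ResultTys.size());
  return DAG.getNode(Opcode, dl, VTs, Ops, 2);
}

LowerINTRINSIC_W_CHAIN would then presumably switch on the intrinsic ID (Intrinsic::arm_neon_vld2 and friends) and forward to such a helper with ARMISD::VLD2D, VLD3D, or VLD4D and the matching vector count; the assignment of the results to fixed registers mentioned in the summary above is handled separately.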
diff --git a/lib/Target/ARM/ARMISelLowering.h b/lib/Target/ARM/ARMISelLowering.h
index d0806fb..f32db3f 100644
--- a/lib/Target/ARM/ARMISelLowering.h
+++ b/lib/Target/ARM/ARMISelLowering.h
@@ -114,7 +114,12 @@
VGETLANEs, // sign-extend vector extract element
// Vector duplicate lane (128-bit result only; 64-bit is a shuffle)
- VDUPLANEQ // splat a lane from a 64-bit vector to a 128-bit vector
+ VDUPLANEQ, // splat a lane from a 64-bit vector to a 128-bit vector
+
+ // Vector load/store with (de)interleaving
+ VLD2D,  // load and deinterleave 2 64-bit (D-register) vectors
+ VLD3D,  // load and deinterleave 3 64-bit (D-register) vectors
+ VLD4D   // load and deinterleave 4 64-bit (D-register) vectors
};
}
@@ -237,6 +242,7 @@
SDNode *LowerCallResult(SDValue Chain, SDValue InFlag, CallSDNode *TheCall,
unsigned CallingConv, SelectionDAG &DAG);
SDValue LowerCALL(SDValue Op, SelectionDAG &DAG);
+ SDValue LowerINTRINSIC_W_CHAIN(SDValue Op, SelectionDAG &DAG);
SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG);
SDValue LowerRET(SDValue Op, SelectionDAG &DAG);
SDValue LowerGlobalAddressDarwin(SDValue Op, SelectionDAG &DAG);