Fix memory access lowering on SPU, adding
support for the case where alignment < value size.
These cases were silently miscompiled before this patch.
The lowered sequences are now overly verbose (especially for
stores), so any front-end should still avoid misaligned memory
accesses as much as possible. The bit-juggling algorithm added
here probably still has room for improvement.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@118889 91177308-0d34-0410-b5e6-96231b3b80d8
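
For reference, a minimal C sketch of the lowering strategy (the
helper name is hypothetical, not code from the backend): a load
whose alignment is smaller than its size may straddle a 16-byte
boundary, so it is emulated with two aligned quadword loads plus
byte extraction, which is the two-'lqd' shape the test below
checks for.

    #include <stdint.h>
    #include <string.h>

    /* Sketch only: emulate an unaligned 32-bit load on hardware
     * that provides just 16-byte-aligned quadword loads. Byte
     * order here is the host's; the real lowering operates on
     * big-endian SPU registers. */
    static uint32_t load_u32_misaligned(const uint8_t *ptr)
    {
        uintptr_t addr = (uintptr_t)ptr;
        const uint8_t *base = (const uint8_t *)(addr & ~(uintptr_t)15);
        uintptr_t offset = addr & 15;

        uint8_t chunks[32];
        memcpy(chunks, base, 16);           /* first aligned quadword  */
        memcpy(chunks + 16, base + 16, 16); /* second aligned quadword */

        uint32_t value;
        memcpy(&value, chunks + offset, sizeof value); /* extract the span */
        return value;
    }
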
diff --git a/test/CodeGen/CellSPU/loads.ll b/test/CodeGen/CellSPU/loads.ll
index d40217d..03d7ad1 100644
--- a/test/CodeGen/CellSPU/loads.ll
+++ b/test/CodeGen/CellSPU/loads.ll
@@ -38,3 +38,15 @@
%val = load <4 x float>* undef
ret <4 x float> %val
}
+
+; Check that 'misaligned' loads that may span two 16-byte chunks
+; are lowered to two loads. Don't check the bit manipulation, as
+; that may change with improved algorithms or scheduling.
+define i32 @load_misaligned(i32* %ptr) {
+;CHECK: load_misaligned
+;CHECK: lqd
+;CHECK: lqd
+;CHECK: bi $lr
+ %rv = load i32* %ptr, align 2
+ ret i32 %rv
+}
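
The store side is more verbose because a misaligned store becomes
a read-modify-write over both quadwords it may touch. A similar
hedged C sketch (again a hypothetical helper, not the backend's
actual code):

    #include <stdint.h>
    #include <string.h>

    /* Sketch only: a misaligned 32-bit store loads both candidate
     * quadwords, splices the new bytes in, and writes both back,
     * which is why the lowered store sequence is so long. */
    static void store_u32_misaligned(uint8_t *ptr, uint32_t value)
    {
        uintptr_t addr = (uintptr_t)ptr;
        uint8_t *base = (uint8_t *)(addr & ~(uintptr_t)15);
        uintptr_t offset = addr & 15;

        uint8_t chunks[32];
        memcpy(chunks, base, 16);           /* load both quadwords */
        memcpy(chunks + 16, base + 16, 16);

        memcpy(chunks + offset, &value, sizeof value); /* splice bytes in */

        memcpy(base, chunks, 16);           /* write both back */
        memcpy(base + 16, chunks + 16, 16);
    }
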