Fix memory access lowering on SPU, adding
support for the case where alignment < value size.

These cases were silently miscompiled before this patch.
The generated code is now overly verbose (especially for stores),
so any front-end should still avoid misaligned memory
accesses as much as possible. The bit-juggling algorithm
added here probably still has some room for improvement.
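
To illustrate the general idea (a rough sketch in C, not the actual
DAG lowering this patch adds): on a target like the SPU, which only
supports 16-byte-aligned quadword loads and stores, a misaligned
store has to become a read-modify-write of the enclosing aligned
quadword(s). The helper below is made up for the example.

    /* Sketch only: emulate a misaligned 32-bit store on a machine that can
     * only load and store 16-byte-aligned quadwords.  The store becomes an
     * aligned load, a merge of the new bytes, and an aligned store back.
     * A value straddling a quadword boundary needs two quadwords. */
    #include <stdint.h>
    #include <string.h>
    #include <stdio.h>

    static void store_u32_unaligned(uint8_t *addr, uint32_t value) {
        /* Round down to the enclosing 16-byte-aligned quadword. */
        uintptr_t p = (uintptr_t)addr;
        uint8_t *qbase = (uint8_t *)(p & ~(uintptr_t)15);
        size_t offset = (size_t)(p & 15);

        /* Load enough aligned quadwords to cover the value (one or two). */
        uint8_t buf[32];
        size_t span = (offset + sizeof(value) > 16) ? 32 : 16;
        memcpy(buf, qbase, span);          /* aligned "lqd"-style loads */

        /* Merge the new bytes into the loaded data (the "bit juggling"). */
        memcpy(buf + offset, &value, sizeof(value));

        /* Write the modified quadword(s) back with aligned stores. */
        memcpy(qbase, buf, span);          /* aligned "stqd"-style stores */
    }

    int main(void) {
        _Alignas(16) uint8_t mem[32] = {0};
        store_u32_unaligned(mem + 14, 0xAABBCCDDu);  /* straddles a boundary */
        printf("%02x %02x %02x %02x\n", mem[14], mem[15], mem[16], mem[17]);
        return 0;
    }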


git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@118889 91177308-0d34-0410-b5e6-96231b3b80d8
diff --git a/test/CodeGen/CellSPU/arg_ret.ll b/test/CodeGen/CellSPU/arg_ret.ll
index 6f07458..7410b72 100644
--- a/test/CodeGen/CellSPU/arg_ret.ll
+++ b/test/CodeGen/CellSPU/arg_ret.ll
@@ -26,7 +26,7 @@
 
 define ccc %paramstruct @test_return( i32 %param,  %paramstruct %prm )
 {
-;CHECK:  lqd	$75, 80($sp)
+;CHECK:  lqd	{{\$[0-9]+}}, 80($sp)
 ;CHECK-NOT:	ori	{{\$[0-9]+, \$[0-9]+, 0}}
 ;CHECK:  lr    $3, $4
   ret %paramstruct %prm