[Hexagon] Preclude non-memory tests from being optimized away. NFC.

Make the loads and stores that feed the tested floating-point
operations volatile, so the operations cannot be constant-folded
or dropped as dead code before the CHECK lines get a chance to
match the expected instructions.

llvm-svn: 307153
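
As an illustration of why this helps, here is a minimal sketch of
the pattern (a hypothetical reduced test, not one of the files
below; the RUN and CHECK lines are modeled on the existing tests).
Without the volatile qualifiers, both fadd operands are known
constants, so the compiler may fold the addition away and emit no
add instruction at all, leaving the test with nothing to match:

  ; RUN: llc -march=hexagon -mcpu=hexagonv5 < %s | FileCheck %s
  ; CHECK: dfadd

  define i32 @keep_fadd() nounwind {
  entry:
    %a = alloca double, align 8
    %b = alloca double, align 8
    %c = alloca double, align 8
    ; Volatile stores and loads may not be removed or folded, so
    ; %0 and %1 are not treated as known constants and the fadd
    ; below survives to instruction selection.
    store volatile double 1.540000e+01, double* %a, align 8
    store volatile double 9.100000e+00, double* %b, align 8
    %0 = load volatile double, double* %a, align 8
    %1 = load volatile double, double* %b, align 8
    %add = fadd double %0, %1
    store double %add, double* %c, align 8
    ret i32 0
  }

The same reasoning applies to the conversion tests: where the value
under test is the result of the fadd rather than its operands, the
store of the result and the load feeding the fptosi are the ones
made volatile instead.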
diff --git a/llvm/test/CodeGen/Hexagon/convertdptoint.ll b/llvm/test/CodeGen/Hexagon/convertdptoint.ll
index a09c2fd..adf76e5 100644
--- a/llvm/test/CodeGen/Hexagon/convertdptoint.ll
+++ b/llvm/test/CodeGen/Hexagon/convertdptoint.ll
@@ -12,10 +12,10 @@
   %b = alloca double, align 8
   %c = alloca double, align 8
   store i32 0, i32* %retval
-  store double 1.540000e+01, double* %a, align 8
-  store double 9.100000e+00, double* %b, align 8
-  %0 = load double, double* %a, align 8
-  %1 = load double, double* %b, align 8
+  store volatile double 1.540000e+01, double* %a, align 8
+  store volatile double 9.100000e+00, double* %b, align 8
+  %0 = load volatile double, double* %a, align 8
+  %1 = load volatile double, double* %b, align 8
   %add = fadd double %0, %1
   store double %add, double* %c, align 8
   %2 = load double, double* %c, align 8
diff --git a/llvm/test/CodeGen/Hexagon/convertdptoll.ll b/llvm/test/CodeGen/Hexagon/convertdptoll.ll
index f46d46c..6b5bf56 100644
--- a/llvm/test/CodeGen/Hexagon/convertdptoll.ll
+++ b/llvm/test/CodeGen/Hexagon/convertdptoll.ll
@@ -17,8 +17,8 @@
   %0 = load double, double* %a, align 8
   %1 = load double, double* %b, align 8
   %add = fadd double %0, %1
-  store double %add, double* %c, align 8
-  %2 = load double, double* %c, align 8
+  store volatile double %add, double* %c, align 8
+  %2 = load volatile double, double* %c, align 8
   %conv = fptosi double %2 to i64
   store i64 %conv, i64* %i, align 8
   %3 = load i64, i64* %i, align 8
diff --git a/llvm/test/CodeGen/Hexagon/convertsptoint.ll b/llvm/test/CodeGen/Hexagon/convertsptoint.ll
index 7593e57..939b3b0 100644
--- a/llvm/test/CodeGen/Hexagon/convertsptoint.ll
+++ b/llvm/test/CodeGen/Hexagon/convertsptoint.ll
@@ -17,8 +17,8 @@
   %0 = load float, float* %a, align 4
   %1 = load float, float* %b, align 4
   %add = fadd float %0, %1
-  store float %add, float* %c, align 4
-  %2 = load float, float* %c, align 4
+  store volatile float %add, float* %c, align 4
+  %2 = load volatile float, float* %c, align 4
   %conv = fptosi float %2 to i32
   store i32 %conv, i32* %i, align 4
   %3 = load i32, i32* %i, align 4
diff --git a/llvm/test/CodeGen/Hexagon/convertsptoll.ll b/llvm/test/CodeGen/Hexagon/convertsptoll.ll
index d8432cb..f540397 100644
--- a/llvm/test/CodeGen/Hexagon/convertsptoll.ll
+++ b/llvm/test/CodeGen/Hexagon/convertsptoll.ll
@@ -17,8 +17,8 @@
   %0 = load float, float* %a, align 4
   %1 = load float, float* %b, align 4
   %add = fadd float %0, %1
-  store float %add, float* %c, align 4
-  %2 = load float, float* %c, align 4
+  store volatile float %add, float* %c, align 4
+  %2 = load volatile float, float* %c, align 4
   %conv = fptosi float %2 to i64
   store i64 %conv, i64* %i, align 8
   %3 = load i64, i64* %i, align 8
diff --git a/llvm/test/CodeGen/Hexagon/dadd.ll b/llvm/test/CodeGen/Hexagon/dadd.ll
index 5fcd705..3068f49 100644
--- a/llvm/test/CodeGen/Hexagon/dadd.ll
+++ b/llvm/test/CodeGen/Hexagon/dadd.ll
@@ -9,10 +9,10 @@
   %a = alloca double, align 8
   %b = alloca double, align 8
   %c = alloca double, align 8
-  store double 1.540000e+01, double* %a, align 8
-  store double 9.100000e+00, double* %b, align 8
-  %0 = load double, double* %a, align 8
-  %1 = load double, double* %b, align 8
+  store volatile double 1.540000e+01, double* %a, align 8
+  store volatile double 9.100000e+00, double* %b, align 8
+  %0 = load volatile double, double* %a, align 8
+  %1 = load volatile double, double* %b, align 8
   %add = fadd double %0, %1
   store double %add, double* %c, align 8
   ret i32 0
diff --git a/llvm/test/CodeGen/Hexagon/dmul.ll b/llvm/test/CodeGen/Hexagon/dmul.ll
index 1b79e0a..a6cf62b 100644
--- a/llvm/test/CodeGen/Hexagon/dmul.ll
+++ b/llvm/test/CodeGen/Hexagon/dmul.ll
@@ -8,10 +8,10 @@
   %a = alloca double, align 8
   %b = alloca double, align 8
   %c = alloca double, align 8
-  store double 1.540000e+01, double* %a, align 8
-  store double 9.100000e+00, double* %b, align 8
-  %0 = load double, double* %b, align 8
-  %1 = load double, double* %a, align 8
+  store volatile double 1.540000e+01, double* %a, align 8
+  store volatile double 9.100000e+00, double* %b, align 8
+  %0 = load volatile double, double* %b, align 8
+  %1 = load volatile double, double* %a, align 8
   %mul = fmul double %0, %1
   store double %mul, double* %c, align 8
   ret i32 0
diff --git a/llvm/test/CodeGen/Hexagon/doubleconvert-ieee-rnd-near.ll b/llvm/test/CodeGen/Hexagon/doubleconvert-ieee-rnd-near.ll
index 6bf8224..ccc287c 100644
--- a/llvm/test/CodeGen/Hexagon/doubleconvert-ieee-rnd-near.ll
+++ b/llvm/test/CodeGen/Hexagon/doubleconvert-ieee-rnd-near.ll
@@ -12,10 +12,10 @@
   %b = alloca double, align 8
   %c = alloca double, align 8
   store i32 0, i32* %retval
-  store double 1.540000e+01, double* %a, align 8
-  store double 9.100000e+00, double* %b, align 8
-  %0 = load double, double* %a, align 8
-  %1 = load double, double* %b, align 8
+  store volatile double 1.540000e+01, double* %a, align 8
+  store volatile double 9.100000e+00, double* %b, align 8
+  %0 = load volatile double, double* %a, align 8
+  %1 = load volatile double, double* %b, align 8
   %add = fadd double %0, %1
   store double %add, double* %c, align 8
   %2 = load double, double* %c, align 8
diff --git a/llvm/test/CodeGen/Hexagon/dsub.ll b/llvm/test/CodeGen/Hexagon/dsub.ll
index 8b37301..d7e44b3 100644
--- a/llvm/test/CodeGen/Hexagon/dsub.ll
+++ b/llvm/test/CodeGen/Hexagon/dsub.ll
@@ -8,10 +8,10 @@
   %a = alloca double, align 8
   %b = alloca double, align 8
   %c = alloca double, align 8
-  store double 1.540000e+01, double* %a, align 8
-  store double 9.100000e+00, double* %b, align 8
-  %0 = load double, double* %b, align 8
-  %1 = load double, double* %a, align 8
+  store volatile double 1.540000e+01, double* %a, align 8
+  store volatile double 9.100000e+00, double* %b, align 8
+  %0 = load volatile double, double* %b, align 8
+  %1 = load volatile double, double* %a, align 8
   %sub = fsub double %0, %1
   store double %sub, double* %c, align 8
   ret i32 0
diff --git a/llvm/test/CodeGen/Hexagon/fadd.ll b/llvm/test/CodeGen/Hexagon/fadd.ll
index 0418c17..65c6182 100644
--- a/llvm/test/CodeGen/Hexagon/fadd.ll
+++ b/llvm/test/CodeGen/Hexagon/fadd.ll
@@ -8,10 +8,10 @@
   %a = alloca float, align 4
   %b = alloca float, align 4
   %c = alloca float, align 4
-  store float 0x402ECCCCC0000000, float* %a, align 4
-  store float 0x4022333340000000, float* %b, align 4
-  %0 = load float, float* %a, align 4
-  %1 = load float, float* %b, align 4
+  store volatile float 0x402ECCCCC0000000, float* %a, align 4
+  store volatile float 0x4022333340000000, float* %b, align 4
+  %0 = load volatile float, float* %a, align 4
+  %1 = load volatile float, float* %b, align 4
   %add = fadd float %0, %1
   store float %add, float* %c, align 4
   ret i32 0
diff --git a/llvm/test/CodeGen/Hexagon/fmul.ll b/llvm/test/CodeGen/Hexagon/fmul.ll
index 552f98e..e20e293 100644
--- a/llvm/test/CodeGen/Hexagon/fmul.ll
+++ b/llvm/test/CodeGen/Hexagon/fmul.ll
@@ -9,10 +9,10 @@
   %a = alloca float, align 4
   %b = alloca float, align 4
   %c = alloca float, align 4
-  store float 0x402ECCCCC0000000, float* %a, align 4
-  store float 0x4022333340000000, float* %b, align 4
-  %0 = load float, float* %b, align 4
-  %1 = load float, float* %a, align 4
+  store volatile float 0x402ECCCCC0000000, float* %a, align 4
+  store volatile float 0x4022333340000000, float* %b, align 4
+  %0 = load volatile float, float* %b, align 4
+  %1 = load volatile float, float* %a, align 4
   %mul = fmul float %0, %1
   store float %mul, float* %c, align 4
   ret i32 0
diff --git a/llvm/test/CodeGen/Hexagon/fsub.ll b/llvm/test/CodeGen/Hexagon/fsub.ll
index d7b0e2f..e9a1fa3 100644
--- a/llvm/test/CodeGen/Hexagon/fsub.ll
+++ b/llvm/test/CodeGen/Hexagon/fsub.ll
@@ -8,10 +8,10 @@
   %a = alloca float, align 4
   %b = alloca float, align 4
   %c = alloca float, align 4
-  store float 0x402ECCCCC0000000, float* %a, align 4
-  store float 0x4022333340000000, float* %b, align 4
-  %0 = load float, float* %b, align 4
-  %1 = load float, float* %a, align 4
+  store volatile float 0x402ECCCCC0000000, float* %a, align 4
+  store volatile float 0x4022333340000000, float* %b, align 4
+  %0 = load volatile float, float* %b, align 4
+  %1 = load volatile float, float* %a, align 4
   %sub = fsub float %0, %1
   store float %sub, float* %c, align 4
   ret i32 0