Add support for the -mstrict-align compiler option for ARM targets.

When strict alignment is requested, unaligned memory accesses are treated
as illegal in the backend even on subtargets that would otherwise allow
them.

rdar://12340498


git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@167620 91177308-0d34-0410-b5e6-96231b3b80d8
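
As a rough sketch (not part of this patch), a driver honoring -mstrict-align
could populate the new TargetOptions field before constructing a
TargetMachine; the helper name below is hypothetical:

  #include "llvm/Target/TargetOptions.h"

  // Build ARM codegen options; StrictAlignRequested is set when the user
  // passes -mstrict-align.
  llvm::TargetOptions makeARMTargetOptions(bool StrictAlignRequested) {
    llvm::TargetOptions Opts;                 // remaining fields keep defaults
    Opts.StrictAlign = StrictAlignRequested;  // force aligned accesses (ARM only)
    return Opts;
  }
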
diff --git a/include/llvm/Target/TargetOptions.h b/include/llvm/Target/TargetOptions.h
index 68ca567..75cfa8c 100644
--- a/include/llvm/Target/TargetOptions.h
+++ b/include/llvm/Target/TargetOptions.h
@@ -48,10 +48,10 @@
           UseSoftFloat(false), NoZerosInBSS(false), JITExceptionHandling(false),
           JITEmitDebugInfo(false), JITEmitDebugInfoToDisk(false),
           GuaranteedTailCallOpt(false), DisableTailCalls(false),
-          StackAlignmentOverride(0), RealignStack(true), EnableFastISel(false),
-          PositionIndependentExecutable(false), EnableSegmentedStacks(false),
-          UseInitArray(false), TrapFuncName(""), FloatABIType(FloatABI::Default),
-          AllowFPOpFusion(FPOpFusion::Standard)
+          StackAlignmentOverride(0), RealignStack(true), StrictAlign(false),
+          EnableFastISel(false), PositionIndependentExecutable(false),
+          EnableSegmentedStacks(false), UseInitArray(false), TrapFuncName(""),
+          FloatABIType(FloatABI::Default), AllowFPOpFusion(FPOpFusion::Standard)
     {}
 
     /// PrintMachineCode - This flag is enabled when the -print-machineinstrs
@@ -155,6 +155,10 @@
     /// automatically realigned, if needed.
     unsigned RealignStack : 1;
 
+    /// StrictAlign - This flag indicates that all memory accesses must be
+    /// aligned. (ARM only)
+    unsigned StrictAlign : 1;
+
     /// SSPBufferSize - The minimum size of buffers that will receive stack
     /// smashing protection when -fstack-protector is used.
     unsigned SSPBufferSize;
diff --git a/lib/Target/ARM/ARMFastISel.cpp b/lib/Target/ARM/ARMFastISel.cpp
index 6611862..7527c84 100644
--- a/lib/Target/ARM/ARMFastISel.cpp
+++ b/lib/Target/ARM/ARMFastISel.cpp
@@ -1028,7 +1028,8 @@
       RC = &ARM::GPRRegClass;
       break;
     case MVT::i16:
-      if (Alignment && Alignment < 2 && !Subtarget->allowsUnalignedMem())
+      if (Alignment && Alignment < 2 && (!Subtarget->allowsUnalignedMem() ||
+                                         TM.Options.StrictAlign))
         return false;
 
       if (isThumb2) {
@@ -1043,7 +1044,8 @@
       RC = &ARM::GPRRegClass;
       break;
     case MVT::i32:
-      if (Alignment && Alignment < 4 && !Subtarget->allowsUnalignedMem())
+      if (Alignment && Alignment < 4 && (!Subtarget->allowsUnalignedMem() ||
+                                         TM.Options.StrictAlign))
         return false;
 
       if (isThumb2) {
@@ -1152,7 +1154,8 @@
       }
       break;
     case MVT::i16:
-      if (Alignment && Alignment < 2 && !Subtarget->allowsUnalignedMem())
+      if (Alignment && Alignment < 2 && (!Subtarget->allowsUnalignedMem() ||
+                                         TM.Options.StrictAlign))
         return false;
 
       if (isThumb2) {
@@ -1166,7 +1169,8 @@
       }
       break;
     case MVT::i32:
-      if (Alignment && Alignment < 4 && !Subtarget->allowsUnalignedMem())
+      if (Alignment && Alignment < 4 && (!Subtarget->allowsUnalignedMem() ||
+                                         TM.Options.StrictAlign))
         return false;
 
       if (isThumb2) {
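
For reference (illustrative only, not part of the patch), the guard that the
i16/i32 load and store cases above now share is equivalent to the predicate
below; the names are made up for exposition:

  // An access whose explicit alignment is below the type's natural alignment
  // is only fast-isel'd when the subtarget tolerates unaligned memory and
  // strict alignment has not been requested.
  static bool canSelectAccess(unsigned Alignment, unsigned NaturalAlign,
                              bool AllowsUnalignedMem, bool StrictAlign) {
    if (Alignment && Alignment < NaturalAlign)
      return AllowsUnalignedMem && !StrictAlign;
    return true;
  }
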
diff --git a/lib/Target/ARM/ARMISelLowering.cpp b/lib/Target/ARM/ARMISelLowering.cpp
index 3b9558b..65cc49e 100644
--- a/lib/Target/ARM/ARMISelLowering.cpp
+++ b/lib/Target/ARM/ARMISelLowering.cpp
@@ -9119,7 +9119,8 @@
 
 bool ARMTargetLowering::allowsUnalignedMemoryAccesses(EVT VT) const {
   // The AllowsUnaligned flag models the SCTLR.A setting in ARM CPUs
-  bool AllowsUnaligned = Subtarget->allowsUnalignedMem();
+  bool AllowsUnaligned = Subtarget->allowsUnalignedMem() &&
+    !getTargetMachine().Options.StrictAlign;
 
   switch (VT.getSimpleVT().SimpleTy) {
   default: