Merge "Optimizing/ARM: Fix AddConstant() to adhere to set_cc." am: 0ac7a8e505
am: 84f8ba6329

* commit '84f8ba632953b08fd9d73c1e152cef9c9652c961':
  Optimizing/ARM: Fix AddConstant() to adhere to set_cc.
diff --git a/compiler/utils/arm/assembler_arm.h b/compiler/utils/arm/assembler_arm.h
index 98a1a8f..b79c2f0 100644
--- a/compiler/utils/arm/assembler_arm.h
+++ b/compiler/utils/arm/assembler_arm.h
@@ -878,7 +878,15 @@
                                      Register rn,
                                      Opcode opcode,
                                      uint32_t immediate,
+                                     SetCc set_cc,
                                      ShifterOperand* shifter_op) = 0;
+  bool ShifterOperandCanHold(Register rd,
+                             Register rn,
+                             Opcode opcode,
+                             uint32_t immediate,
+                             ShifterOperand* shifter_op) {
+    return ShifterOperandCanHold(rd, rn, opcode, immediate, kCcDontCare, shifter_op);
+  }
 
   virtual bool ShifterOperandCanAlwaysHold(uint32_t immediate) = 0;
 
diff --git a/compiler/utils/arm/assembler_arm32.cc b/compiler/utils/arm/assembler_arm32.cc
index a7dbacd..ebca25b 100644
--- a/compiler/utils/arm/assembler_arm32.cc
+++ b/compiler/utils/arm/assembler_arm32.cc
@@ -57,6 +57,7 @@
                                            Register rn ATTRIBUTE_UNUSED,
                                            Opcode opcode ATTRIBUTE_UNUSED,
                                            uint32_t immediate,
+                                           SetCc set_cc ATTRIBUTE_UNUSED,
                                            ShifterOperand* shifter_op) {
   return ShifterOperandCanHoldArm32(immediate, shifter_op);
 }
diff --git a/compiler/utils/arm/assembler_arm32.h b/compiler/utils/arm/assembler_arm32.h
index ce3a872..bf332fe 100644
--- a/compiler/utils/arm/assembler_arm32.h
+++ b/compiler/utils/arm/assembler_arm32.h
@@ -297,7 +297,9 @@
                              Register rn,
                              Opcode opcode,
                              uint32_t immediate,
+                             SetCc set_cc,
                              ShifterOperand* shifter_op) OVERRIDE;
+  using ArmAssembler::ShifterOperandCanHold;  // Don't hide the non-virtual override.
 
   bool ShifterOperandCanAlwaysHold(uint32_t immediate) OVERRIDE;
 
diff --git a/compiler/utils/arm/assembler_thumb2.cc b/compiler/utils/arm/assembler_thumb2.cc
index cdeb443..f341030 100644
--- a/compiler/utils/arm/assembler_thumb2.cc
+++ b/compiler/utils/arm/assembler_thumb2.cc
@@ -500,6 +500,7 @@
                                             Register rn ATTRIBUTE_UNUSED,
                                             Opcode opcode,
                                             uint32_t immediate,
+                                            SetCc set_cc,
                                             ShifterOperand* shifter_op) {
   shifter_op->type_ = ShifterOperand::kImmediate;
   shifter_op->immed_ = immediate;
@@ -508,7 +509,8 @@
   switch (opcode) {
     case ADD:
     case SUB:
-      if (immediate < (1 << 12)) {    // Less than (or equal to) 12 bits can always be done.
+      // Less than (or equal to) 12 bits can be done if we don't need to set condition codes.
+      if (immediate < (1 << 12) && set_cc != kCcSet) {
         return true;
       }
       return ArmAssembler::ModifiedImmediate(immediate) != kInvalidModifiedImmediate;
@@ -1239,7 +1241,10 @@
       // The only thumb1 instructions with a register and an immediate are ADD and SUB
       // with a 3-bit immediate, and RSB with zero immediate.
       if (opcode == ADD || opcode == SUB) {
-        if (!IsUint<3>(so.GetImmediate())) {
+        if ((cond == AL) ? set_cc == kCcKeep : set_cc == kCcSet) {
+          return true;  // Cannot match "setflags".
+        }
+        if (!IsUint<3>(so.GetImmediate()) && !IsUint<3>(-so.GetImmediate())) {
           return true;
         }
       } else {
@@ -1249,8 +1254,12 @@
       // ADD, SUB, CMP and MOV may be thumb1 only if the immediate is 8 bits.
       if (!(opcode == ADD || opcode == SUB || opcode == MOV || opcode == CMP)) {
         return true;
+      } else if (opcode != CMP && ((cond == AL) ? set_cc == kCcKeep : set_cc == kCcSet)) {
+        return true;  // Cannot match "setflags" for ADD, SUB or MOV.
       } else {
-        if (!IsUint<8>(so.GetImmediate())) {
+            // For ADD and SUB also allow a negative 8-bit immediate as we will emit the opposite opcode.
+        if (!IsUint<8>(so.GetImmediate()) &&
+            (opcode == MOV || opcode == CMP || !IsUint<8>(-so.GetImmediate()))) {
           return true;
         }
       }
@@ -1602,12 +1611,18 @@
   uint8_t rn_shift = 3;
   uint8_t immediate_shift = 0;
   bool use_immediate = false;
-  uint32_t immediate = 0;  // Should be at most 9 bits but keep the full immediate for CHECKs.
+  uint32_t immediate = 0;  // Should be at most 10 bits but keep the full immediate for CHECKs.
   uint8_t thumb_opcode;
 
   if (so.IsImmediate()) {
     use_immediate = true;
     immediate = so.GetImmediate();
+    if (!IsUint<10>(immediate)) {
+      // Flip ADD/SUB.
+      opcode = (opcode == ADD) ? SUB : ADD;
+      immediate = -immediate;
+      DCHECK(IsUint<10>(immediate));  // More stringent checks below.
+    }
   }
 
   switch (opcode) {
@@ -1644,7 +1659,7 @@
           dp_opcode = 2U /* 0b10 */;
           thumb_opcode = 3U /* 0b11 */;
           opcode_shift = 12;
-          CHECK_LT(immediate, (1u << 9));
+          CHECK(IsUint<9>(immediate));
           CHECK_ALIGNED(immediate, 4);
 
           // Remove rd and rn from instruction by orring it with immed and clearing bits.
@@ -1658,7 +1673,7 @@
           dp_opcode = 2U /* 0b10 */;
           thumb_opcode = 5U /* 0b101 */;
           opcode_shift = 11;
-          CHECK_LT(immediate, (1u << 10));
+          CHECK(IsUint<10>(immediate));
           CHECK_ALIGNED(immediate, 4);
 
           // Remove rn from instruction.
@@ -1668,11 +1683,13 @@
           immediate >>= 2;
         } else if (rn != rd) {
           // Must use T1.
+          CHECK(IsUint<3>(immediate));
           opcode_shift = 9;
           thumb_opcode = 14U /* 0b01110 */;
           immediate_shift = 6;
         } else {
           // T2 encoding.
+          CHECK(IsUint<8>(immediate));
           opcode_shift = 11;
           thumb_opcode = 6U /* 0b110 */;
           rd_shift = 8;
@@ -1702,7 +1719,7 @@
           dp_opcode = 2U /* 0b10 */;
           thumb_opcode = 0x61 /* 0b1100001 */;
           opcode_shift = 7;
-          CHECK_LT(immediate, (1u << 9));
+          CHECK(IsUint<9>(immediate));
           CHECK_ALIGNED(immediate, 4);
 
           // Remove rd and rn from instruction by orring it with immed and clearing bits.
@@ -1713,11 +1730,13 @@
           immediate >>= 2;
         } else if (rn != rd) {
           // Must use T1.
+          CHECK(IsUint<3>(immediate));
           opcode_shift = 9;
           thumb_opcode = 15U /* 0b01111 */;
           immediate_shift = 6;
         } else {
           // T2 encoding.
+          CHECK(IsUint<8>(immediate));
           opcode_shift = 11;
           thumb_opcode = 7U /* 0b111 */;
           rd_shift = 8;
@@ -3401,25 +3420,30 @@
   // positive values and sub for negatives ones, which would slightly improve
   // the readability of generated code for some constants.
   ShifterOperand shifter_op;
-  if (ShifterOperandCanHold(rd, rn, ADD, value, &shifter_op)) {
+  if (ShifterOperandCanHold(rd, rn, ADD, value, set_cc, &shifter_op)) {
     add(rd, rn, shifter_op, cond, set_cc);
-  } else if (ShifterOperandCanHold(rd, rn, SUB, -value, &shifter_op)) {
+  } else if (ShifterOperandCanHold(rd, rn, SUB, -value, set_cc, &shifter_op)) {
     sub(rd, rn, shifter_op, cond, set_cc);
   } else {
     CHECK(rn != IP);
-    if (ShifterOperandCanHold(rd, rn, MVN, ~value, &shifter_op)) {
-      mvn(IP, shifter_op, cond, kCcKeep);
-      add(rd, rn, ShifterOperand(IP), cond, set_cc);
-    } else if (ShifterOperandCanHold(rd, rn, MVN, ~(-value), &shifter_op)) {
-      mvn(IP, shifter_op, cond, kCcKeep);
-      sub(rd, rn, ShifterOperand(IP), cond, set_cc);
+    // If rd != rn, use rd as temp. This allows 16-bit ADD/SUB in more situations than using IP.
+    Register temp = (rd != rn) ? rd : IP;
+    if (ShifterOperandCanHold(temp, kNoRegister, MVN, ~value, set_cc, &shifter_op)) {
+      mvn(temp, shifter_op, cond, kCcKeep);
+      add(rd, rn, ShifterOperand(temp), cond, set_cc);
+    } else if (ShifterOperandCanHold(temp, kNoRegister, MVN, ~(-value), set_cc, &shifter_op)) {
+      mvn(temp, shifter_op, cond, kCcKeep);
+      sub(rd, rn, ShifterOperand(temp), cond, set_cc);
+    } else if (High16Bits(-value) == 0) {
+      movw(temp, Low16Bits(-value), cond);
+      sub(rd, rn, ShifterOperand(temp), cond, set_cc);
     } else {
-      movw(IP, Low16Bits(value), cond);
+      movw(temp, Low16Bits(value), cond);
       uint16_t value_high = High16Bits(value);
       if (value_high != 0) {
-        movt(IP, value_high, cond);
+        movt(temp, value_high, cond);
       }
-      add(rd, rn, ShifterOperand(IP), cond, set_cc);
+      add(rd, rn, ShifterOperand(temp), cond, set_cc);
     }
   }
 }
@@ -3429,9 +3453,9 @@
   // positive values and sub for negatives ones, which would slightly improve
   // the readability of generated code for some constants.
   ShifterOperand shifter_op;
-  if (ShifterOperandCanHold(kNoRegister, rn, CMP, value, &shifter_op)) {
+  if (ShifterOperandCanHold(kNoRegister, rn, CMP, value, kCcSet, &shifter_op)) {
     cmp(rn, shifter_op, cond);
-  } else if (ShifterOperandCanHold(kNoRegister, rn, CMN, ~value, &shifter_op)) {
+  } else if (ShifterOperandCanHold(kNoRegister, rn, CMN, ~value, kCcSet, &shifter_op)) {
     cmn(rn, shifter_op, cond);
   } else {
     CHECK(rn != IP);
diff --git a/compiler/utils/arm/assembler_thumb2.h b/compiler/utils/arm/assembler_thumb2.h
index 9aeece8..bf07b2d 100644
--- a/compiler/utils/arm/assembler_thumb2.h
+++ b/compiler/utils/arm/assembler_thumb2.h
@@ -342,7 +342,9 @@
                              Register rn,
                              Opcode opcode,
                              uint32_t immediate,
+                             SetCc set_cc,
                              ShifterOperand* shifter_op) OVERRIDE;
+  using ArmAssembler::ShifterOperandCanHold;  // Don't hide the non-virtual override.
 
   bool ShifterOperandCanAlwaysHold(uint32_t immediate) OVERRIDE;
 
diff --git a/compiler/utils/assembler_thumb_test.cc b/compiler/utils/assembler_thumb_test.cc
index 5ae2cc2..0ef0dc1 100644
--- a/compiler/utils/assembler_thumb_test.cc
+++ b/compiler/utils/assembler_thumb_test.cc
@@ -135,7 +135,8 @@
     toolsdir.c_str(), filename);
   if (kPrintResults) {
     // Print the results only, don't check. This is used to generate new output for inserting
-    // into the .inc file.
+    // into the .inc file, so let's add the appropriate prefix/suffix needed in the C++ code.
+    strcat(cmd, " | sed '-es/^/  \"/' | sed '-es/$/\\\\n\",/'");
     int cmd_result3 = system(cmd);
     ASSERT_EQ(cmd_result3, 0) << strerror(errno);
   } else {
@@ -1379,6 +1380,252 @@
   EmitAndCheck(&assembler, "CompareAndBranch");
 }
 
+TEST(Thumb2AssemblerTest, AddConstant) {
+  arm::Thumb2Assembler assembler;
+
+  // Low registers, Rd != Rn.
+  __ AddConstant(R0, R1, 0);                          // MOV.
+  __ AddConstant(R0, R1, 1);                          // 16-bit ADDS, encoding T1.
+  __ AddConstant(R0, R1, 7);                          // 16-bit ADDS, encoding T1.
+  __ AddConstant(R0, R1, 8);                          // 32-bit ADD, encoding T3.
+  __ AddConstant(R0, R1, 255);                        // 32-bit ADD, encoding T3.
+  __ AddConstant(R0, R1, 256);                        // 32-bit ADD, encoding T3.
+  __ AddConstant(R0, R1, 257);                        // 32-bit ADD, encoding T4.
+  __ AddConstant(R0, R1, 0xfff);                      // 32-bit ADD, encoding T4.
+  __ AddConstant(R0, R1, 0x1000);                     // 32-bit ADD, encoding T3.
+  __ AddConstant(R0, R1, 0x1001);                     // MVN+SUB.
+  __ AddConstant(R0, R1, 0x1002);                     // MOVW+ADD.
+  __ AddConstant(R0, R1, 0xffff);                     // MOVW+ADD.
+  __ AddConstant(R0, R1, 0x10000);                    // 32-bit ADD, encoding T3.
+  __ AddConstant(R0, R1, 0x10001);                    // 32-bit ADD, encoding T3.
+  __ AddConstant(R0, R1, 0x10002);                    // MVN+SUB.
+  __ AddConstant(R0, R1, 0x10003);                    // MOVW+MOVT+ADD.
+  __ AddConstant(R0, R1, -1);                         // 16-bit SUBS.
+  __ AddConstant(R0, R1, -7);                         // 16-bit SUBS.
+  __ AddConstant(R0, R1, -8);                         // 32-bit SUB, encoding T3.
+  __ AddConstant(R0, R1, -255);                       // 32-bit SUB, encoding T3.
+  __ AddConstant(R0, R1, -256);                       // 32-bit SUB, encoding T3.
+  __ AddConstant(R0, R1, -257);                       // 32-bit SUB, encoding T4.
+  __ AddConstant(R0, R1, -0xfff);                     // 32-bit SUB, encoding T4.
+  __ AddConstant(R0, R1, -0x1000);                    // 32-bit SUB, encoding T3.
+  __ AddConstant(R0, R1, -0x1001);                    // MVN+ADD.
+  __ AddConstant(R0, R1, -0x1002);                    // MOVW+SUB.
+  __ AddConstant(R0, R1, -0xffff);                    // MOVW+SUB.
+  __ AddConstant(R0, R1, -0x10000);                   // 32-bit SUB, encoding T3.
+  __ AddConstant(R0, R1, -0x10001);                   // 32-bit SUB, encoding T3.
+  __ AddConstant(R0, R1, -0x10002);                   // MVN+ADD.
+  __ AddConstant(R0, R1, -0x10003);                   // MOVW+MOVT+ADD.
+
+  // Low registers, Rd == Rn.
+  __ AddConstant(R0, R0, 0);                          // Nothing.
+  __ AddConstant(R1, R1, 1);                          // 16-bit ADDS, encoding T2.
+  __ AddConstant(R0, R0, 7);                          // 16-bit ADDS, encoding T2.
+  __ AddConstant(R1, R1, 8);                          // 16-bit ADDS, encoding T2.
+  __ AddConstant(R0, R0, 255);                        // 16-bit ADDS, encoding T2.
+  __ AddConstant(R1, R1, 256);                        // 32-bit ADD, encoding T3.
+  __ AddConstant(R0, R0, 257);                        // 32-bit ADD, encoding T4.
+  __ AddConstant(R1, R1, 0xfff);                      // 32-bit ADD, encoding T4.
+  __ AddConstant(R0, R0, 0x1000);                     // 32-bit ADD, encoding T3.
+  __ AddConstant(R1, R1, 0x1001);                     // MVN+SUB.
+  __ AddConstant(R0, R0, 0x1002);                     // MOVW+ADD.
+  __ AddConstant(R1, R1, 0xffff);                     // MOVW+ADD.
+  __ AddConstant(R0, R0, 0x10000);                    // 32-bit ADD, encoding T3.
+  __ AddConstant(R1, R1, 0x10001);                    // 32-bit ADD, encoding T3.
+  __ AddConstant(R0, R0, 0x10002);                    // MVN+SUB.
+  __ AddConstant(R1, R1, 0x10003);                    // MOVW+MOVT+ADD.
+  __ AddConstant(R0, R0, -1);                         // 16-bit SUBS, encoding T2.
+  __ AddConstant(R1, R1, -7);                         // 16-bit SUBS, encoding T2.
+  __ AddConstant(R0, R0, -8);                         // 16-bit SUBS, encoding T2.
+  __ AddConstant(R1, R1, -255);                       // 16-bit SUBS, encoding T2.
+  __ AddConstant(R0, R0, -256);                       // 32-bit SUB, encoding T3.
+  __ AddConstant(R1, R1, -257);                       // 32-bit SUB, encoding T4.
+  __ AddConstant(R0, R0, -0xfff);                     // 32-bit SUB, encoding T4.
+  __ AddConstant(R1, R1, -0x1000);                    // 32-bit SUB, encoding T3.
+  __ AddConstant(R0, R0, -0x1001);                    // MVN+ADD.
+  __ AddConstant(R1, R1, -0x1002);                    // MOVW+SUB.
+  __ AddConstant(R0, R0, -0xffff);                    // MOVW+SUB.
+  __ AddConstant(R1, R1, -0x10000);                   // 32-bit SUB, encoding T3.
+  __ AddConstant(R0, R0, -0x10001);                   // 32-bit SUB, encoding T3.
+  __ AddConstant(R1, R1, -0x10002);                   // MVN+ADD.
+  __ AddConstant(R0, R0, -0x10003);                   // MOVW+MOVT+ADD.
+
+  // High registers.
+  __ AddConstant(R8, R8, 0);                          // Nothing.
+  __ AddConstant(R8, R1, 1);                          // 32-bit ADD, encoding T3.
+  __ AddConstant(R0, R8, 7);                          // 32-bit ADD, encoding T3.
+  __ AddConstant(R8, R8, 8);                          // 32-bit ADD, encoding T3.
+  __ AddConstant(R8, R1, 255);                        // 32-bit ADD, encoding T3.
+  __ AddConstant(R0, R8, 256);                        // 32-bit ADD, encoding T3.
+  __ AddConstant(R8, R8, 257);                        // 32-bit ADD, encoding T4.
+  __ AddConstant(R8, R1, 0xfff);                      // 32-bit ADD, encoding T4.
+  __ AddConstant(R0, R8, 0x1000);                     // 32-bit ADD, encoding T3.
+  __ AddConstant(R8, R8, 0x1001);                     // MVN+SUB.
+  __ AddConstant(R0, R1, 0x1002);                     // MOVW+ADD.
+  __ AddConstant(R0, R8, 0xffff);                     // MOVW+ADD.
+  __ AddConstant(R8, R8, 0x10000);                    // 32-bit ADD, encoding T3.
+  __ AddConstant(R8, R1, 0x10001);                    // 32-bit ADD, encoding T3.
+  __ AddConstant(R0, R8, 0x10002);                    // MVN+SUB.
+  __ AddConstant(R0, R8, 0x10003);                    // MOVW+MOVT+ADD.
+  __ AddConstant(R8, R8, -1);                         // 32-bit ADD, encoding T3.
+  __ AddConstant(R8, R1, -7);                         // 32-bit SUB, encoding T3.
+  __ AddConstant(R0, R8, -8);                         // 32-bit SUB, encoding T3.
+  __ AddConstant(R8, R8, -255);                       // 32-bit SUB, encoding T3.
+  __ AddConstant(R8, R1, -256);                       // 32-bit SUB, encoding T3.
+  __ AddConstant(R0, R8, -257);                       // 32-bit SUB, encoding T4.
+  __ AddConstant(R8, R8, -0xfff);                     // 32-bit SUB, encoding T4.
+  __ AddConstant(R8, R1, -0x1000);                    // 32-bit SUB, encoding T3.
+  __ AddConstant(R0, R8, -0x1001);                    // MVN+ADD.
+  __ AddConstant(R0, R1, -0x1002);                    // MOVW+SUB.
+  __ AddConstant(R8, R1, -0xffff);                    // MOVW+SUB.
+  __ AddConstant(R0, R8, -0x10000);                   // 32-bit SUB, encoding T3.
+  __ AddConstant(R8, R8, -0x10001);                   // 32-bit SUB, encoding T3.
+  __ AddConstant(R8, R1, -0x10002);                   // MVN+SUB.
+  __ AddConstant(R0, R8, -0x10003);                   // MOVW+MOVT+ADD.
+
+  // Low registers, Rd != Rn, kCcKeep.
+  __ AddConstant(R0, R1, 0, AL, kCcKeep);             // MOV.
+  __ AddConstant(R0, R1, 1, AL, kCcKeep);             // 32-bit ADD, encoding T3.
+  __ AddConstant(R0, R1, 7, AL, kCcKeep);             // 32-bit ADD, encoding T3.
+  __ AddConstant(R0, R1, 8, AL, kCcKeep);             // 32-bit ADD, encoding T3.
+  __ AddConstant(R0, R1, 255, AL, kCcKeep);           // 32-bit ADD, encoding T3.
+  __ AddConstant(R0, R1, 256, AL, kCcKeep);           // 32-bit ADD, encoding T3.
+  __ AddConstant(R0, R1, 257, AL, kCcKeep);           // 32-bit ADD, encoding T4.
+  __ AddConstant(R0, R1, 0xfff, AL, kCcKeep);         // 32-bit ADD, encoding T4.
+  __ AddConstant(R0, R1, 0x1000, AL, kCcKeep);        // 32-bit ADD, encoding T3.
+  __ AddConstant(R0, R1, 0x1001, AL, kCcKeep);        // MVN+SUB.
+  __ AddConstant(R0, R1, 0x1002, AL, kCcKeep);        // MOVW+ADD.
+  __ AddConstant(R0, R1, 0xffff, AL, kCcKeep);        // MOVW+ADD.
+  __ AddConstant(R0, R1, 0x10000, AL, kCcKeep);       // 32-bit ADD, encoding T3.
+  __ AddConstant(R0, R1, 0x10001, AL, kCcKeep);       // 32-bit ADD, encoding T3.
+  __ AddConstant(R0, R1, 0x10002, AL, kCcKeep);       // MVN+SUB.
+  __ AddConstant(R0, R1, 0x10003, AL, kCcKeep);       // MOVW+MOVT+ADD.
+  __ AddConstant(R0, R1, -1, AL, kCcKeep);            // 32-bit ADD, encoding T3.
+  __ AddConstant(R0, R1, -7, AL, kCcKeep);            // 32-bit SUB, encoding T3.
+  __ AddConstant(R0, R1, -8, AL, kCcKeep);            // 32-bit SUB, encoding T3.
+  __ AddConstant(R0, R1, -255, AL, kCcKeep);          // 32-bit SUB, encoding T3.
+  __ AddConstant(R0, R1, -256, AL, kCcKeep);          // 32-bit SUB, encoding T3.
+  __ AddConstant(R0, R1, -257, AL, kCcKeep);          // 32-bit SUB, encoding T4.
+  __ AddConstant(R0, R1, -0xfff, AL, kCcKeep);        // 32-bit SUB, encoding T4.
+  __ AddConstant(R0, R1, -0x1000, AL, kCcKeep);       // 32-bit SUB, encoding T3.
+  __ AddConstant(R0, R1, -0x1001, AL, kCcKeep);       // MVN+ADD.
+  __ AddConstant(R0, R1, -0x1002, AL, kCcKeep);       // MOVW+SUB.
+  __ AddConstant(R0, R1, -0xffff, AL, kCcKeep);       // MOVW+SUB.
+  __ AddConstant(R0, R1, -0x10000, AL, kCcKeep);      // 32-bit SUB, encoding T3.
+  __ AddConstant(R0, R1, -0x10001, AL, kCcKeep);      // 32-bit SUB, encoding T3.
+  __ AddConstant(R0, R1, -0x10002, AL, kCcKeep);      // MVN+ADD.
+  __ AddConstant(R0, R1, -0x10003, AL, kCcKeep);      // MOVW+MOVT+ADD.
+
+  // Low registers, Rd == Rn, kCcKeep.
+  __ AddConstant(R0, R0, 0, AL, kCcKeep);             // Nothing.
+  __ AddConstant(R1, R1, 1, AL, kCcKeep);             // 32-bit ADD, encoding T3.
+  __ AddConstant(R0, R0, 7, AL, kCcKeep);             // 32-bit ADD, encoding T3.
+  __ AddConstant(R1, R1, 8, AL, kCcKeep);             // 32-bit ADD, encoding T3.
+  __ AddConstant(R0, R0, 255, AL, kCcKeep);           // 32-bit ADD, encoding T3.
+  __ AddConstant(R1, R1, 256, AL, kCcKeep);           // 32-bit ADD, encoding T3.
+  __ AddConstant(R0, R0, 257, AL, kCcKeep);           // 32-bit ADD, encoding T4.
+  __ AddConstant(R1, R1, 0xfff, AL, kCcKeep);         // 32-bit ADD, encoding T4.
+  __ AddConstant(R0, R0, 0x1000, AL, kCcKeep);        // 32-bit ADD, encoding T3.
+  __ AddConstant(R1, R1, 0x1001, AL, kCcKeep);        // MVN+SUB.
+  __ AddConstant(R0, R0, 0x1002, AL, kCcKeep);        // MOVW+ADD.
+  __ AddConstant(R1, R1, 0xffff, AL, kCcKeep);        // MOVW+ADD.
+  __ AddConstant(R0, R0, 0x10000, AL, kCcKeep);       // 32-bit ADD, encoding T3.
+  __ AddConstant(R1, R1, 0x10001, AL, kCcKeep);       // 32-bit ADD, encoding T3.
+  __ AddConstant(R0, R0, 0x10002, AL, kCcKeep);       // MVN+SUB.
+  __ AddConstant(R1, R1, 0x10003, AL, kCcKeep);       // MOVW+MOVT+ADD.
+  __ AddConstant(R0, R0, -1, AL, kCcKeep);            // 32-bit ADD, encoding T3.
+  __ AddConstant(R1, R1, -7, AL, kCcKeep);            // 32-bit SUB, encoding T3.
+  __ AddConstant(R0, R0, -8, AL, kCcKeep);            // 32-bit SUB, encoding T3.
+  __ AddConstant(R1, R1, -255, AL, kCcKeep);          // 32-bit SUB, encoding T3.
+  __ AddConstant(R0, R0, -256, AL, kCcKeep);          // 32-bit SUB, encoding T3.
+  __ AddConstant(R1, R1, -257, AL, kCcKeep);          // 32-bit SUB, encoding T4.
+  __ AddConstant(R0, R0, -0xfff, AL, kCcKeep);        // 32-bit SUB, encoding T4.
+  __ AddConstant(R1, R1, -0x1000, AL, kCcKeep);       // 32-bit SUB, encoding T3.
+  __ AddConstant(R0, R0, -0x1001, AL, kCcKeep);       // MVN+ADD.
+  __ AddConstant(R1, R1, -0x1002, AL, kCcKeep);       // MOVW+SUB.
+  __ AddConstant(R0, R0, -0xffff, AL, kCcKeep);       // MOVW+SUB.
+  __ AddConstant(R1, R1, -0x10000, AL, kCcKeep);      // 32-bit SUB, encoding T3.
+  __ AddConstant(R0, R0, -0x10001, AL, kCcKeep);      // 32-bit SUB, encoding T3.
+  __ AddConstant(R1, R1, -0x10002, AL, kCcKeep);      // MVN+ADD.
+  __ AddConstant(R0, R0, -0x10003, AL, kCcKeep);      // MOVW+MOVT+ADD.
+
+  // Low registers, Rd != Rn, kCcSet.
+  __ AddConstant(R0, R1, 0, AL, kCcSet);              // 16-bit ADDS.
+  __ AddConstant(R0, R1, 1, AL, kCcSet);              // 16-bit ADDS.
+  __ AddConstant(R0, R1, 7, AL, kCcSet);              // 16-bit ADDS.
+  __ AddConstant(R0, R1, 8, AL, kCcSet);              // 32-bit ADDS, encoding T3.
+  __ AddConstant(R0, R1, 255, AL, kCcSet);            // 32-bit ADDS, encoding T3.
+  __ AddConstant(R0, R1, 256, AL, kCcSet);            // 32-bit ADDS, encoding T3.
+  __ AddConstant(R0, R1, 257, AL, kCcSet);            // MVN+SUBS.
+  __ AddConstant(R0, R1, 0xfff, AL, kCcSet);          // MOVW+ADDS.
+  __ AddConstant(R0, R1, 0x1000, AL, kCcSet);         // 32-bit ADDS, encoding T3.
+  __ AddConstant(R0, R1, 0x1001, AL, kCcSet);         // MVN+SUBS.
+  __ AddConstant(R0, R1, 0x1002, AL, kCcSet);         // MOVW+ADDS.
+  __ AddConstant(R0, R1, 0xffff, AL, kCcSet);         // MOVW+ADDS.
+  __ AddConstant(R0, R1, 0x10000, AL, kCcSet);        // 32-bit ADDS, encoding T3.
+  __ AddConstant(R0, R1, 0x10001, AL, kCcSet);        // 32-bit ADDS, encoding T3.
+  __ AddConstant(R0, R1, 0x10002, AL, kCcSet);        // MVN+SUBS.
+  __ AddConstant(R0, R1, 0x10003, AL, kCcSet);        // MOVW+MOVT+ADDS.
+  __ AddConstant(R0, R1, -1, AL, kCcSet);             // 16-bit SUBS.
+  __ AddConstant(R0, R1, -7, AL, kCcSet);             // 16-bit SUBS.
+  __ AddConstant(R0, R1, -8, AL, kCcSet);             // 32-bit SUBS, encoding T3.
+  __ AddConstant(R0, R1, -255, AL, kCcSet);           // 32-bit SUBS, encoding T3.
+  __ AddConstant(R0, R1, -256, AL, kCcSet);           // 32-bit SUBS, encoding T3.
+  __ AddConstant(R0, R1, -257, AL, kCcSet);           // MVN+ADDS.
+  __ AddConstant(R0, R1, -0xfff, AL, kCcSet);         // MOVW+SUBS.
+  __ AddConstant(R0, R1, -0x1000, AL, kCcSet);        // 32-bit SUBS, encoding T3.
+  __ AddConstant(R0, R1, -0x1001, AL, kCcSet);        // MVN+ADDS.
+  __ AddConstant(R0, R1, -0x1002, AL, kCcSet);        // MOVW+SUBS.
+  __ AddConstant(R0, R1, -0xffff, AL, kCcSet);        // MOVW+SUBS.
+  __ AddConstant(R0, R1, -0x10000, AL, kCcSet);       // 32-bit SUBS, encoding T3.
+  __ AddConstant(R0, R1, -0x10001, AL, kCcSet);       // 32-bit SUBS, encoding T3.
+  __ AddConstant(R0, R1, -0x10002, AL, kCcSet);       // MVN+ADDS.
+  __ AddConstant(R0, R1, -0x10003, AL, kCcSet);       // MOVW+MOVT+ADDS.
+
+  // Low registers, Rd == Rn, kCcSet.
+  __ AddConstant(R0, R0, 0, AL, kCcSet);              // 16-bit ADDS, encoding T2.
+  __ AddConstant(R1, R1, 1, AL, kCcSet);              // 16-bit ADDS, encoding T2.
+  __ AddConstant(R0, R0, 7, AL, kCcSet);              // 16-bit ADDS, encoding T2.
+  __ AddConstant(R1, R1, 8, AL, kCcSet);              // 16-bit ADDS, encoding T2.
+  __ AddConstant(R0, R0, 255, AL, kCcSet);            // 16-bit ADDS, encoding T2.
+  __ AddConstant(R1, R1, 256, AL, kCcSet);            // 32-bit ADDS, encoding T3.
+  __ AddConstant(R0, R0, 257, AL, kCcSet);            // MVN+SUBS.
+  __ AddConstant(R1, R1, 0xfff, AL, kCcSet);          // MOVW+ADDS.
+  __ AddConstant(R0, R0, 0x1000, AL, kCcSet);         // 32-bit ADDS, encoding T3.
+  __ AddConstant(R1, R1, 0x1001, AL, kCcSet);         // MVN+SUBS.
+  __ AddConstant(R0, R0, 0x1002, AL, kCcSet);         // MOVW+ADDS.
+  __ AddConstant(R1, R1, 0xffff, AL, kCcSet);         // MOVW+ADDS.
+  __ AddConstant(R0, R0, 0x10000, AL, kCcSet);        // 32-bit ADDS, encoding T3.
+  __ AddConstant(R1, R1, 0x10001, AL, kCcSet);        // 32-bit ADDS, encoding T3.
+  __ AddConstant(R0, R0, 0x10002, AL, kCcSet);        // MVN+SUBS.
+  __ AddConstant(R1, R1, 0x10003, AL, kCcSet);        // MOVW+MOVT+ADDS.
+  __ AddConstant(R0, R0, -1, AL, kCcSet);             // 16-bit SUBS, encoding T2.
+  __ AddConstant(R1, R1, -7, AL, kCcSet);             // 16-bit SUBS, encoding T2.
+  __ AddConstant(R0, R0, -8, AL, kCcSet);             // 16-bit SUBS, encoding T2.
+  __ AddConstant(R1, R1, -255, AL, kCcSet);           // 16-bit SUBS, encoding T2.
+  __ AddConstant(R0, R0, -256, AL, kCcSet);           // 32-bit SUB, encoding T3.
+  __ AddConstant(R1, R1, -257, AL, kCcSet);           // MVN+ADDS.
+  __ AddConstant(R0, R0, -0xfff, AL, kCcSet);         // MOVW+SUBS.
+  __ AddConstant(R1, R1, -0x1000, AL, kCcSet);        // 32-bit SUB, encoding T3.
+  __ AddConstant(R0, R0, -0x1001, AL, kCcSet);        // MVN+ADDS.
+  __ AddConstant(R1, R1, -0x1002, AL, kCcSet);        // MOVW+SUBS.
+  __ AddConstant(R0, R0, -0xffff, AL, kCcSet);        // MOVW+SUBS.
+  __ AddConstant(R1, R1, -0x10000, AL, kCcSet);       // 32-bit SUBS, encoding T3.
+  __ AddConstant(R0, R0, -0x10001, AL, kCcSet);       // 32-bit SUBS, encoding T3.
+  __ AddConstant(R1, R1, -0x10002, AL, kCcSet);       // MVN+ADDS.
+  __ AddConstant(R0, R0, -0x10003, AL, kCcSet);       // MOVW+MOVT+ADDS.
+
+  __ it(EQ);
+  __ AddConstant(R0, R1, 1, EQ, kCcSet);              // 32-bit ADDS, encoding T3.
+  __ it(NE);
+  __ AddConstant(R0, R1, 1, NE, kCcKeep);             // 16-bit ADDS, encoding T1.
+  __ it(GE);
+  __ AddConstant(R0, R0, 1, GE, kCcSet);              // 32-bit ADDS, encoding T3.
+  __ it(LE);
+  __ AddConstant(R0, R0, 1, LE, kCcKeep);             // 16-bit ADDS, encoding T2.
+
+  EmitAndCheck(&assembler, "AddConstant");
+}
+
 #undef __
 }  // namespace arm
 }  // namespace art
diff --git a/compiler/utils/assembler_thumb_test_expected.cc.inc b/compiler/utils/assembler_thumb_test_expected.cc.inc
index 886295e..f07f8c7 100644
--- a/compiler/utils/assembler_thumb_test_expected.cc.inc
+++ b/compiler/utils/assembler_thumb_test_expected.cc.inc
@@ -5052,6 +5052,324 @@
   nullptr
 };
 
+const char* AddConstantResults[] = {
+  "   0:	4608      	mov	r0, r1\n",
+  "   2:	1c48      	adds	r0, r1, #1\n",
+  "   4:	1dc8      	adds	r0, r1, #7\n",
+  "   6:	f101 0008 	add.w	r0, r1, #8\n",
+  "   a:	f101 00ff 	add.w	r0, r1, #255	; 0xff\n",
+  "   e:	f501 7080 	add.w	r0, r1, #256	; 0x100\n",
+  "  12:	f201 1001 	addw	r0, r1, #257	; 0x101\n",
+  "  16:	f601 70ff 	addw	r0, r1, #4095	; 0xfff\n",
+  "  1a:	f501 5080 	add.w	r0, r1, #4096	; 0x1000\n",
+  "  1e:	f46f 5080 	mvn.w	r0, #4096	; 0x1000\n",
+  "  22:	1a08      	subs	r0, r1, r0\n",
+  "  24:	f241 0002 	movw	r0, #4098	; 0x1002\n",
+  "  28:	1808      	adds	r0, r1, r0\n",
+  "  2a:	f64f 70ff 	movw	r0, #65535	; 0xffff\n",
+  "  2e:	1808      	adds	r0, r1, r0\n",
+  "  30:	f501 3080 	add.w	r0, r1, #65536	; 0x10000\n",
+  "  34:	f101 1001 	add.w	r0, r1, #65537	; 0x10001\n",
+  "  38:	f06f 1001 	mvn.w	r0, #65537	; 0x10001\n",
+  "  3c:	1a08      	subs	r0, r1, r0\n",
+  "  3e:	f240 0003 	movw	r0, #3\n",
+  "  42:	f2c0 0001 	movt	r0, #1\n",
+  "  46:	1808      	adds	r0, r1, r0\n",
+  "  48:	1e48      	subs	r0, r1, #1\n",
+  "  4a:	1fc8      	subs	r0, r1, #7\n",
+  "  4c:	f1a1 0008 	sub.w	r0, r1, #8\n",
+  "  50:	f1a1 00ff 	sub.w	r0, r1, #255	; 0xff\n",
+  "  54:	f5a1 7080 	sub.w	r0, r1, #256	; 0x100\n",
+  "  58:	f2a1 1001 	subw	r0, r1, #257	; 0x101\n",
+  "  5c:	f6a1 70ff 	subw	r0, r1, #4095	; 0xfff\n",
+  "  60:	f5a1 5080 	sub.w	r0, r1, #4096	; 0x1000\n",
+  "  64:	f46f 5080 	mvn.w	r0, #4096	; 0x1000\n",
+  "  68:	1808      	adds	r0, r1, r0\n",
+  "  6a:	f241 0002 	movw	r0, #4098	; 0x1002\n",
+  "  6e:	1a08      	subs	r0, r1, r0\n",
+  "  70:	f64f 70ff 	movw	r0, #65535	; 0xffff\n",
+  "  74:	1a08      	subs	r0, r1, r0\n",
+  "  76:	f5a1 3080 	sub.w	r0, r1, #65536	; 0x10000\n",
+  "  7a:	f1a1 1001 	sub.w	r0, r1, #65537	; 0x10001\n",
+  "  7e:	f06f 1001 	mvn.w	r0, #65537	; 0x10001\n",
+  "  82:	1808      	adds	r0, r1, r0\n",
+  "  84:	f64f 70fd 	movw	r0, #65533	; 0xfffd\n",
+  "  88:	f6cf 70fe 	movt	r0, #65534	; 0xfffe\n",
+  "  8c:	1808      	adds	r0, r1, r0\n",
+  "  8e:	3101      	adds	r1, #1\n",
+  "  90:	3007      	adds	r0, #7\n",
+  "  92:	3108      	adds	r1, #8\n",
+  "  94:	30ff      	adds	r0, #255	; 0xff\n",
+  "  96:	f501 7180 	add.w	r1, r1, #256	; 0x100\n",
+  "  9a:	f200 1001 	addw	r0, r0, #257	; 0x101\n",
+  "  9e:	f601 71ff 	addw	r1, r1, #4095	; 0xfff\n",
+  "  a2:	f500 5080 	add.w	r0, r0, #4096	; 0x1000\n",
+  "  a6:	f46f 5c80 	mvn.w	ip, #4096	; 0x1000\n",
+  "  aa:	eba1 010c 	sub.w	r1, r1, ip\n",
+  "  ae:	f241 0c02 	movw	ip, #4098	; 0x1002\n",
+  "  b2:	4460      	add	r0, ip\n",
+  "  b4:	f64f 7cff 	movw	ip, #65535	; 0xffff\n",
+  "  b8:	4461      	add	r1, ip\n",
+  "  ba:	f500 3080 	add.w	r0, r0, #65536	; 0x10000\n",
+  "  be:	f101 1101 	add.w	r1, r1, #65537	; 0x10001\n",
+  "  c2:	f06f 1c01 	mvn.w	ip, #65537	; 0x10001\n",
+  "  c6:	eba0 000c 	sub.w	r0, r0, ip\n",
+  "  ca:	f240 0c03 	movw	ip, #3\n",
+  "  ce:	f2c0 0c01 	movt	ip, #1\n",
+  "  d2:	4461      	add	r1, ip\n",
+  "  d4:	3801      	subs	r0, #1\n",
+  "  d6:	3907      	subs	r1, #7\n",
+  "  d8:	3808      	subs	r0, #8\n",
+  "  da:	39ff      	subs	r1, #255	; 0xff\n",
+  "  dc:	f5a0 7080 	sub.w	r0, r0, #256	; 0x100\n",
+  "  e0:	f2a1 1101 	subw	r1, r1, #257	; 0x101\n",
+  "  e4:	f6a0 70ff 	subw	r0, r0, #4095	; 0xfff\n",
+  "  e8:	f5a1 5180 	sub.w	r1, r1, #4096	; 0x1000\n",
+  "  ec:	f46f 5c80 	mvn.w	ip, #4096	; 0x1000\n",
+  "  f0:	4460      	add	r0, ip\n",
+  "  f2:	f241 0c02 	movw	ip, #4098	; 0x1002\n",
+  "  f6:	eba1 010c 	sub.w	r1, r1, ip\n",
+  "  fa:	f64f 7cff 	movw	ip, #65535	; 0xffff\n",
+  "  fe:	eba0 000c 	sub.w	r0, r0, ip\n",
+  " 102:	f5a1 3180 	sub.w	r1, r1, #65536	; 0x10000\n",
+  " 106:	f1a0 1001 	sub.w	r0, r0, #65537	; 0x10001\n",
+  " 10a:	f06f 1c01 	mvn.w	ip, #65537	; 0x10001\n",
+  " 10e:	4461      	add	r1, ip\n",
+  " 110:	f64f 7cfd 	movw	ip, #65533	; 0xfffd\n",
+  " 114:	f6cf 7cfe 	movt	ip, #65534	; 0xfffe\n",
+  " 118:	4460      	add	r0, ip\n",
+  " 11a:	f101 0801 	add.w	r8, r1, #1\n",
+  " 11e:	f108 0007 	add.w	r0, r8, #7\n",
+  " 122:	f108 0808 	add.w	r8, r8, #8\n",
+  " 126:	f101 08ff 	add.w	r8, r1, #255	; 0xff\n",
+  " 12a:	f508 7080 	add.w	r0, r8, #256	; 0x100\n",
+  " 12e:	f208 1801 	addw	r8, r8, #257	; 0x101\n",
+  " 132:	f601 78ff 	addw	r8, r1, #4095	; 0xfff\n",
+  " 136:	f508 5080 	add.w	r0, r8, #4096	; 0x1000\n",
+  " 13a:	f46f 5c80 	mvn.w	ip, #4096	; 0x1000\n",
+  " 13e:	eba8 080c 	sub.w	r8, r8, ip\n",
+  " 142:	f241 0002 	movw	r0, #4098	; 0x1002\n",
+  " 146:	1808      	adds	r0, r1, r0\n",
+  " 148:	f64f 70ff 	movw	r0, #65535	; 0xffff\n",
+  " 14c:	eb08 0000 	add.w	r0, r8, r0\n",
+  " 150:	f508 3880 	add.w	r8, r8, #65536	; 0x10000\n",
+  " 154:	f101 1801 	add.w	r8, r1, #65537	; 0x10001\n",
+  " 158:	f06f 1001 	mvn.w	r0, #65537	; 0x10001\n",
+  " 15c:	eba8 0000 	sub.w	r0, r8, r0\n",
+  " 160:	f240 0003 	movw	r0, #3\n",
+  " 164:	f2c0 0001 	movt	r0, #1\n",
+  " 168:	eb08 0000 	add.w	r0, r8, r0\n",
+  " 16c:	f108 38ff 	add.w	r8, r8, #4294967295	; 0xffffffff\n",
+  " 170:	f1a1 0807 	sub.w	r8, r1, #7\n",
+  " 174:	f1a8 0008 	sub.w	r0, r8, #8\n",
+  " 178:	f1a8 08ff 	sub.w	r8, r8, #255	; 0xff\n",
+  " 17c:	f5a1 7880 	sub.w	r8, r1, #256	; 0x100\n",
+  " 180:	f2a8 1001 	subw	r0, r8, #257	; 0x101\n",
+  " 184:	f6a8 78ff 	subw	r8, r8, #4095	; 0xfff\n",
+  " 188:	f5a1 5880 	sub.w	r8, r1, #4096	; 0x1000\n",
+  " 18c:	f46f 5080 	mvn.w	r0, #4096	; 0x1000\n",
+  " 190:	eb08 0000 	add.w	r0, r8, r0\n",
+  " 194:	f241 0002 	movw	r0, #4098	; 0x1002\n",
+  " 198:	1a08      	subs	r0, r1, r0\n",
+  " 19a:	f64f 78ff 	movw	r8, #65535	; 0xffff\n",
+  " 19e:	eba1 0808 	sub.w	r8, r1, r8\n",
+  " 1a2:	f5a8 3080 	sub.w	r0, r8, #65536	; 0x10000\n",
+  " 1a6:	f1a8 1801 	sub.w	r8, r8, #65537	; 0x10001\n",
+  " 1aa:	f06f 1801 	mvn.w	r8, #65537	; 0x10001\n",
+  " 1ae:	eb01 0808 	add.w	r8, r1, r8\n",
+  " 1b2:	f64f 70fd 	movw	r0, #65533	; 0xfffd\n",
+  " 1b6:	f6cf 70fe 	movt	r0, #65534	; 0xfffe\n",
+  " 1ba:	eb08 0000 	add.w	r0, r8, r0\n",
+  " 1be:	4608      	mov	r0, r1\n",
+  " 1c0:	f101 0001 	add.w	r0, r1, #1\n",
+  " 1c4:	f101 0007 	add.w	r0, r1, #7\n",
+  " 1c8:	f101 0008 	add.w	r0, r1, #8\n",
+  " 1cc:	f101 00ff 	add.w	r0, r1, #255	; 0xff\n",
+  " 1d0:	f501 7080 	add.w	r0, r1, #256	; 0x100\n",
+  " 1d4:	f201 1001 	addw	r0, r1, #257	; 0x101\n",
+  " 1d8:	f601 70ff 	addw	r0, r1, #4095	; 0xfff\n",
+  " 1dc:	f501 5080 	add.w	r0, r1, #4096	; 0x1000\n",
+  " 1e0:	f46f 5080 	mvn.w	r0, #4096	; 0x1000\n",
+  " 1e4:	eba1 0000 	sub.w	r0, r1, r0\n",
+  " 1e8:	f241 0002 	movw	r0, #4098	; 0x1002\n",
+  " 1ec:	eb01 0000 	add.w	r0, r1, r0\n",
+  " 1f0:	f64f 70ff 	movw	r0, #65535	; 0xffff\n",
+  " 1f4:	eb01 0000 	add.w	r0, r1, r0\n",
+  " 1f8:	f501 3080 	add.w	r0, r1, #65536	; 0x10000\n",
+  " 1fc:	f101 1001 	add.w	r0, r1, #65537	; 0x10001\n",
+  " 200:	f06f 1001 	mvn.w	r0, #65537	; 0x10001\n",
+  " 204:	eba1 0000 	sub.w	r0, r1, r0\n",
+  " 208:	f240 0003 	movw	r0, #3\n",
+  " 20c:	f2c0 0001 	movt	r0, #1\n",
+  " 210:	eb01 0000 	add.w	r0, r1, r0\n",
+  " 214:	f101 30ff 	add.w	r0, r1, #4294967295	; 0xffffffff\n",
+  " 218:	f1a1 0007 	sub.w	r0, r1, #7\n",
+  " 21c:	f1a1 0008 	sub.w	r0, r1, #8\n",
+  " 220:	f1a1 00ff 	sub.w	r0, r1, #255	; 0xff\n",
+  " 224:	f5a1 7080 	sub.w	r0, r1, #256	; 0x100\n",
+  " 228:	f2a1 1001 	subw	r0, r1, #257	; 0x101\n",
+  " 22c:	f6a1 70ff 	subw	r0, r1, #4095	; 0xfff\n",
+  " 230:	f5a1 5080 	sub.w	r0, r1, #4096	; 0x1000\n",
+  " 234:	f46f 5080 	mvn.w	r0, #4096	; 0x1000\n",
+  " 238:	eb01 0000 	add.w	r0, r1, r0\n",
+  " 23c:	f241 0002 	movw	r0, #4098	; 0x1002\n",
+  " 240:	eba1 0000 	sub.w	r0, r1, r0\n",
+  " 244:	f64f 70ff 	movw	r0, #65535	; 0xffff\n",
+  " 248:	eba1 0000 	sub.w	r0, r1, r0\n",
+  " 24c:	f5a1 3080 	sub.w	r0, r1, #65536	; 0x10000\n",
+  " 250:	f1a1 1001 	sub.w	r0, r1, #65537	; 0x10001\n",
+  " 254:	f06f 1001 	mvn.w	r0, #65537	; 0x10001\n",
+  " 258:	eb01 0000 	add.w	r0, r1, r0\n",
+  " 25c:	f64f 70fd 	movw	r0, #65533	; 0xfffd\n",
+  " 260:	f6cf 70fe 	movt	r0, #65534	; 0xfffe\n",
+  " 264:	eb01 0000 	add.w	r0, r1, r0\n",
+  " 268:	f101 0101 	add.w	r1, r1, #1\n",
+  " 26c:	f100 0007 	add.w	r0, r0, #7\n",
+  " 270:	f101 0108 	add.w	r1, r1, #8\n",
+  " 274:	f100 00ff 	add.w	r0, r0, #255	; 0xff\n",
+  " 278:	f501 7180 	add.w	r1, r1, #256	; 0x100\n",
+  " 27c:	f200 1001 	addw	r0, r0, #257	; 0x101\n",
+  " 280:	f601 71ff 	addw	r1, r1, #4095	; 0xfff\n",
+  " 284:	f500 5080 	add.w	r0, r0, #4096	; 0x1000\n",
+  " 288:	f46f 5c80 	mvn.w	ip, #4096	; 0x1000\n",
+  " 28c:	eba1 010c 	sub.w	r1, r1, ip\n",
+  " 290:	f241 0c02 	movw	ip, #4098	; 0x1002\n",
+  " 294:	4460      	add	r0, ip\n",
+  " 296:	f64f 7cff 	movw	ip, #65535	; 0xffff\n",
+  " 29a:	4461      	add	r1, ip\n",
+  " 29c:	f500 3080 	add.w	r0, r0, #65536	; 0x10000\n",
+  " 2a0:	f101 1101 	add.w	r1, r1, #65537	; 0x10001\n",
+  " 2a4:	f06f 1c01 	mvn.w	ip, #65537	; 0x10001\n",
+  " 2a8:	eba0 000c 	sub.w	r0, r0, ip\n",
+  " 2ac:	f240 0c03 	movw	ip, #3\n",
+  " 2b0:	f2c0 0c01 	movt	ip, #1\n",
+  " 2b4:	4461      	add	r1, ip\n",
+  " 2b6:	f100 30ff 	add.w	r0, r0, #4294967295	; 0xffffffff\n",
+  " 2ba:	f1a1 0107 	sub.w	r1, r1, #7\n",
+  " 2be:	f1a0 0008 	sub.w	r0, r0, #8\n",
+  " 2c2:	f1a1 01ff 	sub.w	r1, r1, #255	; 0xff\n",
+  " 2c6:	f5a0 7080 	sub.w	r0, r0, #256	; 0x100\n",
+  " 2ca:	f2a1 1101 	subw	r1, r1, #257	; 0x101\n",
+  " 2ce:	f6a0 70ff 	subw	r0, r0, #4095	; 0xfff\n",
+  " 2d2:	f5a1 5180 	sub.w	r1, r1, #4096	; 0x1000\n",
+  " 2d6:	f46f 5c80 	mvn.w	ip, #4096	; 0x1000\n",
+  " 2da:	4460      	add	r0, ip\n",
+  " 2dc:	f241 0c02 	movw	ip, #4098	; 0x1002\n",
+  " 2e0:	eba1 010c 	sub.w	r1, r1, ip\n",
+  " 2e4:	f64f 7cff 	movw	ip, #65535	; 0xffff\n",
+  " 2e8:	eba0 000c 	sub.w	r0, r0, ip\n",
+  " 2ec:	f5a1 3180 	sub.w	r1, r1, #65536	; 0x10000\n",
+  " 2f0:	f1a0 1001 	sub.w	r0, r0, #65537	; 0x10001\n",
+  " 2f4:	f06f 1c01 	mvn.w	ip, #65537	; 0x10001\n",
+  " 2f8:	4461      	add	r1, ip\n",
+  " 2fa:	f64f 7cfd 	movw	ip, #65533	; 0xfffd\n",
+  " 2fe:	f6cf 7cfe 	movt	ip, #65534	; 0xfffe\n",
+  " 302:	4460      	add	r0, ip\n",
+  " 304:	1c08      	adds	r0, r1, #0\n",
+  " 306:	1c48      	adds	r0, r1, #1\n",
+  " 308:	1dc8      	adds	r0, r1, #7\n",
+  " 30a:	f111 0008 	adds.w	r0, r1, #8\n",
+  " 30e:	f111 00ff 	adds.w	r0, r1, #255	; 0xff\n",
+  " 312:	f511 7080 	adds.w	r0, r1, #256	; 0x100\n",
+  " 316:	f46f 7080 	mvn.w	r0, #256	; 0x100\n",
+  " 31a:	1a08      	subs	r0, r1, r0\n",
+  " 31c:	f640 70ff 	movw	r0, #4095	; 0xfff\n",
+  " 320:	1808      	adds	r0, r1, r0\n",
+  " 322:	f511 5080 	adds.w	r0, r1, #4096	; 0x1000\n",
+  " 326:	f46f 5080 	mvn.w	r0, #4096	; 0x1000\n",
+  " 32a:	1a08      	subs	r0, r1, r0\n",
+  " 32c:	f241 0002 	movw	r0, #4098	; 0x1002\n",
+  " 330:	1808      	adds	r0, r1, r0\n",
+  " 332:	f64f 70ff 	movw	r0, #65535	; 0xffff\n",
+  " 336:	1808      	adds	r0, r1, r0\n",
+  " 338:	f511 3080 	adds.w	r0, r1, #65536	; 0x10000\n",
+  " 33c:	f111 1001 	adds.w	r0, r1, #65537	; 0x10001\n",
+  " 340:	f06f 1001 	mvn.w	r0, #65537	; 0x10001\n",
+  " 344:	1a08      	subs	r0, r1, r0\n",
+  " 346:	f240 0003 	movw	r0, #3\n",
+  " 34a:	f2c0 0001 	movt	r0, #1\n",
+  " 34e:	1808      	adds	r0, r1, r0\n",
+  " 350:	1e48      	subs	r0, r1, #1\n",
+  " 352:	1fc8      	subs	r0, r1, #7\n",
+  " 354:	f1b1 0008 	subs.w	r0, r1, #8\n",
+  " 358:	f1b1 00ff 	subs.w	r0, r1, #255	; 0xff\n",
+  " 35c:	f5b1 7080 	subs.w	r0, r1, #256	; 0x100\n",
+  " 360:	f46f 7080 	mvn.w	r0, #256	; 0x100\n",
+  " 364:	1808      	adds	r0, r1, r0\n",
+  " 366:	f640 70ff 	movw	r0, #4095	; 0xfff\n",
+  " 36a:	1a08      	subs	r0, r1, r0\n",
+  " 36c:	f5b1 5080 	subs.w	r0, r1, #4096	; 0x1000\n",
+  " 370:	f46f 5080 	mvn.w	r0, #4096	; 0x1000\n",
+  " 374:	1808      	adds	r0, r1, r0\n",
+  " 376:	f241 0002 	movw	r0, #4098	; 0x1002\n",
+  " 37a:	1a08      	subs	r0, r1, r0\n",
+  " 37c:	f64f 70ff 	movw	r0, #65535	; 0xffff\n",
+  " 380:	1a08      	subs	r0, r1, r0\n",
+  " 382:	f5b1 3080 	subs.w	r0, r1, #65536	; 0x10000\n",
+  " 386:	f1b1 1001 	subs.w	r0, r1, #65537	; 0x10001\n",
+  " 38a:	f06f 1001 	mvn.w	r0, #65537	; 0x10001\n",
+  " 38e:	1808      	adds	r0, r1, r0\n",
+  " 390:	f64f 70fd 	movw	r0, #65533	; 0xfffd\n",
+  " 394:	f6cf 70fe 	movt	r0, #65534	; 0xfffe\n",
+  " 398:	1808      	adds	r0, r1, r0\n",
+  " 39a:	3000      	adds	r0, #0\n",
+  " 39c:	3101      	adds	r1, #1\n",
+  " 39e:	3007      	adds	r0, #7\n",
+  " 3a0:	3108      	adds	r1, #8\n",
+  " 3a2:	30ff      	adds	r0, #255	; 0xff\n",
+  " 3a4:	f511 7180 	adds.w	r1, r1, #256	; 0x100\n",
+  " 3a8:	f46f 7c80 	mvn.w	ip, #256	; 0x100\n",
+  " 3ac:	ebb0 000c 	subs.w	r0, r0, ip\n",
+  " 3b0:	f640 7cff 	movw	ip, #4095	; 0xfff\n",
+  " 3b4:	eb11 010c 	adds.w	r1, r1, ip\n",
+  " 3b8:	f510 5080 	adds.w	r0, r0, #4096	; 0x1000\n",
+  " 3bc:	f46f 5c80 	mvn.w	ip, #4096	; 0x1000\n",
+  " 3c0:	ebb1 010c 	subs.w	r1, r1, ip\n",
+  " 3c4:	f241 0c02 	movw	ip, #4098	; 0x1002\n",
+  " 3c8:	eb10 000c 	adds.w	r0, r0, ip\n",
+  " 3cc:	f64f 7cff 	movw	ip, #65535	; 0xffff\n",
+  " 3d0:	eb11 010c 	adds.w	r1, r1, ip\n",
+  " 3d4:	f510 3080 	adds.w	r0, r0, #65536	; 0x10000\n",
+  " 3d8:	f111 1101 	adds.w	r1, r1, #65537	; 0x10001\n",
+  " 3dc:	f06f 1c01 	mvn.w	ip, #65537	; 0x10001\n",
+  " 3e0:	ebb0 000c 	subs.w	r0, r0, ip\n",
+  " 3e4:	f240 0c03 	movw	ip, #3\n",
+  " 3e8:	f2c0 0c01 	movt	ip, #1\n",
+  " 3ec:	eb11 010c 	adds.w	r1, r1, ip\n",
+  " 3f0:	3801      	subs	r0, #1\n",
+  " 3f2:	3907      	subs	r1, #7\n",
+  " 3f4:	3808      	subs	r0, #8\n",
+  " 3f6:	39ff      	subs	r1, #255	; 0xff\n",
+  " 3f8:	f5b0 7080 	subs.w	r0, r0, #256	; 0x100\n",
+  " 3fc:	f46f 7c80 	mvn.w	ip, #256	; 0x100\n",
+  " 400:	eb11 010c 	adds.w	r1, r1, ip\n",
+  " 404:	f640 7cff 	movw	ip, #4095	; 0xfff\n",
+  " 408:	ebb0 000c 	subs.w	r0, r0, ip\n",
+  " 40c:	f5b1 5180 	subs.w	r1, r1, #4096	; 0x1000\n",
+  " 410:	f46f 5c80 	mvn.w	ip, #4096	; 0x1000\n",
+  " 414:	eb10 000c 	adds.w	r0, r0, ip\n",
+  " 418:	f241 0c02 	movw	ip, #4098	; 0x1002\n",
+  " 41c:	ebb1 010c 	subs.w	r1, r1, ip\n",
+  " 420:	f64f 7cff 	movw	ip, #65535	; 0xffff\n",
+  " 424:	ebb0 000c 	subs.w	r0, r0, ip\n",
+  " 428:	f5b1 3180 	subs.w	r1, r1, #65536	; 0x10000\n",
+  " 42c:	f1b0 1001 	subs.w	r0, r0, #65537	; 0x10001\n",
+  " 430:	f06f 1c01 	mvn.w	ip, #65537	; 0x10001\n",
+  " 434:	eb11 010c 	adds.w	r1, r1, ip\n",
+  " 438:	f64f 7cfd 	movw	ip, #65533	; 0xfffd\n",
+  " 43c:	f6cf 7cfe 	movt	ip, #65534	; 0xfffe\n",
+  " 440:	eb10 000c 	adds.w	r0, r0, ip\n",
+  " 444:	bf08      	it	eq\n",
+  " 446:	f111 0001 	addseq.w	r0, r1, #1\n",
+  " 44a:	bf18      	it	ne\n",
+  " 44c:	1c48      	addne	r0, r1, #1\n",
+  " 44e:	bfa8      	it	ge\n",
+  " 450:	f110 0001 	addsge.w	r0, r0, #1\n",
+  " 454:	bfd8      	it	le\n",
+  " 456:	3001      	addle	r0, #1\n",
+  nullptr
+};
+
 std::map<std::string, const char* const*> test_results;
 void setup_results() {
     test_results["SimpleMov"] = SimpleMovResults;
@@ -5102,4 +5420,5 @@
     test_results["LoadStoreLiteral"] = LoadStoreLiteralResults;
     test_results["LoadStoreLimits"] = LoadStoreLimitsResults;
     test_results["CompareAndBranch"] = CompareAndBranchResults;
+    test_results["AddConstant"] = AddConstantResults;
 }