Version 2.2.18

Add API functions to retrieve information on indexed properties managed by the embedding layer.  Fixes bug 737.
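
A hypothetical embedder-side sketch of how such queries might be used; the
method names below are assumptions about the API surface and are not taken
from this commit's diff:

    #include <v8.h>
    #include <stdint.h>
    #include <stdio.h>

    // Dump the pixel data backing an object's indexed properties, if the
    // embedder installed any.  Names are illustrative only.
    void DumpPixelBacking(v8::Handle<v8::Object> obj) {
      if (obj->HasIndexedPropertiesInPixelData()) {
        uint8_t* data = obj->GetIndexedPropertiesPixelData();
        int length = obj->GetIndexedPropertiesPixelDataLength();
        for (int i = 0; i < length; i++) {
          printf("[%d] = %u\n", i, data[i]);
        }
      }
    }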

Make ES5 Object.defineProperty support array elements.  Fixes bug 619.

Add heap profiling to the API.

Remove old named property query from the API.

Incremental performance improvements.


git-svn-id: http://v8.googlecode.com/svn/trunk@4875 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
diff --git a/src/arm/assembler-arm.cc b/src/arm/assembler-arm.cc
index 025f28e..16dc5cd 100644
--- a/src/arm/assembler-arm.cc
+++ b/src/arm/assembler-arm.cc
@@ -279,6 +279,20 @@
     15 * B24 | 15 * B20 | 15 * B16 | 15 * B12 | 15 * B8 | 15 * B4;
 const Instr kBlxRegPattern =
     B24 | B21 | 15 * B16 | 15 * B12 | 15 * B8 | 3 * B4;
+const Instr kMovMvnMask = 0x6d * B21 | 0xf * B16;
+const Instr kMovMvnPattern = 0xd * B21;
+const Instr kMovMvnFlip = B22;
+const Instr kCmpCmnMask = 0xdd * B20 | 0xf * B12;
+const Instr kCmpCmnPattern = 0x15 * B20;
+const Instr kCmpCmnFlip = B21;
+const Instr kALUMask = 0x6f * B21;
+const Instr kAddPattern = 0x4 * B21;
+const Instr kSubPattern = 0x2 * B21;
+const Instr kBicPattern = 0xe * B21;
+const Instr kAndPattern = 0x0 * B21;
+const Instr kAddSubFlip = 0x6 * B21;
+const Instr kAndBicFlip = 0xe * B21;
+
 // A mask for the Rd register for push, pop, ldr, str instructions.
 const Instr kRdMask = 0x0000f000;
 static const int kRdShift = 12;
@@ -627,6 +641,9 @@
 
 
 // Low-level code emission routines depending on the addressing mode.
+// If this returns true then you have to use the rotate_imm and immed_8
+// that it returns, because it may have already changed the instruction
+// to match them!
 static bool fits_shifter(uint32_t imm32,
                          uint32_t* rotate_imm,
                          uint32_t* immed_8,
@@ -640,11 +657,34 @@
       return true;
     }
   }
-  // If the opcode is mov or mvn and if ~imm32 fits, change the opcode.
-  if (instr != NULL && (*instr & 0xd*B21) == 0xd*B21) {
-    if (fits_shifter(~imm32, rotate_imm, immed_8, NULL)) {
-      *instr ^= 0x2*B21;
-      return true;
+  // If the opcode is one with a complementary version and the complementary
+  // immediate fits, change the opcode.
+  if (instr != NULL) {
+    if ((*instr & kMovMvnMask) == kMovMvnPattern) {
+      if (fits_shifter(~imm32, rotate_imm, immed_8, NULL)) {
+        *instr ^= kMovMvnFlip;
+        return true;
+      }
+    } else if ((*instr & kCmpCmnMask) == kCmpCmnPattern) {
+      if (fits_shifter(-imm32, rotate_imm, immed_8, NULL)) {
+        *instr ^= kCmpCmnFlip;
+        return true;
+      }
+    } else {
+      Instr alu_insn = (*instr & kALUMask);
+      if (alu_insn == kAddPattern ||
+          alu_insn == kSubPattern) {
+        if (fits_shifter(-imm32, rotate_imm, immed_8, NULL)) {
+          *instr ^= kAddSubFlip;
+          return true;
+        }
+      } else if (alu_insn == kAndPattern ||
+                 alu_insn == kBicPattern) {
+        if (fits_shifter(~imm32, rotate_imm, immed_8, NULL)) {
+          *instr ^= kAndBicFlip;
+          return true;
+        }
+      }
     }
   }
   return false;
@@ -670,6 +710,14 @@
 }
 
 
+bool Operand::is_single_instruction() const {
+  if (rm_.is_valid()) return true;
+  if (MustUseIp(rmode_)) return false;
+  uint32_t dummy1, dummy2;
+  return fits_shifter(imm32_, &dummy1, &dummy2, NULL);
+}
+
+
 void Assembler::addrmod1(Instr instr,
                          Register rn,
                          Register rd,
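
Background on the fits_shifter() change above: an ARM data-processing
instruction encodes its immediate as an 8-bit value rotated right by an even
amount (0-30), and the loop at the top of fits_shifter() searches for such an
encoding.  When none exists, the patch now also tries a complementary opcode
whose immediate does fit: MOV/MVN and AND/BIC with the bitwise complement,
CMP/CMN and ADD/SUB with the negation.  The new flip constants are simply the
XOR of the two 4-bit opcodes placed at bit 21: MOV (0b1101) vs MVN (0b1111)
gives B22, CMP (0b1010) vs CMN (0b1011) gives B21, ADD (0b0100) vs SUB
(0b0010) gives 0x6 * B21, and AND (0b0000) vs BIC (0b1110) gives 0xe * B21.
The standalone sketch below shows only the basic encoding test; it is an
illustration, not the V8 implementation:

    #include <stdint.h>
    #include <stdio.h>

    // Returns true if imm32 equals an 8-bit value rotated right by 2*rotate
    // (rotate in 0..15), i.e. it fits an ARM data-processing immediate.
    static bool EncodesAsArmImmediate(uint32_t imm32,
                                      uint32_t* rotate,
                                      uint32_t* imm8) {
      for (uint32_t rot = 0; rot < 16; rot++) {
        uint32_t r = 2 * rot;
        // Rotating left by r undoes a right rotation by r; guard r == 0 to
        // avoid an undefined shift by 32.
        uint32_t v = (r == 0) ? imm32 : ((imm32 << r) | (imm32 >> (32 - r)));
        if (v <= 0xff) {
          *rotate = rot;
          *imm8 = v;
          return true;
        }
      }
      return false;
    }

    int main() {
      uint32_t rotate, imm8;
      // 0xff000000 is 0xff rotated right by 8, so it encodes directly.
      printf("%d\n", EncodesAsArmImmediate(0xff000000u, &rotate, &imm8));  // 1
      // 0xffffff00 does not encode, but its complement 0xff does, which is
      // why a MOV of it can be rewritten as MVN (and an AND as BIC).
      printf("%d\n", EncodesAsArmImmediate(0xffffff00u, &rotate, &imm8));  // 0
      return 0;
    }

The new Operand::is_single_instruction() asks the same question with a NULL
instruction pointer, so no opcode flipping is attempted: a register operand is
always a single instruction, and an immediate operand is one only if it fits
the encoding directly; otherwise the assembler has to load the constant
through ip (e.g. from the constant pool).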