Update V8 to r6768 as required by WebKit r78450

Change-Id: Ib8868ff7147a76547a8d1d85f257ebe8546a3d3f
diff --git a/src/arm/macro-assembler-arm.cc b/src/arm/macro-assembler-arm.cc
index 1028b0e..5d8df1a 100644
--- a/src/arm/macro-assembler-arm.cc
+++ b/src/arm/macro-assembler-arm.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -318,7 +318,7 @@
   CheckConstPool(true, true);
   add(pc, pc, Operand(index,
                       LSL,
-                      assembler::arm::Instr::kInstrSizeLog2 - kSmiTagSize));
+                      Instruction::kInstrSizeLog2 - kSmiTagSize));
   BlockConstPoolBefore(pc_offset() + (targets.length() + 1) * kInstrSize);
   nop();  // Jump table alignment.
   for (int i = 0; i < targets.length(); i++) {
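
A note on the arithmetic in the fixed line above: the jump-table dispatch scales a
smi-tagged index directly to a byte offset, because untagging (shift right by
kSmiTagSize) and scaling to 4-byte ARM instructions (shift left by kInstrSizeLog2)
fold into a single left shift. A minimal standalone sketch, with the ARM constants
written out and a helper name of my own:

    #include <cassert>

    // ARM constants mirrored from the patch: a smi carries a 1-bit tag and
    // an ARM instruction is 4 bytes (kInstrSizeLog2 == 2).
    const int kSmiTagSize = 1;
    const int kInstrSizeLog2 = 2;

    // Byte offset into the jump table for a smi-tagged index: one shift
    // both untags the index and scales it to instruction-size units.
    int JumpTableOffset(int smi_index) {
      return smi_index << (kInstrSizeLog2 - kSmiTagSize);
    }

    int main() {
      int smi_three = 3 << kSmiTagSize;             // smi encoding of 3
      assert(JumpTableOffset(smi_three) == 3 * 4);  // third 4-byte slot
      return 0;
    }
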
@@ -369,12 +369,12 @@
 
 void MacroAssembler::InNewSpace(Register object,
                                 Register scratch,
-                                Condition cc,
+                                Condition cond,
                                 Label* branch) {
-  ASSERT(cc == eq || cc == ne);
+  ASSERT(cond == eq || cond == ne);
   and_(scratch, object, Operand(ExternalReference::new_space_mask()));
   cmp(scratch, Operand(ExternalReference::new_space_start()));
-  b(cc, branch);
+  b(cond, branch);
 }
 
 
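InNewSpace tests whether an object lies in new space with one mask and one compare,
which works because new space is assumed to be a single aligned region whose size is
a power of two. A minimal sketch under that assumption, with an illustrative base
address and size:

    #include <cassert>
    #include <cstdint>

    // Illustrative layout: a 4 MB new space aligned to its own size.
    const uintptr_t kNewSpaceStart = 0x40000000;
    const uintptr_t kNewSpaceSize = 4 * 1024 * 1024;
    const uintptr_t kNewSpaceMask = ~(kNewSpaceSize - 1);

    // Mirrors the and_/cmp pair above: masking the low bits of an address
    // yields the start of the aligned region containing it.
    bool InNewSpace(uintptr_t object) {
      return (object & kNewSpaceMask) == kNewSpaceStart;
    }

    int main() {
      assert(InNewSpace(kNewSpaceStart + 0x1234));
      assert(!InNewSpace(kNewSpaceStart + kNewSpaceSize));
      return 0;
    }
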
@@ -485,6 +485,21 @@
   PopSafepointRegisters();
 }
 
+void MacroAssembler::StoreToSafepointRegistersAndDoublesSlot(Register reg) {
+  str(reg, SafepointRegistersAndDoublesSlot(reg));
+}
+
+
+void MacroAssembler::StoreToSafepointRegisterSlot(Register reg) {
+  str(reg, SafepointRegisterSlot(reg));
+}
+
+
+void MacroAssembler::LoadFromSafepointRegisterSlot(Register reg) {
+  ldr(reg, SafepointRegisterSlot(reg));
+}
+
+
 int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
   // The registers are pushed starting with the highest encoding,
   // which means that lowest encodings are closest to the stack pointer.
@@ -493,6 +508,19 @@
 }
 
 
+MemOperand MacroAssembler::SafepointRegisterSlot(Register reg) {
+  return MemOperand(sp, SafepointRegisterStackIndex(reg.code()) * kPointerSize);
+}
+
+
+MemOperand MacroAssembler::SafepointRegistersAndDoublesSlot(Register reg) {
+  // General purpose registers are pushed last on the stack.
+  int doubles_size = DwVfpRegister::kNumAllocatableRegisters * kDoubleSize;
+  int register_offset = SafepointRegisterStackIndex(reg.code()) * kPointerSize;
+  return MemOperand(sp, doubles_size + register_offset);
+}
+
+
 void MacroAssembler::Ldrd(Register dst1, Register dst2,
                           const MemOperand& src, Condition cond) {
   ASSERT(src.rm().is(no_reg));
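
The new safepoint slot helpers above turn a register code into an sp-relative
offset; when doubles are saved too, the general-purpose slots sit above the block of
saved VFP doubles. A sketch of the offset arithmetic, assuming the stack index
equals the register code as the comment above SafepointRegisterStackIndex implies,
with illustrative 32-bit constants:

    #include <cassert>

    // Illustrative stand-ins for the constants used above (32-bit ARM).
    const int kPointerSize = 4;
    const int kDoubleSize = 8;
    const int kNumAllocatableDoubles = 15;  // DwVfpRegister::kNumAllocatableRegisters

    // Assumed from the comment above SafepointRegisterStackIndex: registers
    // are pushed from the highest encoding down, so the slot index is the
    // register code itself.
    int SafepointRegisterStackIndex(int reg_code) { return reg_code; }

    // Mirrors SafepointRegisterSlot: sp-relative offset of a saved register.
    int SafepointRegisterSlotOffset(int reg_code) {
      return SafepointRegisterStackIndex(reg_code) * kPointerSize;
    }

    // Mirrors SafepointRegistersAndDoublesSlot: the general-purpose slots
    // sit above the block of saved doubles.
    int SafepointRegistersAndDoublesSlotOffset(int reg_code) {
      int doubles_size = kNumAllocatableDoubles * kDoubleSize;
      return doubles_size + SafepointRegisterSlotOffset(reg_code);
    }

    int main() {
      assert(SafepointRegisterSlotOffset(2) == 8);               // r2: third slot up
      assert(SafepointRegistersAndDoublesSlotOffset(0) == 120);  // r0 above 15 doubles
      return 0;
    }
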
@@ -604,38 +632,21 @@
 }
 
 
-void MacroAssembler::EnterExitFrame(bool save_doubles) {
-  // r0 is argc.
-  // Compute callee's stack pointer before making changes and save it as
-  // ip register so that it is restored as sp register on exit, thereby
-  // popping the args.
-
-  // ip = sp + kPointerSize * #args;
-  add(ip, sp, Operand(r0, LSL, kPointerSizeLog2));
-
-  // Compute the argv pointer and keep it in a callee-saved register.
-  sub(r6, ip, Operand(kPointerSize));
-
-  // Prepare the stack to be aligned when calling into C. After this point there
-  // are 5 pushes before the call into C, so the stack needs to be aligned after
-  // 5 pushes.
-  int frame_alignment = ActivationFrameAlignment();
-  int frame_alignment_mask = frame_alignment - 1;
-  if (frame_alignment != kPointerSize) {
-    // The following code needs to be more general if this assert does not hold.
-    ASSERT(frame_alignment == 2 * kPointerSize);
-    // With 5 pushes left the frame must be unaligned at this point.
-    mov(r7, Operand(Smi::FromInt(0)));
-    tst(sp, Operand((frame_alignment - kPointerSize) & frame_alignment_mask));
-    push(r7, eq);  // Push if aligned to make it unaligned.
-  }
-
-  // Push in reverse order: caller_fp, sp_on_exit, and caller_pc.
-  stm(db_w, sp, fp.bit() | ip.bit() | lr.bit());
+void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space) {
+  // Set up the frame structure on the stack.
+  ASSERT_EQ(2 * kPointerSize, ExitFrameConstants::kCallerSPDisplacement);
+  ASSERT_EQ(1 * kPointerSize, ExitFrameConstants::kCallerPCOffset);
+  ASSERT_EQ(0 * kPointerSize, ExitFrameConstants::kCallerFPOffset);
+  Push(lr, fp);
   mov(fp, Operand(sp));  // Setup new frame pointer.
-
+  // Reserve room for saved entry sp and code object.
+  sub(sp, sp, Operand(2 * kPointerSize));
+  if (FLAG_debug_code) {
+    mov(ip, Operand(0));
+    str(ip, MemOperand(fp, ExitFrameConstants::kSPOffset));
+  }
   mov(ip, Operand(CodeObject()));
-  push(ip);  // Accessed from ExitFrame::code_slot.
+  str(ip, MemOperand(fp, ExitFrameConstants::kCodeOffset));
 
   // Save the frame pointer and the context in top.
   mov(ip, Operand(ExternalReference(Top::k_c_entry_fp_address)));
@@ -643,31 +654,32 @@
   mov(ip, Operand(ExternalReference(Top::k_context_address)));
   str(cp, MemOperand(ip));
 
-  // Setup argc and the builtin function in callee-saved registers.
-  mov(r4, Operand(r0));
-  mov(r5, Operand(r1));
-
   // Optionally save all double registers.
   if (save_doubles) {
-    // TODO(regis): Use vstrm instruction.
-    // The stack alignment code above made sp unaligned, so add space for one
-    // more double register and use aligned addresses.
-    ASSERT(kDoubleSize == frame_alignment);
-    // Mark the frame as containing doubles by pushing a non-valid return
-    // address, i.e. 0.
-    ASSERT(ExitFrameConstants::kMarkerOffset == -2 * kPointerSize);
-    mov(ip, Operand(0));  // Marker and alignment word.
-    push(ip);
-    int space = DwVfpRegister::kNumRegisters * kDoubleSize + kPointerSize;
-    sub(sp, sp, Operand(space));
+    sub(sp, sp, Operand(DwVfpRegister::kNumRegisters * kDoubleSize));
+    const int offset = -2 * kPointerSize;
     for (int i = 0; i < DwVfpRegister::kNumRegisters; i++) {
       DwVfpRegister reg = DwVfpRegister::from_code(i);
-      vstr(reg, sp, i * kDoubleSize + kPointerSize);
+      vstr(reg, fp, offset - ((i + 1) * kDoubleSize));
     }
-    // Note that d0 will be accessible at fp - 2*kPointerSize -
-    // DwVfpRegister::kNumRegisters * kDoubleSize, since the code slot and the
-    // alignment word were pushed after the fp.
+    // Note that d0 will be accessible at
+    //   fp - 2 * kPointerSize - DwVfpRegister::kNumRegisters * kDoubleSize,
+    // since the sp slot and code slot were pushed after the fp.
   }
+
+  // Reserve room for the return address and the requested stack space, and
+  // align the frame in preparation for calling the runtime function.
+  const int frame_alignment = MacroAssembler::ActivationFrameAlignment();
+  sub(sp, sp, Operand((stack_space + 1) * kPointerSize));
+  if (frame_alignment > 0) {
+    ASSERT(IsPowerOf2(frame_alignment));
+    and_(sp, sp, Operand(-frame_alignment));
+  }
+
+  // Set the exit frame sp value to point just before the return address
+  // location.
+  add(ip, sp, Operand(kPointerSize));
+  str(ip, MemOperand(fp, ExitFrameConstants::kSPOffset));
 }
 
 
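The rewritten prologue gives the exit frame fixed fp-relative slots and aligns sp by
masking. A sketch of the resulting layout and of the alignment step; the named
constants are stand-ins for the ExitFrameConstants values on 32-bit ARM:

    #include <cassert>
    #include <cstdint>

    const int kPointerSize = 4;
    const int kDoubleSize = 8;
    const int kNumVfpRegisters = 16;  // DwVfpRegister::kNumRegisters

    // fp-relative offsets established by the prologue above.
    const int kCallerPCOffset = 1 * kPointerSize;  // saved lr
    const int kCallerFPOffset = 0 * kPointerSize;  // saved fp; fp points here
    const int kSPOffset = -1 * kPointerSize;       // saved exit sp
    const int kCodeOffset = -2 * kPointerSize;     // code object slot

    // Offset of saved double register d_i, matching the vstr loop.
    int DoubleSlotOffset(int i) {
      return kCodeOffset - (i + 1) * kDoubleSize;
    }

    // The alignment step: and_(sp, sp, Operand(-frame_alignment)) rounds sp
    // down to a multiple of a power-of-two alignment.
    uintptr_t AlignDown(uintptr_t sp, uintptr_t alignment) {
      return sp & ~(alignment - 1);  // equal to sp & -alignment here
    }

    int main() {
      assert(DoubleSlotOffset(0) == -16);                      // d0, below the code slot
      assert(DoubleSlotOffset(kNumVfpRegisters - 1) == -136);  // d15, deepest
      assert(AlignDown(0x1006, 8) == 0x1000);
      return 0;
    }
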
@@ -705,12 +717,10 @@
 void MacroAssembler::LeaveExitFrame(bool save_doubles) {
   // Optionally restore all double registers.
   if (save_doubles) {
-    // TODO(regis): Use vldrm instruction.
     for (int i = 0; i < DwVfpRegister::kNumRegisters; i++) {
       DwVfpRegister reg = DwVfpRegister::from_code(i);
-      // Register d15 is just below the marker.
-      const int offset = ExitFrameConstants::kMarkerOffset;
-      vldr(reg, fp, (i - DwVfpRegister::kNumRegisters) * kDoubleSize + offset);
+      const int offset = -2 * kPointerSize;
+      vldr(reg, fp, offset - ((i + 1) * kDoubleSize));
     }
   }
 
@@ -726,9 +736,12 @@
   str(r3, MemOperand(ip));
 #endif
 
-  // Pop the arguments, restore registers, and return.
-  mov(sp, Operand(fp));  // respect ABI stack constraint
-  ldm(ia, sp, fp.bit() | sp.bit() | pc.bit());
+  // Tear down the exit frame, pop the arguments, and return. Callee-saved
+  // register r4 still holds argc.
+  mov(sp, Operand(fp));
+  ldm(ia_w, sp, fp.bit() | lr.bit());
+  add(sp, sp, Operand(r4, LSL, kPointerSizeLog2));
+  mov(pc, lr);
 }
 
 
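For reference, the new epilogue reduces to three pointer adjustments, with argc
still live in r4. A sketch of the equivalent arithmetic (the pointer size is the
illustrative 32-bit value):

    #include <cassert>
    #include <cstdint>

    const int kPointerSize = 4;

    // Sketch of the teardown: sp returns to fp, the saved fp and lr pop
    // off, then the argc argument slots are dropped before the return.
    uintptr_t StackAfterTearDown(uintptr_t fp, int argc) {
      uintptr_t sp = fp;          // mov sp, fp
      sp += 2 * kPointerSize;     // ldm ia_w sp!, {fp, lr}
      sp += argc * kPointerSize;  // add sp, sp, r4 << kPointerSizeLog2
      return sp;                  // mov pc, lr returns to the caller
    }

    int main() {
      assert(StackAfterTearDown(0x1000, 3) == 0x1000 + 8 + 12);
      return 0;
    }
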
@@ -923,7 +936,7 @@
   ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
   ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
   tst(scratch, Operand(kIsNotStringMask));
-  b(nz, fail);
+  b(ne, fail);
 }
 
 
@@ -1137,7 +1150,8 @@
 
   // Calculate new top and bail out if new space is exhausted. Use result
   // to calculate the new top.
-  add(scratch2, result, Operand(obj_size_reg));
+  add(scratch2, result, Operand(obj_size_reg), SetCC);
+  b(cs, gc_required);
   cmp(scratch2, Operand(ip));
   b(hi, gc_required);
   str(scratch2, MemOperand(topaddr));
@@ -1216,10 +1230,11 @@
   // to calculate the new top. Object size may be in words so a shift is
   // required to get the number of bytes.
   if ((flags & SIZE_IN_WORDS) != 0) {
-    add(scratch2, result, Operand(object_size, LSL, kPointerSizeLog2));
+    add(scratch2, result, Operand(object_size, LSL, kPointerSizeLog2), SetCC);
   } else {
-    add(scratch2, result, Operand(object_size));
+    add(scratch2, result, Operand(object_size), SetCC);
   }
+  b(cs, gc_required);
   cmp(scratch2, Operand(ip));
   b(hi, gc_required);
 
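Both allocation fast paths above now request SetCC on the add and branch on carry
before the limit compare, so an object size that wraps the 32-bit new-top
computation bails out to gc_required instead of slipping past the limit check. In C
terms, the carry-out of the add is exactly the unsigned-wrap test:

    #include <cassert>
    #include <cstdint>

    // The carry flag after "add ... SetCC" is set exactly when the 32-bit
    // unsigned addition wraps, i.e. when the sum is below either operand.
    bool AllocationWouldWrap(uint32_t result, uint32_t object_size) {
      uint32_t new_top = result + object_size;
      return new_top < result;  // carry set => b(cs, gc_required)
    }

    int main() {
      assert(!AllocationWouldWrap(0x1000, 0x100));
      assert(AllocationWouldWrap(0xFFFFFF00u, 0x200));  // wraps past 2^32
      return 0;
    }
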
@@ -1382,7 +1397,7 @@
                               Label* fail,
                               bool is_heap_object) {
   if (!is_heap_object) {
-    BranchOnSmi(obj, fail);
+    JumpIfSmi(obj, fail);
   }
   ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
   mov(ip, Operand(map));
@@ -1397,7 +1412,7 @@
                               Label* fail,
                               bool is_heap_object) {
   if (!is_heap_object) {
-    BranchOnSmi(obj, fail);
+    JumpIfSmi(obj, fail);
   }
   ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
   LoadRoot(ip, index);
@@ -1411,7 +1426,7 @@
                                              Register scratch,
                                              Label* miss) {
   // Check that the receiver isn't a smi.
-  BranchOnSmi(function, miss);
+  JumpIfSmi(function, miss);
 
   // Check that the function really is a function.  Load map into result reg.
   CompareObjectType(function, result, scratch, JS_FUNCTION_TYPE);
@@ -1454,17 +1469,115 @@
 
 
 void MacroAssembler::CallStub(CodeStub* stub, Condition cond) {
-  ASSERT(allow_stub_calls());  // stub calls are not allowed in some stubs
+  ASSERT(allow_stub_calls());  // Stub calls are not allowed in some stubs.
   Call(stub->GetCode(), RelocInfo::CODE_TARGET, cond);
 }
 
 
 void MacroAssembler::TailCallStub(CodeStub* stub, Condition cond) {
-  ASSERT(allow_stub_calls());  // stub calls are not allowed in some stubs
+  ASSERT(allow_stub_calls());  // Stub calls are not allowed in some stubs.
   Jump(stub->GetCode(), RelocInfo::CODE_TARGET, cond);
 }
 
 
+MaybeObject* MacroAssembler::TryTailCallStub(CodeStub* stub, Condition cond) {
+  ASSERT(allow_stub_calls());  // Stub calls are not allowed in some stubs.
+  Object* result;
+  { MaybeObject* maybe_result = stub->TryGetCode();
+    if (!maybe_result->ToObject(&result)) return maybe_result;
+  }
+  Jump(stub->GetCode(), RelocInfo::CODE_TARGET, cond);
+  return result;
+}
+
+
+static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
+  return ref0.address() - ref1.address();
+}
+
+
+MaybeObject* MacroAssembler::TryCallApiFunctionAndReturn(
+    ApiFunction* function, int stack_space) {
+  ExternalReference next_address =
+      ExternalReference::handle_scope_next_address();
+  const int kNextOffset = 0;
+  const int kLimitOffset = AddressOffset(
+      ExternalReference::handle_scope_limit_address(),
+      next_address);
+  const int kLevelOffset = AddressOffset(
+      ExternalReference::handle_scope_level_address(),
+      next_address);
+
+  // Allocate HandleScope in callee-save registers.
+  mov(r7, Operand(next_address));
+  ldr(r4, MemOperand(r7, kNextOffset));
+  ldr(r5, MemOperand(r7, kLimitOffset));
+  ldr(r6, MemOperand(r7, kLevelOffset));
+  add(r6, r6, Operand(1));
+  str(r6, MemOperand(r7, kLevelOffset));
+
+  // The native call returns to the DirectCEntry stub, which redirects to the
+  // return address pushed on the stack (it may have moved after a GC).
+  // The DirectCEntry stub itself is generated early and never moves.
+  DirectCEntryStub stub;
+  stub.GenerateCall(this, function);
+
+  Label promote_scheduled_exception;
+  Label delete_allocated_handles;
+  Label leave_exit_frame;
+
+  // If result is non-zero, dereference to get the result value;
+  // otherwise set it to undefined.
+  cmp(r0, Operand(0));
+  LoadRoot(r0, Heap::kUndefinedValueRootIndex, eq);
+  ldr(r0, MemOperand(r0), ne);
+
+  // No more valid handles (the result handle was the last one). Restore
+  // previous handle scope.
+  str(r4, MemOperand(r7, kNextOffset));
+  if (FLAG_debug_code) {
+    ldr(r1, MemOperand(r7, kLevelOffset));
+    cmp(r1, r6);
+    Check(eq, "Unexpected level after return from api call");
+  }
+  sub(r6, r6, Operand(1));
+  str(r6, MemOperand(r7, kLevelOffset));
+  ldr(ip, MemOperand(r7, kLimitOffset));
+  cmp(r5, ip);
+  b(ne, &delete_allocated_handles);
+
+  // Check if the function scheduled an exception.
+  bind(&leave_exit_frame);
+  LoadRoot(r4, Heap::kTheHoleValueRootIndex);
+  mov(ip, Operand(ExternalReference::scheduled_exception_address()));
+  ldr(r5, MemOperand(ip));
+  cmp(r4, r5);
+  b(ne, &promote_scheduled_exception);
+
+  // LeaveExitFrame expects unwind space to be in r4.
+  mov(r4, Operand(stack_space));
+  LeaveExitFrame(false);
+
+  bind(&promote_scheduled_exception);
+  MaybeObject* result = TryTailCallExternalReference(
+      ExternalReference(Runtime::kPromoteScheduledException), 0, 1);
+  if (result->IsFailure()) {
+    return result;
+  }
+
+  // HandleScope limit has changed. Delete allocated extensions.
+  bind(&delete_allocated_handles);
+  str(r5, MemOperand(r7, kLimitOffset));
+  mov(r4, r0);
+  PrepareCallCFunction(0, r5);
+  CallCFunction(ExternalReference::delete_handle_scope_extensions(), 0);
+  mov(r0, r4);
+  jmp(&leave_exit_frame);
+
+  return result;
+}
+
+
 void MacroAssembler::IllegalOperation(int num_arguments) {
   if (num_arguments > 0) {
     add(sp, sp, Operand(num_arguments * kPointerSize));
@@ -1510,7 +1623,7 @@
   Label done;
   if ((flags & OBJECT_NOT_SMI) == 0) {
     Label not_smi;
-    BranchOnNotSmi(object, &not_smi);
+    JumpIfNotSmi(object, &not_smi);
     // Remove smi tag and convert to double.
     mov(scratch1, Operand(object, ASR, kSmiTagSize));
     vmov(scratch3, scratch1);
@@ -1556,13 +1669,14 @@
                                     Register dest,
                                     Register scratch,
                                     Register scratch2,
+                                    DwVfpRegister double_scratch,
                                     Label *not_int32) {
   if (CpuFeatures::IsSupported(VFP3)) {
     CpuFeatures::Scope scope(VFP3);
     sub(scratch, source, Operand(kHeapObjectTag));
-    vldr(d0, scratch, HeapNumber::kValueOffset);
-    vcvt_s32_f64(s0, d0);
-    vmov(dest, s0);
+    vldr(double_scratch, scratch, HeapNumber::kValueOffset);
+    vcvt_s32_f64(double_scratch.low(), double_scratch);
+    vmov(dest, double_scratch.low());
     // The signed vcvt instruction will saturate to the minimum (0x80000000)
     // or maximum (0x7fffffff) signed 32-bit integer when the double is out
     // of range. When subtracting one, the minimum signed integer becomes the
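
ConvertToInt32 now threads an explicit double scratch register through the VFP path
instead of clobbering d0/s0. The range check it feeds relies on the saturation
behavior described in the comment above: a saturating double-to-int32 conversion
pins out-of-range inputs to the two extremes, so only those two results can indicate
an out-of-range double. A sketch of that property:

    #include <climits>
    #include <cstdio>

    // Models the saturating vcvt_s32_f64: out-of-range doubles pin to
    // INT_MIN (0x80000000) or INT_MAX (0x7fffffff); in-range doubles
    // truncate toward zero.
    int SaturatingConvert(double d) {
      if (d >= 2147483648.0) return INT_MAX;
      if (d < -2147483648.0) return INT_MIN;
      return static_cast<int>(d);
    }

    int main() {
      printf("%d\n", SaturatingConvert(1e12));   // 2147483647
      printf("%d\n", SaturatingConvert(-1e12));  // -2147483648
      printf("%d\n", SaturatingConvert(42.7));   // 42
      return 0;
    }
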
@@ -1659,6 +1773,13 @@
 }
 
 
+void MacroAssembler::GetLeastBitsFromInt32(Register dst,
+                                           Register src,
+                                           int num_least_bits) {
+  and_(dst, src, Operand((1 << num_least_bits) - 1));
+}
+
+
 void MacroAssembler::CallRuntime(Runtime::Function* f, int num_arguments) {
   // All parameters are on the stack.  r0 has the return value after call.
 
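GetLeastBitsFromInt32 above is the standard mask trick: (1 << n) - 1 has exactly the
low n bits set. For example:

    #include <cassert>

    int GetLeastBits(int src, int num_least_bits) {
      return src & ((1 << num_least_bits) - 1);  // keep only the low n bits
    }

    int main() {
      assert(GetLeastBits(0xABCD, 4) == 0xD);
      assert(GetLeastBits(0xABCD, 8) == 0xCD);
      return 0;
    }
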
@@ -1718,6 +1839,17 @@
 }
 
 
+MaybeObject* MacroAssembler::TryTailCallExternalReference(
+    const ExternalReference& ext, int num_arguments, int result_size) {
+  // TODO(1236192): Most runtime routines don't need the number of
+  // arguments passed in because it is constant. At some point we
+  // should remove this need and make the runtime routine entry code
+  // smarter.
+  mov(r0, Operand(num_arguments));
+  return TryJumpToExternalReference(ext);
+}
+
+
 void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid,
                                      int num_arguments,
                                      int result_size) {
@@ -1736,6 +1868,18 @@
 }
 
 
+MaybeObject* MacroAssembler::TryJumpToExternalReference(
+    const ExternalReference& builtin) {
+#if defined(__thumb__)
+  // Thumb mode builtin.
+  ASSERT((reinterpret_cast<intptr_t>(builtin.address()) & 1) == 1);
+#endif
+  mov(r1, Operand(builtin));
+  CEntryStub stub(1);
+  return TryTailCallStub(&stub);
+}
+
+
 void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
                                    InvokeJSFlags flags,
                                    PostCallGenerator* post_call_generator) {
@@ -1803,9 +1947,9 @@
 }
 
 
-void MacroAssembler::Assert(Condition cc, const char* msg) {
+void MacroAssembler::Assert(Condition cond, const char* msg) {
   if (FLAG_debug_code)
-    Check(cc, msg);
+    Check(cond, msg);
 }
 
 
@@ -1838,9 +1982,9 @@
 }
 
 
-void MacroAssembler::Check(Condition cc, const char* msg) {
+void MacroAssembler::Check(Condition cond, const char* msg) {
   Label L;
-  b(cc, &L);
+  b(cond, &L);
   Abort(msg);
   // will not return here
   bind(&L);
@@ -1933,10 +2077,21 @@
 }
 
 
+void MacroAssembler::JumpIfNotPowerOfTwoOrZero(
+    Register reg,
+    Register scratch,
+    Label* not_power_of_two_or_zero) {
+  sub(scratch, reg, Operand(1), SetCC);
+  b(mi, not_power_of_two_or_zero);
+  tst(scratch, reg);
+  b(ne, not_power_of_two_or_zero);
+}
+
+
 void MacroAssembler::JumpIfNotBothSmi(Register reg1,
                                       Register reg2,
                                       Label* on_not_both_smi) {
-  ASSERT_EQ(0, kSmiTag);
+  STATIC_ASSERT(kSmiTag == 0);
   tst(reg1, Operand(kSmiTagMask));
   tst(reg2, Operand(kSmiTagMask), eq);
   b(ne, on_not_both_smi);
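
JumpIfNotPowerOfTwoOrZero composes two classic tests: subtracting one goes negative
for zero (and for any value with the top bit set), which the mi branch catches, and
x & (x - 1) clears the lowest set bit, leaving zero only for powers of two. A sketch
of the predicate the fall-through path establishes:

    #include <cassert>
    #include <cstdint>

    // Falls through (returns true) only for positive powers of two.
    bool IsPowerOfTwoNonZero(int32_t reg) {
      int32_t scratch = reg - 1;
      if (scratch < 0) return false;  // b(mi, ...): reg was zero or negative
      return (scratch & reg) == 0;    // tst: x & (x - 1) strips the low bit
    }

    int main() {
      assert(IsPowerOfTwoNonZero(1));
      assert(IsPowerOfTwoNonZero(64));
      assert(!IsPowerOfTwoNonZero(0));
      assert(!IsPowerOfTwoNonZero(48));
      return 0;
    }
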
@@ -1946,7 +2101,7 @@
 void MacroAssembler::JumpIfEitherSmi(Register reg1,
                                      Register reg2,
                                      Label* on_either_smi) {
-  ASSERT_EQ(0, kSmiTag);
+  STATIC_ASSERT(kSmiTag == 0);
   tst(reg1, Operand(kSmiTagMask));
   tst(reg2, Operand(kSmiTagMask), ne);
   b(eq, on_either_smi);
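
Both smi helpers lean on kSmiTag == 0 (hence the swap to STATIC_ASSERT): a smi is a
value shifted left by one, so its low bit is clear. Combining two words with or
tests "both are smis" and combining them with and tests "at least one is a smi",
which is what the conditional tst pairs above compute. A sketch:

    #include <cassert>
    #include <cstdint>

    const intptr_t kSmiTagMask = 1;

    bool IsSmi(intptr_t v) { return (v & kSmiTagMask) == 0; }

    // Equivalent to the tst pair in JumpIfNotBothSmi falling through.
    bool BothSmi(intptr_t a, intptr_t b) { return IsSmi(a | b); }

    // Equivalent to the tst pair in JumpIfEitherSmi taking the branch.
    bool EitherSmi(intptr_t a, intptr_t b) { return IsSmi(a & b); }

    int main() {
      intptr_t smi = 21 << 1;  // the smi encoding of 21
      intptr_t heap = 0x1001;  // a tagged heap pointer: low bit set
      assert(IsSmi(smi) && !IsSmi(heap));
      assert(BothSmi(smi, smi) && !BothSmi(smi, heap));
      assert(EitherSmi(smi, heap) && !EitherSmi(heap, heap));
      return 0;
    }
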
@@ -1954,12 +2109,40 @@
 
 
 void MacroAssembler::AbortIfSmi(Register object) {
-  ASSERT_EQ(0, kSmiTag);
+  STATIC_ASSERT(kSmiTag == 0);
   tst(object, Operand(kSmiTagMask));
   Assert(ne, "Operand is a smi");
 }
 
 
+void MacroAssembler::AbortIfNotSmi(Register object) {
+  STATIC_ASSERT(kSmiTag == 0);
+  tst(object, Operand(kSmiTagMask));
+  Assert(eq, "Operand is not smi");
+}
+
+
+void MacroAssembler::AbortIfNotRootValue(Register src,
+                                         Heap::RootListIndex root_value_index,
+                                         const char* message) {
+  ASSERT(!src.is(ip));
+  LoadRoot(ip, root_value_index);
+  cmp(src, ip);
+  Assert(eq, message);
+}
+
+
+void MacroAssembler::JumpIfNotHeapNumber(Register object,
+                                         Register heap_number_map,
+                                         Register scratch,
+                                         Label* on_not_heap_number) {
+  ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
+  AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
+  cmp(scratch, heap_number_map);
+  b(ne, on_not_heap_number);
+}
+
+
 void MacroAssembler::JumpIfNonSmisNotBothSequentialAsciiStrings(
     Register first,
     Register second,
@@ -1986,7 +2169,7 @@
                                                          Register scratch2,
                                                          Label* failure) {
   // Check that neither is a smi.
-  ASSERT_EQ(0, kSmiTag);
+  STATIC_ASSERT(kSmiTag == 0);
   and_(scratch1, first, Operand(second));
   tst(scratch1, Operand(kSmiTagMask));
   b(eq, failure);
@@ -2063,7 +2246,7 @@
 void MacroAssembler::CountLeadingZeros(Register zeros,   // Answer.
                                        Register source,  // Input.
                                        Register scratch) {
-  ASSERT(!zeros.is(source) || !source.is(zeros));
+  ASSERT(!zeros.is(source) || !source.is(scratch));
   ASSERT(!zeros.is(scratch));
   ASSERT(!scratch.is(ip));
   ASSERT(!source.is(ip));
@@ -2185,7 +2368,26 @@
 }
 
 
-#ifdef ENABLE_DEBUGGER_SUPPORT
+void MacroAssembler::GetRelocatedValueLocation(Register ldr_location,
+                                               Register result) {
+  const uint32_t kLdrOffsetMask = (1 << 12) - 1;
+  const int32_t kPCRegOffset = 2 * kPointerSize;
+  ldr(result, MemOperand(ldr_location));
+  if (FLAG_debug_code) {
+    // Check that the instruction is a ldr reg, [pc + offset].
+    and_(result, result, Operand(kLdrPCPattern));
+    cmp(result, Operand(kLdrPCPattern));
+    Check(eq, "The instruction to patch should be a load from pc.");
+    // Result was clobbered. Restore it.
+    ldr(result, MemOperand(ldr_location));
+  }
+  // Get the address of the constant.
+  and_(result, result, Operand(kLdrOffsetMask));
+  add(result, ldr_location, Operand(result));
+  add(result, result, Operand(kPCRegOffset));
+}
+
+
 CodePatcher::CodePatcher(byte* address, int instructions)
     : address_(address),
       instructions_(instructions),
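
The GetRelocatedValueLocation helper above recovers the constant-pool slot read by a
pc-relative ldr: the low 12 bits of the instruction word are the immediate offset,
and reading pc on ARM yields the instruction address plus 8, which is what
kPCRegOffset = 2 * kPointerSize accounts for. A sketch of the address arithmetic on
a hypothetical encoding:

    #include <cassert>
    #include <cstdint>

    // For "ldr rX, [pc, #imm12]" the loaded word lives at the instruction
    // address + 8 (pc reads two instructions ahead) + the 12-bit immediate.
    uint32_t ConstantPoolSlot(uint32_t ldr_location, uint32_t instr) {
      const uint32_t kLdrOffsetMask = (1 << 12) - 1;
      const uint32_t kPCRegOffset = 8;  // 2 * kPointerSize in the patch
      return ldr_location + kPCRegOffset + (instr & kLdrOffsetMask);
    }

    int main() {
      // A hypothetical ldr r0, [pc, #0x24] at address 0x1000 reads 0x102C.
      assert(ConstantPoolSlot(0x1000, 0xE59F0024u) == 0x102C);
      return 0;
    }
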
@@ -2208,15 +2410,21 @@
 }
 
 
-void CodePatcher::Emit(Instr x) {
-  masm()->emit(x);
+void CodePatcher::Emit(Instr instr) {
+  masm()->emit(instr);
 }
 
 
 void CodePatcher::Emit(Address addr) {
   masm()->emit(reinterpret_cast<Instr>(addr));
 }
-#endif  // ENABLE_DEBUGGER_SUPPORT
+
+
+void CodePatcher::EmitCondition(Condition cond) {
+  Instr instr = Assembler::instr_at(masm_.pc_);
+  instr = (instr & ~kCondMask) | cond;
+  masm_.emit(instr);
+}
 
 
 } }  // namespace v8::internal