Merge "jni: Add @CriticalNative support for MIPS32/MIPS64"
diff --git a/compiler/jni/quick/arm64/calling_convention_arm64.cc b/compiler/jni/quick/arm64/calling_convention_arm64.cc
index 3fb7b56..33f4d77 100644
--- a/compiler/jni/quick/arm64/calling_convention_arm64.cc
+++ b/compiler/jni/quick/arm64/calling_convention_arm64.cc
@@ -222,7 +222,11 @@
                                                      bool is_synchronized,
                                                      bool is_critical_native,
                                                      const char* shorty)
-    : JniCallingConvention(is_static, is_synchronized, is_critical_native, shorty, kArm64PointerSize) {
+    : JniCallingConvention(is_static,
+                           is_synchronized,
+                           is_critical_native,
+                           shorty,
+                           kArm64PointerSize) {
 }
 
 uint32_t Arm64JniCallingConvention::CoreSpillMask() const {
diff --git a/compiler/jni/quick/calling_convention.cc b/compiler/jni/quick/calling_convention.cc
index 9859b5d..36a87a8 100644
--- a/compiler/jni/quick/calling_convention.cc
+++ b/compiler/jni/quick/calling_convention.cc
@@ -152,24 +152,6 @@
                                                                    bool is_critical_native,
                                                                    const char* shorty,
                                                                    InstructionSet instruction_set) {
-  if (UNLIKELY(is_critical_native)) {
-    // Sanity check that the requested JNI instruction set
-    // is supported for critical natives. Not every one is.
-    switch (instruction_set) {
-      case kX86_64:
-      case kX86:
-      case kArm64:
-      case kArm:
-      case kThumb2:
-        break;
-      default:
-        is_critical_native = false;
-        LOG(WARNING) << "@CriticalNative support not implemented for " << instruction_set
-                     << "; will crash at runtime if trying to invoke such a method.";
-        // TODO: implement for MIPS/MIPS64
-    }
-  }
-
   switch (instruction_set) {
 #ifdef ART_ENABLE_CODEGEN_arm
     case kArm:
@@ -191,12 +173,18 @@
 #ifdef ART_ENABLE_CODEGEN_mips
     case kMips:
       return std::unique_ptr<JniCallingConvention>(
-          new (arena) mips::MipsJniCallingConvention(is_static, is_synchronized, shorty));
+          new (arena) mips::MipsJniCallingConvention(is_static,
+                                                     is_synchronized,
+                                                     is_critical_native,
+                                                     shorty));
 #endif
 #ifdef ART_ENABLE_CODEGEN_mips64
     case kMips64:
       return std::unique_ptr<JniCallingConvention>(
-          new (arena) mips64::Mips64JniCallingConvention(is_static, is_synchronized, shorty));
+          new (arena) mips64::Mips64JniCallingConvention(is_static,
+                                                         is_synchronized,
+                                                         is_critical_native,
+                                                         shorty));
 #endif
 #ifdef ART_ENABLE_CODEGEN_x86
     case kX86:
diff --git a/compiler/jni/quick/calling_convention.h b/compiler/jni/quick/calling_convention.h
index f541d8f..335a2df 100644
--- a/compiler/jni/quick/calling_convention.h
+++ b/compiler/jni/quick/calling_convention.h
@@ -370,14 +370,6 @@
     kObjectOrClass = 1
   };
 
-  // TODO: remove this constructor once all are changed to the below one.
-  JniCallingConvention(bool is_static,
-                       bool is_synchronized,
-                       const char* shorty,
-                       PointerSize frame_pointer_size)
-      : CallingConvention(is_static, is_synchronized, shorty, frame_pointer_size),
-        is_critical_native_(false) {}
-
   JniCallingConvention(bool is_static,
                        bool is_synchronized,
                        bool is_critical_native,
diff --git a/compiler/jni/quick/mips/calling_convention_mips.cc b/compiler/jni/quick/mips/calling_convention_mips.cc
index f5ab5f7..e6948ec 100644
--- a/compiler/jni/quick/mips/calling_convention_mips.cc
+++ b/compiler/jni/quick/mips/calling_convention_mips.cc
@@ -23,6 +23,13 @@
 namespace art {
 namespace mips {
 
+// Up to how many float-like (float, double) args can be enregistered in floating-point registers.
+// The rest of the args must go in integer registers or on the stack.
+constexpr size_t kMaxFloatOrDoubleRegisterArguments = 2u;
+// Up to how many integer-like (pointers, objects, longs, int, short, bool, etc) args can be
+// enregistered. The rest of the args must go on the stack.
+constexpr size_t kMaxIntLikeRegisterArguments = 4u;
+
 static const Register kCoreArgumentRegisters[] = { A0, A1, A2, A3 };
 static const FRegister kFArgumentRegisters[] = { F12, F14 };
 static const DRegister kDArgumentRegisters[] = { D6, D7 };
@@ -170,23 +177,134 @@
 }
 // JNI calling convention
 
-MipsJniCallingConvention::MipsJniCallingConvention(bool is_static, bool is_synchronized,
+MipsJniCallingConvention::MipsJniCallingConvention(bool is_static,
+                                                   bool is_synchronized,
+                                                   bool is_critical_native,
                                                    const char* shorty)
-    : JniCallingConvention(is_static, is_synchronized, shorty, kMipsPointerSize) {
-  // Compute padding to ensure longs and doubles are not split in AAPCS. Ignore the 'this' jobject
-  // or jclass for static methods and the JNIEnv. We start at the aligned register A2.
+    : JniCallingConvention(is_static,
+                           is_synchronized,
+                           is_critical_native,
+                           shorty,
+                           kMipsPointerSize) {
+  // SYSTEM V - Application Binary Interface (MIPS RISC Processor):
+  // Data Representation - Fundamental Types (3-4) specifies fundamental alignments for each type.
+  //   "Each member is assigned to the lowest available offset with the appropriate alignment. This
+  // may require internal padding, depending on the previous member."
+  //
+  // All of our stack arguments are usually 4-byte aligned; however, longs and doubles must be
+  // 8-byte aligned. Add padding to maintain the 8-byte alignment invariant.
+  //
+  // Compute padding to ensure longs and doubles are not split in o32.
   size_t padding = 0;
-  for (size_t cur_arg = IsStatic() ? 0 : 1, cur_reg = 2; cur_arg < NumArgs(); cur_arg++) {
+  size_t cur_arg, cur_reg;
+  if (LIKELY(HasExtraArgumentsForJni())) {
+    // Ignore the 'this' jobject or jclass for static methods and the JNIEnv.
+    // We start at the aligned register A2.
+    //
+    // Ignore the first 2 parameters because they are guaranteed to be aligned.
+    cur_arg = NumImplicitArgs();  // Skip the "this" argument.
+    cur_reg = 2;  // Skip {A0=JNIEnv, A1=jobject} / {A0=JNIEnv, A1=jclass} parameters (start at A2).
+  } else {
+    // Check every parameter.
+    cur_arg = 0;
+    cur_reg = 0;
+  }
+
+  // Shift across a logical register mapping that looks like:
+  //
+  //   | A0 | A1 | A2 | A3 | SP+16 | SP+20 | SP+24 | ... | SP+n | SP+n+4 |
+  //
+  //   or one of its variants with floating-point registers (F12 and F14), for example
+  //
+  //   | F12     | F14 | A3 | SP+16 | SP+20 | SP+24 | ... | SP+n | SP+n+4 |
+  //
+  //   (where SP is the stack pointer at the start of the called function).
+  //
+  // Any time there would normally be a long/double in an odd logical register,
+  // we have to push out the rest of the mappings by 4 bytes to maintain an 8-byte alignment.
+  //
+  // This works for both physical register pairs {A0, A1}, {A2, A3},
+  // floating-point registers F12, F14 and for when the value is on the stack.
+  //
+  // For example:
+  // (a) long would normally go into A1, but we shift it into A2
+  //  | INT | (PAD) | LONG    |
+  //  | A0  |  A1   | A2 | A3 |
+  //
+  // (b) long would normally go into A3, but we shift it into SP
+  //  | INT | INT | INT | (PAD) | LONG        |
+  //  | A0  | A1  | A2  |  A3   | SP+16 SP+20 |
+  //
+  // where INT is any <=4 byte arg, and LONG is any 8-byte arg.
+  for (; cur_arg < NumArgs(); cur_arg++) {
     if (IsParamALongOrDouble(cur_arg)) {
       if ((cur_reg & 1) != 0) {
         padding += 4;
-        cur_reg++;  // additional bump to ensure alignment
+        cur_reg++;   // Additional bump to ensure alignment.
       }
-      cur_reg++;  // additional bump to skip extra long word
+      cur_reg += 2;  // Bump the iterator twice for every long argument.
+    } else {
+      cur_reg++;     // Bump the iterator for every argument.
     }
-    cur_reg++;  // bump the iterator for every argument
   }
-  padding_ = padding;
+  if (cur_reg < kMaxIntLikeRegisterArguments) {
+    // As a special case, when as a result of shifting (or not) there are no arguments on the stack,
+    // we actually have 0 stack padding.
+    //
+    // For example with @CriticalNative and:
+    // (int, long) -> shifts the long but doesn't need to pad the stack
+    //
+    //          shift
+    //           \/
+    //  | INT | (PAD) | LONG      | (EMPTY) ...
+  //  | A0  |  A1   |  A2  | A3 |   SP    ...
+    //                                /\
+    //                          no stack padding
+    padding_ = 0;
+  } else {
+    padding_ = padding;
+  }
+
+  // Argument Passing (3-17):
+  //   "When the first argument is integral, the remaining arguments are passed in the integer
+  // registers."
+  //
+  //   "The rules that determine which arguments go into registers and which ones must be passed on
+  // the stack are most easily explained by considering the list of arguments as a structure,
+  // aligned according to normal structure rules. Mapping of this structure into the combination of
+  // stack and registers is as follows: up to two leading floating-point arguments can be passed in
+  // $f12 and $f14; everything else with a structure offset greater than or equal to 16 is passed on
+  // the stack. The remainder of the arguments are passed in $4..$7 based on their structure offset.
+  // Holes left in the structure for alignment are unused, whether in registers or in the stack."
+  //
+  // For example with @CriticalNative and:
+  // (a) first argument is not floating-point, so all go into integer registers
+  //  | INT | FLOAT | DOUBLE  |
+  //  | A0  |  A1   | A2 | A3 |
+  // (b) first argument is floating-point, but 2nd is integer
+  //  | FLOAT | INT | DOUBLE  |
+  //  |  F12  | A1  | A2 | A3 |
+  // (c) first two arguments are floating-point (float, double)
+  //  | FLOAT | (PAD) | DOUBLE |  INT  |
+  //  |  F12  |       |  F14   | SP+16 |
+  // (d) first two arguments are floating-point (double, float)
+  //  | DOUBLE | FLOAT | INT |
+  //  |  F12   |  F14  | A3  |
+  // (e) first three arguments are floating-point, but only the first two will go into fp registers
+  //  | DOUBLE | FLOAT | FLOAT |
+  //  |  F12   |  F14  |  A3   |
+  //
+  // Find out if the first argument is floating-point. In that case, floating-point registers will
+  // be used for up to two leading floating-point arguments. Otherwise, all arguments will be passed
+  // using integer registers.
+  use_fp_arg_registers_ = false;
+  if (is_critical_native) {
+    if (NumArgs() > 0) {
+      if (IsParamAFloatOrDouble(0)) {
+        use_fp_arg_registers_ = true;
+      }
+    }
+  }
 }
 
 uint32_t MipsJniCallingConvention::CoreSpillMask() const {
@@ -202,74 +320,127 @@
 }
 
 size_t MipsJniCallingConvention::FrameSize() {
-  // ArtMethod*, RA and callee save area size, local reference segment state
-  size_t frame_data_size = static_cast<size_t>(kMipsPointerSize) +
-      (2 + CalleeSaveRegisters().size()) * kFramePointerSize;
-  // References plus 2 words for HandleScope header
-  size_t handle_scope_size = HandleScope::SizeOf(kMipsPointerSize, ReferenceCount());
-  // Plus return value spill area size
-  return RoundUp(frame_data_size + handle_scope_size + SizeOfReturnValue(), kStackAlignment);
+  // ArtMethod*, RA and callee save area size, local reference segment state.
+  const size_t method_ptr_size = static_cast<size_t>(kMipsPointerSize);
+  const size_t ra_return_addr_size = kFramePointerSize;
+  const size_t callee_save_area_size = CalleeSaveRegisters().size() * kFramePointerSize;
+
+  size_t frame_data_size = method_ptr_size + ra_return_addr_size + callee_save_area_size;
+
+  if (LIKELY(HasLocalReferenceSegmentState())) {
+    // Local reference segment state.
+    frame_data_size += kFramePointerSize;
+  }
+
+  // References plus 2 words for HandleScope header.
+  const size_t handle_scope_size = HandleScope::SizeOf(kMipsPointerSize, ReferenceCount());
+
+  size_t total_size = frame_data_size;
+  if (LIKELY(HasHandleScope())) {
+    // HandleScope is sometimes excluded.
+    total_size += handle_scope_size;    // Handle scope size.
+  }
+
+  // Plus return value spill area size.
+  total_size += SizeOfReturnValue();
+
+  return RoundUp(total_size, kStackAlignment);
 }
 
 size_t MipsJniCallingConvention::OutArgSize() {
-  return RoundUp(NumberOfOutgoingStackArgs() * kFramePointerSize + padding_, kStackAlignment);
+  // Argument Passing (3-17):
+  //   "Despite the fact that some or all of the arguments to a function are passed in registers,
+  // always allocate space on the stack for all arguments. This stack space should be a structure
+  // large enough to contain all the arguments, aligned according to normal structure rules (after
+  // promotion and structure return pointer insertion). The locations within the stack frame used
+  // for arguments are called the home locations."
+  //
+  // Allocate 16 bytes for home locations + space needed for stack arguments.
+  return RoundUp(
+      (kMaxIntLikeRegisterArguments + NumberOfOutgoingStackArgs()) * kFramePointerSize + padding_,
+      kStackAlignment);
 }
 
 ArrayRef<const ManagedRegister> MipsJniCallingConvention::CalleeSaveRegisters() const {
   return ArrayRef<const ManagedRegister>(kCalleeSaveRegisters);
 }
 
-// JniCallingConvention ABI follows AAPCS where longs and doubles must occur
-// in even register numbers and stack slots
+// JniCallingConvention ABI follows o32 where longs and doubles must occur
+// in even register numbers and stack slots.
 void MipsJniCallingConvention::Next() {
   JniCallingConvention::Next();
-  size_t arg_pos = itr_args_ - NumberOfExtraArgumentsForJni();
-  if ((itr_args_ >= 2) &&
-      (arg_pos < NumArgs()) &&
-      IsParamALongOrDouble(arg_pos)) {
-    // itr_slots_ needs to be an even number, according to AAPCS.
-    if ((itr_slots_ & 0x1u) != 0) {
+
+  if (LIKELY(HasNext())) {  // Avoid CHECK failure for IsCurrentParam
+    // Ensure slot is 8-byte aligned for longs/doubles (o32).
+    if (IsCurrentParamALongOrDouble() && ((itr_slots_ & 0x1u) != 0)) {
+      // itr_slots_ needs to be an even number, according to o32.
       itr_slots_++;
     }
   }
 }
 
 bool MipsJniCallingConvention::IsCurrentParamInRegister() {
-  return itr_slots_ < 4;
+  // Argument Passing (3-17):
+  //   "The rules that determine which arguments go into registers and which ones must be passed on
+  // the stack are most easily explained by considering the list of arguments as a structure,
+  // aligned according to normal structure rules. Mapping of this structure into the combination of
+  // stack and registers is as follows: up to two leading floating-point arguments can be passed in
+  // $f12 and $f14; everything else with a structure offset greater than or equal to 16 is passed on
+  // the stack. The remainder of the arguments are passed in $4..$7 based on their structure offset.
+  // Holes left in the structure for alignment are unused, whether in registers or in the stack."
+  //
+  // Even when floating-point registers are used, there can be up to 4 arguments passed in
+  // registers.
+  return itr_slots_ < kMaxIntLikeRegisterArguments;
 }
 
 bool MipsJniCallingConvention::IsCurrentParamOnStack() {
   return !IsCurrentParamInRegister();
 }
 
-static const Register kJniArgumentRegisters[] = {
-  A0, A1, A2, A3
-};
 ManagedRegister MipsJniCallingConvention::CurrentParamRegister() {
-  CHECK_LT(itr_slots_, 4u);
-  int arg_pos = itr_args_ - NumberOfExtraArgumentsForJni();
-  if ((itr_args_ >= 2) && IsParamALongOrDouble(arg_pos)) {
-    CHECK_EQ(itr_slots_, 2u);
-    return MipsManagedRegister::FromRegisterPair(A2_A3);
+  CHECK_LT(itr_slots_, kMaxIntLikeRegisterArguments);
+  // Up to two leading floating-point arguments can be passed in floating-point registers.
+  if (use_fp_arg_registers_ && (itr_args_ < kMaxFloatOrDoubleRegisterArguments)) {
+    if (IsCurrentParamAFloatOrDouble()) {
+      if (IsCurrentParamADouble()) {
+        return MipsManagedRegister::FromDRegister(kDArgumentRegisters[itr_args_]);
+      } else {
+        return MipsManagedRegister::FromFRegister(kFArgumentRegisters[itr_args_]);
+      }
+    }
+  }
+  // All other arguments (including other floating-point arguments) will be passed in integer
+  // registers.
+  if (IsCurrentParamALongOrDouble()) {
+    if (itr_slots_ == 0u) {
+      return MipsManagedRegister::FromRegisterPair(A0_A1);
+    } else {
+      CHECK_EQ(itr_slots_, 2u);
+      return MipsManagedRegister::FromRegisterPair(A2_A3);
+    }
   } else {
-    return
-      MipsManagedRegister::FromCoreRegister(kJniArgumentRegisters[itr_slots_]);
+    return MipsManagedRegister::FromCoreRegister(kCoreArgumentRegisters[itr_slots_]);
   }
 }
 
 FrameOffset MipsJniCallingConvention::CurrentParamStackOffset() {
-  CHECK_GE(itr_slots_, 4u);
+  CHECK_GE(itr_slots_, kMaxIntLikeRegisterArguments);
   size_t offset = displacement_.Int32Value() - OutArgSize() + (itr_slots_ * kFramePointerSize);
   CHECK_LT(offset, OutArgSize());
   return FrameOffset(offset);
 }
 
 size_t MipsJniCallingConvention::NumberOfOutgoingStackArgs() {
-  size_t static_args = IsStatic() ? 1 : 0;  // count jclass
-  // regular argument parameters and this
-  size_t param_args = NumArgs() + NumLongOrDoubleArgs();
-  // count JNIEnv*
-  return static_args + param_args + 1;
+  size_t static_args = HasSelfClass() ? 1 : 0;            // Count jclass.
+  // Regular argument parameters and this.
+  size_t param_args = NumArgs() + NumLongOrDoubleArgs();  // Twice count 8-byte args.
+  // Count JNIEnv* less arguments in registers.
+  size_t internal_args = (HasJniEnv() ? 1 : 0);
+  size_t total_args = static_args + param_args + internal_args;
+
+  return total_args - std::min(kMaxIntLikeRegisterArguments, static_cast<size_t>(total_args));
 }
+
 }  // namespace mips
 }  // namespace art
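
The constructor comments in the hunks above describe the o32 padding rule: a long or double may not start in an odd 4-byte slot, so it gets shifted and 4 bytes of padding are recorded, and if every argument still fits in A0..A3 no stack padding is needed. A minimal standalone sketch of that rule, assuming a JNI-style shorty for the explicit arguments and a hypothetical ComputeO32Padding helper (this is not ART code):

    #include <cstddef>
    #include <cstdio>

    // Returns the o32 padding (in bytes) needed so that no 8-byte argument ('J' or 'D'
    // in a JNI-style shorty) starts in an odd 4-byte slot. skip_slots models starting
    // at A2 when JNIEnv* and jobject/jclass already occupy A0/A1; @CriticalNative
    // methods pass 0 and start at A0.
    size_t ComputeO32Padding(const char* arg_shorty, size_t skip_slots) {
      constexpr size_t kMaxIntLikeRegisterArguments = 4u;  // A0..A3.
      size_t padding = 0;
      size_t cur_slot = skip_slots;
      for (const char* p = arg_shorty; *p != '\0'; ++p) {
        const bool is_wide = (*p == 'J' || *p == 'D');
        if (is_wide) {
          if ((cur_slot & 1u) != 0) {
            padding += 4;  // Skip the odd slot so the 8-byte value stays aligned.
            ++cur_slot;
          }
          cur_slot += 2;   // A long/double consumes two 4-byte slots.
        } else {
          ++cur_slot;      // Everything else consumes one slot.
        }
      }
      // Mirror of the constructor's special case: if the arguments never advanced past
      // the register slots, there is nothing on the stack to pad.
      return (cur_slot < kMaxIntLikeRegisterArguments) ? 0 : padding;
    }

    int main() {
      // Non-critical native (int, long): JNIEnv*/jclass take A0/A1, the int takes A2,
      // and the long is shifted past A3 onto the stack, so 4 bytes of padding appear.
      std::printf("%zu\n", ComputeO32Padding("IJ", /*skip_slots=*/2u));  // 4
      // Non-critical native (long, int): the long already starts at the even slot A2,
      // so no padding is needed.
      std::printf("%zu\n", ComputeO32Padding("JI", /*skip_slots=*/2u));  // 0
      return 0;
    }
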
diff --git a/compiler/jni/quick/mips/calling_convention_mips.h b/compiler/jni/quick/mips/calling_convention_mips.h
index e95a738..ad3f118 100644
--- a/compiler/jni/quick/mips/calling_convention_mips.h
+++ b/compiler/jni/quick/mips/calling_convention_mips.h
@@ -54,14 +54,17 @@
 
 class MipsJniCallingConvention FINAL : public JniCallingConvention {
  public:
-  MipsJniCallingConvention(bool is_static, bool is_synchronized, const char* shorty);
+  MipsJniCallingConvention(bool is_static,
+                           bool is_synchronized,
+                           bool is_critical_native,
+                           const char* shorty);
   ~MipsJniCallingConvention() OVERRIDE {}
   // Calling convention
   ManagedRegister ReturnRegister() OVERRIDE;
   ManagedRegister IntReturnRegister() OVERRIDE;
   ManagedRegister InterproceduralScratchRegister() OVERRIDE;
   // JNI calling convention
-  void Next() OVERRIDE;  // Override default behavior for AAPCS
+  void Next() OVERRIDE;  // Override default behavior for o32.
   size_t FrameSize() OVERRIDE;
   size_t OutArgSize() OVERRIDE;
   ArrayRef<const ManagedRegister> CalleeSaveRegisters() const OVERRIDE;
@@ -82,8 +85,9 @@
   size_t NumberOfOutgoingStackArgs() OVERRIDE;
 
  private:
-  // Padding to ensure longs and doubles are not split in AAPCS
+  // Padding to ensure longs and doubles are not split in o32.
   size_t padding_;
+  bool use_fp_arg_registers_;
 
   DISALLOW_COPY_AND_ASSIGN(MipsJniCallingConvention);
 };
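
The new use_fp_arg_registers_ field encodes the o32 rule quoted in calling_convention_mips.cc: only when the first argument is floating-point do up to two leading floating-point arguments go in F12/F14 (D6/D7 for doubles), while everything else is mapped onto A0..A3 by slot, 8-byte values take an aligned register pair, and slots four and up spill to the stack. A standalone sketch of that mapping, with a hypothetical Kind enum and AssignO32 helper (not ART code):

    #include <cstddef>
    #include <cstdio>
    #include <string>
    #include <vector>

    enum class Kind { kInt, kFloat, kDouble, kLong };  // kInt stands for any <=4-byte arg.

    // Maps each argument to a register (or the start of its stack home location)
    // under the o32 rule described in the constructor comments.
    std::vector<std::string> AssignO32(const std::vector<Kind>& args) {
      static const char* kCore[] = { "A0", "A1", "A2", "A3" };
      static const char* kFpu[]  = { "F12", "F14" };
      static const char* kDbl[]  = { "D6", "D7" };   // D6/D7 overlap F12/F13 and F14/F15.
      const bool use_fp_arg_registers =
          !args.empty() && (args[0] == Kind::kFloat || args[0] == Kind::kDouble);

      std::vector<std::string> where;
      size_t slot = 0;  // o32 "structure offset" in 4-byte slots.
      for (size_t i = 0; i < args.size(); ++i) {
        const Kind k = args[i];
        const bool wide = (k == Kind::kLong || k == Kind::kDouble);
        if (wide && (slot & 1u) != 0) {
          ++slot;  // Keep 8-byte values 8-byte aligned (the padding case).
        }
        const bool is_fp = (k == Kind::kFloat || k == Kind::kDouble);
        if (use_fp_arg_registers && i < 2u && is_fp) {
          // Up to two leading FP arguments are enregistered in FP registers; they
          // still consume their slots in the logical layout.
          where.push_back(k == Kind::kDouble ? kDbl[i] : kFpu[i]);
        } else if (slot < 4u) {
          where.push_back(wide ? std::string(kCore[slot]) + "_" + kCore[slot + 1]
                               : std::string(kCore[slot]));
        } else {
          where.push_back("SP+" + std::to_string(16 + (slot - 4) * 4));
        }
        slot += wide ? 2u : 1u;
      }
      return where;
    }

    int main() {
      // Example (b) from the comments: (float, int, double) -> F12, A1, A2_A3.
      for (const std::string& loc : AssignO32({ Kind::kFloat, Kind::kInt, Kind::kDouble })) {
        std::printf("%s ", loc.c_str());
      }
      std::printf("\n");
      // Example (c): (float, double, int) -> F12, F14, SP+16.
      for (const std::string& loc : AssignO32({ Kind::kFloat, Kind::kDouble, Kind::kInt })) {
        std::printf("%s ", loc.c_str());
      }
      std::printf("\n");
      return 0;
    }
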
diff --git a/compiler/jni/quick/mips64/calling_convention_mips64.cc b/compiler/jni/quick/mips64/calling_convention_mips64.cc
index 8341e8e..afe6a76 100644
--- a/compiler/jni/quick/mips64/calling_convention_mips64.cc
+++ b/compiler/jni/quick/mips64/calling_convention_mips64.cc
@@ -23,6 +23,9 @@
 namespace art {
 namespace mips64 {
 
+// Up to how many args can be enregistered. The rest of the args must go on the stack.
+constexpr size_t kMaxRegisterArguments = 8u;
+
 static const GpuRegister kGpuArgumentRegisters[] = {
   A0, A1, A2, A3, A4, A5, A6, A7
 };
@@ -150,9 +153,15 @@
 
 // JNI calling convention
 
-Mips64JniCallingConvention::Mips64JniCallingConvention(bool is_static, bool is_synchronized,
+Mips64JniCallingConvention::Mips64JniCallingConvention(bool is_static,
+                                                       bool is_synchronized,
+                                                       bool is_critical_native,
                                                        const char* shorty)
-    : JniCallingConvention(is_static, is_synchronized, shorty, kMips64PointerSize) {
+    : JniCallingConvention(is_static,
+                           is_synchronized,
+                           is_critical_native,
+                           shorty,
+                           kMips64PointerSize) {
 }
 
 uint32_t Mips64JniCallingConvention::CoreSpillMask() const {
@@ -168,13 +177,28 @@
 }
 
 size_t Mips64JniCallingConvention::FrameSize() {
-  // ArtMethod*, RA and callee save area size, local reference segment state
-  size_t frame_data_size = kFramePointerSize +
-      (CalleeSaveRegisters().size() + 1) * kFramePointerSize + sizeof(uint32_t);
-  // References plus 2 words for HandleScope header
+  // ArtMethod*, RA and callee save area size, local reference segment state.
+  size_t method_ptr_size = static_cast<size_t>(kFramePointerSize);
+  size_t ra_and_callee_save_area_size = (CalleeSaveRegisters().size() + 1) * kFramePointerSize;
+
+  size_t frame_data_size = method_ptr_size + ra_and_callee_save_area_size;
+  if (LIKELY(HasLocalReferenceSegmentState())) {                     // Local ref. segment state.
+    // Local reference segment state is sometimes excluded.
+    frame_data_size += sizeof(uint32_t);
+  }
+  // References plus 2 words for HandleScope header.
   size_t handle_scope_size = HandleScope::SizeOf(kMips64PointerSize, ReferenceCount());
-  // Plus return value spill area size
-  return RoundUp(frame_data_size + handle_scope_size + SizeOfReturnValue(), kStackAlignment);
+
+  size_t total_size = frame_data_size;
+  if (LIKELY(HasHandleScope())) {
+    // HandleScope is sometimes excluded.
+    total_size += handle_scope_size;                                 // Handle scope size.
+  }
+
+  // Plus return value spill area size.
+  total_size += SizeOfReturnValue();
+
+  return RoundUp(total_size, kStackAlignment);
 }
 
 size_t Mips64JniCallingConvention::OutArgSize() {
@@ -186,7 +210,7 @@
 }
 
 bool Mips64JniCallingConvention::IsCurrentParamInRegister() {
-  return itr_args_ < 8;
+  return itr_args_ < kMaxRegisterArguments;
 }
 
 bool Mips64JniCallingConvention::IsCurrentParamOnStack() {
@@ -204,7 +228,8 @@
 
 FrameOffset Mips64JniCallingConvention::CurrentParamStackOffset() {
   CHECK(IsCurrentParamOnStack());
-  size_t offset = displacement_.Int32Value() - OutArgSize() + ((itr_args_ - 8) * kFramePointerSize);
+  size_t args_on_stack = itr_args_ - kMaxRegisterArguments;
+  size_t offset = displacement_.Int32Value() - OutArgSize() + (args_on_stack * kFramePointerSize);
   CHECK_LT(offset, OutArgSize());
   return FrameOffset(offset);
 }
@@ -214,7 +239,7 @@
   size_t all_args = NumArgs() + NumberOfExtraArgumentsForJni();
 
   // Nothing on the stack unless there are more than 8 arguments
-  return (all_args > 8) ? all_args - 8 : 0;
+  return (all_args > kMaxRegisterArguments) ? all_args - kMaxRegisterArguments : 0;
 }
 }  // namespace mips64
 }  // namespace art
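
Both FrameSize() rewrites above follow the same accounting, with the local reference segment state and handle scope skipped for @CriticalNative methods via HasLocalReferenceSegmentState() and HasHandleScope(). A simplified sketch of that accounting, assuming illustrative sizes and a hypothetical FrameSizeSketch helper (not ART code):

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    constexpr size_t kStackAlignment = 16u;

    size_t RoundUp(size_t x, size_t n) { return ((x + n - 1u) / n) * n; }

    // Frame size = fixed frame data (ArtMethod*, RA, callee saves), plus the pieces a
    // @CriticalNative method drops (local reference segment state, handle scope), plus
    // the return-value spill area, rounded up to the stack alignment.
    size_t FrameSizeSketch(size_t pointer_size,
                           size_t num_callee_saves,
                           size_t handle_scope_size,
                           size_t return_value_size,
                           bool is_critical_native) {
      size_t frame_data_size = pointer_size                      // ArtMethod*.
                             + pointer_size                      // RA return address.
                             + num_callee_saves * pointer_size;  // Callee save area.
      if (!is_critical_native) {
        frame_data_size += sizeof(uint32_t);                     // Local ref. segment state.
      }
      size_t total_size = frame_data_size;
      if (!is_critical_native) {
        total_size += handle_scope_size;                         // Handle scope.
      }
      total_size += return_value_size;
      return RoundUp(total_size, kStackAlignment);
    }

    int main() {
      // Hypothetical MIPS64 numbers: 8-byte pointers, 9 callee saves, a 32-byte handle
      // scope and an 8-byte return value.
      std::printf("normal:   %zu\n", FrameSizeSketch(8u, 9u, 32u, 8u, false));
      std::printf("critical: %zu\n", FrameSizeSketch(8u, 9u, 32u, 8u, true));
      return 0;
    }
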
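On MIPS64 the register/stack split captured by kMaxRegisterArguments is simpler: the first eight arguments, counting the implicit JNIEnv* and jobject/jclass, are enregistered, and anything beyond that lives in the out-args area at 8-byte strides. A minimal sketch under those assumptions, with offsets taken relative to the start of the out-args area rather than the full frame displacement (not ART code):

    #include <cstddef>
    #include <cstdio>

    constexpr size_t kMaxRegisterArguments = 8u;  // A0..A7 / F12..F19.
    constexpr size_t kFramePointerSize = 8u;      // 64-bit registers and stack slots.

    // How many outgoing arguments need stack slots under the N64-style convention.
    size_t NumberOfOutgoingStackArgs(size_t all_args) {
      return (all_args > kMaxRegisterArguments) ? all_args - kMaxRegisterArguments : 0u;
    }

    // Offset of argument arg_index (0-based, register arguments included) from the
    // start of the out-args area; only meaningful when the argument spilled.
    size_t StackOffsetOfArg(size_t arg_index) {
      return (arg_index - kMaxRegisterArguments) * kFramePointerSize;
    }

    int main() {
      // A static native with 7 explicit arguments: 2 implicit (JNIEnv*, jclass) + 7 = 9,
      // so exactly one argument spills, landing at offset 0 of the out-args area.
      std::printf("stack args: %zu\n", NumberOfOutgoingStackArgs(9u));  // 1
      std::printf("offset:     %zu\n", StackOffsetOfArg(8u));           // 0
      return 0;
    }
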
diff --git a/compiler/jni/quick/mips64/calling_convention_mips64.h b/compiler/jni/quick/mips64/calling_convention_mips64.h
index a5fd111..faedaef 100644
--- a/compiler/jni/quick/mips64/calling_convention_mips64.h
+++ b/compiler/jni/quick/mips64/calling_convention_mips64.h
@@ -54,7 +54,10 @@
 
 class Mips64JniCallingConvention FINAL : public JniCallingConvention {
  public:
-  Mips64JniCallingConvention(bool is_static, bool is_synchronized, const char* shorty);
+  Mips64JniCallingConvention(bool is_static,
+                             bool is_synchronized,
+                             bool is_critical_native,
+                             const char* shorty);
   ~Mips64JniCallingConvention() OVERRIDE {}
   // Calling convention
   ManagedRegister ReturnRegister() OVERRIDE;