// Copyright 2011 Google Inc. All Rights Reserved.
// Author: irogers@google.com (Ian Rogers)
| 3 | #include "src/jni_compiler.h" |
| 4 | #include <sys/mman.h> |
| 5 | #include "src/assembler.h" |
| 6 | #include "src/calling_convention.h" |
| 7 | #include "src/macros.h" |
| 8 | #include "src/managed_register.h" |
| 9 | #include "src/logging.h" |
| 10 | #include "src/thread.h" |
| 11 | |
| 12 | namespace art { |
| 13 | |
| 14 | // Generate the JNI bridge for the given method, general contract: |
| 15 | // - Arguments are in the managed runtime format, either on stack or in |
| 16 | // registers, a reference to the method object is supplied as part of this |
| 17 | // convention. |
| 18 | // |
| 19 | void JniCompiler::Compile(Assembler* jni_asm, Method* native_method) { |
| 20 | CHECK(native_method->IsNative()); |
| 21 | JniCallingConvention jni_conv(native_method); |
| 22 | ManagedRuntimeCallingConvention mr_conv(native_method); |
| 23 | const bool is_static = native_method->IsStatic(); |
| 24 | |
| 25 | // 1. Build the frame |
| 26 | const size_t frame_size(jni_conv.FrameSize()); |
| 27 | jni_asm->BuildFrame(frame_size, mr_conv.MethodRegister()); |
| 28 | |
| 29 | // 2. Save callee save registers that aren't callee save in the native code |
| 30 | // TODO: implement computing the difference of the callee saves |
| 31 | // and saving |
| 32 | |
| 33 | // 3. Set up the StackHandleBlock |
| 34 | mr_conv.ResetIterator(FrameOffset(frame_size)); |
| 35 | jni_conv.ResetIterator(FrameOffset(0)); |
| 36 | jni_asm->StoreImmediateToFrame(jni_conv.ShbNumRefsOffset(), |
| 37 | jni_conv.HandleCount(), |
| 38 | mr_conv.InterproceduralScratchRegister()); |
| 39 | jni_asm->CopyRawPtrFromThread(jni_conv.ShbLinkOffset(), |
| 40 | Thread::TopShbOffset(), |
| 41 | mr_conv.InterproceduralScratchRegister()); |
| 42 | jni_asm->StoreStackOffsetToThread(Thread::TopShbOffset(), |
| 43 | jni_conv.ShbOffset(), |
| 44 | mr_conv.InterproceduralScratchRegister()); |
| 45 | |
| 46 | // 4. Place incoming reference arguments into handle block |
| 47 | jni_conv.Next(); // Skip JNIEnv* |
| 48 | // 4.5. Create Class argument for static methods out of passed method |
| 49 | if (is_static) { |
| 50 | FrameOffset handle_offset = jni_conv.CurrentParamHandleOffset(); |
| 51 | // Check handle offset is within frame |
| 52 | CHECK_LT(handle_offset.Uint32Value(), frame_size); |
| 53 | jni_asm->LoadRef(jni_conv.InterproceduralScratchRegister(), |
| 54 | mr_conv.MethodRegister(), Method::ClassOffset()); |
| 55 | jni_asm->ValidateRef(jni_conv.InterproceduralScratchRegister(), false); |
| 56 | jni_asm->StoreRef(handle_offset, jni_conv.InterproceduralScratchRegister()); |
| 57 | jni_conv.Next(); // handlerized so move to next argument |
| 58 | } |
| 59 | while (mr_conv.HasNext()) { |
| 60 | CHECK(jni_conv.HasNext()); |
| 61 | bool ref_param = jni_conv.IsCurrentParamAReference(); |
| 62 | CHECK(!ref_param || mr_conv.IsCurrentParamAReference()); |
| 63 | // References need handlerization and the handle address passing |
| 64 | if (ref_param) { |
| 65 | // Compute handle offset, note null is handlerized but its boxed value |
| 66 | // must be NULL |
| 67 | FrameOffset handle_offset = jni_conv.CurrentParamHandleOffset(); |
| 68 | // Check handle offset is within frame |
| 69 | CHECK_LT(handle_offset.Uint32Value(), frame_size); |
| 70 | bool input_in_reg = mr_conv.IsCurrentParamInRegister(); |
| 71 | CHECK(input_in_reg || mr_conv.IsCurrentParamOnStack()); |
| 72 | if (input_in_reg) { |
| 73 | LOG(FATAL) << "UNTESTED"; |
| 74 | ManagedRegister in_reg = mr_conv.CurrentParamRegister(); |
| 75 | jni_asm->ValidateRef(in_reg, mr_conv.IsCurrentParamPossiblyNull()); |
| 76 | jni_asm->StoreRef(handle_offset, in_reg); |
| 77 | } else { |
| 78 | FrameOffset in_off = mr_conv.CurrentParamStackOffset(); |
| 79 | jni_asm->ValidateRef(in_off, mr_conv.IsCurrentParamPossiblyNull()); |
| 80 | jni_asm->CopyRef(handle_offset, in_off, |
| 81 | mr_conv.InterproceduralScratchRegister()); |
| 82 | } |
| 83 | } |
| 84 | mr_conv.Next(); |
| 85 | jni_conv.Next(); |
| 86 | } |
| 87 | |
| 88 | // 5. Acquire lock for synchronized methods. Done here as references are held |
| 89 | // live in handle block but we're in managed code and can work on |
| 90 | // references |
| 91 | if (native_method->IsSynchronized()) { |
| 92 | jni_conv.ResetIterator(FrameOffset(0)); |
| 93 | jni_conv.Next(); // skip JNI environment |
| 94 | jni_asm->LockReferenceOnStack(jni_conv.CurrentParamHandleOffset()); |
| 95 | } |
| 96 | |
| 97 | // 6. Transition from being in managed to native code |
| 98 | // TODO: write out anchor, ensure the transition to native follow a store |
| 99 | // fence. |
| 100 | jni_asm->StoreImmediateToThread(Thread::StateOffset(), Thread::kNative, |
| 101 | mr_conv.InterproceduralScratchRegister()); |
| 102 | |
| 103 | // 7. Move frame down to allow space for out going args. Do for as short a |
| 104 | // time as possible to aid profiling.. |
| 105 | const size_t out_arg_size = jni_conv.OutArgSize(); |
| 106 | jni_asm->IncreaseFrameSize(out_arg_size); |
| 107 | |
| 108 | // 8. Iterate over arguments placing values from managed calling convention in |
| 109 | // to the convention required for a native call (shuffling). For references |
| 110 | // place an index/pointer to the reference after checking whether it is |
| 111 | // NULL (which must be encoded as NULL). |
| 112 | // NB. we do this prior to materializing the JNIEnv* and static's jclass to |
| 113 | // give as many free registers for the shuffle as possible |
| 114 | mr_conv.ResetIterator(FrameOffset(frame_size+out_arg_size)); |
| 115 | jni_conv.ResetIterator(FrameOffset(out_arg_size)); |
| 116 | jni_conv.Next(); // Skip JNIEnv* |
| 117 | if (is_static) { |
| 118 | FrameOffset handle_offset = jni_conv.CurrentParamHandleOffset(); |
| 119 | if (jni_conv.IsCurrentParamOnStack()) { |
| 120 | FrameOffset out_off = jni_conv.CurrentParamStackOffset(); |
| 121 | jni_asm->CreateStackHandle(out_off, handle_offset, |
| 122 | mr_conv.InterproceduralScratchRegister(), |
| 123 | false); |
| 124 | } else { |
| 125 | ManagedRegister out_reg = jni_conv.CurrentParamRegister(); |
| 126 | jni_asm->CreateStackHandle(out_reg, handle_offset, |
| 127 | ManagedRegister::NoRegister(), false); |
| 128 | } |
| 129 | jni_conv.Next(); |
| 130 | } |
| 131 | while (mr_conv.HasNext()) { |
| 132 | CHECK(jni_conv.HasNext()); |
| 133 | bool input_in_reg = mr_conv.IsCurrentParamInRegister(); |
| 134 | bool output_in_reg = jni_conv.IsCurrentParamInRegister(); |
| 135 | FrameOffset handle_offset(0); |
| 136 | bool null_allowed = false; |
| 137 | bool ref_param = jni_conv.IsCurrentParamAReference(); |
| 138 | CHECK(!ref_param || mr_conv.IsCurrentParamAReference()); |
| 139 | CHECK(input_in_reg || mr_conv.IsCurrentParamOnStack()); |
| 140 | CHECK(output_in_reg || jni_conv.IsCurrentParamOnStack()); |
| 141 | // References need handlerization and the handle address passing |
| 142 | if (ref_param) { |
| 143 | null_allowed = mr_conv.IsCurrentParamPossiblyNull(); |
| 144 | // Compute handle offset. Note null is placed in the SHB but the jobject |
| 145 | // passed to the native code must be null (not a pointer into the SHB |
| 146 | // as with regular references). |
| 147 | handle_offset = jni_conv.CurrentParamHandleOffset(); |
| 148 | // Check handle offset is within frame. |
| 149 | CHECK_LT(handle_offset.Uint32Value(), (frame_size+out_arg_size)); |
| 150 | } |
| 151 | if (input_in_reg && output_in_reg) { |
| 152 | LOG(FATAL) << "UNTESTED"; |
| 153 | ManagedRegister in_reg = mr_conv.CurrentParamRegister(); |
| 154 | ManagedRegister out_reg = jni_conv.CurrentParamRegister(); |
| 155 | if (ref_param) { |
| 156 | jni_asm->CreateStackHandle(out_reg, handle_offset, in_reg, |
| 157 | null_allowed); |
| 158 | } else { |
| 159 | jni_asm->Move(out_reg, in_reg); |
| 160 | } |
| 161 | } else if (!input_in_reg && !output_in_reg) { |
| 162 | FrameOffset out_off = jni_conv.CurrentParamStackOffset(); |
| 163 | if (ref_param) { |
| 164 | jni_asm->CreateStackHandle(out_off, handle_offset, |
| 165 | mr_conv.InterproceduralScratchRegister(), |
| 166 | null_allowed); |
| 167 | } else { |
| 168 | FrameOffset in_off = mr_conv.CurrentParamStackOffset(); |
| 169 | size_t param_size = mr_conv.CurrentParamSizeInBytes(); |
| 170 | CHECK_EQ(param_size, jni_conv.CurrentParamSizeInBytes()); |
| 171 | jni_asm->Copy(out_off, in_off, mr_conv.InterproceduralScratchRegister(), |
| 172 | param_size); |
| 173 | } |
| 174 | } else if (!input_in_reg && output_in_reg) { |
| 175 | LOG(FATAL) << "UNTESTED"; |
| 176 | FrameOffset in_off = mr_conv.CurrentParamStackOffset(); |
| 177 | ManagedRegister out_reg = jni_conv.CurrentParamRegister(); |
| 178 | // Check that incoming stack arguments are above the current stack frame. |
| 179 | CHECK_GT(in_off.Uint32Value(), frame_size); |
| 180 | if (ref_param) { |
| 181 | jni_asm->CreateStackHandle(out_reg, handle_offset, |
| 182 | ManagedRegister::NoRegister(), null_allowed); |
| 183 | } else { |
| 184 | unsigned int param_size = mr_conv.CurrentParamSizeInBytes(); |
| 185 | CHECK_EQ(param_size, jni_conv.CurrentParamSizeInBytes()); |
| 186 | jni_asm->Load(out_reg, in_off, param_size); |
| 187 | } |
| 188 | } else { |
| 189 | LOG(FATAL) << "UNTESTED"; |
| 190 | CHECK(input_in_reg && !output_in_reg); |
| 191 | ManagedRegister in_reg = mr_conv.CurrentParamRegister(); |
| 192 | FrameOffset out_off = jni_conv.CurrentParamStackOffset(); |
| 193 | // Check outgoing argument is within frame |
| 194 | CHECK_LT(out_off.Uint32Value(), frame_size); |
| 195 | if (ref_param) { |
| 196 | // TODO: recycle value in in_reg rather than reload from handle |
| 197 | jni_asm->CreateStackHandle(out_off, handle_offset, |
| 198 | mr_conv.InterproceduralScratchRegister(), |
| 199 | null_allowed); |
| 200 | } else { |
| 201 | size_t param_size = mr_conv.CurrentParamSizeInBytes(); |
| 202 | CHECK_EQ(param_size, jni_conv.CurrentParamSizeInBytes()); |
| 203 | jni_asm->Store(out_off, in_reg, param_size); |
| 204 | } |
| 205 | } |
| 206 | mr_conv.Next(); |
| 207 | jni_conv.Next(); |
| 208 | } |
| 209 | // 9. Create 1st argument, the JNI environment ptr |
| 210 | jni_conv.ResetIterator(FrameOffset(out_arg_size)); |
| 211 | if (jni_conv.IsCurrentParamInRegister()) { |
| 212 | jni_asm->LoadRawPtrFromThread(jni_conv.CurrentParamRegister(), |
| 213 | Thread::JniEnvOffset()); |
| 214 | } else { |
| 215 | jni_asm->CopyRawPtrFromThread(jni_conv.CurrentParamStackOffset(), |
| 216 | Thread::JniEnvOffset(), |
| 217 | jni_conv.InterproceduralScratchRegister()); |
| 218 | } |
| 219 | |
| 220 | // 10. Plant call to native code associated with method |
| 221 | jni_asm->Call(mr_conv.MethodRegister(), Method::NativeMethodOffset(), |
| 222 | mr_conv.InterproceduralScratchRegister()); |
| 223 | |
| 224 | // 11. Release outgoing argument area |
| 225 | jni_asm->DecreaseFrameSize(out_arg_size); |
| 226 | |
| 227 | // 12. Transition from being in native to managed code, possibly entering a |
| 228 | // safepoint |
| 229 | jni_asm->StoreImmediateToThread(Thread::StateOffset(), Thread::kRunnable, |
| 230 | mr_conv.InterproceduralScratchRegister()); |
| 231 | // TODO: check for safepoint transition |
| 232 | |
| 233 | // 13. Move to first handle offset |
| 234 | jni_conv.ResetIterator(FrameOffset(0)); |
| 235 | jni_conv.Next(); // skip JNI environment |
| 236 | |
| 237 | // 14. Release lock for synchronized methods (done in the managed state so |
| 238 | // references can be touched) |
| 239 | if (native_method->IsSynchronized()) { |
| 240 | jni_asm->UnLockReferenceOnStack(jni_conv.CurrentParamHandleOffset()); |
| 241 | } |
| 242 | |
| 243 | // 15. Place result in correct register possibly dehandlerizing |
| 244 | if (jni_conv.IsReturnAReference()) { |
| 245 | jni_asm->LoadReferenceFromStackHandle(mr_conv.ReturnRegister(), |
| 246 | jni_conv.ReturnRegister(), |
| 247 | jni_conv.CurrentParamHandleOffset()); |
| 248 | } else { |
| 249 | jni_asm->Move(mr_conv.ReturnRegister(), jni_conv.ReturnRegister()); |
| 250 | } |
| 251 | |
| 252 | // 16. Remove stack handle block from thread |
| 253 | jni_asm->CopyRawPtrToThread(Thread::TopShbOffset(), jni_conv.ShbLinkOffset(), |
| 254 | jni_conv.InterproceduralScratchRegister()); |
| 255 | |
| 256 | // 17. Remove activation |
| 257 | jni_asm->RemoveFrame(frame_size); |
| 258 | |
| 259 | // 18. Finalize code generation |
| 260 | size_t cs = jni_asm->CodeSize(); |
| 261 | MemoryRegion code(AllocateCode(cs), cs); |
| 262 | jni_asm->FinalizeInstructions(code); |
| 263 | native_method->SetCode(code.pointer()); |
| 264 | } |
| 265 | |
| 266 | void* JniCompiler::AllocateCode(size_t size) { |
| 267 | CHECK_LT(((jni_code_top_ - jni_code_) + size), jni_code_size_); |
| 268 | void *result = jni_code_top_; |
| 269 | jni_code_top_ += size; |
| 270 | return result; |
| 271 | } |
| 272 | |
| 273 | JniCompiler::JniCompiler() { |
| 274 | // TODO: this shouldn't be managed by the JniCompiler, we should have a |
| 275 | // code cache. |
| 276 | jni_code_size_ = 4096; |
| 277 | jni_code_ = static_cast<byte*>(mmap(NULL, jni_code_size_, |
| 278 | PROT_READ | PROT_WRITE | PROT_EXEC, |
| 279 | MAP_ANONYMOUS | MAP_PRIVATE, -1, 0)); |
| 280 | CHECK_NE(MAP_FAILED, jni_code_); |
| 281 | jni_code_top_ = jni_code_; |
| 282 | } |
| 283 | |
| 284 | JniCompiler::~JniCompiler() { |
| 285 | // TODO: this shouldn't be managed by the JniCompiler, we should have a |
| 286 | // code cache. |
| 287 | CHECK_EQ(0, munmap(jni_code_, jni_code_size_)); |
| 288 | } |
| 289 | |
| 290 | } // namespace art |