/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_QUICK_EXCEPTION_HANDLER_H_
#define ART_RUNTIME_QUICK_EXCEPTION_HANDLER_H_

#include <android-base/logging.h>

#include "base/macros.h"
#include "base/mutex.h"
#include "deoptimization_kind.h"
#include "stack_reference.h"

namespace art {

namespace mirror {
class Throwable;
}  // namespace mirror
class ArtMethod;
class Context;
class OatQuickMethodHeader;
class Thread;
class ShadowFrame;
class StackVisitor;

// Manages exception delivery for the Quick backend.
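//
// A rough usage sketch (not a definitive call site; the actual driver is assumed to be
// something like Thread::QuickDeliverException and may differ in detail):
//
//   QuickExceptionHandler handler(self, /* is_deoptimization */ false);
//   handler.FindCatch(exception);          // Locate a catch handler, if any.
//   handler.UpdateInstrumentationStack();  // Drop instrumentation frames being unwound.
//   handler.DoLongJump();                  // Transfer control to the handler; never returns.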
class QuickExceptionHandler {
 public:
  QuickExceptionHandler(Thread* self, bool is_deoptimization)
      REQUIRES_SHARED(Locks::mutator_lock_);

  NO_RETURN ~QuickExceptionHandler() {
    LOG(FATAL) << "UNREACHABLE";  // Expected to take long jump.
    UNREACHABLE();
  }

  // Find the catch handler for the given exception.
  void FindCatch(ObjPtr<mirror::Throwable> exception) REQUIRES_SHARED(Locks::mutator_lock_);

  // Deoptimize the stack to the upcall or to some code that is not deoptimizeable. For
  // every compiled frame, we create a "copy" shadow frame that will be executed by
  // the interpreter.
  void DeoptimizeStack() REQUIRES_SHARED(Locks::mutator_lock_);

  // Deoptimize a single frame. It's directly triggered from compiled code. It
  // has the following properties:
  // - It deoptimizes a single frame, which can include multiple inlined frames.
  // - There is no return result or pending exception at the deoptimization point.
  // - It always deoptimizes, even if IsDeoptimizeable() returns false for the
  //   code, since HDeoptimize always saves the full environment. So it overrides
  //   the result of IsDeoptimizeable().
  // - It can be either a full-fragment or a partial-fragment deoptimization, depending
  //   on whether that single frame covers a full or a partial fragment.
  void DeoptimizeSingleFrame(DeoptimizationKind kind) REQUIRES_SHARED(Locks::mutator_lock_);

  void DeoptimizePartialFragmentFixup(uintptr_t return_pc)
      REQUIRES_SHARED(Locks::mutator_lock_);
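  //
  // Taken together, the deoptimization entry points above are expected to be driven
  // roughly as follows (a sketch only; the actual driver is assumed to be a runtime
  // entry point such as artDeoptimizeImpl and may differ):
  //
  //   QuickExceptionHandler handler(self, /* is_deoptimization */ true);
  //   if (single_frame) {
  //     handler.DeoptimizeSingleFrame(kind);
  //   } else {
  //     handler.DeoptimizeStack();
  //   }
  //   uintptr_t return_pc = handler.UpdateInstrumentationStack();
  //   if (handler.IsFullFragmentDone()) {
  //     handler.DoLongJump(true);
  //   } else {
  //     handler.DeoptimizePartialFragmentFixup(return_pc);
  //     handler.DoLongJump(false);  // Keep caller-save registers intact for the fixup.
  //   }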

  // Update the instrumentation stack by removing all methods that will be unwound
  // by the exception being thrown.
  // Returns the return pc of the last frame that is unwound.
  uintptr_t UpdateInstrumentationStack() REQUIRES_SHARED(Locks::mutator_lock_);

  // Set up the environment before delivering an exception to optimized code.
  void SetCatchEnvironmentForOptimizedHandler(StackVisitor* stack_visitor)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Long jump either to a catch handler or to the upcall.
  NO_RETURN void DoLongJump(bool smash_caller_saves = true) REQUIRES_SHARED(Locks::mutator_lock_);

  void SetHandlerQuickFrame(ArtMethod** handler_quick_frame) {
    handler_quick_frame_ = handler_quick_frame;
  }

  void SetHandlerQuickFramePc(uintptr_t handler_quick_frame_pc) {
    handler_quick_frame_pc_ = handler_quick_frame_pc;
  }

  void SetHandlerMethodHeader(const OatQuickMethodHeader* handler_method_header) {
    handler_method_header_ = handler_method_header;
  }

  void SetHandlerQuickArg0(uintptr_t handler_quick_arg0) {
    handler_quick_arg0_ = handler_quick_arg0;
  }

  ArtMethod* GetHandlerMethod() const {
    return handler_method_;
  }

  void SetHandlerMethod(ArtMethod* handler_quick_method) {
    handler_method_ = handler_quick_method;
  }

  uint32_t GetHandlerDexPc() const {
    return handler_dex_pc_;
  }

  void SetHandlerDexPc(uint32_t dex_pc) {
    handler_dex_pc_ = dex_pc;
  }

  bool GetClearException() const {
    return clear_exception_;
  }

  void SetClearException(bool clear_exception) {
    clear_exception_ = clear_exception;
  }

  void SetHandlerFrameDepth(size_t frame_depth) {
    handler_frame_depth_ = frame_depth;
  }

  bool IsFullFragmentDone() const {
    return full_fragment_done_;
  }

  void SetFullFragmentDone(bool full_fragment_done) {
    full_fragment_done_ = full_fragment_done;
  }

  // Walk the stack frames of the given thread, printing out non-runtime methods together with
  // their frame types. Helps to verify that partial-fragment deoptimization really works as
  // expected.
  static void DumpFramesWithType(Thread* self, bool details = false)
      REQUIRES_SHARED(Locks::mutator_lock_);
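  //
  // Example (a sketch; the caller must hold the mutator lock, per REQUIRES_SHARED above):
  //
  //   QuickExceptionHandler::DumpFramesWithType(Thread::Current(), /* details */ true);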

 private:
  Thread* const self_;
  Context* const context_;
  // Should we deoptimize the stack?
  const bool is_deoptimization_;
  // Is method tracing active?
  const bool method_tracing_active_;
  // Quick frame with found handler or last frame if no handler found.
  ArtMethod** handler_quick_frame_;
  // PC to branch to for the handler.
  uintptr_t handler_quick_frame_pc_;
  // Quick code of the handler.
  const OatQuickMethodHeader* handler_method_header_;
  // The value for argument 0.
  uintptr_t handler_quick_arg0_;
  // The handler method to report to the debugger.
  ArtMethod* handler_method_;
  // The handler's dex PC; zero implies an uncaught exception.
  uint32_t handler_dex_pc_;
  // Should the exception be cleared because the catch block has no move-exception?
  bool clear_exception_;
  // Frame depth of the catch handler or the upcall.
  size_t handler_frame_depth_;
  // Does the handler successfully walk the full fragment (i.e. it is not stopped
  // by code that is not deoptimizeable)? Even single-frame deoptimization
  // can set this to true if the fragment contains only one quick frame.
  bool full_fragment_done_;

  void PrepareForLongJumpToInvokeStubOrInterpreterBridge()
      REQUIRES_SHARED(Locks::mutator_lock_);

  DISALLOW_COPY_AND_ASSIGN(QuickExceptionHandler);
};

}  // namespace art

#endif  // ART_RUNTIME_QUICK_EXCEPTION_HANDLER_H_