/*
 * Copyright (C) 2015 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "linker/arm/relative_patcher_arm_base.h"

#include <utility>

#include "compiled_method.h"
#include "oat.h"
#include "output_stream.h"

namespace art {
namespace linker {

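// Reserve space for call thunks needed by the given method's relative call patches,
// requesting no subclass-specific extra space (see ReserveSpaceInternal()).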
uint32_t ArmBaseRelativePatcher::ReserveSpace(uint32_t offset,
                                              const CompiledMethod* compiled_method) {
  return ReserveSpaceInternal(offset, compiled_method, 0u);
}

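// If a thunk was reserved at the current (aligned) offset, write any needed alignment
// padding, the thunk code, and trailing alignment padding. Returns the updated offset,
// or 0u if a write to the output stream failed.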
uint32_t ArmBaseRelativePatcher::WriteThunks(OutputStream* out, uint32_t offset) {
  if (current_thunk_to_write_ == thunk_locations_.size()) {
    return offset;
  }
  uint32_t aligned_offset = CompiledMethod::AlignCode(offset, instruction_set_);
  if (UNLIKELY(aligned_offset == thunk_locations_[current_thunk_to_write_])) {
    ++current_thunk_to_write_;
    uint32_t aligned_code_delta = aligned_offset - offset;
    if (aligned_code_delta != 0u && !WriteCodeAlignment(out, aligned_code_delta)) {
      return 0u;
    }
    if (UNLIKELY(!WriteRelCallThunk(out, ArrayRef<const uint8_t>(thunk_code_)))) {
      return 0u;
    }
    uint32_t thunk_end_offset = aligned_offset + thunk_code_.size();
    // Align after writing the thunk; see the NOTE in ReserveSpaceInternal().
    offset = CompiledMethod::AlignCode(thunk_end_offset, instruction_set_);
    aligned_code_delta = offset - thunk_end_offset;
    if (aligned_code_delta != 0u && !WriteCodeAlignment(out, aligned_code_delta)) {
      return 0u;
    }
  }
  return offset;
}

ArmBaseRelativePatcher::ArmBaseRelativePatcher(RelativePatcherTargetProvider* provider,
                                               InstructionSet instruction_set,
                                               std::vector<uint8_t> thunk_code,
                                               uint32_t max_positive_displacement,
                                               uint32_t max_negative_displacement)
    : provider_(provider), instruction_set_(instruction_set),
      thunk_code_(std::move(thunk_code)),
      max_positive_displacement_(max_positive_displacement),
      max_negative_displacement_(max_negative_displacement),
      thunk_locations_(), current_thunk_to_write_(0u), unprocessed_patches_() {
}

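// Shared implementation for ReserveSpace(): decides whether a call thunk must be
// reserved so that all pending relative call patches stay within the branch range
// given by max_positive_displacement_ and max_negative_displacement_. max_extra_space
// is additional space a subclass needs reserved together with the method's code.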
uint32_t ArmBaseRelativePatcher::ReserveSpaceInternal(uint32_t offset,
                                                      const CompiledMethod* compiled_method,
                                                      uint32_t max_extra_space) {
  // NOTE: The final thunk can be reserved from InitCodeMethodVisitor::EndClass() while it
  // may be written early by WriteCodeMethodVisitor::VisitMethod() for a deduplicated chunk
  // of code. To avoid any alignment discrepancies for the final thunk, we always align the
  // offset after reserving or writing any thunk.
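  // A null compiled_method marks the end-of-class call mentioned in the NOTE above;
  // this is the last chance to reserve a thunk for the remaining pending patches.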
  if (UNLIKELY(compiled_method == nullptr)) {
    uint32_t aligned_offset = CompiledMethod::AlignCode(offset, instruction_set_);
    bool needs_thunk = ReserveSpaceProcessPatches(aligned_offset);
    if (needs_thunk) {
      thunk_locations_.push_back(aligned_offset);
      offset = CompiledMethod::AlignCode(aligned_offset + thunk_code_.size(), instruction_set_);
    }
    return offset;
  }
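  // Regular case: compute the first aligned offset that could follow this method's
  // code and any subclass-requested extra space.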
  DCHECK(compiled_method->GetQuickCode() != nullptr);
  uint32_t quick_code_size = compiled_method->GetQuickCode()->size();
  uint32_t quick_code_offset = compiled_method->AlignCode(offset) + sizeof(OatQuickMethodHeader);
  uint32_t next_aligned_offset = compiled_method->AlignCode(quick_code_offset + quick_code_size);
  // Adjust for extra space required by the subclass.
  next_aligned_offset = compiled_method->AlignCode(next_aligned_offset + max_extra_space);
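  // Reserve a thunk before this method if the oldest pending patch could otherwise
  // end up out of range.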
  if (!unprocessed_patches_.empty() &&
      next_aligned_offset - unprocessed_patches_.front().second > max_positive_displacement_) {
    bool needs_thunk = ReserveSpaceProcessPatches(next_aligned_offset);
    if (needs_thunk) {
      // A single thunk will cover all pending patches.
      unprocessed_patches_.clear();
      uint32_t thunk_location = compiled_method->AlignCode(offset);
      thunk_locations_.push_back(thunk_location);
      offset = CompiledMethod::AlignCode(thunk_location + thunk_code_.size(), instruction_set_);
    }
  }
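  // Queue this method's relative call patches; they are checked later because their
  // targets may not have been assigned offsets yet.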
  for (const LinkerPatch& patch : compiled_method->GetPatches()) {
    if (patch.Type() == kLinkerPatchCallRelative) {
      unprocessed_patches_.emplace_back(patch.TargetMethod(),
                                        quick_code_offset + patch.LiteralOffset());
    }
  }
  return offset;
}

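// Compute the displacement for a call patch, redirecting it to a reserved thunk when
// the actual target is out of range: the next unwritten thunk if it is close enough
// ahead, otherwise the nearest thunk already written behind the patch.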
uint32_t ArmBaseRelativePatcher::CalculateDisplacement(uint32_t patch_offset,
                                                       uint32_t target_offset) {
  // Unsigned arithmetic with its well-defined overflow behavior is just fine here.
  uint32_t displacement = target_offset - patch_offset;
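  // For example, patch_offset = 0x00001000u and target_offset = 0x00000800u wrap to
  // displacement = 0xfffff800u, the two's-complement encoding of -0x800.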
  // NOTE: With unsigned arithmetic we do mean to use && rather than || below.
  if (displacement > max_positive_displacement_ && displacement < -max_negative_displacement_) {
    // Unwritten thunks have higher offsets; check if the next one is within range.
    DCHECK(current_thunk_to_write_ == thunk_locations_.size() ||
           thunk_locations_[current_thunk_to_write_] > patch_offset);
    if (current_thunk_to_write_ != thunk_locations_.size() &&
        thunk_locations_[current_thunk_to_write_] - patch_offset < max_positive_displacement_) {
      displacement = thunk_locations_[current_thunk_to_write_] - patch_offset;
    } else {
      // We must have a previous thunk then.
      DCHECK_NE(current_thunk_to_write_, 0u);
      DCHECK_LT(thunk_locations_[current_thunk_to_write_ - 1], patch_offset);
      displacement = thunk_locations_[current_thunk_to_write_ - 1] - patch_offset;
      DCHECK(displacement >= -max_negative_displacement_);
    }
  }
  return displacement;
}

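// Check the pending patches against a prospective end-of-code offset. Returns true
// if a thunk needs to be reserved so that the remaining patches stay in range.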
bool ArmBaseRelativePatcher::ReserveSpaceProcessPatches(uint32_t next_aligned_offset) {
  // Process as many patches as possible; stop only on unresolved targets or calls too far back.
  while (!unprocessed_patches_.empty()) {
    uint32_t patch_offset = unprocessed_patches_.front().second;
    auto result = provider_->FindMethodOffset(unprocessed_patches_.front().first);
    if (!result.first) {
      // If still unresolved, check if we have a thunk within range.
      DCHECK(thunk_locations_.empty() || thunk_locations_.back() <= patch_offset);
      if (thunk_locations_.empty() ||
          patch_offset - thunk_locations_.back() > max_negative_displacement_) {
        return next_aligned_offset - patch_offset > max_positive_displacement_;
      }
    } else if (result.second >= patch_offset) {
      DCHECK_LE(result.second - patch_offset, max_positive_displacement_);
    } else {
      // For a call to an earlier offset, check if we have a thunk that's closer
      // than the actual target.
      uint32_t target_offset =
          (thunk_locations_.empty() || result.second > thunk_locations_.back())
              ? result.second
              : thunk_locations_.back();
      DCHECK_GT(patch_offset, target_offset);
      if (patch_offset - target_offset > max_negative_displacement_) {
        return true;
      }
    }
    unprocessed_patches_.pop_front();
  }
  return false;
}

}  // namespace linker
}  // namespace art