Orion Hodson | 01ecfa1 | 2019-07-18 12:57:47 +0100 | [diff] [blame] | 1 | /* |
| 2 | * Copyright (C) 2019 The Android Open Source Project |
| 3 | * |
| 4 | * Licensed under the Apache License, Version 2.0 (the "License"); |
| 5 | * you may not use this file except in compliance with the License. |
| 6 | * You may obtain a copy of the License at |
| 7 | * |
| 8 | * http://www.apache.org/licenses/LICENSE-2.0 |
| 9 | * |
| 10 | * Unless required by applicable law or agreed to in writing, software |
| 11 | * distributed under the License is distributed on an "AS IS" BASIS, |
| 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
| 13 | * See the License for the specific language governing permissions and |
| 14 | * limitations under the License. |
| 15 | */ |
| 16 | |
| 17 | #include "hidden_api_jni.h" |
| 18 | #include "hidden_api.h" |
| 19 | |
| 20 | #if defined(__linux__) |
| 21 | |
| 22 | #include <dlfcn.h> |
| 23 | #include <link.h> |
| 24 | |
| 25 | #include <mutex> |
| 26 | |
| 27 | #include "android-base/logging.h" |
| 28 | |
| 29 | #include "unwindstack/Regs.h" |
| 30 | #include "unwindstack/RegsGetLocal.h" |
| 31 | #include "unwindstack/Memory.h" |
| 32 | #include "unwindstack/Unwinder.h" |
| 33 | |
| 34 | #include "base/bit_utils.h" |
| 35 | #include "base/casts.h" |
| 36 | #include "base/file_utils.h" |
| 37 | #include "base/globals.h" |
| 38 | #include "base/memory_type_table.h" |
| 39 | #include "base/string_view_cpp20.h" |
| 40 | |
| 41 | namespace art { |
| 42 | namespace hiddenapi { |
| 43 | |
| 44 | namespace { |
| 45 | |
// The maximum number of frames to back trace through when performing Core Platform API checks of
// native code.
static constexpr size_t kMaxFrames = 3;

// Serializes use of the shared UnwindHelper singleton below: CaptureCallerPc()
// takes this lock before mutating the unwinder's registers and frame state.
static std::mutex gUnwindingMutex;
| 51 | |
// Bundles the libunwindstack objects needed to unwind the current process's
// native stack. A single instance is shared (see GetUnwindHelper()); callers
// serialize access via gUnwindingMutex.
struct UnwindHelper {
  explicit UnwindHelper(size_t max_depth)
      : memory_(unwindstack::Memory::CreateProcessMemory(getpid())),
        jit_(memory_),
        dex_(memory_),
        unwinder_(max_depth, &maps_, memory_) {
    CHECK(maps_.Parse());
    unwinder_.SetJitDebug(&jit_, unwindstack::Regs::CurrentArch());
    unwinder_.SetDexFiles(&dex_, unwindstack::Regs::CurrentArch());
    // Symbol names are not needed for PC-based code-range checks; skipping
    // resolution keeps unwinding cheap.
    unwinder_.SetResolveNames(false);
    unwindstack::Elf::SetCachingEnabled(false);
  }

  unwindstack::Unwinder* Unwinder() { return &unwinder_; }

 private:
  // NOTE: members are initialized in declaration order. memory_ must remain
  // declared before jit_/dex_/unwinder_, which consume it in the constructor's
  // initializer list above.
  unwindstack::LocalMaps maps_;
  std::shared_ptr<unwindstack::Memory> memory_;
  unwindstack::JitDebug jit_;
  unwindstack::DexFiles dex_;
  unwindstack::Unwinder unwinder_;
};
| 74 | |
| 75 | static UnwindHelper& GetUnwindHelper() { |
| 76 | static UnwindHelper helper(kMaxFrames); |
| 77 | return helper; |
| 78 | } |
| 79 | |
| 80 | } // namespace |
| 81 | |
// Classification of the shared object containing a native caller's PC, used by
// ScopedCorePlatformApiCheck to decide whether the caller is exempt from Core
// Platform API enforcement (kRuntime and kApexModule callers are approved).
enum class SharedObjectKind {
  kRuntime = 0,     // The shared object containing the ART runtime itself.
  kApexModule = 1,  // A shared object loaded from an APEX location.
  kOther = 2        // Any other shared object, or an unknown address.
};
| 87 | |
| 88 | std::ostream& operator<<(std::ostream& os, SharedObjectKind kind) { |
| 89 | switch (kind) { |
| 90 | case SharedObjectKind::kRuntime: |
| 91 | os << "Runtime"; |
| 92 | break; |
| 93 | case SharedObjectKind::kApexModule: |
| 94 | os << "APEX Module"; |
| 95 | break; |
| 96 | case SharedObjectKind::kOther: |
| 97 | os << "Other"; |
| 98 | break; |
| 99 | } |
| 100 | return os; |
| 101 | } |
| 102 | |
| 103 | // Class holding Cached ranges of loaded shared objects to facilitate checks of field and method |
| 104 | // resolutions within the Core Platform API for native callers. |
| 105 | class CodeRangeCache final { |
| 106 | public: |
| 107 | static CodeRangeCache& GetSingleton() { |
| 108 | static CodeRangeCache Singleton; |
| 109 | return Singleton; |
| 110 | } |
| 111 | |
| 112 | SharedObjectKind GetSharedObjectKind(void* pc) { |
| 113 | uintptr_t address = reinterpret_cast<uintptr_t>(pc); |
| 114 | SharedObjectKind kind; |
| 115 | if (Find(address, &kind)) { |
| 116 | return kind; |
| 117 | } |
| 118 | return SharedObjectKind::kOther; |
| 119 | } |
| 120 | |
| 121 | bool HasCache() const { |
| 122 | return memory_type_table_.Size() != 0; |
| 123 | } |
| 124 | |
| 125 | void BuildCache() { |
| 126 | DCHECK(!HasCache()); |
| 127 | art::MemoryTypeTable<SharedObjectKind>::Builder builder; |
| 128 | builder_ = &builder; |
| 129 | libjavacore_loaded_ = false; |
| 130 | libnativehelper_loaded_ = false; |
| 131 | libopenjdk_loaded_ = false; |
| 132 | |
| 133 | // Iterate over ELF headers populating table_builder with executable ranges. |
| 134 | dl_iterate_phdr(VisitElfInfo, this); |
| 135 | memory_type_table_ = builder_->Build(); |
| 136 | |
| 137 | // Check expected libraries loaded when iterating headers. |
| 138 | CHECK(libjavacore_loaded_); |
| 139 | CHECK(libnativehelper_loaded_); |
| 140 | CHECK(libopenjdk_loaded_); |
| 141 | builder_ = nullptr; |
| 142 | } |
| 143 | |
| 144 | void DropCache() { |
| 145 | memory_type_table_ = {}; |
| 146 | } |
| 147 | |
| 148 | private: |
| 149 | CodeRangeCache() {} |
| 150 | |
| 151 | bool Find(uintptr_t address, SharedObjectKind* kind) const { |
| 152 | const art::MemoryTypeRange<SharedObjectKind>* range = memory_type_table_.Lookup(address); |
| 153 | if (range == nullptr) { |
| 154 | return false; |
| 155 | } |
| 156 | *kind = range->Type(); |
| 157 | return true; |
| 158 | } |
| 159 | |
| 160 | static int VisitElfInfo(struct dl_phdr_info *info, size_t size ATTRIBUTE_UNUSED, void *data) |
| 161 | NO_THREAD_SAFETY_ANALYSIS { |
| 162 | auto cache = reinterpret_cast<CodeRangeCache*>(data); |
| 163 | art::MemoryTypeTable<SharedObjectKind>::Builder* builder = cache->builder_; |
| 164 | |
| 165 | for (size_t i = 0u; i < info->dlpi_phnum; ++i) { |
| 166 | const ElfW(Phdr)& phdr = info->dlpi_phdr[i]; |
| 167 | if (phdr.p_type != PT_LOAD || ((phdr.p_flags & PF_X) != PF_X)) { |
| 168 | continue; // Skip anything other than code pages |
| 169 | } |
| 170 | uintptr_t start = info->dlpi_addr + phdr.p_vaddr; |
| 171 | const uintptr_t limit = art::RoundUp(start + phdr.p_memsz, art::kPageSize); |
| 172 | SharedObjectKind kind = GetKind(info->dlpi_name, start, limit); |
| 173 | art::MemoryTypeRange<SharedObjectKind> range{start, limit, kind}; |
| 174 | if (!builder->Add(range)) { |
| 175 | LOG(WARNING) << "Overlapping/invalid range found in ELF headers: " << range; |
| 176 | } |
| 177 | } |
| 178 | |
| 179 | // Update sanity check state. |
| 180 | std::string_view dlpi_name{info->dlpi_name}; |
| 181 | if (!cache->libjavacore_loaded_) { |
| 182 | cache->libjavacore_loaded_ = art::EndsWith(dlpi_name, kLibjavacore); |
| 183 | } |
| 184 | if (!cache->libnativehelper_loaded_) { |
| 185 | cache->libnativehelper_loaded_ = art::EndsWith(dlpi_name, kLibnativehelper); |
| 186 | } |
| 187 | if (!cache->libopenjdk_loaded_) { |
| 188 | cache->libopenjdk_loaded_ = art::EndsWith(dlpi_name, kLibopenjdk); |
| 189 | } |
| 190 | |
| 191 | return 0; |
| 192 | } |
| 193 | |
| 194 | static SharedObjectKind GetKind(const char* so_name, uintptr_t start, uintptr_t limit) { |
| 195 | uintptr_t runtime_method = reinterpret_cast<uintptr_t>(CodeRangeCache::GetKind); |
| 196 | if (runtime_method >= start && runtime_method < limit) { |
| 197 | return SharedObjectKind::kRuntime; |
| 198 | } |
| 199 | return art::LocationIsOnApex(so_name) ? SharedObjectKind::kApexModule |
| 200 | : SharedObjectKind::kOther; |
| 201 | } |
| 202 | |
| 203 | art::MemoryTypeTable<SharedObjectKind> memory_type_table_; |
| 204 | |
| 205 | // Table builder, only valid during BuildCache(). |
| 206 | art::MemoryTypeTable<SharedObjectKind>::Builder* builder_; |
| 207 | |
| 208 | // Sanity checking state. |
| 209 | bool libjavacore_loaded_; |
| 210 | bool libnativehelper_loaded_; |
| 211 | bool libopenjdk_loaded_; |
| 212 | |
| 213 | static constexpr std::string_view kLibjavacore = "libjavacore.so"; |
| 214 | static constexpr std::string_view kLibnativehelper = "libnativehelper.so"; |
| 215 | static constexpr std::string_view kLibopenjdk = art::kIsDebugBuild ? "libopenjdkd.so" |
| 216 | : "libopenjdk.so"; |
| 217 | |
| 218 | DISALLOW_COPY_AND_ASSIGN(CodeRangeCache); |
| 219 | }; |
| 220 | |
// Cookie for tracking approvals of Core Platform API use. The Thread class has a per thread field
// that stores these values. This is necessary because we can't change the JNI interfaces and some
// paths call into each other, ie checked JNI typically calls plain JNI.
//
// The whole cookie packs into one uint32_t (see the bit_cast calls below), so
// it fits the existing per-thread storage slot.
struct CorePlatformApiCookie final {
  bool approved:1;    // Whether the outermost ScopedCorePlatformApiCheck instance is approved.
  uint32_t depth:31;  // Count of nested ScopedCorePlatformApiCheck instances.
};
| 228 | |
| 229 | ScopedCorePlatformApiCheck::ScopedCorePlatformApiCheck() { |
| 230 | Thread* self = Thread::Current(); |
| 231 | CorePlatformApiCookie cookie = |
| 232 | bit_cast<CorePlatformApiCookie, uint32_t>(self->CorePlatformApiCookie()); |
| 233 | bool is_core_platform_api_approved = false; // Default value for non-device testing. |
| 234 | if (!kIsTargetBuild) { |
| 235 | // On target device, if policy says enforcement is disabled, then treat all callers as |
| 236 | // approved. |
| 237 | auto policy = Runtime::Current()->GetCorePlatformApiEnforcementPolicy(); |
| 238 | if (policy == hiddenapi::EnforcementPolicy::kDisabled) { |
| 239 | is_core_platform_api_approved = true; |
| 240 | } else if (cookie.depth == 0) { |
| 241 | // On target device, only check the caller at depth 0 (the outermost entry into JNI |
| 242 | // interface). |
| 243 | DCHECK_EQ(cookie.approved, false); |
| 244 | void* caller_pc = CaptureCallerPc(); |
| 245 | if (caller_pc != nullptr) { |
| 246 | SharedObjectKind kind = CodeRangeCache::GetSingleton().GetSharedObjectKind(caller_pc); |
| 247 | is_core_platform_api_approved = ((kind == SharedObjectKind::kRuntime) || |
| 248 | (kind == SharedObjectKind::kApexModule)); |
| 249 | } |
| 250 | } |
| 251 | } |
| 252 | |
| 253 | // Update cookie |
| 254 | if (is_core_platform_api_approved) { |
| 255 | cookie.approved = true; |
| 256 | } |
| 257 | cookie.depth += 1; |
| 258 | self->SetCorePlatformApiCookie(bit_cast<uint32_t, CorePlatformApiCookie>(cookie)); |
| 259 | } |
| 260 | |
| 261 | ScopedCorePlatformApiCheck::~ScopedCorePlatformApiCheck() { |
| 262 | Thread* self = Thread::Current(); |
| 263 | // Update cookie, decrementing depth and clearing approved flag if this is the outermost |
| 264 | // instance. |
| 265 | CorePlatformApiCookie cookie = |
| 266 | bit_cast<CorePlatformApiCookie, uint32_t>(self->CorePlatformApiCookie()); |
| 267 | DCHECK_NE(cookie.depth, 0u); |
| 268 | cookie.depth -= 1u; |
| 269 | if (cookie.depth == 0u) { |
| 270 | cookie.approved = false; |
| 271 | } |
| 272 | self->SetCorePlatformApiCookie(bit_cast<uint32_t, CorePlatformApiCookie>(cookie)); |
| 273 | } |
| 274 | |
| 275 | bool ScopedCorePlatformApiCheck::IsCurrentCallerApproved(Thread* self) { |
| 276 | CorePlatformApiCookie cookie = |
| 277 | bit_cast<CorePlatformApiCookie, uint32_t>(self->CorePlatformApiCookie()); |
| 278 | DCHECK_GT(cookie.depth, 0u); |
| 279 | return cookie.approved; |
| 280 | } |
| 281 | |
| 282 | void* ScopedCorePlatformApiCheck::CaptureCallerPc() { |
| 283 | std::lock_guard<std::mutex> guard(gUnwindingMutex); |
| 284 | unwindstack::Unwinder* unwinder = GetUnwindHelper().Unwinder(); |
| 285 | std::unique_ptr<unwindstack::Regs> regs(unwindstack::Regs::CreateFromLocal()); |
| 286 | RegsGetLocal(regs.get()); |
| 287 | unwinder->SetRegs(regs.get()); |
| 288 | unwinder->Unwind(); |
| 289 | for (auto it = unwinder->frames().begin(); it != unwinder->frames().end(); ++it) { |
| 290 | // Unwind to frame above the tlsJniStackMarker. The stack markers should be on the first frame |
| 291 | // calling JNI methods. |
| 292 | if (it->sp > reinterpret_cast<uint64_t>(this)) { |
| 293 | return reinterpret_cast<void*>(it->pc); |
| 294 | } |
| 295 | } |
| 296 | return nullptr; |
| 297 | } |
| 298 | |
| 299 | void JniInitializeNativeCallerCheck() { |
| 300 | // This method should be called only once and before there are multiple runtime threads. |
| 301 | DCHECK(!CodeRangeCache::GetSingleton().HasCache()); |
| 302 | CodeRangeCache::GetSingleton().BuildCache(); |
| 303 | } |
| 304 | |
| 305 | void JniShutdownNativeCallerCheck() { |
| 306 | CodeRangeCache::GetSingleton().DropCache(); |
| 307 | } |
| 308 | |
| 309 | } // namespace hiddenapi |
| 310 | } // namespace art |
| 311 | |
| 312 | #else // __linux__ |
| 313 | |
| 314 | namespace art { |
| 315 | namespace hiddenapi { |
| 316 | |
// Non-Linux builds have no dl_iterate_phdr/unwindstack support, so native
// caller checks are disabled: these stubs keep the interface linkable.

ScopedCorePlatformApiCheck::ScopedCorePlatformApiCheck() {}

ScopedCorePlatformApiCheck::~ScopedCorePlatformApiCheck() {}

// Always reports the caller as unapproved when checks are unavailable.
bool ScopedCorePlatformApiCheck::IsCurrentCallerApproved(Thread* self ATTRIBUTE_UNUSED) {
  return false;
}

// No-op: there is no code-range cache to build on non-Linux hosts.
void JniInitializeNativeCallerCheck() {}

// No-op counterpart of the stub above.
void JniShutdownNativeCallerCheck() {}
| 328 | |
| 329 | } // namespace hiddenapi |
| 330 | } // namespace art |
| 331 | |
| 332 | #endif // __linux__ |