Merge "Revert "Revert Revert "Use interpret-only instead of verify-at-runtime when testing JIT"""
diff --git a/benchmark/const-class/info.txt b/benchmark/const-class/info.txt
new file mode 100644
index 0000000..ed0b827
--- /dev/null
+++ b/benchmark/const-class/info.txt
@@ -0,0 +1 @@
+Benchmarks for repeating const-class instructions in a loop.
diff --git a/benchmark/const-class/src/ConstClassBenchmark.java b/benchmark/const-class/src/ConstClassBenchmark.java
new file mode 100644
index 0000000..d45b49f
--- /dev/null
+++ b/benchmark/const-class/src/ConstClassBenchmark.java
@@ -0,0 +1,1071 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class ConstClassBenchmark {
+    // Define 1025 classes with consecutive type indexes in the dex file.
+    // The tests below rely on the knowledge that ART uses the low 10 bits
+    // of the type index as the hash into DexCache types array.
+    // Note: n == n + 1024 (mod 2^10), n + 1 != n + 1023 (mod 2^10).
+    public static class TestClass_0000 {}
+    public static class TestClass_0001 {}
+    public static class TestClass_0002 {}
+    public static class TestClass_0003 {}
+    public static class TestClass_0004 {}
+    public static class TestClass_0005 {}
+    public static class TestClass_0006 {}
+    public static class TestClass_0007 {}
+    public static class TestClass_0008 {}
+    public static class TestClass_0009 {}
+    public static class TestClass_0010 {}
+    public static class TestClass_0011 {}
+    public static class TestClass_0012 {}
+    public static class TestClass_0013 {}
+    public static class TestClass_0014 {}
+    public static class TestClass_0015 {}
+    public static class TestClass_0016 {}
+    public static class TestClass_0017 {}
+    public static class TestClass_0018 {}
+    public static class TestClass_0019 {}
+    public static class TestClass_0020 {}
+    public static class TestClass_0021 {}
+    public static class TestClass_0022 {}
+    public static class TestClass_0023 {}
+    public static class TestClass_0024 {}
+    public static class TestClass_0025 {}
+    public static class TestClass_0026 {}
+    public static class TestClass_0027 {}
+    public static class TestClass_0028 {}
+    public static class TestClass_0029 {}
+    public static class TestClass_0030 {}
+    public static class TestClass_0031 {}
+    public static class TestClass_0032 {}
+    public static class TestClass_0033 {}
+    public static class TestClass_0034 {}
+    public static class TestClass_0035 {}
+    public static class TestClass_0036 {}
+    public static class TestClass_0037 {}
+    public static class TestClass_0038 {}
+    public static class TestClass_0039 {}
+    public static class TestClass_0040 {}
+    public static class TestClass_0041 {}
+    public static class TestClass_0042 {}
+    public static class TestClass_0043 {}
+    public static class TestClass_0044 {}
+    public static class TestClass_0045 {}
+    public static class TestClass_0046 {}
+    public static class TestClass_0047 {}
+    public static class TestClass_0048 {}
+    public static class TestClass_0049 {}
+    public static class TestClass_0050 {}
+    public static class TestClass_0051 {}
+    public static class TestClass_0052 {}
+    public static class TestClass_0053 {}
+    public static class TestClass_0054 {}
+    public static class TestClass_0055 {}
+    public static class TestClass_0056 {}
+    public static class TestClass_0057 {}
+    public static class TestClass_0058 {}
+    public static class TestClass_0059 {}
+    public static class TestClass_0060 {}
+    public static class TestClass_0061 {}
+    public static class TestClass_0062 {}
+    public static class TestClass_0063 {}
+    public static class TestClass_0064 {}
+    public static class TestClass_0065 {}
+    public static class TestClass_0066 {}
+    public static class TestClass_0067 {}
+    public static class TestClass_0068 {}
+    public static class TestClass_0069 {}
+    public static class TestClass_0070 {}
+    public static class TestClass_0071 {}
+    public static class TestClass_0072 {}
+    public static class TestClass_0073 {}
+    public static class TestClass_0074 {}
+    public static class TestClass_0075 {}
+    public static class TestClass_0076 {}
+    public static class TestClass_0077 {}
+    public static class TestClass_0078 {}
+    public static class TestClass_0079 {}
+    public static class TestClass_0080 {}
+    public static class TestClass_0081 {}
+    public static class TestClass_0082 {}
+    public static class TestClass_0083 {}
+    public static class TestClass_0084 {}
+    public static class TestClass_0085 {}
+    public static class TestClass_0086 {}
+    public static class TestClass_0087 {}
+    public static class TestClass_0088 {}
+    public static class TestClass_0089 {}
+    public static class TestClass_0090 {}
+    public static class TestClass_0091 {}
+    public static class TestClass_0092 {}
+    public static class TestClass_0093 {}
+    public static class TestClass_0094 {}
+    public static class TestClass_0095 {}
+    public static class TestClass_0096 {}
+    public static class TestClass_0097 {}
+    public static class TestClass_0098 {}
+    public static class TestClass_0099 {}
+    public static class TestClass_0100 {}
+    public static class TestClass_0101 {}
+    public static class TestClass_0102 {}
+    public static class TestClass_0103 {}
+    public static class TestClass_0104 {}
+    public static class TestClass_0105 {}
+    public static class TestClass_0106 {}
+    public static class TestClass_0107 {}
+    public static class TestClass_0108 {}
+    public static class TestClass_0109 {}
+    public static class TestClass_0110 {}
+    public static class TestClass_0111 {}
+    public static class TestClass_0112 {}
+    public static class TestClass_0113 {}
+    public static class TestClass_0114 {}
+    public static class TestClass_0115 {}
+    public static class TestClass_0116 {}
+    public static class TestClass_0117 {}
+    public static class TestClass_0118 {}
+    public static class TestClass_0119 {}
+    public static class TestClass_0120 {}
+    public static class TestClass_0121 {}
+    public static class TestClass_0122 {}
+    public static class TestClass_0123 {}
+    public static class TestClass_0124 {}
+    public static class TestClass_0125 {}
+    public static class TestClass_0126 {}
+    public static class TestClass_0127 {}
+    public static class TestClass_0128 {}
+    public static class TestClass_0129 {}
+    public static class TestClass_0130 {}
+    public static class TestClass_0131 {}
+    public static class TestClass_0132 {}
+    public static class TestClass_0133 {}
+    public static class TestClass_0134 {}
+    public static class TestClass_0135 {}
+    public static class TestClass_0136 {}
+    public static class TestClass_0137 {}
+    public static class TestClass_0138 {}
+    public static class TestClass_0139 {}
+    public static class TestClass_0140 {}
+    public static class TestClass_0141 {}
+    public static class TestClass_0142 {}
+    public static class TestClass_0143 {}
+    public static class TestClass_0144 {}
+    public static class TestClass_0145 {}
+    public static class TestClass_0146 {}
+    public static class TestClass_0147 {}
+    public static class TestClass_0148 {}
+    public static class TestClass_0149 {}
+    public static class TestClass_0150 {}
+    public static class TestClass_0151 {}
+    public static class TestClass_0152 {}
+    public static class TestClass_0153 {}
+    public static class TestClass_0154 {}
+    public static class TestClass_0155 {}
+    public static class TestClass_0156 {}
+    public static class TestClass_0157 {}
+    public static class TestClass_0158 {}
+    public static class TestClass_0159 {}
+    public static class TestClass_0160 {}
+    public static class TestClass_0161 {}
+    public static class TestClass_0162 {}
+    public static class TestClass_0163 {}
+    public static class TestClass_0164 {}
+    public static class TestClass_0165 {}
+    public static class TestClass_0166 {}
+    public static class TestClass_0167 {}
+    public static class TestClass_0168 {}
+    public static class TestClass_0169 {}
+    public static class TestClass_0170 {}
+    public static class TestClass_0171 {}
+    public static class TestClass_0172 {}
+    public static class TestClass_0173 {}
+    public static class TestClass_0174 {}
+    public static class TestClass_0175 {}
+    public static class TestClass_0176 {}
+    public static class TestClass_0177 {}
+    public static class TestClass_0178 {}
+    public static class TestClass_0179 {}
+    public static class TestClass_0180 {}
+    public static class TestClass_0181 {}
+    public static class TestClass_0182 {}
+    public static class TestClass_0183 {}
+    public static class TestClass_0184 {}
+    public static class TestClass_0185 {}
+    public static class TestClass_0186 {}
+    public static class TestClass_0187 {}
+    public static class TestClass_0188 {}
+    public static class TestClass_0189 {}
+    public static class TestClass_0190 {}
+    public static class TestClass_0191 {}
+    public static class TestClass_0192 {}
+    public static class TestClass_0193 {}
+    public static class TestClass_0194 {}
+    public static class TestClass_0195 {}
+    public static class TestClass_0196 {}
+    public static class TestClass_0197 {}
+    public static class TestClass_0198 {}
+    public static class TestClass_0199 {}
+    public static class TestClass_0200 {}
+    public static class TestClass_0201 {}
+    public static class TestClass_0202 {}
+    public static class TestClass_0203 {}
+    public static class TestClass_0204 {}
+    public static class TestClass_0205 {}
+    public static class TestClass_0206 {}
+    public static class TestClass_0207 {}
+    public static class TestClass_0208 {}
+    public static class TestClass_0209 {}
+    public static class TestClass_0210 {}
+    public static class TestClass_0211 {}
+    public static class TestClass_0212 {}
+    public static class TestClass_0213 {}
+    public static class TestClass_0214 {}
+    public static class TestClass_0215 {}
+    public static class TestClass_0216 {}
+    public static class TestClass_0217 {}
+    public static class TestClass_0218 {}
+    public static class TestClass_0219 {}
+    public static class TestClass_0220 {}
+    public static class TestClass_0221 {}
+    public static class TestClass_0222 {}
+    public static class TestClass_0223 {}
+    public static class TestClass_0224 {}
+    public static class TestClass_0225 {}
+    public static class TestClass_0226 {}
+    public static class TestClass_0227 {}
+    public static class TestClass_0228 {}
+    public static class TestClass_0229 {}
+    public static class TestClass_0230 {}
+    public static class TestClass_0231 {}
+    public static class TestClass_0232 {}
+    public static class TestClass_0233 {}
+    public static class TestClass_0234 {}
+    public static class TestClass_0235 {}
+    public static class TestClass_0236 {}
+    public static class TestClass_0237 {}
+    public static class TestClass_0238 {}
+    public static class TestClass_0239 {}
+    public static class TestClass_0240 {}
+    public static class TestClass_0241 {}
+    public static class TestClass_0242 {}
+    public static class TestClass_0243 {}
+    public static class TestClass_0244 {}
+    public static class TestClass_0245 {}
+    public static class TestClass_0246 {}
+    public static class TestClass_0247 {}
+    public static class TestClass_0248 {}
+    public static class TestClass_0249 {}
+    public static class TestClass_0250 {}
+    public static class TestClass_0251 {}
+    public static class TestClass_0252 {}
+    public static class TestClass_0253 {}
+    public static class TestClass_0254 {}
+    public static class TestClass_0255 {}
+    public static class TestClass_0256 {}
+    public static class TestClass_0257 {}
+    public static class TestClass_0258 {}
+    public static class TestClass_0259 {}
+    public static class TestClass_0260 {}
+    public static class TestClass_0261 {}
+    public static class TestClass_0262 {}
+    public static class TestClass_0263 {}
+    public static class TestClass_0264 {}
+    public static class TestClass_0265 {}
+    public static class TestClass_0266 {}
+    public static class TestClass_0267 {}
+    public static class TestClass_0268 {}
+    public static class TestClass_0269 {}
+    public static class TestClass_0270 {}
+    public static class TestClass_0271 {}
+    public static class TestClass_0272 {}
+    public static class TestClass_0273 {}
+    public static class TestClass_0274 {}
+    public static class TestClass_0275 {}
+    public static class TestClass_0276 {}
+    public static class TestClass_0277 {}
+    public static class TestClass_0278 {}
+    public static class TestClass_0279 {}
+    public static class TestClass_0280 {}
+    public static class TestClass_0281 {}
+    public static class TestClass_0282 {}
+    public static class TestClass_0283 {}
+    public static class TestClass_0284 {}
+    public static class TestClass_0285 {}
+    public static class TestClass_0286 {}
+    public static class TestClass_0287 {}
+    public static class TestClass_0288 {}
+    public static class TestClass_0289 {}
+    public static class TestClass_0290 {}
+    public static class TestClass_0291 {}
+    public static class TestClass_0292 {}
+    public static class TestClass_0293 {}
+    public static class TestClass_0294 {}
+    public static class TestClass_0295 {}
+    public static class TestClass_0296 {}
+    public static class TestClass_0297 {}
+    public static class TestClass_0298 {}
+    public static class TestClass_0299 {}
+    public static class TestClass_0300 {}
+    public static class TestClass_0301 {}
+    public static class TestClass_0302 {}
+    public static class TestClass_0303 {}
+    public static class TestClass_0304 {}
+    public static class TestClass_0305 {}
+    public static class TestClass_0306 {}
+    public static class TestClass_0307 {}
+    public static class TestClass_0308 {}
+    public static class TestClass_0309 {}
+    public static class TestClass_0310 {}
+    public static class TestClass_0311 {}
+    public static class TestClass_0312 {}
+    public static class TestClass_0313 {}
+    public static class TestClass_0314 {}
+    public static class TestClass_0315 {}
+    public static class TestClass_0316 {}
+    public static class TestClass_0317 {}
+    public static class TestClass_0318 {}
+    public static class TestClass_0319 {}
+    public static class TestClass_0320 {}
+    public static class TestClass_0321 {}
+    public static class TestClass_0322 {}
+    public static class TestClass_0323 {}
+    public static class TestClass_0324 {}
+    public static class TestClass_0325 {}
+    public static class TestClass_0326 {}
+    public static class TestClass_0327 {}
+    public static class TestClass_0328 {}
+    public static class TestClass_0329 {}
+    public static class TestClass_0330 {}
+    public static class TestClass_0331 {}
+    public static class TestClass_0332 {}
+    public static class TestClass_0333 {}
+    public static class TestClass_0334 {}
+    public static class TestClass_0335 {}
+    public static class TestClass_0336 {}
+    public static class TestClass_0337 {}
+    public static class TestClass_0338 {}
+    public static class TestClass_0339 {}
+    public static class TestClass_0340 {}
+    public static class TestClass_0341 {}
+    public static class TestClass_0342 {}
+    public static class TestClass_0343 {}
+    public static class TestClass_0344 {}
+    public static class TestClass_0345 {}
+    public static class TestClass_0346 {}
+    public static class TestClass_0347 {}
+    public static class TestClass_0348 {}
+    public static class TestClass_0349 {}
+    public static class TestClass_0350 {}
+    public static class TestClass_0351 {}
+    public static class TestClass_0352 {}
+    public static class TestClass_0353 {}
+    public static class TestClass_0354 {}
+    public static class TestClass_0355 {}
+    public static class TestClass_0356 {}
+    public static class TestClass_0357 {}
+    public static class TestClass_0358 {}
+    public static class TestClass_0359 {}
+    public static class TestClass_0360 {}
+    public static class TestClass_0361 {}
+    public static class TestClass_0362 {}
+    public static class TestClass_0363 {}
+    public static class TestClass_0364 {}
+    public static class TestClass_0365 {}
+    public static class TestClass_0366 {}
+    public static class TestClass_0367 {}
+    public static class TestClass_0368 {}
+    public static class TestClass_0369 {}
+    public static class TestClass_0370 {}
+    public static class TestClass_0371 {}
+    public static class TestClass_0372 {}
+    public static class TestClass_0373 {}
+    public static class TestClass_0374 {}
+    public static class TestClass_0375 {}
+    public static class TestClass_0376 {}
+    public static class TestClass_0377 {}
+    public static class TestClass_0378 {}
+    public static class TestClass_0379 {}
+    public static class TestClass_0380 {}
+    public static class TestClass_0381 {}
+    public static class TestClass_0382 {}
+    public static class TestClass_0383 {}
+    public static class TestClass_0384 {}
+    public static class TestClass_0385 {}
+    public static class TestClass_0386 {}
+    public static class TestClass_0387 {}
+    public static class TestClass_0388 {}
+    public static class TestClass_0389 {}
+    public static class TestClass_0390 {}
+    public static class TestClass_0391 {}
+    public static class TestClass_0392 {}
+    public static class TestClass_0393 {}
+    public static class TestClass_0394 {}
+    public static class TestClass_0395 {}
+    public static class TestClass_0396 {}
+    public static class TestClass_0397 {}
+    public static class TestClass_0398 {}
+    public static class TestClass_0399 {}
+    public static class TestClass_0400 {}
+    public static class TestClass_0401 {}
+    public static class TestClass_0402 {}
+    public static class TestClass_0403 {}
+    public static class TestClass_0404 {}
+    public static class TestClass_0405 {}
+    public static class TestClass_0406 {}
+    public static class TestClass_0407 {}
+    public static class TestClass_0408 {}
+    public static class TestClass_0409 {}
+    public static class TestClass_0410 {}
+    public static class TestClass_0411 {}
+    public static class TestClass_0412 {}
+    public static class TestClass_0413 {}
+    public static class TestClass_0414 {}
+    public static class TestClass_0415 {}
+    public static class TestClass_0416 {}
+    public static class TestClass_0417 {}
+    public static class TestClass_0418 {}
+    public static class TestClass_0419 {}
+    public static class TestClass_0420 {}
+    public static class TestClass_0421 {}
+    public static class TestClass_0422 {}
+    public static class TestClass_0423 {}
+    public static class TestClass_0424 {}
+    public static class TestClass_0425 {}
+    public static class TestClass_0426 {}
+    public static class TestClass_0427 {}
+    public static class TestClass_0428 {}
+    public static class TestClass_0429 {}
+    public static class TestClass_0430 {}
+    public static class TestClass_0431 {}
+    public static class TestClass_0432 {}
+    public static class TestClass_0433 {}
+    public static class TestClass_0434 {}
+    public static class TestClass_0435 {}
+    public static class TestClass_0436 {}
+    public static class TestClass_0437 {}
+    public static class TestClass_0438 {}
+    public static class TestClass_0439 {}
+    public static class TestClass_0440 {}
+    public static class TestClass_0441 {}
+    public static class TestClass_0442 {}
+    public static class TestClass_0443 {}
+    public static class TestClass_0444 {}
+    public static class TestClass_0445 {}
+    public static class TestClass_0446 {}
+    public static class TestClass_0447 {}
+    public static class TestClass_0448 {}
+    public static class TestClass_0449 {}
+    public static class TestClass_0450 {}
+    public static class TestClass_0451 {}
+    public static class TestClass_0452 {}
+    public static class TestClass_0453 {}
+    public static class TestClass_0454 {}
+    public static class TestClass_0455 {}
+    public static class TestClass_0456 {}
+    public static class TestClass_0457 {}
+    public static class TestClass_0458 {}
+    public static class TestClass_0459 {}
+    public static class TestClass_0460 {}
+    public static class TestClass_0461 {}
+    public static class TestClass_0462 {}
+    public static class TestClass_0463 {}
+    public static class TestClass_0464 {}
+    public static class TestClass_0465 {}
+    public static class TestClass_0466 {}
+    public static class TestClass_0467 {}
+    public static class TestClass_0468 {}
+    public static class TestClass_0469 {}
+    public static class TestClass_0470 {}
+    public static class TestClass_0471 {}
+    public static class TestClass_0472 {}
+    public static class TestClass_0473 {}
+    public static class TestClass_0474 {}
+    public static class TestClass_0475 {}
+    public static class TestClass_0476 {}
+    public static class TestClass_0477 {}
+    public static class TestClass_0478 {}
+    public static class TestClass_0479 {}
+    public static class TestClass_0480 {}
+    public static class TestClass_0481 {}
+    public static class TestClass_0482 {}
+    public static class TestClass_0483 {}
+    public static class TestClass_0484 {}
+    public static class TestClass_0485 {}
+    public static class TestClass_0486 {}
+    public static class TestClass_0487 {}
+    public static class TestClass_0488 {}
+    public static class TestClass_0489 {}
+    public static class TestClass_0490 {}
+    public static class TestClass_0491 {}
+    public static class TestClass_0492 {}
+    public static class TestClass_0493 {}
+    public static class TestClass_0494 {}
+    public static class TestClass_0495 {}
+    public static class TestClass_0496 {}
+    public static class TestClass_0497 {}
+    public static class TestClass_0498 {}
+    public static class TestClass_0499 {}
+    public static class TestClass_0500 {}
+    public static class TestClass_0501 {}
+    public static class TestClass_0502 {}
+    public static class TestClass_0503 {}
+    public static class TestClass_0504 {}
+    public static class TestClass_0505 {}
+    public static class TestClass_0506 {}
+    public static class TestClass_0507 {}
+    public static class TestClass_0508 {}
+    public static class TestClass_0509 {}
+    public static class TestClass_0510 {}
+    public static class TestClass_0511 {}
+    public static class TestClass_0512 {}
+    public static class TestClass_0513 {}
+    public static class TestClass_0514 {}
+    public static class TestClass_0515 {}
+    public static class TestClass_0516 {}
+    public static class TestClass_0517 {}
+    public static class TestClass_0518 {}
+    public static class TestClass_0519 {}
+    public static class TestClass_0520 {}
+    public static class TestClass_0521 {}
+    public static class TestClass_0522 {}
+    public static class TestClass_0523 {}
+    public static class TestClass_0524 {}
+    public static class TestClass_0525 {}
+    public static class TestClass_0526 {}
+    public static class TestClass_0527 {}
+    public static class TestClass_0528 {}
+    public static class TestClass_0529 {}
+    public static class TestClass_0530 {}
+    public static class TestClass_0531 {}
+    public static class TestClass_0532 {}
+    public static class TestClass_0533 {}
+    public static class TestClass_0534 {}
+    public static class TestClass_0535 {}
+    public static class TestClass_0536 {}
+    public static class TestClass_0537 {}
+    public static class TestClass_0538 {}
+    public static class TestClass_0539 {}
+    public static class TestClass_0540 {}
+    public static class TestClass_0541 {}
+    public static class TestClass_0542 {}
+    public static class TestClass_0543 {}
+    public static class TestClass_0544 {}
+    public static class TestClass_0545 {}
+    public static class TestClass_0546 {}
+    public static class TestClass_0547 {}
+    public static class TestClass_0548 {}
+    public static class TestClass_0549 {}
+    public static class TestClass_0550 {}
+    public static class TestClass_0551 {}
+    public static class TestClass_0552 {}
+    public static class TestClass_0553 {}
+    public static class TestClass_0554 {}
+    public static class TestClass_0555 {}
+    public static class TestClass_0556 {}
+    public static class TestClass_0557 {}
+    public static class TestClass_0558 {}
+    public static class TestClass_0559 {}
+    public static class TestClass_0560 {}
+    public static class TestClass_0561 {}
+    public static class TestClass_0562 {}
+    public static class TestClass_0563 {}
+    public static class TestClass_0564 {}
+    public static class TestClass_0565 {}
+    public static class TestClass_0566 {}
+    public static class TestClass_0567 {}
+    public static class TestClass_0568 {}
+    public static class TestClass_0569 {}
+    public static class TestClass_0570 {}
+    public static class TestClass_0571 {}
+    public static class TestClass_0572 {}
+    public static class TestClass_0573 {}
+    public static class TestClass_0574 {}
+    public static class TestClass_0575 {}
+    public static class TestClass_0576 {}
+    public static class TestClass_0577 {}
+    public static class TestClass_0578 {}
+    public static class TestClass_0579 {}
+    public static class TestClass_0580 {}
+    public static class TestClass_0581 {}
+    public static class TestClass_0582 {}
+    public static class TestClass_0583 {}
+    public static class TestClass_0584 {}
+    public static class TestClass_0585 {}
+    public static class TestClass_0586 {}
+    public static class TestClass_0587 {}
+    public static class TestClass_0588 {}
+    public static class TestClass_0589 {}
+    public static class TestClass_0590 {}
+    public static class TestClass_0591 {}
+    public static class TestClass_0592 {}
+    public static class TestClass_0593 {}
+    public static class TestClass_0594 {}
+    public static class TestClass_0595 {}
+    public static class TestClass_0596 {}
+    public static class TestClass_0597 {}
+    public static class TestClass_0598 {}
+    public static class TestClass_0599 {}
+    public static class TestClass_0600 {}
+    public static class TestClass_0601 {}
+    public static class TestClass_0602 {}
+    public static class TestClass_0603 {}
+    public static class TestClass_0604 {}
+    public static class TestClass_0605 {}
+    public static class TestClass_0606 {}
+    public static class TestClass_0607 {}
+    public static class TestClass_0608 {}
+    public static class TestClass_0609 {}
+    public static class TestClass_0610 {}
+    public static class TestClass_0611 {}
+    public static class TestClass_0612 {}
+    public static class TestClass_0613 {}
+    public static class TestClass_0614 {}
+    public static class TestClass_0615 {}
+    public static class TestClass_0616 {}
+    public static class TestClass_0617 {}
+    public static class TestClass_0618 {}
+    public static class TestClass_0619 {}
+    public static class TestClass_0620 {}
+    public static class TestClass_0621 {}
+    public static class TestClass_0622 {}
+    public static class TestClass_0623 {}
+    public static class TestClass_0624 {}
+    public static class TestClass_0625 {}
+    public static class TestClass_0626 {}
+    public static class TestClass_0627 {}
+    public static class TestClass_0628 {}
+    public static class TestClass_0629 {}
+    public static class TestClass_0630 {}
+    public static class TestClass_0631 {}
+    public static class TestClass_0632 {}
+    public static class TestClass_0633 {}
+    public static class TestClass_0634 {}
+    public static class TestClass_0635 {}
+    public static class TestClass_0636 {}
+    public static class TestClass_0637 {}
+    public static class TestClass_0638 {}
+    public static class TestClass_0639 {}
+    public static class TestClass_0640 {}
+    public static class TestClass_0641 {}
+    public static class TestClass_0642 {}
+    public static class TestClass_0643 {}
+    public static class TestClass_0644 {}
+    public static class TestClass_0645 {}
+    public static class TestClass_0646 {}
+    public static class TestClass_0647 {}
+    public static class TestClass_0648 {}
+    public static class TestClass_0649 {}
+    public static class TestClass_0650 {}
+    public static class TestClass_0651 {}
+    public static class TestClass_0652 {}
+    public static class TestClass_0653 {}
+    public static class TestClass_0654 {}
+    public static class TestClass_0655 {}
+    public static class TestClass_0656 {}
+    public static class TestClass_0657 {}
+    public static class TestClass_0658 {}
+    public static class TestClass_0659 {}
+    public static class TestClass_0660 {}
+    public static class TestClass_0661 {}
+    public static class TestClass_0662 {}
+    public static class TestClass_0663 {}
+    public static class TestClass_0664 {}
+    public static class TestClass_0665 {}
+    public static class TestClass_0666 {}
+    public static class TestClass_0667 {}
+    public static class TestClass_0668 {}
+    public static class TestClass_0669 {}
+    public static class TestClass_0670 {}
+    public static class TestClass_0671 {}
+    public static class TestClass_0672 {}
+    public static class TestClass_0673 {}
+    public static class TestClass_0674 {}
+    public static class TestClass_0675 {}
+    public static class TestClass_0676 {}
+    public static class TestClass_0677 {}
+    public static class TestClass_0678 {}
+    public static class TestClass_0679 {}
+    public static class TestClass_0680 {}
+    public static class TestClass_0681 {}
+    public static class TestClass_0682 {}
+    public static class TestClass_0683 {}
+    public static class TestClass_0684 {}
+    public static class TestClass_0685 {}
+    public static class TestClass_0686 {}
+    public static class TestClass_0687 {}
+    public static class TestClass_0688 {}
+    public static class TestClass_0689 {}
+    public static class TestClass_0690 {}
+    public static class TestClass_0691 {}
+    public static class TestClass_0692 {}
+    public static class TestClass_0693 {}
+    public static class TestClass_0694 {}
+    public static class TestClass_0695 {}
+    public static class TestClass_0696 {}
+    public static class TestClass_0697 {}
+    public static class TestClass_0698 {}
+    public static class TestClass_0699 {}
+    public static class TestClass_0700 {}
+    public static class TestClass_0701 {}
+    public static class TestClass_0702 {}
+    public static class TestClass_0703 {}
+    public static class TestClass_0704 {}
+    public static class TestClass_0705 {}
+    public static class TestClass_0706 {}
+    public static class TestClass_0707 {}
+    public static class TestClass_0708 {}
+    public static class TestClass_0709 {}
+    public static class TestClass_0710 {}
+    public static class TestClass_0711 {}
+    public static class TestClass_0712 {}
+    public static class TestClass_0713 {}
+    public static class TestClass_0714 {}
+    public static class TestClass_0715 {}
+    public static class TestClass_0716 {}
+    public static class TestClass_0717 {}
+    public static class TestClass_0718 {}
+    public static class TestClass_0719 {}
+    public static class TestClass_0720 {}
+    public static class TestClass_0721 {}
+    public static class TestClass_0722 {}
+    public static class TestClass_0723 {}
+    public static class TestClass_0724 {}
+    public static class TestClass_0725 {}
+    public static class TestClass_0726 {}
+    public static class TestClass_0727 {}
+    public static class TestClass_0728 {}
+    public static class TestClass_0729 {}
+    public static class TestClass_0730 {}
+    public static class TestClass_0731 {}
+    public static class TestClass_0732 {}
+    public static class TestClass_0733 {}
+    public static class TestClass_0734 {}
+    public static class TestClass_0735 {}
+    public static class TestClass_0736 {}
+    public static class TestClass_0737 {}
+    public static class TestClass_0738 {}
+    public static class TestClass_0739 {}
+    public static class TestClass_0740 {}
+    public static class TestClass_0741 {}
+    public static class TestClass_0742 {}
+    public static class TestClass_0743 {}
+    public static class TestClass_0744 {}
+    public static class TestClass_0745 {}
+    public static class TestClass_0746 {}
+    public static class TestClass_0747 {}
+    public static class TestClass_0748 {}
+    public static class TestClass_0749 {}
+    public static class TestClass_0750 {}
+    public static class TestClass_0751 {}
+    public static class TestClass_0752 {}
+    public static class TestClass_0753 {}
+    public static class TestClass_0754 {}
+    public static class TestClass_0755 {}
+    public static class TestClass_0756 {}
+    public static class TestClass_0757 {}
+    public static class TestClass_0758 {}
+    public static class TestClass_0759 {}
+    public static class TestClass_0760 {}
+    public static class TestClass_0761 {}
+    public static class TestClass_0762 {}
+    public static class TestClass_0763 {}
+    public static class TestClass_0764 {}
+    public static class TestClass_0765 {}
+    public static class TestClass_0766 {}
+    public static class TestClass_0767 {}
+    public static class TestClass_0768 {}
+    public static class TestClass_0769 {}
+    public static class TestClass_0770 {}
+    public static class TestClass_0771 {}
+    public static class TestClass_0772 {}
+    public static class TestClass_0773 {}
+    public static class TestClass_0774 {}
+    public static class TestClass_0775 {}
+    public static class TestClass_0776 {}
+    public static class TestClass_0777 {}
+    public static class TestClass_0778 {}
+    public static class TestClass_0779 {}
+    public static class TestClass_0780 {}
+    public static class TestClass_0781 {}
+    public static class TestClass_0782 {}
+    public static class TestClass_0783 {}
+    public static class TestClass_0784 {}
+    public static class TestClass_0785 {}
+    public static class TestClass_0786 {}
+    public static class TestClass_0787 {}
+    public static class TestClass_0788 {}
+    public static class TestClass_0789 {}
+    public static class TestClass_0790 {}
+    public static class TestClass_0791 {}
+    public static class TestClass_0792 {}
+    public static class TestClass_0793 {}
+    public static class TestClass_0794 {}
+    public static class TestClass_0795 {}
+    public static class TestClass_0796 {}
+    public static class TestClass_0797 {}
+    public static class TestClass_0798 {}
+    public static class TestClass_0799 {}
+    public static class TestClass_0800 {}
+    public static class TestClass_0801 {}
+    public static class TestClass_0802 {}
+    public static class TestClass_0803 {}
+    public static class TestClass_0804 {}
+    public static class TestClass_0805 {}
+    public static class TestClass_0806 {}
+    public static class TestClass_0807 {}
+    public static class TestClass_0808 {}
+    public static class TestClass_0809 {}
+    public static class TestClass_0810 {}
+    public static class TestClass_0811 {}
+    public static class TestClass_0812 {}
+    public static class TestClass_0813 {}
+    public static class TestClass_0814 {}
+    public static class TestClass_0815 {}
+    public static class TestClass_0816 {}
+    public static class TestClass_0817 {}
+    public static class TestClass_0818 {}
+    public static class TestClass_0819 {}
+    public static class TestClass_0820 {}
+    public static class TestClass_0821 {}
+    public static class TestClass_0822 {}
+    public static class TestClass_0823 {}
+    public static class TestClass_0824 {}
+    public static class TestClass_0825 {}
+    public static class TestClass_0826 {}
+    public static class TestClass_0827 {}
+    public static class TestClass_0828 {}
+    public static class TestClass_0829 {}
+    public static class TestClass_0830 {}
+    public static class TestClass_0831 {}
+    public static class TestClass_0832 {}
+    public static class TestClass_0833 {}
+    public static class TestClass_0834 {}
+    public static class TestClass_0835 {}
+    public static class TestClass_0836 {}
+    public static class TestClass_0837 {}
+    public static class TestClass_0838 {}
+    public static class TestClass_0839 {}
+    public static class TestClass_0840 {}
+    public static class TestClass_0841 {}
+    public static class TestClass_0842 {}
+    public static class TestClass_0843 {}
+    public static class TestClass_0844 {}
+    public static class TestClass_0845 {}
+    public static class TestClass_0846 {}
+    public static class TestClass_0847 {}
+    public static class TestClass_0848 {}
+    public static class TestClass_0849 {}
+    public static class TestClass_0850 {}
+    public static class TestClass_0851 {}
+    public static class TestClass_0852 {}
+    public static class TestClass_0853 {}
+    public static class TestClass_0854 {}
+    public static class TestClass_0855 {}
+    public static class TestClass_0856 {}
+    public static class TestClass_0857 {}
+    public static class TestClass_0858 {}
+    public static class TestClass_0859 {}
+    public static class TestClass_0860 {}
+    public static class TestClass_0861 {}
+    public static class TestClass_0862 {}
+    public static class TestClass_0863 {}
+    public static class TestClass_0864 {}
+    public static class TestClass_0865 {}
+    public static class TestClass_0866 {}
+    public static class TestClass_0867 {}
+    public static class TestClass_0868 {}
+    public static class TestClass_0869 {}
+    public static class TestClass_0870 {}
+    public static class TestClass_0871 {}
+    public static class TestClass_0872 {}
+    public static class TestClass_0873 {}
+    public static class TestClass_0874 {}
+    public static class TestClass_0875 {}
+    public static class TestClass_0876 {}
+    public static class TestClass_0877 {}
+    public static class TestClass_0878 {}
+    public static class TestClass_0879 {}
+    public static class TestClass_0880 {}
+    public static class TestClass_0881 {}
+    public static class TestClass_0882 {}
+    public static class TestClass_0883 {}
+    public static class TestClass_0884 {}
+    public static class TestClass_0885 {}
+    public static class TestClass_0886 {}
+    public static class TestClass_0887 {}
+    public static class TestClass_0888 {}
+    public static class TestClass_0889 {}
+    public static class TestClass_0890 {}
+    public static class TestClass_0891 {}
+    public static class TestClass_0892 {}
+    public static class TestClass_0893 {}
+    public static class TestClass_0894 {}
+    public static class TestClass_0895 {}
+    public static class TestClass_0896 {}
+    public static class TestClass_0897 {}
+    public static class TestClass_0898 {}
+    public static class TestClass_0899 {}
+    public static class TestClass_0900 {}
+    public static class TestClass_0901 {}
+    public static class TestClass_0902 {}
+    public static class TestClass_0903 {}
+    public static class TestClass_0904 {}
+    public static class TestClass_0905 {}
+    public static class TestClass_0906 {}
+    public static class TestClass_0907 {}
+    public static class TestClass_0908 {}
+    public static class TestClass_0909 {}
+    public static class TestClass_0910 {}
+    public static class TestClass_0911 {}
+    public static class TestClass_0912 {}
+    public static class TestClass_0913 {}
+    public static class TestClass_0914 {}
+    public static class TestClass_0915 {}
+    public static class TestClass_0916 {}
+    public static class TestClass_0917 {}
+    public static class TestClass_0918 {}
+    public static class TestClass_0919 {}
+    public static class TestClass_0920 {}
+    public static class TestClass_0921 {}
+    public static class TestClass_0922 {}
+    public static class TestClass_0923 {}
+    public static class TestClass_0924 {}
+    public static class TestClass_0925 {}
+    public static class TestClass_0926 {}
+    public static class TestClass_0927 {}
+    public static class TestClass_0928 {}
+    public static class TestClass_0929 {}
+    public static class TestClass_0930 {}
+    public static class TestClass_0931 {}
+    public static class TestClass_0932 {}
+    public static class TestClass_0933 {}
+    public static class TestClass_0934 {}
+    public static class TestClass_0935 {}
+    public static class TestClass_0936 {}
+    public static class TestClass_0937 {}
+    public static class TestClass_0938 {}
+    public static class TestClass_0939 {}
+    public static class TestClass_0940 {}
+    public static class TestClass_0941 {}
+    public static class TestClass_0942 {}
+    public static class TestClass_0943 {}
+    public static class TestClass_0944 {}
+    public static class TestClass_0945 {}
+    public static class TestClass_0946 {}
+    public static class TestClass_0947 {}
+    public static class TestClass_0948 {}
+    public static class TestClass_0949 {}
+    public static class TestClass_0950 {}
+    public static class TestClass_0951 {}
+    public static class TestClass_0952 {}
+    public static class TestClass_0953 {}
+    public static class TestClass_0954 {}
+    public static class TestClass_0955 {}
+    public static class TestClass_0956 {}
+    public static class TestClass_0957 {}
+    public static class TestClass_0958 {}
+    public static class TestClass_0959 {}
+    public static class TestClass_0960 {}
+    public static class TestClass_0961 {}
+    public static class TestClass_0962 {}
+    public static class TestClass_0963 {}
+    public static class TestClass_0964 {}
+    public static class TestClass_0965 {}
+    public static class TestClass_0966 {}
+    public static class TestClass_0967 {}
+    public static class TestClass_0968 {}
+    public static class TestClass_0969 {}
+    public static class TestClass_0970 {}
+    public static class TestClass_0971 {}
+    public static class TestClass_0972 {}
+    public static class TestClass_0973 {}
+    public static class TestClass_0974 {}
+    public static class TestClass_0975 {}
+    public static class TestClass_0976 {}
+    public static class TestClass_0977 {}
+    public static class TestClass_0978 {}
+    public static class TestClass_0979 {}
+    public static class TestClass_0980 {}
+    public static class TestClass_0981 {}
+    public static class TestClass_0982 {}
+    public static class TestClass_0983 {}
+    public static class TestClass_0984 {}
+    public static class TestClass_0985 {}
+    public static class TestClass_0986 {}
+    public static class TestClass_0987 {}
+    public static class TestClass_0988 {}
+    public static class TestClass_0989 {}
+    public static class TestClass_0990 {}
+    public static class TestClass_0991 {}
+    public static class TestClass_0992 {}
+    public static class TestClass_0993 {}
+    public static class TestClass_0994 {}
+    public static class TestClass_0995 {}
+    public static class TestClass_0996 {}
+    public static class TestClass_0997 {}
+    public static class TestClass_0998 {}
+    public static class TestClass_0999 {}
+    public static class TestClass_1000 {}
+    public static class TestClass_1001 {}
+    public static class TestClass_1002 {}
+    public static class TestClass_1003 {}
+    public static class TestClass_1004 {}
+    public static class TestClass_1005 {}
+    public static class TestClass_1006 {}
+    public static class TestClass_1007 {}
+    public static class TestClass_1008 {}
+    public static class TestClass_1009 {}
+    public static class TestClass_1010 {}
+    public static class TestClass_1011 {}
+    public static class TestClass_1012 {}
+    public static class TestClass_1013 {}
+    public static class TestClass_1014 {}
+    public static class TestClass_1015 {}
+    public static class TestClass_1016 {}
+    public static class TestClass_1017 {}
+    public static class TestClass_1018 {}
+    public static class TestClass_1019 {}
+    public static class TestClass_1020 {}
+    public static class TestClass_1021 {}
+    public static class TestClass_1022 {}
+    public static class TestClass_1023 {}
+    public static class TestClass_1024 {}
+
+    public void timeConstClassWithConflict(int count) {
+        Class<?> class0001 = TestClass_0001.class;
+        for (int i = 0; i < count; ++i) {
+            $noinline$foo(class0001);  // Prevent LICM on the TestClass_xxxx.class below.
+            $noinline$foo(TestClass_0000.class);
+            $noinline$foo(TestClass_1024.class);
+        }
+    }
+
+    public void timeConstClassWithoutConflict(int count) {
+        Class<?> class0000 = TestClass_0000.class;
+        for (int i = 0; i < count; ++i) {
+            $noinline$foo(class0000);  // Prevent LICM on the TestClass_xxxx.class below.
+            $noinline$foo(TestClass_0001.class);
+            $noinline$foo(TestClass_1023.class);
+        }
+    }
+
+    static void $noinline$foo(Class<?> s) {
+        if (doThrow) { throw new Error(); }
+    }
+
+    public static boolean doThrow = false;
+}
diff --git a/benchmark/const-string/src/ConstStringBenchmark.java b/benchmark/const-string/src/ConstStringBenchmark.java
index 2beb0a4..2359a5f 100644
--- a/benchmark/const-string/src/ConstStringBenchmark.java
+++ b/benchmark/const-string/src/ConstStringBenchmark.java
@@ -18,6 +18,7 @@
     // Initialize 1025 strings with consecutive string indexes in the dex file.
     // The tests below rely on the knowledge that ART uses the low 10 bits
     // of the string index as the hash into DexCache strings array.
+    // Note: n == n + 1024 (mod 2^10), n + 1 != n + 1023 (mod 2^10).
     public static final String string_0000 = "TestString_0000";
     public static final String string_0001 = "TestString_0001";
     public static final String string_0002 = "TestString_0002";
@@ -1045,21 +1046,21 @@
     public static final String string_1024 = "TestString_1024";
 
     public void timeConstStringsWithConflict(int count) {
-      for (int i = 0; i < count; ++i) {
-        $noinline$foo("TestString_0000");
-        $noinline$foo("TestString_1024");
-      }
+        for (int i = 0; i < count; ++i) {
+            $noinline$foo("TestString_0000");
+            $noinline$foo("TestString_1024");
+        }
     }
 
     public void timeConstStringsWithoutConflict(int count) {
-      for (int i = 0; i < count; ++i) {
-        $noinline$foo("TestString_0001");
-        $noinline$foo("TestString_1023");
-      }
+        for (int i = 0; i < count; ++i) {
+            $noinline$foo("TestString_0001");
+            $noinline$foo("TestString_1023");
+        }
     }
 
     static void $noinline$foo(String s) {
-      if (doThrow) { throw new Error(); }
+        if (doThrow) { throw new Error(); }
     }
 
     public static boolean doThrow = false;
diff --git a/benchmark/string-indexof/info.txt b/benchmark/string-indexof/info.txt
new file mode 100644
index 0000000..cc04217
--- /dev/null
+++ b/benchmark/string-indexof/info.txt
@@ -0,0 +1 @@
+Benchmarks for repeating String.indexOf() instructions in a loop.
diff --git a/benchmark/string-indexof/src/StringIndexOfBenchmark.java b/benchmark/string-indexof/src/StringIndexOfBenchmark.java
new file mode 100644
index 0000000..481a27a
--- /dev/null
+++ b/benchmark/string-indexof/src/StringIndexOfBenchmark.java
@@ -0,0 +1,122 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class StringIndexOfBenchmark {
+    public static final String string36 = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ";  // length = 36
+
+    public void timeIndexOf0(int count) {
+        final char c = '0';
+        String s = string36;
+        for (int i = 0; i < count; ++i) {
+            $noinline$indexOf(s, c);
+        }
+    }
+
+    public void timeIndexOf1(int count) {
+        final char c = '1';
+        String s = string36;
+        for (int i = 0; i < count; ++i) {
+            $noinline$indexOf(s, c);
+        }
+    }
+
+    public void timeIndexOf2(int count) {
+        final char c = '2';
+        String s = string36;
+        for (int i = 0; i < count; ++i) {
+            $noinline$indexOf(s, c);
+        }
+    }
+
+    public void timeIndexOf3(int count) {
+        final char c = '3';
+        String s = string36;
+        for (int i = 0; i < count; ++i) {
+            $noinline$indexOf(s, c);
+        }
+    }
+
+    public void timeIndexOf4(int count) {
+        final char c = '4';
+        String s = string36;
+        for (int i = 0; i < count; ++i) {
+            $noinline$indexOf(s, c);
+        }
+    }
+
+    public void timeIndexOf7(int count) {
+        final char c = '7';
+        String s = string36;
+        for (int i = 0; i < count; ++i) {
+            $noinline$indexOf(s, c);
+        }
+    }
+
+    public void timeIndexOf8(int count) {
+        final char c = '8';
+        String s = string36;
+        for (int i = 0; i < count; ++i) {
+            $noinline$indexOf(s, c);
+        }
+    }
+
+    public void timeIndexOfF(int count) {
+        final char c = 'F';
+        String s = string36;
+        for (int i = 0; i < count; ++i) {
+            $noinline$indexOf(s, c);
+        }
+    }
+
+    public void timeIndexOfG(int count) {
+        final char c = 'G';
+        String s = string36;
+        for (int i = 0; i < count; ++i) {
+            $noinline$indexOf(s, c);
+        }
+    }
+
+    public void timeIndexOfV(int count) {
+        final char c = 'V';
+        String s = string36;
+        for (int i = 0; i < count; ++i) {
+            $noinline$indexOf(s, c);
+        }
+    }
+
+    public void timeIndexOfW(int count) {
+        final char c = 'W';
+        String s = string36;
+        for (int i = 0; i < count; ++i) {
+            $noinline$indexOf(s, c);
+        }
+    }
+
+    public void timeIndexOf_(int count) {
+        final char c = '_';
+        String s = string36;
+        for (int i = 0; i < count; ++i) {
+            $noinline$indexOf(s, c);
+        }
+    }
+
+    static int $noinline$indexOf(String s, char c) {
+        if (doThrow) { throw new Error(); }
+        return s.indexOf(c);
+    }
+
+    public static boolean doThrow = false;
+}
diff --git a/build/Android.bp b/build/Android.bp
index 9156027..cd9d74a 100644
--- a/build/Android.bp
+++ b/build/Android.bp
@@ -145,6 +145,10 @@
         "external/vixl/src",
         "external/zlib",
     ],
+
+    tidy_checks: [
+        "-google-default-arguments",
+    ],
 }
 
 art_debug_defaults {
diff --git a/build/Android.common_test.mk b/build/Android.common_test.mk
index d2e3371..291db8b 100644
--- a/build/Android.common_test.mk
+++ b/build/Android.common_test.mk
@@ -124,12 +124,17 @@
 ART_TEST_RUN_TEST_MULTI_IMAGE ?= $(ART_TEST_FULL)
 
 # Define the command run on test failure. $(1) is the name of the test. Executed by the shell.
+# If the test was a top-level make target (e.g. `test-art-host-gtest-codegen_test64`), the command
+# fails with exit status 1 (returned by the last `grep` statement below).
+# Otherwise (e.g., if the test was run as a prerequisite of a compound test command, such as
+# `test-art-host-gtest-codegen_test`), the command does not fail, as this would break rules running
+# ART_TEST_PREREQ_FINISHED as one of their actions, which expects *all* prerequisites *not* to fail.
 define ART_TEST_FAILED
   ( [ -f $(ART_HOST_TEST_DIR)/skipped/$(1) ] || \
     (mkdir -p $(ART_HOST_TEST_DIR)/failed/ && touch $(ART_HOST_TEST_DIR)/failed/$(1) && \
       echo $(ART_TEST_KNOWN_FAILING) | grep -q $(1) \
         && (echo -e "$(1) \e[91mKNOWN FAILURE\e[0m") \
-        || (echo -e "$(1) \e[91mFAILED\e[0m" >&2 )))
+        || (echo -e "$(1) \e[91mFAILED\e[0m" >&2; echo $(MAKECMDGOALS) | grep -q -v $(1))))
 endef
 
 ifeq ($(ART_TEST_QUIET),true)
diff --git a/build/Android.gtest.mk b/build/Android.gtest.mk
index 4f273e5..4fce235 100644
--- a/build/Android.gtest.mk
+++ b/build/Android.gtest.mk
@@ -25,6 +25,7 @@
 GTEST_DEX_DIRECTORIES := \
   AbstractMethod \
   AllFields \
+  DexToDexDecompiler \
   ExceptionHandle \
   GetMethodSignature \
   ImageLayoutA \
@@ -105,7 +106,8 @@
 ART_GTEST_stub_test_DEX_DEPS := AllFields
 ART_GTEST_transaction_test_DEX_DEPS := Transaction
 ART_GTEST_type_lookup_table_test_DEX_DEPS := Lookup
-ART_GTEST_verifier_deps_test_DEX_DEPS := VerifierDeps
+ART_GTEST_verifier_deps_test_DEX_DEPS := VerifierDeps MultiDex
+ART_GTEST_dex_to_dex_decompiler_test_DEX_DEPS := VerifierDeps DexToDexDecompiler
 
 # The elf writer test has dependencies on core.oat.
 ART_GTEST_elf_writer_test_HOST_DEPS := $(HOST_CORE_IMAGE_optimizing_no-pic_64) $(HOST_CORE_IMAGE_optimizing_no-pic_32)
diff --git a/cmdline/cmdline.h b/cmdline/cmdline.h
index dec9c83..6e042c3 100644
--- a/cmdline/cmdline.h
+++ b/cmdline/cmdline.h
@@ -234,7 +234,7 @@
     // Checks for --boot-image location.
     {
       std::string boot_image_location = boot_image_location_;
-      size_t file_name_idx = boot_image_location.rfind("/");
+      size_t file_name_idx = boot_image_location.rfind('/');
       if (file_name_idx == std::string::npos) {  // Prevent a InsertIsaDirectory check failure.
         *error_msg = "Boot image location must have a / in it";
         return false;
@@ -244,7 +244,7 @@
       // This prevents a common error "Could not create an image space..." when initing the Runtime.
       if (file_name_idx != std::string::npos) {
         std::string no_file_name = boot_image_location.substr(0, file_name_idx);
-        size_t ancestor_dirs_idx = no_file_name.rfind("/");
+        size_t ancestor_dirs_idx = no_file_name.rfind('/');
 
         std::string parent_dir_name;
         if (ancestor_dirs_idx != std::string::npos) {
diff --git a/cmdline/cmdline_parser.h b/cmdline/cmdline_parser.h
index cfc0967..d82fd48 100644
--- a/cmdline/cmdline_parser.h
+++ b/cmdline/cmdline_parser.h
@@ -390,7 +390,7 @@
         // Unlike regular argument definitions, when a value gets parsed into its
         // stronger type, we just throw it away.
 
-        if (ign.find("_") != std::string::npos) {  // Does the arg-def have a wildcard?
+        if (ign.find('_') != std::string::npos) {  // Does the arg-def have a wildcard?
           // pretend this is a string, e.g. -Xjitconfig:<anythinggoeshere>
           auto&& builder = Define(ignore_name).template WithType<std::string>().IntoIgnore();
           assert(&builder == this);
diff --git a/cmdline/cmdline_parser_test.cc b/cmdline/cmdline_parser_test.cc
index cad5104..550e8c4 100644
--- a/cmdline/cmdline_parser_test.cc
+++ b/cmdline/cmdline_parser_test.cc
@@ -78,7 +78,7 @@
     return memcmp(std::addressof(expected), std::addressof(actual), sizeof(expected)) == 0;
   }
 
-  bool UsuallyEquals(const char* expected, std::string actual) {
+  bool UsuallyEquals(const char* expected, const std::string& actual) {
     return std::string(expected) == actual;
   }
 
@@ -129,7 +129,7 @@
     parser_ = ParsedOptions::MakeParser(false);  // do not ignore unrecognized options
   }
 
-  static ::testing::AssertionResult IsResultSuccessful(CmdlineResult result) {
+  static ::testing::AssertionResult IsResultSuccessful(const CmdlineResult& result) {
     if (result.IsSuccess()) {
       return ::testing::AssertionSuccess();
     } else {
@@ -138,7 +138,7 @@
     }
   }
 
-  static ::testing::AssertionResult IsResultFailure(CmdlineResult result,
+  static ::testing::AssertionResult IsResultFailure(const CmdlineResult& result,
                                                     CmdlineResult::Status failure_status) {
     if (result.IsSuccess()) {
       return ::testing::AssertionFailure() << " got success but expected failure: "
diff --git a/cmdline/cmdline_types.h b/cmdline/cmdline_types.h
index 13a3235..156ca9e 100644
--- a/cmdline/cmdline_types.h
+++ b/cmdline/cmdline_types.h
@@ -182,7 +182,7 @@
 struct CmdlineType<Memory<Divisor>> : CmdlineTypeParser<Memory<Divisor>> {
   using typename CmdlineTypeParser<Memory<Divisor>>::Result;
 
-  Result Parse(const std::string arg) {
+  Result Parse(const std::string& arg) {
     CMDLINE_DEBUG_LOG << "Parsing memory: " << arg << std::endl;
     size_t val = ParseMemoryOption(arg.c_str(), Divisor);
     CMDLINE_DEBUG_LOG << "Memory parsed to size_t value: " << val << std::endl;
@@ -496,11 +496,7 @@
 struct XGcOption {
   // These defaults are used when the command line arguments for -Xgc:
   // are either omitted completely or partially.
-  gc::CollectorType collector_type_ = kUseReadBarrier ?
-                                           // If RB is enabled (currently a build-time decision),
-                                           // use CC as the default GC.
-                                           gc::kCollectorTypeCC :
-                                           gc::kCollectorTypeDefault;
+  gc::CollectorType collector_type_ = gc::kCollectorTypeDefault;
   bool verify_pre_gc_heap_ = false;
   bool verify_pre_sweeping_heap_ = kIsDebugBuild;
   bool verify_post_gc_heap_ = false;
@@ -580,10 +576,6 @@
     : background_collector_type_(background_collector_type) {}
   BackgroundGcOption()
     : background_collector_type_(gc::kCollectorTypeNone) {
-
-    if (kUseReadBarrier) {
-      background_collector_type_ = gc::kCollectorTypeCCBackground;  // Background compaction for CC.
-    }
   }
 
   operator gc::CollectorType() const { return background_collector_type_; }
@@ -696,7 +688,7 @@
   }
 
   static std::string RemovePrefix(const std::string& source) {
-    size_t prefix_idx = source.find(":");
+    size_t prefix_idx = source.find(':');
 
     if (prefix_idx == std::string::npos) {
       return "";
diff --git a/cmdline/detail/cmdline_parse_argument_detail.h b/cmdline/detail/cmdline_parse_argument_detail.h
index 84beff5..14eac30 100644
--- a/cmdline/detail/cmdline_parse_argument_detail.h
+++ b/cmdline/detail/cmdline_parse_argument_detail.h
@@ -108,7 +108,7 @@
       // If this is true, then the wildcard matching later on can still fail, so this is not
       // a guarantee that the argument is correct, it's more of a strong hint that the
       // user-provided input *probably* was trying to match this argument.
-      size_t MaybeMatches(TokenRange token_list) const {
+      size_t MaybeMatches(const TokenRange& token_list) const {
         auto best_match = FindClosestMatch(token_list);
 
         return best_match.second;
@@ -118,7 +118,7 @@
       //
       // Returns the token range that was the closest match and the # of tokens that
       // this range was matched up until.
-      std::pair<const TokenRange*, size_t> FindClosestMatch(TokenRange token_list) const {
+      std::pair<const TokenRange*, size_t> FindClosestMatch(const TokenRange& token_list) const {
         const TokenRange* best_match_ptr = nullptr;
 
         size_t best_match = 0;
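The cmdline changes above apply two small C++ idioms: use the single-character overloads of std::string::find/rfind instead of one-character string literals, and pass non-trivially-copyable arguments by const reference rather than by value. A minimal standalone sketch of both idioms (illustrative only, not taken from the patch):

#include <iostream>
#include <string>

// Takes the string by const reference: no copy is made at the call site.
bool HasDirectorySeparator(const std::string& path) {
  // The char overload avoids building a temporary one-character string
  // and searching for it as a substring.
  return path.rfind('/') != std::string::npos;
}

int main() {
  std::cout << HasDirectorySeparator("/system/framework/boot.art") << "\n";  // prints 1
  std::cout << HasDirectorySeparator("boot.art") << "\n";                    // prints 0
  return 0;
}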
diff --git a/compiler/Android.bp b/compiler/Android.bp
index 61f682c..e2a450d 100644
--- a/compiler/Android.bp
+++ b/compiler/Android.bp
@@ -28,6 +28,7 @@
         "compiled_method.cc",
         "debug/elf_debug_writer.cc",
         "dex/dex_to_dex_compiler.cc",
+        "dex/dex_to_dex_decompiler.cc",
         "dex/verified_method.cc",
         "dex/verification_results.cc",
         "dex/quick_compiler_callbacks.cc",
@@ -42,6 +43,7 @@
         "linker/vector_output_stream.cc",
         "linker/relative_patcher.cc",
         "jit/jit_compiler.cc",
+        "jit/jit_logger.cc",
         "jni/quick/calling_convention.cc",
         "jni/quick/jni_compiler.cc",
         "optimizing/block_builder.cc",
@@ -105,6 +107,7 @@
                 "optimizing/instruction_simplifier_arm.cc",
                 "optimizing/instruction_simplifier_shared.cc",
                 "optimizing/intrinsics_arm.cc",
+                "optimizing/intrinsics_arm_vixl.cc",
                 "utils/arm/assembler_arm.cc",
                 "utils/arm/assembler_arm_vixl.cc",
                 "utils/arm/assembler_thumb2.cc",
@@ -203,7 +206,8 @@
 
 gensrcs {
     name: "art_compiler_operator_srcs",
-    cmd: "art/tools/generate-operator-out.py art/compiler $in > $out",
+    cmd: "$(location generate-operator-out.py) art/compiler $(in) > $(out)",
+    tool_files: ["generate-operator-out.py"],
     srcs: [
         "compiled_method.h",
         "dex/dex_to_dex_compiler.h",
@@ -309,6 +313,7 @@
     srcs: [
         "compiled_method_test.cc",
         "debug/dwarf/dwarf_test.cc",
+        "dex/dex_to_dex_decompiler_test.cc",
         "driver/compiled_method_storage_test.cc",
         "driver/compiler_driver_test.cc",
         "elf_writer_test.cc",
@@ -342,6 +347,7 @@
         "utils/string_reference_test.cc",
         "utils/swap_space_test.cc",
         "utils/test_dex_file_builder_test.cc",
+        "verifier_deps_test.cc",
 
         "jni/jni_cfi_test.cc",
         "optimizing/codegen_test.cc",
@@ -415,6 +421,7 @@
         },
         mips: {
             srcs: [
+                "optimizing/emit_swap_mips_test.cc",
                 "utils/mips/assembler_mips_test.cc",
                 "utils/mips/assembler_mips32r6_test.cc",
             ],
diff --git a/compiler/common_compiler_test.cc b/compiler/common_compiler_test.cc
index 51bf9ea..2f9164c 100644
--- a/compiler/common_compiler_test.cc
+++ b/compiler/common_compiler_test.cc
@@ -156,7 +156,7 @@
 
     const InstructionSet instruction_set = kRuntimeISA;
     // Take the default set of instruction features from the build.
-    instruction_set_features_.reset(InstructionSetFeatures::FromCppDefines());
+    instruction_set_features_ = InstructionSetFeatures::FromCppDefines();
 
     runtime_->SetInstructionSet(instruction_set);
     for (int i = 0; i < Runtime::kLastCalleeSaveType; i++) {
diff --git a/compiler/compiler.h b/compiler/compiler.h
index 9a69456..2ca0b77 100644
--- a/compiler/compiler.h
+++ b/compiler/compiler.h
@@ -18,6 +18,7 @@
 #define ART_COMPILER_COMPILER_H_
 
 #include "dex_file.h"
+#include "base/mutex.h"
 #include "os.h"
 
 namespace art {
@@ -34,6 +35,7 @@
 class CompiledMethod;
 template<class T> class Handle;
 class OatWriter;
+class Thread;
 
 class Compiler {
  public:
diff --git a/compiler/dex/dex_to_dex_compiler.cc b/compiler/dex/dex_to_dex_compiler.cc
index 9c1d72b..cf69f46 100644
--- a/compiler/dex/dex_to_dex_compiler.cc
+++ b/compiler/dex/dex_to_dex_compiler.cc
@@ -233,6 +233,8 @@
                  << " by replacing it with 2 NOPs at dex pc "
                  << StringPrintf("0x%x", dex_pc) << " in method "
                  << GetDexFile().PrettyMethod(unit_.GetDexMethodIndex(), true);
+  quickened_info_.push_back(QuickenedInfo(dex_pc, inst->VRegA_21c()));
+  quickened_info_.push_back(QuickenedInfo(dex_pc, inst->VRegB_21c()));
   // We are modifying 4 consecutive bytes.
   inst->SetOpcode(Instruction::NOP);
   inst->SetVRegA_10x(0u);  // keep compliant with verifier.
diff --git a/compiler/dex/dex_to_dex_compiler.h b/compiler/dex/dex_to_dex_compiler.h
index 3fad6d4..0a00d45 100644
--- a/compiler/dex/dex_to_dex_compiler.h
+++ b/compiler/dex/dex_to_dex_compiler.h
@@ -17,8 +17,6 @@
 #ifndef ART_COMPILER_DEX_DEX_TO_DEX_COMPILER_H_
 #define ART_COMPILER_DEX_DEX_TO_DEX_COMPILER_H_
 
-#include "jni.h"
-
 #include "dex_file.h"
 #include "invoke_type.h"
 
diff --git a/compiler/dex/dex_to_dex_decompiler.cc b/compiler/dex/dex_to_dex_decompiler.cc
new file mode 100644
index 0000000..051125e
--- /dev/null
+++ b/compiler/dex/dex_to_dex_decompiler.cc
@@ -0,0 +1,198 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "dex_to_dex_decompiler.h"
+
+#include "base/logging.h"
+#include "base/mutex.h"
+#include "dex_file-inl.h"
+#include "dex_instruction-inl.h"
+#include "optimizing/bytecode_utils.h"
+
+namespace art {
+namespace optimizer {
+
+class DexDecompiler {
+ public:
+  DexDecompiler(const DexFile::CodeItem& code_item, const ArrayRef<const uint8_t>& quickened_info)
+    : code_item_(code_item),
+      quickened_info_ptr_(quickened_info.data()),
+      quickened_info_end_(quickened_info.data() + quickened_info.size()) {}
+
+  bool Decompile();
+
+ private:
+  void DecompileInstanceFieldAccess(Instruction* inst,
+                                    uint32_t dex_pc,
+                                    Instruction::Code new_opcode) {
+    uint16_t index = GetIndexAt(dex_pc);
+    inst->SetOpcode(new_opcode);
+    inst->SetVRegC_22c(index);
+  }
+
+  void DecompileInvokeVirtual(Instruction* inst,
+                              uint32_t dex_pc,
+                              Instruction::Code new_opcode,
+                              bool is_range) {
+    uint16_t index = GetIndexAt(dex_pc);
+    inst->SetOpcode(new_opcode);
+    if (is_range) {
+      inst->SetVRegB_3rc(index);
+    } else {
+      inst->SetVRegB_35c(index);
+    }
+  }
+
+  void DecompileNop(Instruction* inst, uint32_t dex_pc) {
+    if (quickened_info_ptr_ == quickened_info_end_) {
+      return;
+    }
+    const uint8_t* temporary_pointer = quickened_info_ptr_;
+    uint32_t quickened_pc = DecodeUnsignedLeb128(&temporary_pointer);
+    if (quickened_pc != dex_pc) {
+      return;
+    }
+    uint16_t reference_index = GetIndexAt(dex_pc);
+    uint16_t type_index = GetIndexAt(dex_pc);
+    inst->SetOpcode(Instruction::CHECK_CAST);
+    inst->SetVRegA_21c(reference_index);
+    inst->SetVRegB_21c(type_index);
+  }
+
+  uint16_t GetIndexAt(uint32_t dex_pc) {
+    // Note that, as a side effect, DecodeUnsignedLeb128 updates the given pointer
+    // to the new position in the buffer.
+    DCHECK_LT(quickened_info_ptr_, quickened_info_end_);
+    uint32_t quickened_pc = DecodeUnsignedLeb128(&quickened_info_ptr_);
+    DCHECK_LT(quickened_info_ptr_, quickened_info_end_);
+    uint16_t index = DecodeUnsignedLeb128(&quickened_info_ptr_);
+    DCHECK_LE(quickened_info_ptr_, quickened_info_end_);
+    DCHECK_EQ(quickened_pc, dex_pc);
+    return index;
+  }
+
+  const DexFile::CodeItem& code_item_;
+  const uint8_t* quickened_info_ptr_;
+  const uint8_t* const quickened_info_end_;
+
+  DISALLOW_COPY_AND_ASSIGN(DexDecompiler);
+};
+
+bool DexDecompiler::Decompile() {
+  // We need to iterate over the code item, and not over the quickening data,
+  // because the RETURN_VOID quickening is not encoded in the quickening data. Since
+  // unquickening is rarely needed and not performance sensitive, it is not worth the
+  // extra storage to also encode the RETURN_VOID quickening in the quickening data.
+  for (CodeItemIterator it(code_item_); !it.Done(); it.Advance()) {
+    uint32_t dex_pc = it.CurrentDexPc();
+    Instruction* inst = const_cast<Instruction*>(&it.CurrentInstruction());
+
+    switch (inst->Opcode()) {
+      case Instruction::RETURN_VOID_NO_BARRIER:
+        inst->SetOpcode(Instruction::RETURN_VOID);
+        break;
+
+      case Instruction::NOP:
+        DecompileNop(inst, dex_pc);
+        break;
+
+      case Instruction::IGET_QUICK:
+        DecompileInstanceFieldAccess(inst, dex_pc, Instruction::IGET);
+        break;
+
+      case Instruction::IGET_WIDE_QUICK:
+        DecompileInstanceFieldAccess(inst, dex_pc, Instruction::IGET_WIDE);
+        break;
+
+      case Instruction::IGET_OBJECT_QUICK:
+        DecompileInstanceFieldAccess(inst, dex_pc, Instruction::IGET_OBJECT);
+        break;
+
+      case Instruction::IGET_BOOLEAN_QUICK:
+        DecompileInstanceFieldAccess(inst, dex_pc, Instruction::IGET_BOOLEAN);
+        break;
+
+      case Instruction::IGET_BYTE_QUICK:
+        DecompileInstanceFieldAccess(inst, dex_pc, Instruction::IGET_BYTE);
+        break;
+
+      case Instruction::IGET_CHAR_QUICK:
+        DecompileInstanceFieldAccess(inst, dex_pc, Instruction::IGET_CHAR);
+        break;
+
+      case Instruction::IGET_SHORT_QUICK:
+        DecompileInstanceFieldAccess(inst, dex_pc, Instruction::IGET_SHORT);
+        break;
+
+      case Instruction::IPUT_QUICK:
+        DecompileInstanceFieldAccess(inst, dex_pc, Instruction::IPUT);
+        break;
+
+      case Instruction::IPUT_BOOLEAN_QUICK:
+        DecompileInstanceFieldAccess(inst, dex_pc, Instruction::IPUT_BOOLEAN);
+        break;
+
+      case Instruction::IPUT_BYTE_QUICK:
+        DecompileInstanceFieldAccess(inst, dex_pc, Instruction::IPUT_BYTE);
+        break;
+
+      case Instruction::IPUT_CHAR_QUICK:
+        DecompileInstanceFieldAccess(inst, dex_pc, Instruction::IPUT_CHAR);
+        break;
+
+      case Instruction::IPUT_SHORT_QUICK:
+        DecompileInstanceFieldAccess(inst, dex_pc, Instruction::IPUT_SHORT);
+        break;
+
+      case Instruction::IPUT_WIDE_QUICK:
+        DecompileInstanceFieldAccess(inst, dex_pc, Instruction::IPUT_WIDE);
+        break;
+
+      case Instruction::IPUT_OBJECT_QUICK:
+        DecompileInstanceFieldAccess(inst, dex_pc, Instruction::IPUT_OBJECT);
+        break;
+
+      case Instruction::INVOKE_VIRTUAL_QUICK:
+        DecompileInvokeVirtual(inst, dex_pc, Instruction::INVOKE_VIRTUAL, false);
+        break;
+
+      case Instruction::INVOKE_VIRTUAL_RANGE_QUICK:
+        DecompileInvokeVirtual(inst, dex_pc, Instruction::INVOKE_VIRTUAL_RANGE, true);
+        break;
+
+      default:
+        break;
+    }
+  }
+
+  if (quickened_info_ptr_ != quickened_info_end_) {
+    LOG(ERROR) << "Failed to use all values in quickening info."
+               << " Actual: " << std::hex << quickened_info_ptr_
+               << " Expected: " << quickened_info_end_;
+    return false;
+  }
+
+  return true;
+}
+
+bool ArtDecompileDEX(const DexFile::CodeItem& code_item,
+                     const ArrayRef<const uint8_t>& quickened_info) {
+  DexDecompiler decompiler(code_item, quickened_info);
+  return decompiler.Decompile();
+}
+
+}  // namespace optimizer
+}  // namespace art
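For reference, the quickening info consumed by GetIndexAt() above is a flat stream of (dex_pc, index) pairs, each value ULEB128-encoded and read in the order the quickened instructions appear in the code item. A minimal standalone sketch of that decoding loop, using a hand-rolled decoder and hypothetical input bytes (this is not ART's DecodeUnsignedLeb128):

#include <cstdint>
#include <cstdio>
#include <vector>

// Minimal ULEB128 decoder; advances *ptr past the decoded value.
static uint32_t DecodeUleb128(const uint8_t** ptr) {
  uint32_t result = 0;
  int shift = 0;
  uint8_t byte;
  do {
    byte = *(*ptr)++;
    result |= static_cast<uint32_t>(byte & 0x7f) << shift;
    shift += 7;
  } while ((byte & 0x80) != 0);
  return result;
}

int main() {
  // Hypothetical stream holding two pairs: (dex_pc=0x4, index=3) and (dex_pc=0x91, index=300).
  const std::vector<uint8_t> stream = {0x04, 0x03, 0x91, 0x01, 0xac, 0x02};
  const uint8_t* ptr = stream.data();
  const uint8_t* const end = ptr + stream.size();
  while (ptr != end) {
    uint32_t dex_pc = DecodeUleb128(&ptr);
    uint32_t index = DecodeUleb128(&ptr);
    std::printf("dex_pc=0x%x index=%u\n", dex_pc, index);
  }
  return 0;
}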
diff --git a/compiler/dex/dex_to_dex_decompiler.h b/compiler/dex/dex_to_dex_decompiler.h
new file mode 100644
index 0000000..5502ca2
--- /dev/null
+++ b/compiler/dex/dex_to_dex_decompiler.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_DEX_DEX_TO_DEX_DECOMPILER_H_
+#define ART_COMPILER_DEX_DEX_TO_DEX_DECOMPILER_H_
+
+#include "base/array_ref.h"
+#include "dex_file.h"
+
+namespace art {
+namespace optimizer {
+
+// "Decompile", that is unquicken, the code item provided, given the
+// associated quickening data.
+// TODO: code_item isn't really a const element, but changing it
+// to non-const has too many repercussions on the code base. We make it
+// consistent with DexToDexCompiler, but we should really change it to
+// DexFile::CodeItem*.
+bool ArtDecompileDEX(const DexFile::CodeItem& code_item,
+                     const ArrayRef<const uint8_t>& quickened_data);
+
+}  // namespace optimizer
+}  // namespace art
+
+#endif  // ART_COMPILER_DEX_DEX_TO_DEX_DECOMPILER_H_
diff --git a/compiler/dex/dex_to_dex_decompiler_test.cc b/compiler/dex/dex_to_dex_decompiler_test.cc
new file mode 100644
index 0000000..ea6c7a2
--- /dev/null
+++ b/compiler/dex/dex_to_dex_decompiler_test.cc
@@ -0,0 +1,136 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "dex/dex_to_dex_decompiler.h"
+
+#include "class_linker.h"
+#include "compiler/common_compiler_test.h"
+#include "compiler/compiled_method.h"
+#include "compiler/driver/compiler_options.h"
+#include "compiler/driver/compiler_driver.h"
+#include "compiler_callbacks.h"
+#include "dex_file.h"
+#include "handle_scope-inl.h"
+#include "verifier/method_verifier-inl.h"
+#include "mirror/class_loader.h"
+#include "runtime.h"
+#include "thread.h"
+#include "scoped_thread_state_change-inl.h"
+
+namespace art {
+
+class DexToDexDecompilerTest : public CommonCompilerTest {
+ public:
+  void CompileAll(jobject class_loader) REQUIRES(!Locks::mutator_lock_) {
+    TimingLogger timings("CompilerDriverTest::CompileAll", false, false);
+    TimingLogger::ScopedTiming t(__FUNCTION__, &timings);
+    compiler_options_->boot_image_ = false;
+    compiler_options_->SetCompilerFilter(CompilerFilter::kInterpretOnly);
+    compiler_driver_->CompileAll(class_loader,
+                                 GetDexFiles(class_loader),
+                                 /* verifier_deps */ nullptr,
+                                 &timings);
+  }
+
+  void RunTest(const char* dex_name) {
+    Thread* self = Thread::Current();
+    // First load the original dex file.
+    jobject original_class_loader;
+    {
+      ScopedObjectAccess soa(self);
+      original_class_loader = LoadDex(dex_name);
+    }
+    const DexFile* original_dex_file = GetDexFiles(original_class_loader)[0];
+
+    // Load the dex file again and make it writable so that its methods can be quickened.
+    jobject class_loader;
+    const DexFile* updated_dex_file = nullptr;
+    {
+      ScopedObjectAccess soa(self);
+      class_loader = LoadDex(dex_name);
+      updated_dex_file = GetDexFiles(class_loader)[0];
+      Runtime::Current()->GetClassLinker()->RegisterDexFile(
+          *updated_dex_file, soa.Decode<mirror::ClassLoader>(class_loader).Ptr());
+    }
+    // The dex files should be identical.
+    int cmp = memcmp(original_dex_file->Begin(),
+                     updated_dex_file->Begin(),
+                     updated_dex_file->Size());
+    ASSERT_EQ(0, cmp);
+
+    updated_dex_file->EnableWrite();
+    CompileAll(class_loader);
+    // The dex files should be different after quickening.
+    cmp = memcmp(original_dex_file->Begin(), updated_dex_file->Begin(), updated_dex_file->Size());
+    ASSERT_NE(0, cmp);
+
+    // Unquicken the dex file.
+    for (uint32_t i = 0; i < updated_dex_file->NumClassDefs(); ++i) {
+      const DexFile::ClassDef& class_def = updated_dex_file->GetClassDef(i);
+      const uint8_t* class_data = updated_dex_file->GetClassData(class_def);
+      if (class_data == nullptr) {
+        continue;
+      }
+      ClassDataItemIterator it(*updated_dex_file, class_data);
+      // Skip fields
+      while (it.HasNextStaticField()) {
+        it.Next();
+      }
+      while (it.HasNextInstanceField()) {
+        it.Next();
+      }
+
+      // Unquicken each method.
+      while (it.HasNextDirectMethod()) {
+        uint32_t method_idx = it.GetMemberIndex();
+        CompiledMethod* compiled_method =
+            compiler_driver_->GetCompiledMethod(MethodReference(updated_dex_file, method_idx));
+        ArrayRef<const uint8_t> table;
+        if (compiled_method != nullptr) {
+          table = compiled_method->GetVmapTable();
+        }
+        optimizer::ArtDecompileDEX(*it.GetMethodCodeItem(), table);
+        it.Next();
+      }
+      while (it.HasNextVirtualMethod()) {
+        uint32_t method_idx = it.GetMemberIndex();
+        CompiledMethod* compiled_method =
+            compiler_driver_->GetCompiledMethod(MethodReference(updated_dex_file, method_idx));
+        ArrayRef<const uint8_t> table;
+        if (compiled_method != nullptr) {
+          table = compiled_method->GetVmapTable();
+        }
+        optimizer::ArtDecompileDEX(*it.GetMethodCodeItem(), table);
+        it.Next();
+      }
+      DCHECK(!it.HasNext());
+    }
+
+    // Make sure after unquickening we go back to the same contents as the original dex file.
+    cmp = memcmp(original_dex_file->Begin(), updated_dex_file->Begin(), updated_dex_file->Size());
+    ASSERT_EQ(0, cmp);
+  }
+};
+
+TEST_F(DexToDexDecompilerTest, VerifierDeps) {
+  RunTest("VerifierDeps");
+}
+
+TEST_F(DexToDexDecompilerTest, DexToDexDecompiler) {
+  RunTest("DexToDexDecompiler");
+}
+
+}  // namespace art
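The test above relies on a byte-level round-trip property: after quickening, the writable copy must differ from the original dex file, and after ArtDecompileDEX it must match it again byte for byte. A minimal standalone sketch of that round-trip check, with hypothetical Transform/Undo stand-ins for the quicken/unquicken steps:

#include <cassert>
#include <cstdint>
#include <cstring>
#include <vector>

// Stand-ins for quickening and unquickening: any reversible byte transform works here.
static void Transform(std::vector<uint8_t>* data) { (*data)[0] ^= 0xff; }
static void Undo(std::vector<uint8_t>* data) { (*data)[0] ^= 0xff; }

int main() {
  const std::vector<uint8_t> original = {0x64, 0x65, 0x78, 0x0a};  // "dex\n"
  std::vector<uint8_t> copy = original;

  Transform(&copy);
  assert(std::memcmp(original.data(), copy.data(), copy.size()) != 0);  // now differs

  Undo(&copy);
  assert(std::memcmp(original.data(), copy.data(), copy.size()) == 0);  // identical again
  return 0;
}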
diff --git a/compiler/dex/quick_compiler_callbacks.h b/compiler/dex/quick_compiler_callbacks.h
index 34fd88b..db0fdaa 100644
--- a/compiler/dex/quick_compiler_callbacks.h
+++ b/compiler/dex/quick_compiler_callbacks.h
@@ -18,6 +18,7 @@
 #define ART_COMPILER_DEX_QUICK_COMPILER_CALLBACKS_H_
 
 #include "compiler_callbacks.h"
+#include "verifier/verifier_deps.h"
 
 namespace art {
 
@@ -46,16 +47,16 @@
     }
 
     verifier::VerifierDeps* GetVerifierDeps() const OVERRIDE {
-      return verifier_deps_;
+      return verifier_deps_.get();
     }
 
-    void SetVerifierDeps(verifier::VerifierDeps* deps) {
-      verifier_deps_ = deps;
+    void SetVerifierDeps(verifier::VerifierDeps* deps) OVERRIDE {
+      verifier_deps_.reset(deps);
     }
 
   private:
     VerificationResults* const verification_results_;
-    verifier::VerifierDeps* verifier_deps_;
+    std::unique_ptr<verifier::VerifierDeps> verifier_deps_;
 };
 
 }  // namespace art
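The change above moves ownership of the VerifierDeps into the callbacks object: the member becomes a std::unique_ptr and SetVerifierDeps() takes over the raw pointer, so any previously held instance is deleted automatically when the pointer is reset. A minimal standalone sketch of that ownership pattern, with hypothetical Deps/Holder names:

#include <iostream>
#include <memory>

struct Deps {
  explicit Deps(int id) : id(id) { std::cout << "create " << id << "\n"; }
  ~Deps() { std::cout << "destroy " << id << "\n"; }
  int id;
};

class Holder {
 public:
  Deps* GetDeps() const { return deps_.get(); }
  void SetDeps(Deps* deps) { deps_.reset(deps); }  // Takes ownership of the raw pointer.

 private:
  std::unique_ptr<Deps> deps_;
};

int main() {
  Holder holder;
  holder.SetDeps(new Deps(1));
  holder.SetDeps(new Deps(2));  // "destroy 1" is printed here; no manual delete needed.
  return 0;                     // "destroy 2" is printed when holder goes out of scope.
}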
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
index dbde41c..aa0d10b 100644
--- a/compiler/driver/compiler_driver.cc
+++ b/compiler/driver/compiler_driver.cc
@@ -24,6 +24,8 @@
 #include <malloc.h>  // For mallinfo
 #endif
 
+#include "android-base/strings.h"
+
 #include "art_field-inl.h"
 #include "art_method-inl.h"
 #include "base/array_ref.h"
@@ -37,6 +39,7 @@
 #include "compiled_class.h"
 #include "compiled_method.h"
 #include "compiler.h"
+#include "compiler_callbacks.h"
 #include "compiler_driver-inl.h"
 #include "dex_compilation_unit.h"
 #include "dex_file-inl.h"
@@ -72,6 +75,7 @@
 #include "verifier/method_verifier.h"
 #include "verifier/method_verifier-inl.h"
 #include "verifier/verifier_log_mode.h"
+#include "verifier/verifier_deps.h"
 
 namespace art {
 
@@ -390,6 +394,7 @@
 
 void CompilerDriver::CompileAll(jobject class_loader,
                                 const std::vector<const DexFile*>& dex_files,
+                                verifier::VerifierDeps* verifier_deps,
                                 TimingLogger* timings) {
   DCHECK(!Runtime::Current()->IsStarted());
 
@@ -401,7 +406,7 @@
   // 2) Resolve all classes
   // 3) Attempt to verify all classes
   // 4) Attempt to initialize image classes, and trivially initialized classes
-  PreCompile(class_loader, dex_files, timings);
+  PreCompile(class_loader, dex_files, verifier_deps, timings);
   if (GetCompilerOptions().IsBootImage()) {
     // We don't need to setup the intrinsics for non boot image compilation, as
     // those compilations will pick up a boot image that have the ArtMethod already
@@ -533,14 +538,9 @@
               : optimizer::DexToDexCompilationLevel::kRequired);
     }
   } else if ((access_flags & kAccNative) != 0) {
-    const InstructionSet instruction_set = driver->GetInstructionSet();
-    const bool use_generic_jni =
-        // Are we extracting only and have support for generic JNI down calls?
-        (!driver->GetCompilerOptions().IsJniCompilationEnabled() &&
-             InstructionSetHasGenericJniStub(instruction_set)) ||
-        // Always punt to generic JNI for MIPS because of no support for @CriticalNative. b/31743474
-        (instruction_set == kMips || instruction_set == kMips64);
-    if (use_generic_jni) {
+    // Are we extracting only and have support for generic JNI down calls?
+    if (!driver->GetCompilerOptions().IsJniCompilationEnabled() &&
+        InstructionSetHasGenericJniStub(driver->GetInstructionSet())) {
       // Leaving this empty will trigger the generic JNI version
     } else {
       // Look-up the ArtMethod associated with this code_item (if any)
@@ -673,7 +673,7 @@
 
   InitializeThreadPools();
 
-  PreCompile(jclass_loader, dex_files, timings);
+  PreCompile(jclass_loader, dex_files, /* verifier_deps */ nullptr, timings);
 
   // Can we run DEX-to-DEX compiler on this class ?
   optimizer::DexToDexCompilationLevel dex_to_dex_compilation_level =
@@ -870,6 +870,7 @@
 
 void CompilerDriver::PreCompile(jobject class_loader,
                                 const std::vector<const DexFile*>& dex_files,
+                                verifier::VerifierDeps* verifier_deps,
                                 TimingLogger* timings) {
   CheckThreadPools();
 
@@ -903,7 +904,7 @@
     VLOG(compiler) << "Resolve const-strings: " << GetMemoryUsageString(false);
   }
 
-  Verify(class_loader, dex_files, timings);
+  Verify(class_loader, dex_files, verifier_deps, timings);
   VLOG(compiler) << "Verify: " << GetMemoryUsageString(false);
 
   if (had_hard_verifier_failure_ && GetCompilerOptions().AbortOnHardVerifierFailure()) {
@@ -967,11 +968,12 @@
     return true;
   }
   DCHECK(profile_compilation_info_ != nullptr);
-  bool result = profile_compilation_info_->ContainsClass(dex_file, class_idx);
+  const DexFile::ClassDef& class_def = dex_file.GetClassDef(class_idx);
+  uint16_t type_idx = class_def.class_idx_;
+  bool result = profile_compilation_info_->ContainsClass(dex_file, type_idx);
   if (kDebugProfileGuidedCompilation) {
-    LOG(INFO) << "[ProfileGuidedCompilation] "
-        << (result ? "Verified" : "Skipped") << " method:"
-        << dex_file.GetClassDescriptor(dex_file.GetClassDef(class_idx));
+    LOG(INFO) << "[ProfileGuidedCompilation] " << (result ? "Verified" : "Skipped") << " method:"
+        << dex_file.GetClassDescriptor(class_def);
   }
   return result;
 }
@@ -1519,7 +1521,7 @@
 
   if (!use_dex_cache) {
     bool method_in_image = false;
-    const std::vector<gc::space::ImageSpace*> image_spaces = heap->GetBootImageSpaces();
+    const std::vector<gc::space::ImageSpace*>& image_spaces = heap->GetBootImageSpaces();
     for (gc::space::ImageSpace* image_space : image_spaces) {
       const auto& method_section = image_space->GetImageHeader().GetMethodsSection();
       if (method_section.Contains(reinterpret_cast<uint8_t*>(method) - image_space->Begin())) {
@@ -1929,15 +1931,61 @@
   }
 }
 
-void CompilerDriver::Verify(jobject class_loader,
+void CompilerDriver::Verify(jobject jclass_loader,
                             const std::vector<const DexFile*>& dex_files,
+                            verifier::VerifierDeps* verifier_deps,
                             TimingLogger* timings) {
+  if (verifier_deps != nullptr) {
+    TimingLogger::ScopedTiming t("Fast Verify", timings);
+    ScopedObjectAccess soa(Thread::Current());
+    StackHandleScope<2> hs(soa.Self());
+    Handle<mirror::ClassLoader> class_loader(
+        hs.NewHandle(soa.Decode<mirror::ClassLoader>(jclass_loader)));
+    MutableHandle<mirror::Class> cls(hs.NewHandle<mirror::Class>(nullptr));
+    ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
+    if (verifier_deps->ValidateDependencies(class_loader, soa.Self())) {
+      // We successfully validated the dependencies, now update class status
+      // of verified classes. Note that the dependencies also record which classes
+      // could not be fully verified; we could try again, but that would hurt verification
+      // time. So instead we assume these classes still need to be verified at
+      // runtime.
+      for (const DexFile* dex_file : dex_files) {
+        // Fetch the list of unverified classes and turn it into a set for faster
+        // lookups.
+        const std::vector<uint16_t>& unverified_classes =
+            verifier_deps->GetUnverifiedClasses(*dex_file);
+        std::set<uint16_t> set(unverified_classes.begin(), unverified_classes.end());
+        for (uint32_t i = 0; i < dex_file->NumClassDefs(); ++i) {
+          const DexFile::ClassDef& class_def = dex_file->GetClassDef(i);
+          const char* descriptor = dex_file->GetClassDescriptor(class_def);
+          cls.Assign(class_linker->FindClass(soa.Self(), descriptor, class_loader));
+          if (cls.Get() == nullptr) {
+            CHECK(soa.Self()->IsExceptionPending());
+            soa.Self()->ClearException();
+          } else if (set.find(class_def.class_idx_) == set.end()) {
+            ObjectLock<mirror::Class> lock(soa.Self(), cls);
+            mirror::Class::SetStatus(cls, mirror::Class::kStatusVerified, soa.Self());
+          }
+        }
+      }
+      return;
+    }
+  }
+
+  // If no `verifier_deps` was passed (because no vdex file exists), or the
+  // passed `verifier_deps` is no longer valid, create a new one for
+  // non-boot-image compilation. The verifier will need it to record the new dependencies.
+  // Then dex2oat can update the vdex file with these new dependencies.
+  if (!GetCompilerOptions().IsBootImage()) {
+    Runtime::Current()->GetCompilerCallbacks()->SetVerifierDeps(
+        new verifier::VerifierDeps(dex_files));
+  }
   // Note: verification should not be pulling in classes anymore when compiling the boot image,
   //       as all should have been resolved before. As such, doing this in parallel should still
   //       be deterministic.
   for (const DexFile* dex_file : dex_files) {
     CHECK(dex_file != nullptr);
-    VerifyDexFile(class_loader,
+    VerifyDexFile(jclass_loader,
                   *dex_file,
                   dex_files,
                   parallel_thread_pool_.get(),
@@ -1968,6 +2016,7 @@
         hs.NewHandle(soa.Decode<mirror::ClassLoader>(jclass_loader)));
     Handle<mirror::Class> klass(
         hs.NewHandle(class_linker->FindClass(soa.Self(), descriptor, class_loader)));
+    verifier::MethodVerifier::FailureKind failure_kind;
     if (klass.Get() == nullptr) {
       CHECK(soa.Self()->IsExceptionPending());
       soa.Self()->ClearException();
@@ -1980,7 +2029,8 @@
       Handle<mirror::DexCache> dex_cache(hs.NewHandle(class_linker->FindDexCache(
           soa.Self(), dex_file, false)));
       std::string error_msg;
-      if (verifier::MethodVerifier::VerifyClass(soa.Self(),
+      failure_kind =
+          verifier::MethodVerifier::VerifyClass(soa.Self(),
                                                 &dex_file,
                                                 dex_cache,
                                                 class_loader,
@@ -1988,15 +2038,15 @@
                                                 Runtime::Current()->GetCompilerCallbacks(),
                                                 true /* allow soft failures */,
                                                 log_level_,
-                                                &error_msg) ==
-                                                    verifier::MethodVerifier::kHardFailure) {
+                                                &error_msg);
+      if (failure_kind == verifier::MethodVerifier::kHardFailure) {
         LOG(ERROR) << "Verification failed on class " << PrettyDescriptor(descriptor)
                    << " because: " << error_msg;
         manager_->GetCompiler()->SetHadHardVerifierFailure();
       }
     } else if (!SkipClass(jclass_loader, dex_file, klass.Get())) {
       CHECK(klass->IsResolved()) << klass->PrettyClass();
-      class_linker->VerifyClass(soa.Self(), klass, log_level_);
+      failure_kind = class_linker->VerifyClass(soa.Self(), klass, log_level_);
 
       if (klass->IsErroneous()) {
         // ClassLinker::VerifyClass throws, which isn't useful in the compiler.
@@ -2011,10 +2061,21 @@
       // It is *very* problematic if there are verification errors in the boot classpath. For example,
       // we rely on things working OK without verification when the decryption dialog is brought up.
       // So abort in a debug build if we find this violated.
-      DCHECK(!manager_->GetCompiler()->GetCompilerOptions().IsBootImage() || klass->IsVerified())
-          << "Boot classpath class " << klass->PrettyClass()
-          << " failed to fully verify.";
+      if (kIsDebugBuild) {
+        // TODO(narayan): Remove this special case for signature polymorphic
+        // invokes once verifier support is fully implemented.
+        if (manager_->GetCompiler()->GetCompilerOptions().IsBootImage() &&
+            !android::base::StartsWith(descriptor, "Ljava/lang/invoke/")) {
+          DCHECK(klass->IsVerified()) << "Boot classpath class " << klass->PrettyClass()
+              << " failed to fully verify: state= " << klass->GetStatus();
+        }
+      }
+    } else {
+      // Make the skip a soft failure, essentially being considered as verify at runtime.
+      failure_kind = verifier::MethodVerifier::kSoftFailure;
     }
+    verifier::VerifierDeps::MaybeRecordVerificationStatus(
+        dex_file, class_def.class_idx_, failure_kind);
     soa.Self()->AssertNoPendingException();
   }
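In the fast-verify path above, the indices of classes that could not be fully verified ahead of time are copied into a std::set, so the walk over a dex file's class defs can do cheap membership checks: any class absent from the set is marked verified, and the rest are left for runtime verification. A minimal standalone sketch of that filtering step, with hypothetical values:

#include <cstdint>
#include <iostream>
#include <set>
#include <vector>

int main() {
  // Hypothetical indices of class defs that could not be fully verified.
  const std::vector<uint16_t> unverified_classes = {3, 7};
  const std::set<uint16_t> unverified(unverified_classes.begin(), unverified_classes.end());

  const uint16_t num_class_defs = 10;
  for (uint16_t idx = 0; idx < num_class_defs; ++idx) {
    if (unverified.find(idx) == unverified.end()) {
      std::cout << "class def " << idx << ": mark kStatusVerified\n";
    } else {
      std::cout << "class def " << idx << ": leave for runtime verification\n";
    }
  }
  return 0;
}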
 
diff --git a/compiler/driver/compiler_driver.h b/compiler/driver/compiler_driver.h
index 9a4dd85..1bd3546 100644
--- a/compiler/driver/compiler_driver.h
+++ b/compiler/driver/compiler_driver.h
@@ -50,6 +50,8 @@
 
 namespace verifier {
 class MethodVerifier;
+class VerifierDeps;
+class VerifierDepsTest;
 }  // namespace verifier
 
 class BitVector;
@@ -116,6 +118,7 @@
 
   void CompileAll(jobject class_loader,
                   const std::vector<const DexFile*>& dex_files,
+                  verifier::VerifierDeps* verifier_deps,
                   TimingLogger* timings)
       REQUIRES(!Locks::mutator_lock_, !compiled_classes_lock_, !dex_to_dex_references_lock_);
 
@@ -414,6 +417,7 @@
  private:
   void PreCompile(jobject class_loader,
                   const std::vector<const DexFile*>& dex_files,
+                  verifier::VerifierDeps* verifier_deps,
                   TimingLogger* timings)
       REQUIRES(!Locks::mutator_lock_, !compiled_classes_lock_);
 
@@ -436,7 +440,9 @@
 
   void Verify(jobject class_loader,
               const std::vector<const DexFile*>& dex_files,
+              verifier::VerifierDeps* verifier_deps,
               TimingLogger* timings);
+
   void VerifyDexFile(jobject class_loader,
                      const DexFile& dex_file,
                      const std::vector<const DexFile*>& dex_files,
@@ -578,6 +584,8 @@
   const BitVector* current_dex_to_dex_methods_;
 
   friend class CompileClassVisitor;
+  friend class DexToDexDecompilerTest;
+  friend class verifier::VerifierDepsTest;
   DISALLOW_COPY_AND_ASSIGN(CompilerDriver);
 };
 
diff --git a/compiler/driver/compiler_driver_test.cc b/compiler/driver/compiler_driver_test.cc
index 845028d..9679a79 100644
--- a/compiler/driver/compiler_driver_test.cc
+++ b/compiler/driver/compiler_driver_test.cc
@@ -43,6 +43,7 @@
     TimingLogger::ScopedTiming t(__FUNCTION__, &timings);
     compiler_driver_->CompileAll(class_loader,
                                  GetDexFiles(class_loader),
+                                 /* verifier_deps */ nullptr,
                                  &timings);
     t.NewTiming("MakeAllExecutable");
     MakeAllExecutable(class_loader);
diff --git a/compiler/driver/compiler_options.h b/compiler/driver/compiler_options.h
index 4eb6954..9c62f80 100644
--- a/compiler/driver/compiler_options.h
+++ b/compiler/driver/compiler_options.h
@@ -29,6 +29,10 @@
 
 namespace art {
 
+namespace verifier {
+  class VerifierDepsTest;
+}
+
 class DexFile;
 
 class CompilerOptions FINAL {
@@ -337,7 +341,9 @@
   const std::vector<std::string>* passes_to_run_;
 
   friend class Dex2Oat;
+  friend class DexToDexDecompilerTest;
   friend class CommonCompilerTest;
+  friend class verifier::VerifierDepsTest;
 
   DISALLOW_COPY_AND_ASSIGN(CompilerOptions);
 };
diff --git a/compiler/elf_builder.h b/compiler/elf_builder.h
index 31a7529..7c02384 100644
--- a/compiler/elf_builder.h
+++ b/compiler/elf_builder.h
@@ -879,7 +879,7 @@
     elf_header.e_ident[EI_MAG2]       = ELFMAG2;
     elf_header.e_ident[EI_MAG3]       = ELFMAG3;
     elf_header.e_ident[EI_CLASS]      = (sizeof(Elf_Addr) == sizeof(Elf32_Addr))
-                                         ? ELFCLASS32 : ELFCLASS64;;
+                                         ? ELFCLASS32 : ELFCLASS64;
     elf_header.e_ident[EI_DATA]       = ELFDATA2LSB;
     elf_header.e_ident[EI_VERSION]    = EV_CURRENT;
     elf_header.e_ident[EI_OSABI]      = ELFOSABI_LINUX;
diff --git a/compiler/generate-operator-out.py b/compiler/generate-operator-out.py
new file mode 120000
index 0000000..cc291d2
--- /dev/null
+++ b/compiler/generate-operator-out.py
@@ -0,0 +1 @@
+../tools/generate-operator-out.py
\ No newline at end of file
diff --git a/compiler/image_test.cc b/compiler/image_test.cc
index 8fdf6fc..fcb8979 100644
--- a/compiler/image_test.cc
+++ b/compiler/image_test.cc
@@ -189,7 +189,7 @@
       TimingLogger timings("ImageTest::WriteRead", false, false);
       TimingLogger::ScopedTiming t("CompileAll", &timings);
       driver->SetDexFilesForOatFile(class_path);
-      driver->CompileAll(class_loader, class_path, &timings);
+      driver->CompileAll(class_loader, class_path, /* verifier_deps */ nullptr, &timings);
 
       t.NewTiming("WriteElf");
       SafeMap<std::string, std::string> key_value_store;
diff --git a/compiler/image_writer.cc b/compiler/image_writer.cc
index d1ac139..d7b7403 100644
--- a/compiler/image_writer.cc
+++ b/compiler/image_writer.cc
@@ -51,6 +51,7 @@
 #include "lock_word.h"
 #include "mirror/array-inl.h"
 #include "mirror/class-inl.h"
+#include "mirror/class_ext.h"
 #include "mirror/class_loader.h"
 #include "mirror/dex_cache.h"
 #include "mirror/dex_cache-inl.h"
@@ -696,7 +697,7 @@
   return true;
 }
 
-class ComputeLazyFieldsForClassesVisitor : public ClassVisitor {
+class ImageWriter::ComputeLazyFieldsForClassesVisitor : public ClassVisitor {
  public:
   bool operator()(ObjPtr<Class> c) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
     StackHandleScope<1> hs(Thread::Current());
@@ -757,7 +758,8 @@
   if (klass->GetStatus() == mirror::Class::kStatusError) {
     result = true;
   } else {
-    CHECK(klass->GetVerifyError() == nullptr) << klass->PrettyClass();
+    ObjPtr<mirror::ClassExt> ext(klass->GetExtData());
+    CHECK(ext.IsNull() || ext->GetVerifyError() == nullptr) << klass->PrettyClass();
   }
   if (!result) {
     // Check interfaces since these wont be visited through VisitReferences.)
@@ -835,7 +837,7 @@
   return true;
 }
 
-class NonImageClassesVisitor : public ClassVisitor {
+class ImageWriter::NonImageClassesVisitor : public ClassVisitor {
  public:
   explicit NonImageClassesVisitor(ImageWriter* image_writer) : image_writer_(image_writer) {}
 
@@ -963,21 +965,21 @@
 mirror::String* ImageWriter::FindInternedString(mirror::String* string) {
   Thread* const self = Thread::Current();
   for (const ImageInfo& image_info : image_infos_) {
-    mirror::String* const found = image_info.intern_table_->LookupStrong(self, string);
+    ObjPtr<mirror::String> const found = image_info.intern_table_->LookupStrong(self, string);
     DCHECK(image_info.intern_table_->LookupWeak(self, string) == nullptr)
         << string->ToModifiedUtf8();
     if (found != nullptr) {
-      return found;
+      return found.Ptr();
     }
   }
   if (compile_app_image_) {
     Runtime* const runtime = Runtime::Current();
-    mirror::String* found = runtime->GetInternTable()->LookupStrong(self, string);
+    ObjPtr<mirror::String> found = runtime->GetInternTable()->LookupStrong(self, string);
     // If we found it in the runtime intern table it could either be in the boot image or interned
     // during app image compilation. If it was in the boot image return that, otherwise return null
     // since it belongs to another image space.
-    if (found != nullptr && runtime->GetHeap()->ObjectIsInBootImageSpace(found)) {
-      return found;
+    if (found != nullptr && runtime->GetHeap()->ObjectIsInBootImageSpace(found.Ptr())) {
+      return found.Ptr();
     }
     DCHECK(runtime->GetInternTable()->LookupWeak(self, string) == nullptr)
         << string->ToModifiedUtf8();
@@ -1088,7 +1090,8 @@
       mirror::String* interned = FindInternedString(obj->AsString());
       if (interned == nullptr) {
         // Not in another image space, insert to our table.
-        interned = GetImageInfo(oat_index).intern_table_->InternStrongImageString(obj->AsString());
+        interned =
+            GetImageInfo(oat_index).intern_table_->InternStrongImageString(obj->AsString()).Ptr();
         DCHECK_EQ(interned, obj);
       }
     } else if (obj->IsDexCache()) {
@@ -1448,7 +1451,7 @@
     for (size_t i = 0, count = dex_file->NumStringIds(); i < count; ++i) {
       uint32_t utf16_length;
       const char* utf8_data = dex_file->StringDataAndUtf16LengthByIdx(i, &utf16_length);
-      mirror::String* string = intern_table->LookupStrong(self, utf16_length, utf8_data);
+      mirror::String* string = intern_table->LookupStrong(self, utf16_length, utf8_data).Ptr();
       TryAssignBinSlot(work_stack, string, oat_index);
     }
   }
@@ -1698,7 +1701,7 @@
   return reinterpret_cast<ArtMethod*>(image_info.image_begin_ + it->second.offset);
 }
 
-class FixupRootVisitor : public RootVisitor {
+class ImageWriter::FixupRootVisitor : public RootVisitor {
  public:
   explicit FixupRootVisitor(ImageWriter* image_writer) : image_writer_(image_writer) {
   }
@@ -1941,7 +1944,7 @@
 }
 
 // Rewrite all the references in the copied object to point to their image address equivalent
-class FixupVisitor {
+class ImageWriter::FixupVisitor {
  public:
   FixupVisitor(ImageWriter* image_writer, Object* copy) : image_writer_(image_writer), copy_(copy) {
   }
@@ -1977,7 +1980,7 @@
   mirror::Object* const copy_;
 };
 
-class FixupClassVisitor FINAL : public FixupVisitor {
+class ImageWriter::FixupClassVisitor FINAL : public FixupVisitor {
  public:
   FixupClassVisitor(ImageWriter* image_writer, Object* copy) : FixupVisitor(image_writer, copy) {
   }
@@ -2042,7 +2045,7 @@
   }
 }
 
-class NativeLocationVisitor {
+class ImageWriter::NativeLocationVisitor {
  public:
   explicit NativeLocationVisitor(ImageWriter* image_writer) : image_writer_(image_writer) {}
 
@@ -2067,13 +2070,8 @@
 void ImageWriter::FixupObject(Object* orig, Object* copy) {
   DCHECK(orig != nullptr);
   DCHECK(copy != nullptr);
-  if (kUseBakerOrBrooksReadBarrier) {
-    orig->AssertReadBarrierPointer();
-    if (kUseBrooksReadBarrier) {
-      // Note the address 'copy' isn't the same as the image address of 'orig'.
-      copy->SetReadBarrierPointer(GetImageAddress(orig));
-      DCHECK_EQ(copy->GetReadBarrierPointer(), GetImageAddress(orig));
-    }
+  if (kUseBakerReadBarrier) {
+    orig->AssertReadBarrierState();
   }
   auto* klass = orig->GetClass();
   if (klass->IsIntArrayClass() || klass->IsLongArrayClass()) {
diff --git a/compiler/image_writer.h b/compiler/image_writer.h
index c9cf4cb..24fad46 100644
--- a/compiler/image_writer.h
+++ b/compiler/image_writer.h
@@ -582,14 +582,15 @@
   // Map of dex files to the indexes of oat files that they were compiled into.
   const std::unordered_map<const DexFile*, size_t>& dex_file_oat_index_map_;
 
-  friend class ContainsBootClassLoaderNonImageClassVisitor;
-  friend class FixupClassVisitor;
-  friend class FixupRootVisitor;
-  friend class FixupVisitor;
+  class ComputeLazyFieldsForClassesVisitor;
+  class FixupClassVisitor;
+  class FixupRootVisitor;
+  class FixupVisitor;
   class GetRootsVisitor;
-  friend class NativeLocationVisitor;
-  friend class NonImageClassesVisitor;
+  class NativeLocationVisitor;
+  class NonImageClassesVisitor;
   class VisitReferencesVisitor;
+
   DISALLOW_COPY_AND_ASSIGN(ImageWriter);
 };
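The image_writer change above replaces friend declarations with nested visitor classes that are forward-declared inside ImageWriter and defined in the .cc file. A nested class can reach the enclosing class's private members without a friend declaration, and the helper type no longer leaks into the surrounding namespace. A minimal standalone sketch of the pattern, with hypothetical Writer/CountingVisitor names:

#include <iostream>

class Writer {
 public:
  void Run();

 private:
  class CountingVisitor;  // Forward-declared here, defined in the .cc file.
  int visited_ = 0;
};

// In the .cc file:
class Writer::CountingVisitor {
 public:
  explicit CountingVisitor(Writer* writer) : writer_(writer) {}
  void Visit() { ++writer_->visited_; }  // Private access works; no 'friend' needed.

 private:
  Writer* const writer_;
};

void Writer::Run() {
  CountingVisitor visitor(this);
  visitor.Visit();
  visitor.Visit();
  std::cout << "visited " << visited_ << " objects\n";
}

int main() {
  Writer writer;
  writer.Run();
  return 0;
}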
 
diff --git a/compiler/intrinsics_list.h b/compiler/intrinsics_list.h
index b617387..555baf6 100644
--- a/compiler/intrinsics_list.h
+++ b/compiler/intrinsics_list.h
@@ -108,8 +108,10 @@
   V(StringCompareTo, kVirtual, kNeedsEnvironmentOrCache, kReadSideEffects, kCanThrow, "Ljava/lang/String;", "compareTo", "(Ljava/lang/String;)I") \
   V(StringEquals, kVirtual, kNeedsEnvironmentOrCache, kReadSideEffects, kCanThrow, "Ljava/lang/String;", "equals", "(Ljava/lang/Object;)Z") \
   V(StringGetCharsNoCheck, kVirtual, kNeedsEnvironmentOrCache, kReadSideEffects, kCanThrow, "Ljava/lang/String;", "getCharsNoCheck", "(II[CI)V") \
-  V(StringIndexOf, kVirtual, kNeedsEnvironmentOrCache, kReadSideEffects, kCanThrow, "Ljava/lang/String;", "indexOf", "(I)I") \
-  V(StringIndexOfAfter, kVirtual, kNeedsEnvironmentOrCache, kReadSideEffects, kCanThrow, "Ljava/lang/String;", "indexOf", "(II)I") \
+  V(StringIndexOf, kVirtual, kNeedsEnvironmentOrCache, kReadSideEffects, kNoThrow, "Ljava/lang/String;", "indexOf", "(I)I") \
+  V(StringIndexOfAfter, kVirtual, kNeedsEnvironmentOrCache, kReadSideEffects, kNoThrow, "Ljava/lang/String;", "indexOf", "(II)I") \
+  V(StringStringIndexOf, kVirtual, kNeedsEnvironmentOrCache, kReadSideEffects, kCanThrow, "Ljava/lang/String;", "indexOf", "(Ljava/lang/String;)I") \
+  V(StringStringIndexOfAfter, kVirtual, kNeedsEnvironmentOrCache, kReadSideEffects, kCanThrow, "Ljava/lang/String;", "indexOf", "(Ljava/lang/String;I)I") \
   V(StringIsEmpty, kVirtual, kNeedsEnvironmentOrCache, kReadSideEffects, kNoThrow, "Ljava/lang/String;", "isEmpty", "()Z") \
   V(StringLength, kVirtual, kNeedsEnvironmentOrCache, kReadSideEffects, kNoThrow, "Ljava/lang/String;", "length", "()I") \
   V(StringNewStringFromBytes, kStatic, kNeedsEnvironmentOrCache, kAllSideEffects, kCanThrow, "Ljava/lang/StringFactory;", "newStringFromBytes", "([BIII)Ljava/lang/String;") \
diff --git a/compiler/jit/jit_compiler.cc b/compiler/jit/jit_compiler.cc
index c398703..9dfb434 100644
--- a/compiler/jit/jit_compiler.cc
+++ b/compiler/jit/jit_compiler.cc
@@ -124,30 +124,30 @@
     if (option.starts_with("--instruction-set-variant=")) {
       StringPiece str = option.substr(strlen("--instruction-set-variant=")).data();
       VLOG(compiler) << "JIT instruction set variant " << str;
-      instruction_set_features_.reset(InstructionSetFeatures::FromVariant(
-          instruction_set, str.as_string(), &error_msg));
+      instruction_set_features_ = InstructionSetFeatures::FromVariant(
+          instruction_set, str.as_string(), &error_msg);
       if (instruction_set_features_ == nullptr) {
         LOG(WARNING) << "Error parsing " << option << " message=" << error_msg;
       }
     } else if (option.starts_with("--instruction-set-features=")) {
       StringPiece str = option.substr(strlen("--instruction-set-features=")).data();
       VLOG(compiler) << "JIT instruction set features " << str;
-      if (instruction_set_features_.get() == nullptr) {
-        instruction_set_features_.reset(InstructionSetFeatures::FromVariant(
-            instruction_set, "default", &error_msg));
+      if (instruction_set_features_ == nullptr) {
+        instruction_set_features_ = InstructionSetFeatures::FromVariant(
+            instruction_set, "default", &error_msg);
         if (instruction_set_features_ == nullptr) {
           LOG(WARNING) << "Error parsing " << option << " message=" << error_msg;
         }
       }
-      instruction_set_features_.reset(
-          instruction_set_features_->AddFeaturesFromString(str.as_string(), &error_msg));
+      instruction_set_features_ =
+          instruction_set_features_->AddFeaturesFromString(str.as_string(), &error_msg);
       if (instruction_set_features_ == nullptr) {
         LOG(WARNING) << "Error parsing " << option << " message=" << error_msg;
       }
     }
   }
   if (instruction_set_features_ == nullptr) {
-    instruction_set_features_.reset(InstructionSetFeatures::FromCppDefines());
+    instruction_set_features_ = InstructionSetFeatures::FromCppDefines();
   }
   cumulative_logger_.reset(new CumulativeLogger("jit times"));
   compiler_driver_.reset(new CompilerDriver(
@@ -171,19 +171,10 @@
 
   size_t thread_count = compiler_driver_->GetThreadCount();
   if (compiler_options_->GetGenerateDebugInfo()) {
-#ifdef ART_TARGET_ANDROID
-    const char* prefix = "/data/misc/trace";
-#else
-    const char* prefix = "/tmp";
-#endif
     DCHECK_EQ(thread_count, 1u)
         << "Generating debug info only works with one compiler thread";
-    std::string perf_filename = std::string(prefix) + "/perf-" + std::to_string(getpid()) + ".map";
-    perf_file_.reset(OS::CreateEmptyFileWriteOnly(perf_filename.c_str()));
-    if (perf_file_ == nullptr) {
-      LOG(ERROR) << "Could not create perf file at " << perf_filename <<
-                    " Are you on a user build? Perf only works on userdebug/eng builds";
-    }
+    jit_logger_.reset(new JitLogger());
+    jit_logger_->OpenLog();
   }
 
   size_t inline_depth_limit = compiler_driver_->GetCompilerOptions().GetInlineDepthLimit();
@@ -192,9 +183,8 @@
 }
 
 JitCompiler::~JitCompiler() {
-  if (perf_file_ != nullptr) {
-    UNUSED(perf_file_->Flush());
-    UNUSED(perf_file_->Close());
+  if (compiler_options_->GetGenerateDebugInfo()) {
+    jit_logger_->CloseLog();
   }
 }
 
@@ -218,19 +208,8 @@
     TimingLogger::ScopedTiming t2("Compiling", &logger);
     JitCodeCache* const code_cache = runtime->GetJit()->GetCodeCache();
     success = compiler_driver_->GetCompiler()->JitCompile(self, code_cache, method, osr);
-    if (success && (perf_file_ != nullptr)) {
-      const void* ptr = method->GetEntryPointFromQuickCompiledCode();
-      std::ostringstream stream;
-      stream << std::hex
-             << reinterpret_cast<uintptr_t>(ptr)
-             << " "
-             << code_cache->GetMemorySizeOfCodePointer(ptr)
-             << " "
-             << method->PrettyMethod()
-             << std::endl;
-      std::string str = stream.str();
-      bool res = perf_file_->WriteFully(str.c_str(), str.size());
-      CHECK(res);
+    if (success && (jit_logger_ != nullptr)) {
+      jit_logger_->WriteLog(code_cache, method);
     }
   }
 
diff --git a/compiler/jit/jit_compiler.h b/compiler/jit/jit_compiler.h
index ea2747c..f0f24d3 100644
--- a/compiler/jit/jit_compiler.h
+++ b/compiler/jit/jit_compiler.h
@@ -19,6 +19,7 @@
 
 #include "base/mutex.h"
 #include "compiled_method.h"
+#include "jit_logger.h"
 #include "driver/compiler_driver.h"
 #include "driver/compiler_options.h"
 
@@ -50,7 +51,7 @@
   std::unique_ptr<CumulativeLogger> cumulative_logger_;
   std::unique_ptr<CompilerDriver> compiler_driver_;
   std::unique_ptr<const InstructionSetFeatures> instruction_set_features_;
-  std::unique_ptr<File> perf_file_;
+  std::unique_ptr<JitLogger> jit_logger_;
 
   JitCompiler();
 
diff --git a/compiler/jit/jit_logger.cc b/compiler/jit/jit_logger.cc
new file mode 100644
index 0000000..9ce3b0c
--- /dev/null
+++ b/compiler/jit/jit_logger.cc
@@ -0,0 +1,312 @@
+/*
+ * Copyright 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "jit_logger.h"
+
+#include "arch/instruction_set.h"
+#include "art_method-inl.h"
+#include "base/time_utils.h"
+#include "base/unix_file/fd_file.h"
+#include "driver/compiler_driver.h"
+#include "jit/jit.h"
+#include "jit/jit_code_cache.h"
+
+namespace art {
+namespace jit {
+
+#ifdef ART_TARGET_ANDROID
+static const char* kLogPrefix = "/data/misc/trace";
+#else
+static const char* kLogPrefix = "/tmp";
+#endif
+
+// File format of perf-PID.map:
+// +---------------------+
+// |ADDR SIZE symbolname1|
+// |ADDR SIZE symbolname2|
+// |...                  |
+// +---------------------+
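+// For illustration, a hypothetical entry written by WritePerfMapLog() below
+// (code address and code size in hex, then the pretty method name) could be:
+//   7f6b45a000 1c0 java.lang.String java.util.ArrayList.toString()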
+void JitLogger::OpenPerfMapLog() {
+  std::string pid_str = std::to_string(getpid());
+  std::string perf_filename = std::string(kLogPrefix) + "/perf-" + pid_str + ".map";
+  perf_file_.reset(OS::CreateEmptyFileWriteOnly(perf_filename.c_str()));
+  if (perf_file_ == nullptr) {
+    LOG(ERROR) << "Could not create perf file at " << perf_filename <<
+      " Are you on a user build? Perf only works on userdebug/eng builds";
+  }
+}
+
+void JitLogger::WritePerfMapLog(JitCodeCache* code_cache, ArtMethod* method) {
+  if (perf_file_ != nullptr) {
+    const void* ptr = method->GetEntryPointFromQuickCompiledCode();
+    size_t code_size = code_cache->GetMemorySizeOfCodePointer(ptr);
+    std::string method_name = method->PrettyMethod();
+
+    std::ostringstream stream;
+    stream << std::hex
+           << reinterpret_cast<uintptr_t>(ptr)
+           << " "
+           << code_size
+           << " "
+           << method_name
+           << std::endl;
+    std::string str = stream.str();
+    bool res = perf_file_->WriteFully(str.c_str(), str.size());
+    if (!res) {
+      LOG(WARNING) << "Failed to write jitted method info in log: write failure.";
+    }
+  } else {
+    LOG(WARNING) << "Failed to write jitted method info in log: log file doesn't exist.";
+  }
+}
+
+void JitLogger::ClosePerfMapLog() {
+  if (perf_file_ != nullptr) {
+    UNUSED(perf_file_->Flush());
+    UNUSED(perf_file_->Close());
+  }
+}
+
+//  File format of jit-PID.dump:
+//
+//  +--------------------------------+
+//  |  PerfJitHeader                 |
+//  +--------------------------------+
+//  |  PerfJitCodeLoad {             | .
+//  |    struct PerfJitBase;         |  .
+//  |    uint32_t process_id_;       |   .
+//  |    uint32_t thread_id_;        |   .
+//  |    uint64_t vma_;              |   .
+//  |    uint64_t code_address_;     |   .
+//  |    uint64_t code_size_;        |   .
+//  |    uint64_t code_id_;          |   .
+//  |  }                             |   .
+//  +-                              -+   .
+//  |  method_name'\0'               |   +--> one jitted method
+//  +-                              -+   .
+//  |  jitted code binary            |   .
+//  |  ...                           |   .
+//  +--------------------------------+   .
+//  |  PerfJitCodeDebugInfo     {    |   .
+//  |    struct PerfJitBase;         |   .
+//  |    uint64_t address_;          |   .
+//  |    uint64_t entry_count_;      |   .
+//  |    struct PerfJitDebugEntry;   |  .
+//  |  }                             | .
+//  +--------------------------------+
+//  |  PerfJitCodeLoad               |
+//     ...
+//
+struct PerfJitHeader {
+  uint32_t magic_;            // Characters "JiTD"
+  uint32_t version_;          // Header version
+  uint32_t size_;             // Total size of header
+  uint32_t elf_mach_target_;  // Elf mach target
+  uint32_t reserved_;         // Reserved, currently not used
+  uint32_t process_id_;       // Process ID of the JIT compiler
+  uint64_t time_stamp_;       // Timestamp when the header is generated
+  uint64_t flags_;            // Currently the flags are only used for choosing the clock for the timestamp;
+                              // we set it to 0 to tell perf that we use the CLOCK_MONOTONIC clock.
+  static const uint32_t kMagic = 0x4A695444;  // "JiTD"
+  static const uint32_t kVersion = 1;
+};
+
+// Each record starts with the following basic information: event type, total size, and timestamp.
+struct PerfJitBase {
+  enum PerfJitEvent {
+    // A jitted code load event.
+    // In ART JIT, it is used to log that a new method has been JIT-compiled and committed to
+    // the jit-code-cache.
+    // Note that the kLoad event also supports code cache GC in ART JIT: every kLoad event
+    // recorded in jit-PID.dump and every perf sample recorded in perf.data carries a time stamp.
+    // If code cache GC happens in ART JIT and a new jitted method is committed to the same
+    // address as a previously deleted method, the time stamps let the profiler tell whether a
+    // sample belongs to the era of the first jitted method or to that of the second one.
+    // JitCodeCache doesn't have to record any event on 'code delete'.
+    kLoad = 0,
+
+    // A jitted code move event, i.e. jitted code moved from one address to another.
+    // It helps the profiler map samples to the right symbol even when the code is moved.
+    // In ART JIT, this event can log the following behavior:
+    // a jitted method was recorded in a previous kLoad event but, for some reason,
+    // has been moved to another address in the jit-code-cache.
+    kMove = 1,
+
+    // Logs debug line/column information.
+    kDebugInfo = 2,
+
+    // Logs JIT VM end of life event.
+    kClose = 3
+  };
+  uint32_t event_;       // Must be one of the events defined in PerfJitEvent.
+  uint32_t size_;        // Total size of this event record.
+                         // For example, for a kLoad event, the size of the event record is:
+                         // sizeof(PerfJitCodeLoad) + method_name.size() + 1 (for '\0') + code size.
+  uint64_t time_stamp_;  // Timestamp for the event.
+};
+
+// Logs a jitted code load event (kLoad).
+// In ART JIT, it is used to log that a new method has been JIT-compiled and committed to the
+// jit-code-cache.
+struct PerfJitCodeLoad : PerfJitBase {
+  uint32_t process_id_;    // Process ID who performs the jit code load.
+                           // In ART JIT, it is the pid of the JIT compiler.
+  uint32_t thread_id_;     // Thread ID who performs the jit code load.
+                           // In ART JIT, it is the tid of the JIT compiler.
+  uint64_t vma_;           // Address of the code section. In ART JIT, because code_address_
+                           // uses an absolute address, this field is 0.
+  uint64_t code_address_;  // Address where the jitted code is loaded.
+  uint64_t code_size_;     // Size of the jitted code.
+  uint64_t code_id_;       // Unique ID for each jitted code.
+};
+
+// This structure is for source line/column mapping.
+// Currently this feature is not implemented in ART JIT yet.
+struct PerfJitDebugEntry {
+  uint64_t address_;      // Code address which maps to the line/column in source.
+  uint32_t line_number_;  // Source line number starting at 1.
+  uint32_t column_;       // Column discriminator, default 0.
+  const char name_[0];    // Followed by null-terminated name or \0xff\0 if same as previous.
+};
+
+// Logs debug line information (kDebugInfo).
+// This structure is for source line/column mapping.
+// Currently this feature is not implemented in ART JIT yet.
+struct PerfJitCodeDebugInfo : PerfJitBase {
+  uint64_t address_;              // Starting code address which the debug info describes.
+  uint64_t entry_count_;          // How many instances of PerfJitDebugEntry.
+  PerfJitDebugEntry entries_[0];  // Followed by entry_count_ instances of PerfJitDebugEntry.
+};
+
+static uint32_t GetElfMach() {
+#if defined(__arm__)
+  static const uint32_t kElfMachARM = 0x28;
+  return kElfMachARM;
+#elif defined(__aarch64__)
+  static const uint32_t kElfMachARM64 = 0xB7;
+  return kElfMachARM64;
+#elif defined(__i386__)
+  static const uint32_t kElfMachIA32 = 0x3;
+  return kElfMachIA32;
+#elif defined(__x86_64__)
+  static const uint32_t kElfMachX64 = 0x3E;
+  return kElfMachX64;
+#else
+  UNIMPLEMENTED(WARNING) << "Unsupported architecture in JitLogger";
+  return 0;
+#endif
+}
+
+void JitLogger::OpenMarkerFile() {
+  int fd = jit_dump_file_->Fd();
+  // The 'perf inject' tool requires that the jit-PID.dump file
+  // have a mmap(PROT_READ|PROT_EXEC) record in perf.data.
+  marker_address_ = mmap(nullptr, kPageSize, PROT_READ | PROT_EXEC, MAP_PRIVATE, fd, 0);
+  if (marker_address_ == MAP_FAILED) {
+    LOG(WARNING) << "Failed to create record in perf.data. JITed code profiling will not work.";
+    return;
+  }
+}
+
+void JitLogger::CloseMarkerFile() {
+  if (marker_address_ != nullptr) {
+    munmap(marker_address_, kPageSize);
+  }
+}
+
+void JitLogger::WriteJitDumpDebugInfo() {
+  // In the future, we can add java source file line/column mapping here.
+}
+
+void JitLogger::WriteJitDumpHeader() {
+  PerfJitHeader header;
+
+  std::memset(&header, 0, sizeof(header));
+  header.magic_ = PerfJitHeader::kMagic;
+  header.version_ = PerfJitHeader::kVersion;
+  header.size_ = sizeof(header);
+  header.elf_mach_target_ = GetElfMach();
+  header.process_id_ = static_cast<uint32_t>(getpid());
+  header.time_stamp_ = art::NanoTime();  // CLOCK_MONOTONIC clock is required.
+  header.flags_ = 0;
+
+  bool res = jit_dump_file_->WriteFully(reinterpret_cast<const char*>(&header), sizeof(header));
+  if (!res) {
+    LOG(WARNING) << "Failed to write profiling log. The 'perf inject' tool will not work.";
+  }
+}
+
+void JitLogger::OpenJitDumpLog() {
+  std::string pid_str = std::to_string(getpid());
+  std::string jitdump_filename = std::string(kLogPrefix) + "/jit-" + pid_str + ".dump";
+
+  jit_dump_file_.reset(OS::CreateEmptyFile(jitdump_filename.c_str()));
+  if (jit_dump_file_ == nullptr) {
+    LOG(ERROR) << "Could not create jit dump file at " << jitdump_filename <<
+      " Are you on a user build? Perf only works on userdebug/eng builds";
+    return;
+  }
+
+  OpenMarkerFile();
+
+  // Continue to write the jit-PID.dump file even if OpenMarkerFile() above fails.
+  // Even if that means the 'perf inject' tool cannot work, developers can still use other tools
+  // to map the samples in perf.data to the information (symbol, address, code) recorded
+  // in the jit-PID.dump file, and still proceed with the jitted code analysis.
+  WriteJitDumpHeader();
+}
+
+void JitLogger::WriteJitDumpLog(JitCodeCache* code_cache, ArtMethod* method) {
+  if (jit_dump_file_ != nullptr) {
+    const void* code = method->GetEntryPointFromQuickCompiledCode();
+    size_t code_size = code_cache->GetMemorySizeOfCodePointer(code);
+    std::string method_name = method->PrettyMethod();
+
+    PerfJitCodeLoad jit_code;
+    std::memset(&jit_code, 0, sizeof(jit_code));
+    jit_code.event_ = PerfJitCodeLoad::kLoad;
+    jit_code.size_ = sizeof(jit_code) + method_name.size() + 1 + code_size;
+    jit_code.time_stamp_ = art::NanoTime();    // CLOCK_MONOTONIC clock is required.
+    jit_code.process_id_ = static_cast<uint32_t>(getpid());
+    jit_code.thread_id_ = static_cast<uint32_t>(art::GetTid());
+    jit_code.vma_ = 0x0;
+    jit_code.code_address_ = reinterpret_cast<uint64_t>(code);
+    jit_code.code_size_ = code_size;
+    jit_code.code_id_ = code_index_++;
+
+    // Write one complete jitted method info, including:
+    // - PerfJitCodeLoad structure
+    // - Method name
+    // - Complete generated code of this method
+    //
+    // Use UNUSED() here to avoid compiler warnings.
+    UNUSED(jit_dump_file_->WriteFully(reinterpret_cast<const char*>(&jit_code), sizeof(jit_code)));
+    UNUSED(jit_dump_file_->WriteFully(method_name.c_str(), method_name.size() + 1));
+    UNUSED(jit_dump_file_->WriteFully(code, code_size));
+
+    WriteJitDumpDebugInfo();
+  }
+}
+
+void JitLogger::CloseJitDumpLog() {
+  if (jit_dump_file_ != nullptr) {
+    CloseMarkerFile();
+    UNUSED(jit_dump_file_->Flush());
+    UNUSED(jit_dump_file_->Close());
+  }
+}
+
+}  // namespace jit
+}  // namespace art
diff --git a/compiler/jit/jit_logger.h b/compiler/jit/jit_logger.h
new file mode 100644
index 0000000..0f8cfe4
--- /dev/null
+++ b/compiler/jit/jit_logger.h
@@ -0,0 +1,137 @@
+/*
+ * Copyright 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_JIT_JIT_LOGGER_H_
+#define ART_COMPILER_JIT_JIT_LOGGER_H_
+
+#include "base/mutex.h"
+#include "compiled_method.h"
+#include "driver/compiler_driver.h"
+#include "driver/compiler_options.h"
+
+namespace art {
+
+class ArtMethod;
+
+namespace jit {
+
+//
+// JitLogger supports two approaches of perf profiling.
+//
+// (1) perf-map:
+//     The perf-map mechanism generates the perf-PID.map file,
+//     which provides simple "address, size, method_name" information to perf,
+//     and allows perf to map samples in the jit-code-cache to jitted method symbols.
+//
+//     Command line Example:
+//       $ perf record dalvikvm -Xcompiler-option --generate-debug-info -cp <classpath> Test
+//       $ perf report
+//     NOTE:
+//       - Make sure that the perf-PID.map file is available for the 'perf report' tool to access,
+//         so that jitted methods can be displayed.
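+//       - For example (illustrative; the PID and paths depend on your setup), if the profile was
+//         recorded on a device but the report is generated on a host, pull the map file to /tmp/
+//         on the host, where perf conventionally looks for perf-PID.map files:
+//           $ adb pull /data/misc/trace/perf-1234.map /tmp/perf-1234.map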
+//
+//
+// (2) perf-inject:
+//     The perf-inject mechanism generates the jit-PID.dump file,
+//     which provides rich information about a jitted method.
+//     It allows perf or other profiling tools to do advanced analysis on jitted code,
+//     for example instruction level profiling.
+//
+//     Command line Example:
+//       $ perf record -k mono dalvikvm -Xcompiler-option --generate-debug-info -cp <classpath> Test
+//       $ perf inject -i perf.data -o perf.data.jitted
+//       $ perf report -i perf.data.jitted
+//       $ perf annotate -i perf.data.jitted
+//     NOTE:
+//       REQUIREMENTS
+//       - The 'perf record -k mono' option requires a 4.1 (or higher) Linux kernel.
+//       - The 'perf inject' feature that generates jit ELF files requires perf 4.6 (or higher).
+//       PERF RECORD
+//       - The '-k mono' option tells 'perf record' to use the CLOCK_MONOTONIC clock during
+//         sampling, which is required by 'perf inject' to make sure that both perf.data and
+//         jit-PID.dump use a unified clock source for timestamps.
+//       PERF INJECT
+//       - The 'perf inject' tool injects information from jit-PID.dump into the perf.data file,
+//         and generates a small ELF file (jitted-TID-CODEID.so) for each jitted method.
+//       - On Android devices, the jit-PID.dump file is generated in the /data/misc/trace/ folder,
+//         and that location is recorded in the perf.data file.
+//         The 'perf inject' tool looks for jit-PID.dump and generates the small ELF files in
+//         this /data/misc/trace/ folder.
+//         Make sure that you have read/write access to the /data/misc/trace/ folder.
+//       - On non-Android devices, the jit-PID.dump file is generated in the /tmp/ folder, and
+//         the 'perf inject' tool operates on that folder.
+//         Make sure that you have read/write access to the /tmp/ folder.
+//       - If you are executing 'perf inject' on a non-Android device (host), but perf.data and
+//         jit-PID.dump were adb-pulled from an Android device, make sure that there is a
+//         /data/misc/trace/ folder on the host and that the jit-PID.dump file is copied into it
+//         (see the example workflow at the end of this comment).
+//       - Currently 'perf inject' doesn't provide an option to change the path for jit-PID.dump
+//         and the generated ELF files.
+//       PERF ANNOTATE
+//       - The 'perf annotate' tool displays an assembly-level profiling report.
+//         Source code can also be displayed if the ELF file has debug symbols.
+//       - Make sure that the small ELF files generated above are available for the 'perf annotate'
+//         tool to access, so that jitted code can be displayed in assembly view.
+//
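+// Example workflow for injecting on a host with data recorded on a device (illustrative only;
+// the PID, file names and directories depend on your setup):
+//   $ adb pull /data/local/tmp/perf.data perf.data
+//   $ adb pull /data/misc/trace/jit-1234.dump /data/misc/trace/jit-1234.dump
+//   $ perf inject -i perf.data -o perf.data.jitted
+//   $ perf report -i perf.data.jitted
+//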
+class JitLogger {
+  public:
+    JitLogger() : code_index_(0), marker_address_(nullptr) {}
+
+    void OpenLog() {
+      OpenPerfMapLog();
+      OpenJitDumpLog();
+    }
+
+    void WriteLog(JitCodeCache* code_cache, ArtMethod* method)
+        REQUIRES_SHARED(Locks::mutator_lock_) {
+      WritePerfMapLog(code_cache, method);
+      WriteJitDumpLog(code_cache, method);
+    }
+
+    void CloseLog() {
+      ClosePerfMapLog();
+      CloseJitDumpLog();
+    }
+
+  private:
+    // For perf-map profiling
+    void OpenPerfMapLog();
+    void WritePerfMapLog(JitCodeCache* code_cache, ArtMethod* method)
+        REQUIRES_SHARED(Locks::mutator_lock_);
+    void ClosePerfMapLog();
+
+    // For perf-inject profiling
+    void OpenJitDumpLog();
+    void WriteJitDumpLog(JitCodeCache* code_cache, ArtMethod* method)
+        REQUIRES_SHARED(Locks::mutator_lock_);
+    void CloseJitDumpLog();
+
+    void OpenMarkerFile();
+    void CloseMarkerFile();
+    void WriteJitDumpHeader();
+    void WriteJitDumpDebugInfo();
+
+    std::unique_ptr<File> perf_file_;
+    std::unique_ptr<File> jit_dump_file_;
+    uint64_t code_index_;
+    void* marker_address_;
+
+    DISALLOW_COPY_AND_ASSIGN(JitLogger);
+};
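+
+// A minimal usage sketch (the real call sites live in the JIT compiler code and may differ):
+//
+//   JitLogger jit_logger;
+//   jit_logger.OpenLog();                      // At JIT start-up, when perf logging is enabled.
+//   jit_logger.WriteLog(code_cache, method);   // After a method is compiled and committed
+//                                              // (requires the mutator lock, see WriteLog above).
+//   jit_logger.CloseLog();                     // At JIT shutdown.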
+
+}  // namespace jit
+}  // namespace art
+
+#endif  // ART_COMPILER_JIT_JIT_LOGGER_H_
diff --git a/compiler/jni/jni_compiler_test.cc b/compiler/jni/jni_compiler_test.cc
index ca1dc69..21042a3 100644
--- a/compiler/jni/jni_compiler_test.cc
+++ b/compiler/jni/jni_compiler_test.cc
@@ -20,6 +20,7 @@
 #include <math.h>
 
 #include "art_method-inl.h"
+#include "base/bit_utils.h"
 #include "class_linker.h"
 #include "common_compiler_test.h"
 #include "compiler.h"
@@ -366,7 +367,9 @@
   void StackArgsIntsFirstImpl();
   void StackArgsFloatsFirstImpl();
   void StackArgsMixedImpl();
+#if defined(__mips__) && defined(__LP64__) && (__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__)
   void StackArgsSignExtendedMips64Impl();
+#endif
 
   void NormalNativeImpl();
   void FastNativeImpl();
@@ -532,6 +535,25 @@
   BaseHandleScope* const handle_scope_;
 };
 
+// Number of references allocated in JNI ShadowFrames on the given thread.
+static size_t NumJniShadowFrameReferences(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_) {
+  return self->GetManagedStack()->NumJniShadowFrameReferences();
+}
+
+// Number of references in handle scope on the given thread.
+static size_t NumHandleReferences(Thread* self) {
+  size_t count = 0;
+  for (BaseHandleScope* cur = self->GetTopHandleScope(); cur != nullptr; cur = cur->GetLink()) {
+    count += cur->NumberOfReferences();
+  }
+  return count;
+}
+
+// Number of references allocated in handle scopes & JNI shadow frames on this thread.
+static size_t NumStackReferences(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_) {
+  return NumHandleReferences(self) + NumJniShadowFrameReferences(self);
+}
+
 static void expectNumStackReferences(size_t val1, size_t val2) {
   // In rare cases when JNI functions call themselves recursively,
   // disable this test because it will have a false negative.
@@ -539,7 +561,7 @@
     /* @CriticalNative doesn't build a HandleScope, so this test is meaningless then. */
     ScopedObjectAccess soa(Thread::Current());
 
-    size_t actual_num = Thread::Current()->NumStackReferences();
+    size_t actual_num = NumStackReferences(Thread::Current());
     // XX: Not too sure what's going on.
     // Sometimes null references get placed and sometimes they don't?
     EXPECT_TRUE(val1 == actual_num || val2 == actual_num)
@@ -2126,50 +2148,43 @@
 
 JNI_TEST_CRITICAL(StackArgsMixed)
 
-void Java_MyClassNatives_stackArgsSignExtendedMips64(JNIEnv*, jclass, jint i1, jint i2, jint i3,
-                                                     jint i4, jint i5, jint i6, jint i7, jint i8) {
-  EXPECT_EQ(i1, 1);
-  EXPECT_EQ(i2, 2);
-  EXPECT_EQ(i3, 3);
-  EXPECT_EQ(i4, 4);
-  EXPECT_EQ(i5, 5);
-  EXPECT_EQ(i6, 6);
-  EXPECT_EQ(i7, 7);
-  EXPECT_EQ(i8, -8);
-
 #if defined(__mips__) && defined(__LP64__) && (__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__)
-  // Mips64 ABI requires that arguments passed through stack be sign-extended 8B slots.
-  // First 8 arguments are passed through registers, check i7 and i8.
-  uint32_t stack1_high = *(&i7 + 1);
-  uint32_t stack2_high = *(&i8 + 1);
-
-  EXPECT_EQ(stack1_high, static_cast<uint32_t>(0));
-  EXPECT_EQ(stack2_high, static_cast<uint32_t>(0xffffffff));
-#else
-  LOG(INFO) << "Skipping stackArgsSignExtendedMips64 as there is nothing to be done on "
-            << kRuntimeISA;
-  // Force-print to std::cout so it's also outside the logcat.
-  std::cout << "Skipping stackArgsSignExtendedMips64 as there is nothing to be done on "
-            << kRuntimeISA << std::endl;
-#endif
+// This function fetches the last argument passed from the caller, which is now on top of the
+// stack, and returns it as an 8-byte long. That way we can test whether the caller has properly
+// sign-extended the value when placing it on the stack.
+__attribute__((naked))
+jlong Java_MyClassNatives_getStackArgSignExtendedMips64(
+    JNIEnv*, jclass,                      // Arguments passed from caller
+    jint, jint, jint, jint, jint, jint,   // through regs a0 to a7.
+    jint) {                               // The last argument will be passed on the stack.
+  __asm__(
+      ".set noreorder\n\t"                // Just return and store 8 bytes from the top of the stack
+      "jr  $ra\n\t"                       // in v0 (in branch delay slot). This should be the last
+      "ld  $v0, 0($sp)\n\t");             // argument. It is a 32-bit int, but it should be sign
+                                          // extended and it occupies a 64-bit location.
 }
 
 void JniCompilerTest::StackArgsSignExtendedMips64Impl() {
-  SetUpForTest(true, "stackArgsSignExtendedMips64", "(IIIIIIII)V",
-               CURRENT_JNI_WRAPPER(Java_MyClassNatives_stackArgsSignExtendedMips64));
-  jint i1 = 1;
-  jint i2 = 2;
-  jint i3 = 3;
-  jint i4 = 4;
-  jint i5 = 5;
-  jint i6 = 6;
-  jint i7 = 7;
-  jint i8 = -8;
+  uint64_t ret;
+  SetUpForTest(true,
+               "getStackArgSignExtendedMips64",
+               "(IIIIIII)J",
+               // Don't use wrapper because this is raw assembly function.
+               reinterpret_cast<void*>(&Java_MyClassNatives_getStackArgSignExtendedMips64));
 
-  env_->CallStaticVoidMethod(jklass_, jmethod_, i1, i2, i3, i4, i5, i6, i7, i8);
+  // The Mips64 ABI requires that arguments passed on the stack occupy sign-extended 8B slots.
+  // The first 8 arguments are passed through registers.
+  // The final argument's value is 7; when sign-extended, the higher stack bits should be 0.
+  ret = env_->CallStaticLongMethod(jklass_, jmethod_, 1, 2, 3, 4, 5, 6, 7);
+  EXPECT_EQ(High32Bits(ret), static_cast<uint32_t>(0));
+
+  // The final argument's value is -8; when sign-extended, higher stack bits should be 0xffffffff.
+  ret = env_->CallStaticLongMethod(jklass_, jmethod_, 1, 2, 3, 4, 5, 6, -8);
+  EXPECT_EQ(High32Bits(ret), static_cast<uint32_t>(0xffffffff));
 }
 
-JNI_TEST_CRITICAL(StackArgsSignExtendedMips64)
+JNI_TEST(StackArgsSignExtendedMips64)
+#endif
 
 void Java_MyClassNatives_normalNative(JNIEnv*, jclass) {
   // Intentionally left empty.
@@ -2183,8 +2198,7 @@
                "()V",
                CURRENT_JNI_WRAPPER(Java_MyClassNatives_normalNative));
 
-  ScopedObjectAccess soa(Thread::Current());
-  ArtMethod* method = soa.DecodeMethod(jmethod_);
+  ArtMethod* method = jni::DecodeArtMethod(jmethod_);
   ASSERT_TRUE(method != nullptr);
 
   EXPECT_FALSE(method->IsAnnotatedWithCriticalNative());
@@ -2206,8 +2220,7 @@
                "()V",
                CURRENT_JNI_WRAPPER(Java_MyClassNatives_fastNative));
 
-  ScopedObjectAccess soa(Thread::Current());
-  ArtMethod* method = soa.DecodeMethod(jmethod_);
+  ArtMethod* method = jni::DecodeArtMethod(jmethod_);
   ASSERT_TRUE(method != nullptr);
 
   EXPECT_FALSE(method->IsAnnotatedWithCriticalNative());
@@ -2236,8 +2249,7 @@
   UpdateCurrentJni(JniKind::kCritical);
   ASSERT_TRUE(IsCurrentJniCritical());
 
-  ScopedObjectAccess soa(Thread::Current());
-  ArtMethod* method = soa.DecodeMethod(jmethod_);
+  ArtMethod* method = jni::DecodeArtMethod(jmethod_);
   ASSERT_TRUE(method != nullptr);
 
   EXPECT_TRUE(method->IsAnnotatedWithCriticalNative());
diff --git a/compiler/jni/quick/arm64/calling_convention_arm64.cc b/compiler/jni/quick/arm64/calling_convention_arm64.cc
index 3fb7b56..33f4d77 100644
--- a/compiler/jni/quick/arm64/calling_convention_arm64.cc
+++ b/compiler/jni/quick/arm64/calling_convention_arm64.cc
@@ -222,7 +222,11 @@
                                                      bool is_synchronized,
                                                      bool is_critical_native,
                                                      const char* shorty)
-    : JniCallingConvention(is_static, is_synchronized, is_critical_native, shorty, kArm64PointerSize) {
+    : JniCallingConvention(is_static,
+                           is_synchronized,
+                           is_critical_native,
+                           shorty,
+                           kArm64PointerSize) {
 }
 
 uint32_t Arm64JniCallingConvention::CoreSpillMask() const {
diff --git a/compiler/jni/quick/calling_convention.cc b/compiler/jni/quick/calling_convention.cc
index 9859b5d..36a87a8 100644
--- a/compiler/jni/quick/calling_convention.cc
+++ b/compiler/jni/quick/calling_convention.cc
@@ -152,24 +152,6 @@
                                                                    bool is_critical_native,
                                                                    const char* shorty,
                                                                    InstructionSet instruction_set) {
-  if (UNLIKELY(is_critical_native)) {
-    // Sanity check that the requested JNI instruction set
-    // is supported for critical natives. Not every one is.
-    switch (instruction_set) {
-      case kX86_64:
-      case kX86:
-      case kArm64:
-      case kArm:
-      case kThumb2:
-        break;
-      default:
-        is_critical_native = false;
-        LOG(WARNING) << "@CriticalNative support not implemented for " << instruction_set
-                     << "; will crash at runtime if trying to invoke such a method.";
-        // TODO: implement for MIPS/MIPS64
-    }
-  }
-
   switch (instruction_set) {
 #ifdef ART_ENABLE_CODEGEN_arm
     case kArm:
@@ -191,12 +173,18 @@
 #ifdef ART_ENABLE_CODEGEN_mips
     case kMips:
       return std::unique_ptr<JniCallingConvention>(
-          new (arena) mips::MipsJniCallingConvention(is_static, is_synchronized, shorty));
+          new (arena) mips::MipsJniCallingConvention(is_static,
+                                                     is_synchronized,
+                                                     is_critical_native,
+                                                     shorty));
 #endif
 #ifdef ART_ENABLE_CODEGEN_mips64
     case kMips64:
       return std::unique_ptr<JniCallingConvention>(
-          new (arena) mips64::Mips64JniCallingConvention(is_static, is_synchronized, shorty));
+          new (arena) mips64::Mips64JniCallingConvention(is_static,
+                                                         is_synchronized,
+                                                         is_critical_native,
+                                                         shorty));
 #endif
 #ifdef ART_ENABLE_CODEGEN_x86
     case kX86:
diff --git a/compiler/jni/quick/calling_convention.h b/compiler/jni/quick/calling_convention.h
index f541d8f..335a2df 100644
--- a/compiler/jni/quick/calling_convention.h
+++ b/compiler/jni/quick/calling_convention.h
@@ -370,14 +370,6 @@
     kObjectOrClass = 1
   };
 
-  // TODO: remove this constructor once all are changed to the below one.
-  JniCallingConvention(bool is_static,
-                       bool is_synchronized,
-                       const char* shorty,
-                       PointerSize frame_pointer_size)
-      : CallingConvention(is_static, is_synchronized, shorty, frame_pointer_size),
-        is_critical_native_(false) {}
-
   JniCallingConvention(bool is_static,
                        bool is_synchronized,
                        bool is_critical_native,
diff --git a/compiler/jni/quick/mips/calling_convention_mips.cc b/compiler/jni/quick/mips/calling_convention_mips.cc
index f5ab5f7..e6948ec 100644
--- a/compiler/jni/quick/mips/calling_convention_mips.cc
+++ b/compiler/jni/quick/mips/calling_convention_mips.cc
@@ -23,6 +23,13 @@
 namespace art {
 namespace mips {
 
+// Up to how many float-like (float, double) args can be enregistered in floating-point registers.
+// The rest of the args must go in integer registers or on the stack.
+constexpr size_t kMaxFloatOrDoubleRegisterArguments = 2u;
+// Up to how many integer-like (pointers, objects, longs, int, short, bool, etc) args can be
+// enregistered. The rest of the args must go on the stack.
+constexpr size_t kMaxIntLikeRegisterArguments = 4u;
+
 static const Register kCoreArgumentRegisters[] = { A0, A1, A2, A3 };
 static const FRegister kFArgumentRegisters[] = { F12, F14 };
 static const DRegister kDArgumentRegisters[] = { D6, D7 };
@@ -170,23 +177,134 @@
 }
 // JNI calling convention
 
-MipsJniCallingConvention::MipsJniCallingConvention(bool is_static, bool is_synchronized,
+MipsJniCallingConvention::MipsJniCallingConvention(bool is_static,
+                                                   bool is_synchronized,
+                                                   bool is_critical_native,
                                                    const char* shorty)
-    : JniCallingConvention(is_static, is_synchronized, shorty, kMipsPointerSize) {
-  // Compute padding to ensure longs and doubles are not split in AAPCS. Ignore the 'this' jobject
-  // or jclass for static methods and the JNIEnv. We start at the aligned register A2.
+    : JniCallingConvention(is_static,
+                           is_synchronized,
+                           is_critical_native,
+                           shorty,
+                           kMipsPointerSize) {
+  // SYSTEM V - Application Binary Interface (MIPS RISC Processor):
+  // Data Representation - Fundamental Types (3-4) specifies fundamental alignments for each type.
+  //   "Each member is assigned to the lowest available offset with the appropriate alignment. This
+  // may require internal padding, depending on the previous member."
+  //
+  // All of our stack arguments are usually 4-byte aligned; however, longs and doubles must be
+  // 8-byte aligned. Add padding to maintain the 8-byte alignment invariant.
+  //
+  // Compute padding to ensure longs and doubles are not split in o32.
   size_t padding = 0;
-  for (size_t cur_arg = IsStatic() ? 0 : 1, cur_reg = 2; cur_arg < NumArgs(); cur_arg++) {
+  size_t cur_arg, cur_reg;
+  if (LIKELY(HasExtraArgumentsForJni())) {
+    // Ignore the 'this' jobject or jclass for static methods and the JNIEnv.
+    // We start at the aligned register A2.
+    //
+    // Ignore the first 2 parameters because they are guaranteed to be aligned.
+    cur_arg = NumImplicitArgs();  // Skip the "this" argument.
+    cur_reg = 2;  // Skip {A0=JNIEnv, A1=jobject} / {A0=JNIEnv, A1=jclass} parameters (start at A2).
+  } else {
+    // Check every parameter.
+    cur_arg = 0;
+    cur_reg = 0;
+  }
+
+  // Shift across a logical register mapping that looks like:
+  //
+  //   | A0 | A1 | A2 | A3 | SP+16 | SP+20 | SP+24 | ... | SP+n | SP+n+4 |
+  //
+  //   or one of its variants with floating-point registers (F12 and F14), for example
+  //
+  //   | F12     | F14 | A3 | SP+16 | SP+20 | SP+24 | ... | SP+n | SP+n+4 |
+  //
+  //   (where SP is the stack pointer at the start of called function).
+  //
+  // Any time there would normally be a long/double in an odd logical register,
+  // we have to push out the rest of the mappings by 4 bytes to maintain an 8-byte alignment.
+  //
+  // This works for both physical register pairs {A0, A1}, {A2, A3},
+  // floating-point registers F12, F14 and for when the value is on the stack.
+  //
+  // For example:
+  // (a) long would normally go into A1, but we shift it into A2
+  //  | INT | (PAD) | LONG    |
+  //  | A0  |  A1   | A2 | A3 |
+  //
+  // (b) long would normally go into A3, but we shift it into SP
+  //  | INT | INT | INT | (PAD) | LONG        |
+  //  | A0  | A1  | A2  |  A3   | SP+16 SP+20 |
+  //
+  // where INT is any <=4 byte arg, and LONG is any 8-byte arg.
+  for (; cur_arg < NumArgs(); cur_arg++) {
     if (IsParamALongOrDouble(cur_arg)) {
       if ((cur_reg & 1) != 0) {
         padding += 4;
-        cur_reg++;  // additional bump to ensure alignment
+        cur_reg++;   // Additional bump to ensure alignment.
       }
-      cur_reg++;  // additional bump to skip extra long word
+      cur_reg += 2;  // Bump the iterator twice for every long argument.
+    } else {
+      cur_reg++;     // Bump the iterator for every argument.
     }
-    cur_reg++;  // bump the iterator for every argument
   }
-  padding_ = padding;
+  if (cur_reg < kMaxIntLikeRegisterArguments) {
+    // As a special case, when (as a result of the shifting, or lack of it) there are no arguments
+    // on the stack, we actually have 0 stack padding.
+    //
+    // For example with @CriticalNative and:
+    // (int, long) -> shifts the long but doesn't need to pad the stack
+    //
+    //          shift
+    //           \/
+    //  | INT | (PAD) | LONG      | (EMPTY) ...
+    //  | r0  |  r1   |  r2  | r3 |   SP    ...
+    //                                /\
+    //                          no stack padding
+    padding_ = 0;
+  } else {
+    padding_ = padding;
+  }
+
+  // Argument Passing (3-17):
+  //   "When the first argument is integral, the remaining arguments are passed in the integer
+  // registers."
+  //
+  //   "The rules that determine which arguments go into registers and which ones must be passed on
+  // the stack are most easily explained by considering the list of arguments as a structure,
+  // aligned according to normal structure rules. Mapping of this structure into the combination of
+  // stack and registers is as follows: up to two leading floating-point arguments can be passed in
+  // $f12 and $f14; everything else with a structure offset greater than or equal to 16 is passed on
+  // the stack. The remainder of the arguments are passed in $4..$7 based on their structure offset.
+  // Holes left in the structure for alignment are unused, whether in registers or in the stack."
+  //
+  // For example with @CriticalNative and:
+  // (a) first argument is not floating-point, so all go into integer registers
+  //  | INT | FLOAT | DOUBLE  |
+  //  | A0  |  A1   | A2 | A3 |
+  // (b) first argument is floating-point, but 2nd is integer
+  //  | FLOAT | INT | DOUBLE  |
+  //  |  F12  | A1  | A2 | A3 |
+  // (c) first two arguments are floating-point (float, double)
+  //  | FLOAT | (PAD) | DOUBLE |  INT  |
+  //  |  F12  |       |  F14   | SP+16 |
+  // (d) first two arguments are floating-point (double, float)
+  //  | DOUBLE | FLOAT | INT |
+  //  |  F12   |  F14  | A3  |
+  // (e) first three arguments are floating-point, but just first two will go into fp registers
+  //  | DOUBLE | FLOAT | FLOAT |
+  //  |  F12   |  F14  |  A3   |
+  //
+  // Find out if the first argument is floating-point. In that case, floating-point registers will
+  // be used for up to two leading floating-point arguments. Otherwise, all arguments will be passed
+  // using integer registers.
+  use_fp_arg_registers_ = false;
+  if (is_critical_native) {
+    if (NumArgs() > 0) {
+      if (IsParamAFloatOrDouble(0)) {
+        use_fp_arg_registers_ = true;
+      }
+    }
+  }
 }
 
 uint32_t MipsJniCallingConvention::CoreSpillMask() const {
@@ -202,74 +320,127 @@
 }
 
 size_t MipsJniCallingConvention::FrameSize() {
-  // ArtMethod*, RA and callee save area size, local reference segment state
-  size_t frame_data_size = static_cast<size_t>(kMipsPointerSize) +
-      (2 + CalleeSaveRegisters().size()) * kFramePointerSize;
-  // References plus 2 words for HandleScope header
-  size_t handle_scope_size = HandleScope::SizeOf(kMipsPointerSize, ReferenceCount());
-  // Plus return value spill area size
-  return RoundUp(frame_data_size + handle_scope_size + SizeOfReturnValue(), kStackAlignment);
+  // ArtMethod*, RA and callee save area size, local reference segment state.
+  const size_t method_ptr_size = static_cast<size_t>(kMipsPointerSize);
+  const size_t ra_return_addr_size = kFramePointerSize;
+  const size_t callee_save_area_size = CalleeSaveRegisters().size() * kFramePointerSize;
+
+  size_t frame_data_size = method_ptr_size + ra_return_addr_size + callee_save_area_size;
+
+  if (LIKELY(HasLocalReferenceSegmentState())) {
+    // Local reference segment state.
+    frame_data_size += kFramePointerSize;
+  }
+
+  // References plus 2 words for HandleScope header.
+  const size_t handle_scope_size = HandleScope::SizeOf(kMipsPointerSize, ReferenceCount());
+
+  size_t total_size = frame_data_size;
+  if (LIKELY(HasHandleScope())) {
+    // HandleScope is sometimes excluded.
+    total_size += handle_scope_size;    // Handle scope size.
+  }
+
+  // Plus return value spill area size.
+  total_size += SizeOfReturnValue();
+
+  return RoundUp(total_size, kStackAlignment);
 }
 
 size_t MipsJniCallingConvention::OutArgSize() {
-  return RoundUp(NumberOfOutgoingStackArgs() * kFramePointerSize + padding_, kStackAlignment);
+  // Argument Passing (3-17):
+  //   "Despite the fact that some or all of the arguments to a function are passed in registers,
+  // always allocate space on the stack for all arguments. This stack space should be a structure
+  // large enough to contain all the arguments, aligned according to normal structure rules (after
+  // promotion and structure return pointer insertion). The locations within the stack frame used
+  // for arguments are called the home locations."
+  //
+  // Allocate 16 bytes for home locations + space needed for stack arguments.
+  return RoundUp(
+      (kMaxIntLikeRegisterArguments + NumberOfOutgoingStackArgs()) * kFramePointerSize + padding_,
+      kStackAlignment);
 }
 
 ArrayRef<const ManagedRegister> MipsJniCallingConvention::CalleeSaveRegisters() const {
   return ArrayRef<const ManagedRegister>(kCalleeSaveRegisters);
 }
 
-// JniCallingConvention ABI follows AAPCS where longs and doubles must occur
-// in even register numbers and stack slots
+// JniCallingConvention ABI follows o32 where longs and doubles must occur
+// in even register numbers and stack slots.
 void MipsJniCallingConvention::Next() {
   JniCallingConvention::Next();
-  size_t arg_pos = itr_args_ - NumberOfExtraArgumentsForJni();
-  if ((itr_args_ >= 2) &&
-      (arg_pos < NumArgs()) &&
-      IsParamALongOrDouble(arg_pos)) {
-    // itr_slots_ needs to be an even number, according to AAPCS.
-    if ((itr_slots_ & 0x1u) != 0) {
+
+  if (LIKELY(HasNext())) {  // Avoid a CHECK failure in the IsCurrentParam* helpers below.
+    // Ensure slot is 8-byte aligned for longs/doubles (o32).
+    if (IsCurrentParamALongOrDouble() && ((itr_slots_ & 0x1u) != 0)) {
+      // itr_slots_ needs to be an even number, according to o32.
       itr_slots_++;
     }
   }
 }
 
 bool MipsJniCallingConvention::IsCurrentParamInRegister() {
-  return itr_slots_ < 4;
+  // Argument Passing (3-17):
+  //   "The rules that determine which arguments go into registers and which ones must be passed on
+  // the stack are most easily explained by considering the list of arguments as a structure,
+  // aligned according to normal structure rules. Mapping of this structure into the combination of
+  // stack and registers is as follows: up to two leading floating-point arguments can be passed in
+  // $f12 and $f14; everything else with a structure offset greater than or equal to 16 is passed on
+  // the stack. The remainder of the arguments are passed in $4..$7 based on their structure offset.
+  // Holes left in the structure for alignment are unused, whether in registers or in the stack."
+  //
+  // Even when floating-point registers are used, there can be up to 4 arguments passed in
+  // registers.
+  return itr_slots_ < kMaxIntLikeRegisterArguments;
 }
 
 bool MipsJniCallingConvention::IsCurrentParamOnStack() {
   return !IsCurrentParamInRegister();
 }
 
-static const Register kJniArgumentRegisters[] = {
-  A0, A1, A2, A3
-};
 ManagedRegister MipsJniCallingConvention::CurrentParamRegister() {
-  CHECK_LT(itr_slots_, 4u);
-  int arg_pos = itr_args_ - NumberOfExtraArgumentsForJni();
-  if ((itr_args_ >= 2) && IsParamALongOrDouble(arg_pos)) {
-    CHECK_EQ(itr_slots_, 2u);
-    return MipsManagedRegister::FromRegisterPair(A2_A3);
+  CHECK_LT(itr_slots_, kMaxIntLikeRegisterArguments);
+  // Up to two leading floating-point arguments can be passed in floating-point registers.
+  if (use_fp_arg_registers_ && (itr_args_ < kMaxFloatOrDoubleRegisterArguments)) {
+    if (IsCurrentParamAFloatOrDouble()) {
+      if (IsCurrentParamADouble()) {
+        return MipsManagedRegister::FromDRegister(kDArgumentRegisters[itr_args_]);
+      } else {
+        return MipsManagedRegister::FromFRegister(kFArgumentRegisters[itr_args_]);
+      }
+    }
+  }
+  // All other arguments (including other floating-point arguments) will be passed in integer
+  // registers.
+  if (IsCurrentParamALongOrDouble()) {
+    if (itr_slots_ == 0u) {
+      return MipsManagedRegister::FromRegisterPair(A0_A1);
+    } else {
+      CHECK_EQ(itr_slots_, 2u);
+      return MipsManagedRegister::FromRegisterPair(A2_A3);
+    }
   } else {
-    return
-      MipsManagedRegister::FromCoreRegister(kJniArgumentRegisters[itr_slots_]);
+    return MipsManagedRegister::FromCoreRegister(kCoreArgumentRegisters[itr_slots_]);
   }
 }
 
 FrameOffset MipsJniCallingConvention::CurrentParamStackOffset() {
-  CHECK_GE(itr_slots_, 4u);
+  CHECK_GE(itr_slots_, kMaxIntLikeRegisterArguments);
   size_t offset = displacement_.Int32Value() - OutArgSize() + (itr_slots_ * kFramePointerSize);
   CHECK_LT(offset, OutArgSize());
   return FrameOffset(offset);
 }
 
 size_t MipsJniCallingConvention::NumberOfOutgoingStackArgs() {
-  size_t static_args = IsStatic() ? 1 : 0;  // count jclass
-  // regular argument parameters and this
-  size_t param_args = NumArgs() + NumLongOrDoubleArgs();
-  // count JNIEnv*
-  return static_args + param_args + 1;
+  size_t static_args = HasSelfClass() ? 1 : 0;            // Count jclass.
+  // Regular argument parameters and this.
+  size_t param_args = NumArgs() + NumLongOrDoubleArgs();  // Count 8-byte args twice.
+  // Count JNIEnv* less arguments in registers.
+  size_t internal_args = (HasJniEnv() ? 1 : 0);
+  size_t total_args = static_args + param_args + internal_args;
+
+  return total_args - std::min(kMaxIntLikeRegisterArguments, static_cast<size_t>(total_args));
 }
+
 }  // namespace mips
 }  // namespace art
diff --git a/compiler/jni/quick/mips/calling_convention_mips.h b/compiler/jni/quick/mips/calling_convention_mips.h
index e95a738..ad3f118 100644
--- a/compiler/jni/quick/mips/calling_convention_mips.h
+++ b/compiler/jni/quick/mips/calling_convention_mips.h
@@ -54,14 +54,17 @@
 
 class MipsJniCallingConvention FINAL : public JniCallingConvention {
  public:
-  MipsJniCallingConvention(bool is_static, bool is_synchronized, const char* shorty);
+  MipsJniCallingConvention(bool is_static,
+                           bool is_synchronized,
+                           bool is_critical_native,
+                           const char* shorty);
   ~MipsJniCallingConvention() OVERRIDE {}
   // Calling convention
   ManagedRegister ReturnRegister() OVERRIDE;
   ManagedRegister IntReturnRegister() OVERRIDE;
   ManagedRegister InterproceduralScratchRegister() OVERRIDE;
   // JNI calling convention
-  void Next() OVERRIDE;  // Override default behavior for AAPCS
+  void Next() OVERRIDE;  // Override default behavior for o32.
   size_t FrameSize() OVERRIDE;
   size_t OutArgSize() OVERRIDE;
   ArrayRef<const ManagedRegister> CalleeSaveRegisters() const OVERRIDE;
@@ -82,8 +85,9 @@
   size_t NumberOfOutgoingStackArgs() OVERRIDE;
 
  private:
-  // Padding to ensure longs and doubles are not split in AAPCS
+  // Padding to ensure longs and doubles are not split in o32.
   size_t padding_;
+  bool use_fp_arg_registers_;
 
   DISALLOW_COPY_AND_ASSIGN(MipsJniCallingConvention);
 };
diff --git a/compiler/jni/quick/mips64/calling_convention_mips64.cc b/compiler/jni/quick/mips64/calling_convention_mips64.cc
index 8341e8e..afe6a76 100644
--- a/compiler/jni/quick/mips64/calling_convention_mips64.cc
+++ b/compiler/jni/quick/mips64/calling_convention_mips64.cc
@@ -23,6 +23,9 @@
 namespace art {
 namespace mips64 {
 
+// Up to how many args can be enregistered. The rest of the args must go on the stack.
+constexpr size_t kMaxRegisterArguments = 8u;
+
 static const GpuRegister kGpuArgumentRegisters[] = {
   A0, A1, A2, A3, A4, A5, A6, A7
 };
@@ -150,9 +153,15 @@
 
 // JNI calling convention
 
-Mips64JniCallingConvention::Mips64JniCallingConvention(bool is_static, bool is_synchronized,
+Mips64JniCallingConvention::Mips64JniCallingConvention(bool is_static,
+                                                       bool is_synchronized,
+                                                       bool is_critical_native,
                                                        const char* shorty)
-    : JniCallingConvention(is_static, is_synchronized, shorty, kMips64PointerSize) {
+    : JniCallingConvention(is_static,
+                           is_synchronized,
+                           is_critical_native,
+                           shorty,
+                           kMips64PointerSize) {
 }
 
 uint32_t Mips64JniCallingConvention::CoreSpillMask() const {
@@ -168,13 +177,28 @@
 }
 
 size_t Mips64JniCallingConvention::FrameSize() {
-  // ArtMethod*, RA and callee save area size, local reference segment state
-  size_t frame_data_size = kFramePointerSize +
-      (CalleeSaveRegisters().size() + 1) * kFramePointerSize + sizeof(uint32_t);
-  // References plus 2 words for HandleScope header
+  // ArtMethod*, RA and callee save area size, local reference segment state.
+  size_t method_ptr_size = static_cast<size_t>(kFramePointerSize);
+  size_t ra_and_callee_save_area_size = (CalleeSaveRegisters().size() + 1) * kFramePointerSize;
+
+  size_t frame_data_size = method_ptr_size + ra_and_callee_save_area_size;
+  if (LIKELY(HasLocalReferenceSegmentState())) {                     // Local ref. segment state.
+    // Local reference segment state is sometimes excluded.
+    frame_data_size += sizeof(uint32_t);
+  }
+  // References plus 2 words for HandleScope header.
   size_t handle_scope_size = HandleScope::SizeOf(kMips64PointerSize, ReferenceCount());
-  // Plus return value spill area size
-  return RoundUp(frame_data_size + handle_scope_size + SizeOfReturnValue(), kStackAlignment);
+
+  size_t total_size = frame_data_size;
+  if (LIKELY(HasHandleScope())) {
+    // HandleScope is sometimes excluded.
+    total_size += handle_scope_size;                                 // Handle scope size.
+  }
+
+  // Plus return value spill area size.
+  total_size += SizeOfReturnValue();
+
+  return RoundUp(total_size, kStackAlignment);
 }
 
 size_t Mips64JniCallingConvention::OutArgSize() {
@@ -186,7 +210,7 @@
 }
 
 bool Mips64JniCallingConvention::IsCurrentParamInRegister() {
-  return itr_args_ < 8;
+  return itr_args_ < kMaxRegisterArguments;
 }
 
 bool Mips64JniCallingConvention::IsCurrentParamOnStack() {
@@ -204,7 +228,8 @@
 
 FrameOffset Mips64JniCallingConvention::CurrentParamStackOffset() {
   CHECK(IsCurrentParamOnStack());
-  size_t offset = displacement_.Int32Value() - OutArgSize() + ((itr_args_ - 8) * kFramePointerSize);
+  size_t args_on_stack = itr_args_ - kMaxRegisterArguments;
+  size_t offset = displacement_.Int32Value() - OutArgSize() + (args_on_stack * kFramePointerSize);
   CHECK_LT(offset, OutArgSize());
   return FrameOffset(offset);
 }
@@ -214,7 +239,7 @@
   size_t all_args = NumArgs() + NumberOfExtraArgumentsForJni();
 
   // Nothing on the stack unless there are more than 8 arguments
-  return (all_args > 8) ? all_args - 8 : 0;
+  return (all_args > kMaxRegisterArguments) ? all_args - kMaxRegisterArguments : 0;
 }
 }  // namespace mips64
 }  // namespace art
diff --git a/compiler/jni/quick/mips64/calling_convention_mips64.h b/compiler/jni/quick/mips64/calling_convention_mips64.h
index a5fd111..faedaef 100644
--- a/compiler/jni/quick/mips64/calling_convention_mips64.h
+++ b/compiler/jni/quick/mips64/calling_convention_mips64.h
@@ -54,7 +54,10 @@
 
 class Mips64JniCallingConvention FINAL : public JniCallingConvention {
  public:
-  Mips64JniCallingConvention(bool is_static, bool is_synchronized, const char* shorty);
+  Mips64JniCallingConvention(bool is_static,
+                             bool is_synchronized,
+                             bool is_critical_native,
+                             const char* shorty);
   ~Mips64JniCallingConvention() OVERRIDE {}
   // Calling convention
   ManagedRegister ReturnRegister() OVERRIDE;
diff --git a/compiler/oat_test.cc b/compiler/oat_test.cc
index fd1b135..102637f 100644
--- a/compiler/oat_test.cc
+++ b/compiler/oat_test.cc
@@ -92,7 +92,7 @@
                      const std::vector<std::string>& compiler_options,
                      /*out*/std::string* error_msg) {
     ASSERT_TRUE(error_msg != nullptr);
-    insn_features_.reset(InstructionSetFeatures::FromVariant(insn_set, "default", error_msg));
+    insn_features_ = InstructionSetFeatures::FromVariant(insn_set, "default", error_msg);
     ASSERT_TRUE(insn_features_ != nullptr) << error_msg;
     compiler_options_.reset(new CompilerOptions);
     for (const std::string& option : compiler_options) {
@@ -377,7 +377,8 @@
   if (kCompile) {
     TimingLogger timings2("OatTest::WriteRead", false, false);
     compiler_driver_->SetDexFilesForOatFile(class_linker->GetBootClassPath());
-    compiler_driver_->CompileAll(class_loader, class_linker->GetBootClassPath(), &timings2);
+    compiler_driver_->CompileAll(
+        class_loader, class_linker->GetBootClassPath(), /* verifier_deps */ nullptr, &timings2);
   }
 
   ScratchFile tmp_oat, tmp_vdex(tmp_oat, ".vdex");
@@ -391,7 +392,8 @@
   ASSERT_TRUE(success);
 
   if (kCompile) {  // OatWriter strips the code, regenerate to compare
-    compiler_driver_->CompileAll(class_loader, class_linker->GetBootClassPath(), &timings);
+    compiler_driver_->CompileAll(
+        class_loader, class_linker->GetBootClassPath(), /* verifier_deps */ nullptr, &timings);
   }
   std::unique_ptr<OatFile> oat_file(OatFile::Open(tmp_oat.GetFilename(),
                                                   tmp_oat.GetFilename(),
@@ -515,7 +517,7 @@
                                   soa.Decode<mirror::ClassLoader>(class_loader).Ptr());
   }
   compiler_driver_->SetDexFilesForOatFile(dex_files);
-  compiler_driver_->CompileAll(class_loader, dex_files, &timings);
+  compiler_driver_->CompileAll(class_loader, dex_files, /* verifier_deps */ nullptr, &timings);
 
   ScratchFile tmp_oat, tmp_vdex(tmp_oat, ".vdex");
   SafeMap<std::string, std::string> key_value_store;
diff --git a/compiler/oat_writer.cc b/compiler/oat_writer.cc
index 6cbca7a..f9173f5 100644
--- a/compiler/oat_writer.cc
+++ b/compiler/oat_writer.cc
@@ -1759,7 +1759,7 @@
   }
 
   std::vector<uint8_t> buffer;
-  verifier_deps->Encode(&buffer);
+  verifier_deps->Encode(*dex_files_, &buffer);
 
   if (!vdex_out->WriteFully(buffer.data(), buffer.size())) {
     PLOG(ERROR) << "Failed to write verifier deps."
diff --git a/compiler/optimizing/bounds_check_elimination.cc b/compiler/optimizing/bounds_check_elimination.cc
index 529fc9e..7dc094b 100644
--- a/compiler/optimizing/bounds_check_elimination.cc
+++ b/compiler/optimizing/bounds_check_elimination.cc
@@ -548,7 +548,21 @@
   void VisitBasicBlock(HBasicBlock* block) OVERRIDE {
     DCHECK(!IsAddedBlock(block));
     first_index_bounds_check_map_.clear();
-    HGraphVisitor::VisitBasicBlock(block);
+    // Visit phis and instructions using a safe iterator. The iteration protects
+    // against deleting the current instruction during iteration. However, it
+    // must advance next_ if that instruction is deleted during iteration.
+    for (HInstruction* instruction = block->GetFirstPhi(); instruction != nullptr;) {
+      DCHECK(instruction->IsInBlock());
+      next_ = instruction->GetNext();
+      instruction->Accept(this);
+      instruction = next_;
+    }
+    for (HInstruction* instruction = block->GetFirstInstruction(); instruction != nullptr;) {
+      DCHECK(instruction->IsInBlock());
+      next_ = instruction->GetNext();
+      instruction->Accept(this);
+      instruction = next_;
+    }
     // We should never deoptimize from an osr method, otherwise we might wrongly optimize
     // code dominated by the deoptimization.
     if (!GetGraph()->IsCompilingOsr()) {
@@ -1798,7 +1812,12 @@
   }
 
   /** Helper method to replace an instruction with another instruction. */
-  static void ReplaceInstruction(HInstruction* instruction, HInstruction* replacement) {
+  void ReplaceInstruction(HInstruction* instruction, HInstruction* replacement) {
+    // Safe iteration.
+    if (instruction == next_) {
+      next_ = next_->GetNext();
+    }
+    // Replace and remove.
     instruction->ReplaceWith(replacement);
     instruction->GetBlock()->RemoveInstruction(instruction);
   }
@@ -1831,6 +1850,9 @@
   // Range analysis based on induction variables.
   InductionVarRange induction_range_;
 
+  // Safe iteration.
+  HInstruction* next_;
+
   DISALLOW_COPY_AND_ASSIGN(BCEVisitor);
 };
 
@@ -1845,8 +1867,8 @@
   // that value dominated by that instruction fits in that range. Range of that
   // value can be narrowed further down in the dominator tree.
   BCEVisitor visitor(graph_, side_effects_, induction_analysis_);
-  for (HReversePostOrderIterator it(*graph_); !it.Done(); it.Advance()) {
-    HBasicBlock* current = it.Current();
+  for (size_t i = 0, size = graph_->GetReversePostOrder().size(); i != size; ++i) {
+    HBasicBlock* current = graph_->GetReversePostOrder()[i];
     if (visitor.IsAddedBlock(current)) {
       // Skip added blocks. Their effects are already taken care of.
       continue;
@@ -1855,8 +1877,11 @@
     // Skip forward to the current block in case new basic blocks were inserted
     // (which always appear earlier in reverse post order) to avoid visiting the
     // same basic block twice.
-    for ( ; !it.Done() && it.Current() != current; it.Advance()) {
-    }
+    size_t new_size = graph_->GetReversePostOrder().size();
+    DCHECK_GE(new_size, size);
+    i += new_size - size;
+    DCHECK_EQ(current, graph_->GetReversePostOrder()[i]);
+    size = new_size;
   }
 
   // Perform cleanup.
diff --git a/compiler/optimizing/bytecode_utils.h b/compiler/optimizing/bytecode_utils.h
index 6dfffce..133afa4 100644
--- a/compiler/optimizing/bytecode_utils.h
+++ b/compiler/optimizing/bytecode_utils.h
@@ -26,7 +26,8 @@
 
 class CodeItemIterator : public ValueObject {
  public:
-  CodeItemIterator(const DexFile::CodeItem& code_item, uint32_t start_dex_pc = 0u)
+  explicit CodeItemIterator(const DexFile::CodeItem& code_item) : CodeItemIterator(code_item, 0u) {}
+  CodeItemIterator(const DexFile::CodeItem& code_item, uint32_t start_dex_pc)
       : code_ptr_(code_item.insns_ + start_dex_pc),
         code_end_(code_item.insns_ + code_item.insns_size_in_code_units_),
         dex_pc_(start_dex_pc) {}
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index 0f8cdbb..8b450e1 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -671,9 +671,9 @@
     return;
   }
   ArenaVector<HSuspendCheck*> loop_headers(graph.GetArena()->Adapter(kArenaAllocMisc));
-  for (HReversePostOrderIterator it(graph); !it.Done(); it.Advance()) {
-    if (it.Current()->IsLoopHeader()) {
-      HSuspendCheck* suspend_check = it.Current()->GetLoopInformation()->GetSuspendCheck();
+  for (HBasicBlock* block : graph.GetReversePostOrder()) {
+    if (block->IsLoopHeader()) {
+      HSuspendCheck* suspend_check = block->GetLoopInformation()->GetSuspendCheck();
       if (!suspend_check->GetEnvironment()->IsFromInlinedInvoke()) {
         loop_headers.push_back(suspend_check);
       }
diff --git a/compiler/optimizing/code_generator.h b/compiler/optimizing/code_generator.h
index a81f24e..bf246ad 100644
--- a/compiler/optimizing/code_generator.h
+++ b/compiler/optimizing/code_generator.h
@@ -30,6 +30,7 @@
 #include "memory_region.h"
 #include "nodes.h"
 #include "optimizing_compiler_stats.h"
+#include "read_barrier_option.h"
 #include "stack_map_stream.h"
 #include "utils/label.h"
 
@@ -50,6 +51,9 @@
 // Maximum value for a primitive long.
 static int64_t constexpr kPrimLongMax = INT64_C(0x7fffffffffffffff);
 
+static constexpr ReadBarrierOption kCompilerReadBarrierOption =
+    kEmitCompilerReadBarrier ? kWithReadBarrier : kWithoutReadBarrier;
+
 class Assembler;
 class CodeGenerator;
 class CompilerDriver;
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index 9f92b20..7c3a2c6 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -489,8 +489,6 @@
 
   void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
     LocationSummary* locations = instruction_->GetLocations();
-    Location object_class = instruction_->IsCheckCast() ? locations->GetTemp(0)
-                                                        : locations->Out();
     DCHECK(instruction_->IsCheckCast()
            || !locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
 
@@ -504,26 +502,26 @@
     // We're moving two locations to locations that could overlap, so we need a parallel
     // move resolver.
     InvokeRuntimeCallingConvention calling_convention;
-    codegen->EmitParallelMoves(
-        locations->InAt(1),
-        Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
-        Primitive::kPrimNot,
-        object_class,
-        Location::RegisterLocation(calling_convention.GetRegisterAt(1)),
-        Primitive::kPrimNot);
-
+    codegen->EmitParallelMoves(locations->InAt(0),
+                               Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
+                               Primitive::kPrimNot,
+                               locations->InAt(1),
+                               Location::RegisterLocation(calling_convention.GetRegisterAt(1)),
+                               Primitive::kPrimNot);
     if (instruction_->IsInstanceOf()) {
       arm_codegen->InvokeRuntime(kQuickInstanceofNonTrivial,
                                  instruction_,
                                  instruction_->GetDexPc(),
                                  this);
-      CheckEntrypointTypes<
-          kQuickInstanceofNonTrivial, size_t, const mirror::Class*, const mirror::Class*>();
+      CheckEntrypointTypes<kQuickInstanceofNonTrivial, size_t, mirror::Object*, mirror::Class*>();
       arm_codegen->Move32(locations->Out(), Location::RegisterLocation(R0));
     } else {
       DCHECK(instruction_->IsCheckCast());
-      arm_codegen->InvokeRuntime(kQuickCheckCast, instruction_, instruction_->GetDexPc(), this);
-      CheckEntrypointTypes<kQuickCheckCast, void, const mirror::Class*, const mirror::Class*>();
+      arm_codegen->InvokeRuntime(kQuickCheckInstanceOf,
+                                 instruction_,
+                                 instruction_->GetDexPc(),
+                                 this);
+      CheckEntrypointTypes<kQuickCheckInstanceOf, void, mirror::Object*, mirror::Class*>();
     }
 
     if (!is_fatal_) {
@@ -601,11 +599,23 @@
   DISALLOW_COPY_AND_ASSIGN(ArraySetSlowPathARM);
 };
 
-// Slow path marking an object during a read barrier.
+// Slow path marking an object reference `ref` during a read
+// barrier. The field `obj.field` in the object `obj` holding this
+// reference does not get updated by this slow path after marking (see
+// ReadBarrierMarkAndUpdateFieldSlowPathARM below for that).
+//
+// This means that after the execution of this slow path, `ref` will
+// always be up-to-date, but `obj.field` may not; i.e., after the
+// flip, `ref` will be a to-space reference, but `obj.field` will
+// probably still be a from-space reference (unless it gets updated by
+// another thread, or if another thread installed another object
+// reference (different from `ref`) in `obj.field`).
 class ReadBarrierMarkSlowPathARM : public SlowPathCodeARM {
  public:
-  ReadBarrierMarkSlowPathARM(HInstruction* instruction, Location obj)
-      : SlowPathCodeARM(instruction), obj_(obj) {
+  ReadBarrierMarkSlowPathARM(HInstruction* instruction,
+                             Location ref,
+                             Location entrypoint = Location::NoLocation())
+      : SlowPathCodeARM(instruction), ref_(ref), entrypoint_(entrypoint) {
     DCHECK(kEmitCompilerReadBarrier);
   }
 
@@ -613,9 +623,9 @@
 
   void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
     LocationSummary* locations = instruction_->GetLocations();
-    Register reg = obj_.AsRegister<Register>();
+    Register ref_reg = ref_.AsRegister<Register>();
     DCHECK(locations->CanCall());
-    DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(reg));
+    DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(ref_reg)) << ref_reg;
     DCHECK(instruction_->IsInstanceFieldGet() ||
            instruction_->IsStaticFieldGet() ||
            instruction_->IsArrayGet() ||
@@ -628,44 +638,230 @@
            (instruction_->IsInvokeStaticOrDirect() && instruction_->GetLocations()->Intrinsified()))
         << "Unexpected instruction in read barrier marking slow path: "
         << instruction_->DebugName();
+    // The read barrier instrumentation of object ArrayGet
+    // instructions does not support the HIntermediateAddress
+    // instruction.
+    DCHECK(!(instruction_->IsArrayGet() &&
+             instruction_->AsArrayGet()->GetArray()->IsIntermediateAddress()));
 
     __ Bind(GetEntryLabel());
     // No need to save live registers; it's taken care of by the
     // entrypoint. Also, there is no need to update the stack mask,
     // as this runtime call will not trigger a garbage collection.
     CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
-    DCHECK_NE(reg, SP);
-    DCHECK_NE(reg, LR);
-    DCHECK_NE(reg, PC);
+    DCHECK_NE(ref_reg, SP);
+    DCHECK_NE(ref_reg, LR);
+    DCHECK_NE(ref_reg, PC);
     // IP is used internally by the ReadBarrierMarkRegX entry point
     // as a temporary, it cannot be the entry point's input/output.
-    DCHECK_NE(reg, IP);
-    DCHECK(0 <= reg && reg < kNumberOfCoreRegisters) << reg;
+    DCHECK_NE(ref_reg, IP);
+    DCHECK(0 <= ref_reg && ref_reg < kNumberOfCoreRegisters) << ref_reg;
     // "Compact" slow path, saving two moves.
     //
     // Instead of using the standard runtime calling convention (input
     // and output in R0):
     //
-    //   R0 <- obj
+    //   R0 <- ref
     //   R0 <- ReadBarrierMark(R0)
-    //   obj <- R0
+    //   ref <- R0
     //
-    // we just use rX (the register holding `obj`) as input and output
+    // we just use rX (the register containing `ref`) as input and output
+    // of a dedicated entrypoint:
+    //
+    //   rX <- ReadBarrierMarkRegX(rX)
+    //
+    if (entrypoint_.IsValid()) {
+      arm_codegen->ValidateInvokeRuntimeWithoutRecordingPcInfo(instruction_, this);
+      __ blx(entrypoint_.AsRegister<Register>());
+    } else {
+      int32_t entry_point_offset =
+          CodeGenerator::GetReadBarrierMarkEntryPointsOffset<kArmPointerSize>(ref_reg);
+      // This runtime call does not require a stack map.
+      arm_codegen->InvokeRuntimeWithoutRecordingPcInfo(entry_point_offset, instruction_, this);
+    }
+    __ b(GetExitLabel());
+  }
+
+ private:
+  // The location (register) of the marked object reference.
+  const Location ref_;
+
+  // The location of the entrypoint if already loaded.
+  const Location entrypoint_;
+
+  DISALLOW_COPY_AND_ASSIGN(ReadBarrierMarkSlowPathARM);
+};
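A minimal C++ sketch of the offset computation behind this "compact" convention, assuming the per-register ReadBarrierMarkRegX entrypoints are laid out contiguously in the thread's entrypoint table (the base offset below is hypothetical; the real value comes from the quick entrypoint layout):

    #include <cstdint>

    constexpr int32_t kReadBarrierMarkReg00Offset = 0x200;  // hypothetical base offset
    constexpr int32_t kArmPointerSizeBytes = 4;             // ARM32 pointer size

    // What GetReadBarrierMarkEntryPointsOffset<kArmPointerSize>(reg) is
    // expected to return: one entrypoint slot per core register.
    int32_t ReadBarrierMarkEntryPointOffset(uint32_t reg) {
      return kReadBarrierMarkReg00Offset +
             kArmPointerSizeBytes * static_cast<int32_t>(reg);
    }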
+
+// Slow path marking an object reference `ref` during a read barrier,
+// and if needed, atomically updating the field `obj.field` in the
+// object `obj` holding this reference after marking (contrary to
+// ReadBarrierMarkSlowPathARM above, which never tries to update
+// `obj.field`).
+//
+// This means that after the execution of this slow path, both `ref`
+// and `obj.field` will be up-to-date; i.e., after the flip, both will
+// hold the same to-space reference (unless another thread installed
+// another object reference (different from `ref`) in `obj.field`).
+class ReadBarrierMarkAndUpdateFieldSlowPathARM : public SlowPathCodeARM {
+ public:
+  ReadBarrierMarkAndUpdateFieldSlowPathARM(HInstruction* instruction,
+                                           Location ref,
+                                           Register obj,
+                                           Location field_offset,
+                                           Register temp1,
+                                           Register temp2)
+      : SlowPathCodeARM(instruction),
+        ref_(ref),
+        obj_(obj),
+        field_offset_(field_offset),
+        temp1_(temp1),
+        temp2_(temp2) {
+    DCHECK(kEmitCompilerReadBarrier);
+  }
+
+  const char* GetDescription() const OVERRIDE { return "ReadBarrierMarkAndUpdateFieldSlowPathARM"; }
+
+  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+    LocationSummary* locations = instruction_->GetLocations();
+    Register ref_reg = ref_.AsRegister<Register>();
+    DCHECK(locations->CanCall());
+    DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(ref_reg)) << ref_reg;
+    // This slow path is only used by the UnsafeCASObject intrinsic.
+    DCHECK((instruction_->IsInvokeVirtual() && instruction_->GetLocations()->Intrinsified()))
+        << "Unexpected instruction in read barrier marking and field updating slow path: "
+        << instruction_->DebugName();
+    DCHECK(instruction_->GetLocations()->Intrinsified());
+    DCHECK_EQ(instruction_->AsInvoke()->GetIntrinsic(), Intrinsics::kUnsafeCASObject);
+    DCHECK(field_offset_.IsRegisterPair()) << field_offset_;
+
+    __ Bind(GetEntryLabel());
+
+    // Save the old reference.
+    // Note that we cannot use IP to save the old reference, as IP is
+    // used internally by the ReadBarrierMarkRegX entry point, and we
+    // need the old reference after the call to that entry point.
+    DCHECK_NE(temp1_, IP);
+    __ Mov(temp1_, ref_reg);
+
+    // No need to save live registers; it's taken care of by the
+    // entrypoint. Also, there is no need to update the stack mask,
+    // as this runtime call will not trigger a garbage collection.
+    CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
+    DCHECK_NE(ref_reg, SP);
+    DCHECK_NE(ref_reg, LR);
+    DCHECK_NE(ref_reg, PC);
+    // IP is used internally by the ReadBarrierMarkRegX entry point
+    // as a temporary; it cannot be the entry point's input/output.
+    DCHECK_NE(ref_reg, IP);
+    DCHECK(0 <= ref_reg && ref_reg < kNumberOfCoreRegisters) << ref_reg;
+    // "Compact" slow path, saving two moves.
+    //
+    // Instead of using the standard runtime calling convention (input
+    // and output in R0):
+    //
+    //   R0 <- ref
+    //   R0 <- ReadBarrierMark(R0)
+    //   ref <- R0
+    //
+    // we just use rX (the register containing `ref`) as input and output
     // of a dedicated entrypoint:
     //
     //   rX <- ReadBarrierMarkRegX(rX)
     //
     int32_t entry_point_offset =
-        CodeGenerator::GetReadBarrierMarkEntryPointsOffset<kArmPointerSize>(reg);
+        CodeGenerator::GetReadBarrierMarkEntryPointsOffset<kArmPointerSize>(ref_reg);
     // This runtime call does not require a stack map.
     arm_codegen->InvokeRuntimeWithoutRecordingPcInfo(entry_point_offset, instruction_, this);
+
+    // If the new reference is different from the old reference,
+    // update the field in the holder (`*(obj_ + field_offset_)`).
+    //
+    // Note that this field could also hold a different object, if
+    // another thread had concurrently changed it. In that case, the
+    // LDREX/SUBS/ITNE sequence of instructions in the compare-and-set
+    // (CAS) operation below would abort the CAS, leaving the field
+    // as-is.
+    Label done;
+    __ cmp(temp1_, ShifterOperand(ref_reg));
+    __ b(&done, EQ);
+
+    // Update the holder's field atomically.  This may fail if the
+    // mutator updates before us, but it's OK.  This is achieved
+    // using a strong compare-and-set (CAS) operation with relaxed
+    // memory synchronization ordering, where the expected value is
+    // the old reference and the desired value is the new reference.
+
+    // Convenience aliases.
+    Register base = obj_;
+    // The UnsafeCASObject intrinsic uses a register pair as field
+    // offset ("long offset"), of which only the low part contains
+    // data.
+    Register offset = field_offset_.AsRegisterPairLow<Register>();
+    Register expected = temp1_;
+    Register value = ref_reg;
+    Register tmp_ptr = IP;       // Pointer to actual memory.
+    Register tmp = temp2_;       // Value in memory.
+
+    __ add(tmp_ptr, base, ShifterOperand(offset));
+
+    if (kPoisonHeapReferences) {
+      __ PoisonHeapReference(expected);
+      if (value == expected) {
+        // Do not poison `value`, as it is the same register as
+        // `expected`, which has just been poisoned.
+      } else {
+        __ PoisonHeapReference(value);
+      }
+    }
+
+    // do {
+    //   tmp = [tmp_ptr] - expected;
+    // } while (tmp == 0 && failure([tmp_ptr] <- value));
+
+    Label loop_head, exit_loop;
+    __ Bind(&loop_head);
+
+    __ ldrex(tmp, tmp_ptr);
+
+    __ subs(tmp, tmp, ShifterOperand(expected));
+
+    __ it(NE);
+    __ clrex(NE);
+
+    __ b(&exit_loop, NE);
+
+    __ strex(tmp, value, tmp_ptr);
+    __ cmp(tmp, ShifterOperand(1));
+    __ b(&loop_head, EQ);
+
+    __ Bind(&exit_loop);
+
+    if (kPoisonHeapReferences) {
+      __ UnpoisonHeapReference(expected);
+      if (value == expected) {
+        // Do not unpoison `value`, as it is the same register as
+        // `expected`, which has just been unpoisoned.
+      } else {
+        __ UnpoisonHeapReference(value);
+      }
+    }
+
+    __ Bind(&done);
     __ b(GetExitLabel());
   }
 
  private:
-  const Location obj_;
+  // The location (register) of the marked object reference.
+  const Location ref_;
+  // The register containing the object holding the marked object reference field.
+  const Register obj_;
+  // The location of the offset of the marked reference field within `obj_`.
+  Location field_offset_;
 
-  DISALLOW_COPY_AND_ASSIGN(ReadBarrierMarkSlowPathARM);
+  const Register temp1_;
+  const Register temp2_;
+
+  DISALLOW_COPY_AND_ASSIGN(ReadBarrierMarkAndUpdateFieldSlowPathARM);
 };
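The CMP/LDREX/SUBS/STREX sequence emitted by this slow path amounts to a strong compare-and-set with relaxed ordering; a minimal self-contained sketch of the same logic on 32-bit heap reference values (names here are illustrative only):

    #include <atomic>
    #include <cstdint>

    // `field` is the holder's reference field, `old_ref` the reference read
    // before marking, `new_ref` the to-space reference produced by marking.
    void UpdateFieldIfUnchanged(std::atomic<uint32_t>* field,
                                uint32_t old_ref,
                                uint32_t new_ref) {
      if (old_ref == new_ref) {
        return;  // Nothing to update; mirrors the CMP/BEQ fast exit.
      }
      uint32_t expected = old_ref;
      // Strong CAS with relaxed ordering: if another thread already installed
      // a different reference, the CAS fails and the field is left as-is.
      field->compare_exchange_strong(expected, new_ref, std::memory_order_relaxed);
    }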
 
 // Slow path generating a read barrier for a heap reference.
@@ -711,6 +907,11 @@
            (instruction_->IsInvokeVirtual()) && instruction_->GetLocations()->Intrinsified())
         << "Unexpected instruction in read barrier for heap reference slow path: "
         << instruction_->DebugName();
+    // The read barrier instrumentation of object ArrayGet
+    // instructions does not support the HIntermediateAddress
+    // instruction.
+    DCHECK(!(instruction_->IsArrayGet() &&
+             instruction_->AsArrayGet()->GetArray()->IsIntermediateAddress()));
 
     __ Bind(GetEntryLabel());
     SaveLiveRegisters(codegen, locations);
@@ -4658,8 +4859,6 @@
                                         instruction->IsStringCharAt();
   HInstruction* array_instr = instruction->GetArray();
   bool has_intermediate_address = array_instr->IsIntermediateAddress();
-  // The read barrier instrumentation does not support the HIntermediateAddress instruction yet.
-  DCHECK(!(has_intermediate_address && kEmitCompilerReadBarrier));
 
   switch (type) {
     case Primitive::kPrimBoolean:
@@ -4667,16 +4866,21 @@
     case Primitive::kPrimShort:
     case Primitive::kPrimChar:
     case Primitive::kPrimInt: {
+      Register length;
+      if (maybe_compressed_char_at) {
+        length = locations->GetTemp(0).AsRegister<Register>();
+        uint32_t count_offset = mirror::String::CountOffset().Uint32Value();
+        __ LoadFromOffset(kLoadWord, length, obj, count_offset);
+        codegen_->MaybeRecordImplicitNullCheck(instruction);
+      }
       if (index.IsConstant()) {
         int32_t const_index = index.GetConstant()->AsIntConstant()->GetValue();
         if (maybe_compressed_char_at) {
-          Register length = IP;
           Label uncompressed_load, done;
-          uint32_t count_offset = mirror::String::CountOffset().Uint32Value();
-          __ LoadFromOffset(kLoadWord, length, obj, count_offset);
-          codegen_->MaybeRecordImplicitNullCheck(instruction);
-          __ cmp(length, ShifterOperand(0));
-          __ b(&uncompressed_load, GE);
+          __ Lsrs(length, length, 1u);  // LSRS has a 16-bit encoding, TST (immediate) does not.
+          static_assert(static_cast<uint32_t>(mirror::StringCompressionFlag::kCompressed) == 0u,
+                        "Expecting 0=compressed, 1=uncompressed");
+          __ b(&uncompressed_load, CS);
           __ LoadFromOffset(kLoadUnsignedByte,
                             out_loc.AsRegister<Register>(),
                             obj,
@@ -4711,12 +4915,10 @@
         }
         if (maybe_compressed_char_at) {
           Label uncompressed_load, done;
-          uint32_t count_offset = mirror::String::CountOffset().Uint32Value();
-          Register length = locations->GetTemp(0).AsRegister<Register>();
-          __ LoadFromOffset(kLoadWord, length, obj, count_offset);
-          codegen_->MaybeRecordImplicitNullCheck(instruction);
-          __ cmp(length, ShifterOperand(0));
-          __ b(&uncompressed_load, GE);
+          __ Lsrs(length, length, 1u);  // LSRS has a 16-bit encoding, TST (immediate) does not.
+          static_assert(static_cast<uint32_t>(mirror::StringCompressionFlag::kCompressed) == 0u,
+                        "Expecting 0=compressed, 1=uncompressed");
+          __ b(&uncompressed_load, CS);
           __ ldrb(out_loc.AsRegister<Register>(),
                   Address(temp, index.AsRegister<Register>(), Shift::LSL, 0));
           __ b(&done);
@@ -4732,6 +4934,11 @@
     }
 
     case Primitive::kPrimNot: {
+      // The read barrier instrumentation of object ArrayGet
+      // instructions does not support the HIntermediateAddress
+      // instruction.
+      DCHECK(!(has_intermediate_address && kEmitCompilerReadBarrier));
+
       static_assert(
           sizeof(mirror::HeapReference<mirror::Object>) == sizeof(int32_t),
           "art::mirror::HeapReference<art::mirror::Object> and int32_t have different sizes.");
@@ -4872,8 +5079,6 @@
   Location value_loc = locations->InAt(2);
   HInstruction* array_instr = instruction->GetArray();
   bool has_intermediate_address = array_instr->IsIntermediateAddress();
-  // The read barrier instrumentation does not support the HIntermediateAddress instruction yet.
-  DCHECK(!(has_intermediate_address && kEmitCompilerReadBarrier));
 
   switch (value_type) {
     case Primitive::kPrimBoolean:
@@ -5118,13 +5323,11 @@
   codegen_->MaybeRecordImplicitNullCheck(instruction);
   // Mask out compression flag from String's array length.
   if (mirror::kUseStringCompression && instruction->IsStringLength()) {
-    __ bic(out, out, ShifterOperand(1u << 31));
+    __ Lsr(out, out, 1u);
   }
 }
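The LSRS/LSR sequences above rely on the compressed-string count encoding in which the low bit is the compression flag (per the static_asserts). A minimal sketch of that encoding, with names chosen here for illustration:

    #include <cstdint>

    // Low bit of String.count: 0 = compressed (8-bit chars), 1 = uncompressed.
    constexpr uint32_t kCompressedFlag = 0u;

    uint32_t LengthFromCount(uint32_t count) {
      return count >> 1;                       // what `Lsr(out, out, 1u)` computes
    }

    bool IsCompressed(uint32_t count) {
      return (count & 1u) == kCompressedFlag;  // what LSRS + the carry check test
    }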
 
 void LocationsBuilderARM::VisitIntermediateAddress(HIntermediateAddress* instruction) {
-  // The read barrier instrumentation does not support the HIntermediateAddress instruction yet.
-  DCHECK(!kEmitCompilerReadBarrier);
   LocationSummary* locations =
       new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
 
@@ -5139,9 +5342,6 @@
   Location first = locations->InAt(0);
   Location second = locations->InAt(1);
 
-  // The read barrier instrumentation does not support the HIntermediateAddress instruction yet.
-  DCHECK(!kEmitCompilerReadBarrier);
-
   if (second.IsRegister()) {
     __ add(out.AsRegister<Register>(),
            first.AsRegister<Register>(),
@@ -5550,7 +5750,9 @@
   Location out_loc = locations->Out();
   Register out = out_loc.AsRegister<Register>();
 
-  const bool requires_read_barrier = kEmitCompilerReadBarrier && !cls->IsInBootImage();
+  const ReadBarrierOption read_barrier_option = cls->IsInBootImage()
+      ? kWithoutReadBarrier
+      : kCompilerReadBarrierOption;
   bool generate_null_check = false;
   switch (cls->GetLoadKind()) {
     case HLoadClass::LoadKind::kReferrersClass: {
@@ -5562,17 +5764,17 @@
                               out_loc,
                               current_method,
                               ArtMethod::DeclaringClassOffset().Int32Value(),
-                              requires_read_barrier);
+                              read_barrier_option);
       break;
     }
     case HLoadClass::LoadKind::kBootImageLinkTimeAddress: {
-      DCHECK(!requires_read_barrier);
+      DCHECK_EQ(read_barrier_option, kWithoutReadBarrier);
       __ LoadLiteral(out, codegen_->DeduplicateBootImageTypeLiteral(cls->GetDexFile(),
                                                                     cls->GetTypeIndex()));
       break;
     }
     case HLoadClass::LoadKind::kBootImageLinkTimePcRelative: {
-      DCHECK(!requires_read_barrier);
+      DCHECK_EQ(read_barrier_option, kWithoutReadBarrier);
       CodeGeneratorARM::PcRelativePatchInfo* labels =
           codegen_->NewPcRelativeTypePatch(cls->GetDexFile(), cls->GetTypeIndex());
       __ BindTrackedLabel(&labels->movw_label);
@@ -5584,7 +5786,7 @@
       break;
     }
     case HLoadClass::LoadKind::kBootImageAddress: {
-      DCHECK(!requires_read_barrier);
+      DCHECK_EQ(read_barrier_option, kWithoutReadBarrier);
       DCHECK_NE(cls->GetAddress(), 0u);
       uint32_t address = dchecked_integral_cast<uint32_t>(cls->GetAddress());
       __ LoadLiteral(out, codegen_->DeduplicateBootImageAddressLiteral(address));
@@ -5604,7 +5806,7 @@
       uint32_t offset = address & MaxInt<uint32_t>(offset_bits);
       __ LoadLiteral(out, codegen_->DeduplicateDexCacheAddressLiteral(base_address));
       // /* GcRoot<mirror::Class> */ out = *(base_address + offset)
-      GenerateGcRootFieldLoad(cls, out_loc, out, offset, requires_read_barrier);
+      GenerateGcRootFieldLoad(cls, out_loc, out, offset, read_barrier_option);
       generate_null_check = !cls->IsInDexCache();
       break;
     }
@@ -5613,7 +5815,7 @@
       HArmDexCacheArraysBase* base = cls->InputAt(0)->AsArmDexCacheArraysBase();
       int32_t offset = cls->GetDexCacheElementOffset() - base->GetElementOffset();
       // /* GcRoot<mirror::Class> */ out = *(dex_cache_arrays_base + offset)
-      GenerateGcRootFieldLoad(cls, out_loc, base_reg, offset, requires_read_barrier);
+      GenerateGcRootFieldLoad(cls, out_loc, base_reg, offset, read_barrier_option);
       generate_null_check = !cls->IsInDexCache();
       break;
     }
@@ -5627,7 +5829,7 @@
                         ArtMethod::DexCacheResolvedTypesOffset(kArmPointerSize).Int32Value());
       // /* GcRoot<mirror::Class> */ out = out[type_index]
       size_t offset = CodeGenerator::GetCacheOffset(cls->GetTypeIndex());
-      GenerateGcRootFieldLoad(cls, out_loc, out, offset, requires_read_barrier);
+      GenerateGcRootFieldLoad(cls, out_loc, out, offset, read_barrier_option);
       generate_null_check = !cls->IsInDexCache();
     }
   }
@@ -5688,9 +5890,6 @@
       break;
     case HLoadString::LoadKind::kBootImageAddress:
       break;
-    case HLoadString::LoadKind::kDexCacheAddress:
-      DCHECK(Runtime::Current()->UseJitCompilation());
-      break;
     case HLoadString::LoadKind::kBssEntry:
       DCHECK(!Runtime::Current()->UseJitCompilation());
       break;
@@ -5773,7 +5972,7 @@
       __ movt(temp, /* placeholder */ 0u);
       __ BindTrackedLabel(&labels->add_pc_label);
       __ add(temp, temp, ShifterOperand(PC));
-      GenerateGcRootFieldLoad(load, out_loc, temp, 0);
+      GenerateGcRootFieldLoad(load, out_loc, temp, /* offset */ 0, kCompilerReadBarrierOption);
       SlowPathCode* slow_path = new (GetGraph()->GetArena()) LoadStringSlowPathARM(load);
       codegen_->AddSlowPath(slow_path);
       __ CompareAndBranchIfZero(out, slow_path->GetEntryLabel());
@@ -5829,12 +6028,26 @@
   CheckEntrypointTypes<kQuickDeliverException, void, mirror::Object*>();
 }
 
-static bool TypeCheckNeedsATemporary(TypeCheckKind type_check_kind) {
-  return kEmitCompilerReadBarrier &&
-      (kUseBakerReadBarrier ||
-       type_check_kind == TypeCheckKind::kAbstractClassCheck ||
-       type_check_kind == TypeCheckKind::kClassHierarchyCheck ||
-       type_check_kind == TypeCheckKind::kArrayObjectCheck);
+// Temp is used for read barrier.
+static size_t NumberOfInstanceOfTemps(TypeCheckKind type_check_kind) {
+  if (kEmitCompilerReadBarrier &&
+       (kUseBakerReadBarrier ||
+          type_check_kind == TypeCheckKind::kAbstractClassCheck ||
+          type_check_kind == TypeCheckKind::kClassHierarchyCheck ||
+          type_check_kind == TypeCheckKind::kArrayObjectCheck)) {
+    return 1;
+  }
+  return 0;
+}
+
+// Interface case has 3 temps, one for holding the number of interfaces, one for the current
+// interface pointer, one for loading the current interface.
+// The other checks have one temp for loading the object's class.
+static size_t NumberOfCheckCastTemps(TypeCheckKind type_check_kind) {
+  if (type_check_kind == TypeCheckKind::kInterfaceCheck) {
+    return 3;
+  }
+  return 1 + NumberOfInstanceOfTemps(type_check_kind);
 }
 
 void LocationsBuilderARM::VisitInstanceOf(HInstanceOf* instruction) {
@@ -5866,11 +6079,7 @@
   // The "out" register is used as a temporary, so it overlaps with the inputs.
   // Note that TypeCheckSlowPathARM uses this register too.
   locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
-  // When read barriers are enabled, we need a temporary register for
-  // some cases.
-  if (TypeCheckNeedsATemporary(type_check_kind)) {
-    locations->AddTemp(Location::RequiresRegister());
-  }
+  locations->AddRegisterTemps(NumberOfInstanceOfTemps(type_check_kind));
 }
 
 void InstructionCodeGeneratorARM::VisitInstanceOf(HInstanceOf* instruction) {
@@ -5881,9 +6090,9 @@
   Register cls = locations->InAt(1).AsRegister<Register>();
   Location out_loc = locations->Out();
   Register out = out_loc.AsRegister<Register>();
-  Location maybe_temp_loc = TypeCheckNeedsATemporary(type_check_kind) ?
-      locations->GetTemp(0) :
-      Location::NoLocation();
+  const size_t num_temps = NumberOfInstanceOfTemps(type_check_kind);
+  DCHECK_LE(num_temps, 1u);
+  Location maybe_temp_loc = (num_temps >= 1) ? locations->GetTemp(0) : Location::NoLocation();
   uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
   uint32_t super_offset = mirror::Class::SuperClassOffset().Int32Value();
   uint32_t component_offset = mirror::Class::ComponentTypeOffset().Int32Value();
@@ -5897,11 +6106,15 @@
     __ CompareAndBranchIfZero(obj, &zero);
   }
 
-  // /* HeapReference<Class> */ out = obj->klass_
-  GenerateReferenceLoadTwoRegisters(instruction, out_loc, obj_loc, class_offset, maybe_temp_loc);
-
   switch (type_check_kind) {
     case TypeCheckKind::kExactCheck: {
+      // /* HeapReference<Class> */ out = obj->klass_
+      GenerateReferenceLoadTwoRegisters(instruction,
+                                        out_loc,
+                                        obj_loc,
+                                        class_offset,
+                                        maybe_temp_loc,
+                                        kCompilerReadBarrierOption);
       __ cmp(out, ShifterOperand(cls));
       // Classes must be equal for the instanceof to succeed.
       __ b(&zero, NE);
@@ -5911,12 +6124,23 @@
     }
 
     case TypeCheckKind::kAbstractClassCheck: {
+      // /* HeapReference<Class> */ out = obj->klass_
+      GenerateReferenceLoadTwoRegisters(instruction,
+                                        out_loc,
+                                        obj_loc,
+                                        class_offset,
+                                        maybe_temp_loc,
+                                        kCompilerReadBarrierOption);
       // If the class is abstract, we eagerly fetch the super class of the
       // object to avoid doing a comparison we know will fail.
       Label loop;
       __ Bind(&loop);
       // /* HeapReference<Class> */ out = out->super_class_
-      GenerateReferenceLoadOneRegister(instruction, out_loc, super_offset, maybe_temp_loc);
+      GenerateReferenceLoadOneRegister(instruction,
+                                       out_loc,
+                                       super_offset,
+                                       maybe_temp_loc,
+                                       kCompilerReadBarrierOption);
       // If `out` is null, we use it for the result, and jump to `done`.
       __ CompareAndBranchIfZero(out, &done);
       __ cmp(out, ShifterOperand(cls));
@@ -5929,13 +6153,24 @@
     }
 
     case TypeCheckKind::kClassHierarchyCheck: {
+      // /* HeapReference<Class> */ out = obj->klass_
+      GenerateReferenceLoadTwoRegisters(instruction,
+                                        out_loc,
+                                        obj_loc,
+                                        class_offset,
+                                        maybe_temp_loc,
+                                        kCompilerReadBarrierOption);
       // Walk over the class hierarchy to find a match.
       Label loop, success;
       __ Bind(&loop);
       __ cmp(out, ShifterOperand(cls));
       __ b(&success, EQ);
       // /* HeapReference<Class> */ out = out->super_class_
-      GenerateReferenceLoadOneRegister(instruction, out_loc, super_offset, maybe_temp_loc);
+      GenerateReferenceLoadOneRegister(instruction,
+                                       out_loc,
+                                       super_offset,
+                                       maybe_temp_loc,
+                                       kCompilerReadBarrierOption);
       __ CompareAndBranchIfNonZero(out, &loop);
       // If `out` is null, we use it for the result, and jump to `done`.
       __ b(&done);
@@ -5948,13 +6183,24 @@
     }
 
     case TypeCheckKind::kArrayObjectCheck: {
+      // /* HeapReference<Class> */ out = obj->klass_
+      GenerateReferenceLoadTwoRegisters(instruction,
+                                        out_loc,
+                                        obj_loc,
+                                        class_offset,
+                                        maybe_temp_loc,
+                                        kCompilerReadBarrierOption);
       // Do an exact check.
       Label exact_check;
       __ cmp(out, ShifterOperand(cls));
       __ b(&exact_check, EQ);
       // Otherwise, we need to check that the object's class is a non-primitive array.
       // /* HeapReference<Class> */ out = out->component_type_
-      GenerateReferenceLoadOneRegister(instruction, out_loc, component_offset, maybe_temp_loc);
+      GenerateReferenceLoadOneRegister(instruction,
+                                       out_loc,
+                                       component_offset,
+                                       maybe_temp_loc,
+                                       kCompilerReadBarrierOption);
       // If `out` is null, we use it for the result, and jump to `done`.
       __ CompareAndBranchIfZero(out, &done);
       __ LoadFromOffset(kLoadUnsignedHalfword, out, out, primitive_offset);
@@ -5967,6 +6213,14 @@
     }
 
     case TypeCheckKind::kArrayCheck: {
+      // No read barrier since the slow path will retry upon failure.
+      // /* HeapReference<Class> */ out = obj->klass_
+      GenerateReferenceLoadTwoRegisters(instruction,
+                                        out_loc,
+                                        obj_loc,
+                                        class_offset,
+                                        maybe_temp_loc,
+                                        kWithoutReadBarrier);
       __ cmp(out, ShifterOperand(cls));
       DCHECK(locations->OnlyCallsOnSlowPath());
       slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathARM(instruction,
@@ -6050,13 +6304,7 @@
   LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
   locations->SetInAt(0, Location::RequiresRegister());
   locations->SetInAt(1, Location::RequiresRegister());
-  // Note that TypeCheckSlowPathARM uses this "temp" register too.
-  locations->AddTemp(Location::RequiresRegister());
-  // When read barriers are enabled, we need an additional temporary
-  // register for some cases.
-  if (TypeCheckNeedsATemporary(type_check_kind)) {
-    locations->AddTemp(Location::RequiresRegister());
-  }
+  locations->AddRegisterTemps(NumberOfCheckCastTemps(type_check_kind));
 }
 
 void InstructionCodeGeneratorARM::VisitCheckCast(HCheckCast* instruction) {
@@ -6067,20 +6315,31 @@
   Register cls = locations->InAt(1).AsRegister<Register>();
   Location temp_loc = locations->GetTemp(0);
   Register temp = temp_loc.AsRegister<Register>();
-  Location maybe_temp2_loc = TypeCheckNeedsATemporary(type_check_kind) ?
-      locations->GetTemp(1) :
-      Location::NoLocation();
-  uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
-  uint32_t super_offset = mirror::Class::SuperClassOffset().Int32Value();
-  uint32_t component_offset = mirror::Class::ComponentTypeOffset().Int32Value();
-  uint32_t primitive_offset = mirror::Class::PrimitiveTypeOffset().Int32Value();
+  const size_t num_temps = NumberOfCheckCastTemps(type_check_kind);
+  DCHECK_LE(num_temps, 3u);
+  Location maybe_temp2_loc = (num_temps >= 2) ? locations->GetTemp(1) : Location::NoLocation();
+  Location maybe_temp3_loc = (num_temps >= 3) ? locations->GetTemp(2) : Location::NoLocation();
+  const uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
+  const uint32_t super_offset = mirror::Class::SuperClassOffset().Int32Value();
+  const uint32_t component_offset = mirror::Class::ComponentTypeOffset().Int32Value();
+  const uint32_t primitive_offset = mirror::Class::PrimitiveTypeOffset().Int32Value();
+  const uint32_t iftable_offset = mirror::Class::IfTableOffset().Uint32Value();
+  const uint32_t array_length_offset = mirror::Array::LengthOffset().Uint32Value();
+  const uint32_t object_array_data_offset =
+      mirror::Array::DataOffset(kHeapReferenceSize).Uint32Value();
 
-  bool is_type_check_slow_path_fatal =
-      (type_check_kind == TypeCheckKind::kExactCheck ||
-       type_check_kind == TypeCheckKind::kAbstractClassCheck ||
-       type_check_kind == TypeCheckKind::kClassHierarchyCheck ||
-       type_check_kind == TypeCheckKind::kArrayObjectCheck) &&
-      !instruction->CanThrowIntoCatchBlock();
+  // Always false for read barriers: the checks below avoid read barriers (for performance and
+  // code size reasons), which can produce false negatives, and those need a non-fatal slow path
+  // that reaches the runtime entrypoint.
+  bool is_type_check_slow_path_fatal = false;
+  if (!kEmitCompilerReadBarrier) {
+    is_type_check_slow_path_fatal =
+        (type_check_kind == TypeCheckKind::kExactCheck ||
+         type_check_kind == TypeCheckKind::kAbstractClassCheck ||
+         type_check_kind == TypeCheckKind::kClassHierarchyCheck ||
+         type_check_kind == TypeCheckKind::kArrayObjectCheck) &&
+        !instruction->CanThrowIntoCatchBlock();
+  }
   SlowPathCodeARM* type_check_slow_path =
       new (GetGraph()->GetArena()) TypeCheckSlowPathARM(instruction,
                                                         is_type_check_slow_path_fatal);
@@ -6092,12 +6351,17 @@
     __ CompareAndBranchIfZero(obj, &done);
   }
 
-  // /* HeapReference<Class> */ temp = obj->klass_
-  GenerateReferenceLoadTwoRegisters(instruction, temp_loc, obj_loc, class_offset, maybe_temp2_loc);
-
   switch (type_check_kind) {
     case TypeCheckKind::kExactCheck:
     case TypeCheckKind::kArrayCheck: {
+      // /* HeapReference<Class> */ temp = obj->klass_
+      GenerateReferenceLoadTwoRegisters(instruction,
+                                        temp_loc,
+                                        obj_loc,
+                                        class_offset,
+                                        maybe_temp2_loc,
+                                        kWithoutReadBarrier);
+
       __ cmp(temp, ShifterOperand(cls));
       // Jump to slow path for throwing the exception or doing a
       // more involved array check.
@@ -6106,34 +6370,44 @@
     }
 
     case TypeCheckKind::kAbstractClassCheck: {
+      // /* HeapReference<Class> */ temp = obj->klass_
+      GenerateReferenceLoadTwoRegisters(instruction,
+                                        temp_loc,
+                                        obj_loc,
+                                        class_offset,
+                                        maybe_temp2_loc,
+                                        kWithoutReadBarrier);
+
       // If the class is abstract, we eagerly fetch the super class of the
       // object to avoid doing a comparison we know will fail.
-      Label loop, compare_classes;
+      Label loop;
       __ Bind(&loop);
       // /* HeapReference<Class> */ temp = temp->super_class_
-      GenerateReferenceLoadOneRegister(instruction, temp_loc, super_offset, maybe_temp2_loc);
+      GenerateReferenceLoadOneRegister(instruction,
+                                       temp_loc,
+                                       super_offset,
+                                       maybe_temp2_loc,
+                                       kWithoutReadBarrier);
 
-      // If the class reference currently in `temp` is not null, jump
-      // to the `compare_classes` label to compare it with the checked
-      // class.
-      __ CompareAndBranchIfNonZero(temp, &compare_classes);
-      // Otherwise, jump to the slow path to throw the exception.
-      //
-      // But before, move back the object's class into `temp` before
-      // going into the slow path, as it has been overwritten in the
-      // meantime.
-      // /* HeapReference<Class> */ temp = obj->klass_
-      GenerateReferenceLoadTwoRegisters(
-          instruction, temp_loc, obj_loc, class_offset, maybe_temp2_loc);
-      __ b(type_check_slow_path->GetEntryLabel());
+      // If the class reference currently in `temp` is null, jump to the slow path to throw the
+      // exception.
+      __ CompareAndBranchIfZero(temp, type_check_slow_path->GetEntryLabel());
 
-      __ Bind(&compare_classes);
+      // Otherwise, compare the classes.
       __ cmp(temp, ShifterOperand(cls));
       __ b(&loop, NE);
       break;
     }
 
     case TypeCheckKind::kClassHierarchyCheck: {
+      // /* HeapReference<Class> */ temp = obj->klass_
+      GenerateReferenceLoadTwoRegisters(instruction,
+                                        temp_loc,
+                                        obj_loc,
+                                        class_offset,
+                                        maybe_temp2_loc,
+                                        kWithoutReadBarrier);
+
       // Walk over the class hierarchy to find a match.
       Label loop;
       __ Bind(&loop);
@@ -6141,65 +6415,52 @@
       __ b(&done, EQ);
 
       // /* HeapReference<Class> */ temp = temp->super_class_
-      GenerateReferenceLoadOneRegister(instruction, temp_loc, super_offset, maybe_temp2_loc);
+      GenerateReferenceLoadOneRegister(instruction,
+                                       temp_loc,
+                                       super_offset,
+                                       maybe_temp2_loc,
+                                       kWithoutReadBarrier);
 
-      // If the class reference currently in `temp` is not null, jump
-      // back at the beginning of the loop.
-      __ CompareAndBranchIfNonZero(temp, &loop);
-      // Otherwise, jump to the slow path to throw the exception.
-      //
-      // But before, move back the object's class into `temp` before
-      // going into the slow path, as it has been overwritten in the
-      // meantime.
-      // /* HeapReference<Class> */ temp = obj->klass_
-      GenerateReferenceLoadTwoRegisters(
-          instruction, temp_loc, obj_loc, class_offset, maybe_temp2_loc);
-      __ b(type_check_slow_path->GetEntryLabel());
+      // If the class reference currently in `temp` is null, jump to the slow path to throw the
+      // exception.
+      __ CompareAndBranchIfZero(temp, type_check_slow_path->GetEntryLabel());
+      // Otherwise, jump to the beginning of the loop.
+      __ b(&loop);
       break;
     }
 
     case TypeCheckKind::kArrayObjectCheck: {
+      // /* HeapReference<Class> */ temp = obj->klass_
+      GenerateReferenceLoadTwoRegisters(instruction,
+                                        temp_loc,
+                                        obj_loc,
+                                        class_offset,
+                                        maybe_temp2_loc,
+                                        kWithoutReadBarrier);
+
       // Do an exact check.
-      Label check_non_primitive_component_type;
       __ cmp(temp, ShifterOperand(cls));
       __ b(&done, EQ);
 
       // Otherwise, we need to check that the object's class is a non-primitive array.
       // /* HeapReference<Class> */ temp = temp->component_type_
-      GenerateReferenceLoadOneRegister(instruction, temp_loc, component_offset, maybe_temp2_loc);
-
-      // If the component type is not null (i.e. the object is indeed
-      // an array), jump to label `check_non_primitive_component_type`
-      // to further check that this component type is not a primitive
-      // type.
-      __ CompareAndBranchIfNonZero(temp, &check_non_primitive_component_type);
-      // Otherwise, jump to the slow path to throw the exception.
-      //
-      // But before, move back the object's class into `temp` before
-      // going into the slow path, as it has been overwritten in the
-      // meantime.
-      // /* HeapReference<Class> */ temp = obj->klass_
-      GenerateReferenceLoadTwoRegisters(
-          instruction, temp_loc, obj_loc, class_offset, maybe_temp2_loc);
-      __ b(type_check_slow_path->GetEntryLabel());
-
-      __ Bind(&check_non_primitive_component_type);
+      GenerateReferenceLoadOneRegister(instruction,
+                                       temp_loc,
+                                       component_offset,
+                                       maybe_temp2_loc,
+                                       kWithoutReadBarrier);
+      // If the component type is null, jump to the slow path to throw the exception.
+      __ CompareAndBranchIfZero(temp, type_check_slow_path->GetEntryLabel());
+      // Otherwise, the object is indeed an array; further check that this
+      // component type is not a primitive type.
       __ LoadFromOffset(kLoadUnsignedHalfword, temp, temp, primitive_offset);
       static_assert(Primitive::kPrimNot == 0, "Expected 0 for art::Primitive::kPrimNot");
-      __ CompareAndBranchIfZero(temp, &done);
-      // Same comment as above regarding `temp` and the slow path.
-      // /* HeapReference<Class> */ temp = obj->klass_
-      GenerateReferenceLoadTwoRegisters(
-          instruction, temp_loc, obj_loc, class_offset, maybe_temp2_loc);
-      __ b(type_check_slow_path->GetEntryLabel());
+      __ CompareAndBranchIfNonZero(temp, type_check_slow_path->GetEntryLabel());
       break;
     }
 
     case TypeCheckKind::kUnresolvedCheck:
-    case TypeCheckKind::kInterfaceCheck:
-      // We always go into the type check slow path for the unresolved
-      // and interface check cases.
-      //
+      // We always go into the type check slow path for the unresolved check case.
       // We cannot directly call the CheckCast runtime entry point
       // without resorting to a type checking slow path here (i.e. by
       // calling InvokeRuntime directly), as it would require to
@@ -6207,15 +6468,47 @@
       // instruction (following the runtime calling convention), which
       // might be cluttered by the potential first read barrier
       // emission at the beginning of this method.
-      //
-      // TODO: Introduce a new runtime entry point taking the object
-      // to test (instead of its class) as argument, and let it deal
-      // with the read barrier issues. This will let us refactor this
-      // case of the `switch` code as it was previously (with a direct
-      // call to the runtime not using a type checking slow path).
-      // This should also be beneficial for the other cases above.
+
       __ b(type_check_slow_path->GetEntryLabel());
       break;
+
+    case TypeCheckKind::kInterfaceCheck: {
+      // Avoid read barriers to improve the performance of the fast path. We cannot get false
+      // positives by doing this.
+      // /* HeapReference<Class> */ temp = obj->klass_
+      GenerateReferenceLoadTwoRegisters(instruction,
+                                        temp_loc,
+                                        obj_loc,
+                                        class_offset,
+                                        maybe_temp2_loc,
+                                        kWithoutReadBarrier);
+
+      // /* HeapReference<Class> */ temp = temp->iftable_
+      GenerateReferenceLoadTwoRegisters(instruction,
+                                        temp_loc,
+                                        temp_loc,
+                                        iftable_offset,
+                                        maybe_temp2_loc,
+                                        kWithoutReadBarrier);
+      // Iftable is never null.
+      __ ldr(maybe_temp2_loc.AsRegister<Register>(), Address(temp, array_length_offset));
+      // Loop through the iftable and check if any class matches.
+      Label start_loop;
+      __ Bind(&start_loop);
+      __ CompareAndBranchIfZero(maybe_temp2_loc.AsRegister<Register>(),
+                                type_check_slow_path->GetEntryLabel());
+      __ ldr(maybe_temp3_loc.AsRegister<Register>(), Address(temp, object_array_data_offset));
+      __ MaybeUnpoisonHeapReference(maybe_temp3_loc.AsRegister<Register>());
+      // Go to next interface.
+      __ add(temp, temp, ShifterOperand(2 * kHeapReferenceSize));
+      __ sub(maybe_temp2_loc.AsRegister<Register>(),
+             maybe_temp2_loc.AsRegister<Register>(),
+             ShifterOperand(2));
+      // Compare the classes and continue the loop if they do not match.
+      __ cmp(cls, ShifterOperand(maybe_temp3_loc.AsRegister<Register>()));
+      __ b(&start_loop, NE);
+      break;
+    }
   }
   __ Bind(&done);
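The interface-check loop above is a linear scan of the class's iftable, which stores pairs of (interface class, method array), hence the step of two references per iteration. Roughly equivalent runtime-level logic, assuming ART's mirror types and shown only as a sketch:

    bool ImplementsInterface(mirror::Class* klass, mirror::Class* iface) {
      mirror::IfTable* iftable = klass->GetIfTable();  // never null
      for (int32_t i = 0, count = iftable->Count(); i != count; ++i) {
        if (iftable->GetInterface(i) == iface) {
          return true;   // match: fall through to `done` in the generated code
        }
      }
      return false;      // no match: the generated code takes the slow path
    }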
 
@@ -6489,12 +6782,15 @@
   }
 }
 
-void InstructionCodeGeneratorARM::GenerateReferenceLoadOneRegister(HInstruction* instruction,
-                                                                   Location out,
-                                                                   uint32_t offset,
-                                                                   Location maybe_temp) {
+void InstructionCodeGeneratorARM::GenerateReferenceLoadOneRegister(
+    HInstruction* instruction,
+    Location out,
+    uint32_t offset,
+    Location maybe_temp,
+    ReadBarrierOption read_barrier_option) {
   Register out_reg = out.AsRegister<Register>();
-  if (kEmitCompilerReadBarrier) {
+  if (read_barrier_option == kWithReadBarrier) {
+    CHECK(kEmitCompilerReadBarrier);
     DCHECK(maybe_temp.IsRegister()) << maybe_temp;
     if (kUseBakerReadBarrier) {
       // Load with fast path based Baker's read barrier.
@@ -6519,14 +6815,17 @@
   }
 }
 
-void InstructionCodeGeneratorARM::GenerateReferenceLoadTwoRegisters(HInstruction* instruction,
-                                                                    Location out,
-                                                                    Location obj,
-                                                                    uint32_t offset,
-                                                                    Location maybe_temp) {
+void InstructionCodeGeneratorARM::GenerateReferenceLoadTwoRegisters(
+    HInstruction* instruction,
+    Location out,
+    Location obj,
+    uint32_t offset,
+    Location maybe_temp,
+    ReadBarrierOption read_barrier_option) {
   Register out_reg = out.AsRegister<Register>();
   Register obj_reg = obj.AsRegister<Register>();
-  if (kEmitCompilerReadBarrier) {
+  if (read_barrier_option == kWithReadBarrier) {
+    CHECK(kEmitCompilerReadBarrier);
     if (kUseBakerReadBarrier) {
       DCHECK(maybe_temp.IsRegister()) << maybe_temp;
       // Load with fast path based Baker's read barrier.
@@ -6551,17 +6850,18 @@
                                                           Location root,
                                                           Register obj,
                                                           uint32_t offset,
-                                                          bool requires_read_barrier) {
+                                                          ReadBarrierOption read_barrier_option) {
   Register root_reg = root.AsRegister<Register>();
-  if (requires_read_barrier) {
+  if (read_barrier_option == kWithReadBarrier) {
     DCHECK(kEmitCompilerReadBarrier);
     if (kUseBakerReadBarrier) {
       // Fast path implementation of art::ReadBarrier::BarrierForRoot when
       // Baker's read barrier are used:
       //
       //   root = obj.field;
-      //   if (Thread::Current()->GetIsGcMarking()) {
-      //     root = ReadBarrier::Mark(root)
+      //   temp = Thread::Current()->pReadBarrierMarkReg ## root.reg()
+      //   if (temp != null) {
+      //     root = temp(root)
       //   }
 
       // /* GcRoot<mirror::Object> */ root = *(obj + offset)
@@ -6575,14 +6875,23 @@
                     "have different sizes.");
 
       // Slow path marking the GC root `root`.
+      Location temp = Location::RegisterLocation(LR);
       SlowPathCodeARM* slow_path =
-          new (GetGraph()->GetArena()) ReadBarrierMarkSlowPathARM(instruction, root);
+          new (GetGraph()->GetArena()) ReadBarrierMarkSlowPathARM(
+              instruction,
+              root,
+              /*entrypoint*/ temp);
       codegen_->AddSlowPath(slow_path);
 
-      // IP = Thread::Current()->GetIsGcMarking()
-      __ LoadFromOffset(
-          kLoadWord, IP, TR, Thread::IsGcMarkingOffset<kArmPointerSize>().Int32Value());
-      __ CompareAndBranchIfNonZero(IP, slow_path->GetEntryLabel());
+      // temp = Thread::Current()->pReadBarrierMarkReg ## root.reg()
+      const int32_t entry_point_offset =
+          CodeGenerator::GetReadBarrierMarkEntryPointsOffset<kArmPointerSize>(root.reg());
+      // Loading the entrypoint does not require a load acquire since it is only changed when
+      // threads are suspended or running a checkpoint.
+      __ LoadFromOffset(kLoadWord, temp.AsRegister<Register>(), TR, entry_point_offset);
+      // The entrypoint is null when the GC is not marking; this saves one load compared to
+      // checking GetIsGcMarking.
+      __ CompareAndBranchIfNonZero(temp.AsRegister<Register>(), slow_path->GetEntryLabel());
       __ Bind(slow_path->GetExitLabel());
     } else {
       // GC root loaded through a slow path for read barriers other
@@ -6644,7 +6953,9 @@
                                                                  Location index,
                                                                  ScaleFactor scale_factor,
                                                                  Location temp,
-                                                                 bool needs_null_check) {
+                                                                 bool needs_null_check,
+                                                                 bool always_update_field,
+                                                                 Register* temp2) {
   DCHECK(kEmitCompilerReadBarrier);
   DCHECK(kUseBakerReadBarrier);
 
@@ -6658,7 +6969,7 @@
   //   uint32_t rb_state = Lockword(obj->monitor_).ReadBarrierState();
   //   lfence;  // Load fence or artificial data dependency to prevent load-load reordering
   //   HeapReference<Object> ref = *src;  // Original reference load.
-  //   bool is_gray = (rb_state == ReadBarrier::gray_ptr_);
+  //   bool is_gray = (rb_state == ReadBarrier::GrayState());
   //   if (is_gray) {
   //     ref = ReadBarrier::Mark(ref);  // Performed by runtime entrypoint slow path.
   //   }
@@ -6689,8 +7000,9 @@
 
   // The actual reference load.
   if (index.IsValid()) {
-    // Load types involving an "index": ArrayGet and
-    // UnsafeGetObject/UnsafeGetObjectVolatile intrinsics.
+    // Load types involving an "index": ArrayGet,
+    // UnsafeGetObject/UnsafeGetObjectVolatile and UnsafeCASObject
+    // intrinsics.
     // /* HeapReference<Object> */ ref = *(obj + offset + (index << scale_factor))
     if (index.IsConstant()) {
       size_t computed_offset =
@@ -6698,9 +7010,9 @@
       __ LoadFromOffset(kLoadWord, ref_reg, obj, computed_offset);
     } else {
       // Handle the special case of the
-      // UnsafeGetObject/UnsafeGetObjectVolatile intrinsics, which use
-      // a register pair as index ("long offset"), of which only the low
-      // part contains data.
+      // UnsafeGetObject/UnsafeGetObjectVolatile and UnsafeCASObject
+      // intrinsics, which use a register pair as index ("long
+      // offset"), of which only the low part contains data.
       Register index_reg = index.IsRegisterPair()
           ? index.AsRegisterPairLow<Register>()
           : index.AsRegister<Register>();
@@ -6716,18 +7028,30 @@
   __ MaybeUnpoisonHeapReference(ref_reg);
 
   // Slow path marking the object `ref` when it is gray.
-  SlowPathCodeARM* slow_path =
-      new (GetGraph()->GetArena()) ReadBarrierMarkSlowPathARM(instruction, ref);
+  SlowPathCodeARM* slow_path;
+  if (always_update_field) {
+    DCHECK(temp2 != nullptr);
+    // ReadBarrierMarkAndUpdateFieldSlowPathARM only supports address
+    // of the form `obj + field_offset`, where `obj` is a register and
+    // `field_offset` is a register pair (of which only the lower half
+    // is used). Thus `offset` is expected to be zero and `scale_factor`
+    // to be TIMES_1 in this code path.
+    DCHECK_EQ(offset, 0u);
+    DCHECK_EQ(scale_factor, ScaleFactor::TIMES_1);
+    slow_path = new (GetGraph()->GetArena()) ReadBarrierMarkAndUpdateFieldSlowPathARM(
+        instruction, ref, obj, /* field_offset */ index, temp_reg, *temp2);
+  } else {
+    slow_path = new (GetGraph()->GetArena()) ReadBarrierMarkSlowPathARM(instruction, ref);
+  }
   AddSlowPath(slow_path);
 
-  // if (rb_state == ReadBarrier::gray_ptr_)
+  // if (rb_state == ReadBarrier::GrayState())
   //   ref = ReadBarrier::Mark(ref);
   // Given the numeric representation, it's enough to check the low bit of the
   // rb_state. We do that by shifting the bit out of the lock word with LSRS
   // which can be a 16-bit instruction unlike the TST immediate.
-  static_assert(ReadBarrier::white_ptr_ == 0, "Expecting white to have value 0");
-  static_assert(ReadBarrier::gray_ptr_ == 1, "Expecting gray to have value 1");
-  static_assert(ReadBarrier::black_ptr_ == 2, "Expecting black to have value 2");
+  static_assert(ReadBarrier::WhiteState() == 0, "Expecting white to have value 0");
+  static_assert(ReadBarrier::GrayState() == 1, "Expecting gray to have value 1");
   __ Lsrs(temp_reg, temp_reg, LockWord::kReadBarrierStateShift + 1);
   __ b(slow_path->GetEntryLabel(), CS);  // Carry flag is the last bit shifted out by LSRS.
   __ Bind(slow_path->GetExitLabel());
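The LSRS + B(CS) idiom above shifts the low bit of the lock word's read barrier state into the carry flag; a minimal sketch of the equivalent check, with the shift value assumed here for illustration (the real constant is LockWord::kReadBarrierStateShift):

    #include <cstdint>

    constexpr uint32_t kReadBarrierStateShift = 28;  // assumed bit position

    // GrayState() == 1 and WhiteState() == 0, so only the low bit of the
    // rb_state matters: gray objects take the marking slow path.
    bool IsGray(uint32_t lock_word) {
      return ((lock_word >> kReadBarrierStateShift) & 1u) != 0u;
    }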
diff --git a/compiler/optimizing/code_generator_arm.h b/compiler/optimizing/code_generator_arm.h
index 4d59b47..f95dd57 100644
--- a/compiler/optimizing/code_generator_arm.h
+++ b/compiler/optimizing/code_generator_arm.h
@@ -263,7 +263,8 @@
   void GenerateReferenceLoadOneRegister(HInstruction* instruction,
                                         Location out,
                                         uint32_t offset,
-                                        Location maybe_temp);
+                                        Location maybe_temp,
+                                        ReadBarrierOption read_barrier_option);
   // Generate a heap reference load using two different registers
   // `out` and `obj`:
   //
@@ -278,17 +279,18 @@
                                          Location out,
                                          Location obj,
                                          uint32_t offset,
-                                         Location maybe_temp);
+                                         Location maybe_temp,
+                                         ReadBarrierOption read_barrier_option);
   // Generate a GC root reference load:
   //
   //   root <- *(obj + offset)
   //
-  // while honoring read barriers if requires_read_barrier is true.
+  // while honoring read barriers based on read_barrier_option.
   void GenerateGcRootFieldLoad(HInstruction* instruction,
                                Location root,
                                Register obj,
                                uint32_t offset,
-                               bool requires_read_barrier = kEmitCompilerReadBarrier);
+                               ReadBarrierOption read_barrier_option);
   void GenerateTestAndBranch(HInstruction* instruction,
                              size_t condition_input_index,
                              Label* true_target,
@@ -508,6 +510,18 @@
                                              bool needs_null_check);
   // Factored implementation used by GenerateFieldLoadWithBakerReadBarrier
   // and GenerateArrayLoadWithBakerReadBarrier.
+
+  // Factored implementation, used by GenerateFieldLoadWithBakerReadBarrier,
+  // GenerateArrayLoadWithBakerReadBarrier and some intrinsics.
+  //
+  // Load the object reference located at the address
+  // `obj + offset + (index << scale_factor)`, held by object `obj`, into
+  // `ref`, and mark it if needed.
+  //
+  // If `always_update_field` is true, the value of the reference is
+  // atomically updated in the holder (`obj`).  This operation
+  // requires an extra temporary register, which must be provided as a
+  // non-null pointer (`temp2`).
   void GenerateReferenceLoadWithBakerReadBarrier(HInstruction* instruction,
                                                  Location ref,
                                                  Register obj,
@@ -515,7 +529,9 @@
                                                  Location index,
                                                  ScaleFactor scale_factor,
                                                  Location temp,
-                                                 bool needs_null_check);
+                                                 bool needs_null_check,
+                                                 bool always_update_field = false,
+                                                 Register* temp2 = nullptr);
 
   // Generate a read barrier for a heap reference within `instruction`
   // using a slow path.
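With the two new defaulted parameters, existing callers of GenerateReferenceLoadWithBakerReadBarrier are unchanged; only the UnsafeCASObject intrinsic is expected to opt in. A hedged sketch of that call shape, matching the declaration above (the local names are hypothetical):

    // `base` holds the object, `offset_loc` is the register pair holding the
    // field offset, `out_loc`/`temp_loc` are the output and first temp locations.
    Register temp2 = tmp_reg;  // extra temp required by the field-updating path
    codegen->GenerateReferenceLoadWithBakerReadBarrier(invoke,
                                                       out_loc,
                                                       base,
                                                       /* offset */ 0u,
                                                       /* index */ offset_loc,
                                                       ScaleFactor::TIMES_1,
                                                       temp_loc,
                                                       /* needs_null_check */ false,
                                                       /* always_update_field */ true,
                                                       &temp2);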
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index 9e59d8c..6ec9c91 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -459,9 +459,7 @@
 
   void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
     LocationSummary* locations = instruction_->GetLocations();
-    Location class_to_check = locations->InAt(1);
-    Location object_class = instruction_->IsCheckCast() ? locations->GetTemp(0)
-                                                        : locations->Out();
+
     DCHECK(instruction_->IsCheckCast()
            || !locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
     CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);
@@ -476,21 +474,22 @@
     // We're moving two locations to locations that could overlap, so we need a parallel
     // move resolver.
     InvokeRuntimeCallingConvention calling_convention;
-    codegen->EmitParallelMoves(
-        class_to_check, LocationFrom(calling_convention.GetRegisterAt(0)), Primitive::kPrimNot,
-        object_class, LocationFrom(calling_convention.GetRegisterAt(1)), Primitive::kPrimNot);
-
+    codegen->EmitParallelMoves(locations->InAt(0),
+                               LocationFrom(calling_convention.GetRegisterAt(0)),
+                               Primitive::kPrimNot,
+                               locations->InAt(1),
+                               LocationFrom(calling_convention.GetRegisterAt(1)),
+                               Primitive::kPrimNot);
     if (instruction_->IsInstanceOf()) {
       arm64_codegen->InvokeRuntime(kQuickInstanceofNonTrivial, instruction_, dex_pc, this);
-      CheckEntrypointTypes<kQuickInstanceofNonTrivial, size_t,
-                           const mirror::Class*, const mirror::Class*>();
+      CheckEntrypointTypes<kQuickInstanceofNonTrivial, size_t, mirror::Object*, mirror::Class*>();
       Primitive::Type ret_type = instruction_->GetType();
       Location ret_loc = calling_convention.GetReturnLocation(ret_type);
       arm64_codegen->MoveLocation(locations->Out(), ret_loc, ret_type);
     } else {
       DCHECK(instruction_->IsCheckCast());
-      arm64_codegen->InvokeRuntime(kQuickCheckCast, instruction_, dex_pc, this);
-      CheckEntrypointTypes<kQuickCheckCast, void, const mirror::Class*, const mirror::Class*>();
+      arm64_codegen->InvokeRuntime(kQuickCheckInstanceOf, instruction_, dex_pc, this);
+      CheckEntrypointTypes<kQuickCheckInstanceOf, void, mirror::Object*, mirror::Class*>();
     }
 
     if (!is_fatal_) {
@@ -589,11 +588,27 @@
   }
 }
 
-// Slow path marking an object during a read barrier.
+// Slow path marking an object reference `ref` during a read
+// barrier. The field `obj.field` in the object `obj` holding this
+// reference does not get updated by this slow path after marking (see
+// ReadBarrierMarkAndUpdateFieldSlowPathARM64 below for that).
+//
+// This means that after the execution of this slow path, `ref` will
+// always be up-to-date, but `obj.field` may not; i.e., after the
+// flip, `ref` will be a to-space reference, but `obj.field` will
+// probably still be a from-space reference (unless it gets updated by
+// another thread, or if another thread installed another object
+// reference (different from `ref`) in `obj.field`).
+// If `entrypoint` is a valid location, it is assumed to already hold the marking
+// entrypoint; the entrypoint is passed in explicitly only for the GC root read barrier.
 class ReadBarrierMarkSlowPathARM64 : public SlowPathCodeARM64 {
  public:
-  ReadBarrierMarkSlowPathARM64(HInstruction* instruction, Location obj)
-      : SlowPathCodeARM64(instruction), obj_(obj) {
+  ReadBarrierMarkSlowPathARM64(HInstruction* instruction,
+                               Location ref,
+                               Location entrypoint = Location::NoLocation())
+      : SlowPathCodeARM64(instruction),
+        ref_(ref),
+        entrypoint_(entrypoint) {
     DCHECK(kEmitCompilerReadBarrier);
   }
 
@@ -602,7 +617,8 @@
   void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
     LocationSummary* locations = instruction_->GetLocations();
     DCHECK(locations->CanCall());
-    DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(obj_.reg()));
+    DCHECK(ref_.IsRegister()) << ref_;
+    DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(ref_.reg())) << ref_.reg();
     DCHECK(instruction_->IsInstanceFieldGet() ||
            instruction_->IsStaticFieldGet() ||
            instruction_->IsArrayGet() ||
@@ -615,44 +631,225 @@
            (instruction_->IsInvokeStaticOrDirect() && instruction_->GetLocations()->Intrinsified()))
         << "Unexpected instruction in read barrier marking slow path: "
         << instruction_->DebugName();
+    // The read barrier instrumentation of object ArrayGet
+    // instructions does not support the HIntermediateAddress
+    // instruction.
+    DCHECK(!(instruction_->IsArrayGet() &&
+             instruction_->AsArrayGet()->GetArray()->IsIntermediateAddress()));
 
     __ Bind(GetEntryLabel());
     // No need to save live registers; it's taken care of by the
     // entrypoint. Also, there is no need to update the stack mask,
     // as this runtime call will not trigger a garbage collection.
     CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);
-    DCHECK_NE(obj_.reg(), LR);
-    DCHECK_NE(obj_.reg(), WSP);
-    DCHECK_NE(obj_.reg(), WZR);
+    DCHECK_NE(ref_.reg(), LR);
+    DCHECK_NE(ref_.reg(), WSP);
+    DCHECK_NE(ref_.reg(), WZR);
     // IP0 is used internally by the ReadBarrierMarkRegX entry point
     // as a temporary, it cannot be the entry point's input/output.
-    DCHECK_NE(obj_.reg(), IP0);
-    DCHECK(0 <= obj_.reg() && obj_.reg() < kNumberOfWRegisters) << obj_.reg();
+    DCHECK_NE(ref_.reg(), IP0);
+    DCHECK(0 <= ref_.reg() && ref_.reg() < kNumberOfWRegisters) << ref_.reg();
     // "Compact" slow path, saving two moves.
     //
     // Instead of using the standard runtime calling convention (input
     // and output in W0):
     //
-    //   W0 <- obj
+    //   W0 <- ref
     //   W0 <- ReadBarrierMark(W0)
-    //   obj <- W0
+    //   ref <- W0
     //
-    // we just use rX (the register holding `obj`) as input and output
+    // we just use rX (the register containing `ref`) as input and output
+    // of a dedicated entrypoint:
+    //
+    //   rX <- ReadBarrierMarkRegX(rX)
+    //
+    if (entrypoint_.IsValid()) {
+      arm64_codegen->ValidateInvokeRuntimeWithoutRecordingPcInfo(instruction_, this);
+      __ Blr(XRegisterFrom(entrypoint_));
+    } else {
+      // Entrypoint is not already loaded, load from the thread.
+      int32_t entry_point_offset =
+          CodeGenerator::GetReadBarrierMarkEntryPointsOffset<kArm64PointerSize>(ref_.reg());
+      // This runtime call does not require a stack map.
+      arm64_codegen->InvokeRuntimeWithoutRecordingPcInfo(entry_point_offset, instruction_, this);
+    }
+    __ B(GetExitLabel());
+  }
+
+ private:
+  // The location (register) of the marked object reference.
+  const Location ref_;
+
+  // The location of the entrypoint if it is already loaded.
+  const Location entrypoint_;
+
+  DISALLOW_COPY_AND_ASSIGN(ReadBarrierMarkSlowPathARM64);
+};
+
+// Slow path marking an object reference `ref` during a read barrier,
+// and if needed, atomically updating the field `obj.field` in the
+// object `obj` holding this reference after marking (contrary to
+// ReadBarrierMarkSlowPathARM64 above, which never tries to update
+// `obj.field`).
+//
+// This means that after the execution of this slow path, both `ref`
+// and `obj.field` will be up-to-date; i.e., after the flip, both will
+// hold the same to-space reference (unless another thread installed
+// another object reference (different from `ref`) in `obj.field`).
+class ReadBarrierMarkAndUpdateFieldSlowPathARM64 : public SlowPathCodeARM64 {
+ public:
+  ReadBarrierMarkAndUpdateFieldSlowPathARM64(HInstruction* instruction,
+                                             Location ref,
+                                             Register obj,
+                                             Location field_offset,
+                                             Register temp)
+      : SlowPathCodeARM64(instruction),
+        ref_(ref),
+        obj_(obj),
+        field_offset_(field_offset),
+        temp_(temp) {
+    DCHECK(kEmitCompilerReadBarrier);
+  }
+
+  const char* GetDescription() const OVERRIDE {
+    return "ReadBarrierMarkAndUpdateFieldSlowPathARM64";
+  }
+
+  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+    LocationSummary* locations = instruction_->GetLocations();
+    Register ref_reg = WRegisterFrom(ref_);
+    DCHECK(locations->CanCall());
+    DCHECK(ref_.IsRegister()) << ref_;
+    DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(ref_.reg())) << ref_.reg();
+    // This slow path is only used by the UnsafeCASObject intrinsic.
+    DCHECK((instruction_->IsInvokeVirtual() && instruction_->GetLocations()->Intrinsified()))
+        << "Unexpected instruction in read barrier marking and field updating slow path: "
+        << instruction_->DebugName();
+    DCHECK(instruction_->GetLocations()->Intrinsified());
+    DCHECK_EQ(instruction_->AsInvoke()->GetIntrinsic(), Intrinsics::kUnsafeCASObject);
+    DCHECK(field_offset_.IsRegister()) << field_offset_;
+
+    __ Bind(GetEntryLabel());
+
+    // Save the old reference.
+    // Note that we cannot use IP to save the old reference, as IP is
+    // used internally by the ReadBarrierMarkRegX entry point, and we
+    // need the old reference after the call to that entry point.
+    DCHECK_NE(LocationFrom(temp_).reg(), IP0);
+    __ Mov(temp_.W(), ref_reg);
+
+    // No need to save live registers; it's taken care of by the
+    // entrypoint. Also, there is no need to update the stack mask,
+    // as this runtime call will not trigger a garbage collection.
+    CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);
+    DCHECK_NE(ref_.reg(), LR);
+    DCHECK_NE(ref_.reg(), WSP);
+    DCHECK_NE(ref_.reg(), WZR);
+    // IP0 is used internally by the ReadBarrierMarkRegX entry point
+    // as a temporary; it cannot be the entry point's input/output.
+    DCHECK_NE(ref_.reg(), IP0);
+    DCHECK(0 <= ref_.reg() && ref_.reg() < kNumberOfWRegisters) << ref_.reg();
+    // "Compact" slow path, saving two moves.
+    //
+    // Instead of using the standard runtime calling convention (input
+    // and output in W0):
+    //
+    //   W0 <- ref
+    //   W0 <- ReadBarrierMark(W0)
+    //   ref <- W0
+    //
+    // we just use rX (the register containing `ref`) as input and output
     // of a dedicated entrypoint:
     //
     //   rX <- ReadBarrierMarkRegX(rX)
     //
     int32_t entry_point_offset =
-        CodeGenerator::GetReadBarrierMarkEntryPointsOffset<kArm64PointerSize>(obj_.reg());
+        CodeGenerator::GetReadBarrierMarkEntryPointsOffset<kArm64PointerSize>(ref_.reg());
     // This runtime call does not require a stack map.
     arm64_codegen->InvokeRuntimeWithoutRecordingPcInfo(entry_point_offset, instruction_, this);
+
+    // If the new reference is different from the old reference,
+    // update the field in the holder (`*(obj_ + field_offset_)`).
+    //
+    // Note that this field could also hold a different object, if
+    // another thread had concurrently changed it. In that case, the
+    // LDXR/CMP/BNE sequence of instructions in the compare-and-set
+    // (CAS) operation below would abort the CAS, leaving the field
+    // as-is.
+    vixl::aarch64::Label done;
+    __ Cmp(temp_.W(), ref_reg);
+    __ B(eq, &done);
+
+    // Update the holder's field atomically. This may fail if the
+    // mutator updates it before us, but that is OK. This is achieved
+    // using a strong compare-and-set (CAS) operation with relaxed
+    // memory synchronization ordering, where the expected value is
+    // the old reference and the desired value is the new reference.
+
+    MacroAssembler* masm = arm64_codegen->GetVIXLAssembler();
+    UseScratchRegisterScope temps(masm);
+
+    // Convenience aliases.
+    Register base = obj_.W();
+    Register offset = XRegisterFrom(field_offset_);
+    Register expected = temp_.W();
+    Register value = ref_reg;
+    Register tmp_ptr = temps.AcquireX();    // Pointer to actual memory.
+    Register tmp_value = temps.AcquireW();  // Value in memory.
+
+    __ Add(tmp_ptr, base.X(), Operand(offset));
+
+    if (kPoisonHeapReferences) {
+      arm64_codegen->GetAssembler()->PoisonHeapReference(expected);
+      if (value.Is(expected)) {
+        // Do not poison `value`, as it is the same register as
+        // `expected`, which has just been poisoned.
+      } else {
+        arm64_codegen->GetAssembler()->PoisonHeapReference(value);
+      }
+    }
+
+    // do {
+    //   tmp_value = [tmp_ptr] - expected;
+    // } while (tmp_value == 0 && failure([tmp_ptr] <- r_new_value));
+
+    vixl::aarch64::Label loop_head, comparison_failed, exit_loop;
+    __ Bind(&loop_head);
+    __ Ldxr(tmp_value, MemOperand(tmp_ptr));
+    __ Cmp(tmp_value, expected);
+    __ B(&comparison_failed, ne);
+    __ Stxr(tmp_value, value, MemOperand(tmp_ptr));
+    __ Cbnz(tmp_value, &loop_head);
+    __ B(&exit_loop);
+    __ Bind(&comparison_failed);
+    __ Clrex();
+    __ Bind(&exit_loop);
+
+    if (kPoisonHeapReferences) {
+      arm64_codegen->GetAssembler()->UnpoisonHeapReference(expected);
+      if (value.Is(expected)) {
+        // Do not unpoison `value`, as it is the same register as
+        // `expected`, which has just been unpoisoned.
+      } else {
+        arm64_codegen->GetAssembler()->UnpoisonHeapReference(value);
+      }
+    }
+
+    __ Bind(&done);
     __ B(GetExitLabel());
   }
 
  private:
-  const Location obj_;
+  // The location (register) of the marked object reference.
+  const Location ref_;
+  // The register containing the object holding the marked object reference field.
+  const Register obj_;
+  // The location of the offset of the marked reference field within `obj_`.
+  Location field_offset_;
 
-  DISALLOW_COPY_AND_ASSIGN(ReadBarrierMarkSlowPathARM64);
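+  // A temporary register used to save the old reference across the call to
+  // the ReadBarrierMarkRegX entrypoint; it must not be IP0, which that
+  // entrypoint clobbers.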
+  const Register temp_;
+
+  DISALLOW_COPY_AND_ASSIGN(ReadBarrierMarkAndUpdateFieldSlowPathARM64);
 };
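At a high level, the slow path above behaves like the following pseudo-C
sketch (illustrative, not taken from this patch; Mark() stands for the
ReadBarrierMarkRegX entrypoint and CasFieldRelaxed() is a hypothetical
helper for the LDXR/STXR loop):

    mirror::Object* old_ref = ref;
    ref = Mark(ref);                   // May return the to-space copy.
    if (ref != old_ref) {
      // Strong CAS with relaxed ordering; if another thread changed the
      // field concurrently, the CAS fails and the field is left as-is.
      CasFieldRelaxed(obj, field_offset, /* expected */ old_ref, /* desired */ ref);
    }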
 
 // Slow path generating a read barrier for a heap reference.
@@ -698,7 +895,9 @@
            (instruction_->IsInvokeVirtual()) && instruction_->GetLocations()->Intrinsified())
         << "Unexpected instruction in read barrier for heap reference slow path: "
         << instruction_->DebugName();
-    // The read barrier instrumentation does not support the HIntermediateAddress instruction yet.
+    // The read barrier instrumentation of object ArrayGet
+    // instructions does not support the HIntermediateAddress
+    // instruction.
     DCHECK(!(instruction_->IsArrayGet() &&
              instruction_->AsArrayGet()->GetArray()->IsIntermediateAddress()));
 
@@ -768,7 +967,7 @@
         DCHECK((instruction_->AsInvoke()->GetIntrinsic() == Intrinsics::kUnsafeGetObject) ||
                (instruction_->AsInvoke()->GetIntrinsic() == Intrinsics::kUnsafeGetObjectVolatile))
             << instruction_->AsInvoke()->GetIntrinsic();
-        DCHECK_EQ(offset_, 0U);
+        DCHECK_EQ(offset_, 0u);
         DCHECK(index_.IsRegister());
       }
     }
@@ -2014,8 +2213,6 @@
 }
 
 void LocationsBuilderARM64::VisitIntermediateAddress(HIntermediateAddress* instruction) {
-  // The read barrier instrumentation does not support the HIntermediateAddress instruction yet.
-  DCHECK(!kEmitCompilerReadBarrier);
   LocationSummary* locations =
       new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
   locations->SetInAt(0, Location::RequiresRegister());
@@ -2023,10 +2220,7 @@
   locations->SetOut(Location::RequiresRegister());
 }
 
-void InstructionCodeGeneratorARM64::VisitIntermediateAddress(
-    HIntermediateAddress* instruction) {
-  // The read barrier instrumentation does not support the HIntermediateAddress instruction yet.
-  DCHECK(!kEmitCompilerReadBarrier);
+void InstructionCodeGeneratorARM64::VisitIntermediateAddress(HIntermediateAddress* instruction) {
   __ Add(OutputRegister(instruction),
          InputRegisterAt(instruction, 0),
          Operand(InputOperandAt(instruction, 1)));
@@ -2126,11 +2320,15 @@
   // Block pools between `Load` and `MaybeRecordImplicitNullCheck`.
   BlockPoolsScope block_pools(masm);
 
+  // The read barrier instrumentation of object ArrayGet instructions
+  // does not support the HIntermediateAddress instruction.
+  DCHECK(!((type == Primitive::kPrimNot) &&
+           instruction->GetArray()->IsIntermediateAddress() &&
+           kEmitCompilerReadBarrier));
+
   if (type == Primitive::kPrimNot && kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
     // Object ArrayGet with Baker's read barrier case.
     Register temp = temps.AcquireW();
-    // The read barrier instrumentation does not support the HIntermediateAddress instruction yet.
-    DCHECK(!instruction->GetArray()->IsIntermediateAddress());
     // Note that a potential implicit null check is handled in the
     // CodeGeneratorARM64::GenerateArrayLoadWithBakerReadBarrier call.
     codegen_->GenerateArrayLoadWithBakerReadBarrier(
@@ -2142,13 +2340,22 @@
     if (maybe_compressed_char_at) {
       uint32_t count_offset = mirror::String::CountOffset().Uint32Value();
       length = temps.AcquireW();
-      __ Ldr(length, HeapOperand(obj, count_offset));
+      if (instruction->GetArray()->IsIntermediateAddress()) {
+        DCHECK_LT(count_offset, offset);
+        int64_t adjusted_offset = static_cast<int64_t>(count_offset) - static_cast<int64_t>(offset);
+        // Note that `adjusted_offset` is negative, so this will be a LDUR.
+        __ Ldr(length, MemOperand(obj.X(), adjusted_offset));
+      } else {
+        __ Ldr(length, HeapOperand(obj, count_offset));
+      }
       codegen_->MaybeRecordImplicitNullCheck(instruction);
     }
     if (index.IsConstant()) {
       if (maybe_compressed_char_at) {
         vixl::aarch64::Label uncompressed_load, done;
-        __ Tbz(length.W(), kWRegSize - 1, &uncompressed_load);
+        static_assert(static_cast<uint32_t>(mirror::StringCompressionFlag::kCompressed) == 0u,
+                      "Expecting 0=compressed, 1=uncompressed");
+        __ Tbnz(length.W(), 0, &uncompressed_load);
         __ Ldrb(Register(OutputCPURegister(instruction)),
                 HeapOperand(obj, offset + Int64ConstantFrom(index)));
         __ B(&done);
@@ -2163,9 +2370,6 @@
     } else {
       Register temp = temps.AcquireSameSizeAs(obj);
       if (instruction->GetArray()->IsIntermediateAddress()) {
-        // The read barrier instrumentation does not support the
-        // HIntermediateAddress instruction yet.
-        DCHECK(!kEmitCompilerReadBarrier);
         // We do not need to compute the intermediate address from the array: the
         // input instruction has done it already. See the comment in
         // `TryExtractArrayAccessAddress()`.
@@ -2179,7 +2383,9 @@
       }
       if (maybe_compressed_char_at) {
         vixl::aarch64::Label uncompressed_load, done;
-        __ Tbz(length.W(), kWRegSize - 1, &uncompressed_load);
+        static_assert(static_cast<uint32_t>(mirror::StringCompressionFlag::kCompressed) == 0u,
+                      "Expecting 0=compressed, 1=uncompressed");
+        __ Tbnz(length.W(), 0, &uncompressed_load);
         __ Ldrb(Register(OutputCPURegister(instruction)),
                 HeapOperand(temp, XRegisterFrom(index), LSL, 0));
         __ B(&done);
@@ -2224,7 +2430,7 @@
   codegen_->MaybeRecordImplicitNullCheck(instruction);
   // Mask out compression flag from String's array length.
   if (mirror::kUseStringCompression && instruction->IsStringLength()) {
-    __ And(out.W(), out.W(), Operand(static_cast<int32_t>(INT32_MAX)));
+    __ Lsr(out.W(), out.W(), 1u);
   }
 }
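For reference, the encoding assumed by the Tbnz and Lsr changes above (an
illustrative reading of the static_asserts, not text from this patch): bit 0
of the String count field is the compression flag and the character count
occupies the remaining bits, i.e. roughly:

    bool is_compressed = (count_field & 1u) == 0u;  // 0 = compressed, 1 = uncompressed.
    uint32_t length = count_field >> 1;             // Hence the Lsr by 1 above.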
 
@@ -2273,9 +2479,6 @@
       UseScratchRegisterScope temps(masm);
       Register temp = temps.AcquireSameSizeAs(array);
       if (instruction->GetArray()->IsIntermediateAddress()) {
-        // The read barrier instrumentation does not support the
-        // HIntermediateAddress instruction yet.
-        DCHECK(!kEmitCompilerReadBarrier);
         // We do not need to compute the intermediate address from the array: the
         // input instruction has done it already. See the comment in
         // `TryExtractArrayAccessAddress()`.
@@ -3136,12 +3339,26 @@
   HandleFieldSet(instruction, instruction->GetFieldInfo(), instruction->GetValueCanBeNull());
 }
 
-static bool TypeCheckNeedsATemporary(TypeCheckKind type_check_kind) {
-  return kEmitCompilerReadBarrier &&
+// Temp is used for read barrier.
+static size_t NumberOfInstanceOfTemps(TypeCheckKind type_check_kind) {
+  if (kEmitCompilerReadBarrier &&
       (kUseBakerReadBarrier ||
-       type_check_kind == TypeCheckKind::kAbstractClassCheck ||
-       type_check_kind == TypeCheckKind::kClassHierarchyCheck ||
-       type_check_kind == TypeCheckKind::kArrayObjectCheck);
+          type_check_kind == TypeCheckKind::kAbstractClassCheck ||
+          type_check_kind == TypeCheckKind::kClassHierarchyCheck ||
+          type_check_kind == TypeCheckKind::kArrayObjectCheck)) {
+    return 1;
+  }
+  return 0;
+}
+
+// Interface case has 3 temps, one for holding the number of interfaces, one for the current
+// interface pointer, one for loading the current interface.
+// The other checks have one temp for loading the object's class.
+static size_t NumberOfCheckCastTemps(TypeCheckKind type_check_kind) {
+  if (type_check_kind == TypeCheckKind::kInterfaceCheck) {
+    return 3;
+  }
+  return 1 + NumberOfInstanceOfTemps(type_check_kind);
 }
 
 void LocationsBuilderARM64::VisitInstanceOf(HInstanceOf* instruction) {
@@ -3173,11 +3390,8 @@
   // The "out" register is used as a temporary, so it overlaps with the inputs.
   // Note that TypeCheckSlowPathARM64 uses this register too.
   locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
-  // When read barriers are enabled, we need a temporary register for
-  // some cases.
-  if (TypeCheckNeedsATemporary(type_check_kind)) {
-    locations->AddTemp(Location::RequiresRegister());
-  }
+  // Add temps if necessary for read barriers.
+  locations->AddRegisterTemps(NumberOfInstanceOfTemps(type_check_kind));
 }
 
 void InstructionCodeGeneratorARM64::VisitInstanceOf(HInstanceOf* instruction) {
@@ -3188,9 +3402,9 @@
   Register cls = InputRegisterAt(instruction, 1);
   Location out_loc = locations->Out();
   Register out = OutputRegister(instruction);
-  Location maybe_temp_loc = TypeCheckNeedsATemporary(type_check_kind) ?
-      locations->GetTemp(0) :
-      Location::NoLocation();
+  const size_t num_temps = NumberOfInstanceOfTemps(type_check_kind);
+  DCHECK_LE(num_temps, 1u);
+  Location maybe_temp_loc = (num_temps >= 1) ? locations->GetTemp(0) : Location::NoLocation();
   uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
   uint32_t super_offset = mirror::Class::SuperClassOffset().Int32Value();
   uint32_t component_offset = mirror::Class::ComponentTypeOffset().Int32Value();
@@ -3205,11 +3419,15 @@
     __ Cbz(obj, &zero);
   }
 
-  // /* HeapReference<Class> */ out = obj->klass_
-  GenerateReferenceLoadTwoRegisters(instruction, out_loc, obj_loc, class_offset, maybe_temp_loc);
-
   switch (type_check_kind) {
     case TypeCheckKind::kExactCheck: {
+      // /* HeapReference<Class> */ out = obj->klass_
+      GenerateReferenceLoadTwoRegisters(instruction,
+                                        out_loc,
+                                        obj_loc,
+                                        class_offset,
+                                        maybe_temp_loc,
+                                        kCompilerReadBarrierOption);
       __ Cmp(out, cls);
       __ Cset(out, eq);
       if (zero.IsLinked()) {
@@ -3219,12 +3437,23 @@
     }
 
     case TypeCheckKind::kAbstractClassCheck: {
+      // /* HeapReference<Class> */ out = obj->klass_
+      GenerateReferenceLoadTwoRegisters(instruction,
+                                        out_loc,
+                                        obj_loc,
+                                        class_offset,
+                                        maybe_temp_loc,
+                                        kCompilerReadBarrierOption);
       // If the class is abstract, we eagerly fetch the super class of the
       // object to avoid doing a comparison we know will fail.
       vixl::aarch64::Label loop, success;
       __ Bind(&loop);
       // /* HeapReference<Class> */ out = out->super_class_
-      GenerateReferenceLoadOneRegister(instruction, out_loc, super_offset, maybe_temp_loc);
+      GenerateReferenceLoadOneRegister(instruction,
+                                       out_loc,
+                                       super_offset,
+                                       maybe_temp_loc,
+                                       kCompilerReadBarrierOption);
       // If `out` is null, we use it for the result, and jump to `done`.
       __ Cbz(out, &done);
       __ Cmp(out, cls);
@@ -3237,13 +3466,24 @@
     }
 
     case TypeCheckKind::kClassHierarchyCheck: {
+      // /* HeapReference<Class> */ out = obj->klass_
+      GenerateReferenceLoadTwoRegisters(instruction,
+                                        out_loc,
+                                        obj_loc,
+                                        class_offset,
+                                        maybe_temp_loc,
+                                        kCompilerReadBarrierOption);
       // Walk over the class hierarchy to find a match.
       vixl::aarch64::Label loop, success;
       __ Bind(&loop);
       __ Cmp(out, cls);
       __ B(eq, &success);
       // /* HeapReference<Class> */ out = out->super_class_
-      GenerateReferenceLoadOneRegister(instruction, out_loc, super_offset, maybe_temp_loc);
+      GenerateReferenceLoadOneRegister(instruction,
+                                       out_loc,
+                                       super_offset,
+                                       maybe_temp_loc,
+                                       kCompilerReadBarrierOption);
       __ Cbnz(out, &loop);
       // If `out` is null, we use it for the result, and jump to `done`.
       __ B(&done);
@@ -3256,13 +3496,24 @@
     }
 
     case TypeCheckKind::kArrayObjectCheck: {
+      // /* HeapReference<Class> */ out = obj->klass_
+      GenerateReferenceLoadTwoRegisters(instruction,
+                                        out_loc,
+                                        obj_loc,
+                                        class_offset,
+                                        maybe_temp_loc,
+                                        kCompilerReadBarrierOption);
       // Do an exact check.
       vixl::aarch64::Label exact_check;
       __ Cmp(out, cls);
       __ B(eq, &exact_check);
       // Otherwise, we need to check that the object's class is a non-primitive array.
       // /* HeapReference<Class> */ out = out->component_type_
-      GenerateReferenceLoadOneRegister(instruction, out_loc, component_offset, maybe_temp_loc);
+      GenerateReferenceLoadOneRegister(instruction,
+                                       out_loc,
+                                       component_offset,
+                                       maybe_temp_loc,
+                                       kCompilerReadBarrierOption);
       // If `out` is null, we use it for the result, and jump to `done`.
       __ Cbz(out, &done);
       __ Ldrh(out, HeapOperand(out, primitive_offset));
@@ -3275,6 +3526,14 @@
     }
 
     case TypeCheckKind::kArrayCheck: {
+      // No read barrier since the slow path will retry upon failure.
+      // /* HeapReference<Class> */ out = obj->klass_
+      GenerateReferenceLoadTwoRegisters(instruction,
+                                        out_loc,
+                                        obj_loc,
+                                        class_offset,
+                                        maybe_temp_loc,
+                                        kWithoutReadBarrier);
       __ Cmp(out, cls);
       DCHECK(locations->OnlyCallsOnSlowPath());
       slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathARM64(instruction,
@@ -3358,13 +3617,8 @@
   LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
   locations->SetInAt(0, Location::RequiresRegister());
   locations->SetInAt(1, Location::RequiresRegister());
-  // Note that TypeCheckSlowPathARM64 uses this "temp" register too.
-  locations->AddTemp(Location::RequiresRegister());
-  // When read barriers are enabled, we need an additional temporary
-  // register for some cases.
-  if (TypeCheckNeedsATemporary(type_check_kind)) {
-    locations->AddTemp(Location::RequiresRegister());
-  }
+  // Add temps for read barriers and other uses. One is used by TypeCheckSlowPathARM64.
+  locations->AddRegisterTemps(NumberOfCheckCastTemps(type_check_kind));
 }
 
 void InstructionCodeGeneratorARM64::VisitCheckCast(HCheckCast* instruction) {
@@ -3373,22 +3627,34 @@
   Location obj_loc = locations->InAt(0);
   Register obj = InputRegisterAt(instruction, 0);
   Register cls = InputRegisterAt(instruction, 1);
+  const size_t num_temps = NumberOfCheckCastTemps(type_check_kind);
+  DCHECK_GE(num_temps, 1u);
+  DCHECK_LE(num_temps, 3u);
   Location temp_loc = locations->GetTemp(0);
-  Location maybe_temp2_loc = TypeCheckNeedsATemporary(type_check_kind) ?
-      locations->GetTemp(1) :
-      Location::NoLocation();
+  Location maybe_temp2_loc = (num_temps >= 2) ? locations->GetTemp(1) : Location::NoLocation();
+  Location maybe_temp3_loc = (num_temps >= 3) ? locations->GetTemp(2) : Location::NoLocation();
   Register temp = WRegisterFrom(temp_loc);
-  uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
-  uint32_t super_offset = mirror::Class::SuperClassOffset().Int32Value();
-  uint32_t component_offset = mirror::Class::ComponentTypeOffset().Int32Value();
-  uint32_t primitive_offset = mirror::Class::PrimitiveTypeOffset().Int32Value();
+  const uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
+  const uint32_t super_offset = mirror::Class::SuperClassOffset().Int32Value();
+  const uint32_t component_offset = mirror::Class::ComponentTypeOffset().Int32Value();
+  const uint32_t primitive_offset = mirror::Class::PrimitiveTypeOffset().Int32Value();
+  const uint32_t iftable_offset = mirror::Class::IfTableOffset().Uint32Value();
+  const uint32_t array_length_offset = mirror::Array::LengthOffset().Uint32Value();
+  const uint32_t object_array_data_offset =
+      mirror::Array::DataOffset(kHeapReferenceSize).Uint32Value();
 
-  bool is_type_check_slow_path_fatal =
-      (type_check_kind == TypeCheckKind::kExactCheck ||
-       type_check_kind == TypeCheckKind::kAbstractClassCheck ||
-       type_check_kind == TypeCheckKind::kClassHierarchyCheck ||
-       type_check_kind == TypeCheckKind::kArrayObjectCheck) &&
-      !instruction->CanThrowIntoCatchBlock();
+  bool is_type_check_slow_path_fatal = false;
+  // Always false when read barriers are enabled: the checks below avoid read barriers for
+  // performance and code size reasons, which can produce false negatives, so we may need to
+  // fall back to the non-fatal runtime entrypoint.
+  if (!kEmitCompilerReadBarrier) {
+    is_type_check_slow_path_fatal =
+        (type_check_kind == TypeCheckKind::kExactCheck ||
+         type_check_kind == TypeCheckKind::kAbstractClassCheck ||
+         type_check_kind == TypeCheckKind::kClassHierarchyCheck ||
+         type_check_kind == TypeCheckKind::kArrayObjectCheck) &&
+        !instruction->CanThrowIntoCatchBlock();
+  }
   SlowPathCodeARM64* type_check_slow_path =
       new (GetGraph()->GetArena()) TypeCheckSlowPathARM64(instruction,
                                                           is_type_check_slow_path_fatal);
@@ -3400,12 +3666,17 @@
     __ Cbz(obj, &done);
   }
 
-  // /* HeapReference<Class> */ temp = obj->klass_
-  GenerateReferenceLoadTwoRegisters(instruction, temp_loc, obj_loc, class_offset, maybe_temp2_loc);
-
   switch (type_check_kind) {
     case TypeCheckKind::kExactCheck:
     case TypeCheckKind::kArrayCheck: {
+      // /* HeapReference<Class> */ temp = obj->klass_
+      GenerateReferenceLoadTwoRegisters(instruction,
+                                        temp_loc,
+                                        obj_loc,
+                                        class_offset,
+                                        maybe_temp2_loc,
+                                        kWithoutReadBarrier);
+
       __ Cmp(temp, cls);
       // Jump to slow path for throwing the exception or doing a
       // more involved array check.
@@ -3414,34 +3685,43 @@
     }
 
     case TypeCheckKind::kAbstractClassCheck: {
+      // /* HeapReference<Class> */ temp = obj->klass_
+      GenerateReferenceLoadTwoRegisters(instruction,
+                                        temp_loc,
+                                        obj_loc,
+                                        class_offset,
+                                        maybe_temp2_loc,
+                                        kWithoutReadBarrier);
+
       // If the class is abstract, we eagerly fetch the super class of the
       // object to avoid doing a comparison we know will fail.
-      vixl::aarch64::Label loop, compare_classes;
+      vixl::aarch64::Label loop;
       __ Bind(&loop);
       // /* HeapReference<Class> */ temp = temp->super_class_
-      GenerateReferenceLoadOneRegister(instruction, temp_loc, super_offset, maybe_temp2_loc);
+      GenerateReferenceLoadOneRegister(instruction,
+                                       temp_loc,
+                                       super_offset,
+                                       maybe_temp2_loc,
+                                       kWithoutReadBarrier);
 
-      // If the class reference currently in `temp` is not null, jump
-      // to the `compare_classes` label to compare it with the checked
-      // class.
-      __ Cbnz(temp, &compare_classes);
-      // Otherwise, jump to the slow path to throw the exception.
-      //
-      // But before, move back the object's class into `temp` before
-      // going into the slow path, as it has been overwritten in the
-      // meantime.
-      // /* HeapReference<Class> */ temp = obj->klass_
-      GenerateReferenceLoadTwoRegisters(
-          instruction, temp_loc, obj_loc, class_offset, maybe_temp2_loc);
-      __ B(type_check_slow_path->GetEntryLabel());
-
-      __ Bind(&compare_classes);
+      // If the class reference currently in `temp` is null, jump to the slow path to throw the
+      // exception.
+      __ Cbz(temp, type_check_slow_path->GetEntryLabel());
+      // Otherwise, compare classes.
       __ Cmp(temp, cls);
       __ B(ne, &loop);
       break;
     }
 
     case TypeCheckKind::kClassHierarchyCheck: {
+      // /* HeapReference<Class> */ temp = obj->klass_
+      GenerateReferenceLoadTwoRegisters(instruction,
+                                        temp_loc,
+                                        obj_loc,
+                                        class_offset,
+                                        maybe_temp2_loc,
+                                        kWithoutReadBarrier);
+
       // Walk over the class hierarchy to find a match.
       vixl::aarch64::Label loop;
       __ Bind(&loop);
@@ -3449,64 +3729,53 @@
       __ B(eq, &done);
 
       // /* HeapReference<Class> */ temp = temp->super_class_
-      GenerateReferenceLoadOneRegister(instruction, temp_loc, super_offset, maybe_temp2_loc);
+      GenerateReferenceLoadOneRegister(instruction,
+                                       temp_loc,
+                                       super_offset,
+                                       maybe_temp2_loc,
+                                       kWithoutReadBarrier);
 
       // If the class reference currently in `temp` is not null, jump
       // back at the beginning of the loop.
       __ Cbnz(temp, &loop);
       // Otherwise, jump to the slow path to throw the exception.
-      //
-      // But before, move back the object's class into `temp` before
-      // going into the slow path, as it has been overwritten in the
-      // meantime.
-      // /* HeapReference<Class> */ temp = obj->klass_
-      GenerateReferenceLoadTwoRegisters(
-          instruction, temp_loc, obj_loc, class_offset, maybe_temp2_loc);
       __ B(type_check_slow_path->GetEntryLabel());
       break;
     }
 
     case TypeCheckKind::kArrayObjectCheck: {
+      // /* HeapReference<Class> */ temp = obj->klass_
+      GenerateReferenceLoadTwoRegisters(instruction,
+                                        temp_loc,
+                                        obj_loc,
+                                        class_offset,
+                                        maybe_temp2_loc,
+                                        kWithoutReadBarrier);
+
       // Do an exact check.
-      vixl::aarch64::Label check_non_primitive_component_type;
       __ Cmp(temp, cls);
       __ B(eq, &done);
 
       // Otherwise, we need to check that the object's class is a non-primitive array.
       // /* HeapReference<Class> */ temp = temp->component_type_
-      GenerateReferenceLoadOneRegister(instruction, temp_loc, component_offset, maybe_temp2_loc);
+      GenerateReferenceLoadOneRegister(instruction,
+                                       temp_loc,
+                                       component_offset,
+                                       maybe_temp2_loc,
+                                       kWithoutReadBarrier);
 
-      // If the component type is not null (i.e. the object is indeed
-      // an array), jump to label `check_non_primitive_component_type`
-      // to further check that this component type is not a primitive
-      // type.
-      __ Cbnz(temp, &check_non_primitive_component_type);
-      // Otherwise, jump to the slow path to throw the exception.
-      //
-      // But before, move back the object's class into `temp` before
-      // going into the slow path, as it has been overwritten in the
-      // meantime.
-      // /* HeapReference<Class> */ temp = obj->klass_
-      GenerateReferenceLoadTwoRegisters(
-          instruction, temp_loc, obj_loc, class_offset, maybe_temp2_loc);
-      __ B(type_check_slow_path->GetEntryLabel());
-
-      __ Bind(&check_non_primitive_component_type);
+      // If the component type is null, jump to the slow path to throw the exception.
+      __ Cbz(temp, type_check_slow_path->GetEntryLabel());
+      // Otherwise, the object is indeed an array. Further check that this component type is not a
+      // primitive type.
       __ Ldrh(temp, HeapOperand(temp, primitive_offset));
       static_assert(Primitive::kPrimNot == 0, "Expected 0 for kPrimNot");
-      __ Cbz(temp, &done);
-      // Same comment as above regarding `temp` and the slow path.
-      // /* HeapReference<Class> */ temp = obj->klass_
-      GenerateReferenceLoadTwoRegisters(
-          instruction, temp_loc, obj_loc, class_offset, maybe_temp2_loc);
-      __ B(type_check_slow_path->GetEntryLabel());
+      __ Cbnz(temp, type_check_slow_path->GetEntryLabel());
       break;
     }
 
     case TypeCheckKind::kUnresolvedCheck:
-    case TypeCheckKind::kInterfaceCheck:
-      // We always go into the type check slow path for the unresolved
-      // and interface check cases.
+      // We always go into the type check slow path for the unresolved check case.
       //
       // We cannot directly call the CheckCast runtime entry point
       // without resorting to a type checking slow path here (i.e. by
@@ -3515,15 +3784,40 @@
       // instruction (following the runtime calling convention), which
       // might be cluttered by the potential first read barrier
       // emission at the beginning of this method.
-      //
-      // TODO: Introduce a new runtime entry point taking the object
-      // to test (instead of its class) as argument, and let it deal
-      // with the read barrier issues. This will let us refactor this
-      // case of the `switch` code as it was previously (with a direct
-      // call to the runtime not using a type checking slow path).
-      // This should also be beneficial for the other cases above.
       __ B(type_check_slow_path->GetEntryLabel());
       break;
+    case TypeCheckKind::kInterfaceCheck: {
+      // /* HeapReference<Class> */ temp = obj->klass_
+      GenerateReferenceLoadTwoRegisters(instruction,
+                                        temp_loc,
+                                        obj_loc,
+                                        class_offset,
+                                        maybe_temp2_loc,
+                                        kWithoutReadBarrier);
+
+      // /* HeapReference<Class> */ temp = temp->iftable_
+      GenerateReferenceLoadTwoRegisters(instruction,
+                                        temp_loc,
+                                        temp_loc,
+                                        iftable_offset,
+                                        maybe_temp2_loc,
+                                        kWithoutReadBarrier);
+      // Iftable is never null.
+      __ Ldr(WRegisterFrom(maybe_temp2_loc), HeapOperand(temp.W(), array_length_offset));
+      // Loop through the iftable and check if any class matches.
+      vixl::aarch64::Label start_loop;
+      __ Bind(&start_loop);
+      __ Cbz(WRegisterFrom(maybe_temp2_loc), type_check_slow_path->GetEntryLabel());
+      __ Ldr(WRegisterFrom(maybe_temp3_loc), HeapOperand(temp.W(), object_array_data_offset));
+      GetAssembler()->MaybeUnpoisonHeapReference(WRegisterFrom(maybe_temp3_loc));
+      // Go to next interface.
+      __ Add(temp, temp, 2 * kHeapReferenceSize);
+      __ Sub(WRegisterFrom(maybe_temp2_loc), WRegisterFrom(maybe_temp2_loc), 2);
+      // Compare the classes and continue the loop if they do not match.
+      __ Cmp(cls, WRegisterFrom(maybe_temp3_loc));
+      __ B(ne, &start_loop);
+      break;
+    }
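A rough pseudo-C sketch of the interface check emitted above (illustrative,
not part of this patch; field names are approximate). Each iftable entry is
an (interface class, method array) pair, hence the stride of two references:

    IfTable* iftable = obj->klass_->iftable_;     // Never null.
    int32_t remaining = iftable->length_;         // Array length of the iftable.
    int32_t index = 0;
    for (;;) {
      if (remaining == 0) goto type_check_slow_path;  // No interface matched: throw.
      Class* candidate = iftable->data_[index];       // Unpoisoned heap reference.
      index += 2;                                     // Skip the paired method array.
      remaining -= 2;
      if (candidate == cls) break;                    // Match: the check-cast succeeds.
    }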
   }
   __ Bind(&done);
 
@@ -4086,7 +4380,9 @@
   Location out_loc = cls->GetLocations()->Out();
   Register out = OutputRegister(cls);
 
-  const bool requires_read_barrier = kEmitCompilerReadBarrier && !cls->IsInBootImage();
+  const ReadBarrierOption read_barrier_option = cls->IsInBootImage()
+      ? kWithoutReadBarrier
+      : kCompilerReadBarrierOption;
   bool generate_null_check = false;
   switch (cls->GetLoadKind()) {
     case HLoadClass::LoadKind::kReferrersClass: {
@@ -4098,17 +4394,17 @@
                               out_loc,
                               current_method,
                               ArtMethod::DeclaringClassOffset().Int32Value(),
-                              /*fixup_label*/ nullptr,
-                              requires_read_barrier);
+                              /* fixup_label */ nullptr,
+                              read_barrier_option);
       break;
     }
     case HLoadClass::LoadKind::kBootImageLinkTimeAddress:
-      DCHECK(!requires_read_barrier);
+      DCHECK_EQ(read_barrier_option, kWithoutReadBarrier);
       __ Ldr(out, codegen_->DeduplicateBootImageTypeLiteral(cls->GetDexFile(),
                                                             cls->GetTypeIndex()));
       break;
     case HLoadClass::LoadKind::kBootImageLinkTimePcRelative: {
-      DCHECK(!requires_read_barrier);
+      DCHECK_EQ(read_barrier_option, kWithoutReadBarrier);
       // Add ADRP with its PC-relative type patch.
       const DexFile& dex_file = cls->GetDexFile();
       uint32_t type_index = cls->GetTypeIndex();
@@ -4121,7 +4417,7 @@
       break;
     }
     case HLoadClass::LoadKind::kBootImageAddress: {
-      DCHECK(!requires_read_barrier);
+      DCHECK_EQ(read_barrier_option, kWithoutReadBarrier);
       DCHECK(cls->GetAddress() != 0u && IsUint<32>(cls->GetAddress()));
       __ Ldr(out.W(), codegen_->DeduplicateBootImageAddressLiteral(cls->GetAddress()));
       break;
@@ -4143,8 +4439,8 @@
                               out_loc,
                               out.X(),
                               offset,
-                              /*fixup_label*/ nullptr,
-                              requires_read_barrier);
+                              /* fixup_label */ nullptr,
+                              read_barrier_option);
       generate_null_check = !cls->IsInDexCache();
       break;
     }
@@ -4164,7 +4460,7 @@
                               out.X(),
                               /* offset placeholder */ 0,
                               ldr_label,
-                              requires_read_barrier);
+                              read_barrier_option);
       generate_null_check = !cls->IsInDexCache();
       break;
     }
@@ -4180,8 +4476,8 @@
                               out_loc,
                               out.X(),
                               CodeGenerator::GetCacheOffset(cls->GetTypeIndex()),
-                              /*fixup_label*/ nullptr,
-                              requires_read_barrier);
+                              /* fixup_label */ nullptr,
+                              read_barrier_option);
       generate_null_check = !cls->IsInDexCache();
       break;
     }
@@ -4236,9 +4532,6 @@
       break;
     case HLoadString::LoadKind::kBootImageAddress:
       break;
-    case HLoadString::LoadKind::kDexCacheAddress:
-      DCHECK(Runtime::Current()->UseJitCompilation());
-      break;
     case HLoadString::LoadKind::kBssEntry:
       DCHECK(!Runtime::Current()->UseJitCompilation());
       break;
@@ -4319,8 +4612,9 @@
       GenerateGcRootFieldLoad(load,
                               load->GetLocations()->Out(),
                               temp,
-                              /* placeholder */ 0u,
-                              ldr_label);
+                              /* offset placeholder */ 0u,
+                              ldr_label,
+                              kCompilerReadBarrierOption);
       SlowPathCodeARM64* slow_path =
           new (GetGraph()->GetArena()) LoadStringSlowPathARM64(load, temp, adrp_label);
       codegen_->AddSlowPath(slow_path);
@@ -5022,13 +5316,16 @@
   }
 }
 
-void InstructionCodeGeneratorARM64::GenerateReferenceLoadOneRegister(HInstruction* instruction,
-                                                                     Location out,
-                                                                     uint32_t offset,
-                                                                     Location maybe_temp) {
+void InstructionCodeGeneratorARM64::GenerateReferenceLoadOneRegister(
+    HInstruction* instruction,
+    Location out,
+    uint32_t offset,
+    Location maybe_temp,
+    ReadBarrierOption read_barrier_option) {
   Primitive::Type type = Primitive::kPrimNot;
   Register out_reg = RegisterFrom(out, type);
-  if (kEmitCompilerReadBarrier) {
+  if (read_barrier_option == kWithReadBarrier) {
+    CHECK(kEmitCompilerReadBarrier);
     Register temp_reg = RegisterFrom(maybe_temp, type);
     if (kUseBakerReadBarrier) {
       // Load with fast path based Baker's read barrier.
@@ -5058,15 +5355,18 @@
   }
 }
 
-void InstructionCodeGeneratorARM64::GenerateReferenceLoadTwoRegisters(HInstruction* instruction,
-                                                                      Location out,
-                                                                      Location obj,
-                                                                      uint32_t offset,
-                                                                      Location maybe_temp) {
+void InstructionCodeGeneratorARM64::GenerateReferenceLoadTwoRegisters(
+    HInstruction* instruction,
+    Location out,
+    Location obj,
+    uint32_t offset,
+    Location maybe_temp,
+    ReadBarrierOption read_barrier_option) {
   Primitive::Type type = Primitive::kPrimNot;
   Register out_reg = RegisterFrom(out, type);
   Register obj_reg = RegisterFrom(obj, type);
-  if (kEmitCompilerReadBarrier) {
+  if (read_barrier_option == kWithReadBarrier) {
+    CHECK(kEmitCompilerReadBarrier);
     if (kUseBakerReadBarrier) {
       // Load with fast path based Baker's read barrier.
       Register temp_reg = RegisterFrom(maybe_temp, type);
@@ -5092,23 +5392,25 @@
   }
 }
 
-void InstructionCodeGeneratorARM64::GenerateGcRootFieldLoad(HInstruction* instruction,
-                                                            Location root,
-                                                            Register obj,
-                                                            uint32_t offset,
-                                                            vixl::aarch64::Label* fixup_label,
-                                                            bool requires_read_barrier) {
+void InstructionCodeGeneratorARM64::GenerateGcRootFieldLoad(
+    HInstruction* instruction,
+    Location root,
+    Register obj,
+    uint32_t offset,
+    vixl::aarch64::Label* fixup_label,
+    ReadBarrierOption read_barrier_option) {
   DCHECK(fixup_label == nullptr || offset == 0u);
   Register root_reg = RegisterFrom(root, Primitive::kPrimNot);
-  if (requires_read_barrier) {
+  if (read_barrier_option == kWithReadBarrier) {
     DCHECK(kEmitCompilerReadBarrier);
     if (kUseBakerReadBarrier) {
       // Fast path implementation of art::ReadBarrier::BarrierForRoot when
       // Baker's read barrier are used:
       //
       //   root = obj.field;
-      //   if (Thread::Current()->GetIsGcMarking()) {
-      //     root = ReadBarrier::Mark(root)
+      //   temp = Thread::Current()->pReadBarrierMarkReg ## root.reg()
+      //   if (temp != null) {
+      //     root = temp(root)
       //   }
 
       // /* GcRoot<mirror::Object> */ root = *(obj + offset)
@@ -5125,16 +5427,22 @@
                     "art::mirror::CompressedReference<mirror::Object> and int32_t "
                     "have different sizes.");
 
-      // Slow path marking the GC root `root`.
-      SlowPathCodeARM64* slow_path =
-          new (GetGraph()->GetArena()) ReadBarrierMarkSlowPathARM64(instruction, root);
-      codegen_->AddSlowPath(slow_path);
+      Register temp = lr;
 
-      MacroAssembler* masm = GetVIXLAssembler();
-      UseScratchRegisterScope temps(masm);
-      Register temp = temps.AcquireW();
-      // temp = Thread::Current()->GetIsGcMarking()
-      __ Ldr(temp, MemOperand(tr, Thread::IsGcMarkingOffset<kArm64PointerSize>().Int32Value()));
+      // Slow path marking the GC root `root`. The entrypoint will already be loaded in `temp`.
+      SlowPathCodeARM64* slow_path =
+          new (GetGraph()->GetArena()) ReadBarrierMarkSlowPathARM64(instruction,
+                                                                    root,
+                                                                    LocationFrom(temp));
+      codegen_->AddSlowPath(slow_path);
+      const int32_t entry_point_offset =
+          CodeGenerator::GetReadBarrierMarkEntryPointsOffset<kArm64PointerSize>(root.reg());
+      // temp = Thread::Current()->pReadBarrierMarkReg ## root.reg()
+      // Loading the entrypoint does not require a load acquire since it is only changed when
+      // threads are suspended or running a checkpoint.
+      __ Ldr(temp, MemOperand(tr, entry_point_offset));
+      // The entrypoint is null when the GC is not marking; this saves one load compared to
+      // checking GetIsGcMarking.
       __ Cbnz(temp, slow_path->GetEntryLabel());
       __ Bind(slow_path->GetExitLabel());
     } else {
@@ -5174,7 +5482,7 @@
 
   // /* HeapReference<Object> */ ref = *(obj + offset)
   Location no_index = Location::NoLocation();
-  size_t no_scale_factor = 0U;
+  size_t no_scale_factor = 0u;
   GenerateReferenceLoadWithBakerReadBarrier(instruction,
                                             ref,
                                             obj,
@@ -5225,7 +5533,8 @@
                                                                    size_t scale_factor,
                                                                    Register temp,
                                                                    bool needs_null_check,
-                                                                   bool use_load_acquire) {
+                                                                   bool use_load_acquire,
+                                                                   bool always_update_field) {
   DCHECK(kEmitCompilerReadBarrier);
   DCHECK(kUseBakerReadBarrier);
   // If we are emitting an array load, we should not be using a
@@ -5246,7 +5555,7 @@
   //   uint32_t rb_state = Lockword(obj->monitor_).ReadBarrierState();
   //   lfence;  // Load fence or artificial data dependency to prevent load-load reordering
   //   HeapReference<Object> ref = *src;  // Original reference load.
-  //   bool is_gray = (rb_state == ReadBarrier::gray_ptr_);
+  //   bool is_gray = (rb_state == ReadBarrier::GrayState());
   //   if (is_gray) {
   //     ref = ReadBarrier::Mark(ref);  // Performed by runtime entrypoint slow path.
   //   }
@@ -5278,7 +5587,9 @@
 
   // The actual reference load.
   if (index.IsValid()) {
-    // Load types involving an "index".
+    // Load types involving an "index": ArrayGet,
+    // UnsafeGetObject/UnsafeGetObjectVolatile and UnsafeCASObject
+    // intrinsics.
     if (use_load_acquire) {
       // UnsafeGetObjectVolatile intrinsic case.
       // Register `index` is not an index in an object array, but an
@@ -5287,9 +5598,9 @@
       DCHECK(instruction->GetLocations()->Intrinsified());
       DCHECK(instruction->AsInvoke()->GetIntrinsic() == Intrinsics::kUnsafeGetObjectVolatile)
           << instruction->AsInvoke()->GetIntrinsic();
-      DCHECK_EQ(offset, 0U);
-      DCHECK_EQ(scale_factor, 0U);
-      DCHECK_EQ(needs_null_check, 0U);
+      DCHECK_EQ(offset, 0u);
+      DCHECK_EQ(scale_factor, 0u);
+      DCHECK_EQ(needs_null_check, 0u);
       // /* HeapReference<Object> */ ref = *(obj + index)
       MemOperand field = HeapOperand(obj, XRegisterFrom(index));
       LoadAcquire(instruction, ref_reg, field, /* needs_null_check */ false);
@@ -5300,10 +5611,10 @@
         uint32_t computed_offset = offset + (Int64ConstantFrom(index) << scale_factor);
         Load(type, ref_reg, HeapOperand(obj, computed_offset));
       } else {
-        Register temp2 = temps.AcquireW();
-        __ Add(temp2, obj, offset);
-        Load(type, ref_reg, HeapOperand(temp2, XRegisterFrom(index), LSL, scale_factor));
-        temps.Release(temp2);
+        Register temp3 = temps.AcquireW();
+        __ Add(temp3, obj, offset);
+        Load(type, ref_reg, HeapOperand(temp3, XRegisterFrom(index), LSL, scale_factor));
+        temps.Release(temp3);
       }
     }
   } else {
@@ -5320,16 +5631,26 @@
   GetAssembler()->MaybeUnpoisonHeapReference(ref_reg);
 
   // Slow path marking the object `ref` when it is gray.
-  SlowPathCodeARM64* slow_path =
-      new (GetGraph()->GetArena()) ReadBarrierMarkSlowPathARM64(instruction, ref);
+  SlowPathCodeARM64* slow_path;
+  if (always_update_field) {
+    // ReadBarrierMarkAndUpdateFieldSlowPathARM64 only supports
+    // addresses of the form `obj + field_offset`, where `obj` is a
+    // register and `field_offset` is a register. Thus `offset` and
+    // `scale_factor` above are expected to be zero in this code path.
+    DCHECK_EQ(offset, 0u);
+    DCHECK_EQ(scale_factor, 0u);  /* "times 1" */
+    slow_path = new (GetGraph()->GetArena()) ReadBarrierMarkAndUpdateFieldSlowPathARM64(
+        instruction, ref, obj, /* field_offset */ index, temp);
+  } else {
+    slow_path = new (GetGraph()->GetArena()) ReadBarrierMarkSlowPathARM64(instruction, ref);
+  }
   AddSlowPath(slow_path);
 
-  // if (rb_state == ReadBarrier::gray_ptr_)
+  // if (rb_state == ReadBarrier::GrayState())
   //   ref = ReadBarrier::Mark(ref);
   // Given the numeric representation, it's enough to check the low bit of the rb_state.
-  static_assert(ReadBarrier::white_ptr_ == 0, "Expecting white to have value 0");
-  static_assert(ReadBarrier::gray_ptr_ == 1, "Expecting gray to have value 1");
-  static_assert(ReadBarrier::black_ptr_ == 2, "Expecting black to have value 2");
+  static_assert(ReadBarrier::WhiteState() == 0, "Expecting white to have value 0");
+  static_assert(ReadBarrier::GrayState() == 1, "Expecting gray to have value 1");
   __ Tbnz(temp, LockWord::kReadBarrierStateShift, slow_path->GetEntryLabel());
   __ Bind(slow_path->GetExitLabel());
 }
diff --git a/compiler/optimizing/code_generator_arm64.h b/compiler/optimizing/code_generator_arm64.h
index eb28ecb..0e8d4fd 100644
--- a/compiler/optimizing/code_generator_arm64.h
+++ b/compiler/optimizing/code_generator_arm64.h
@@ -269,7 +269,8 @@
   void GenerateReferenceLoadOneRegister(HInstruction* instruction,
                                         Location out,
                                         uint32_t offset,
-                                        Location maybe_temp);
+                                        Location maybe_temp,
+                                        ReadBarrierOption read_barrier_option);
   // Generate a heap reference load using two different registers
   // `out` and `obj`:
   //
@@ -284,18 +285,19 @@
                                          Location out,
                                          Location obj,
                                          uint32_t offset,
-                                         Location maybe_temp);
+                                         Location maybe_temp,
+                                         ReadBarrierOption read_barrier_option);
   // Generate a GC root reference load:
   //
   //   root <- *(obj + offset)
   //
-  // while honoring read barriers (if any).
+  // while honoring read barriers based on read_barrier_option.
   void GenerateGcRootFieldLoad(HInstruction* instruction,
                                Location root,
                                vixl::aarch64::Register obj,
                                uint32_t offset,
-                               vixl::aarch64::Label* fixup_label = nullptr,
-                               bool requires_read_barrier = kEmitCompilerReadBarrier);
+                               vixl::aarch64::Label* fixup_label,
+                               ReadBarrierOption read_barrier_option);
 
   // Generate a floating-point comparison.
   void GenerateFcmp(HInstruction* instruction);
@@ -594,6 +596,13 @@
                                              bool needs_null_check);
   // Factored implementation used by GenerateFieldLoadWithBakerReadBarrier
   // and GenerateArrayLoadWithBakerReadBarrier.
+  //
+  // Load the object reference located at the address
+  // `obj + offset + (index << scale_factor)`, held by object `obj`, into
+  // `ref`, and mark it if needed.
+  //
+  // If `always_update_field` is true, the value of the reference is
+  // atomically updated in the holder (`obj`).
   void GenerateReferenceLoadWithBakerReadBarrier(HInstruction* instruction,
                                                  Location ref,
                                                  vixl::aarch64::Register obj,
@@ -602,7 +611,8 @@
                                                  size_t scale_factor,
                                                  vixl::aarch64::Register temp,
                                                  bool needs_null_check,
-                                                 bool use_load_acquire);
+                                                 bool use_load_acquire,
+                                                 bool always_update_field = false);
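As a concrete reading of the comment above, the reference is loaded from `obj + offset + (index << scale_factor)`; for a plain field load the index term is absent, while for an array load `offset` is the data offset and `scale_factor` is log2 of the element size. A hedged sketch of that effective-address computation (names are illustrative, not ART API):

#include <cstddef>
#include <cstdint>

// Illustrative only: effective address of the reference handled by the
// Baker read barrier load declared above.
inline uintptr_t ReferenceAddress(uintptr_t obj,
                                  uint32_t offset,
                                  uintptr_t index,
                                  size_t scale_factor) {
  return obj + offset + (index << scale_factor);
}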
 
   // Generate a read barrier for a heap reference within `instruction`
   // using a slow path.
diff --git a/compiler/optimizing/code_generator_arm_vixl.cc b/compiler/optimizing/code_generator_arm_vixl.cc
index 32287a0..61e6b4b 100644
--- a/compiler/optimizing/code_generator_arm_vixl.cc
+++ b/compiler/optimizing/code_generator_arm_vixl.cc
@@ -23,6 +23,7 @@
 #include "compiled_method.h"
 #include "entrypoints/quick/quick_entrypoints.h"
 #include "gc/accounting/card_table.h"
+#include "intrinsics_arm_vixl.h"
 #include "mirror/array-inl.h"
 #include "mirror/class-inl.h"
 #include "thread.h"
@@ -37,11 +38,12 @@
 namespace vixl32 = vixl::aarch32;
 using namespace vixl32;  // NOLINT(build/namespaces)
 
+using helpers::DRegisterFrom;
 using helpers::DWARFReg;
-using helpers::FromLowSToD;
 using helpers::HighDRegisterFrom;
 using helpers::HighRegisterFrom;
 using helpers::InputOperandAt;
+using helpers::InputRegister;
 using helpers::InputRegisterAt;
 using helpers::InputSRegisterAt;
 using helpers::InputVRegisterAt;
@@ -61,7 +63,9 @@
   return ((location.low() & 1) == 0) && (location.low() + 1 == location.high());
 }
 
+static constexpr int kCurrentMethodStackOffset = 0;
 static constexpr size_t kArmInstrMaxSizeInBytes = 4u;
+static constexpr uint32_t kPackedSwitchCompareJumpThreshold = 7;
 
 #ifdef __
 #error "ARM Codegen VIXL macro-assembler macro already defined."
@@ -339,6 +343,46 @@
   DISALLOW_COPY_AND_ASSIGN(SuspendCheckSlowPathARMVIXL);
 };
 
+class BoundsCheckSlowPathARMVIXL : public SlowPathCodeARMVIXL {
+ public:
+  explicit BoundsCheckSlowPathARMVIXL(HBoundsCheck* instruction)
+      : SlowPathCodeARMVIXL(instruction) {}
+
+  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+    CodeGeneratorARMVIXL* arm_codegen = down_cast<CodeGeneratorARMVIXL*>(codegen);
+    LocationSummary* locations = instruction_->GetLocations();
+
+    __ Bind(GetEntryLabel());
+    if (instruction_->CanThrowIntoCatchBlock()) {
+      // Live registers will be restored in the catch block if caught.
+      SaveLiveRegisters(codegen, instruction_->GetLocations());
+    }
+    // We're moving two locations to locations that could overlap, so we need a parallel
+    // move resolver.
+    InvokeRuntimeCallingConventionARMVIXL calling_convention;
+    codegen->EmitParallelMoves(
+        locations->InAt(0),
+        LocationFrom(calling_convention.GetRegisterAt(0)),
+        Primitive::kPrimInt,
+        locations->InAt(1),
+        LocationFrom(calling_convention.GetRegisterAt(1)),
+        Primitive::kPrimInt);
+    QuickEntrypointEnum entrypoint = instruction_->AsBoundsCheck()->IsStringCharAt()
+        ? kQuickThrowStringBounds
+        : kQuickThrowArrayBounds;
+    arm_codegen->InvokeRuntime(entrypoint, instruction_, instruction_->GetDexPc(), this);
+    CheckEntrypointTypes<kQuickThrowStringBounds, void, int32_t, int32_t>();
+    CheckEntrypointTypes<kQuickThrowArrayBounds, void, int32_t, int32_t>();
+  }
+
+  bool IsFatal() const OVERRIDE { return true; }
+
+  const char* GetDescription() const OVERRIDE { return "BoundsCheckSlowPathARMVIXL"; }
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(BoundsCheckSlowPathARMVIXL);
+};
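The EmitParallelMoves call above is needed because the index and length may already sit in each other's destination registers; moving them one at a time could overwrite an input before it is read. A small illustration of the hazard and of the resolver's fix (hypothetical register assignment, not ART code):

// Illustrative only: why overlapping moves need a resolver.
// Suppose the index is currently in r0 and the length in r1, while the runtime
// call expects the length in r0 and the index in r1. Sequential moves destroy
// one input:
//   r0 = r1;  // length -> r0, the index previously in r0 is lost
//   r1 = r0;  // copies the length again instead of the index
// A parallel move resolver detects the cycle and routes one value through a
// temporary (or a swap), so both reach their destinations intact.
void SwapViaTemp(int* r0, int* r1) {
  int tmp = *r0;  // the temporary breaks the cycle
  *r0 = *r1;
  *r1 = tmp;
}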
+
 class LoadClassSlowPathARMVIXL : public SlowPathCodeARMVIXL {
  public:
   LoadClassSlowPathARMVIXL(HLoadClass* cls, HInstruction* at, uint32_t dex_pc, bool do_clinit)
@@ -393,6 +437,125 @@
   DISALLOW_COPY_AND_ASSIGN(LoadClassSlowPathARMVIXL);
 };
 
+class TypeCheckSlowPathARMVIXL : public SlowPathCodeARMVIXL {
+ public:
+  TypeCheckSlowPathARMVIXL(HInstruction* instruction, bool is_fatal)
+      : SlowPathCodeARMVIXL(instruction), is_fatal_(is_fatal) {}
+
+  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+    LocationSummary* locations = instruction_->GetLocations();
+    DCHECK(instruction_->IsCheckCast()
+           || !locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
+
+    CodeGeneratorARMVIXL* arm_codegen = down_cast<CodeGeneratorARMVIXL*>(codegen);
+    __ Bind(GetEntryLabel());
+
+    if (!is_fatal_) {
+      SaveLiveRegisters(codegen, locations);
+    }
+
+    // We're moving two locations to locations that could overlap, so we need a parallel
+    // move resolver.
+    InvokeRuntimeCallingConventionARMVIXL calling_convention;
+
+    codegen->EmitParallelMoves(locations->InAt(0),
+                               LocationFrom(calling_convention.GetRegisterAt(0)),
+                               Primitive::kPrimNot,
+                               locations->InAt(1),
+                               LocationFrom(calling_convention.GetRegisterAt(1)),
+                               Primitive::kPrimNot);
+    if (instruction_->IsInstanceOf()) {
+      arm_codegen->InvokeRuntime(kQuickInstanceofNonTrivial,
+                                 instruction_,
+                                 instruction_->GetDexPc(),
+                                 this);
+      CheckEntrypointTypes<kQuickInstanceofNonTrivial, size_t, mirror::Object*, mirror::Class*>();
+      arm_codegen->Move32(locations->Out(), LocationFrom(r0));
+    } else {
+      DCHECK(instruction_->IsCheckCast());
+      arm_codegen->InvokeRuntime(kQuickCheckInstanceOf,
+                                 instruction_,
+                                 instruction_->GetDexPc(),
+                                 this);
+      CheckEntrypointTypes<kQuickCheckInstanceOf, void, mirror::Object*, mirror::Class*>();
+    }
+
+    if (!is_fatal_) {
+      RestoreLiveRegisters(codegen, locations);
+      __ B(GetExitLabel());
+    }
+  }
+
+  const char* GetDescription() const OVERRIDE { return "TypeCheckSlowPathARMVIXL"; }
+
+  bool IsFatal() const OVERRIDE { return is_fatal_; }
+
+ private:
+  const bool is_fatal_;
+
+  DISALLOW_COPY_AND_ASSIGN(TypeCheckSlowPathARMVIXL);
+};
+
+class DeoptimizationSlowPathARMVIXL : public SlowPathCodeARMVIXL {
+ public:
+  explicit DeoptimizationSlowPathARMVIXL(HDeoptimize* instruction)
+      : SlowPathCodeARMVIXL(instruction) {}
+
+  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+    CodeGeneratorARMVIXL* arm_codegen = down_cast<CodeGeneratorARMVIXL*>(codegen);
+    __ Bind(GetEntryLabel());
+    arm_codegen->InvokeRuntime(kQuickDeoptimize, instruction_, instruction_->GetDexPc(), this);
+    CheckEntrypointTypes<kQuickDeoptimize, void, void>();
+  }
+
+  const char* GetDescription() const OVERRIDE { return "DeoptimizationSlowPathARMVIXL"; }
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(DeoptimizationSlowPathARMVIXL);
+};
+
+class ArraySetSlowPathARMVIXL : public SlowPathCodeARMVIXL {
+ public:
+  explicit ArraySetSlowPathARMVIXL(HInstruction* instruction) : SlowPathCodeARMVIXL(instruction) {}
+
+  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+    LocationSummary* locations = instruction_->GetLocations();
+    __ Bind(GetEntryLabel());
+    SaveLiveRegisters(codegen, locations);
+
+    InvokeRuntimeCallingConventionARMVIXL calling_convention;
+    HParallelMove parallel_move(codegen->GetGraph()->GetArena());
+    parallel_move.AddMove(
+        locations->InAt(0),
+        LocationFrom(calling_convention.GetRegisterAt(0)),
+        Primitive::kPrimNot,
+        nullptr);
+    parallel_move.AddMove(
+        locations->InAt(1),
+        LocationFrom(calling_convention.GetRegisterAt(1)),
+        Primitive::kPrimInt,
+        nullptr);
+    parallel_move.AddMove(
+        locations->InAt(2),
+        LocationFrom(calling_convention.GetRegisterAt(2)),
+        Primitive::kPrimNot,
+        nullptr);
+    codegen->GetMoveResolver()->EmitNativeCode(&parallel_move);
+
+    CodeGeneratorARMVIXL* arm_codegen = down_cast<CodeGeneratorARMVIXL*>(codegen);
+    arm_codegen->InvokeRuntime(kQuickAputObject, instruction_, instruction_->GetDexPc(), this);
+    CheckEntrypointTypes<kQuickAputObject, void, mirror::Array*, int32_t, mirror::Object*>();
+    RestoreLiveRegisters(codegen, locations);
+    __ B(GetExitLabel());
+  }
+
+  const char* GetDescription() const OVERRIDE { return "ArraySetSlowPathARMVIXL"; }
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(ArraySetSlowPathARMVIXL);
+};
+
 inline vixl32::Condition ARMCondition(IfCondition cond) {
   switch (cond) {
     case kCondEQ: return eq;
@@ -466,6 +629,11 @@
   return mask;
 }
 
+size_t CodeGeneratorARMVIXL::RestoreFloatingPointRegister(size_t stack_index, uint32_t reg_id) {
+  GetAssembler()->LoadSFromOffset(vixl32::SRegister(reg_id), sp, stack_index);
+  return kArmWordSize;
+}
+
 #undef __
 
 CodeGeneratorARMVIXL::CodeGeneratorARMVIXL(HGraph* graph,
@@ -481,6 +649,7 @@
                     compiler_options,
                     stats),
       block_labels_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
+      jump_tables_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
       location_builder_(graph, this),
       instruction_visitor_(graph, this),
       move_resolver_(graph->GetArena(), this),
@@ -488,11 +657,64 @@
       isa_features_(isa_features) {
   // Always save the LR register to mimic Quick.
   AddAllocatedRegister(Location::RegisterLocation(LR));
+  // Give d14 and d15 as scratch registers to VIXL.
+  // They are removed from the register allocator in `SetupBlockedRegisters()`.
+  // TODO(VIXL): We need two scratch D registers for `EmitSwap` when swapping two double stack
+  // slots. If that is sufficiently rare, and we have pressure on FP registers, we could instead
+  // spill in `EmitSwap`. But if we actually are guaranteed to have 32 D registers, we could give
+  // d30 and d31 to VIXL to avoid removing registers from the allocator. If that is the case, we may
+  // also want to investigate giving those 14 other D registers to the allocator.
+  GetVIXLAssembler()->GetScratchVRegisterList()->Combine(d14);
+  GetVIXLAssembler()->GetScratchVRegisterList()->Combine(d15);
 }
 
-#define __ reinterpret_cast<ArmVIXLAssembler*>(GetAssembler())->GetVIXLAssembler()->
+void JumpTableARMVIXL::EmitTable(CodeGeneratorARMVIXL* codegen) {
+  uint32_t num_entries = switch_instr_->GetNumEntries();
+  DCHECK_GE(num_entries, kPackedSwitchCompareJumpThreshold);
+
+  // We are about to use the assembler to place literals directly. Make sure we have enough
+  // space in the underlying code buffer and that we emit the jump table with the right size.
+  codegen->GetVIXLAssembler()->GetBuffer().Align();
+  AssemblerAccurateScope aas(codegen->GetVIXLAssembler(),
+                             num_entries * sizeof(int32_t),
+                             CodeBufferCheckScope::kMaximumSize);
+  // TODO(VIXL): Check that using lower case bind is fine here.
+  codegen->GetVIXLAssembler()->bind(&table_start_);
+  for (uint32_t i = 0; i < num_entries; i++) {
+    codegen->GetVIXLAssembler()->place(bb_addresses_[i].get());
+  }
+}
+
+void JumpTableARMVIXL::FixTable(CodeGeneratorARMVIXL* codegen) {
+  uint32_t num_entries = switch_instr_->GetNumEntries();
+  DCHECK_GE(num_entries, kPackedSwitchCompareJumpThreshold);
+
+  const ArenaVector<HBasicBlock*>& successors = switch_instr_->GetBlock()->GetSuccessors();
+  for (uint32_t i = 0; i < num_entries; i++) {
+    vixl32::Label* target_label = codegen->GetLabelOf(successors[i]);
+    DCHECK(target_label->IsBound());
+    int32_t jump_offset = target_label->GetLocation() - table_start_.GetLocation();
+    // When doing a BX to an address, the lower bit needs to be set to 1 in T32 (Thumb) state.
+    if (codegen->GetVIXLAssembler()->IsUsingT32()) {
+      jump_offset++;
+    }
+    DCHECK_GT(jump_offset, std::numeric_limits<int32_t>::min());
+    DCHECK_LE(jump_offset, std::numeric_limits<int32_t>::max());
+
+    bb_addresses_[i].get()->UpdateValue(jump_offset, &codegen->GetVIXLAssembler()->GetBuffer());
+  }
+}
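For reference, each table slot ends up holding the signed offset from the table start to the corresponding successor's bound label, with the low bit set when assembling T32 so that a later BX into `table_start + entry` stays in Thumb state. A sketch of the per-entry value computed by FixTable above (variable names are illustrative):

#include <cstdint>

// Sketch of the value written into slot i of the jump table.
int32_t JumpTableEntry(int32_t target_location,       // bound label of successor i
                       int32_t table_start_location,  // location of table_start_
                       bool using_t32) {
  int32_t jump_offset = target_location - table_start_location;
  if (using_t32) {
    jump_offset += 1;  // BX needs the low bit set to stay in Thumb/T32 state.
  }
  return jump_offset;
}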
+
+void CodeGeneratorARMVIXL::FixJumpTables() {
+  for (auto&& jump_table : jump_tables_) {
+    jump_table->FixTable(this);
+  }
+}
+
+#define __ reinterpret_cast<ArmVIXLAssembler*>(GetAssembler())->GetVIXLAssembler()->  // NOLINT
 
 void CodeGeneratorARMVIXL::Finalize(CodeAllocator* allocator) {
+  FixJumpTables();
   GetAssembler()->FinalizeCode();
   CodeGenerator::Finalize(allocator);
 }
@@ -509,6 +731,13 @@
   // Reserve temp register.
   blocked_core_registers_[IP] = true;
 
+  // Registers s28-s31 (d14-d15) are left to VIXL for scratch registers.
+  // (They are given to the `MacroAssembler` in `CodeGeneratorARMVIXL::CodeGeneratorARMVIXL`.)
+  blocked_fpu_registers_[28] = true;
+  blocked_fpu_registers_[29] = true;
+  blocked_fpu_registers_[30] = true;
+  blocked_fpu_registers_[31] = true;
+
   if (GetGraph()->IsDebuggable()) {
     // Stubs do not save callee-save floating point registers. If the graph
     // is debuggable, we need to deal with these registers differently. For
@@ -665,9 +894,9 @@
   }
 }
 
-void CodeGeneratorARMVIXL::MoveConstant(Location destination ATTRIBUTE_UNUSED,
-                                        int32_t value ATTRIBUTE_UNUSED) {
-  TODO_VIXL32(FATAL);
+void CodeGeneratorARMVIXL::MoveConstant(Location location, int32_t value) {
+  DCHECK(location.IsRegister());
+  __ Mov(RegisterFrom(location), value);
 }
 
 void CodeGeneratorARMVIXL::MoveLocation(Location dst, Location src, Primitive::Type dst_type) {
@@ -678,9 +907,15 @@
   GetMoveResolver()->EmitNativeCode(&move);
 }
 
-void CodeGeneratorARMVIXL::AddLocationAsTemp(Location location ATTRIBUTE_UNUSED,
-                                             LocationSummary* locations ATTRIBUTE_UNUSED) {
-  TODO_VIXL32(FATAL);
+void CodeGeneratorARMVIXL::AddLocationAsTemp(Location location, LocationSummary* locations) {
+  if (location.IsRegister()) {
+    locations->AddTemp(location);
+  } else if (location.IsRegisterPair()) {
+    locations->AddTemp(LocationFrom(LowRegisterFrom(location)));
+    locations->AddTemp(LocationFrom(HighRegisterFrom(location)));
+  } else {
+    UNIMPLEMENTED(FATAL) << "AddLocationAsTemp not implemented for location " << location;
+  }
 }
 
 void CodeGeneratorARMVIXL::InvokeRuntime(QuickEntrypointEnum entrypoint,
@@ -708,110 +943,6 @@
   __ Blx(lr);
 }
 
-void LocationsBuilderARMVIXL::VisitClinitCheck(HClinitCheck* check) {
-  LocationSummary* locations =
-      new (GetGraph()->GetArena()) LocationSummary(check, LocationSummary::kCallOnSlowPath);
-  locations->SetInAt(0, Location::RequiresRegister());
-  if (check->HasUses()) {
-    locations->SetOut(Location::SameAsFirstInput());
-  }
-}
-
-void InstructionCodeGeneratorARMVIXL::VisitClinitCheck(HClinitCheck* check) {
-  // We assume the class is not null.
-  LoadClassSlowPathARMVIXL* slow_path =
-      new (GetGraph()->GetArena()) LoadClassSlowPathARMVIXL(check->GetLoadClass(),
-                                                            check,
-                                                            check->GetDexPc(),
-                                                            /* do_clinit */ true);
-  codegen_->AddSlowPath(slow_path);
-  GenerateClassInitializationCheck(slow_path, InputRegisterAt(check, 0));
-}
-
-void InstructionCodeGeneratorARMVIXL::GenerateClassInitializationCheck(
-    LoadClassSlowPathARMVIXL* slow_path, vixl32::Register class_reg) {
-  UseScratchRegisterScope temps(GetVIXLAssembler());
-  vixl32::Register temp = temps.Acquire();
-  GetAssembler()->LoadFromOffset(kLoadWord,
-                                 temp,
-                                 class_reg,
-                                 mirror::Class::StatusOffset().Int32Value());
-  __ Cmp(temp, mirror::Class::kStatusInitialized);
-  __ B(lt, slow_path->GetEntryLabel());
-  // Even if the initialized flag is set, we may be in a situation where caches are not synced
-  // properly. Therefore, we do a memory fence.
-  __ Dmb(ISH);
-  __ Bind(slow_path->GetExitLabel());
-}
-
-// Check if the desired_string_load_kind is supported. If it is, return it,
-// otherwise return a fall-back kind that should be used instead.
-HLoadString::LoadKind CodeGeneratorARMVIXL::GetSupportedLoadStringKind(
-      HLoadString::LoadKind desired_string_load_kind ATTRIBUTE_UNUSED) {
-  // TODO(VIXL): Implement optimized code paths. For now we always use the simpler fallback code.
-  return HLoadString::LoadKind::kDexCacheViaMethod;
-}
-
-void LocationsBuilderARMVIXL::VisitLoadString(HLoadString* load) {
-  LocationSummary::CallKind call_kind = load->NeedsEnvironment()
-      ? LocationSummary::kCallOnMainOnly
-      : LocationSummary::kNoCall;
-  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(load, call_kind);
-
-  // TODO(VIXL): Implement optimized code paths.
-  // See InstructionCodeGeneratorARMVIXL::VisitLoadString.
-  HLoadString::LoadKind load_kind = load->GetLoadKind();
-  if (load_kind == HLoadString::LoadKind::kDexCacheViaMethod) {
-    locations->SetInAt(0, Location::RequiresRegister());
-    // TODO(VIXL): Use InvokeRuntimeCallingConventionARMVIXL instead.
-    locations->SetOut(LocationFrom(r0));
-  } else {
-    locations->SetOut(Location::RequiresRegister());
-  }
-}
-
-void InstructionCodeGeneratorARMVIXL::VisitLoadString(HLoadString* load) {
-  // TODO(VIXL): Implement optimized code paths.
-  // We implemented the simplest solution to get first ART tests passing, we deferred the
-  // optimized path until later, we should implement it using ARM64 implementation as a
-  // reference. The same related to LocationsBuilderARMVIXL::VisitLoadString.
-
-  // TODO: Re-add the compiler code to do string dex cache lookup again.
-  DCHECK_EQ(load->GetLoadKind(), HLoadString::LoadKind::kDexCacheViaMethod);
-  InvokeRuntimeCallingConventionARMVIXL calling_convention;
-  __ Mov(calling_convention.GetRegisterAt(0), load->GetStringIndex());
-  codegen_->InvokeRuntime(kQuickResolveString, load, load->GetDexPc());
-  CheckEntrypointTypes<kQuickResolveString, void*, uint32_t>();
-}
-
-// Check if the desired_class_load_kind is supported. If it is, return it,
-// otherwise return a fall-back kind that should be used instead.
-HLoadClass::LoadKind CodeGeneratorARMVIXL::GetSupportedLoadClassKind(
-      HLoadClass::LoadKind desired_class_load_kind ATTRIBUTE_UNUSED) {
-  // TODO(VIXL): Implement optimized code paths.
-  return HLoadClass::LoadKind::kDexCacheViaMethod;
-}
-
-// Check if the desired_dispatch_info is supported. If it is, return it,
-// otherwise return a fall-back info that should be used instead.
-HInvokeStaticOrDirect::DispatchInfo CodeGeneratorARMVIXL::GetSupportedInvokeStaticOrDirectDispatch(
-      const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info ATTRIBUTE_UNUSED,
-      HInvokeStaticOrDirect* invoke ATTRIBUTE_UNUSED) {
-  // TODO(VIXL): Implement optimized code paths.
-  return {
-    HInvokeStaticOrDirect::MethodLoadKind::kDexCacheViaMethod,
-    HInvokeStaticOrDirect::CodePtrLocation::kCallArtMethod,
-    0u,
-    0u
-  };
-}
-
-// Copy the result of a call into the given target.
-void CodeGeneratorARMVIXL::MoveFromReturnRegister(Location trg ATTRIBUTE_UNUSED,
-                                                  Primitive::Type type ATTRIBUTE_UNUSED) {
-  TODO_VIXL32(FATAL);
-}
-
 void InstructionCodeGeneratorARMVIXL::HandleGoto(HInstruction* got, HBasicBlock* successor) {
   DCHECK(!successor->IsExitBlock());
   HBasicBlock* block = got->GetBlock();
@@ -882,14 +1013,14 @@
       __ Vcmp(F32, InputSRegisterAt(instruction, 0), 0.0);
     } else {
       DCHECK_EQ(type, Primitive::kPrimDouble);
-      __ Vcmp(F64, FromLowSToD(LowSRegisterFrom(lhs_loc)), 0.0);
+      __ Vcmp(F64, DRegisterFrom(lhs_loc), 0.0);
     }
   } else {
     if (type == Primitive::kPrimFloat) {
       __ Vcmp(InputSRegisterAt(instruction, 0), InputSRegisterAt(instruction, 1));
     } else {
       DCHECK_EQ(type, Primitive::kPrimDouble);
-      __ Vcmp(FromLowSToD(LowSRegisterFrom(lhs_loc)), FromLowSToD(LowSRegisterFrom(rhs_loc)));
+      __ Vcmp(DRegisterFrom(lhs_loc), DRegisterFrom(rhs_loc));
     }
   }
 }
@@ -1115,6 +1246,24 @@
   GenerateTestAndBranch(if_instr, /* condition_input_index */ 0, true_target, false_target);
 }
 
+void LocationsBuilderARMVIXL::VisitDeoptimize(HDeoptimize* deoptimize) {
+  LocationSummary* locations = new (GetGraph()->GetArena())
+      LocationSummary(deoptimize, LocationSummary::kCallOnSlowPath);
+  locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty());  // No caller-save registers.
+  if (IsBooleanValueOrMaterializedCondition(deoptimize->InputAt(0))) {
+    locations->SetInAt(0, Location::RequiresRegister());
+  }
+}
+
+void InstructionCodeGeneratorARMVIXL::VisitDeoptimize(HDeoptimize* deoptimize) {
+  SlowPathCodeARMVIXL* slow_path =
+      deopt_slow_paths_.NewSlowPath<DeoptimizationSlowPathARMVIXL>(deoptimize);
+  GenerateTestAndBranch(deoptimize,
+                        /* condition_input_index */ 0,
+                        slow_path->GetEntryLabel(),
+                        /* false_target */ nullptr);
+}
+
 void LocationsBuilderARMVIXL::VisitSelect(HSelect* select) {
   LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(select);
   if (Primitive::IsFloatingPointType(select->GetType())) {
@@ -1141,6 +1290,14 @@
   __ Bind(&false_target);
 }
 
+void LocationsBuilderARMVIXL::VisitNativeDebugInfo(HNativeDebugInfo* info) {
+  new (GetGraph()->GetArena()) LocationSummary(info);
+}
+
+void InstructionCodeGeneratorARMVIXL::VisitNativeDebugInfo(HNativeDebugInfo*) {
+  // MaybeRecordNativeDebugInfo is already called implicitly in CodeGenerator::Compile.
+}
+
 void CodeGeneratorARMVIXL::GenerateNop() {
   __ Nop();
 }
@@ -1331,6 +1488,28 @@
   // Will be generated at use site.
 }
 
+void LocationsBuilderARMVIXL::VisitFloatConstant(HFloatConstant* constant) {
+  LocationSummary* locations =
+      new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
+  locations->SetOut(Location::ConstantLocation(constant));
+}
+
+void InstructionCodeGeneratorARMVIXL::VisitFloatConstant(
+    HFloatConstant* constant ATTRIBUTE_UNUSED) {
+  // Will be generated at use site.
+}
+
+void LocationsBuilderARMVIXL::VisitDoubleConstant(HDoubleConstant* constant) {
+  LocationSummary* locations =
+      new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
+  locations->SetOut(Location::ConstantLocation(constant));
+}
+
+void InstructionCodeGeneratorARMVIXL::VisitDoubleConstant(
+    HDoubleConstant* constant ATTRIBUTE_UNUSED) {
+  // Will be generated at use site.
+}
+
 void LocationsBuilderARMVIXL::VisitMemoryBarrier(HMemoryBarrier* memory_barrier) {
   memory_barrier->SetLocations(nullptr);
 }
@@ -1357,14 +1536,42 @@
   codegen_->GenerateFrameExit();
 }
 
+void LocationsBuilderARMVIXL::VisitInvokeUnresolved(HInvokeUnresolved* invoke) {
+  // The trampoline uses the same calling convention as dex calling conventions,
+  // except instead of loading arg0/r0 with the target Method*, arg0/r0 will contain
+  // the method_idx.
+  HandleInvoke(invoke);
+}
+
+void InstructionCodeGeneratorARMVIXL::VisitInvokeUnresolved(HInvokeUnresolved* invoke) {
+  codegen_->GenerateInvokeUnresolvedRuntimeCall(invoke);
+}
+
 void LocationsBuilderARMVIXL::VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) {
   // Explicit clinit checks triggered by static invokes must have been pruned by
   // art::PrepareForRegisterAllocation.
   DCHECK(!invoke->IsStaticWithExplicitClinitCheck());
 
-  // TODO(VIXL): TryDispatch
+  IntrinsicLocationsBuilderARMVIXL intrinsic(codegen_);
+  if (intrinsic.TryDispatch(invoke)) {
+    if (invoke->GetLocations()->CanCall() && invoke->HasPcRelativeDexCache()) {
+      invoke->GetLocations()->SetInAt(invoke->GetSpecialInputIndex(), Location::Any());
+    }
+    return;
+  }
 
   HandleInvoke(invoke);
+
+  // TODO(VIXL): invoke->HasPcRelativeDexCache()
+}
+
+static bool TryGenerateIntrinsicCode(HInvoke* invoke, CodeGeneratorARMVIXL* codegen) {
+  if (invoke->GetLocations()->Intrinsified()) {
+    IntrinsicCodeGeneratorARMVIXL intrinsic(codegen);
+    intrinsic.Dispatch(invoke);
+    return true;
+  }
+  return false;
 }
 
 void InstructionCodeGeneratorARMVIXL::VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) {
@@ -1372,7 +1579,9 @@
   // art::PrepareForRegisterAllocation.
   DCHECK(!invoke->IsStaticWithExplicitClinitCheck());
 
-  // TODO(VIXL): TryGenerateIntrinsicCode
+  if (TryGenerateIntrinsicCode(invoke, codegen_)) {
+    return;
+  }
 
   LocationSummary* locations = invoke->GetLocations();
   DCHECK(locations->HasTemps());
@@ -1388,13 +1597,18 @@
 }
 
 void LocationsBuilderARMVIXL::VisitInvokeVirtual(HInvokeVirtual* invoke) {
-  // TODO(VIXL): TryDispatch
+  IntrinsicLocationsBuilderARMVIXL intrinsic(codegen_);
+  if (intrinsic.TryDispatch(invoke)) {
+    return;
+  }
 
   HandleInvoke(invoke);
 }
 
 void InstructionCodeGeneratorARMVIXL::VisitInvokeVirtual(HInvokeVirtual* invoke) {
-  // TODO(VIXL): TryGenerateIntrinsicCode
+  if (TryGenerateIntrinsicCode(invoke, codegen_)) {
+    return;
+  }
 
   codegen_->GenerateVirtualCall(invoke, invoke->GetLocations()->GetTemp(0));
   DCHECK(!codegen_->IsLeafMethod());
@@ -1403,6 +1617,124 @@
   codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
 }
 
+void LocationsBuilderARMVIXL::VisitInvokeInterface(HInvokeInterface* invoke) {
+  HandleInvoke(invoke);
+  // Add the hidden argument.
+  invoke->GetLocations()->AddTemp(LocationFrom(r12));
+}
+
+void InstructionCodeGeneratorARMVIXL::VisitInvokeInterface(HInvokeInterface* invoke) {
+  // TODO: b/18116999, our IMTs can miss an IncompatibleClassChangeError.
+  LocationSummary* locations = invoke->GetLocations();
+  vixl32::Register temp = RegisterFrom(locations->GetTemp(0));
+  vixl32::Register hidden_reg = RegisterFrom(locations->GetTemp(1));
+  Location receiver = locations->InAt(0);
+  uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
+
+  DCHECK(!receiver.IsStackSlot());
+
+  // /* HeapReference<Class> */ temp = receiver->klass_
+  GetAssembler()->LoadFromOffset(kLoadWord, temp, RegisterFrom(receiver), class_offset);
+
+  codegen_->MaybeRecordImplicitNullCheck(invoke);
+  // Instead of simply (possibly) unpoisoning `temp` here, we should
+  // emit a read barrier for the previous class reference load.
+  // However this is not required in practice, as this is an
+  // intermediate/temporary reference and because the current
+  // concurrent copying collector keeps the from-space memory
+  // intact/accessible until the end of the marking phase (the
+  // concurrent copying collector may not do so in the future).
+  GetAssembler()->MaybeUnpoisonHeapReference(temp);
+  GetAssembler()->LoadFromOffset(kLoadWord,
+                                 temp,
+                                 temp,
+                                 mirror::Class::ImtPtrOffset(kArmPointerSize).Uint32Value());
+  uint32_t method_offset = static_cast<uint32_t>(ImTable::OffsetOfElement(
+      invoke->GetImtIndex(), kArmPointerSize));
+  // temp = temp->GetImtEntryAt(method_offset);
+  GetAssembler()->LoadFromOffset(kLoadWord, temp, temp, method_offset);
+  uint32_t entry_point =
+      ArtMethod::EntryPointFromQuickCompiledCodeOffset(kArmPointerSize).Int32Value();
+  // LR = temp->GetEntryPoint();
+  GetAssembler()->LoadFromOffset(kLoadWord, lr, temp, entry_point);
+
+  // Set the hidden (in r12) argument. It is done here, right before the BLX, to prevent other
+  // instructions from clobbering it, as they might use r12 as a scratch register.
+  DCHECK(hidden_reg.Is(r12));
+  __ Mov(hidden_reg, invoke->GetDexMethodIndex());
+
+  {
+    AssemblerAccurateScope aas(GetVIXLAssembler(),
+                               kArmInstrMaxSizeInBytes,
+                               CodeBufferCheckScope::kMaximumSize);
+    // LR();
+    __ blx(lr);
+    DCHECK(!codegen_->IsLeafMethod());
+    codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
+  }
+}
+
+void LocationsBuilderARMVIXL::VisitNeg(HNeg* neg) {
+  LocationSummary* locations =
+      new (GetGraph()->GetArena()) LocationSummary(neg, LocationSummary::kNoCall);
+  switch (neg->GetResultType()) {
+    case Primitive::kPrimInt: {
+      locations->SetInAt(0, Location::RequiresRegister());
+      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+      break;
+    }
+    case Primitive::kPrimLong: {
+      locations->SetInAt(0, Location::RequiresRegister());
+      locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
+      break;
+    }
+
+    case Primitive::kPrimFloat:
+    case Primitive::kPrimDouble:
+      locations->SetInAt(0, Location::RequiresFpuRegister());
+      locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
+      break;
+
+    default:
+      LOG(FATAL) << "Unexpected neg type " << neg->GetResultType();
+  }
+}
+
+void InstructionCodeGeneratorARMVIXL::VisitNeg(HNeg* neg) {
+  LocationSummary* locations = neg->GetLocations();
+  Location out = locations->Out();
+  Location in = locations->InAt(0);
+  switch (neg->GetResultType()) {
+    case Primitive::kPrimInt:
+      __ Rsb(OutputRegister(neg), InputRegisterAt(neg, 0), 0);
+      break;
+
+    case Primitive::kPrimLong:
+      // out.lo = 0 - in.lo (and update the carry/borrow (C) flag)
+      __ Rsbs(LowRegisterFrom(out), LowRegisterFrom(in), 0);
+      // We cannot emit an RSC (Reverse Subtract with Carry)
+      // instruction here, as it does not exist in the Thumb-2
+      // instruction set.  We use the following approach
+      // using SBC and SUB instead.
+      //
+      // out.hi = -C
+      __ Sbc(HighRegisterFrom(out), HighRegisterFrom(out), HighRegisterFrom(out));
+      // out.hi = out.hi - in.hi
+      __ Sub(HighRegisterFrom(out), HighRegisterFrom(out), HighRegisterFrom(in));
+      break;
+
+    case Primitive::kPrimFloat:
+    case Primitive::kPrimDouble:
+      // TODO(VIXL): Consider introducing an InputVRegister()
+      // helper function (equivalent to InputRegister()).
+      __ Vneg(OutputVRegister(neg), InputVRegisterAt(neg, 0));
+      break;
+
+    default:
+      LOG(FATAL) << "Unexpected neg type " << neg->GetResultType();
+  }
+}
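The long case above negates a 64-bit value using only RSBS, SBC and SUB: RSBS produces the low word and the borrow, SBC of a register with itself yields minus the borrow, and the final SUB subtracts the high input word. A plain C++ restatement of that arithmetic (relies on unsigned wraparound; names are illustrative):

#include <cstdint>

// Sketch of the RSBS/SBC/SUB sequence used for long negation above.
void NegateLong(uint32_t in_lo, uint32_t in_hi, uint32_t* out_lo, uint32_t* out_hi) {
  *out_lo = 0u - in_lo;                       // RSBS: out.lo = -in.lo, sets C = !borrow
  uint32_t borrow = (in_lo != 0u) ? 1u : 0u;  // borrow out of the low-word subtraction
  *out_hi = 0u - borrow;                      // SBC x, x, x == -borrow
  *out_hi = *out_hi - in_hi;                  // SUB: out.hi = -borrow - in.hi
}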
+
 void LocationsBuilderARMVIXL::VisitTypeConversion(HTypeConversion* conversion) {
   Primitive::Type result_type = conversion->GetResultType();
   Primitive::Type input_type = conversion->GetInputType();
@@ -1693,7 +2025,7 @@
         case Primitive::kPrimFloat: {
           // Processing a Dex `float-to-int' instruction.
           vixl32::SRegister temp = LowSRegisterFrom(locations->GetTemp(0));
-          __ Vcvt(I32, F32, temp, InputSRegisterAt(conversion, 0));
+          __ Vcvt(S32, F32, temp, InputSRegisterAt(conversion, 0));
           __ Vmov(OutputRegister(conversion), temp);
           break;
         }
@@ -1701,7 +2033,7 @@
         case Primitive::kPrimDouble: {
           // Processing a Dex `double-to-int' instruction.
           vixl32::SRegister temp_s = LowSRegisterFrom(locations->GetTemp(0));
-          __ Vcvt(I32, F64, temp_s, FromLowSToD(LowSRegisterFrom(in)));
+          __ Vcvt(S32, F64, temp_s, DRegisterFrom(in));
           __ Vmov(OutputRegister(conversion), temp_s);
           break;
         }
@@ -1777,7 +2109,7 @@
         case Primitive::kPrimChar: {
           // Processing a Dex `int-to-float' instruction.
           __ Vmov(OutputSRegister(conversion), InputRegisterAt(conversion, 0));
-          __ Vcvt(F32, I32, OutputSRegister(conversion), OutputSRegister(conversion));
+          __ Vcvt(F32, S32, OutputSRegister(conversion), OutputSRegister(conversion));
           break;
         }
 
@@ -1789,7 +2121,7 @@
 
         case Primitive::kPrimDouble:
           // Processing a Dex `double-to-float' instruction.
-          __ Vcvt(F32, F64, OutputSRegister(conversion), FromLowSToD(LowSRegisterFrom(in)));
+          __ Vcvt(F32, F64, OutputSRegister(conversion), DRegisterFrom(in));
           break;
 
         default:
@@ -1808,7 +2140,7 @@
         case Primitive::kPrimChar: {
           // Processing a Dex `int-to-double' instruction.
           __ Vmov(LowSRegisterFrom(out), InputRegisterAt(conversion, 0));
-          __ Vcvt(F64, I32, FromLowSToD(LowSRegisterFrom(out)), LowSRegisterFrom(out));
+          __ Vcvt(F64, S32, DRegisterFrom(out), LowSRegisterFrom(out));
           break;
         }
 
@@ -1816,19 +2148,15 @@
           // Processing a Dex `long-to-double' instruction.
           vixl32::Register low = LowRegisterFrom(in);
           vixl32::Register high = HighRegisterFrom(in);
-
           vixl32::SRegister out_s = LowSRegisterFrom(out);
-          vixl32::DRegister out_d = FromLowSToD(out_s);
-
+          vixl32::DRegister out_d = DRegisterFrom(out);
           vixl32::SRegister temp_s = LowSRegisterFrom(locations->GetTemp(0));
-          vixl32::DRegister temp_d = FromLowSToD(temp_s);
-
-          vixl32::SRegister constant_s = LowSRegisterFrom(locations->GetTemp(1));
-          vixl32::DRegister constant_d = FromLowSToD(constant_s);
+          vixl32::DRegister temp_d = DRegisterFrom(locations->GetTemp(0));
+          vixl32::DRegister constant_d = DRegisterFrom(locations->GetTemp(1));
 
           // temp_d = int-to-double(high)
           __ Vmov(temp_s, high);
-          __ Vcvt(F64, I32, temp_d, temp_s);
+          __ Vcvt(F64, S32, temp_d, temp_s);
           // constant_d = k2Pow32EncodingForDouble
           __ Vmov(constant_d, bit_cast<double, int64_t>(k2Pow32EncodingForDouble));
           // out_d = unsigned-to-double(low)
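The hunk above sets up the usual two-word lowering for long-to-double: the signed high word is converted and weighted by 2^32 (the constant loaded from k2Pow32EncodingForDouble), and the low word is converted as an unsigned 32-bit value, since the 64-bit input equals `hi * 2^32 + (uint32_t)lo`. A hedged restatement of that identity in plain C++ (illustrative only; the remaining combine step lies outside this hunk):

#include <cstdint>

// Sketch of the arithmetic behind the long-to-double lowering: the input value
// is hi * 2^32 + (uint32_t)lo, so its double is assembled from the two halves.
double LongToDouble(int32_t hi, uint32_t lo) {
  const double k2Pow32 = 4294967296.0;  // 2^32, cf. k2Pow32EncodingForDouble
  return static_cast<double>(hi) * k2Pow32 + static_cast<double>(lo);
}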
@@ -1841,7 +2169,7 @@
 
         case Primitive::kPrimFloat:
           // Processing a Dex `float-to-double' instruction.
-          __ Vcvt(F64, F32, FromLowSToD(LowSRegisterFrom(out)), InputSRegisterAt(conversion, 0));
+          __ Vcvt(F64, F32, DRegisterFrom(out), InputSRegisterAt(conversion, 0));
           break;
 
         default:
@@ -2055,181 +2383,6 @@
   }
 }
 
-void LocationsBuilderARMVIXL::VisitNewArray(HNewArray* instruction) {
-  LocationSummary* locations =
-      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnMainOnly);
-  InvokeRuntimeCallingConventionARMVIXL calling_convention;
-  locations->AddTemp(LocationFrom(calling_convention.GetRegisterAt(0)));
-  locations->SetOut(LocationFrom(r0));
-  locations->SetInAt(0, LocationFrom(calling_convention.GetRegisterAt(1)));
-  locations->SetInAt(1, LocationFrom(calling_convention.GetRegisterAt(2)));
-}
-
-void InstructionCodeGeneratorARMVIXL::VisitNewArray(HNewArray* instruction) {
-  InvokeRuntimeCallingConventionARMVIXL calling_convention;
-  __ Mov(calling_convention.GetRegisterAt(0), instruction->GetTypeIndex());
-  // Note: if heap poisoning is enabled, the entry point takes cares
-  // of poisoning the reference.
-  codegen_->InvokeRuntime(instruction->GetEntrypoint(), instruction, instruction->GetDexPc());
-  CheckEntrypointTypes<kQuickAllocArrayWithAccessCheck, void*, uint32_t, int32_t, ArtMethod*>();
-}
-
-void LocationsBuilderARMVIXL::VisitNewInstance(HNewInstance* instruction) {
-  LocationSummary* locations =
-      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnMainOnly);
-  if (instruction->IsStringAlloc()) {
-    locations->AddTemp(LocationFrom(kMethodRegister));
-  } else {
-    InvokeRuntimeCallingConventionARMVIXL calling_convention;
-    locations->SetInAt(0, LocationFrom(calling_convention.GetRegisterAt(0)));
-    locations->SetInAt(1, LocationFrom(calling_convention.GetRegisterAt(1)));
-  }
-  locations->SetOut(LocationFrom(r0));
-}
-
-void InstructionCodeGeneratorARMVIXL::VisitNewInstance(HNewInstance* instruction) {
-  // Note: if heap poisoning is enabled, the entry point takes cares
-  // of poisoning the reference.
-  if (instruction->IsStringAlloc()) {
-    // String is allocated through StringFactory. Call NewEmptyString entry point.
-    vixl32::Register temp = RegisterFrom(instruction->GetLocations()->GetTemp(0));
-    MemberOffset code_offset = ArtMethod::EntryPointFromQuickCompiledCodeOffset(kArmPointerSize);
-    GetAssembler()->LoadFromOffset(kLoadWord, temp, tr, QUICK_ENTRY_POINT(pNewEmptyString));
-    GetAssembler()->LoadFromOffset(kLoadWord, lr, temp, code_offset.Int32Value());
-    AssemblerAccurateScope aas(GetVIXLAssembler(),
-                               kArmInstrMaxSizeInBytes,
-                               CodeBufferCheckScope::kMaximumSize);
-    __ blx(lr);
-    codegen_->RecordPcInfo(instruction, instruction->GetDexPc());
-  } else {
-    codegen_->InvokeRuntime(instruction->GetEntrypoint(), instruction, instruction->GetDexPc());
-    CheckEntrypointTypes<kQuickAllocObjectWithAccessCheck, void*, uint32_t, ArtMethod*>();
-  }
-}
-
-void LocationsBuilderARMVIXL::VisitParameterValue(HParameterValue* instruction) {
-  LocationSummary* locations =
-      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
-  Location location = parameter_visitor_.GetNextLocation(instruction->GetType());
-  if (location.IsStackSlot()) {
-    location = Location::StackSlot(location.GetStackIndex() + codegen_->GetFrameSize());
-  } else if (location.IsDoubleStackSlot()) {
-    location = Location::DoubleStackSlot(location.GetStackIndex() + codegen_->GetFrameSize());
-  }
-  locations->SetOut(location);
-}
-
-void InstructionCodeGeneratorARMVIXL::VisitParameterValue(
-    HParameterValue* instruction ATTRIBUTE_UNUSED) {
-  // Nothing to do, the parameter is already at its location.
-}
-
-void LocationsBuilderARMVIXL::VisitCurrentMethod(HCurrentMethod* instruction) {
-  LocationSummary* locations =
-      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
-  locations->SetOut(LocationFrom(kMethodRegister));
-}
-
-void InstructionCodeGeneratorARMVIXL::VisitCurrentMethod(
-    HCurrentMethod* instruction ATTRIBUTE_UNUSED) {
-  // Nothing to do, the method is already at its location.
-}
-
-void LocationsBuilderARMVIXL::VisitNot(HNot* not_) {
-  LocationSummary* locations =
-      new (GetGraph()->GetArena()) LocationSummary(not_, LocationSummary::kNoCall);
-  locations->SetInAt(0, Location::RequiresRegister());
-  locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
-}
-
-void InstructionCodeGeneratorARMVIXL::VisitNot(HNot* not_) {
-  LocationSummary* locations = not_->GetLocations();
-  Location out = locations->Out();
-  Location in = locations->InAt(0);
-  switch (not_->GetResultType()) {
-    case Primitive::kPrimInt:
-      __ Mvn(OutputRegister(not_), InputRegisterAt(not_, 0));
-      break;
-
-    case Primitive::kPrimLong:
-      __ Mvn(LowRegisterFrom(out), LowRegisterFrom(in));
-      __ Mvn(HighRegisterFrom(out), HighRegisterFrom(in));
-      break;
-
-    default:
-      LOG(FATAL) << "Unimplemented type for not operation " << not_->GetResultType();
-  }
-}
-
-void LocationsBuilderARMVIXL::VisitPhi(HPhi* instruction) {
-  LocationSummary* locations =
-      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
-  for (size_t i = 0, e = locations->GetInputCount(); i < e; ++i) {
-    locations->SetInAt(i, Location::Any());
-  }
-  locations->SetOut(Location::Any());
-}
-
-void InstructionCodeGeneratorARMVIXL::VisitPhi(HPhi* instruction ATTRIBUTE_UNUSED) {
-  LOG(FATAL) << "Unreachable";
-}
-
-void CodeGeneratorARMVIXL::GenerateMemoryBarrier(MemBarrierKind kind) {
-  // TODO (ported from quick): revisit ARM barrier kinds.
-  DmbOptions flavor = DmbOptions::ISH;  // Quiet C++ warnings.
-  switch (kind) {
-    case MemBarrierKind::kAnyStore:
-    case MemBarrierKind::kLoadAny:
-    case MemBarrierKind::kAnyAny: {
-      flavor = DmbOptions::ISH;
-      break;
-    }
-    case MemBarrierKind::kStoreStore: {
-      flavor = DmbOptions::ISHST;
-      break;
-    }
-    default:
-      LOG(FATAL) << "Unexpected memory barrier " << kind;
-  }
-  __ Dmb(flavor);
-}
-
-void InstructionCodeGeneratorARMVIXL::GenerateWideAtomicLoad(vixl32::Register addr,
-                                                             uint32_t offset,
-                                                             vixl32::Register out_lo,
-                                                             vixl32::Register out_hi) {
-  UseScratchRegisterScope temps(GetVIXLAssembler());
-  if (offset != 0) {
-    vixl32::Register temp = temps.Acquire();
-    __ Add(temp, addr, offset);
-    addr = temp;
-  }
-  __ Ldrexd(out_lo, out_hi, addr);
-}
-
-void InstructionCodeGeneratorARMVIXL::GenerateWideAtomicStore(vixl32::Register addr,
-                                                              uint32_t offset,
-                                                              vixl32::Register value_lo,
-                                                              vixl32::Register value_hi,
-                                                              vixl32::Register temp1,
-                                                              vixl32::Register temp2,
-                                                              HInstruction* instruction) {
-  UseScratchRegisterScope temps(GetVIXLAssembler());
-  vixl32::Label fail;
-  if (offset != 0) {
-    vixl32::Register temp = temps.Acquire();
-    __ Add(temp, addr, offset);
-    addr = temp;
-  }
-  __ Bind(&fail);
-  // We need a load followed by store. (The address used in a STREX instruction must
-  // be the same as the address in the most recently executed LDREX instruction.)
-  __ Ldrexd(temp1, temp2, addr);
-  codegen_->MaybeRecordImplicitNullCheck(instruction);
-  __ Strexd(temp1, value_lo, value_hi, addr);
-  __ Cbnz(temp1, &fail);
-}
-
 void InstructionCodeGeneratorARMVIXL::DivRemOneOrMinusOne(HBinaryOperation* instruction) {
   DCHECK(instruction->IsDiv() || instruction->IsRem());
   DCHECK(instruction->GetResultType() == Primitive::kPrimInt);
@@ -2384,12 +2537,22 @@
         locations->SetInAt(1, Location::RequiresRegister());
         locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
       } else {
-        TODO_VIXL32(FATAL);
+        InvokeRuntimeCallingConventionARMVIXL calling_convention;
+        locations->SetInAt(0, LocationFrom(calling_convention.GetRegisterAt(0)));
+        locations->SetInAt(1, LocationFrom(calling_convention.GetRegisterAt(1)));
+        // Note: divrem will compute both the quotient and the remainder as the pair R0 and R1, but
+        //       we only need the former.
+        locations->SetOut(LocationFrom(r0));
       }
       break;
     }
     case Primitive::kPrimLong: {
-      TODO_VIXL32(FATAL);
+      InvokeRuntimeCallingConventionARMVIXL calling_convention;
+      locations->SetInAt(0, LocationFrom(
+          calling_convention.GetRegisterAt(0), calling_convention.GetRegisterAt(1)));
+      locations->SetInAt(1, LocationFrom(
+          calling_convention.GetRegisterAt(2), calling_convention.GetRegisterAt(3)));
+      locations->SetOut(LocationFrom(r0, r1));
       break;
     }
     case Primitive::kPrimFloat:
@@ -2406,6 +2569,7 @@
 }
 
 void InstructionCodeGeneratorARMVIXL::VisitDiv(HDiv* div) {
+  Location lhs = div->GetLocations()->InAt(0);
   Location rhs = div->GetLocations()->InAt(1);
 
   switch (div->GetResultType()) {
@@ -2415,13 +2579,28 @@
       } else if (codegen_->GetInstructionSetFeatures().HasDivideInstruction()) {
         __ Sdiv(OutputRegister(div), InputRegisterAt(div, 0), InputRegisterAt(div, 1));
       } else {
-        TODO_VIXL32(FATAL);
+        InvokeRuntimeCallingConventionARMVIXL calling_convention;
+        DCHECK(calling_convention.GetRegisterAt(0).Is(RegisterFrom(lhs)));
+        DCHECK(calling_convention.GetRegisterAt(1).Is(RegisterFrom(rhs)));
+        DCHECK(r0.Is(OutputRegister(div)));
+
+        codegen_->InvokeRuntime(kQuickIdivmod, div, div->GetDexPc());
+        CheckEntrypointTypes<kQuickIdivmod, int32_t, int32_t, int32_t>();
       }
       break;
     }
 
     case Primitive::kPrimLong: {
-      TODO_VIXL32(FATAL);
+      InvokeRuntimeCallingConventionARMVIXL calling_convention;
+      DCHECK(calling_convention.GetRegisterAt(0).Is(LowRegisterFrom(lhs)));
+      DCHECK(calling_convention.GetRegisterAt(1).Is(HighRegisterFrom(lhs)));
+      DCHECK(calling_convention.GetRegisterAt(2).Is(LowRegisterFrom(rhs)));
+      DCHECK(calling_convention.GetRegisterAt(3).Is(HighRegisterFrom(rhs)));
+      DCHECK(LowRegisterFrom(div->GetLocations()->Out()).Is(r0));
+      DCHECK(HighRegisterFrom(div->GetLocations()->Out()).Is(r1));
+
+      codegen_->InvokeRuntime(kQuickLdiv, div, div->GetDexPc());
+      CheckEntrypointTypes<kQuickLdiv, int64_t, int64_t, int64_t>();
       break;
     }
 
@@ -2435,6 +2614,140 @@
   }
 }
 
+void LocationsBuilderARMVIXL::VisitRem(HRem* rem) {
+  Primitive::Type type = rem->GetResultType();
+
+  // Most remainders are implemented in the runtime.
+  LocationSummary::CallKind call_kind = LocationSummary::kCallOnMainOnly;
+  if (rem->GetResultType() == Primitive::kPrimInt && rem->InputAt(1)->IsConstant()) {
+    // sdiv will be replaced by another instruction sequence.
+    call_kind = LocationSummary::kNoCall;
+  } else if ((rem->GetResultType() == Primitive::kPrimInt)
+             && codegen_->GetInstructionSetFeatures().HasDivideInstruction()) {
+    // Have hardware divide instruction for int, do it with three instructions.
+    call_kind = LocationSummary::kNoCall;
+  }
+
+  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(rem, call_kind);
+
+  switch (type) {
+    case Primitive::kPrimInt: {
+      if (rem->InputAt(1)->IsConstant()) {
+        locations->SetInAt(0, Location::RequiresRegister());
+        locations->SetInAt(1, Location::ConstantLocation(rem->InputAt(1)->AsConstant()));
+        locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+        int32_t value = rem->InputAt(1)->AsIntConstant()->GetValue();
+        if (value == 1 || value == 0 || value == -1) {
+          // No temp register required.
+        } else {
+          locations->AddTemp(Location::RequiresRegister());
+          if (!IsPowerOfTwo(AbsOrMin(value))) {
+            locations->AddTemp(Location::RequiresRegister());
+          }
+        }
+      } else if (codegen_->GetInstructionSetFeatures().HasDivideInstruction()) {
+        locations->SetInAt(0, Location::RequiresRegister());
+        locations->SetInAt(1, Location::RequiresRegister());
+        locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+        locations->AddTemp(Location::RequiresRegister());
+      } else {
+        InvokeRuntimeCallingConventionARMVIXL calling_convention;
+        locations->SetInAt(0, LocationFrom(calling_convention.GetRegisterAt(0)));
+        locations->SetInAt(1, LocationFrom(calling_convention.GetRegisterAt(1)));
+        // Note: divrem will compute both the quotient and the remainder as the pair R0 and R1, but
+        //       we only need the latter.
+        locations->SetOut(LocationFrom(r1));
+      }
+      break;
+    }
+    case Primitive::kPrimLong: {
+      InvokeRuntimeCallingConventionARMVIXL calling_convention;
+      locations->SetInAt(0, LocationFrom(
+          calling_convention.GetRegisterAt(0), calling_convention.GetRegisterAt(1)));
+      locations->SetInAt(1, LocationFrom(
+          calling_convention.GetRegisterAt(2), calling_convention.GetRegisterAt(3)));
+      // The runtime helper puts the output in R2,R3.
+      locations->SetOut(LocationFrom(r2, r3));
+      break;
+    }
+    case Primitive::kPrimFloat: {
+      InvokeRuntimeCallingConventionARMVIXL calling_convention;
+      locations->SetInAt(0, LocationFrom(calling_convention.GetFpuRegisterAt(0)));
+      locations->SetInAt(1, LocationFrom(calling_convention.GetFpuRegisterAt(1)));
+      locations->SetOut(LocationFrom(s0));
+      break;
+    }
+
+    case Primitive::kPrimDouble: {
+      InvokeRuntimeCallingConventionARMVIXL calling_convention;
+      locations->SetInAt(0, LocationFrom(
+          calling_convention.GetFpuRegisterAt(0), calling_convention.GetFpuRegisterAt(1)));
+      locations->SetInAt(1, LocationFrom(
+          calling_convention.GetFpuRegisterAt(2), calling_convention.GetFpuRegisterAt(3)));
+      locations->SetOut(LocationFrom(s0, s1));
+      break;
+    }
+
+    default:
+      LOG(FATAL) << "Unexpected rem type " << type;
+  }
+}
+
+void InstructionCodeGeneratorARMVIXL::VisitRem(HRem* rem) {
+  LocationSummary* locations = rem->GetLocations();
+  Location second = locations->InAt(1);
+
+  Primitive::Type type = rem->GetResultType();
+  switch (type) {
+    case Primitive::kPrimInt: {
+      vixl32::Register reg1 = InputRegisterAt(rem, 0);
+      vixl32::Register out_reg = OutputRegister(rem);
+      if (second.IsConstant()) {
+        GenerateDivRemConstantIntegral(rem);
+      } else if (codegen_->GetInstructionSetFeatures().HasDivideInstruction()) {
+        vixl32::Register reg2 = RegisterFrom(second);
+        vixl32::Register temp = RegisterFrom(locations->GetTemp(0));
+
+        // temp = reg1 / reg2  (integer division)
+        // dest = reg1 - temp * reg2
+        __ Sdiv(temp, reg1, reg2);
+        __ Mls(out_reg, temp, reg2, reg1);
+      } else {
+        InvokeRuntimeCallingConventionARMVIXL calling_convention;
+        DCHECK(reg1.Is(calling_convention.GetRegisterAt(0)));
+        DCHECK(RegisterFrom(second).Is(calling_convention.GetRegisterAt(1)));
+        DCHECK(out_reg.Is(r1));
+
+        codegen_->InvokeRuntime(kQuickIdivmod, rem, rem->GetDexPc());
+        CheckEntrypointTypes<kQuickIdivmod, int32_t, int32_t, int32_t>();
+      }
+      break;
+    }
+
+    case Primitive::kPrimLong: {
+      codegen_->InvokeRuntime(kQuickLmod, rem, rem->GetDexPc());
+      CheckEntrypointTypes<kQuickLmod, int64_t, int64_t, int64_t>();
+      break;
+    }
+
+    case Primitive::kPrimFloat: {
+      codegen_->InvokeRuntime(kQuickFmodf, rem, rem->GetDexPc());
+      CheckEntrypointTypes<kQuickFmodf, float, float, float>();
+      break;
+    }
+
+    case Primitive::kPrimDouble: {
+      codegen_->InvokeRuntime(kQuickFmod, rem, rem->GetDexPc());
+      CheckEntrypointTypes<kQuickFmod, double, double, double>();
+      break;
+    }
+
+    default:
+      LOG(FATAL) << "Unexpected rem type " << type;
+  }
+}
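When the hardware divider is present, the int remainder above is just SDIV plus MLS, relying on the identity `a % b == a - (a / b) * b` for truncating division (b != 0 is guaranteed by the preceding HDivZeroCheck). A minimal sketch (plain C++, illustrative):

#include <cstdint>

// Sketch of the SDIV + MLS remainder sequence: a % b == a - (a / b) * b.
// C++ integer division also truncates toward zero, so the identity matches.
int32_t RemViaDivMls(int32_t a, int32_t b) {
  int32_t quotient = a / b;  // SDIV temp, reg1, reg2
  return a - quotient * b;   // MLS  out, temp, reg2, reg1
}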
+
 void LocationsBuilderARMVIXL::VisitDivZeroCheck(HDivZeroCheck* instruction) {
   // TODO(VIXL): https://android-review.googlesource.com/#/c/275337/
   LocationSummary::CallKind call_kind = instruction->CanThrowIntoCatchBlock()
@@ -2490,6 +2803,647 @@
   }
 }
 
+void InstructionCodeGeneratorARMVIXL::HandleIntegerRotate(HRor* ror) {
+  LocationSummary* locations = ror->GetLocations();
+  vixl32::Register in = InputRegisterAt(ror, 0);
+  Location rhs = locations->InAt(1);
+  vixl32::Register out = OutputRegister(ror);
+
+  if (rhs.IsConstant()) {
+    // Arm32 and Thumb2 assemblers require a rotation in the interval [1,31],
+    // so map all rotations to a positive equivalent in that range.
+    // (e.g. a rotation by -2 bits, left *or* right, equals 30 bits in the same direction.)
+    uint32_t rot = CodeGenerator::GetInt32ValueOf(rhs.GetConstant()) & 0x1F;
+    if (rot) {
+      // Rotate, mapping left rotations to right equivalents if necessary.
+      // (e.g. left by 2 bits == right by 30.)
+      __ Ror(out, in, rot);
+    } else if (!out.Is(in)) {
+      __ Mov(out, in);
+    }
+  } else {
+    __ Ror(out, in, RegisterFrom(rhs));
+  }
+}
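For a constant distance, the rotation amount is first reduced modulo 32 and a zero result becomes a plain register move, since the ROR encoding only accepts distances in [1,31]; a left rotation by n is expressed as a right rotation by (32 - n) & 0x1F. A small sketch of that reduction (plain C++, illustrative):

#include <cstdint>

// Sketch of a constant rotate-right with the distance reduced mod 32.
uint32_t RotateRight(uint32_t value, uint32_t dist) {
  uint32_t rot = dist & 0x1Fu;
  if (rot == 0u) {
    return value;  // no ROR #0 encoding; emit a plain move instead
  }
  return (value >> rot) | (value << (32u - rot));
}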
+
+// Gain some speed by mapping all Long rotates onto equivalent pairs of Integer
+// rotates by swapping input regs (effectively rotating by the first 32-bits of
+// a larger rotation) or flipping direction (thus treating larger right/left
+// rotations as sub-word sized rotations in the other direction) as appropriate.
+void InstructionCodeGeneratorARMVIXL::HandleLongRotate(HRor* ror) {
+  LocationSummary* locations = ror->GetLocations();
+  vixl32::Register in_reg_lo = LowRegisterFrom(locations->InAt(0));
+  vixl32::Register in_reg_hi = HighRegisterFrom(locations->InAt(0));
+  Location rhs = locations->InAt(1);
+  vixl32::Register out_reg_lo = LowRegisterFrom(locations->Out());
+  vixl32::Register out_reg_hi = HighRegisterFrom(locations->Out());
+
+  if (rhs.IsConstant()) {
+    uint64_t rot = CodeGenerator::GetInt64ValueOf(rhs.GetConstant());
+    // Map all rotations to positive equivalents in the interval [0,63].
+    rot &= kMaxLongShiftDistance;
+    // For rotations of a word or more, 'pre-rotate' by 32 bits to keep the
+    // rotate logic below down to a simple pair of binary ORRs.
+    // (e.g. 34 bits == in_reg swap + 2 bits right.)
+    if (rot >= kArmBitsPerWord) {
+      rot -= kArmBitsPerWord;
+      std::swap(in_reg_hi, in_reg_lo);
+    }
+    // Rotate, or mov to out for zero or word size rotations.
+    if (rot != 0u) {
+      __ Lsr(out_reg_hi, in_reg_hi, rot);
+      __ Orr(out_reg_hi, out_reg_hi, Operand(in_reg_lo, ShiftType::LSL, kArmBitsPerWord - rot));
+      __ Lsr(out_reg_lo, in_reg_lo, rot);
+      __ Orr(out_reg_lo, out_reg_lo, Operand(in_reg_hi, ShiftType::LSL, kArmBitsPerWord - rot));
+    } else {
+      __ Mov(out_reg_lo, in_reg_lo);
+      __ Mov(out_reg_hi, in_reg_hi);
+    }
+  } else {
+    vixl32::Register shift_right = RegisterFrom(locations->GetTemp(0));
+    vixl32::Register shift_left = RegisterFrom(locations->GetTemp(1));
+    vixl32::Label end;
+    vixl32::Label shift_by_32_plus_shift_right;
+
+    __ And(shift_right, RegisterFrom(rhs), 0x1F);
+    __ Lsrs(shift_left, RegisterFrom(rhs), 6);
+    // TODO(VIXL): Check that the flags are preserved once "vixl32::LeaveFlags" is enabled.
+    __ Rsb(shift_left, shift_right, kArmBitsPerWord);
+    __ B(cc, &shift_by_32_plus_shift_right);
+
+    // out_reg_hi = (reg_hi << shift_left) | (reg_lo >> shift_right).
+    // out_reg_lo = (reg_lo << shift_left) | (reg_hi >> shift_right).
+    __ Lsl(out_reg_hi, in_reg_hi, shift_left);
+    __ Lsr(out_reg_lo, in_reg_lo, shift_right);
+    __ Add(out_reg_hi, out_reg_hi, out_reg_lo);
+    __ Lsl(out_reg_lo, in_reg_lo, shift_left);
+    __ Lsr(shift_left, in_reg_hi, shift_right);
+    __ Add(out_reg_lo, out_reg_lo, shift_left);
+    __ B(&end);
+
+    __ Bind(&shift_by_32_plus_shift_right);  // Shift by 32+shift_right.
+    // out_reg_hi = (reg_hi >> shift_right) | (reg_lo << shift_left).
+    // out_reg_lo = (reg_lo >> shift_right) | (reg_hi << shift_left).
+    __ Lsr(out_reg_hi, in_reg_hi, shift_right);
+    __ Lsl(out_reg_lo, in_reg_lo, shift_left);
+    __ Add(out_reg_hi, out_reg_hi, out_reg_lo);
+    __ Lsr(out_reg_lo, in_reg_lo, shift_right);
+    __ Lsl(shift_right, in_reg_hi, shift_left);
+    __ Add(out_reg_lo, out_reg_lo, shift_right);
+
+    __ Bind(&end);
+  }
+}
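For the constant-distance case above, a 64-bit rotation reduces to at most a swap of the two words (distances of 32 or more) followed by a sub-word rotation expressed with the LSR and LSL-and-combine pairs. A hedged sketch of that decomposition (plain C++, illustrative):

#include <cstdint>

// Sketch of the constant-distance long rotate-right emitted above.
void RotateRightLong(uint32_t in_lo, uint32_t in_hi, uint32_t dist,
                     uint32_t* out_lo, uint32_t* out_hi) {
  uint32_t rot = dist & 63u;  // map the distance to [0, 63]
  if (rot >= 32u) {           // "pre-rotate" by a whole word: swap the halves
    rot -= 32u;
    uint32_t tmp = in_lo;
    in_lo = in_hi;
    in_hi = tmp;
  }
  if (rot == 0u) {
    *out_lo = in_lo;          // zero (or word-multiple) rotation: plain moves
    *out_hi = in_hi;
  } else {
    *out_hi = (in_hi >> rot) | (in_lo << (32u - rot));
    *out_lo = (in_lo >> rot) | (in_hi << (32u - rot));
  }
}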
+
+void LocationsBuilderARMVIXL::VisitRor(HRor* ror) {
+  LocationSummary* locations =
+      new (GetGraph()->GetArena()) LocationSummary(ror, LocationSummary::kNoCall);
+  switch (ror->GetResultType()) {
+    case Primitive::kPrimInt: {
+      locations->SetInAt(0, Location::RequiresRegister());
+      locations->SetInAt(1, Location::RegisterOrConstant(ror->InputAt(1)));
+      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+      break;
+    }
+    case Primitive::kPrimLong: {
+      locations->SetInAt(0, Location::RequiresRegister());
+      if (ror->InputAt(1)->IsConstant()) {
+        locations->SetInAt(1, Location::ConstantLocation(ror->InputAt(1)->AsConstant()));
+      } else {
+        locations->SetInAt(1, Location::RequiresRegister());
+        locations->AddTemp(Location::RequiresRegister());
+        locations->AddTemp(Location::RequiresRegister());
+      }
+      locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
+      break;
+    }
+    default:
+      LOG(FATAL) << "Unexpected operation type " << ror->GetResultType();
+  }
+}
+
+void InstructionCodeGeneratorARMVIXL::VisitRor(HRor* ror) {
+  Primitive::Type type = ror->GetResultType();
+  switch (type) {
+    case Primitive::kPrimInt: {
+      HandleIntegerRotate(ror);
+      break;
+    }
+    case Primitive::kPrimLong: {
+      HandleLongRotate(ror);
+      break;
+    }
+    default:
+      LOG(FATAL) << "Unexpected operation type " << type;
+      UNREACHABLE();
+  }
+}
+
+void LocationsBuilderARMVIXL::HandleShift(HBinaryOperation* op) {
+  DCHECK(op->IsShl() || op->IsShr() || op->IsUShr());
+
+  LocationSummary* locations =
+      new (GetGraph()->GetArena()) LocationSummary(op, LocationSummary::kNoCall);
+
+  switch (op->GetResultType()) {
+    case Primitive::kPrimInt: {
+      locations->SetInAt(0, Location::RequiresRegister());
+      if (op->InputAt(1)->IsConstant()) {
+        locations->SetInAt(1, Location::ConstantLocation(op->InputAt(1)->AsConstant()));
+        locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+      } else {
+        locations->SetInAt(1, Location::RequiresRegister());
+        // Make the output overlap, as it will be used to hold the masked
+        // second input.
+        locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
+      }
+      break;
+    }
+    case Primitive::kPrimLong: {
+      locations->SetInAt(0, Location::RequiresRegister());
+      if (op->InputAt(1)->IsConstant()) {
+        locations->SetInAt(1, Location::ConstantLocation(op->InputAt(1)->AsConstant()));
+        // For simplicity, use kOutputOverlap even though we only require that low registers
+        // don't clash with high registers which the register allocator currently guarantees.
+        locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
+      } else {
+        locations->SetInAt(1, Location::RequiresRegister());
+        locations->AddTemp(Location::RequiresRegister());
+        locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
+      }
+      break;
+    }
+    default:
+      LOG(FATAL) << "Unexpected operation type " << op->GetResultType();
+  }
+}
+
+void InstructionCodeGeneratorARMVIXL::HandleShift(HBinaryOperation* op) {
+  DCHECK(op->IsShl() || op->IsShr() || op->IsUShr());
+
+  LocationSummary* locations = op->GetLocations();
+  Location out = locations->Out();
+  Location first = locations->InAt(0);
+  Location second = locations->InAt(1);
+
+  Primitive::Type type = op->GetResultType();
+  switch (type) {
+    case Primitive::kPrimInt: {
+      vixl32::Register out_reg = OutputRegister(op);
+      vixl32::Register first_reg = InputRegisterAt(op, 0);
+      if (second.IsRegister()) {
+        vixl32::Register second_reg = RegisterFrom(second);
+        // ARM doesn't mask the shift count so we need to do it ourselves.
+        __ And(out_reg, second_reg, kMaxIntShiftDistance);
+        if (op->IsShl()) {
+          __ Lsl(out_reg, first_reg, out_reg);
+        } else if (op->IsShr()) {
+          __ Asr(out_reg, first_reg, out_reg);
+        } else {
+          __ Lsr(out_reg, first_reg, out_reg);
+        }
+      } else {
+        int32_t cst = second.GetConstant()->AsIntConstant()->GetValue();
+        uint32_t shift_value = cst & kMaxIntShiftDistance;
+        if (shift_value == 0) {  // ARM does not support shifting with 0 immediate.
+          __ Mov(out_reg, first_reg);
+        } else if (op->IsShl()) {
+          __ Lsl(out_reg, first_reg, shift_value);
+        } else if (op->IsShr()) {
+          __ Asr(out_reg, first_reg, shift_value);
+        } else {
+          __ Lsr(out_reg, first_reg, shift_value);
+        }
+      }
+      break;
+    }
+    case Primitive::kPrimLong: {
+      vixl32::Register o_h = HighRegisterFrom(out);
+      vixl32::Register o_l = LowRegisterFrom(out);
+
+      vixl32::Register high = HighRegisterFrom(first);
+      vixl32::Register low = LowRegisterFrom(first);
+
+      if (second.IsRegister()) {
+        vixl32::Register temp = RegisterFrom(locations->GetTemp(0));
+
+        vixl32::Register second_reg = RegisterFrom(second);
+
+        if (op->IsShl()) {
+          __ And(o_l, second_reg, kMaxLongShiftDistance);
+          // Shift the high part
+          __ Lsl(o_h, high, o_l);
+          // Shift the low part and `or` in the bits that overflowed into the high part
+          __ Rsb(temp, o_l, kArmBitsPerWord);
+          __ Lsr(temp, low, temp);
+          __ Orr(o_h, o_h, temp);
+          // If the shift is > 32 bits, override the high part
+          __ Subs(temp, o_l, kArmBitsPerWord);
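+          // `temp` = shift - 32; when it is non-negative (pl) the shift spans the whole
+          // low word, so the high result is simply `low << (shift - 32)`.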
+          {
+            AssemblerAccurateScope guard(GetVIXLAssembler(),
+                                         3 * kArmInstrMaxSizeInBytes,
+                                         CodeBufferCheckScope::kMaximumSize);
+            __ it(pl);
+            __ lsl(pl, o_h, low, temp);
+          }
+          // Shift the low part
+          __ Lsl(o_l, low, o_l);
+        } else if (op->IsShr()) {
+          __ And(o_h, second_reg, kMaxLongShiftDistance);
+          // Shift the low part
+          __ Lsr(o_l, low, o_h);
+          // Shift the high part and `or` in the bits that underflowed into the low part
+          __ Rsb(temp, o_h, kArmBitsPerWord);
+          __ Lsl(temp, high, temp);
+          __ Orr(o_l, o_l, temp);
+          // If the shift is > 32 bits, override the low part
+          __ Subs(temp, o_h, kArmBitsPerWord);
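+          // `temp` = shift - 32; when it is non-negative (pl) the low result is simply
+          // `high >> (shift - 32)` (arithmetic shift).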
+          {
+            AssemblerAccurateScope guard(GetVIXLAssembler(),
+                                         3 * kArmInstrMaxSizeInBytes,
+                                         CodeBufferCheckScope::kMaximumSize);
+            __ it(pl);
+            __ asr(pl, o_l, high, temp);
+          }
+          // Shift the high part
+          __ Asr(o_h, high, o_h);
+        } else {
+          __ And(o_h, second_reg, kMaxLongShiftDistance);
+          // Same as Shr above, except using `Lsr`s instead of `Asr`s.
+          __ Lsr(o_l, low, o_h);
+          __ Rsb(temp, o_h, kArmBitsPerWord);
+          __ Lsl(temp, high, temp);
+          __ Orr(o_l, o_l, temp);
+          __ Subs(temp, o_h, kArmBitsPerWord);
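+          // As above: pl means shift >= 32, so the low result is `high >> (shift - 32)` (logical).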
+          {
+            AssemblerAccurateScope guard(GetVIXLAssembler(),
+                                         3 * kArmInstrMaxSizeInBytes,
+                                         CodeBufferCheckScope::kMaximumSize);
+            __ it(pl);
+            __ lsr(pl, o_l, high, temp);
+          }
+          __ Lsr(o_h, high, o_h);
+        }
+      } else {
+        // Register allocator doesn't create partial overlap.
+        DCHECK(!o_l.Is(high));
+        DCHECK(!o_h.Is(low));
+        int32_t cst = second.GetConstant()->AsIntConstant()->GetValue();
+        uint32_t shift_value = cst & kMaxLongShiftDistance;
+        if (shift_value > 32) {
+          if (op->IsShl()) {
+            __ Lsl(o_h, low, shift_value - 32);
+            __ Mov(o_l, 0);
+          } else if (op->IsShr()) {
+            __ Asr(o_l, high, shift_value - 32);
+            __ Asr(o_h, high, 31);
+          } else {
+            __ Lsr(o_l, high, shift_value - 32);
+            __ Mov(o_h, 0);
+          }
+        } else if (shift_value == 32) {
+          if (op->IsShl()) {
+            __ Mov(o_h, low);
+            __ Mov(o_l, 0);
+          } else if (op->IsShr()) {
+            __ Mov(o_l, high);
+            __ Asr(o_h, high, 31);
+          } else {
+            __ Mov(o_l, high);
+            __ Mov(o_h, 0);
+          }
+        } else if (shift_value == 1) {
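+          // Shifts by 1 go through the carry flag: LSLS/ADC feeds the top bit of the low
+          // word into the high word; ASRS/LSRS followed by RRX feeds the dropped bit of
+          // the high word into the top of the low word.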
+          if (op->IsShl()) {
+            __ Lsls(o_l, low, 1);
+            __ Adc(o_h, high, high);
+          } else if (op->IsShr()) {
+            __ Asrs(o_h, high, 1);
+            __ Rrx(o_l, low);
+          } else {
+            __ Lsrs(o_h, high, 1);
+            __ Rrx(o_l, low);
+          }
+        } else {
+          DCHECK(2 <= shift_value && shift_value < 32) << shift_value;
+          if (op->IsShl()) {
+            __ Lsl(o_h, high, shift_value);
+            __ Orr(o_h, o_h, Operand(low, ShiftType::LSR, 32 - shift_value));
+            __ Lsl(o_l, low, shift_value);
+          } else if (op->IsShr()) {
+            __ Lsr(o_l, low, shift_value);
+            __ Orr(o_l, o_l, Operand(high, ShiftType::LSL, 32 - shift_value));
+            __ Asr(o_h, high, shift_value);
+          } else {
+            __ Lsr(o_l, low, shift_value);
+            __ Orr(o_l, o_l, Operand(high, ShiftType::LSL, 32 - shift_value));
+            __ Lsr(o_h, high, shift_value);
+          }
+        }
+      }
+      break;
+    }
+    default:
+      LOG(FATAL) << "Unexpected operation type " << type;
+      UNREACHABLE();
+  }
+}
+
+void LocationsBuilderARMVIXL::VisitShl(HShl* shl) {
+  HandleShift(shl);
+}
+
+void InstructionCodeGeneratorARMVIXL::VisitShl(HShl* shl) {
+  HandleShift(shl);
+}
+
+void LocationsBuilderARMVIXL::VisitShr(HShr* shr) {
+  HandleShift(shr);
+}
+
+void InstructionCodeGeneratorARMVIXL::VisitShr(HShr* shr) {
+  HandleShift(shr);
+}
+
+void LocationsBuilderARMVIXL::VisitUShr(HUShr* ushr) {
+  HandleShift(ushr);
+}
+
+void InstructionCodeGeneratorARMVIXL::VisitUShr(HUShr* ushr) {
+  HandleShift(ushr);
+}
+
+void LocationsBuilderARMVIXL::VisitNewInstance(HNewInstance* instruction) {
+  LocationSummary* locations =
+      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnMainOnly);
+  if (instruction->IsStringAlloc()) {
+    locations->AddTemp(LocationFrom(kMethodRegister));
+  } else {
+    InvokeRuntimeCallingConventionARMVIXL calling_convention;
+    locations->SetInAt(0, LocationFrom(calling_convention.GetRegisterAt(0)));
+    locations->SetInAt(1, LocationFrom(calling_convention.GetRegisterAt(1)));
+  }
+  locations->SetOut(LocationFrom(r0));
+}
+
+void InstructionCodeGeneratorARMVIXL::VisitNewInstance(HNewInstance* instruction) {
+  // Note: if heap poisoning is enabled, the entry point takes care
+  // of poisoning the reference.
+  if (instruction->IsStringAlloc()) {
+    // String is allocated through StringFactory. Call NewEmptyString entry point.
+    vixl32::Register temp = RegisterFrom(instruction->GetLocations()->GetTemp(0));
+    MemberOffset code_offset = ArtMethod::EntryPointFromQuickCompiledCodeOffset(kArmPointerSize);
+    GetAssembler()->LoadFromOffset(kLoadWord, temp, tr, QUICK_ENTRY_POINT(pNewEmptyString));
+    GetAssembler()->LoadFromOffset(kLoadWord, lr, temp, code_offset.Int32Value());
+    AssemblerAccurateScope aas(GetVIXLAssembler(),
+                               kArmInstrMaxSizeInBytes,
+                               CodeBufferCheckScope::kMaximumSize);
+    __ blx(lr);
+    codegen_->RecordPcInfo(instruction, instruction->GetDexPc());
+  } else {
+    codegen_->InvokeRuntime(instruction->GetEntrypoint(), instruction, instruction->GetDexPc());
+    CheckEntrypointTypes<kQuickAllocObjectWithAccessCheck, void*, uint32_t, ArtMethod*>();
+  }
+}
+
+void LocationsBuilderARMVIXL::VisitNewArray(HNewArray* instruction) {
+  LocationSummary* locations =
+      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnMainOnly);
+  InvokeRuntimeCallingConventionARMVIXL calling_convention;
+  locations->AddTemp(LocationFrom(calling_convention.GetRegisterAt(0)));
+  locations->SetOut(LocationFrom(r0));
+  locations->SetInAt(0, LocationFrom(calling_convention.GetRegisterAt(1)));
+  locations->SetInAt(1, LocationFrom(calling_convention.GetRegisterAt(2)));
+}
+
+void InstructionCodeGeneratorARMVIXL::VisitNewArray(HNewArray* instruction) {
+  InvokeRuntimeCallingConventionARMVIXL calling_convention;
+  __ Mov(calling_convention.GetRegisterAt(0), instruction->GetTypeIndex());
+  // Note: if heap poisoning is enabled, the entry point takes care
+  // of poisoning the reference.
+  codegen_->InvokeRuntime(instruction->GetEntrypoint(), instruction, instruction->GetDexPc());
+  CheckEntrypointTypes<kQuickAllocArrayWithAccessCheck, void*, uint32_t, int32_t, ArtMethod*>();
+}
+
+void LocationsBuilderARMVIXL::VisitParameterValue(HParameterValue* instruction) {
+  LocationSummary* locations =
+      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+  Location location = parameter_visitor_.GetNextLocation(instruction->GetType());
+  if (location.IsStackSlot()) {
+    location = Location::StackSlot(location.GetStackIndex() + codegen_->GetFrameSize());
+  } else if (location.IsDoubleStackSlot()) {
+    location = Location::DoubleStackSlot(location.GetStackIndex() + codegen_->GetFrameSize());
+  }
+  locations->SetOut(location);
+}
+
+void InstructionCodeGeneratorARMVIXL::VisitParameterValue(
+    HParameterValue* instruction ATTRIBUTE_UNUSED) {
+  // Nothing to do, the parameter is already at its location.
+}
+
+void LocationsBuilderARMVIXL::VisitCurrentMethod(HCurrentMethod* instruction) {
+  LocationSummary* locations =
+      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+  locations->SetOut(LocationFrom(kMethodRegister));
+}
+
+void InstructionCodeGeneratorARMVIXL::VisitCurrentMethod(
+    HCurrentMethod* instruction ATTRIBUTE_UNUSED) {
+  // Nothing to do, the method is already at its location.
+}
+
+void LocationsBuilderARMVIXL::VisitNot(HNot* not_) {
+  LocationSummary* locations =
+      new (GetGraph()->GetArena()) LocationSummary(not_, LocationSummary::kNoCall);
+  locations->SetInAt(0, Location::RequiresRegister());
+  locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+}
+
+void InstructionCodeGeneratorARMVIXL::VisitNot(HNot* not_) {
+  LocationSummary* locations = not_->GetLocations();
+  Location out = locations->Out();
+  Location in = locations->InAt(0);
+  switch (not_->GetResultType()) {
+    case Primitive::kPrimInt:
+      __ Mvn(OutputRegister(not_), InputRegisterAt(not_, 0));
+      break;
+
+    case Primitive::kPrimLong:
+      __ Mvn(LowRegisterFrom(out), LowRegisterFrom(in));
+      __ Mvn(HighRegisterFrom(out), HighRegisterFrom(in));
+      break;
+
+    default:
+      LOG(FATAL) << "Unimplemented type for not operation " << not_->GetResultType();
+  }
+}
+
+void LocationsBuilderARMVIXL::VisitBooleanNot(HBooleanNot* bool_not) {
+  LocationSummary* locations =
+      new (GetGraph()->GetArena()) LocationSummary(bool_not, LocationSummary::kNoCall);
+  locations->SetInAt(0, Location::RequiresRegister());
+  locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+}
+
+void InstructionCodeGeneratorARMVIXL::VisitBooleanNot(HBooleanNot* bool_not) {
+  __ Eor(OutputRegister(bool_not), InputRegister(bool_not), 1);
+}
+
+void LocationsBuilderARMVIXL::VisitCompare(HCompare* compare) {
+  LocationSummary* locations =
+      new (GetGraph()->GetArena()) LocationSummary(compare, LocationSummary::kNoCall);
+  switch (compare->InputAt(0)->GetType()) {
+    case Primitive::kPrimBoolean:
+    case Primitive::kPrimByte:
+    case Primitive::kPrimShort:
+    case Primitive::kPrimChar:
+    case Primitive::kPrimInt:
+    case Primitive::kPrimLong: {
+      locations->SetInAt(0, Location::RequiresRegister());
+      locations->SetInAt(1, Location::RequiresRegister());
+      // Output overlaps because it is written before doing the low comparison.
+      locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
+      break;
+    }
+    case Primitive::kPrimFloat:
+    case Primitive::kPrimDouble: {
+      locations->SetInAt(0, Location::RequiresFpuRegister());
+      locations->SetInAt(1, ArithmeticZeroOrFpuRegister(compare->InputAt(1)));
+      locations->SetOut(Location::RequiresRegister());
+      break;
+    }
+    default:
+      LOG(FATAL) << "Unexpected type for compare operation " << compare->InputAt(0)->GetType();
+  }
+}
+
+void InstructionCodeGeneratorARMVIXL::VisitCompare(HCompare* compare) {
+  LocationSummary* locations = compare->GetLocations();
+  vixl32::Register out = OutputRegister(compare);
+  Location left = locations->InAt(0);
+  Location right = locations->InAt(1);
+
+  vixl32::Label less, greater, done;
+  Primitive::Type type = compare->InputAt(0)->GetType();
+  vixl32::Condition less_cond = vixl32::Condition(kNone);
+  switch (type) {
+    case Primitive::kPrimBoolean:
+    case Primitive::kPrimByte:
+    case Primitive::kPrimShort:
+    case Primitive::kPrimChar:
+    case Primitive::kPrimInt: {
+      // Emit move to `out` before the `Cmp`, as `Mov` might affect the status flags.
+      __ Mov(out, 0);
+      __ Cmp(RegisterFrom(left), RegisterFrom(right));  // Signed compare.
+      less_cond = lt;
+      break;
+    }
+    case Primitive::kPrimLong: {
+      __ Cmp(HighRegisterFrom(left), HighRegisterFrom(right));  // Signed compare.
+      __ B(lt, &less);
+      __ B(gt, &greater);
+      // Emit move to `out` before the last `Cmp`, as `Mov` might affect the status flags.
+      __ Mov(out, 0);
+      __ Cmp(LowRegisterFrom(left), LowRegisterFrom(right));  // Unsigned compare.
+      less_cond = lo;
+      break;
+    }
+    case Primitive::kPrimFloat:
+    case Primitive::kPrimDouble: {
+      __ Mov(out, 0);
+      GenerateVcmp(compare);
+      // To branch on the FP compare result we transfer FPSCR to APSR (encoded as PC in VMRS).
+      __ Vmrs(RegisterOrAPSR_nzcv(kPcCode), FPSCR);
+      less_cond = ARMFPCondition(kCondLT, compare->IsGtBias());
+      break;
+    }
+    default:
+      LOG(FATAL) << "Unexpected compare type " << type;
+      UNREACHABLE();
+  }
+
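+  // `out` holds 0 at this point: keep it on equality, otherwise materialize +1 or -1 below.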
+  __ B(eq, &done);
+  __ B(less_cond, &less);
+
+  __ Bind(&greater);
+  __ Mov(out, 1);
+  __ B(&done);
+
+  __ Bind(&less);
+  __ Mov(out, -1);
+
+  __ Bind(&done);
+}
+
+void LocationsBuilderARMVIXL::VisitPhi(HPhi* instruction) {
+  LocationSummary* locations =
+      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+  for (size_t i = 0, e = locations->GetInputCount(); i < e; ++i) {
+    locations->SetInAt(i, Location::Any());
+  }
+  locations->SetOut(Location::Any());
+}
+
+void InstructionCodeGeneratorARMVIXL::VisitPhi(HPhi* instruction ATTRIBUTE_UNUSED) {
+  LOG(FATAL) << "Unreachable";
+}
+
+void CodeGeneratorARMVIXL::GenerateMemoryBarrier(MemBarrierKind kind) {
+  // TODO (ported from quick): revisit ARM barrier kinds.
+  DmbOptions flavor = DmbOptions::ISH;  // Quiet C++ warnings.
+  switch (kind) {
+    case MemBarrierKind::kAnyStore:
+    case MemBarrierKind::kLoadAny:
+    case MemBarrierKind::kAnyAny: {
+      flavor = DmbOptions::ISH;
+      break;
+    }
+    case MemBarrierKind::kStoreStore: {
+      flavor = DmbOptions::ISHST;
+      break;
+    }
+    default:
+      LOG(FATAL) << "Unexpected memory barrier " << kind;
+  }
+  __ Dmb(flavor);
+}
+
+void InstructionCodeGeneratorARMVIXL::GenerateWideAtomicLoad(vixl32::Register addr,
+                                                             uint32_t offset,
+                                                             vixl32::Register out_lo,
+                                                             vixl32::Register out_hi) {
+  UseScratchRegisterScope temps(GetVIXLAssembler());
+  if (offset != 0) {
+    vixl32::Register temp = temps.Acquire();
+    __ Add(temp, addr, offset);
+    addr = temp;
+  }
+  __ Ldrexd(out_lo, out_hi, addr);
+}
+
+void InstructionCodeGeneratorARMVIXL::GenerateWideAtomicStore(vixl32::Register addr,
+                                                              uint32_t offset,
+                                                              vixl32::Register value_lo,
+                                                              vixl32::Register value_hi,
+                                                              vixl32::Register temp1,
+                                                              vixl32::Register temp2,
+                                                              HInstruction* instruction) {
+  UseScratchRegisterScope temps(GetVIXLAssembler());
+  vixl32::Label fail;
+  if (offset != 0) {
+    vixl32::Register temp = temps.Acquire();
+    __ Add(temp, addr, offset);
+    addr = temp;
+  }
+  __ Bind(&fail);
+  // We need a load followed by a store. (The address used in a STREX instruction must
+  // be the same as the address in the most recently executed LDREX instruction.)
+  __ Ldrexd(temp1, temp2, addr);
+  codegen_->MaybeRecordImplicitNullCheck(instruction);
+  __ Strexd(temp1, value_lo, value_hi, addr);
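+  // STREXD writes 0 to temp1 on success and 1 on failure; retry until the store succeeds.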
+  __ Cbnz(temp1, &fail);
+}
+
 void LocationsBuilderARMVIXL::HandleFieldSet(
     HInstruction* instruction, const FieldInfo& field_info) {
   DCHECK(instruction->IsInstanceFieldSet() || instruction->IsStaticFieldSet());
@@ -2606,7 +3560,7 @@
     }
 
     case Primitive::kPrimDouble: {
-      vixl32::DRegister value_reg = FromLowSToD(LowSRegisterFrom(value));
+      vixl32::DRegister value_reg = DRegisterFrom(value);
       if (is_volatile && !atomic_ldrd_strd) {
         vixl32::Register value_reg_lo = RegisterFrom(locations->GetTemp(0));
         vixl32::Register value_reg_hi = RegisterFrom(locations->GetTemp(1));
@@ -2697,6 +3651,78 @@
   }
 }
 
+Location LocationsBuilderARMVIXL::ArithmeticZeroOrFpuRegister(HInstruction* input) {
+  DCHECK(Primitive::IsFloatingPointType(input->GetType())) << input->GetType();
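+  // VCMP can compare a floating-point register directly against +0.0, so an arithmetic
+  // zero constant does not need to be materialized in an FPU register.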
+  if ((input->IsFloatConstant() && (input->AsFloatConstant()->IsArithmeticZero())) ||
+      (input->IsDoubleConstant() && (input->AsDoubleConstant()->IsArithmeticZero()))) {
+    return Location::ConstantLocation(input->AsConstant());
+  } else {
+    return Location::RequiresFpuRegister();
+  }
+}
+
+Location LocationsBuilderARMVIXL::ArmEncodableConstantOrRegister(HInstruction* constant,
+                                                                 Opcode opcode) {
+  DCHECK(!Primitive::IsFloatingPointType(constant->GetType()));
+  if (constant->IsConstant() &&
+      CanEncodeConstantAsImmediate(constant->AsConstant(), opcode)) {
+    return Location::ConstantLocation(constant->AsConstant());
+  }
+  return Location::RequiresRegister();
+}
+
+bool LocationsBuilderARMVIXL::CanEncodeConstantAsImmediate(HConstant* input_cst,
+                                                           Opcode opcode) {
+  uint64_t value = static_cast<uint64_t>(Int64FromConstant(input_cst));
+  if (Primitive::Is64BitType(input_cst->GetType())) {
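+    // For 64-bit ADD/SUB the two words are checked separately: the low word must be
+    // encodable with flags set and the high word as the carry-consuming opcode (ADC).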
+    Opcode high_opcode = opcode;
+    SetCc low_set_cc = kCcDontCare;
+    switch (opcode) {
+      case SUB:
+        // Flip the operation to an ADD.
+        value = -value;
+        opcode = ADD;
+        FALLTHROUGH_INTENDED;
+      case ADD:
+        if (Low32Bits(value) == 0u) {
+          return CanEncodeConstantAsImmediate(High32Bits(value), opcode, kCcDontCare);
+        }
+        high_opcode = ADC;
+        low_set_cc = kCcSet;
+        break;
+      default:
+        break;
+    }
+    return CanEncodeConstantAsImmediate(Low32Bits(value), opcode, low_set_cc) &&
+        CanEncodeConstantAsImmediate(High32Bits(value), high_opcode, kCcDontCare);
+  } else {
+    return CanEncodeConstantAsImmediate(Low32Bits(value), opcode);
+  }
+}
+
+// TODO(VIXL): Replace `art::arm::SetCc` with `vixl32::FlagsUpdate` once the flag-setting
+// optimization is enabled.
+bool LocationsBuilderARMVIXL::CanEncodeConstantAsImmediate(uint32_t value,
+                                                           Opcode opcode,
+                                                           SetCc set_cc) {
+  ArmVIXLAssembler* assembler = codegen_->GetAssembler();
+  if (assembler->ShifterOperandCanHold(opcode, value, set_cc)) {
+    return true;
+  }
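+  // The value itself does not fit; check whether the complementary instruction can encode
+  // the inverted (or negated) value instead.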
+  Opcode neg_opcode = kNoOperand;
+  switch (opcode) {
+    case AND: neg_opcode = BIC; value = ~value; break;
+    case ORR: neg_opcode = ORN; value = ~value; break;
+    case ADD: neg_opcode = SUB; value = -value; break;
+    case ADC: neg_opcode = SBC; value = ~value; break;
+    case SUB: neg_opcode = ADD; value = -value; break;
+    case SBC: neg_opcode = ADC; value = ~value; break;
+    default:
+      return false;
+  }
+  return assembler->ShifterOperandCanHold(neg_opcode, value, set_cc);
+}
+
 void InstructionCodeGeneratorARMVIXL::HandleFieldGet(HInstruction* instruction,
                                                      const FieldInfo& field_info) {
   DCHECK(instruction->IsInstanceFieldGet() || instruction->IsStaticFieldGet());
@@ -2762,7 +3788,7 @@
       break;
 
     case Primitive::kPrimDouble: {
-      vixl32::DRegister out_dreg = FromLowSToD(LowSRegisterFrom(out));
+      vixl32::DRegister out_dreg = DRegisterFrom(out);
       if (is_volatile && !atomic_ldrd_strd) {
         vixl32::Register lo = RegisterFrom(locations->GetTemp(0));
         vixl32::Register hi = RegisterFrom(locations->GetTemp(1));
@@ -2826,6 +3852,82 @@
   HandleFieldGet(instruction, instruction->GetFieldInfo());
 }
 
+void LocationsBuilderARMVIXL::VisitStaticFieldSet(HStaticFieldSet* instruction) {
+  HandleFieldSet(instruction, instruction->GetFieldInfo());
+}
+
+void InstructionCodeGeneratorARMVIXL::VisitStaticFieldSet(HStaticFieldSet* instruction) {
+  HandleFieldSet(instruction, instruction->GetFieldInfo(), instruction->GetValueCanBeNull());
+}
+
+void LocationsBuilderARMVIXL::VisitUnresolvedInstanceFieldGet(
+    HUnresolvedInstanceFieldGet* instruction) {
+  FieldAccessCallingConventionARMVIXL calling_convention;
+  codegen_->CreateUnresolvedFieldLocationSummary(
+      instruction, instruction->GetFieldType(), calling_convention);
+}
+
+void InstructionCodeGeneratorARMVIXL::VisitUnresolvedInstanceFieldGet(
+    HUnresolvedInstanceFieldGet* instruction) {
+  FieldAccessCallingConventionARMVIXL calling_convention;
+  codegen_->GenerateUnresolvedFieldAccess(instruction,
+                                          instruction->GetFieldType(),
+                                          instruction->GetFieldIndex(),
+                                          instruction->GetDexPc(),
+                                          calling_convention);
+}
+
+void LocationsBuilderARMVIXL::VisitUnresolvedInstanceFieldSet(
+    HUnresolvedInstanceFieldSet* instruction) {
+  FieldAccessCallingConventionARMVIXL calling_convention;
+  codegen_->CreateUnresolvedFieldLocationSummary(
+      instruction, instruction->GetFieldType(), calling_convention);
+}
+
+void InstructionCodeGeneratorARMVIXL::VisitUnresolvedInstanceFieldSet(
+    HUnresolvedInstanceFieldSet* instruction) {
+  FieldAccessCallingConventionARMVIXL calling_convention;
+  codegen_->GenerateUnresolvedFieldAccess(instruction,
+                                          instruction->GetFieldType(),
+                                          instruction->GetFieldIndex(),
+                                          instruction->GetDexPc(),
+                                          calling_convention);
+}
+
+void LocationsBuilderARMVIXL::VisitUnresolvedStaticFieldGet(
+    HUnresolvedStaticFieldGet* instruction) {
+  FieldAccessCallingConventionARMVIXL calling_convention;
+  codegen_->CreateUnresolvedFieldLocationSummary(
+      instruction, instruction->GetFieldType(), calling_convention);
+}
+
+void InstructionCodeGeneratorARMVIXL::VisitUnresolvedStaticFieldGet(
+    HUnresolvedStaticFieldGet* instruction) {
+  FieldAccessCallingConventionARMVIXL calling_convention;
+  codegen_->GenerateUnresolvedFieldAccess(instruction,
+                                          instruction->GetFieldType(),
+                                          instruction->GetFieldIndex(),
+                                          instruction->GetDexPc(),
+                                          calling_convention);
+}
+
+void LocationsBuilderARMVIXL::VisitUnresolvedStaticFieldSet(
+    HUnresolvedStaticFieldSet* instruction) {
+  FieldAccessCallingConventionARMVIXL calling_convention;
+  codegen_->CreateUnresolvedFieldLocationSummary(
+      instruction, instruction->GetFieldType(), calling_convention);
+}
+
+void InstructionCodeGeneratorARMVIXL::VisitUnresolvedStaticFieldSet(
+    HUnresolvedStaticFieldSet* instruction) {
+  FieldAccessCallingConventionARMVIXL calling_convention;
+  codegen_->GenerateUnresolvedFieldAccess(instruction,
+                                          instruction->GetFieldType(),
+                                          instruction->GetFieldIndex(),
+                                          instruction->GetDexPc(),
+                                          calling_convention);
+}
+
 void LocationsBuilderARMVIXL::VisitNullCheck(HNullCheck* instruction) {
   // TODO(VIXL): https://android-review.googlesource.com/#/c/275337/
   LocationSummary::CallKind call_kind = instruction->CanThrowIntoCatchBlock()
@@ -2862,6 +3964,606 @@
   codegen_->GenerateNullCheck(instruction);
 }
 
+static LoadOperandType GetLoadOperandType(Primitive::Type type) {
+  switch (type) {
+    case Primitive::kPrimNot:
+      return kLoadWord;
+    case Primitive::kPrimBoolean:
+      return kLoadUnsignedByte;
+    case Primitive::kPrimByte:
+      return kLoadSignedByte;
+    case Primitive::kPrimChar:
+      return kLoadUnsignedHalfword;
+    case Primitive::kPrimShort:
+      return kLoadSignedHalfword;
+    case Primitive::kPrimInt:
+      return kLoadWord;
+    case Primitive::kPrimLong:
+      return kLoadWordPair;
+    case Primitive::kPrimFloat:
+      return kLoadSWord;
+    case Primitive::kPrimDouble:
+      return kLoadDWord;
+    default:
+      LOG(FATAL) << "Unreachable type " << type;
+      UNREACHABLE();
+  }
+}
+
+static StoreOperandType GetStoreOperandType(Primitive::Type type) {
+  switch (type) {
+    case Primitive::kPrimNot:
+      return kStoreWord;
+    case Primitive::kPrimBoolean:
+    case Primitive::kPrimByte:
+      return kStoreByte;
+    case Primitive::kPrimChar:
+    case Primitive::kPrimShort:
+      return kStoreHalfword;
+    case Primitive::kPrimInt:
+      return kStoreWord;
+    case Primitive::kPrimLong:
+      return kStoreWordPair;
+    case Primitive::kPrimFloat:
+      return kStoreSWord;
+    case Primitive::kPrimDouble:
+      return kStoreDWord;
+    default:
+      LOG(FATAL) << "Unreachable type " << type;
+      UNREACHABLE();
+  }
+}
+
+void CodeGeneratorARMVIXL::LoadFromShiftedRegOffset(Primitive::Type type,
+                                                    Location out_loc,
+                                                    vixl32::Register base,
+                                                    vixl32::Register reg_index,
+                                                    vixl32::Condition cond) {
+  uint32_t shift_count = Primitive::ComponentSizeShift(type);
+  MemOperand mem_address(base, reg_index, vixl32::LSL, shift_count);
+
+  switch (type) {
+    case Primitive::kPrimByte:
+      __ Ldrsb(cond, RegisterFrom(out_loc), mem_address);
+      break;
+    case Primitive::kPrimBoolean:
+      __ Ldrb(cond, RegisterFrom(out_loc), mem_address);
+      break;
+    case Primitive::kPrimShort:
+      __ Ldrsh(cond, RegisterFrom(out_loc), mem_address);
+      break;
+    case Primitive::kPrimChar:
+      __ Ldrh(cond, RegisterFrom(out_loc), mem_address);
+      break;
+    case Primitive::kPrimNot:
+    case Primitive::kPrimInt:
+      __ Ldr(cond, RegisterFrom(out_loc), mem_address);
+      break;
+    // T32 doesn't support LoadFromShiftedRegOffset mem address mode for these types.
+    case Primitive::kPrimLong:
+    case Primitive::kPrimFloat:
+    case Primitive::kPrimDouble:
+    default:
+      LOG(FATAL) << "Unreachable type " << type;
+      UNREACHABLE();
+  }
+}
+
+void CodeGeneratorARMVIXL::StoreToShiftedRegOffset(Primitive::Type type,
+                                                   Location loc,
+                                                   vixl32::Register base,
+                                                   vixl32::Register reg_index,
+                                                   vixl32::Condition cond) {
+  uint32_t shift_count = Primitive::ComponentSizeShift(type);
+  MemOperand mem_address(base, reg_index, vixl32::LSL, shift_count);
+
+  switch (type) {
+    case Primitive::kPrimByte:
+    case Primitive::kPrimBoolean:
+      __ Strb(cond, RegisterFrom(loc), mem_address);
+      break;
+    case Primitive::kPrimShort:
+    case Primitive::kPrimChar:
+      __ Strh(cond, RegisterFrom(loc), mem_address);
+      break;
+    case Primitive::kPrimNot:
+    case Primitive::kPrimInt:
+      __ Str(cond, RegisterFrom(loc), mem_address);
+      break;
+    // T32 doesn't support StoreToShiftedRegOffset mem address mode for these types.
+    case Primitive::kPrimLong:
+    case Primitive::kPrimFloat:
+    case Primitive::kPrimDouble:
+    default:
+      LOG(FATAL) << "Unreachable type " << type;
+      UNREACHABLE();
+  }
+}
+
+void LocationsBuilderARMVIXL::VisitArrayGet(HArrayGet* instruction) {
+  bool object_array_get_with_read_barrier =
+      kEmitCompilerReadBarrier && (instruction->GetType() == Primitive::kPrimNot);
+  LocationSummary* locations =
+      new (GetGraph()->GetArena()) LocationSummary(instruction,
+                                                   object_array_get_with_read_barrier ?
+                                                       LocationSummary::kCallOnSlowPath :
+                                                       LocationSummary::kNoCall);
+  if (object_array_get_with_read_barrier && kUseBakerReadBarrier) {
+    TODO_VIXL32(FATAL);
+  }
+  locations->SetInAt(0, Location::RequiresRegister());
+  locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
+  if (Primitive::IsFloatingPointType(instruction->GetType())) {
+    locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
+  } else {
+    // The output overlaps in the case of an object array get with
+    // read barriers enabled: we do not want the move to overwrite the
+    // array's location, as we need it to emit the read barrier.
+    locations->SetOut(
+        Location::RequiresRegister(),
+        object_array_get_with_read_barrier ? Location::kOutputOverlap : Location::kNoOutputOverlap);
+  }
+  // We need a temporary register for the read barrier marking slow
+  // path in CodeGeneratorARM::GenerateArrayLoadWithBakerReadBarrier.
+  // It is also needed for the String compression feature.
+  if ((object_array_get_with_read_barrier && kUseBakerReadBarrier)
+      || (mirror::kUseStringCompression && instruction->IsStringCharAt())) {
+    locations->AddTemp(Location::RequiresRegister());
+  }
+}
+
+void InstructionCodeGeneratorARMVIXL::VisitArrayGet(HArrayGet* instruction) {
+  UseScratchRegisterScope temps(GetAssembler()->GetVIXLAssembler());
+  LocationSummary* locations = instruction->GetLocations();
+  Location obj_loc = locations->InAt(0);
+  vixl32::Register obj = InputRegisterAt(instruction, 0);
+  Location index = locations->InAt(1);
+  Location out_loc = locations->Out();
+  uint32_t data_offset = CodeGenerator::GetArrayDataOffset(instruction);
+  Primitive::Type type = instruction->GetType();
+  const bool maybe_compressed_char_at = mirror::kUseStringCompression &&
+                                        instruction->IsStringCharAt();
+  HInstruction* array_instr = instruction->GetArray();
+  bool has_intermediate_address = array_instr->IsIntermediateAddress();
+  // The read barrier instrumentation does not support the HIntermediateAddress instruction yet.
+  DCHECK(!(has_intermediate_address && kEmitCompilerReadBarrier));
+
+  switch (type) {
+    case Primitive::kPrimBoolean:
+    case Primitive::kPrimByte:
+    case Primitive::kPrimShort:
+    case Primitive::kPrimChar:
+    case Primitive::kPrimInt: {
+      vixl32::Register length;
+      if (maybe_compressed_char_at) {
+        length = RegisterFrom(locations->GetTemp(0));
+        uint32_t count_offset = mirror::String::CountOffset().Uint32Value();
+        GetAssembler()->LoadFromOffset(kLoadWord, length, obj, count_offset);
+        codegen_->MaybeRecordImplicitNullCheck(instruction);
+      }
+      if (index.IsConstant()) {
+        int32_t const_index = index.GetConstant()->AsIntConstant()->GetValue();
+        if (maybe_compressed_char_at) {
+          vixl32::Label uncompressed_load, done;
+          __ Lsrs(length, length, 1u);  // LSRS has a 16-bit encoding, TST (immediate) does not.
+          static_assert(static_cast<uint32_t>(mirror::StringCompressionFlag::kCompressed) == 0u,
+                        "Expecting 0=compressed, 1=uncompressed");
+          __ B(cs, &uncompressed_load);
+          GetAssembler()->LoadFromOffset(kLoadUnsignedByte,
+                                         RegisterFrom(out_loc),
+                                         obj,
+                                         data_offset + const_index);
+          __ B(&done);
+          __ Bind(&uncompressed_load);
+          GetAssembler()->LoadFromOffset(GetLoadOperandType(Primitive::kPrimChar),
+                                         RegisterFrom(out_loc),
+                                         obj,
+                                         data_offset + (const_index << 1));
+          __ Bind(&done);
+        } else {
+          uint32_t full_offset = data_offset + (const_index << Primitive::ComponentSizeShift(type));
+
+          LoadOperandType load_type = GetLoadOperandType(type);
+          GetAssembler()->LoadFromOffset(load_type, RegisterFrom(out_loc), obj, full_offset);
+        }
+      } else {
+        vixl32::Register temp = temps.Acquire();
+
+        if (has_intermediate_address) {
+          // We do not need to compute the intermediate address from the array: the
+          // input instruction has done it already. See the comment in
+          // `TryExtractArrayAccessAddress()`.
+          if (kIsDebugBuild) {
+            HIntermediateAddress* tmp = array_instr->AsIntermediateAddress();
+            DCHECK_EQ(tmp->GetOffset()->AsIntConstant()->GetValueAsUint64(), data_offset);
+          }
+          temp = obj;
+        } else {
+          __ Add(temp, obj, data_offset);
+        }
+        if (maybe_compressed_char_at) {
+          vixl32::Label uncompressed_load, done;
+          __ Lsrs(length, length, 1u);  // LSRS has a 16-bit encoding, TST (immediate) does not.
+          static_assert(static_cast<uint32_t>(mirror::StringCompressionFlag::kCompressed) == 0u,
+                        "Expecting 0=compressed, 1=uncompressed");
+          __ B(cs, &uncompressed_load);
+          __ Ldrb(RegisterFrom(out_loc), MemOperand(temp, RegisterFrom(index), vixl32::LSL, 0));
+          __ B(&done);
+          __ Bind(&uncompressed_load);
+          __ Ldrh(RegisterFrom(out_loc), MemOperand(temp, RegisterFrom(index), vixl32::LSL, 1));
+          __ Bind(&done);
+        } else {
+          codegen_->LoadFromShiftedRegOffset(type, out_loc, temp, RegisterFrom(index));
+        }
+      }
+      break;
+    }
+
+    case Primitive::kPrimNot: {
+      static_assert(
+          sizeof(mirror::HeapReference<mirror::Object>) == sizeof(int32_t),
+          "art::mirror::HeapReference<art::mirror::Object> and int32_t have different sizes.");
+      // /* HeapReference<Object> */ out =
+      //     *(obj + data_offset + index * sizeof(HeapReference<Object>))
+      if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
+        TODO_VIXL32(FATAL);
+      } else {
+        vixl32::Register out = OutputRegister(instruction);
+        if (index.IsConstant()) {
+          size_t offset =
+              (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
+          GetAssembler()->LoadFromOffset(kLoadWord, out, obj, offset);
+          codegen_->MaybeRecordImplicitNullCheck(instruction);
+          // If read barriers are enabled, emit read barriers other than
+          // Baker's using a slow path (and also unpoison the loaded
+          // reference, if heap poisoning is enabled).
+          codegen_->MaybeGenerateReadBarrierSlow(instruction, out_loc, out_loc, obj_loc, offset);
+        } else {
+          vixl32::Register temp = temps.Acquire();
+
+          if (has_intermediate_address) {
+            // We do not need to compute the intermediate address from the array: the
+            // input instruction has done it already. See the comment in
+            // `TryExtractArrayAccessAddress()`.
+            if (kIsDebugBuild) {
+              HIntermediateAddress* tmp = array_instr->AsIntermediateAddress();
+              DCHECK_EQ(tmp->GetOffset()->AsIntConstant()->GetValueAsUint64(), data_offset);
+            }
+            temp = obj;
+          } else {
+            __ Add(temp, obj, data_offset);
+          }
+          codegen_->LoadFromShiftedRegOffset(type, out_loc, temp, RegisterFrom(index));
+
+          codegen_->MaybeRecordImplicitNullCheck(instruction);
+          // If read barriers are enabled, emit read barriers other than
+          // Baker's using a slow path (and also unpoison the loaded
+          // reference, if heap poisoning is enabled).
+          codegen_->MaybeGenerateReadBarrierSlow(
+              instruction, out_loc, out_loc, obj_loc, data_offset, index);
+        }
+      }
+      break;
+    }
+
+    case Primitive::kPrimLong: {
+      if (index.IsConstant()) {
+        size_t offset =
+            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset;
+        GetAssembler()->LoadFromOffset(kLoadWordPair, LowRegisterFrom(out_loc), obj, offset);
+      } else {
+        vixl32::Register temp = temps.Acquire();
+        __ Add(temp, obj, Operand(RegisterFrom(index), vixl32::LSL, TIMES_8));
+        GetAssembler()->LoadFromOffset(kLoadWordPair, LowRegisterFrom(out_loc), temp, data_offset);
+      }
+      break;
+    }
+
+    case Primitive::kPrimFloat: {
+      vixl32::SRegister out = SRegisterFrom(out_loc);
+      if (index.IsConstant()) {
+        size_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
+        GetAssembler()->LoadSFromOffset(out, obj, offset);
+      } else {
+        vixl32::Register temp = temps.Acquire();
+        __ Add(temp, obj, Operand(RegisterFrom(index), vixl32::LSL, TIMES_4));
+        GetAssembler()->LoadSFromOffset(out, temp, data_offset);
+      }
+      break;
+    }
+
+    case Primitive::kPrimDouble: {
+      if (index.IsConstant()) {
+        size_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset;
+        GetAssembler()->LoadDFromOffset(DRegisterFrom(out_loc), obj, offset);
+      } else {
+        vixl32::Register temp = temps.Acquire();
+        __ Add(temp, obj, Operand(RegisterFrom(index), vixl32::LSL, TIMES_8));
+        GetAssembler()->LoadDFromOffset(DRegisterFrom(out_loc), temp, data_offset);
+      }
+      break;
+    }
+
+    case Primitive::kPrimVoid:
+      LOG(FATAL) << "Unreachable type " << type;
+      UNREACHABLE();
+  }
+
+  if (type == Primitive::kPrimNot) {
+    // Potential implicit null checks, in the case of reference
+    // arrays, are handled in the previous switch statement.
+  } else if (!maybe_compressed_char_at) {
+    codegen_->MaybeRecordImplicitNullCheck(instruction);
+  }
+}
+
+void LocationsBuilderARMVIXL::VisitArraySet(HArraySet* instruction) {
+  Primitive::Type value_type = instruction->GetComponentType();
+
+  bool needs_write_barrier =
+      CodeGenerator::StoreNeedsWriteBarrier(value_type, instruction->GetValue());
+  bool may_need_runtime_call_for_type_check = instruction->NeedsTypeCheck();
+
+  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(
+      instruction,
+      may_need_runtime_call_for_type_check ?
+          LocationSummary::kCallOnSlowPath :
+          LocationSummary::kNoCall);
+
+  locations->SetInAt(0, Location::RequiresRegister());
+  locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
+  if (Primitive::IsFloatingPointType(value_type)) {
+    locations->SetInAt(2, Location::RequiresFpuRegister());
+  } else {
+    locations->SetInAt(2, Location::RequiresRegister());
+  }
+  if (needs_write_barrier) {
+    // Temporary registers for the write barrier.
+    locations->AddTemp(Location::RequiresRegister());  // Possibly used for ref. poisoning too.
+    locations->AddTemp(Location::RequiresRegister());
+  }
+}
+
+void InstructionCodeGeneratorARMVIXL::VisitArraySet(HArraySet* instruction) {
+  UseScratchRegisterScope temps(GetAssembler()->GetVIXLAssembler());
+  LocationSummary* locations = instruction->GetLocations();
+  vixl32::Register array = InputRegisterAt(instruction, 0);
+  Location index = locations->InAt(1);
+  Primitive::Type value_type = instruction->GetComponentType();
+  bool may_need_runtime_call_for_type_check = instruction->NeedsTypeCheck();
+  bool needs_write_barrier =
+      CodeGenerator::StoreNeedsWriteBarrier(value_type, instruction->GetValue());
+  uint32_t data_offset =
+      mirror::Array::DataOffset(Primitive::ComponentSize(value_type)).Uint32Value();
+  Location value_loc = locations->InAt(2);
+  HInstruction* array_instr = instruction->GetArray();
+  bool has_intermediate_address = array_instr->IsIntermediateAddress();
+  // The read barrier instrumentation does not support the HIntermediateAddress instruction yet.
+  DCHECK(!(has_intermediate_address && kEmitCompilerReadBarrier));
+
+  switch (value_type) {
+    case Primitive::kPrimBoolean:
+    case Primitive::kPrimByte:
+    case Primitive::kPrimShort:
+    case Primitive::kPrimChar:
+    case Primitive::kPrimInt: {
+      if (index.IsConstant()) {
+        int32_t const_index = index.GetConstant()->AsIntConstant()->GetValue();
+        uint32_t full_offset =
+            data_offset + (const_index << Primitive::ComponentSizeShift(value_type));
+        StoreOperandType store_type = GetStoreOperandType(value_type);
+        GetAssembler()->StoreToOffset(store_type, RegisterFrom(value_loc), array, full_offset);
+      } else {
+        vixl32::Register temp = temps.Acquire();
+
+        if (has_intermediate_address) {
+          // We do not need to compute the intermediate address from the array: the
+          // input instruction has done it already. See the comment in
+          // `TryExtractArrayAccessAddress()`.
+          if (kIsDebugBuild) {
+            HIntermediateAddress* tmp = array_instr->AsIntermediateAddress();
+            DCHECK(tmp->GetOffset()->AsIntConstant()->GetValueAsUint64() == data_offset);
+          }
+          temp = array;
+        } else {
+          __ Add(temp, array, data_offset);
+        }
+        codegen_->StoreToShiftedRegOffset(value_type, value_loc, temp, RegisterFrom(index));
+      }
+      break;
+    }
+
+    case Primitive::kPrimNot: {
+      vixl32::Register value = RegisterFrom(value_loc);
+      // TryExtractArrayAccessAddress optimization is never applied for non-primitive ArraySet.
+      // See the comment in instruction_simplifier_shared.cc.
+      DCHECK(!has_intermediate_address);
+
+      if (instruction->InputAt(2)->IsNullConstant()) {
+        // Just setting null.
+        if (index.IsConstant()) {
+          size_t offset =
+              (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
+          GetAssembler()->StoreToOffset(kStoreWord, value, array, offset);
+        } else {
+          DCHECK(index.IsRegister()) << index;
+          vixl32::Register temp = temps.Acquire();
+          __ Add(temp, array, data_offset);
+          codegen_->StoreToShiftedRegOffset(value_type, value_loc, temp, RegisterFrom(index));
+        }
+        codegen_->MaybeRecordImplicitNullCheck(instruction);
+        DCHECK(!needs_write_barrier);
+        DCHECK(!may_need_runtime_call_for_type_check);
+        break;
+      }
+
+      DCHECK(needs_write_barrier);
+      Location temp1_loc = locations->GetTemp(0);
+      vixl32::Register temp1 = RegisterFrom(temp1_loc);
+      Location temp2_loc = locations->GetTemp(1);
+      vixl32::Register temp2 = RegisterFrom(temp2_loc);
+      uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
+      uint32_t super_offset = mirror::Class::SuperClassOffset().Int32Value();
+      uint32_t component_offset = mirror::Class::ComponentTypeOffset().Int32Value();
+      vixl32::Label done;
+      SlowPathCodeARMVIXL* slow_path = nullptr;
+
+      if (may_need_runtime_call_for_type_check) {
+        slow_path = new (GetGraph()->GetArena()) ArraySetSlowPathARMVIXL(instruction);
+        codegen_->AddSlowPath(slow_path);
+        if (instruction->GetValueCanBeNull()) {
+          vixl32::Label non_zero;
+          __ Cbnz(value, &non_zero);
+          if (index.IsConstant()) {
+            size_t offset =
+               (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
+            GetAssembler()->StoreToOffset(kStoreWord, value, array, offset);
+          } else {
+            DCHECK(index.IsRegister()) << index;
+            vixl32::Register temp = temps.Acquire();
+            __ Add(temp, array, data_offset);
+            codegen_->StoreToShiftedRegOffset(value_type, value_loc, temp, RegisterFrom(index));
+          }
+          codegen_->MaybeRecordImplicitNullCheck(instruction);
+          __ B(&done);
+          __ Bind(&non_zero);
+        }
+
+        // Note that when read barriers are enabled, the type checks
+        // are performed without read barriers.  This is fine, even in
+        // the case where a class object is in the from-space after
+        // the flip, as a comparison involving such a type would not
+        // produce a false positive; it may of course produce a false
+        // negative, in which case we would take the ArraySet slow
+        // path.
+
+        // /* HeapReference<Class> */ temp1 = array->klass_
+        GetAssembler()->LoadFromOffset(kLoadWord, temp1, array, class_offset);
+        codegen_->MaybeRecordImplicitNullCheck(instruction);
+        GetAssembler()->MaybeUnpoisonHeapReference(temp1);
+
+        // /* HeapReference<Class> */ temp1 = temp1->component_type_
+        GetAssembler()->LoadFromOffset(kLoadWord, temp1, temp1, component_offset);
+        // /* HeapReference<Class> */ temp2 = value->klass_
+        GetAssembler()->LoadFromOffset(kLoadWord, temp2, value, class_offset);
+        // If heap poisoning is enabled, no need to unpoison `temp1`
+        // nor `temp2`, as we are comparing two poisoned references.
+        __ Cmp(temp1, temp2);
+
+        if (instruction->StaticTypeOfArrayIsObjectArray()) {
+          vixl32::Label do_put;
+          __ B(eq, &do_put);
+          // If heap poisoning is enabled, the `temp1` reference has
+          // not been unpoisoned yet; unpoison it now.
+          GetAssembler()->MaybeUnpoisonHeapReference(temp1);
+
+          // /* HeapReference<Class> */ temp1 = temp1->super_class_
+          GetAssembler()->LoadFromOffset(kLoadWord, temp1, temp1, super_offset);
+          // If heap poisoning is enabled, no need to unpoison
+          // `temp1`, as we are comparing against null below.
+          __ Cbnz(temp1, slow_path->GetEntryLabel());
+          __ Bind(&do_put);
+        } else {
+          __ B(ne, slow_path->GetEntryLabel());
+        }
+      }
+
+      vixl32::Register source = value;
+      if (kPoisonHeapReferences) {
+        // Note that in the case where `value` is a null reference,
+        // we do not enter this block, as a null reference does not
+        // need poisoning.
+        DCHECK_EQ(value_type, Primitive::kPrimNot);
+        __ Mov(temp1, value);
+        GetAssembler()->PoisonHeapReference(temp1);
+        source = temp1;
+      }
+
+      if (index.IsConstant()) {
+        size_t offset =
+            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
+        GetAssembler()->StoreToOffset(kStoreWord, source, array, offset);
+      } else {
+        DCHECK(index.IsRegister()) << index;
+
+        vixl32::Register temp = temps.Acquire();
+        __ Add(temp, array, data_offset);
+        codegen_->StoreToShiftedRegOffset(value_type,
+                                          LocationFrom(source),
+                                          temp,
+                                          RegisterFrom(index));
+      }
+
+      if (!may_need_runtime_call_for_type_check) {
+        codegen_->MaybeRecordImplicitNullCheck(instruction);
+      }
+
+      codegen_->MarkGCCard(temp1, temp2, array, value, instruction->GetValueCanBeNull());
+
+      if (done.IsReferenced()) {
+        __ Bind(&done);
+      }
+
+      if (slow_path != nullptr) {
+        __ Bind(slow_path->GetExitLabel());
+      }
+
+      break;
+    }
+
+    case Primitive::kPrimLong: {
+      Location value = locations->InAt(2);
+      if (index.IsConstant()) {
+        size_t offset =
+            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset;
+        GetAssembler()->StoreToOffset(kStoreWordPair, LowRegisterFrom(value), array, offset);
+      } else {
+        vixl32::Register temp = temps.Acquire();
+        __ Add(temp, array, Operand(RegisterFrom(index), vixl32::LSL, TIMES_8));
+        GetAssembler()->StoreToOffset(kStoreWordPair, LowRegisterFrom(value), temp, data_offset);
+      }
+      break;
+    }
+
+    case Primitive::kPrimFloat: {
+      Location value = locations->InAt(2);
+      DCHECK(value.IsFpuRegister());
+      if (index.IsConstant()) {
+        size_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
+        GetAssembler()->StoreSToOffset(SRegisterFrom(value), array, offset);
+      } else {
+        vixl32::Register temp = temps.Acquire();
+        __ Add(temp, array, Operand(RegisterFrom(index), vixl32::LSL, TIMES_4));
+        GetAssembler()->StoreSToOffset(SRegisterFrom(value), temp, data_offset);
+      }
+      break;
+    }
+
+    case Primitive::kPrimDouble: {
+      Location value = locations->InAt(2);
+      DCHECK(value.IsFpuRegisterPair());
+      if (index.IsConstant()) {
+        size_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset;
+        GetAssembler()->StoreDToOffset(DRegisterFrom(value), array, offset);
+      } else {
+        vixl32::Register temp = temps.Acquire();
+        __ Add(temp, array, Operand(RegisterFrom(index), vixl32::LSL, TIMES_8));
+        GetAssembler()->StoreDToOffset(DRegisterFrom(value), temp, data_offset);
+      }
+      break;
+    }
+
+    case Primitive::kPrimVoid:
+      LOG(FATAL) << "Unreachable type " << value_type;
+      UNREACHABLE();
+  }
+
+  // Objects are handled in the switch.
+  if (value_type != Primitive::kPrimNot) {
+    codegen_->MaybeRecordImplicitNullCheck(instruction);
+  }
+}
+
 void LocationsBuilderARMVIXL::VisitArrayLength(HArrayLength* instruction) {
   LocationSummary* locations =
       new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
@@ -2875,7 +4577,58 @@
   vixl32::Register out = OutputRegister(instruction);
   GetAssembler()->LoadFromOffset(kLoadWord, out, obj, offset);
   codegen_->MaybeRecordImplicitNullCheck(instruction);
-  // TODO(VIXL): https://android-review.googlesource.com/#/c/272625/
+  // Mask out compression flag from String's array length.
+  if (mirror::kUseStringCompression && instruction->IsStringLength()) {
+    __ Lsr(out, out, 1u);
+  }
+}
+
+void LocationsBuilderARMVIXL::VisitIntermediateAddress(HIntermediateAddress* instruction) {
+  // The read barrier instrumentation does not support the HIntermediateAddress instruction yet.
+  DCHECK(!kEmitCompilerReadBarrier);
+  LocationSummary* locations =
+      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+
+  locations->SetInAt(0, Location::RequiresRegister());
+  locations->SetInAt(1, Location::RegisterOrConstant(instruction->GetOffset()));
+  locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+}
+
+void InstructionCodeGeneratorARMVIXL::VisitIntermediateAddress(HIntermediateAddress* instruction) {
+  vixl32::Register out = OutputRegister(instruction);
+  vixl32::Register first = InputRegisterAt(instruction, 0);
+  Location second = instruction->GetLocations()->InAt(1);
+
+  // The read barrier instrumentation does not support the HIntermediateAddress instruction yet.
+  DCHECK(!kEmitCompilerReadBarrier);
+
+  if (second.IsRegister()) {
+    __ Add(out, first, RegisterFrom(second));
+  } else {
+    __ Add(out, first, second.GetConstant()->AsIntConstant()->GetValue());
+  }
+}
+
+void LocationsBuilderARMVIXL::VisitBoundsCheck(HBoundsCheck* instruction) {
+  RegisterSet caller_saves = RegisterSet::Empty();
+  InvokeRuntimeCallingConventionARMVIXL calling_convention;
+  caller_saves.Add(LocationFrom(calling_convention.GetRegisterAt(0)));
+  caller_saves.Add(LocationFrom(calling_convention.GetRegisterAt(1)));
+  LocationSummary* locations = codegen_->CreateThrowingSlowPathLocations(instruction, caller_saves);
+  locations->SetInAt(0, Location::RequiresRegister());
+  locations->SetInAt(1, Location::RequiresRegister());
+}
+
+void InstructionCodeGeneratorARMVIXL::VisitBoundsCheck(HBoundsCheck* instruction) {
+  SlowPathCodeARMVIXL* slow_path =
+      new (GetGraph()->GetArena()) BoundsCheckSlowPathARMVIXL(instruction);
+  codegen_->AddSlowPath(slow_path);
+
+  vixl32::Register index = InputRegisterAt(instruction, 0);
+  vixl32::Register length = InputRegisterAt(instruction, 1);
+
+  __ Cmp(index, length);
+  __ B(hs, slow_path->GetEntryLabel());
 }
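+
+// Illustrative sketch of why the single unsigned compare above (B(hs)) covers
+// both failure modes: a negative index reinterpreted as unsigned is larger
+// than any valid length, so it also takes the slow path. Stand-alone helper
+// for illustration only.
+static inline bool BoundsCheckFailsSketch(int32_t index, int32_t length) {
+  return static_cast<uint32_t>(index) >= static_cast<uint32_t>(length);
+}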
 
 void CodeGeneratorARMVIXL::MarkGCCard(vixl32::Register temp,
@@ -2989,17 +4742,33 @@
       GetAssembler()->StoreToOffset(kStoreWord, temp, sp, destination.GetStackIndex());
     }
   } else if (source.IsFpuRegister()) {
-    TODO_VIXL32(FATAL);
+    if (destination.IsRegister()) {
+      __ Vmov(RegisterFrom(destination), SRegisterFrom(source));
+    } else if (destination.IsFpuRegister()) {
+      __ Vmov(SRegisterFrom(destination), SRegisterFrom(source));
+    } else {
+      DCHECK(destination.IsStackSlot());
+      GetAssembler()->StoreSToOffset(SRegisterFrom(source), sp, destination.GetStackIndex());
+    }
   } else if (source.IsDoubleStackSlot()) {
-    TODO_VIXL32(FATAL);
+    if (destination.IsDoubleStackSlot()) {
+      vixl32::DRegister temp = temps.AcquireD();
+      GetAssembler()->LoadDFromOffset(temp, sp, source.GetStackIndex());
+      GetAssembler()->StoreDToOffset(temp, sp, destination.GetStackIndex());
+    } else if (destination.IsRegisterPair()) {
+      DCHECK(ExpectedPairLayout(destination));
+      GetAssembler()->LoadFromOffset(
+          kLoadWordPair, LowRegisterFrom(destination), sp, source.GetStackIndex());
+    } else {
+      DCHECK(destination.IsFpuRegisterPair()) << destination;
+      GetAssembler()->LoadDFromOffset(DRegisterFrom(destination), sp, source.GetStackIndex());
+    }
   } else if (source.IsRegisterPair()) {
     if (destination.IsRegisterPair()) {
       __ Mov(LowRegisterFrom(destination), LowRegisterFrom(source));
       __ Mov(HighRegisterFrom(destination), HighRegisterFrom(source));
     } else if (destination.IsFpuRegisterPair()) {
-      __ Vmov(FromLowSToD(LowSRegisterFrom(destination)),
-              LowRegisterFrom(source),
-              HighRegisterFrom(source));
+      __ Vmov(DRegisterFrom(destination), LowRegisterFrom(source), HighRegisterFrom(source));
     } else {
       DCHECK(destination.IsDoubleStackSlot()) << destination;
       DCHECK(ExpectedPairLayout(source));
@@ -3009,7 +4778,14 @@
                                     destination.GetStackIndex());
     }
   } else if (source.IsFpuRegisterPair()) {
-    TODO_VIXL32(FATAL);
+    if (destination.IsRegisterPair()) {
+      __ Vmov(LowRegisterFrom(destination), HighRegisterFrom(destination), DRegisterFrom(source));
+    } else if (destination.IsFpuRegisterPair()) {
+      __ Vmov(DRegisterFrom(destination), DRegisterFrom(source));
+    } else {
+      DCHECK(destination.IsDoubleStackSlot()) << destination;
+      GetAssembler()->StoreDToOffset(DRegisterFrom(source), sp, destination.GetStackIndex());
+    }
   } else {
     DCHECK(source.IsConstant()) << source;
     HConstant* constant = source.GetConstant();
@@ -3042,7 +4818,7 @@
     } else if (constant->IsDoubleConstant()) {
       double value = constant->AsDoubleConstant()->GetValue();
       if (destination.IsFpuRegisterPair()) {
-        __ Vmov(FromLowSToD(LowSRegisterFrom(destination)), value);
+        __ Vmov(DRegisterFrom(destination), value);
       } else {
         DCHECK(destination.IsDoubleStackSlot()) << destination;
         uint64_t int_value = bit_cast<uint64_t, double>(value);
@@ -3070,18 +4846,82 @@
   }
 }
 
-void ParallelMoveResolverARMVIXL::Exchange(Register reg ATTRIBUTE_UNUSED,
-                                           int mem ATTRIBUTE_UNUSED) {
-  TODO_VIXL32(FATAL);
+void ParallelMoveResolverARMVIXL::Exchange(vixl32::Register reg, int mem) {
+  UseScratchRegisterScope temps(GetAssembler()->GetVIXLAssembler());
+  vixl32::Register temp = temps.Acquire();
+  __ Mov(temp, reg);
+  GetAssembler()->LoadFromOffset(kLoadWord, reg, sp, mem);
+  GetAssembler()->StoreToOffset(kStoreWord, temp, sp, mem);
 }
 
-void ParallelMoveResolverARMVIXL::Exchange(int mem1 ATTRIBUTE_UNUSED,
-                                           int mem2 ATTRIBUTE_UNUSED) {
-  TODO_VIXL32(FATAL);
+void ParallelMoveResolverARMVIXL::Exchange(int mem1, int mem2) {
+  // TODO(VIXL32): Double check the performance of this implementation.
+  UseScratchRegisterScope temps(GetAssembler()->GetVIXLAssembler());
+  vixl32::Register temp = temps.Acquire();
+  vixl32::SRegister temp_s = temps.AcquireS();
+
+  __ Ldr(temp, MemOperand(sp, mem1));
+  __ Vldr(temp_s, MemOperand(sp, mem2));
+  __ Str(temp, MemOperand(sp, mem2));
+  __ Vstr(temp_s, MemOperand(sp, mem1));
 }
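+
+// Illustrative sketch of the stack-slot swap above: one core register and one
+// VFP S register hold both words at once, so no extra spill is needed. The
+// pointer-based helper below is illustration only, not ART code.
+static inline void SwapStackWordsSketch(uint32_t* slot1, uint32_t* slot2) {
+  uint32_t word1 = *slot1;  // Ldr  temp,   [sp, #mem1]
+  uint32_t word2 = *slot2;  // Vldr temp_s, [sp, #mem2]
+  *slot2 = word1;           // Str  temp,   [sp, #mem2]
+  *slot1 = word2;           // Vstr temp_s, [sp, #mem1]
+}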
 
-void ParallelMoveResolverARMVIXL::EmitSwap(size_t index ATTRIBUTE_UNUSED) {
-  TODO_VIXL32(FATAL);
+void ParallelMoveResolverARMVIXL::EmitSwap(size_t index) {
+  MoveOperands* move = moves_[index];
+  Location source = move->GetSource();
+  Location destination = move->GetDestination();
+  UseScratchRegisterScope temps(GetAssembler()->GetVIXLAssembler());
+
+  if (source.IsRegister() && destination.IsRegister()) {
+    vixl32::Register temp = temps.Acquire();
+    DCHECK(!RegisterFrom(source).Is(temp));
+    DCHECK(!RegisterFrom(destination).Is(temp));
+    __ Mov(temp, RegisterFrom(destination));
+    __ Mov(RegisterFrom(destination), RegisterFrom(source));
+    __ Mov(RegisterFrom(source), temp);
+  } else if (source.IsRegister() && destination.IsStackSlot()) {
+    Exchange(RegisterFrom(source), destination.GetStackIndex());
+  } else if (source.IsStackSlot() && destination.IsRegister()) {
+    Exchange(RegisterFrom(destination), source.GetStackIndex());
+  } else if (source.IsStackSlot() && destination.IsStackSlot()) {
+    TODO_VIXL32(FATAL);
+  } else if (source.IsFpuRegister() && destination.IsFpuRegister()) {
+    TODO_VIXL32(FATAL);
+  } else if (source.IsRegisterPair() && destination.IsRegisterPair()) {
+    vixl32::DRegister temp = temps.AcquireD();
+    __ Vmov(temp, LowRegisterFrom(source), HighRegisterFrom(source));
+    __ Mov(LowRegisterFrom(source), LowRegisterFrom(destination));
+    __ Mov(HighRegisterFrom(source), HighRegisterFrom(destination));
+    __ Vmov(LowRegisterFrom(destination), HighRegisterFrom(destination), temp);
+  } else if (source.IsRegisterPair() || destination.IsRegisterPair()) {
+    vixl32::Register low_reg = LowRegisterFrom(source.IsRegisterPair() ? source : destination);
+    int mem = source.IsRegisterPair() ? destination.GetStackIndex() : source.GetStackIndex();
+    DCHECK(ExpectedPairLayout(source.IsRegisterPair() ? source : destination));
+    vixl32::DRegister temp = temps.AcquireD();
+    __ Vmov(temp, low_reg, vixl32::Register(low_reg.GetCode() + 1));
+    GetAssembler()->LoadFromOffset(kLoadWordPair, low_reg, sp, mem);
+    GetAssembler()->StoreDToOffset(temp, sp, mem);
+  } else if (source.IsFpuRegisterPair() && destination.IsFpuRegisterPair()) {
+    vixl32::DRegister first = DRegisterFrom(source);
+    vixl32::DRegister second = DRegisterFrom(destination);
+    vixl32::DRegister temp = temps.AcquireD();
+    __ Vmov(temp, first);
+    __ Vmov(first, second);
+    __ Vmov(second, temp);
+  } else if (source.IsFpuRegisterPair() || destination.IsFpuRegisterPair()) {
+    TODO_VIXL32(FATAL);
+  } else if (source.IsFpuRegister() || destination.IsFpuRegister()) {
+    TODO_VIXL32(FATAL);
+  } else if (source.IsDoubleStackSlot() && destination.IsDoubleStackSlot()) {
+    vixl32::DRegister temp1 = temps.AcquireD();
+    vixl32::DRegister temp2 = temps.AcquireD();
+    __ Vldr(temp1, MemOperand(sp, source.GetStackIndex()));
+    __ Vldr(temp2, MemOperand(sp, destination.GetStackIndex()));
+    __ Vstr(temp1, MemOperand(sp, destination.GetStackIndex()));
+    __ Vstr(temp2, MemOperand(sp, source.GetStackIndex()));
+  } else {
+    LOG(FATAL) << "Unimplemented" << source << " <-> " << destination;
+  }
 }
 
 void ParallelMoveResolverARMVIXL::SpillScratch(int reg ATTRIBUTE_UNUSED) {
@@ -3092,6 +4932,14 @@
   TODO_VIXL32(FATAL);
 }
 
+// Check if the desired_class_load_kind is supported. If it is, return it,
+// otherwise return a fall-back kind that should be used instead.
+HLoadClass::LoadKind CodeGeneratorARMVIXL::GetSupportedLoadClassKind(
+      HLoadClass::LoadKind desired_class_load_kind ATTRIBUTE_UNUSED) {
+  // TODO(VIXL): Implement optimized code paths.
+  return HLoadClass::LoadKind::kDexCacheViaMethod;
+}
+
 void LocationsBuilderARMVIXL::VisitLoadClass(HLoadClass* cls) {
   if (cls->NeedsAccessCheck()) {
     InvokeRuntimeCallingConventionARMVIXL calling_convention;
@@ -3140,7 +4988,8 @@
       GenerateGcRootFieldLoad(cls,
                               out_loc,
                               current_method,
-                              ArtMethod::DeclaringClassOffset().Int32Value());
+                              ArtMethod::DeclaringClassOffset().Int32Value(),
+                              kEmitCompilerReadBarrier);
       break;
     }
     case HLoadClass::LoadKind::kDexCacheViaMethod: {
@@ -3152,7 +5001,7 @@
       GetAssembler()->LoadFromOffset(kLoadWord, out, current_method, resolved_types_offset);
       // /* GcRoot<mirror::Class> */ out = out[type_index]
       size_t offset = CodeGenerator::GetCacheOffset(cls->GetTypeIndex());
-      GenerateGcRootFieldLoad(cls, out_loc, out, offset);
+      GenerateGcRootFieldLoad(cls, out_loc, out, offset, kEmitCompilerReadBarrier);
       generate_null_check = !cls->IsInDexCache();
       break;
     }
@@ -3176,6 +5025,786 @@
   }
 }
 
+void LocationsBuilderARMVIXL::VisitClinitCheck(HClinitCheck* check) {
+  LocationSummary* locations =
+      new (GetGraph()->GetArena()) LocationSummary(check, LocationSummary::kCallOnSlowPath);
+  locations->SetInAt(0, Location::RequiresRegister());
+  if (check->HasUses()) {
+    locations->SetOut(Location::SameAsFirstInput());
+  }
+}
+
+void InstructionCodeGeneratorARMVIXL::VisitClinitCheck(HClinitCheck* check) {
+  // We assume the class is not null.
+  LoadClassSlowPathARMVIXL* slow_path =
+      new (GetGraph()->GetArena()) LoadClassSlowPathARMVIXL(check->GetLoadClass(),
+                                                            check,
+                                                            check->GetDexPc(),
+                                                            /* do_clinit */ true);
+  codegen_->AddSlowPath(slow_path);
+  GenerateClassInitializationCheck(slow_path, InputRegisterAt(check, 0));
+}
+
+void InstructionCodeGeneratorARMVIXL::GenerateClassInitializationCheck(
+    LoadClassSlowPathARMVIXL* slow_path, vixl32::Register class_reg) {
+  UseScratchRegisterScope temps(GetVIXLAssembler());
+  vixl32::Register temp = temps.Acquire();
+  GetAssembler()->LoadFromOffset(kLoadWord,
+                                 temp,
+                                 class_reg,
+                                 mirror::Class::StatusOffset().Int32Value());
+  __ Cmp(temp, mirror::Class::kStatusInitialized);
+  __ B(lt, slow_path->GetEntryLabel());
+  // Even if the initialized flag is set, we may be in a situation where caches are not synced
+  // properly. Therefore, we do a memory fence.
+  __ Dmb(ISH);
+  __ Bind(slow_path->GetExitLabel());
+}
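+
+// Illustrative sketch of the check emitted above (stand-alone helper for
+// illustration; the status word and slow-path callback are hypothetical
+// stand-ins for mirror::Class::StatusOffset() and LoadClassSlowPathARMVIXL):
+static inline void ClassInitCheckSketch(const int32_t* status_word,
+                                        int32_t initialized_status,
+                                        void (*slow_path)()) {
+  if (*status_word < initialized_status) {
+    slow_path();  // May resolve the class and run its <clinit>.
+  }
+  // The generated code then issues Dmb(ISH) as an acquire-style fence so that
+  // static fields written by the initializing thread are visible here.
+}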
+
+// Check if the desired_string_load_kind is supported. If it is, return it,
+// otherwise return a fall-back kind that should be used instead.
+HLoadString::LoadKind CodeGeneratorARMVIXL::GetSupportedLoadStringKind(
+      HLoadString::LoadKind desired_string_load_kind ATTRIBUTE_UNUSED) {
+  // TODO(VIXL): Implement optimized code paths. For now we always use the simpler fallback code.
+  return HLoadString::LoadKind::kDexCacheViaMethod;
+}
+
+void LocationsBuilderARMVIXL::VisitLoadString(HLoadString* load) {
+  LocationSummary::CallKind call_kind = load->NeedsEnvironment()
+      ? LocationSummary::kCallOnMainOnly
+      : LocationSummary::kNoCall;
+  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(load, call_kind);
+
+  // TODO(VIXL): Implement optimized code paths.
+  // See InstructionCodeGeneratorARMVIXL::VisitLoadString.
+  HLoadString::LoadKind load_kind = load->GetLoadKind();
+  if (load_kind == HLoadString::LoadKind::kDexCacheViaMethod) {
+    locations->SetInAt(0, Location::RequiresRegister());
+    // TODO(VIXL): Use InvokeRuntimeCallingConventionARMVIXL instead.
+    locations->SetOut(LocationFrom(r0));
+  } else {
+    locations->SetOut(Location::RequiresRegister());
+  }
+}
+
+void InstructionCodeGeneratorARMVIXL::VisitLoadString(HLoadString* load) {
+  // TODO(VIXL): Implement optimized code paths.
+  // We implemented the simplest solution to get the first ART tests passing; the optimized
+  // path is deferred and should be implemented using the ARM64 implementation as a
+  // reference. The same applies to LocationsBuilderARMVIXL::VisitLoadString.
+
+  // TODO: Re-add the compiler code to do string dex cache lookup again.
+  DCHECK_EQ(load->GetLoadKind(), HLoadString::LoadKind::kDexCacheViaMethod);
+  InvokeRuntimeCallingConventionARMVIXL calling_convention;
+  __ Mov(calling_convention.GetRegisterAt(0), load->GetStringIndex());
+  codegen_->InvokeRuntime(kQuickResolveString, load, load->GetDexPc());
+  CheckEntrypointTypes<kQuickResolveString, void*, uint32_t>();
+}
+
+static int32_t GetExceptionTlsOffset() {
+  return Thread::ExceptionOffset<kArmPointerSize>().Int32Value();
+}
+
+void LocationsBuilderARMVIXL::VisitLoadException(HLoadException* load) {
+  LocationSummary* locations =
+      new (GetGraph()->GetArena()) LocationSummary(load, LocationSummary::kNoCall);
+  locations->SetOut(Location::RequiresRegister());
+}
+
+void InstructionCodeGeneratorARMVIXL::VisitLoadException(HLoadException* load) {
+  vixl32::Register out = OutputRegister(load);
+  GetAssembler()->LoadFromOffset(kLoadWord, out, tr, GetExceptionTlsOffset());
+}
+
+
+void LocationsBuilderARMVIXL::VisitClearException(HClearException* clear) {
+  new (GetGraph()->GetArena()) LocationSummary(clear, LocationSummary::kNoCall);
+}
+
+void InstructionCodeGeneratorARMVIXL::VisitClearException(HClearException* clear ATTRIBUTE_UNUSED) {
+  UseScratchRegisterScope temps(GetVIXLAssembler());
+  vixl32::Register temp = temps.Acquire();
+  __ Mov(temp, 0);
+  GetAssembler()->StoreToOffset(kStoreWord, temp, tr, GetExceptionTlsOffset());
+}
+
+void LocationsBuilderARMVIXL::VisitThrow(HThrow* instruction) {
+  LocationSummary* locations =
+      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnMainOnly);
+  InvokeRuntimeCallingConventionARMVIXL calling_convention;
+  locations->SetInAt(0, LocationFrom(calling_convention.GetRegisterAt(0)));
+}
+
+void InstructionCodeGeneratorARMVIXL::VisitThrow(HThrow* instruction) {
+  codegen_->InvokeRuntime(kQuickDeliverException, instruction, instruction->GetDexPc());
+  CheckEntrypointTypes<kQuickDeliverException, void, mirror::Object*>();
+}
+
+static bool TypeCheckNeedsATemporary(TypeCheckKind type_check_kind) {
+  return kEmitCompilerReadBarrier &&
+      (kUseBakerReadBarrier ||
+       type_check_kind == TypeCheckKind::kAbstractClassCheck ||
+       type_check_kind == TypeCheckKind::kClassHierarchyCheck ||
+       type_check_kind == TypeCheckKind::kArrayObjectCheck);
+}
+
+
+void LocationsBuilderARMVIXL::VisitInstanceOf(HInstanceOf* instruction) {
+  LocationSummary::CallKind call_kind = LocationSummary::kNoCall;
+  TypeCheckKind type_check_kind = instruction->GetTypeCheckKind();
+  bool baker_read_barrier_slow_path = false;
+  switch (type_check_kind) {
+    case TypeCheckKind::kExactCheck:
+    case TypeCheckKind::kAbstractClassCheck:
+    case TypeCheckKind::kClassHierarchyCheck:
+    case TypeCheckKind::kArrayObjectCheck:
+      call_kind =
+          kEmitCompilerReadBarrier ? LocationSummary::kCallOnSlowPath : LocationSummary::kNoCall;
+      baker_read_barrier_slow_path = kUseBakerReadBarrier;
+      break;
+    case TypeCheckKind::kArrayCheck:
+    case TypeCheckKind::kUnresolvedCheck:
+    case TypeCheckKind::kInterfaceCheck:
+      call_kind = LocationSummary::kCallOnSlowPath;
+      break;
+  }
+
+  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
+  if (baker_read_barrier_slow_path) {
+    locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty());  // No caller-save registers.
+  }
+  locations->SetInAt(0, Location::RequiresRegister());
+  locations->SetInAt(1, Location::RequiresRegister());
+  // The "out" register is used as a temporary, so it overlaps with the inputs.
+  // Note that TypeCheckSlowPathARMVIXL uses this register too.
+  locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
+  // When read barriers are enabled, we need a temporary register for
+  // some cases.
+  if (TypeCheckNeedsATemporary(type_check_kind)) {
+    locations->AddTemp(Location::RequiresRegister());
+  }
+}
+
+void InstructionCodeGeneratorARMVIXL::VisitInstanceOf(HInstanceOf* instruction) {
+  TypeCheckKind type_check_kind = instruction->GetTypeCheckKind();
+  LocationSummary* locations = instruction->GetLocations();
+  Location obj_loc = locations->InAt(0);
+  vixl32::Register obj = InputRegisterAt(instruction, 0);
+  vixl32::Register cls = InputRegisterAt(instruction, 1);
+  Location out_loc = locations->Out();
+  vixl32::Register out = OutputRegister(instruction);
+  Location maybe_temp_loc = TypeCheckNeedsATemporary(type_check_kind) ?
+      locations->GetTemp(0) :
+      Location::NoLocation();
+  uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
+  uint32_t super_offset = mirror::Class::SuperClassOffset().Int32Value();
+  uint32_t component_offset = mirror::Class::ComponentTypeOffset().Int32Value();
+  uint32_t primitive_offset = mirror::Class::PrimitiveTypeOffset().Int32Value();
+  vixl32::Label done, zero;
+  SlowPathCodeARMVIXL* slow_path = nullptr;
+
+  // Return 0 if `obj` is null.
+  // Avoid null check if we know obj is not null.
+  if (instruction->MustDoNullCheck()) {
+    __ Cbz(obj, &zero);
+  }
+
+  switch (type_check_kind) {
+    case TypeCheckKind::kExactCheck: {
+      // /* HeapReference<Class> */ out = obj->klass_
+      GenerateReferenceLoadTwoRegisters(instruction,
+                                        out_loc,
+                                        obj_loc,
+                                        class_offset,
+                                        maybe_temp_loc);
+      __ Cmp(out, cls);
+      // Classes must be equal for the instanceof to succeed.
+      __ B(ne, &zero);
+      __ Mov(out, 1);
+      __ B(&done);
+      break;
+    }
+
+    case TypeCheckKind::kAbstractClassCheck: {
+      // /* HeapReference<Class> */ out = obj->klass_
+      GenerateReferenceLoadTwoRegisters(instruction,
+                                        out_loc,
+                                        obj_loc,
+                                        class_offset,
+                                        maybe_temp_loc);
+      // If the class is abstract, we eagerly fetch the super class of the
+      // object to avoid doing a comparison we know will fail.
+      vixl32::Label loop;
+      __ Bind(&loop);
+      // /* HeapReference<Class> */ out = out->super_class_
+      GenerateReferenceLoadOneRegister(instruction, out_loc, super_offset, maybe_temp_loc);
+      // If `out` is null, we use it for the result, and jump to `done`.
+      __ Cbz(out, &done);
+      __ Cmp(out, cls);
+      __ B(ne, &loop);
+      __ Mov(out, 1);
+      if (zero.IsReferenced()) {
+        __ B(&done);
+      }
+      break;
+    }
+
+    case TypeCheckKind::kClassHierarchyCheck: {
+      // /* HeapReference<Class> */ out = obj->klass_
+      GenerateReferenceLoadTwoRegisters(instruction,
+                                        out_loc,
+                                        obj_loc,
+                                        class_offset,
+                                        maybe_temp_loc);
+      // Walk over the class hierarchy to find a match.
+      vixl32::Label loop, success;
+      __ Bind(&loop);
+      __ Cmp(out, cls);
+      __ B(eq, &success);
+      // /* HeapReference<Class> */ out = out->super_class_
+      GenerateReferenceLoadOneRegister(instruction, out_loc, super_offset, maybe_temp_loc);
+      __ Cbnz(out, &loop);
+      // If `out` is null, we use it for the result, and jump to `done`.
+      __ B(&done);
+      __ Bind(&success);
+      __ Mov(out, 1);
+      if (zero.IsReferenced()) {
+        __ B(&done);
+      }
+      break;
+    }
+
+    case TypeCheckKind::kArrayObjectCheck: {
+      // /* HeapReference<Class> */ out = obj->klass_
+      GenerateReferenceLoadTwoRegisters(instruction,
+                                        out_loc,
+                                        obj_loc,
+                                        class_offset,
+                                        maybe_temp_loc);
+      // Do an exact check.
+      vixl32::Label exact_check;
+      __ Cmp(out, cls);
+      __ B(eq, &exact_check);
+      // Otherwise, we need to check that the object's class is a non-primitive array.
+      // /* HeapReference<Class> */ out = out->component_type_
+      GenerateReferenceLoadOneRegister(instruction, out_loc, component_offset, maybe_temp_loc);
+      // If `out` is null, we use it for the result, and jump to `done`.
+      __ Cbz(out, &done);
+      GetAssembler()->LoadFromOffset(kLoadUnsignedHalfword, out, out, primitive_offset);
+      static_assert(Primitive::kPrimNot == 0, "Expected 0 for kPrimNot");
+      __ Cbnz(out, &zero);
+      __ Bind(&exact_check);
+      __ Mov(out, 1);
+      __ B(&done);
+      break;
+    }
+
+    case TypeCheckKind::kArrayCheck: {
+      // /* HeapReference<Class> */ out = obj->klass_
+      GenerateReferenceLoadTwoRegisters(instruction,
+                                        out_loc,
+                                        obj_loc,
+                                        class_offset,
+                                        maybe_temp_loc);
+      __ Cmp(out, cls);
+      DCHECK(locations->OnlyCallsOnSlowPath());
+      slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathARMVIXL(instruction,
+                                                                        /* is_fatal */ false);
+      codegen_->AddSlowPath(slow_path);
+      __ B(ne, slow_path->GetEntryLabel());
+      __ Mov(out, 1);
+      if (zero.IsReferenced()) {
+        __ B(&done);
+      }
+      break;
+    }
+
+    case TypeCheckKind::kUnresolvedCheck:
+    case TypeCheckKind::kInterfaceCheck: {
+      // Note that we indeed only call on slow path, but we always go
+      // into the slow path for the unresolved and interface check
+      // cases.
+      //
+      // We cannot directly call the InstanceofNonTrivial runtime
+      // entry point without resorting to a type checking slow path
+      // here (i.e. by calling InvokeRuntime directly), as it would
+      // require to assign fixed registers for the inputs of this
+      // HInstanceOf instruction (following the runtime calling
+      // convention), which might be cluttered by the potential first
+      // read barrier emission at the beginning of this method.
+      //
+      // TODO: Introduce a new runtime entry point taking the object
+      // to test (instead of its class) as argument, and let it deal
+      // with the read barrier issues. This will let us refactor this
+      // case of the `switch` code as it was previously (with a direct
+      // call to the runtime not using a type checking slow path).
+      // This should also be beneficial for the other cases above.
+      DCHECK(locations->OnlyCallsOnSlowPath());
+      slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathARMVIXL(instruction,
+                                                                        /* is_fatal */ false);
+      codegen_->AddSlowPath(slow_path);
+      __ B(slow_path->GetEntryLabel());
+      if (zero.IsReferenced()) {
+        __ B(&done);
+      }
+      break;
+    }
+  }
+
+  if (zero.IsReferenced()) {
+    __ Bind(&zero);
+    __ Mov(out, 0);
+  }
+
+  if (done.IsReferenced()) {
+    __ Bind(&done);
+  }
+
+  if (slow_path != nullptr) {
+    __ Bind(slow_path->GetExitLabel());
+  }
+}
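+
+// Illustrative sketch of the loop generated for kAbstractClassCheck and
+// kClassHierarchyCheck above: walk super_class_ until the target class is
+// found or the chain ends (for the abstract-class case the walk starts at the
+// super class, since the object's own class cannot match an abstract target).
+// KlassSketch is a made-up stand-in for the mirror::Class layout.
+struct KlassSketch { const KlassSketch* super_class; };
+static inline bool IsSubclassOfSketch(const KlassSketch* klass, const KlassSketch* target) {
+  for (const KlassSketch* k = klass; k != nullptr; k = k->super_class) {
+    if (k == target) {
+      return true;   // Cmp matched: result is 1.
+    }
+  }
+  return false;      // Reached a null super class: result is 0.
+}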
+
+void LocationsBuilderARMVIXL::VisitCheckCast(HCheckCast* instruction) {
+  LocationSummary::CallKind call_kind = LocationSummary::kNoCall;
+  bool throws_into_catch = instruction->CanThrowIntoCatchBlock();
+
+  TypeCheckKind type_check_kind = instruction->GetTypeCheckKind();
+  switch (type_check_kind) {
+    case TypeCheckKind::kExactCheck:
+    case TypeCheckKind::kAbstractClassCheck:
+    case TypeCheckKind::kClassHierarchyCheck:
+    case TypeCheckKind::kArrayObjectCheck:
+      call_kind = (throws_into_catch || kEmitCompilerReadBarrier) ?
+          LocationSummary::kCallOnSlowPath :
+          LocationSummary::kNoCall;  // In fact, call on a fatal (non-returning) slow path.
+      break;
+    case TypeCheckKind::kArrayCheck:
+    case TypeCheckKind::kUnresolvedCheck:
+    case TypeCheckKind::kInterfaceCheck:
+      call_kind = LocationSummary::kCallOnSlowPath;
+      break;
+  }
+
+  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
+  locations->SetInAt(0, Location::RequiresRegister());
+  locations->SetInAt(1, Location::RequiresRegister());
+  // Note that TypeCheckSlowPathARMVIXL uses this "temp" register too.
+  locations->AddTemp(Location::RequiresRegister());
+  // When read barriers are enabled, we need an additional temporary
+  // register for some cases.
+  if (TypeCheckNeedsATemporary(type_check_kind)) {
+    locations->AddTemp(Location::RequiresRegister());
+  }
+}
+
+void InstructionCodeGeneratorARMVIXL::VisitCheckCast(HCheckCast* instruction) {
+  TypeCheckKind type_check_kind = instruction->GetTypeCheckKind();
+  LocationSummary* locations = instruction->GetLocations();
+  Location obj_loc = locations->InAt(0);
+  vixl32::Register obj = InputRegisterAt(instruction, 0);
+  vixl32::Register cls = InputRegisterAt(instruction, 1);
+  Location temp_loc = locations->GetTemp(0);
+  vixl32::Register temp = RegisterFrom(temp_loc);
+  Location maybe_temp2_loc = TypeCheckNeedsATemporary(type_check_kind) ?
+      locations->GetTemp(1) :
+      Location::NoLocation();
+  uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
+  uint32_t super_offset = mirror::Class::SuperClassOffset().Int32Value();
+  uint32_t component_offset = mirror::Class::ComponentTypeOffset().Int32Value();
+  uint32_t primitive_offset = mirror::Class::PrimitiveTypeOffset().Int32Value();
+
+  bool is_type_check_slow_path_fatal =
+      (type_check_kind == TypeCheckKind::kExactCheck ||
+       type_check_kind == TypeCheckKind::kAbstractClassCheck ||
+       type_check_kind == TypeCheckKind::kClassHierarchyCheck ||
+       type_check_kind == TypeCheckKind::kArrayObjectCheck) &&
+      !instruction->CanThrowIntoCatchBlock();
+  SlowPathCodeARMVIXL* type_check_slow_path =
+      new (GetGraph()->GetArena()) TypeCheckSlowPathARMVIXL(instruction,
+                                                            is_type_check_slow_path_fatal);
+  codegen_->AddSlowPath(type_check_slow_path);
+
+  vixl32::Label done;
+  // Avoid null check if we know obj is not null.
+  if (instruction->MustDoNullCheck()) {
+    __ Cbz(obj, &done);
+  }
+
+  // /* HeapReference<Class> */ temp = obj->klass_
+  GenerateReferenceLoadTwoRegisters(instruction, temp_loc, obj_loc, class_offset, maybe_temp2_loc);
+
+  switch (type_check_kind) {
+    case TypeCheckKind::kExactCheck:
+    case TypeCheckKind::kArrayCheck: {
+      __ Cmp(temp, cls);
+      // Jump to slow path for throwing the exception or doing a
+      // more involved array check.
+      __ B(ne, type_check_slow_path->GetEntryLabel());
+      break;
+    }
+
+    case TypeCheckKind::kAbstractClassCheck: {
+      // If the class is abstract, we eagerly fetch the super class of the
+      // object to avoid doing a comparison we know will fail.
+      vixl32::Label loop;
+      __ Bind(&loop);
+      // /* HeapReference<Class> */ temp = temp->super_class_
+      GenerateReferenceLoadOneRegister(instruction, temp_loc, super_offset, maybe_temp2_loc);
+
+      // If the class reference currently in `temp` is null, jump to the slow path to throw the
+      // exception.
+      __ Cbz(temp, type_check_slow_path->GetEntryLabel());
+
+      // Otherwise, compare the classes.
+      __ Cmp(temp, cls);
+      __ B(ne, &loop);
+      break;
+    }
+
+    case TypeCheckKind::kClassHierarchyCheck: {
+      // Walk over the class hierarchy to find a match.
+      vixl32::Label loop;
+      __ Bind(&loop);
+      __ Cmp(temp, cls);
+      __ B(eq, &done);
+
+      // /* HeapReference<Class> */ temp = temp->super_class_
+      GenerateReferenceLoadOneRegister(instruction, temp_loc, super_offset, maybe_temp2_loc);
+
+      // If the class reference currently in `temp` is null, jump to the slow path to throw the
+      // exception.
+      __ Cbz(temp, type_check_slow_path->GetEntryLabel());
+      // Otherwise, jump to the beginning of the loop.
+      __ B(&loop);
+      break;
+    }
+
+    case TypeCheckKind::kArrayObjectCheck:  {
+      // Do an exact check.
+      __ Cmp(temp, cls);
+      __ B(eq, &done);
+
+      // Otherwise, we need to check that the object's class is a non-primitive array.
+      // /* HeapReference<Class> */ temp = temp->component_type_
+      GenerateReferenceLoadOneRegister(instruction, temp_loc, component_offset, maybe_temp2_loc);
+      // If the component type is null, jump to the slow path to throw the exception.
+      __ Cbz(temp, type_check_slow_path->GetEntryLabel());
+      // Otherwise, the object is indeed an array: further check that its component type is
+      // not a primitive type.
+      GetAssembler()->LoadFromOffset(kLoadUnsignedHalfword, temp, temp, primitive_offset);
+      static_assert(Primitive::kPrimNot == 0, "Expected 0 for art::Primitive::kPrimNot");
+      __ Cbnz(temp, type_check_slow_path->GetEntryLabel());
+      break;
+    }
+
+    case TypeCheckKind::kUnresolvedCheck:
+    case TypeCheckKind::kInterfaceCheck:
+      // We always go into the type check slow path for the unresolved
+      // and interface check cases.
+      //
+      // We cannot directly call the CheckCast runtime entry point
+      // without resorting to a type checking slow path here (i.e. by
+      // calling InvokeRuntime directly), as it would require to
+      // assign fixed registers for the inputs of this HInstanceOf
+      // instruction (following the runtime calling convention), which
+      // might be cluttered by the potential first read barrier
+      // emission at the beginning of this method.
+      __ B(type_check_slow_path->GetEntryLabel());
+      break;
+  }
+  __ Bind(&done);
+
+  __ Bind(type_check_slow_path->GetExitLabel());
+}
+
+void LocationsBuilderARMVIXL::VisitMonitorOperation(HMonitorOperation* instruction) {
+  LocationSummary* locations =
+      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnMainOnly);
+  InvokeRuntimeCallingConventionARMVIXL calling_convention;
+  locations->SetInAt(0, LocationFrom(calling_convention.GetRegisterAt(0)));
+}
+
+void InstructionCodeGeneratorARMVIXL::VisitMonitorOperation(HMonitorOperation* instruction) {
+  codegen_->InvokeRuntime(instruction->IsEnter() ? kQuickLockObject : kQuickUnlockObject,
+                          instruction,
+                          instruction->GetDexPc());
+  if (instruction->IsEnter()) {
+    CheckEntrypointTypes<kQuickLockObject, void, mirror::Object*>();
+  } else {
+    CheckEntrypointTypes<kQuickUnlockObject, void, mirror::Object*>();
+  }
+}
+
+void LocationsBuilderARMVIXL::VisitAnd(HAnd* instruction) {
+  HandleBitwiseOperation(instruction, AND);
+}
+
+void LocationsBuilderARMVIXL::VisitOr(HOr* instruction) {
+  HandleBitwiseOperation(instruction, ORR);
+}
+
+void LocationsBuilderARMVIXL::VisitXor(HXor* instruction) {
+  HandleBitwiseOperation(instruction, EOR);
+}
+
+void LocationsBuilderARMVIXL::HandleBitwiseOperation(HBinaryOperation* instruction, Opcode opcode) {
+  LocationSummary* locations =
+      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+  DCHECK(instruction->GetResultType() == Primitive::kPrimInt
+         || instruction->GetResultType() == Primitive::kPrimLong);
+  // Note: GVN reorders commutative operations to have the constant on the right-hand side.
+  locations->SetInAt(0, Location::RequiresRegister());
+  locations->SetInAt(1, ArmEncodableConstantOrRegister(instruction->InputAt(1), opcode));
+  locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+}
+
+void InstructionCodeGeneratorARMVIXL::VisitAnd(HAnd* instruction) {
+  HandleBitwiseOperation(instruction);
+}
+
+void InstructionCodeGeneratorARMVIXL::VisitOr(HOr* instruction) {
+  HandleBitwiseOperation(instruction);
+}
+
+void InstructionCodeGeneratorARMVIXL::VisitXor(HXor* instruction) {
+  HandleBitwiseOperation(instruction);
+}
+
+void LocationsBuilderARMVIXL::VisitBitwiseNegatedRight(HBitwiseNegatedRight* instruction) {
+  LocationSummary* locations =
+      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+  DCHECK(instruction->GetResultType() == Primitive::kPrimInt
+         || instruction->GetResultType() == Primitive::kPrimLong);
+
+  locations->SetInAt(0, Location::RequiresRegister());
+  locations->SetInAt(1, Location::RequiresRegister());
+  locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+}
+
+void InstructionCodeGeneratorARMVIXL::VisitBitwiseNegatedRight(HBitwiseNegatedRight* instruction) {
+  LocationSummary* locations = instruction->GetLocations();
+  Location first = locations->InAt(0);
+  Location second = locations->InAt(1);
+  Location out = locations->Out();
+
+  if (instruction->GetResultType() == Primitive::kPrimInt) {
+    vixl32::Register first_reg = RegisterFrom(first);
+    vixl32::Register second_reg = RegisterFrom(second);
+    vixl32::Register out_reg = RegisterFrom(out);
+
+    switch (instruction->GetOpKind()) {
+      case HInstruction::kAnd:
+        __ Bic(out_reg, first_reg, second_reg);
+        break;
+      case HInstruction::kOr:
+        __ Orn(out_reg, first_reg, second_reg);
+        break;
+      // There is no EON on ARM.
+      case HInstruction::kXor:
+      default:
+        LOG(FATAL) << "Unexpected instruction " << instruction->DebugName();
+        UNREACHABLE();
+    }
+    return;
+
+  } else {
+    DCHECK_EQ(instruction->GetResultType(), Primitive::kPrimLong);
+    vixl32::Register first_low = LowRegisterFrom(first);
+    vixl32::Register first_high = HighRegisterFrom(first);
+    vixl32::Register second_low = LowRegisterFrom(second);
+    vixl32::Register second_high = HighRegisterFrom(second);
+    vixl32::Register out_low = LowRegisterFrom(out);
+    vixl32::Register out_high = HighRegisterFrom(out);
+
+    switch (instruction->GetOpKind()) {
+      case HInstruction::kAnd:
+        __ Bic(out_low, first_low, second_low);
+        __ Bic(out_high, first_high, second_high);
+        break;
+      case HInstruction::kOr:
+        __ Orn(out_low, first_low, second_low);
+        __ Orn(out_high, first_high, second_high);
+        break;
+      // There is no EON on ARM.
+      case HInstruction::kXor:
+      default:
+        LOG(FATAL) << "Unexpected instruction " << instruction->DebugName();
+        UNREACHABLE();
+    }
+  }
+}
+
+// TODO(VIXL): Remove optimizations in the helper when they are implemented in vixl.
+void InstructionCodeGeneratorARMVIXL::GenerateAndConst(vixl32::Register out,
+                                                       vixl32::Register first,
+                                                       uint32_t value) {
+  // Optimize special cases for individual halves of `and-long` (`and` is simplified earlier).
+  if (value == 0xffffffffu) {
+    if (!out.Is(first)) {
+      __ Mov(out, first);
+    }
+    return;
+  }
+  if (value == 0u) {
+    __ Mov(out, 0);
+    return;
+  }
+  if (GetAssembler()->ShifterOperandCanHold(AND, value)) {
+    __ And(out, first, value);
+  } else {
+    DCHECK(GetAssembler()->ShifterOperandCanHold(BIC, ~value));
+    __ Bic(out, first, ~value);
+  }
+}
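+
+// Worked example of the AND/BIC selection above: 0xFFFFFF00 is not encodable
+// as an ARM modified immediate, but its complement 0xFF is, so the helper
+// emits `bic out, first, #0xFF`, which computes the same result.
+static_assert((0x12345678u & 0xFFFFFF00u) == (0x12345678u & ~0x000000FFu),
+              "AND with value equals BIC with ~value");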
+
+// TODO(VIXL): Remove optimizations in the helper when they are implemented in vixl.
+void InstructionCodeGeneratorARMVIXL::GenerateOrrConst(vixl32::Register out,
+                                                       vixl32::Register first,
+                                                       uint32_t value) {
+  // Optimize special cases for individual halves of `or-long` (`or` is simplified earlier).
+  if (value == 0u) {
+    if (!out.Is(first)) {
+      __ Mov(out, first);
+    }
+    return;
+  }
+  if (value == 0xffffffffu) {
+    __ Mvn(out, 0);
+    return;
+  }
+  if (GetAssembler()->ShifterOperandCanHold(ORR, value)) {
+    __ Orr(out, first, value);
+  } else {
+    DCHECK(GetAssembler()->ShifterOperandCanHold(ORN, ~value));
+    __ Orn(out, first, ~value);
+  }
+}
+
+// TODO(VIXL): Remove optimizations in the helper when they are implemented in vixl.
+void InstructionCodeGeneratorARMVIXL::GenerateEorConst(vixl32::Register out,
+                                                       vixl32::Register first,
+                                                       uint32_t value) {
+  // Optimize special cases for individual halves of `xor-long` (`xor` is simplified earlier).
+  if (value == 0u) {
+    if (!out.Is(first)) {
+      __ Mov(out, first);
+    }
+    return;
+  }
+  __ Eor(out, first, value);
+}
+
+void InstructionCodeGeneratorARMVIXL::HandleBitwiseOperation(HBinaryOperation* instruction) {
+  LocationSummary* locations = instruction->GetLocations();
+  Location first = locations->InAt(0);
+  Location second = locations->InAt(1);
+  Location out = locations->Out();
+
+  if (second.IsConstant()) {
+    uint64_t value = static_cast<uint64_t>(Int64FromConstant(second.GetConstant()));
+    uint32_t value_low = Low32Bits(value);
+    if (instruction->GetResultType() == Primitive::kPrimInt) {
+      vixl32::Register first_reg = InputRegisterAt(instruction, 0);
+      vixl32::Register out_reg = OutputRegister(instruction);
+      if (instruction->IsAnd()) {
+        GenerateAndConst(out_reg, first_reg, value_low);
+      } else if (instruction->IsOr()) {
+        GenerateOrrConst(out_reg, first_reg, value_low);
+      } else {
+        DCHECK(instruction->IsXor());
+        GenerateEorConst(out_reg, first_reg, value_low);
+      }
+    } else {
+      DCHECK_EQ(instruction->GetResultType(), Primitive::kPrimLong);
+      uint32_t value_high = High32Bits(value);
+      vixl32::Register first_low = LowRegisterFrom(first);
+      vixl32::Register first_high = HighRegisterFrom(first);
+      vixl32::Register out_low = LowRegisterFrom(out);
+      vixl32::Register out_high = HighRegisterFrom(out);
+      if (instruction->IsAnd()) {
+        GenerateAndConst(out_low, first_low, value_low);
+        GenerateAndConst(out_high, first_high, value_high);
+      } else if (instruction->IsOr()) {
+        GenerateOrrConst(out_low, first_low, value_low);
+        GenerateOrrConst(out_high, first_high, value_high);
+      } else {
+        DCHECK(instruction->IsXor());
+        GenerateEorConst(out_low, first_low, value_low);
+        GenerateEorConst(out_high, first_high, value_high);
+      }
+    }
+    return;
+  }
+
+  if (instruction->GetResultType() == Primitive::kPrimInt) {
+    vixl32::Register first_reg = InputRegisterAt(instruction, 0);
+    vixl32::Register second_reg = InputRegisterAt(instruction, 1);
+    vixl32::Register out_reg = OutputRegister(instruction);
+    if (instruction->IsAnd()) {
+      __ And(out_reg, first_reg, second_reg);
+    } else if (instruction->IsOr()) {
+      __ Orr(out_reg, first_reg, second_reg);
+    } else {
+      DCHECK(instruction->IsXor());
+      __ Eor(out_reg, first_reg, second_reg);
+    }
+  } else {
+    DCHECK_EQ(instruction->GetResultType(), Primitive::kPrimLong);
+    vixl32::Register first_low = LowRegisterFrom(first);
+    vixl32::Register first_high = HighRegisterFrom(first);
+    vixl32::Register second_low = LowRegisterFrom(second);
+    vixl32::Register second_high = HighRegisterFrom(second);
+    vixl32::Register out_low = LowRegisterFrom(out);
+    vixl32::Register out_high = HighRegisterFrom(out);
+    if (instruction->IsAnd()) {
+      __ And(out_low, first_low, second_low);
+      __ And(out_high, first_high, second_high);
+    } else if (instruction->IsOr()) {
+      __ Orr(out_low, first_low, second_low);
+      __ Orr(out_high, first_high, second_high);
+    } else {
+      DCHECK(instruction->IsXor());
+      __ Eor(out_low, first_low, second_low);
+      __ Eor(out_high, first_high, second_high);
+    }
+  }
+}
+
+void InstructionCodeGeneratorARMVIXL::GenerateReferenceLoadOneRegister(
+    HInstruction* instruction ATTRIBUTE_UNUSED,
+    Location out,
+    uint32_t offset,
+    Location maybe_temp ATTRIBUTE_UNUSED) {
+  vixl32::Register out_reg = RegisterFrom(out);
+  if (kEmitCompilerReadBarrier) {
+    TODO_VIXL32(FATAL);
+  } else {
+    // Plain load with no read barrier.
+    // /* HeapReference<Object> */ out = *(out + offset)
+    GetAssembler()->LoadFromOffset(kLoadWord, out_reg, out_reg, offset);
+    GetAssembler()->MaybeUnpoisonHeapReference(out_reg);
+  }
+}
+
+void InstructionCodeGeneratorARMVIXL::GenerateReferenceLoadTwoRegisters(
+    HInstruction* instruction ATTRIBUTE_UNUSED,
+    Location out,
+    Location obj,
+    uint32_t offset,
+    Location maybe_temp ATTRIBUTE_UNUSED) {
+  vixl32::Register out_reg = RegisterFrom(out);
+  vixl32::Register obj_reg = RegisterFrom(obj);
+  if (kEmitCompilerReadBarrier) {
+    TODO_VIXL32(FATAL);
+  } else {
+    // Plain load with no read barrier.
+    // /* HeapReference<Object> */ out = *(obj + offset)
+    GetAssembler()->LoadFromOffset(kLoadWord, out_reg, obj_reg, offset);
+    GetAssembler()->MaybeUnpoisonHeapReference(out_reg);
+  }
+}
+
 void InstructionCodeGeneratorARMVIXL::GenerateGcRootFieldLoad(
     HInstruction* instruction ATTRIBUTE_UNUSED,
     Location root,
@@ -3194,6 +5823,67 @@
   }
 }
 
+void CodeGeneratorARMVIXL::GenerateFieldLoadWithBakerReadBarrier(
+    HInstruction* instruction ATTRIBUTE_UNUSED,
+    Location ref ATTRIBUTE_UNUSED,
+    vixl::aarch32::Register obj ATTRIBUTE_UNUSED,
+    uint32_t offset ATTRIBUTE_UNUSED,
+    Location temp ATTRIBUTE_UNUSED,
+    bool needs_null_check ATTRIBUTE_UNUSED) {
+  TODO_VIXL32(FATAL);
+}
+
+void CodeGeneratorARMVIXL::GenerateReferenceLoadWithBakerReadBarrier(
+    HInstruction* instruction ATTRIBUTE_UNUSED,
+    Location ref ATTRIBUTE_UNUSED,
+    vixl::aarch32::Register obj ATTRIBUTE_UNUSED,
+    uint32_t offset ATTRIBUTE_UNUSED,
+    Location index ATTRIBUTE_UNUSED,
+    ScaleFactor scale_factor ATTRIBUTE_UNUSED,
+    Location temp ATTRIBUTE_UNUSED,
+    bool needs_null_check ATTRIBUTE_UNUSED,
+    bool always_update_field ATTRIBUTE_UNUSED,
+    vixl::aarch32::Register* temp2 ATTRIBUTE_UNUSED) {
+  TODO_VIXL32(FATAL);
+}
+
+void CodeGeneratorARMVIXL::GenerateReadBarrierSlow(HInstruction* instruction ATTRIBUTE_UNUSED,
+                                                   Location out ATTRIBUTE_UNUSED,
+                                                   Location ref ATTRIBUTE_UNUSED,
+                                                   Location obj ATTRIBUTE_UNUSED,
+                                                   uint32_t offset ATTRIBUTE_UNUSED,
+                                                   Location index ATTRIBUTE_UNUSED) {
+  TODO_VIXL32(FATAL);
+}
+
+void CodeGeneratorARMVIXL::MaybeGenerateReadBarrierSlow(HInstruction* instruction ATTRIBUTE_UNUSED,
+                                                        Location out,
+                                                        Location ref ATTRIBUTE_UNUSED,
+                                                        Location obj ATTRIBUTE_UNUSED,
+                                                        uint32_t offset ATTRIBUTE_UNUSED,
+                                                        Location index ATTRIBUTE_UNUSED) {
+  if (kEmitCompilerReadBarrier) {
+    DCHECK(!kUseBakerReadBarrier);
+    TODO_VIXL32(FATAL);
+  } else if (kPoisonHeapReferences) {
+    GetAssembler()->UnpoisonHeapReference(RegisterFrom(out));
+  }
+}
+
+// Check if the desired_dispatch_info is supported. If it is, return it,
+// otherwise return a fall-back info that should be used instead.
+HInvokeStaticOrDirect::DispatchInfo CodeGeneratorARMVIXL::GetSupportedInvokeStaticOrDirectDispatch(
+      const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info ATTRIBUTE_UNUSED,
+      HInvokeStaticOrDirect* invoke ATTRIBUTE_UNUSED) {
+  // TODO(VIXL): Implement optimized code paths.
+  return {
+    HInvokeStaticOrDirect::MethodLoadKind::kDexCacheViaMethod,
+    HInvokeStaticOrDirect::CodePtrLocation::kCallArtMethod,
+    0u,
+    0u
+  };
+}
+
 vixl32::Register CodeGeneratorARMVIXL::GetInvokeStaticOrDirectExtraParameter(
     HInvokeStaticOrDirect* invoke, vixl32::Register temp) {
   DCHECK_EQ(invoke->InputCount(), invoke->GetNumberOfArguments() + 1u);
@@ -3239,7 +5929,10 @@
       if (current_method.IsRegister()) {
         method_reg = RegisterFrom(current_method);
       } else {
-        TODO_VIXL32(FATAL);
+        DCHECK(invoke->GetLocations()->Intrinsified());
+        DCHECK(!current_method.IsValid());
+        method_reg = temp_reg;
+        GetAssembler()->LoadFromOffset(kLoadWord, temp_reg, sp, kCurrentMethodStackOffset);
       }
       // /* ArtMethod*[] */ temp = temp.ptr_sized_fields_->dex_cache_resolved_methods_;
       GetAssembler()->LoadFromOffset(
@@ -3309,58 +6002,170 @@
   __ Blx(lr);
 }
 
-static int32_t GetExceptionTlsOffset() {
-  return Thread::ExceptionOffset<kArmPointerSize>().Int32Value();
-}
-
-void LocationsBuilderARMVIXL::VisitLoadException(HLoadException* load) {
+void LocationsBuilderARMVIXL::VisitMultiplyAccumulate(HMultiplyAccumulate* instr) {
   LocationSummary* locations =
-      new (GetGraph()->GetArena()) LocationSummary(load, LocationSummary::kNoCall);
-  locations->SetOut(Location::RequiresRegister());
+      new (GetGraph()->GetArena()) LocationSummary(instr, LocationSummary::kNoCall);
+  locations->SetInAt(HMultiplyAccumulate::kInputAccumulatorIndex,
+                     Location::RequiresRegister());
+  locations->SetInAt(HMultiplyAccumulate::kInputMulLeftIndex, Location::RequiresRegister());
+  locations->SetInAt(HMultiplyAccumulate::kInputMulRightIndex, Location::RequiresRegister());
+  locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
 }
 
-void InstructionCodeGeneratorARMVIXL::VisitLoadException(HLoadException* load) {
-  vixl32::Register out = OutputRegister(load);
-  GetAssembler()->LoadFromOffset(kLoadWord, out, tr, GetExceptionTlsOffset());
-}
+void InstructionCodeGeneratorARMVIXL::VisitMultiplyAccumulate(HMultiplyAccumulate* instr) {
+  vixl32::Register res = OutputRegister(instr);
+  vixl32::Register accumulator =
+      InputRegisterAt(instr, HMultiplyAccumulate::kInputAccumulatorIndex);
+  vixl32::Register mul_left =
+      InputRegisterAt(instr, HMultiplyAccumulate::kInputMulLeftIndex);
+  vixl32::Register mul_right =
+      InputRegisterAt(instr, HMultiplyAccumulate::kInputMulRightIndex);
 
-void LocationsBuilderARMVIXL::VisitClearException(HClearException* clear) {
-  new (GetGraph()->GetArena()) LocationSummary(clear, LocationSummary::kNoCall);
-}
-
-void InstructionCodeGeneratorARMVIXL::VisitClearException(HClearException* clear ATTRIBUTE_UNUSED) {
-  UseScratchRegisterScope temps(GetVIXLAssembler());
-  vixl32::Register temp = temps.Acquire();
-  __ Mov(temp, 0);
-  GetAssembler()->StoreToOffset(kStoreWord, temp, tr, GetExceptionTlsOffset());
-}
-
-void LocationsBuilderARMVIXL::VisitThrow(HThrow* instruction) {
-  LocationSummary* locations =
-      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnMainOnly);
-  InvokeRuntimeCallingConventionARMVIXL calling_convention;
-  locations->SetInAt(0, LocationFrom(calling_convention.GetRegisterAt(0)));
-}
-
-void InstructionCodeGeneratorARMVIXL::VisitThrow(HThrow* instruction) {
-  codegen_->InvokeRuntime(kQuickDeliverException, instruction, instruction->GetDexPc());
-  CheckEntrypointTypes<kQuickDeliverException, void, mirror::Object*>();
-}
-
-void CodeGeneratorARMVIXL::MaybeGenerateReadBarrierSlow(HInstruction* instruction ATTRIBUTE_UNUSED,
-                                                        Location out,
-                                                        Location ref ATTRIBUTE_UNUSED,
-                                                        Location obj ATTRIBUTE_UNUSED,
-                                                        uint32_t offset ATTRIBUTE_UNUSED,
-                                                        Location index ATTRIBUTE_UNUSED) {
-  if (kEmitCompilerReadBarrier) {
-    DCHECK(!kUseBakerReadBarrier);
-    TODO_VIXL32(FATAL);
-  } else if (kPoisonHeapReferences) {
-    GetAssembler()->UnpoisonHeapReference(RegisterFrom(out));
+  if (instr->GetOpKind() == HInstruction::kAdd) {
+    __ Mla(res, mul_left, mul_right, accumulator);
+  } else {
+    __ Mls(res, mul_left, mul_right, accumulator);
   }
 }
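+
+// Illustrative sketch of the two forms above: Mla computes acc + left * right
+// and Mls computes acc - left * right. Unsigned arithmetic is used so the
+// wrap-around matches the 32-bit hardware result; illustration only.
+static inline int32_t MultiplyAccumulateSketch(int32_t acc, int32_t left, int32_t right,
+                                               bool is_add) {
+  uint32_t product = static_cast<uint32_t>(left) * static_cast<uint32_t>(right);
+  uint32_t result = is_add ? static_cast<uint32_t>(acc) + product
+                           : static_cast<uint32_t>(acc) - product;
+  return static_cast<int32_t>(result);
+}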
 
+void LocationsBuilderARMVIXL::VisitBoundType(HBoundType* instruction ATTRIBUTE_UNUSED) {
+  // Nothing to do, this should be removed during prepare for register allocator.
+  LOG(FATAL) << "Unreachable";
+}
+
+void InstructionCodeGeneratorARMVIXL::VisitBoundType(HBoundType* instruction ATTRIBUTE_UNUSED) {
+  // Nothing to do, this should be removed during prepare for register allocator.
+  LOG(FATAL) << "Unreachable";
+}
+
+// Simple implementation of packed switch - generate cascaded compare/jumps.
+void LocationsBuilderARMVIXL::VisitPackedSwitch(HPackedSwitch* switch_instr) {
+  LocationSummary* locations =
+      new (GetGraph()->GetArena()) LocationSummary(switch_instr, LocationSummary::kNoCall);
+  locations->SetInAt(0, Location::RequiresRegister());
+  if (switch_instr->GetNumEntries() > kPackedSwitchCompareJumpThreshold &&
+      codegen_->GetAssembler()->GetVIXLAssembler()->IsUsingT32()) {
+    locations->AddTemp(Location::RequiresRegister());  // We need a temp for the table base.
+    if (switch_instr->GetStartValue() != 0) {
+      locations->AddTemp(Location::RequiresRegister());  // We need a temp for the bias.
+    }
+  }
+}
+
+// TODO(VIXL): Investigate and reach parity with the old ARM code generator.
+void InstructionCodeGeneratorARMVIXL::VisitPackedSwitch(HPackedSwitch* switch_instr) {
+  int32_t lower_bound = switch_instr->GetStartValue();
+  uint32_t num_entries = switch_instr->GetNumEntries();
+  LocationSummary* locations = switch_instr->GetLocations();
+  vixl32::Register value_reg = InputRegisterAt(switch_instr, 0);
+  HBasicBlock* default_block = switch_instr->GetDefaultBlock();
+
+  if (num_entries <= kPackedSwitchCompareJumpThreshold ||
+      !codegen_->GetAssembler()->GetVIXLAssembler()->IsUsingT32()) {
+    // Create a series of compare/jumps.
+    UseScratchRegisterScope temps(GetAssembler()->GetVIXLAssembler());
+    vixl32::Register temp_reg = temps.Acquire();
+    // Note: It is fine for the AddConstantSetFlags() below to use the IP register to temporarily
+    // store the immediate, because IP is also the destination register. For the other
+    // AddConstantSetFlags() and GenerateCompareWithImmediate() calls, the immediate values are
+    // constant and can be encoded in the instruction without using the IP register.
+    __ Adds(temp_reg, value_reg, -lower_bound);
+
+    const ArenaVector<HBasicBlock*>& successors = switch_instr->GetBlock()->GetSuccessors();
+    // Jump to successors[0] if value == lower_bound.
+    __ B(eq, codegen_->GetLabelOf(successors[0]));
+    int32_t last_index = 0;
+    for (; num_entries - last_index > 2; last_index += 2) {
+      __ Adds(temp_reg, temp_reg, -2);
+      // Jump to successors[last_index + 1] if value < case_value[last_index + 2].
+      __ B(lo, codegen_->GetLabelOf(successors[last_index + 1]));
+      // Jump to successors[last_index + 2] if value == case_value[last_index + 2].
+      __ B(eq, codegen_->GetLabelOf(successors[last_index + 2]));
+    }
+    if (num_entries - last_index == 2) {
+      // The last missing case_value.
+      __ Cmp(temp_reg, 1);
+      __ B(eq, codegen_->GetLabelOf(successors[last_index + 1]));
+    }
+
+    // And the default for any other value.
+    if (!codegen_->GoesToNextBlock(switch_instr->GetBlock(), default_block)) {
+      __ B(codegen_->GetLabelOf(default_block));
+    }
+  } else {
+    // Create a table lookup.
+    vixl32::Register table_base = RegisterFrom(locations->GetTemp(0));
+
+    JumpTableARMVIXL* jump_table = codegen_->CreateJumpTable(switch_instr);
+
+    // Remove the bias.
+    vixl32::Register key_reg;
+    if (lower_bound != 0) {
+      key_reg = RegisterFrom(locations->GetTemp(1));
+      __ Sub(key_reg, value_reg, lower_bound);
+    } else {
+      key_reg = value_reg;
+    }
+
+    // Check whether the value is in the table, jump to default block if not.
+    __ Cmp(key_reg, num_entries - 1);
+    __ B(hi, codegen_->GetLabelOf(default_block));
+
+    UseScratchRegisterScope temps(GetAssembler()->GetVIXLAssembler());
+    vixl32::Register jump_offset = temps.Acquire();
+
+    // Load jump offset from the table.
+    __ Adr(table_base, jump_table->GetTableStartLabel());
+    __ Ldr(jump_offset, MemOperand(table_base, key_reg, vixl32::LSL, 2));
+
+    // Jump to the target block by branching to table_base (PC-relative) + offset.
+    vixl32::Register target_address = table_base;
+    __ Add(target_address, table_base, jump_offset);
+    __ Bx(target_address);
+
+    jump_table->EmitTable(codegen_);
+  }
+}
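+
+// Illustrative sketch of the table dispatch above: each table entry is a
+// 32-bit offset relative to table_base, so the target is table_base plus the
+// entry selected by the (bias-adjusted) key. Stand-alone helper for
+// illustration only; the real table is emitted by JumpTableARMVIXL::EmitTable.
+static inline const uint8_t* SwitchTargetSketch(const uint8_t* table_base, uint32_t key) {
+  const int32_t* entries = reinterpret_cast<const int32_t*>(table_base);
+  return table_base + entries[key];  // Ldr (LSL #2), then Add and Bx above.
+}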
+
+// Copy the result of a call into the given target.
+void CodeGeneratorARMVIXL::MoveFromReturnRegister(Location trg, Primitive::Type type) {
+  if (!trg.IsValid()) {
+    DCHECK_EQ(type, Primitive::kPrimVoid);
+    return;
+  }
+
+  DCHECK_NE(type, Primitive::kPrimVoid);
+
+  Location return_loc = InvokeDexCallingConventionVisitorARM().GetReturnLocation(type);
+  if (return_loc.Equals(trg)) {
+    return;
+  }
+
+  // TODO: Consider pairs in the parallel move resolver, then this could be nicely merged
+  //       with the last branch.
+  if (type == Primitive::kPrimLong) {
+    TODO_VIXL32(FATAL);
+  } else if (type == Primitive::kPrimDouble) {
+    TODO_VIXL32(FATAL);
+  } else {
+    // Let the parallel move resolver take care of all of this.
+    HParallelMove parallel_move(GetGraph()->GetArena());
+    parallel_move.AddMove(return_loc, trg, type, nullptr);
+    GetMoveResolver()->EmitNativeCode(&parallel_move);
+  }
+}
+
+void LocationsBuilderARMVIXL::VisitClassTableGet(
+    HClassTableGet* instruction ATTRIBUTE_UNUSED) {
+  TODO_VIXL32(FATAL);
+}
+
+void InstructionCodeGeneratorARMVIXL::VisitClassTableGet(
+    HClassTableGet* instruction ATTRIBUTE_UNUSED) {
+  TODO_VIXL32(FATAL);
+}
+
+
 #undef __
 #undef QUICK_ENTRY_POINT
 #undef TODO_VIXL32
diff --git a/compiler/optimizing/code_generator_arm_vixl.h b/compiler/optimizing/code_generator_arm_vixl.h
index c749f86..302ee38 100644
--- a/compiler/optimizing/code_generator_arm_vixl.h
+++ b/compiler/optimizing/code_generator_arm_vixl.h
@@ -18,6 +18,7 @@
 #define ART_COMPILER_OPTIMIZING_CODE_GENERATOR_ARM_VIXL_H_
 
 #include "code_generator_arm.h"
+#include "common_arm.h"
 #include "utils/arm/assembler_arm_vixl.h"
 
 // TODO(VIXL): make vixl clean wrt -Wshadow.
@@ -105,24 +106,41 @@
   M(Above)                                      \
   M(AboveOrEqual)                               \
   M(Add)                                        \
+  M(And)                                        \
+  M(ArrayGet)                                   \
   M(ArrayLength)                                \
+  M(ArraySet)                                   \
   M(Below)                                      \
   M(BelowOrEqual)                               \
+  M(BitwiseNegatedRight)                        \
+  M(BooleanNot)                                 \
+  M(BoundsCheck)                                \
+  M(BoundType)                                  \
+  M(CheckCast)                                  \
+  M(ClassTableGet)                              \
   M(ClearException)                             \
   M(ClinitCheck)                                \
+  M(Compare)                                    \
   M(CurrentMethod)                              \
+  M(Deoptimize)                                 \
   M(Div)                                        \
   M(DivZeroCheck)                               \
+  M(DoubleConstant)                             \
   M(Equal)                                      \
   M(Exit)                                       \
+  M(FloatConstant)                              \
   M(Goto)                                       \
   M(GreaterThan)                                \
   M(GreaterThanOrEqual)                         \
   M(If)                                         \
   M(InstanceFieldGet)                           \
   M(InstanceFieldSet)                           \
+  M(InstanceOf)                                 \
   M(IntConstant)                                \
+  M(IntermediateAddress)                        \
+  M(InvokeInterface)                            \
   M(InvokeStaticOrDirect)                       \
+  M(InvokeUnresolved)                           \
   M(InvokeVirtual)                              \
   M(LessThan)                                   \
   M(LessThanOrEqual)                            \
@@ -131,53 +149,36 @@
   M(LoadString)                                 \
   M(LongConstant)                               \
   M(MemoryBarrier)                              \
+  M(MonitorOperation)                           \
   M(Mul)                                        \
+  M(MultiplyAccumulate)                         \
+  M(NativeDebugInfo)                            \
+  M(Neg)                                        \
   M(NewArray)                                   \
   M(NewInstance)                                \
   M(Not)                                        \
   M(NotEqual)                                   \
   M(NullCheck)                                  \
   M(NullConstant)                               \
+  M(Or)                                         \
+  M(PackedSwitch)                               \
   M(ParallelMove)                               \
   M(ParameterValue)                             \
   M(Phi)                                        \
+  M(Rem)                                        \
   M(Return)                                     \
   M(ReturnVoid)                                 \
+  M(Ror)                                        \
   M(Select)                                     \
+  M(Shl)                                        \
+  M(Shr)                                        \
   M(StaticFieldGet)                             \
+  M(StaticFieldSet)                             \
   M(Sub)                                        \
   M(SuspendCheck)                               \
   M(Throw)                                      \
   M(TryBoundary)                                \
   M(TypeConversion)                             \
-
-// TODO: Remove once the VIXL32 backend is implemented completely.
-#define FOR_EACH_UNIMPLEMENTED_INSTRUCTION(M)   \
-  M(And)                                        \
-  M(ArrayGet)                                   \
-  M(ArraySet)                                   \
-  M(BooleanNot)                                 \
-  M(BoundsCheck)                                \
-  M(BoundType)                                  \
-  M(CheckCast)                                  \
-  M(ClassTableGet)                              \
-  M(Compare)                                    \
-  M(Deoptimize)                                 \
-  M(DoubleConstant)                             \
-  M(FloatConstant)                              \
-  M(InstanceOf)                                 \
-  M(InvokeInterface)                            \
-  M(InvokeUnresolved)                           \
-  M(MonitorOperation)                           \
-  M(NativeDebugInfo)                            \
-  M(Neg)                                        \
-  M(Or)                                         \
-  M(PackedSwitch)                               \
-  M(Rem)                                        \
-  M(Ror)                                        \
-  M(Shl)                                        \
-  M(Shr)                                        \
-  M(StaticFieldSet)                             \
   M(UnresolvedInstanceFieldGet)                 \
   M(UnresolvedInstanceFieldSet)                 \
   M(UnresolvedStaticFieldGet)                   \
@@ -185,8 +186,40 @@
   M(UShr)                                       \
   M(Xor)                                        \
 
+// TODO: Remove once the VIXL32 backend is implemented completely.
+#define FOR_EACH_UNIMPLEMENTED_INSTRUCTION(M)   \
+  M(ArmDexCacheArraysBase)                      \
+
 class CodeGeneratorARMVIXL;
 
+class JumpTableARMVIXL : public DeletableArenaObject<kArenaAllocSwitchTable> {
+ public:
+  typedef vixl::aarch32::Literal<int32_t> IntLiteral;
+
+  explicit JumpTableARMVIXL(HPackedSwitch* switch_instr)
+      : switch_instr_(switch_instr),
+        table_start_(),
+        bb_addresses_(switch_instr->GetArena()->Adapter(kArenaAllocCodeGenerator)) {
+    uint32_t num_entries = switch_instr_->GetNumEntries();
+    for (uint32_t i = 0; i < num_entries; i++) {
+      IntLiteral* lit = new IntLiteral(0);
+      bb_addresses_.emplace_back(lit);
+    }
+  }
+
+  vixl::aarch32::Label* GetTableStartLabel() { return &table_start_; }
+
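+  // The dispatch code expects each 32-bit entry to hold the offset from the table
+  // start to the target block; the entries start as zero placeholders and are
+  // fixed up once block addresses are known.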
+  void EmitTable(CodeGeneratorARMVIXL* codegen);
+  void FixTable(CodeGeneratorARMVIXL* codegen);
+
+ private:
+  HPackedSwitch* const switch_instr_;
+  vixl::aarch32::Label table_start_;
+  ArenaVector<std::unique_ptr<IntLiteral>> bb_addresses_;
+
+  DISALLOW_COPY_AND_ASSIGN(JumpTableARMVIXL);
+};
+
 class InvokeRuntimeCallingConventionARMVIXL
     : public CallingConvention<vixl::aarch32::Register, vixl::aarch32::SRegister> {
  public:
@@ -215,6 +248,38 @@
   DISALLOW_COPY_AND_ASSIGN(InvokeDexCallingConventionARMVIXL);
 };
 
+class FieldAccessCallingConventionARMVIXL : public FieldAccessCallingConvention {
+ public:
+  FieldAccessCallingConventionARMVIXL() {}
+
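+  // Register assignments, per the accessors below: field index in r0, object in r1,
+  // returns in r0 (or the r0/r1 pair for 64-bit types), set values in r2 for instance
+  // fields and r1 for static fields (r2/r3 for 64-bit types), FP values in s0 (or s0/s1).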
+  Location GetObjectLocation() const OVERRIDE {
+    return helpers::LocationFrom(vixl::aarch32::r1);
+  }
+  Location GetFieldIndexLocation() const OVERRIDE {
+    return helpers::LocationFrom(vixl::aarch32::r0);
+  }
+  Location GetReturnLocation(Primitive::Type type) const OVERRIDE {
+    return Primitive::Is64BitType(type)
+        ? helpers::LocationFrom(vixl::aarch32::r0, vixl::aarch32::r1)
+        : helpers::LocationFrom(vixl::aarch32::r0);
+  }
+  Location GetSetValueLocation(Primitive::Type type, bool is_instance) const OVERRIDE {
+    return Primitive::Is64BitType(type)
+        ? helpers::LocationFrom(vixl::aarch32::r2, vixl::aarch32::r3)
+        : (is_instance
+            ? helpers::LocationFrom(vixl::aarch32::r2)
+            : helpers::LocationFrom(vixl::aarch32::r1));
+  }
+  Location GetFpuLocation(Primitive::Type type) const OVERRIDE {
+    return Primitive::Is64BitType(type)
+        ? helpers::LocationFrom(vixl::aarch32::s0, vixl::aarch32::s1)
+        : helpers::LocationFrom(vixl::aarch32::s0);
+  }
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(FieldAccessCallingConventionARMVIXL);
+};
+
 class SlowPathCodeARMVIXL : public SlowPathCode {
  public:
   explicit SlowPathCodeARMVIXL(HInstruction* instruction)
@@ -246,7 +311,7 @@
   ArmVIXLAssembler* GetAssembler() const;
 
  private:
-  void Exchange(Register reg, int mem);
+  void Exchange(vixl32::Register reg, int mem);
   void Exchange(int mem1, int mem2);
 
   CodeGeneratorARMVIXL* const codegen_;
@@ -276,10 +341,19 @@
   }
 
   void HandleInvoke(HInvoke* invoke);
+  void HandleBitwiseOperation(HBinaryOperation* operation, Opcode opcode);
   void HandleCondition(HCondition* condition);
+  void HandleIntegerRotate(LocationSummary* locations);
+  void HandleLongRotate(LocationSummary* locations);
+  void HandleShift(HBinaryOperation* operation);
   void HandleFieldSet(HInstruction* instruction, const FieldInfo& field_info);
   void HandleFieldGet(HInstruction* instruction, const FieldInfo& field_info);
 
+  Location ArithmeticZeroOrFpuRegister(HInstruction* input);
+  Location ArmEncodableConstantOrRegister(HInstruction* constant, Opcode opcode);
+  bool CanEncodeConstantAsImmediate(HConstant* input_cst, Opcode opcode);
+  bool CanEncodeConstantAsImmediate(uint32_t value, Opcode opcode, SetCc set_cc = kCcDontCare);
+
   CodeGeneratorARMVIXL* const codegen_;
   InvokeDexCallingConventionVisitorARM parameter_visitor_;
 
@@ -309,7 +383,14 @@
   void GenerateClassInitializationCheck(LoadClassSlowPathARMVIXL* slow_path,
                                         vixl32::Register class_reg);
   void HandleGoto(HInstruction* got, HBasicBlock* successor);
+  void GenerateAndConst(vixl::aarch32::Register out, vixl::aarch32::Register first, uint32_t value);
+  void GenerateOrrConst(vixl::aarch32::Register out, vixl::aarch32::Register first, uint32_t value);
+  void GenerateEorConst(vixl::aarch32::Register out, vixl::aarch32::Register first, uint32_t value);
+  void HandleBitwiseOperation(HBinaryOperation* operation);
   void HandleCondition(HCondition* condition);
+  void HandleIntegerRotate(HRor* ror);
+  void HandleLongRotate(HRor* ror);
+  void HandleShift(HBinaryOperation* operation);
 
   void GenerateWideAtomicStore(vixl::aarch32::Register addr,
                                uint32_t offset,
@@ -328,6 +409,35 @@
                       bool value_can_be_null);
   void HandleFieldGet(HInstruction* instruction, const FieldInfo& field_info);
 
+  // Generate a heap reference load using one register `out`:
+  //
+  //   out <- *(out + offset)
+  //
+  // while honoring heap poisoning and/or read barriers (if any).
+  //
+  // Location `maybe_temp` is used when generating a read barrier and
+  // shall be a register in that case; it may be an invalid location
+  // otherwise.
+  void GenerateReferenceLoadOneRegister(HInstruction* instruction,
+                                        Location out,
+                                        uint32_t offset,
+                                        Location maybe_temp);
+  // Generate a heap reference load using two different registers
+  // `out` and `obj`:
+  //
+  //   out <- *(obj + offset)
+  //
+  // while honoring heap poisoning and/or read barriers (if any).
+  //
+  // Location `maybe_temp` is used when generating a Baker's (fast
+  // path) read barrier and shall be a register in that case; it may
+  // be an invalid location otherwise.
+  void GenerateReferenceLoadTwoRegisters(HInstruction* instruction,
+                                         Location out,
+                                         Location obj,
+                                         uint32_t offset,
+                                         Location maybe_temp);
+
   // Generate a GC root reference load:
   //
   //   root <- *(obj + offset)
@@ -337,7 +447,7 @@
                                Location root,
                                vixl::aarch32::Register obj,
                                uint32_t offset,
-                               bool requires_read_barrier = kEmitCompilerReadBarrier);
+                               bool requires_read_barrier);
   void GenerateTestAndBranch(HInstruction* instruction,
                              size_t condition_input_index,
                              vixl::aarch32::Label* true_target,
@@ -406,10 +516,16 @@
     return block_entry_label->GetLocation();
   }
 
+  JumpTableARMVIXL* CreateJumpTable(HPackedSwitch* switch_instr) {
+    jump_tables_.emplace_back(new (GetGraph()->GetArena()) JumpTableARMVIXL(switch_instr));
+    return jump_tables_.back().get();
+  }
+
   HGraphVisitor* GetLocationBuilder() OVERRIDE { return &location_builder_; }
 
   HGraphVisitor* GetInstructionVisitor() OVERRIDE { return &instruction_visitor_; }
 
+  void FixJumpTables();
   void GenerateMemoryBarrier(MemBarrierKind kind);
   void Finalize(CodeAllocator* allocator) OVERRIDE;
   void SetupBlockedRegisters() const OVERRIDE;
@@ -422,6 +538,17 @@
   // Helper method to move a 32-bit value between two locations.
   void Move32(Location destination, Location source);
 
+  void LoadFromShiftedRegOffset(Primitive::Type type,
+                                Location out_loc,
+                                vixl::aarch32::Register base,
+                                vixl::aarch32::Register reg_index,
+                                vixl::aarch32::Condition cond = vixl::aarch32::al);
+  void StoreToShiftedRegOffset(Primitive::Type type,
+                               Location out_loc,
+                               vixl::aarch32::Register base,
+                               vixl::aarch32::Register reg_index,
+                               vixl::aarch32::Condition cond = vixl::aarch32::al);
+
   const ArmInstructionSetFeatures& GetInstructionSetFeatures() const { return isa_features_; }
 
   vixl::aarch32::Label* GetFrameEntryLabel() { return &frame_entry_label_; }
@@ -446,11 +573,7 @@
     return 0;
   }
 
-  size_t RestoreFloatingPointRegister(size_t stack_index ATTRIBUTE_UNUSED,
-                                      uint32_t reg_id ATTRIBUTE_UNUSED) OVERRIDE {
-    UNIMPLEMENTED(INFO) << "TODO: RestoreFloatingPointRegister";
-    return 0;
-  }
+  size_t RestoreFloatingPointRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
 
   bool NeedsTwoRegisters(Primitive::Type type) const OVERRIDE {
     return type == Primitive::kPrimDouble || type == Primitive::kPrimLong;
@@ -486,6 +609,62 @@
                   vixl::aarch32::Register value,
                   bool can_be_null);
 
+  // Fast path implementation of ReadBarrier::Barrier for a heap
+  // reference field load when Baker's read barriers are used.
+  void GenerateFieldLoadWithBakerReadBarrier(HInstruction* instruction,
+                                             Location ref,
+                                             vixl::aarch32::Register obj,
+                                             uint32_t offset,
+                                             Location temp,
+                                             bool needs_null_check);
+
+  // Factored implementation, used by GenerateFieldLoadWithBakerReadBarrier,
+  // GenerateArrayLoadWithBakerReadBarrier and some intrinsics.
+  //
+  // Load the object reference located at the address
+  // `obj + offset + (index << scale_factor)`, held by object `obj`, into
+  // `ref`, and mark it if needed.
+  //
+  // If `always_update_field` is true, the value of the reference is
+  // atomically updated in the holder (`obj`).  This operation
+  // requires an extra temporary register, which must be provided as a
+  // non-null pointer (`temp2`).
+  void GenerateReferenceLoadWithBakerReadBarrier(HInstruction* instruction,
+                                                 Location ref,
+                                                 vixl::aarch32::Register obj,
+                                                 uint32_t offset,
+                                                 Location index,
+                                                 ScaleFactor scale_factor,
+                                                 Location temp,
+                                                 bool needs_null_check,
+                                                 bool always_update_field = false,
+                                                 vixl::aarch32::Register* temp2 = nullptr);
+
+  // Generate a read barrier for a heap reference within `instruction`
+  // using a slow path.
+  //
+  // A read barrier for an object reference read from the heap is
+  // implemented as a call to the artReadBarrierSlow runtime entry
+  // point, which is passed the values in locations `ref`, `obj`, and
+  // `offset`:
+  //
+  //   mirror::Object* artReadBarrierSlow(mirror::Object* ref,
+  //                                      mirror::Object* obj,
+  //                                      uint32_t offset);
+  //
+  // The `out` location contains the value returned by
+  // artReadBarrierSlow.
+  //
+  // When `index` is provided (i.e. for array accesses), the offset
+  // value passed to artReadBarrierSlow is adjusted to take `index`
+  // into account.
+  void GenerateReadBarrierSlow(HInstruction* instruction,
+                               Location out,
+                               Location ref,
+                               Location obj,
+                               uint32_t offset,
+                               Location index = Location::NoLocation());
+
   // If read barriers are enabled, generate a read barrier for a heap
   // reference using a slow path. If heap poisoning is enabled, also
   // unpoison the reference in `out`.
@@ -528,6 +707,7 @@
   ArenaDeque<vixl::aarch32::Label> block_labels_;  // Indexed by block id.
   vixl::aarch32::Label frame_entry_label_;
 
+  ArenaVector<std::unique_ptr<JumpTableARMVIXL>> jump_tables_;
   LocationsBuilderARMVIXL location_builder_;
   InstructionCodeGeneratorARMVIXL instruction_visitor_;
   ParallelMoveResolverARMVIXL move_resolver_;
diff --git a/compiler/optimizing/code_generator_mips.cc b/compiler/optimizing/code_generator_mips.cc
index f4a804f..573bb50 100644
--- a/compiler/optimizing/code_generator_mips.cc
+++ b/compiler/optimizing/code_generator_mips.cc
@@ -378,7 +378,6 @@
 
   void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
     LocationSummary* locations = instruction_->GetLocations();
-    Location object_class = instruction_->IsCheckCast() ? locations->GetTemp(0) : locations->Out();
     uint32_t dex_pc = instruction_->GetDexPc();
     DCHECK(instruction_->IsCheckCast()
            || !locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
@@ -390,24 +389,22 @@
     // We're moving two locations to locations that could overlap, so we need a parallel
     // move resolver.
     InvokeRuntimeCallingConvention calling_convention;
-    codegen->EmitParallelMoves(locations->InAt(1),
+    codegen->EmitParallelMoves(locations->InAt(0),
                                Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
                                Primitive::kPrimNot,
-                               object_class,
+                               locations->InAt(1),
                                Location::RegisterLocation(calling_convention.GetRegisterAt(1)),
                                Primitive::kPrimNot);
-
     if (instruction_->IsInstanceOf()) {
       mips_codegen->InvokeRuntime(kQuickInstanceofNonTrivial, instruction_, dex_pc, this);
-      CheckEntrypointTypes<
-          kQuickInstanceofNonTrivial, size_t, const mirror::Class*, const mirror::Class*>();
+      CheckEntrypointTypes<kQuickInstanceofNonTrivial, size_t, mirror::Object*, mirror::Class*>();
       Primitive::Type ret_type = instruction_->GetType();
       Location ret_loc = calling_convention.GetReturnLocation(ret_type);
       mips_codegen->MoveLocation(locations->Out(), ret_loc, ret_type);
     } else {
       DCHECK(instruction_->IsCheckCast());
-      mips_codegen->InvokeRuntime(kQuickCheckCast, instruction_, dex_pc, this);
-      CheckEntrypointTypes<kQuickCheckCast, void, const mirror::Class*, const mirror::Class*>();
+      mips_codegen->InvokeRuntime(kQuickCheckInstanceOf, instruction_, dex_pc, this);
+      CheckEntrypointTypes<kQuickCheckInstanceOf, void, mirror::Object*, mirror::Class*>();
     }
 
     RestoreLiveRegisters(codegen, locations);
@@ -563,8 +560,7 @@
     DCHECK_EQ(type, Primitive::kPrimFloat);  // Can only swap a float.
     FRegister f1 = loc1.IsFpuRegister() ? loc1.AsFpuRegister<FRegister>()
                                         : loc2.AsFpuRegister<FRegister>();
-    Register r2 = loc1.IsRegister() ? loc1.AsRegister<Register>()
-                                    : loc2.AsRegister<Register>();
+    Register r2 = loc1.IsRegister() ? loc1.AsRegister<Register>() : loc2.AsRegister<Register>();
     __ Move(TMP, r2);
     __ Mfc1(r2, f1);
     __ Mtc1(TMP, f1);
@@ -605,10 +601,8 @@
     Exchange(loc1.GetStackIndex(), loc2.GetStackIndex(), /* double_slot */ true);
   } else if ((loc1.IsRegister() && loc2.IsStackSlot()) ||
              (loc1.IsStackSlot() && loc2.IsRegister())) {
-    Register reg = loc1.IsRegister() ? loc1.AsRegister<Register>()
-                                     : loc2.AsRegister<Register>();
-    intptr_t offset = loc1.IsStackSlot() ? loc1.GetStackIndex()
-                                         : loc2.GetStackIndex();
+    Register reg = loc1.IsRegister() ? loc1.AsRegister<Register>() : loc2.AsRegister<Register>();
+    intptr_t offset = loc1.IsStackSlot() ? loc1.GetStackIndex() : loc2.GetStackIndex();
     __ Move(TMP, reg);
     __ LoadFromOffset(kLoadWord, reg, SP, offset);
     __ StoreToOffset(kStoreWord, TMP, SP, offset);
@@ -618,8 +612,7 @@
                                            : loc2.AsRegisterPairLow<Register>();
     Register reg_h = loc1.IsRegisterPair() ? loc1.AsRegisterPairHigh<Register>()
                                            : loc2.AsRegisterPairHigh<Register>();
-    intptr_t offset_l = loc1.IsDoubleStackSlot() ? loc1.GetStackIndex()
-                                                 : loc2.GetStackIndex();
+    intptr_t offset_l = loc1.IsDoubleStackSlot() ? loc1.GetStackIndex() : loc2.GetStackIndex();
     intptr_t offset_h = loc1.IsDoubleStackSlot() ? loc1.GetHighStackIndex(kMipsWordSize)
                                                  : loc2.GetHighStackIndex(kMipsWordSize);
     __ Move(TMP, reg_l);
@@ -628,6 +621,20 @@
     __ Move(TMP, reg_h);
     __ LoadFromOffset(kLoadWord, reg_h, SP, offset_h);
     __ StoreToOffset(kStoreWord, TMP, SP, offset_h);
+  } else if (loc1.IsFpuRegister() || loc2.IsFpuRegister()) {
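+    // Swap an FPU register with a stack slot, using FTMP as scratch.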
+    FRegister reg = loc1.IsFpuRegister() ? loc1.AsFpuRegister<FRegister>()
+                                         : loc2.AsFpuRegister<FRegister>();
+    intptr_t offset = loc1.IsFpuRegister() ? loc2.GetStackIndex() : loc1.GetStackIndex();
+    if (type == Primitive::kPrimFloat) {
+      __ MovS(FTMP, reg);
+      __ LoadSFromOffset(reg, SP, offset);
+      __ StoreSToOffset(FTMP, SP, offset);
+    } else {
+      DCHECK_EQ(type, Primitive::kPrimDouble);
+      __ MovD(FTMP, reg);
+      __ LoadDFromOffset(reg, SP, offset);
+      __ StoreDToOffset(FTMP, SP, offset);
+    }
   } else {
     LOG(FATAL) << "Swap between " << loc1 << " and " << loc2 << " is unsupported";
   }
@@ -5194,10 +5201,6 @@
       break;
     case HLoadString::LoadKind::kBootImageAddress:
       break;
-    case HLoadString::LoadKind::kDexCacheAddress:
-      DCHECK(Runtime::Current()->UseJitCompilation());
-      fallback_load = false;
-      break;
     case HLoadString::LoadKind::kBssEntry:
       DCHECK(!Runtime::Current()->UseJitCompilation());
       break;
@@ -5695,7 +5698,12 @@
     default:
       break;
   }
-  locations->SetOut(Location::RequiresRegister());
+  if (load_kind == HLoadString::LoadKind::kDexCacheViaMethod) {
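+    // Pin the output to the runtime calling convention's return register so that the
+    // result needs no extra move (this load kind may end up calling into the runtime).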
+    InvokeRuntimeCallingConvention calling_convention;
+    locations->SetOut(calling_convention.GetReturnLocation(load->GetType()));
+  } else {
+    locations->SetOut(Location::RequiresRegister());
+  }
 }
 
 void InstructionCodeGeneratorMIPS::VisitLoadString(HLoadString* load) {
diff --git a/compiler/optimizing/code_generator_mips64.cc b/compiler/optimizing/code_generator_mips64.cc
index 010bf24..1a54935 100644
--- a/compiler/optimizing/code_generator_mips64.cc
+++ b/compiler/optimizing/code_generator_mips64.cc
@@ -322,7 +322,7 @@
 
   void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
     LocationSummary* locations = instruction_->GetLocations();
-    Location object_class = instruction_->IsCheckCast() ? locations->GetTemp(0) : locations->Out();
+
     uint32_t dex_pc = instruction_->GetDexPc();
     DCHECK(instruction_->IsCheckCast()
            || !locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
@@ -334,24 +334,22 @@
     // We're moving two locations to locations that could overlap, so we need a parallel
     // move resolver.
     InvokeRuntimeCallingConvention calling_convention;
-    codegen->EmitParallelMoves(locations->InAt(1),
+    codegen->EmitParallelMoves(locations->InAt(0),
                                Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
                                Primitive::kPrimNot,
-                               object_class,
+                               locations->InAt(1),
                                Location::RegisterLocation(calling_convention.GetRegisterAt(1)),
                                Primitive::kPrimNot);
-
     if (instruction_->IsInstanceOf()) {
       mips64_codegen->InvokeRuntime(kQuickInstanceofNonTrivial, instruction_, dex_pc, this);
-      CheckEntrypointTypes<
-          kQuickInstanceofNonTrivial, size_t, const mirror::Class*, const mirror::Class*>();
+      CheckEntrypointTypes<kQuickInstanceofNonTrivial, size_t, mirror::Object*, mirror::Class*>();
       Primitive::Type ret_type = instruction_->GetType();
       Location ret_loc = calling_convention.GetReturnLocation(ret_type);
       mips64_codegen->MoveLocation(locations->Out(), ret_loc, ret_type);
     } else {
       DCHECK(instruction_->IsCheckCast());
-      mips64_codegen->InvokeRuntime(kQuickCheckCast, instruction_, dex_pc, this);
-      CheckEntrypointTypes<kQuickCheckCast, void, const mirror::Class*, const mirror::Class*>();
+      mips64_codegen->InvokeRuntime(kQuickCheckInstanceOf, instruction_, dex_pc, this);
+      CheckEntrypointTypes<kQuickCheckInstanceOf, void, mirror::Object*, mirror::Class*>();
     }
 
     RestoreLiveRegisters(codegen, locations);
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index 02c1c3b..7e4ad26 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -151,7 +151,7 @@
       }
       __ movl(length_loc.AsRegister<Register>(), array_len);
       if (mirror::kUseStringCompression) {
-        __ andl(length_loc.AsRegister<Register>(), Immediate(INT32_MAX));
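+        // The count field holds (length << 1) | flag, with the flag bit clear for a
+        // compressed string; shifting right by one recovers the character count.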
+        __ shrl(length_loc.AsRegister<Register>(), Immediate(1));
       }
     }
     x86_codegen->EmitParallelMoves(
@@ -312,8 +312,6 @@
 
   void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
     LocationSummary* locations = instruction_->GetLocations();
-    Location object_class = instruction_->IsCheckCast() ? locations->GetTemp(0)
-                                                        : locations->Out();
     DCHECK(instruction_->IsCheckCast()
            || !locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
 
@@ -327,25 +325,25 @@
     // We're moving two locations to locations that could overlap, so we need a parallel
     // move resolver.
     InvokeRuntimeCallingConvention calling_convention;
-    x86_codegen->EmitParallelMoves(
-        locations->InAt(1),
-        Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
-        Primitive::kPrimNot,
-        object_class,
-        Location::RegisterLocation(calling_convention.GetRegisterAt(1)),
-        Primitive::kPrimNot);
-
+    x86_codegen->EmitParallelMoves(locations->InAt(0),
+                                   Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
+                                   Primitive::kPrimNot,
+                                   locations->InAt(1),
+                                   Location::RegisterLocation(calling_convention.GetRegisterAt(1)),
+                                   Primitive::kPrimNot);
     if (instruction_->IsInstanceOf()) {
       x86_codegen->InvokeRuntime(kQuickInstanceofNonTrivial,
                                  instruction_,
                                  instruction_->GetDexPc(),
                                  this);
-      CheckEntrypointTypes<
-          kQuickInstanceofNonTrivial, size_t, const mirror::Class*, const mirror::Class*>();
+      CheckEntrypointTypes<kQuickInstanceofNonTrivial, size_t, mirror::Object*, mirror::Class*>();
     } else {
       DCHECK(instruction_->IsCheckCast());
-      x86_codegen->InvokeRuntime(kQuickCheckCast, instruction_, instruction_->GetDexPc(), this);
-      CheckEntrypointTypes<kQuickCheckCast, void, const mirror::Class*, const mirror::Class*>();
+      x86_codegen->InvokeRuntime(kQuickCheckInstanceOf,
+                                 instruction_,
+                                 instruction_->GetDexPc(),
+                                 this);
+      CheckEntrypointTypes<kQuickCheckInstanceOf, void, mirror::Object*, mirror::Class*>();
     }
 
     if (!is_fatal_) {
@@ -426,11 +424,25 @@
   DISALLOW_COPY_AND_ASSIGN(ArraySetSlowPathX86);
 };
 
-// Slow path marking an object during a read barrier.
+// Slow path marking an object reference `ref` during a read
+// barrier. The field `obj.field` in the object `obj` holding this
+// reference does not get updated by this slow path after marking (see
+// ReadBarrierMarkAndUpdateFieldSlowPathX86 below for that).
+//
+// This means that after the execution of this slow path, `ref` will
+// always be up-to-date, but `obj.field` may not; i.e., after the
+// flip, `ref` will be a to-space reference, but `obj.field` will
+// probably still be a from-space reference (unless it gets updated by
+// another thread, or if another thread installed another object
+// reference (different from `ref`) in `obj.field`).
 class ReadBarrierMarkSlowPathX86 : public SlowPathCode {
  public:
-  ReadBarrierMarkSlowPathX86(HInstruction* instruction, Location obj, bool unpoison)
-      : SlowPathCode(instruction), obj_(obj), unpoison_(unpoison) {
+  ReadBarrierMarkSlowPathX86(HInstruction* instruction,
+                             Location ref,
+                             bool unpoison_ref_before_marking)
+      : SlowPathCode(instruction),
+        ref_(ref),
+        unpoison_ref_before_marking_(unpoison_ref_before_marking) {
     DCHECK(kEmitCompilerReadBarrier);
   }
 
@@ -438,9 +450,9 @@
 
   void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
     LocationSummary* locations = instruction_->GetLocations();
-    Register reg = obj_.AsRegister<Register>();
+    Register ref_reg = ref_.AsRegister<Register>();
     DCHECK(locations->CanCall());
-    DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(reg));
+    DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(ref_reg)) << ref_reg;
     DCHECK(instruction_->IsInstanceFieldGet() ||
            instruction_->IsStaticFieldGet() ||
            instruction_->IsArrayGet() ||
@@ -455,44 +467,211 @@
         << instruction_->DebugName();
 
     __ Bind(GetEntryLabel());
-    if (unpoison_) {
+    if (unpoison_ref_before_marking_) {
       // Object* ref = ref_addr->AsMirrorPtr()
-      __ MaybeUnpoisonHeapReference(reg);
+      __ MaybeUnpoisonHeapReference(ref_reg);
     }
     // No need to save live registers; it's taken care of by the
     // entrypoint. Also, there is no need to update the stack mask,
     // as this runtime call will not trigger a garbage collection.
     CodeGeneratorX86* x86_codegen = down_cast<CodeGeneratorX86*>(codegen);
-    DCHECK_NE(reg, ESP);
-    DCHECK(0 <= reg && reg < kNumberOfCpuRegisters) << reg;
+    DCHECK_NE(ref_reg, ESP);
+    DCHECK(0 <= ref_reg && ref_reg < kNumberOfCpuRegisters) << ref_reg;
     // "Compact" slow path, saving two moves.
     //
     // Instead of using the standard runtime calling convention (input
     // and output in EAX):
     //
-    //   EAX <- obj
+    //   EAX <- ref
     //   EAX <- ReadBarrierMark(EAX)
-    //   obj <- EAX
+    //   ref <- EAX
     //
-    // we just use rX (the register holding `obj`) as input and output
+    // we just use rX (the register containing `ref`) as input and output
     // of a dedicated entrypoint:
     //
     //   rX <- ReadBarrierMarkRegX(rX)
     //
     int32_t entry_point_offset =
-        CodeGenerator::GetReadBarrierMarkEntryPointsOffset<kX86PointerSize>(reg);
+        CodeGenerator::GetReadBarrierMarkEntryPointsOffset<kX86PointerSize>(ref_reg);
     // This runtime call does not require a stack map.
     x86_codegen->InvokeRuntimeWithoutRecordingPcInfo(entry_point_offset, instruction_, this);
     __ jmp(GetExitLabel());
   }
 
  private:
-  const Location obj_;
-  const bool unpoison_;
+  // The location (register) of the marked object reference.
+  const Location ref_;
+  // Should the reference in `ref_` be unpoisoned prior to marking it?
+  const bool unpoison_ref_before_marking_;
 
   DISALLOW_COPY_AND_ASSIGN(ReadBarrierMarkSlowPathX86);
 };
 
+// Slow path marking an object reference `ref` during a read barrier,
+// and if needed, atomically updating the field `obj.field` in the
+// object `obj` holding this reference after marking (contrary to
+// ReadBarrierMarkSlowPathX86 above, which never tries to update
+// `obj.field`).
+//
+// This means that after the execution of this slow path, both `ref`
+// and `obj.field` will be up-to-date; i.e., after the flip, both will
+// hold the same to-space reference (unless another thread installed
+// another object reference (different from `ref`) in `obj.field`).
+class ReadBarrierMarkAndUpdateFieldSlowPathX86 : public SlowPathCode {
+ public:
+  ReadBarrierMarkAndUpdateFieldSlowPathX86(HInstruction* instruction,
+                                           Location ref,
+                                           Register obj,
+                                           const Address& field_addr,
+                                           bool unpoison_ref_before_marking,
+                                           Register temp)
+      : SlowPathCode(instruction),
+        ref_(ref),
+        obj_(obj),
+        field_addr_(field_addr),
+        unpoison_ref_before_marking_(unpoison_ref_before_marking),
+        temp_(temp) {
+    DCHECK(kEmitCompilerReadBarrier);
+  }
+
+  const char* GetDescription() const OVERRIDE { return "ReadBarrierMarkAndUpdateFieldSlowPathX86"; }
+
+  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+    LocationSummary* locations = instruction_->GetLocations();
+    Register ref_reg = ref_.AsRegister<Register>();
+    DCHECK(locations->CanCall());
+    DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(ref_reg)) << ref_reg;
+    // This slow path is only used by the UnsafeCASObject intrinsic.
+    DCHECK((instruction_->IsInvokeVirtual() && instruction_->GetLocations()->Intrinsified()))
+        << "Unexpected instruction in read barrier marking and field updating slow path: "
+        << instruction_->DebugName();
+    DCHECK(instruction_->GetLocations()->Intrinsified());
+    DCHECK_EQ(instruction_->AsInvoke()->GetIntrinsic(), Intrinsics::kUnsafeCASObject);
+
+    __ Bind(GetEntryLabel());
+    if (unpoison_ref_before_marking_) {
+      // Object* ref = ref_addr->AsMirrorPtr()
+      __ MaybeUnpoisonHeapReference(ref_reg);
+    }
+
+    // Save the old (unpoisoned) reference.
+    __ movl(temp_, ref_reg);
+
+    // No need to save live registers; it's taken care of by the
+    // entrypoint. Also, there is no need to update the stack mask,
+    // as this runtime call will not trigger a garbage collection.
+    CodeGeneratorX86* x86_codegen = down_cast<CodeGeneratorX86*>(codegen);
+    DCHECK_NE(ref_reg, ESP);
+    DCHECK(0 <= ref_reg && ref_reg < kNumberOfCpuRegisters) << ref_reg;
+    // "Compact" slow path, saving two moves.
+    //
+    // Instead of using the standard runtime calling convention (input
+    // and output in EAX):
+    //
+    //   EAX <- ref
+    //   EAX <- ReadBarrierMark(EAX)
+    //   ref <- EAX
+    //
+    // we just use rX (the register containing `ref`) as input and output
+    // of a dedicated entrypoint:
+    //
+    //   rX <- ReadBarrierMarkRegX(rX)
+    //
+    int32_t entry_point_offset =
+        CodeGenerator::GetReadBarrierMarkEntryPointsOffset<kX86PointerSize>(ref_reg);
+    // This runtime call does not require a stack map.
+    x86_codegen->InvokeRuntimeWithoutRecordingPcInfo(entry_point_offset, instruction_, this);
+
+    // If the new reference is different from the old reference,
+    // update the field in the holder (`*field_addr`).
+    //
+    // Note that this field could also hold a different object, if
+    // another thread had concurrently changed it. In that case, the
+    // LOCK CMPXCHGL instruction in the compare-and-set (CAS)
+    // operation below would abort the CAS, leaving the field as-is.
+    NearLabel done;
+    __ cmpl(temp_, ref_reg);
+    __ j(kEqual, &done);
+
+    // Update the holder's field atomically.  This may fail if the
+    // mutator updates it before us, but that is OK.  This is achieved
+    // using a strong compare-and-set (CAS) operation with relaxed
+    // memory synchronization ordering, where the expected value is
+    // the old reference and the desired value is the new reference.
+    // This operation is implemented with a 32-bit LOCK CMPXCHG
+    // instruction, which requires the expected value (the old
+    // reference) to be in EAX.  Save EAX beforehand, and move the
+    // expected value (stored in `temp_`) into EAX.
+    __ pushl(EAX);
+    __ movl(EAX, temp_);
+
+    // Convenience aliases.
+    Register base = obj_;
+    Register expected = EAX;
+    Register value = ref_reg;
+
+    bool base_equals_value = (base == value);
+    if (kPoisonHeapReferences) {
+      if (base_equals_value) {
+        // If `base` and `value` are the same register location, move
+        // `value` to a temporary register.  This way, poisoning
+        // `value` won't invalidate `base`.
+        value = temp_;
+        __ movl(value, base);
+      }
+
+      // Check that the register allocator did not assign the location
+      // of `expected` (EAX) to `value` nor to `base`, so that heap
+      // poisoning (when enabled) works as intended below.
+      // - If `value` were equal to `expected`, both references would
+      //   be poisoned twice, meaning they would not be poisoned at
+      //   all, as heap poisoning uses address negation.
+      // - If `base` were equal to `expected`, poisoning `expected`
+      //   would invalidate `base`.
+      DCHECK_NE(value, expected);
+      DCHECK_NE(base, expected);
+
+      __ PoisonHeapReference(expected);
+      __ PoisonHeapReference(value);
+    }
+
+    __ LockCmpxchgl(field_addr_, value);
+
+    // If heap poisoning is enabled, we need to unpoison the values
+    // that were poisoned earlier.
+    if (kPoisonHeapReferences) {
+      if (base_equals_value) {
+        // `value` has been moved to a temporary register, no need
+        // to unpoison it.
+      } else {
+        __ UnpoisonHeapReference(value);
+      }
+      // No need to unpoison `expected` (EAX), as it is overwritten below.
+    }
+
+    // Restore EAX.
+    __ popl(EAX);
+
+    __ Bind(&done);
+    __ jmp(GetExitLabel());
+  }
+
+ private:
+  // The location (register) of the marked object reference.
+  const Location ref_;
+  // The register containing the object holding the marked object reference field.
+  const Register obj_;
+  // The address of the marked reference field.  The base of this address must be `obj_`.
+  const Address field_addr_;
+
+  // Should the reference in `ref_` be unpoisoned prior to marking it?
+  const bool unpoison_ref_before_marking_;
+
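+  // A temporary register used to save the old reference before marking; with heap
+  // poisoning enabled, it may also hold a copy of the new reference.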
+  const Register temp_;
+
+  DISALLOW_COPY_AND_ASSIGN(ReadBarrierMarkAndUpdateFieldSlowPathX86);
+};
+
 // Slow path generating a read barrier for a heap reference.
 class ReadBarrierForHeapReferenceSlowPathX86 : public SlowPathCode {
  public:
@@ -5056,9 +5235,11 @@
         // Branch cases into compressed and uncompressed for each index's type.
         uint32_t count_offset = mirror::String::CountOffset().Uint32Value();
         NearLabel done, not_compressed;
-        __ cmpl(Address(obj, count_offset), Immediate(0));
+        __ testl(Address(obj, count_offset), Immediate(1));
         codegen_->MaybeRecordImplicitNullCheck(instruction);
-        __ j(kGreaterEqual, &not_compressed);
+        static_assert(static_cast<uint32_t>(mirror::StringCompressionFlag::kCompressed) == 0u,
+                      "Expecting 0=compressed, 1=uncompressed");
+        __ j(kNotZero, &not_compressed);
         __ movzxb(out, CodeGeneratorX86::ArrayAddress(obj, index, TIMES_1, data_offset));
         __ jmp(&done);
         __ Bind(&not_compressed);
@@ -5408,7 +5589,7 @@
   codegen_->MaybeRecordImplicitNullCheck(instruction);
   // Mask out most significant bit in case the array is String's array of char.
   if (mirror::kUseStringCompression && instruction->IsStringLength()) {
-    __ andl(out, Immediate(INT32_MAX));
+    __ shrl(out, Immediate(1));
   }
 }
 
@@ -5467,10 +5648,12 @@
       Location array_loc = array_length->GetLocations()->InAt(0);
       Address array_len(array_loc.AsRegister<Register>(), len_offset);
       if (is_string_compressed_char_at) {
+        // TODO: if index_loc.IsConstant(), compare twice the index (to compensate for
+        // the string compression flag) with the in-memory length and avoid the temporary.
         Register length_reg = locations->GetTemp(0).AsRegister<Register>();
         __ movl(length_reg, array_len);
         codegen_->MaybeRecordImplicitNullCheck(array_length);
-        __ andl(length_reg, Immediate(INT32_MAX));
+        __ shrl(length_reg, Immediate(1));
         codegen_->GenerateIntCompare(length_reg, index_loc);
       } else {
         // Checking bounds for general case:
@@ -5886,7 +6069,9 @@
   Register out = out_loc.AsRegister<Register>();
 
   bool generate_null_check = false;
-  const bool requires_read_barrier = kEmitCompilerReadBarrier && !cls->IsInBootImage();
+  const ReadBarrierOption read_barrier_option = cls->IsInBootImage()
+      ? kWithoutReadBarrier
+      : kCompilerReadBarrierOption;
   switch (cls->GetLoadKind()) {
     case HLoadClass::LoadKind::kReferrersClass: {
       DCHECK(!cls->CanCallRuntime());
@@ -5897,25 +6082,25 @@
           cls,
           out_loc,
           Address(current_method, ArtMethod::DeclaringClassOffset().Int32Value()),
-          /*fixup_label*/ nullptr,
-          requires_read_barrier);
+          /* fixup_label */ nullptr,
+          read_barrier_option);
       break;
     }
     case HLoadClass::LoadKind::kBootImageLinkTimeAddress: {
-      DCHECK(!requires_read_barrier);
+      DCHECK_EQ(read_barrier_option, kWithoutReadBarrier);
       __ movl(out, Immediate(/* placeholder */ 0));
       codegen_->RecordTypePatch(cls);
       break;
     }
     case HLoadClass::LoadKind::kBootImageLinkTimePcRelative: {
-      DCHECK(!requires_read_barrier);
+      DCHECK_EQ(read_barrier_option, kWithoutReadBarrier);
       Register method_address = locations->InAt(0).AsRegister<Register>();
       __ leal(out, Address(method_address, CodeGeneratorX86::kDummy32BitOffset));
       codegen_->RecordTypePatch(cls);
       break;
     }
     case HLoadClass::LoadKind::kBootImageAddress: {
-      DCHECK(!requires_read_barrier);
+      DCHECK_EQ(read_barrier_option, kWithoutReadBarrier);
       DCHECK_NE(cls->GetAddress(), 0u);
       uint32_t address = dchecked_integral_cast<uint32_t>(cls->GetAddress());
       __ movl(out, Immediate(address));
@@ -5929,8 +6114,8 @@
       GenerateGcRootFieldLoad(cls,
                               out_loc,
                               Address::Absolute(address),
-                              /*fixup_label*/ nullptr,
-                              requires_read_barrier);
+                              /* fixup_label */ nullptr,
+                              read_barrier_option);
       generate_null_check = !cls->IsInDexCache();
       break;
     }
@@ -5943,7 +6128,7 @@
                               out_loc,
                               Address(base_reg, CodeGeneratorX86::kDummy32BitOffset),
                               fixup_label,
-                              requires_read_barrier);
+                              read_barrier_option);
       generate_null_check = !cls->IsInDexCache();
       break;
     }
@@ -5957,8 +6142,8 @@
       GenerateGcRootFieldLoad(cls,
                               out_loc,
                               Address(out, CodeGenerator::GetCacheOffset(cls->GetTypeIndex())),
-                              /*fixup_label*/ nullptr,
-                              requires_read_barrier);
+                              /* fixup_label */ nullptr,
+                              read_barrier_option);
       generate_null_check = !cls->IsInDexCache();
       break;
     }
@@ -6031,9 +6216,6 @@
       break;
     case HLoadString::LoadKind::kBootImageAddress:
       break;
-    case HLoadString::LoadKind::kDexCacheAddress:
-      DCHECK(Runtime::Current()->UseJitCompilation());
-      break;
     case HLoadString::LoadKind::kDexCacheViaMethod:
       break;
   }
@@ -6099,7 +6281,7 @@
       Address address = Address(method_address, CodeGeneratorX86::kDummy32BitOffset);
       Label* fixup_label = codegen_->NewStringBssEntryPatch(load);
       // /* GcRoot<mirror::Class> */ out = *address  /* PC-relative */
-      GenerateGcRootFieldLoad(load, out_loc, address, fixup_label);
+      GenerateGcRootFieldLoad(load, out_loc, address, fixup_label, kCompilerReadBarrierOption);
       SlowPathCode* slow_path = new (GetGraph()->GetArena()) LoadStringSlowPathX86(load);
       codegen_->AddSlowPath(slow_path);
       __ testl(out, out);
@@ -6153,12 +6335,26 @@
   CheckEntrypointTypes<kQuickDeliverException, void, mirror::Object*>();
 }
 
-static bool TypeCheckNeedsATemporary(TypeCheckKind type_check_kind) {
-  return kEmitCompilerReadBarrier &&
+// Temp is used for read barrier.
+static size_t NumberOfInstanceOfTemps(TypeCheckKind type_check_kind) {
+  if (kEmitCompilerReadBarrier &&
       !kUseBakerReadBarrier &&
       (type_check_kind == TypeCheckKind::kAbstractClassCheck ||
        type_check_kind == TypeCheckKind::kClassHierarchyCheck ||
-       type_check_kind == TypeCheckKind::kArrayObjectCheck);
+       type_check_kind == TypeCheckKind::kArrayObjectCheck)) {
+    return 1;
+  }
+  return 0;
+}
+
+// Interface case has 3 temps, one for holding the number of interfaces, one for the current
+// interface pointer, one for loading the current interface.
+// The other checks have one temp for loading the object's class.
+static size_t NumberOfCheckCastTemps(TypeCheckKind type_check_kind) {
+  if (type_check_kind == TypeCheckKind::kInterfaceCheck && !kPoisonHeapReferences) {
+    return 2;
+  }
+  return 1 + NumberOfInstanceOfTemps(type_check_kind);
 }
 
 void LocationsBuilderX86::VisitInstanceOf(HInstanceOf* instruction) {
@@ -6189,11 +6385,8 @@
   locations->SetInAt(1, Location::Any());
   // Note that TypeCheckSlowPathX86 uses this "out" register too.
   locations->SetOut(Location::RequiresRegister());
-  // When read barriers are enabled, we need a temporary register for
-  // some cases.
-  if (TypeCheckNeedsATemporary(type_check_kind)) {
-    locations->AddTemp(Location::RequiresRegister());
-  }
+  // When read barriers are enabled, we need a temporary register for some cases.
+  locations->AddRegisterTemps(NumberOfInstanceOfTemps(type_check_kind));
 }
 
 void InstructionCodeGeneratorX86::VisitInstanceOf(HInstanceOf* instruction) {
@@ -6204,9 +6397,9 @@
   Location cls = locations->InAt(1);
   Location out_loc = locations->Out();
   Register out = out_loc.AsRegister<Register>();
-  Location maybe_temp_loc = TypeCheckNeedsATemporary(type_check_kind) ?
-      locations->GetTemp(0) :
-      Location::NoLocation();
+  const size_t num_temps = NumberOfInstanceOfTemps(type_check_kind);
+  DCHECK_LE(num_temps, 1u);
+  Location maybe_temp_loc = (num_temps >= 1) ? locations->GetTemp(0) : Location::NoLocation();
   uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
   uint32_t super_offset = mirror::Class::SuperClassOffset().Int32Value();
   uint32_t component_offset = mirror::Class::ComponentTypeOffset().Int32Value();
@@ -6221,11 +6414,14 @@
     __ j(kEqual, &zero);
   }
 
-  // /* HeapReference<Class> */ out = obj->klass_
-  GenerateReferenceLoadTwoRegisters(instruction, out_loc, obj_loc, class_offset);
-
   switch (type_check_kind) {
     case TypeCheckKind::kExactCheck: {
+      // /* HeapReference<Class> */ out = obj->klass_
+      GenerateReferenceLoadTwoRegisters(instruction,
+                                        out_loc,
+                                        obj_loc,
+                                        class_offset,
+                                        kCompilerReadBarrierOption);
       if (cls.IsRegister()) {
         __ cmpl(out, cls.AsRegister<Register>());
       } else {
@@ -6241,12 +6437,22 @@
     }
 
     case TypeCheckKind::kAbstractClassCheck: {
+      // /* HeapReference<Class> */ out = obj->klass_
+      GenerateReferenceLoadTwoRegisters(instruction,
+                                        out_loc,
+                                        obj_loc,
+                                        class_offset,
+                                        kCompilerReadBarrierOption);
       // If the class is abstract, we eagerly fetch the super class of the
       // object to avoid doing a comparison we know will fail.
       NearLabel loop;
       __ Bind(&loop);
       // /* HeapReference<Class> */ out = out->super_class_
-      GenerateReferenceLoadOneRegister(instruction, out_loc, super_offset, maybe_temp_loc);
+      GenerateReferenceLoadOneRegister(instruction,
+                                       out_loc,
+                                       super_offset,
+                                       maybe_temp_loc,
+                                       kCompilerReadBarrierOption);
       __ testl(out, out);
       // If `out` is null, we use it for the result, and jump to `done`.
       __ j(kEqual, &done);
@@ -6265,6 +6471,12 @@
     }
 
     case TypeCheckKind::kClassHierarchyCheck: {
+      // /* HeapReference<Class> */ out = obj->klass_
+      GenerateReferenceLoadTwoRegisters(instruction,
+                                        out_loc,
+                                        obj_loc,
+                                        class_offset,
+                                        kCompilerReadBarrierOption);
       // Walk over the class hierarchy to find a match.
       NearLabel loop, success;
       __ Bind(&loop);
@@ -6276,7 +6488,11 @@
       }
       __ j(kEqual, &success);
       // /* HeapReference<Class> */ out = out->super_class_
-      GenerateReferenceLoadOneRegister(instruction, out_loc, super_offset, maybe_temp_loc);
+      GenerateReferenceLoadOneRegister(instruction,
+                                       out_loc,
+                                       super_offset,
+                                       maybe_temp_loc,
+                                       kCompilerReadBarrierOption);
       __ testl(out, out);
       __ j(kNotEqual, &loop);
       // If `out` is null, we use it for the result, and jump to `done`.
@@ -6290,6 +6506,12 @@
     }
 
     case TypeCheckKind::kArrayObjectCheck: {
+      // /* HeapReference<Class> */ out = obj->klass_
+      GenerateReferenceLoadTwoRegisters(instruction,
+                                        out_loc,
+                                        obj_loc,
+                                        class_offset,
+                                        kCompilerReadBarrierOption);
       // Do an exact check.
       NearLabel exact_check;
       if (cls.IsRegister()) {
@@ -6301,7 +6523,11 @@
       __ j(kEqual, &exact_check);
       // Otherwise, we need to check that the object's class is a non-primitive array.
       // /* HeapReference<Class> */ out = out->component_type_
-      GenerateReferenceLoadOneRegister(instruction, out_loc, component_offset, maybe_temp_loc);
+      GenerateReferenceLoadOneRegister(instruction,
+                                       out_loc,
+                                       component_offset,
+                                       maybe_temp_loc,
+                                       kCompilerReadBarrierOption);
       __ testl(out, out);
       // If `out` is null, we use it for the result, and jump to `done`.
       __ j(kEqual, &done);
@@ -6314,6 +6540,13 @@
     }
 
     case TypeCheckKind::kArrayCheck: {
+      // No read barrier since the slow path will retry upon failure.
+      // /* HeapReference<Class> */ out = obj->klass_
+      GenerateReferenceLoadTwoRegisters(instruction,
+                                        out_loc,
+                                        obj_loc,
+                                        class_offset,
+                                        kWithoutReadBarrier);
       if (cls.IsRegister()) {
         __ cmpl(out, cls.AsRegister<Register>());
       } else {
@@ -6378,35 +6611,43 @@
   }
 }
 
+static bool IsTypeCheckSlowPathFatal(TypeCheckKind type_check_kind, bool throws_into_catch) {
+  switch (type_check_kind) {
+    case TypeCheckKind::kExactCheck:
+    case TypeCheckKind::kAbstractClassCheck:
+    case TypeCheckKind::kClassHierarchyCheck:
+    case TypeCheckKind::kArrayObjectCheck:
+      return !throws_into_catch && !kEmitCompilerReadBarrier;
+    case TypeCheckKind::kInterfaceCheck:
+      return !throws_into_catch && !kEmitCompilerReadBarrier && !kPoisonHeapReferences;
+    case TypeCheckKind::kArrayCheck:
+    case TypeCheckKind::kUnresolvedCheck:
+      return false;
+  }
+  LOG(FATAL) << "Unreachable";
+  UNREACHABLE();
+}
+
 void LocationsBuilderX86::VisitCheckCast(HCheckCast* instruction) {
-  LocationSummary::CallKind call_kind = LocationSummary::kNoCall;
   bool throws_into_catch = instruction->CanThrowIntoCatchBlock();
   TypeCheckKind type_check_kind = instruction->GetTypeCheckKind();
-  switch (type_check_kind) {
-    case TypeCheckKind::kExactCheck:
-    case TypeCheckKind::kAbstractClassCheck:
-    case TypeCheckKind::kClassHierarchyCheck:
-    case TypeCheckKind::kArrayObjectCheck:
-      call_kind = (throws_into_catch || kEmitCompilerReadBarrier) ?
-          LocationSummary::kCallOnSlowPath :
-          LocationSummary::kNoCall;  // In fact, call on a fatal (non-returning) slow path.
-      break;
-    case TypeCheckKind::kArrayCheck:
-    case TypeCheckKind::kUnresolvedCheck:
-    case TypeCheckKind::kInterfaceCheck:
-      call_kind = LocationSummary::kCallOnSlowPath;
-      break;
-  }
+  LocationSummary::CallKind call_kind =
+      IsTypeCheckSlowPathFatal(type_check_kind, throws_into_catch)
+          ? LocationSummary::kNoCall
+          : LocationSummary::kCallOnSlowPath;
   LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
   locations->SetInAt(0, Location::RequiresRegister());
-  locations->SetInAt(1, Location::Any());
+  if (type_check_kind == TypeCheckKind::kInterfaceCheck) {
+    // Require a register for the interface check since there is a loop that compares the class to
+    // a memory address.
+    locations->SetInAt(1, Location::RequiresRegister());
+  } else {
+    locations->SetInAt(1, Location::Any());
+  }
   // Note that TypeCheckSlowPathX86 uses this "temp" register too.
   locations->AddTemp(Location::RequiresRegister());
-  // When read barriers are enabled, we need an additional temporary
-  // register for some cases.
-  if (TypeCheckNeedsATemporary(type_check_kind)) {
-    locations->AddTemp(Location::RequiresRegister());
-  }
+  // When read barriers are enabled, we need an additional temporary register for some cases.
+  locations->AddRegisterTemps(NumberOfCheckCastTemps(type_check_kind));
 }
 
 void InstructionCodeGeneratorX86::VisitCheckCast(HCheckCast* instruction) {
@@ -6417,20 +6658,25 @@
   Location cls = locations->InAt(1);
   Location temp_loc = locations->GetTemp(0);
   Register temp = temp_loc.AsRegister<Register>();
-  Location maybe_temp2_loc = TypeCheckNeedsATemporary(type_check_kind) ?
-      locations->GetTemp(1) :
-      Location::NoLocation();
-  uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
-  uint32_t super_offset = mirror::Class::SuperClassOffset().Int32Value();
-  uint32_t component_offset = mirror::Class::ComponentTypeOffset().Int32Value();
-  uint32_t primitive_offset = mirror::Class::PrimitiveTypeOffset().Int32Value();
+  const size_t num_temps = NumberOfCheckCastTemps(type_check_kind);
+  DCHECK_GE(num_temps, 1u);
+  DCHECK_LE(num_temps, 2u);
+  Location maybe_temp2_loc = (num_temps >= 2) ? locations->GetTemp(1) : Location::NoLocation();
+  const uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
+  const uint32_t super_offset = mirror::Class::SuperClassOffset().Int32Value();
+  const uint32_t component_offset = mirror::Class::ComponentTypeOffset().Int32Value();
+  const uint32_t primitive_offset = mirror::Class::PrimitiveTypeOffset().Int32Value();
+  const uint32_t iftable_offset = mirror::Class::IfTableOffset().Uint32Value();
+  const uint32_t array_length_offset = mirror::Array::LengthOffset().Uint32Value();
+  const uint32_t object_array_data_offset =
+      mirror::Array::DataOffset(kHeapReferenceSize).Uint32Value();
 
+  // Always false when read barriers are enabled, since we may need to go to the entrypoint for
+  // non-fatal cases caused by false negatives. The false negatives come from skipping read
+  // barriers below, which is done for performance and code size reasons.
   bool is_type_check_slow_path_fatal =
-      (type_check_kind == TypeCheckKind::kExactCheck ||
-       type_check_kind == TypeCheckKind::kAbstractClassCheck ||
-       type_check_kind == TypeCheckKind::kClassHierarchyCheck ||
-       type_check_kind == TypeCheckKind::kArrayObjectCheck) &&
-      !instruction->CanThrowIntoCatchBlock();
+      IsTypeCheckSlowPathFatal(type_check_kind, instruction->CanThrowIntoCatchBlock());
+
   SlowPathCode* type_check_slow_path =
       new (GetGraph()->GetArena()) TypeCheckSlowPathX86(instruction,
                                                         is_type_check_slow_path_fatal);
@@ -6443,12 +6689,16 @@
     __ j(kEqual, &done);
   }
 
-  // /* HeapReference<Class> */ temp = obj->klass_
-  GenerateReferenceLoadTwoRegisters(instruction, temp_loc, obj_loc, class_offset);
-
   switch (type_check_kind) {
     case TypeCheckKind::kExactCheck:
     case TypeCheckKind::kArrayCheck: {
+      // /* HeapReference<Class> */ temp = obj->klass_
+      GenerateReferenceLoadTwoRegisters(instruction,
+                                        temp_loc,
+                                        obj_loc,
+                                        class_offset,
+                                        kWithoutReadBarrier);
+
       if (cls.IsRegister()) {
         __ cmpl(temp, cls.AsRegister<Register>());
       } else {
@@ -6462,28 +6712,30 @@
     }
 
     case TypeCheckKind::kAbstractClassCheck: {
+      // /* HeapReference<Class> */ temp = obj->klass_
+      GenerateReferenceLoadTwoRegisters(instruction,
+                                        temp_loc,
+                                        obj_loc,
+                                        class_offset,
+                                        kWithoutReadBarrier);
+
       // If the class is abstract, we eagerly fetch the super class of the
       // object to avoid doing a comparison we know will fail.
-      NearLabel loop, compare_classes;
+      NearLabel loop;
       __ Bind(&loop);
       // /* HeapReference<Class> */ temp = temp->super_class_
-      GenerateReferenceLoadOneRegister(instruction, temp_loc, super_offset, maybe_temp2_loc);
+      GenerateReferenceLoadOneRegister(instruction,
+                                       temp_loc,
+                                       super_offset,
+                                       maybe_temp2_loc,
+                                       kWithoutReadBarrier);
 
-      // If the class reference currently in `temp` is not null, jump
-      // to the `compare_classes` label to compare it with the checked
-      // class.
+      // If the class reference currently in `temp` is null, jump to the slow path to throw the
+      // exception.
       __ testl(temp, temp);
-      __ j(kNotEqual, &compare_classes);
-      // Otherwise, jump to the slow path to throw the exception.
-      //
-      // But before, move back the object's class into `temp` before
-      // going into the slow path, as it has been overwritten in the
-      // meantime.
-      // /* HeapReference<Class> */ temp = obj->klass_
-      GenerateReferenceLoadTwoRegisters(instruction, temp_loc, obj_loc, class_offset);
-      __ jmp(type_check_slow_path->GetEntryLabel());
+      __ j(kZero, type_check_slow_path->GetEntryLabel());
 
-      __ Bind(&compare_classes);
+      // Otherwise, compare the classes.
       if (cls.IsRegister()) {
         __ cmpl(temp, cls.AsRegister<Register>());
       } else {
@@ -6495,6 +6747,13 @@
     }
 
     case TypeCheckKind::kClassHierarchyCheck: {
+      // /* HeapReference<Class> */ temp = obj->klass_
+      GenerateReferenceLoadTwoRegisters(instruction,
+                                        temp_loc,
+                                        obj_loc,
+                                        class_offset,
+                                        kWithoutReadBarrier);
+
       // Walk over the class hierarchy to find a match.
       NearLabel loop;
       __ Bind(&loop);
@@ -6507,26 +6766,30 @@
       __ j(kEqual, &done);
 
       // /* HeapReference<Class> */ temp = temp->super_class_
-      GenerateReferenceLoadOneRegister(instruction, temp_loc, super_offset, maybe_temp2_loc);
+      GenerateReferenceLoadOneRegister(instruction,
+                                       temp_loc,
+                                       super_offset,
+                                       maybe_temp2_loc,
+                                       kWithoutReadBarrier);
 
       // If the class reference currently in `temp` is not null, jump
       // back at the beginning of the loop.
       __ testl(temp, temp);
-      __ j(kNotEqual, &loop);
-      // Otherwise, jump to the slow path to throw the exception.
-      //
-      // But before, move back the object's class into `temp` before
-      // going into the slow path, as it has been overwritten in the
-      // meantime.
-      // /* HeapReference<Class> */ temp = obj->klass_
-      GenerateReferenceLoadTwoRegisters(instruction, temp_loc, obj_loc, class_offset);
+      __ j(kNotZero, &loop);
+      // Otherwise, jump to the slow path to throw the exception.
       __ jmp(type_check_slow_path->GetEntryLabel());
       break;
     }
 
     case TypeCheckKind::kArrayObjectCheck: {
+      // /* HeapReference<Class> */ temp = obj->klass_
+      GenerateReferenceLoadTwoRegisters(instruction,
+                                        temp_loc,
+                                        obj_loc,
+                                        class_offset,
+                                        kWithoutReadBarrier);
+
       // Do an exact check.
-      NearLabel check_non_primitive_component_type;
       if (cls.IsRegister()) {
         __ cmpl(temp, cls.AsRegister<Register>());
       } else {
@@ -6537,38 +6800,24 @@
 
       // Otherwise, we need to check that the object's class is a non-primitive array.
       // /* HeapReference<Class> */ temp = temp->component_type_
-      GenerateReferenceLoadOneRegister(instruction, temp_loc, component_offset, maybe_temp2_loc);
+      GenerateReferenceLoadOneRegister(instruction,
+                                       temp_loc,
+                                       component_offset,
+                                       maybe_temp2_loc,
+                                       kWithoutReadBarrier);
 
-      // If the component type is not null (i.e. the object is indeed
-      // an array), jump to label `check_non_primitive_component_type`
-      // to further check that this component type is not a primitive
-      // type.
+      // If the component type is null (i.e. the object is not an array), jump to the slow path
+      // to throw the exception. Otherwise proceed with the check.
       __ testl(temp, temp);
-      __ j(kNotEqual, &check_non_primitive_component_type);
-      // Otherwise, jump to the slow path to throw the exception.
-      //
-      // But before, move back the object's class into `temp` before
-      // going into the slow path, as it has been overwritten in the
-      // meantime.
-      // /* HeapReference<Class> */ temp = obj->klass_
-      GenerateReferenceLoadTwoRegisters(instruction, temp_loc, obj_loc, class_offset);
-      __ jmp(type_check_slow_path->GetEntryLabel());
+      __ j(kZero, type_check_slow_path->GetEntryLabel());
 
-      __ Bind(&check_non_primitive_component_type);
       __ cmpw(Address(temp, primitive_offset), Immediate(Primitive::kPrimNot));
-      __ j(kEqual, &done);
-      // Same comment as above regarding `temp` and the slow path.
-      // /* HeapReference<Class> */ temp = obj->klass_
-      GenerateReferenceLoadTwoRegisters(instruction, temp_loc, obj_loc, class_offset);
-      __ jmp(type_check_slow_path->GetEntryLabel());
+      __ j(kNotEqual, type_check_slow_path->GetEntryLabel());
       break;
     }
 
     case TypeCheckKind::kUnresolvedCheck:
-    case TypeCheckKind::kInterfaceCheck:
-      // We always go into the type check slow path for the unresolved
-      // and interface check cases.
-      //
+      // We always go into the type check slow path for the unresolved check case.
       // We cannot directly call the CheckCast runtime entry point
       // without resorting to a type checking slow path here (i.e. by
       // calling InvokeRuntime directly), as it would require to
@@ -6576,15 +6825,50 @@
       // instruction (following the runtime calling convention), which
       // might be cluttered by the potential first read barrier
       // emission at the beginning of this method.
-      //
-      // TODO: Introduce a new runtime entry point taking the object
-      // to test (instead of its class) as argument, and let it deal
-      // with the read barrier issues. This will let us refactor this
-      // case of the `switch` code as it was previously (with a direct
-      // call to the runtime not using a type checking slow path).
-      // This should also be beneficial for the other cases above.
       __ jmp(type_check_slow_path->GetEntryLabel());
       break;
+
+    case TypeCheckKind::kInterfaceCheck: {
+      // Fast path for the interface check. Since we compare with a memory location in the inner
+      // loop, we would need to have `cls` poisoned. However, unpoisoning `cls` would clobber the
+      // condition flags and make the conditional jump incorrect. Therefore we simply jump to the
+      // slow path when running under heap poisoning.
+      if (!kPoisonHeapReferences) {
+        // Try to avoid read barriers to improve the fast path. We cannot get false positives by
+        // doing this.
+        // /* HeapReference<Class> */ temp = obj->klass_
+        GenerateReferenceLoadTwoRegisters(instruction,
+                                          temp_loc,
+                                          obj_loc,
+                                          class_offset,
+                                          kWithoutReadBarrier);
+
+        // /* HeapReference<Class> */ temp = temp->iftable_
+        GenerateReferenceLoadTwoRegisters(instruction,
+                                          temp_loc,
+                                          temp_loc,
+                                          iftable_offset,
+                                          kWithoutReadBarrier);
+        // Iftable is never null.
+        __ movl(maybe_temp2_loc.AsRegister<Register>(), Address(temp, array_length_offset));
+        // Loop through the iftable and check if any class matches.
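+        // Roughly, the scan below does the following (illustrative sketch only, not emitted
+        // code, assuming the iftable stores (interface class, method array) pairs back to back):
+        //   int i = iftable->GetLength();
+        //   do {
+        //     i -= 2;                          // subtract first to handle an empty iftable
+        //     if (i < 0) goto slow_path;       // scanned all entries without a match
+        //   } while (iftable->Get(i) != cls);  // fall through to `done` on a match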
+        NearLabel start_loop;
+        __ Bind(&start_loop);
+        // Need to subtract first to handle the empty array case.
+        __ subl(maybe_temp2_loc.AsRegister<Register>(), Immediate(2));
+        __ j(kNegative, type_check_slow_path->GetEntryLabel());
+        // Go to next interface if the classes do not match.
+        __ cmpl(cls.AsRegister<Register>(),
+                CodeGeneratorX86::ArrayAddress(temp,
+                                               maybe_temp2_loc,
+                                               TIMES_4,
+                                               object_array_data_offset));
+        __ j(kNotEqual, &start_loop);
+      } else {
+        __ jmp(type_check_slow_path->GetEntryLabel());
+      }
+      break;
+    }
   }
   __ Bind(&done);
 
@@ -6743,12 +7027,15 @@
   }
 }
 
-void InstructionCodeGeneratorX86::GenerateReferenceLoadOneRegister(HInstruction* instruction,
-                                                                   Location out,
-                                                                   uint32_t offset,
-                                                                   Location maybe_temp) {
+void InstructionCodeGeneratorX86::GenerateReferenceLoadOneRegister(
+    HInstruction* instruction,
+    Location out,
+    uint32_t offset,
+    Location maybe_temp,
+    ReadBarrierOption read_barrier_option) {
   Register out_reg = out.AsRegister<Register>();
-  if (kEmitCompilerReadBarrier) {
+  if (read_barrier_option == kWithReadBarrier) {
+    CHECK(kEmitCompilerReadBarrier);
     if (kUseBakerReadBarrier) {
       // Load with fast path based Baker's read barrier.
       // /* HeapReference<Object> */ out = *(out + offset)
@@ -6773,13 +7060,16 @@
   }
 }
 
-void InstructionCodeGeneratorX86::GenerateReferenceLoadTwoRegisters(HInstruction* instruction,
-                                                                    Location out,
-                                                                    Location obj,
-                                                                    uint32_t offset) {
+void InstructionCodeGeneratorX86::GenerateReferenceLoadTwoRegisters(
+    HInstruction* instruction,
+    Location out,
+    Location obj,
+    uint32_t offset,
+    ReadBarrierOption read_barrier_option) {
   Register out_reg = out.AsRegister<Register>();
   Register obj_reg = obj.AsRegister<Register>();
-  if (kEmitCompilerReadBarrier) {
+  if (read_barrier_option == kWithReadBarrier) {
+    CHECK(kEmitCompilerReadBarrier);
     if (kUseBakerReadBarrier) {
       // Load with fast path based Baker's read barrier.
       // /* HeapReference<Object> */ out = *(obj + offset)
@@ -6799,13 +7089,14 @@
   }
 }
 
-void InstructionCodeGeneratorX86::GenerateGcRootFieldLoad(HInstruction* instruction,
-                                                          Location root,
-                                                          const Address& address,
-                                                          Label* fixup_label,
-                                                          bool requires_read_barrier) {
+void InstructionCodeGeneratorX86::GenerateGcRootFieldLoad(
+    HInstruction* instruction,
+    Location root,
+    const Address& address,
+    Label* fixup_label,
+    ReadBarrierOption read_barrier_option) {
   Register root_reg = root.AsRegister<Register>();
-  if (requires_read_barrier) {
+  if (read_barrier_option == kWithReadBarrier) {
     DCHECK(kEmitCompilerReadBarrier);
     if (kUseBakerReadBarrier) {
       // Fast path implementation of art::ReadBarrier::BarrierForRoot when
@@ -6831,7 +7122,7 @@
 
       // Slow path marking the GC root `root`.
       SlowPathCode* slow_path = new (GetGraph()->GetArena()) ReadBarrierMarkSlowPathX86(
-          instruction, root, /* unpoison */ false);
+          instruction, root, /* unpoison_ref_before_marking */ false);
       codegen_->AddSlowPath(slow_path);
 
       __ fs()->cmpl(Address::Absolute(Thread::IsGcMarkingOffset<kX86PointerSize>().Int32Value()),
@@ -6896,7 +7187,9 @@
                                                                  Location ref,
                                                                  Register obj,
                                                                  const Address& src,
-                                                                 bool needs_null_check) {
+                                                                 bool needs_null_check,
+                                                                 bool always_update_field,
+                                                                 Register* temp) {
   DCHECK(kEmitCompilerReadBarrier);
   DCHECK(kUseBakerReadBarrier);
 
@@ -6910,7 +7203,7 @@
   //   uint32_t rb_state = Lockword(obj->monitor_).ReadBarrierState();
   //   lfence;  // Load fence or artificial data dependency to prevent load-load reordering
   //   HeapReference<Object> ref = *src;  // Original reference load.
-  //   bool is_gray = (rb_state == ReadBarrier::gray_ptr_);
+  //   bool is_gray = (rb_state == ReadBarrier::GrayState());
   //   if (is_gray) {
   //     ref = ReadBarrier::Mark(ref);  // Performed by runtime entrypoint slow path.
   //   }
@@ -6928,14 +7221,13 @@
   uint32_t monitor_offset = mirror::Object::MonitorOffset().Int32Value();
 
   // Given the numeric representation, it's enough to check the low bit of the rb_state.
-  static_assert(ReadBarrier::white_ptr_ == 0, "Expecting white to have value 0");
-  static_assert(ReadBarrier::gray_ptr_ == 1, "Expecting gray to have value 1");
-  static_assert(ReadBarrier::black_ptr_ == 2, "Expecting black to have value 2");
+  static_assert(ReadBarrier::WhiteState() == 0, "Expecting white to have value 0");
+  static_assert(ReadBarrier::GrayState() == 1, "Expecting gray to have value 1");
   constexpr uint32_t gray_byte_position = LockWord::kReadBarrierStateShift / kBitsPerByte;
   constexpr uint32_t gray_bit_position = LockWord::kReadBarrierStateShift % kBitsPerByte;
   constexpr int32_t test_value = static_cast<int8_t>(1 << gray_bit_position);
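+  // For instance, if LockWord::kReadBarrierStateShift were 28, then gray_byte_position == 3,
+  // gray_bit_position == 4 and test_value == 0x10, i.e. the testb below would inspect bit 4 of
+  // the most significant byte of the lock word.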
 
-  // if (rb_state == ReadBarrier::gray_ptr_)
+  // if (rb_state == ReadBarrier::GrayState())
   //   ref = ReadBarrier::Mark(ref);
   // At this point, just do the "if" and make sure that flags are preserved until the branch.
   __ testb(Address(obj, monitor_offset + gray_byte_position), Immediate(test_value));
@@ -6953,8 +7245,15 @@
 
   // Note: Reference unpoisoning modifies the flags, so we need to delay it after the branch.
   // Slow path marking the object `ref` when it is gray.
-  SlowPathCode* slow_path = new (GetGraph()->GetArena()) ReadBarrierMarkSlowPathX86(
-      instruction, ref, /* unpoison */ true);
+  SlowPathCode* slow_path;
+  if (always_update_field) {
+    DCHECK(temp != nullptr);
+    slow_path = new (GetGraph()->GetArena()) ReadBarrierMarkAndUpdateFieldSlowPathX86(
+        instruction, ref, obj, src, /* unpoison_ref_before_marking */ true, *temp);
+  } else {
+    slow_path = new (GetGraph()->GetArena()) ReadBarrierMarkSlowPathX86(
+        instruction, ref, /* unpoison_ref_before_marking */ true);
+  }
   AddSlowPath(slow_path);
 
   // We have done the "if" of the gray bit check above, now branch based on the flags.
@@ -7263,7 +7562,7 @@
     // The value to patch is the distance from the offset in the constant area
     // from the address computed by the HX86ComputeBaseMethodAddress instruction.
     int32_t constant_offset = codegen_->ConstantAreaStart() + offset_into_constant_area_;
-    int32_t relative_position = constant_offset - codegen_->GetMethodAddressOffset();;
+    int32_t relative_position = constant_offset - codegen_->GetMethodAddressOffset();
 
     // Patch in the right value.
     region.StoreUnaligned<int32_t>(pos - 4, relative_position);
diff --git a/compiler/optimizing/code_generator_x86.h b/compiler/optimizing/code_generator_x86.h
index e7d9a43..164231b 100644
--- a/compiler/optimizing/code_generator_x86.h
+++ b/compiler/optimizing/code_generator_x86.h
@@ -240,7 +240,8 @@
   void GenerateReferenceLoadOneRegister(HInstruction* instruction,
                                         Location out,
                                         uint32_t offset,
-                                        Location maybe_temp);
+                                        Location maybe_temp,
+                                        ReadBarrierOption read_barrier_option);
   // Generate a heap reference load using two different registers
   // `out` and `obj`:
   //
@@ -254,17 +255,18 @@
   void GenerateReferenceLoadTwoRegisters(HInstruction* instruction,
                                          Location out,
                                          Location obj,
-                                         uint32_t offset);
+                                         uint32_t offset,
+                                         ReadBarrierOption read_barrier_option);
   // Generate a GC root reference load:
   //
   //   root <- *address
   //
-  // while honoring read barriers (if any).
+  // while honoring read barriers based on read_barrier_option.
   void GenerateGcRootFieldLoad(HInstruction* instruction,
                                Location root,
                                const Address& address,
-                               Label* fixup_label = nullptr,
-                               bool requires_read_barrier = kEmitCompilerReadBarrier);
+                               Label* fixup_label,
+                               ReadBarrierOption read_barrier_option);
 
   // Push value to FPU stack. `is_fp` specifies whether the value is floating point or not.
   // `is_wide` specifies whether it is long/double or not.
@@ -499,13 +501,24 @@
                                              uint32_t data_offset,
                                              Location index,
                                              bool needs_null_check);
-  // Factored implementation used by GenerateFieldLoadWithBakerReadBarrier
-  // and GenerateArrayLoadWithBakerReadBarrier.
+  // Factored implementation, used by GenerateFieldLoadWithBakerReadBarrier,
+  // GenerateArrayLoadWithBakerReadBarrier and some intrinsics.
+  //
+  // Load the object reference located at address `src`, held by
+  // object `obj`, into `ref`, and mark it if needed.  The base of
+  // address `src` must be `obj`.
+  //
+  // If `always_update_field` is true, the value of the reference is
+  // atomically updated in the holder (`obj`).  This operation
+  // requires a temporary register, which must be provided as a
+  // non-null pointer (`temp`).
   void GenerateReferenceLoadWithBakerReadBarrier(HInstruction* instruction,
                                                  Location ref,
                                                  Register obj,
                                                  const Address& src,
-                                                 bool needs_null_check);
+                                                 bool needs_null_check,
+                                                 bool always_update_field = false,
+                                                 Register* temp = nullptr);
 
   // Generate a read barrier for a heap reference within `instruction`
   // using a slow path.
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index 4b64c1b..19b3019 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -199,7 +199,7 @@
       }
       __ movl(length_loc.AsRegister<CpuRegister>(), array_len);
       if (mirror::kUseStringCompression) {
-        __ andl(length_loc.AsRegister<CpuRegister>(), Immediate(INT32_MAX));
+        __ shrl(length_loc.AsRegister<CpuRegister>(), Immediate(1));
       }
     }
 
@@ -332,8 +332,6 @@
 
   void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
     LocationSummary* locations = instruction_->GetLocations();
-    Location object_class = instruction_->IsCheckCast() ? locations->GetTemp(0)
-                                                        : locations->Out();
     uint32_t dex_pc = instruction_->GetDexPc();
     DCHECK(instruction_->IsCheckCast()
            || !locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
@@ -348,22 +346,19 @@
     // We're moving two locations to locations that could overlap, so we need a parallel
     // move resolver.
     InvokeRuntimeCallingConvention calling_convention;
-    codegen->EmitParallelMoves(
-        locations->InAt(1),
-        Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
-        Primitive::kPrimNot,
-        object_class,
-        Location::RegisterLocation(calling_convention.GetRegisterAt(1)),
-        Primitive::kPrimNot);
-
+    codegen->EmitParallelMoves(locations->InAt(0),
+                               Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
+                               Primitive::kPrimNot,
+                               locations->InAt(1),
+                               Location::RegisterLocation(calling_convention.GetRegisterAt(1)),
+                               Primitive::kPrimNot);
     if (instruction_->IsInstanceOf()) {
       x86_64_codegen->InvokeRuntime(kQuickInstanceofNonTrivial, instruction_, dex_pc, this);
-      CheckEntrypointTypes<
-          kQuickInstanceofNonTrivial, size_t, const mirror::Class*, const mirror::Class*>();
+      CheckEntrypointTypes<kQuickInstanceofNonTrivial, size_t, mirror::Object*, mirror::Class*>();
     } else {
       DCHECK(instruction_->IsCheckCast());
-      x86_64_codegen->InvokeRuntime(kQuickCheckCast, instruction_, dex_pc, this);
-      CheckEntrypointTypes<kQuickCheckCast, void, const mirror::Class*, const mirror::Class*>();
+      x86_64_codegen->InvokeRuntime(kQuickCheckInstanceOf, instruction_, dex_pc, this);
+      CheckEntrypointTypes<kQuickCheckInstanceOf, void, mirror::Object*, mirror::Class*>();
     }
 
     if (!is_fatal_) {
@@ -445,11 +440,25 @@
   DISALLOW_COPY_AND_ASSIGN(ArraySetSlowPathX86_64);
 };
 
-// Slow path marking an object during a read barrier.
+// Slow path marking an object reference `ref` during a read
+// barrier. The field `obj.field` in the object `obj` holding this
+// reference does not get updated by this slow path after marking (see
+// ReadBarrierMarkAndUpdateFieldSlowPathX86_64 below for that).
+//
+// This means that after the execution of this slow path, `ref` will
+// always be up-to-date, but `obj.field` may not; i.e., after the
+// flip, `ref` will be a to-space reference, but `obj.field` will
+// probably still be a from-space reference (unless it gets updated by
+// another thread, or if another thread installed another object
+// reference (different from `ref`) in `obj.field`).
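+//
+// Roughly (illustrative pseudocode only): ref = ReadBarrier::Mark(ref);
+// i.e. only the register holding `ref` is updated; `obj.field` is left as-is.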
 class ReadBarrierMarkSlowPathX86_64 : public SlowPathCode {
  public:
-  ReadBarrierMarkSlowPathX86_64(HInstruction* instruction, Location obj, bool unpoison)
-      : SlowPathCode(instruction), obj_(obj), unpoison_(unpoison) {
+  ReadBarrierMarkSlowPathX86_64(HInstruction* instruction,
+                                Location ref,
+                                bool unpoison_ref_before_marking)
+      : SlowPathCode(instruction),
+        ref_(ref),
+        unpoison_ref_before_marking_(unpoison_ref_before_marking) {
     DCHECK(kEmitCompilerReadBarrier);
   }
 
@@ -457,10 +466,10 @@
 
   void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
     LocationSummary* locations = instruction_->GetLocations();
-    CpuRegister cpu_reg = obj_.AsRegister<CpuRegister>();
-    Register reg = cpu_reg.AsRegister();
+    CpuRegister ref_cpu_reg = ref_.AsRegister<CpuRegister>();
+    Register ref_reg = ref_cpu_reg.AsRegister();
     DCHECK(locations->CanCall());
-    DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(reg));
+    DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(ref_reg)) << ref_reg;
     DCHECK(instruction_->IsInstanceFieldGet() ||
            instruction_->IsStaticFieldGet() ||
            instruction_->IsArrayGet() ||
@@ -475,44 +484,218 @@
         << instruction_->DebugName();
 
     __ Bind(GetEntryLabel());
-    if (unpoison_) {
+    if (unpoison_ref_before_marking_) {
       // Object* ref = ref_addr->AsMirrorPtr()
-      __ MaybeUnpoisonHeapReference(cpu_reg);
+      __ MaybeUnpoisonHeapReference(ref_cpu_reg);
     }
     // No need to save live registers; it's taken care of by the
     // entrypoint. Also, there is no need to update the stack mask,
     // as this runtime call will not trigger a garbage collection.
     CodeGeneratorX86_64* x86_64_codegen = down_cast<CodeGeneratorX86_64*>(codegen);
-    DCHECK_NE(reg, RSP);
-    DCHECK(0 <= reg && reg < kNumberOfCpuRegisters) << reg;
+    DCHECK_NE(ref_reg, RSP);
+    DCHECK(0 <= ref_reg && ref_reg < kNumberOfCpuRegisters) << ref_reg;
     // "Compact" slow path, saving two moves.
     //
     // Instead of using the standard runtime calling convention (input
     // and output in R0):
     //
-    //   RDI <- obj
+    //   RDI <- ref
     //   RAX <- ReadBarrierMark(RDI)
-    //   obj <- RAX
+    //   ref <- RAX
     //
-    // we just use rX (the register holding `obj`) as input and output
+    // we just use rX (the register containing `ref`) as input and output
     // of a dedicated entrypoint:
     //
     //   rX <- ReadBarrierMarkRegX(rX)
     //
     int32_t entry_point_offset =
-        CodeGenerator::GetReadBarrierMarkEntryPointsOffset<kX86_64PointerSize>(reg);
+        CodeGenerator::GetReadBarrierMarkEntryPointsOffset<kX86_64PointerSize>(ref_reg);
     // This runtime call does not require a stack map.
     x86_64_codegen->InvokeRuntimeWithoutRecordingPcInfo(entry_point_offset, instruction_, this);
     __ jmp(GetExitLabel());
   }
 
  private:
-  const Location obj_;
-  const bool unpoison_;
+  // The location (register) of the marked object reference.
+  const Location ref_;
+  // Should the reference in `ref_` be unpoisoned prior to marking it?
+  const bool unpoison_ref_before_marking_;
 
   DISALLOW_COPY_AND_ASSIGN(ReadBarrierMarkSlowPathX86_64);
 };
 
+// Slow path marking an object reference `ref` during a read barrier,
+// and if needed, atomically updating the field `obj.field` in the
+// object `obj` holding this reference after marking (contrary to
+// ReadBarrierMarkSlowPathX86_64 above, which never tries to update
+// `obj.field`).
+//
+// This means that after the execution of this slow path, both `ref`
+// and `obj.field` will be up-to-date; i.e., after the flip, both will
+// hold the same to-space reference (unless another thread installed
+// another object reference (different from `ref`) in `obj.field`).
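+//
+// Roughly, the slow path performs (illustrative pseudocode only, not the emitted sequence):
+//
+//   old_ref = ref;
+//   ref = ReadBarrier::Mark(ref);  // via the per-register marking entrypoint
+//   if (ref != old_ref) {
+//     // Relaxed CAS; if another thread updated the field first, its value simply wins.
+//     CompareAndSet(obj.field, /* expected */ old_ref, /* desired */ ref);
+//   }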
+class ReadBarrierMarkAndUpdateFieldSlowPathX86_64 : public SlowPathCode {
+ public:
+  ReadBarrierMarkAndUpdateFieldSlowPathX86_64(HInstruction* instruction,
+                                              Location ref,
+                                              CpuRegister obj,
+                                              const Address& field_addr,
+                                              bool unpoison_ref_before_marking,
+                                              CpuRegister temp1,
+                                              CpuRegister temp2)
+      : SlowPathCode(instruction),
+        ref_(ref),
+        obj_(obj),
+        field_addr_(field_addr),
+        unpoison_ref_before_marking_(unpoison_ref_before_marking),
+        temp1_(temp1),
+        temp2_(temp2) {
+    DCHECK(kEmitCompilerReadBarrier);
+  }
+
+  const char* GetDescription() const OVERRIDE {
+    return "ReadBarrierMarkAndUpdateFieldSlowPathX86_64";
+  }
+
+  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+    LocationSummary* locations = instruction_->GetLocations();
+    CpuRegister ref_cpu_reg = ref_.AsRegister<CpuRegister>();
+    Register ref_reg = ref_cpu_reg.AsRegister();
+    DCHECK(locations->CanCall());
+    DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(ref_reg)) << ref_reg;
+    // This slow path is only used by the UnsafeCASObject intrinsic.
+    DCHECK((instruction_->IsInvokeVirtual() && instruction_->GetLocations()->Intrinsified()))
+        << "Unexpected instruction in read barrier marking and field updating slow path: "
+        << instruction_->DebugName();
+    DCHECK(instruction_->GetLocations()->Intrinsified());
+    DCHECK_EQ(instruction_->AsInvoke()->GetIntrinsic(), Intrinsics::kUnsafeCASObject);
+
+    __ Bind(GetEntryLabel());
+    if (unpoison_ref_before_marking_) {
+      // Object* ref = ref_addr->AsMirrorPtr()
+      __ MaybeUnpoisonHeapReference(ref_cpu_reg);
+    }
+
+    // Save the old (unpoisoned) reference.
+    __ movl(temp1_, ref_cpu_reg);
+
+    // No need to save live registers; it's taken care of by the
+    // entrypoint. Also, there is no need to update the stack mask,
+    // as this runtime call will not trigger a garbage collection.
+    CodeGeneratorX86_64* x86_64_codegen = down_cast<CodeGeneratorX86_64*>(codegen);
+    DCHECK_NE(ref_reg, RSP);
+    DCHECK(0 <= ref_reg && ref_reg < kNumberOfCpuRegisters) << ref_reg;
+    // "Compact" slow path, saving two moves.
+    //
+    // Instead of using the standard runtime calling convention (input
+    // and output in R0):
+    //
+    //   RDI <- ref
+    //   RAX <- ReadBarrierMark(RDI)
+    //   ref <- RAX
+    //
+    // we just use rX (the register containing `ref`) as input and output
+    // of a dedicated entrypoint:
+    //
+    //   rX <- ReadBarrierMarkRegX(rX)
+    //
+    int32_t entry_point_offset =
+        CodeGenerator::GetReadBarrierMarkEntryPointsOffset<kX86_64PointerSize>(ref_reg);
+    // This runtime call does not require a stack map.
+    x86_64_codegen->InvokeRuntimeWithoutRecordingPcInfo(entry_point_offset, instruction_, this);
+
+    // If the new reference is different from the old reference,
+    // update the field in the holder (`*field_addr`).
+    //
+    // Note that this field could also hold a different object, if
+    // another thread had concurrently changed it. In that case, the
+    // LOCK CMPXCHGL instruction in the compare-and-set (CAS)
+    // operation below would abort the CAS, leaving the field as-is.
+    NearLabel done;
+    __ cmpl(temp1_, ref_cpu_reg);
+    __ j(kEqual, &done);
+
+    // Update the holder's field atomically.  This may fail if
+    // the mutator updates it before us, but it's OK.  This is achieved
+    // using a strong compare-and-set (CAS) operation with relaxed
+    // memory synchronization ordering, where the expected value is
+    // the old reference and the desired value is the new reference.
+    // This operation is implemented with a 32-bit LOCK CMPXCHGL
+    // instruction, which requires the expected value (the old
+    // reference) to be in EAX.  Save RAX beforehand, and move the
+    // expected value (stored in `temp1_`) into EAX.
+    __ movq(temp2_, CpuRegister(RAX));
+    __ movl(CpuRegister(RAX), temp1_);
+
+    // Convenience aliases.
+    CpuRegister base = obj_;
+    CpuRegister expected = CpuRegister(RAX);
+    CpuRegister value = ref_cpu_reg;
+
+    bool base_equals_value = (base.AsRegister() == value.AsRegister());
+    Register value_reg = ref_reg;
+    if (kPoisonHeapReferences) {
+      if (base_equals_value) {
+        // If `base` and `value` are the same register location, move
+        // `value_reg` to a temporary register.  This way, poisoning
+        // `value_reg` won't invalidate `base`.
+        value_reg = temp1_.AsRegister();
+        __ movl(CpuRegister(value_reg), base);
+      }
+
+      // Check that the register allocator did not assign the location
+      // of `expected` (RAX) to `value` nor to `base`, so that heap
+      // poisoning (when enabled) works as intended below.
+      // - If `value` were equal to `expected`, both references would
+      //   be poisoned twice, meaning they would not be poisoned at
+      //   all, as heap poisoning uses address negation.
+      // - If `base` were equal to `expected`, poisoning `expected`
+      //   would invalidate `base`.
+      DCHECK_NE(value_reg, expected.AsRegister());
+      DCHECK_NE(base.AsRegister(), expected.AsRegister());
+
+      __ PoisonHeapReference(expected);
+      __ PoisonHeapReference(CpuRegister(value_reg));
+    }
+
+    __ LockCmpxchgl(field_addr_, CpuRegister(value_reg));
+
+    // If heap poisoning is enabled, we need to unpoison the values
+    // that were poisoned earlier.
+    if (kPoisonHeapReferences) {
+      if (base_equals_value) {
+        // `value_reg` has been moved to a temporary register, no need
+        // to unpoison it.
+      } else {
+        __ UnpoisonHeapReference(CpuRegister(value_reg));
+      }
+      // No need to unpoison `expected` (RAX), as it will be overwritten below.
+    }
+
+    // Restore RAX.
+    __ movq(CpuRegister(RAX), temp2_);
+
+    __ Bind(&done);
+    __ jmp(GetExitLabel());
+  }
+
+ private:
+  // The location (register) of the marked object reference.
+  const Location ref_;
+  // The register containing the object holding the marked object reference field.
+  const CpuRegister obj_;
+  // The address of the marked reference field.  The base of this address must be `obj_`.
+  const Address field_addr_;
+
+  // Should the reference in `ref_` be unpoisoned prior to marking it?
+  const bool unpoison_ref_before_marking_;
+
+  const CpuRegister temp1_;
+  const CpuRegister temp2_;
+
+  DISALLOW_COPY_AND_ASSIGN(ReadBarrierMarkAndUpdateFieldSlowPathX86_64);
+};
+
 // Slow path generating a read barrier for a heap reference.
 class ReadBarrierForHeapReferenceSlowPathX86_64 : public SlowPathCode {
  public:
@@ -4122,7 +4305,7 @@
       // /* HeapReference<Object> */ out = *(base + offset)
       if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
         // Note that a potential implicit null check is handled in this
-        // CodeGeneratorX86::GenerateFieldLoadWithBakerReadBarrier call.
+        // CodeGeneratorX86_64::GenerateFieldLoadWithBakerReadBarrier call.
         codegen_->GenerateFieldLoadWithBakerReadBarrier(
             instruction, out, base, offset, /* needs_null_check */ true);
         if (is_volatile) {
@@ -4541,9 +4724,11 @@
         // Branch cases into compressed and uncompressed for each index's type.
         uint32_t count_offset = mirror::String::CountOffset().Uint32Value();
         NearLabel done, not_compressed;
-        __ cmpl(Address(obj, count_offset), Immediate(0));
+        __ testl(Address(obj, count_offset), Immediate(1));
         codegen_->MaybeRecordImplicitNullCheck(instruction);
-        __ j(kGreaterEqual, &not_compressed);
+        static_assert(static_cast<uint32_t>(mirror::StringCompressionFlag::kCompressed) == 0u,
+                      "Expecting 0=compressed, 1=uncompressed");
+        __ j(kNotZero, &not_compressed);
         __ movzxb(out, CodeGeneratorX86_64::ArrayAddress(obj, index, TIMES_1, data_offset));
         __ jmp(&done);
         __ Bind(&not_compressed);
@@ -4569,7 +4754,7 @@
       //     *(obj + data_offset + index * sizeof(HeapReference<Object>))
       if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
         // Note that a potential implicit null check is handled in this
-        // CodeGeneratorX86::GenerateArrayLoadWithBakerReadBarrier call.
+        // CodeGeneratorX86_64::GenerateArrayLoadWithBakerReadBarrier call.
         codegen_->GenerateArrayLoadWithBakerReadBarrier(
             instruction, out_loc, obj, data_offset, index, /* needs_null_check */ true);
       } else {
@@ -4875,7 +5060,7 @@
   codegen_->MaybeRecordImplicitNullCheck(instruction);
   // Mask out most significant bit in case the array is String's array of char.
   if (mirror::kUseStringCompression && instruction->IsStringLength()) {
-    __ andl(out, Immediate(INT32_MAX));
+    __ shrl(out, Immediate(1));
   }
 }
 
@@ -4927,10 +5112,12 @@
       Location array_loc = array_length->GetLocations()->InAt(0);
       Address array_len(array_loc.AsRegister<CpuRegister>(), len_offset);
       if (mirror::kUseStringCompression && instruction->IsStringCharAt()) {
+        // TODO: if index_loc.IsConstant(), compare the index multiplied by two (to compensate
+        // for the string compression flag) with the in-memory length and avoid the temporary.
         CpuRegister length_reg = CpuRegister(TMP);
         __ movl(length_reg, array_len);
         codegen_->MaybeRecordImplicitNullCheck(array_length);
-        __ andl(length_reg, Immediate(INT32_MAX));
+        __ shrl(length_reg, Immediate(1));
         codegen_->GenerateIntCompare(length_reg, index_loc);
       } else {
         // Checking the bound for general case:
@@ -5306,7 +5493,9 @@
   Location out_loc = locations->Out();
   CpuRegister out = out_loc.AsRegister<CpuRegister>();
 
-  const bool requires_read_barrier = kEmitCompilerReadBarrier && !cls->IsInBootImage();
+  const ReadBarrierOption read_barrier_option = cls->IsInBootImage()
+      ? kWithoutReadBarrier
+      : kCompilerReadBarrierOption;
   bool generate_null_check = false;
   switch (cls->GetLoadKind()) {
     case HLoadClass::LoadKind::kReferrersClass: {
@@ -5318,17 +5507,17 @@
           cls,
           out_loc,
           Address(current_method, ArtMethod::DeclaringClassOffset().Int32Value()),
-          /*fixup_label*/nullptr,
-          requires_read_barrier);
+          /* fixup_label */ nullptr,
+          read_barrier_option);
       break;
     }
     case HLoadClass::LoadKind::kBootImageLinkTimePcRelative:
-      DCHECK(!requires_read_barrier);
+      DCHECK_EQ(read_barrier_option, kWithoutReadBarrier);
       __ leal(out, Address::Absolute(CodeGeneratorX86_64::kDummy32BitOffset, /* no_rip */ false));
       codegen_->RecordTypePatch(cls);
       break;
     case HLoadClass::LoadKind::kBootImageAddress: {
-      DCHECK(!requires_read_barrier);
+      DCHECK_EQ(read_barrier_option, kWithoutReadBarrier);
       DCHECK_NE(cls->GetAddress(), 0u);
       uint32_t address = dchecked_integral_cast<uint32_t>(cls->GetAddress());
       __ movl(out, Immediate(address));  // Zero-extended.
@@ -5343,16 +5532,16 @@
         GenerateGcRootFieldLoad(cls,
                                 out_loc,
                                 address,
-                                /*fixup_label*/nullptr,
-                                requires_read_barrier);
+                                /* fixup_label */ nullptr,
+                                read_barrier_option);
       } else {
         // TODO: Consider using opcode A1, i.e. movl eax, moff32 (with 64-bit address).
         __ movq(out, Immediate(cls->GetAddress()));
         GenerateGcRootFieldLoad(cls,
                                 out_loc,
                                 Address(out, 0),
-                                /*fixup_label*/nullptr,
-                                requires_read_barrier);
+                                /* fixup_label */ nullptr,
+                                read_barrier_option);
       }
       generate_null_check = !cls->IsInDexCache();
       break;
@@ -5363,7 +5552,7 @@
       Address address = Address::Absolute(CodeGeneratorX86_64::kDummy32BitOffset,
                                           /* no_rip */ false);
       // /* GcRoot<mirror::Class> */ out = *address  /* PC-relative */
-      GenerateGcRootFieldLoad(cls, out_loc, address, fixup_label, requires_read_barrier);
+      GenerateGcRootFieldLoad(cls, out_loc, address, fixup_label, read_barrier_option);
       generate_null_check = !cls->IsInDexCache();
       break;
     }
@@ -5379,8 +5568,8 @@
           cls,
           out_loc,
           Address(out, CodeGenerator::GetCacheOffset(cls->GetTypeIndex())),
-          /*fixup_label*/nullptr,
-          requires_read_barrier);
+          /* fixup_label */ nullptr,
+          read_barrier_option);
       generate_null_check = !cls->IsInDexCache();
       break;
     }
@@ -5436,9 +5625,6 @@
       break;
     case HLoadString::LoadKind::kBootImageAddress:
       break;
-    case HLoadString::LoadKind::kDexCacheAddress:
-      DCHECK(Runtime::Current()->UseJitCompilation());
-      break;
     case HLoadString::LoadKind::kBssEntry:
       DCHECK(!Runtime::Current()->UseJitCompilation());
       break;
@@ -5496,7 +5682,7 @@
                                           /* no_rip */ false);
       Label* fixup_label = codegen_->NewStringBssEntryPatch(load);
       // /* GcRoot<mirror::Class> */ out = *address  /* PC-relative */
-      GenerateGcRootFieldLoad(load, out_loc, address, fixup_label);
+      GenerateGcRootFieldLoad(load, out_loc, address, fixup_label, kCompilerReadBarrierOption);
       SlowPathCode* slow_path = new (GetGraph()->GetArena()) LoadStringSlowPathX86_64(load);
       codegen_->AddSlowPath(slow_path);
       __ testl(out, out);
@@ -5552,7 +5738,19 @@
   CheckEntrypointTypes<kQuickDeliverException, void, mirror::Object*>();
 }
 
-static bool TypeCheckNeedsATemporary(TypeCheckKind type_check_kind) {
+static bool CheckCastTypeCheckNeedsATemporary(TypeCheckKind type_check_kind) {
+  if (type_check_kind == TypeCheckKind::kInterfaceCheck && !kPoisonHeapReferences) {
+    // We need a temporary for holding the iftable length.
+    return true;
+  }
+  return kEmitCompilerReadBarrier &&
+      !kUseBakerReadBarrier &&
+      (type_check_kind == TypeCheckKind::kAbstractClassCheck ||
+       type_check_kind == TypeCheckKind::kClassHierarchyCheck ||
+       type_check_kind == TypeCheckKind::kArrayObjectCheck);
+}
+
+static bool InstanceOfTypeCheckNeedsATemporary(TypeCheckKind type_check_kind) {
   return kEmitCompilerReadBarrier &&
       !kUseBakerReadBarrier &&
       (type_check_kind == TypeCheckKind::kAbstractClassCheck ||
@@ -5590,7 +5788,7 @@
   locations->SetOut(Location::RequiresRegister());
   // When read barriers are enabled, we need a temporary register for
   // some cases.
-  if (TypeCheckNeedsATemporary(type_check_kind)) {
+  if (InstanceOfTypeCheckNeedsATemporary(type_check_kind)) {
     locations->AddTemp(Location::RequiresRegister());
   }
 }
@@ -5603,7 +5801,7 @@
   Location cls = locations->InAt(1);
   Location out_loc =  locations->Out();
   CpuRegister out = out_loc.AsRegister<CpuRegister>();
-  Location maybe_temp_loc = TypeCheckNeedsATemporary(type_check_kind) ?
+  Location maybe_temp_loc = InstanceOfTypeCheckNeedsATemporary(type_check_kind) ?
       locations->GetTemp(0) :
       Location::NoLocation();
   uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
@@ -5620,11 +5818,14 @@
     __ j(kEqual, &zero);
   }
 
-  // /* HeapReference<Class> */ out = obj->klass_
-  GenerateReferenceLoadTwoRegisters(instruction, out_loc, obj_loc, class_offset);
-
   switch (type_check_kind) {
     case TypeCheckKind::kExactCheck: {
+      // /* HeapReference<Class> */ out = obj->klass_
+      GenerateReferenceLoadTwoRegisters(instruction,
+                                        out_loc,
+                                        obj_loc,
+                                        class_offset,
+                                        kCompilerReadBarrierOption);
       if (cls.IsRegister()) {
         __ cmpl(out, cls.AsRegister<CpuRegister>());
       } else {
@@ -5645,12 +5846,22 @@
     }
 
     case TypeCheckKind::kAbstractClassCheck: {
+      // /* HeapReference<Class> */ out = obj->klass_
+      GenerateReferenceLoadTwoRegisters(instruction,
+                                        out_loc,
+                                        obj_loc,
+                                        class_offset,
+                                        kCompilerReadBarrierOption);
       // If the class is abstract, we eagerly fetch the super class of the
       // object to avoid doing a comparison we know will fail.
       NearLabel loop, success;
       __ Bind(&loop);
       // /* HeapReference<Class> */ out = out->super_class_
-      GenerateReferenceLoadOneRegister(instruction, out_loc, super_offset, maybe_temp_loc);
+      GenerateReferenceLoadOneRegister(instruction,
+                                       out_loc,
+                                       super_offset,
+                                       maybe_temp_loc,
+                                       kCompilerReadBarrierOption);
       __ testl(out, out);
       // If `out` is null, we use it for the result, and jump to `done`.
       __ j(kEqual, &done);
@@ -5669,6 +5880,12 @@
     }
 
     case TypeCheckKind::kClassHierarchyCheck: {
+      // /* HeapReference<Class> */ out = obj->klass_
+      GenerateReferenceLoadTwoRegisters(instruction,
+                                        out_loc,
+                                        obj_loc,
+                                        class_offset,
+                                        kCompilerReadBarrierOption);
       // Walk over the class hierarchy to find a match.
       NearLabel loop, success;
       __ Bind(&loop);
@@ -5680,7 +5897,11 @@
       }
       __ j(kEqual, &success);
       // /* HeapReference<Class> */ out = out->super_class_
-      GenerateReferenceLoadOneRegister(instruction, out_loc, super_offset, maybe_temp_loc);
+      GenerateReferenceLoadOneRegister(instruction,
+                                       out_loc,
+                                       super_offset,
+                                       maybe_temp_loc,
+                                       kCompilerReadBarrierOption);
       __ testl(out, out);
       __ j(kNotEqual, &loop);
       // If `out` is null, we use it for the result, and jump to `done`.
@@ -5694,6 +5915,12 @@
     }
 
     case TypeCheckKind::kArrayObjectCheck: {
+      // /* HeapReference<Class> */ out = obj->klass_
+      GenerateReferenceLoadTwoRegisters(instruction,
+                                        out_loc,
+                                        obj_loc,
+                                        class_offset,
+                                        kCompilerReadBarrierOption);
       // Do an exact check.
       NearLabel exact_check;
       if (cls.IsRegister()) {
@@ -5705,7 +5932,11 @@
       __ j(kEqual, &exact_check);
       // Otherwise, we need to check that the object's class is a non-primitive array.
       // /* HeapReference<Class> */ out = out->component_type_
-      GenerateReferenceLoadOneRegister(instruction, out_loc, component_offset, maybe_temp_loc);
+      GenerateReferenceLoadOneRegister(instruction,
+                                       out_loc,
+                                       component_offset,
+                                       maybe_temp_loc,
+                                       kCompilerReadBarrierOption);
       __ testl(out, out);
       // If `out` is null, we use it for the result, and jump to `done`.
       __ j(kEqual, &done);
@@ -5718,6 +5949,13 @@
     }
 
     case TypeCheckKind::kArrayCheck: {
+      // No read barrier since the slow path will retry upon failure.
+      // /* HeapReference<Class> */ out = obj->klass_
+      GenerateReferenceLoadTwoRegisters(instruction,
+                                        out_loc,
+                                        obj_loc,
+                                        class_offset,
+                                        kWithoutReadBarrier);
       if (cls.IsRegister()) {
         __ cmpl(out, cls.AsRegister<CpuRegister>());
       } else {
@@ -5782,33 +6020,45 @@
   }
 }
 
-void LocationsBuilderX86_64::VisitCheckCast(HCheckCast* instruction) {
-  LocationSummary::CallKind call_kind = LocationSummary::kNoCall;
-  bool throws_into_catch = instruction->CanThrowIntoCatchBlock();
-  TypeCheckKind type_check_kind = instruction->GetTypeCheckKind();
+static bool IsTypeCheckSlowPathFatal(TypeCheckKind type_check_kind, bool throws_into_catch) {
   switch (type_check_kind) {
     case TypeCheckKind::kExactCheck:
     case TypeCheckKind::kAbstractClassCheck:
     case TypeCheckKind::kClassHierarchyCheck:
     case TypeCheckKind::kArrayObjectCheck:
-      call_kind = (throws_into_catch || kEmitCompilerReadBarrier) ?
-          LocationSummary::kCallOnSlowPath :
-          LocationSummary::kNoCall;  // In fact, call on a fatal (non-returning) slow path.
-      break;
+      return !throws_into_catch && !kEmitCompilerReadBarrier;
+    case TypeCheckKind::kInterfaceCheck:
+      return !throws_into_catch && !kEmitCompilerReadBarrier && !kPoisonHeapReferences;
     case TypeCheckKind::kArrayCheck:
     case TypeCheckKind::kUnresolvedCheck:
-    case TypeCheckKind::kInterfaceCheck:
-      call_kind = LocationSummary::kCallOnSlowPath;
-      break;
+      return false;
   }
+  LOG(FATAL) << "Unreachable";
+  UNREACHABLE();
+}
+
+void LocationsBuilderX86_64::VisitCheckCast(HCheckCast* instruction) {
+  bool throws_into_catch = instruction->CanThrowIntoCatchBlock();
+  TypeCheckKind type_check_kind = instruction->GetTypeCheckKind();
+  bool is_fatal_slow_path = IsTypeCheckSlowPathFatal(type_check_kind, throws_into_catch);
+  LocationSummary::CallKind call_kind = is_fatal_slow_path
+                                            ? LocationSummary::kNoCall
+                                            : LocationSummary::kCallOnSlowPath;
   LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
   locations->SetInAt(0, Location::RequiresRegister());
-  locations->SetInAt(1, Location::Any());
+  if (type_check_kind == TypeCheckKind::kInterfaceCheck) {
+    // Require a register for the interface check since there is a loop that compares the class to
+    // a memory address.
+    locations->SetInAt(1, Location::RequiresRegister());
+  } else {
+    locations->SetInAt(1, Location::Any());
+  }
+
   // Note that TypeCheckSlowPathX86_64 uses this "temp" register too.
   locations->AddTemp(Location::RequiresRegister());
   // When read barriers are enabled, we need an additional temporary
   // register for some cases.
-  if (TypeCheckNeedsATemporary(type_check_kind)) {
+  if (CheckCastTypeCheckNeedsATemporary(type_check_kind)) {
     locations->AddTemp(Location::RequiresRegister());
   }
 }
@@ -5821,38 +6071,45 @@
   Location cls = locations->InAt(1);
   Location temp_loc = locations->GetTemp(0);
   CpuRegister temp = temp_loc.AsRegister<CpuRegister>();
-  Location maybe_temp2_loc = TypeCheckNeedsATemporary(type_check_kind) ?
+  Location maybe_temp2_loc = CheckCastTypeCheckNeedsATemporary(type_check_kind) ?
       locations->GetTemp(1) :
       Location::NoLocation();
-  uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
-  uint32_t super_offset = mirror::Class::SuperClassOffset().Int32Value();
-  uint32_t component_offset = mirror::Class::ComponentTypeOffset().Int32Value();
-  uint32_t primitive_offset = mirror::Class::PrimitiveTypeOffset().Int32Value();
+  const uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
+  const uint32_t super_offset = mirror::Class::SuperClassOffset().Int32Value();
+  const uint32_t component_offset = mirror::Class::ComponentTypeOffset().Int32Value();
+  const uint32_t primitive_offset = mirror::Class::PrimitiveTypeOffset().Int32Value();
+  const uint32_t iftable_offset = mirror::Class::IfTableOffset().Uint32Value();
+  const uint32_t array_length_offset = mirror::Array::LengthOffset().Uint32Value();
+  const uint32_t object_array_data_offset =
+      mirror::Array::DataOffset(kHeapReferenceSize).Uint32Value();
 
+  // Always false when read barriers are enabled: the fast paths below avoid read barriers (for
+  // performance and code size reasons), which can produce false negatives, so we may need to go
+  // to the entrypoint for the non-fatal case.
   bool is_type_check_slow_path_fatal =
-      (type_check_kind == TypeCheckKind::kExactCheck ||
-       type_check_kind == TypeCheckKind::kAbstractClassCheck ||
-       type_check_kind == TypeCheckKind::kClassHierarchyCheck ||
-       type_check_kind == TypeCheckKind::kArrayObjectCheck) &&
-      !instruction->CanThrowIntoCatchBlock();
+      IsTypeCheckSlowPathFatal(type_check_kind, instruction->CanThrowIntoCatchBlock());
   SlowPathCode* type_check_slow_path =
       new (GetGraph()->GetArena()) TypeCheckSlowPathX86_64(instruction,
                                                            is_type_check_slow_path_fatal);
   codegen_->AddSlowPath(type_check_slow_path);
 
+  NearLabel done;
+  // Avoid null check if we know obj is not null.
+  if (instruction->MustDoNullCheck()) {
+    __ testl(obj, obj);
+    __ j(kEqual, &done);
+  }
+
   switch (type_check_kind) {
     case TypeCheckKind::kExactCheck:
     case TypeCheckKind::kArrayCheck: {
-      NearLabel done;
-      // Avoid null check if we know obj is not null.
-      if (instruction->MustDoNullCheck()) {
-        __ testl(obj, obj);
-        __ j(kEqual, &done);
-      }
-
       // /* HeapReference<Class> */ temp = obj->klass_
-      GenerateReferenceLoadTwoRegisters(instruction, temp_loc, obj_loc, class_offset);
-
+      GenerateReferenceLoadTwoRegisters(instruction,
+                                        temp_loc,
+                                        obj_loc,
+                                        class_offset,
+                                        kWithoutReadBarrier);
       if (cls.IsRegister()) {
         __ cmpl(temp, cls.AsRegister<CpuRegister>());
       } else {
@@ -5862,43 +6119,32 @@
       // Jump to slow path for throwing the exception or doing a
       // more involved array check.
       __ j(kNotEqual, type_check_slow_path->GetEntryLabel());
-      __ Bind(&done);
       break;
     }
 
     case TypeCheckKind::kAbstractClassCheck: {
-      NearLabel done;
-      // Avoid null check if we know obj is not null.
-      if (instruction->MustDoNullCheck()) {
-        __ testl(obj, obj);
-        __ j(kEqual, &done);
-      }
-
       // /* HeapReference<Class> */ temp = obj->klass_
-      GenerateReferenceLoadTwoRegisters(instruction, temp_loc, obj_loc, class_offset);
-
+      GenerateReferenceLoadTwoRegisters(instruction,
+                                        temp_loc,
+                                        obj_loc,
+                                        class_offset,
+                                        kWithoutReadBarrier);
       // If the class is abstract, we eagerly fetch the super class of the
       // object to avoid doing a comparison we know will fail.
-      NearLabel loop, compare_classes;
+      NearLabel loop;
       __ Bind(&loop);
       // /* HeapReference<Class> */ temp = temp->super_class_
-      GenerateReferenceLoadOneRegister(instruction, temp_loc, super_offset, maybe_temp2_loc);
+      GenerateReferenceLoadOneRegister(instruction,
+                                       temp_loc,
+                                       super_offset,
+                                       maybe_temp2_loc,
+                                       kWithoutReadBarrier);
 
-      // If the class reference currently in `temp` is not null, jump
-      // to the `compare_classes` label to compare it with the checked
-      // class.
+      // If the class reference currently in `temp` is null, jump to the slow path to throw the
+      // exception.
       __ testl(temp, temp);
-      __ j(kNotEqual, &compare_classes);
-      // Otherwise, jump to the slow path to throw the exception.
-      //
-      // But before, move back the object's class into `temp` before
-      // going into the slow path, as it has been overwritten in the
-      // meantime.
-      // /* HeapReference<Class> */ temp = obj->klass_
-      GenerateReferenceLoadTwoRegisters(instruction, temp_loc, obj_loc, class_offset);
-      __ jmp(type_check_slow_path->GetEntryLabel());
-
-      __ Bind(&compare_classes);
+      // Otherwise, compare the classes.
+      __ j(kZero, type_check_slow_path->GetEntryLabel());
       if (cls.IsRegister()) {
         __ cmpl(temp, cls.AsRegister<CpuRegister>());
       } else {
@@ -5906,21 +6152,16 @@
         __ cmpl(temp, Address(CpuRegister(RSP), cls.GetStackIndex()));
       }
       __ j(kNotEqual, &loop);
-      __ Bind(&done);
       break;
     }
 
     case TypeCheckKind::kClassHierarchyCheck: {
-      NearLabel done;
-      // Avoid null check if we know obj is not null.
-      if (instruction->MustDoNullCheck()) {
-        __ testl(obj, obj);
-        __ j(kEqual, &done);
-      }
-
       // /* HeapReference<Class> */ temp = obj->klass_
-      GenerateReferenceLoadTwoRegisters(instruction, temp_loc, obj_loc, class_offset);
-
+      GenerateReferenceLoadTwoRegisters(instruction,
+                                        temp_loc,
+                                        obj_loc,
+                                        class_offset,
+                                        kWithoutReadBarrier);
       // Walk over the class hierarchy to find a match.
       NearLabel loop;
       __ Bind(&loop);
@@ -5933,39 +6174,28 @@
       __ j(kEqual, &done);
 
       // /* HeapReference<Class> */ temp = temp->super_class_
-      GenerateReferenceLoadOneRegister(instruction, temp_loc, super_offset, maybe_temp2_loc);
+      GenerateReferenceLoadOneRegister(instruction,
+                                       temp_loc,
+                                       super_offset,
+                                       maybe_temp2_loc,
+                                       kWithoutReadBarrier);
 
       // If the class reference currently in `temp` is not null, jump
       // back at the beginning of the loop.
       __ testl(temp, temp);
-      __ j(kNotEqual, &loop);
+      __ j(kNotZero, &loop);
       // Otherwise, jump to the slow path to throw the exception.
-      //
-      // But before, move back the object's class into `temp` before
-      // going into the slow path, as it has been overwritten in the
-      // meantime.
-      // /* HeapReference<Class> */ temp = obj->klass_
-      GenerateReferenceLoadTwoRegisters(instruction, temp_loc, obj_loc, class_offset);
       __ jmp(type_check_slow_path->GetEntryLabel());
-      __ Bind(&done);
       break;
     }
 
     case TypeCheckKind::kArrayObjectCheck: {
-      // We cannot use a NearLabel here, as its range might be too
-      // short in some cases when read barriers are enabled.  This has
-      // been observed for instance when the code emitted for this
-      // case uses high x86-64 registers (R8-R15).
-      Label done;
-      // Avoid null check if we know obj is not null.
-      if (instruction->MustDoNullCheck()) {
-        __ testl(obj, obj);
-        __ j(kEqual, &done);
-      }
-
       // /* HeapReference<Class> */ temp = obj->klass_
-      GenerateReferenceLoadTwoRegisters(instruction, temp_loc, obj_loc, class_offset);
-
+      GenerateReferenceLoadTwoRegisters(instruction,
+                                        temp_loc,
+                                        obj_loc,
+                                        class_offset,
+                                        kWithoutReadBarrier);
       // Do an exact check.
       NearLabel check_non_primitive_component_type;
       if (cls.IsRegister()) {
@@ -5978,48 +6208,26 @@
 
       // Otherwise, we need to check that the object's class is a non-primitive array.
       // /* HeapReference<Class> */ temp = temp->component_type_
-      GenerateReferenceLoadOneRegister(instruction, temp_loc, component_offset, maybe_temp2_loc);
+      GenerateReferenceLoadOneRegister(instruction,
+                                       temp_loc,
+                                       component_offset,
+                                       maybe_temp2_loc,
+                                       kWithoutReadBarrier);
 
       // If the component type is not null (i.e. the object is indeed
       // an array), jump to label `check_non_primitive_component_type`
       // to further check that this component type is not a primitive
       // type.
       __ testl(temp, temp);
-      __ j(kNotEqual, &check_non_primitive_component_type);
       // Otherwise, jump to the slow path to throw the exception.
-      //
-      // But before, move back the object's class into `temp` before
-      // going into the slow path, as it has been overwritten in the
-      // meantime.
-      // /* HeapReference<Class> */ temp = obj->klass_
-      GenerateReferenceLoadTwoRegisters(instruction, temp_loc, obj_loc, class_offset);
-      __ jmp(type_check_slow_path->GetEntryLabel());
-
-      __ Bind(&check_non_primitive_component_type);
+      __ j(kZero, type_check_slow_path->GetEntryLabel());
       __ cmpw(Address(temp, primitive_offset), Immediate(Primitive::kPrimNot));
-      __ j(kEqual, &done);
-      // Same comment as above regarding `temp` and the slow path.
-      // /* HeapReference<Class> */ temp = obj->klass_
-      GenerateReferenceLoadTwoRegisters(instruction, temp_loc, obj_loc, class_offset);
-      __ jmp(type_check_slow_path->GetEntryLabel());
-      __ Bind(&done);
+      __ j(kNotEqual, type_check_slow_path->GetEntryLabel());
       break;
     }
 
-    case TypeCheckKind::kUnresolvedCheck:
-    case TypeCheckKind::kInterfaceCheck:
-      NearLabel done;
-      // Avoid null check if we know obj is not null.
-      if (instruction->MustDoNullCheck()) {
-        __ testl(obj, obj);
-        __ j(kEqual, &done);
-      }
-
-      // /* HeapReference<Class> */ temp = obj->klass_
-      GenerateReferenceLoadTwoRegisters(instruction, temp_loc, obj_loc, class_offset);
-
-      // We always go into the type check slow path for the unresolved
-      // and interface check cases.
+    case TypeCheckKind::kUnresolvedCheck: {
+      // We always go into the type check slow path for the unresolved case.
       //
       // We cannot directly call the CheckCast runtime entry point
       // without resorting to a type checking slow path here (i.e. by
@@ -6028,16 +6236,52 @@
       // instruction (following the runtime calling convention), which
       // might be cluttered by the potential first read barrier
       // emission at the beginning of this method.
-      //
-      // TODO: Introduce a new runtime entry point taking the object
-      // to test (instead of its class) as argument, and let it deal
-      // with the read barrier issues. This will let us refactor this
-      // case of the `switch` code as it was previously (with a direct
-      // call to the runtime not using a type checking slow path).
-      // This should also be beneficial for the other cases above.
       __ jmp(type_check_slow_path->GetEntryLabel());
-      __ Bind(&done);
       break;
+    }
+
+    case TypeCheckKind::kInterfaceCheck:
+      // Fast path for the interface check. We always take the slow path under heap poisoning
+      // since unpoisoning cls would require an extra temp.
+      if (!kPoisonHeapReferences) {
+        // Try to avoid read barriers to improve the fast path. Doing so cannot produce
+        // false positives.
+        // /* HeapReference<Class> */ temp = obj->klass_
+        GenerateReferenceLoadTwoRegisters(instruction,
+                                          temp_loc,
+                                          obj_loc,
+                                          class_offset,
+                                          kWithoutReadBarrier);
+
+        // /* HeapReference<Class> */ temp = temp->iftable_
+        GenerateReferenceLoadTwoRegisters(instruction,
+                                          temp_loc,
+                                          temp_loc,
+                                          iftable_offset,
+                                          kWithoutReadBarrier);
+        // Iftable is never null.
+        __ movl(maybe_temp2_loc.AsRegister<CpuRegister>(), Address(temp, array_length_offset));
+        // Loop through the iftable and check if any class matches.
+        NearLabel start_loop;
+        __ Bind(&start_loop);
+        // Need to subtract first to handle the empty array case.
+        __ subl(maybe_temp2_loc.AsRegister<CpuRegister>(), Immediate(2));
+        __ j(kNegative, type_check_slow_path->GetEntryLabel());
+        // Go to next interface if the classes do not match.
+        __ cmpl(cls.AsRegister<CpuRegister>(),
+                CodeGeneratorX86_64::ArrayAddress(temp,
+                                                  maybe_temp2_loc,
+                                                  TIMES_4,
+                                                  object_array_data_offset));
+        __ j(kNotEqual, &start_loop);  // Fall through (success) if the classes match.
+      } else {
+        __ jmp(type_check_slow_path->GetEntryLabel());
+      }
+      break;
+  }
+
+  if (done.IsLinked()) {
+    __ Bind(&done);
   }
 
   __ Bind(type_check_slow_path->GetExitLabel());
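Editor's note: the interface-check fast path above scans the class's iftable, which stores interface classes at even indices (hence the stride of 2 and the scaled compare against the object-array data). A hedged Java sketch of the equivalent semantics; ifTable and iface are illustrative names only:

class IfTableWalk {
    // Sketch of what the emitted loop checks: scan the interleaved
    // {interface, method-array, interface, method-array, ...} table from the end;
    // a match means the cast succeeds, otherwise control reaches the slow path.
    static boolean implementsInterface(Object[] ifTable, Class<?> iface) {
        for (int i = ifTable.length - 2; i >= 0; i -= 2) {
            if (ifTable[i] == iface) {
                return true;
            }
        }
        return false;
    }
}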
@@ -6176,12 +6420,15 @@
   }
 }
 
-void InstructionCodeGeneratorX86_64::GenerateReferenceLoadOneRegister(HInstruction* instruction,
-                                                                      Location out,
-                                                                      uint32_t offset,
-                                                                      Location maybe_temp) {
+void InstructionCodeGeneratorX86_64::GenerateReferenceLoadOneRegister(
+    HInstruction* instruction,
+    Location out,
+    uint32_t offset,
+    Location maybe_temp,
+    ReadBarrierOption read_barrier_option) {
   CpuRegister out_reg = out.AsRegister<CpuRegister>();
-  if (kEmitCompilerReadBarrier) {
+  if (read_barrier_option == kWithReadBarrier) {
+    CHECK(kEmitCompilerReadBarrier);
     if (kUseBakerReadBarrier) {
       // Load with fast path based Baker's read barrier.
       // /* HeapReference<Object> */ out = *(out + offset)
@@ -6206,13 +6453,16 @@
   }
 }
 
-void InstructionCodeGeneratorX86_64::GenerateReferenceLoadTwoRegisters(HInstruction* instruction,
-                                                                       Location out,
-                                                                       Location obj,
-                                                                       uint32_t offset) {
+void InstructionCodeGeneratorX86_64::GenerateReferenceLoadTwoRegisters(
+    HInstruction* instruction,
+    Location out,
+    Location obj,
+    uint32_t offset,
+    ReadBarrierOption read_barrier_option) {
   CpuRegister out_reg = out.AsRegister<CpuRegister>();
   CpuRegister obj_reg = obj.AsRegister<CpuRegister>();
-  if (kEmitCompilerReadBarrier) {
+  if (read_barrier_option == kWithReadBarrier) {
+    CHECK(kEmitCompilerReadBarrier);
     if (kUseBakerReadBarrier) {
       // Load with fast path based Baker's read barrier.
       // /* HeapReference<Object> */ out = *(obj + offset)
@@ -6232,13 +6482,14 @@
   }
 }
 
-void InstructionCodeGeneratorX86_64::GenerateGcRootFieldLoad(HInstruction* instruction,
-                                                             Location root,
-                                                             const Address& address,
-                                                             Label* fixup_label,
-                                                             bool requires_read_barrier) {
+void InstructionCodeGeneratorX86_64::GenerateGcRootFieldLoad(
+    HInstruction* instruction,
+    Location root,
+    const Address& address,
+    Label* fixup_label,
+    ReadBarrierOption read_barrier_option) {
   CpuRegister root_reg = root.AsRegister<CpuRegister>();
-  if (requires_read_barrier) {
+  if (read_barrier_option == kWithReadBarrier) {
     DCHECK(kEmitCompilerReadBarrier);
     if (kUseBakerReadBarrier) {
       // Fast path implementation of art::ReadBarrier::BarrierForRoot when
@@ -6264,7 +6515,7 @@
 
       // Slow path marking the GC root `root`.
       SlowPathCode* slow_path = new (GetGraph()->GetArena()) ReadBarrierMarkSlowPathX86_64(
-          instruction, root, /* unpoison */ false);
+          instruction, root, /* unpoison_ref_before_marking */ false);
       codegen_->AddSlowPath(slow_path);
 
       __ gs()->cmpl(Address::Absolute(Thread::IsGcMarkingOffset<kX86_64PointerSize>().Int32Value(),
@@ -6330,7 +6581,10 @@
                                                                     Location ref,
                                                                     CpuRegister obj,
                                                                     const Address& src,
-                                                                    bool needs_null_check) {
+                                                                    bool needs_null_check,
+                                                                    bool always_update_field,
+                                                                    CpuRegister* temp1,
+                                                                    CpuRegister* temp2) {
   DCHECK(kEmitCompilerReadBarrier);
   DCHECK(kUseBakerReadBarrier);
 
@@ -6344,7 +6598,7 @@
   //   uint32_t rb_state = Lockword(obj->monitor_).ReadBarrierState();
   //   lfence;  // Load fence or artificial data dependency to prevent load-load reordering
   //   HeapReference<Object> ref = *src;  // Original reference load.
-  //   bool is_gray = (rb_state == ReadBarrier::gray_ptr_);
+  //   bool is_gray = (rb_state == ReadBarrier::GrayState());
   //   if (is_gray) {
   //     ref = ReadBarrier::Mark(ref);  // Performed by runtime entrypoint slow path.
   //   }
@@ -6362,14 +6616,13 @@
   uint32_t monitor_offset = mirror::Object::MonitorOffset().Int32Value();
 
   // Given the numeric representation, it's enough to check the low bit of the rb_state.
-  static_assert(ReadBarrier::white_ptr_ == 0, "Expecting white to have value 0");
-  static_assert(ReadBarrier::gray_ptr_ == 1, "Expecting gray to have value 1");
-  static_assert(ReadBarrier::black_ptr_ == 2, "Expecting black to have value 2");
+  static_assert(ReadBarrier::WhiteState() == 0, "Expecting white to have value 0");
+  static_assert(ReadBarrier::GrayState() == 1, "Expecting gray to have value 1");
   constexpr uint32_t gray_byte_position = LockWord::kReadBarrierStateShift / kBitsPerByte;
   constexpr uint32_t gray_bit_position = LockWord::kReadBarrierStateShift % kBitsPerByte;
   constexpr int32_t test_value = static_cast<int8_t>(1 << gray_bit_position);
 
-  // if (rb_state == ReadBarrier::gray_ptr_)
+  // if (rb_state == ReadBarrier::GrayState())
   //   ref = ReadBarrier::Mark(ref);
   // At this point, just do the "if" and make sure that flags are preserved until the branch.
   __ testb(Address(obj, monitor_offset + gray_byte_position), Immediate(test_value));
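Editor's note: the byte/bit split used by the testb above follows directly from the constants; dividing the shift by eight picks the byte of the 32-bit lock word to test and the remainder picks the bit inside it. A small worked example, assuming a shift of 28 purely for illustration (the real value comes from LockWord in the runtime):

class GrayBitArithmetic {
    public static void main(String[] args) {
        final int kBitsPerByte = 8;
        final int readBarrierStateShift = 28;  // assumed value, for illustration only
        int grayBytePosition = readBarrierStateShift / kBitsPerByte;  // byte 3 of the lock word
        int grayBitPosition = readBarrierStateShift % kBitsPerByte;   // bit 4 inside that byte
        int testValue = 1 << grayBitPosition;                         // the testb immediate
        System.out.println(grayBytePosition + " " + grayBitPosition + " 0x"
                + Integer.toHexString(testValue));
    }
}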
@@ -6387,8 +6640,16 @@
 
   // Note: Reference unpoisoning modifies the flags, so we need to delay it after the branch.
   // Slow path marking the object `ref` when it is gray.
-  SlowPathCode* slow_path = new (GetGraph()->GetArena()) ReadBarrierMarkSlowPathX86_64(
-      instruction, ref, /* unpoison */ true);
+  SlowPathCode* slow_path;
+  if (always_update_field) {
+    DCHECK(temp1 != nullptr);
+    DCHECK(temp2 != nullptr);
+    slow_path = new (GetGraph()->GetArena()) ReadBarrierMarkAndUpdateFieldSlowPathX86_64(
+        instruction, ref, obj, src, /* unpoison_ref_before_marking */ true, *temp1, *temp2);
+  } else {
+    slow_path = new (GetGraph()->GetArena()) ReadBarrierMarkSlowPathX86_64(
+        instruction, ref, /* unpoison_ref_before_marking */ true);
+  }
   AddSlowPath(slow_path);
 
   // We have done the "if" of the gray bit check above, now branch based on the flags.
diff --git a/compiler/optimizing/code_generator_x86_64.h b/compiler/optimizing/code_generator_x86_64.h
index 57ef83f..e5a4152 100644
--- a/compiler/optimizing/code_generator_x86_64.h
+++ b/compiler/optimizing/code_generator_x86_64.h
@@ -234,7 +234,8 @@
   void GenerateReferenceLoadOneRegister(HInstruction* instruction,
                                         Location out,
                                         uint32_t offset,
-                                        Location maybe_temp);
+                                        Location maybe_temp,
+                                        ReadBarrierOption read_barrier_option);
   // Generate a heap reference load using two different registers
   // `out` and `obj`:
   //
@@ -248,17 +249,18 @@
   void GenerateReferenceLoadTwoRegisters(HInstruction* instruction,
                                          Location out,
                                          Location obj,
-                                         uint32_t offset);
+                                         uint32_t offset,
+                                         ReadBarrierOption read_barrier_option);
   // Generate a GC root reference load:
   //
   //   root <- *address
   //
-  // while honoring read barriers (if any).
+  // while honoring read barriers based on read_barrier_option.
   void GenerateGcRootFieldLoad(HInstruction* instruction,
                                Location root,
                                const Address& address,
-                               Label* fixup_label = nullptr,
-                               bool requires_read_barrier = kEmitCompilerReadBarrier);
+                               Label* fixup_label,
+                               ReadBarrierOption read_barrier_option);
 
   void PushOntoFPStack(Location source, uint32_t temp_offset,
                        uint32_t stack_adjustment, bool is_float);
@@ -434,13 +436,25 @@
                                              uint32_t data_offset,
                                              Location index,
                                              bool needs_null_check);
-  // Factored implementation used by GenerateFieldLoadWithBakerReadBarrier
-  // and GenerateArrayLoadWithBakerReadBarrier.
+  // Factored implementation, used by GenerateFieldLoadWithBakerReadBarrier,
+  // GenerateArrayLoadWithBakerReadBarrier and some intrinsics.
+  //
+  // Load the object reference located at address `src`, held by
+  // object `obj`, into `ref`, and mark it if needed.  The base of
+  // address `src` must be `obj`.
+  //
+  // If `always_update_field` is true, the value of the reference is
+  // atomically updated in the holder (`obj`).  This operation
+  // requires two temporary registers, which must be provided as
+  // non-null pointers (`temp1` and `temp2`).
   void GenerateReferenceLoadWithBakerReadBarrier(HInstruction* instruction,
                                                  Location ref,
                                                  CpuRegister obj,
                                                  const Address& src,
-                                                 bool needs_null_check);
+                                                 bool needs_null_check,
+                                                 bool always_update_field = false,
+                                                 CpuRegister* temp1 = nullptr,
+                                                 CpuRegister* temp2 = nullptr);
 
   // Generate a read barrier for a heap reference within `instruction`
   // using a slow path.
diff --git a/compiler/optimizing/codegen_test.cc b/compiler/optimizing/codegen_test.cc
index 9ec32df..ac83bd9 100644
--- a/compiler/optimizing/codegen_test.cc
+++ b/compiler/optimizing/codegen_test.cc
@@ -259,7 +259,7 @@
   GraphChecker graph_checker(graph);
   graph_checker.Run();
   if (!graph_checker.IsValid()) {
-    for (auto error : graph_checker.GetErrors()) {
+    for (const auto& error : graph_checker.GetErrors()) {
       std::cout << error << std::endl;
     }
   }
@@ -269,7 +269,7 @@
 template <typename Expected>
 static void RunCodeNoCheck(CodeGenerator* codegen,
                            HGraph* graph,
-                           std::function<void(HGraph*)> hook_before_codegen,
+                           const std::function<void(HGraph*)>& hook_before_codegen,
                            bool has_result,
                            Expected expected) {
   SsaLivenessAnalysis liveness(graph, codegen);
diff --git a/compiler/optimizing/common_arm.h b/compiler/optimizing/common_arm.h
index 5d92bfd..5129daf 100644
--- a/compiler/optimizing/common_arm.h
+++ b/compiler/optimizing/common_arm.h
@@ -37,29 +37,24 @@
   return dwarf::Reg::ArmFp(static_cast<int>(reg.GetCode()));
 }
 
-inline vixl::aarch32::DRegister FromLowSToD(vixl::aarch32::SRegister reg) {
-  DCHECK_EQ(reg.GetCode() % 2, 0u) << reg;
-  return vixl::aarch32::DRegister(reg.GetCode() / 2);
-}
-
 inline vixl::aarch32::Register HighRegisterFrom(Location location) {
   DCHECK(location.IsRegisterPair()) << location;
-  return vixl::aarch32::Register(location.AsRegisterPairHigh<vixl32::Register>());
+  return vixl::aarch32::Register(location.AsRegisterPairHigh<vixl::aarch32::Register>());
 }
 
 inline vixl::aarch32::DRegister HighDRegisterFrom(Location location) {
   DCHECK(location.IsFpuRegisterPair()) << location;
-  return vixl::aarch32::DRegister(location.AsFpuRegisterPairHigh<vixl32::DRegister>());
+  return vixl::aarch32::DRegister(location.AsFpuRegisterPairHigh<vixl::aarch32::DRegister>());
 }
 
 inline vixl::aarch32::Register LowRegisterFrom(Location location) {
   DCHECK(location.IsRegisterPair()) << location;
-  return vixl::aarch32::Register(location.AsRegisterPairLow<vixl32::Register>());
+  return vixl::aarch32::Register(location.AsRegisterPairLow<vixl::aarch32::Register>());
 }
 
 inline vixl::aarch32::SRegister LowSRegisterFrom(Location location) {
   DCHECK(location.IsFpuRegisterPair()) << location;
-  return vixl::aarch32::SRegister(location.AsFpuRegisterPairLow<vixl32::SRegister>());
+  return vixl::aarch32::SRegister(location.AsFpuRegisterPairLow<vixl::aarch32::SRegister>());
 }
 
 inline vixl::aarch32::Register RegisterFrom(Location location) {
@@ -135,6 +130,21 @@
                       instr->InputAt(input_index)->GetType());
 }
 
+inline vixl::aarch32::Register InputRegister(HInstruction* instr) {
+  DCHECK_EQ(instr->InputCount(), 1u);
+  return InputRegisterAt(instr, 0);
+}
+
+inline int32_t Int32ConstantFrom(Location location) {
+  HConstant* instr = location.GetConstant();
+  if (instr->IsIntConstant()) {
+    return instr->AsIntConstant()->GetValue();
+  } else {
+    DCHECK(instr->IsNullConstant()) << instr->DebugName();
+    return 0;
+  }
+}
+
 inline int64_t Int64ConstantFrom(Location location) {
   HConstant* instr = location.GetConstant();
   if (instr->IsIntConstant()) {
diff --git a/compiler/optimizing/constant_folding.h b/compiler/optimizing/constant_folding.h
index e10b1d6..05c6df4 100644
--- a/compiler/optimizing/constant_folding.h
+++ b/compiler/optimizing/constant_folding.h
@@ -39,8 +39,7 @@
  */
 class HConstantFolding : public HOptimization {
  public:
-  HConstantFolding(HGraph* graph, const char* name = kConstantFoldingPassName)
-      : HOptimization(graph, name) {}
+  HConstantFolding(HGraph* graph, const char* name) : HOptimization(graph, name) {}
 
   void Run() OVERRIDE;
 
diff --git a/compiler/optimizing/constant_folding_test.cc b/compiler/optimizing/constant_folding_test.cc
index d1a2a26..5fac3ac 100644
--- a/compiler/optimizing/constant_folding_test.cc
+++ b/compiler/optimizing/constant_folding_test.cc
@@ -42,7 +42,7 @@
                 const std::string& expected_before,
                 const std::string& expected_after_cf,
                 const std::string& expected_after_dce,
-                std::function<void(HGraph*)> check_after_cf,
+                const std::function<void(HGraph*)>& check_after_cf,
                 Primitive::Type return_type = Primitive::kPrimInt) {
     graph_ = CreateCFG(&allocator_, data, return_type);
     TestCodeOnReadyGraph(expected_before,
@@ -54,7 +54,7 @@
   void TestCodeOnReadyGraph(const std::string& expected_before,
                             const std::string& expected_after_cf,
                             const std::string& expected_after_dce,
-                            std::function<void(HGraph*)> check_after_cf) {
+                            const std::function<void(HGraph*)>& check_after_cf) {
     ASSERT_NE(graph_, nullptr);
 
     StringPrettyPrinter printer_before(graph_);
@@ -65,7 +65,7 @@
     std::unique_ptr<const X86InstructionSetFeatures> features_x86(
         X86InstructionSetFeatures::FromCppDefines());
     x86::CodeGeneratorX86 codegenX86(graph_, *features_x86.get(), CompilerOptions());
-    HConstantFolding(graph_).Run();
+    HConstantFolding(graph_, "constant_folding").Run();
     GraphChecker graph_checker_cf(graph_);
     graph_checker_cf.Run();
     ASSERT_TRUE(graph_checker_cf.IsValid());
@@ -77,7 +77,7 @@
 
     check_after_cf(graph_);
 
-    HDeadCodeElimination(graph_).Run();
+    HDeadCodeElimination(graph_, nullptr /* stats */, "dead_code_elimination").Run();
     GraphChecker graph_checker_dce(graph_);
     graph_checker_dce.Run();
     ASSERT_TRUE(graph_checker_dce.IsValid());
diff --git a/compiler/optimizing/dead_code_elimination.cc b/compiler/optimizing/dead_code_elimination.cc
index adfe09b..c31c66a 100644
--- a/compiler/optimizing/dead_code_elimination.cc
+++ b/compiler/optimizing/dead_code_elimination.cc
@@ -18,6 +18,7 @@
 
 #include "base/array_ref.h"
 #include "base/bit_vector-inl.h"
+#include "base/stl_util.h"
 #include "ssa_phi_elimination.h"
 
 namespace art {
@@ -160,19 +161,32 @@
 //        |      |      |
 //       B4      B5    B?
 //
-// This simplification cannot be applied for loop headers, as they
-// contain a suspend check.
+// Note that individual edges can be redirected (for example B2->B3
+// can be redirected as B2->B5) without applying this optimization
+// to other incoming edges.
+//
+// This simplification cannot be applied to catch blocks, because
+// exception handler edges do not represent normal control flow.
+// Though in theory this could still apply to normal control flow
+// going directly to a catch block, we cannot support it at the
+// moment because the catch Phi's inputs do not correspond to the
+// catch block's predecessors, so we cannot identify which
+// predecessor corresponds to a given statically evaluated input.
+//
+// We do not apply this optimization to loop headers as this could
+// create irreducible loops. We rely on the suspend check in the
+// loop header to prevent the pattern match.
 //
 // Note that we rely on the dead code elimination to get rid of B3.
 bool HDeadCodeElimination::SimplifyIfs() {
   bool simplified_one_or_more_ifs = false;
   bool rerun_dominance_and_loop_analysis = false;
 
-  for (HReversePostOrderIterator it(*graph_); !it.Done(); it.Advance()) {
-    HBasicBlock* block = it.Current();
+  for (HBasicBlock* block : graph_->GetReversePostOrder()) {
     HInstruction* last = block->GetLastInstruction();
     HInstruction* first = block->GetFirstInstruction();
-    if (last->IsIf() &&
+    if (!block->IsCatchBlock() &&
+        last->IsIf() &&
         block->HasSinglePhi() &&
         block->GetFirstPhi()->HasOnlyOneNonEnvironmentUse()) {
       bool has_only_phi_and_if = (last == first) && (last->InputAt(0) == block->GetFirstPhi());
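Editor's note: to make the block diagram above concrete, here is a hedged example (not taken from the patch) of Java source whose HIR produces the single-phi-plus-if shape that SimplifyIfs targets; each predecessor of the merge block contributes a constant phi input, so its edge can be redirected straight to the branch that the constant selects:

class SimplifyIfsShape {
    static int describe(boolean flag) {
        int selector = flag ? 1 : 0;  // two predecessors feed the phi with constants
        if (selector != 0) {          // merge block: one phi plus an if on that phi
            return 42;                // taken successor
        }
        return 7;                     // not-taken successor
    }

    public static void main(String[] args) {
        System.out.println(describe(true) + " " + describe(false));
    }
}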
@@ -271,20 +285,22 @@
 }
 
 void HDeadCodeElimination::ConnectSuccessiveBlocks() {
-  // Order does not matter.
-  for (HReversePostOrderIterator it(*graph_); !it.Done();) {
-    HBasicBlock* block  = it.Current();
-    if (block->IsEntryBlock() || !block->GetLastInstruction()->IsGoto()) {
-      it.Advance();
-      continue;
+  // Order does not matter. Skip the entry block by starting at index 1 in reverse post order.
+  for (size_t i = 1u, size = graph_->GetReversePostOrder().size(); i != size; ++i) {
+    HBasicBlock* block  = graph_->GetReversePostOrder()[i];
+    DCHECK(!block->IsEntryBlock());
+    while (block->GetLastInstruction()->IsGoto()) {
+      HBasicBlock* successor = block->GetSingleSuccessor();
+      if (successor->IsExitBlock() || successor->GetPredecessors().size() != 1u) {
+        break;
+      }
+      DCHECK_LT(i, IndexOfElement(graph_->GetReversePostOrder(), successor));
+      block->MergeWith(successor);
+      --size;
+      DCHECK_EQ(size, graph_->GetReversePostOrder().size());
+      DCHECK_EQ(block, graph_->GetReversePostOrder()[i]);
+      // Reiterate on this block in case it can be merged with its new successor.
     }
-    HBasicBlock* successor = block->GetSingleSuccessor();
-    if (successor->IsExitBlock() || successor->GetPredecessors().size() != 1u) {
-      it.Advance();
-      continue;
-    }
-    block->MergeWith(successor);
-    // Reiterate on this block in case it can be merged with its new successor.
   }
 }
 
@@ -300,8 +316,7 @@
   // Remove all dead blocks. Iterate in post order because removal needs the
   // block's chain of dominators and nested loops need to be updated from the
   // inside out.
-  for (HPostOrderIterator it(*graph_); !it.Done(); it.Advance()) {
-    HBasicBlock* block  = it.Current();
+  for (HBasicBlock* block : graph_->GetPostOrder()) {
     int id = block->GetBlockId();
     if (!live_blocks.IsBitSet(id)) {
       MaybeRecordDeadBlock(block);
@@ -332,8 +347,7 @@
 void HDeadCodeElimination::RemoveDeadInstructions() {
   // Process basic blocks in post-order in the dominator tree, so that
   // a dead instruction depending on another dead instruction is removed.
-  for (HPostOrderIterator b(*graph_); !b.Done(); b.Advance()) {
-    HBasicBlock* block = b.Current();
+  for (HBasicBlock* block : graph_->GetPostOrder()) {
     // Traverse this block's instructions in backward order and remove
     // the unused ones.
     HBackwardInstructionIterator i(block->GetInstructions());
diff --git a/compiler/optimizing/dead_code_elimination.h b/compiler/optimizing/dead_code_elimination.h
index 58e700d..84fd890 100644
--- a/compiler/optimizing/dead_code_elimination.h
+++ b/compiler/optimizing/dead_code_elimination.h
@@ -29,9 +29,7 @@
  */
 class HDeadCodeElimination : public HOptimization {
  public:
-  HDeadCodeElimination(HGraph* graph,
-                       OptimizingCompilerStats* stats = nullptr,
-                       const char* name = kDeadCodeEliminationPassName)
+  HDeadCodeElimination(HGraph* graph, OptimizingCompilerStats* stats, const char* name)
       : HOptimization(graph, name, stats) {}
 
   void Run() OVERRIDE;
diff --git a/compiler/optimizing/dead_code_elimination_test.cc b/compiler/optimizing/dead_code_elimination_test.cc
index fe52aac..fdd77e7 100644
--- a/compiler/optimizing/dead_code_elimination_test.cc
+++ b/compiler/optimizing/dead_code_elimination_test.cc
@@ -44,7 +44,7 @@
   std::unique_ptr<const X86InstructionSetFeatures> features_x86(
       X86InstructionSetFeatures::FromCppDefines());
   x86::CodeGeneratorX86 codegenX86(graph, *features_x86.get(), CompilerOptions());
-  HDeadCodeElimination(graph).Run();
+  HDeadCodeElimination(graph, nullptr /* stats */, "dead_code_elimination").Run();
   GraphChecker graph_checker(graph);
   graph_checker.Run();
   ASSERT_TRUE(graph_checker.IsValid());
diff --git a/compiler/optimizing/emit_swap_mips_test.cc b/compiler/optimizing/emit_swap_mips_test.cc
new file mode 100644
index 0000000..9dc53e6
--- /dev/null
+++ b/compiler/optimizing/emit_swap_mips_test.cc
@@ -0,0 +1,354 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "base/arena_allocator.h"
+#include "code_generator_mips.h"
+#include "optimizing_unit_test.h"
+#include "parallel_move_resolver.h"
+#include "utils/assembler_test_base.h"
+#include "utils/mips/assembler_mips.h"
+
+#include "gtest/gtest.h"
+
+namespace art {
+
+class EmitSwapMipsTest : public ::testing::Test {
+ public:
+  void SetUp() OVERRIDE {
+    allocator_.reset(new ArenaAllocator(&pool_));
+    graph_ = CreateGraph(allocator_.get());
+    isa_features_ = MipsInstructionSetFeatures::FromCppDefines();
+    codegen_ = new (graph_->GetArena()) mips::CodeGeneratorMIPS(graph_,
+                                                                *isa_features_.get(),
+                                                                CompilerOptions());
+    moves_ = new (allocator_.get()) HParallelMove(allocator_.get());
+    test_helper_.reset(
+        new AssemblerTestInfrastructure(GetArchitectureString(),
+                                        GetAssemblerCmdName(),
+                                        GetAssemblerParameters(),
+                                        GetObjdumpCmdName(),
+                                        GetObjdumpParameters(),
+                                        GetDisassembleCmdName(),
+                                        GetDisassembleParameters(),
+                                        GetAssemblyHeader()));
+  }
+
+  void TearDown() OVERRIDE {
+    allocator_.reset();
+    test_helper_.reset();
+  }
+
+  // Get the typically used name for this architecture.
+  std::string GetArchitectureString() {
+    return "mips";
+  }
+
+  // Get the name of the assembler.
+  std::string GetAssemblerCmdName() {
+    return "as";
+  }
+
+  // Switches passed to the assembler command.
+  std::string GetAssemblerParameters() {
+    return " --no-warn -32 -march=mips32r2";
+  }
+
+  // Get the name of the objdump.
+  std::string GetObjdumpCmdName() {
+    return "objdump";
+  }
+
+  // Switches passed to the objdump command.
+  std::string GetObjdumpParameters() {
+    return " -h";
+  }
+
+  // Get the name of the disassembler.
+  std::string GetDisassembleCmdName() {
+    return "objdump";
+  }
+
+  // Switches passed to the disassembler command.
+  std::string GetDisassembleParameters() {
+    return " -D -bbinary -mmips:isa32r2";
+  }
+
+  // No need for assembly header here.
+  const char* GetAssemblyHeader() {
+    return nullptr;
+  }
+
+  void DriverWrapper(HParallelMove* move, std::string assembly_text, std::string test_name) {
+    codegen_->GetMoveResolver()->EmitNativeCode(move);
+    assembler_ = codegen_->GetAssembler();
+    assembler_->FinalizeCode();
+    std::unique_ptr<std::vector<uint8_t>> data(new std::vector<uint8_t>(assembler_->CodeSize()));
+    MemoryRegion code(&(*data)[0], data->size());
+    assembler_->FinalizeInstructions(code);
+    test_helper_->Driver(*data, assembly_text, test_name);
+  }
+
+ protected:
+  ArenaPool pool_;
+  HGraph* graph_;
+  HParallelMove* moves_;
+  mips::CodeGeneratorMIPS* codegen_;
+  mips::MipsAssembler* assembler_;
+  std::unique_ptr<ArenaAllocator> allocator_;
+  std::unique_ptr<AssemblerTestInfrastructure> test_helper_;
+  std::unique_ptr<const MipsInstructionSetFeatures> isa_features_;
+};
+
+TEST_F(EmitSwapMipsTest, TwoRegisters) {
+  moves_->AddMove(
+      Location::RegisterLocation(4),
+      Location::RegisterLocation(5),
+      Primitive::kPrimInt,
+      nullptr);
+  moves_->AddMove(
+      Location::RegisterLocation(5),
+      Location::RegisterLocation(4),
+      Primitive::kPrimInt,
+      nullptr);
+  const char* expected =
+      "or $t8, $a1, $zero\n"
+      "or $a1, $a0, $zero\n"
+      "or $a0, $t8, $zero\n";
+  DriverWrapper(moves_, expected, "TwoRegisters");
+}
+
+TEST_F(EmitSwapMipsTest, TwoRegisterPairs) {
+  moves_->AddMove(
+      Location::RegisterPairLocation(4, 5),
+      Location::RegisterPairLocation(6, 7),
+      Primitive::kPrimLong,
+      nullptr);
+  moves_->AddMove(
+      Location::RegisterPairLocation(6, 7),
+      Location::RegisterPairLocation(4, 5),
+      Primitive::kPrimLong,
+      nullptr);
+  const char* expected =
+      "or $t8, $a2, $zero\n"
+      "or $a2, $a0, $zero\n"
+      "or $a0, $t8, $zero\n"
+      "or $t8, $a3, $zero\n"
+      "or $a3, $a1, $zero\n"
+      "or $a1, $t8, $zero\n";
+  DriverWrapper(moves_, expected, "TwoRegisterPairs");
+}
+
+TEST_F(EmitSwapMipsTest, TwoFpuRegistersFloat) {
+  moves_->AddMove(
+      Location::FpuRegisterLocation(4),
+      Location::FpuRegisterLocation(6),
+      Primitive::kPrimFloat,
+      nullptr);
+  moves_->AddMove(
+      Location::FpuRegisterLocation(6),
+      Location::FpuRegisterLocation(4),
+      Primitive::kPrimFloat,
+      nullptr);
+  const char* expected =
+      "mov.s $f8, $f6\n"
+      "mov.s $f6, $f4\n"
+      "mov.s $f4, $f8\n";
+  DriverWrapper(moves_, expected, "TwoFpuRegistersFloat");
+}
+
+TEST_F(EmitSwapMipsTest, TwoFpuRegistersDouble) {
+  moves_->AddMove(
+      Location::FpuRegisterLocation(4),
+      Location::FpuRegisterLocation(6),
+      Primitive::kPrimDouble,
+      nullptr);
+  moves_->AddMove(
+      Location::FpuRegisterLocation(6),
+      Location::FpuRegisterLocation(4),
+      Primitive::kPrimDouble,
+      nullptr);
+  const char* expected =
+      "mov.d $f8, $f6\n"
+      "mov.d $f6, $f4\n"
+      "mov.d $f4, $f8\n";
+  DriverWrapper(moves_, expected, "TwoFpuRegistersDouble");
+}
+
+TEST_F(EmitSwapMipsTest, RegisterAndFpuRegister) {
+  moves_->AddMove(
+      Location::RegisterLocation(4),
+      Location::FpuRegisterLocation(6),
+      Primitive::kPrimFloat,
+      nullptr);
+  moves_->AddMove(
+      Location::FpuRegisterLocation(6),
+      Location::RegisterLocation(4),
+      Primitive::kPrimFloat,
+      nullptr);
+  const char* expected =
+      "or $t8, $a0, $zero\n"
+      "mfc1 $a0, $f6\n"
+      "mtc1 $t8, $f6\n";
+  DriverWrapper(moves_, expected, "RegisterAndFpuRegister");
+}
+
+TEST_F(EmitSwapMipsTest, RegisterPairAndFpuRegister) {
+  moves_->AddMove(
+      Location::RegisterPairLocation(4, 5),
+      Location::FpuRegisterLocation(4),
+      Primitive::kPrimDouble,
+      nullptr);
+  moves_->AddMove(
+      Location::FpuRegisterLocation(4),
+      Location::RegisterPairLocation(4, 5),
+      Primitive::kPrimDouble,
+      nullptr);
+  const char* expected =
+      "mfc1 $t8, $f4\n"
+      "mfc1 $at, $f5\n"
+      "mtc1 $a0, $f4\n"
+      "mtc1 $a1, $f5\n"
+      "or $a0, $t8, $zero\n"
+      "or $a1, $at, $zero\n";
+  DriverWrapper(moves_, expected, "RegisterPairAndFpuRegister");
+}
+
+TEST_F(EmitSwapMipsTest, TwoStackSlots) {
+  moves_->AddMove(
+      Location::StackSlot(52),
+      Location::StackSlot(48),
+      Primitive::kPrimInt,
+      nullptr);
+  moves_->AddMove(
+      Location::StackSlot(48),
+      Location::StackSlot(52),
+      Primitive::kPrimInt,
+      nullptr);
+  const char* expected =
+      "addiu $sp, $sp, -4\n"
+      "sw $v0, 0($sp)\n"
+      "lw $v0, 56($sp)\n"
+      "lw $t8, 52($sp)\n"
+      "sw $v0, 52($sp)\n"
+      "sw $t8, 56($sp)\n"
+      "lw $v0, 0($sp)\n"
+      "addiu $sp, $sp, 4\n";
+  DriverWrapper(moves_, expected, "TwoStackSlots");
+}
+
+TEST_F(EmitSwapMipsTest, TwoDoubleStackSlots) {
+  moves_->AddMove(
+      Location::DoubleStackSlot(56),
+      Location::DoubleStackSlot(48),
+      Primitive::kPrimLong,
+      nullptr);
+  moves_->AddMove(
+      Location::DoubleStackSlot(48),
+      Location::DoubleStackSlot(56),
+      Primitive::kPrimLong,
+      nullptr);
+  const char* expected =
+      "addiu $sp, $sp, -4\n"
+      "sw $v0, 0($sp)\n"
+      "lw $v0, 60($sp)\n"
+      "lw $t8, 52($sp)\n"
+      "sw $v0, 52($sp)\n"
+      "sw $t8, 60($sp)\n"
+      "lw $v0, 64($sp)\n"
+      "lw $t8, 56($sp)\n"
+      "sw $v0, 56($sp)\n"
+      "sw $t8, 64($sp)\n"
+      "lw $v0, 0($sp)\n"
+      "addiu $sp, $sp, 4\n";
+  DriverWrapper(moves_, expected, "TwoDoubleStackSlots");
+}
+
+TEST_F(EmitSwapMipsTest, RegisterAndStackSlot) {
+  moves_->AddMove(
+      Location::RegisterLocation(4),
+      Location::StackSlot(48),
+      Primitive::kPrimInt,
+      nullptr);
+  moves_->AddMove(
+      Location::StackSlot(48),
+      Location::RegisterLocation(4),
+      Primitive::kPrimInt,
+      nullptr);
+  const char* expected =
+      "or $t8, $a0, $zero\n"
+      "lw $a0, 48($sp)\n"
+      "sw $t8, 48($sp)\n";
+  DriverWrapper(moves_, expected, "RegisterAndStackSlot");
+}
+
+TEST_F(EmitSwapMipsTest, RegisterPairAndDoubleStackSlot) {
+  moves_->AddMove(
+      Location::RegisterPairLocation(4, 5),
+      Location::DoubleStackSlot(32),
+      Primitive::kPrimLong,
+      nullptr);
+  moves_->AddMove(
+      Location::DoubleStackSlot(32),
+      Location::RegisterPairLocation(4, 5),
+      Primitive::kPrimLong,
+      nullptr);
+  const char* expected =
+      "or $t8, $a0, $zero\n"
+      "lw $a0, 32($sp)\n"
+      "sw $t8, 32($sp)\n"
+      "or $t8, $a1, $zero\n"
+      "lw $a1, 36($sp)\n"
+      "sw $t8, 36($sp)\n";
+  DriverWrapper(moves_, expected, "RegisterPairAndDoubleStackSlot");
+}
+
+TEST_F(EmitSwapMipsTest, FpuRegisterAndStackSlot) {
+  moves_->AddMove(
+      Location::FpuRegisterLocation(4),
+      Location::StackSlot(48),
+      Primitive::kPrimFloat,
+      nullptr);
+  moves_->AddMove(
+      Location::StackSlot(48),
+      Location::FpuRegisterLocation(4),
+      Primitive::kPrimFloat,
+      nullptr);
+  const char* expected =
+      "mov.s $f8, $f4\n"
+      "lwc1 $f4, 48($sp)\n"
+      "swc1 $f8, 48($sp)\n";
+  DriverWrapper(moves_, expected, "FpuRegisterAndStackSlot");
+}
+
+TEST_F(EmitSwapMipsTest, FpuRegisterAndDoubleStackSlot) {
+  moves_->AddMove(
+      Location::FpuRegisterLocation(4),
+      Location::DoubleStackSlot(48),
+      Primitive::kPrimDouble,
+      nullptr);
+  moves_->AddMove(
+      Location::DoubleStackSlot(48),
+      Location::FpuRegisterLocation(4),
+      Primitive::kPrimDouble,
+      nullptr);
+  const char* expected =
+      "mov.d $f8, $f4\n"
+      "ldc1 $f4, 48($sp)\n"
+      "sdc1 $f8, 48($sp)\n";
+  DriverWrapper(moves_, expected, "FpuRegisterAndDoubleStackSlot");
+}
+
+}  // namespace art
diff --git a/compiler/optimizing/gvn.cc b/compiler/optimizing/gvn.cc
index 1e86b75..f5931a2 100644
--- a/compiler/optimizing/gvn.cc
+++ b/compiler/optimizing/gvn.cc
@@ -411,8 +411,8 @@
 
   // Use the reverse post order to ensure the non back-edge predecessors of a block are
   // visited before the block itself.
-  for (HReversePostOrderIterator it(*graph_); !it.Done(); it.Advance()) {
-    VisitBasicBlock(it.Current());
+  for (HBasicBlock* block : graph_->GetReversePostOrder()) {
+    VisitBasicBlock(block);
   }
 }
 
diff --git a/compiler/optimizing/induction_var_analysis.cc b/compiler/optimizing/induction_var_analysis.cc
index 38937bf..f2602fb 100644
--- a/compiler/optimizing/induction_var_analysis.cc
+++ b/compiler/optimizing/induction_var_analysis.cc
@@ -23,12 +23,12 @@
  * Since graph traversal may enter a SCC at any position, an initial representation may be rotated,
  * along dependences, viz. any of (a, b, c, d), (d, a, b, c)  (c, d, a, b), (b, c, d, a) assuming
  * a chain of dependences (mutual independent items may occur in arbitrary order). For proper
- * classification, the lexicographically first entry-phi is rotated to the front.
+ * classification, the lexicographically first loop-phi is rotated to the front.
  */
 static void RotateEntryPhiFirst(HLoopInformation* loop,
                                 ArenaVector<HInstruction*>* scc,
                                 ArenaVector<HInstruction*>* new_scc) {
-  // Find very first entry-phi.
+  // Find very first loop-phi.
   const HInstructionList& phis = loop->GetHeader()->GetPhis();
   HInstruction* phi = nullptr;
   size_t phi_pos = -1;
@@ -41,7 +41,7 @@
     }
   }
 
-  // If found, bring that entry-phi to front.
+  // If found, bring that loop-phi to front.
   if (phi != nullptr) {
     new_scc->clear();
     for (size_t i = 0; i < size; i++) {
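Editor's note: as a hedged illustration of the rotation described in the comment above (generic Java, not ART code), bringing a chosen element to the front while preserving the cyclic order turns, for example, (c, d, a, b) into (a, b, c, d) when a is the first loop-phi:

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

class RotateToFront {
    // Rotate the cycle so that `first` leads, keeping the relative order of the rest.
    static <T> List<T> rotateToFront(List<T> scc, T first) {
        int pos = scc.indexOf(first);
        List<T> rotated = new ArrayList<>(scc.subList(pos, scc.size()));
        rotated.addAll(scc.subList(0, pos));
        return rotated;
    }

    public static void main(String[] args) {
        System.out.println(rotateToFront(Arrays.asList("c", "d", "a", "b"), "a"));
    }
}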
@@ -94,15 +94,16 @@
              graph->GetArena()->Adapter(kArenaAllocInductionVarAnalysis)),
       type_(Primitive::kPrimVoid),
       induction_(std::less<HLoopInformation*>(),
-                 graph->GetArena()->Adapter(kArenaAllocInductionVarAnalysis)) {
+                 graph->GetArena()->Adapter(kArenaAllocInductionVarAnalysis)),
+      cycles_(std::less<HPhi*>(),
+              graph->GetArena()->Adapter(kArenaAllocInductionVarAnalysis)) {
 }
 
 void HInductionVarAnalysis::Run() {
   // Detects sequence variables (generalized induction variables) during an outer to inner
   // traversal of all loops using Gerlek's algorithm. The order is important to enable
   // range analysis on outer loop while visiting inner loops.
-  for (HReversePostOrderIterator it_graph(*graph_); !it_graph.Done(); it_graph.Advance()) {
-    HBasicBlock* graph_block = it_graph.Current();
+  for (HBasicBlock* graph_block : graph_->GetReversePostOrder()) {
     // Don't analyze irreducible loops.
     if (graph_block->IsLoopHeader() && !graph_block->GetLoopInformation()->IsIrreducible()) {
       VisitLoop(graph_block->GetLoopInformation());
@@ -245,13 +246,13 @@
   const size_t size = scc_.size();
   DCHECK_GE(size, 1u);
 
-  // Rotate proper entry-phi to front.
+  // Rotate proper loop-phi to front.
   if (size > 1) {
     ArenaVector<HInstruction*> other(graph_->GetArena()->Adapter(kArenaAllocInductionVarAnalysis));
     RotateEntryPhiFirst(loop, &scc_, &other);
   }
 
-  // Analyze from entry-phi onwards.
+  // Analyze from loop-phi onwards.
   HInstruction* phi = scc_[0];
   if (!phi->IsLoopHeaderPhi()) {
     return;
@@ -263,6 +264,9 @@
     return;
   }
 
+  // Store interesting cycle.
+  AssignCycle(phi->AsPhi());
+
   // Singleton is wrap-around induction if all internal links have the same meaning.
   if (size == 1) {
     InductionInfo* update = TransferPhi(loop, phi, /* input_index */ 1);
@@ -366,6 +370,7 @@
   // can be combined with an invariant to yield a similar result. Even two linear inputs can
   // be combined. All other combinations fail, however.
   if (a != nullptr && b != nullptr) {
+    type_ = Narrowest(type_, Narrowest(a->type, b->type));
     if (a->induction_class == kInvariant && b->induction_class == kInvariant) {
       return CreateInvariantOp(op, a, b);
     } else if (a->induction_class == kLinear && b->induction_class == kLinear) {
@@ -402,6 +407,7 @@
   // can be multiplied with an invariant to yield a similar but multiplied result.
   // Two non-invariant inputs cannot be multiplied, however.
   if (a != nullptr && b != nullptr) {
+    type_ = Narrowest(type_, Narrowest(a->type, b->type));
     if (a->induction_class == kInvariant && b->induction_class == kInvariant) {
       return CreateInvariantOp(kMul, a, b);
     } else if (a->induction_class == kInvariant) {
@@ -442,6 +448,7 @@
   // Transfer over a unary negation: an invariant, linear, wrap-around, or periodic input
   // yields a similar but negated induction as result.
   if (a != nullptr) {
+    type_ = Narrowest(type_, a->type);
     if (a->induction_class == kInvariant) {
       return CreateInvariantOp(kNeg, nullptr, a);
     }
@@ -941,6 +948,23 @@
   return new (graph_->GetArena()) InductionInfo(kInvariant, op, a, b, nullptr, b->type);
 }
 
+
+void HInductionVarAnalysis::AssignCycle(HPhi* phi) {
+  ArenaSet<HInstruction*>* set = &cycles_.Put(phi, ArenaSet<HInstruction*>(
+      graph_->GetArena()->Adapter(kArenaAllocInductionVarAnalysis)))->second;
+  for (HInstruction* i : scc_) {
+    set->insert(i);
+  }
+}
+
+ArenaSet<HInstruction*>* HInductionVarAnalysis::LookupCycle(HPhi* phi) {
+  auto it = cycles_.find(phi);
+  if (it != cycles_.end()) {
+    return &it->second;
+  }
+  return nullptr;
+}
+
 bool HInductionVarAnalysis::IsExact(InductionInfo* info, int64_t* value) {
   return InductionVarRange(this).IsConstant(info, InductionVarRange::kExact, value);
 }
diff --git a/compiler/optimizing/induction_var_analysis.h b/compiler/optimizing/induction_var_analysis.h
index d190782..7027179 100644
--- a/compiler/optimizing/induction_var_analysis.h
+++ b/compiler/optimizing/induction_var_analysis.h
@@ -214,6 +214,8 @@
   InductionInfo* LookupInfo(HLoopInformation* loop, HInstruction* instruction);
   InductionInfo* CreateConstant(int64_t value, Primitive::Type type);
   InductionInfo* CreateSimplifiedInvariant(InductionOp op, InductionInfo* a, InductionInfo* b);
+  void AssignCycle(HPhi* phi);
+  ArenaSet<HInstruction*>* LookupCycle(HPhi* phi);
 
   // Constants.
   bool IsExact(InductionInfo* info, /*out*/ int64_t* value);
@@ -240,6 +242,11 @@
    */
   ArenaSafeMap<HLoopInformation*, ArenaSafeMap<HInstruction*, InductionInfo*>> induction_;
 
+  /**
+   * Preserves induction cycle information for each loop-phi.
+   */
+  ArenaSafeMap<HPhi*, ArenaSet<HInstruction*>> cycles_;
+
   friend class InductionVarAnalysisTest;
   friend class InductionVarRange;
   friend class InductionVarRangeTest;
diff --git a/compiler/optimizing/induction_var_analysis_test.cc b/compiler/optimizing/induction_var_analysis_test.cc
index 7599c8f..031f1d7 100644
--- a/compiler/optimizing/induction_var_analysis_test.cc
+++ b/compiler/optimizing/induction_var_analysis_test.cc
@@ -740,6 +740,31 @@
   EXPECT_STREQ("((100) (TC-loop) ((0) < (100)))", GetTripCount(0).c_str());
 }
 
+TEST_F(InductionVarAnalysisTest, ByteInductionDerivedIntLoopControl) {
+  // Setup:
+  // for (int i = 0; i < 100; i++) {
+  //   k = (byte) i;
+  //   a[k] = 0;
+  //   k = k + 1
+  //   a[k] = 0;
+  // }
+  BuildLoopNest(1);
+  HInstruction* conv = InsertInstruction(
+      new (&allocator_) HTypeConversion(Primitive::kPrimByte, basic_[0], -1), 0);
+  HInstruction* store1 = InsertArrayStore(conv, 0);
+  HInstruction* add = InsertInstruction(
+      new (&allocator_) HAdd(Primitive::kPrimInt, conv, constant1_), 0);
+  HInstruction* store2 = InsertArrayStore(add, 0);
+
+  PerformInductionVarAnalysis();
+
+  // Byte induction (k) is "transferred" over conversion into addition (k + 1).
+  // This means only values within byte range can be trusted (even though
+  // addition can jump out of the range of course).
+  EXPECT_STREQ("((1) * i + (0)):PrimByte", GetInductionInfo(store1->InputAt(1), 0).c_str());
+  EXPECT_STREQ("((1) * i + (1)):PrimByte", GetInductionInfo(store2->InputAt(1), 0).c_str());
+}
+
 TEST_F(InductionVarAnalysisTest, ByteLoopControl1) {
   // Setup:
   // for (byte i = -128; i < 127; i++) {  // just fits!
diff --git a/compiler/optimizing/induction_var_range.cc b/compiler/optimizing/induction_var_range.cc
index 7cc8b1e..235793d 100644
--- a/compiler/optimizing/induction_var_range.cc
+++ b/compiler/optimizing/induction_var_range.cc
@@ -58,22 +58,90 @@
 }
 
 /**
- * An upper bound a * (length / a) + b, where a >= 1, can be conservatively rewritten as length + b
- * because length >= 0 is true. This makes it more likely the bound is useful to clients.
+ * Detects an instruction that is >= 0. As long as the value is carried by
+ * a single instruction, arithmetic wrap-around cannot occur.
  */
-static InductionVarRange::Value SimplifyMax(InductionVarRange::Value v) {
-  int64_t value;
-  if (v.is_known &&
-      v.a_constant >= 1 &&
-      v.instruction->IsDiv() &&
-      v.instruction->InputAt(0)->IsArrayLength() &&
-      IsIntAndGet(v.instruction->InputAt(1), &value) && v.a_constant == value) {
-    return InductionVarRange::Value(v.instruction->InputAt(0), 1, v.b_constant);
+static bool IsGEZero(HInstruction* instruction) {
+  DCHECK(instruction != nullptr);
+  if (instruction->IsArrayLength()) {
+    return true;
+  } else if (instruction->IsInvokeStaticOrDirect()) {
+    switch (instruction->AsInvoke()->GetIntrinsic()) {
+      case Intrinsics::kMathMinIntInt:
+      case Intrinsics::kMathMinLongLong:
+        // Instruction MIN(>=0, >=0) is >= 0.
+        return IsGEZero(instruction->InputAt(0)) &&
+               IsGEZero(instruction->InputAt(1));
+      case Intrinsics::kMathAbsInt:
+      case Intrinsics::kMathAbsLong:
+        // Instruction ABS(x) is >= 0.
+        return true;
+      default:
+        break;
+    }
+  }
+  int64_t value = -1;
+  return IsIntAndGet(instruction, &value) && value >= 0;
+}
+
+/** Hunts "under the hood" for a suitable instruction at the hint. */
+static bool IsMaxAtHint(
+    HInstruction* instruction, HInstruction* hint, /*out*/HInstruction** suitable) {
+  if (instruction->IsInvokeStaticOrDirect()) {
+    switch (instruction->AsInvoke()->GetIntrinsic()) {
+      case Intrinsics::kMathMinIntInt:
+      case Intrinsics::kMathMinLongLong:
+        // For MIN(x, y), return most suitable x or y as maximum.
+        return IsMaxAtHint(instruction->InputAt(0), hint, suitable) ||
+               IsMaxAtHint(instruction->InputAt(1), hint, suitable);
+      default:
+        break;
+    }
+  } else {
+    *suitable = instruction;
+    while (instruction->IsArrayLength() ||
+           instruction->IsNullCheck() ||
+           instruction->IsNewArray()) {
+      instruction = instruction->InputAt(0);
+    }
+    return instruction == hint;
+  }
+  return false;
+}
+
+/** Post-analysis simplification of a minimum value that makes the bound more useful to clients. */
+static InductionVarRange::Value SimplifyMin(InductionVarRange::Value v) {
+  if (v.is_known && v.a_constant == 1 && v.b_constant <= 0) {
+    // If a == 1, instruction >= 0, and b <= 0, just return the constant b.
+    // No arithmetic wrap-around can occur.
+    if (IsGEZero(v.instruction)) {
+      return InductionVarRange::Value(v.b_constant);
+    }
   }
   return v;
 }
 
-/** Helper method to test for a constant value. */
+/** Post-analysis simplification of a maximum value that makes the bound more useful to clients. */
+static InductionVarRange::Value SimplifyMax(InductionVarRange::Value v, HInstruction* hint) {
+  if (v.is_known && v.a_constant >= 1) {
+    // An upper bound a * (length / a) + b, where a >= 1, can be conservatively rewritten as
+    // length + b because length >= 0 is true.
+    int64_t value;
+    if (v.instruction->IsDiv() &&
+        v.instruction->InputAt(0)->IsArrayLength() &&
+        IsIntAndGet(v.instruction->InputAt(1), &value) && v.a_constant == value) {
+      return InductionVarRange::Value(v.instruction->InputAt(0), 1, v.b_constant);
+    }
+    // If a == 1, the most suitable instruction found at the hint suffices as the maximum value.
+    HInstruction* suitable = nullptr;
+    if (v.a_constant == 1 && IsMaxAtHint(v.instruction, hint, &suitable)) {
+      return InductionVarRange::Value(suitable, 1, v.b_constant);
+    }
+  }
+  return v;
+}
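Editor's note: a small hedged check of the two simplifications above. For the minimum, instruction >= 0 and b <= 0 give instruction * 1 + b >= b, so b itself is a safe lower bound; for the maximum, integer division rounds down, so a * (length / a) + b never exceeds length + b when a >= 1 and length >= 0:

class BoundSimplificationCheck {
    public static void main(String[] args) {
        int b = -3;
        for (int a = 1; a <= 8; a++) {
            for (int length = 0; length <= 64; length++) {
                // Upper-bound rewrite: a * (length / a) + b  <=  length + b.
                if (a * (length / a) + b > length + b) {
                    throw new AssertionError("upper-bound rewrite not conservative");
                }
                // Lower-bound rewrite: for a non-negative value v, v + b >= b.
                if (length + b < b) {
                    throw new AssertionError("lower-bound rewrite not conservative");
                }
            }
        }
        System.out.println("both simplifications are conservative on the sampled range");
    }
}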
+
+/** Tests for a constant value. */
 static bool IsConstantValue(InductionVarRange::Value v) {
   return v.is_known && v.a_constant == 0;
 }
@@ -97,7 +165,7 @@
   }
 }
 
-/** Helper method to insert an instruction. */
+/** Inserts an instruction. */
 static HInstruction* Insert(HBasicBlock* block, HInstruction* instruction) {
   DCHECK(block != nullptr);
   DCHECK(block->GetLastInstruction() != nullptr) << block->GetBlockId();
@@ -106,7 +174,7 @@
   return instruction;
 }
 
-/** Helper method to obtain loop's control instruction. */
+/** Obtains loop's control instruction. */
 static HInstruction* GetLoopControl(HLoopInformation* loop) {
   DCHECK(loop != nullptr);
   return loop->GetHeader()->GetLastInstruction();
@@ -150,9 +218,14 @@
   chase_hint_ = chase_hint;
   bool in_body = context->GetBlock() != loop->GetHeader();
   int64_t stride_value = 0;
-  *min_val = GetVal(info, trip, in_body, /* is_min */ true);
-  *max_val = SimplifyMax(GetVal(info, trip, in_body, /* is_min */ false));
+  *min_val = SimplifyMin(GetVal(info, trip, in_body, /* is_min */ true));
+  *max_val = SimplifyMax(GetVal(info, trip, in_body, /* is_min */ false), chase_hint);
   *needs_finite_test = NeedsTripCount(info, &stride_value) && IsUnsafeTripCount(trip);
+  chase_hint_ = nullptr;
+  // Retry chasing constants for wrap-around (merge sensitive).
+  if (!min_val->is_known && info->induction_class == HInductionVarAnalysis::kWrapAround) {
+    *min_val = SimplifyMin(GetVal(info, trip, in_body, /* is_min */ true));
+  }
   return true;
 }
 
@@ -175,7 +248,7 @@
                                   needs_taken_test)
       && (stride_value == -1 ||
           stride_value == 0 ||
-          stride_value == 1);  // avoid wrap-around anomalies.
+          stride_value == 1);  // avoid arithmetic wrap-around anomalies.
 }
 
 void InductionVarRange::GenerateRange(HInstruction* context,
@@ -302,7 +375,8 @@
         return true;
       }
     }
-    // Try range analysis on the invariant, but only on proper range to avoid wrap-around anomalies.
+    // Try range analysis on the invariant, but only accept a proper range
+    // to avoid arithmetic wrap-around anomalies.
     Value min_val = GetVal(info, nullptr, /* in_body */ true, /* is_min */ true);
     Value max_val = GetVal(info, nullptr, /* in_body */ true, /* is_min */ false);
     if (IsConstantValue(min_val) &&
@@ -450,25 +524,26 @@
                                                      HInductionVarAnalysis::InductionInfo* trip,
                                                      bool in_body,
                                                      bool is_min) const {
-  // Stop chasing the instruction at constant or hint.
-  int64_t value;
-  if (IsIntAndGet(instruction, &value) && CanLongValueFitIntoInt(value)) {
-    return Value(static_cast<int32_t>(value));
-  } else if (instruction == chase_hint_) {
-    return Value(instruction, 1, 0);
-  }
-  // Special cases when encountering a single instruction that denotes trip count in the
-  // loop-body: min is 1 and, when chasing constants, max of safe trip-count is max int
-  if (in_body && trip != nullptr && instruction == trip->op_a->fetch) {
+  // Special case when chasing constants: a single instruction that denotes the trip count
+  // in the loop-body is at least 1 and, for a safe trip-count, at most max int.
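+  // For example, in a loop over [0, n), the expression n seen inside the body is at
+  // least 1, since the body is known to have been entered.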
+  if (chase_hint_ == nullptr && in_body && trip != nullptr && instruction == trip->op_a->fetch) {
     if (is_min) {
       return Value(1);
-    } else if (chase_hint_ == nullptr && !IsUnsafeTripCount(trip)) {
+    } else if (!IsUnsafeTripCount(trip)) {
       return Value(std::numeric_limits<int32_t>::max());
     }
   }
-  // Chase the instruction a bit deeper into the HIR tree, so that it becomes more likely
-  // range analysis will compare the same instructions as terminal nodes.
-  if (instruction->IsAdd()) {
+  // Unless at a constant or hint, chase the instruction a bit deeper into the HIR tree, so that
+  // it becomes more likely range analysis will compare the same instructions as terminal nodes.
+  int64_t value;
+  if (IsIntAndGet(instruction, &value) && CanLongValueFitIntoInt(value)) {
+    // Proper constant reveals best information.
+    return Value(static_cast<int32_t>(value));
+  } else if (instruction == chase_hint_) {
+    // At hint, fetch is represented by itself.
+    return Value(instruction, 1, 0);
+  } else if (instruction->IsAdd()) {
+    // Incorporate suitable constants in the chased value.
     if (IsIntAndGet(instruction->InputAt(0), &value) && CanLongValueFitIntoInt(value)) {
       return AddValue(Value(static_cast<int32_t>(value)),
                       GetFetch(instruction->InputAt(1), trip, in_body, is_min));
@@ -477,14 +552,14 @@
                       Value(static_cast<int32_t>(value)));
     }
   } else if (instruction->IsArrayLength()) {
-    // Return extreme values when chasing constants. Otherwise, chase deeper.
+    // Exploit length properties when chasing constants, or chase into a new array declaration.
     if (chase_hint_ == nullptr) {
       return is_min ? Value(0) : Value(std::numeric_limits<int32_t>::max());
     } else if (instruction->InputAt(0)->IsNewArray()) {
       return GetFetch(instruction->InputAt(0)->InputAt(0), trip, in_body, is_min);
     }
   } else if (instruction->IsTypeConversion()) {
-    // Since analysis is 32-bit (or narrower) we allow a widening along the path.
+    // Since analysis is 32-bit (or narrower), chase beyond widening along the path.
     if (instruction->AsTypeConversion()->GetInputType() == Primitive::kPrimInt &&
         instruction->AsTypeConversion()->GetResultType() == Primitive::kPrimLong) {
       return GetFetch(instruction->InputAt(0), trip, in_body, is_min);
@@ -506,6 +581,7 @@
       !IsUnsafeTripCount(next_trip)) {
     return GetVal(next_info, next_trip, next_in_body, is_min);
   }
+  // Fetch is represented by itself.
   return Value(instruction, 1, 0);
 }
 
@@ -870,10 +946,11 @@
     HInstruction* opb = nullptr;
     switch (info->induction_class) {
       case HInductionVarAnalysis::kInvariant:
-        // Invariants.
+        // Invariants (note that even though is_min does not impact code generation for
+        // invariants, some effort is made to keep this parameter consistent).
         switch (info->operation) {
           case HInductionVarAnalysis::kAdd:
-          case HInductionVarAnalysis::kXor:
+          case HInductionVarAnalysis::kXor:  // no proper is_min for second arg
           case HInductionVarAnalysis::kLT:
           case HInductionVarAnalysis::kLE:
           case HInductionVarAnalysis::kGT:
diff --git a/compiler/optimizing/induction_var_range.h b/compiler/optimizing/induction_var_range.h
index 2f70046..034cf32 100644
--- a/compiler/optimizing/induction_var_range.h
+++ b/compiler/optimizing/induction_var_range.h
@@ -136,10 +136,20 @@
    */
   void ReVisit(HLoopInformation* loop) {
     induction_analysis_->induction_.erase(loop);
+    for (HInstructionIterator it(loop->GetHeader()->GetPhis()); !it.Done(); it.Advance()) {
+      induction_analysis_->cycles_.erase(it.Current()->AsPhi());
+    }
     induction_analysis_->VisitLoop(loop);
   }
 
   /**
+   * Looks up an interesting cycle associated with an entry phi.
+   */
+  ArenaSet<HInstruction*>* LookupCycle(HPhi* phi) const {
+    return induction_analysis_->LookupCycle(phi);
+  }
+
+  /**
    * Checks if header logic of a loop terminates.
    */
   bool IsFinite(HLoopInformation* loop) const;
diff --git a/compiler/optimizing/inliner.cc b/compiler/optimizing/inliner.cc
index 9faa98a..7fe54b9 100644
--- a/compiler/optimizing/inliner.cc
+++ b/compiler/optimizing/inliner.cc
@@ -1219,22 +1219,29 @@
     return false;
   }
 
-  HReversePostOrderIterator it(*callee_graph);
-  it.Advance();  // Past the entry block, it does not contain instructions that prevent inlining.
   size_t number_of_instructions = 0;
 
   bool can_inline_environment =
       total_number_of_dex_registers_ < kMaximumNumberOfCumulatedDexRegisters;
 
-  for (; !it.Done(); it.Advance()) {
-    HBasicBlock* block = it.Current();
-
-    if (block->IsLoopHeader() && block->GetLoopInformation()->IsIrreducible()) {
-      // Don't inline methods with irreducible loops, they could prevent some
-      // optimizations to run.
-      VLOG(compiler) << "Method " << callee_dex_file.PrettyMethod(method_index)
-                     << " could not be inlined because it contains an irreducible loop";
-      return false;
+  // Skip the entry block; it does not contain instructions that prevent inlining.
+  for (HBasicBlock* block : callee_graph->GetReversePostOrderSkipEntryBlock()) {
+    if (block->IsLoopHeader()) {
+      if (block->GetLoopInformation()->IsIrreducible()) {
+        // Don't inline methods with irreducible loops; they could prevent some
+        // optimizations from running.
+        VLOG(compiler) << "Method " << callee_dex_file.PrettyMethod(method_index)
+                       << " could not be inlined because it contains an irreducible loop";
+        return false;
+      }
+      if (!block->GetLoopInformation()->HasExitEdge()) {
+        // Don't inline methods with loops that have no exit, since they cause the
+        // loop information to be computed incorrectly when it is updated after
+        // inlining.
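+        // (For example, a method ending in `while (true) {}` contains such a loop.)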
+        VLOG(compiler) << "Method " << callee_dex_file.PrettyMethod(method_index)
+                       << " could not be inlined because it contains a loop with no exit";
+        return false;
+      }
     }
 
     for (HInstructionIterator instr_it(block->GetInstructions());
@@ -1318,8 +1325,8 @@
                                   const DexCompilationUnit& dex_compilation_unit) {
   // Note: if the outermost_graph_ is being compiled OSR, we should not run any
   // optimization that could lead to a HDeoptimize. The following optimizations do not.
-  HDeadCodeElimination dce(callee_graph, stats_);
-  HConstantFolding fold(callee_graph);
+  HDeadCodeElimination dce(callee_graph, stats_, "dead_code_elimination$inliner");
+  HConstantFolding fold(callee_graph, "constant_folding$inliner");
   HSharpening sharpening(callee_graph, codegen_, dex_compilation_unit, compiler_driver_);
   InstructionSimplifier simplify(callee_graph, stats_);
   IntrinsicsRecognizer intrinsics(callee_graph, stats_);
diff --git a/compiler/optimizing/instruction_builder.cc b/compiler/optimizing/instruction_builder.cc
index 613e008..b44137d 100644
--- a/compiler/optimizing/instruction_builder.cc
+++ b/compiler/optimizing/instruction_builder.cc
@@ -81,8 +81,7 @@
       // locals (guaranteed by HGraphBuilder) and that all try blocks have been
       // visited already (from HTryBoundary scoping and reverse post order).
       bool catch_block_visited = false;
-      for (HReversePostOrderIterator it(*graph_); !it.Done(); it.Advance()) {
-        HBasicBlock* current = it.Current();
+      for (HBasicBlock* current : graph_->GetReversePostOrder()) {
         if (current == current_block_) {
           catch_block_visited = true;
         } else if (current->IsTryBlock()) {
@@ -276,8 +275,8 @@
     FindNativeDebugInfoLocations(native_debug_info_locations);
   }
 
-  for (HReversePostOrderIterator block_it(*graph_); !block_it.Done(); block_it.Advance()) {
-    current_block_ = block_it.Current();
+  for (HBasicBlock* block : graph_->GetReversePostOrder()) {
+    current_block_ = block;
     uint32_t block_dex_pc = current_block_->GetDexPc();
 
     InitializeBlockLocals();
@@ -1724,7 +1723,10 @@
     if (dex_pc_in_map == dex_pc) {
       return value_in_map;
     } else {
-      skipped_interpreter_metadata_.Put(dex_pc_in_map, value_in_map);
+      // Use Overwrite rather than Put, as a quickened CHECK-CAST has two entries with
+      // the same dex_pc. This is OK because the compiler does not care about those
+      // entries.
+      skipped_interpreter_metadata_.Overwrite(dex_pc_in_map, value_in_map);
     }
   }
 }
diff --git a/compiler/optimizing/instruction_simplifier.cc b/compiler/optimizing/instruction_simplifier.cc
index 3bb1c1d..85b461d 100644
--- a/compiler/optimizing/instruction_simplifier.cc
+++ b/compiler/optimizing/instruction_simplifier.cc
@@ -106,14 +106,17 @@
   void SimplifyFP2Int(HInvoke* invoke);
   void SimplifyStringCharAt(HInvoke* invoke);
   void SimplifyStringIsEmptyOrLength(HInvoke* invoke);
+  void SimplifyNPEOnArgN(HInvoke* invoke, size_t);
   void SimplifyMemBarrier(HInvoke* invoke, MemBarrierKind barrier_kind);
 
   OptimizingCompilerStats* stats_;
   bool simplification_occurred_ = false;
   int simplifications_at_current_position_ = 0;
-  // We ensure we do not loop infinitely. The value is a finger in the air guess
-  // that should allow enough simplification.
-  static constexpr int kMaxSamePositionSimplifications = 10;
+  // We ensure we do not loop infinitely. The value should not be too high, since that
+  // would allow looping around the same basic block too many times. The value should
+  // not be too low either, however, since we want to allow revisiting a basic block
+  // with many statements and simplifications at least once.
+  static constexpr int kMaxSamePositionSimplifications = 50;
 };
 
 void InstructionSimplifier::Run() {
@@ -124,20 +127,16 @@
 void InstructionSimplifierVisitor::Run() {
   // Iterate in reverse post order to open up more simplifications to users
   // of instructions that got simplified.
-  for (HReversePostOrderIterator it(*GetGraph()); !it.Done();) {
+  for (HBasicBlock* block : GetGraph()->GetReversePostOrder()) {
     // The simplification of an instruction to another instruction may yield
     // possibilities for other simplifications. So although we perform a reverse
     // post order visit, we sometimes need to revisit an instruction index.
-    simplification_occurred_ = false;
-    VisitBasicBlock(it.Current());
-    if (simplification_occurred_ &&
-        (simplifications_at_current_position_ < kMaxSamePositionSimplifications)) {
-      // New simplifications may be applicable to the instruction at the
-      // current index, so don't advance the iterator.
-      continue;
-    }
+    do {
+      simplification_occurred_ = false;
+      VisitBasicBlock(block);
+    } while (simplification_occurred_ &&
+             (simplifications_at_current_position_ < kMaxSamePositionSimplifications));
     simplifications_at_current_position_ = 0;
-    it.Advance();
   }
 }
 
@@ -609,11 +608,23 @@
   return nullptr;
 }
 
+static bool CmpHasBoolType(HInstruction* input, HInstruction* cmp) {
+  if (input->GetType() == Primitive::kPrimBoolean) {
+    return true;  // input has direct boolean type
+  } else if (cmp->GetUses().HasExactlyOneElement()) {
+    // Comparison also has boolean type if both its input and the instruction
+    // itself feed into the same phi node.
+    HInstruction* user = cmp->GetUses().front().GetUser();
+    return user->IsPhi() && user->HasInput(input) && user->HasInput(cmp);
+  }
+  return false;
+}
+
 void InstructionSimplifierVisitor::VisitEqual(HEqual* equal) {
   HInstruction* input_const = equal->GetConstantRight();
   if (input_const != nullptr) {
     HInstruction* input_value = equal->GetLeastConstantLeft();
-    if (input_value->GetType() == Primitive::kPrimBoolean && input_const->IsIntConstant()) {
+    if (CmpHasBoolType(input_value, equal) && input_const->IsIntConstant()) {
       HBasicBlock* block = equal->GetBlock();
       // We are comparing the boolean to a constant which is of type int and can
       // be any constant.
@@ -623,6 +634,7 @@
         block->RemoveInstruction(equal);
         RecordSimplification();
       } else if (input_const->AsIntConstant()->IsFalse()) {
+        // Replace (bool_value == false) with !bool_value
         equal->ReplaceWith(GetGraph()->InsertOppositeCondition(input_value, equal));
         block->RemoveInstruction(equal);
         RecordSimplification();
@@ -644,11 +656,12 @@
   HInstruction* input_const = not_equal->GetConstantRight();
   if (input_const != nullptr) {
     HInstruction* input_value = not_equal->GetLeastConstantLeft();
-    if (input_value->GetType() == Primitive::kPrimBoolean && input_const->IsIntConstant()) {
+    if (CmpHasBoolType(input_value, not_equal) && input_const->IsIntConstant()) {
       HBasicBlock* block = not_equal->GetBlock();
       // We are comparing the boolean to a constant which is of type int and can
       // be any constant.
       if (input_const->AsIntConstant()->IsTrue()) {
+        // Replace (bool_value != true) with !bool_value
         not_equal->ReplaceWith(GetGraph()->InsertOppositeCondition(input_value, not_equal));
         block->RemoveInstruction(not_equal);
         RecordSimplification();
@@ -1846,6 +1859,16 @@
   invoke->GetBlock()->ReplaceAndRemoveInstructionWith(invoke, replacement);
 }
 
+// This method should only be used on intrinsics whose sole way of throwing an
+// exception is raising an NPE when the nth argument is null. If that argument
+// is provably non-null, we can clear the can-throw flag.
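+// For example, String.indexOf(String) (handled below) can only throw when its String
+// argument (input 1) is null; the receiver (input 0) is guarded by its own HNullCheck.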
+void InstructionSimplifierVisitor::SimplifyNPEOnArgN(HInvoke* invoke, size_t n) {
+  HInstruction* arg = invoke->InputAt(n);
+  if (!arg->CanBeNull()) {
+    invoke->SetCanThrow(false);
+  }
+}
+
 void InstructionSimplifierVisitor::SimplifyMemBarrier(HInvoke* invoke, MemBarrierKind barrier_kind) {
   uint32_t dex_pc = invoke->GetDexPc();
   HMemoryBarrier* mem_barrier = new (GetGraph()->GetArena()) HMemoryBarrier(barrier_kind, dex_pc);
@@ -1899,6 +1922,10 @@
     case Intrinsics::kStringLength:
       SimplifyStringIsEmptyOrLength(instruction);
       break;
+    case Intrinsics::kStringStringIndexOf:
+    case Intrinsics::kStringStringIndexOfAfter:
+      SimplifyNPEOnArgN(instruction, 1);  // 0th has own NullCheck
+      break;
     case Intrinsics::kUnsafeLoadFence:
       SimplifyMemBarrier(instruction, MemBarrierKind::kLoadAny);
       break;
diff --git a/compiler/optimizing/instruction_simplifier_arm.h b/compiler/optimizing/instruction_simplifier_arm.h
index 782110c..9b54511 100644
--- a/compiler/optimizing/instruction_simplifier_arm.h
+++ b/compiler/optimizing/instruction_simplifier_arm.h
@@ -48,7 +48,7 @@
 class InstructionSimplifierArm : public HOptimization {
  public:
   InstructionSimplifierArm(HGraph* graph, OptimizingCompilerStats* stats)
-    : HOptimization(graph, kInstructionSimplifierArmPassName, stats) {}
+      : HOptimization(graph, kInstructionSimplifierArmPassName, stats) {}
 
   static constexpr const char* kInstructionSimplifierArmPassName = "instruction_simplifier_arm";
 
diff --git a/compiler/optimizing/instruction_simplifier_arm64.cc b/compiler/optimizing/instruction_simplifier_arm64.cc
index d0dd650..6d107d5 100644
--- a/compiler/optimizing/instruction_simplifier_arm64.cc
+++ b/compiler/optimizing/instruction_simplifier_arm64.cc
@@ -140,13 +140,6 @@
 
 void InstructionSimplifierArm64Visitor::VisitArrayGet(HArrayGet* instruction) {
   size_t data_offset = CodeGenerator::GetArrayDataOffset(instruction);
-  // Don't move the array pointer if it is charAt because we need to take the count first.
-  // TODO: Implement reading (length + compression) for String compression feature from
-  // negative offset (count_offset - data_offset) using LDP and clobbering an extra temporary.
-  // Note that "LDR (Immediate)" does not have a "signed offset" encoding.
-  if (mirror::kUseStringCompression && instruction->IsStringCharAt()) {
-    return;
-  }
   if (TryExtractArrayAccessAddress(instruction,
                                    instruction->GetArray(),
                                    instruction->GetIndex(),
diff --git a/compiler/optimizing/instruction_simplifier_arm64.h b/compiler/optimizing/instruction_simplifier_arm64.h
index f71684e..d4cb1f1 100644
--- a/compiler/optimizing/instruction_simplifier_arm64.h
+++ b/compiler/optimizing/instruction_simplifier_arm64.h
@@ -82,9 +82,10 @@
 class InstructionSimplifierArm64 : public HOptimization {
  public:
   InstructionSimplifierArm64(HGraph* graph, OptimizingCompilerStats* stats)
-    : HOptimization(graph, kInstructionSimplifierArm64PassName, stats) {}
-  static constexpr const char* kInstructionSimplifierArm64PassName
-      = "instruction_simplifier_arm64";
+      : HOptimization(graph, kInstructionSimplifierArm64PassName, stats) {}
+
+  static constexpr const char* kInstructionSimplifierArm64PassName = "instruction_simplifier_arm64";
+
   void Run() OVERRIDE {
     InstructionSimplifierArm64Visitor visitor(graph_, stats_);
     visitor.VisitReversePostOrder();
diff --git a/compiler/optimizing/instruction_simplifier_shared.cc b/compiler/optimizing/instruction_simplifier_shared.cc
index 04e063c..c2b1374 100644
--- a/compiler/optimizing/instruction_simplifier_shared.cc
+++ b/compiler/optimizing/instruction_simplifier_shared.cc
@@ -231,15 +231,6 @@
                                   HInstruction* array,
                                   HInstruction* index,
                                   size_t data_offset) {
-  if (kEmitCompilerReadBarrier) {
-    // The read barrier instrumentation does not support the
-    // HIntermediateAddress instruction yet.
-    //
-    // TODO: Handle this case properly in the ARM64 and ARM code generator and
-    // re-enable this optimization; otherwise, remove this TODO.
-    // b/26601270
-    return false;
-  }
   if (index->IsConstant() ||
       (index->IsBoundsCheck() && index->AsBoundsCheck()->GetIndex()->IsConstant())) {
     // When the index is a constant all the addressing can be fitted in the
@@ -251,14 +242,20 @@
     // The access may require a runtime call or the original array pointer.
     return false;
   }
+  if (kEmitCompilerReadBarrier &&
+      access->IsArrayGet() &&
+      access->GetType() == Primitive::kPrimNot) {
+    // For object arrays, the read barrier instrumentation requires
+    // the original array pointer.
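+    // (Loads of primitive elements and array stores are unaffected and may still
+    // use the intermediate address.)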
+    return false;
+  }
 
   // Proceed to extract the base address computation.
   HGraph* graph = access->GetBlock()->GetGraph();
   ArenaAllocator* arena = graph->GetArena();
 
   HIntConstant* offset = graph->GetIntConstant(data_offset);
-  HIntermediateAddress* address =
-      new (arena) HIntermediateAddress(array, offset, kNoDexPc);
+  HIntermediateAddress* address = new (arena) HIntermediateAddress(array, offset, kNoDexPc);
   // TODO: Is it ok to not have this on the intermediate address?
   // address->SetReferenceTypeInfo(array->GetReferenceTypeInfo());
   access->GetBlock()->InsertInstructionBefore(address, access);
diff --git a/compiler/optimizing/intrinsics.cc b/compiler/optimizing/intrinsics.cc
index 8327a4c..fc6ff7b 100644
--- a/compiler/optimizing/intrinsics.cc
+++ b/compiler/optimizing/intrinsics.cc
@@ -133,8 +133,7 @@
 
 void IntrinsicsRecognizer::Run() {
   ScopedObjectAccess soa(Thread::Current());
-  for (HReversePostOrderIterator it(*graph_); !it.Done(); it.Advance()) {
-    HBasicBlock* block = it.Current();
+  for (HBasicBlock* block : graph_->GetReversePostOrder()) {
     for (HInstructionIterator inst_it(block->GetInstructions()); !inst_it.Done();
          inst_it.Advance()) {
       HInstruction* inst = inst_it.Current();
diff --git a/compiler/optimizing/intrinsics_arm.cc b/compiler/optimizing/intrinsics_arm.cc
index 96a6ecb..8234b24 100644
--- a/compiler/optimizing/intrinsics_arm.cc
+++ b/compiler/optimizing/intrinsics_arm.cc
@@ -652,9 +652,9 @@
       (invoke->GetIntrinsic() == Intrinsics::kUnsafeGetObject ||
        invoke->GetIntrinsic() == Intrinsics::kUnsafeGetObjectVolatile);
   LocationSummary* locations = new (arena) LocationSummary(invoke,
-                                                           can_call ?
-                                                               LocationSummary::kCallOnSlowPath :
-                                                               LocationSummary::kNoCall,
+                                                           (can_call
+                                                                ? LocationSummary::kCallOnSlowPath
+                                                                : LocationSummary::kNoCall),
                                                            kIntrinsified);
   if (can_call && kUseBakerReadBarrier) {
     locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty());  // No caller-save registers.
@@ -663,7 +663,7 @@
   locations->SetInAt(1, Location::RequiresRegister());
   locations->SetInAt(2, Location::RequiresRegister());
   locations->SetOut(Location::RequiresRegister(),
-                    can_call ? Location::kOutputOverlap : Location::kNoOutputOverlap);
+                    (can_call ? Location::kOutputOverlap : Location::kNoOutputOverlap));
   if (type == Primitive::kPrimNot && kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
     // We need a temporary register for the read barrier marking slow
     // path in InstructionCodeGeneratorARM::GenerateReferenceLoadWithBakerReadBarrier.
@@ -891,8 +891,13 @@
 static void CreateIntIntIntIntIntToIntPlusTemps(ArenaAllocator* arena,
                                                 HInvoke* invoke,
                                                 Primitive::Type type) {
+  bool can_call = kEmitCompilerReadBarrier &&
+      kUseBakerReadBarrier &&
+      (invoke->GetIntrinsic() == Intrinsics::kUnsafeCASObject);
   LocationSummary* locations = new (arena) LocationSummary(invoke,
-                                                           LocationSummary::kNoCall,
+                                                           (can_call
+                                                                ? LocationSummary::kCallOnSlowPath
+                                                                : LocationSummary::kNoCall),
                                                            kIntrinsified);
   locations->SetInAt(0, Location::NoLocation());        // Unused receiver.
   locations->SetInAt(1, Location::RequiresRegister());
@@ -901,36 +906,65 @@
   locations->SetInAt(4, Location::RequiresRegister());
 
   // If heap poisoning is enabled, we don't want the unpoisoning
-  // operations to potentially clobber the output.
-  Location::OutputOverlap overlaps = (kPoisonHeapReferences && type == Primitive::kPrimNot)
+  // operations to potentially clobber the output. Likewise when
+  // emitting a (Baker) read barrier, which may call.
+  Location::OutputOverlap overlaps =
+      ((kPoisonHeapReferences && type == Primitive::kPrimNot) || can_call)
       ? Location::kOutputOverlap
       : Location::kNoOutputOverlap;
   locations->SetOut(Location::RequiresRegister(), overlaps);
 
+  // Temporary registers used in CAS. In the object case
+  // (UnsafeCASObject intrinsic), these are also used for
+  // card-marking, and possibly for (Baker) read barrier.
   locations->AddTemp(Location::RequiresRegister());  // Pointer.
   locations->AddTemp(Location::RequiresRegister());  // Temp 1.
 }
 
-static void GenCas(LocationSummary* locations, Primitive::Type type, CodeGeneratorARM* codegen) {
+static void GenCas(HInvoke* invoke, Primitive::Type type, CodeGeneratorARM* codegen) {
   DCHECK_NE(type, Primitive::kPrimLong);
 
   ArmAssembler* assembler = codegen->GetAssembler();
+  LocationSummary* locations = invoke->GetLocations();
 
-  Register out = locations->Out().AsRegister<Register>();              // Boolean result.
+  Location out_loc = locations->Out();
+  Register out = out_loc.AsRegister<Register>();                  // Boolean result.
 
-  Register base = locations->InAt(1).AsRegister<Register>();           // Object pointer.
-  Register offset = locations->InAt(2).AsRegisterPairLow<Register>();  // Offset (discard high 4B).
-  Register expected_lo = locations->InAt(3).AsRegister<Register>();    // Expected.
-  Register value_lo = locations->InAt(4).AsRegister<Register>();       // Value.
+  Register base = locations->InAt(1).AsRegister<Register>();      // Object pointer.
+  Location offset_loc = locations->InAt(2);
+  Register offset = offset_loc.AsRegisterPairLow<Register>();     // Offset (discard high 4B).
+  Register expected = locations->InAt(3).AsRegister<Register>();  // Expected.
+  Register value = locations->InAt(4).AsRegister<Register>();     // Value.
 
-  Register tmp_ptr = locations->GetTemp(0).AsRegister<Register>();     // Pointer to actual memory.
-  Register tmp_lo = locations->GetTemp(1).AsRegister<Register>();      // Value in memory.
+  Location tmp_ptr_loc = locations->GetTemp(0);
+  Register tmp_ptr = tmp_ptr_loc.AsRegister<Register>();          // Pointer to actual memory.
+  Register tmp = locations->GetTemp(1).AsRegister<Register>();    // Value in memory.
 
   if (type == Primitive::kPrimNot) {
+    // The only read barrier implementation supporting the
+    // UnsafeCASObject intrinsic is the Baker-style read barrier.
+    DCHECK(!kEmitCompilerReadBarrier || kUseBakerReadBarrier);
+
     // Mark card for object assuming new value is stored. Worst case we will mark an unchanged
     // object and scan the receiver at the next GC for nothing.
     bool value_can_be_null = true;  // TODO: Worth finding out this information?
-    codegen->MarkGCCard(tmp_ptr, tmp_lo, base, value_lo, value_can_be_null);
+    codegen->MarkGCCard(tmp_ptr, tmp, base, value, value_can_be_null);
+
+    if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
+      // Need to make sure the reference stored in the field is a to-space
+      // one before attempting the CAS, or the CAS could fail incorrectly.
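+      // Otherwise the field could still hold a from-space reference while `expected`
+      // holds the equivalent to-space reference, and the comparison would spuriously fail.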
+      codegen->GenerateReferenceLoadWithBakerReadBarrier(
+          invoke,
+          out_loc,  // Unused, used only as a "temporary" within the read barrier.
+          base,
+          /* offset */ 0u,
+          /* index */ offset_loc,
+          ScaleFactor::TIMES_1,
+          tmp_ptr_loc,
+          /* needs_null_check */ false,
+          /* always_update_field */ true,
+          &tmp);
+    }
   }
 
   // Prevent reordering with prior memory operations.
@@ -942,12 +976,12 @@
   __ add(tmp_ptr, base, ShifterOperand(offset));
 
   if (kPoisonHeapReferences && type == Primitive::kPrimNot) {
-    codegen->GetAssembler()->PoisonHeapReference(expected_lo);
-    if (value_lo == expected_lo) {
-      // Do not poison `value_lo`, as it is the same register as
-      // `expected_lo`, which has just been poisoned.
+    __ PoisonHeapReference(expected);
+    if (value == expected) {
+      // Do not poison `value`, as it is the same register as
+      // `expected`, which has just been poisoned.
     } else {
-      codegen->GetAssembler()->PoisonHeapReference(value_lo);
+      __ PoisonHeapReference(value);
     }
   }
 
@@ -959,37 +993,29 @@
   Label loop_head;
   __ Bind(&loop_head);
 
-  // TODO: When `type == Primitive::kPrimNot`, add a read barrier for
-  // the reference stored in the object before attempting the CAS,
-  // similar to the one in the art::Unsafe_compareAndSwapObject JNI
-  // implementation.
-  //
-  // Note that this code is not (yet) used when read barriers are
-  // enabled (see IntrinsicLocationsBuilderARM::VisitUnsafeCASObject).
-  DCHECK(!(type == Primitive::kPrimNot && kEmitCompilerReadBarrier));
-  __ ldrex(tmp_lo, tmp_ptr);
+  __ ldrex(tmp, tmp_ptr);
 
-  __ subs(tmp_lo, tmp_lo, ShifterOperand(expected_lo));
+  __ subs(tmp, tmp, ShifterOperand(expected));
 
   __ it(EQ, ItState::kItT);
-  __ strex(tmp_lo, value_lo, tmp_ptr, EQ);
-  __ cmp(tmp_lo, ShifterOperand(1), EQ);
+  __ strex(tmp, value, tmp_ptr, EQ);
+  __ cmp(tmp, ShifterOperand(1), EQ);
 
   __ b(&loop_head, EQ);
 
   __ dmb(ISH);
 
-  __ rsbs(out, tmp_lo, ShifterOperand(1));
+  __ rsbs(out, tmp, ShifterOperand(1));
   __ it(CC);
   __ mov(out, ShifterOperand(0), CC);
 
   if (kPoisonHeapReferences && type == Primitive::kPrimNot) {
-    codegen->GetAssembler()->UnpoisonHeapReference(expected_lo);
-    if (value_lo == expected_lo) {
-      // Do not unpoison `value_lo`, as it is the same register as
-      // `expected_lo`, which has just been unpoisoned.
+    __ UnpoisonHeapReference(expected);
+    if (value == expected) {
+      // Do not unpoison `value`, as it is the same register as
+      // `expected`, which has just been unpoisoned.
     } else {
-      codegen->GetAssembler()->UnpoisonHeapReference(value_lo);
+      __ UnpoisonHeapReference(value);
     }
   }
 }
@@ -998,33 +1024,23 @@
   CreateIntIntIntIntIntToIntPlusTemps(arena_, invoke, Primitive::kPrimInt);
 }
 void IntrinsicLocationsBuilderARM::VisitUnsafeCASObject(HInvoke* invoke) {
-  // The UnsafeCASObject intrinsic is missing a read barrier, and
-  // therefore sometimes does not work as expected (b/25883050).
-  // Turn it off temporarily as a quick fix, until the read barrier is
-  // implemented (see TODO in GenCAS).
-  //
-  // TODO(rpl): Implement read barrier support in GenCAS and re-enable
-  // this intrinsic.
-  if (kEmitCompilerReadBarrier) {
+  // The only read barrier implementation supporting the
+  // UnsafeCASObject intrinsic is the Baker-style read barrier.
+  if (kEmitCompilerReadBarrier && !kUseBakerReadBarrier) {
     return;
   }
 
   CreateIntIntIntIntIntToIntPlusTemps(arena_, invoke, Primitive::kPrimNot);
 }
 void IntrinsicCodeGeneratorARM::VisitUnsafeCASInt(HInvoke* invoke) {
-  GenCas(invoke->GetLocations(), Primitive::kPrimInt, codegen_);
+  GenCas(invoke, Primitive::kPrimInt, codegen_);
 }
 void IntrinsicCodeGeneratorARM::VisitUnsafeCASObject(HInvoke* invoke) {
-  // The UnsafeCASObject intrinsic is missing a read barrier, and
-  // therefore sometimes does not work as expected (b/25883050).
-  // Turn it off temporarily as a quick fix, until the read barrier is
-  // implemented (see TODO in GenCAS).
-  //
-  // TODO(rpl): Implement read barrier support in GenCAS and re-enable
-  // this intrinsic.
-  DCHECK(!kEmitCompilerReadBarrier);
+  // The only read barrier implementation supporting the
+  // UnsafeCASObject intrinsic is the Baker-style read barrier.
+  DCHECK(!kEmitCompilerReadBarrier || kUseBakerReadBarrier);
 
-  GenCas(invoke->GetLocations(), Primitive::kPrimNot, codegen_);
+  GenCas(invoke, Primitive::kPrimNot, codegen_);
 }
 
 void IntrinsicLocationsBuilderARM::VisitStringCompareTo(HInvoke* invoke) {
@@ -1042,7 +1058,6 @@
   // Need temporary registers for String compression's feature.
   if (mirror::kUseStringCompression) {
     locations->AddTemp(Location::RequiresRegister());
-    locations->AddTemp(Location::RequiresRegister());
   }
   locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
 }
@@ -1058,10 +1073,9 @@
   Register temp0 = locations->GetTemp(0).AsRegister<Register>();
   Register temp1 = locations->GetTemp(1).AsRegister<Register>();
   Register temp2 = locations->GetTemp(2).AsRegister<Register>();
-  Register temp3, temp4;
+  Register temp3;
   if (mirror::kUseStringCompression) {
     temp3 = locations->GetTemp(3).AsRegister<Register>();
-    temp4 = locations->GetTemp(4).AsRegister<Register>();
   }
 
   Label loop;
@@ -1088,41 +1102,42 @@
   // Reference equality check, return 0 if same reference.
   __ subs(out, str, ShifterOperand(arg));
   __ b(&end, EQ);
+
   if (mirror::kUseStringCompression) {
-    // Load lengths of this and argument strings.
+    // Load `count` fields of this and argument strings.
     __ ldr(temp3, Address(str, count_offset));
-    __ ldr(temp4, Address(arg, count_offset));
-    // Clean out compression flag from lengths.
-    __ bic(temp0, temp3, ShifterOperand(0x80000000));
-    __ bic(IP, temp4, ShifterOperand(0x80000000));
+    __ ldr(temp2, Address(arg, count_offset));
+    // Extract lengths from the `count` fields.
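+    // The `count` field holds (length << 1) | flag, where the least significant bit
+    // is 0 for a compressed and 1 for an uncompressed string.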
+    __ Lsr(temp0, temp3, 1u);
+    __ Lsr(temp1, temp2, 1u);
   } else {
     // Load lengths of this and argument strings.
     __ ldr(temp0, Address(str, count_offset));
-    __ ldr(IP, Address(arg, count_offset));
+    __ ldr(temp1, Address(arg, count_offset));
   }
   // out = length diff.
-  __ subs(out, temp0, ShifterOperand(IP));
+  __ subs(out, temp0, ShifterOperand(temp1));
   // temp0 = min(len(str), len(arg)).
   __ it(GT);
-  __ mov(temp0, ShifterOperand(IP), GT);
+  __ mov(temp0, ShifterOperand(temp1), GT);
   // Shorter string is empty?
   __ CompareAndBranchIfZero(temp0, &end);
 
   if (mirror::kUseStringCompression) {
     // Check if both strings using same compression style to use this comparison loop.
-    __ eors(temp3, temp3, ShifterOperand(temp4));
-    __ b(&different_compression, MI);
-  }
-  // Store offset of string value in preparation for comparison loop.
-  __ mov(temp1, ShifterOperand(value_offset));
-  if (mirror::kUseStringCompression) {
+    __ eor(temp2, temp2, ShifterOperand(temp3));
+    __ Lsrs(temp2, temp2, 1u);
+    __ b(&different_compression, CS);
     // For string compression, calculate the number of bytes to compare (not chars).
     // This could in theory exceed INT32_MAX, so treat temp0 as unsigned.
-    __ cmp(temp4, ShifterOperand(0));
-    __ it(GE);
-    __ add(temp0, temp0, ShifterOperand(temp0), GE);
+    __ Lsls(temp3, temp3, 31u);  // Extract purely the compression flag.
+    __ it(NE);
+    __ add(temp0, temp0, ShifterOperand(temp0), NE);
   }
 
+  // Store offset of string value in preparation for comparison loop.
+  __ mov(temp1, ShifterOperand(value_offset));
+
   // Assertions that must hold in order to compare multiple characters at a time.
   CHECK_ALIGNED(value_offset, 8);
   static_assert(IsAligned<8>(kObjectAlignment),
@@ -1182,69 +1197,80 @@
   // The comparison is unsigned for string compression, otherwise signed.
   __ cmp(temp0, ShifterOperand(temp1, LSR, mirror::kUseStringCompression ? 3 : 4));
   __ b(&end, mirror::kUseStringCompression ? LS : LE);
+
   // Extract the characters and calculate the difference.
-  Label uncompressed_string, continue_process;
   if (mirror::kUseStringCompression) {
-    __ cmp(temp4, ShifterOperand(0));
-    __ b(&uncompressed_string, GE);
-    __ bic(temp1, temp1, ShifterOperand(0x7));
-    __ b(&continue_process);
+    // For compressed strings we need to clear 0x7 from temp1, for uncompressed we need to clear
+    // 0xf. We also need to prepare the character extraction mask `uncompressed ? 0xffffu : 0xffu`.
+    // The compression flag is now in the highest bit of temp3, so let's play some tricks.
+    __ orr(temp3, temp3, ShifterOperand(0xffu << 23));  // uncompressed ? 0xff800000u : 0x7ff80000u
+    __ bic(temp1, temp1, ShifterOperand(temp3, LSR, 31 - 3));  // &= ~(uncompressed ? 0xfu : 0x7u)
+    __ Asr(temp3, temp3, 7u);                           // uncompressed ? 0xffff0000u : 0xff0000u.
+    __ Lsr(temp2, temp2, temp1);                        // Extract second character.
+    __ Lsr(temp3, temp3, 16u);                          // uncompressed ? 0xffffu : 0xffu
+    __ Lsr(out, IP, temp1);                             // Extract first character.
+    __ and_(temp2, temp2, ShifterOperand(temp3));
+    __ and_(out, out, ShifterOperand(temp3));
+  } else {
+    __ bic(temp1, temp1, ShifterOperand(0xf));
+    __ Lsr(temp2, temp2, temp1);
+    __ Lsr(out, IP, temp1);
+    __ movt(temp2, 0);
+    __ movt(out, 0);
   }
-  __ Bind(&uncompressed_string);
-  __ bic(temp1, temp1, ShifterOperand(0xf));
-  __ Bind(&continue_process);
 
-  __ Lsr(temp2, temp2, temp1);
-  __ Lsr(IP, IP, temp1);
-  Label calculate_difference, uncompressed_string_extract_chars;
-  if (mirror::kUseStringCompression) {
-    __ cmp(temp4, ShifterOperand(0));
-    __ b(&uncompressed_string_extract_chars, GE);
-    __ ubfx(temp2, temp2, 0, 8);
-    __ ubfx(IP, IP, 0, 8);
-    __ b(&calculate_difference);
-  }
-  __ Bind(&uncompressed_string_extract_chars);
-  __ movt(temp2, 0);
-  __ movt(IP, 0);
-  __ Bind(&calculate_difference);
-  __ sub(out, IP, ShifterOperand(temp2));
-  __ b(&end);
+  __ sub(out, out, ShifterOperand(temp2));
 
   if (mirror::kUseStringCompression) {
+    __ b(&end);
+    __ Bind(&different_compression);
+
+    // Comparison for different compression style.
     const size_t c_char_size = Primitive::ComponentSize(Primitive::kPrimByte);
     DCHECK_EQ(c_char_size, 1u);
-    Label loop_arg_compressed, loop_this_compressed, find_diff;
-    // Comparison for different compression style.
-    // This part is when THIS is compressed and ARG is not.
-    __ Bind(&different_compression);
-    __ add(temp2, str, ShifterOperand(value_offset));
-    __ add(temp3, arg, ShifterOperand(value_offset));
-    __ cmp(temp4, ShifterOperand(0));
-    __ b(&loop_arg_compressed, LT);
 
-    __ Bind(&loop_this_compressed);
-    __ ldrb(IP, Address(temp2, c_char_size, Address::PostIndex));
-    __ ldrh(temp4, Address(temp3, char_size, Address::PostIndex));
-    __ cmp(IP, ShifterOperand(temp4));
-    __ b(&find_diff, NE);
-    __ subs(temp0, temp0, ShifterOperand(1));
-    __ b(&loop_this_compressed, GT);
-    __ b(&end);
+    // We want to free up temp3, currently holding `str.count`, for comparison.
+    // So, we move it to the bottom bit of the iteration count `temp0`, which we then
+    // need to treat as unsigned. Start by freeing the bit with an ADD and continue
+    // further down with a LSRS+SBC which will flip the meaning of the flag but allow
+    // `subs temp0, #2; bhi different_compression_loop` to serve as the loop condition.
+    __ add(temp0, temp0, ShifterOperand(temp0));  // Unlike LSL, this ADD is always 16-bit.
+    // `temp1` will hold the compressed data pointer, `temp2` the uncompressed data pointer.
+    __ mov(temp1, ShifterOperand(str));
+    __ mov(temp2, ShifterOperand(arg));
+    __ Lsrs(temp3, temp3, 1u);                // Continue the move of the compression flag.
+    __ it(CS, kItThen);                       // Interleave with selection of temp1 and temp2.
+    __ mov(temp1, ShifterOperand(arg), CS);   // Preserves flags.
+    __ mov(temp2, ShifterOperand(str), CS);   // Preserves flags.
+    __ sbc(temp0, temp0, ShifterOperand(0));  // Complete the move of the compression flag.
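+    // temp0 now holds 2 * length (minus one when `str` is compressed); subtracting 2 per
+    // character below keeps the loop running exactly `length` times under the unsigned HI test.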
 
-    // This part is when THIS is not compressed and ARG is.
-    __ Bind(&loop_arg_compressed);
-    __ ldrh(IP, Address(temp2, char_size, Address::PostIndex));
-    __ ldrb(temp4, Address(temp3, c_char_size, Address::PostIndex));
-    __ cmp(IP, ShifterOperand(temp4));
-    __ b(&find_diff, NE);
-    __ subs(temp0, temp0, ShifterOperand(1));
-    __ b(&loop_arg_compressed, GT);
+    // Adjust temp1 and temp2 from string pointers to data pointers.
+    __ add(temp1, temp1, ShifterOperand(value_offset));
+    __ add(temp2, temp2, ShifterOperand(value_offset));
+
+    Label different_compression_loop;
+    Label different_compression_diff;
+
+    // Main loop for different compression.
+    __ Bind(&different_compression_loop);
+    __ ldrb(IP, Address(temp1, c_char_size, Address::PostIndex));
+    __ ldrh(temp3, Address(temp2, char_size, Address::PostIndex));
+    __ cmp(IP, ShifterOperand(temp3));
+    __ b(&different_compression_diff, NE);
+    __ subs(temp0, temp0, ShifterOperand(2));
+    __ b(&different_compression_loop, HI);
     __ b(&end);
 
     // Calculate the difference.
-    __ Bind(&find_diff);
-    __ sub(out, IP, ShifterOperand(temp4));
+    __ Bind(&different_compression_diff);
+    __ sub(out, IP, ShifterOperand(temp3));
+    // Flip the difference if `arg` is compressed.
+    // `temp0` contains the inverted `str` compression flag, i.e. the same as the `arg` compression flag.
+    __ Lsrs(temp0, temp0, 1u);
+    static_assert(static_cast<uint32_t>(mirror::StringCompressionFlag::kCompressed) == 0u,
+                  "Expecting 0=compressed, 1=uncompressed");
+    __ it(CC);
+    __ rsb(out, out, ShifterOperand(0), CC);
   }
 
   __ Bind(&end);
@@ -1282,7 +1308,7 @@
   Register temp1 = locations->GetTemp(1).AsRegister<Register>();
   Register temp2 = locations->GetTemp(2).AsRegister<Register>();
 
-  Label loop, preloop;
+  Label loop;
   Label end;
   Label return_true;
   Label return_false;
@@ -1301,6 +1327,10 @@
     __ CompareAndBranchIfZero(arg, &return_false);
   }
 
+  // Reference equality check, return true if same reference.
+  __ cmp(str, ShifterOperand(arg));
+  __ b(&return_true, EQ);
+
   if (!optimizations.GetArgumentIsString()) {
     // Instanceof check for the argument by comparing class fields.
     // All string objects must have the same type since String cannot be subclassed.
@@ -1312,48 +1342,44 @@
     __ b(&return_false, NE);
   }
 
-  // Load lengths of this and argument strings.
+  // Load `count` fields of this and argument strings.
   __ ldr(temp, Address(str, count_offset));
   __ ldr(temp1, Address(arg, count_offset));
-  // Check if lengths are equal, return false if they're not.
+  // Check if `count` fields are equal, return false if they're not.
   // Also compares the compression style, if differs return false.
   __ cmp(temp, ShifterOperand(temp1));
   __ b(&return_false, NE);
-  // Return true if both strings are empty.
-  if (mirror::kUseStringCompression) {
-    // Length needs to be masked out first because 0 is treated as compressed.
-    __ bic(temp, temp, ShifterOperand(0x80000000));
-  }
+  // Return true if both strings are empty. Even with string compression, `count == 0` means empty.
+  static_assert(static_cast<uint32_t>(mirror::StringCompressionFlag::kCompressed) == 0u,
+                "Expecting 0=compressed, 1=uncompressed");
   __ cbz(temp, &return_true);
-  // Reference equality check, return true if same reference.
-  __ cmp(str, ShifterOperand(arg));
-  __ b(&return_true, EQ);
 
-  // Assertions that must hold in order to compare strings 2 characters at a time.
+  // Assertions that must hold in order to compare strings 4 bytes at a time.
   DCHECK_ALIGNED(value_offset, 4);
   static_assert(IsAligned<4>(kObjectAlignment), "String data must be aligned for fast compare.");
 
   if (mirror::kUseStringCompression) {
-    // If not compressed, directly to fast compare. Else do preprocess on length.
-    __ cmp(temp1, ShifterOperand(0));
-    __ b(&preloop, GT);
-    // Mask out compression flag and adjust length for compressed string (8-bit)
-    // as if it is a 16-bit data, new_length = (length + 1) / 2.
-    __ add(temp, temp, ShifterOperand(1));
-    __ Lsr(temp, temp, 1);
-    __ Bind(&preloop);
+    // For string compression, calculate the number of bytes to compare (not chars).
+    // This could in theory exceed INT32_MAX, so treat temp as unsigned.
+    __ Lsrs(temp, temp, 1u);                        // Extract length and check compression flag.
+    __ it(CS);                                      // If uncompressed,
+    __ add(temp, temp, ShifterOperand(temp), CS);   //   double the byte count.
   }
-  // Loop to compare strings 2 characters at a time starting at the front of the string.
-  // Ok to do this because strings with an odd length are zero-padded.
+
+  // Store offset of string value in preparation for comparison loop.
   __ LoadImmediate(temp1, value_offset);
+
+  // Loop to compare strings 4 bytes at a time starting at the front of the string.
+  // Ok to do this because strings are zero-padded to kObjectAlignment.
   __ Bind(&loop);
   __ ldr(out, Address(str, temp1));
   __ ldr(temp2, Address(arg, temp1));
+  __ add(temp1, temp1, ShifterOperand(sizeof(uint32_t)));
   __ cmp(out, ShifterOperand(temp2));
   __ b(&return_false, NE);
-  __ add(temp1, temp1, ShifterOperand(sizeof(uint32_t)));
-  __ subs(temp, temp, ShifterOperand(sizeof(uint32_t) /  sizeof(uint16_t)));
-  __ b(&loop, GT);
+  // With string compression, we have compared 4 bytes, otherwise 2 chars.
+  __ subs(temp, temp, ShifterOperand(mirror::kUseStringCompression ? 4 : 2));
+  __ b(&loop, HI);
 
   // Return true and exit the function.
   // If loop does not result in returning false, we return true.
@@ -1929,7 +1955,7 @@
     //   if (src_ptr != end_ptr) {
     //     uint32_t rb_state = Lockword(src->monitor_).ReadBarrierState();
     //     lfence;  // Load fence or artificial data dependency to prevent load-load reordering
-    //     bool is_gray = (rb_state == ReadBarrier::gray_ptr_);
+    //     bool is_gray = (rb_state == ReadBarrier::GrayState());
     //     if (is_gray) {
     //       // Slow-path copy.
     //       do {
@@ -1970,9 +1996,8 @@
     // Given the numeric representation, it's enough to check the low bit of the
     // rb_state. We do that by shifting the bit out of the lock word with LSRS
     // which can be a 16-bit instruction unlike the TST immediate.
-    static_assert(ReadBarrier::white_ptr_ == 0, "Expecting white to have value 0");
-    static_assert(ReadBarrier::gray_ptr_ == 1, "Expecting gray to have value 1");
-    static_assert(ReadBarrier::black_ptr_ == 2, "Expecting black to have value 2");
+    static_assert(ReadBarrier::WhiteState() == 0, "Expecting white to have value 0");
+    static_assert(ReadBarrier::GrayState() == 1, "Expecting gray to have value 1");
     __ Lsrs(temp2, temp2, LockWord::kReadBarrierStateShift + 1);
     // Carry flag is the last bit shifted out by LSRS.
     __ b(read_barrier_slow_path->GetEntryLabel(), CS);
@@ -2462,8 +2487,8 @@
     const uint32_t count_offset = mirror::String::CountOffset().Uint32Value();
     // String's length.
     __ ldr(IP, Address(srcObj, count_offset));
-    __ cmp(IP, ShifterOperand(0));
-    __ b(&compressed_string_preloop, LT);
+    __ tst(IP, ShifterOperand(1));
+    __ b(&compressed_string_preloop, EQ);
   }
   __ add(src_ptr, src_ptr, ShifterOperand(srcBegin, LSL, 1));
 
@@ -2498,9 +2523,10 @@
   __ subs(num_chr, num_chr, ShifterOperand(1));
   __ strh(IP, Address(dst_ptr, char_size, Address::PostIndex));
   __ b(&remainder, GT);
-  __ b(&done);
 
   if (mirror::kUseStringCompression) {
+    __ b(&done);
+
     const size_t c_char_size = Primitive::ComponentSize(Primitive::kPrimByte);
     DCHECK_EQ(c_char_size, 1u);
     // Copy loop for compressed src, copying 1 character (8-bit) to (16-bit) at a time.
@@ -2585,6 +2611,9 @@
 UNIMPLEMENTED_INTRINSIC(ARM, IntegerLowestOneBit)
 UNIMPLEMENTED_INTRINSIC(ARM, LongLowestOneBit)
 
+UNIMPLEMENTED_INTRINSIC(ARM, StringStringIndexOf)
+UNIMPLEMENTED_INTRINSIC(ARM, StringStringIndexOfAfter)
+
 // 1.8.
 UNIMPLEMENTED_INTRINSIC(ARM, UnsafeGetAndAddInt)
 UNIMPLEMENTED_INTRINSIC(ARM, UnsafeGetAndAddLong)
diff --git a/compiler/optimizing/intrinsics_arm64.cc b/compiler/optimizing/intrinsics_arm64.cc
index e2c1802..451abc5 100644
--- a/compiler/optimizing/intrinsics_arm64.cc
+++ b/compiler/optimizing/intrinsics_arm64.cc
@@ -863,9 +863,9 @@
     codegen->GenerateReferenceLoadWithBakerReadBarrier(invoke,
                                                        trg_loc,
                                                        base,
-                                                       /* offset */ 0U,
+                                                       /* offset */ 0u,
                                                        /* index */ offset_loc,
-                                                       /* scale_factor */ 0U,
+                                                       /* scale_factor */ 0u,
                                                        temp,
                                                        /* needs_null_check */ false,
                                                        is_volatile);
@@ -880,7 +880,7 @@
 
     if (type == Primitive::kPrimNot) {
       DCHECK(trg.IsW());
-      codegen->MaybeGenerateReadBarrierSlow(invoke, trg_loc, trg_loc, base_loc, 0U, offset_loc);
+      codegen->MaybeGenerateReadBarrierSlow(invoke, trg_loc, trg_loc, base_loc, 0u, offset_loc);
     }
   }
 }
@@ -890,9 +890,9 @@
       (invoke->GetIntrinsic() == Intrinsics::kUnsafeGetObject ||
        invoke->GetIntrinsic() == Intrinsics::kUnsafeGetObjectVolatile);
   LocationSummary* locations = new (arena) LocationSummary(invoke,
-                                                           can_call ?
-                                                               LocationSummary::kCallOnSlowPath :
-                                                               LocationSummary::kNoCall,
+                                                           (can_call
+                                                                ? LocationSummary::kCallOnSlowPath
+                                                                : LocationSummary::kNoCall),
                                                            kIntrinsified);
   if (can_call && kUseBakerReadBarrier) {
     locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty());  // No caller-save registers.
@@ -901,7 +901,7 @@
   locations->SetInAt(1, Location::RequiresRegister());
   locations->SetInAt(2, Location::RequiresRegister());
   locations->SetOut(Location::RequiresRegister(),
-                    can_call ? Location::kOutputOverlap : Location::kNoOutputOverlap);
+                    (can_call ? Location::kOutputOverlap : Location::kNoOutputOverlap));
 }
 
 void IntrinsicLocationsBuilderARM64::VisitUnsafeGet(HInvoke* invoke) {
@@ -1086,8 +1086,13 @@
 static void CreateIntIntIntIntIntToInt(ArenaAllocator* arena,
                                        HInvoke* invoke,
                                        Primitive::Type type) {
+  bool can_call = kEmitCompilerReadBarrier &&
+      kUseBakerReadBarrier &&
+      (invoke->GetIntrinsic() == Intrinsics::kUnsafeCASObject);
   LocationSummary* locations = new (arena) LocationSummary(invoke,
-                                                           LocationSummary::kNoCall,
+                                                           (can_call
+                                                                ? LocationSummary::kCallOnSlowPath
+                                                                : LocationSummary::kNoCall),
                                                            kIntrinsified);
   locations->SetInAt(0, Location::NoLocation());        // Unused receiver.
   locations->SetInAt(1, Location::RequiresRegister());
@@ -1096,20 +1101,29 @@
   locations->SetInAt(4, Location::RequiresRegister());
 
   // If heap poisoning is enabled, we don't want the unpoisoning
-  // operations to potentially clobber the output.
-  Location::OutputOverlap overlaps = (kPoisonHeapReferences && type == Primitive::kPrimNot)
+  // operations to potentially clobber the output. Likewise when
+  // emitting a (Baker) read barrier, which may call.
+  Location::OutputOverlap overlaps =
+      ((kPoisonHeapReferences && type == Primitive::kPrimNot) || can_call)
       ? Location::kOutputOverlap
       : Location::kNoOutputOverlap;
   locations->SetOut(Location::RequiresRegister(), overlaps);
+  if (type == Primitive::kPrimNot && kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
+    // Temporary register for (Baker) read barrier.
+    locations->AddTemp(Location::RequiresRegister());
+  }
 }
 
-static void GenCas(LocationSummary* locations, Primitive::Type type, CodeGeneratorARM64* codegen) {
+static void GenCas(HInvoke* invoke, Primitive::Type type, CodeGeneratorARM64* codegen) {
   MacroAssembler* masm = codegen->GetVIXLAssembler();
+  LocationSummary* locations = invoke->GetLocations();
 
-  Register out = WRegisterFrom(locations->Out());                  // Boolean result.
+  Location out_loc = locations->Out();
+  Register out = WRegisterFrom(out_loc);                           // Boolean result.
 
   Register base = WRegisterFrom(locations->InAt(1));               // Object pointer.
-  Register offset = XRegisterFrom(locations->InAt(2));             // Long offset.
+  Location offset_loc = locations->InAt(2);
+  Register offset = XRegisterFrom(offset_loc);                     // Long offset.
   Register expected = RegisterFrom(locations->InAt(3), type);      // Expected.
   Register value = RegisterFrom(locations->InAt(4), type);         // Value.
 
@@ -1118,6 +1132,27 @@
     // Mark card for object assuming new value is stored.
     bool value_can_be_null = true;  // TODO: Worth finding out this information?
     codegen->MarkGCCard(base, value, value_can_be_null);
+
+    // The only read barrier implementation supporting the
+    // UnsafeCASObject intrinsic is the Baker-style read barrier.
+    DCHECK(!kEmitCompilerReadBarrier || kUseBakerReadBarrier);
+
+    if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
+      Register temp = WRegisterFrom(locations->GetTemp(0));
+      // Need to make sure the reference stored in the field is a to-space
+      // one before attempting the CAS, or the CAS could fail incorrectly.
+      codegen->GenerateReferenceLoadWithBakerReadBarrier(
+          invoke,
+          out_loc,  // Unused, used only as a "temporary" within the read barrier.
+          base,
+          /* offset */ 0u,
+          /* index */ offset_loc,
+          /* scale_factor */ 0u,
+          temp,
+          /* needs_null_check */ false,
+          /* use_load_acquire */ false,
+          /* always_update_field */ true);
+    }
   }
 
   UseScratchRegisterScope temps(masm);
@@ -1145,14 +1180,6 @@
 
   vixl::aarch64::Label loop_head, exit_loop;
   __ Bind(&loop_head);
-  // TODO: When `type == Primitive::kPrimNot`, add a read barrier for
-  // the reference stored in the object before attempting the CAS,
-  // similar to the one in the art::Unsafe_compareAndSwapObject JNI
-  // implementation.
-  //
-  // Note that this code is not (yet) used when read barriers are
-  // enabled (see IntrinsicLocationsBuilderARM64::VisitUnsafeCASObject).
-  DCHECK(!(type == Primitive::kPrimNot && kEmitCompilerReadBarrier));
   __ Ldaxr(tmp_value, MemOperand(tmp_ptr));
   __ Cmp(tmp_value, expected);
   __ B(&exit_loop, ne);
@@ -1179,14 +1206,9 @@
   CreateIntIntIntIntIntToInt(arena_, invoke, Primitive::kPrimLong);
 }
 void IntrinsicLocationsBuilderARM64::VisitUnsafeCASObject(HInvoke* invoke) {
-  // The UnsafeCASObject intrinsic is missing a read barrier, and
-  // therefore sometimes does not work as expected (b/25883050).
-  // Turn it off temporarily as a quick fix, until the read barrier is
-  // implemented (see TODO in GenCAS).
-  //
-  // TODO(rpl): Implement read barrier support in GenCAS and re-enable
-  // this intrinsic.
-  if (kEmitCompilerReadBarrier) {
+  // The only read barrier implementation supporting the
+  // UnsafeCASObject intrinsic is the Baker-style read barrier.
+  if (kEmitCompilerReadBarrier && !kUseBakerReadBarrier) {
     return;
   }
 
@@ -1194,22 +1216,17 @@
 }
 
 void IntrinsicCodeGeneratorARM64::VisitUnsafeCASInt(HInvoke* invoke) {
-  GenCas(invoke->GetLocations(), Primitive::kPrimInt, codegen_);
+  GenCas(invoke, Primitive::kPrimInt, codegen_);
 }
 void IntrinsicCodeGeneratorARM64::VisitUnsafeCASLong(HInvoke* invoke) {
-  GenCas(invoke->GetLocations(), Primitive::kPrimLong, codegen_);
+  GenCas(invoke, Primitive::kPrimLong, codegen_);
 }
 void IntrinsicCodeGeneratorARM64::VisitUnsafeCASObject(HInvoke* invoke) {
-  // The UnsafeCASObject intrinsic is missing a read barrier, and
-  // therefore sometimes does not work as expected (b/25883050).
-  // Turn it off temporarily as a quick fix, until the read barrier is
-  // implemented (see TODO in GenCAS).
-  //
-  // TODO(rpl): Implement read barrier support in GenCAS and re-enable
-  // this intrinsic.
-  DCHECK(!kEmitCompilerReadBarrier);
+  // The only read barrier implementation supporting the
+  // UnsafeCASObject intrinsic is the Baker-style read barrier.
+  DCHECK(!kEmitCompilerReadBarrier || kUseBakerReadBarrier);
 
-  GenCas(invoke->GetLocations(), Primitive::kPrimNot, codegen_);
+  GenCas(invoke, Primitive::kPrimNot, codegen_);
 }
 
 void IntrinsicLocationsBuilderARM64::VisitStringCompareTo(HInvoke* invoke) {
@@ -1226,7 +1243,6 @@
   // Need temporary registers for the String compression feature.
   if (mirror::kUseStringCompression) {
     locations->AddTemp(Location::RequiresRegister());
-    locations->AddTemp(Location::RequiresRegister());
   }
   locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
 }
@@ -1244,10 +1260,9 @@
   Register temp0 = WRegisterFrom(locations->GetTemp(0));
   Register temp1 = WRegisterFrom(locations->GetTemp(1));
   Register temp2 = WRegisterFrom(locations->GetTemp(2));
-  Register temp3, temp5;
+  Register temp3;
   if (mirror::kUseStringCompression) {
     temp3 = WRegisterFrom(locations->GetTemp(3));
-    temp5 = WRegisterFrom(locations->GetTemp(4));
   }
 
   vixl::aarch64::Label loop;
@@ -1274,68 +1289,65 @@
   // Reference equality check, return 0 if same reference.
   __ Subs(out, str, arg);
   __ B(&end, eq);
+
   if (mirror::kUseStringCompression) {
-    // Load lengths of this and argument strings.
+    // Load `count` fields of this and argument strings.
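+    // Each `count` field holds the length in its upper bits and the compression flag in
+    // bit 0 (0 = compressed, as asserted below), so a LSR #1 extracts the length.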
     __ Ldr(temp3, HeapOperand(str, count_offset));
-    __ Ldr(temp5, HeapOperand(arg, count_offset));
+    __ Ldr(temp2, HeapOperand(arg, count_offset));
     // Clean out compression flag from lengths.
-    __ Bic(temp0, temp3, Operand(static_cast<int32_t>(0x80000000)));
-    __ Bic(temp1, temp5, Operand(static_cast<int32_t>(0x80000000)));
+    __ Lsr(temp0, temp3, 1u);
+    __ Lsr(temp1, temp2, 1u);
   } else {
     // Load lengths of this and argument strings.
     __ Ldr(temp0, HeapOperand(str, count_offset));
     __ Ldr(temp1, HeapOperand(arg, count_offset));
   }
-  // Return zero if both strings are empty.
-  __ Orr(out, temp0, temp1);
-  __ Cbz(out, &end);
   // out = length diff.
   __ Subs(out, temp0, temp1);
-  // temp2 = min(len(str), len(arg)).
-  __ Csel(temp2, temp1, temp0, ge);
+  // temp0 = min(len(str), len(arg)).
+  __ Csel(temp0, temp1, temp0, ge);
   // Shorter string is empty?
-  __ Cbz(temp2, &end);
+  __ Cbz(temp0, &end);
 
   if (mirror::kUseStringCompression) {
     // Check if both strings using same compression style to use this comparison loop.
-    __ Eor(temp3.W(), temp3, Operand(temp5));
-    __ Tbnz(temp3.W(), kWRegSize - 1, &different_compression);
+    __ Eor(temp2, temp2, Operand(temp3));
+    // Interleave with the compression flag extraction, which is needed for both paths,
+    // and also set the flags, which are needed only for the different compression path.
+    __ Ands(temp3.W(), temp3.W(), Operand(1));
+    __ Tbnz(temp2, 0, &different_compression);  // Does not use flags.
   }
   // Store offset of string value in preparation for comparison loop.
   __ Mov(temp1, value_offset);
   if (mirror::kUseStringCompression) {
     // For string compression, calculate the number of bytes to compare (not chars).
-    // This could be in theory exceed INT32_MAX, so treat temp2 as unsigned.
-    vixl::aarch64::Label let_it_signed;
-    __ Cmp(temp5, Operand(0));
-    __ B(lt, &let_it_signed);
-    __ Add(temp2, temp2, Operand(temp2));
-    __ Bind(&let_it_signed);
+    // This could in theory exceed INT32_MAX, so treat temp0 as unsigned.
+    __ Lsl(temp0, temp0, temp3);
   }
 
   UseScratchRegisterScope scratch_scope(masm);
   Register temp4 = scratch_scope.AcquireX();
 
-  // Assertions that must hold in order to compare strings 4 characters at a time.
+  // Assertions that must hold in order to compare strings 8 bytes at a time.
   DCHECK_ALIGNED(value_offset, 8);
   static_assert(IsAligned<8>(kObjectAlignment), "String of odd length is not zero padded");
 
   const size_t char_size = Primitive::ComponentSize(Primitive::kPrimChar);
   DCHECK_EQ(char_size, 2u);
 
-  // Promote temp0 to an X reg, ready for LDR.
-  temp0 = temp0.X();
+  // Promote temp2 to an X reg, ready for LDR.
+  temp2 = temp2.X();
 
   // Loop to compare 4x16-bit characters at a time (ok because of string data alignment).
   __ Bind(&loop);
   __ Ldr(temp4, MemOperand(str.X(), temp1.X()));
-  __ Ldr(temp0, MemOperand(arg.X(), temp1.X()));
-  __ Cmp(temp4, temp0);
+  __ Ldr(temp2, MemOperand(arg.X(), temp1.X()));
+  __ Cmp(temp4, temp2);
   __ B(ne, &find_char_diff);
   __ Add(temp1, temp1, char_size * 4);
   // With string compression, we have compared 8 bytes, otherwise 4 chars.
-  __ Subs(temp2, temp2, (mirror::kUseStringCompression) ? 8 : 4);
-  __ B(hi, &loop);
+  __ Subs(temp0, temp0, (mirror::kUseStringCompression) ? 8 : 4);
+  __ B(&loop, hi);
   __ B(&end);
 
   // Promote temp1 to an X reg, ready for EOR.
@@ -1344,78 +1356,85 @@
   // Find the single character difference.
   __ Bind(&find_char_diff);
   // Get the bit position of the first character that differs.
-  __ Eor(temp1, temp0, temp4);
+  __ Eor(temp1, temp2, temp4);
   __ Rbit(temp1, temp1);
   __ Clz(temp1, temp1);
+
   // If the number of chars remaining <= the index where the difference occurs (0-3), then
   // the difference occurs outside the remaining string data, so just return length diff (out).
   // Unlike ARM, we're doing the comparison in one go here, without the subtraction at the
   // find_char_diff_2nd_cmp path, so it doesn't matter whether the comparison is signed or
   // unsigned when string compression is disabled.
   // When it's enabled, the comparison must be unsigned.
-  __ Cmp(temp2, Operand(temp1.W(), LSR, (mirror::kUseStringCompression) ? 3 : 4));
+  __ Cmp(temp0, Operand(temp1.W(), LSR, (mirror::kUseStringCompression) ? 3 : 4));
   __ B(ls, &end);
+
   // Extract the characters and calculate the difference.
-  vixl::aarch64::Label uncompressed_string, continue_process;
   if (mirror::kUseStringCompression) {
-    __ Tbz(temp5, kWRegSize - 1, &uncompressed_string);
     __ Bic(temp1, temp1, 0x7);
-    __ B(&continue_process);
+    __ Bic(temp1, temp1, Operand(temp3.X(), LSL, 3u));
+  } else {
+    __ Bic(temp1, temp1, 0xf);
   }
-  __ Bind(&uncompressed_string);
-  __ Bic(temp1, temp1, 0xf);
-  __ Bind(&continue_process);
-
-  __ Lsr(temp0, temp0, temp1);
+  __ Lsr(temp2, temp2, temp1);
   __ Lsr(temp4, temp4, temp1);
-  vixl::aarch64::Label uncompressed_string_extract_chars;
   if (mirror::kUseStringCompression) {
-    __ Tbz(temp5, kWRegSize - 1, &uncompressed_string_extract_chars);
-    __ And(temp4, temp4, 0xff);
-    __ Sub(out, temp4.W(), Operand(temp0.W(), UXTB));
-    __ B(&end);
+    // Prioritize the case of compressed strings and calculate such result first.
+    __ Uxtb(temp1, temp4);
+    __ Sub(out, temp1.W(), Operand(temp2.W(), UXTB));
+    __ Tbz(temp3, 0u, &end);  // If actually compressed, we're done.
   }
-  __ Bind(&uncompressed_string_extract_chars);
-  __ And(temp4, temp4, 0xffff);
-  __ Sub(out, temp4.W(), Operand(temp0.W(), UXTH));
-  __ B(&end);
+  __ Uxth(temp4, temp4);
+  __ Sub(out, temp4.W(), Operand(temp2.W(), UXTH));
 
   if (mirror::kUseStringCompression) {
-    vixl::aarch64::Label loop_this_compressed, loop_arg_compressed, find_diff;
+    __ B(&end);
+    __ Bind(&different_compression);
+
+    // Comparison for different compression style.
     const size_t c_char_size = Primitive::ComponentSize(Primitive::kPrimByte);
     DCHECK_EQ(c_char_size, 1u);
-    temp0 = temp0.W();
     temp1 = temp1.W();
-    // Comparison for different compression style.
-    // This part is when THIS is compressed and ARG is not.
-    __ Bind(&different_compression);
-    __ Add(temp0, str, Operand(value_offset));
-    __ Add(temp1, arg, Operand(value_offset));
-    __ Cmp(temp5, Operand(0));
-    __ B(lt, &loop_arg_compressed);
+    temp2 = temp2.W();
+    temp4 = temp4.W();
 
-    __ Bind(&loop_this_compressed);
-    __ Ldrb(temp3, MemOperand(temp0.X(), c_char_size, PostIndex));
-    __ Ldrh(temp5, MemOperand(temp1.X(), char_size, PostIndex));
-    __ Cmp(temp3, Operand(temp5));
-    __ B(ne, &find_diff);
-    __ Subs(temp2, temp2, 1);
-    __ B(gt, &loop_this_compressed);
-    __ B(&end);
+    // `temp1` will hold the compressed data pointer, `temp2` the uncompressed data pointer.
+    // Note that the condition flags were set by the extraction of the `str` compression
+    // flag into `temp3` before branching to the `different_compression` label.
+    __ Csel(temp1, str, arg, eq);   // Pointer to the compressed string.
+    __ Csel(temp2, str, arg, ne);   // Pointer to the uncompressed string.
 
-    // This part is when THIS is not compressed and ARG is.
-    __ Bind(&loop_arg_compressed);
-    __ Ldrh(temp3, MemOperand(temp0.X(), char_size, PostIndex));
-    __ Ldrb(temp5, MemOperand(temp1.X(), c_char_size, PostIndex));
-    __ Cmp(temp3, Operand(temp5));
-    __ B(ne, &find_diff);
-    __ Subs(temp2, temp2, 1);
-    __ B(gt, &loop_arg_compressed);
+    // We want to free up temp3, which currently holds the `str` compression flag, for the
+    // comparison. So we move the flag into the bottom bit of the iteration count `temp0`,
+    // which we then need to treat as unsigned. Free the bit with an LSL here and finish
+    // with a SUB further down, which allows `subs temp0, #2; bhi different_compression_loop`
+    // to serve as the loop condition.
+    __ Lsl(temp0, temp0, 1u);
+
+    // Adjust temp1 and temp2 from string pointers to data pointers.
+    __ Add(temp1, temp1, Operand(value_offset));
+    __ Add(temp2, temp2, Operand(value_offset));
+
+    // Complete the move of the compression flag.
+    __ Sub(temp0, temp0, Operand(temp3));
+
+    vixl::aarch64::Label different_compression_loop;
+    vixl::aarch64::Label different_compression_diff;
+
+    __ Bind(&different_compression_loop);
+    __ Ldrb(temp4, MemOperand(temp1.X(), c_char_size, PostIndex));
+    __ Ldrh(temp3, MemOperand(temp2.X(), char_size, PostIndex));
+    __ Subs(temp4, temp4, Operand(temp3));
+    __ B(&different_compression_diff, ne);
+    __ Subs(temp0, temp0, 2);
+    __ B(&different_compression_loop, hi);
     __ B(&end);
 
     // Calculate the difference.
-    __ Bind(&find_diff);
-    __ Sub(out, temp3.W(), Operand(temp5.W(), UXTH));
+    __ Bind(&different_compression_diff);
+    __ Tst(temp0, Operand(1));
+    static_assert(static_cast<uint32_t>(mirror::StringCompressionFlag::kCompressed) == 0u,
+                  "Expecting 0=compressed, 1=uncompressed");
+    __ Cneg(out, temp4, ne);
   }
 
   __ Bind(&end);
@@ -1451,7 +1470,7 @@
   Register temp1 = WRegisterFrom(locations->GetTemp(0));
   Register temp2 = WRegisterFrom(locations->GetTemp(1));
 
-  vixl::aarch64::Label loop, preloop;
+  vixl::aarch64::Label loop;
   vixl::aarch64::Label end;
   vixl::aarch64::Label return_true;
   vixl::aarch64::Label return_false;
@@ -1485,49 +1504,46 @@
     __ B(&return_false, ne);
   }
 
-  // Load lengths of this and argument strings.
+  // Load `count` fields of this and argument strings.
   __ Ldr(temp, MemOperand(str.X(), count_offset));
   __ Ldr(temp1, MemOperand(arg.X(), count_offset));
-  // Check if lengths are equal, return false if they're not.
+  // Check if `count` fields are equal, return false if they're not.
   // Also compares the compression style, if differs return false.
   __ Cmp(temp, temp1);
   __ B(&return_false, ne);
-  // Return true if both strings are empty.
-  if (mirror::kUseStringCompression) {
-    // Length needs to be masked out first because 0 is treated as compressed.
-    __ Bic(temp, temp, Operand(static_cast<int32_t>(0x80000000)));
-  }
+  // Return true if both strings are empty. Even with string compression, `count == 0` means empty.
+  static_assert(static_cast<uint32_t>(mirror::StringCompressionFlag::kCompressed) == 0u,
+                "Expecting 0=compressed, 1=uncompressed");
   __ Cbz(temp, &return_true);
 
-  // Assertions that must hold in order to compare strings 4 characters at a time.
+  // Assertions that must hold in order to compare strings 8 bytes at a time.
   DCHECK_ALIGNED(value_offset, 8);
   static_assert(IsAligned<8>(kObjectAlignment), "String of odd length is not zero padded");
 
   if (mirror::kUseStringCompression) {
-    // If not compressed, directly to fast compare. Else do preprocess on length.
-    __ Cmp(temp1, Operand(0));
-    __ B(&preloop, gt);
-    // Mask out compression flag and adjust length for compressed string (8-bit)
-    // as if it is a 16-bit data, new_length = (length + 1) / 2
-    __ Add(temp, temp, 1);
-    __ Lsr(temp, temp, 1);
+    // For string compression, calculate the number of bytes to compare (not chars).
+    // This could in theory exceed INT32_MAX, so treat temp as unsigned.
+    __ Lsr(temp, temp, 1u);             // Extract length.
+    __ And(temp1, temp1, Operand(1));   // Extract compression flag.
+    __ Lsl(temp, temp, temp1);          // Calculate number of bytes to compare.
   }
 
+  // Store offset of string value in preparation for comparison loop.
+  __ Mov(temp1, value_offset);
+
   temp1 = temp1.X();
   temp2 = temp2.X();
-  // Loop to compare strings 4 characters at a time starting at the beginning of the string.
-  // Ok to do this because strings are zero-padded to be 8-byte aligned.
-  // Store offset of string value in preparation for comparison loop
-  __ Bind(&preloop);
-  __ Mov(temp1, value_offset);
+  // Loop to compare strings 8 bytes at a time starting at the front of the string.
+  // Ok to do this because strings are zero-padded to kObjectAlignment.
   __ Bind(&loop);
   __ Ldr(out, MemOperand(str.X(), temp1));
   __ Ldr(temp2, MemOperand(arg.X(), temp1));
   __ Add(temp1, temp1, Operand(sizeof(uint64_t)));
   __ Cmp(out, temp2);
   __ B(&return_false, ne);
-  __ Sub(temp, temp, Operand(4), SetFlags);
-  __ B(&loop, gt);
+  // With string compression, we have compared 8 bytes, otherwise 4 chars.
+  __ Sub(temp, temp, Operand(mirror::kUseStringCompression ? 8 : 4), SetFlags);
+  __ B(&loop, hi);
 
   // Return true and exit the function.
   // If loop does not result in returning false, we return true.
@@ -1883,10 +1899,6 @@
   locations->AddTemp(Location::RequiresRegister());
   locations->AddTemp(Location::RequiresRegister());
   locations->AddTemp(Location::RequiresRegister());
-  // Need temporary register for String compression feature.
-  if (mirror::kUseStringCompression) {
-    locations->AddTemp(Location::RequiresRegister());
-  }
 }
 
 void IntrinsicCodeGeneratorARM64::VisitStringGetCharsNoCheck(HInvoke* invoke) {
@@ -1914,10 +1926,6 @@
   Register src_ptr = XRegisterFrom(locations->GetTemp(0));
   Register num_chr = XRegisterFrom(locations->GetTemp(1));
   Register tmp1 = XRegisterFrom(locations->GetTemp(2));
-  Register tmp3;
-  if (mirror::kUseStringCompression) {
-    tmp3 = WRegisterFrom(locations->GetTemp(3));
-  }
 
   UseScratchRegisterScope temps(masm);
   Register dst_ptr = temps.AcquireX();
@@ -1940,8 +1948,8 @@
     // Location of count in string.
     const uint32_t count_offset = mirror::String::CountOffset().Uint32Value();
     // String's length.
-    __ Ldr(tmp3, MemOperand(srcObj, count_offset));
-    __ Tbnz(tmp3, kWRegSize - 1, &compressed_string_preloop);
+    __ Ldr(tmp2, MemOperand(srcObj, count_offset));
+    __ Tbz(tmp2, 0, &compressed_string_preloop);
   }
   __ Add(src_ptr, src_ptr, Operand(srcBegin, LSL, 1));
 
@@ -2642,7 +2650,7 @@
       //   if (src_ptr != end_ptr) {
       //     uint32_t rb_state = Lockword(src->monitor_).ReadBarrierState();
       //     lfence;  // Load fence or artificial data dependency to prevent load-load reordering
-      //     bool is_gray = (rb_state == ReadBarrier::gray_ptr_);
+      //     bool is_gray = (rb_state == ReadBarrier::GrayState());
       //     if (is_gray) {
       //       // Slow-path copy.
       //       do {
@@ -2687,9 +2695,8 @@
       codegen_->AddSlowPath(read_barrier_slow_path);
 
       // Given the numeric representation, it's enough to check the low bit of the rb_state.
-      static_assert(ReadBarrier::white_ptr_ == 0, "Expecting white to have value 0");
-      static_assert(ReadBarrier::gray_ptr_ == 1, "Expecting gray to have value 1");
-      static_assert(ReadBarrier::black_ptr_ == 2, "Expecting black to have value 2");
+      static_assert(ReadBarrier::WhiteState() == 0, "Expecting white to have value 0");
+      static_assert(ReadBarrier::GrayState() == 1, "Expecting gray to have value 1");
       __ Tbnz(tmp, LockWord::kReadBarrierStateShift, read_barrier_slow_path->GetEntryLabel());
 
       // Fast-path copy.
@@ -2772,6 +2779,9 @@
 UNIMPLEMENTED_INTRINSIC(ARM64, IntegerLowestOneBit)
 UNIMPLEMENTED_INTRINSIC(ARM64, LongLowestOneBit)
 
+UNIMPLEMENTED_INTRINSIC(ARM64, StringStringIndexOf)
+UNIMPLEMENTED_INTRINSIC(ARM64, StringStringIndexOfAfter)
+
 // 1.8.
 UNIMPLEMENTED_INTRINSIC(ARM64, UnsafeGetAndAddInt)
 UNIMPLEMENTED_INTRINSIC(ARM64, UnsafeGetAndAddLong)
diff --git a/compiler/optimizing/intrinsics_arm_vixl.cc b/compiler/optimizing/intrinsics_arm_vixl.cc
new file mode 100644
index 0000000..e4bef34
--- /dev/null
+++ b/compiler/optimizing/intrinsics_arm_vixl.cc
@@ -0,0 +1,2717 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "intrinsics_arm_vixl.h"
+
+#include "arch/arm/instruction_set_features_arm.h"
+#include "code_generator_arm_vixl.h"
+#include "common_arm.h"
+#include "lock_word.h"
+#include "mirror/array-inl.h"
+
+#include "aarch32/constants-aarch32.h"
+
+namespace art {
+namespace arm {
+
+#define __ assembler->GetVIXLAssembler()->
+
+using helpers::DRegisterFrom;
+using helpers::HighRegisterFrom;
+using helpers::InputDRegisterAt;
+using helpers::InputRegisterAt;
+using helpers::InputSRegisterAt;
+using helpers::InputVRegisterAt;
+using helpers::Int32ConstantFrom;
+using helpers::LocationFrom;
+using helpers::LowRegisterFrom;
+using helpers::LowSRegisterFrom;
+using helpers::OutputDRegister;
+using helpers::OutputRegister;
+using helpers::OutputVRegister;
+using helpers::RegisterFrom;
+using helpers::SRegisterFrom;
+
+using namespace vixl::aarch32;  // NOLINT(build/namespaces)
+
+ArmVIXLAssembler* IntrinsicCodeGeneratorARMVIXL::GetAssembler() {
+  return codegen_->GetAssembler();
+}
+
+ArenaAllocator* IntrinsicCodeGeneratorARMVIXL::GetAllocator() {
+  return codegen_->GetGraph()->GetArena();
+}
+
+// Default slow-path for fallback (calling the managed code to handle the intrinsic) in an
+// intrinsified call. This will copy the arguments into the positions for a regular call.
+//
+// Note: The actual parameters are required to be in the locations given by the invoke's location
+//       summary. If an intrinsic modifies those locations before a slowpath call, they must be
+//       restored!
+//
+// Note: If an invoke wasn't sharpened, we will put down an invoke-virtual here. That's potentially
+//       sub-optimal (compared to a direct pointer call), but this is a slow-path.
+
+class IntrinsicSlowPathARMVIXL : public SlowPathCodeARMVIXL {
+ public:
+  explicit IntrinsicSlowPathARMVIXL(HInvoke* invoke)
+      : SlowPathCodeARMVIXL(invoke), invoke_(invoke) {}
+
+  Location MoveArguments(CodeGenerator* codegen) {
+    InvokeDexCallingConventionVisitorARM calling_convention_visitor;
+    IntrinsicVisitor::MoveArguments(invoke_, codegen, &calling_convention_visitor);
+    return calling_convention_visitor.GetMethodLocation();
+  }
+
+  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+    ArmVIXLAssembler* assembler = down_cast<ArmVIXLAssembler*>(codegen->GetAssembler());
+    __ Bind(GetEntryLabel());
+
+    SaveLiveRegisters(codegen, invoke_->GetLocations());
+
+    Location method_loc = MoveArguments(codegen);
+
+    if (invoke_->IsInvokeStaticOrDirect()) {
+      codegen->GenerateStaticOrDirectCall(invoke_->AsInvokeStaticOrDirect(), method_loc);
+    } else {
+      codegen->GenerateVirtualCall(invoke_->AsInvokeVirtual(), method_loc);
+    }
+    codegen->RecordPcInfo(invoke_, invoke_->GetDexPc(), this);
+
+    // Copy the result back to the expected output.
+    Location out = invoke_->GetLocations()->Out();
+    if (out.IsValid()) {
+      DCHECK(out.IsRegister());  // TODO: Replace this when we support output in memory.
+      DCHECK(!invoke_->GetLocations()->GetLiveRegisters()->ContainsCoreRegister(out.reg()));
+      codegen->MoveFromReturnRegister(out, invoke_->GetType());
+    }
+
+    RestoreLiveRegisters(codegen, invoke_->GetLocations());
+    __ B(GetExitLabel());
+  }
+
+  const char* GetDescription() const OVERRIDE { return "IntrinsicSlowPath"; }
+
+ private:
+  // The instruction where this slow path is happening.
+  HInvoke* const invoke_;
+
+  DISALLOW_COPY_AND_ASSIGN(IntrinsicSlowPathARMVIXL);
+};
+
+// Slow path implementing the SystemArrayCopy intrinsic copy loop with read barriers.
+class ReadBarrierSystemArrayCopySlowPathARMVIXL : public SlowPathCodeARMVIXL {
+ public:
+  explicit ReadBarrierSystemArrayCopySlowPathARMVIXL(HInstruction* instruction)
+      : SlowPathCodeARMVIXL(instruction) {
+    DCHECK(kEmitCompilerReadBarrier);
+    DCHECK(kUseBakerReadBarrier);
+  }
+
+  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+    CodeGeneratorARMVIXL* arm_codegen = down_cast<CodeGeneratorARMVIXL*>(codegen);
+    ArmVIXLAssembler* assembler = arm_codegen->GetAssembler();
+    LocationSummary* locations = instruction_->GetLocations();
+    DCHECK(locations->CanCall());
+    DCHECK(instruction_->IsInvokeStaticOrDirect())
+        << "Unexpected instruction in read barrier arraycopy slow path: "
+        << instruction_->DebugName();
+    DCHECK(instruction_->GetLocations()->Intrinsified());
+    DCHECK_EQ(instruction_->AsInvoke()->GetIntrinsic(), Intrinsics::kSystemArrayCopy);
+
+    int32_t element_size = Primitive::ComponentSize(Primitive::kPrimNot);
+    uint32_t element_size_shift = Primitive::ComponentSizeShift(Primitive::kPrimNot);
+    uint32_t offset = mirror::Array::DataOffset(element_size).Uint32Value();
+
+    vixl32::Register dest = InputRegisterAt(instruction_, 2);
+    Location dest_pos = locations->InAt(3);
+    vixl32::Register src_curr_addr = RegisterFrom(locations->GetTemp(0));
+    vixl32::Register dst_curr_addr = RegisterFrom(locations->GetTemp(1));
+    vixl32::Register src_stop_addr = RegisterFrom(locations->GetTemp(2));
+    vixl32::Register tmp = RegisterFrom(locations->GetTemp(3));
+
+    __ Bind(GetEntryLabel());
+    // Compute the base destination address in `dst_curr_addr`.
+    if (dest_pos.IsConstant()) {
+      int32_t constant = Int32ConstantFrom(dest_pos);
+      __ Add(dst_curr_addr, dest, element_size * constant + offset);
+    } else {
+      __ Add(dst_curr_addr,
+             dest,
+             Operand(RegisterFrom(dest_pos), vixl32::LSL, element_size_shift));
+      __ Add(dst_curr_addr, dst_curr_addr, offset);
+    }
+
+    vixl32::Label loop;
+    __ Bind(&loop);
+    __ Ldr(tmp, MemOperand(src_curr_addr, element_size, PostIndex));
+    assembler->MaybeUnpoisonHeapReference(tmp);
+    // TODO: Inline the mark bit check before calling the runtime?
+    // tmp = ReadBarrier::Mark(tmp);
+    // No need to save live registers; it's taken care of by the
+    // entrypoint. Also, there is no need to update the stack mask,
+    // as this runtime call will not trigger a garbage collection.
+    // (See ReadBarrierMarkSlowPathARM::EmitNativeCode for more
+    // explanations.)
+    DCHECK(!tmp.IsSP());
+    DCHECK(!tmp.IsLR());
+    DCHECK(!tmp.IsPC());
+    // IP is used internally by the ReadBarrierMarkRegX entry point
+    // as a temporary (and not preserved).  It thus cannot be used by
+    // any live register in this slow path.
+    DCHECK(!src_curr_addr.Is(ip));
+    DCHECK(!dst_curr_addr.Is(ip));
+    DCHECK(!src_stop_addr.Is(ip));
+    DCHECK(!tmp.Is(ip));
+    DCHECK(tmp.IsRegister()) << tmp;
+    int32_t entry_point_offset =
+        CodeGenerator::GetReadBarrierMarkEntryPointsOffset<kArmPointerSize>(tmp.GetCode());
+    // This runtime call does not require a stack map.
+    arm_codegen->InvokeRuntimeWithoutRecordingPcInfo(entry_point_offset, instruction_, this);
+    assembler->MaybePoisonHeapReference(tmp);
+    __ Str(tmp, MemOperand(dst_curr_addr, element_size, PostIndex));
+    __ Cmp(src_curr_addr, src_stop_addr);
+    __ B(ne, &loop);
+    __ B(GetExitLabel());
+  }
+
+  const char* GetDescription() const OVERRIDE {
+    return "ReadBarrierSystemArrayCopySlowPathARMVIXL";
+  }
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(ReadBarrierSystemArrayCopySlowPathARMVIXL);
+};
+
+IntrinsicLocationsBuilderARMVIXL::IntrinsicLocationsBuilderARMVIXL(CodeGeneratorARMVIXL* codegen)
+    : arena_(codegen->GetGraph()->GetArena()),
+      assembler_(codegen->GetAssembler()),
+      features_(codegen->GetInstructionSetFeatures()) {}
+
+bool IntrinsicLocationsBuilderARMVIXL::TryDispatch(HInvoke* invoke) {
+  Dispatch(invoke);
+  LocationSummary* res = invoke->GetLocations();
+  if (res == nullptr) {
+    return false;
+  }
+  return res->Intrinsified();
+}
+
+static void CreateFPToIntLocations(ArenaAllocator* arena, HInvoke* invoke) {
+  LocationSummary* locations = new (arena) LocationSummary(invoke,
+                                                           LocationSummary::kNoCall,
+                                                           kIntrinsified);
+  locations->SetInAt(0, Location::RequiresFpuRegister());
+  locations->SetOut(Location::RequiresRegister());
+}
+
+static void CreateIntToFPLocations(ArenaAllocator* arena, HInvoke* invoke) {
+  LocationSummary* locations = new (arena) LocationSummary(invoke,
+                                                           LocationSummary::kNoCall,
+                                                           kIntrinsified);
+  locations->SetInAt(0, Location::RequiresRegister());
+  locations->SetOut(Location::RequiresFpuRegister());
+}
+
+static void MoveFPToInt(LocationSummary* locations, bool is64bit, ArmVIXLAssembler* assembler) {
+  Location input = locations->InAt(0);
+  Location output = locations->Out();
+  if (is64bit) {
+    __ Vmov(LowRegisterFrom(output), HighRegisterFrom(output), DRegisterFrom(input));
+  } else {
+    __ Vmov(RegisterFrom(output), SRegisterFrom(input));
+  }
+}
+
+static void MoveIntToFP(LocationSummary* locations, bool is64bit, ArmVIXLAssembler* assembler) {
+  Location input = locations->InAt(0);
+  Location output = locations->Out();
+  if (is64bit) {
+    __ Vmov(DRegisterFrom(output), LowRegisterFrom(input), HighRegisterFrom(input));
+  } else {
+    __ Vmov(SRegisterFrom(output), RegisterFrom(input));
+  }
+}
+
+void IntrinsicLocationsBuilderARMVIXL::VisitDoubleDoubleToRawLongBits(HInvoke* invoke) {
+  CreateFPToIntLocations(arena_, invoke);
+}
+void IntrinsicLocationsBuilderARMVIXL::VisitDoubleLongBitsToDouble(HInvoke* invoke) {
+  CreateIntToFPLocations(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorARMVIXL::VisitDoubleDoubleToRawLongBits(HInvoke* invoke) {
+  MoveFPToInt(invoke->GetLocations(), /* is64bit */ true, GetAssembler());
+}
+void IntrinsicCodeGeneratorARMVIXL::VisitDoubleLongBitsToDouble(HInvoke* invoke) {
+  MoveIntToFP(invoke->GetLocations(), /* is64bit */ true, GetAssembler());
+}
+
+void IntrinsicLocationsBuilderARMVIXL::VisitFloatFloatToRawIntBits(HInvoke* invoke) {
+  CreateFPToIntLocations(arena_, invoke);
+}
+void IntrinsicLocationsBuilderARMVIXL::VisitFloatIntBitsToFloat(HInvoke* invoke) {
+  CreateIntToFPLocations(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorARMVIXL::VisitFloatFloatToRawIntBits(HInvoke* invoke) {
+  MoveFPToInt(invoke->GetLocations(), /* is64bit */ false, GetAssembler());
+}
+void IntrinsicCodeGeneratorARMVIXL::VisitFloatIntBitsToFloat(HInvoke* invoke) {
+  MoveIntToFP(invoke->GetLocations(), /* is64bit */ false, GetAssembler());
+}
+
+static void CreateIntToIntLocations(ArenaAllocator* arena, HInvoke* invoke) {
+  LocationSummary* locations = new (arena) LocationSummary(invoke,
+                                                           LocationSummary::kNoCall,
+                                                           kIntrinsified);
+  locations->SetInAt(0, Location::RequiresRegister());
+  locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+}
+
+static void CreateFPToFPLocations(ArenaAllocator* arena, HInvoke* invoke) {
+  LocationSummary* locations = new (arena) LocationSummary(invoke,
+                                                           LocationSummary::kNoCall,
+                                                           kIntrinsified);
+  locations->SetInAt(0, Location::RequiresFpuRegister());
+  locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
+}
+
+static void GenNumberOfLeadingZeros(LocationSummary* locations,
+                                    Primitive::Type type,
+                                    ArmVIXLAssembler* assembler) {
+  Location in = locations->InAt(0);
+  vixl32::Register out = RegisterFrom(locations->Out());
+
+  DCHECK((type == Primitive::kPrimInt) || (type == Primitive::kPrimLong));
+
+  if (type == Primitive::kPrimLong) {
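+    // For 64-bit inputs, CLZ is clz(hi) when the high word is non-zero,
+    // otherwise 32 + clz(lo).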
+    vixl32::Register in_reg_lo = LowRegisterFrom(in);
+    vixl32::Register in_reg_hi = HighRegisterFrom(in);
+    vixl32::Label end;
+    __ Clz(out, in_reg_hi);
+    __ Cbnz(in_reg_hi, &end);
+    __ Clz(out, in_reg_lo);
+    __ Add(out, out, 32);
+    __ Bind(&end);
+  } else {
+    __ Clz(out, RegisterFrom(in));
+  }
+}
+
+void IntrinsicLocationsBuilderARMVIXL::VisitIntegerNumberOfLeadingZeros(HInvoke* invoke) {
+  CreateIntToIntLocations(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorARMVIXL::VisitIntegerNumberOfLeadingZeros(HInvoke* invoke) {
+  GenNumberOfLeadingZeros(invoke->GetLocations(), Primitive::kPrimInt, GetAssembler());
+}
+
+void IntrinsicLocationsBuilderARMVIXL::VisitLongNumberOfLeadingZeros(HInvoke* invoke) {
+  LocationSummary* locations = new (arena_) LocationSummary(invoke,
+                                                           LocationSummary::kNoCall,
+                                                           kIntrinsified);
+  locations->SetInAt(0, Location::RequiresRegister());
+  locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
+}
+
+void IntrinsicCodeGeneratorARMVIXL::VisitLongNumberOfLeadingZeros(HInvoke* invoke) {
+  GenNumberOfLeadingZeros(invoke->GetLocations(), Primitive::kPrimLong, GetAssembler());
+}
+
+static void GenNumberOfTrailingZeros(LocationSummary* locations,
+                                     Primitive::Type type,
+                                     ArmVIXLAssembler* assembler) {
+  DCHECK((type == Primitive::kPrimInt) || (type == Primitive::kPrimLong));
+
+  vixl32::Register out = RegisterFrom(locations->Out());
+
+  if (type == Primitive::kPrimLong) {
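+    // Count trailing zeros via RBIT + CLZ: ctz(lo) when the low word is non-zero,
+    // otherwise 32 + ctz(hi).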
+    vixl32::Register in_reg_lo = LowRegisterFrom(locations->InAt(0));
+    vixl32::Register in_reg_hi = HighRegisterFrom(locations->InAt(0));
+    vixl32::Label end;
+    __ Rbit(out, in_reg_lo);
+    __ Clz(out, out);
+    __ Cbnz(in_reg_lo, &end);
+    __ Rbit(out, in_reg_hi);
+    __ Clz(out, out);
+    __ Add(out, out, 32);
+    __ Bind(&end);
+  } else {
+    vixl32::Register in = RegisterFrom(locations->InAt(0));
+    __ Rbit(out, in);
+    __ Clz(out, out);
+  }
+}
+
+void IntrinsicLocationsBuilderARMVIXL::VisitIntegerNumberOfTrailingZeros(HInvoke* invoke) {
+  LocationSummary* locations = new (arena_) LocationSummary(invoke,
+                                                            LocationSummary::kNoCall,
+                                                            kIntrinsified);
+  locations->SetInAt(0, Location::RequiresRegister());
+  locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+}
+
+void IntrinsicCodeGeneratorARMVIXL::VisitIntegerNumberOfTrailingZeros(HInvoke* invoke) {
+  GenNumberOfTrailingZeros(invoke->GetLocations(), Primitive::kPrimInt, GetAssembler());
+}
+
+void IntrinsicLocationsBuilderARMVIXL::VisitLongNumberOfTrailingZeros(HInvoke* invoke) {
+  LocationSummary* locations = new (arena_) LocationSummary(invoke,
+                                                            LocationSummary::kNoCall,
+                                                            kIntrinsified);
+  locations->SetInAt(0, Location::RequiresRegister());
+  locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
+}
+
+void IntrinsicCodeGeneratorARMVIXL::VisitLongNumberOfTrailingZeros(HInvoke* invoke) {
+  GenNumberOfTrailingZeros(invoke->GetLocations(), Primitive::kPrimLong, GetAssembler());
+}
+
+static void MathAbsFP(HInvoke* invoke, ArmVIXLAssembler* assembler) {
+  __ Vabs(OutputVRegister(invoke), InputVRegisterAt(invoke, 0));
+}
+
+void IntrinsicLocationsBuilderARMVIXL::VisitMathAbsDouble(HInvoke* invoke) {
+  CreateFPToFPLocations(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorARMVIXL::VisitMathAbsDouble(HInvoke* invoke) {
+  MathAbsFP(invoke, GetAssembler());
+}
+
+void IntrinsicLocationsBuilderARMVIXL::VisitMathAbsFloat(HInvoke* invoke) {
+  CreateFPToFPLocations(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorARMVIXL::VisitMathAbsFloat(HInvoke* invoke) {
+  MathAbsFP(invoke, GetAssembler());
+}
+
+static void CreateIntToIntPlusTemp(ArenaAllocator* arena, HInvoke* invoke) {
+  LocationSummary* locations = new (arena) LocationSummary(invoke,
+                                                           LocationSummary::kNoCall,
+                                                           kIntrinsified);
+  locations->SetInAt(0, Location::RequiresRegister());
+  locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+
+  locations->AddTemp(Location::RequiresRegister());
+}
+
+static void GenAbsInteger(LocationSummary* locations,
+                          bool is64bit,
+                          ArmVIXLAssembler* assembler) {
+  Location in = locations->InAt(0);
+  Location output = locations->Out();
+
+  vixl32::Register mask = RegisterFrom(locations->GetTemp(0));
+
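+  // Branch-free abs using the sign mask: out = (in + (in >> 31)) ^ (in >> 31);
+  // the 64-bit case extends the addition with a carry into the high word.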
+  if (is64bit) {
+    vixl32::Register in_reg_lo = LowRegisterFrom(in);
+    vixl32::Register in_reg_hi = HighRegisterFrom(in);
+    vixl32::Register out_reg_lo = LowRegisterFrom(output);
+    vixl32::Register out_reg_hi = HighRegisterFrom(output);
+
+    DCHECK(!out_reg_lo.Is(in_reg_hi)) << "Diagonal overlap unexpected.";
+
+    __ Asr(mask, in_reg_hi, 31);
+    __ Adds(out_reg_lo, in_reg_lo, mask);
+    __ Adc(out_reg_hi, in_reg_hi, mask);
+    __ Eor(out_reg_lo, mask, out_reg_lo);
+    __ Eor(out_reg_hi, mask, out_reg_hi);
+  } else {
+    vixl32::Register in_reg = RegisterFrom(in);
+    vixl32::Register out_reg = RegisterFrom(output);
+
+    __ Asr(mask, in_reg, 31);
+    __ Add(out_reg, in_reg, mask);
+    __ Eor(out_reg, mask, out_reg);
+  }
+}
+
+void IntrinsicLocationsBuilderARMVIXL::VisitMathAbsInt(HInvoke* invoke) {
+  CreateIntToIntPlusTemp(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorARMVIXL::VisitMathAbsInt(HInvoke* invoke) {
+  GenAbsInteger(invoke->GetLocations(), /* is64bit */ false, GetAssembler());
+}
+
+
+void IntrinsicLocationsBuilderARMVIXL::VisitMathAbsLong(HInvoke* invoke) {
+  CreateIntToIntPlusTemp(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorARMVIXL::VisitMathAbsLong(HInvoke* invoke) {
+  GenAbsInteger(invoke->GetLocations(), /* is64bit */ true, GetAssembler());
+}
+
+static void GenMinMax(HInvoke* invoke, bool is_min, ArmVIXLAssembler* assembler) {
+  vixl32::Register op1 = InputRegisterAt(invoke, 0);
+  vixl32::Register op2 = InputRegisterAt(invoke, 1);
+  vixl32::Register out = OutputRegister(invoke);
+
+  __ Cmp(op1, op2);
+
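+  // Select the result without a branch: an IT block moves op1 into out when the
+  // min/max condition holds, and op2 otherwise.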
+  {
+    AssemblerAccurateScope aas(assembler->GetVIXLAssembler(),
+                               3 * kMaxInstructionSizeInBytes,
+                               CodeBufferCheckScope::kMaximumSize);
+
+    __ ite(is_min ? lt : gt);
+    __ mov(is_min ? lt : gt, out, op1);
+    __ mov(is_min ? ge : le, out, op2);
+  }
+}
+
+static void CreateIntIntToIntLocations(ArenaAllocator* arena, HInvoke* invoke) {
+  LocationSummary* locations = new (arena) LocationSummary(invoke,
+                                                           LocationSummary::kNoCall,
+                                                           kIntrinsified);
+  locations->SetInAt(0, Location::RequiresRegister());
+  locations->SetInAt(1, Location::RequiresRegister());
+  locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+}
+
+void IntrinsicLocationsBuilderARMVIXL::VisitMathMinIntInt(HInvoke* invoke) {
+  CreateIntIntToIntLocations(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorARMVIXL::VisitMathMinIntInt(HInvoke* invoke) {
+  GenMinMax(invoke, /* is_min */ true, GetAssembler());
+}
+
+void IntrinsicLocationsBuilderARMVIXL::VisitMathMaxIntInt(HInvoke* invoke) {
+  CreateIntIntToIntLocations(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorARMVIXL::VisitMathMaxIntInt(HInvoke* invoke) {
+  GenMinMax(invoke, /* is_min */ false, GetAssembler());
+}
+
+void IntrinsicLocationsBuilderARMVIXL::VisitMathSqrt(HInvoke* invoke) {
+  CreateFPToFPLocations(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorARMVIXL::VisitMathSqrt(HInvoke* invoke) {
+  ArmVIXLAssembler* assembler = GetAssembler();
+  __ Vsqrt(OutputDRegister(invoke), InputDRegisterAt(invoke, 0));
+}
+
+void IntrinsicLocationsBuilderARMVIXL::VisitMemoryPeekByte(HInvoke* invoke) {
+  CreateIntToIntLocations(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorARMVIXL::VisitMemoryPeekByte(HInvoke* invoke) {
+  ArmVIXLAssembler* assembler = GetAssembler();
+  // Ignore upper 4B of long address.
+  __ Ldrsb(OutputRegister(invoke), LowRegisterFrom(invoke->GetLocations()->InAt(0)));
+}
+
+void IntrinsicLocationsBuilderARMVIXL::VisitMemoryPeekIntNative(HInvoke* invoke) {
+  CreateIntToIntLocations(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorARMVIXL::VisitMemoryPeekIntNative(HInvoke* invoke) {
+  ArmVIXLAssembler* assembler = GetAssembler();
+  // Ignore upper 4B of long address.
+  __ Ldr(OutputRegister(invoke), LowRegisterFrom(invoke->GetLocations()->InAt(0)));
+}
+
+void IntrinsicLocationsBuilderARMVIXL::VisitMemoryPeekLongNative(HInvoke* invoke) {
+  CreateIntToIntLocations(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorARMVIXL::VisitMemoryPeekLongNative(HInvoke* invoke) {
+  ArmVIXLAssembler* assembler = GetAssembler();
+  // Ignore upper 4B of long address.
+  vixl32::Register addr = LowRegisterFrom(invoke->GetLocations()->InAt(0));
+  // Worst case: Control register bit SCTLR.A = 0. Then unaligned accesses throw a processor
+  // exception. So we can't use ldrd as addr may be unaligned.
+  vixl32::Register lo = LowRegisterFrom(invoke->GetLocations()->Out());
+  vixl32::Register hi = HighRegisterFrom(invoke->GetLocations()->Out());
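+  // If `addr` aliases the low output register, load the high word first so the
+  // base register is not clobbered before the second load.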
+  if (addr.Is(lo)) {
+    __ Ldr(hi, MemOperand(addr, 4));
+    __ Ldr(lo, addr);
+  } else {
+    __ Ldr(lo, addr);
+    __ Ldr(hi, MemOperand(addr, 4));
+  }
+}
+
+void IntrinsicLocationsBuilderARMVIXL::VisitMemoryPeekShortNative(HInvoke* invoke) {
+  CreateIntToIntLocations(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorARMVIXL::VisitMemoryPeekShortNative(HInvoke* invoke) {
+  ArmVIXLAssembler* assembler = GetAssembler();
+  // Ignore upper 4B of long address.
+  __ Ldrsh(OutputRegister(invoke), LowRegisterFrom(invoke->GetLocations()->InAt(0)));
+}
+
+static void CreateIntIntToVoidLocations(ArenaAllocator* arena, HInvoke* invoke) {
+  LocationSummary* locations = new (arena) LocationSummary(invoke,
+                                                           LocationSummary::kNoCall,
+                                                           kIntrinsified);
+  locations->SetInAt(0, Location::RequiresRegister());
+  locations->SetInAt(1, Location::RequiresRegister());
+}
+
+void IntrinsicLocationsBuilderARMVIXL::VisitMemoryPokeByte(HInvoke* invoke) {
+  CreateIntIntToVoidLocations(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorARMVIXL::VisitMemoryPokeByte(HInvoke* invoke) {
+  ArmVIXLAssembler* assembler = GetAssembler();
+  __ Strb(InputRegisterAt(invoke, 1), LowRegisterFrom(invoke->GetLocations()->InAt(0)));
+}
+
+void IntrinsicLocationsBuilderARMVIXL::VisitMemoryPokeIntNative(HInvoke* invoke) {
+  CreateIntIntToVoidLocations(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorARMVIXL::VisitMemoryPokeIntNative(HInvoke* invoke) {
+  ArmVIXLAssembler* assembler = GetAssembler();
+  __ Str(InputRegisterAt(invoke, 1), LowRegisterFrom(invoke->GetLocations()->InAt(0)));
+}
+
+void IntrinsicLocationsBuilderARMVIXL::VisitMemoryPokeLongNative(HInvoke* invoke) {
+  CreateIntIntToVoidLocations(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorARMVIXL::VisitMemoryPokeLongNative(HInvoke* invoke) {
+  ArmVIXLAssembler* assembler = GetAssembler();
+  // Ignore upper 4B of long address.
+  vixl32::Register addr = LowRegisterFrom(invoke->GetLocations()->InAt(0));
+  // Worst case: Control register bit SCTLR.A = 0. Then unaligned accesses throw a processor
+  // exception. So we can't use ldrd as addr may be unaligned.
+  __ Str(LowRegisterFrom(invoke->GetLocations()->InAt(1)), addr);
+  __ Str(HighRegisterFrom(invoke->GetLocations()->InAt(1)), MemOperand(addr, 4));
+}
+
+void IntrinsicLocationsBuilderARMVIXL::VisitMemoryPokeShortNative(HInvoke* invoke) {
+  CreateIntIntToVoidLocations(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorARMVIXL::VisitMemoryPokeShortNative(HInvoke* invoke) {
+  ArmVIXLAssembler* assembler = GetAssembler();
+  __ Strh(InputRegisterAt(invoke, 1), LowRegisterFrom(invoke->GetLocations()->InAt(0)));
+}
+
+void IntrinsicLocationsBuilderARMVIXL::VisitThreadCurrentThread(HInvoke* invoke) {
+  LocationSummary* locations = new (arena_) LocationSummary(invoke,
+                                                            LocationSummary::kNoCall,
+                                                            kIntrinsified);
+  locations->SetOut(Location::RequiresRegister());
+}
+
+void IntrinsicCodeGeneratorARMVIXL::VisitThreadCurrentThread(HInvoke* invoke) {
+  ArmVIXLAssembler* assembler = GetAssembler();
+  __ Ldr(OutputRegister(invoke),
+         MemOperand(tr, Thread::PeerOffset<kArmPointerSize>().Int32Value()));
+}
+
+static void GenUnsafeGet(HInvoke* invoke,
+                         Primitive::Type type,
+                         bool is_volatile,
+                         CodeGeneratorARMVIXL* codegen) {
+  LocationSummary* locations = invoke->GetLocations();
+  ArmVIXLAssembler* assembler = codegen->GetAssembler();
+  Location base_loc = locations->InAt(1);
+  vixl32::Register base = InputRegisterAt(invoke, 1);     // Object pointer.
+  Location offset_loc = locations->InAt(2);
+  vixl32::Register offset = LowRegisterFrom(offset_loc);  // Long offset, lo part only.
+  Location trg_loc = locations->Out();
+
+  switch (type) {
+    case Primitive::kPrimInt: {
+      vixl32::Register trg = RegisterFrom(trg_loc);
+      __ Ldr(trg, MemOperand(base, offset));
+      if (is_volatile) {
+        __ Dmb(vixl32::ISH);
+      }
+      break;
+    }
+
+    case Primitive::kPrimNot: {
+      vixl32::Register trg = RegisterFrom(trg_loc);
+      if (kEmitCompilerReadBarrier) {
+        if (kUseBakerReadBarrier) {
+          Location temp = locations->GetTemp(0);
+          codegen->GenerateReferenceLoadWithBakerReadBarrier(
+              invoke, trg_loc, base, 0U, offset_loc, TIMES_1, temp, /* needs_null_check */ false);
+          if (is_volatile) {
+            __ Dmb(vixl32::ISH);
+          }
+        } else {
+          __ Ldr(trg, MemOperand(base, offset));
+          if (is_volatile) {
+            __ Dmb(vixl32::ISH);
+          }
+          codegen->GenerateReadBarrierSlow(invoke, trg_loc, trg_loc, base_loc, 0U, offset_loc);
+        }
+      } else {
+        __ Ldr(trg, MemOperand(base, offset));
+        if (is_volatile) {
+          __ Dmb(vixl32::ISH);
+        }
+        assembler->MaybeUnpoisonHeapReference(trg);
+      }
+      break;
+    }
+
+    case Primitive::kPrimLong: {
+      vixl32::Register trg_lo = LowRegisterFrom(trg_loc);
+      vixl32::Register trg_hi = HighRegisterFrom(trg_loc);
+      if (is_volatile && !codegen->GetInstructionSetFeatures().HasAtomicLdrdAndStrd()) {
+        __ Ldrexd(trg_lo, trg_hi, MemOperand(base, offset));
+      } else {
+        __ Ldrd(trg_lo, trg_hi, MemOperand(base, offset));
+      }
+      if (is_volatile) {
+        __ Dmb(vixl32::ISH);
+      }
+      break;
+    }
+
+    default:
+      LOG(FATAL) << "Unexpected type " << type;
+      UNREACHABLE();
+  }
+}
+
+static void CreateIntIntIntToIntLocations(ArenaAllocator* arena,
+                                          HInvoke* invoke,
+                                          Primitive::Type type) {
+  bool can_call = kEmitCompilerReadBarrier &&
+      (invoke->GetIntrinsic() == Intrinsics::kUnsafeGetObject ||
+       invoke->GetIntrinsic() == Intrinsics::kUnsafeGetObjectVolatile);
+  LocationSummary* locations = new (arena) LocationSummary(invoke,
+                                                           (can_call
+                                                                ? LocationSummary::kCallOnSlowPath
+                                                                : LocationSummary::kNoCall),
+                                                           kIntrinsified);
+  if (can_call && kUseBakerReadBarrier) {
+    locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty());  // No caller-save registers.
+  }
+  locations->SetInAt(0, Location::NoLocation());        // Unused receiver.
+  locations->SetInAt(1, Location::RequiresRegister());
+  locations->SetInAt(2, Location::RequiresRegister());
+  locations->SetOut(Location::RequiresRegister(),
+                    (can_call ? Location::kOutputOverlap : Location::kNoOutputOverlap));
+  if (type == Primitive::kPrimNot && kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
+    // We need a temporary register for the read barrier marking slow
+    // path in CodeGeneratorARMVIXL::GenerateReferenceLoadWithBakerReadBarrier.
+    locations->AddTemp(Location::RequiresRegister());
+  }
+}
+
+void IntrinsicLocationsBuilderARMVIXL::VisitUnsafeGet(HInvoke* invoke) {
+  CreateIntIntIntToIntLocations(arena_, invoke, Primitive::kPrimInt);
+}
+void IntrinsicLocationsBuilderARMVIXL::VisitUnsafeGetVolatile(HInvoke* invoke) {
+  CreateIntIntIntToIntLocations(arena_, invoke, Primitive::kPrimInt);
+}
+void IntrinsicLocationsBuilderARMVIXL::VisitUnsafeGetLong(HInvoke* invoke) {
+  CreateIntIntIntToIntLocations(arena_, invoke, Primitive::kPrimLong);
+}
+void IntrinsicLocationsBuilderARMVIXL::VisitUnsafeGetLongVolatile(HInvoke* invoke) {
+  CreateIntIntIntToIntLocations(arena_, invoke, Primitive::kPrimLong);
+}
+void IntrinsicLocationsBuilderARMVIXL::VisitUnsafeGetObject(HInvoke* invoke) {
+  CreateIntIntIntToIntLocations(arena_, invoke, Primitive::kPrimNot);
+}
+void IntrinsicLocationsBuilderARMVIXL::VisitUnsafeGetObjectVolatile(HInvoke* invoke) {
+  CreateIntIntIntToIntLocations(arena_, invoke, Primitive::kPrimNot);
+}
+
+void IntrinsicCodeGeneratorARMVIXL::VisitUnsafeGet(HInvoke* invoke) {
+  GenUnsafeGet(invoke, Primitive::kPrimInt, /* is_volatile */ false, codegen_);
+}
+void IntrinsicCodeGeneratorARMVIXL::VisitUnsafeGetVolatile(HInvoke* invoke) {
+  GenUnsafeGet(invoke, Primitive::kPrimInt, /* is_volatile */ true, codegen_);
+}
+void IntrinsicCodeGeneratorARMVIXL::VisitUnsafeGetLong(HInvoke* invoke) {
+  GenUnsafeGet(invoke, Primitive::kPrimLong, /* is_volatile */ false, codegen_);
+}
+void IntrinsicCodeGeneratorARMVIXL::VisitUnsafeGetLongVolatile(HInvoke* invoke) {
+  GenUnsafeGet(invoke, Primitive::kPrimLong, /* is_volatile */ true, codegen_);
+}
+void IntrinsicCodeGeneratorARMVIXL::VisitUnsafeGetObject(HInvoke* invoke) {
+  GenUnsafeGet(invoke, Primitive::kPrimNot, /* is_volatile */ false, codegen_);
+}
+void IntrinsicCodeGeneratorARMVIXL::VisitUnsafeGetObjectVolatile(HInvoke* invoke) {
+  GenUnsafeGet(invoke, Primitive::kPrimNot, /* is_volatile */ true, codegen_);
+}
+
+static void CreateIntIntIntIntToVoid(ArenaAllocator* arena,
+                                     const ArmInstructionSetFeatures& features,
+                                     Primitive::Type type,
+                                     bool is_volatile,
+                                     HInvoke* invoke) {
+  LocationSummary* locations = new (arena) LocationSummary(invoke,
+                                                           LocationSummary::kNoCall,
+                                                           kIntrinsified);
+  locations->SetInAt(0, Location::NoLocation());        // Unused receiver.
+  locations->SetInAt(1, Location::RequiresRegister());
+  locations->SetInAt(2, Location::RequiresRegister());
+  locations->SetInAt(3, Location::RequiresRegister());
+
+  if (type == Primitive::kPrimLong) {
+    // Potentially need temps for ldrexd-strexd loop.
+    if (is_volatile && !features.HasAtomicLdrdAndStrd()) {
+      locations->AddTemp(Location::RequiresRegister());  // Temp_lo.
+      locations->AddTemp(Location::RequiresRegister());  // Temp_hi.
+    }
+  } else if (type == Primitive::kPrimNot) {
+    // Temps for card-marking.
+    locations->AddTemp(Location::RequiresRegister());  // Temp.
+    locations->AddTemp(Location::RequiresRegister());  // Card.
+  }
+}
+
+void IntrinsicLocationsBuilderARMVIXL::VisitUnsafePut(HInvoke* invoke) {
+  CreateIntIntIntIntToVoid(arena_, features_, Primitive::kPrimInt, /* is_volatile */ false, invoke);
+}
+void IntrinsicLocationsBuilderARMVIXL::VisitUnsafePutOrdered(HInvoke* invoke) {
+  CreateIntIntIntIntToVoid(arena_, features_, Primitive::kPrimInt, /* is_volatile */ false, invoke);
+}
+void IntrinsicLocationsBuilderARMVIXL::VisitUnsafePutVolatile(HInvoke* invoke) {
+  CreateIntIntIntIntToVoid(arena_, features_, Primitive::kPrimInt, /* is_volatile */ true, invoke);
+}
+void IntrinsicLocationsBuilderARMVIXL::VisitUnsafePutObject(HInvoke* invoke) {
+  CreateIntIntIntIntToVoid(arena_, features_, Primitive::kPrimNot, /* is_volatile */ false, invoke);
+}
+void IntrinsicLocationsBuilderARMVIXL::VisitUnsafePutObjectOrdered(HInvoke* invoke) {
+  CreateIntIntIntIntToVoid(arena_, features_, Primitive::kPrimNot, /* is_volatile */ false, invoke);
+}
+void IntrinsicLocationsBuilderARMVIXL::VisitUnsafePutObjectVolatile(HInvoke* invoke) {
+  CreateIntIntIntIntToVoid(arena_, features_, Primitive::kPrimNot, /* is_volatile */ true, invoke);
+}
+void IntrinsicLocationsBuilderARMVIXL::VisitUnsafePutLong(HInvoke* invoke) {
+  CreateIntIntIntIntToVoid(
+      arena_, features_, Primitive::kPrimLong, /* is_volatile */ false, invoke);
+}
+void IntrinsicLocationsBuilderARMVIXL::VisitUnsafePutLongOrdered(HInvoke* invoke) {
+  CreateIntIntIntIntToVoid(
+      arena_, features_, Primitive::kPrimLong, /* is_volatile */ false, invoke);
+}
+void IntrinsicLocationsBuilderARMVIXL::VisitUnsafePutLongVolatile(HInvoke* invoke) {
+  CreateIntIntIntIntToVoid(
+      arena_, features_, Primitive::kPrimLong, /* is_volatile */ true, invoke);
+}
+
+static void GenUnsafePut(LocationSummary* locations,
+                         Primitive::Type type,
+                         bool is_volatile,
+                         bool is_ordered,
+                         CodeGeneratorARMVIXL* codegen) {
+  ArmVIXLAssembler* assembler = codegen->GetAssembler();
+
+  vixl32::Register base = RegisterFrom(locations->InAt(1));       // Object pointer.
+  vixl32::Register offset = LowRegisterFrom(locations->InAt(2));  // Long offset, lo part only.
+  vixl32::Register value;
+
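+  // A leading DMB gives the store release semantics; volatile stores additionally
+  // get a trailing DMB after the store below.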
+  if (is_volatile || is_ordered) {
+    __ Dmb(vixl32::ISH);
+  }
+
+  if (type == Primitive::kPrimLong) {
+    vixl32::Register value_lo = LowRegisterFrom(locations->InAt(3));
+    vixl32::Register value_hi = HighRegisterFrom(locations->InAt(3));
+    value = value_lo;
+    if (is_volatile && !codegen->GetInstructionSetFeatures().HasAtomicLdrdAndStrd()) {
+      vixl32::Register temp_lo = RegisterFrom(locations->GetTemp(0));
+      vixl32::Register temp_hi = RegisterFrom(locations->GetTemp(1));
+      UseScratchRegisterScope temps(assembler->GetVIXLAssembler());
+      const vixl32::Register temp_reg = temps.Acquire();
+
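+      // Emulate an atomic 64-bit store with an LDREXD/STREXD retry loop; STREXD
+      // writes 0 to temp_lo on success, so we loop while it reports failure.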
+      __ Add(temp_reg, base, offset);
+      vixl32::Label loop_head;
+      __ Bind(&loop_head);
+      __ Ldrexd(temp_lo, temp_hi, temp_reg);
+      __ Strexd(temp_lo, value_lo, value_hi, temp_reg);
+      __ Cmp(temp_lo, 0);
+      __ B(ne, &loop_head);
+    } else {
+      __ Strd(value_lo, value_hi, MemOperand(base, offset));
+    }
+  } else {
+    value = RegisterFrom(locations->InAt(3));
+    vixl32::Register source = value;
+    if (kPoisonHeapReferences && type == Primitive::kPrimNot) {
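+      // Poison a copy so the original `value` register stays intact for the card mark below.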
+      vixl32::Register temp = RegisterFrom(locations->GetTemp(0));
+      __ Mov(temp, value);
+      assembler->PoisonHeapReference(temp);
+      source = temp;
+    }
+    __ Str(source, MemOperand(base, offset));
+  }
+
+  if (is_volatile) {
+    __ Dmb(vixl32::ISH);
+  }
+
+  if (type == Primitive::kPrimNot) {
+    vixl32::Register temp = RegisterFrom(locations->GetTemp(0));
+    vixl32::Register card = RegisterFrom(locations->GetTemp(1));
+    bool value_can_be_null = true;  // TODO: Worth finding out this information?
+    codegen->MarkGCCard(temp, card, base, value, value_can_be_null);
+  }
+}
+
+void IntrinsicCodeGeneratorARMVIXL::VisitUnsafePut(HInvoke* invoke) {
+  GenUnsafePut(invoke->GetLocations(),
+               Primitive::kPrimInt,
+               /* is_volatile */ false,
+               /* is_ordered */ false,
+               codegen_);
+}
+void IntrinsicCodeGeneratorARMVIXL::VisitUnsafePutOrdered(HInvoke* invoke) {
+  GenUnsafePut(invoke->GetLocations(),
+               Primitive::kPrimInt,
+               /* is_volatile */ false,
+               /* is_ordered */ true,
+               codegen_);
+}
+void IntrinsicCodeGeneratorARMVIXL::VisitUnsafePutVolatile(HInvoke* invoke) {
+  GenUnsafePut(invoke->GetLocations(),
+               Primitive::kPrimInt,
+               /* is_volatile */ true,
+               /* is_ordered */ false,
+               codegen_);
+}
+void IntrinsicCodeGeneratorARMVIXL::VisitUnsafePutObject(HInvoke* invoke) {
+  GenUnsafePut(invoke->GetLocations(),
+               Primitive::kPrimNot,
+               /* is_volatile */ false,
+               /* is_ordered */ false,
+               codegen_);
+}
+void IntrinsicCodeGeneratorARMVIXL::VisitUnsafePutObjectOrdered(HInvoke* invoke) {
+  GenUnsafePut(invoke->GetLocations(),
+               Primitive::kPrimNot,
+               /* is_volatile */ false,
+               /* is_ordered */ true,
+               codegen_);
+}
+void IntrinsicCodeGeneratorARMVIXL::VisitUnsafePutObjectVolatile(HInvoke* invoke) {
+  GenUnsafePut(invoke->GetLocations(),
+               Primitive::kPrimNot,
+               /* is_volatile */ true,
+               /* is_ordered */ false,
+               codegen_);
+}
+void IntrinsicCodeGeneratorARMVIXL::VisitUnsafePutLong(HInvoke* invoke) {
+  GenUnsafePut(invoke->GetLocations(),
+               Primitive::kPrimLong,
+               /* is_volatile */ false,
+               /* is_ordered */ false,
+               codegen_);
+}
+void IntrinsicCodeGeneratorARMVIXL::VisitUnsafePutLongOrdered(HInvoke* invoke) {
+  GenUnsafePut(invoke->GetLocations(),
+               Primitive::kPrimLong,
+               /* is_volatile */ false,
+               /* is_ordered */ true,
+               codegen_);
+}
+void IntrinsicCodeGeneratorARMVIXL::VisitUnsafePutLongVolatile(HInvoke* invoke) {
+  GenUnsafePut(invoke->GetLocations(),
+               Primitive::kPrimLong,
+               /* is_volatile */ true,
+               /* is_ordered */ false,
+               codegen_);
+}
+
+static void CreateIntIntIntIntIntToIntPlusTemps(ArenaAllocator* arena,
+                                                HInvoke* invoke,
+                                                Primitive::Type type) {
+  bool can_call = kEmitCompilerReadBarrier &&
+      kUseBakerReadBarrier &&
+      (invoke->GetIntrinsic() == Intrinsics::kUnsafeCASObject);
+  LocationSummary* locations = new (arena) LocationSummary(invoke,
+                                                           (can_call
+                                                                ? LocationSummary::kCallOnSlowPath
+                                                                : LocationSummary::kNoCall),
+                                                           kIntrinsified);
+  locations->SetInAt(0, Location::NoLocation());        // Unused receiver.
+  locations->SetInAt(1, Location::RequiresRegister());
+  locations->SetInAt(2, Location::RequiresRegister());
+  locations->SetInAt(3, Location::RequiresRegister());
+  locations->SetInAt(4, Location::RequiresRegister());
+
+  // If heap poisoning is enabled, we don't want the unpoisoning
+  // operations to potentially clobber the output. Likewise when
+  // emitting a (Baker) read barrier, which may call.
+  Location::OutputOverlap overlaps =
+      ((kPoisonHeapReferences && type == Primitive::kPrimNot) || can_call)
+      ? Location::kOutputOverlap
+      : Location::kNoOutputOverlap;
+  locations->SetOut(Location::RequiresRegister(), overlaps);
+
+  // Temporary registers used in CAS. In the object case
+  // (UnsafeCASObject intrinsic), these are also used for
+  // card-marking, and possibly for (Baker) read barrier.
+  locations->AddTemp(Location::RequiresRegister());  // Pointer.
+  locations->AddTemp(Location::RequiresRegister());  // Temp 1.
+}
+
+static void GenCas(HInvoke* invoke, Primitive::Type type, CodeGeneratorARMVIXL* codegen) {
+  DCHECK_NE(type, Primitive::kPrimLong);
+
+  ArmVIXLAssembler* assembler = codegen->GetAssembler();
+  LocationSummary* locations = invoke->GetLocations();
+
+  Location out_loc = locations->Out();
+  vixl32::Register out = OutputRegister(invoke);                      // Boolean result.
+
+  vixl32::Register base = InputRegisterAt(invoke, 1);                 // Object pointer.
+  Location offset_loc = locations->InAt(2);
+  vixl32::Register offset = LowRegisterFrom(offset_loc);              // Offset (discard high 4B).
+  vixl32::Register expected = InputRegisterAt(invoke, 3);             // Expected.
+  vixl32::Register value = InputRegisterAt(invoke, 4);                // Value.
+
+  Location tmp_ptr_loc = locations->GetTemp(0);
+  vixl32::Register tmp_ptr = RegisterFrom(tmp_ptr_loc);               // Pointer to actual memory.
+  vixl32::Register tmp = RegisterFrom(locations->GetTemp(1));         // Value in memory.
+
+  if (type == Primitive::kPrimNot) {
+    // The only read barrier implementation supporting the
+    // UnsafeCASObject intrinsic is the Baker-style read barriers.
+    DCHECK(!kEmitCompilerReadBarrier || kUseBakerReadBarrier);
+
+    // Mark card for object assuming new value is stored. Worst case we will mark an unchanged
+    // object and scan the receiver at the next GC for nothing.
+    bool value_can_be_null = true;  // TODO: Worth finding out this information?
+    codegen->MarkGCCard(tmp_ptr, tmp, base, value, value_can_be_null);
+
+    if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
+      // Need to make sure the reference stored in the field is a to-space
+      // one before attempting the CAS or the CAS could fail incorrectly.
+      codegen->GenerateReferenceLoadWithBakerReadBarrier(
+          invoke,
+          out_loc,  // Not used as an output here; only as a "temporary" within the read barrier.
+          base,
+          /* offset */ 0u,
+          /* index */ offset_loc,
+          ScaleFactor::TIMES_1,
+          tmp_ptr_loc,
+          /* needs_null_check */ false,
+          /* always_update_field */ true,
+          &tmp);
+    }
+  }
+
+  // Prevent reordering with prior memory operations.
+  // Emit a DMB ISH instruction instead of a DMB ISHST one, as the
+  // latter allows a preceding load to be delayed past the STXR
+  // instruction below.
+  __ Dmb(vixl32::ISH);
+
+  __ Add(tmp_ptr, base, offset);
+
+  if (kPoisonHeapReferences && type == Primitive::kPrimNot) {
+    codegen->GetAssembler()->PoisonHeapReference(expected);
+    if (value.Is(expected)) {
+      // Do not poison `value`, as it is the same register as
+      // `expected`, which has just been poisoned.
+    } else {
+      codegen->GetAssembler()->PoisonHeapReference(value);
+    }
+  }
+
+  // do {
+  //   tmp = [r_ptr] - expected;
+  // } while (tmp == 0 && failure([r_ptr] <- r_new_value));
+  // result = (tmp == 0);
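+  //
+  // For reference, this roughly implements the Java-level call
+  //   boolean ok = unsafe.compareAndSwapInt(obj, offset, expected, value);
+  // (or compareAndSwapObject for kPrimNot), where the store happens only if the field
+  // still holds `expected` and `ok` reports success.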
+
+  vixl32::Label loop_head;
+  __ Bind(&loop_head);
+
+  __ Ldrex(tmp, tmp_ptr);
+
+  __ Subs(tmp, tmp, expected);
+
+  {
+    AssemblerAccurateScope aas(assembler->GetVIXLAssembler(),
+                               3 * kMaxInstructionSizeInBytes,
+                               CodeBufferCheckScope::kMaximumSize);
+
+    __ itt(eq);
+    __ strex(eq, tmp, value, tmp_ptr);
+    __ cmp(eq, tmp, 1);
+  }
+
+  __ B(eq, &loop_head);
+
+  __ Dmb(vixl32::ISH);
+
+  __ Rsbs(out, tmp, 1);
+
+  {
+    AssemblerAccurateScope aas(assembler->GetVIXLAssembler(),
+                               2 * kMaxInstructionSizeInBytes,
+                               CodeBufferCheckScope::kMaximumSize);
+
+    __ it(cc);
+    __ mov(cc, out, 0);
+  }
+
+  if (kPoisonHeapReferences && type == Primitive::kPrimNot) {
+    codegen->GetAssembler()->UnpoisonHeapReference(expected);
+    if (value.Is(expected)) {
+      // Do not unpoison `value`, as it is the same register as
+      // `expected`, which has just been unpoisoned.
+    } else {
+      codegen->GetAssembler()->UnpoisonHeapReference(value);
+    }
+  }
+}
+
+void IntrinsicLocationsBuilderARMVIXL::VisitUnsafeCASInt(HInvoke* invoke) {
+  CreateIntIntIntIntIntToIntPlusTemps(arena_, invoke, Primitive::kPrimInt);
+}
+void IntrinsicLocationsBuilderARMVIXL::VisitUnsafeCASObject(HInvoke* invoke) {
+  // The only read barrier implementation supporting the
+  // UnsafeCASObject intrinsic is the Baker-style read barriers.
+  if (kEmitCompilerReadBarrier && !kUseBakerReadBarrier) {
+    return;
+  }
+
+  CreateIntIntIntIntIntToIntPlusTemps(arena_, invoke, Primitive::kPrimNot);
+}
+void IntrinsicCodeGeneratorARMVIXL::VisitUnsafeCASInt(HInvoke* invoke) {
+  GenCas(invoke, Primitive::kPrimInt, codegen_);
+}
+void IntrinsicCodeGeneratorARMVIXL::VisitUnsafeCASObject(HInvoke* invoke) {
+  // The only read barrier implementation supporting the
+  // UnsafeCASObject intrinsic is the Baker-style read barriers.
+  DCHECK(!kEmitCompilerReadBarrier || kUseBakerReadBarrier);
+
+  GenCas(invoke, Primitive::kPrimNot, codegen_);
+}
+
+void IntrinsicLocationsBuilderARMVIXL::VisitStringCompareTo(HInvoke* invoke) {
+  // The inputs plus temporaries.
+  LocationSummary* locations = new (arena_) LocationSummary(invoke,
+                                                            invoke->InputAt(1)->CanBeNull()
+                                                                ? LocationSummary::kCallOnSlowPath
+                                                                : LocationSummary::kNoCall,
+                                                            kIntrinsified);
+  locations->SetInAt(0, Location::RequiresRegister());
+  locations->SetInAt(1, Location::RequiresRegister());
+  locations->AddTemp(Location::RequiresRegister());
+  locations->AddTemp(Location::RequiresRegister());
+  locations->AddTemp(Location::RequiresRegister());
+  // Need an extra temporary register for the String compression feature.
+  if (mirror::kUseStringCompression) {
+    locations->AddTemp(Location::RequiresRegister());
+  }
+  locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
+}
+
+void IntrinsicCodeGeneratorARMVIXL::VisitStringCompareTo(HInvoke* invoke) {
+  ArmVIXLAssembler* assembler = GetAssembler();
+  LocationSummary* locations = invoke->GetLocations();
+
+  vixl32::Register str = InputRegisterAt(invoke, 0);
+  vixl32::Register arg = InputRegisterAt(invoke, 1);
+  vixl32::Register out = OutputRegister(invoke);
+
+  vixl32::Register temp0 = RegisterFrom(locations->GetTemp(0));
+  vixl32::Register temp1 = RegisterFrom(locations->GetTemp(1));
+  vixl32::Register temp2 = RegisterFrom(locations->GetTemp(2));
+  vixl32::Register temp3;
+  if (mirror::kUseStringCompression) {
+    temp3 = RegisterFrom(locations->GetTemp(3));
+  }
+
+  vixl32::Label loop;
+  vixl32::Label find_char_diff;
+  vixl32::Label end;
+  vixl32::Label different_compression;
+
+  // Get offsets of count and value fields within a string object.
+  const int32_t count_offset = mirror::String::CountOffset().Int32Value();
+  const int32_t value_offset = mirror::String::ValueOffset().Int32Value();
+
+  // Note that the null check must have been done earlier.
+  DCHECK(!invoke->CanDoImplicitNullCheckOn(invoke->InputAt(0)));
+
+  // If the input can be null, check it at runtime and take the slow path (which throws) when it is.
+  SlowPathCodeARMVIXL* slow_path = nullptr;
+  const bool can_slow_path = invoke->InputAt(1)->CanBeNull();
+  if (can_slow_path) {
+    slow_path = new (GetAllocator()) IntrinsicSlowPathARMVIXL(invoke);
+    codegen_->AddSlowPath(slow_path);
+    __ Cbz(arg, slow_path->GetEntryLabel());
+  }
+
+  // Reference equality check, return 0 if same reference.
+  __ Subs(out, str, arg);
+  __ B(eq, &end);
+
+  if (mirror::kUseStringCompression) {
+    // Load `count` fields of this and argument strings.
+    __ Ldr(temp3, MemOperand(str, count_offset));
+    __ Ldr(temp2, MemOperand(arg, count_offset));
+    // Extract lengths from the `count` fields.
+    __ Lsr(temp0, temp3, 1u);
+    __ Lsr(temp1, temp2, 1u);
+  } else {
+    // Load lengths of this and argument strings.
+    __ Ldr(temp0, MemOperand(str, count_offset));
+    __ Ldr(temp1, MemOperand(arg, count_offset));
+  }
+  // out = length diff.
+  __ Subs(out, temp0, temp1);
+  // temp0 = min(len(str), len(arg)).
+
+  {
+    AssemblerAccurateScope aas(assembler->GetVIXLAssembler(),
+                               2 * kMaxInstructionSizeInBytes,
+                               CodeBufferCheckScope::kMaximumSize);
+
+    __ it(gt);
+    __ mov(gt, temp0, temp1);
+  }
+
+  // Shorter string is empty?
+  __ Cbz(temp0, &end);
+
+  if (mirror::kUseStringCompression) {
+    // Check whether both strings use the same compression style; only then can this comparison
+    // loop be used.
+    __ Eors(temp2, temp2, temp3);
+    __ Lsrs(temp2, temp2, 1u);
+    __ B(cs, &different_compression);
+    // For string compression, calculate the number of bytes to compare (not chars).
+    // This could in theory exceed INT32_MAX, so treat temp0 as unsigned.
+    __ Lsls(temp3, temp3, 31u);  // Extract purely the compression flag.
+
+    AssemblerAccurateScope aas(assembler->GetVIXLAssembler(),
+                               2 * kMaxInstructionSizeInBytes,
+                               CodeBufferCheckScope::kMaximumSize);
+
+    __ it(ne);
+    __ add(ne, temp0, temp0, temp0);
+  }
+
+  // Store offset of string value in preparation for comparison loop.
+  __ Mov(temp1, value_offset);
+
+  // Assertions that must hold in order to compare multiple characters at a time.
+  CHECK_ALIGNED(value_offset, 8);
+  static_assert(IsAligned<8>(kObjectAlignment),
+                "String data must be 8-byte aligned for unrolled CompareTo loop.");
+
+  const size_t char_size = Primitive::ComponentSize(Primitive::kPrimChar);
+  DCHECK_EQ(char_size, 2u);
+
+  UseScratchRegisterScope temps(assembler->GetVIXLAssembler());
+
+  vixl32::Label find_char_diff_2nd_cmp;
+  // Unrolled loop comparing 4x16-bit chars per iteration (ok because of string data alignment).
+  __ Bind(&loop);
+  vixl32::Register temp_reg = temps.Acquire();
+  __ Ldr(temp_reg, MemOperand(str, temp1));
+  __ Ldr(temp2, MemOperand(arg, temp1));
+  __ Cmp(temp_reg, temp2);
+  __ B(ne, &find_char_diff);
+  __ Add(temp1, temp1, char_size * 2);
+
+  __ Ldr(temp_reg, MemOperand(str, temp1));
+  __ Ldr(temp2, MemOperand(arg, temp1));
+  __ Cmp(temp_reg, temp2);
+  __ B(ne, &find_char_diff_2nd_cmp);
+  __ Add(temp1, temp1, char_size * 2);
+  // With string compression, we have compared 8 bytes, otherwise 4 chars.
+  __ Subs(temp0, temp0, (mirror::kUseStringCompression ? 8 : 4));
+  __ B(hi, &loop);
+  __ B(&end);
+
+  __ Bind(&find_char_diff_2nd_cmp);
+  if (mirror::kUseStringCompression) {
+    __ Subs(temp0, temp0, 4);  // 4 bytes previously compared.
+    __ B(ls, &end);  // Was the second comparison fully beyond the end?
+  } else {
+    // Without string compression, we can start treating temp0 as signed
+    // and rely on the signed comparison below.
+    __ Sub(temp0, temp0, 2);
+  }
+
+  // Find the single character difference.
+  __ Bind(&find_char_diff);
+  // Get the bit position of the first character that differs.
+  __ Eor(temp1, temp2, temp_reg);
+  __ Rbit(temp1, temp1);
+  __ Clz(temp1, temp1);
+
+  // temp0 = number of characters remaining to compare.
+  // (Without string compression, it could be < 1 if a difference is found by the second CMP
+  // in the comparison loop, and after the end of the shorter string data).
+
+  // Without string compression (temp1 >> 4) = character where difference occurs between the last
+  // two words compared, in the interval [0,1].
+  // (0 for low half-word different, 1 for high half-word different).
+  // With string compression, (temp1 << 3) = byte where the difference occurs,
+  // in the interval [0,3].
+
+  // If temp0 <= (temp1 >> (kUseStringCompression ? 3 : 4)), the difference occurs outside
+  // the remaining string data, so just return length diff (out).
+  // The comparison is unsigned for string compression, otherwise signed.
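+  // Illustration (no string compression): if the first differing bit between the last pair of
+  // words compared is bit 21, then temp1 = 21 and (temp1 >> 4) = 1, i.e. the high half-word
+  // differs; if at most one character remained (temp0 <= 1), the difference lies past the end
+  // of the shorter string, so the length diff already in `out` is the answer.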
+  __ Cmp(temp0, Operand(temp1, vixl32::LSR, (mirror::kUseStringCompression ? 3 : 4)));
+  __ B((mirror::kUseStringCompression ? ls : le), &end);
+
+  // Extract the characters and calculate the difference.
+  if (mirror::kUseStringCompression) {
+    // For compressed strings we need to clear 0x7 from temp1, for uncompressed we need to clear
+    // 0xf. We also need to prepare the character extraction mask `uncompressed ? 0xffffu : 0xffu`.
+    // The compression flag is now in the highest bit of temp3, so let's play some tricks.
+    __ orr(temp3, temp3, 0xffu << 23);                  // uncompressed ? 0xff800000u : 0x7f800000u
+    __ bic(temp1, temp1, Operand(temp3, vixl32::LSR, 31 - 3));  // &= ~(uncompressed ? 0xfu : 0x7u)
+    __ Asr(temp3, temp3, 7u);                           // uncompressed ? 0xffff0000u : 0xff0000u.
+    __ Lsr(temp2, temp2, temp1);                        // Extract second character.
+    __ Lsr(temp3, temp3, 16u);                          // uncompressed ? 0xffffu : 0xffu
+    __ Lsr(out, temp_reg, temp1);                       // Extract first character.
+    __ and_(temp2, temp2, temp3);
+    __ and_(out, out, temp3);
+  } else {
+    __ bic(temp1, temp1, 0xf);
+    __ Lsr(temp2, temp2, temp1);
+    __ Lsr(out, temp_reg, temp1);
+    __ movt(temp2, 0);
+    __ movt(out, 0);
+  }
+
+  __ Sub(out, out, temp2);
+  temps.Release(temp_reg);
+
+  if (mirror::kUseStringCompression) {
+    __ B(&end);
+    __ Bind(&different_compression);
+
+    // Comparison for different compression style.
+    const size_t c_char_size = Primitive::ComponentSize(Primitive::kPrimByte);
+    DCHECK_EQ(c_char_size, 1u);
+
+    // We want to free up temp3, currently holding `str.count`, for comparison.
+    // So, we move it to the bottom bit of the iteration count `temp0`, which we then
+    // need to treat as unsigned. Start by freeing the bit with an ADD and continue
+    // further down with a LSRS+SBC, which flips the meaning of the flag but allows
+    // `subs temp0, #2; bhi different_compression_loop` to serve as the loop condition.
+    __ add(temp0, temp0, temp0);              // Unlike LSL, this ADD is always 16-bit.
+    // `temp1` will hold the compressed data pointer, `temp2` the uncompressed data pointer.
+    __ mov(temp1, str);
+    __ mov(temp2, arg);
+    __ Lsrs(temp3, temp3, 1u);                // Continue the move of the compression flag.
+    {
+      AssemblerAccurateScope aas(assembler->GetVIXLAssembler(),
+                                 3 * kMaxInstructionSizeInBytes,
+                                 CodeBufferCheckScope::kMaximumSize);
+      __ itt(cs);                             // Interleave with selection of temp1 and temp2.
+      __ mov(cs, temp1, arg);                 // Preserves flags.
+      __ mov(cs, temp2, str);                 // Preserves flags.
+    }
+    __ sbc(temp0, temp0, 0);                  // Complete the move of the compression flag.
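+    // Illustration: with min length N, temp0 is 2*N after the ADD above. The LSRS set the
+    // carry to the `str` compression flag, so this SBC subtracts 0 if `str` is uncompressed
+    // (temp0 = 2*N, bit 0 == 0) and 1 if it is compressed (temp0 = 2*N - 1, bit 0 == 1),
+    // i.e. bit 0 of temp0 now holds the inverted `str` flag.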
+
+    // Adjust temp1 and temp2 from string pointers to data pointers.
+    __ add(temp1, temp1, value_offset);
+    __ add(temp2, temp2, value_offset);
+
+    vixl32::Label different_compression_loop;
+    vixl32::Label different_compression_diff;
+
+    // Main loop for different compression.
+    temp_reg = temps.Acquire();
+    __ Bind(&different_compression_loop);
+    __ Ldrb(temp_reg, MemOperand(temp1, c_char_size, PostIndex));
+    __ Ldrh(temp3, MemOperand(temp2, char_size, PostIndex));
+    __ cmp(temp_reg, temp3);
+    __ B(ne, &different_compression_diff);
+    __ Subs(temp0, temp0, 2);
+    __ B(hi, &different_compression_loop);
+    __ B(&end);
+
+    // Calculate the difference.
+    __ Bind(&different_compression_diff);
+    __ Sub(out, temp_reg, temp3);
+    temps.Release(temp_reg);
+    // Flip the difference if `arg` is compressed.
+    // `temp0` holds the inverted `str` compression flag, i.e. the same as the `arg` flag.
+    __ Lsrs(temp0, temp0, 1u);
+    static_assert(static_cast<uint32_t>(mirror::StringCompressionFlag::kCompressed) == 0u,
+                  "Expecting 0=compressed, 1=uncompressed");
+
+    AssemblerAccurateScope aas(assembler->GetVIXLAssembler(),
+                               2 * kMaxInstructionSizeInBytes,
+                               CodeBufferCheckScope::kMaximumSize);
+    __ it(cc);
+    __ rsb(cc, out, out, 0);
+  }
+
+  __ Bind(&end);
+
+  if (can_slow_path) {
+    __ Bind(slow_path->GetExitLabel());
+  }
+}
+
+void IntrinsicLocationsBuilderARMVIXL::VisitStringEquals(HInvoke* invoke) {
+  LocationSummary* locations = new (arena_) LocationSummary(invoke,
+                                                            LocationSummary::kNoCall,
+                                                            kIntrinsified);
+  InvokeRuntimeCallingConventionARMVIXL calling_convention;
+  locations->SetInAt(0, Location::RequiresRegister());
+  locations->SetInAt(1, Location::RequiresRegister());
+  // Temporary registers to store lengths of strings and for calculations.
+  // Using instruction cbz requires a low register, so explicitly set a temp to be R0.
+  locations->AddTemp(LocationFrom(r0));
+  locations->AddTemp(Location::RequiresRegister());
+  locations->AddTemp(Location::RequiresRegister());
+
+  locations->SetOut(Location::RequiresRegister());
+}
+
+void IntrinsicCodeGeneratorARMVIXL::VisitStringEquals(HInvoke* invoke) {
+  ArmVIXLAssembler* assembler = GetAssembler();
+  LocationSummary* locations = invoke->GetLocations();
+
+  vixl32::Register str = InputRegisterAt(invoke, 0);
+  vixl32::Register arg = InputRegisterAt(invoke, 1);
+  vixl32::Register out = OutputRegister(invoke);
+
+  vixl32::Register temp = RegisterFrom(locations->GetTemp(0));
+  vixl32::Register temp1 = RegisterFrom(locations->GetTemp(1));
+  vixl32::Register temp2 = RegisterFrom(locations->GetTemp(2));
+
+  vixl32::Label loop;
+  vixl32::Label end;
+  vixl32::Label return_true;
+  vixl32::Label return_false;
+
+  // Get offsets of count, value, and class fields within a string object.
+  const uint32_t count_offset = mirror::String::CountOffset().Uint32Value();
+  const uint32_t value_offset = mirror::String::ValueOffset().Uint32Value();
+  const uint32_t class_offset = mirror::Object::ClassOffset().Uint32Value();
+
+  // Note that the null check must have been done earlier.
+  DCHECK(!invoke->CanDoImplicitNullCheckOn(invoke->InputAt(0)));
+
+  StringEqualsOptimizations optimizations(invoke);
+  if (!optimizations.GetArgumentNotNull()) {
+    // Check if input is null, return false if it is.
+    __ Cbz(arg, &return_false);
+  }
+
+  // Reference equality check, return true if same reference.
+  __ Cmp(str, arg);
+  __ B(eq, &return_true);
+
+  if (!optimizations.GetArgumentIsString()) {
+    // Instanceof check for the argument by comparing class fields.
+    // All string objects must have the same type since String cannot be subclassed.
+    // Receiver must be a string object, so its class field is equal to all strings' class fields.
+    // If the argument is a string object, its class field must be equal to receiver's class field.
+    __ Ldr(temp, MemOperand(str, class_offset));
+    __ Ldr(temp1, MemOperand(arg, class_offset));
+    __ Cmp(temp, temp1);
+    __ B(ne, &return_false);
+  }
+
+  // Load `count` fields of this and argument strings.
+  __ Ldr(temp, MemOperand(str, count_offset));
+  __ Ldr(temp1, MemOperand(arg, count_offset));
+  // Check if `count` fields are equal, return false if they're not.
+  // This also compares the compression style; return false if it differs.
+  __ Cmp(temp, temp1);
+  __ B(ne, &return_false);
+  // Return true if both strings are empty. Even with string compression `count == 0` means empty.
+  static_assert(static_cast<uint32_t>(mirror::StringCompressionFlag::kCompressed) == 0u,
+                "Expecting 0=compressed, 1=uncompressed");
+  __ Cbz(temp, &return_true);
+
+  // Assertions that must hold in order to compare strings 4 bytes at a time.
+  DCHECK_ALIGNED(value_offset, 4);
+  static_assert(IsAligned<4>(kObjectAlignment), "String data must be aligned for fast compare.");
+
+  if (mirror::kUseStringCompression) {
+    // For string compression, calculate the number of bytes to compare (not chars).
+    // This could in theory exceed INT32_MAX, so treat temp as unsigned.
+    __ Lsrs(temp, temp, 1u);                        // Extract length and check compression flag.
+    AssemblerAccurateScope aas(assembler->GetVIXLAssembler(),
+                               2 * kMaxInstructionSizeInBytes,
+                               CodeBufferCheckScope::kMaximumSize);
+    __ it(cs);                                      // If uncompressed,
+    __ add(cs, temp, temp, temp);                   //   double the byte count.
+  }
+
+  // Store offset of string value in preparation for comparison loop.
+  __ Mov(temp1, value_offset);
+
+  // Loop to compare strings 4 bytes at a time starting at the front of the string.
+  // Ok to do this because strings are zero-padded to kObjectAlignment.
+  __ Bind(&loop);
+  __ Ldr(out, MemOperand(str, temp1));
+  __ Ldr(temp2, MemOperand(arg, temp1));
+  __ Add(temp1, temp1, sizeof(uint32_t));
+  __ Cmp(out, temp2);
+  __ B(ne, &return_false);
+  // With string compression, we have compared 4 bytes, otherwise 2 chars.
+  __ Subs(temp, temp, mirror::kUseStringCompression ? 4 : 2);
+  __ B(hi, &loop);
+
+  // Return true and exit the function.
+  // If the loop did not branch to return_false, the strings are equal.
+  __ Bind(&return_true);
+  __ Mov(out, 1);
+  __ B(&end);
+
+  // Return false and exit the function.
+  __ Bind(&return_false);
+  __ Mov(out, 0);
+  __ Bind(&end);
+}
+
+static void GenerateVisitStringIndexOf(HInvoke* invoke,
+                                       ArmVIXLAssembler* assembler,
+                                       CodeGeneratorARMVIXL* codegen,
+                                       ArenaAllocator* allocator,
+                                       bool start_at_zero) {
+  LocationSummary* locations = invoke->GetLocations();
+
+  // Note that the null check must have been done earlier.
+  DCHECK(!invoke->CanDoImplicitNullCheckOn(invoke->InputAt(0)));
+
+  // Check for code points > 0xFFFF. Either a slow-path check when we don't know statically,
+  // or directly dispatch for a large constant, or omit slow-path for a small constant or a char.
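+  // For illustration: a constant supplementary code point such as 0x1F600 always takes the
+  // runtime path, whereas an argument of type char can never exceed 0xFFFF, so no check is
+  // emitted for it.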
+  SlowPathCodeARMVIXL* slow_path = nullptr;
+  HInstruction* code_point = invoke->InputAt(1);
+  if (code_point->IsIntConstant()) {
+    if (static_cast<uint32_t>(code_point->AsIntConstant()->GetValue()) >
+        std::numeric_limits<uint16_t>::max()) {
+      // Always needs the slow-path. We could directly dispatch to it, but this case should be
+      // rare, so for simplicity just put the full slow-path down and branch unconditionally.
+      slow_path = new (allocator) IntrinsicSlowPathARMVIXL(invoke);
+      codegen->AddSlowPath(slow_path);
+      __ B(slow_path->GetEntryLabel());
+      __ Bind(slow_path->GetExitLabel());
+      return;
+    }
+  } else if (code_point->GetType() != Primitive::kPrimChar) {
+    vixl32::Register char_reg = InputRegisterAt(invoke, 1);
+    // 0xffff is not a modified immediate but 0x10000 is, so use `>= 0x10000` instead of `> 0xffff`.
+    __ Cmp(char_reg, static_cast<uint32_t>(std::numeric_limits<uint16_t>::max()) + 1);
+    slow_path = new (allocator) IntrinsicSlowPathARMVIXL(invoke);
+    codegen->AddSlowPath(slow_path);
+    __ B(hs, slow_path->GetEntryLabel());
+  }
+
+  if (start_at_zero) {
+    vixl32::Register tmp_reg = RegisterFrom(locations->GetTemp(0));
+    DCHECK(tmp_reg.Is(r2));
+    // Start-index = 0.
+    __ Mov(tmp_reg, 0);
+  }
+
+  codegen->InvokeRuntime(kQuickIndexOf, invoke, invoke->GetDexPc(), slow_path);
+  CheckEntrypointTypes<kQuickIndexOf, int32_t, void*, uint32_t, uint32_t>();
+
+  if (slow_path != nullptr) {
+    __ Bind(slow_path->GetExitLabel());
+  }
+}
+
+void IntrinsicLocationsBuilderARMVIXL::VisitStringIndexOf(HInvoke* invoke) {
+  LocationSummary* locations = new (arena_) LocationSummary(invoke,
+                                                            LocationSummary::kCallOnMainAndSlowPath,
+                                                            kIntrinsified);
+  // We have a hand-crafted assembly stub that follows the runtime calling convention. So it's
+  // best to align the inputs accordingly.
+  InvokeRuntimeCallingConventionARMVIXL calling_convention;
+  locations->SetInAt(0, LocationFrom(calling_convention.GetRegisterAt(0)));
+  locations->SetInAt(1, LocationFrom(calling_convention.GetRegisterAt(1)));
+  locations->SetOut(LocationFrom(r0));
+
+  // Need to send start-index=0.
+  locations->AddTemp(LocationFrom(calling_convention.GetRegisterAt(2)));
+}
+
+void IntrinsicCodeGeneratorARMVIXL::VisitStringIndexOf(HInvoke* invoke) {
+  GenerateVisitStringIndexOf(
+      invoke, GetAssembler(), codegen_, GetAllocator(), /* start_at_zero */ true);
+}
+
+void IntrinsicLocationsBuilderARMVIXL::VisitStringIndexOfAfter(HInvoke* invoke) {
+  LocationSummary* locations = new (arena_) LocationSummary(invoke,
+                                                            LocationSummary::kCallOnMainAndSlowPath,
+                                                            kIntrinsified);
+  // We have a hand-crafted assembly stub that follows the runtime calling convention. So it's
+  // best to align the inputs accordingly.
+  InvokeRuntimeCallingConventionARMVIXL calling_convention;
+  locations->SetInAt(0, LocationFrom(calling_convention.GetRegisterAt(0)));
+  locations->SetInAt(1, LocationFrom(calling_convention.GetRegisterAt(1)));
+  locations->SetInAt(2, LocationFrom(calling_convention.GetRegisterAt(2)));
+  locations->SetOut(LocationFrom(r0));
+}
+
+void IntrinsicCodeGeneratorARMVIXL::VisitStringIndexOfAfter(HInvoke* invoke) {
+  GenerateVisitStringIndexOf(
+      invoke, GetAssembler(), codegen_, GetAllocator(), /* start_at_zero */ false);
+}
+
+void IntrinsicLocationsBuilderARMVIXL::VisitStringNewStringFromBytes(HInvoke* invoke) {
+  LocationSummary* locations = new (arena_) LocationSummary(invoke,
+                                                            LocationSummary::kCallOnMainAndSlowPath,
+                                                            kIntrinsified);
+  InvokeRuntimeCallingConventionARMVIXL calling_convention;
+  locations->SetInAt(0, LocationFrom(calling_convention.GetRegisterAt(0)));
+  locations->SetInAt(1, LocationFrom(calling_convention.GetRegisterAt(1)));
+  locations->SetInAt(2, LocationFrom(calling_convention.GetRegisterAt(2)));
+  locations->SetInAt(3, LocationFrom(calling_convention.GetRegisterAt(3)));
+  locations->SetOut(LocationFrom(r0));
+}
+
+void IntrinsicCodeGeneratorARMVIXL::VisitStringNewStringFromBytes(HInvoke* invoke) {
+  ArmVIXLAssembler* assembler = GetAssembler();
+  vixl32::Register byte_array = InputRegisterAt(invoke, 0);
+  __ Cmp(byte_array, 0);
+  SlowPathCodeARMVIXL* slow_path = new (GetAllocator()) IntrinsicSlowPathARMVIXL(invoke);
+  codegen_->AddSlowPath(slow_path);
+  __ B(eq, slow_path->GetEntryLabel());
+
+  codegen_->InvokeRuntime(kQuickAllocStringFromBytes, invoke, invoke->GetDexPc(), slow_path);
+  CheckEntrypointTypes<kQuickAllocStringFromBytes, void*, void*, int32_t, int32_t, int32_t>();
+  __ Bind(slow_path->GetExitLabel());
+}
+
+void IntrinsicLocationsBuilderARMVIXL::VisitStringNewStringFromChars(HInvoke* invoke) {
+  LocationSummary* locations = new (arena_) LocationSummary(invoke,
+                                                            LocationSummary::kCallOnMainOnly,
+                                                            kIntrinsified);
+  InvokeRuntimeCallingConventionARMVIXL calling_convention;
+  locations->SetInAt(0, LocationFrom(calling_convention.GetRegisterAt(0)));
+  locations->SetInAt(1, LocationFrom(calling_convention.GetRegisterAt(1)));
+  locations->SetInAt(2, LocationFrom(calling_convention.GetRegisterAt(2)));
+  locations->SetOut(LocationFrom(r0));
+}
+
+void IntrinsicCodeGeneratorARMVIXL::VisitStringNewStringFromChars(HInvoke* invoke) {
+  // No need to emit code checking whether `locations->InAt(2)` is a null
+  // pointer, as callers of the native method
+  //
+  //   java.lang.StringFactory.newStringFromChars(int offset, int charCount, char[] data)
+  //
+  // all include a null check on `data` before calling that method.
+  codegen_->InvokeRuntime(kQuickAllocStringFromChars, invoke, invoke->GetDexPc());
+  CheckEntrypointTypes<kQuickAllocStringFromChars, void*, int32_t, int32_t, void*>();
+}
+
+void IntrinsicLocationsBuilderARMVIXL::VisitStringNewStringFromString(HInvoke* invoke) {
+  LocationSummary* locations = new (arena_) LocationSummary(invoke,
+                                                            LocationSummary::kCallOnMainAndSlowPath,
+                                                            kIntrinsified);
+  InvokeRuntimeCallingConventionARMVIXL calling_convention;
+  locations->SetInAt(0, LocationFrom(calling_convention.GetRegisterAt(0)));
+  locations->SetOut(LocationFrom(r0));
+}
+
+void IntrinsicCodeGeneratorARMVIXL::VisitStringNewStringFromString(HInvoke* invoke) {
+  ArmVIXLAssembler* assembler = GetAssembler();
+  vixl32::Register string_to_copy = InputRegisterAt(invoke, 0);
+  __ Cmp(string_to_copy, 0);
+  SlowPathCodeARMVIXL* slow_path = new (GetAllocator()) IntrinsicSlowPathARMVIXL(invoke);
+  codegen_->AddSlowPath(slow_path);
+  __ B(eq, slow_path->GetEntryLabel());
+
+  codegen_->InvokeRuntime(kQuickAllocStringFromString, invoke, invoke->GetDexPc(), slow_path);
+  CheckEntrypointTypes<kQuickAllocStringFromString, void*, void*>();
+
+  __ Bind(slow_path->GetExitLabel());
+}
+
+void IntrinsicLocationsBuilderARMVIXL::VisitSystemArrayCopy(HInvoke* invoke) {
+  // The only read barrier implementation supporting the
+  // SystemArrayCopy intrinsic is the Baker-style read barriers.
+  if (kEmitCompilerReadBarrier && !kUseBakerReadBarrier) {
+    return;
+  }
+
+  CodeGenerator::CreateSystemArrayCopyLocationSummary(invoke);
+  LocationSummary* locations = invoke->GetLocations();
+  if (locations == nullptr) {
+    return;
+  }
+
+  HIntConstant* src_pos = invoke->InputAt(1)->AsIntConstant();
+  HIntConstant* dest_pos = invoke->InputAt(3)->AsIntConstant();
+  HIntConstant* length = invoke->InputAt(4)->AsIntConstant();
+
+  if (src_pos != nullptr && !assembler_->ShifterOperandCanAlwaysHold(src_pos->GetValue())) {
+    locations->SetInAt(1, Location::RequiresRegister());
+  }
+  if (dest_pos != nullptr && !assembler_->ShifterOperandCanAlwaysHold(dest_pos->GetValue())) {
+    locations->SetInAt(3, Location::RequiresRegister());
+  }
+  if (length != nullptr && !assembler_->ShifterOperandCanAlwaysHold(length->GetValue())) {
+    locations->SetInAt(4, Location::RequiresRegister());
+  }
+  if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
+    // Temporary register IP cannot be used in
+    // ReadBarrierSystemArrayCopySlowPathARM (because that register
+    // is clobbered by ReadBarrierMarkRegX entry points). Get an extra
+    // temporary register from the register allocator.
+    locations->AddTemp(Location::RequiresRegister());
+  }
+}
+
+static void CheckPosition(ArmVIXLAssembler* assembler,
+                          Location pos,
+                          vixl32::Register input,
+                          Location length,
+                          SlowPathCodeARMVIXL* slow_path,
+                          vixl32::Register temp,
+                          bool length_is_input_length = false) {
+  // Where is the length in the Array?
+  const uint32_t length_offset = mirror::Array::LengthOffset().Uint32Value();
+
+  if (pos.IsConstant()) {
+    int32_t pos_const = Int32ConstantFrom(pos);
+    if (pos_const == 0) {
+      if (!length_is_input_length) {
+        // Check that length(input) >= length.
+        __ Ldr(temp, MemOperand(input, length_offset));
+        if (length.IsConstant()) {
+          __ Cmp(temp, Int32ConstantFrom(length));
+        } else {
+          __ Cmp(temp, RegisterFrom(length));
+        }
+        __ B(lt, slow_path->GetEntryLabel());
+      }
+    } else {
+      // Check that length(input) >= pos.
+      __ Ldr(temp, MemOperand(input, length_offset));
+      __ Subs(temp, temp, pos_const);
+      __ B(lt, slow_path->GetEntryLabel());
+
+      // Check that (length(input) - pos) >= length.
+      if (length.IsConstant()) {
+        __ Cmp(temp, Int32ConstantFrom(length));
+      } else {
+        __ Cmp(temp, RegisterFrom(length));
+      }
+      __ B(lt, slow_path->GetEntryLabel());
+    }
+  } else if (length_is_input_length) {
+    // The only way the copy can succeed is if pos is zero.
+    vixl32::Register pos_reg = RegisterFrom(pos);
+    __ Cbnz(pos_reg, slow_path->GetEntryLabel());
+  } else {
+    // Check that pos >= 0.
+    vixl32::Register pos_reg = RegisterFrom(pos);
+    __ Cmp(pos_reg, 0);
+    __ B(lt, slow_path->GetEntryLabel());
+
+    // Check that pos <= length(input).
+    __ Ldr(temp, MemOperand(input, length_offset));
+    __ Subs(temp, temp, pos_reg);
+    __ B(lt, slow_path->GetEntryLabel());
+
+    // Check that (length(input) - pos) >= length.
+    if (length.IsConstant()) {
+      __ Cmp(temp, Int32ConstantFrom(length));
+    } else {
+      __ Cmp(temp, RegisterFrom(length));
+    }
+    __ B(lt, slow_path->GetEntryLabel());
+  }
+}
+
+void IntrinsicCodeGeneratorARMVIXL::VisitSystemArrayCopy(HInvoke* invoke) {
+  // The only read barrier implementation supporting the
+  // SystemArrayCopy intrinsic is the Baker-style read barriers.
+  DCHECK(!kEmitCompilerReadBarrier || kUseBakerReadBarrier);
+
+  ArmVIXLAssembler* assembler = GetAssembler();
+  LocationSummary* locations = invoke->GetLocations();
+
+  uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
+  uint32_t super_offset = mirror::Class::SuperClassOffset().Int32Value();
+  uint32_t component_offset = mirror::Class::ComponentTypeOffset().Int32Value();
+  uint32_t primitive_offset = mirror::Class::PrimitiveTypeOffset().Int32Value();
+  uint32_t monitor_offset = mirror::Object::MonitorOffset().Int32Value();
+
+  vixl32::Register src = InputRegisterAt(invoke, 0);
+  Location src_pos = locations->InAt(1);
+  vixl32::Register dest = InputRegisterAt(invoke, 2);
+  Location dest_pos = locations->InAt(3);
+  Location length = locations->InAt(4);
+  Location temp1_loc = locations->GetTemp(0);
+  vixl32::Register temp1 = RegisterFrom(temp1_loc);
+  Location temp2_loc = locations->GetTemp(1);
+  vixl32::Register temp2 = RegisterFrom(temp2_loc);
+  Location temp3_loc = locations->GetTemp(2);
+  vixl32::Register temp3 = RegisterFrom(temp3_loc);
+
+  SlowPathCodeARMVIXL* intrinsic_slow_path = new (GetAllocator()) IntrinsicSlowPathARMVIXL(invoke);
+  codegen_->AddSlowPath(intrinsic_slow_path);
+
+  vixl32::Label conditions_on_positions_validated;
+  SystemArrayCopyOptimizations optimizations(invoke);
+
+  // If source and destination are the same, we go to the slow path if the copy would have
+  // to run backwards (dest_pos > src_pos), since the copy loop below always copies forwards.
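+  // For illustration: System.arraycopy(a, 0, a, 1, n) shifts elements up within the same
+  // array and would require a backward copy, so it is left to the slow path.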
+  if (src_pos.IsConstant()) {
+    int32_t src_pos_constant = Int32ConstantFrom(src_pos);
+    if (dest_pos.IsConstant()) {
+      int32_t dest_pos_constant = Int32ConstantFrom(dest_pos);
+      if (optimizations.GetDestinationIsSource()) {
+        // Checked when building locations.
+        DCHECK_GE(src_pos_constant, dest_pos_constant);
+      } else if (src_pos_constant < dest_pos_constant) {
+        __ Cmp(src, dest);
+        __ B(eq, intrinsic_slow_path->GetEntryLabel());
+      }
+
+      // Checked when building locations.
+      DCHECK(!optimizations.GetDestinationIsSource()
+             || (src_pos_constant >= Int32ConstantFrom(dest_pos)));
+    } else {
+      if (!optimizations.GetDestinationIsSource()) {
+        __ Cmp(src, dest);
+        __ B(ne, &conditions_on_positions_validated);
+      }
+      __ Cmp(RegisterFrom(dest_pos), src_pos_constant);
+      __ B(gt, intrinsic_slow_path->GetEntryLabel());
+    }
+  } else {
+    if (!optimizations.GetDestinationIsSource()) {
+      __ Cmp(src, dest);
+      __ B(ne, &conditions_on_positions_validated);
+    }
+    if (dest_pos.IsConstant()) {
+      int32_t dest_pos_constant = Int32ConstantFrom(dest_pos);
+      __ Cmp(RegisterFrom(src_pos), dest_pos_constant);
+    } else {
+      __ Cmp(RegisterFrom(src_pos), RegisterFrom(dest_pos));
+    }
+    __ B(lt, intrinsic_slow_path->GetEntryLabel());
+  }
+
+  __ Bind(&conditions_on_positions_validated);
+
+  if (!optimizations.GetSourceIsNotNull()) {
+    // Bail out if the source is null.
+    __ Cbz(src, intrinsic_slow_path->GetEntryLabel());
+  }
+
+  if (!optimizations.GetDestinationIsNotNull() && !optimizations.GetDestinationIsSource()) {
+    // Bail out if the destination is null.
+    __ Cbz(dest, intrinsic_slow_path->GetEntryLabel());
+  }
+
+  // If the length is negative, bail out.
+  // We have already checked in the LocationsBuilder for the constant case.
+  if (!length.IsConstant() &&
+      !optimizations.GetCountIsSourceLength() &&
+      !optimizations.GetCountIsDestinationLength()) {
+    __ Cmp(RegisterFrom(length), 0);
+    __ B(lt, intrinsic_slow_path->GetEntryLabel());
+  }
+
+  // Validity checks: source.
+  CheckPosition(assembler,
+                src_pos,
+                src,
+                length,
+                intrinsic_slow_path,
+                temp1,
+                optimizations.GetCountIsSourceLength());
+
+  // Validity checks: dest.
+  CheckPosition(assembler,
+                dest_pos,
+                dest,
+                length,
+                intrinsic_slow_path,
+                temp1,
+                optimizations.GetCountIsDestinationLength());
+
+  if (!optimizations.GetDoesNotNeedTypeCheck()) {
+    // Check whether all elements of the source array are assignable to the component
+    // type of the destination array. We do two checks: the classes are the same,
+    // or the destination is Object[]. If none of these checks succeed, we go to the
+    // slow path.
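+    // For illustration: copying a String[] into an Object[] passes the second check below;
+    // copying an Object[] into a String[] takes the slow path, which checks each element.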
+
+    if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
+      if (!optimizations.GetSourceIsNonPrimitiveArray()) {
+        // /* HeapReference<Class> */ temp1 = src->klass_
+        codegen_->GenerateFieldLoadWithBakerReadBarrier(
+            invoke, temp1_loc, src, class_offset, temp2_loc, /* needs_null_check */ false);
+        // Bail out if the source is not a non-primitive array.
+        // /* HeapReference<Class> */ temp1 = temp1->component_type_
+        codegen_->GenerateFieldLoadWithBakerReadBarrier(
+            invoke, temp1_loc, temp1, component_offset, temp2_loc, /* needs_null_check */ false);
+        __ Cbz(temp1, intrinsic_slow_path->GetEntryLabel());
+        // If heap poisoning is enabled, `temp1` has been unpoisoned
+        // by the previous call to GenerateFieldLoadWithBakerReadBarrier.
+        // /* uint16_t */ temp1 = static_cast<uint16>(temp1->primitive_type_);
+        __ Ldrh(temp1, MemOperand(temp1, primitive_offset));
+        static_assert(Primitive::kPrimNot == 0, "Expected 0 for kPrimNot");
+        __ Cbnz(temp1, intrinsic_slow_path->GetEntryLabel());
+      }
+
+      // /* HeapReference<Class> */ temp1 = dest->klass_
+      codegen_->GenerateFieldLoadWithBakerReadBarrier(
+          invoke, temp1_loc, dest, class_offset, temp2_loc, /* needs_null_check */ false);
+
+      if (!optimizations.GetDestinationIsNonPrimitiveArray()) {
+        // Bail out if the destination is not a non-primitive array.
+        //
+        // Register `temp1` is not trashed by the read barrier emitted
+        // by GenerateFieldLoadWithBakerReadBarrier below, as that
+        // method produces a call to a ReadBarrierMarkRegX entry point,
+        // which saves all potentially live registers, including
+        // temporaries such as `temp1`.
+        // /* HeapReference<Class> */ temp2 = temp1->component_type_
+        codegen_->GenerateFieldLoadWithBakerReadBarrier(
+            invoke, temp2_loc, temp1, component_offset, temp3_loc, /* needs_null_check */ false);
+        __ Cbz(temp2, intrinsic_slow_path->GetEntryLabel());
+        // If heap poisoning is enabled, `temp2` has been unpoisoned
+        // by the previous call to GenerateFieldLoadWithBakerReadBarrier.
+        // /* uint16_t */ temp2 = static_cast<uint16>(temp2->primitive_type_);
+        __ Ldrh(temp2, MemOperand(temp2, primitive_offset));
+        static_assert(Primitive::kPrimNot == 0, "Expected 0 for kPrimNot");
+        __ Cbnz(temp2, intrinsic_slow_path->GetEntryLabel());
+      }
+
+      // For the same reason given earlier, `temp1` is not trashed by the
+      // read barrier emitted by GenerateFieldLoadWithBakerReadBarrier below.
+      // /* HeapReference<Class> */ temp2 = src->klass_
+      codegen_->GenerateFieldLoadWithBakerReadBarrier(
+          invoke, temp2_loc, src, class_offset, temp3_loc, /* needs_null_check */ false);
+      // Note: if heap poisoning is on, we are comparing two unpoisoned references here.
+      __ Cmp(temp1, temp2);
+
+      if (optimizations.GetDestinationIsTypedObjectArray()) {
+        vixl32::Label do_copy;
+        __ B(eq, &do_copy);
+        // /* HeapReference<Class> */ temp1 = temp1->component_type_
+        codegen_->GenerateFieldLoadWithBakerReadBarrier(
+            invoke, temp1_loc, temp1, component_offset, temp2_loc, /* needs_null_check */ false);
+        // /* HeapReference<Class> */ temp1 = temp1->super_class_
+        // We do not need to emit a read barrier for the following
+        // heap reference load, as `temp1` is only used in a
+        // comparison with null below, and this reference is not
+        // kept afterwards.
+        __ Ldr(temp1, MemOperand(temp1, super_offset));
+        __ Cbnz(temp1, intrinsic_slow_path->GetEntryLabel());
+        __ Bind(&do_copy);
+      } else {
+        __ B(ne, intrinsic_slow_path->GetEntryLabel());
+      }
+    } else {
+      // Non read barrier code.
+
+      // /* HeapReference<Class> */ temp1 = dest->klass_
+      __ Ldr(temp1, MemOperand(dest, class_offset));
+      // /* HeapReference<Class> */ temp2 = src->klass_
+      __ Ldr(temp2, MemOperand(src, class_offset));
+      bool did_unpoison = false;
+      if (!optimizations.GetDestinationIsNonPrimitiveArray() ||
+          !optimizations.GetSourceIsNonPrimitiveArray()) {
+        // One or two of the references need to be unpoisoned. Unpoison them
+        // both to make the identity check valid.
+        assembler->MaybeUnpoisonHeapReference(temp1);
+        assembler->MaybeUnpoisonHeapReference(temp2);
+        did_unpoison = true;
+      }
+
+      if (!optimizations.GetDestinationIsNonPrimitiveArray()) {
+        // Bail out if the destination is not a non-primitive array.
+        // /* HeapReference<Class> */ temp3 = temp1->component_type_
+        __ Ldr(temp3, MemOperand(temp1, component_offset));
+        __ Cbz(temp3, intrinsic_slow_path->GetEntryLabel());
+        assembler->MaybeUnpoisonHeapReference(temp3);
+        // /* uint16_t */ temp3 = static_cast<uint16>(temp3->primitive_type_);
+        __ Ldrh(temp3, MemOperand(temp3, primitive_offset));
+        static_assert(Primitive::kPrimNot == 0, "Expected 0 for kPrimNot");
+        __ Cbnz(temp3, intrinsic_slow_path->GetEntryLabel());
+      }
+
+      if (!optimizations.GetSourceIsNonPrimitiveArray()) {
+        // Bail out if the source is not a non-primitive array.
+        // /* HeapReference<Class> */ temp3 = temp2->component_type_
+        __ Ldr(temp3, MemOperand(temp2, component_offset));
+        __ Cbz(temp3, intrinsic_slow_path->GetEntryLabel());
+        assembler->MaybeUnpoisonHeapReference(temp3);
+        // /* uint16_t */ temp3 = static_cast<uint16>(temp3->primitive_type_);
+        __ Ldrh(temp3, MemOperand(temp3, primitive_offset));
+        static_assert(Primitive::kPrimNot == 0, "Expected 0 for kPrimNot");
+        __ Cbnz(temp3, intrinsic_slow_path->GetEntryLabel());
+      }
+
+      __ Cmp(temp1, temp2);
+
+      if (optimizations.GetDestinationIsTypedObjectArray()) {
+        vixl32::Label do_copy;
+        __ B(eq, &do_copy);
+        if (!did_unpoison) {
+          assembler->MaybeUnpoisonHeapReference(temp1);
+        }
+        // /* HeapReference<Class> */ temp1 = temp1->component_type_
+        __ Ldr(temp1, MemOperand(temp1, component_offset));
+        assembler->MaybeUnpoisonHeapReference(temp1);
+        // /* HeapReference<Class> */ temp1 = temp1->super_class_
+        __ Ldr(temp1, MemOperand(temp1, super_offset));
+        // No need to unpoison the result, we're comparing against null.
+        __ Cbnz(temp1, intrinsic_slow_path->GetEntryLabel());
+        __ Bind(&do_copy);
+      } else {
+        __ B(ne, intrinsic_slow_path->GetEntryLabel());
+      }
+    }
+  } else if (!optimizations.GetSourceIsNonPrimitiveArray()) {
+    DCHECK(optimizations.GetDestinationIsNonPrimitiveArray());
+    // Bail out if the source is not a non-primitive array.
+    if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
+      // /* HeapReference<Class> */ temp1 = src->klass_
+      codegen_->GenerateFieldLoadWithBakerReadBarrier(
+          invoke, temp1_loc, src, class_offset, temp2_loc, /* needs_null_check */ false);
+      // /* HeapReference<Class> */ temp3 = temp1->component_type_
+      codegen_->GenerateFieldLoadWithBakerReadBarrier(
+          invoke, temp3_loc, temp1, component_offset, temp2_loc, /* needs_null_check */ false);
+      __ Cbz(temp3, intrinsic_slow_path->GetEntryLabel());
+      // If heap poisoning is enabled, `temp3` has been unpoisoned
+      // by the previous call to GenerateFieldLoadWithBakerReadBarrier.
+    } else {
+      // /* HeapReference<Class> */ temp1 = src->klass_
+      __ Ldr(temp1, MemOperand(src, class_offset));
+      assembler->MaybeUnpoisonHeapReference(temp1);
+      // /* HeapReference<Class> */ temp3 = temp1->component_type_
+      __ Ldr(temp3, MemOperand(temp1, component_offset));
+      __ Cbz(temp3, intrinsic_slow_path->GetEntryLabel());
+      assembler->MaybeUnpoisonHeapReference(temp3);
+    }
+    // /* uint16_t */ temp3 = static_cast<uint16>(temp3->primitive_type_);
+    __ Ldrh(temp3, MemOperand(temp3, primitive_offset));
+    static_assert(Primitive::kPrimNot == 0, "Expected 0 for kPrimNot");
+    __ Cbnz(temp3, intrinsic_slow_path->GetEntryLabel());
+  }
+
+  int32_t element_size = Primitive::ComponentSize(Primitive::kPrimNot);
+  uint32_t element_size_shift = Primitive::ComponentSizeShift(Primitive::kPrimNot);
+  uint32_t offset = mirror::Array::DataOffset(element_size).Uint32Value();
+
+  // Compute the base source address in `temp1`.
+  if (src_pos.IsConstant()) {
+    int32_t constant = Int32ConstantFrom(src_pos);
+    __ Add(temp1, src, element_size * constant + offset);
+  } else {
+    __ Add(temp1, src, Operand(RegisterFrom(src_pos), vixl32::LSL, element_size_shift));
+    __ Add(temp1, temp1, offset);
+  }
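+  // For illustration: with 4-byte heap references and a constant src_pos of 2, this yields
+  // temp1 = src + offset + 8; the register case computes the same address via shift-and-add.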
+
+  // Compute the end source address in `temp3`.
+  if (length.IsConstant()) {
+    int32_t constant = Int32ConstantFrom(length);
+    __ Add(temp3, temp1, element_size * constant);
+  } else {
+    __ Add(temp3, temp1, Operand(RegisterFrom(length), vixl32::LSL, element_size_shift));
+  }
+
+  if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
+    // The base destination address is computed later, as `temp2` is
+    // used for intermediate computations.
+
+    // SystemArrayCopy implementation for Baker read barriers (see
+    // also CodeGeneratorARM::GenerateReferenceLoadWithBakerReadBarrier):
+    //
+    //   if (src_ptr != end_ptr) {
+    //     uint32_t rb_state = Lockword(src->monitor_).ReadBarrierState();
+    //     lfence;  // Load fence or artificial data dependency to prevent load-load reordering
+    //     bool is_gray = (rb_state == ReadBarrier::GrayState());
+    //     if (is_gray) {
+    //       // Slow-path copy.
+    //       do {
+    //         *dest_ptr++ = MaybePoison(ReadBarrier::Mark(MaybeUnpoison(*src_ptr++)));
+    //       } while (src_ptr != end_ptr)
+    //     } else {
+    //       // Fast-path copy.
+    //       do {
+    //         *dest_ptr++ = *src_ptr++;
+    //       } while (src_ptr != end_ptr)
+    //     }
+    //   }
+
+    vixl32::Label loop, done;
+
+    // Don't enter copy loop if `length == 0`.
+    __ Cmp(temp1, temp3);
+    __ B(eq, &done);
+
+    // /* int32_t */ monitor = src->monitor_
+    __ Ldr(temp2, MemOperand(src, monitor_offset));
+    // /* LockWord */ lock_word = LockWord(monitor)
+    static_assert(sizeof(LockWord) == sizeof(int32_t),
+                  "art::LockWord and int32_t have different sizes.");
+
+    // Introduce a dependency on the lock_word including the rb_state,
+    // which shall prevent load-load reordering without using
+    // a memory barrier (which would be more expensive).
+    // `src` is unchanged by this operation, but its value now depends
+    // on `temp2`.
+    __ Add(src, src, Operand(temp2, vixl32::LSR, 32));
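+    // Note: an immediate LSR #32 shifts all bits out, so the added operand is zero; the ADD
+    // exists only to create the register dependency on `temp2`.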
+
+    // Slow path used to copy array when `src` is gray.
+    SlowPathCodeARMVIXL* read_barrier_slow_path =
+        new (GetAllocator()) ReadBarrierSystemArrayCopySlowPathARMVIXL(invoke);
+    codegen_->AddSlowPath(read_barrier_slow_path);
+
+    // Given the numeric representation, it's enough to check the low bit of the
+    // rb_state. We do that by shifting the bit out of the lock word with LSRS
+    // which can be a 16-bit instruction unlike the TST immediate.
+    static_assert(ReadBarrier::WhiteState() == 0, "Expecting white to have value 0");
+    static_assert(ReadBarrier::GrayState() == 1, "Expecting gray to have value 1");
+    __ Lsrs(temp2, temp2, LockWord::kReadBarrierStateShift + 1);
+    // Carry flag is the last bit shifted out by LSRS.
+    __ B(cs, read_barrier_slow_path->GetEntryLabel());
+
+    // Fast-path copy.
+
+    // Compute the base destination address in `temp2`.
+    if (dest_pos.IsConstant()) {
+      int32_t constant = Int32ConstantFrom(dest_pos);
+      __ Add(temp2, dest, element_size * constant + offset);
+    } else {
+      __ Add(temp2, dest, Operand(RegisterFrom(dest_pos), vixl32::LSL, element_size_shift));
+      __ Add(temp2, temp2, offset);
+    }
+
+    // Iterate over the arrays and do a raw copy of the objects. We don't need to
+    // poison/unpoison.
+    __ Bind(&loop);
+
+    {
+      UseScratchRegisterScope temps(assembler->GetVIXLAssembler());
+      const vixl32::Register temp_reg = temps.Acquire();
+
+      __ Ldr(temp_reg, MemOperand(temp1, element_size, PostIndex));
+      __ Str(temp_reg, MemOperand(temp2, element_size, PostIndex));
+    }
+
+    __ Cmp(temp1, temp3);
+    __ B(ne, &loop);
+
+    __ Bind(read_barrier_slow_path->GetExitLabel());
+    __ Bind(&done);
+  } else {
+    // Non read barrier code.
+
+    // Compute the base destination address in `temp2`.
+    if (dest_pos.IsConstant()) {
+      int32_t constant = Int32ConstantFrom(dest_pos);
+      __ Add(temp2, dest, element_size * constant + offset);
+    } else {
+      __ Add(temp2, dest, Operand(RegisterFrom(dest_pos), vixl32::LSL, element_size_shift));
+      __ Add(temp2, temp2, offset);
+    }
+
+    // Iterate over the arrays and do a raw copy of the objects. We don't need to
+    // poison/unpoison.
+    vixl32::Label loop, done;
+    __ Cmp(temp1, temp3);
+    __ B(eq, &done);
+    __ Bind(&loop);
+
+    {
+      UseScratchRegisterScope temps(assembler->GetVIXLAssembler());
+      const vixl32::Register temp_reg = temps.Acquire();
+
+      __ Ldr(temp_reg, MemOperand(temp1, element_size, PostIndex));
+      __ Str(temp_reg, MemOperand(temp2, element_size, PostIndex));
+    }
+
+    __ Cmp(temp1, temp3);
+    __ B(ne, &loop);
+    __ Bind(&done);
+  }
+
+  // We only need one card marking on the destination array.
+  codegen_->MarkGCCard(temp1, temp2, dest, NoReg, /* value_can_be_null */ false);
+
+  __ Bind(intrinsic_slow_path->GetExitLabel());
+}
+
+static void CreateFPToFPCallLocations(ArenaAllocator* arena, HInvoke* invoke) {
+  // If the graph is debuggable, all callee-saved floating-point registers are blocked by
+  // the code generator. Furthermore, the register allocator creates fixed live intervals
+  // for all caller-saved registers because we are doing a function call. As a result, if
+  // the input and output locations are unallocated, the register allocator runs out of
+  // registers and fails; however, a debuggable graph is not the common case.
+  if (invoke->GetBlock()->GetGraph()->IsDebuggable()) {
+    return;
+  }
+
+  DCHECK_EQ(invoke->GetNumberOfArguments(), 1U);
+  DCHECK_EQ(invoke->InputAt(0)->GetType(), Primitive::kPrimDouble);
+  DCHECK_EQ(invoke->GetType(), Primitive::kPrimDouble);
+
+  LocationSummary* const locations = new (arena) LocationSummary(invoke,
+                                                                 LocationSummary::kCallOnMainOnly,
+                                                                 kIntrinsified);
+  const InvokeRuntimeCallingConventionARMVIXL calling_convention;
+
+  locations->SetInAt(0, Location::RequiresFpuRegister());
+  locations->SetOut(Location::RequiresFpuRegister());
+  // Native code uses the soft float ABI.
+  locations->AddTemp(LocationFrom(calling_convention.GetRegisterAt(0)));
+  locations->AddTemp(LocationFrom(calling_convention.GetRegisterAt(1)));
+}
+
+static void CreateFPFPToFPCallLocations(ArenaAllocator* arena, HInvoke* invoke) {
+  // If the graph is debuggable, all callee-saved floating-point registers are blocked by
+  // the code generator. Furthermore, the register allocator creates fixed live intervals
+  // for all caller-saved registers because we are doing a function call. As a result, if
+  // the input and output locations are unallocated, the register allocator runs out of
+  // registers and fails; however, a debuggable graph is not the common case.
+  if (invoke->GetBlock()->GetGraph()->IsDebuggable()) {
+    return;
+  }
+
+  DCHECK_EQ(invoke->GetNumberOfArguments(), 2U);
+  DCHECK_EQ(invoke->InputAt(0)->GetType(), Primitive::kPrimDouble);
+  DCHECK_EQ(invoke->InputAt(1)->GetType(), Primitive::kPrimDouble);
+  DCHECK_EQ(invoke->GetType(), Primitive::kPrimDouble);
+
+  LocationSummary* const locations = new (arena) LocationSummary(invoke,
+                                                                 LocationSummary::kCallOnMainOnly,
+                                                                 kIntrinsified);
+  const InvokeRuntimeCallingConventionARMVIXL calling_convention;
+
+  locations->SetInAt(0, Location::RequiresFpuRegister());
+  locations->SetInAt(1, Location::RequiresFpuRegister());
+  locations->SetOut(Location::RequiresFpuRegister());
+  // Native code uses the soft float ABI.
+  locations->AddTemp(LocationFrom(calling_convention.GetRegisterAt(0)));
+  locations->AddTemp(LocationFrom(calling_convention.GetRegisterAt(1)));
+  locations->AddTemp(LocationFrom(calling_convention.GetRegisterAt(2)));
+  locations->AddTemp(LocationFrom(calling_convention.GetRegisterAt(3)));
+}
+
+static void GenFPToFPCall(HInvoke* invoke,
+                          ArmVIXLAssembler* assembler,
+                          CodeGeneratorARMVIXL* codegen,
+                          QuickEntrypointEnum entry) {
+  LocationSummary* const locations = invoke->GetLocations();
+
+  DCHECK_EQ(invoke->GetNumberOfArguments(), 1U);
+  DCHECK(locations->WillCall() && locations->Intrinsified());
+
+  // Native code uses the soft float ABI.
+  __ Vmov(RegisterFrom(locations->GetTemp(0)),
+          RegisterFrom(locations->GetTemp(1)),
+          InputDRegisterAt(invoke, 0));
+  codegen->InvokeRuntime(entry, invoke, invoke->GetDexPc());
+  __ Vmov(OutputDRegister(invoke),
+          RegisterFrom(locations->GetTemp(0)),
+          RegisterFrom(locations->GetTemp(1)));
+}
+
+static void GenFPFPToFPCall(HInvoke* invoke,
+                            ArmVIXLAssembler* assembler,
+                            CodeGeneratorARMVIXL* codegen,
+                            QuickEntrypointEnum entry) {
+  LocationSummary* const locations = invoke->GetLocations();
+
+  DCHECK_EQ(invoke->GetNumberOfArguments(), 2U);
+  DCHECK(locations->WillCall() && locations->Intrinsified());
+
+  // Native code uses the soft float ABI.
+  __ Vmov(RegisterFrom(locations->GetTemp(0)),
+          RegisterFrom(locations->GetTemp(1)),
+          InputDRegisterAt(invoke, 0));
+  __ Vmov(RegisterFrom(locations->GetTemp(2)),
+          RegisterFrom(locations->GetTemp(3)),
+          InputDRegisterAt(invoke, 1));
+  codegen->InvokeRuntime(entry, invoke, invoke->GetDexPc());
+  __ Vmov(OutputDRegister(invoke),
+          RegisterFrom(locations->GetTemp(0)),
+          RegisterFrom(locations->GetTemp(1)));
+}
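
The Vmov pairs above only marshal the 64-bit value between a D register and a core-register pair, as the soft-float calling convention requires. A minimal sketch of that split, assuming a little-endian AAPCS target (SplitDouble is a hypothetical helper, not part of the patch):

    #include <cstdint>
    #include <cstring>

    // Splits a double into the two 32-bit halves that Vmov places into the core-register
    // pair: low word first, high word second on little-endian targets.
    static void SplitDouble(double value, uint32_t* lo, uint32_t* hi) {
      uint64_t bits;
      std::memcpy(&bits, &value, sizeof(bits));
      *lo = static_cast<uint32_t>(bits);
      *hi = static_cast<uint32_t>(bits >> 32);
    }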
+
+void IntrinsicLocationsBuilderARMVIXL::VisitMathCos(HInvoke* invoke) {
+  CreateFPToFPCallLocations(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorARMVIXL::VisitMathCos(HInvoke* invoke) {
+  GenFPToFPCall(invoke, GetAssembler(), codegen_, kQuickCos);
+}
+
+void IntrinsicLocationsBuilderARMVIXL::VisitMathSin(HInvoke* invoke) {
+  CreateFPToFPCallLocations(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorARMVIXL::VisitMathSin(HInvoke* invoke) {
+  GenFPToFPCall(invoke, GetAssembler(), codegen_, kQuickSin);
+}
+
+void IntrinsicLocationsBuilderARMVIXL::VisitMathAcos(HInvoke* invoke) {
+  CreateFPToFPCallLocations(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorARMVIXL::VisitMathAcos(HInvoke* invoke) {
+  GenFPToFPCall(invoke, GetAssembler(), codegen_, kQuickAcos);
+}
+
+void IntrinsicLocationsBuilderARMVIXL::VisitMathAsin(HInvoke* invoke) {
+  CreateFPToFPCallLocations(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorARMVIXL::VisitMathAsin(HInvoke* invoke) {
+  GenFPToFPCall(invoke, GetAssembler(), codegen_, kQuickAsin);
+}
+
+void IntrinsicLocationsBuilderARMVIXL::VisitMathAtan(HInvoke* invoke) {
+  CreateFPToFPCallLocations(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorARMVIXL::VisitMathAtan(HInvoke* invoke) {
+  GenFPToFPCall(invoke, GetAssembler(), codegen_, kQuickAtan);
+}
+
+void IntrinsicLocationsBuilderARMVIXL::VisitMathCbrt(HInvoke* invoke) {
+  CreateFPToFPCallLocations(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorARMVIXL::VisitMathCbrt(HInvoke* invoke) {
+  GenFPToFPCall(invoke, GetAssembler(), codegen_, kQuickCbrt);
+}
+
+void IntrinsicLocationsBuilderARMVIXL::VisitMathCosh(HInvoke* invoke) {
+  CreateFPToFPCallLocations(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorARMVIXL::VisitMathCosh(HInvoke* invoke) {
+  GenFPToFPCall(invoke, GetAssembler(), codegen_, kQuickCosh);
+}
+
+void IntrinsicLocationsBuilderARMVIXL::VisitMathExp(HInvoke* invoke) {
+  CreateFPToFPCallLocations(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorARMVIXL::VisitMathExp(HInvoke* invoke) {
+  GenFPToFPCall(invoke, GetAssembler(), codegen_, kQuickExp);
+}
+
+void IntrinsicLocationsBuilderARMVIXL::VisitMathExpm1(HInvoke* invoke) {
+  CreateFPToFPCallLocations(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorARMVIXL::VisitMathExpm1(HInvoke* invoke) {
+  GenFPToFPCall(invoke, GetAssembler(), codegen_, kQuickExpm1);
+}
+
+void IntrinsicLocationsBuilderARMVIXL::VisitMathLog(HInvoke* invoke) {
+  CreateFPToFPCallLocations(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorARMVIXL::VisitMathLog(HInvoke* invoke) {
+  GenFPToFPCall(invoke, GetAssembler(), codegen_, kQuickLog);
+}
+
+void IntrinsicLocationsBuilderARMVIXL::VisitMathLog10(HInvoke* invoke) {
+  CreateFPToFPCallLocations(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorARMVIXL::VisitMathLog10(HInvoke* invoke) {
+  GenFPToFPCall(invoke, GetAssembler(), codegen_, kQuickLog10);
+}
+
+void IntrinsicLocationsBuilderARMVIXL::VisitMathSinh(HInvoke* invoke) {
+  CreateFPToFPCallLocations(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorARMVIXL::VisitMathSinh(HInvoke* invoke) {
+  GenFPToFPCall(invoke, GetAssembler(), codegen_, kQuickSinh);
+}
+
+void IntrinsicLocationsBuilderARMVIXL::VisitMathTan(HInvoke* invoke) {
+  CreateFPToFPCallLocations(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorARMVIXL::VisitMathTan(HInvoke* invoke) {
+  GenFPToFPCall(invoke, GetAssembler(), codegen_, kQuickTan);
+}
+
+void IntrinsicLocationsBuilderARMVIXL::VisitMathTanh(HInvoke* invoke) {
+  CreateFPToFPCallLocations(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorARMVIXL::VisitMathTanh(HInvoke* invoke) {
+  GenFPToFPCall(invoke, GetAssembler(), codegen_, kQuickTanh);
+}
+
+void IntrinsicLocationsBuilderARMVIXL::VisitMathAtan2(HInvoke* invoke) {
+  CreateFPFPToFPCallLocations(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorARMVIXL::VisitMathAtan2(HInvoke* invoke) {
+  GenFPFPToFPCall(invoke, GetAssembler(), codegen_, kQuickAtan2);
+}
+
+void IntrinsicLocationsBuilderARMVIXL::VisitMathHypot(HInvoke* invoke) {
+  CreateFPFPToFPCallLocations(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorARMVIXL::VisitMathHypot(HInvoke* invoke) {
+  GenFPFPToFPCall(invoke, GetAssembler(), codegen_, kQuickHypot);
+}
+
+void IntrinsicLocationsBuilderARMVIXL::VisitMathNextAfter(HInvoke* invoke) {
+  CreateFPFPToFPCallLocations(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorARMVIXL::VisitMathNextAfter(HInvoke* invoke) {
+  GenFPFPToFPCall(invoke, GetAssembler(), codegen_, kQuickNextAfter);
+}
+
+void IntrinsicLocationsBuilderARMVIXL::VisitIntegerReverse(HInvoke* invoke) {
+  CreateIntToIntLocations(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorARMVIXL::VisitIntegerReverse(HInvoke* invoke) {
+  ArmVIXLAssembler* assembler = GetAssembler();
+  __ Rbit(OutputRegister(invoke), InputRegisterAt(invoke, 0));
+}
+
+void IntrinsicLocationsBuilderARMVIXL::VisitLongReverse(HInvoke* invoke) {
+  LocationSummary* locations = new (arena_) LocationSummary(invoke,
+                                                            LocationSummary::kNoCall,
+                                                            kIntrinsified);
+  locations->SetInAt(0, Location::RequiresRegister());
+  locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
+}
+
+void IntrinsicCodeGeneratorARMVIXL::VisitLongReverse(HInvoke* invoke) {
+  ArmVIXLAssembler* assembler = GetAssembler();
+  LocationSummary* locations = invoke->GetLocations();
+
+  vixl32::Register in_reg_lo  = LowRegisterFrom(locations->InAt(0));
+  vixl32::Register in_reg_hi  = HighRegisterFrom(locations->InAt(0));
+  vixl32::Register out_reg_lo = LowRegisterFrom(locations->Out());
+  vixl32::Register out_reg_hi = HighRegisterFrom(locations->Out());
+
+  __ Rbit(out_reg_lo, in_reg_hi);
+  __ Rbit(out_reg_hi, in_reg_lo);
+}
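
The two Rbit instructions above use the fact that reversing 64 bits is the same as bit-reversing each 32-bit half and swapping the halves. A minimal C++ sketch of that identity (ReverseBits64 is a hypothetical helper, not ART code):

    #include <cstdint>

    static uint64_t ReverseBits64(uint64_t value) {
      auto rbit32 = [](uint32_t v) {       // Scalar equivalent of one RBIT.
        uint32_t r = 0;
        for (int i = 0; i < 32; ++i) {
          r = (r << 1) | (v & 1u);
          v >>= 1;
        }
        return r;
      };
      uint32_t lo = static_cast<uint32_t>(value);
      uint32_t hi = static_cast<uint32_t>(value >> 32);
      // Reversed low half becomes the high word, and vice versa (out_lo = Rbit(in_hi), out_hi = Rbit(in_lo)).
      return (static_cast<uint64_t>(rbit32(lo)) << 32) | rbit32(hi);
    }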
+
+void IntrinsicLocationsBuilderARMVIXL::VisitIntegerReverseBytes(HInvoke* invoke) {
+  CreateIntToIntLocations(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorARMVIXL::VisitIntegerReverseBytes(HInvoke* invoke) {
+  ArmVIXLAssembler* assembler = GetAssembler();
+  __ Rev(OutputRegister(invoke), InputRegisterAt(invoke, 0));
+}
+
+void IntrinsicLocationsBuilderARMVIXL::VisitLongReverseBytes(HInvoke* invoke) {
+  LocationSummary* locations = new (arena_) LocationSummary(invoke,
+                                                            LocationSummary::kNoCall,
+                                                            kIntrinsified);
+  locations->SetInAt(0, Location::RequiresRegister());
+  locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
+}
+
+void IntrinsicCodeGeneratorARMVIXL::VisitLongReverseBytes(HInvoke* invoke) {
+  ArmVIXLAssembler* assembler = GetAssembler();
+  LocationSummary* locations = invoke->GetLocations();
+
+  vixl32::Register in_reg_lo  = LowRegisterFrom(locations->InAt(0));
+  vixl32::Register in_reg_hi  = HighRegisterFrom(locations->InAt(0));
+  vixl32::Register out_reg_lo = LowRegisterFrom(locations->Out());
+  vixl32::Register out_reg_hi = HighRegisterFrom(locations->Out());
+
+  __ Rev(out_reg_lo, in_reg_hi);
+  __ Rev(out_reg_hi, in_reg_lo);
+}
+
+void IntrinsicLocationsBuilderARMVIXL::VisitShortReverseBytes(HInvoke* invoke) {
+  CreateIntToIntLocations(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorARMVIXL::VisitShortReverseBytes(HInvoke* invoke) {
+  ArmVIXLAssembler* assembler = GetAssembler();
+  __ Revsh(OutputRegister(invoke), InputRegisterAt(invoke, 0));
+}
+
+static void GenBitCount(HInvoke* instr, Primitive::Type type, ArmVIXLAssembler* assembler) {
+  DCHECK(Primitive::IsIntOrLongType(type)) << type;
+  DCHECK_EQ(instr->GetType(), Primitive::kPrimInt);
+  DCHECK_EQ(Primitive::PrimitiveKind(instr->InputAt(0)->GetType()), type);
+
+  bool is_long = type == Primitive::kPrimLong;
+  LocationSummary* locations = instr->GetLocations();
+  Location in = locations->InAt(0);
+  vixl32::Register src_0 = is_long ? LowRegisterFrom(in) : RegisterFrom(in);
+  vixl32::Register src_1 = is_long ? HighRegisterFrom(in) : src_0;
+  vixl32::SRegister tmp_s = LowSRegisterFrom(locations->GetTemp(0));
+  vixl32::DRegister tmp_d = DRegisterFrom(locations->GetTemp(0));
+  vixl32::Register  out_r = OutputRegister(instr);
+
+  // Move data from core register(s) to temp D-reg for bit count calculation, then move back.
+  // According to Cortex A57 and A72 optimization guides, compared to transferring to full D-reg,
+  // transferring data from core reg to upper or lower half of vfp D-reg incurs extra latency.
+  // That's why, for integer bit count, we use 'vmov d0, r0, r0' instead of 'vmov d0[0], r0'.
+  __ Vmov(tmp_d, src_1, src_0);     // Temp DReg |--src_1|--src_0|
+  __ Vcnt(Untyped8, tmp_d, tmp_d);  // Temp DReg |c|c|c|c|c|c|c|c|
+  __ Vpaddl(U8, tmp_d, tmp_d);      // Temp DReg |--c|--c|--c|--c|
+  __ Vpaddl(U16, tmp_d, tmp_d);     // Temp DReg |------c|------c|
+  if (is_long) {
+    __ Vpaddl(U32, tmp_d, tmp_d);   // Temp DReg |--------------c|
+  }
+  __ Vmov(out_r, tmp_s);
+}
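
A minimal scalar sketch of the VCNT/VPADDL sequence above, assuming GCC/Clang builtins are available (BitCount64 and PairwiseAddLanes are hypothetical helpers, not ART code):

    #include <cstdint>

    // Adds adjacent lanes of `lane_bits` bits into lanes of 2 * lane_bits bits,
    // mirroring what VPADDL does across a 64-bit D register.
    static uint64_t PairwiseAddLanes(uint64_t v, int lane_bits) {
      uint64_t result = 0;
      uint64_t mask = (uint64_t{1} << lane_bits) - 1;
      for (int shift = 0; shift < 64; shift += 2 * lane_bits) {
        uint64_t lo = (v >> shift) & mask;
        uint64_t hi = (v >> (shift + lane_bits)) & mask;
        result |= (lo + hi) << shift;
      }
      return result;
    }

    static uint32_t BitCount64(uint64_t value) {
      uint64_t d = 0;
      for (int byte = 0; byte < 8; ++byte) {  // VCNT.8: per-byte population count.
        d |= static_cast<uint64_t>(__builtin_popcount((value >> (byte * 8)) & 0xffu)) << (byte * 8);
      }
      d = PairwiseAddLanes(d, 8);    // VPADDL.U8
      d = PairwiseAddLanes(d, 16);   // VPADDL.U16
      d = PairwiseAddLanes(d, 32);   // VPADDL.U32 (long case only)
      return static_cast<uint32_t>(d);
    }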
+
+void IntrinsicLocationsBuilderARMVIXL::VisitIntegerBitCount(HInvoke* invoke) {
+  CreateIntToIntLocations(arena_, invoke);
+  invoke->GetLocations()->AddTemp(Location::RequiresFpuRegister());
+}
+
+void IntrinsicCodeGeneratorARMVIXL::VisitIntegerBitCount(HInvoke* invoke) {
+  GenBitCount(invoke, Primitive::kPrimInt, GetAssembler());
+}
+
+void IntrinsicLocationsBuilderARMVIXL::VisitLongBitCount(HInvoke* invoke) {
+  VisitIntegerBitCount(invoke);
+}
+
+void IntrinsicCodeGeneratorARMVIXL::VisitLongBitCount(HInvoke* invoke) {
+  GenBitCount(invoke, Primitive::kPrimLong, GetAssembler());
+}
+
+void IntrinsicLocationsBuilderARMVIXL::VisitStringGetCharsNoCheck(HInvoke* invoke) {
+  LocationSummary* locations = new (arena_) LocationSummary(invoke,
+                                                            LocationSummary::kNoCall,
+                                                            kIntrinsified);
+  locations->SetInAt(0, Location::RequiresRegister());
+  locations->SetInAt(1, Location::RequiresRegister());
+  locations->SetInAt(2, Location::RequiresRegister());
+  locations->SetInAt(3, Location::RequiresRegister());
+  locations->SetInAt(4, Location::RequiresRegister());
+
+  // Temporary registers to store lengths of strings and for calculations.
+  locations->AddTemp(Location::RequiresRegister());
+  locations->AddTemp(Location::RequiresRegister());
+  locations->AddTemp(Location::RequiresRegister());
+}
+
+void IntrinsicCodeGeneratorARMVIXL::VisitStringGetCharsNoCheck(HInvoke* invoke) {
+  ArmVIXLAssembler* assembler = GetAssembler();
+  LocationSummary* locations = invoke->GetLocations();
+
+  // Check assumption that sizeof(Char) is 2 (used in scaling below).
+  const size_t char_size = Primitive::ComponentSize(Primitive::kPrimChar);
+  DCHECK_EQ(char_size, 2u);
+
+  // Location of data in char array buffer.
+  const uint32_t data_offset = mirror::Array::DataOffset(char_size).Uint32Value();
+
+  // Location of char array data in string.
+  const uint32_t value_offset = mirror::String::ValueOffset().Uint32Value();
+
+  // void getCharsNoCheck(int srcBegin, int srcEnd, char[] dst, int dstBegin);
+  // Since getChars() calls getCharsNoCheck(), we use registers rather than constants.
+  vixl32::Register srcObj = InputRegisterAt(invoke, 0);
+  vixl32::Register srcBegin = InputRegisterAt(invoke, 1);
+  vixl32::Register srcEnd = InputRegisterAt(invoke, 2);
+  vixl32::Register dstObj = InputRegisterAt(invoke, 3);
+  vixl32::Register dstBegin = InputRegisterAt(invoke, 4);
+
+  vixl32::Register num_chr = RegisterFrom(locations->GetTemp(0));
+  vixl32::Register src_ptr = RegisterFrom(locations->GetTemp(1));
+  vixl32::Register dst_ptr = RegisterFrom(locations->GetTemp(2));
+
+  vixl32::Label done, compressed_string_loop;
+  // Compute the starting address of dst to be copied to.
+  __ Add(dst_ptr, dstObj, data_offset);
+  __ Add(dst_ptr, dst_ptr, Operand(dstBegin, vixl32::LSL, 1));
+
+  __ Subs(num_chr, srcEnd, srcBegin);
+  // Early out for valid zero-length retrievals.
+  __ B(eq, &done);
+
+  // src range to copy.
+  __ Add(src_ptr, srcObj, value_offset);
+
+  UseScratchRegisterScope temps(assembler->GetVIXLAssembler());
+  vixl32::Register temp;
+  vixl32::Label compressed_string_preloop;
+  if (mirror::kUseStringCompression) {
+    // Location of count in string.
+    const uint32_t count_offset = mirror::String::CountOffset().Uint32Value();
+    temp = temps.Acquire();
+    // String's length.
+    __ Ldr(temp, MemOperand(srcObj, count_offset));
+    __ Tst(temp, 1);
+    temps.Release(temp);
+    __ B(eq, &compressed_string_preloop);
+  }
+  __ Add(src_ptr, src_ptr, Operand(srcBegin, vixl32::LSL, 1));
+
+  // Do the copy.
+  vixl32::Label loop, remainder;
+
+  temp = temps.Acquire();
+  // Subtract into a temp so that num_chr does not need repairing on the < 4 character path.
+  __ Subs(temp, num_chr, 4);
+  __ B(lt, &remainder);
+
+  // Keep the result of the earlier subs; we are going to fetch at least 4 characters.
+  __ Mov(num_chr, temp);
+
+  // Main loop, used for longer fetches, loads and stores 4x16-bit characters at a time.
+  // (LDRD/STRD fault on unaligned addresses and it's not worth inlining extra code
+  // to rectify these everywhere this intrinsic applies.)
+  __ Bind(&loop);
+  __ Ldr(temp, MemOperand(src_ptr, char_size * 2));
+  __ Subs(num_chr, num_chr, 4);
+  __ Str(temp, MemOperand(dst_ptr, char_size * 2));
+  __ Ldr(temp, MemOperand(src_ptr, char_size * 4, PostIndex));
+  __ Str(temp, MemOperand(dst_ptr, char_size * 4, PostIndex));
+  temps.Release(temp);
+  __ B(ge, &loop);
+
+  __ Adds(num_chr, num_chr, 4);
+  __ B(eq, &done);
+
+  // Loop for the < 4 character case and remainder handling. Loads and stores one
+  // 16-bit Java character at a time.
+  __ Bind(&remainder);
+  temp = temps.Acquire();
+  __ Ldrh(temp, MemOperand(src_ptr, char_size, PostIndex));
+  __ Subs(num_chr, num_chr, 1);
+  __ Strh(temp, MemOperand(dst_ptr, char_size, PostIndex));
+  temps.Release(temp);
+  __ B(gt, &remainder);
+
+  if (mirror::kUseStringCompression) {
+    __ B(&done);
+
+    const size_t c_char_size = Primitive::ComponentSize(Primitive::kPrimByte);
+    DCHECK_EQ(c_char_size, 1u);
+    // Copy loop for compressed src, widening one 8-bit character to 16 bits at a time.
+    __ Bind(&compressed_string_preloop);
+    __ Add(src_ptr, src_ptr, srcBegin);
+    __ Bind(&compressed_string_loop);
+    temp = temps.Acquire();
+    __ Ldrb(temp, MemOperand(src_ptr, c_char_size, PostIndex));
+    __ Strh(temp, MemOperand(dst_ptr, char_size, PostIndex));
+    temps.Release(temp);
+    __ Subs(num_chr, num_chr, 1);
+    __ B(gt, &compressed_string_loop);
+  }
+
+  __ Bind(&done);
+}
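
A minimal C++ sketch of the overall copy strategy generated above, assuming the compression flag has already been read from String.count (GetCharsSketch is a hypothetical helper, not ART code):

    #include <cstdint>
    #include <cstring>

    // `compressed` mirrors the low bit of String.count (0 = compressed, per StringCompressionFlag).
    static void GetCharsSketch(const void* src_data, bool compressed,
                               int32_t src_begin, int32_t src_end,
                               uint16_t* dst, int32_t dst_begin) {
      int32_t num_chr = src_end - src_begin;
      uint16_t* out = dst + dst_begin;
      if (compressed) {
        const uint8_t* in = static_cast<const uint8_t*>(src_data) + src_begin;
        for (int32_t i = 0; i < num_chr; ++i) {
          out[i] = in[i];                     // Widen each 8-bit character to 16 bits.
        }
        return;
      }
      const uint16_t* in = static_cast<const uint16_t*>(src_data) + src_begin;
      int32_t i = 0;
      for (; i + 4 <= num_chr; i += 4) {      // Main loop: 4 characters (two words) per iteration.
        std::memcpy(out + i, in + i, 4 * sizeof(uint16_t));
      }
      for (; i < num_chr; ++i) {              // Remainder: one character at a time.
        out[i] = in[i];
      }
    }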
+
+void IntrinsicLocationsBuilderARMVIXL::VisitFloatIsInfinite(HInvoke* invoke) {
+  CreateFPToIntLocations(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorARMVIXL::VisitFloatIsInfinite(HInvoke* invoke) {
+  ArmVIXLAssembler* const assembler = GetAssembler();
+  const vixl32::Register out = OutputRegister(invoke);
+  // Shifting left by 1 bit makes the value encodable as an immediate operand;
+  // we don't care about the sign bit anyway.
+  constexpr uint32_t infinity = kPositiveInfinityFloat << 1U;
+
+  __ Vmov(out, InputSRegisterAt(invoke, 0));
+  // We don't care about the sign bit, so shift left.
+  __ Lsl(out, out, 1);
+  __ Eor(out, out, infinity);
+  // If the result is 0, then it has 32 leading zeros, and less than that otherwise.
+  __ Clz(out, out);
+  // Any number less than 32 logically shifted right by 5 bits results in 0;
+  // the same operation on 32 yields 1.
+  __ Lsr(out, out, 5);
+}
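
A minimal C++ sketch of the same shift/xor/clz trick, assuming kPositiveInfinityFloat is the usual 0x7f800000 single-precision +infinity pattern (FloatIsInfiniteSketch is a hypothetical helper, not ART code):

    #include <cstdint>
    #include <cstring>

    static bool FloatIsInfiniteSketch(float value) {
      uint32_t bits;
      std::memcpy(&bits, &value, sizeof(bits));
      uint32_t x = (bits << 1) ^ (0x7f800000u << 1);          // Lsl #1 drops the sign bit; Eor.
      // Guard because __builtin_clz(0) is undefined in C++, whereas the ARM CLZ of 0 yields 32.
      int leading_zeros = (x == 0) ? 32 : __builtin_clz(x);   // Clz.
      return (leading_zeros >> 5) != 0;                       // Lsr #5: 1 only for 32 leading zeros.
    }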
+
+void IntrinsicLocationsBuilderARMVIXL::VisitDoubleIsInfinite(HInvoke* invoke) {
+  CreateFPToIntLocations(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorARMVIXL::VisitDoubleIsInfinite(HInvoke* invoke) {
+  ArmVIXLAssembler* const assembler = GetAssembler();
+  const vixl32::Register out = OutputRegister(invoke);
+  UseScratchRegisterScope temps(assembler->GetVIXLAssembler());
+  const vixl32::Register temp = temps.Acquire();
+  // The highest 32 bits of double precision positive infinity separated into
+  // two constants encodable as immediate operands.
+  constexpr uint32_t infinity_high  = 0x7f000000U;
+  constexpr uint32_t infinity_high2 = 0x00f00000U;
+
+  static_assert((infinity_high | infinity_high2) ==
+                    static_cast<uint32_t>(kPositiveInfinityDouble >> 32U),
+                "The constants do not add up to the high 32 bits of double "
+                "precision positive infinity.");
+  __ Vmov(temp, out, InputDRegisterAt(invoke, 0));
+  __ Eor(out, out, infinity_high);
+  __ Eor(out, out, infinity_high2);
+  // We don't care about the sign bit, so shift left.
+  __ Orr(out, temp, Operand(out, vixl32::LSL, 1));
+  // If the result is 0, then it has 32 leading zeros, and less than that otherwise.
+  __ Clz(out, out);
+  // Any number less than 32 logically shifted right by 5 bits results in 0;
+  // the same operation on 32 yields 1.
+  __ Lsr(out, out, 5);
+}
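
A minimal C++ sketch of the double-precision variant, assuming the two immediates combine to 0x7ff00000 as the static_assert above checks (DoubleIsInfiniteSketch is a hypothetical helper, not ART code):

    #include <cstdint>
    #include <cstring>

    static bool DoubleIsInfiniteSketch(double value) {
      uint64_t bits;
      std::memcpy(&bits, &value, sizeof(bits));
      uint32_t lo = static_cast<uint32_t>(bits);
      uint32_t hi = static_cast<uint32_t>(bits >> 32);
      // 0x7f000000 | 0x00f00000 == 0x7ff00000, the high word of +infinity; the shift drops the sign bit.
      uint32_t x = lo | ((hi ^ 0x7f000000u ^ 0x00f00000u) << 1);
      int leading_zeros = (x == 0) ? 32 : __builtin_clz(x);   // Clz; guard for the zero case.
      return (leading_zeros >> 5) != 0;                       // Lsr #5.
    }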
+
+UNIMPLEMENTED_INTRINSIC(ARMVIXL, MathMinDoubleDouble)
+UNIMPLEMENTED_INTRINSIC(ARMVIXL, MathMinFloatFloat)
+UNIMPLEMENTED_INTRINSIC(ARMVIXL, MathMaxDoubleDouble)
+UNIMPLEMENTED_INTRINSIC(ARMVIXL, MathMaxFloatFloat)
+UNIMPLEMENTED_INTRINSIC(ARMVIXL, MathMinLongLong)
+UNIMPLEMENTED_INTRINSIC(ARMVIXL, MathMaxLongLong)
+UNIMPLEMENTED_INTRINSIC(ARMVIXL, MathCeil)          // Could be done by changing rounding mode, maybe?
+UNIMPLEMENTED_INTRINSIC(ARMVIXL, MathFloor)         // Could be done by changing rounding mode, maybe?
+UNIMPLEMENTED_INTRINSIC(ARMVIXL, MathRint)
+UNIMPLEMENTED_INTRINSIC(ARMVIXL, MathRoundDouble)   // Could be done by changing rounding mode, maybe?
+UNIMPLEMENTED_INTRINSIC(ARMVIXL, MathRoundFloat)    // Could be done by changing rounding mode, maybe?
+UNIMPLEMENTED_INTRINSIC(ARMVIXL, UnsafeCASLong)     // High register pressure.
+UNIMPLEMENTED_INTRINSIC(ARMVIXL, SystemArrayCopyChar)
+UNIMPLEMENTED_INTRINSIC(ARMVIXL, ReferenceGetReferent)
+UNIMPLEMENTED_INTRINSIC(ARMVIXL, IntegerHighestOneBit)
+UNIMPLEMENTED_INTRINSIC(ARMVIXL, LongHighestOneBit)
+UNIMPLEMENTED_INTRINSIC(ARMVIXL, IntegerLowestOneBit)
+UNIMPLEMENTED_INTRINSIC(ARMVIXL, LongLowestOneBit)
+
+UNIMPLEMENTED_INTRINSIC(ARMVIXL, StringStringIndexOf);
+UNIMPLEMENTED_INTRINSIC(ARMVIXL, StringStringIndexOfAfter);
+
+// 1.8.
+UNIMPLEMENTED_INTRINSIC(ARMVIXL, UnsafeGetAndAddInt)
+UNIMPLEMENTED_INTRINSIC(ARMVIXL, UnsafeGetAndAddLong)
+UNIMPLEMENTED_INTRINSIC(ARMVIXL, UnsafeGetAndSetInt)
+UNIMPLEMENTED_INTRINSIC(ARMVIXL, UnsafeGetAndSetLong)
+UNIMPLEMENTED_INTRINSIC(ARMVIXL, UnsafeGetAndSetObject)
+
+UNREACHABLE_INTRINSICS(ARMVIXL)
+
+#undef __
+
+}  // namespace arm
+}  // namespace art
diff --git a/compiler/optimizing/intrinsics_arm_vixl.h b/compiler/optimizing/intrinsics_arm_vixl.h
new file mode 100644
index 0000000..6e79cb7
--- /dev/null
+++ b/compiler/optimizing/intrinsics_arm_vixl.h
@@ -0,0 +1,81 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_OPTIMIZING_INTRINSICS_ARM_VIXL_H_
+#define ART_COMPILER_OPTIMIZING_INTRINSICS_ARM_VIXL_H_
+
+#include "intrinsics.h"
+#include "utils/arm/assembler_arm_vixl.h"
+
+namespace art {
+
+namespace arm {
+
+class ArmVIXLAssembler;
+class CodeGeneratorARMVIXL;
+
+class IntrinsicLocationsBuilderARMVIXL FINAL : public IntrinsicVisitor {
+ public:
+  explicit IntrinsicLocationsBuilderARMVIXL(CodeGeneratorARMVIXL* codegen);
+
+  // Define visitor methods.
+
+#define OPTIMIZING_INTRINSICS(Name, IsStatic, NeedsEnvironmentOrCache, SideEffects, Exceptions, ...) \
+  void Visit ## Name(HInvoke* invoke) OVERRIDE;
+#include "intrinsics_list.h"
+INTRINSICS_LIST(OPTIMIZING_INTRINSICS)
+#undef INTRINSICS_LIST
+#undef OPTIMIZING_INTRINSICS
+
+  // Check whether an invoke is an intrinsic, and if so, create a location summary. Returns whether
+  // a corresponding LocationSummary with the intrinsified_ flag set was generated and attached to
+  // the invoke.
+  bool TryDispatch(HInvoke* invoke);
+
+ private:
+  ArenaAllocator* arena_;
+  ArmVIXLAssembler* assembler_;
+  const ArmInstructionSetFeatures& features_;
+
+  DISALLOW_COPY_AND_ASSIGN(IntrinsicLocationsBuilderARMVIXL);
+};
+
+class IntrinsicCodeGeneratorARMVIXL FINAL : public IntrinsicVisitor {
+ public:
+  explicit IntrinsicCodeGeneratorARMVIXL(CodeGeneratorARMVIXL* codegen) : codegen_(codegen) {}
+
+  // Define visitor methods.
+
+#define OPTIMIZING_INTRINSICS(Name, IsStatic, NeedsEnvironmentOrCache, SideEffects, Exceptions, ...) \
+  void Visit ## Name(HInvoke* invoke) OVERRIDE;
+#include "intrinsics_list.h"
+INTRINSICS_LIST(OPTIMIZING_INTRINSICS)
+#undef INTRINSICS_LIST
+#undef OPTIMIZING_INTRINSICS
+
+ private:
+  ArenaAllocator* GetAllocator();
+  ArmVIXLAssembler* GetAssembler();
+
+  CodeGeneratorARMVIXL* codegen_;
+
+  DISALLOW_COPY_AND_ASSIGN(IntrinsicCodeGeneratorARMVIXL);
+};
+
+}  // namespace arm
+}  // namespace art
+
+#endif  // ART_COMPILER_OPTIMIZING_INTRINSICS_ARM_VIXL_H_
diff --git a/compiler/optimizing/intrinsics_mips.cc b/compiler/optimizing/intrinsics_mips.cc
index 5239f8f..7c81588 100644
--- a/compiler/optimizing/intrinsics_mips.cc
+++ b/compiler/optimizing/intrinsics_mips.cc
@@ -2495,6 +2495,9 @@
 UNIMPLEMENTED_INTRINSIC(MIPS, MathTan)
 UNIMPLEMENTED_INTRINSIC(MIPS, MathTanh)
 
+UNIMPLEMENTED_INTRINSIC(MIPS, StringStringIndexOf);
+UNIMPLEMENTED_INTRINSIC(MIPS, StringStringIndexOfAfter);
+
 // 1.8.
 UNIMPLEMENTED_INTRINSIC(MIPS, UnsafeGetAndAddInt)
 UNIMPLEMENTED_INTRINSIC(MIPS, UnsafeGetAndAddLong)
diff --git a/compiler/optimizing/intrinsics_mips64.cc b/compiler/optimizing/intrinsics_mips64.cc
index 1d153e2..2d4f417 100644
--- a/compiler/optimizing/intrinsics_mips64.cc
+++ b/compiler/optimizing/intrinsics_mips64.cc
@@ -1947,6 +1947,9 @@
 UNIMPLEMENTED_INTRINSIC(MIPS64, MathTan)
 UNIMPLEMENTED_INTRINSIC(MIPS64, MathTanh)
 
+UNIMPLEMENTED_INTRINSIC(MIPS64, StringStringIndexOf);
+UNIMPLEMENTED_INTRINSIC(MIPS64, StringStringIndexOfAfter);
+
 // 1.8.
 UNIMPLEMENTED_INTRINSIC(MIPS64, UnsafeGetAndAddInt)
 UNIMPLEMENTED_INTRINSIC(MIPS64, UnsafeGetAndAddLong)
diff --git a/compiler/optimizing/intrinsics_x86.cc b/compiler/optimizing/intrinsics_x86.cc
index f41e4d9..06ab46f 100644
--- a/compiler/optimizing/intrinsics_x86.cc
+++ b/compiler/optimizing/intrinsics_x86.cc
@@ -1408,21 +1408,22 @@
   // compression style is decided on alloc.
   __ cmpl(ecx, Address(arg, count_offset));
   __ j(kNotEqual, &return_false);
+  // Return true if strings are empty. Even with string compression `count == 0` means empty.
+  static_assert(static_cast<uint32_t>(mirror::StringCompressionFlag::kCompressed) == 0u,
+                "Expecting 0=compressed, 1=uncompressed");
+  __ jecxz(&return_true);
 
   if (mirror::kUseStringCompression) {
     NearLabel string_uncompressed;
-    // Differ cases into both compressed or both uncompressed. Different compression style
-    // is cut above.
-    __ cmpl(ecx, Immediate(0));
-    __ j(kGreaterEqual, &string_uncompressed);
+    // Extract the length and distinguish the both-compressed from the both-uncompressed case.
+    // Strings with different compression styles were already rejected above.
+    __ shrl(ecx, Immediate(1));
+    __ j(kCarrySet, &string_uncompressed);
     // Divide string length by 2, rounding up, and continue as if uncompressed.
-    // Merge clearing the compression flag (+0x80000000) with +1 for rounding.
-    __ addl(ecx, Immediate(0x80000001));
+    __ addl(ecx, Immediate(1));
     __ shrl(ecx, Immediate(1));
     __ Bind(&string_uncompressed);
   }
-  // Return true if strings are empty.
-  __ jecxz(&return_true);
   // Load starting addresses of string values into ESI/EDI as required for repe_cmpsl instruction.
   __ leal(esi, Address(str, value_offset));
   __ leal(edi, Address(arg, value_offset));
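
The shrl/jecxz sequences above rely on the count-field encoding that the static_asserts and shifts assume: the low bit is the compression flag (0 = compressed) and the length sits in the remaining bits, so count == 0 still means empty. A minimal sketch of that encoding (helper names are hypothetical, not ART code):

    #include <cstdint>

    // count == (length << 1) | flag, with flag 0 = compressed, 1 = uncompressed.
    static int32_t DecodeLength(int32_t count) {
      return static_cast<int32_t>(static_cast<uint32_t>(count) >> 1);
    }
    static bool IsCompressed(int32_t count) { return (count & 1) == 0; }
    static bool IsEmpty(int32_t count) { return count == 0; }  // Holds for either style.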
@@ -1535,21 +1536,24 @@
   // Location of count within the String object.
   int32_t count_offset = mirror::String::CountOffset().Int32Value();
 
-  // Load string length, i.e., the count field of the string.
+  // Load the count field of the string containing the length and compression flag.
   __ movl(string_length, Address(string_obj, count_offset));
-  if (mirror::kUseStringCompression) {
-    string_length_flagged = locations->GetTemp(2).AsRegister<Register>();
-    __ movl(string_length_flagged, string_length);
-    // Mask out first bit used as compression flag.
-    __ andl(string_length, Immediate(INT32_MAX));
-  }
 
-  // Do a zero-length check.
+  // Do a zero-length check. Even with string compression `count == 0` means empty.
+  static_assert(static_cast<uint32_t>(mirror::StringCompressionFlag::kCompressed) == 0u,
+                "Expecting 0=compressed, 1=uncompressed");
   // TODO: Support jecxz.
   NearLabel not_found_label;
   __ testl(string_length, string_length);
   __ j(kEqual, &not_found_label);
 
+  if (mirror::kUseStringCompression) {
+    string_length_flagged = locations->GetTemp(2).AsRegister<Register>();
+    __ movl(string_length_flagged, string_length);
+    // Extract the length and shift out the least significant bit used as compression flag.
+    __ shrl(string_length, Immediate(1));
+  }
+
   if (start_at_zero) {
     // Number of chars to scan is the same as the string length.
     __ movl(counter, string_length);
@@ -1570,8 +1574,8 @@
 
     if (mirror::kUseStringCompression) {
       NearLabel modify_counter, offset_uncompressed_label;
-      __ cmpl(string_length_flagged, Immediate(0));
-      __ j(kGreaterEqual, &offset_uncompressed_label);
+      __ testl(string_length_flagged, Immediate(1));
+      __ j(kNotZero, &offset_uncompressed_label);
       // Move to the start of the string: string_obj + value_offset + start_index.
       __ leal(string_obj, Address(string_obj, counter, ScaleFactor::TIMES_1, value_offset));
       __ jmp(&modify_counter);
@@ -1593,8 +1597,8 @@
   if (mirror::kUseStringCompression) {
     NearLabel uncompressed_string_comparison;
     NearLabel comparison_done;
-    __ cmpl(string_length_flagged, Immediate(0));
-    __ j(kGreater, &uncompressed_string_comparison);
+    __ testl(string_length_flagged, Immediate(1));
+    __ j(kNotZero, &uncompressed_string_comparison);
 
     // Check if EAX (search_value) is ASCII.
     __ cmpl(search_value, Immediate(127));
@@ -1787,8 +1791,10 @@
     __ cfi().AdjustCFAOffset(stack_adjust);
 
     NearLabel copy_loop, copy_uncompressed;
-    __ cmpl(Address(obj, count_offset), Immediate(0));
-    __ j(kGreaterEqual, &copy_uncompressed);
+    __ testl(Address(obj, count_offset), Immediate(1));
+    static_assert(static_cast<uint32_t>(mirror::StringCompressionFlag::kCompressed) == 0u,
+                  "Expecting 0=compressed, 1=uncompressed");
+    __ j(kNotZero, &copy_uncompressed);
     // Compute the address of the source string by adding the number of chars from
     // the source beginning to the value offset of a string.
     __ leal(ESI, CodeGeneratorX86::ArrayAddress(obj, srcBegin, TIMES_1, value_offset));
@@ -2056,9 +2062,9 @@
       (invoke->GetIntrinsic() == Intrinsics::kUnsafeGetObject ||
        invoke->GetIntrinsic() == Intrinsics::kUnsafeGetObjectVolatile);
   LocationSummary* locations = new (arena) LocationSummary(invoke,
-                                                           can_call ?
-                                                               LocationSummary::kCallOnSlowPath :
-                                                               LocationSummary::kNoCall,
+                                                           (can_call
+                                                                ? LocationSummary::kCallOnSlowPath
+                                                                : LocationSummary::kNoCall),
                                                            kIntrinsified);
   if (can_call && kUseBakerReadBarrier) {
     locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty());  // No caller-save registers.
@@ -2076,7 +2082,7 @@
     }
   } else {
     locations->SetOut(Location::RequiresRegister(),
-                      can_call ? Location::kOutputOverlap : Location::kNoOutputOverlap);
+                      (can_call ? Location::kOutputOverlap : Location::kNoOutputOverlap));
   }
 }
 
@@ -2255,10 +2261,16 @@
   GenUnsafePut(invoke->GetLocations(), Primitive::kPrimLong, /* is_volatile */ true, codegen_);
 }
 
-static void CreateIntIntIntIntIntToInt(ArenaAllocator* arena, Primitive::Type type,
+static void CreateIntIntIntIntIntToInt(ArenaAllocator* arena,
+                                       Primitive::Type type,
                                        HInvoke* invoke) {
+  bool can_call = kEmitCompilerReadBarrier &&
+      kUseBakerReadBarrier &&
+      (invoke->GetIntrinsic() == Intrinsics::kUnsafeCASObject);
   LocationSummary* locations = new (arena) LocationSummary(invoke,
-                                                           LocationSummary::kNoCall,
+                                                           (can_call
+                                                                ? LocationSummary::kCallOnSlowPath
+                                                                : LocationSummary::kNoCall),
                                                            kIntrinsified);
   locations->SetInAt(0, Location::NoLocation());        // Unused receiver.
   locations->SetInAt(1, Location::RequiresRegister());
@@ -2278,7 +2290,8 @@
   // Force a byte register for the output.
   locations->SetOut(Location::RegisterLocation(EAX));
   if (type == Primitive::kPrimNot) {
-    // Need temp registers for card-marking.
+    // Need temporary registers for card-marking, and possibly for
+    // (Baker) read barrier.
     locations->AddTemp(Location::RequiresRegister());  // Possibly used for reference poisoning too.
     // Need a byte register for marking.
     locations->AddTemp(Location::RegisterLocation(ECX));
@@ -2294,14 +2307,9 @@
 }
 
 void IntrinsicLocationsBuilderX86::VisitUnsafeCASObject(HInvoke* invoke) {
-  // The UnsafeCASObject intrinsic is missing a read barrier, and
-  // therefore sometimes does not work as expected (b/25883050).
-  // Turn it off temporarily as a quick fix, until the read barrier is
-  // implemented (see TODO in GenCAS).
-  //
-  // TODO(rpl): Implement read barrier support in GenCAS and re-enable
-  // this intrinsic.
-  if (kEmitCompilerReadBarrier) {
+  // The only read barrier implementation supporting the
+  // UnsafeCASObject intrinsic is the Baker-style read barriers.
+  if (kEmitCompilerReadBarrier && !kUseBakerReadBarrier) {
     return;
   }
 
@@ -2317,7 +2325,18 @@
   Location out = locations->Out();
   DCHECK_EQ(out.AsRegister<Register>(), EAX);
 
+  // The address of the field within the holding object.
+  Address field_addr(base, offset, ScaleFactor::TIMES_1, 0);
+
   if (type == Primitive::kPrimNot) {
+    // The only read barrier implementation supporting the
+    // UnsafeCASObject intrinsic is the Baker-style read barriers.
+    DCHECK(!kEmitCompilerReadBarrier || kUseBakerReadBarrier);
+
+    Location temp1_loc = locations->GetTemp(0);
+    Register temp1 = temp1_loc.AsRegister<Register>();
+    Register temp2 = locations->GetTemp(1).AsRegister<Register>();
+
     Register expected = locations->InAt(3).AsRegister<Register>();
     // Ensure `expected` is in EAX (required by the CMPXCHG instruction).
     DCHECK_EQ(expected, EAX);
@@ -2325,11 +2344,20 @@
 
     // Mark card for object assuming new value is stored.
     bool value_can_be_null = true;  // TODO: Worth finding out this information?
-    codegen->MarkGCCard(locations->GetTemp(0).AsRegister<Register>(),
-                        locations->GetTemp(1).AsRegister<Register>(),
-                        base,
-                        value,
-                        value_can_be_null);
+    codegen->MarkGCCard(temp1, temp2, base, value, value_can_be_null);
+
+    if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
+      // Need to make sure the reference stored in the field is a to-space
+      // one before attempting the CAS or the CAS could fail incorrectly.
+      codegen->GenerateReferenceLoadWithBakerReadBarrier(
+          invoke,
+          temp1_loc,  // Unused, used only as a "temporary" within the read barrier.
+          base,
+          field_addr,
+          /* needs_null_check */ false,
+          /* always_update_field */ true,
+          &temp2);
+    }
 
     bool base_equals_value = (base == value);
     if (kPoisonHeapReferences) {
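
A toy C++ simulation of the failure mode the read barrier call above prevents, assuming a concurrent copying collector where one logical object can have a from-space and a to-space copy (Obj and main are purely illustrative, not ART code):

    #include <atomic>
    #include <cassert>

    struct Obj { int payload; };

    int main() {
      Obj from_space_copy{42};
      Obj to_space_copy{42};
      std::atomic<Obj*> field{&from_space_copy};  // Heap slot still holds the from-space reference.
      Obj* expected = &to_space_copy;             // Compiled code only ever sees to-space references.
      Obj* new_value = nullptr;

      // Without the fix-up the CAS fails spuriously: both pointers denote the same logical
      // object, but the raw values differ.
      assert(!field.compare_exchange_strong(expected, new_value));

      // What `always_update_field` achieves: heal the slot to the to-space reference first,
      // so the subsequent CAS compares like with like.
      field.store(&to_space_copy);
      expected = &to_space_copy;
      assert(field.compare_exchange_strong(expected, new_value));
      return 0;
    }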
@@ -2337,7 +2365,7 @@
         // If `base` and `value` are the same register location, move
         // `value` to a temporary register.  This way, poisoning
         // `value` won't invalidate `base`.
-        value = locations->GetTemp(0).AsRegister<Register>();
+        value = temp1;
         __ movl(value, base);
       }
 
@@ -2356,19 +2384,12 @@
       __ PoisonHeapReference(value);
     }
 
-    // TODO: Add a read barrier for the reference stored in the object
-    // before attempting the CAS, similar to the one in the
-    // art::Unsafe_compareAndSwapObject JNI implementation.
-    //
-    // Note that this code is not (yet) used when read barriers are
-    // enabled (see IntrinsicLocationsBuilderX86::VisitUnsafeCASObject).
-    DCHECK(!kEmitCompilerReadBarrier);
-    __ LockCmpxchgl(Address(base, offset, TIMES_1, 0), value);
+    __ LockCmpxchgl(field_addr, value);
 
     // LOCK CMPXCHG has full barrier semantics, and we don't need
     // scheduling barriers at this time.
 
-    // Convert ZF into the boolean result.
+    // Convert ZF into the Boolean result.
     __ setb(kZero, out.AsRegister<Register>());
     __ movzxb(out.AsRegister<Register>(), out.AsRegister<ByteRegister>());
 
@@ -2392,8 +2413,7 @@
       // Ensure the expected value is in EAX (required by the CMPXCHG
       // instruction).
       DCHECK_EQ(locations->InAt(3).AsRegister<Register>(), EAX);
-      __ LockCmpxchgl(Address(base, offset, TIMES_1, 0),
-                      locations->InAt(4).AsRegister<Register>());
+      __ LockCmpxchgl(field_addr, locations->InAt(4).AsRegister<Register>());
     } else if (type == Primitive::kPrimLong) {
       // Ensure the expected value is in EAX:EDX and that the new
       // value is in EBX:ECX (required by the CMPXCHG8B instruction).
@@ -2401,7 +2421,7 @@
       DCHECK_EQ(locations->InAt(3).AsRegisterPairHigh<Register>(), EDX);
       DCHECK_EQ(locations->InAt(4).AsRegisterPairLow<Register>(), EBX);
       DCHECK_EQ(locations->InAt(4).AsRegisterPairHigh<Register>(), ECX);
-      __ LockCmpxchg8b(Address(base, offset, TIMES_1, 0));
+      __ LockCmpxchg8b(field_addr);
     } else {
       LOG(FATAL) << "Unexpected CAS type " << type;
     }
@@ -2409,7 +2429,7 @@
     // LOCK CMPXCHG/LOCK CMPXCHG8B have full barrier semantics, and we
     // don't need scheduling barriers at this time.
 
-    // Convert ZF into the boolean result.
+    // Convert ZF into the Boolean result.
     __ setb(kZero, out.AsRegister<Register>());
     __ movzxb(out.AsRegister<Register>(), out.AsRegister<ByteRegister>());
   }
@@ -2424,14 +2444,9 @@
 }
 
 void IntrinsicCodeGeneratorX86::VisitUnsafeCASObject(HInvoke* invoke) {
-  // The UnsafeCASObject intrinsic is missing a read barrier, and
-  // therefore sometimes does not work as expected (b/25883050).
-  // Turn it off temporarily as a quick fix, until the read barrier is
-  // implemented (see TODO in GenCAS).
-  //
-  // TODO(rpl): Implement read barrier support in GenCAS and re-enable
-  // this intrinsic.
-  DCHECK(!kEmitCompilerReadBarrier);
+  // The only read barrier implementation supporting the
+  // UnsafeCASObject intrinsic is the Baker-style read barriers.
+  DCHECK(!kEmitCompilerReadBarrier || kUseBakerReadBarrier);
 
   GenCAS(Primitive::kPrimNot, invoke, codegen_);
 }
@@ -3191,7 +3206,7 @@
     //   if (src_ptr != end_ptr) {
     //     uint32_t rb_state = Lockword(src->monitor_).ReadBarrierState();
     //     lfence;  // Load fence or artificial data dependency to prevent load-load reordering
-    //     bool is_gray = (rb_state == ReadBarrier::gray_ptr_);
+    //     bool is_gray = (rb_state == ReadBarrier::GrayState());
     //     if (is_gray) {
     //       // Slow-path copy.
     //       for (size_t i = 0; i != length; ++i) {
@@ -3213,14 +3228,13 @@
     __ j(kEqual, &done);
 
     // Given the numeric representation, it's enough to check the low bit of the rb_state.
-    static_assert(ReadBarrier::white_ptr_ == 0, "Expecting white to have value 0");
-    static_assert(ReadBarrier::gray_ptr_ == 1, "Expecting gray to have value 1");
-    static_assert(ReadBarrier::black_ptr_ == 2, "Expecting black to have value 2");
+    static_assert(ReadBarrier::WhiteState() == 0, "Expecting white to have value 0");
+    static_assert(ReadBarrier::GrayState() == 1, "Expecting gray to have value 1");
     constexpr uint32_t gray_byte_position = LockWord::kReadBarrierStateShift / kBitsPerByte;
     constexpr uint32_t gray_bit_position = LockWord::kReadBarrierStateShift % kBitsPerByte;
     constexpr int32_t test_value = static_cast<int8_t>(1 << gray_bit_position);
 
-    // if (rb_state == ReadBarrier::gray_ptr_)
+    // if (rb_state == ReadBarrier::GrayState())
     //   goto slow_path;
     // At this point, just do the "if" and make sure that flags are preserved until the branch.
     __ testb(Address(src, monitor_offset + gray_byte_position), Immediate(test_value));
@@ -3315,6 +3329,9 @@
 UNIMPLEMENTED_INTRINSIC(X86, IntegerLowestOneBit)
 UNIMPLEMENTED_INTRINSIC(X86, LongLowestOneBit)
 
+UNIMPLEMENTED_INTRINSIC(X86, StringStringIndexOf);
+UNIMPLEMENTED_INTRINSIC(X86, StringStringIndexOfAfter);
+
 // 1.8.
 UNIMPLEMENTED_INTRINSIC(X86, UnsafeGetAndAddInt)
 UNIMPLEMENTED_INTRINSIC(X86, UnsafeGetAndAddLong)
diff --git a/compiler/optimizing/intrinsics_x86_64.cc b/compiler/optimizing/intrinsics_x86_64.cc
index 4b0afca..2ea8670 100644
--- a/compiler/optimizing/intrinsics_x86_64.cc
+++ b/compiler/optimizing/intrinsics_x86_64.cc
@@ -1399,7 +1399,7 @@
     //   if (src_ptr != end_ptr) {
     //     uint32_t rb_state = Lockword(src->monitor_).ReadBarrierState();
     //     lfence;  // Load fence or artificial data dependency to prevent load-load reordering
-    //     bool is_gray = (rb_state == ReadBarrier::gray_ptr_);
+    //     bool is_gray = (rb_state == ReadBarrier::GrayState());
     //     if (is_gray) {
     //       // Slow-path copy.
     //       do {
@@ -1420,14 +1420,13 @@
     __ j(kEqual, &done);
 
     // Given the numeric representation, it's enough to check the low bit of the rb_state.
-    static_assert(ReadBarrier::white_ptr_ == 0, "Expecting white to have value 0");
-    static_assert(ReadBarrier::gray_ptr_ == 1, "Expecting gray to have value 1");
-    static_assert(ReadBarrier::black_ptr_ == 2, "Expecting black to have value 2");
+    static_assert(ReadBarrier::WhiteState() == 0, "Expecting white to have value 0");
+    static_assert(ReadBarrier::GrayState() == 1, "Expecting gray to have value 1");
     constexpr uint32_t gray_byte_position = LockWord::kReadBarrierStateShift / kBitsPerByte;
     constexpr uint32_t gray_bit_position = LockWord::kReadBarrierStateShift % kBitsPerByte;
     constexpr int32_t test_value = static_cast<int8_t>(1 << gray_bit_position);
 
-    // if (rb_state == ReadBarrier::gray_ptr_)
+    // if (rb_state == ReadBarrier::GrayState())
     //   goto slow_path;
     // At this point, just do the "if" and make sure that flags are preserved until the branch.
     __ testb(Address(src, monitor_offset + gray_byte_position), Immediate(test_value));
@@ -1575,20 +1574,23 @@
   // compression style is decided on alloc.
   __ cmpl(rcx, Address(arg, count_offset));
   __ j(kNotEqual, &return_false);
+  // Return true if both strings are empty. Even with string compression `count == 0` means empty.
+  static_assert(static_cast<uint32_t>(mirror::StringCompressionFlag::kCompressed) == 0u,
+                "Expecting 0=compressed, 1=uncompressed");
+  __ jrcxz(&return_true);
 
   if (mirror::kUseStringCompression) {
     NearLabel string_uncompressed;
-    // Both string are compressed.
-    __ cmpl(rcx, Immediate(0));
-    __ j(kGreaterEqual, &string_uncompressed);
+    // Extract the length and distinguish the both-compressed from the both-uncompressed case.
+    // Strings with different compression styles were already rejected above.
+    __ shrl(rcx, Immediate(1));
+    __ j(kCarrySet, &string_uncompressed);
     // Divide string length by 2, rounding up, and continue as if uncompressed.
     // Merge clearing the compression flag with +1 for rounding.
-    __ addl(rcx, Immediate(static_cast<int32_t>(0x80000001)));
+    __ addl(rcx, Immediate(1));
     __ shrl(rcx, Immediate(1));
     __ Bind(&string_uncompressed);
   }
-  // Return true if both strings are empty.
-  __ jrcxz(&return_true);
   // Load starting addresses of string values into RSI/RDI as required for repe_cmpsq instruction.
   __ leal(rsi, Address(str, value_offset));
   __ leal(rdi, Address(arg, value_offset));
@@ -1695,21 +1697,22 @@
   // Location of count within the String object.
   int32_t count_offset = mirror::String::CountOffset().Int32Value();
 
-  // Load string length, i.e., the count field of the string.
+  // Load the count field of the string containing the length and compression flag.
   __ movl(string_length, Address(string_obj, count_offset));
-  if (mirror::kUseStringCompression) {
-    // Use TMP to keep string_length_flagged.
-    __ movl(CpuRegister(TMP), string_length);
-    // Mask out first bit used as compression flag.
-    __ andl(string_length, Immediate(INT32_MAX));
-  }
 
-  // Do a length check.
+  // Do a zero-length check. Even with string compression `count == 0` means empty.
   // TODO: Support jecxz.
   NearLabel not_found_label;
   __ testl(string_length, string_length);
   __ j(kEqual, &not_found_label);
 
+  if (mirror::kUseStringCompression) {
+    // Use TMP to keep string_length_flagged.
+    __ movl(CpuRegister(TMP), string_length);
+    // Extract the length and shift out the least significant bit used as compression flag.
+    __ shrl(string_length, Immediate(1));
+  }
+
   if (start_at_zero) {
     // Number of chars to scan is the same as the string length.
     __ movl(counter, string_length);
@@ -1729,8 +1732,8 @@
 
     if (mirror::kUseStringCompression) {
       NearLabel modify_counter, offset_uncompressed_label;
-      __ cmpl(CpuRegister(TMP), Immediate(0));
-      __ j(kGreaterEqual, &offset_uncompressed_label);
+      __ testl(CpuRegister(TMP), Immediate(1));
+      __ j(kNotZero, &offset_uncompressed_label);
       __ leaq(string_obj, Address(string_obj, counter, ScaleFactor::TIMES_1, value_offset));
       __ jmp(&modify_counter);
       // Move to the start of the string: string_obj + value_offset + 2 * start_index.
@@ -1748,8 +1751,8 @@
   if (mirror::kUseStringCompression) {
     NearLabel uncompressed_string_comparison;
     NearLabel comparison_done;
-    __ cmpl(CpuRegister(TMP), Immediate(0));
-    __ j(kGreater, &uncompressed_string_comparison);
+    __ testl(CpuRegister(TMP), Immediate(1));
+    __ j(kNotZero, &uncompressed_string_comparison);
     // Check if RAX (search_value) is ASCII.
     __ cmpl(search_value, Immediate(127));
     __ j(kGreater, &not_found_label);
@@ -1932,8 +1935,10 @@
     // Location of count in string.
     const uint32_t count_offset = mirror::String::CountOffset().Uint32Value();
 
-    __ cmpl(Address(obj, count_offset), Immediate(0));
-    __ j(kGreaterEqual, &copy_uncompressed);
+    __ testl(Address(obj, count_offset), Immediate(1));
+    static_assert(static_cast<uint32_t>(mirror::StringCompressionFlag::kCompressed) == 0u,
+                  "Expecting 0=compressed, 1=uncompressed");
+    __ j(kNotZero, &copy_uncompressed);
     // Compute the address of the source string by adding the number of chars from
     // the source beginning to the value offset of a string.
     __ leaq(CpuRegister(RSI),
@@ -2172,9 +2177,9 @@
       (invoke->GetIntrinsic() == Intrinsics::kUnsafeGetObject ||
        invoke->GetIntrinsic() == Intrinsics::kUnsafeGetObjectVolatile);
   LocationSummary* locations = new (arena) LocationSummary(invoke,
-                                                           can_call ?
-                                                               LocationSummary::kCallOnSlowPath :
-                                                               LocationSummary::kNoCall,
+                                                           (can_call
+                                                                ? LocationSummary::kCallOnSlowPath
+                                                                : LocationSummary::kNoCall),
                                                            kIntrinsified);
   if (can_call && kUseBakerReadBarrier) {
     locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty());  // No caller-save registers.
@@ -2183,7 +2188,7 @@
   locations->SetInAt(1, Location::RequiresRegister());
   locations->SetInAt(2, Location::RequiresRegister());
   locations->SetOut(Location::RequiresRegister(),
-                    can_call ? Location::kOutputOverlap : Location::kNoOutputOverlap);
+                    (can_call ? Location::kOutputOverlap : Location::kNoOutputOverlap));
 }
 
 void IntrinsicLocationsBuilderX86_64::VisitUnsafeGet(HInvoke* invoke) {
@@ -2333,10 +2338,16 @@
   GenUnsafePut(invoke->GetLocations(), Primitive::kPrimLong, /* is_volatile */ true, codegen_);
 }
 
-static void CreateIntIntIntIntIntToInt(ArenaAllocator* arena, Primitive::Type type,
+static void CreateIntIntIntIntIntToInt(ArenaAllocator* arena,
+                                       Primitive::Type type,
                                        HInvoke* invoke) {
+  bool can_call = kEmitCompilerReadBarrier &&
+      kUseBakerReadBarrier &&
+      (invoke->GetIntrinsic() == Intrinsics::kUnsafeCASObject);
   LocationSummary* locations = new (arena) LocationSummary(invoke,
-                                                           LocationSummary::kNoCall,
+                                                           (can_call
+                                                                ? LocationSummary::kCallOnSlowPath
+                                                                : LocationSummary::kNoCall),
                                                            kIntrinsified);
   locations->SetInAt(0, Location::NoLocation());        // Unused receiver.
   locations->SetInAt(1, Location::RequiresRegister());
@@ -2347,7 +2358,8 @@
 
   locations->SetOut(Location::RequiresRegister());
   if (type == Primitive::kPrimNot) {
-    // Need temp registers for card-marking.
+    // Need temporary registers for card-marking, and possibly for
+    // (Baker) read barrier.
     locations->AddTemp(Location::RequiresRegister());  // Possibly used for reference poisoning too.
     locations->AddTemp(Location::RequiresRegister());
   }
@@ -2362,14 +2374,9 @@
 }
 
 void IntrinsicLocationsBuilderX86_64::VisitUnsafeCASObject(HInvoke* invoke) {
-  // The UnsafeCASObject intrinsic is missing a read barrier, and
-  // therefore sometimes does not work as expected (b/25883050).
-  // Turn it off temporarily as a quick fix, until the read barrier is
-  // implemented (see TODO in GenCAS).
-  //
-  // TODO(rpl): Implement read barrier support in GenCAS and re-enable
-  // this intrinsic.
-  if (kEmitCompilerReadBarrier) {
+  // The only read barrier implementation supporting the
+  // UnsafeCASObject intrinsic is the Baker-style read barriers.
+  if (kEmitCompilerReadBarrier && !kUseBakerReadBarrier) {
     return;
   }
 
@@ -2386,16 +2393,37 @@
   // Ensure `expected` is in RAX (required by the CMPXCHG instruction).
   DCHECK_EQ(expected.AsRegister(), RAX);
   CpuRegister value = locations->InAt(4).AsRegister<CpuRegister>();
-  CpuRegister out = locations->Out().AsRegister<CpuRegister>();
+  Location out_loc = locations->Out();
+  CpuRegister out = out_loc.AsRegister<CpuRegister>();
 
   if (type == Primitive::kPrimNot) {
+    // The only read barrier implementation supporting the
+    // UnsafeCASObject intrinsic is the Baker-style read barriers.
+    DCHECK(!kEmitCompilerReadBarrier || kUseBakerReadBarrier);
+
+    CpuRegister temp1 = locations->GetTemp(0).AsRegister<CpuRegister>();
+    CpuRegister temp2 = locations->GetTemp(1).AsRegister<CpuRegister>();
+
     // Mark card for object assuming new value is stored.
     bool value_can_be_null = true;  // TODO: Worth finding out this information?
-    codegen->MarkGCCard(locations->GetTemp(0).AsRegister<CpuRegister>(),
-                        locations->GetTemp(1).AsRegister<CpuRegister>(),
-                        base,
-                        value,
-                        value_can_be_null);
+    codegen->MarkGCCard(temp1, temp2, base, value, value_can_be_null);
+
+    // The address of the field within the holding object.
+    Address field_addr(base, offset, ScaleFactor::TIMES_1, 0);
+
+    if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
+      // Need to make sure the reference stored in the field is a to-space
+      // one before attempting the CAS or the CAS could fail incorrectly.
+      codegen->GenerateReferenceLoadWithBakerReadBarrier(
+          invoke,
+          out_loc,  // Unused, used only as a "temporary" within the read barrier.
+          base,
+          field_addr,
+          /* needs_null_check */ false,
+          /* always_update_field */ true,
+          &temp1,
+          &temp2);
+    }
 
     bool base_equals_value = (base.AsRegister() == value.AsRegister());
     Register value_reg = value.AsRegister();
@@ -2404,7 +2432,7 @@
         // If `base` and `value` are the same register location, move
         // `value_reg` to a temporary register.  This way, poisoning
         // `value_reg` won't invalidate `base`.
-        value_reg = locations->GetTemp(0).AsRegister<CpuRegister>().AsRegister();
+        value_reg = temp1.AsRegister();
         __ movl(CpuRegister(value_reg), base);
       }
 
@@ -2423,19 +2451,12 @@
       __ PoisonHeapReference(CpuRegister(value_reg));
     }
 
-    // TODO: Add a read barrier for the reference stored in the object
-    // before attempting the CAS, similar to the one in the
-    // art::Unsafe_compareAndSwapObject JNI implementation.
-    //
-    // Note that this code is not (yet) used when read barriers are
-    // enabled (see IntrinsicLocationsBuilderX86_64::VisitUnsafeCASObject).
-    DCHECK(!kEmitCompilerReadBarrier);
-    __ LockCmpxchgl(Address(base, offset, TIMES_1, 0), CpuRegister(value_reg));
+    __ LockCmpxchgl(field_addr, CpuRegister(value_reg));
 
     // LOCK CMPXCHG has full barrier semantics, and we don't need
     // scheduling barriers at this time.
 
-    // Convert ZF into the boolean result.
+    // Convert ZF into the Boolean result.
     __ setcc(kZero, out);
     __ movzxb(out, out);
 
@@ -2468,7 +2489,7 @@
     // LOCK CMPXCHG has full barrier semantics, and we don't need
     // scheduling barriers at this time.
 
-    // Convert ZF into the boolean result.
+    // Convert ZF into the Boolean result.
     __ setcc(kZero, out);
     __ movzxb(out, out);
   }
@@ -2483,14 +2504,9 @@
 }
 
 void IntrinsicCodeGeneratorX86_64::VisitUnsafeCASObject(HInvoke* invoke) {
-  // The UnsafeCASObject intrinsic is missing a read barrier, and
-  // therefore sometimes does not work as expected (b/25883050).
-  // Turn it off temporarily as a quick fix, until the read barrier is
-  // implemented (see TODO in GenCAS).
-  //
-  // TODO(rpl): Implement read barrier support in GenCAS and re-enable
-  // this intrinsic.
-  DCHECK(!kEmitCompilerReadBarrier);
+  // The only read barrier implementation supporting the
+  // UnsafeCASObject intrinsic is the Baker-style read barriers.
+  DCHECK(!kEmitCompilerReadBarrier || kUseBakerReadBarrier);
 
   GenCAS(Primitive::kPrimNot, invoke, codegen_);
 }
@@ -2982,6 +2998,9 @@
 UNIMPLEMENTED_INTRINSIC(X86_64, FloatIsInfinite)
 UNIMPLEMENTED_INTRINSIC(X86_64, DoubleIsInfinite)
 
+UNIMPLEMENTED_INTRINSIC(X86_64, StringStringIndexOf);
+UNIMPLEMENTED_INTRINSIC(X86_64, StringStringIndexOfAfter);
+
 // 1.8.
 UNIMPLEMENTED_INTRINSIC(X86_64, UnsafeGetAndAddInt)
 UNIMPLEMENTED_INTRINSIC(X86_64, UnsafeGetAndAddLong)
diff --git a/compiler/optimizing/licm.cc b/compiler/optimizing/licm.cc
index a0ded74..f0086fb 100644
--- a/compiler/optimizing/licm.cc
+++ b/compiler/optimizing/licm.cc
@@ -15,6 +15,7 @@
  */
 
 #include "licm.h"
+
 #include "side_effects_analysis.h"
 
 namespace art {
@@ -90,8 +91,7 @@
   }
 
   // Post order visit to visit inner loops before outer loops.
-  for (HPostOrderIterator it(*graph_); !it.Done(); it.Advance()) {
-    HBasicBlock* block = it.Current();
+  for (HBasicBlock* block : graph_->GetPostOrder()) {
     if (!block->IsLoopHeader()) {
       // Only visit the loop when we reach the header.
       continue;
@@ -120,17 +120,17 @@
       }
       DCHECK(!loop_info->IsIrreducible());
 
-      // We can move an instruction that can throw only if it is the first
-      // throwing instruction in the loop. Note that the first potentially
-      // throwing instruction encountered that is not hoisted stops this
-      // optimization. Non-throwing instruction can still be hoisted.
-      bool found_first_non_hoisted_throwing_instruction_in_loop = !inner->IsLoopHeader();
+      // We can move an instruction that can throw only as long as it is the first visible
+      // instruction (throw or write) in the loop. Note that the first potentially visible
+      // instruction that is not hoisted stops this optimization. Non-throwing instructions,
+      // on the other hand, can still be hoisted.
+      bool found_first_non_hoisted_visible_instruction_in_loop = !inner->IsLoopHeader();
       for (HInstructionIterator inst_it(inner->GetInstructions());
            !inst_it.Done();
            inst_it.Advance()) {
         HInstruction* instruction = inst_it.Current();
         if (instruction->CanBeMoved()
-            && (!instruction->CanThrow() || !found_first_non_hoisted_throwing_instruction_in_loop)
+            && (!instruction->CanThrow() || !found_first_non_hoisted_visible_instruction_in_loop)
             && !instruction->GetSideEffects().MayDependOn(loop_effects)
             && InputsAreDefinedBeforeLoop(instruction)) {
           // We need to update the environment if the instruction has a loop header
@@ -142,10 +142,10 @@
           }
           instruction->MoveBefore(pre_header->GetLastInstruction());
           MaybeRecordStat(MethodCompilationStat::kLoopInvariantMoved);
-        } else if (instruction->CanThrow()) {
-          // If `instruction` can throw, we cannot move further instructions
-          // that can throw as well.
-          found_first_non_hoisted_throwing_instruction_in_loop = true;
+        } else if (instruction->CanThrow() || instruction->DoesAnyWrite()) {
+          // If `instruction` can do something visible (throw or write),
+          // we cannot move further instructions that can throw.
+          found_first_non_hoisted_visible_instruction_in_loop = true;
         }
       }
     }
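
A source-level sketch of the relaxed hoisting rule above may help; the function and names below are illustrative only and not part of the patch. A loop-invariant instruction that can throw stays hoistable only while it is the first visible (throwing or writing) operation of the loop body:

    #include <cstddef>
    #include <vector>

    // Illustrative sketch only. `lookup.at(key)` is loop-invariant and can throw
    // (std::out_of_range), so it is hoistable while it is still the first visible
    // operation in the loop. Once the write to `out[i]` has been seen, any later
    // throwing instruction must stay where it is; non-throwing invariant
    // instructions remain hoistable.
    void Fill(const std::vector<int>& lookup, size_t key, int* out, size_t n) {
      for (size_t i = 0; i < n; ++i) {
        int value = lookup.at(key);  // hoistable: first visible op, invariant
        out[i] = value;              // first write: stops hoisting of later throwers
      }
    }
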
diff --git a/compiler/optimizing/linear_order.cc b/compiler/optimizing/linear_order.cc
index 3af212f..80cecd4 100644
--- a/compiler/optimizing/linear_order.cc
+++ b/compiler/optimizing/linear_order.cc
@@ -94,8 +94,7 @@
   //      for it.
   ArenaVector<uint32_t> forward_predecessors(graph->GetBlocks().size(),
                                              allocator->Adapter(kArenaAllocLinearOrder));
-  for (HReversePostOrderIterator it(*graph); !it.Done(); it.Advance()) {
-    HBasicBlock* block = it.Current();
+  for (HBasicBlock* block : graph->GetReversePostOrder()) {
     size_t number_of_forward_predecessors = block->GetPredecessors().size();
     if (block->IsLoopHeader()) {
       number_of_forward_predecessors -= block->GetLoopInformation()->NumberOfBackEdges();
diff --git a/compiler/optimizing/linear_order.h b/compiler/optimizing/linear_order.h
index cdbdd07..7122d67 100644
--- a/compiler/optimizing/linear_order.h
+++ b/compiler/optimizing/linear_order.h
@@ -30,16 +30,12 @@
 //
 // for (HBasicBlock* block : linear_order)                   // linear order
 //
-// for (HBasicBlock* block : LinearPostOrder(linear_order))  // linear post order
+// for (HBasicBlock* block : ReverseRange(linear_order))     // linear post order
 //
 void LinearizeGraph(const HGraph* graph,
                     ArenaAllocator* allocator,
                     ArenaVector<HBasicBlock*>* linear_order);
 
-inline auto LinearPostOrder(const ArenaVector<HBasicBlock*>& linear_order) {
-  return MakeIterationRange(linear_order.rbegin(), linear_order.rend());
-}
-
 }  // namespace art
 
 #endif  // ART_COMPILER_OPTIMIZING_LINEAR_ORDER_H_
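
The removed LinearPostOrder() helper is replaced by the generic ReverseRange() utility (presumably from base/iteration_range.h, which nodes.h starts including further below). A minimal stand-in with the assumed shape shows the intended usage:

    #include <iostream>
    #include <vector>

    // Minimal stand-in for the ReverseRange() utility referenced above (assumed
    // shape; not the real ART helper).
    template <typename Container>
    struct ReverseRangeOf {
      const Container& c;
      auto begin() const { return c.rbegin(); }
      auto end() const { return c.rend(); }
    };

    template <typename Container>
    ReverseRangeOf<Container> ReverseRange(const Container& c) {
      return {c};
    }

    int main() {
      std::vector<int> linear_order = {10, 11, 12};
      for (int block_id : ReverseRange(linear_order)) {  // linear post order
        std::cout << block_id << "\n";                   // prints 12, 11, 10
      }
      return 0;
    }
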
diff --git a/compiler/optimizing/liveness_test.cc b/compiler/optimizing/liveness_test.cc
index bd74368..37b58de 100644
--- a/compiler/optimizing/liveness_test.cc
+++ b/compiler/optimizing/liveness_test.cc
@@ -56,8 +56,7 @@
   liveness.Analyze();
 
   std::ostringstream buffer;
-  for (HInsertionOrderIterator it(*graph); !it.Done(); it.Advance()) {
-    HBasicBlock* block = it.Current();
+  for (HBasicBlock* block : graph->GetBlocks()) {
     buffer << "Block " << block->GetBlockId() << std::endl;
     size_t ssa_values = liveness.GetNumberOfSsaValues();
     BitVector* live_in = liveness.GetLiveInSet(*block);
diff --git a/compiler/optimizing/load_store_elimination.cc b/compiler/optimizing/load_store_elimination.cc
index 820fa29..15e6059 100644
--- a/compiler/optimizing/load_store_elimination.cc
+++ b/compiler/optimizing/load_store_elimination.cc
@@ -33,11 +33,11 @@
  public:
   ReferenceInfo(HInstruction* reference, size_t pos) : reference_(reference), position_(pos) {
     is_singleton_ = true;
-    is_singleton_and_not_returned_ = true;
+    is_singleton_and_non_escaping_ = true;
     if (!reference_->IsNewInstance() && !reference_->IsNewArray()) {
       // For references not allocated in the method, don't assume anything.
       is_singleton_ = false;
-      is_singleton_and_not_returned_ = false;
+      is_singleton_and_non_escaping_ = false;
       return;
     }
 
@@ -50,7 +50,7 @@
         // BoundType shouldn't normally be necessary for a NewInstance.
         // Just be conservative for the uncommon cases.
         is_singleton_ = false;
-        is_singleton_and_not_returned_ = false;
+        is_singleton_and_non_escaping_ = false;
         return;
       }
       if (user->IsPhi() || user->IsSelect() || user->IsInvoke() ||
@@ -62,21 +62,37 @@
         // reference_ is merged to HPhi/HSelect, passed to a callee, or stored to heap.
         // reference_ isn't the only name that can refer to its value anymore.
         is_singleton_ = false;
-        is_singleton_and_not_returned_ = false;
+        is_singleton_and_non_escaping_ = false;
         return;
       }
       if ((user->IsUnresolvedInstanceFieldGet() && (reference_ == user->InputAt(0))) ||
           (user->IsUnresolvedInstanceFieldSet() && (reference_ == user->InputAt(0)))) {
-        // The field is accessed in an unresolved way. We mark the object as a singleton to
-        // disable load/store optimizations on it.
+        // The field is accessed in an unresolved way. We mark the object as a non-singleton
+        // to disable load/store optimizations on it.
         // Note that we could optimize this case and still perform some optimizations until
         // we hit the unresolved access, but disabling is the simplest.
         is_singleton_ = false;
-        is_singleton_and_not_returned_ = false;
+        is_singleton_and_non_escaping_ = false;
         return;
       }
       if (user->IsReturn()) {
-        is_singleton_and_not_returned_ = false;
+        is_singleton_and_non_escaping_ = false;
+      }
+    }
+
+    if (!is_singleton_ || !is_singleton_and_non_escaping_) {
+      return;
+    }
+
+    // Look at the environment uses; a use by HDeoptimize is treated the same as
+    // a return, which escapes at the end of executing the compiled code. We don't
+    // do store elimination for singletons that escape through HDeoptimize.
+    // Other environment uses are fine, since LSE is disabled for debuggable code.
+    for (const HUseListNode<HEnvironment*>& use : reference_->GetEnvUses()) {
+      HEnvironment* user = use.GetUser();
+      if (user->GetHolder()->IsDeoptimize()) {
+        is_singleton_and_non_escaping_ = false;
+        break;
       }
     }
   }
@@ -96,17 +112,22 @@
     return is_singleton_;
   }
 
-  // Returns true if reference_ is a singleton and not returned to the caller.
+  // Returns true if reference_ is a singleton and not returned to the caller or
+  // used as an environment local of an HDeoptimize instruction.
   // The allocation and stores into reference_ may be eliminated for such cases.
-  bool IsSingletonAndNotReturned() const {
-    return is_singleton_and_not_returned_;
+  bool IsSingletonAndNonEscaping() const {
+    return is_singleton_and_non_escaping_;
   }
 
  private:
   HInstruction* const reference_;
   const size_t position_;     // position in HeapLocationCollector's ref_info_array_.
   bool is_singleton_;         // can only be referred to by a single name in the method.
-  bool is_singleton_and_not_returned_;  // reference_ is singleton and not returned to caller.
+
+  // reference_ is a singleton and does not escape, either by being returned to
+  // the caller or by being used as an environment local of an HDeoptimize
+  // instruction.
+  bool is_singleton_and_non_escaping_;
 
   DISALLOW_COPY_AND_ASSIGN(ReferenceInfo);
 };
@@ -202,8 +223,7 @@
                          kArenaAllocLSE),
         has_heap_stores_(false),
         has_volatile_(false),
-        has_monitor_operations_(false),
-        may_deoptimize_(false) {}
+        has_monitor_operations_(false) {}
 
   size_t GetNumberOfHeapLocations() const {
     return heap_locations_.size();
@@ -236,13 +256,6 @@
     return has_monitor_operations_;
   }
 
-  // Returns whether this method may be deoptimized.
-  // Currently we don't have meta data support for deoptimizing
-  // a method that eliminates allocations/stores.
-  bool MayDeoptimize() const {
-    return may_deoptimize_;
-  }
-
   // Find and return the heap location index in heap_locations_.
   size_t FindHeapLocationIndex(ReferenceInfo* ref_info,
                                size_t offset,
@@ -493,10 +506,6 @@
     CreateReferenceInfoForReferenceType(instruction);
   }
 
-  void VisitDeoptimize(HDeoptimize* instruction ATTRIBUTE_UNUSED) OVERRIDE {
-    may_deoptimize_ = true;
-  }
-
   void VisitMonitorOperation(HMonitorOperation* monitor ATTRIBUTE_UNUSED) OVERRIDE {
     has_monitor_operations_ = true;
   }
@@ -508,7 +517,6 @@
                             // alias analysis and won't be as effective.
   bool has_volatile_;       // If there are volatile field accesses.
   bool has_monitor_operations_;    // If there are monitor operations.
-  bool may_deoptimize_;     // Only true for HDeoptimize with single-frame deoptimization.
 
   DISALLOW_COPY_AND_ASSIGN(HeapLocationCollector);
 };
@@ -663,27 +671,59 @@
     if (predecessors.size() == 0) {
       return;
     }
+
     ArenaVector<HInstruction*>& heap_values = heap_values_for_[block->GetBlockId()];
     for (size_t i = 0; i < heap_values.size(); i++) {
-      HInstruction* pred0_value = heap_values_for_[predecessors[0]->GetBlockId()][i];
-      heap_values[i] = pred0_value;
-      if (pred0_value != kUnknownHeapValue) {
-        for (size_t j = 1; j < predecessors.size(); j++) {
-          HInstruction* pred_value = heap_values_for_[predecessors[j]->GetBlockId()][i];
-          if (pred_value != pred0_value) {
-            heap_values[i] = kUnknownHeapValue;
-            break;
-          }
+      HInstruction* merged_value = nullptr;
+      // Whether merged_value is a result that's merged from all predecessors.
+      bool from_all_predecessors = true;
+      ReferenceInfo* ref_info = heap_location_collector_.GetHeapLocation(i)->GetReferenceInfo();
+      HInstruction* singleton_ref = nullptr;
+      if (ref_info->IsSingletonAndNonEscaping()) {
+        // We do more liveness analysis when merging heap values for such cases,
+        // since stores into such references may potentially be eliminated.
+        singleton_ref = ref_info->GetReference();
+      }
+
+      for (HBasicBlock* predecessor : predecessors) {
+        HInstruction* pred_value = heap_values_for_[predecessor->GetBlockId()][i];
+        if ((singleton_ref != nullptr) &&
+            !singleton_ref->GetBlock()->Dominates(predecessor)) {
+          // singleton_ref is not live in this predecessor. Skip this predecessor since
+          // it does not really have the location.
+          DCHECK_EQ(pred_value, kUnknownHeapValue);
+          from_all_predecessors = false;
+          continue;
+        }
+        if (merged_value == nullptr) {
+          // First seen heap value.
+          merged_value = pred_value;
+        } else if (pred_value != merged_value) {
+          // There are conflicting values.
+          merged_value = kUnknownHeapValue;
+          break;
         }
       }
 
-      if (heap_values[i] == kUnknownHeapValue) {
+      if (merged_value == kUnknownHeapValue) {
+        // There are conflicting heap values from different predecessors.
         // Keep the last store in each predecessor since future loads cannot be eliminated.
-        for (size_t j = 0; j < predecessors.size(); j++) {
-          ArenaVector<HInstruction*>& pred_values = heap_values_for_[predecessors[j]->GetBlockId()];
+        for (HBasicBlock* predecessor : predecessors) {
+          ArenaVector<HInstruction*>& pred_values = heap_values_for_[predecessor->GetBlockId()];
           KeepIfIsStore(pred_values[i]);
         }
       }
+
+      if ((merged_value == nullptr) || !from_all_predecessors) {
+        DCHECK(singleton_ref != nullptr);
+        DCHECK((singleton_ref->GetBlock() == block) ||
+               !singleton_ref->GetBlock()->Dominates(block));
+        // singleton_ref is not defined before block or defined only in some of its
+        // predecessors, so block doesn't really have the location at its entry.
+        heap_values[i] = kUnknownHeapValue;
+      } else {
+        heap_values[i] = merged_value;
+      }
     }
   }
 
@@ -812,8 +852,7 @@
     } else if (index != nullptr) {
       // For array element, don't eliminate stores since it can be easily aliased
       // with non-constant index.
-    } else if (!heap_location_collector_.MayDeoptimize() &&
-               ref_info->IsSingletonAndNotReturned()) {
+    } else if (ref_info->IsSingletonAndNonEscaping()) {
       // Store into a field of a singleton that's not returned. The value cannot be
       // killed due to aliasing/invocation. It can be redundant since future loads can
       // directly get the value set by this instruction. The value can still be killed due to
@@ -987,8 +1026,7 @@
       // new_instance isn't used for field accesses. No need to process it.
       return;
     }
-    if (!heap_location_collector_.MayDeoptimize() &&
-        ref_info->IsSingletonAndNotReturned() &&
+    if (ref_info->IsSingletonAndNonEscaping() &&
         !new_instance->IsFinalizable() &&
         !new_instance->NeedsAccessCheck()) {
       singleton_new_instances_.push_back(new_instance);
@@ -1046,8 +1084,8 @@
     return;
   }
   HeapLocationCollector heap_location_collector(graph_);
-  for (HReversePostOrderIterator it(*graph_); !it.Done(); it.Advance()) {
-    heap_location_collector.VisitBasicBlock(it.Current());
+  for (HBasicBlock* block : graph_->GetReversePostOrder()) {
+    heap_location_collector.VisitBasicBlock(block);
   }
   if (heap_location_collector.GetNumberOfHeapLocations() > kMaxNumberOfHeapLocations) {
     // Bail out if there are too many heap locations to deal with.
@@ -1065,8 +1103,8 @@
   }
   heap_location_collector.BuildAliasingMatrix();
   LSEVisitor lse_visitor(graph_, heap_location_collector, side_effects_);
-  for (HReversePostOrderIterator it(*graph_); !it.Done(); it.Advance()) {
-    lse_visitor.VisitBasicBlock(it.Current());
+  for (HBasicBlock* block : graph_->GetReversePostOrder()) {
+    lse_visitor.VisitBasicBlock(block);
   }
   lse_visitor.RemoveInstructions();
 }
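
For context on the IsSingletonAndNonEscaping() rename above, here is a rough C++ analogue (invented example, not from the patch) of why escaping matters for load/store elimination; in ART, being recorded in an HDeoptimize environment now counts as such an escape:

    // Illustrative analogue only; names invented for the example.
    struct Point { int x; int y; };

    int NonEscaping(int a, int b) {
      Point p{0, 0};
      p.x = a;           // store can be forwarded to the load below...
      p.y = b;
      return p.x + p.y;  // ...after which both stores and `p` go away entirely
    }

    Point Escaping(int a, int b) {
      Point p{0, 0};
      p.x = a;           // must be kept: `p` escapes by being returned
      p.y = b;
      return p;
    }
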
diff --git a/compiler/optimizing/locations.cc b/compiler/optimizing/locations.cc
index d157509..a9fe209 100644
--- a/compiler/optimizing/locations.cc
+++ b/compiler/optimizing/locations.cc
@@ -16,11 +16,16 @@
 
 #include "locations.h"
 
+#include <type_traits>
+
 #include "nodes.h"
 #include "code_generator.h"
 
 namespace art {
 
+// Verify that Location is trivially copyable.
+static_assert(std::is_trivially_copyable<Location>::value, "Location should be trivially copyable");
+
 LocationSummary::LocationSummary(HInstruction* instruction,
                                  CallKind call_kind,
                                  bool intrinsified)
diff --git a/compiler/optimizing/locations.h b/compiler/optimizing/locations.h
index da27928..091b58a 100644
--- a/compiler/optimizing/locations.h
+++ b/compiler/optimizing/locations.h
@@ -91,12 +91,9 @@
     DCHECK(!IsValid());
   }
 
-  Location(const Location& other) : value_(other.value_) {}
+  Location(const Location& other) = default;
 
-  Location& operator=(const Location& other) {
-    value_ = other.value_;
-    return *this;
-  }
+  Location& operator=(const Location& other) = default;
 
   bool IsConstant() const {
     return (value_ & kLocationConstantMask) == kConstant;
@@ -328,7 +325,6 @@
         LOG(FATAL) << "Should not use this location kind";
     }
     UNREACHABLE();
-    return "?";
   }
 
   // Unallocated locations.
@@ -529,6 +525,12 @@
     temps_.push_back(location);
   }
 
+  void AddRegisterTemps(size_t count) {
+    for (size_t i = 0; i < count; ++i) {
+      AddTemp(Location::RequiresRegister());
+    }
+  }
+
   Location GetTemp(uint32_t at) const {
     return temps_[at];
   }
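
The defaulted copy operations and the new static_assert in locations.cc/locations.h go together; a simplified, self-contained sketch (not the real art::Location) of the pattern:

    #include <cstdint>
    #include <type_traits>

    // Simplified sketch. User-provided copy operations make a type non-trivially
    // copyable even when they only copy the fields; defaulting them restores
    // triviality, which the static_assert then locks in.
    class Location {
     public:
      Location() : value_(0) {}
      Location(const Location& other) = default;
      Location& operator=(const Location& other) = default;
     private:
      uintptr_t value_;
    };

    static_assert(std::is_trivially_copyable<Location>::value,
                  "Location should be trivially copyable");

    int main() {
      Location a;
      Location b = a;  // trivial copy
      (void)b;
      return 0;
    }
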
diff --git a/compiler/optimizing/loop_optimization.cc b/compiler/optimizing/loop_optimization.cc
index b88e73b..f4616e3 100644
--- a/compiler/optimizing/loop_optimization.cc
+++ b/compiler/optimizing/loop_optimization.cc
@@ -20,82 +20,6 @@
 
 namespace art {
 
-// Detects a potential induction cycle. Note that the actual induction
-// information is queried later if its last value is really needed.
-static bool IsPhiInduction(HPhi* phi, ArenaSet<HInstruction*>* iset) {
-  DCHECK(iset->empty());
-  HInputsRef inputs = phi->GetInputs();
-  if (inputs.size() == 2) {
-    HLoopInformation* loop_info = phi->GetBlock()->GetLoopInformation();
-    HInstruction* op = inputs[1];
-    if (op->GetBlock()->GetLoopInformation() == loop_info) {
-      // Chase a simple chain back to phi.
-      while (!op->IsPhi()) {
-        // Binary operation with single use in same loop.
-        if (!op->IsBinaryOperation() || !op->GetUses().HasExactlyOneElement()) {
-          return false;
-        }
-        // Chase back either through left or right operand.
-        iset->insert(op);
-        HInstruction* a = op->InputAt(0);
-        HInstruction* b = op->InputAt(1);
-        if (a->GetBlock()->GetLoopInformation() == loop_info && b != phi) {
-          op = a;
-        } else if (b->GetBlock()->GetLoopInformation() == loop_info) {
-          op = b;
-        } else {
-          return false;
-        }
-      }
-      // Closed the cycle?
-      if (op == phi) {
-       iset->insert(phi);
-       return true;
-      }
-    }
-  }
-  return false;
-}
-
-// Find: phi: Phi(init, addsub)
-//       s:   SuspendCheck
-//       c:   Condition(phi, bound)
-//       i:   If(c)
-// TODO: Find a less pattern matching approach?
-static bool IsEmptyHeader(HBasicBlock* block, ArenaSet<HInstruction*>* iset) {
-  DCHECK(iset->empty());
-  HInstruction* phi = block->GetFirstPhi();
-  if (phi != nullptr && phi->GetNext() == nullptr && IsPhiInduction(phi->AsPhi(), iset)) {
-    HInstruction* s = block->GetFirstInstruction();
-    if (s != nullptr && s->IsSuspendCheck()) {
-      HInstruction* c = s->GetNext();
-      if (c != nullptr && c->IsCondition() && c->GetUses().HasExactlyOneElement()) {
-        HInstruction* i = c->GetNext();
-        if (i != nullptr && i->IsIf() && i->InputAt(0) == c) {
-          iset->insert(c);
-          iset->insert(s);
-          return true;
-        }
-      }
-    }
-  }
-  return false;
-}
-
-// Does the loop-body consist of induction cycle and direct control flow only?
-static bool IsEmptyBody(HBasicBlock* block, ArenaSet<HInstruction*>* iset) {
-  if (block->GetFirstPhi() == nullptr) {
-    for (HInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) {
-      HInstruction* instruction = it.Current();
-      if (!instruction->IsGoto() && iset->find(instruction) == iset->end()) {
-        return false;
-      }
-    }
-    return true;
-  }
-  return false;
-}
-
 // Remove the instruction from the graph. A bit more elaborate than the usual
 // instruction removal, since there may be a cycle in the use structure.
 static void RemoveFromCycle(HInstruction* instruction) {
@@ -104,6 +28,30 @@
   instruction->GetBlock()->RemoveInstructionOrPhi(instruction, /*ensure_safety=*/ false);
 }
 
+// Detect a goto block and set succ to the single successor.
+static bool IsGotoBlock(HBasicBlock* block, /*out*/ HBasicBlock** succ) {
+  if (block->GetPredecessors().size() == 1 &&
+      block->GetSuccessors().size() == 1 &&
+      block->IsSingleGoto()) {
+    *succ = block->GetSingleSuccessor();
+    return true;
+  }
+  return false;
+}
+
+// Detect an early-exit loop.
+static bool IsEarlyExit(HLoopInformation* loop_info) {
+  HBlocksInLoopReversePostOrderIterator it_loop(*loop_info);
+  for (it_loop.Advance(); !it_loop.Done(); it_loop.Advance()) {
+    for (HBasicBlock* successor : it_loop.Current()->GetSuccessors()) {
+      if (!loop_info->Contains(*successor)) {
+        return true;
+      }
+    }
+  }
+  return false;
+}
+
 //
 // Class methods.
 //
@@ -242,9 +190,11 @@
     HPhi* phi = it.Current()->AsPhi();
     iset_->clear();
     int32_t use_count = 0;
-    if (IsPhiInduction(phi, iset_) &&
+    if (IsPhiInduction(phi) &&
         IsOnlyUsedAfterLoop(node->loop_info, phi, &use_count) &&
-        TryReplaceWithLastValue(phi, use_count, preheader)) {
+        // No uses, or no early-exit with proper replacement.
+        (use_count == 0 ||
+         (!IsEarlyExit(node->loop_info) && TryReplaceWithLastValue(phi, preheader)))) {
       for (HInstruction* i : *iset_) {
         RemoveFromCycle(i);
       }
@@ -254,32 +204,57 @@
 }
 
 void HLoopOptimization::SimplifyBlocks(LoopNode* node) {
-  for (HBlocksInLoopIterator it(*node->loop_info); !it.Done(); it.Advance()) {
-    HBasicBlock* block = it.Current();
-    // Remove instructions that are dead, usually resulting from eliminating induction cycles.
-    for (HBackwardInstructionIterator i(block->GetInstructions()); !i.Done(); i.Advance()) {
-      HInstruction* instruction = i.Current();
-      if (instruction->IsDeadAndRemovable()) {
-        block->RemoveInstruction(instruction);
+  // Repeat the block simplifications until no more changes occur. Note that since
+  // each simplification consists of eliminating code (without introducing new code),
+  // this process is always finite.
+  bool changed;
+  do {
+    changed = false;
+    // Iterate over all basic blocks in the loop-body.
+    for (HBlocksInLoopIterator it(*node->loop_info); !it.Done(); it.Advance()) {
+      HBasicBlock* block = it.Current();
+      // Remove dead instructions from the loop-body.
+      for (HBackwardInstructionIterator i(block->GetInstructions()); !i.Done(); i.Advance()) {
+        HInstruction* instruction = i.Current();
+        if (instruction->IsDeadAndRemovable()) {
+          changed = true;
+          block->RemoveInstruction(instruction);
+        }
       }
-    }
-    // Remove trivial control flow blocks from the loop-body, again usually resulting
-    // from eliminating induction cycles.
-    if (block->GetPredecessors().size() == 1 &&
-        block->GetSuccessors().size() == 1 &&
-        block->GetFirstInstruction()->IsGoto()) {
-      HBasicBlock* pred = block->GetSinglePredecessor();
-      HBasicBlock* succ = block->GetSingleSuccessor();
-      if (succ->GetPredecessors().size() == 1) {
+      // Remove trivial control flow blocks from the loop-body.
+      HBasicBlock* succ = nullptr;
+      if (IsGotoBlock(block, &succ) && succ->GetPredecessors().size() == 1) {
+        // Trivial goto block can be removed.
+        HBasicBlock* pred = block->GetSinglePredecessor();
+        changed = true;
         pred->ReplaceSuccessor(block, succ);
-        block->ClearDominanceInformation();
-        block->SetDominator(pred);  // needed by next disconnect.
+        block->RemoveDominatedBlock(succ);
         block->DisconnectAndDelete();
         pred->AddDominatedBlock(succ);
         succ->SetDominator(pred);
+      } else if (block->GetSuccessors().size() == 2) {
+        // Trivial if block can be bypassed to either branch.
+        HBasicBlock* succ0 = block->GetSuccessors()[0];
+        HBasicBlock* succ1 = block->GetSuccessors()[1];
+        HBasicBlock* meet0 = nullptr;
+        HBasicBlock* meet1 = nullptr;
+        if (succ0 != succ1 &&
+            IsGotoBlock(succ0, &meet0) &&
+            IsGotoBlock(succ1, &meet1) &&
+            meet0 == meet1 &&  // meets again
+            meet0 != block &&  // no self-loop
+            meet0->GetPhis().IsEmpty()) {  // not used for merging
+          changed = true;
+          succ0->DisconnectAndDelete();
+          if (block->Dominates(meet0)) {
+            block->RemoveDominatedBlock(meet0);
+            succ1->AddDominatedBlock(meet0);
+            meet0->SetDominator(succ1);
+          }
+        }
       }
     }
-  }
+  } while (changed);
 }
 
 void HLoopOptimization::RemoveIfEmptyInnerLoop(LoopNode* node) {
@@ -314,15 +289,15 @@
   // subsequent index uses, if any, with the last value and remove the loop.
   iset_->clear();
   int32_t use_count = 0;
-  if (IsEmptyHeader(header, iset_) &&
-      IsEmptyBody(body, iset_) &&
+  if (IsEmptyHeader(header) &&
+      IsEmptyBody(body) &&
       IsOnlyUsedAfterLoop(node->loop_info, header->GetFirstPhi(), &use_count) &&
-      TryReplaceWithLastValue(header->GetFirstPhi(), use_count, preheader)) {
+      // No uses, or proper replacement.
+      (use_count == 0 || TryReplaceWithLastValue(header->GetFirstPhi(), preheader))) {
     body->DisconnectAndDelete();
     exit->RemovePredecessor(header);
     header->RemoveSuccessor(exit);
-    header->ClearDominanceInformation();
-    header->SetDominator(preheader);  // needed by next disconnect.
+    header->RemoveDominatedBlock(exit);
     header->DisconnectAndDelete();
     preheader->AddSuccessor(exit);
     preheader->AddInstruction(new (graph_->GetArena()) HGoto());  // global allocator
@@ -333,6 +308,69 @@
   }
 }
 
+bool HLoopOptimization::IsPhiInduction(HPhi* phi) {
+  ArenaSet<HInstruction*>* set = induction_range_.LookupCycle(phi);
+  if (set != nullptr) {
+    DCHECK(iset_->empty());
+    for (HInstruction* i : *set) {
+      // Check that, other than instructions that are no longer in the graph (removed earlier),
+      // each instruction is removable and, except for the phi, its uses stay within the cycle.
+      if (!i->IsInBlock()) {
+        continue;
+      } else if (!i->IsRemovable()) {
+        return false;
+      } else if (i != phi) {
+        for (const HUseListNode<HInstruction*>& use : i->GetUses()) {
+          if (set->find(use.GetUser()) == set->end()) {
+            return false;
+          }
+        }
+      }
+      iset_->insert(i);  // copy
+    }
+    return true;
+  }
+  return false;
+}
+
+// Find: phi: Phi(init, addsub)
+//       s:   SuspendCheck
+//       c:   Condition(phi, bound)
+//       i:   If(c)
+// TODO: Find a less pattern matching approach?
+bool HLoopOptimization::IsEmptyHeader(HBasicBlock* block) {
+  DCHECK(iset_->empty());
+  HInstruction* phi = block->GetFirstPhi();
+  if (phi != nullptr && phi->GetNext() == nullptr && IsPhiInduction(phi->AsPhi())) {
+    HInstruction* s = block->GetFirstInstruction();
+    if (s != nullptr && s->IsSuspendCheck()) {
+      HInstruction* c = s->GetNext();
+      if (c != nullptr && c->IsCondition() && c->GetUses().HasExactlyOneElement()) {
+        HInstruction* i = c->GetNext();
+        if (i != nullptr && i->IsIf() && i->InputAt(0) == c) {
+          iset_->insert(c);
+          iset_->insert(s);
+          return true;
+        }
+      }
+    }
+  }
+  return false;
+}
+
+bool HLoopOptimization::IsEmptyBody(HBasicBlock* block) {
+  if (block->GetFirstPhi() == nullptr) {
+    for (HInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) {
+      HInstruction* instruction = it.Current();
+      if (!instruction->IsGoto() && iset_->find(instruction) == iset_->end()) {
+        return false;
+      }
+    }
+    return true;
+  }
+  return false;
+}
+
 bool HLoopOptimization::IsOnlyUsedAfterLoop(HLoopInformation* loop_info,
                                             HInstruction* instruction,
                                             /*out*/ int32_t* use_count) {
@@ -373,20 +411,16 @@
   }
 }
 
-bool HLoopOptimization::TryReplaceWithLastValue(HInstruction* instruction,
-                                                int32_t use_count,
-                                                HBasicBlock* block) {
-  // If true uses appear after the loop, replace these uses with the last value. Environment
-  // uses can consume this value too, since any first true use is outside the loop (although
-  // this may imply that de-opting may look "ahead" a bit on the phi value). If there are only
-  // environment uses, the value is dropped altogether, since the computations have no effect.
-  if (use_count > 0) {
-    if (!induction_range_.CanGenerateLastValue(instruction)) {
-      return false;
-    }
+bool HLoopOptimization::TryReplaceWithLastValue(HInstruction* instruction, HBasicBlock* block) {
+  // Try to replace outside uses with the last value. Environment uses can consume this
+  // value too, since any first true use is outside the loop (although this may imply
+  // that de-opting may look "ahead" a bit on the phi value). If there are only environment
+  // uses, the value is dropped altogether, since the computations have no effect.
+  if (induction_range_.CanGenerateLastValue(instruction)) {
     ReplaceAllUses(instruction, induction_range_.GenerateLastValue(instruction, graph_, block));
+    return true;
   }
-  return true;
+  return false;
 }
 
 }  // namespace art
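
As a source-level illustration of RemoveIfEmptyInnerLoop() and TryReplaceWithLastValue() (example invented for illustration): a loop with an empty body whose induction variable is only used after the loop can be deleted once that use is rewritten to the induction variable's last value:

    // Illustrative only: the loop below has an empty body, and `i` is used only
    // after the loop. Once the use of `i` is rewritten to the induction
    // variable's last value, the loop itself can be removed.
    int CountUp(int n) {
      int i = 0;
      for (; i < n; ++i) {
      }
      return i;  // replaceable with the last value: n > 0 ? n : 0
    }
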
diff --git a/compiler/optimizing/loop_optimization.h b/compiler/optimizing/loop_optimization.h
index 4113357..3391bef 100644
--- a/compiler/optimizing/loop_optimization.h
+++ b/compiler/optimizing/loop_optimization.h
@@ -64,13 +64,15 @@
   void SimplifyBlocks(LoopNode* node);
   void RemoveIfEmptyInnerLoop(LoopNode* node);
 
+  bool IsPhiInduction(HPhi* phi);
+  bool IsEmptyHeader(HBasicBlock* block);
+  bool IsEmptyBody(HBasicBlock* block);
+
   bool IsOnlyUsedAfterLoop(HLoopInformation* loop_info,
                            HInstruction* instruction,
                            /*out*/ int32_t* use_count);
   void ReplaceAllUses(HInstruction* instruction, HInstruction* replacement);
-  bool TryReplaceWithLastValue(HInstruction* instruction,
-                               int32_t use_count,
-                               HBasicBlock* block);
+  bool TryReplaceWithLastValue(HInstruction* instruction, HBasicBlock* block);
 
   // Range information based on prior induction variable analysis.
   InductionVarRange induction_range_;
diff --git a/compiler/optimizing/nodes.cc b/compiler/optimizing/nodes.cc
index 59cc009..680381a 100644
--- a/compiler/optimizing/nodes.cc
+++ b/compiler/optimizing/nodes.cc
@@ -179,16 +179,16 @@
 }
 
 void HGraph::ClearDominanceInformation() {
-  for (HReversePostOrderIterator it(*this); !it.Done(); it.Advance()) {
-    it.Current()->ClearDominanceInformation();
+  for (HBasicBlock* block : GetReversePostOrder()) {
+    block->ClearDominanceInformation();
   }
   reverse_post_order_.clear();
 }
 
 void HGraph::ClearLoopInformation() {
   SetHasIrreducibleLoops(false);
-  for (HReversePostOrderIterator it(*this); !it.Done(); it.Advance()) {
-    it.Current()->SetLoopInformation(nullptr);
+  for (HBasicBlock* block : GetReversePostOrder()) {
+    block->SetLoopInformation(nullptr);
   }
 }
 
@@ -275,8 +275,7 @@
     bool update_occurred = true;
     while (update_occurred) {
       update_occurred = false;
-      for (HReversePostOrderIterator it(*this); !it.Done(); it.Advance()) {
-        HBasicBlock* block = it.Current();
+      for (HBasicBlock* block : GetReversePostOrder()) {
         for (HBasicBlock* successor : block->GetSuccessors()) {
           update_occurred |= UpdateDominatorOfSuccessor(block, successor);
         }
@@ -287,8 +286,7 @@
   // Make sure that there are no remaining blocks whose dominator information
   // needs to be updated.
   if (kIsDebugBuild) {
-    for (HReversePostOrderIterator it(*this); !it.Done(); it.Advance()) {
-      HBasicBlock* block = it.Current();
+    for (HBasicBlock* block : GetReversePostOrder()) {
       for (HBasicBlock* successor : block->GetSuccessors()) {
         DCHECK(!UpdateDominatorOfSuccessor(block, successor));
       }
@@ -297,8 +295,7 @@
 
   // Populate `dominated_blocks_` information after computing all dominators.
   // The potential presence of irreducible loops requires to do it after.
-  for (HReversePostOrderIterator it(*this); !it.Done(); it.Advance()) {
-    HBasicBlock* block = it.Current();
+  for (HBasicBlock* block : GetReversePostOrder()) {
     if (!block->IsEntryBlock()) {
       block->GetDominator()->AddDominatedBlock(block);
     }
@@ -375,8 +372,7 @@
 void HGraph::ComputeTryBlockInformation() {
   // Iterate in reverse post order to propagate try membership information from
   // predecessors to their successors.
-  for (HReversePostOrderIterator it(*this); !it.Done(); it.Advance()) {
-    HBasicBlock* block = it.Current();
+  for (HBasicBlock* block : GetReversePostOrder()) {
     if (block->IsEntryBlock() || block->IsCatchBlock()) {
       // Catch blocks after simplification have only exceptional predecessors
       // and hence are never in tries.
@@ -446,8 +442,7 @@
   // We iterate post order to ensure we visit inner loops before outer loops.
   // `PopulateRecursive` needs this guarantee to know whether a natural loop
   // contains an irreducible loop.
-  for (HPostOrderIterator it(*this); !it.Done(); it.Advance()) {
-    HBasicBlock* block = it.Current();
+  for (HBasicBlock* block : GetPostOrder()) {
     if (block->IsLoopHeader()) {
       if (block->IsCatchBlock()) {
         // TODO: Dealing with exceptional back edges could be tricky because
@@ -740,6 +735,20 @@
   return true;
 }
 
+
+bool HLoopInformation::HasExitEdge() const {
+  // Determine if this loop has at least one exit edge.
+  HBlocksInLoopReversePostOrderIterator it_loop(*this);
+  for (; !it_loop.Done(); it_loop.Advance()) {
+    for (HBasicBlock* successor : it_loop.Current()->GetSuccessors()) {
+      if (!Contains(*successor)) {
+        return true;
+      }
+    }
+  }
+  return false;
+}
+
 bool HBasicBlock::Dominates(HBasicBlock* other) const {
   // Walk up the dominator tree from `other`, to find out if `this`
   // is an ancestor.
@@ -1134,8 +1143,8 @@
 }
 
 void HGraphVisitor::VisitReversePostOrder() {
-  for (HReversePostOrderIterator it(*graph_); !it.Done(); it.Advance()) {
-    VisitBasicBlock(it.Current());
+  for (HBasicBlock* block : graph_->GetReversePostOrder()) {
+    VisitBasicBlock(block);
   }
 }
 
@@ -1437,10 +1446,10 @@
   AddInstruction(new (GetGraph()->GetArena()) HGoto(new_block->GetDexPc()));
 
   for (HBasicBlock* successor : GetSuccessors()) {
-    new_block->successors_.push_back(successor);
     successor->predecessors_[successor->GetPredecessorIndexOf(this)] = new_block;
   }
-  successors_.clear();
+  new_block->successors_.swap(successors_);
+  DCHECK(successors_.empty());
   AddSuccessor(new_block);
 
   GetGraph()->AddBlock(new_block);
@@ -1454,10 +1463,10 @@
   HBasicBlock* new_block = new (GetGraph()->GetArena()) HBasicBlock(GetGraph(), GetDexPc());
 
   for (HBasicBlock* predecessor : GetPredecessors()) {
-    new_block->predecessors_.push_back(predecessor);
     predecessor->successors_[predecessor->GetSuccessorIndexOf(this)] = new_block;
   }
-  predecessors_.clear();
+  new_block->predecessors_.swap(predecessors_);
+  DCHECK(predecessors_.empty());
   AddPredecessor(new_block);
 
   GetGraph()->AddBlock(new_block);
@@ -1482,16 +1491,16 @@
   new_block->instructions_.SetBlockOfInstructions(new_block);
 
   for (HBasicBlock* successor : GetSuccessors()) {
-    new_block->successors_.push_back(successor);
     successor->predecessors_[successor->GetPredecessorIndexOf(this)] = new_block;
   }
-  successors_.clear();
+  new_block->successors_.swap(successors_);
+  DCHECK(successors_.empty());
 
   for (HBasicBlock* dominated : GetDominatedBlocks()) {
     dominated->dominator_ = new_block;
-    new_block->dominated_blocks_.push_back(dominated);
   }
-  dominated_blocks_.clear();
+  new_block->dominated_blocks_.swap(dominated_blocks_);
+  DCHECK(dominated_blocks_.empty());
   return new_block;
 }
 
@@ -1509,16 +1518,16 @@
 
   new_block->instructions_.SetBlockOfInstructions(new_block);
   for (HBasicBlock* successor : GetSuccessors()) {
-    new_block->successors_.push_back(successor);
     successor->predecessors_[successor->GetPredecessorIndexOf(this)] = new_block;
   }
-  successors_.clear();
+  new_block->successors_.swap(successors_);
+  DCHECK(successors_.empty());
 
   for (HBasicBlock* dominated : GetDominatedBlocks()) {
     dominated->dominator_ = new_block;
-    new_block->dominated_blocks_.push_back(dominated);
   }
-  dominated_blocks_.clear();
+  new_block->dominated_blocks_.swap(dominated_blocks_);
+  DCHECK(dominated_blocks_.empty());
   return new_block;
 }
 
@@ -1857,17 +1866,19 @@
 
   // Update links to the successors of `other`.
   successors_.clear();
-  while (!other->successors_.empty()) {
-    HBasicBlock* successor = other->GetSuccessors()[0];
-    successor->ReplacePredecessor(other, this);
+  for (HBasicBlock* successor : other->GetSuccessors()) {
+    successor->predecessors_[successor->GetPredecessorIndexOf(other)] = this;
   }
+  successors_.swap(other->successors_);
+  DCHECK(other->successors_.empty());
 
   // Update the dominator tree.
   RemoveDominatedBlock(other);
   for (HBasicBlock* dominated : other->GetDominatedBlocks()) {
-    dominated_blocks_.push_back(dominated);
     dominated->SetDominator(this);
   }
+  dominated_blocks_.insert(
+      dominated_blocks_.end(), other->dominated_blocks_.begin(), other->dominated_blocks_.end());
   other->dominated_blocks_.clear();
   other->dominator_ = nullptr;
 
@@ -1894,16 +1905,18 @@
 
   // Update links to the successors of `other`.
   successors_.clear();
-  while (!other->successors_.empty()) {
-    HBasicBlock* successor = other->GetSuccessors()[0];
-    successor->ReplacePredecessor(other, this);
+  for (HBasicBlock* successor : other->GetSuccessors()) {
+    successor->predecessors_[successor->GetPredecessorIndexOf(other)] = this;
   }
+  successors_.swap(other->successors_);
+  DCHECK(other->successors_.empty());
 
   // Update the dominator tree.
   for (HBasicBlock* dominated : other->GetDominatedBlocks()) {
-    dominated_blocks_.push_back(dominated);
     dominated->SetDominator(this);
   }
+  dominated_blocks_.insert(
+      dominated_blocks_.end(), other->dominated_blocks_.begin(), other->dominated_blocks_.end());
   other->dominated_blocks_.clear();
   other->dominator_ = nullptr;
   other->graph_ = nullptr;
@@ -1986,10 +1999,8 @@
   // Update the environments in this graph to have the invoke's environment
   // as parent.
   {
-    HReversePostOrderIterator it(*this);
-    it.Advance();  // Skip the entry block, we do not need to update the entry's suspend check.
-    for (; !it.Done(); it.Advance()) {
-      HBasicBlock* block = it.Current();
+    // Skip the entry block; we do not need to update the entry's suspend check.
+    for (HBasicBlock* block : GetReversePostOrderSkipEntryBlock()) {
       for (HInstructionIterator instr_it(block->GetInstructions());
            !instr_it.Done();
            instr_it.Advance()) {
@@ -2070,8 +2081,7 @@
 
     // Do a reverse post order of the blocks in the callee and do (1), (2), (3)
     // and (4) to the blocks that apply.
-    for (HReversePostOrderIterator it(*this); !it.Done(); it.Advance()) {
-      HBasicBlock* current = it.Current();
+    for (HBasicBlock* current : GetReversePostOrder()) {
       if (current != exit_block_ && current != entry_block_ && current != first) {
         DCHECK(current->GetTryCatchInformation() == nullptr);
         DCHECK(current->GetGraph() == this);
@@ -2529,8 +2539,6 @@
       return os << "BootImageLinkTimePcRelative";
     case HLoadString::LoadKind::kBootImageAddress:
       return os << "BootImageAddress";
-    case HLoadString::LoadKind::kDexCacheAddress:
-      return os << "DexCacheAddress";
     case HLoadString::LoadKind::kBssEntry:
       return os << "BssEntry";
     case HLoadString::LoadKind::kDexCacheViaMethod:
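
Several nodes.cc hunks above replace per-element push_back/clear loops with a single container swap. A stand-alone sketch of the idiom, using std::vector instead of ArenaVector:

    #include <cassert>
    #include <vector>

    int main() {
      // The old block hands all of its successor edges to the new block in one
      // O(1) swap instead of copying them one by one and clearing afterwards.
      std::vector<int> successors = {4, 7, 9};
      std::vector<int> new_block_successors;
      new_block_successors.swap(successors);
      assert(successors.empty());
      assert(new_block_successors.size() == 3u);
      return 0;
    }
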
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index 6f4f3c9..e0c582a 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -25,6 +25,7 @@
 #include "base/arena_containers.h"
 #include "base/arena_object.h"
 #include "base/array_ref.h"
+#include "base/iteration_range.h"
 #include "base/stl_util.h"
 #include "base/transform_array_ref.h"
 #include "dex_file.h"
@@ -460,10 +461,23 @@
     return reverse_post_order_;
   }
 
+  ArrayRef<HBasicBlock* const> GetReversePostOrderSkipEntryBlock() {
+    DCHECK(GetReversePostOrder()[0] == entry_block_);
+    return ArrayRef<HBasicBlock* const>(GetReversePostOrder()).SubArray(1);
+  }
+
+  IterationRange<ArenaVector<HBasicBlock*>::const_reverse_iterator> GetPostOrder() const {
+    return ReverseRange(GetReversePostOrder());
+  }
+
   const ArenaVector<HBasicBlock*>& GetLinearOrder() const {
     return linear_order_;
   }
 
+  IterationRange<ArenaVector<HBasicBlock*>::const_reverse_iterator> GetLinearPostOrder() const {
+    return ReverseRange(GetLinearOrder());
+  }
+
   bool HasBoundsChecks() const {
     return has_bounds_checks_;
   }
@@ -755,6 +769,8 @@
 
   bool DominatesAllBackEdges(HBasicBlock* block);
 
+  bool HasExitEdge() const;
+
  private:
   // Internal recursive implementation of `Populate`.
   void PopulateRecursive(HBasicBlock* block);
@@ -1841,6 +1857,15 @@
   size_t InputCount() const { return GetInputRecords().size(); }
   HInstruction* InputAt(size_t i) const { return InputRecordAt(i).GetInstruction(); }
 
+  bool HasInput(HInstruction* input) const {
+    for (const HInstruction* i : GetInputs()) {
+      if (i == input) {
+        return true;
+      }
+    }
+    return false;
+  }
+
   void SetRawInputAt(size_t index, HInstruction* input) {
     SetRawInputRecordAt(index, HUserRecord<HInstruction*>(input));
   }
@@ -1931,19 +1956,22 @@
     return !HasEnvironmentUses() && GetUses().HasExactlyOneElement();
   }
 
-  bool IsDeadAndRemovable() const {
+  bool IsRemovable() const {
     return
-        !HasSideEffects() &&
+        !DoesAnyWrite() &&
         !CanThrow() &&
         !IsSuspendCheck() &&
         !IsControlFlow() &&
         !IsNativeDebugInfo() &&
         !IsParameterValue() &&
-        !HasUses() &&
         // If we added an explicit barrier then we should keep it.
         !IsMemoryBarrier();
   }
 
+  bool IsDeadAndRemovable() const {
+    return IsRemovable() && !HasUses();
+  }
+
   // Does this instruction strictly dominate `other_instruction`?
   // Returns false if this instruction and `other_instruction` are the same.
   // Aborts if this instruction and `other_instruction` are both phis.
@@ -3756,6 +3784,8 @@
     return GetEnvironment()->IsFromInlinedInvoke();
   }
 
+  void SetCanThrow(bool can_throw) { SetPackedFlag<kFlagCanThrow>(can_throw); }
+
   bool CanThrow() const OVERRIDE { return GetPackedFlag<kFlagCanThrow>(); }
 
   bool CanBeMoved() const OVERRIDE { return IsIntrinsic(); }
@@ -3814,8 +3844,6 @@
     SetPackedFlag<kFlagCanThrow>(true);
   }
 
-  void SetCanThrow(bool can_throw) { SetPackedFlag<kFlagCanThrow>(can_throw); }
-
   uint32_t number_of_arguments_;
   ArtMethod* const resolved_method_;
   ArenaVector<HUserRecord<HInstruction*>> inputs_;
@@ -5653,10 +5681,6 @@
     // GetIncludePatchInformation().
     kBootImageAddress,
 
-    // Load from the resolved strings array at an absolute address.
-    // Used for strings outside the boot image referenced by JIT-compiled code.
-    kDexCacheAddress,
-
     // Load from an entry in the .bss section using a PC-relative load.
     // Used for strings outside boot image when .bss is accessible with a PC-relative load.
     kBssEntry,
@@ -5781,7 +5805,7 @@
   }
 
   static bool HasAddress(LoadKind load_kind) {
-    return load_kind == LoadKind::kBootImageAddress || load_kind == LoadKind::kDexCacheAddress;
+    return load_kind == LoadKind::kBootImageAddress;
   }
 
   void SetLoadKindInternal(LoadKind load_kind);
@@ -6615,58 +6639,6 @@
   DISALLOW_COPY_AND_ASSIGN(HGraphDelegateVisitor);
 };
 
-class HInsertionOrderIterator : public ValueObject {
- public:
-  explicit HInsertionOrderIterator(const HGraph& graph) : graph_(graph), index_(0) {}
-
-  bool Done() const { return index_ == graph_.GetBlocks().size(); }
-  HBasicBlock* Current() const { return graph_.GetBlocks()[index_]; }
-  void Advance() { ++index_; }
-
- private:
-  const HGraph& graph_;
-  size_t index_;
-
-  DISALLOW_COPY_AND_ASSIGN(HInsertionOrderIterator);
-};
-
-class HReversePostOrderIterator : public ValueObject {
- public:
-  explicit HReversePostOrderIterator(const HGraph& graph) : graph_(graph), index_(0) {
-    // Check that reverse post order of the graph has been built.
-    DCHECK(!graph.GetReversePostOrder().empty());
-  }
-
-  bool Done() const { return index_ == graph_.GetReversePostOrder().size(); }
-  HBasicBlock* Current() const { return graph_.GetReversePostOrder()[index_]; }
-  void Advance() { ++index_; }
-
- private:
-  const HGraph& graph_;
-  size_t index_;
-
-  DISALLOW_COPY_AND_ASSIGN(HReversePostOrderIterator);
-};
-
-class HPostOrderIterator : public ValueObject {
- public:
-  explicit HPostOrderIterator(const HGraph& graph)
-      : graph_(graph), index_(graph_.GetReversePostOrder().size()) {
-    // Check that reverse post order of the graph has been built.
-    DCHECK(!graph.GetReversePostOrder().empty());
-  }
-
-  bool Done() const { return index_ == 0; }
-  HBasicBlock* Current() const { return graph_.GetReversePostOrder()[index_ - 1u]; }
-  void Advance() { --index_; }
-
- private:
-  const HGraph& graph_;
-  size_t index_;
-
-  DISALLOW_COPY_AND_ASSIGN(HPostOrderIterator);
-};
-
 // Iterator over the blocks that art part of the loop. Includes blocks part
 // of an inner loop. The order in which the blocks are iterated is on their
 // block id.
diff --git a/compiler/optimizing/optimizing_cfi_test.cc b/compiler/optimizing/optimizing_cfi_test.cc
index bacf994..013e110 100644
--- a/compiler/optimizing/optimizing_cfi_test.cc
+++ b/compiler/optimizing/optimizing_cfi_test.cc
@@ -52,7 +52,7 @@
   void SetUpFrame(InstructionSet isa) {
     // Setup simple context.
     std::string error;
-    isa_features_.reset(InstructionSetFeatures::FromVariant(isa, "default", &error));
+    isa_features_ = InstructionSetFeatures::FromVariant(isa, "default", &error);
     graph_ = CreateGraph(&allocator_);
     // Generate simple frame with some spills.
     code_gen_ = CodeGenerator::Create(graph_, isa, *isa_features_, opts_);
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index 03870ab..499514d 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -18,6 +18,8 @@
 
 #include <fstream>
 #include <memory>
+#include <sstream>
+
 #include <stdint.h>
 
 #ifdef ART_ENABLE_CODEGEN_arm
@@ -46,6 +48,7 @@
 #include "base/arena_containers.h"
 #include "base/dumpable.h"
 #include "base/macros.h"
+#include "base/mutex.h"
 #include "base/timing_logger.h"
 #include "bounds_check_elimination.h"
 #include "builder.h"
@@ -135,14 +138,18 @@
   PassObserver(HGraph* graph,
                CodeGenerator* codegen,
                std::ostream* visualizer_output,
-               CompilerDriver* compiler_driver)
+               CompilerDriver* compiler_driver,
+               Mutex& dump_mutex)
       : graph_(graph),
         cached_method_name_(),
         timing_logger_enabled_(compiler_driver->GetDumpPasses()),
         timing_logger_(timing_logger_enabled_ ? GetMethodName() : "", true, true),
         disasm_info_(graph->GetArena()),
+        visualizer_oss_(),
+        visualizer_output_(visualizer_output),
         visualizer_enabled_(!compiler_driver->GetCompilerOptions().GetDumpCfgFileName().empty()),
-        visualizer_(visualizer_output, graph, *codegen),
+        visualizer_(&visualizer_oss_, graph, *codegen),
+        visualizer_dump_mutex_(dump_mutex),
         graph_in_bad_state_(false) {
     if (timing_logger_enabled_ || visualizer_enabled_) {
       if (!IsVerboseMethod(compiler_driver, GetMethodName())) {
@@ -160,11 +167,13 @@
       LOG(INFO) << "TIMINGS " << GetMethodName();
       LOG(INFO) << Dumpable<TimingLogger>(timing_logger_);
     }
+    DCHECK(visualizer_oss_.str().empty());
   }
 
-  void DumpDisassembly() const {
+  void DumpDisassembly() REQUIRES(!visualizer_dump_mutex_) {
     if (visualizer_enabled_) {
       visualizer_.DumpGraphWithDisassembly();
+      FlushVisualizer();
     }
   }
 
@@ -179,24 +188,34 @@
   }
 
  private:
-  void StartPass(const char* pass_name) {
+  void StartPass(const char* pass_name) REQUIRES(!visualizer_dump_mutex_) {
     VLOG(compiler) << "Starting pass: " << pass_name;
     // Dump graph first, then start timer.
     if (visualizer_enabled_) {
       visualizer_.DumpGraph(pass_name, /* is_after_pass */ false, graph_in_bad_state_);
+      FlushVisualizer();
     }
     if (timing_logger_enabled_) {
       timing_logger_.StartTiming(pass_name);
     }
   }
 
-  void EndPass(const char* pass_name) {
+  void FlushVisualizer() REQUIRES(!visualizer_dump_mutex_) {
+    MutexLock mu(Thread::Current(), visualizer_dump_mutex_);
+    *visualizer_output_ << visualizer_oss_.str();
+    visualizer_output_->flush();
+    visualizer_oss_.str("");
+    visualizer_oss_.clear();
+  }
+
+  void EndPass(const char* pass_name) REQUIRES(!visualizer_dump_mutex_) {
     // Pause timer first, then dump graph.
     if (timing_logger_enabled_) {
       timing_logger_.EndTiming();
     }
     if (visualizer_enabled_) {
       visualizer_.DumpGraph(pass_name, /* is_after_pass */ true, graph_in_bad_state_);
+      FlushVisualizer();
     }
 
     // Validate the HGraph if running in debug mode.
@@ -237,8 +256,11 @@
 
   DisassemblyInformation disasm_info_;
 
+  std::ostringstream visualizer_oss_;
+  std::ostream* visualizer_output_;
   bool visualizer_enabled_;
   HGraphVisualizer visualizer_;
+  Mutex& visualizer_dump_mutex_;
 
   // Flag to be set by the compiler if the pass failed and the graph is not
   // expected to validate.
@@ -369,13 +391,16 @@
 
   std::unique_ptr<std::ostream> visualizer_output_;
 
+  mutable Mutex dump_mutex_;  // To synchronize visualizer writing.
+
   DISALLOW_COPY_AND_ASSIGN(OptimizingCompiler);
 };
 
 static const int kMaximumCompilationTimeBeforeWarning = 100; /* ms */
 
 OptimizingCompiler::OptimizingCompiler(CompilerDriver* driver)
-    : Compiler(driver, kMaximumCompilationTimeBeforeWarning) {}
+    : Compiler(driver, kMaximumCompilationTimeBeforeWarning),
+      dump_mutex_("Visualizer dump lock") {}
 
 void OptimizingCompiler::Init() {
   // Enable C1visualizer output. Must be done in Init() because the compiler
@@ -383,9 +408,6 @@
   CompilerDriver* driver = GetCompilerDriver();
   const std::string cfg_file_name = driver->GetCompilerOptions().GetDumpCfgFileName();
   if (!cfg_file_name.empty()) {
-    CHECK_EQ(driver->GetThreadCount(), 1U)
-      << "Graph visualizer requires the compiler to run single-threaded. "
-      << "Invoke the compiler with '-j1'.";
     std::ios_base::openmode cfg_file_mode =
         driver->GetCompilerOptions().GetDumpCfgAppend() ? std::ofstream::app : std::ofstream::out;
     visualizer_output_.reset(new std::ofstream(cfg_file_name, cfg_file_mode));
@@ -602,17 +624,14 @@
   UNUSED(codegen);  // To avoid compilation error when compiling for svelte
   OptimizingCompilerStats* stats = compilation_stats_.get();
   ArenaAllocator* arena = graph->GetArena();
-#ifdef ART_USE_VIXL_ARM_BACKEND
-  UNUSED(arena);
-  UNUSED(pass_observer);
-  UNUSED(stats);
-#endif
   switch (instruction_set) {
-#if defined(ART_ENABLE_CODEGEN_arm) && !defined(ART_USE_VIXL_ARM_BACKEND)
+#if defined(ART_ENABLE_CODEGEN_arm)
     case kThumb2:
     case kArm: {
+#ifndef ART_USE_VIXL_ARM_BACKEND
       arm::DexCacheArrayFixups* fixups =
           new (arena) arm::DexCacheArrayFixups(graph, codegen, stats);
+#endif
       arm::InstructionSimplifierArm* simplifier =
           new (arena) arm::InstructionSimplifierArm(graph, stats);
       SideEffectsAnalysis* side_effects = new (arena) SideEffectsAnalysis(graph);
@@ -621,7 +640,9 @@
         simplifier,
         side_effects,
         gvn,
+#ifndef ART_USE_VIXL_ARM_BACKEND
         fixups
+#endif
       };
       RunOptimizations(arm_optimizations, arraysize(arm_optimizations), pass_observer);
       break;
@@ -732,8 +753,10 @@
   HDeadCodeElimination* dce1 = new (arena) HDeadCodeElimination(
       graph, stats, "dead_code_elimination$initial");
   HDeadCodeElimination* dce2 = new (arena) HDeadCodeElimination(
+      graph, stats, "dead_code_elimination$after_inlining");
+  HDeadCodeElimination* dce3 = new (arena) HDeadCodeElimination(
       graph, stats, "dead_code_elimination$final");
-  HConstantFolding* fold1 = new (arena) HConstantFolding(graph);
+  HConstantFolding* fold1 = new (arena) HConstantFolding(graph, "constant_folding");
   InstructionSimplifier* simplify1 = new (arena) InstructionSimplifier(graph, stats);
   HSelectGenerator* select_generator = new (arena) HSelectGenerator(graph, stats);
   HConstantFolding* fold2 = new (arena) HConstantFolding(
@@ -772,6 +795,7 @@
     select_generator,
     fold2,  // TODO: if we don't inline we can also skip fold2.
     simplify2,
+    dce2,
     side_effects,
     gvn,
     licm,
@@ -781,7 +805,7 @@
     fold3,  // evaluates code generated by dynamic bce
     simplify3,
     lse,
-    dce2,
+    dce3,
     // The codegen has a few assumptions that only the instruction simplifier
     // can satisfy. For example, the code generator does not expect to see a
     // HTypeConversion from a type to the same type.
@@ -951,7 +975,8 @@
   PassObserver pass_observer(graph,
                              codegen.get(),
                              visualizer_output_.get(),
-                             compiler_driver);
+                             compiler_driver,
+                             dump_mutex_);
 
   VLOG(compiler) << "Building " << pass_observer.GetMethodName();
 
diff --git a/compiler/optimizing/prepare_for_register_allocation.cc b/compiler/optimizing/prepare_for_register_allocation.cc
index 7b66ef3..0db6088 100644
--- a/compiler/optimizing/prepare_for_register_allocation.cc
+++ b/compiler/optimizing/prepare_for_register_allocation.cc
@@ -20,8 +20,7 @@
 
 void PrepareForRegisterAllocation::Run() {
   // Order does not matter.
-  for (HReversePostOrderIterator it(*GetGraph()); !it.Done(); it.Advance()) {
-    HBasicBlock* block = it.Current();
+  for (HBasicBlock* block : GetGraph()->GetReversePostOrder()) {
     // No need to visit the phis.
     for (HInstructionIterator inst_it(block->GetInstructions()); !inst_it.Done();
          inst_it.Advance()) {
diff --git a/compiler/optimizing/reference_type_propagation.cc b/compiler/optimizing/reference_type_propagation.cc
index d93c9dd..d588dea 100644
--- a/compiler/optimizing/reference_type_propagation.cc
+++ b/compiler/optimizing/reference_type_propagation.cc
@@ -123,8 +123,7 @@
   // TODO: move this to the graph checker.
   if (kIsDebugBuild) {
     ScopedObjectAccess soa(Thread::Current());
-    for (HReversePostOrderIterator it(*graph_); !it.Done(); it.Advance()) {
-      HBasicBlock* block = it.Current();
+    for (HBasicBlock* block : graph_->GetReversePostOrder()) {
       for (HInstructionIterator iti(block->GetInstructions()); !iti.Done(); iti.Advance()) {
         HInstruction* instr = iti.Current();
         if (instr->GetType() == Primitive::kPrimNot) {
@@ -158,8 +157,8 @@
   // To properly propagate type info we need to visit in the dominator-based order.
   // Reverse post order guarantees a node's dominators are visited first.
   // We take advantage of this order in `VisitBasicBlock`.
-  for (HReversePostOrderIterator it(*graph_); !it.Done(); it.Advance()) {
-    VisitBasicBlock(it.Current());
+  for (HBasicBlock* block : graph_->GetReversePostOrder()) {
+    VisitBasicBlock(block);
   }
 
   ProcessWorklist();
diff --git a/compiler/optimizing/register_allocation_resolver.cc b/compiler/optimizing/register_allocation_resolver.cc
index caf6647..5991791 100644
--- a/compiler/optimizing/register_allocation_resolver.cc
+++ b/compiler/optimizing/register_allocation_resolver.cc
@@ -374,7 +374,9 @@
       if (current->GetType() == Primitive::kPrimNot) {
         DCHECK(interval->GetDefinedBy()->IsActualObject())
             << interval->GetDefinedBy()->DebugName()
-            << "@" << safepoint_position->GetInstruction()->DebugName();
+            << '(' << interval->GetDefinedBy()->GetId() << ')'
+            << "@" << safepoint_position->GetInstruction()->DebugName()
+            << '(' << safepoint_position->GetInstruction()->GetId() << ')';
         LocationSummary* locations = safepoint_position->GetLocations();
         if (current->GetParent()->HasSpillSlot()) {
           locations->SetStackBit(current->GetParent()->GetSpillSlot() / kVRegSize);
diff --git a/compiler/optimizing/register_allocator_graph_color.cc b/compiler/optimizing/register_allocator_graph_color.cc
index 9610774..aa0d371 100644
--- a/compiler/optimizing/register_allocator_graph_color.cc
+++ b/compiler/optimizing/register_allocator_graph_color.cc
@@ -758,7 +758,7 @@
 }
 
 void RegisterAllocatorGraphColor::ProcessInstructions() {
-  for (HBasicBlock* block : LinearPostOrder(codegen_->GetGraph()->GetLinearOrder())) {
+  for (HBasicBlock* block : codegen_->GetGraph()->GetLinearPostOrder()) {
     // Note that we currently depend on this ordering, since some helper
     // code is designed for linear scan register allocation.
     for (HBackwardInstructionIterator instr_it(block->GetInstructions());
diff --git a/compiler/optimizing/register_allocator_linear_scan.cc b/compiler/optimizing/register_allocator_linear_scan.cc
index 4e69bc8..1a391ce 100644
--- a/compiler/optimizing/register_allocator_linear_scan.cc
+++ b/compiler/optimizing/register_allocator_linear_scan.cc
@@ -163,7 +163,7 @@
 void RegisterAllocatorLinearScan::AllocateRegistersInternal() {
   // Iterate post-order, to ensure the list is sorted, and the last added interval
   // is the one with the lowest start position.
-  for (HBasicBlock* block : LinearPostOrder(codegen_->GetGraph()->GetLinearOrder())) {
+  for (HBasicBlock* block : codegen_->GetGraph()->GetLinearPostOrder()) {
     for (HBackwardInstructionIterator back_it(block->GetInstructions()); !back_it.Done();
          back_it.Advance()) {
       ProcessInstruction(back_it.Current());
diff --git a/compiler/optimizing/select_generator.cc b/compiler/optimizing/select_generator.cc
index e409035..46d0d0e 100644
--- a/compiler/optimizing/select_generator.cc
+++ b/compiler/optimizing/select_generator.cc
@@ -76,8 +76,7 @@
   // Iterate in post order in the unlikely case that removing one occurrence of
   // the selection pattern empties a branch block of another occurrence.
   // Otherwise the order does not matter.
-  for (HPostOrderIterator it(*graph_); !it.Done(); it.Advance()) {
-    HBasicBlock* block = it.Current();
+  for (HBasicBlock* block : graph_->GetPostOrder()) {
     if (!block->EndsWithIf()) continue;
 
     // Find elements of the diamond pattern.
diff --git a/compiler/optimizing/sharpening.cc b/compiler/optimizing/sharpening.cc
index fd1db59..63e4ca6 100644
--- a/compiler/optimizing/sharpening.cc
+++ b/compiler/optimizing/sharpening.cc
@@ -325,7 +325,6 @@
       load_string->SetLoadKindWithStringReference(load_kind, dex_file, string_index);
       break;
     case HLoadString::LoadKind::kBootImageAddress:
-    case HLoadString::LoadKind::kDexCacheAddress:
       DCHECK_NE(address, 0u);
       load_string->SetLoadKindWithAddress(load_kind, address);
       break;
diff --git a/compiler/optimizing/side_effects_analysis.cc b/compiler/optimizing/side_effects_analysis.cc
index 1dc6986..6d82e8e 100644
--- a/compiler/optimizing/side_effects_analysis.cc
+++ b/compiler/optimizing/side_effects_analysis.cc
@@ -26,8 +26,7 @@
 
   // In DEBUG mode, ensure side effects are properly initialized to empty.
   if (kIsDebugBuild) {
-    for (HReversePostOrderIterator it(*graph_); !it.Done(); it.Advance()) {
-      HBasicBlock* block = it.Current();
+    for (HBasicBlock* block : graph_->GetReversePostOrder()) {
       SideEffects effects = GetBlockEffects(block);
       DCHECK(effects.DoesNothing());
       if (block->IsLoopHeader()) {
@@ -38,9 +37,7 @@
   }
 
   // Do a post order visit to ensure we visit a loop header after its loop body.
-  for (HPostOrderIterator it(*graph_); !it.Done(); it.Advance()) {
-    HBasicBlock* block = it.Current();
-
+  for (HBasicBlock* block : graph_->GetPostOrder()) {
     SideEffects effects = SideEffects::None();
     // Update `effects` with the side effects of all instructions in this block.
     for (HInstructionIterator inst_it(block->GetInstructions()); !inst_it.Done();
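
The comment above relies on a post-order visit listing a loop header after its loop body. A small self-contained sketch of that property (the CFG, block numbering and PostOrder helper are hypothetical, not ART code):

    #include <iostream>
    #include <vector>

    // DFS post order: a block is emitted only after all blocks reachable from it,
    // so the loop body (reached via the header) is emitted before the header itself.
    void PostOrder(int block, const std::vector<std::vector<int>>& succ,
                   std::vector<bool>& visited, std::vector<int>& out) {
      visited[block] = true;
      for (int s : succ[block]) {
        if (!visited[s]) PostOrder(s, succ, visited, out);
      }
      out.push_back(block);
    }

    int main() {
      // 0 -> 1 (loop header), 1 -> 2 (body), 2 -> 1 (back edge), 1 -> 3 (exit).
      std::vector<std::vector<int>> succ = {{1}, {2, 3}, {1}, {}};
      std::vector<bool> visited(succ.size(), false);
      std::vector<int> order;
      PostOrder(0, succ, visited, order);
      for (int b : order) std::cout << b << " ";  // prints "2 3 1 0": body before header
      std::cout << "\n";
      return 0;
    }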
diff --git a/compiler/optimizing/ssa_builder.cc b/compiler/optimizing/ssa_builder.cc
index 03807ba..ae1e369 100644
--- a/compiler/optimizing/ssa_builder.cc
+++ b/compiler/optimizing/ssa_builder.cc
@@ -25,8 +25,8 @@
 
 void SsaBuilder::FixNullConstantType() {
   // The order doesn't matter here.
-  for (HReversePostOrderIterator itb(*graph_); !itb.Done(); itb.Advance()) {
-    for (HInstructionIterator it(itb.Current()->GetInstructions()); !it.Done(); it.Advance()) {
+  for (HBasicBlock* block : graph_->GetReversePostOrder()) {
+    for (HInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) {
       HInstruction* equality_instr = it.Current();
       if (!equality_instr->IsEqual() && !equality_instr->IsNotEqual()) {
         continue;
@@ -57,8 +57,8 @@
 
 void SsaBuilder::EquivalentPhisCleanup() {
   // The order doesn't matter here.
-  for (HReversePostOrderIterator itb(*graph_); !itb.Done(); itb.Advance()) {
-    for (HInstructionIterator it(itb.Current()->GetPhis()); !it.Done(); it.Advance()) {
+  for (HBasicBlock* block : graph_->GetReversePostOrder()) {
+    for (HInstructionIterator it(block->GetPhis()); !it.Done(); it.Advance()) {
       HPhi* phi = it.Current()->AsPhi();
       HPhi* next = phi->GetNextEquivalentPhiWithSameType();
       if (next != nullptr) {
@@ -79,8 +79,7 @@
 }
 
 void SsaBuilder::FixEnvironmentPhis() {
-  for (HReversePostOrderIterator it(*graph_); !it.Done(); it.Advance()) {
-    HBasicBlock* block = it.Current();
+  for (HBasicBlock* block : graph_->GetReversePostOrder()) {
     for (HInstructionIterator it_phis(block->GetPhis()); !it_phis.Done(); it_phis.Advance()) {
       HPhi* phi = it_phis.Current()->AsPhi();
       // If the phi is not dead, or has no environment uses, there is nothing to do.
@@ -228,8 +227,7 @@
 void SsaBuilder::RunPrimitiveTypePropagation() {
   ArenaVector<HPhi*> worklist(graph_->GetArena()->Adapter(kArenaAllocGraphBuilder));
 
-  for (HReversePostOrderIterator it(*graph_); !it.Done(); it.Advance()) {
-    HBasicBlock* block = it.Current();
+  for (HBasicBlock* block : graph_->GetReversePostOrder()) {
     if (block->IsLoopHeader()) {
       for (HInstructionIterator phi_it(block->GetPhis()); !phi_it.Done(); phi_it.Advance()) {
         HPhi* phi = phi_it.Current()->AsPhi();
diff --git a/compiler/optimizing/ssa_liveness_analysis.cc b/compiler/optimizing/ssa_liveness_analysis.cc
index 76cf8fe..e8e12e1 100644
--- a/compiler/optimizing/ssa_liveness_analysis.cc
+++ b/compiler/optimizing/ssa_liveness_analysis.cc
@@ -139,7 +139,7 @@
 void SsaLivenessAnalysis::ComputeLiveRanges() {
   // Do a post order visit, adding inputs of instructions live in the block where
   // that instruction is defined, and killing instructions that are being visited.
-  for (HBasicBlock* block : LinearPostOrder(graph_->GetLinearOrder())) {
+  for (HBasicBlock* block : ReverseRange(graph_->GetLinearOrder())) {
     BitVector* kill = GetKillSet(*block);
     BitVector* live_in = GetLiveInSet(*block);
 
@@ -256,15 +256,13 @@
   do {
     changed = false;
 
-    for (HPostOrderIterator it(*graph_); !it.Done(); it.Advance()) {
-      const HBasicBlock& block = *it.Current();
-
+    for (const HBasicBlock* block : graph_->GetPostOrder()) {
       // The live_in set depends on the kill set (which does not
       // change in this loop), and the live_out set.  If the live_out
       // set does not change, there is no need to update the live_in set.
-      if (UpdateLiveOut(block) && UpdateLiveIn(block)) {
+      if (UpdateLiveOut(*block) && UpdateLiveIn(*block)) {
         if (kIsDebugBuild) {
-          CheckNoLiveInIrreducibleLoop(block);
+          CheckNoLiveInIrreducibleLoop(*block);
         }
         changed = true;
       }
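
ComputeLiveRanges() now walks ReverseRange(graph_->GetLinearOrder()) instead of the dedicated LinearPostOrder helper. A minimal sketch of what such a reverse-range adapter looks like; the only assumption is that ART's ReverseRange is essentially a thin wrapper over rbegin()/rend() of the given container (ReverseRangeAdapter and main are stand-ins):

    #include <iostream>
    #include <vector>

    template <typename Container>
    class ReverseRangeAdapter {
     public:
      explicit ReverseRangeAdapter(const Container& c) : c_(c) {}
      typename Container::const_reverse_iterator begin() const { return c_.rbegin(); }
      typename Container::const_reverse_iterator end() const { return c_.rend(); }
     private:
      const Container& c_;
    };

    template <typename Container>
    ReverseRangeAdapter<Container> ReverseRange(const Container& c) {
      return ReverseRangeAdapter<Container>(c);
    }

    int main() {
      std::vector<int> linear_order = {1, 2, 3};  // stand-in for the block linear order
      // Iterating the linear order backwards yields a linear post order.
      for (int block_id : ReverseRange(linear_order)) {
        std::cout << block_id << "\n";  // prints 3, 2, 1
      }
      return 0;
    }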
diff --git a/compiler/optimizing/ssa_phi_elimination.cc b/compiler/optimizing/ssa_phi_elimination.cc
index b1ec99a..aec7a3c 100644
--- a/compiler/optimizing/ssa_phi_elimination.cc
+++ b/compiler/optimizing/ssa_phi_elimination.cc
@@ -34,8 +34,7 @@
   ArenaSet<HPhi*> initially_live(graph_->GetArena()->Adapter(kArenaAllocSsaPhiElimination));
 
   // Add to the worklist phis referenced by non-phi instructions.
-  for (HReversePostOrderIterator it(*graph_); !it.Done(); it.Advance()) {
-    HBasicBlock* block = it.Current();
+  for (HBasicBlock* block : graph_->GetReversePostOrder()) {
     for (HInstructionIterator inst_it(block->GetPhis()); !inst_it.Done(); inst_it.Advance()) {
       HPhi* phi = inst_it.Current()->AsPhi();
       if (phi->IsDead()) {
@@ -84,8 +83,7 @@
   // Remove phis that are not live. Visit in post order so that phis
   // that are not inputs of loop phis can be removed when they have
   // no users left (dead phis might use dead phis).
-  for (HPostOrderIterator it(*graph_); !it.Done(); it.Advance()) {
-    HBasicBlock* block = it.Current();
+  for (HBasicBlock* block : graph_->GetPostOrder()) {
     HInstruction* current = block->GetFirstPhi();
     HInstruction* next = nullptr;
     HPhi* phi;
@@ -119,8 +117,7 @@
 void SsaRedundantPhiElimination::Run() {
   // Add all phis in the worklist. Order does not matter for correctness, and
   // neither will necessarily converge faster.
-  for (HReversePostOrderIterator it(*graph_); !it.Done(); it.Advance()) {
-    HBasicBlock* block = it.Current();
+  for (HBasicBlock* block : graph_->GetReversePostOrder()) {
     for (HInstructionIterator inst_it(block->GetPhis()); !inst_it.Done(); inst_it.Advance()) {
       worklist_.push_back(inst_it.Current()->AsPhi());
     }
diff --git a/compiler/utils/arm/assembler_arm_vixl.cc b/compiler/utils/arm/assembler_arm_vixl.cc
index 8045bd2..e3b9fb6 100644
--- a/compiler/utils/arm/assembler_arm_vixl.cc
+++ b/compiler/utils/arm/assembler_arm_vixl.cc
@@ -62,6 +62,12 @@
   ___ Rsb(reg, reg, 0);
 }
 
+void ArmVIXLAssembler::MaybePoisonHeapReference(vixl32::Register reg) {
+  if (kPoisonHeapReferences) {
+    PoisonHeapReference(reg);
+  }
+}
+
 void ArmVIXLAssembler::MaybeUnpoisonHeapReference(vixl32::Register reg) {
   if (kPoisonHeapReferences) {
     UnpoisonHeapReference(reg);
diff --git a/compiler/utils/arm/assembler_arm_vixl.h b/compiler/utils/arm/assembler_arm_vixl.h
index c5575fa..e020628 100644
--- a/compiler/utils/arm/assembler_arm_vixl.h
+++ b/compiler/utils/arm/assembler_arm_vixl.h
@@ -72,6 +72,8 @@
   void PoisonHeapReference(vixl32::Register reg);
   // Unpoison a heap reference contained in `reg`.
   void UnpoisonHeapReference(vixl32::Register reg);
+  // Poison a heap reference contained in `reg` if heap poisoning is enabled.
+  void MaybePoisonHeapReference(vixl32::Register reg);
   // Unpoison a heap reference contained in `reg` if heap poisoning is enabled.
   void MaybeUnpoisonHeapReference(vixl32::Register reg);
 
@@ -94,7 +96,7 @@
   void StoreRegisterList(RegList regs, size_t stack_offset);
 
   bool ShifterOperandCanAlwaysHold(uint32_t immediate);
-  bool ShifterOperandCanHold(Opcode opcode, uint32_t immediate, SetCc set_cc);
+  bool ShifterOperandCanHold(Opcode opcode, uint32_t immediate, SetCc set_cc = kCcDontCare);
   bool CanSplitLoadStoreOffset(int32_t allowed_offset_bits,
                                int32_t offset,
                                /*out*/ int32_t* add_to_base,
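
MaybePoisonHeapReference() mirrors the existing MaybeUnpoison variant: it is a no-op unless kPoisonHeapReferences is set at build time. A rough, self-contained sketch of the guard pattern on plain integers; negation is only an assumption suggested by the Rsb(reg, reg, 0) in the assembler above, not a statement of ART's exact encoding:

    #include <cstdint>
    #include <iostream>

    // Build-time switch mirroring art::kPoisonHeapReferences (value chosen arbitrarily here).
    static constexpr bool kPoisonHeapReferences = true;

    // Sketch: poisoning stores the negated reference; unpoisoning negates again.
    uint32_t PoisonHeapReference(uint32_t ref) { return 0u - ref; }
    uint32_t UnpoisonHeapReference(uint32_t ref) { return 0u - ref; }

    // The Maybe* variants compile down to nothing when poisoning is disabled.
    uint32_t MaybePoisonHeapReference(uint32_t ref) {
      return kPoisonHeapReferences ? PoisonHeapReference(ref) : ref;
    }
    uint32_t MaybeUnpoisonHeapReference(uint32_t ref) {
      return kPoisonHeapReferences ? UnpoisonHeapReference(ref) : ref;
    }

    int main() {
      uint32_t ref = 0x12345678u;
      uint32_t poisoned = MaybePoisonHeapReference(ref);
      std::cout << std::hex << poisoned << "\n";
      std::cout << std::hex << MaybeUnpoisonHeapReference(poisoned) << "\n";  // round-trips to ref
      return 0;
    }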
diff --git a/compiler/utils/arm/jni_macro_assembler_arm_vixl.cc b/compiler/utils/arm/jni_macro_assembler_arm_vixl.cc
index 8a9fd90..23b2774 100644
--- a/compiler/utils/arm/jni_macro_assembler_arm_vixl.cc
+++ b/compiler/utils/arm/jni_macro_assembler_arm_vixl.cc
@@ -49,7 +49,7 @@
   return dwarf::Reg::ArmFp(static_cast<int>(reg.GetCode()));
 }
 
-static constexpr size_t kFramePointerSize = static_cast<size_t>(kArmPointerSize);;
+static constexpr size_t kFramePointerSize = static_cast<size_t>(kArmPointerSize);
 
 void ArmVIXLJNIMacroAssembler::BuildFrame(size_t frame_size,
                                           ManagedRegister method_reg,
diff --git a/compiler/utils/assembler_test.h b/compiler/utils/assembler_test.h
index 9c65280..b34e125 100644
--- a/compiler/utils/assembler_test.h
+++ b/compiler/utils/assembler_test.h
@@ -51,30 +51,30 @@
 
   typedef std::string (*TestFn)(AssemblerTest* assembler_test, Ass* assembler);
 
-  void DriverFn(TestFn f, std::string test_name) {
+  void DriverFn(TestFn f, const std::string& test_name) {
     DriverWrapper(f(this, assembler_.get()), test_name);
   }
 
   // This driver assumes the assembler has already been called.
-  void DriverStr(std::string assembly_string, std::string test_name) {
+  void DriverStr(const std::string& assembly_string, const std::string& test_name) {
     DriverWrapper(assembly_string, test_name);
   }
 
-  std::string RepeatR(void (Ass::*f)(Reg), std::string fmt) {
+  std::string RepeatR(void (Ass::*f)(Reg), const std::string& fmt) {
     return RepeatTemplatedRegister<Reg>(f,
         GetRegisters(),
         &AssemblerTest::GetRegName<RegisterView::kUsePrimaryName>,
         fmt);
   }
 
-  std::string Repeatr(void (Ass::*f)(Reg), std::string fmt) {
+  std::string Repeatr(void (Ass::*f)(Reg), const std::string& fmt) {
     return RepeatTemplatedRegister<Reg>(f,
         GetRegisters(),
         &AssemblerTest::GetRegName<RegisterView::kUseSecondaryName>,
         fmt);
   }
 
-  std::string RepeatRR(void (Ass::*f)(Reg, Reg), std::string fmt) {
+  std::string RepeatRR(void (Ass::*f)(Reg, Reg), const std::string& fmt) {
     return RepeatTemplatedRegisters<Reg, Reg>(f,
         GetRegisters(),
         GetRegisters(),
@@ -83,7 +83,7 @@
         fmt);
   }
 
-  std::string RepeatRRNoDupes(void (Ass::*f)(Reg, Reg), std::string fmt) {
+  std::string RepeatRRNoDupes(void (Ass::*f)(Reg, Reg), const std::string& fmt) {
     return RepeatTemplatedRegistersNoDupes<Reg, Reg>(f,
         GetRegisters(),
         GetRegisters(),
@@ -92,7 +92,7 @@
         fmt);
   }
 
-  std::string Repeatrr(void (Ass::*f)(Reg, Reg), std::string fmt) {
+  std::string Repeatrr(void (Ass::*f)(Reg, Reg), const std::string& fmt) {
     return RepeatTemplatedRegisters<Reg, Reg>(f,
         GetRegisters(),
         GetRegisters(),
@@ -101,7 +101,7 @@
         fmt);
   }
 
-  std::string RepeatRRR(void (Ass::*f)(Reg, Reg, Reg), std::string fmt) {
+  std::string RepeatRRR(void (Ass::*f)(Reg, Reg, Reg), const std::string& fmt) {
     return RepeatTemplatedRegisters<Reg, Reg, Reg>(f,
         GetRegisters(),
         GetRegisters(),
@@ -112,7 +112,7 @@
         fmt);
   }
 
-  std::string Repeatrb(void (Ass::*f)(Reg, Reg), std::string fmt) {
+  std::string Repeatrb(void (Ass::*f)(Reg, Reg), const std::string& fmt) {
     return RepeatTemplatedRegisters<Reg, Reg>(f,
         GetRegisters(),
         GetRegisters(),
@@ -121,7 +121,7 @@
         fmt);
   }
 
-  std::string RepeatRr(void (Ass::*f)(Reg, Reg), std::string fmt) {
+  std::string RepeatRr(void (Ass::*f)(Reg, Reg), const std::string& fmt) {
     return RepeatTemplatedRegisters<Reg, Reg>(f,
         GetRegisters(),
         GetRegisters(),
@@ -130,11 +130,11 @@
         fmt);
   }
 
-  std::string RepeatRI(void (Ass::*f)(Reg, const Imm&), size_t imm_bytes, std::string fmt) {
+  std::string RepeatRI(void (Ass::*f)(Reg, const Imm&), size_t imm_bytes, const std::string& fmt) {
     return RepeatRegisterImm<RegisterView::kUsePrimaryName>(f, imm_bytes, fmt);
   }
 
-  std::string Repeatri(void (Ass::*f)(Reg, const Imm&), size_t imm_bytes, std::string fmt) {
+  std::string Repeatri(void (Ass::*f)(Reg, const Imm&), size_t imm_bytes, const std::string& fmt) {
     return RepeatRegisterImm<RegisterView::kUseSecondaryName>(f, imm_bytes, fmt);
   }
 
@@ -145,7 +145,7 @@
                                               const std::vector<Reg2*> reg2_registers,
                                               std::string (AssemblerTest::*GetName1)(const Reg1&),
                                               std::string (AssemblerTest::*GetName2)(const Reg2&),
-                                              std::string fmt) {
+                                              const std::string& fmt) {
     std::string str;
     std::vector<int64_t> imms = CreateImmediateValuesBits(abs(imm_bits), (imm_bits > 0));
 
@@ -195,7 +195,7 @@
                                               std::string (AssemblerTest::*GetName1)(const Reg1&),
                                               std::string (AssemblerTest::*GetName2)(const Reg2&),
                                               int imm_bits,
-                                              std::string fmt) {
+                                              const std::string& fmt) {
     std::vector<int64_t> imms = CreateImmediateValuesBits(abs(imm_bits), (imm_bits > 0));
 
     WarnOnCombinations(reg1_registers.size() * reg2_registers.size() * imms.size());
@@ -245,7 +245,7 @@
                                              int imm_bits,
                                              const std::vector<Reg*> registers,
                                              std::string (AssemblerTest::*GetName)(const RegType&),
-                                             std::string fmt) {
+                                             const std::string& fmt) {
     std::string str;
     std::vector<int64_t> imms = CreateImmediateValuesBits(abs(imm_bits), (imm_bits > 0));
 
@@ -281,7 +281,7 @@
   }
 
   template <typename ImmType>
-  std::string RepeatRRIb(void (Ass::*f)(Reg, Reg, ImmType), int imm_bits, std::string fmt) {
+  std::string RepeatRRIb(void (Ass::*f)(Reg, Reg, ImmType), int imm_bits, const std::string& fmt) {
     return RepeatTemplatedRegistersImmBits<Reg, Reg, ImmType>(f,
         imm_bits,
         GetRegisters(),
@@ -292,7 +292,7 @@
   }
 
   template <typename ImmType>
-  std::string RepeatRIb(void (Ass::*f)(Reg, ImmType), int imm_bits, std::string fmt) {
+  std::string RepeatRIb(void (Ass::*f)(Reg, ImmType), int imm_bits, const std::string& fmt) {
     return RepeatTemplatedRegisterImmBits<Reg, ImmType>(f,
         imm_bits,
         GetRegisters(),
@@ -301,7 +301,9 @@
   }
 
   template <typename ImmType>
-  std::string RepeatFRIb(void (Ass::*f)(FPReg, Reg, ImmType), int imm_bits, std::string fmt) {
+  std::string RepeatFRIb(void (Ass::*f)(FPReg, Reg, ImmType),
+                         int imm_bits,
+                         const std::string& fmt) {
     return RepeatTemplatedRegistersImmBits<FPReg, Reg, ImmType>(f,
         imm_bits,
         GetFPRegisters(),
@@ -311,7 +313,7 @@
         fmt);
   }
 
-  std::string RepeatFF(void (Ass::*f)(FPReg, FPReg), std::string fmt) {
+  std::string RepeatFF(void (Ass::*f)(FPReg, FPReg), const std::string& fmt) {
     return RepeatTemplatedRegisters<FPReg, FPReg>(f,
                                                   GetFPRegisters(),
                                                   GetFPRegisters(),
@@ -320,7 +322,7 @@
                                                   fmt);
   }
 
-  std::string RepeatFFF(void (Ass::*f)(FPReg, FPReg, FPReg), std::string fmt) {
+  std::string RepeatFFF(void (Ass::*f)(FPReg, FPReg, FPReg), const std::string& fmt) {
     return RepeatTemplatedRegisters<FPReg, FPReg, FPReg>(f,
                                                          GetFPRegisters(),
                                                          GetFPRegisters(),
@@ -331,7 +333,7 @@
                                                          fmt);
   }
 
-  std::string RepeatFFR(void (Ass::*f)(FPReg, FPReg, Reg), std::string fmt) {
+  std::string RepeatFFR(void (Ass::*f)(FPReg, FPReg, Reg), const std::string& fmt) {
     return RepeatTemplatedRegisters<FPReg, FPReg, Reg>(
         f,
         GetFPRegisters(),
@@ -345,7 +347,7 @@
 
   std::string RepeatFFI(void (Ass::*f)(FPReg, FPReg, const Imm&),
                         size_t imm_bytes,
-                        std::string fmt) {
+                        const std::string& fmt) {
     return RepeatTemplatedRegistersImm<FPReg, FPReg>(f,
                                                      GetFPRegisters(),
                                                      GetFPRegisters(),
@@ -356,7 +358,9 @@
   }
 
   template <typename ImmType>
-  std::string RepeatFFIb(void (Ass::*f)(FPReg, FPReg, ImmType), int imm_bits, std::string fmt) {
+  std::string RepeatFFIb(void (Ass::*f)(FPReg, FPReg, ImmType),
+                         int imm_bits,
+                         const std::string& fmt) {
     return RepeatTemplatedRegistersImmBits<FPReg, FPReg, ImmType>(f,
                                                                   imm_bits,
                                                                   GetFPRegisters(),
@@ -367,7 +371,9 @@
   }
 
   template <typename ImmType>
-  std::string RepeatIbFF(void (Ass::*f)(ImmType, FPReg, FPReg), int imm_bits, std::string fmt) {
+  std::string RepeatIbFF(void (Ass::*f)(ImmType, FPReg, FPReg),
+                         int imm_bits,
+                         const std::string& fmt) {
     return RepeatTemplatedImmBitsRegisters<ImmType, FPReg, FPReg>(f,
                                                                   GetFPRegisters(),
                                                                   GetFPRegisters(),
@@ -377,7 +383,7 @@
                                                                   fmt);
   }
 
-  std::string RepeatFR(void (Ass::*f)(FPReg, Reg), std::string fmt) {
+  std::string RepeatFR(void (Ass::*f)(FPReg, Reg), const std::string& fmt) {
     return RepeatTemplatedRegisters<FPReg, Reg>(f,
         GetFPRegisters(),
         GetRegisters(),
@@ -386,7 +392,7 @@
         fmt);
   }
 
-  std::string RepeatFr(void (Ass::*f)(FPReg, Reg), std::string fmt) {
+  std::string RepeatFr(void (Ass::*f)(FPReg, Reg), const std::string& fmt) {
     return RepeatTemplatedRegisters<FPReg, Reg>(f,
         GetFPRegisters(),
         GetRegisters(),
@@ -395,7 +401,7 @@
         fmt);
   }
 
-  std::string RepeatRF(void (Ass::*f)(Reg, FPReg), std::string fmt) {
+  std::string RepeatRF(void (Ass::*f)(Reg, FPReg), const std::string& fmt) {
     return RepeatTemplatedRegisters<Reg, FPReg>(f,
         GetRegisters(),
         GetFPRegisters(),
@@ -404,7 +410,7 @@
         fmt);
   }
 
-  std::string RepeatrF(void (Ass::*f)(Reg, FPReg), std::string fmt) {
+  std::string RepeatrF(void (Ass::*f)(Reg, FPReg), const std::string& fmt) {
     return RepeatTemplatedRegisters<Reg, FPReg>(f,
         GetRegisters(),
         GetFPRegisters(),
@@ -413,7 +419,9 @@
         fmt);
   }
 
-  std::string RepeatI(void (Ass::*f)(const Imm&), size_t imm_bytes, std::string fmt,
+  std::string RepeatI(void (Ass::*f)(const Imm&),
+                      size_t imm_bytes,
+                      const std::string& fmt,
                       bool as_uint = false) {
     std::string str;
     std::vector<int64_t> imms = CreateImmediateValues(imm_bytes, as_uint);
@@ -651,7 +659,7 @@
   std::string RepeatTemplatedRegister(void (Ass::*f)(RegType),
                                       const std::vector<RegType*> registers,
                                       std::string (AssemblerTest::*GetName)(const RegType&),
-                                      std::string fmt) {
+                                      const std::string& fmt) {
     std::string str;
     for (auto reg : registers) {
       (assembler_.get()->*f)(*reg);
@@ -679,7 +687,7 @@
                                        const std::vector<Reg2*> reg2_registers,
                                        std::string (AssemblerTest::*GetName1)(const Reg1&),
                                        std::string (AssemblerTest::*GetName2)(const Reg2&),
-                                       std::string fmt) {
+                                       const std::string& fmt) {
     WarnOnCombinations(reg1_registers.size() * reg2_registers.size());
 
     std::string str;
@@ -717,7 +725,7 @@
                                               const std::vector<Reg2*> reg2_registers,
                                               std::string (AssemblerTest::*GetName1)(const Reg1&),
                                               std::string (AssemblerTest::*GetName2)(const Reg2&),
-                                              std::string fmt) {
+                                              const std::string& fmt) {
     WarnOnCombinations(reg1_registers.size() * reg2_registers.size());
 
     std::string str;
@@ -758,7 +766,7 @@
                                        std::string (AssemblerTest::*GetName1)(const Reg1&),
                                        std::string (AssemblerTest::*GetName2)(const Reg2&),
                                        std::string (AssemblerTest::*GetName3)(const Reg3&),
-                                       std::string fmt) {
+                                       const std::string& fmt) {
     std::string str;
     for (auto reg1 : reg1_registers) {
       for (auto reg2 : reg2_registers) {
@@ -803,7 +811,7 @@
                                           std::string (AssemblerTest::*GetName1)(const Reg1&),
                                           std::string (AssemblerTest::*GetName2)(const Reg2&),
                                           size_t imm_bytes,
-                                          std::string fmt) {
+                                          const std::string& fmt) {
     std::vector<int64_t> imms = CreateImmediateValues(imm_bytes);
     WarnOnCombinations(reg1_registers.size() * reg2_registers.size() * imms.size());
 
@@ -895,8 +903,9 @@
 
  private:
   template <RegisterView kRegView>
-  std::string RepeatRegisterImm(void (Ass::*f)(Reg, const Imm&), size_t imm_bytes,
-                                  std::string fmt) {
+  std::string RepeatRegisterImm(void (Ass::*f)(Reg, const Imm&),
+                                size_t imm_bytes,
+                                const std::string& fmt) {
     const std::vector<Reg*> registers = GetRegisters();
     std::string str;
     std::vector<int64_t> imms = CreateImmediateValues(imm_bytes);
@@ -938,7 +947,7 @@
   virtual void Pad(std::vector<uint8_t>& data ATTRIBUTE_UNUSED) {
   }
 
-  void DriverWrapper(std::string assembly_text, std::string test_name) {
+  void DriverWrapper(const std::string& assembly_text, const std::string& test_name) {
     assembler_->FinalizeCode();
     size_t cs = assembler_->CodeSize();
     std::unique_ptr<std::vector<uint8_t>> data(new std::vector<uint8_t>(cs));
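
The Repeat*/Driver* signatures above now take const std::string& rather than std::string, so each call no longer copies its format string. A trivial self-contained illustration of the difference (names below are made up):

    #include <iostream>
    #include <string>

    std::string RepeatByValue(std::string fmt) { return fmt + "!"; }           // copies fmt
    std::string RepeatByConstRef(const std::string& fmt) { return fmt + "!"; } // no copy of fmt

    int main() {
      std::string fmt = "add ${reg1}, ${reg2}";
      std::cout << RepeatByValue(fmt) << "\n";
      std::cout << RepeatByConstRef(fmt) << "\n";
      return 0;
    }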
diff --git a/compiler/utils/assembler_test_base.h b/compiler/utils/assembler_test_base.h
index 8c71292..ac24ee9 100644
--- a/compiler/utils/assembler_test_base.h
+++ b/compiler/utils/assembler_test_base.h
@@ -106,7 +106,9 @@
   // Driver() assembles and compares the results. If the results are not equal and we have a
   // disassembler, disassemble both and check whether they have the same mnemonics (in which case
   // we just warn).
-  void Driver(const std::vector<uint8_t>& data, std::string assembly_text, std::string test_name) {
+  void Driver(const std::vector<uint8_t>& data,
+              const std::string& assembly_text,
+              const std::string& test_name) {
     EXPECT_NE(assembly_text.length(), 0U) << "Empty assembly";
 
     NativeAssemblerResult res;
@@ -229,7 +231,7 @@
     bool success = Exec(args, error_msg);
     if (!success) {
       LOG(ERROR) << "Assembler command line:";
-      for (std::string arg : args) {
+      for (const std::string& arg : args) {
         LOG(ERROR) << arg;
       }
     }
@@ -238,7 +240,7 @@
 
   // Runs objdump -h on the binary file and extracts the first line with .text.
   // Returns "" on failure.
-  std::string Objdump(std::string file) {
+  std::string Objdump(const std::string& file) {
     bool have_objdump = FileExists(FindTool(objdump_cmd_name_));
     EXPECT_TRUE(have_objdump) << "Cannot find objdump: " << GetObjdumpCommand();
     if (!have_objdump) {
@@ -287,8 +289,9 @@
   }
 
   // Disassemble both binaries and compare the text.
-  bool DisassembleBinaries(const std::vector<uint8_t>& data, const std::vector<uint8_t>& as,
-                           std::string test_name) {
+  bool DisassembleBinaries(const std::vector<uint8_t>& data,
+                           const std::vector<uint8_t>& as,
+                           const std::string& test_name) {
     std::string disassembler = GetDisassembleCommand();
     if (disassembler.length() == 0) {
       LOG(WARNING) << "No dissassembler command.";
@@ -324,7 +327,7 @@
     return result;
   }
 
-  bool DisassembleBinary(std::string file, std::string* error_msg) {
+  bool DisassembleBinary(const std::string& file, std::string* error_msg) {
     std::vector<std::string> args;
 
     // Encaspulate the whole command line in a single string passed to
@@ -345,7 +348,7 @@
     return Exec(args, error_msg);
   }
 
-  std::string WriteToFile(const std::vector<uint8_t>& buffer, std::string test_name) {
+  std::string WriteToFile(const std::vector<uint8_t>& buffer, const std::string& test_name) {
     std::string file_name = GetTmpnam() + std::string("---") + test_name;
     const char* data = reinterpret_cast<const char*>(buffer.data());
     std::ofstream s_out(file_name + ".o");
@@ -354,7 +357,7 @@
     return file_name + ".o";
   }
 
-  bool CompareFiles(std::string f1, std::string f2) {
+  bool CompareFiles(const std::string& f1, const std::string& f2) {
     std::ifstream f1_in(f1);
     std::ifstream f2_in(f2);
 
@@ -369,7 +372,9 @@
   }
 
   // Compile the given assembly code and extract the binary, if possible. Put result into res.
-  bool Compile(std::string assembly_code, NativeAssemblerResult* res, std::string test_name) {
+  bool Compile(const std::string& assembly_code,
+               NativeAssemblerResult* res,
+               const std::string& test_name) {
     res->ok = false;
     res->code.reset(nullptr);
 
@@ -438,7 +443,7 @@
   // Check whether file exists. Is used for commands, so strips off any parameters: anything after
   // the first space. We skip to the last slash for this, so it should work with directories with
   // spaces.
-  static bool FileExists(std::string file) {
+  static bool FileExists(const std::string& file) {
     if (file.length() == 0) {
       return false;
     }
@@ -478,7 +483,7 @@
     return getcwd(temp, 1024) ? std::string(temp) + "/" : std::string("");
   }
 
-  std::string FindTool(std::string tool_name) {
+  std::string FindTool(const std::string& tool_name) {
     // Find the current tool. Wild-card pattern is "arch-string*tool-name".
     std::string gcc_path = GetRootPath() + GetGCCRootPath();
     std::vector<std::string> args;
@@ -522,7 +527,8 @@
 
   // Helper for below. If name_predicate is empty, search for all files, otherwise use it for the
   // "-name" option.
-  static void FindToolDumpPrintout(std::string name_predicate, std::string tmp_file) {
+  static void FindToolDumpPrintout(const std::string& name_predicate,
+                                   const std::string& tmp_file) {
     std::string gcc_path = GetRootPath() + GetGCCRootPath();
     std::vector<std::string> args;
     args.push_back("find");
@@ -562,7 +568,7 @@
   }
 
   // For debug purposes.
-  void FindToolDump(std::string tool_name) {
+  void FindToolDump(const std::string& tool_name) {
     // Check with the tool name.
     FindToolDumpPrintout(architecture_string_ + "*" + tool_name, GetTmpnam());
     FindToolDumpPrintout("", GetTmpnam());
diff --git a/compiler/utils/jni_macro_assembler_test.h b/compiler/utils/jni_macro_assembler_test.h
index 829f34b..293f4cd 100644
--- a/compiler/utils/jni_macro_assembler_test.h
+++ b/compiler/utils/jni_macro_assembler_test.h
@@ -39,12 +39,12 @@
 
   typedef std::string (*TestFn)(JNIMacroAssemblerTest* assembler_test, Ass* assembler);
 
-  void DriverFn(TestFn f, std::string test_name) {
+  void DriverFn(TestFn f, const std::string& test_name) {
     DriverWrapper(f(this, assembler_.get()), test_name);
   }
 
   // This driver assumes the assembler has already been called.
-  void DriverStr(std::string assembly_string, std::string test_name) {
+  void DriverStr(const std::string& assembly_string, const std::string& test_name) {
     DriverWrapper(assembly_string, test_name);
   }
 
@@ -128,7 +128,7 @@
   virtual void Pad(std::vector<uint8_t>& data ATTRIBUTE_UNUSED) {
   }
 
-  void DriverWrapper(std::string assembly_text, std::string test_name) {
+  void DriverWrapper(const std::string& assembly_text, const std::string& test_name) {
     assembler_->FinalizeCode();
     size_t cs = assembler_->CodeSize();
     std::unique_ptr<std::vector<uint8_t>> data(new std::vector<uint8_t>(cs));
diff --git a/compiler/utils/managed_register.h b/compiler/utils/managed_register.h
index 46adb3f..184cdf5 100644
--- a/compiler/utils/managed_register.h
+++ b/compiler/utils/managed_register.h
@@ -17,8 +17,11 @@
 #ifndef ART_COMPILER_UTILS_MANAGED_REGISTER_H_
 #define ART_COMPILER_UTILS_MANAGED_REGISTER_H_
 
+#include <type_traits>
 #include <vector>
 
+#include "base/value_object.h"
+
 namespace art {
 
 namespace arm {
@@ -42,17 +45,14 @@
 class X86_64ManagedRegister;
 }
 
-class ManagedRegister {
+class ManagedRegister : public ValueObject {
  public:
   // ManagedRegister is a value class. There exists no method to change the
   // internal state. We therefore allow a copy constructor and an
   // assignment-operator.
-  constexpr ManagedRegister(const ManagedRegister& other) : id_(other.id_) { }
+  constexpr ManagedRegister(const ManagedRegister& other) = default;
 
-  ManagedRegister& operator=(const ManagedRegister& other) {
-    id_ = other.id_;
-    return *this;
-  }
+  ManagedRegister& operator=(const ManagedRegister& other) = default;
 
   constexpr arm::ArmManagedRegister AsArm() const;
   constexpr arm64::Arm64ManagedRegister AsArm64() const;
@@ -85,6 +85,9 @@
   int id_;
 };
 
+static_assert(std::is_trivially_copyable<ManagedRegister>::value,
+              "ManagedRegister should be trivially copyable");
+
 class ManagedRegisterSpill : public ManagedRegister {
  public:
   // ManagedRegisterSpill contains information about data type size and location in caller frame
@@ -115,18 +118,18 @@
  public:
   // The ManagedRegister does not have information about size and offset.
   // In this case it's size and offset determined by BuildFrame (assembler)
-  void push_back(ManagedRegister __x) {
-    ManagedRegisterSpill spill(__x);
+  void push_back(ManagedRegister x) {
+    ManagedRegisterSpill spill(x);
     std::vector<ManagedRegisterSpill>::push_back(spill);
   }
 
-  void push_back(ManagedRegister __x, int32_t __size) {
-    ManagedRegisterSpill spill(__x, __size);
+  void push_back(ManagedRegister x, int32_t size) {
+    ManagedRegisterSpill spill(x, size);
     std::vector<ManagedRegisterSpill>::push_back(spill);
   }
 
-  void push_back(ManagedRegisterSpill __x) {
-    std::vector<ManagedRegisterSpill>::push_back(__x);
+  void push_back(ManagedRegisterSpill x) {
+    std::vector<ManagedRegisterSpill>::push_back(x);
   }
  private:
 };
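
ManagedRegister's copy operations are now defaulted and a static_assert checks that the class stays trivially copyable. A small stand-alone sketch of that pattern (Register here is a stand-in, not ART's type):

    #include <type_traits>

    class Register {
     public:
      constexpr explicit Register(int id) : id_(id) {}
      // Defaulted copy operations keep the type trivially copyable.
      constexpr Register(const Register& other) = default;
      Register& operator=(const Register& other) = default;
      constexpr int id() const { return id_; }
     private:
      int id_;
    };

    // Documents and enforces the property at compile time.
    static_assert(std::is_trivially_copyable<Register>::value,
                  "Register should be trivially copyable");

    int main() {
      Register a(1);
      Register b = a;          // trivial copy
      return b.id() - a.id();  // 0
    }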
diff --git a/compiler/utils/mips/assembler_mips32r6_test.cc b/compiler/utils/mips/assembler_mips32r6_test.cc
index 3ef2f94..a52f519 100644
--- a/compiler/utils/mips/assembler_mips32r6_test.cc
+++ b/compiler/utils/mips/assembler_mips32r6_test.cc
@@ -219,7 +219,7 @@
   void BranchCondTwoRegsHelper(void (mips::MipsAssembler::*f)(mips::Register,
                                                               mips::Register,
                                                               mips::MipsLabel*),
-                               std::string instr_name) {
+                               const std::string& instr_name) {
     mips::MipsLabel label;
     (Base::GetAssembler()->*f)(mips::A0, mips::A1, &label);
     constexpr size_t kAdduCount1 = 63;
diff --git a/compiler/utils/mips/assembler_mips_test.cc b/compiler/utils/mips/assembler_mips_test.cc
index 75149cf..c24e1b1 100644
--- a/compiler/utils/mips/assembler_mips_test.cc
+++ b/compiler/utils/mips/assembler_mips_test.cc
@@ -188,7 +188,7 @@
 
   void BranchCondOneRegHelper(void (mips::MipsAssembler::*f)(mips::Register,
                                                              mips::MipsLabel*),
-                              std::string instr_name) {
+                              const std::string& instr_name) {
     mips::MipsLabel label;
     (Base::GetAssembler()->*f)(mips::A0, &label);
     constexpr size_t kAdduCount1 = 63;
@@ -217,7 +217,7 @@
   void BranchCondTwoRegsHelper(void (mips::MipsAssembler::*f)(mips::Register,
                                                               mips::Register,
                                                               mips::MipsLabel*),
-                               std::string instr_name) {
+                               const std::string& instr_name) {
     mips::MipsLabel label;
     (Base::GetAssembler()->*f)(mips::A0, mips::A1, &label);
     constexpr size_t kAdduCount1 = 63;
diff --git a/compiler/utils/mips64/assembler_mips64_test.cc b/compiler/utils/mips64/assembler_mips64_test.cc
index 1fdef96..ba8f25e 100644
--- a/compiler/utils/mips64/assembler_mips64_test.cc
+++ b/compiler/utils/mips64/assembler_mips64_test.cc
@@ -212,7 +212,7 @@
 
   void BranchCondOneRegHelper(void (mips64::Mips64Assembler::*f)(mips64::GpuRegister,
                                                                  mips64::Mips64Label*),
-                              std::string instr_name) {
+                              const std::string& instr_name) {
     mips64::Mips64Label label;
     (Base::GetAssembler()->*f)(mips64::A0, &label);
     constexpr size_t kAdduCount1 = 63;
@@ -241,7 +241,7 @@
   void BranchCondTwoRegsHelper(void (mips64::Mips64Assembler::*f)(mips64::GpuRegister,
                                                                   mips64::GpuRegister,
                                                                   mips64::Mips64Label*),
-                               std::string instr_name) {
+                               const std::string& instr_name) {
     mips64::Mips64Label label;
     (Base::GetAssembler()->*f)(mips64::A0, mips64::A1, &label);
     constexpr size_t kAdduCount1 = 63;
diff --git a/runtime/verifier/verifier_deps_test.cc b/compiler/verifier_deps_test.cc
similarity index 64%
rename from runtime/verifier/verifier_deps_test.cc
rename to compiler/verifier_deps_test.cc
index 71203e6..6b690aa 100644
--- a/runtime/verifier/verifier_deps_test.cc
+++ b/compiler/verifier_deps_test.cc
@@ -14,14 +14,17 @@
  * limitations under the License.
  */
 
-#include "verifier_deps.h"
+// Test is in compiler, as it uses compiler-related code.
+#include "verifier/verifier_deps.h"
 
 #include "class_linker.h"
-#include "common_runtime_test.h"
+#include "compiler/common_compiler_test.h"
+#include "compiler/driver/compiler_options.h"
+#include "compiler/driver/compiler_driver.h"
 #include "compiler_callbacks.h"
 #include "dex_file.h"
 #include "handle_scope-inl.h"
-#include "method_verifier-inl.h"
+#include "verifier/method_verifier-inl.h"
 #include "mirror/class_loader.h"
 #include "runtime.h"
 #include "thread.h"
@@ -47,10 +50,10 @@
   verifier::VerifierDeps* deps_;
 };
 
-class VerifierDepsTest : public CommonRuntimeTest {
+class VerifierDepsTest : public CommonCompilerTest {
  public:
   void SetUpRuntimeOptions(RuntimeOptions* options) {
-    CommonRuntimeTest::SetUpRuntimeOptions(options);
+    CommonCompilerTest::SetUpRuntimeOptions(options);
     callbacks_.reset(new VerifierDepsCompilerCallbacks());
   }
 
@@ -69,6 +72,22 @@
     return klass;
   }
 
+  void SetupCompilerDriver() {
+    compiler_options_->boot_image_ = false;
+    compiler_driver_->InitializeThreadPools();
+  }
+
+  void VerifyWithCompilerDriver(verifier::VerifierDeps* deps) {
+    TimingLogger timings("Verify", false, false);
+    // The compiler driver handles the verifier deps in the callbacks, so
+    // remove what this class did for unit testing.
+    verifier_deps_.reset(nullptr);
+    callbacks_->SetVerifierDeps(nullptr);
+    compiler_driver_->Verify(class_loader_, dex_files_, deps, &timings);
+    // The compiler driver may have updated the VerifierDeps in the callback object.
+    verifier_deps_.reset(callbacks_->GetVerifierDeps());
+  }
+
   void SetVerifierDeps(const std::vector<const DexFile*>& dex_files) {
     verifier_deps_.reset(new verifier::VerifierDeps(dex_files));
     VerifierDepsCompilerCallbacks* callbacks =
@@ -76,17 +95,24 @@
     callbacks->SetVerifierDeps(verifier_deps_.get());
   }
 
+  void LoadDexFile(ScopedObjectAccess* soa, const char* name1, const char* name2 = nullptr)
+      REQUIRES_SHARED(Locks::mutator_lock_) {
+    class_loader_ = (name2 == nullptr) ? LoadDex(name1) : LoadMultiDex(name1, name2);
+    dex_files_ = GetDexFiles(class_loader_);
+    primary_dex_file_ = dex_files_.front();
+
+    SetVerifierDeps(dex_files_);
+    StackHandleScope<1> hs(soa->Self());
+    Handle<mirror::ClassLoader> loader =
+        hs.NewHandle(soa->Decode<mirror::ClassLoader>(class_loader_));
+    for (const DexFile* dex_file : dex_files_) {
+      class_linker_->RegisterDexFile(*dex_file, loader.Get());
+    }
+  }
+
   void LoadDexFile(ScopedObjectAccess* soa) REQUIRES_SHARED(Locks::mutator_lock_) {
-    class_loader_ = LoadDex("VerifierDeps");
-    std::vector<const DexFile*> dex_files = GetDexFiles(class_loader_);
-    CHECK_EQ(dex_files.size(), 1u);
-    dex_file_ = dex_files.front();
-
-    SetVerifierDeps(dex_files);
-
-    ObjPtr<mirror::ClassLoader> loader = soa->Decode<mirror::ClassLoader>(class_loader_);
-    class_linker_->RegisterDexFile(*dex_file_, loader.Ptr());
-
+    LoadDexFile(soa, "VerifierDeps");
+    CHECK_EQ(dex_files_.size(), 1u);
     klass_Main_ = FindClassByName("LMain;", soa);
     CHECK(klass_Main_ != nullptr);
   }
@@ -95,16 +121,16 @@
     ScopedObjectAccess soa(Thread::Current());
     LoadDexFile(&soa);
 
-    StackHandleScope<2> hs(Thread::Current());
+    StackHandleScope<2> hs(soa.Self());
     Handle<mirror::ClassLoader> class_loader_handle(
         hs.NewHandle(soa.Decode<mirror::ClassLoader>(class_loader_)));
     Handle<mirror::DexCache> dex_cache_handle(hs.NewHandle(klass_Main_->GetDexCache()));
 
     const DexFile::ClassDef* class_def = klass_Main_->GetClassDef();
-    const uint8_t* class_data = dex_file_->GetClassData(*class_def);
+    const uint8_t* class_data = primary_dex_file_->GetClassData(*class_def);
     CHECK(class_data != nullptr);
 
-    ClassDataItemIterator it(*dex_file_, class_data);
+    ClassDataItemIterator it(*primary_dex_file_, class_data);
     while (it.HasNextStaticField() || it.HasNextInstanceField()) {
       it.Next();
     }
@@ -112,7 +138,7 @@
     ArtMethod* method = nullptr;
     while (it.HasNextDirectMethod()) {
       ArtMethod* resolved_method = class_linker_->ResolveMethod<ClassLinker::kNoICCECheckForCache>(
-          *dex_file_,
+          *primary_dex_file_,
           it.GetMemberIndex(),
           dex_cache_handle,
           class_loader_handle,
@@ -128,7 +154,7 @@
     CHECK(method != nullptr);
 
     MethodVerifier verifier(Thread::Current(),
-                            dex_file_,
+                            primary_dex_file_,
                             dex_cache_handle,
                             class_loader_handle,
                             *class_def,
@@ -145,25 +171,13 @@
     return !verifier.HasFailures();
   }
 
-  void VerifyDexFile() {
-    std::string error_msg;
-    ScopedObjectAccess soa(Thread::Current());
-
-    LoadDexFile(&soa);
-    SetVerifierDeps({ dex_file_ });
-
-    for (size_t i = 0; i < dex_file_->NumClassDefs(); i++) {
-      const char* descriptor = dex_file_->GetClassDescriptor(dex_file_->GetClassDef(i));
-      mirror::Class* klass = FindClassByName(descriptor, &soa);
-      if (klass != nullptr) {
-        MethodVerifier::VerifyClass(Thread::Current(),
-                                    klass,
-                                    nullptr,
-                                    true,
-                                    HardFailLogMode::kLogWarning,
-                                    &error_msg);
-      }
+  void VerifyDexFile(const char* multidex = nullptr) {
+    {
+      ScopedObjectAccess soa(Thread::Current());
+      LoadDexFile(&soa, "VerifierDeps", multidex);
     }
+    SetupCompilerDriver();
+    VerifyWithCompilerDriver(/* verifier_deps */ nullptr);
   }
 
   bool TestAssignabilityRecording(const std::string& dst,
@@ -176,7 +190,7 @@
     DCHECK(klass_dst != nullptr);
     mirror::Class* klass_src = FindClassByName(src, &soa);
     DCHECK(klass_src != nullptr);
-    verifier_deps_->AddAssignability(*dex_file_,
+    verifier_deps_->AddAssignability(*primary_dex_file_,
                                      klass_dst,
                                      klass_src,
                                      is_strict,
@@ -184,6 +198,48 @@
     return true;
   }
 
+  // Check that the status of classes in `class_loader_` match the
+  // expected status in `deps`.
+  void VerifyClassStatus(const verifier::VerifierDeps& deps) {
+    ScopedObjectAccess soa(Thread::Current());
+    StackHandleScope<2> hs(soa.Self());
+    Handle<mirror::ClassLoader> class_loader_handle(
+        hs.NewHandle(soa.Decode<mirror::ClassLoader>(class_loader_)));
+    MutableHandle<mirror::Class> cls(hs.NewHandle<mirror::Class>(nullptr));
+    for (const DexFile* dex_file : dex_files_) {
+      const std::vector<uint16_t>& unverified_classes = deps.GetUnverifiedClasses(*dex_file);
+      std::set<uint16_t> set(unverified_classes.begin(), unverified_classes.end());
+      for (uint32_t i = 0; i < dex_file->NumClassDefs(); ++i) {
+        const DexFile::ClassDef& class_def = dex_file->GetClassDef(i);
+        const char* descriptor = dex_file->GetClassDescriptor(class_def);
+        cls.Assign(class_linker_->FindClass(soa.Self(), descriptor, class_loader_handle));
+        if (cls.Get() == nullptr) {
+          CHECK(soa.Self()->IsExceptionPending());
+          soa.Self()->ClearException();
+        } else if (set.find(class_def.class_idx_) == set.end()) {
+          ASSERT_EQ(cls->GetStatus(), mirror::Class::kStatusVerified);
+        } else {
+          ASSERT_LT(cls->GetStatus(), mirror::Class::kStatusVerified);
+        }
+      }
+    }
+  }
+
+  bool HasUnverifiedClass(const std::string& cls) {
+    const DexFile::TypeId* type_id = primary_dex_file_->FindTypeId(cls.c_str());
+    DCHECK(type_id != nullptr);
+    uint16_t index = primary_dex_file_->GetIndexForTypeId(*type_id);
+    MutexLock mu(Thread::Current(), *Locks::verifier_deps_lock_);
+    for (const auto& dex_dep : verifier_deps_->dex_deps_) {
+      for (uint16_t entry : dex_dep.second->unverified_classes_) {
+        if (index == entry) {
+          return true;
+        }
+      }
+    }
+    return false;
+  }
+
   // Iterates over all assignability records and tries to find an entry which
   // matches the expected destination/source pair.
   bool HasAssignable(const std::string& expected_destination,
@@ -361,6 +417,7 @@
     bool has_classes = false;
     bool has_fields = false;
     bool has_methods = false;
+    bool has_unverified_classes = false;
 
     for (auto& entry : verifier_deps_->dex_deps_) {
       has_strings |= !entry.second->strings_.empty();
@@ -371,13 +428,32 @@
       has_methods |= !entry.second->direct_methods_.empty();
       has_methods |= !entry.second->virtual_methods_.empty();
       has_methods |= !entry.second->interface_methods_.empty();
+      has_unverified_classes |= !entry.second->unverified_classes_.empty();
     }
 
-    return has_strings && has_assignability && has_classes && has_fields && has_methods;
+    return has_strings &&
+           has_assignability &&
+           has_classes &&
+           has_fields &&
+           has_methods &&
+           has_unverified_classes;
+  }
+
+  static std::set<VerifierDeps::MethodResolution>* GetMethods(
+      VerifierDeps::DexFileDeps* deps, MethodResolutionKind resolution_kind) {
+    if (resolution_kind == kDirectMethodResolution) {
+      return &deps->direct_methods_;
+    } else if (resolution_kind == kVirtualMethodResolution) {
+      return &deps->virtual_methods_;
+    } else {
+      DCHECK_EQ(resolution_kind, kInterfaceMethodResolution);
+      return &deps->interface_methods_;
+    }
   }
 
   std::unique_ptr<verifier::VerifierDeps> verifier_deps_;
-  const DexFile* dex_file_;
+  std::vector<const DexFile*> dex_files_;
+  const DexFile* primary_dex_file_;
   jobject class_loader_;
   mirror::Class* klass_Main_;
 };
@@ -388,21 +464,21 @@
 
   MutexLock mu(Thread::Current(), *Locks::verifier_deps_lock_);
 
-  uint32_t id_Main1 = verifier_deps_->GetIdFromString(*dex_file_, "LMain;");
-  ASSERT_LT(id_Main1, dex_file_->NumStringIds());
-  ASSERT_EQ("LMain;", verifier_deps_->GetStringFromId(*dex_file_, id_Main1));
+  uint32_t id_Main1 = verifier_deps_->GetIdFromString(*primary_dex_file_, "LMain;");
+  ASSERT_LT(id_Main1, primary_dex_file_->NumStringIds());
+  ASSERT_EQ("LMain;", verifier_deps_->GetStringFromId(*primary_dex_file_, id_Main1));
 
-  uint32_t id_Main2 = verifier_deps_->GetIdFromString(*dex_file_, "LMain;");
-  ASSERT_LT(id_Main2, dex_file_->NumStringIds());
-  ASSERT_EQ("LMain;", verifier_deps_->GetStringFromId(*dex_file_, id_Main2));
+  uint32_t id_Main2 = verifier_deps_->GetIdFromString(*primary_dex_file_, "LMain;");
+  ASSERT_LT(id_Main2, primary_dex_file_->NumStringIds());
+  ASSERT_EQ("LMain;", verifier_deps_->GetStringFromId(*primary_dex_file_, id_Main2));
 
-  uint32_t id_Lorem1 = verifier_deps_->GetIdFromString(*dex_file_, "Lorem ipsum");
-  ASSERT_GE(id_Lorem1, dex_file_->NumStringIds());
-  ASSERT_EQ("Lorem ipsum", verifier_deps_->GetStringFromId(*dex_file_, id_Lorem1));
+  uint32_t id_Lorem1 = verifier_deps_->GetIdFromString(*primary_dex_file_, "Lorem ipsum");
+  ASSERT_GE(id_Lorem1, primary_dex_file_->NumStringIds());
+  ASSERT_EQ("Lorem ipsum", verifier_deps_->GetStringFromId(*primary_dex_file_, id_Lorem1));
 
-  uint32_t id_Lorem2 = verifier_deps_->GetIdFromString(*dex_file_, "Lorem ipsum");
-  ASSERT_GE(id_Lorem2, dex_file_->NumStringIds());
-  ASSERT_EQ("Lorem ipsum", verifier_deps_->GetStringFromId(*dex_file_, id_Lorem2));
+  uint32_t id_Lorem2 = verifier_deps_->GetIdFromString(*primary_dex_file_, "Lorem ipsum");
+  ASSERT_GE(id_Lorem2, primary_dex_file_->NumStringIds());
+  ASSERT_EQ("Lorem ipsum", verifier_deps_->GetStringFromId(*primary_dex_file_, id_Lorem2));
 
   ASSERT_EQ(id_Main1, id_Main2);
   ASSERT_EQ(id_Lorem1, id_Lorem2);
@@ -1049,12 +1125,408 @@
   ASSERT_TRUE(HasEachKindOfRecord());
 
   std::vector<uint8_t> buffer;
-  verifier_deps_->Encode(&buffer);
+  verifier_deps_->Encode(dex_files_, &buffer);
   ASSERT_FALSE(buffer.empty());
 
-  VerifierDeps decoded_deps({ dex_file_ }, ArrayRef<uint8_t>(buffer));
+  VerifierDeps decoded_deps(dex_files_, ArrayRef<const uint8_t>(buffer));
   ASSERT_TRUE(verifier_deps_->Equals(decoded_deps));
 }
 
+TEST_F(VerifierDepsTest, EncodeDecodeMulti) {
+  VerifyDexFile("MultiDex");
+
+  ASSERT_GT(NumberOfCompiledDexFiles(), 1u);
+  std::vector<uint8_t> buffer;
+  verifier_deps_->Encode(dex_files_, &buffer);
+  ASSERT_FALSE(buffer.empty());
+
+  // Create a new DexFile to mess with the std::map order: the verifier deps used
+  // to iterate over the map, which doesn't guarantee insertion order. We fixed
+  // this by passing the expected order when encoding/decoding.
+  std::vector<std::unique_ptr<const DexFile>> first_dex_files = OpenTestDexFiles("VerifierDeps");
+  std::vector<std::unique_ptr<const DexFile>> second_dex_files = OpenTestDexFiles("MultiDex");
+  std::vector<const DexFile*> dex_files;
+  for (auto& dex_file : first_dex_files) {
+    dex_files.push_back(dex_file.get());
+  }
+  for (auto& dex_file : second_dex_files) {
+    dex_files.push_back(dex_file.get());
+  }
+
+  // Dump the new verifier deps to ensure it can properly read the data.
+  VerifierDeps decoded_deps(dex_files, ArrayRef<const uint8_t>(buffer));
+  std::ostringstream stream;
+  VariableIndentationOutputStream os(&stream);
+  decoded_deps.Dump(&os);
+}
+
+TEST_F(VerifierDepsTest, UnverifiedClasses) {
+  VerifyDexFile();
+  ASSERT_FALSE(HasUnverifiedClass("LMyThread;"));
+  // Test that a class with a soft failure is recorded.
+  ASSERT_TRUE(HasUnverifiedClass("LMain;"));
+  // Test that a class with hard failure is recorded.
+  ASSERT_TRUE(HasUnverifiedClass("LMyVerificationFailure;"));
+  // Test that a class with unresolved super is recorded.
+  ASSERT_FALSE(HasUnverifiedClass("LMyClassWithNoSuper;"));
+  // Test that a class with unresolved super and hard failure is recorded.
+  ASSERT_TRUE(HasUnverifiedClass("LMyClassWithNoSuperButFailures;"));
+}
+
+// Returns the next resolution kind in the enum.
+static MethodResolutionKind GetNextResolutionKind(MethodResolutionKind resolution_kind) {
+  if (resolution_kind == kDirectMethodResolution) {
+    return kVirtualMethodResolution;
+  } else if (resolution_kind == kVirtualMethodResolution) {
+    return kInterfaceMethodResolution;
+  } else {
+    DCHECK_EQ(resolution_kind, kInterfaceMethodResolution);
+    return kDirectMethodResolution;
+  }
+}
+
+TEST_F(VerifierDepsTest, VerifyDeps) {
+  VerifyDexFile();
+
+  ASSERT_EQ(1u, NumberOfCompiledDexFiles());
+  ASSERT_TRUE(HasEachKindOfRecord());
+
+  // When validating, we create a new class loader, as
+  // the existing `class_loader_` may contain erroneous classes
+  // that ClassLinker::FindClass won't return.
+
+  ScopedObjectAccess soa(Thread::Current());
+  StackHandleScope<1> hs(soa.Self());
+  MutableHandle<mirror::ClassLoader> new_class_loader(hs.NewHandle<mirror::ClassLoader>(nullptr));
+  {
+    new_class_loader.Assign(soa.Decode<mirror::ClassLoader>(LoadDex("VerifierDeps")));
+    ASSERT_TRUE(verifier_deps_->ValidateDependencies(new_class_loader, soa.Self()));
+  }
+
+  std::vector<uint8_t> buffer;
+  verifier_deps_->Encode(dex_files_, &buffer);
+  ASSERT_FALSE(buffer.empty());
+
+  {
+    VerifierDeps decoded_deps(dex_files_, ArrayRef<const uint8_t>(buffer));
+    new_class_loader.Assign(soa.Decode<mirror::ClassLoader>(LoadDex("VerifierDeps")));
+    ASSERT_TRUE(decoded_deps.ValidateDependencies(new_class_loader, soa.Self()));
+  }
+
+  // Fiddle with the dependencies to make sure we catch any change and fail to verify.
+
+  {
+    // Mess with the assignable_types.
+    VerifierDeps decoded_deps(dex_files_, ArrayRef<const uint8_t>(buffer));
+    VerifierDeps::DexFileDeps* deps = decoded_deps.GetDexFileDeps(*primary_dex_file_);
+    deps->assignable_types_.insert(*deps->unassignable_types_.begin());
+    new_class_loader.Assign(soa.Decode<mirror::ClassLoader>(LoadDex("VerifierDeps")));
+    ASSERT_FALSE(decoded_deps.ValidateDependencies(new_class_loader, soa.Self()));
+  }
+
+  {
+    // Mess with the unassignable_types.
+    VerifierDeps decoded_deps(dex_files_, ArrayRef<const uint8_t>(buffer));
+    VerifierDeps::DexFileDeps* deps = decoded_deps.GetDexFileDeps(*primary_dex_file_);
+    deps->unassignable_types_.insert(*deps->assignable_types_.begin());
+    new_class_loader.Assign(soa.Decode<mirror::ClassLoader>(LoadDex("VerifierDeps")));
+    ASSERT_FALSE(decoded_deps.ValidateDependencies(new_class_loader, soa.Self()));
+  }
+
+  // Mess with classes.
+  {
+    VerifierDeps decoded_deps(dex_files_, ArrayRef<const uint8_t>(buffer));
+    VerifierDeps::DexFileDeps* deps = decoded_deps.GetDexFileDeps(*primary_dex_file_);
+    bool found = false;
+    for (const auto& entry : deps->classes_) {
+      if (entry.IsResolved()) {
+        deps->classes_.insert(VerifierDeps::ClassResolution(
+            entry.GetDexTypeIndex(), VerifierDeps::kUnresolvedMarker));
+        found = true;
+        break;
+      }
+    }
+    ASSERT_TRUE(found);
+    new_class_loader.Assign(soa.Decode<mirror::ClassLoader>(LoadDex("VerifierDeps")));
+    ASSERT_FALSE(decoded_deps.ValidateDependencies(new_class_loader, soa.Self()));
+  }
+
+  {
+    VerifierDeps decoded_deps(dex_files_, ArrayRef<const uint8_t>(buffer));
+    VerifierDeps::DexFileDeps* deps = decoded_deps.GetDexFileDeps(*primary_dex_file_);
+    bool found = false;
+    for (const auto& entry : deps->classes_) {
+      if (!entry.IsResolved()) {
+        deps->classes_.insert(VerifierDeps::ClassResolution(
+            entry.GetDexTypeIndex(), VerifierDeps::kUnresolvedMarker - 1));
+        found = true;
+        break;
+      }
+    }
+    ASSERT_TRUE(found);
+    new_class_loader.Assign(soa.Decode<mirror::ClassLoader>(LoadDex("VerifierDeps")));
+    ASSERT_FALSE(decoded_deps.ValidateDependencies(new_class_loader, soa.Self()));
+  }
+
+  {
+    VerifierDeps decoded_deps(dex_files_, ArrayRef<const uint8_t>(buffer));
+    VerifierDeps::DexFileDeps* deps = decoded_deps.GetDexFileDeps(*primary_dex_file_);
+    bool found = false;
+    for (const auto& entry : deps->classes_) {
+      if (entry.IsResolved()) {
+        deps->classes_.insert(VerifierDeps::ClassResolution(
+            entry.GetDexTypeIndex(), entry.GetAccessFlags() - 1));
+        found = true;
+        break;
+      }
+    }
+    ASSERT_TRUE(found);
+    new_class_loader.Assign(soa.Decode<mirror::ClassLoader>(LoadDex("VerifierDeps")));
+    ASSERT_FALSE(decoded_deps.ValidateDependencies(new_class_loader, soa.Self()));
+  }
+
+  // Mess with fields.
+  {
+    VerifierDeps decoded_deps(dex_files_, ArrayRef<const uint8_t>(buffer));
+    VerifierDeps::DexFileDeps* deps = decoded_deps.GetDexFileDeps(*primary_dex_file_);
+    bool found = false;
+    for (const auto& entry : deps->fields_) {
+      if (entry.IsResolved()) {
+        deps->fields_.insert(VerifierDeps::FieldResolution(entry.GetDexFieldIndex(),
+                                                           VerifierDeps::kUnresolvedMarker,
+                                                           entry.GetDeclaringClassIndex()));
+        found = true;
+        break;
+      }
+    }
+    ASSERT_TRUE(found);
+    new_class_loader.Assign(soa.Decode<mirror::ClassLoader>(LoadDex("VerifierDeps")));
+    ASSERT_FALSE(decoded_deps.ValidateDependencies(new_class_loader, soa.Self()));
+  }
+
+  {
+    VerifierDeps decoded_deps(dex_files_, ArrayRef<const uint8_t>(buffer));
+    VerifierDeps::DexFileDeps* deps = decoded_deps.GetDexFileDeps(*primary_dex_file_);
+    bool found = false;
+    for (const auto& entry : deps->fields_) {
+      if (!entry.IsResolved()) {
+        deps->fields_.insert(VerifierDeps::FieldResolution(0 /* we know there is a field there */,
+                                                           VerifierDeps::kUnresolvedMarker - 1,
+                                                           0  /* we know there is a class there */));
+        found = true;
+        break;
+      }
+    }
+    ASSERT_TRUE(found);
+    new_class_loader.Assign(soa.Decode<mirror::ClassLoader>(LoadDex("VerifierDeps")));
+    ASSERT_FALSE(decoded_deps.ValidateDependencies(new_class_loader, soa.Self()));
+  }
+
+  {
+    VerifierDeps decoded_deps(dex_files_, ArrayRef<const uint8_t>(buffer));
+    VerifierDeps::DexFileDeps* deps = decoded_deps.GetDexFileDeps(*primary_dex_file_);
+    bool found = false;
+    for (const auto& entry : deps->fields_) {
+      if (entry.IsResolved()) {
+        deps->fields_.insert(VerifierDeps::FieldResolution(entry.GetDexFieldIndex(),
+                                                           entry.GetAccessFlags() - 1,
+                                                           entry.GetDeclaringClassIndex()));
+        found = true;
+        break;
+      }
+    }
+    ASSERT_TRUE(found);
+    new_class_loader.Assign(soa.Decode<mirror::ClassLoader>(LoadDex("VerifierDeps")));
+    ASSERT_FALSE(decoded_deps.ValidateDependencies(new_class_loader, soa.Self()));
+  }
+
+  {
+    VerifierDeps decoded_deps(dex_files_, ArrayRef<const uint8_t>(buffer));
+    VerifierDeps::DexFileDeps* deps = decoded_deps.GetDexFileDeps(*primary_dex_file_);
+    bool found = false;
+    for (const auto& entry : deps->fields_) {
+      static constexpr uint32_t kNewTypeIndex = 0;
+      if (entry.GetDeclaringClassIndex() != kNewTypeIndex) {
+        deps->fields_.insert(VerifierDeps::FieldResolution(entry.GetDexFieldIndex(),
+                                                           entry.GetAccessFlags(),
+                                                           kNewTypeIndex));
+        found = true;
+        break;
+      }
+    }
+    ASSERT_TRUE(found);
+    new_class_loader.Assign(soa.Decode<mirror::ClassLoader>(LoadDex("VerifierDeps")));
+    ASSERT_FALSE(decoded_deps.ValidateDependencies(new_class_loader, soa.Self()));
+  }
+
+  // Mess up the methods.
+  for (MethodResolutionKind resolution_kind :
+            { kDirectMethodResolution, kVirtualMethodResolution, kInterfaceMethodResolution }) {
+    {
+      VerifierDeps decoded_deps(dex_files_, ArrayRef<const uint8_t>(buffer));
+      VerifierDeps::DexFileDeps* deps = decoded_deps.GetDexFileDeps(*primary_dex_file_);
+      bool found = false;
+      std::set<VerifierDeps::MethodResolution>* methods = GetMethods(deps, resolution_kind);
+      for (const auto& entry : *methods) {
+        if (entry.IsResolved()) {
+          methods->insert(VerifierDeps::MethodResolution(entry.GetDexMethodIndex(),
+                                                         VerifierDeps::kUnresolvedMarker,
+                                                         entry.GetDeclaringClassIndex()));
+          found = true;
+          break;
+        }
+      }
+      ASSERT_TRUE(found);
+      new_class_loader.Assign(soa.Decode<mirror::ClassLoader>(LoadDex("VerifierDeps")));
+      ASSERT_FALSE(decoded_deps.ValidateDependencies(new_class_loader, soa.Self()));
+    }
+
+    {
+      VerifierDeps decoded_deps(dex_files_, ArrayRef<const uint8_t>(buffer));
+      VerifierDeps::DexFileDeps* deps = decoded_deps.GetDexFileDeps(*primary_dex_file_);
+      bool found = false;
+      std::set<VerifierDeps::MethodResolution>* methods = GetMethods(deps, resolution_kind);
+      for (const auto& entry : *methods) {
+        if (!entry.IsResolved()) {
+          methods->insert(VerifierDeps::MethodResolution(0 /* we know there is a method there */,
+                                                         VerifierDeps::kUnresolvedMarker - 1,
+                                                         0  /* we know there is a class there */));
+          found = true;
+          break;
+        }
+      }
+      ASSERT_TRUE(found);
+      new_class_loader.Assign(soa.Decode<mirror::ClassLoader>(LoadDex("VerifierDeps")));
+      ASSERT_FALSE(decoded_deps.ValidateDependencies(new_class_loader, soa.Self()));
+    }
+
+    {
+      VerifierDeps decoded_deps(dex_files_, ArrayRef<const uint8_t>(buffer));
+      VerifierDeps::DexFileDeps* deps = decoded_deps.GetDexFileDeps(*primary_dex_file_);
+      bool found = false;
+      std::set<VerifierDeps::MethodResolution>* methods = GetMethods(deps, resolution_kind);
+      for (const auto& entry : *methods) {
+        if (entry.IsResolved()) {
+          methods->insert(VerifierDeps::MethodResolution(entry.GetDexMethodIndex(),
+                                                         entry.GetAccessFlags() - 1,
+                                                         entry.GetDeclaringClassIndex()));
+          found = true;
+          break;
+        }
+      }
+      ASSERT_TRUE(found);
+      new_class_loader.Assign(soa.Decode<mirror::ClassLoader>(LoadDex("VerifierDeps")));
+      ASSERT_FALSE(decoded_deps.ValidateDependencies(new_class_loader, soa.Self()));
+    }
+
+    {
+      VerifierDeps decoded_deps(dex_files_, ArrayRef<const uint8_t>(buffer));
+      VerifierDeps::DexFileDeps* deps = decoded_deps.GetDexFileDeps(*primary_dex_file_);
+      bool found = false;
+      std::set<VerifierDeps::MethodResolution>* methods = GetMethods(deps, resolution_kind);
+      for (const auto& entry : *methods) {
+        static constexpr uint32_t kNewTypeIndex = 0;
+        if (entry.IsResolved() && entry.GetDeclaringClassIndex() != kNewTypeIndex) {
+          methods->insert(VerifierDeps::MethodResolution(entry.GetDexMethodIndex(),
+                                                         entry.GetAccessFlags(),
+                                                         kNewTypeIndex));
+          found = true;
+          break;
+        }
+      }
+      ASSERT_TRUE(found);
+      new_class_loader.Assign(soa.Decode<mirror::ClassLoader>(LoadDex("VerifierDeps")));
+      ASSERT_FALSE(decoded_deps.ValidateDependencies(new_class_loader, soa.Self()));
+    }
+
+    {
+      VerifierDeps decoded_deps(dex_files_, ArrayRef<const uint8_t>(buffer));
+      VerifierDeps::DexFileDeps* deps = decoded_deps.GetDexFileDeps(*primary_dex_file_);
+      bool found = false;
+      std::set<VerifierDeps::MethodResolution>* methods = GetMethods(deps, resolution_kind);
+      for (const auto& entry : *methods) {
+        if (entry.IsResolved()) {
+          GetMethods(deps, GetNextResolutionKind(resolution_kind))->insert(
+              VerifierDeps::MethodResolution(entry.GetDexMethodIndex(),
+                                             entry.GetAccessFlags(),
+                                             entry.GetDeclaringClassIndex()));
+          found = true;
+        }
+      }
+      ASSERT_TRUE(found);
+      new_class_loader.Assign(soa.Decode<mirror::ClassLoader>(LoadDex("VerifierDeps")));
+      ASSERT_FALSE(decoded_deps.ValidateDependencies(new_class_loader, soa.Self()));
+    }
+
+    {
+      VerifierDeps decoded_deps(dex_files_, ArrayRef<const uint8_t>(buffer));
+      VerifierDeps::DexFileDeps* deps = decoded_deps.GetDexFileDeps(*primary_dex_file_);
+      bool found = false;
+      std::set<VerifierDeps::MethodResolution>* methods = GetMethods(deps, resolution_kind);
+      for (const auto& entry : *methods) {
+        if (entry.IsResolved()) {
+          GetMethods(deps, GetNextResolutionKind(GetNextResolutionKind(resolution_kind)))->insert(
+              VerifierDeps::MethodResolution(entry.GetDexMethodIndex(),
+                                             entry.GetAccessFlags(),
+                                             entry.GetDeclaringClassIndex()));
+          found = true;
+        }
+      }
+      ASSERT_TRUE(found);
+      new_class_loader.Assign(soa.Decode<mirror::ClassLoader>(LoadDex("VerifierDeps")));
+      ASSERT_FALSE(decoded_deps.ValidateDependencies(new_class_loader, soa.Self()));
+    }
+  }
+}
+
+TEST_F(VerifierDepsTest, CompilerDriver) {
+  SetupCompilerDriver();
+
+  // Test both multi-dex and single-dex configuration.
+  for (const char* multi : { "MultiDex", static_cast<const char*>(nullptr) }) {
+    // Test that the compiler driver behaves as expected when the dependencies
+    // verify and when they don't verify.
+    for (bool verify_failure : { false, true }) {
+      {
+        ScopedObjectAccess soa(Thread::Current());
+        LoadDexFile(&soa, "VerifierDeps", multi);
+      }
+      VerifyWithCompilerDriver(/* verifier_deps */ nullptr);
+
+      std::vector<uint8_t> buffer;
+      verifier_deps_->Encode(dex_files_, &buffer);
+
+      {
+        ScopedObjectAccess soa(Thread::Current());
+        LoadDexFile(&soa, "VerifierDeps", multi);
+      }
+      verifier::VerifierDeps decoded_deps(dex_files_, ArrayRef<const uint8_t>(buffer));
+      if (verify_failure) {
+        // Just taint the decoded VerifierDeps with one invalid entry.
+        VerifierDeps::DexFileDeps* deps = decoded_deps.GetDexFileDeps(*primary_dex_file_);
+        bool found = false;
+        for (const auto& entry : deps->classes_) {
+          if (entry.IsResolved()) {
+            deps->classes_.insert(VerifierDeps::ClassResolution(
+                entry.GetDexTypeIndex(), VerifierDeps::kUnresolvedMarker));
+            found = true;
+            break;
+          }
+        }
+        ASSERT_TRUE(found);
+      }
+      VerifyWithCompilerDriver(&decoded_deps);
+
+      if (verify_failure) {
+        ASSERT_FALSE(verifier_deps_ == nullptr);
+        ASSERT_FALSE(verifier_deps_->Equals(decoded_deps));
+      } else {
+        ASSERT_TRUE(verifier_deps_ == nullptr);
+        VerifyClassStatus(decoded_deps);
+      }
+    }
+  }
+}
+
 }  // namespace verifier
 }  // namespace art
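Each block in the test above follows the same recipe: decode a fresh VerifierDeps from the encoded buffer, tamper with exactly one recorded dependency (an assignability entry, a class/field/method resolution, its access flags, its declaring class, or the resolution kind it was filed under), reload the dex files in a new class loader, and require ValidateDependencies to reject the tainted data. A condensed sketch of that shape, reusing the fixture members that appear in the test above (not additional test code):

```cpp
// Condensed shape of the blocks above; Mutator stands for the one-record
// tampering each block performs on the decoded dependencies.
template <typename Mutator>
void ExpectValidationFailure(const std::vector<uint8_t>& buffer, Mutator&& mutate) {
  VerifierDeps decoded_deps(dex_files_, ArrayRef<const uint8_t>(buffer));
  mutate(decoded_deps.GetDexFileDeps(*primary_dex_file_));  // flip exactly one record
  new_class_loader.Assign(soa.Decode<mirror::ClassLoader>(LoadDex("VerifierDeps")));
  ASSERT_FALSE(decoded_deps.ValidateDependencies(new_class_loader, soa.Self()));
}
```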
diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc
index 8bbe685..65703a2 100644
--- a/dex2oat/dex2oat.cc
+++ b/dex2oat/dex2oat.cc
@@ -517,7 +517,8 @@
       thread_count_(sysconf(_SC_NPROCESSORS_CONF)),
       start_ns_(NanoTime()),
       oat_fd_(-1),
-      vdex_fd_(-1),
+      input_vdex_fd_(-1),
+      output_vdex_fd_(-1),
       zip_fd_(-1),
       image_base_(0U),
       image_classes_zip_filename_(nullptr),
@@ -590,8 +591,13 @@
     ParseUintOption(option, "--zip-fd", &zip_fd_, Usage);
   }
 
-  void ParseVdexFd(const StringPiece& option) {
-    ParseUintOption(option, "--vdex-fd", &vdex_fd_, Usage);
+  void ParseInputVdexFd(const StringPiece& option) {
+    // Note that the input vdex fd might be -1.
+    ParseIntOption(option, "--input-vdex-fd", &input_vdex_fd_, Usage);
+  }
+
+  void ParseOutputVdexFd(const StringPiece& option) {
+    ParseUintOption(option, "--output-vdex-fd", &output_vdex_fd_, Usage);
   }
 
   void ParseOatFd(const StringPiece& option) {
@@ -637,9 +643,8 @@
   void ParseInstructionSetVariant(const StringPiece& option, ParserOptions* parser_options) {
     DCHECK(option.starts_with("--instruction-set-variant="));
     StringPiece str = option.substr(strlen("--instruction-set-variant=")).data();
-    instruction_set_features_.reset(
-        InstructionSetFeatures::FromVariant(
-            instruction_set_, str.as_string(), &parser_options->error_msg));
+    instruction_set_features_ = InstructionSetFeatures::FromVariant(
+        instruction_set_, str.as_string(), &parser_options->error_msg);
     if (instruction_set_features_.get() == nullptr) {
       Usage("%s", parser_options->error_msg.c_str());
     }
@@ -648,19 +653,18 @@
   void ParseInstructionSetFeatures(const StringPiece& option, ParserOptions* parser_options) {
     DCHECK(option.starts_with("--instruction-set-features="));
     StringPiece str = option.substr(strlen("--instruction-set-features=")).data();
-    if (instruction_set_features_.get() == nullptr) {
-      instruction_set_features_.reset(
-          InstructionSetFeatures::FromVariant(
-              instruction_set_, "default", &parser_options->error_msg));
+    if (instruction_set_features_ == nullptr) {
+      instruction_set_features_ = InstructionSetFeatures::FromVariant(
+          instruction_set_, "default", &parser_options->error_msg);
       if (instruction_set_features_.get() == nullptr) {
         Usage("Problem initializing default instruction set features variant: %s",
               parser_options->error_msg.c_str());
       }
     }
-    instruction_set_features_.reset(
+    instruction_set_features_ =
         instruction_set_features_->AddFeaturesFromString(str.as_string(),
-                                                         &parser_options->error_msg));
-    if (instruction_set_features_.get() == nullptr) {
+                                                         &parser_options->error_msg);
+    if (instruction_set_features_ == nullptr) {
       Usage("Error parsing '%s': %s", option.data(), parser_options->error_msg.c_str());
     }
   }
@@ -709,9 +713,9 @@
       Usage("--oat-file should not be used with --oat-fd");
     }
 
-    if ((vdex_fd_ == -1) != (oat_fd_ == -1)) {
+    if ((output_vdex_fd_ == -1) != (oat_fd_ == -1)) {
       Usage("VDEX and OAT output must be specified either with one --oat-filename "
-            "or with --oat-fd and --vdex-fd file descriptors");
+            "or with --oat-fd and --output-vdex-fd file descriptors");
     }
 
     if (!parser_options->oat_symbols.empty() && oat_fd_ != -1) {
@@ -722,8 +726,8 @@
       Usage("--oat-symbols should not be used with --host");
     }
 
-    if (vdex_fd_ != -1 && !image_filenames_.empty()) {
-      Usage("--vdex-fd should not be used with --image");
+    if (output_vdex_fd_ != -1 && !image_filenames_.empty()) {
+      Usage("--output-vdex-fd should not be used with --image");
     }
 
     if (oat_fd_ != -1 && !image_filenames_.empty()) {
@@ -828,9 +832,8 @@
     // If no instruction set feature was given, use the default one for the target
     // instruction set.
     if (instruction_set_features_.get() == nullptr) {
-      instruction_set_features_.reset(
-          InstructionSetFeatures::FromVariant(
-              instruction_set_, "default", &parser_options->error_msg));
+      instruction_set_features_ = InstructionSetFeatures::FromVariant(
+         instruction_set_, "default", &parser_options->error_msg);
       if (instruction_set_features_.get() == nullptr) {
         Usage("Problem initializing default instruction set features variant: %s",
               parser_options->error_msg.c_str());
@@ -1117,8 +1120,10 @@
         ParseZipFd(option);
       } else if (option.starts_with("--zip-location=")) {
         zip_location_ = option.substr(strlen("--zip-location=")).data();
-      } else if (option.starts_with("--vdex-fd=")) {
-        ParseVdexFd(option);
+      } else if (option.starts_with("--input-vdex-fd=")) {
+        ParseInputVdexFd(option);
+      } else if (option.starts_with("--output-vdex-fd=")) {
+        ParseOutputVdexFd(option);
       } else if (option.starts_with("--oat-file=")) {
         oat_filenames_.push_back(option.substr(strlen("--oat-file=")).data());
       } else if (option.starts_with("--oat-symbols=")) {
@@ -1261,7 +1266,7 @@
         }
         oat_files_.push_back(std::move(oat_file));
 
-        DCHECK_EQ(vdex_fd_, -1);
+        DCHECK_EQ(output_vdex_fd_, -1);
         std::string vdex_filename = ReplaceFileExtension(oat_filename, "vdex");
         std::unique_ptr<File> vdex_file(OS::CreateEmptyFile(vdex_filename.c_str()));
         if (vdex_file.get() == nullptr) {
@@ -1287,9 +1292,9 @@
       }
       oat_files_.push_back(std::move(oat_file));
 
-      DCHECK_NE(vdex_fd_, -1);
+      DCHECK_NE(output_vdex_fd_, -1);
       std::string vdex_location = ReplaceFileExtension(oat_location_, "vdex");
-      std::unique_ptr<File> vdex_file(new File(vdex_fd_, vdex_location, /* check_usage */ true));
+      std::unique_ptr<File> vdex_file(
+          new File(output_vdex_fd_, vdex_location, /* check_usage */ true));
       if (vdex_file.get() == nullptr) {
         PLOG(ERROR) << "Failed to create vdex file: " << vdex_location;
         return false;
@@ -1500,12 +1505,6 @@
 
     dex_files_ = MakeNonOwningPointerVector(opened_dex_files_);
 
-    if (!IsBootImage()) {
-      // Collect verification dependencies when compiling an app.
-      verifier_deps_.reset(new verifier::VerifierDeps(dex_files_));
-      callbacks_->SetVerifierDeps(verifier_deps_.get());
-    }
-
     // We had to postpone the swap decision till now, as this is the point when we actually
     // know about the dex files we're going to use.
 
@@ -1663,7 +1662,7 @@
                                      swap_fd_,
                                      profile_compilation_info_.get()));
     driver_->SetDexFilesForOatFile(dex_files_);
-    driver_->CompileAll(class_loader_, dex_files_, timings_);
+    driver_->CompileAll(class_loader_, dex_files_, /* verifier_deps */ nullptr, timings_);
   }
 
   // Notes on the interleaving of creating the images and oat files to
@@ -1788,13 +1787,13 @@
     {
       TimingLogger::ScopedTiming t2("dex2oat Write VDEX", timings_);
       DCHECK(IsBootImage() || oat_files_.size() == 1u);
-      DCHECK_EQ(IsBootImage(), verifier_deps_ == nullptr);
+      verifier::VerifierDeps* verifier_deps = callbacks_->GetVerifierDeps();
       for (size_t i = 0, size = oat_files_.size(); i != size; ++i) {
         File* vdex_file = vdex_files_[i].get();
         std::unique_ptr<BufferedOutputStream> vdex_out(
             MakeUnique<BufferedOutputStream>(MakeUnique<FileOutputStream>(vdex_file)));
 
-        if (!oat_writers_[i]->WriteVerifierDeps(vdex_out.get(), verifier_deps_.get())) {
+        if (!oat_writers_[i]->WriteVerifierDeps(vdex_out.get(), verifier_deps)) {
           LOG(ERROR) << "Failed to write verifier dependencies into VDEX " << vdex_file->GetPath();
           return false;
         }
@@ -2362,6 +2361,11 @@
       LOG(ERROR) << "Failed to create runtime";
       return false;
     }
+
+    // Runtime::Init will rename this thread to be "main". Prefer "dex2oat" so that "top" and
+    // "ps -a" don't change to non-descript "main."
+    SetThreadName(kIsDebugBuild ? "dex2oatd" : "dex2oat");
+
     runtime_.reset(Runtime::Current());
     runtime_->SetInstructionSet(instruction_set_);
     for (int i = 0; i < Runtime::kLastCalleeSaveType; i++) {
@@ -2585,7 +2589,8 @@
   std::vector<const char*> oat_filenames_;
   std::vector<const char*> oat_unstripped_;
   int oat_fd_;
-  int vdex_fd_;
+  int input_vdex_fd_;
+  int output_vdex_fd_;
   std::vector<const char*> dex_filenames_;
   std::vector<const char*> dex_locations_;
   int zip_fd_;
@@ -2648,9 +2653,6 @@
   std::vector<std::vector<const DexFile*>> dex_files_per_oat_file_;
   std::unordered_map<const DexFile*, size_t> dex_file_oat_index_map_;
 
-  // Collector of verifier dependencies.
-  std::unique_ptr<verifier::VerifierDeps> verifier_deps_;
-
   // Backing storage.
   std::vector<std::string> char_backing_storage_;
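The dex2oat changes above split the old --vdex-fd flag in two: --output-vdex-fd keeps the old pairing rule with --oat-fd, while --input-vdex-fd is a new, optional descriptor parsed as a signed int, so -1 simply means there is no previous vdex to reuse. They also drop the driver-owned verifier_deps_ field; the dependencies collected during verification are now fetched from the compiler callbacks when the VDEX is written. A minimal sketch of the resulting descriptor contract, restating the hunks above rather than adding behavior:

```cpp
// Sketch of the new fd contract. ParseUintOption rejects negative values, which
// is why the output fd keeps it and the input fd switches to ParseIntOption.
const bool fd_output_mode = (oat_fd_ != -1);
if ((output_vdex_fd_ != -1) != fd_output_mode) {
  Usage("--oat-fd and --output-vdex-fd must be passed together");
}
const bool reuse_existing_vdex = (input_vdex_fd_ != -1);  // optional input, may be absent
```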
 
diff --git a/dex2oat/dex2oat_test.cc b/dex2oat/dex2oat_test.cc
index 58dd047..fa32178 100644
--- a/dex2oat/dex2oat_test.cc
+++ b/dex2oat/dex2oat_test.cc
@@ -438,9 +438,7 @@
 
     Copy(GetDexSrc1(), dex_location);
 
-    std::vector<std::string> copy(extra_args);
-
-    GenerateOdexForTest(dex_location, odex_location, filter, copy);
+    GenerateOdexForTest(dex_location, odex_location, filter, extra_args);
 
     CheckValidity();
     ASSERT_TRUE(success_);
diff --git a/dexlayout/dex_visualize.cc b/dexlayout/dex_visualize.cc
index bc9ca6d..7c55659 100644
--- a/dexlayout/dex_visualize.cc
+++ b/dexlayout/dex_visualize.cc
@@ -350,7 +350,8 @@
   const uint32_t class_defs_size = header->GetCollections().ClassDefsSize();
   for (uint32_t class_index = 0; class_index < class_defs_size; class_index++) {
     dex_ir::ClassDef* class_def = header->GetCollections().GetClassDef(class_index);
-    if (profile_info_ != nullptr && !profile_info_->ContainsClass(*dex_file, class_index)) {
+    uint16_t type_idx = class_def->ClassType()->GetIndex();
+    if (profile_info_ != nullptr && !profile_info_->ContainsClass(*dex_file, type_idx)) {
       continue;
     }
     dumper->DumpAddressRange(class_def, class_index);
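The dex_visualize fix above corrects a key mix-up: ContainsClass is keyed by the dex type index of a class, while class_index is only the class's position in the class_defs table, and the two numberings generally differ. A small helper expressing the corrected lookup (illustrative only; the hunk inlines it):

```cpp
// The profile key is the type index recorded in the class_def, not the loop
// counter over the class_defs table.
uint16_t ProfileKeyFor(dex_ir::ClassDef* class_def) {
  return class_def->ClassType()->GetIndex();
}
```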
diff --git a/dexlayout/dexlayout.cc b/dexlayout/dexlayout.cc
index 2b30a1b..aa80655 100644
--- a/dexlayout/dexlayout.cc
+++ b/dexlayout/dexlayout.cc
@@ -1527,7 +1527,7 @@
   // Output dex file.
   if (options_.output_dex_directory_ != nullptr) {
     std::string output_location(options_.output_dex_directory_);
-    size_t last_slash = dex_file->GetLocation().rfind("/");
+    size_t last_slash = dex_file->GetLocation().rfind('/');
     output_location.append(dex_file->GetLocation().substr(last_slash));
     DexWriter::OutputDexFile(*header, output_location.c_str());
   }
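The rfind changes in this file and the following ones are a small cleanup: searching for a single character uses the char overload instead of a one-character string, which states the intent directly and avoids a substring search. For example:

```cpp
#include <string>

std::string location = "/data/app/base.apk";
std::size_t a = location.rfind('/');   // char overload: index of the last separator (9)
std::size_t b = location.rfind("/");   // string overload: same result via substring search
```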
diff --git a/dexlayout/dexlayout_test.cc b/dexlayout/dexlayout_test.cc
index 89544d7..c7f36be 100644
--- a/dexlayout/dexlayout_test.cc
+++ b/dexlayout/dexlayout_test.cc
@@ -37,12 +37,12 @@
   bool FullPlainOutputExec(std::string* error_msg) {
     // TODO: dexdump2 -> dexdump ?
     ScratchFile dexdump_output;
-    std::string dexdump_filename = dexdump_output.GetFilename();
+    const std::string& dexdump_filename = dexdump_output.GetFilename();
     std::string dexdump = GetTestAndroidRoot() + "/bin/dexdump2";
     EXPECT_TRUE(OS::FileExists(dexdump.c_str())) << dexdump << " should be a valid file path";
 
     ScratchFile dexlayout_output;
-    std::string dexlayout_filename = dexlayout_output.GetFilename();
+    const std::string& dexlayout_filename = dexlayout_output.GetFilename();
     std::string dexlayout = GetTestAndroidRoot() + "/bin/dexlayout";
     EXPECT_TRUE(OS::FileExists(dexlayout.c_str())) << dexlayout << " should be a valid file path";
 
@@ -70,8 +70,8 @@
   // Runs DexFileOutput test.
   bool DexFileOutputExec(std::string* error_msg) {
     ScratchFile tmp_file;
-    std::string tmp_name = tmp_file.GetFilename();
-    size_t tmp_last_slash = tmp_name.rfind("/");
+    const std::string& tmp_name = tmp_file.GetFilename();
+    size_t tmp_last_slash = tmp_name.rfind('/');
     std::string tmp_dir = tmp_name.substr(0, tmp_last_slash + 1);
     std::string dexlayout = GetTestAndroidRoot() + "/bin/dexlayout";
     EXPECT_TRUE(OS::FileExists(dexlayout.c_str())) << dexlayout << " should be a valid file path";
@@ -84,7 +84,7 @@
         return false;
       }
 
-      size_t dex_file_last_slash = dex_file.rfind("/");
+      size_t dex_file_last_slash = dex_file.rfind('/');
       std::string dex_file_name = dex_file.substr(dex_file_last_slash + 1);
       std::vector<std::string> unzip_exec_argv =
           { "/usr/bin/unzip", dex_file, "classes.dex", "-d", tmp_dir};
diff --git a/imgdiag/imgdiag.cc b/imgdiag/imgdiag.cc
index f197fc1..a374686 100644
--- a/imgdiag/imgdiag.cc
+++ b/imgdiag/imgdiag.cc
@@ -89,7 +89,7 @@
 
   // Return suffix of the file path after the last /. (e.g. /foo/bar -> bar, bar -> bar)
   static std::string BaseName(const std::string& str) {
-    size_t idx = str.rfind("/");
+    size_t idx = str.rfind('/');
     if (idx == std::string::npos) {
       return str;
     }
@@ -516,8 +516,8 @@
 
       // Sanity check that we are reading a real object
       CHECK(obj->GetClass() != nullptr) << "Image object at address " << obj << " has null class";
-      if (kUseBakerOrBrooksReadBarrier) {
-        obj->AssertReadBarrierPointer();
+      if (kUseBakerReadBarrier) {
+        obj->AssertReadBarrierState();
       }
 
       // Iterate every page this object belongs to
diff --git a/oatdump/oatdump.cc b/oatdump/oatdump.cc
index 4e81d50..4c01c14 100644
--- a/oatdump/oatdump.cc
+++ b/oatdump/oatdump.cc
@@ -66,6 +66,7 @@
 #include "type_lookup_table.h"
 #include "vdex_file.h"
 #include "verifier/method_verifier.h"
+#include "verifier/verifier_deps.h"
 #include "well_known_classes.h"
 
 #include <sys/stat.h>
@@ -115,13 +116,13 @@
 
   bool Symbolize() {
     const InstructionSet isa = oat_file_->GetOatHeader().GetInstructionSet();
-    const InstructionSetFeatures* features = InstructionSetFeatures::FromBitmap(
+    std::unique_ptr<const InstructionSetFeatures> features = InstructionSetFeatures::FromBitmap(
         isa, oat_file_->GetOatHeader().GetInstructionSetFeaturesBitmap());
 
     File* elf_file = OS::CreateEmptyFile(output_name_.c_str());
     std::unique_ptr<BufferedOutputStream> output_stream(
         MakeUnique<BufferedOutputStream>(MakeUnique<FileOutputStream>(elf_file)));
-    builder_.reset(new ElfBuilder<ElfTypes>(isa, features, output_stream.get()));
+    builder_.reset(new ElfBuilder<ElfTypes>(isa, features.get(), output_stream.get()));
 
     builder_->Start();
 
@@ -483,6 +484,28 @@
     os << "\n";
 
     if (!options_.dump_header_only_) {
+      VariableIndentationOutputStream vios(&os);
+      VdexFile::Header vdex_header = oat_file_.GetVdexFile()->GetHeader();
+      if (vdex_header.IsValid()) {
+        std::string error_msg;
+        std::vector<const DexFile*> dex_files;
+        for (size_t i = 0; i < oat_dex_files_.size(); i++) {
+          const DexFile* dex_file = OpenDexFile(oat_dex_files_[i], &error_msg);
+          if (dex_file == nullptr) {
+            os << "Error opening dex file: " << error_msg << std::endl;
+            return false;
+          }
+          dex_files.push_back(dex_file);
+        }
+        verifier::VerifierDeps deps(dex_files, oat_file_.GetVdexFile()->GetVerifierDepsData());
+        deps.Dump(&vios);
+      } else {
+        os << "UNRECOGNIZED vdex file, magic "
+           << vdex_header.GetMagic()
+           << ", version "
+           << vdex_header.GetVersion()
+           << "\n";
+      }
       for (size_t i = 0; i < oat_dex_files_.size(); i++) {
         const OatFile::OatDexFile* oat_dex_file = oat_dex_files_[i];
         CHECK(oat_dex_file != nullptr);
@@ -2760,7 +2783,7 @@
 
     bool result = klass->GetImt(pointer_size) == object_class->GetImt(pointer_size);
 
-    if (klass->GetIfTable() == nullptr) {
+    if (klass->GetIfTable()->Count() == 0) {
       DCHECK(result);
     }
 
@@ -2866,25 +2889,23 @@
     std::cerr << " Interfaces:" << std::endl;
     // Run through iftable, find methods that slot here, see if they fit.
     mirror::IfTable* if_table = klass->GetIfTable();
-    if (if_table != nullptr) {
-      for (size_t i = 0, num_interfaces = klass->GetIfTableCount(); i < num_interfaces; ++i) {
-        mirror::Class* iface = if_table->GetInterface(i);
-        std::string iface_name;
-        std::cerr << "  " << iface->GetDescriptor(&iface_name) << std::endl;
+    for (size_t i = 0, num_interfaces = klass->GetIfTableCount(); i < num_interfaces; ++i) {
+      mirror::Class* iface = if_table->GetInterface(i);
+      std::string iface_name;
+      std::cerr << "  " << iface->GetDescriptor(&iface_name) << std::endl;
 
-        for (ArtMethod& iface_method : iface->GetVirtualMethods(pointer_size)) {
-          uint32_t class_hash, name_hash, signature_hash;
-          ImTable::GetImtHashComponents(&iface_method, &class_hash, &name_hash, &signature_hash);
-          uint32_t imt_slot = ImTable::GetImtIndex(&iface_method);
-          std::cerr << "    " << iface_method.PrettyMethod(true)
-              << " slot=" << imt_slot
-              << std::hex
-              << " class_hash=0x" << class_hash
-              << " name_hash=0x" << name_hash
-              << " signature_hash=0x" << signature_hash
-              << std::dec
-              << std::endl;
-        }
+      for (ArtMethod& iface_method : iface->GetVirtualMethods(pointer_size)) {
+        uint32_t class_hash, name_hash, signature_hash;
+        ImTable::GetImtHashComponents(&iface_method, &class_hash, &name_hash, &signature_hash);
+        uint32_t imt_slot = ImTable::GetImtIndex(&iface_method);
+        std::cerr << "    " << iface_method.PrettyMethod(true)
+            << " slot=" << imt_slot
+            << std::hex
+            << " class_hash=0x" << class_hash
+            << " name_hash=0x" << name_hash
+            << " signature_hash=0x" << signature_hash
+            << std::dec
+            << std::endl;
       }
     }
   }
@@ -2949,18 +2970,16 @@
         } else {
           // Run through iftable, find methods that slot here, see if they fit.
           mirror::IfTable* if_table = klass->GetIfTable();
-          if (if_table != nullptr) {
-            for (size_t i = 0, num_interfaces = klass->GetIfTableCount(); i < num_interfaces; ++i) {
-              mirror::Class* iface = if_table->GetInterface(i);
-              size_t num_methods = iface->NumDeclaredVirtualMethods();
-              if (num_methods > 0) {
-                for (ArtMethod& iface_method : iface->GetMethods(pointer_size)) {
-                  if (ImTable::GetImtIndex(&iface_method) == index) {
-                    std::string i_name = iface_method.PrettyMethod(true);
-                    if (StartsWith(i_name, method.c_str())) {
-                      std::cerr << "  Slot " << index << " (1)" << std::endl;
-                      std::cerr << "    " << p_name << " (" << i_name << ")" << std::endl;
-                    }
+          for (size_t i = 0, num_interfaces = klass->GetIfTableCount(); i < num_interfaces; ++i) {
+            mirror::Class* iface = if_table->GetInterface(i);
+            size_t num_methods = iface->NumDeclaredVirtualMethods();
+            if (num_methods > 0) {
+              for (ArtMethod& iface_method : iface->GetMethods(pointer_size)) {
+                if (ImTable::GetImtIndex(&iface_method) == index) {
+                  std::string i_name = iface_method.PrettyMethod(true);
+                  if (StartsWith(i_name, method.c_str())) {
+                    std::cerr << "  Slot " << index << " (1)" << std::endl;
+                    std::cerr << "    " << p_name << " (" << i_name << ")" << std::endl;
                   }
                 }
               }
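Two independent things happen in the oatdump hunks: the VDEX header is now inspected and, when valid, the embedded VerifierDeps section is decoded against the oat file's dex files and dumped; and the null checks on Class::GetIfTable() are gone, reflecting that "no interfaces" is represented by an empty table rather than a null pointer (an invariant these hunks rely on rather than introduce). The iteration pattern the dump code now assumes:

```cpp
// Sketch of the invariant behind the removed null checks: GetIfTable() is
// expected to be non-null, and "no interfaces" is simply a count of zero.
mirror::IfTable* if_table = klass->GetIfTable();
DCHECK(if_table != nullptr);
for (size_t i = 0, n = klass->GetIfTableCount(); i < n; ++i) {
  mirror::Class* iface = if_table->GetInterface(i);
  // ... per-interface dumping, with no null check needed ...
}
```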
diff --git a/patchoat/patchoat.cc b/patchoat/patchoat.cc
index 986f265..db28a3f 100644
--- a/patchoat/patchoat.cc
+++ b/patchoat/patchoat.cc
@@ -498,7 +498,7 @@
   return true;
 }
 
-class PatchOatArtFieldVisitor : public ArtFieldVisitor {
+class PatchOat::PatchOatArtFieldVisitor : public ArtFieldVisitor {
  public:
   explicit PatchOatArtFieldVisitor(PatchOat* patch_oat) : patch_oat_(patch_oat) {}
 
@@ -517,7 +517,7 @@
   image_header->VisitPackedArtFields(&visitor, heap_->Begin());
 }
 
-class PatchOatArtMethodVisitor : public ArtMethodVisitor {
+class PatchOat::PatchOatArtMethodVisitor : public ArtMethodVisitor {
  public:
   explicit PatchOatArtMethodVisitor(PatchOat* patch_oat) : patch_oat_(patch_oat) {}
 
@@ -558,7 +558,7 @@
       pointer_size);
 }
 
-class FixupRootVisitor : public RootVisitor {
+class PatchOat::FixupRootVisitor : public RootVisitor {
  public:
   explicit FixupRootVisitor(const PatchOat* patch_oat) : patch_oat_(patch_oat) {
   }
@@ -610,7 +610,7 @@
 }
 
 
-class RelocatedPointerVisitor {
+class PatchOat::RelocatedPointerVisitor {
  public:
   explicit RelocatedPointerVisitor(PatchOat* patch_oat) : patch_oat_(patch_oat) {}
 
@@ -747,13 +747,8 @@
 void PatchOat::VisitObject(mirror::Object* object) {
   mirror::Object* copy = RelocatedCopyOf(object);
   CHECK(copy != nullptr);
-  if (kUseBakerOrBrooksReadBarrier) {
-    object->AssertReadBarrierPointer();
-    if (kUseBrooksReadBarrier) {
-      mirror::Object* moved_to = RelocatedAddressOfPointer(object);
-      copy->SetReadBarrierPointer(moved_to);
-      DCHECK_EQ(copy->GetReadBarrierPointer(), moved_to);
-    }
+  if (kUseBakerReadBarrier) {
+    object->AssertReadBarrierState();
   }
   PatchOat::PatchVisitor visitor(this, copy);
   object->VisitReferences<kVerifyNone>(visitor, visitor);
@@ -767,16 +762,14 @@
     if (vtable != nullptr) {
       vtable->Fixup(RelocatedCopyOfFollowImages(vtable), pointer_size, native_visitor);
     }
-    auto* iftable = klass->GetIfTable();
-    if (iftable != nullptr) {
-      for (int32_t i = 0; i < klass->GetIfTableCount(); ++i) {
-        if (iftable->GetMethodArrayCount(i) > 0) {
-          auto* method_array = iftable->GetMethodArray(i);
-          CHECK(method_array != nullptr);
-          method_array->Fixup(RelocatedCopyOfFollowImages(method_array),
-                              pointer_size,
-                              native_visitor);
-        }
+    mirror::IfTable* iftable = klass->GetIfTable();
+    for (int32_t i = 0; i < klass->GetIfTableCount(); ++i) {
+      if (iftable->GetMethodArrayCount(i) > 0) {
+        auto* method_array = iftable->GetMethodArray(i);
+        CHECK(method_array != nullptr);
+        method_array->Fixup(RelocatedCopyOfFollowImages(method_array),
+                            pointer_size,
+                            native_visitor);
       }
     }
   } else if (object->GetClass() == mirror::Method::StaticClass() ||
@@ -1073,7 +1066,7 @@
   TimingLogger::ScopedTiming pt("patch image and oat", &timings);
 
   std::string output_directory =
-      output_image_filename.substr(0, output_image_filename.find_last_of("/"));
+      output_image_filename.substr(0, output_image_filename.find_last_of('/'));
   bool ret = PatchOat::Patch(input_image_location, base_delta, output_directory, isa, &timings);
 
   if (kIsDebugBuild) {
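The relocation changes in patchoat mirror the read-barrier simplification seen in imgdiag: with the Brooks configuration gone, objects no longer carry an embedded forwarding pointer that relocation would have to rewrite, so VisitObject only asserts the Baker read-barrier state of the source object, and the iftable fix-up loop no longer guards against a null table. Roughly, restating the hunk under the Baker-only assumption:

```cpp
// Relocation no longer rewrites a per-object read-barrier pointer; only a
// consistency check on the source object remains.
if (kUseBakerReadBarrier) {
  object->AssertReadBarrierState();
}
```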
diff --git a/patchoat/patchoat.h b/patchoat/patchoat.h
index e7a3e91..a519631 100644
--- a/patchoat/patchoat.h
+++ b/patchoat/patchoat.h
@@ -230,10 +230,11 @@
 
   TimingLogger* timings_;
 
-  friend class FixupRootVisitor;
-  friend class RelocatedPointerVisitor;
-  friend class PatchOatArtFieldVisitor;
-  friend class PatchOatArtMethodVisitor;
+  class FixupRootVisitor;
+  class RelocatedPointerVisitor;
+  class PatchOatArtFieldVisitor;
+  class PatchOatArtMethodVisitor;
+
   DISALLOW_IMPLICIT_CONSTRUCTORS(PatchOat);
 };
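In the header, the visitor helpers become private nested classes of PatchOat instead of free classes befriended one by one; the definitions in the .cc file are simply qualified (class PatchOat::FixupRootVisitor, and so on). A generic illustration of the pattern, not the ART types:

```cpp
#include <cstdint>

class Patcher {
 public:
  void Run();

 private:
  uintptr_t delta_ = 0;

  // Nested helpers see Patcher's private members without one friend
  // declaration per helper, and they stay out of the enclosing namespace.
  class FieldVisitor;
  class MethodVisitor;
};

// In the .cc file:
// class Patcher::FieldVisitor { ... };
```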
 
diff --git a/profman/profman.cc b/profman/profman.cc
index b17816b..bfef834 100644
--- a/profman/profman.cc
+++ b/profman/profman.cc
@@ -354,7 +354,7 @@
   }
 
   int GenerateTestProfile() {
-    int profile_test_fd = open(test_profile_.c_str(), O_CREAT | O_TRUNC | O_WRONLY);
+    int profile_test_fd = open(test_profile_.c_str(), O_CREAT | O_TRUNC | O_WRONLY, 0644);
     if (profile_test_fd < 0) {
       std::cerr << "Cannot open " << test_profile_ << strerror(errno);
       return -1;
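The profman change fixes an easy-to-miss open(2) pitfall: with O_CREAT, open reads a third mode argument from its varargs, and omitting it leaves the created file's permission bits undefined. Supplying an explicit mode gives the test profile predictable permissions:

```cpp
#include <fcntl.h>

// 0644 = rw-r--r--; without the third argument the kernel would apply whatever
// value happened to sit in the vararg slot (further masked by the umask).
int OpenTestProfile(const char* path) {
  return open(path, O_CREAT | O_TRUNC | O_WRONLY, 0644);
}
```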
diff --git a/runtime/Android.bp b/runtime/Android.bp
index 6945eb0..c6f479f 100644
--- a/runtime/Android.bp
+++ b/runtime/Android.bp
@@ -120,9 +120,12 @@
         "linear_alloc.cc",
         "mem_map.cc",
         "memory_region.cc",
+        "method_handles.cc",
         "mirror/array.cc",
         "mirror/class.cc",
+        "mirror/class_ext.cc",
         "mirror/dex_cache.cc",
+        "mirror/emulated_stack_frame.cc",
         "mirror/executable.cc",
         "mirror/field.cc",
         "mirror/method.cc",
@@ -403,7 +406,8 @@
 
 gensrcs {
     name: "art_operator_srcs",
-    cmd: "art/tools/generate-operator-out.py art/runtime $in > $out",
+    cmd: "$(location generate-operator-out.py) art/runtime $(in) > $(out)",
+    tool_files: ["generate-operator-out.py"],
     srcs: [
         "arch/instruction_set.h",
         "base/allocator.h",
@@ -565,7 +569,6 @@
         "utils_test.cc",
         "verifier/method_verifier_test.cc",
         "verifier/reg_type_test.cc",
-        "verifier/verifier_deps_test.cc",
         "zip_archive_test.cc",
     ],
     shared_libs: [
diff --git a/runtime/arch/arm/entrypoints_init_arm.cc b/runtime/arch/arm/entrypoints_init_arm.cc
index cb8edff..de72d3a 100644
--- a/runtime/arch/arm/entrypoints_init_arm.cc
+++ b/runtime/arch/arm/entrypoints_init_arm.cc
@@ -30,8 +30,7 @@
 namespace art {
 
 // Cast entrypoints.
-extern "C" size_t artIsAssignableFromCode(const mirror::Class* klass,
-                                          const mirror::Class* ref_class);
+extern "C" size_t artInstanceOfFromCode(mirror::Object* obj, mirror::Class* ref_class);
 
 // Read barrier entrypoints.
 // art_quick_read_barrier_mark_regX uses a non-standard calling
@@ -68,12 +67,27 @@
 // Long long arithmetics - REM_LONG[_2ADDR] and DIV_LONG[_2ADDR]
 extern "C" int64_t __aeabi_ldivmod(int64_t, int64_t);
 
+void UpdateReadBarrierEntrypoints(QuickEntryPoints* qpoints, bool is_marking) {
+  qpoints->pReadBarrierMarkReg00 = is_marking ? art_quick_read_barrier_mark_reg00 : nullptr;
+  qpoints->pReadBarrierMarkReg01 = is_marking ? art_quick_read_barrier_mark_reg01 : nullptr;
+  qpoints->pReadBarrierMarkReg02 = is_marking ? art_quick_read_barrier_mark_reg02 : nullptr;
+  qpoints->pReadBarrierMarkReg03 = is_marking ? art_quick_read_barrier_mark_reg03 : nullptr;
+  qpoints->pReadBarrierMarkReg04 = is_marking ? art_quick_read_barrier_mark_reg04 : nullptr;
+  qpoints->pReadBarrierMarkReg05 = is_marking ? art_quick_read_barrier_mark_reg05 : nullptr;
+  qpoints->pReadBarrierMarkReg06 = is_marking ? art_quick_read_barrier_mark_reg06 : nullptr;
+  qpoints->pReadBarrierMarkReg07 = is_marking ? art_quick_read_barrier_mark_reg07 : nullptr;
+  qpoints->pReadBarrierMarkReg08 = is_marking ? art_quick_read_barrier_mark_reg08 : nullptr;
+  qpoints->pReadBarrierMarkReg09 = is_marking ? art_quick_read_barrier_mark_reg09 : nullptr;
+  qpoints->pReadBarrierMarkReg10 = is_marking ? art_quick_read_barrier_mark_reg10 : nullptr;
+  qpoints->pReadBarrierMarkReg11 = is_marking ? art_quick_read_barrier_mark_reg11 : nullptr;
+}
+
 void InitEntryPoints(JniEntryPoints* jpoints, QuickEntryPoints* qpoints) {
   DefaultInitEntryPoints(jpoints, qpoints);
 
   // Cast
-  qpoints->pInstanceofNonTrivial = artIsAssignableFromCode;
-  qpoints->pCheckCast = art_quick_check_cast;
+  qpoints->pInstanceofNonTrivial = artInstanceOfFromCode;
+  qpoints->pCheckInstanceOf = art_quick_check_instance_of;
 
   // Math
   qpoints->pIdivmod = __aeabi_idivmod;
@@ -124,18 +138,7 @@
 
   // Read barrier.
   qpoints->pReadBarrierJni = ReadBarrierJni;
-  qpoints->pReadBarrierMarkReg00 = art_quick_read_barrier_mark_reg00;
-  qpoints->pReadBarrierMarkReg01 = art_quick_read_barrier_mark_reg01;
-  qpoints->pReadBarrierMarkReg02 = art_quick_read_barrier_mark_reg02;
-  qpoints->pReadBarrierMarkReg03 = art_quick_read_barrier_mark_reg03;
-  qpoints->pReadBarrierMarkReg04 = art_quick_read_barrier_mark_reg04;
-  qpoints->pReadBarrierMarkReg05 = art_quick_read_barrier_mark_reg05;
-  qpoints->pReadBarrierMarkReg06 = art_quick_read_barrier_mark_reg06;
-  qpoints->pReadBarrierMarkReg07 = art_quick_read_barrier_mark_reg07;
-  qpoints->pReadBarrierMarkReg08 = art_quick_read_barrier_mark_reg08;
-  qpoints->pReadBarrierMarkReg09 = art_quick_read_barrier_mark_reg09;
-  qpoints->pReadBarrierMarkReg10 = art_quick_read_barrier_mark_reg10;
-  qpoints->pReadBarrierMarkReg11 = art_quick_read_barrier_mark_reg11;
+  UpdateReadBarrierEntrypoints(qpoints, /*is_marking*/ false);
   qpoints->pReadBarrierMarkReg12 = nullptr;  // Cannot use register 12 (IP) to pass arguments.
   qpoints->pReadBarrierMarkReg13 = nullptr;  // Cannot use register 13 (SP) to pass arguments.
   qpoints->pReadBarrierMarkReg14 = nullptr;  // Cannot use register 14 (LR) to pass arguments.
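Two entrypoint changes land here. The cast entrypoint now receives the object rather than its class (artInstanceOfFromCode, paired with artThrowClassCastExceptionForObject in the assembly below), so the slow path can report the failing object's actual type. And the per-register ReadBarrierMark entrypoints are funnelled through UpdateReadBarrierEntrypoints(qpoints, is_marking), which installs the stubs while marking and clears them otherwise; InitEntryPoints starts everything out with is_marking set to false. A hedged sketch of the call this helper is presumably meant to serve elsewhere in the runtime (the call site is not part of this patch):

```cpp
// Assumed usage: invoked by the collector when its marking phase starts or
// stops, so the mark stubs are installed only while they can do useful work.
void OnGcMarkingPhaseChanged(QuickEntryPoints* qpoints, bool is_marking) {
  UpdateReadBarrierEntrypoints(qpoints, is_marking);
}
```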
diff --git a/runtime/arch/arm/instruction_set_features_arm.cc b/runtime/arch/arm/instruction_set_features_arm.cc
index c3a5829..c81a93c 100644
--- a/runtime/arch/arm/instruction_set_features_arm.cc
+++ b/runtime/arch/arm/instruction_set_features_arm.cc
@@ -33,7 +33,7 @@
 
 namespace art {
 
-const ArmInstructionSetFeatures* ArmInstructionSetFeatures::FromVariant(
+ArmFeaturesUniquePtr ArmInstructionSetFeatures::FromVariant(
     const std::string& variant, std::string* error_msg) {
   // Assume all ARM processors are SMP.
   // TODO: set the SMP support based on variant.
@@ -69,7 +69,7 @@
     if (FindVariantInArray(unsupported_arm_variants, arraysize(unsupported_arm_variants),
                            variant)) {
       *error_msg = StringPrintf("Attempt to use unsupported ARM variant: %s", variant.c_str());
-      return nullptr;
+      return ArmFeaturesUniquePtr();
     }
     // Warn if the variant is unknown.
     // TODO: some of the variants below may have feature support, but that support is currently
@@ -97,17 +97,17 @@
           << ") using conservative defaults";
     }
   }
-  return new ArmInstructionSetFeatures(smp, has_div, has_lpae);
+  return ArmFeaturesUniquePtr(new ArmInstructionSetFeatures(smp, has_div, has_lpae));
 }
 
-const ArmInstructionSetFeatures* ArmInstructionSetFeatures::FromBitmap(uint32_t bitmap) {
+ArmFeaturesUniquePtr ArmInstructionSetFeatures::FromBitmap(uint32_t bitmap) {
   bool smp = (bitmap & kSmpBitfield) != 0;
   bool has_div = (bitmap & kDivBitfield) != 0;
   bool has_atomic_ldrd_strd = (bitmap & kAtomicLdrdStrdBitfield) != 0;
-  return new ArmInstructionSetFeatures(smp, has_div, has_atomic_ldrd_strd);
+  return ArmFeaturesUniquePtr(new ArmInstructionSetFeatures(smp, has_div, has_atomic_ldrd_strd));
 }
 
-const ArmInstructionSetFeatures* ArmInstructionSetFeatures::FromCppDefines() {
+ArmFeaturesUniquePtr ArmInstructionSetFeatures::FromCppDefines() {
   const bool smp = true;
 #if defined(__ARM_ARCH_EXT_IDIV__)
   const bool has_div = true;
@@ -119,10 +119,10 @@
 #else
   const bool has_lpae = false;
 #endif
-  return new ArmInstructionSetFeatures(smp, has_div, has_lpae);
+  return ArmFeaturesUniquePtr(new ArmInstructionSetFeatures(smp, has_div, has_lpae));
 }
 
-const ArmInstructionSetFeatures* ArmInstructionSetFeatures::FromCpuInfo() {
+ArmFeaturesUniquePtr ArmInstructionSetFeatures::FromCpuInfo() {
   // Look in /proc/cpuinfo for features we need.  Only use this when we can guarantee that
   // the kernel puts the appropriate feature flags in here.  Sometimes it doesn't.
   bool smp = false;
@@ -157,10 +157,10 @@
   } else {
     LOG(ERROR) << "Failed to open /proc/cpuinfo";
   }
-  return new ArmInstructionSetFeatures(smp, has_div, has_lpae);
+  return ArmFeaturesUniquePtr(new ArmInstructionSetFeatures(smp, has_div, has_lpae));
 }
 
-const ArmInstructionSetFeatures* ArmInstructionSetFeatures::FromHwcap() {
+ArmFeaturesUniquePtr ArmInstructionSetFeatures::FromHwcap() {
   bool smp = sysconf(_SC_NPROCESSORS_CONF) > 1;
 
   bool has_div = false;
@@ -180,7 +180,7 @@
   }
 #endif
 
-  return new ArmInstructionSetFeatures(smp, has_div, has_lpae);
+  return ArmFeaturesUniquePtr(new ArmInstructionSetFeatures(smp, has_div, has_lpae));
 }
 
 // A signal handler called by a fault for an illegal instruction.  We record the fact in r0
@@ -198,7 +198,7 @@
 #endif
 }
 
-const ArmInstructionSetFeatures* ArmInstructionSetFeatures::FromAssembly() {
+ArmFeaturesUniquePtr ArmInstructionSetFeatures::FromAssembly() {
   const bool smp = true;
 
   // See if have a sdiv instruction.  Register a signal handler and try to execute an sdiv
@@ -226,7 +226,7 @@
 #else
   const bool has_lpae = false;
 #endif
-  return new ArmInstructionSetFeatures(smp, has_div, has_lpae);
+  return ArmFeaturesUniquePtr(new ArmInstructionSetFeatures(smp, has_div, has_lpae));
 }
 
 bool ArmInstructionSetFeatures::Equals(const InstructionSetFeatures* other) const {
@@ -265,7 +265,8 @@
   return result;
 }
 
-const InstructionSetFeatures* ArmInstructionSetFeatures::AddFeaturesFromSplitString(
+std::unique_ptr<const InstructionSetFeatures>
+ArmInstructionSetFeatures::AddFeaturesFromSplitString(
     const bool smp, const std::vector<std::string>& features, std::string* error_msg) const {
   bool has_atomic_ldrd_strd = has_atomic_ldrd_strd_;
   bool has_div = has_div_;
@@ -284,7 +285,8 @@
       return nullptr;
     }
   }
-  return new ArmInstructionSetFeatures(smp, has_div, has_atomic_ldrd_strd);
+  return std::unique_ptr<const InstructionSetFeatures>(
+      new ArmInstructionSetFeatures(smp, has_div, has_atomic_ldrd_strd));
 }
 
 }  // namespace art
diff --git a/runtime/arch/arm/instruction_set_features_arm.h b/runtime/arch/arm/instruction_set_features_arm.h
index 221bf1f..204d1d7 100644
--- a/runtime/arch/arm/instruction_set_features_arm.h
+++ b/runtime/arch/arm/instruction_set_features_arm.h
@@ -21,29 +21,31 @@
 
 namespace art {
 
+class ArmInstructionSetFeatures;
+using ArmFeaturesUniquePtr = std::unique_ptr<const ArmInstructionSetFeatures>;
+
 // Instruction set features relevant to the ARM architecture.
 class ArmInstructionSetFeatures FINAL : public InstructionSetFeatures {
  public:
   // Process a CPU variant string like "krait" or "cortex-a15" and create InstructionSetFeatures.
-  static const ArmInstructionSetFeatures* FromVariant(const std::string& variant,
-                                                      std::string* error_msg);
+  static ArmFeaturesUniquePtr FromVariant(const std::string& variant, std::string* error_msg);
 
   // Parse a bitmap and create an InstructionSetFeatures.
-  static const ArmInstructionSetFeatures* FromBitmap(uint32_t bitmap);
+  static ArmFeaturesUniquePtr FromBitmap(uint32_t bitmap);
 
   // Turn C pre-processor #defines into the equivalent instruction set features.
-  static const ArmInstructionSetFeatures* FromCppDefines();
+  static ArmFeaturesUniquePtr FromCppDefines();
 
   // Process /proc/cpuinfo and use kRuntimeISA to produce InstructionSetFeatures.
-  static const ArmInstructionSetFeatures* FromCpuInfo();
+  static ArmFeaturesUniquePtr FromCpuInfo();
 
   // Process the auxiliary vector AT_HWCAP entry and use kRuntimeISA to produce
   // InstructionSetFeatures.
-  static const ArmInstructionSetFeatures* FromHwcap();
+  static ArmFeaturesUniquePtr FromHwcap();
 
   // Use assembly tests of the current runtime (ie kRuntimeISA) to determine the
   // InstructionSetFeatures. This works around kernel bugs in AT_HWCAP and /proc/cpuinfo.
-  static const ArmInstructionSetFeatures* FromAssembly();
+  static ArmFeaturesUniquePtr FromAssembly();
 
   bool Equals(const InstructionSetFeatures* other) const OVERRIDE;
 
@@ -71,7 +73,7 @@
 
  protected:
   // Parse a vector of the form "div", "lpae" adding these to a new ArmInstructionSetFeatures.
-  const InstructionSetFeatures*
+  std::unique_ptr<const InstructionSetFeatures>
       AddFeaturesFromSplitString(const bool smp, const std::vector<std::string>& features,
                                  std::string* error_msg) const OVERRIDE;
 
diff --git a/runtime/arch/arm/quick_entrypoints_arm.S b/runtime/arch/arm/quick_entrypoints_arm.S
index bf70c55..3a83eaf 100644
--- a/runtime/arch/arm/quick_entrypoints_arm.S
+++ b/runtime/arch/arm/quick_entrypoints_arm.S
@@ -764,11 +764,12 @@
 END art_quick_unlock_object_no_inline
 
     /*
-     * Entry from managed code that calls artIsAssignableFromCode and on failure calls
-     * artThrowClassCastException.
+     * Entry from managed code that calls artInstanceOfFromCode and on failure calls
+     * artThrowClassCastExceptionForObject.
      */
-    .extern artThrowClassCastException
-ENTRY art_quick_check_cast
+    .extern artInstanceOfFromCode
+    .extern artThrowClassCastExceptionForObject
+ENTRY art_quick_check_instance_of
     push {r0-r1, lr}                    @ save arguments, link register and pad
     .cfi_adjust_cfa_offset 12
     .cfi_rel_offset r0, 0
@@ -776,7 +777,7 @@
     .cfi_rel_offset lr, 8
     sub sp, #4
     .cfi_adjust_cfa_offset 4
-    bl artIsAssignableFromCode
+    bl artInstanceOfFromCode
     cbz    r0, .Lthrow_class_cast_exception
     add sp, #4
     .cfi_adjust_cfa_offset -4
@@ -792,9 +793,9 @@
     .cfi_restore lr
     SETUP_SAVE_ALL_CALLEE_SAVES_FRAME r2       @ save all registers as basis for long jump context
     mov r2, r9                      @ pass Thread::Current
-    bl  artThrowClassCastException  @ (Class*, Class*, Thread*)
+    bl  artThrowClassCastExceptionForObject  @ (Object*, Class*, Thread*)
     bkpt
-END art_quick_check_cast
+END art_quick_check_instance_of
 
 // Restore rReg's value from [sp, #offset] if rReg is not the same as rExclude.
 .macro POP_REG_NE rReg, offset, rExclude
@@ -1768,12 +1769,15 @@
     .cfi_rel_offset r10, 4
     .cfi_rel_offset r11, 8
     .cfi_rel_offset lr, 12
+#if (STRING_COMPRESSION_FEATURE)
+    ldr   r4, [r0, #MIRROR_STRING_COUNT_OFFSET]
+#else
     ldr   r3, [r0, #MIRROR_STRING_COUNT_OFFSET]
+#endif
     add   r0, #MIRROR_STRING_VALUE_OFFSET
 #if (STRING_COMPRESSION_FEATURE)
     /* r4 count (with flag) and r3 holds actual length */
-    mov   r4, r3
-    bic   r3, #2147483648
+    lsr   r3, r4, #1
 #endif
     /* Clamp start to [0..count] */
     cmp   r2, #0
@@ -1788,8 +1792,8 @@
 
     /* Build pointer to start of data to compare and pre-bias */
 #if (STRING_COMPRESSION_FEATURE)
-    cmp   r4, #0
-    blt   .Lstring_indexof_compressed
+    lsrs  r4, r4, #1
+    bcc   .Lstring_indexof_compressed
 #endif
     add   r0, r0, r2, lsl #1
     sub   r0, #2
@@ -1999,11 +2003,17 @@
     // Check lock word for mark bit, if marked return. Use IP for scratch since it is blocked.
     ldr ip, [\reg, MIRROR_OBJECT_LOCK_WORD_OFFSET]
     tst ip, #LOCK_WORD_MARK_BIT_MASK_SHIFTED
-    beq .Lslow_rb_\name
+    beq .Lnot_marked_rb_\name
     // Already marked, return right away.
 .Lret_rb_\name:
     bx lr
 
+.Lnot_marked_rb_\name:
+    // Test that both the forwarding state bits are 1.
+    mvn ip, ip
+    tst ip, #(LOCK_WORD_STATE_FORWARDING_ADDRESS << LOCK_WORD_STATE_SHIFT)
+    beq .Lret_forwarding_address\name
+
 .Lslow_rb_\name:
     // Save IP: the kSaveEverything entrypoint art_quick_resolve_string makes a tail call here.
     push  {r0-r4, r9, ip, lr}           @ save return address, core caller-save registers and ip
@@ -2064,6 +2074,12 @@
     .cfi_restore ip
     .cfi_restore lr
     bx lr
+.Lret_forwarding_address\name:
+    // Shift left by the forwarding address shift. This clears out the state bits since they are
+    // in the top 2 bits of the lock word.
+    mvn ip, ip
+    lsl \reg, ip, #LOCK_WORD_STATE_FORWARDING_ADDRESS_SHIFT
+    bx lr
 END \name
 .endm
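The assembly changes add a second early exit to the ReadBarrierMark stubs: besides returning immediately for an already-marked object, they now detect a lock word whose two state bits are both set, meaning the object has already been copied and the remaining bits encode its forwarding address, so the stub can return the to-space address without the slow-path runtime call. (The String.indexOf change in the same file is related bookkeeping for string compression: the flag now lives in the low bit of the count field, so a one-bit right shift yields the real length and a carry-clear result selects the compressed path.) A self-contained sketch of the forwarding-address decode; the constant values are assumptions standing in for the generated asm_support definitions:

```cpp
#include <cstdint>

constexpr uint32_t kStateShift = 30;                // lock word state sits in the top 2 bits
constexpr uint32_t kStateForwardingAddress = 0b11;  // both state bits set => forwarded
constexpr uint32_t kForwardingAddressShift = 2;     // assumed; shifting left drops the state bits

inline bool HasForwardingAddress(uint32_t lock_word) {
  return (lock_word >> kStateShift) == kStateForwardingAddress;
}

inline uintptr_t ForwardingAddress(uint32_t lock_word) {
  // As the stub's comment notes, the left shift clears the state bits and
  // reconstitutes the aligned address of the to-space copy.
  return static_cast<uintptr_t>(lock_word) << kForwardingAddressShift;
}
```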
 
diff --git a/runtime/arch/arm64/entrypoints_init_arm64.cc b/runtime/arch/arm64/entrypoints_init_arm64.cc
index c2078f0..6add107 100644
--- a/runtime/arch/arm64/entrypoints_init_arm64.cc
+++ b/runtime/arch/arm64/entrypoints_init_arm64.cc
@@ -30,8 +30,7 @@
 namespace art {
 
 // Cast entrypoints.
-extern "C" size_t artIsAssignableFromCode(const mirror::Class* klass,
-                                          const mirror::Class* ref_class);
+extern "C" size_t artInstanceOfFromCode(mirror::Object* obj, mirror::Class* ref_class);
 
 // Read barrier entrypoints.
 // art_quick_read_barrier_mark_regX uses a non-standard calling
@@ -71,12 +70,53 @@
 extern "C" mirror::Object* art_quick_read_barrier_mark_reg28(mirror::Object*);
 extern "C" mirror::Object* art_quick_read_barrier_mark_reg29(mirror::Object*);
 
+void UpdateReadBarrierEntrypoints(QuickEntryPoints* qpoints, bool is_marking) {
+  // ARM64 is the architecture with the largest number of core
+  // registers (32) that supports the read barrier configuration.
+  // Because registers 30 (LR) and 31 (SP/XZR) cannot be used to pass
+  // arguments, only define ReadBarrierMarkRegX entrypoints for the
+  // first 30 registers.  This limitation is not a problem on other
+  // supported architectures (ARM, x86 and x86-64) either, as they
+  // have less core registers (resp. 16, 8 and 16).  (We may have to
+  // revise that design choice if read barrier support is added for
+  // MIPS and/or MIPS64.)
+  qpoints->pReadBarrierMarkReg00 = is_marking ? art_quick_read_barrier_mark_reg00 : nullptr;
+  qpoints->pReadBarrierMarkReg01 = is_marking ? art_quick_read_barrier_mark_reg01 : nullptr;
+  qpoints->pReadBarrierMarkReg02 = is_marking ? art_quick_read_barrier_mark_reg02 : nullptr;
+  qpoints->pReadBarrierMarkReg03 = is_marking ? art_quick_read_barrier_mark_reg03 : nullptr;
+  qpoints->pReadBarrierMarkReg04 = is_marking ? art_quick_read_barrier_mark_reg04 : nullptr;
+  qpoints->pReadBarrierMarkReg05 = is_marking ? art_quick_read_barrier_mark_reg05 : nullptr;
+  qpoints->pReadBarrierMarkReg06 = is_marking ? art_quick_read_barrier_mark_reg06 : nullptr;
+  qpoints->pReadBarrierMarkReg07 = is_marking ? art_quick_read_barrier_mark_reg07 : nullptr;
+  qpoints->pReadBarrierMarkReg08 = is_marking ? art_quick_read_barrier_mark_reg08 : nullptr;
+  qpoints->pReadBarrierMarkReg09 = is_marking ? art_quick_read_barrier_mark_reg09 : nullptr;
+  qpoints->pReadBarrierMarkReg10 = is_marking ? art_quick_read_barrier_mark_reg10 : nullptr;
+  qpoints->pReadBarrierMarkReg11 = is_marking ? art_quick_read_barrier_mark_reg11 : nullptr;
+  qpoints->pReadBarrierMarkReg12 = is_marking ? art_quick_read_barrier_mark_reg12 : nullptr;
+  qpoints->pReadBarrierMarkReg13 = is_marking ? art_quick_read_barrier_mark_reg13 : nullptr;
+  qpoints->pReadBarrierMarkReg14 = is_marking ? art_quick_read_barrier_mark_reg14 : nullptr;
+  qpoints->pReadBarrierMarkReg15 = is_marking ? art_quick_read_barrier_mark_reg15 : nullptr;
+  qpoints->pReadBarrierMarkReg17 = is_marking ? art_quick_read_barrier_mark_reg17 : nullptr;
+  qpoints->pReadBarrierMarkReg18 = is_marking ? art_quick_read_barrier_mark_reg18 : nullptr;
+  qpoints->pReadBarrierMarkReg19 = is_marking ? art_quick_read_barrier_mark_reg19 : nullptr;
+  qpoints->pReadBarrierMarkReg20 = is_marking ? art_quick_read_barrier_mark_reg20 : nullptr;
+  qpoints->pReadBarrierMarkReg21 = is_marking ? art_quick_read_barrier_mark_reg21 : nullptr;
+  qpoints->pReadBarrierMarkReg22 = is_marking ? art_quick_read_barrier_mark_reg22 : nullptr;
+  qpoints->pReadBarrierMarkReg23 = is_marking ? art_quick_read_barrier_mark_reg23 : nullptr;
+  qpoints->pReadBarrierMarkReg24 = is_marking ? art_quick_read_barrier_mark_reg24 : nullptr;
+  qpoints->pReadBarrierMarkReg25 = is_marking ? art_quick_read_barrier_mark_reg25 : nullptr;
+  qpoints->pReadBarrierMarkReg26 = is_marking ? art_quick_read_barrier_mark_reg26 : nullptr;
+  qpoints->pReadBarrierMarkReg27 = is_marking ? art_quick_read_barrier_mark_reg27 : nullptr;
+  qpoints->pReadBarrierMarkReg28 = is_marking ? art_quick_read_barrier_mark_reg28 : nullptr;
+  qpoints->pReadBarrierMarkReg29 = is_marking ? art_quick_read_barrier_mark_reg29 : nullptr;
+}
+
 void InitEntryPoints(JniEntryPoints* jpoints, QuickEntryPoints* qpoints) {
   DefaultInitEntryPoints(jpoints, qpoints);
 
   // Cast
-  qpoints->pInstanceofNonTrivial = artIsAssignableFromCode;
-  qpoints->pCheckCast = art_quick_check_cast;
+  qpoints->pInstanceofNonTrivial = artInstanceOfFromCode;
+  qpoints->pCheckInstanceOf = art_quick_check_instance_of;
 
   // Math
   // TODO null entrypoints not needed for ARM64 - generate inline.
@@ -127,45 +167,8 @@
 
   // Read barrier.
   qpoints->pReadBarrierJni = ReadBarrierJni;
-  // ARM64 is the architecture with the largest number of core
-  // registers (32) that supports the read barrier configuration.
-  // Because registers 30 (LR) and 31 (SP/XZR) cannot be used to pass
-  // arguments, only define ReadBarrierMarkRegX entrypoints for the
-  // first 30 registers.  This limitation is not a problem on other
-  // supported architectures (ARM, x86 and x86-64) either, as they
-  // have less core registers (resp. 16, 8 and 16).  (We may have to
-  // revise that design choice if read barrier support is added for
-  // MIPS and/or MIPS64.)
-  qpoints->pReadBarrierMarkReg00 = art_quick_read_barrier_mark_reg00;
-  qpoints->pReadBarrierMarkReg01 = art_quick_read_barrier_mark_reg01;
-  qpoints->pReadBarrierMarkReg02 = art_quick_read_barrier_mark_reg02;
-  qpoints->pReadBarrierMarkReg03 = art_quick_read_barrier_mark_reg03;
-  qpoints->pReadBarrierMarkReg04 = art_quick_read_barrier_mark_reg04;
-  qpoints->pReadBarrierMarkReg05 = art_quick_read_barrier_mark_reg05;
-  qpoints->pReadBarrierMarkReg06 = art_quick_read_barrier_mark_reg06;
-  qpoints->pReadBarrierMarkReg07 = art_quick_read_barrier_mark_reg07;
-  qpoints->pReadBarrierMarkReg08 = art_quick_read_barrier_mark_reg08;
-  qpoints->pReadBarrierMarkReg09 = art_quick_read_barrier_mark_reg09;
-  qpoints->pReadBarrierMarkReg10 = art_quick_read_barrier_mark_reg10;
-  qpoints->pReadBarrierMarkReg11 = art_quick_read_barrier_mark_reg11;
-  qpoints->pReadBarrierMarkReg12 = art_quick_read_barrier_mark_reg12;
-  qpoints->pReadBarrierMarkReg13 = art_quick_read_barrier_mark_reg13;
-  qpoints->pReadBarrierMarkReg14 = art_quick_read_barrier_mark_reg14;
-  qpoints->pReadBarrierMarkReg15 = art_quick_read_barrier_mark_reg15;
   qpoints->pReadBarrierMarkReg16 = nullptr;  // IP0 is used as a temp by the asm stub.
-  qpoints->pReadBarrierMarkReg17 = art_quick_read_barrier_mark_reg17;
-  qpoints->pReadBarrierMarkReg18 = art_quick_read_barrier_mark_reg18;
-  qpoints->pReadBarrierMarkReg19 = art_quick_read_barrier_mark_reg19;
-  qpoints->pReadBarrierMarkReg20 = art_quick_read_barrier_mark_reg20;
-  qpoints->pReadBarrierMarkReg21 = art_quick_read_barrier_mark_reg21;
-  qpoints->pReadBarrierMarkReg22 = art_quick_read_barrier_mark_reg22;
-  qpoints->pReadBarrierMarkReg23 = art_quick_read_barrier_mark_reg23;
-  qpoints->pReadBarrierMarkReg24 = art_quick_read_barrier_mark_reg24;
-  qpoints->pReadBarrierMarkReg25 = art_quick_read_barrier_mark_reg25;
-  qpoints->pReadBarrierMarkReg26 = art_quick_read_barrier_mark_reg26;
-  qpoints->pReadBarrierMarkReg27 = art_quick_read_barrier_mark_reg27;
-  qpoints->pReadBarrierMarkReg28 = art_quick_read_barrier_mark_reg28;
-  qpoints->pReadBarrierMarkReg29 = art_quick_read_barrier_mark_reg29;
+  UpdateReadBarrierEntrypoints(qpoints, /*is_marking*/ false);
   qpoints->pReadBarrierSlow = artReadBarrierSlow;
   qpoints->pReadBarrierForRootSlow = artReadBarrierForRootSlow;
 };
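
The per-register mark entrypoints above are installed only while the concurrent copying collector is actively marking, and are nulled otherwise; UpdateReadBarrierEntrypoints centralizes that toggle so it can be flipped at phase changes. A reduced C++ sketch of the pattern, with stand-in types and stubs rather than the real QuickEntryPoints layout:

    // Sketch only: stand-ins for the entrypoint table and the per-register stubs.
    struct EntryPoints {
      void (*pMarkReg00)();
      void (*pMarkReg01)();
    };

    void StubMarkReg00() {}
    void StubMarkReg01() {}

    // Install the mark stubs while marking is active; null the slots otherwise so
    // compiled code can test the slot and skip the read barrier call entirely.
    void UpdateReadBarrierEntrypointsSketch(EntryPoints* points, bool is_marking) {
      points->pMarkReg00 = is_marking ? StubMarkReg00 : nullptr;
      points->pMarkReg01 = is_marking ? StubMarkReg01 : nullptr;
    }
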
diff --git a/runtime/arch/arm64/instruction_set_features_arm64.cc b/runtime/arch/arm64/instruction_set_features_arm64.cc
index cad13b2..4e7dea3 100644
--- a/runtime/arch/arm64/instruction_set_features_arm64.cc
+++ b/runtime/arch/arm64/instruction_set_features_arm64.cc
@@ -19,12 +19,13 @@
 #include <fstream>
 #include <sstream>
 
+#include "base/stl_util.h"
 #include "base/stringprintf.h"
 #include "utils.h"  // For Trim.
 
 namespace art {
 
-const Arm64InstructionSetFeatures* Arm64InstructionSetFeatures::FromVariant(
+Arm64FeaturesUniquePtr Arm64InstructionSetFeatures::FromVariant(
     const std::string& variant, std::string* error_msg) {
   const bool smp = true;  // Conservative default.
 
@@ -52,22 +53,23 @@
   // The variants that need a fix for 843419 are the same that need a fix for 835769.
   bool needs_a53_843419_fix = needs_a53_835769_fix;
 
-  return new Arm64InstructionSetFeatures(smp, needs_a53_835769_fix, needs_a53_843419_fix);
+  return Arm64FeaturesUniquePtr(
+      new Arm64InstructionSetFeatures(smp, needs_a53_835769_fix, needs_a53_843419_fix));
 }
 
-const Arm64InstructionSetFeatures* Arm64InstructionSetFeatures::FromBitmap(uint32_t bitmap) {
+Arm64FeaturesUniquePtr Arm64InstructionSetFeatures::FromBitmap(uint32_t bitmap) {
   bool smp = (bitmap & kSmpBitfield) != 0;
   bool is_a53 = (bitmap & kA53Bitfield) != 0;
-  return new Arm64InstructionSetFeatures(smp, is_a53, is_a53);
+  return Arm64FeaturesUniquePtr(new Arm64InstructionSetFeatures(smp, is_a53, is_a53));
 }
 
-const Arm64InstructionSetFeatures* Arm64InstructionSetFeatures::FromCppDefines() {
+Arm64FeaturesUniquePtr Arm64InstructionSetFeatures::FromCppDefines() {
   const bool smp = true;
   const bool is_a53 = true;  // Pessimistically assume all ARM64s are A53s.
-  return new Arm64InstructionSetFeatures(smp, is_a53, is_a53);
+  return Arm64FeaturesUniquePtr(new Arm64InstructionSetFeatures(smp, is_a53, is_a53));
 }
 
-const Arm64InstructionSetFeatures* Arm64InstructionSetFeatures::FromCpuInfo() {
+Arm64FeaturesUniquePtr Arm64InstructionSetFeatures::FromCpuInfo() {
   // Look in /proc/cpuinfo for features we need.  Only use this when we can guarantee that
   // the kernel puts the appropriate feature flags in here.  Sometimes it doesn't.
   bool smp = false;
@@ -89,16 +91,16 @@
   } else {
     LOG(ERROR) << "Failed to open /proc/cpuinfo";
   }
-  return new Arm64InstructionSetFeatures(smp, is_a53, is_a53);
+  return Arm64FeaturesUniquePtr(new Arm64InstructionSetFeatures(smp, is_a53, is_a53));
 }
 
-const Arm64InstructionSetFeatures* Arm64InstructionSetFeatures::FromHwcap() {
+Arm64FeaturesUniquePtr Arm64InstructionSetFeatures::FromHwcap() {
   bool smp = sysconf(_SC_NPROCESSORS_CONF) > 1;
   const bool is_a53 = true;  // Pessimistically assume all ARM64s are A53s.
-  return new Arm64InstructionSetFeatures(smp, is_a53, is_a53);
+  return Arm64FeaturesUniquePtr(new Arm64InstructionSetFeatures(smp, is_a53, is_a53));
 }
 
-const Arm64InstructionSetFeatures* Arm64InstructionSetFeatures::FromAssembly() {
+Arm64FeaturesUniquePtr Arm64InstructionSetFeatures::FromAssembly() {
   UNIMPLEMENTED(WARNING);
   return FromCppDefines();
 }
@@ -130,7 +132,8 @@
   return result;
 }
 
-const InstructionSetFeatures* Arm64InstructionSetFeatures::AddFeaturesFromSplitString(
+std::unique_ptr<const InstructionSetFeatures>
+Arm64InstructionSetFeatures::AddFeaturesFromSplitString(
     const bool smp, const std::vector<std::string>& features, std::string* error_msg) const {
   bool is_a53 = fix_cortex_a53_835769_;
   for (auto i = features.begin(); i != features.end(); i++) {
@@ -144,7 +147,8 @@
       return nullptr;
     }
   }
-  return new Arm64InstructionSetFeatures(smp, is_a53, is_a53);
+  return std::unique_ptr<const InstructionSetFeatures>(
+      new Arm64InstructionSetFeatures(smp, is_a53, is_a53));
 }
 
 }  // namespace art
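
All of the Arm64InstructionSetFeatures factories now return Arm64FeaturesUniquePtr, i.e. std::unique_ptr<const Arm64InstructionSetFeatures>, handing ownership to the caller instead of a bare const pointer that had to be deleted manually. A minimal standalone sketch of the same factory shape, using hypothetical class names and an illustrative variant check:

    #include <memory>
    #include <string>

    // Hypothetical stand-in for the features class.
    struct FeaturesSketch {
      explicit FeaturesSketch(bool fix_a53) : fix_a53_(fix_a53) {}
      const bool fix_a53_;
    };
    using FeaturesSketchPtr = std::unique_ptr<const FeaturesSketch>;

    // Factory returning an owning pointer: no caller-side delete, and the result
    // can be moved straight into a member or a base-class unique_ptr.
    FeaturesSketchPtr FromVariantSketch(const std::string& variant) {
      // Variant handling here is illustrative only.
      const bool fix_a53 = (variant == "default" || variant == "cortex-a53");
      return FeaturesSketchPtr(new FeaturesSketch(fix_a53));
    }
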
diff --git a/runtime/arch/arm64/instruction_set_features_arm64.h b/runtime/arch/arm64/instruction_set_features_arm64.h
index abd7e83..e51aa1c 100644
--- a/runtime/arch/arm64/instruction_set_features_arm64.h
+++ b/runtime/arch/arm64/instruction_set_features_arm64.h
@@ -21,29 +21,31 @@
 
 namespace art {
 
+class Arm64InstructionSetFeatures;
+using Arm64FeaturesUniquePtr = std::unique_ptr<const Arm64InstructionSetFeatures>;
+
 // Instruction set features relevant to the ARM64 architecture.
 class Arm64InstructionSetFeatures FINAL : public InstructionSetFeatures {
  public:
   // Process a CPU variant string like "krait" or "cortex-a15" and create InstructionSetFeatures.
-  static const Arm64InstructionSetFeatures* FromVariant(const std::string& variant,
-                                                        std::string* error_msg);
+  static Arm64FeaturesUniquePtr FromVariant(const std::string& variant, std::string* error_msg);
 
   // Parse a bitmap and create an InstructionSetFeatures.
-  static const Arm64InstructionSetFeatures* FromBitmap(uint32_t bitmap);
+  static Arm64FeaturesUniquePtr FromBitmap(uint32_t bitmap);
 
   // Turn C pre-processor #defines into the equivalent instruction set features.
-  static const Arm64InstructionSetFeatures* FromCppDefines();
+  static Arm64FeaturesUniquePtr FromCppDefines();
 
   // Process /proc/cpuinfo and use kRuntimeISA to produce InstructionSetFeatures.
-  static const Arm64InstructionSetFeatures* FromCpuInfo();
+  static Arm64FeaturesUniquePtr FromCpuInfo();
 
   // Process the auxiliary vector AT_HWCAP entry and use kRuntimeISA to produce
   // InstructionSetFeatures.
-  static const Arm64InstructionSetFeatures* FromHwcap();
+  static Arm64FeaturesUniquePtr FromHwcap();
 
   // Use assembly tests of the current runtime (ie kRuntimeISA) to determine the
   // InstructionSetFeatures. This works around kernel bugs in AT_HWCAP and /proc/cpuinfo.
-  static const Arm64InstructionSetFeatures* FromAssembly();
+  static Arm64FeaturesUniquePtr FromAssembly();
 
   bool Equals(const InstructionSetFeatures* other) const OVERRIDE;
 
@@ -70,7 +72,7 @@
 
  protected:
   // Parse a vector of the form "a53" adding these to a new ArmInstructionSetFeatures.
-  const InstructionSetFeatures*
+  std::unique_ptr<const InstructionSetFeatures>
       AddFeaturesFromSplitString(const bool smp, const std::vector<std::string>& features,
                                  std::string* error_msg) const OVERRIDE;
 
diff --git a/runtime/arch/arm64/quick_entrypoints_arm64.S b/runtime/arch/arm64/quick_entrypoints_arm64.S
index 483cee3..73bca03 100644
--- a/runtime/arch/arm64/quick_entrypoints_arm64.S
+++ b/runtime/arch/arm64/quick_entrypoints_arm64.S
@@ -1294,18 +1294,19 @@
 END art_quick_unlock_object_no_inline
 
     /*
-     * Entry from managed code that calls artIsAssignableFromCode and on failure calls
-     * artThrowClassCastException.
+     * Entry from managed code that calls artInstanceOfFromCode and on failure calls
+     * artThrowClassCastExceptionForObject.
      */
-    .extern artThrowClassCastException
-ENTRY art_quick_check_cast
+    .extern artInstanceOfFromCode
+    .extern artThrowClassCastExceptionForObject
+ENTRY art_quick_check_instance_of
     // Store arguments and link register
     // Stack needs to be 16B aligned on calls.
     SAVE_TWO_REGS_INCREASE_FRAME x0, x1, 32
     SAVE_REG xLR, 24
 
     // Call runtime code
-    bl artIsAssignableFromCode
+    bl artInstanceOfFromCode
 
     // Check for exception
     cbz x0, .Lthrow_class_cast_exception
@@ -1324,9 +1325,9 @@
 
     SETUP_SAVE_ALL_CALLEE_SAVES_FRAME // save all registers as basis for long jump context
     mov x2, xSELF                     // pass Thread::Current
-    bl artThrowClassCastException     // (Class*, Class*, Thread*)
+    bl artThrowClassCastExceptionForObject     // (Object*, Class*, Thread*)
     brk 0                             // We should not return here...
-END art_quick_check_cast
+END art_quick_check_instance_of
 
 // Restore xReg's value from [sp, #offset] if xReg is not the same as xExclude.
 .macro POP_REG_NE xReg, offset, xExclude
@@ -2402,12 +2403,15 @@
      *    w2:   Starting offset in string data
      */
 ENTRY art_quick_indexof
+#if (STRING_COMPRESSION_FEATURE)
+    ldr   w4, [x0, #MIRROR_STRING_COUNT_OFFSET]
+#else
     ldr   w3, [x0, #MIRROR_STRING_COUNT_OFFSET]
+#endif
     add   x0, x0, #MIRROR_STRING_VALUE_OFFSET
 #if (STRING_COMPRESSION_FEATURE)
     /* w4 holds count (with flag) and w3 holds actual length */
-    mov   w4, w3
-    and   w3, w3, #2147483647
+    lsr   w3, w4, #1
 #endif
     /* Clamp start to [0..count] */
     cmp   w2, #0
@@ -2419,7 +2423,7 @@
     mov   x5, x0
 
 #if (STRING_COMPRESSION_FEATURE)
-    tbnz  w4, #31, .Lstring_indexof_compressed
+    tbz   w4, #0, .Lstring_indexof_compressed
 #endif
     /* Build pointer to start of data to compare and pre-bias */
     add   x0, x0, x2, lsl #1
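
With string compression enabled, the count field read into w4 packs the length together with a compression flag: the length is recovered with lsr #1, and tbz #0 takes the compressed path when the low bit is clear. A small C++ sketch of the layout implied by those two instructions (the exact flag convention is an assumption drawn from this code, not a definitive spec):

    #include <cstdint>

    // Implied layout: count = (length << 1) | uncompressed_bit.
    inline int32_t StringLengthFromCount(uint32_t count_with_flag) {
      return static_cast<int32_t>(count_with_flag >> 1);   // mirrors: lsr w3, w4, #1
    }

    inline bool IsCompressedCount(uint32_t count_with_flag) {
      return (count_with_flag & 1u) == 0u;                  // mirrors: tbz w4, #0, ...compressed
    }
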
@@ -2539,10 +2543,17 @@
      */
     // Use wIP0 as temp and check the mark bit of the reference. wIP0 is not used by the compiler.
     ldr   wIP0, [\xreg, #MIRROR_OBJECT_LOCK_WORD_OFFSET]
-    tbz   wIP0, #LOCK_WORD_MARK_BIT_SHIFT, .Lslow_rb_\name
+    tbz   wIP0, #LOCK_WORD_MARK_BIT_SHIFT, .Lnot_marked_rb_\name
 .Lret_rb_\name:
     ret
+.Lnot_marked_rb_\name:
+    // Check if the top two bits are set; if so, this is a forwarding address.
+    mvn wIP0, wIP0
+    cmp wzr, wIP0, lsr #30
+    beq .Lret_forwarding_address\name
 .Lslow_rb_\name:
+    // We must not clobber IP0 since art_quick_resolve_string makes a tail call here and relies on
+    // IP0 being restored.
     // Save all potentially live caller-save core registers.
     SAVE_TWO_REGS_INCREASE_FRAME x0, x1, 368
     SAVE_TWO_REGS  x2,  x3, 16
@@ -2608,6 +2619,12 @@
     RESTORE_REG xLR, 360
     DECREASE_FRAME 368
     ret
+.Lret_forwarding_address\name:
+    mvn wIP0, wIP0
+    // Shift left by the forwarding address shift. This clears out the state bits since they are
+    // in the top 2 bits of the lock word.
+    lsl \wreg, wIP0, #LOCK_WORD_STATE_FORWARDING_ADDRESS_SHIFT
+    ret
 END \name
 .endm
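
The new .Lnot_marked_rb_ / .Lret_forwarding_address paths let the mark stub return a forwarded reference without calling into the runtime: a reference whose mark bit is set is returned unchanged, and a lock word with both state bits (the top two bits) set encodes a forwarding address that only needs to be shifted back. A rough C++ rendering of that decision, with illustrative constants standing in for LOCK_WORD_MARK_BIT_SHIFT and LOCK_WORD_STATE_FORWARDING_ADDRESS_SHIFT:

    #include <cstdint>

    // Illustrative values; the real shifts come from the lock word definition.
    constexpr uint32_t kMarkBitShift = 29;
    constexpr uint32_t kForwardingAddressShift = 3;

    enum class MarkPath { kAlreadyMarked, kForwarded, kSlowPath };

    // Mirrors the tbz / mvn / cmp / lsl sequence in the stub above.
    MarkPath ClassifyLockWord(uint32_t lock_word, uint32_t* forwarded_out) {
      if ((lock_word >> kMarkBitShift) & 1u) {
        return MarkPath::kAlreadyMarked;       // stub returns the reference unchanged
      }
      if ((lock_word >> 30) == 3u) {           // both state bits set: forwarding address
        *forwarded_out = lock_word << kForwardingAddressShift;
        return MarkPath::kForwarded;
      }
      return MarkPath::kSlowPath;              // call the runtime mark entrypoint
    }
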
 
diff --git a/runtime/arch/instruction_set_features.cc b/runtime/arch/instruction_set_features.cc
index 898f83a..b32391f 100644
--- a/runtime/arch/instruction_set_features.cc
+++ b/runtime/arch/instruction_set_features.cc
@@ -29,29 +29,28 @@
 
 namespace art {
 
-const InstructionSetFeatures* InstructionSetFeatures::FromVariant(InstructionSet isa,
-                                                                  const std::string& variant,
-                                                                  std::string* error_msg) {
-  const InstructionSetFeatures* result;
+std::unique_ptr<const InstructionSetFeatures> InstructionSetFeatures::FromVariant(
+    InstructionSet isa, const std::string& variant, std::string* error_msg) {
+  std::unique_ptr<const InstructionSetFeatures> result;
   switch (isa) {
     case kArm:
     case kThumb2:
-      result = ArmInstructionSetFeatures::FromVariant(variant, error_msg);
+      result.reset(ArmInstructionSetFeatures::FromVariant(variant, error_msg).release());
       break;
     case kArm64:
-      result = Arm64InstructionSetFeatures::FromVariant(variant, error_msg);
+      result.reset(Arm64InstructionSetFeatures::FromVariant(variant, error_msg).release());
       break;
     case kMips:
-      result = MipsInstructionSetFeatures::FromVariant(variant, error_msg);
+      result.reset(MipsInstructionSetFeatures::FromVariant(variant, error_msg).release());
       break;
     case kMips64:
       result = Mips64InstructionSetFeatures::FromVariant(variant, error_msg);
       break;
     case kX86:
-      result = X86InstructionSetFeatures::FromVariant(variant, error_msg);
+      result.reset(X86InstructionSetFeatures::FromVariant(variant, error_msg).release());
       break;
     case kX86_64:
-      result = X86_64InstructionSetFeatures::FromVariant(variant, error_msg);
+      result.reset(X86_64InstructionSetFeatures::FromVariant(variant, error_msg).release());
       break;
     default:
       UNIMPLEMENTED(FATAL) << isa;
@@ -61,28 +60,28 @@
   return result;
 }
 
-const InstructionSetFeatures* InstructionSetFeatures::FromBitmap(InstructionSet isa,
-                                                                 uint32_t bitmap) {
-  const InstructionSetFeatures* result;
+std::unique_ptr<const InstructionSetFeatures> InstructionSetFeatures::FromBitmap(InstructionSet isa,
+                                                                                 uint32_t bitmap) {
+  std::unique_ptr<const InstructionSetFeatures> result;
   switch (isa) {
     case kArm:
     case kThumb2:
-      result = ArmInstructionSetFeatures::FromBitmap(bitmap);
+      result.reset(ArmInstructionSetFeatures::FromBitmap(bitmap).release());
       break;
     case kArm64:
-      result = Arm64InstructionSetFeatures::FromBitmap(bitmap);
+      result.reset(Arm64InstructionSetFeatures::FromBitmap(bitmap).release());
       break;
     case kMips:
-      result = MipsInstructionSetFeatures::FromBitmap(bitmap);
+      result.reset(MipsInstructionSetFeatures::FromBitmap(bitmap).release());
       break;
     case kMips64:
       result = Mips64InstructionSetFeatures::FromBitmap(bitmap);
       break;
     case kX86:
-      result = X86InstructionSetFeatures::FromBitmap(bitmap);
+      result.reset(X86InstructionSetFeatures::FromBitmap(bitmap).release());
       break;
     case kX86_64:
-      result = X86_64InstructionSetFeatures::FromBitmap(bitmap);
+      result.reset(X86_64InstructionSetFeatures::FromBitmap(bitmap).release());
       break;
     default:
       UNIMPLEMENTED(FATAL) << isa;
@@ -92,27 +91,27 @@
   return result;
 }
 
-const InstructionSetFeatures* InstructionSetFeatures::FromCppDefines() {
-  const InstructionSetFeatures* result;
+std::unique_ptr<const InstructionSetFeatures> InstructionSetFeatures::FromCppDefines() {
+  std::unique_ptr<const InstructionSetFeatures> result;
   switch (kRuntimeISA) {
     case kArm:
     case kThumb2:
-      result = ArmInstructionSetFeatures::FromCppDefines();
+      result.reset(ArmInstructionSetFeatures::FromCppDefines().release());
       break;
     case kArm64:
-      result = Arm64InstructionSetFeatures::FromCppDefines();
+      result.reset(Arm64InstructionSetFeatures::FromCppDefines().release());
       break;
     case kMips:
-      result = MipsInstructionSetFeatures::FromCppDefines();
+      result.reset(MipsInstructionSetFeatures::FromCppDefines().release());
       break;
     case kMips64:
       result = Mips64InstructionSetFeatures::FromCppDefines();
       break;
     case kX86:
-      result = X86InstructionSetFeatures::FromCppDefines();
+      result.reset(X86InstructionSetFeatures::FromCppDefines().release());
       break;
     case kX86_64:
-      result = X86_64InstructionSetFeatures::FromCppDefines();
+      result.reset(X86_64InstructionSetFeatures::FromCppDefines().release());
       break;
     default:
       UNIMPLEMENTED(FATAL) << kRuntimeISA;
@@ -122,27 +121,27 @@
 }
 
 
-const InstructionSetFeatures* InstructionSetFeatures::FromCpuInfo() {
-  const InstructionSetFeatures* result;
+std::unique_ptr<const InstructionSetFeatures> InstructionSetFeatures::FromCpuInfo() {
+  std::unique_ptr<const InstructionSetFeatures> result;
   switch (kRuntimeISA) {
     case kArm:
     case kThumb2:
-      result = ArmInstructionSetFeatures::FromCpuInfo();
+      result.reset(ArmInstructionSetFeatures::FromCpuInfo().release());
       break;
     case kArm64:
-      result = Arm64InstructionSetFeatures::FromCpuInfo();
+      result.reset(Arm64InstructionSetFeatures::FromCpuInfo().release());
       break;
     case kMips:
-      result = MipsInstructionSetFeatures::FromCpuInfo();
+      result.reset(MipsInstructionSetFeatures::FromCpuInfo().release());
       break;
     case kMips64:
       result = Mips64InstructionSetFeatures::FromCpuInfo();
       break;
     case kX86:
-      result = X86InstructionSetFeatures::FromCpuInfo();
+      result.reset(X86InstructionSetFeatures::FromCpuInfo().release());
       break;
     case kX86_64:
-      result = X86_64InstructionSetFeatures::FromCpuInfo();
+      result.reset(X86_64InstructionSetFeatures::FromCpuInfo().release());
       break;
     default:
       UNIMPLEMENTED(FATAL) << kRuntimeISA;
@@ -151,27 +150,27 @@
   return result;
 }
 
-const InstructionSetFeatures* InstructionSetFeatures::FromHwcap() {
-  const InstructionSetFeatures* result;
+std::unique_ptr<const InstructionSetFeatures> InstructionSetFeatures::FromHwcap() {
+  std::unique_ptr<const InstructionSetFeatures> result;
   switch (kRuntimeISA) {
     case kArm:
     case kThumb2:
-      result = ArmInstructionSetFeatures::FromHwcap();
+      result.reset(ArmInstructionSetFeatures::FromHwcap().release());
       break;
     case kArm64:
-      result = Arm64InstructionSetFeatures::FromHwcap();
+      result.reset(Arm64InstructionSetFeatures::FromHwcap().release());
       break;
     case kMips:
-      result = MipsInstructionSetFeatures::FromHwcap();
+      result.reset(MipsInstructionSetFeatures::FromHwcap().release());
       break;
     case kMips64:
       result = Mips64InstructionSetFeatures::FromHwcap();
       break;
     case kX86:
-      result = X86InstructionSetFeatures::FromHwcap();
+      result.reset(X86InstructionSetFeatures::FromHwcap().release());
       break;
     case kX86_64:
-      result = X86_64InstructionSetFeatures::FromHwcap();
+      result.reset(X86_64InstructionSetFeatures::FromHwcap().release());
       break;
     default:
       UNIMPLEMENTED(FATAL) << kRuntimeISA;
@@ -180,27 +179,27 @@
   return result;
 }
 
-const InstructionSetFeatures* InstructionSetFeatures::FromAssembly() {
-  const InstructionSetFeatures* result;
+std::unique_ptr<const InstructionSetFeatures> InstructionSetFeatures::FromAssembly() {
+  std::unique_ptr<const InstructionSetFeatures> result;
   switch (kRuntimeISA) {
     case kArm:
     case kThumb2:
-      result = ArmInstructionSetFeatures::FromAssembly();
+      result.reset(ArmInstructionSetFeatures::FromAssembly().release());
       break;
     case kArm64:
-      result = Arm64InstructionSetFeatures::FromAssembly();
+      result.reset(Arm64InstructionSetFeatures::FromAssembly().release());
       break;
     case kMips:
-      result = MipsInstructionSetFeatures::FromAssembly();
+      result.reset(MipsInstructionSetFeatures::FromAssembly().release());
       break;
     case kMips64:
       result = Mips64InstructionSetFeatures::FromAssembly();
       break;
     case kX86:
-      result = X86InstructionSetFeatures::FromAssembly();
+      result.reset(X86InstructionSetFeatures::FromAssembly().release());
       break;
     case kX86_64:
-      result = X86_64InstructionSetFeatures::FromAssembly();
+      result.reset(X86_64InstructionSetFeatures::FromAssembly().release());
       break;
     default:
       UNIMPLEMENTED(FATAL) << kRuntimeISA;
@@ -209,11 +208,11 @@
   return result;
 }
 
-const InstructionSetFeatures* InstructionSetFeatures::AddFeaturesFromString(
+std::unique_ptr<const InstructionSetFeatures> InstructionSetFeatures::AddFeaturesFromString(
     const std::string& feature_list, std::string* error_msg) const {
   if (feature_list.empty()) {
     *error_msg = "No instruction set features specified";
-    return nullptr;
+    return std::unique_ptr<const InstructionSetFeatures>();
   }
   std::vector<std::string> features;
   Split(feature_list, ',', &features);
@@ -223,7 +222,7 @@
   for (auto it = features.begin(); it != features.end();) {
     if (use_default) {
       *error_msg = "Unexpected instruction set features after 'default'";
-      return nullptr;
+      return std::unique_ptr<const InstructionSetFeatures>();
     }
     std::string feature = Trim(*it);
     bool erase = false;
@@ -233,7 +232,7 @@
         erase = true;
       } else {
         *error_msg = "Unexpected instruction set features before 'default'";
-        return nullptr;
+        return std::unique_ptr<const InstructionSetFeatures>();
       }
     } else if (feature == "smp") {
       smp = true;
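
Most cases above move the architecture-specific unique_ptr into the base-class result with reset(...release()); the kMips64 cases keep a plain assignment, which also compiles because std::unique_ptr has a converting move assignment from unique_ptr<const Derived> to unique_ptr<const Base>. A small sketch of both spellings with placeholder types:

    #include <memory>

    struct BaseFeatures { virtual ~BaseFeatures() {} };
    struct DerivedFeatures : BaseFeatures {};

    std::unique_ptr<const DerivedFeatures> MakeDerived() {
      return std::unique_ptr<const DerivedFeatures>(new DerivedFeatures());
    }

    int main() {
      std::unique_ptr<const BaseFeatures> a;
      a.reset(MakeDerived().release());  // explicit transfer, as in most switch cases
      std::unique_ptr<const BaseFeatures> b;
      b = MakeDerived();                 // converting move assignment, as in the kMips64 cases
      return 0;
    }
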
diff --git a/runtime/arch/instruction_set_features.h b/runtime/arch/instruction_set_features.h
index d10ae21..d84bc02 100644
--- a/runtime/arch/instruction_set_features.h
+++ b/runtime/arch/instruction_set_features.h
@@ -17,6 +17,7 @@
 #ifndef ART_RUNTIME_ARCH_INSTRUCTION_SET_FEATURES_H_
 #define ART_RUNTIME_ARCH_INSTRUCTION_SET_FEATURES_H_
 
+#include <memory>
 #include <ostream>
 #include <vector>
 
@@ -36,31 +37,32 @@
 class InstructionSetFeatures {
  public:
   // Process a CPU variant string for the given ISA and create an InstructionSetFeatures.
-  static const InstructionSetFeatures* FromVariant(InstructionSet isa,
-                                                   const std::string& variant,
-                                                   std::string* error_msg);
+  static std::unique_ptr<const InstructionSetFeatures> FromVariant(InstructionSet isa,
+                                                                   const std::string& variant,
+                                                                   std::string* error_msg);
 
   // Parse a bitmap for the given isa and create an InstructionSetFeatures.
-  static const InstructionSetFeatures* FromBitmap(InstructionSet isa, uint32_t bitmap);
+  static std::unique_ptr<const InstructionSetFeatures> FromBitmap(InstructionSet isa,
+                                                                  uint32_t bitmap);
 
   // Turn C pre-processor #defines into the equivalent instruction set features for kRuntimeISA.
-  static const InstructionSetFeatures* FromCppDefines();
+  static std::unique_ptr<const InstructionSetFeatures> FromCppDefines();
 
   // Process /proc/cpuinfo and use kRuntimeISA to produce InstructionSetFeatures.
-  static const InstructionSetFeatures* FromCpuInfo();
+  static std::unique_ptr<const InstructionSetFeatures> FromCpuInfo();
 
   // Process the auxiliary vector AT_HWCAP entry and use kRuntimeISA to produce
   // InstructionSetFeatures.
-  static const InstructionSetFeatures* FromHwcap();
+  static std::unique_ptr<const InstructionSetFeatures> FromHwcap();
 
   // Use assembly tests of the current runtime (ie kRuntimeISA) to determine the
   // InstructionSetFeatures. This works around kernel bugs in AT_HWCAP and /proc/cpuinfo.
-  static const InstructionSetFeatures* FromAssembly();
+  static std::unique_ptr<const InstructionSetFeatures> FromAssembly();
 
   // Parse a string of the form "div,-atomic_ldrd_strd" adding and removing these features to
   // create a new InstructionSetFeatures.
-  const InstructionSetFeatures* AddFeaturesFromString(const std::string& feature_list,
-                                                      std::string* error_msg) const WARN_UNUSED;
+  std::unique_ptr<const InstructionSetFeatures> AddFeaturesFromString(
+      const std::string& feature_list, std::string* error_msg) const WARN_UNUSED;
 
   // Are these features the same as the other given features?
   virtual bool Equals(const InstructionSetFeatures* other) const = 0;
@@ -107,7 +109,7 @@
                                  const std::string& variant);
 
   // Add architecture specific features in sub-classes.
-  virtual const InstructionSetFeatures*
+  virtual std::unique_ptr<const InstructionSetFeatures>
       AddFeaturesFromSplitString(bool smp, const std::vector<std::string>& features,
                                  std::string* error_msg) const = 0;
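
AddFeaturesFromString now reports failure by returning an empty unique_ptr (together with *error_msg) rather than a raw nullptr, so the WARN_UNUSED result must be checked before use. A minimal usage sketch against the declarations above; the caller function and the feature string are illustrative:

    // Assumes the ART headers declaring art::InstructionSetFeatures are available.
    #include <memory>
    #include <string>

    std::unique_ptr<const art::InstructionSetFeatures> WithExtraFeatures(
        const art::InstructionSetFeatures& base, std::string* error_msg) {
      // An empty unique_ptr (== nullptr) signals a parse error; *error_msg explains it.
      return base.AddFeaturesFromString("div,-atomic_ldrd_strd", error_msg);
    }
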
 
diff --git a/runtime/arch/mips/entrypoints_init_mips.cc b/runtime/arch/mips/entrypoints_init_mips.cc
index e10d4e6..6a442a5 100644
--- a/runtime/arch/mips/entrypoints_init_mips.cc
+++ b/runtime/arch/mips/entrypoints_init_mips.cc
@@ -30,8 +30,7 @@
 namespace art {
 
 // Cast entrypoints.
-extern "C" size_t artIsAssignableFromCode(const mirror::Class* klass,
-                                          const mirror::Class* ref_class);
+extern "C" size_t artInstanceOfFromCode(mirror::Object* obj, mirror::Class* ref_class);
 
 // Math entrypoints.
 extern int32_t CmpgDouble(double a, double b);
@@ -60,6 +59,10 @@
 extern "C" int64_t __divdi3(int64_t, int64_t);
 extern "C" int64_t __moddi3(int64_t, int64_t);
 
+// No read barrier entrypoints for marking registers.
+void UpdateReadBarrierEntrypoints(QuickEntryPoints* qpoints ATTRIBUTE_UNUSED,
+                                  bool is_marking ATTRIBUTE_UNUSED) {}
+
 void InitEntryPoints(JniEntryPoints* jpoints, QuickEntryPoints* qpoints) {
   // Note: MIPS has asserts checking for the type of entrypoint. Don't move it
   //       to InitDefaultEntryPoints().
@@ -71,10 +74,10 @@
   ResetQuickAllocEntryPoints(qpoints);
 
   // Cast
-  qpoints->pInstanceofNonTrivial = artIsAssignableFromCode;
+  qpoints->pInstanceofNonTrivial = artInstanceOfFromCode;
   static_assert(IsDirectEntrypoint(kQuickInstanceofNonTrivial), "Direct C stub not marked direct.");
-  qpoints->pCheckCast = art_quick_check_cast;
-  static_assert(!IsDirectEntrypoint(kQuickCheckCast), "Non-direct C stub marked direct.");
+  qpoints->pCheckInstanceOf = art_quick_check_instance_of;
+  static_assert(!IsDirectEntrypoint(kQuickCheckInstanceOf), "Non-direct C stub marked direct.");
 
   // DexCache
   qpoints->pInitializeStaticStorage = art_quick_initialize_static_storage;
@@ -153,17 +156,24 @@
   // JNI
   qpoints->pJniMethodStart = JniMethodStart;
   static_assert(!IsDirectEntrypoint(kQuickJniMethodStart), "Non-direct C stub marked direct.");
+  qpoints->pJniMethodFastStart = JniMethodFastStart;
+  static_assert(!IsDirectEntrypoint(kQuickJniMethodFastStart), "Non-direct C stub marked direct.");
   qpoints->pJniMethodStartSynchronized = JniMethodStartSynchronized;
   static_assert(!IsDirectEntrypoint(kQuickJniMethodStartSynchronized),
                 "Non-direct C stub marked direct.");
   qpoints->pJniMethodEnd = JniMethodEnd;
   static_assert(!IsDirectEntrypoint(kQuickJniMethodEnd), "Non-direct C stub marked direct.");
+  qpoints->pJniMethodFastEnd = JniMethodFastEnd;
+  static_assert(!IsDirectEntrypoint(kQuickJniMethodFastEnd), "Non-direct C stub marked direct.");
   qpoints->pJniMethodEndSynchronized = JniMethodEndSynchronized;
   static_assert(!IsDirectEntrypoint(kQuickJniMethodEndSynchronized),
                 "Non-direct C stub marked direct.");
   qpoints->pJniMethodEndWithReference = JniMethodEndWithReference;
   static_assert(!IsDirectEntrypoint(kQuickJniMethodEndWithReference),
                 "Non-direct C stub marked direct.");
+  qpoints->pJniMethodFastEndWithReference = JniMethodFastEndWithReference;
+  static_assert(!IsDirectEntrypoint(kQuickJniMethodFastEndWithReference),
+                "Non-direct C stub marked direct.");
   qpoints->pJniMethodEndWithReferenceSynchronized = JniMethodEndWithReferenceSynchronized;
   static_assert(!IsDirectEntrypoint(kQuickJniMethodEndWithReferenceSynchronized),
                 "Non-direct C stub marked direct.");
diff --git a/runtime/arch/mips/instruction_set_features_mips.cc b/runtime/arch/mips/instruction_set_features_mips.cc
index b3a9866..a95b6f6 100644
--- a/runtime/arch/mips/instruction_set_features_mips.cc
+++ b/runtime/arch/mips/instruction_set_features_mips.cc
@@ -19,6 +19,7 @@
 #include <fstream>
 #include <sstream>
 
+#include "base/stl_util.h"
 #include "base/stringprintf.h"
 #include "utils.h"  // For Trim.
 
@@ -63,7 +64,7 @@
   }
 }
 
-const MipsInstructionSetFeatures* MipsInstructionSetFeatures::FromVariant(
+MipsFeaturesUniquePtr MipsInstructionSetFeatures::FromVariant(
     const std::string& variant, std::string* error_msg ATTRIBUTE_UNUSED) {
 
   bool smp = true;  // Conservative default.
@@ -97,18 +98,19 @@
     LOG(WARNING) << "Unexpected CPU variant for Mips32 using defaults: " << variant;
   }
 
-  return new MipsInstructionSetFeatures(smp, fpu_32bit, mips_isa_gte2, r6);
+  return MipsFeaturesUniquePtr(new MipsInstructionSetFeatures(smp, fpu_32bit, mips_isa_gte2, r6));
 }
 
-const MipsInstructionSetFeatures* MipsInstructionSetFeatures::FromBitmap(uint32_t bitmap) {
+MipsFeaturesUniquePtr MipsInstructionSetFeatures::FromBitmap(
+    uint32_t bitmap) {
   bool smp = (bitmap & kSmpBitfield) != 0;
   bool fpu_32bit = (bitmap & kFpu32Bitfield) != 0;
   bool mips_isa_gte2 = (bitmap & kIsaRevGte2Bitfield) != 0;
   bool r6 = (bitmap & kR6) != 0;
-  return new MipsInstructionSetFeatures(smp, fpu_32bit, mips_isa_gte2, r6);
+  return MipsFeaturesUniquePtr(new MipsInstructionSetFeatures(smp, fpu_32bit, mips_isa_gte2, r6));
 }
 
-const MipsInstructionSetFeatures* MipsInstructionSetFeatures::FromCppDefines() {
+MipsFeaturesUniquePtr MipsInstructionSetFeatures::FromCppDefines() {
   // Assume conservative defaults.
   const bool smp = true;
 
@@ -117,10 +119,10 @@
   bool r6;
   GetFlagsFromCppDefined(&mips_isa_gte2, &r6, &fpu_32bit);
 
-  return new MipsInstructionSetFeatures(smp, fpu_32bit, mips_isa_gte2, r6);
+  return MipsFeaturesUniquePtr(new MipsInstructionSetFeatures(smp, fpu_32bit, mips_isa_gte2, r6));
 }
 
-const MipsInstructionSetFeatures* MipsInstructionSetFeatures::FromCpuInfo() {
+MipsFeaturesUniquePtr MipsInstructionSetFeatures::FromCpuInfo() {
   // Look in /proc/cpuinfo for features we need.  Only use this when we can guarantee that
   // the kernel puts the appropriate feature flags in here.  Sometimes it doesn't.
   // Assume conservative defaults.
@@ -147,15 +149,15 @@
   } else {
     LOG(ERROR) << "Failed to open /proc/cpuinfo";
   }
-  return new MipsInstructionSetFeatures(smp, fpu_32bit, mips_isa_gte2, r6);
+  return MipsFeaturesUniquePtr(new MipsInstructionSetFeatures(smp, fpu_32bit, mips_isa_gte2, r6));
 }
 
-const MipsInstructionSetFeatures* MipsInstructionSetFeatures::FromHwcap() {
+MipsFeaturesUniquePtr MipsInstructionSetFeatures::FromHwcap() {
   UNIMPLEMENTED(WARNING);
   return FromCppDefines();
 }
 
-const MipsInstructionSetFeatures* MipsInstructionSetFeatures::FromAssembly() {
+MipsFeaturesUniquePtr MipsInstructionSetFeatures::FromAssembly() {
   UNIMPLEMENTED(WARNING);
   return FromCppDefines();
 }
@@ -201,7 +203,8 @@
   return result;
 }
 
-const InstructionSetFeatures* MipsInstructionSetFeatures::AddFeaturesFromSplitString(
+std::unique_ptr<const InstructionSetFeatures>
+MipsInstructionSetFeatures::AddFeaturesFromSplitString(
     const bool smp, const std::vector<std::string>& features, std::string* error_msg) const {
   bool fpu_32bit = fpu_32bit_;
   bool mips_isa_gte2 = mips_isa_gte2_;
@@ -225,7 +228,8 @@
       return nullptr;
     }
   }
-  return new MipsInstructionSetFeatures(smp, fpu_32bit, mips_isa_gte2, r6);
+  return std::unique_ptr<const InstructionSetFeatures>(
+      new MipsInstructionSetFeatures(smp, fpu_32bit, mips_isa_gte2, r6));
 }
 
 }  // namespace art
diff --git a/runtime/arch/mips/instruction_set_features_mips.h b/runtime/arch/mips/instruction_set_features_mips.h
index 2d54988..c2a28dc 100644
--- a/runtime/arch/mips/instruction_set_features_mips.h
+++ b/runtime/arch/mips/instruction_set_features_mips.h
@@ -23,29 +23,31 @@
 
 namespace art {
 
+class MipsInstructionSetFeatures;
+using MipsFeaturesUniquePtr = std::unique_ptr<const MipsInstructionSetFeatures>;
+
 // Instruction set features relevant to the MIPS architecture.
 class MipsInstructionSetFeatures FINAL : public InstructionSetFeatures {
  public:
   // Process a CPU variant string like "r4000" and create InstructionSetFeatures.
-  static const MipsInstructionSetFeatures* FromVariant(const std::string& variant,
-                                                        std::string* error_msg);
+  static MipsFeaturesUniquePtr FromVariant(const std::string& variant, std::string* error_msg);
 
   // Parse a bitmap and create an InstructionSetFeatures.
-  static const MipsInstructionSetFeatures* FromBitmap(uint32_t bitmap);
+  static MipsFeaturesUniquePtr FromBitmap(uint32_t bitmap);
 
   // Turn C pre-processor #defines into the equivalent instruction set features.
-  static const MipsInstructionSetFeatures* FromCppDefines();
+  static MipsFeaturesUniquePtr FromCppDefines();
 
   // Process /proc/cpuinfo and use kRuntimeISA to produce InstructionSetFeatures.
-  static const MipsInstructionSetFeatures* FromCpuInfo();
+  static MipsFeaturesUniquePtr FromCpuInfo();
 
   // Process the auxiliary vector AT_HWCAP entry and use kRuntimeISA to produce
   // InstructionSetFeatures.
-  static const MipsInstructionSetFeatures* FromHwcap();
+  static MipsFeaturesUniquePtr FromHwcap();
 
   // Use assembly tests of the current runtime (ie kRuntimeISA) to determine the
   // InstructionSetFeatures. This works around kernel bugs in AT_HWCAP and /proc/cpuinfo.
-  static const MipsInstructionSetFeatures* FromAssembly();
+  static MipsFeaturesUniquePtr FromAssembly();
 
   bool Equals(const InstructionSetFeatures* other) const OVERRIDE;
 
@@ -77,7 +79,7 @@
 
  protected:
   // Parse a vector of the form "fpu32", "mips2" adding these to a new MipsInstructionSetFeatures.
-  virtual const InstructionSetFeatures*
+  std::unique_ptr<const InstructionSetFeatures>
       AddFeaturesFromSplitString(const bool smp, const std::vector<std::string>& features,
                                  std::string* error_msg) const OVERRIDE;
 
diff --git a/runtime/arch/mips/quick_entrypoints_mips.S b/runtime/arch/mips/quick_entrypoints_mips.S
index c3c1882..34e34b4 100644
--- a/runtime/arch/mips/quick_entrypoints_mips.S
+++ b/runtime/arch/mips/quick_entrypoints_mips.S
@@ -1171,10 +1171,11 @@
 END art_quick_unlock_object_no_inline
 
     /*
-     * Entry from managed code that calls artCheckCastFromCode and delivers exception on failure.
+     * Entry from managed code that calls artInstanceOfFromCode and delivers exception on failure.
      */
-    .extern artThrowClassCastException
-ENTRY art_quick_check_cast
+    .extern artInstanceOfFromCode
+    .extern artThrowClassCastExceptionForObject
+ENTRY art_quick_check_instance_of
     addiu  $sp, $sp, -32
     .cfi_adjust_cfa_offset 32
     sw     $gp, 16($sp)
@@ -1183,7 +1184,7 @@
     sw     $t9, 8($sp)
     sw     $a1, 4($sp)
     sw     $a0, 0($sp)
-    la     $t9, artIsAssignableFromCode
+    la     $t9, artInstanceOfFromCode
     jalr   $t9
     addiu  $sp, $sp, -16             # reserve argument slots on the stack
     addiu  $sp, $sp, 16
@@ -1200,10 +1201,10 @@
     addiu  $sp, $sp, 32
     .cfi_adjust_cfa_offset -32
     SETUP_SAVE_ALL_CALLEE_SAVES_FRAME
-    la   $t9, artThrowClassCastException
-    jalr $zero, $t9                 # artThrowClassCastException (Class*, Class*, Thread*)
+    la   $t9, artThrowClassCastExceptionForObject
+    jalr $zero, $t9                 # artThrowClassCastExceptionForObject (Object*, Class*, Thread*)
     move $a2, rSELF                 # pass Thread::Current
-END art_quick_check_cast
+END art_quick_check_instance_of
 
     /*
      * Restore rReg's value from offset($sp) if rReg is not the same as rExclude.
diff --git a/runtime/arch/mips64/entrypoints_init_mips64.cc b/runtime/arch/mips64/entrypoints_init_mips64.cc
index a037905..bc17d47 100644
--- a/runtime/arch/mips64/entrypoints_init_mips64.cc
+++ b/runtime/arch/mips64/entrypoints_init_mips64.cc
@@ -30,8 +30,8 @@
 namespace art {
 
 // Cast entrypoints.
-extern "C" size_t artIsAssignableFromCode(const mirror::Class* klass,
-                                          const mirror::Class* ref_class);
+extern "C" size_t artInstanceOfFromCode(mirror::Object* obj, mirror::Class* ref_class);
+
 // Math entrypoints.
 extern int32_t CmpgDouble(double a, double b);
 extern int32_t CmplDouble(double a, double b);
@@ -59,12 +59,16 @@
 extern "C" int64_t __divdi3(int64_t, int64_t);
 extern "C" int64_t __moddi3(int64_t, int64_t);
 
+// No read barrier entrypoints for marking registers.
+void UpdateReadBarrierEntrypoints(QuickEntryPoints* qpoints ATTRIBUTE_UNUSED,
+                                  bool is_marking ATTRIBUTE_UNUSED) {}
+
 void InitEntryPoints(JniEntryPoints* jpoints, QuickEntryPoints* qpoints) {
   DefaultInitEntryPoints(jpoints, qpoints);
 
   // Cast
-  qpoints->pInstanceofNonTrivial = artIsAssignableFromCode;
-  qpoints->pCheckCast = art_quick_check_cast;
+  qpoints->pInstanceofNonTrivial = artInstanceOfFromCode;
+  qpoints->pCheckInstanceOf = art_quick_check_instance_of;
 
   // Math
   qpoints->pCmpgDouble = CmpgDouble;
diff --git a/runtime/arch/mips64/instruction_set_features_mips64.cc b/runtime/arch/mips64/instruction_set_features_mips64.cc
index 5c0c914..490a8d2 100644
--- a/runtime/arch/mips64/instruction_set_features_mips64.cc
+++ b/runtime/arch/mips64/instruction_set_features_mips64.cc
@@ -24,27 +24,27 @@
 
 namespace art {
 
-const Mips64InstructionSetFeatures* Mips64InstructionSetFeatures::FromVariant(
+Mips64FeaturesUniquePtr Mips64InstructionSetFeatures::FromVariant(
     const std::string& variant, std::string* error_msg ATTRIBUTE_UNUSED) {
   if (variant != "default" && variant != "mips64r6") {
     LOG(WARNING) << "Unexpected CPU variant for Mips64 using defaults: " << variant;
   }
   bool smp = true;  // Conservative default.
-  return new Mips64InstructionSetFeatures(smp);
+  return Mips64FeaturesUniquePtr(new Mips64InstructionSetFeatures(smp));
 }
 
-const Mips64InstructionSetFeatures* Mips64InstructionSetFeatures::FromBitmap(uint32_t bitmap) {
+Mips64FeaturesUniquePtr Mips64InstructionSetFeatures::FromBitmap(uint32_t bitmap) {
   bool smp = (bitmap & kSmpBitfield) != 0;
-  return new Mips64InstructionSetFeatures(smp);
+  return Mips64FeaturesUniquePtr(new Mips64InstructionSetFeatures(smp));
 }
 
-const Mips64InstructionSetFeatures* Mips64InstructionSetFeatures::FromCppDefines() {
+Mips64FeaturesUniquePtr Mips64InstructionSetFeatures::FromCppDefines() {
   const bool smp = true;
 
-  return new Mips64InstructionSetFeatures(smp);
+  return Mips64FeaturesUniquePtr(new Mips64InstructionSetFeatures(smp));
 }
 
-const Mips64InstructionSetFeatures* Mips64InstructionSetFeatures::FromCpuInfo() {
+Mips64FeaturesUniquePtr Mips64InstructionSetFeatures::FromCpuInfo() {
   // Look in /proc/cpuinfo for features we need.  Only use this when we can guarantee that
   // the kernel puts the appropriate feature flags in here.  Sometimes it doesn't.
   bool smp = false;
@@ -65,15 +65,15 @@
   } else {
     LOG(ERROR) << "Failed to open /proc/cpuinfo";
   }
-  return new Mips64InstructionSetFeatures(smp);
+  return Mips64FeaturesUniquePtr(new Mips64InstructionSetFeatures(smp));
 }
 
-const Mips64InstructionSetFeatures* Mips64InstructionSetFeatures::FromHwcap() {
+Mips64FeaturesUniquePtr Mips64InstructionSetFeatures::FromHwcap() {
   UNIMPLEMENTED(WARNING);
   return FromCppDefines();
 }
 
-const Mips64InstructionSetFeatures* Mips64InstructionSetFeatures::FromAssembly() {
+Mips64FeaturesUniquePtr Mips64InstructionSetFeatures::FromAssembly() {
   UNIMPLEMENTED(WARNING);
   return FromCppDefines();
 }
@@ -99,7 +99,8 @@
   return result;
 }
 
-const InstructionSetFeatures* Mips64InstructionSetFeatures::AddFeaturesFromSplitString(
+std::unique_ptr<const InstructionSetFeatures>
+Mips64InstructionSetFeatures::AddFeaturesFromSplitString(
     const bool smp, const std::vector<std::string>& features, std::string* error_msg) const {
   auto i = features.begin();
   if (i != features.end()) {
@@ -108,7 +109,7 @@
     *error_msg = StringPrintf("Unknown instruction set feature: '%s'", feature.c_str());
     return nullptr;
   }
-  return new Mips64InstructionSetFeatures(smp);
+  return std::unique_ptr<const InstructionSetFeatures>(new Mips64InstructionSetFeatures(smp));
 }
 
 }  // namespace art
diff --git a/runtime/arch/mips64/instruction_set_features_mips64.h b/runtime/arch/mips64/instruction_set_features_mips64.h
index d5d6012..2e66235 100644
--- a/runtime/arch/mips64/instruction_set_features_mips64.h
+++ b/runtime/arch/mips64/instruction_set_features_mips64.h
@@ -21,29 +21,32 @@
 
 namespace art {
 
+class Mips64InstructionSetFeatures;
+using Mips64FeaturesUniquePtr = std::unique_ptr<const Mips64InstructionSetFeatures>;
+
 // Instruction set features relevant to the MIPS64 architecture.
 class Mips64InstructionSetFeatures FINAL : public InstructionSetFeatures {
  public:
   // Process a CPU variant string like "r4000" and create InstructionSetFeatures.
-  static const Mips64InstructionSetFeatures* FromVariant(const std::string& variant,
-                                                        std::string* error_msg);
+  static Mips64FeaturesUniquePtr FromVariant(const std::string& variant,
+                                             std::string* error_msg);
 
   // Parse a bitmap and create an InstructionSetFeatures.
-  static const Mips64InstructionSetFeatures* FromBitmap(uint32_t bitmap);
+  static Mips64FeaturesUniquePtr FromBitmap(uint32_t bitmap);
 
   // Turn C pre-processor #defines into the equivalent instruction set features.
-  static const Mips64InstructionSetFeatures* FromCppDefines();
+  static Mips64FeaturesUniquePtr FromCppDefines();
 
   // Process /proc/cpuinfo and use kRuntimeISA to produce InstructionSetFeatures.
-  static const Mips64InstructionSetFeatures* FromCpuInfo();
+  static Mips64FeaturesUniquePtr FromCpuInfo();
 
   // Process the auxiliary vector AT_HWCAP entry and use kRuntimeISA to produce
   // InstructionSetFeatures.
-  static const Mips64InstructionSetFeatures* FromHwcap();
+  static Mips64FeaturesUniquePtr FromHwcap();
 
   // Use assembly tests of the current runtime (ie kRuntimeISA) to determine the
   // InstructionSetFeatures. This works around kernel bugs in AT_HWCAP and /proc/cpuinfo.
-  static const Mips64InstructionSetFeatures* FromAssembly();
+  static Mips64FeaturesUniquePtr FromAssembly();
 
   bool Equals(const InstructionSetFeatures* other) const OVERRIDE;
 
@@ -59,8 +62,9 @@
 
  protected:
   // Parse a vector of the form "fpu32", "mips2" adding these to a new Mips64InstructionSetFeatures.
-  virtual const InstructionSetFeatures*
-      AddFeaturesFromSplitString(const bool smp, const std::vector<std::string>& features,
+  std::unique_ptr<const InstructionSetFeatures>
+      AddFeaturesFromSplitString(const bool smp,
+                                 const std::vector<std::string>& features,
                                  std::string* error_msg) const OVERRIDE;
 
  private:
diff --git a/runtime/arch/mips64/quick_entrypoints_mips64.S b/runtime/arch/mips64/quick_entrypoints_mips64.S
index cb2d1c8..0861d2d 100644
--- a/runtime/arch/mips64/quick_entrypoints_mips64.S
+++ b/runtime/arch/mips64/quick_entrypoints_mips64.S
@@ -1256,10 +1256,11 @@
 END art_quick_unlock_object_no_inline
 
     /*
-     * Entry from managed code that calls artCheckCastFromCode and delivers exception on failure.
+     * Entry from managed code that calls artInstanceOfFromCode and delivers exception on failure.
      */
-    .extern artThrowClassCastException
-ENTRY art_quick_check_cast
+    .extern artInstanceOfFromCode
+    .extern artThrowClassCastExceptionForObject
+ENTRY art_quick_check_instance_of
     daddiu $sp, $sp, -32
     .cfi_adjust_cfa_offset 32
     sd     $ra, 24($sp)
@@ -1267,7 +1268,7 @@
     sd     $t9, 16($sp)
     sd     $a1, 8($sp)
     sd     $a0, 0($sp)
-    jal    artIsAssignableFromCode
+    jal    artInstanceOfFromCode
     .cpreturn                       # Restore gp from t8 in branch delay slot.
                                     # t8 may be clobbered in artIsAssignableFromCode.
     beq    $v0, $zero, .Lthrow_class_cast_exception
@@ -1283,10 +1284,10 @@
     .cfi_adjust_cfa_offset -32
     SETUP_GP
     SETUP_SAVE_ALL_CALLEE_SAVES_FRAME
-    dla  $t9, artThrowClassCastException
-    jalr $zero, $t9                 # artThrowClassCastException (Class*, Class*, Thread*)
+    dla  $t9, artThrowClassCastExceptionForObject
+    jalr $zero, $t9                 # artThrowClassCastExceptionForObject (Object*, Class*, Thread*)
     move $a2, rSELF                 # pass Thread::Current
-END art_quick_check_cast
+END art_quick_check_instance_of
 
 
     /*
diff --git a/runtime/arch/stub_test.cc b/runtime/arch/stub_test.cc
index 4638c3f..bbf9a8b 100644
--- a/runtime/arch/stub_test.cc
+++ b/runtime/arch/stub_test.cc
@@ -23,6 +23,7 @@
 #include "common_runtime_test.h"
 #include "entrypoints/quick/quick_entrypoints_enum.h"
 #include "imt_conflict_table.h"
+#include "jni_internal.h"
 #include "linear_alloc.h"
 #include "mirror/class-inl.h"
 #include "mirror/string-inl.h"
@@ -805,7 +806,7 @@
 
 #if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
     (defined(__x86_64__) && !defined(__APPLE__))
-extern "C" void art_quick_check_cast(void);
+extern "C" void art_quick_check_instance_of(void);
 #endif
 
 TEST_F(StubTest, CheckCast) {
@@ -813,40 +814,90 @@
     (defined(__x86_64__) && !defined(__APPLE__))
   Thread* self = Thread::Current();
 
-  const uintptr_t art_quick_check_cast = StubTest::GetEntrypoint(self, kQuickCheckCast);
+  const uintptr_t art_quick_check_instance_of =
+      StubTest::GetEntrypoint(self, kQuickCheckInstanceOf);
 
   // Find some classes.
   ScopedObjectAccess soa(self);
   // garbage is created during ClassLinker::Init
 
-  StackHandleScope<2> hs(soa.Self());
-  Handle<mirror::Class> c(
-      hs.NewHandle(class_linker_->FindSystemClass(soa.Self(), "[Ljava/lang/Object;")));
-  Handle<mirror::Class> c2(
-      hs.NewHandle(class_linker_->FindSystemClass(soa.Self(), "[Ljava/lang/String;")));
+  VariableSizedHandleScope hs(soa.Self());
+  Handle<mirror::Class> klass_obj(
+      hs.NewHandle(class_linker_->FindSystemClass(soa.Self(), "Ljava/lang/Object;")));
+  Handle<mirror::Class> klass_str(
+      hs.NewHandle(class_linker_->FindSystemClass(soa.Self(), "Ljava/lang/String;")));
+  Handle<mirror::Class> klass_list(
+      hs.NewHandle(class_linker_->FindSystemClass(soa.Self(), "Ljava/util/List;")));
+  Handle<mirror::Class> klass_cloneable(
+      hs.NewHandle(class_linker_->FindSystemClass(soa.Self(), "Ljava/lang/Cloneable;")));
+  Handle<mirror::Class> klass_array_list(
+      hs.NewHandle(class_linker_->FindSystemClass(soa.Self(), "Ljava/util/ArrayList;")));
+  Handle<mirror::Object> obj(hs.NewHandle(klass_obj->AllocObject(soa.Self())));
+  Handle<mirror::String> string(hs.NewHandle(
+      mirror::String::AllocFromModifiedUtf8(soa.Self(), "ABCD")));
+  Handle<mirror::Object> array_list(hs.NewHandle(klass_array_list->AllocObject(soa.Self())));
 
   EXPECT_FALSE(self->IsExceptionPending());
 
-  Invoke3(reinterpret_cast<size_t>(c.Get()), reinterpret_cast<size_t>(c.Get()), 0U,
-          art_quick_check_cast, self);
-
+  Invoke3(reinterpret_cast<size_t>(obj.Get()),
+          reinterpret_cast<size_t>(klass_obj.Get()),
+          0U,
+          art_quick_check_instance_of,
+          self);
   EXPECT_FALSE(self->IsExceptionPending());
 
-  Invoke3(reinterpret_cast<size_t>(c2.Get()), reinterpret_cast<size_t>(c2.Get()), 0U,
-          art_quick_check_cast, self);
-
+  // Expected true: Test string instance of java.lang.String.
+  Invoke3(reinterpret_cast<size_t>(string.Get()),
+          reinterpret_cast<size_t>(klass_str.Get()),
+          0U,
+          art_quick_check_instance_of,
+          self);
   EXPECT_FALSE(self->IsExceptionPending());
 
-  Invoke3(reinterpret_cast<size_t>(c.Get()), reinterpret_cast<size_t>(c2.Get()), 0U,
-          art_quick_check_cast, self);
-
+  // Expected true: Test string instance of java.lang.Object.
+  Invoke3(reinterpret_cast<size_t>(string.Get()),
+          reinterpret_cast<size_t>(klass_obj.Get()),
+          0U,
+          art_quick_check_instance_of,
+          self);
   EXPECT_FALSE(self->IsExceptionPending());
 
-  // TODO: Make the following work. But that would require correct managed frames.
+  // Expected false: Test object instance of java.lang.String.
+  Invoke3(reinterpret_cast<size_t>(obj.Get()),
+          reinterpret_cast<size_t>(klass_str.Get()),
+          0U,
+          art_quick_check_instance_of,
+          self);
+  EXPECT_TRUE(self->IsExceptionPending());
+  self->ClearException();
 
-  Invoke3(reinterpret_cast<size_t>(c2.Get()), reinterpret_cast<size_t>(c.Get()), 0U,
-          art_quick_check_cast, self);
+  Invoke3(reinterpret_cast<size_t>(array_list.Get()),
+          reinterpret_cast<size_t>(klass_list.Get()),
+          0U,
+          art_quick_check_instance_of,
+          self);
+  EXPECT_FALSE(self->IsExceptionPending());
 
+  Invoke3(reinterpret_cast<size_t>(array_list.Get()),
+          reinterpret_cast<size_t>(klass_cloneable.Get()),
+          0U,
+          art_quick_check_instance_of,
+          self);
+  EXPECT_FALSE(self->IsExceptionPending());
+
+  Invoke3(reinterpret_cast<size_t>(string.Get()),
+          reinterpret_cast<size_t>(klass_array_list.Get()),
+          0U,
+          art_quick_check_instance_of,
+          self);
+  EXPECT_TRUE(self->IsExceptionPending());
+  self->ClearException();
+
+  Invoke3(reinterpret_cast<size_t>(string.Get()),
+          reinterpret_cast<size_t>(klass_cloneable.Get()),
+          0U,
+          art_quick_check_instance_of,
+          self);
   EXPECT_TRUE(self->IsExceptionPending());
   self->ClearException();
 
@@ -1964,7 +2015,7 @@
   ASSERT_NE(nullptr, add_jmethod);
 
   // Get representation.
-  ArtMethod* contains_amethod = soa.DecodeMethod(contains_jmethod);
+  ArtMethod* contains_amethod = jni::DecodeArtMethod(contains_jmethod);
 
   // Patch up ArrayList.contains.
   if (contains_amethod->GetEntryPointFromQuickCompiledCode() == nullptr) {
@@ -1982,7 +2033,7 @@
   ASSERT_NE(nullptr, inf_contains_jmethod);
 
   // Get mirror representation.
-  ArtMethod* inf_contains = soa.DecodeMethod(inf_contains_jmethod);
+  ArtMethod* inf_contains = jni::DecodeArtMethod(inf_contains_jmethod);
 
   // Object
 
diff --git a/runtime/arch/x86/context_x86.cc b/runtime/arch/x86/context_x86.cc
index 077d2db..cb3dfec 100644
--- a/runtime/arch/x86/context_x86.cc
+++ b/runtime/arch/x86/context_x86.cc
@@ -17,6 +17,7 @@
 #include "context_x86.h"
 
 #include "base/bit_utils.h"
+#include "base/memory_tool.h"
 #include "quick/quick_method_frame_info.h"
 
 namespace art {
@@ -102,6 +103,7 @@
   uintptr_t esp = gprs[kNumberOfCpuRegisters - ESP - 1] - sizeof(intptr_t);
   gprs[kNumberOfCpuRegisters] = esp;
   *(reinterpret_cast<uintptr_t*>(esp)) = eip_;
+  MEMORY_TOOL_HANDLE_NO_RETURN;
   __asm__ __volatile__(
       "movl %1, %%ebx\n\t"          // Address base of FPRs.
       "movsd 0(%%ebx), %%xmm0\n\t"  // Load up XMM0-XMM7.
diff --git a/runtime/arch/x86/entrypoints_init_x86.cc b/runtime/arch/x86/entrypoints_init_x86.cc
index 0a10a3c..9cd4a3e 100644
--- a/runtime/arch/x86/entrypoints_init_x86.cc
+++ b/runtime/arch/x86/entrypoints_init_x86.cc
@@ -27,8 +27,7 @@
 namespace art {
 
 // Cast entrypoints.
-extern "C" size_t art_quick_is_assignable(const mirror::Class* klass,
-                                          const mirror::Class* ref_class);
+extern "C" size_t art_quick_instance_of(mirror::Object* obj, mirror::Class* ref_class);
 
 // Read barrier entrypoints.
 // art_quick_read_barrier_mark_regX uses a non-standard calling
@@ -45,12 +44,22 @@
 extern "C" mirror::Object* art_quick_read_barrier_slow(mirror::Object*, mirror::Object*, uint32_t);
 extern "C" mirror::Object* art_quick_read_barrier_for_root_slow(GcRoot<mirror::Object>*);
 
+void UpdateReadBarrierEntrypoints(QuickEntryPoints* qpoints, bool is_marking) {
+  qpoints->pReadBarrierMarkReg00 = is_marking ? art_quick_read_barrier_mark_reg00 : nullptr;
+  qpoints->pReadBarrierMarkReg01 = is_marking ? art_quick_read_barrier_mark_reg01 : nullptr;
+  qpoints->pReadBarrierMarkReg02 = is_marking ? art_quick_read_barrier_mark_reg02 : nullptr;
+  qpoints->pReadBarrierMarkReg03 = is_marking ? art_quick_read_barrier_mark_reg03 : nullptr;
+  qpoints->pReadBarrierMarkReg05 = is_marking ? art_quick_read_barrier_mark_reg05 : nullptr;
+  qpoints->pReadBarrierMarkReg06 = is_marking ? art_quick_read_barrier_mark_reg06 : nullptr;
+  qpoints->pReadBarrierMarkReg07 = is_marking ? art_quick_read_barrier_mark_reg07 : nullptr;
+}
+
 void InitEntryPoints(JniEntryPoints* jpoints, QuickEntryPoints* qpoints) {
   DefaultInitEntryPoints(jpoints, qpoints);
 
   // Cast
-  qpoints->pInstanceofNonTrivial = art_quick_is_assignable;
-  qpoints->pCheckCast = art_quick_check_cast;
+  qpoints->pInstanceofNonTrivial = art_quick_instance_of;
+  qpoints->pCheckInstanceOf = art_quick_check_instance_of;
 
   // More math.
   qpoints->pCos = cos;
@@ -88,14 +97,8 @@
 
   // Read barrier.
   qpoints->pReadBarrierJni = ReadBarrierJni;
-  qpoints->pReadBarrierMarkReg00 = art_quick_read_barrier_mark_reg00;
-  qpoints->pReadBarrierMarkReg01 = art_quick_read_barrier_mark_reg01;
-  qpoints->pReadBarrierMarkReg02 = art_quick_read_barrier_mark_reg02;
-  qpoints->pReadBarrierMarkReg03 = art_quick_read_barrier_mark_reg03;
+  UpdateReadBarrierEntrypoints(qpoints, /*is_marking*/ false);
   qpoints->pReadBarrierMarkReg04 = nullptr;  // Cannot use register 4 (ESP) to pass arguments.
-  qpoints->pReadBarrierMarkReg05 = art_quick_read_barrier_mark_reg05;
-  qpoints->pReadBarrierMarkReg06 = art_quick_read_barrier_mark_reg06;
-  qpoints->pReadBarrierMarkReg07 = art_quick_read_barrier_mark_reg07;
   // x86 has only 8 core registers.
   qpoints->pReadBarrierMarkReg08 = nullptr;
   qpoints->pReadBarrierMarkReg09 = nullptr;
diff --git a/runtime/arch/x86/instruction_set_features_x86.cc b/runtime/arch/x86/instruction_set_features_x86.cc
index 0093e82..90b55a9 100644
--- a/runtime/arch/x86/instruction_set_features_x86.cc
+++ b/runtime/arch/x86/instruction_set_features_x86.cc
@@ -49,7 +49,34 @@
     "silvermont",
 };
 
-const X86InstructionSetFeatures* X86InstructionSetFeatures::FromVariant(
+X86FeaturesUniquePtr X86InstructionSetFeatures::Create(bool x86_64,
+                                                       bool smp,
+                                                       bool has_SSSE3,
+                                                       bool has_SSE4_1,
+                                                       bool has_SSE4_2,
+                                                       bool has_AVX,
+                                                       bool has_AVX2,
+                                                       bool has_POPCNT) {
+  if (x86_64) {
+    return X86FeaturesUniquePtr(new X86_64InstructionSetFeatures(smp,
+                                                                 has_SSSE3,
+                                                                 has_SSE4_1,
+                                                                 has_SSE4_2,
+                                                                 has_AVX,
+                                                                 has_AVX2,
+                                                                 has_POPCNT));
+  } else {
+    return X86FeaturesUniquePtr(new X86InstructionSetFeatures(smp,
+                                                              has_SSSE3,
+                                                              has_SSE4_1,
+                                                              has_SSE4_2,
+                                                              has_AVX,
+                                                              has_AVX2,
+                                                              has_POPCNT));
+  }
+}
+
+X86FeaturesUniquePtr X86InstructionSetFeatures::FromVariant(
     const std::string& variant, std::string* error_msg ATTRIBUTE_UNUSED,
     bool x86_64) {
   bool smp = true;  // Conservative default.
@@ -75,17 +102,10 @@
     LOG(WARNING) << "Unexpected CPU variant for X86 using defaults: " << variant;
   }
 
-  if (x86_64) {
-    return new X86_64InstructionSetFeatures(smp, has_SSSE3, has_SSE4_1, has_SSE4_2, has_AVX,
-                                            has_AVX2, has_POPCNT);
-  } else {
-    return new X86InstructionSetFeatures(smp, has_SSSE3, has_SSE4_1, has_SSE4_2, has_AVX,
-                                            has_AVX2, has_POPCNT);
-  }
+  return Create(x86_64, smp, has_SSSE3, has_SSE4_1, has_SSE4_2, has_AVX, has_AVX2, has_POPCNT);
 }
 
-const X86InstructionSetFeatures* X86InstructionSetFeatures::FromBitmap(uint32_t bitmap,
-                                                                       bool x86_64) {
+X86FeaturesUniquePtr X86InstructionSetFeatures::FromBitmap(uint32_t bitmap, bool x86_64) {
   bool smp = (bitmap & kSmpBitfield) != 0;
   bool has_SSSE3 = (bitmap & kSsse3Bitfield) != 0;
   bool has_SSE4_1 = (bitmap & kSse4_1Bitfield) != 0;
@@ -93,16 +113,10 @@
   bool has_AVX = (bitmap & kAvxBitfield) != 0;
   bool has_AVX2 = (bitmap & kAvxBitfield) != 0;
   bool has_POPCNT = (bitmap & kPopCntBitfield) != 0;
-  if (x86_64) {
-    return new X86_64InstructionSetFeatures(smp, has_SSSE3, has_SSE4_1, has_SSE4_2,
-                                            has_AVX, has_AVX2, has_POPCNT);
-  } else {
-    return new X86InstructionSetFeatures(smp, has_SSSE3, has_SSE4_1, has_SSE4_2,
-                                         has_AVX, has_AVX2, has_POPCNT);
-  }
+  return Create(x86_64, smp, has_SSSE3, has_SSE4_1, has_SSE4_2, has_AVX, has_AVX2, has_POPCNT);
 }
 
-const X86InstructionSetFeatures* X86InstructionSetFeatures::FromCppDefines(bool x86_64) {
+X86FeaturesUniquePtr X86InstructionSetFeatures::FromCppDefines(bool x86_64) {
   const bool smp = true;
 
 #ifndef __SSSE3__
@@ -141,16 +155,10 @@
   const bool has_POPCNT = true;
 #endif
 
-  if (x86_64) {
-    return new X86_64InstructionSetFeatures(smp, has_SSSE3, has_SSE4_1, has_SSE4_2, has_AVX,
-                                            has_AVX2, has_POPCNT);
-  } else {
-    return new X86InstructionSetFeatures(smp, has_SSSE3, has_SSE4_1, has_SSE4_2, has_AVX,
-                                         has_AVX2, has_POPCNT);
-  }
+  return Create(x86_64, smp, has_SSSE3, has_SSE4_1, has_SSE4_2, has_AVX, has_AVX2, has_POPCNT);
 }
 
-const X86InstructionSetFeatures* X86InstructionSetFeatures::FromCpuInfo(bool x86_64) {
+X86FeaturesUniquePtr X86InstructionSetFeatures::FromCpuInfo(bool x86_64) {
   // Look in /proc/cpuinfo for features we need.  Only use this when we can guarantee that
   // the kernel puts the appropriate feature flags in here.  Sometimes it doesn't.
   bool smp = false;
@@ -198,21 +206,15 @@
   } else {
     LOG(ERROR) << "Failed to open /proc/cpuinfo";
   }
-  if (x86_64) {
-    return new X86_64InstructionSetFeatures(smp, has_SSSE3, has_SSE4_1, has_SSE4_2, has_AVX,
-                                            has_AVX2, has_POPCNT);
-  } else {
-    return new X86InstructionSetFeatures(smp, has_SSSE3, has_SSE4_1, has_SSE4_2, has_AVX,
-                                         has_AVX2, has_POPCNT);
-  }
+  return Create(x86_64, smp, has_SSSE3, has_SSE4_1, has_SSE4_2, has_AVX, has_AVX2, has_POPCNT);
 }
 
-const X86InstructionSetFeatures* X86InstructionSetFeatures::FromHwcap(bool x86_64) {
+X86FeaturesUniquePtr X86InstructionSetFeatures::FromHwcap(bool x86_64) {
   UNIMPLEMENTED(WARNING);
   return FromCppDefines(x86_64);
 }
 
-const X86InstructionSetFeatures* X86InstructionSetFeatures::FromAssembly(bool x86_64) {
+X86FeaturesUniquePtr X86InstructionSetFeatures::FromAssembly(bool x86_64) {
   UNIMPLEMENTED(WARNING);
   return FromCppDefines(x86_64);
 }
@@ -281,7 +283,7 @@
   return result;
 }
 
-const InstructionSetFeatures* X86InstructionSetFeatures::AddFeaturesFromSplitString(
+std::unique_ptr<const InstructionSetFeatures> X86InstructionSetFeatures::AddFeaturesFromSplitString(
     const bool smp, const std::vector<std::string>& features, bool x86_64,
     std::string* error_msg) const {
   bool has_SSSE3 = has_SSSE3_;
@@ -321,13 +323,7 @@
       return nullptr;
     }
   }
-  if (x86_64) {
-    return new X86_64InstructionSetFeatures(smp, has_SSSE3, has_SSE4_1, has_SSE4_2, has_AVX,
-                                            has_AVX2, has_POPCNT);
-  } else {
-    return new X86InstructionSetFeatures(smp, has_SSSE3, has_SSE4_1, has_SSE4_2, has_AVX,
-                                         has_AVX2, has_POPCNT);
-  }
+  return Create(x86_64, smp, has_SSSE3, has_SSE4_1, has_SSE4_2, has_AVX, has_AVX2, has_POPCNT);
 }
 
 }  // namespace art
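The hunks above fold the repeated if (x86_64) constructor selection into a single Create() helper and switch every factory to returning X86FeaturesUniquePtr instead of a raw const pointer. A minimal caller-side sketch of the resulting ownership, using only the FromCppDefines(), FromCpuInfo() and Equals() signatures visible in this patch (the include path is an assumption):

    #include <memory>
    #include "arch/x86/instruction_set_features_x86.h"  // assumed include path

    bool FeaturesMatchCompileTimeDefines() {
      // Both temporaries are now owned and freed automatically; the old
      // raw-pointer API left deletion to the caller.
      std::unique_ptr<const art::X86InstructionSetFeatures> from_defines =
          art::X86InstructionSetFeatures::FromCppDefines();
      std::unique_ptr<const art::X86InstructionSetFeatures> from_cpuinfo =
          art::X86InstructionSetFeatures::FromCpuInfo();
      return from_cpuinfo->Equals(from_defines.get());
    }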
diff --git a/runtime/arch/x86/instruction_set_features_x86.h b/runtime/arch/x86/instruction_set_features_x86.h
index 2aa8ae6..672892e 100644
--- a/runtime/arch/x86/instruction_set_features_x86.h
+++ b/runtime/arch/x86/instruction_set_features_x86.h
@@ -21,30 +21,34 @@
 
 namespace art {
 
+class X86InstructionSetFeatures;
+using X86FeaturesUniquePtr = std::unique_ptr<const X86InstructionSetFeatures>;
+
 // Instruction set features relevant to the X86 architecture.
 class X86InstructionSetFeatures : public InstructionSetFeatures {
  public:
   // Process a CPU variant string like "atom" or "nehalem" and create InstructionSetFeatures.
-  static const X86InstructionSetFeatures* FromVariant(const std::string& variant,
-                                                        std::string* error_msg,
-                                                        bool x86_64 = false);
+  static X86FeaturesUniquePtr FromVariant(const std::string& variant,
+                                          std::string* error_msg,
+                                          bool x86_64 = false);
 
   // Parse a bitmap and create an InstructionSetFeatures.
-  static const X86InstructionSetFeatures* FromBitmap(uint32_t bitmap, bool x86_64 = false);
+  static X86FeaturesUniquePtr FromBitmap(uint32_t bitmap,
+                                         bool x86_64 = false);
 
   // Turn C pre-processor #defines into the equivalent instruction set features.
-  static const X86InstructionSetFeatures* FromCppDefines(bool x86_64 = false);
+  static X86FeaturesUniquePtr FromCppDefines(bool x86_64 = false);
 
   // Process /proc/cpuinfo and use kRuntimeISA to produce InstructionSetFeatures.
-  static const X86InstructionSetFeatures* FromCpuInfo(bool x86_64 = false);
+  static X86FeaturesUniquePtr FromCpuInfo(bool x86_64 = false);
 
   // Process the auxiliary vector AT_HWCAP entry and use kRuntimeISA to produce
   // InstructionSetFeatures.
-  static const X86InstructionSetFeatures* FromHwcap(bool x86_64 = false);
+  static X86FeaturesUniquePtr FromHwcap(bool x86_64 = false);
 
   // Use assembly tests of the current runtime (ie kRuntimeISA) to determine the
   // InstructionSetFeatures. This works around kernel bugs in AT_HWCAP and /proc/cpuinfo.
-  static const X86InstructionSetFeatures* FromAssembly(bool x86_64 = false);
+  static X86FeaturesUniquePtr FromAssembly(bool x86_64 = false);
 
   bool Equals(const InstructionSetFeatures* other) const OVERRIDE;
 
@@ -64,13 +68,13 @@
 
  protected:
   // Parse a string of the form "ssse3" adding these to a new InstructionSetFeatures.
-  virtual const InstructionSetFeatures*
+  virtual std::unique_ptr<const InstructionSetFeatures>
       AddFeaturesFromSplitString(const bool smp, const std::vector<std::string>& features,
                                  std::string* error_msg) const OVERRIDE {
     return AddFeaturesFromSplitString(smp, features, false, error_msg);
   }
 
-  const InstructionSetFeatures*
+  std::unique_ptr<const InstructionSetFeatures>
       AddFeaturesFromSplitString(const bool smp, const std::vector<std::string>& features,
                                  bool x86_64, std::string* error_msg) const;
 
@@ -85,6 +89,15 @@
         has_POPCNT_(has_POPCNT) {
   }
 
+  static X86FeaturesUniquePtr Create(bool x86_64,
+                                     bool smp,
+                                     bool has_SSSE3,
+                                     bool has_SSE4_1,
+                                     bool has_SSE4_2,
+                                     bool has_AVX,
+                                     bool has_AVX2,
+                                     bool has_POPCNT);
+
  private:
   // Bitmap positions for encoding features as a bitmap.
   enum {
diff --git a/runtime/arch/x86/quick_entrypoints_x86.S b/runtime/arch/x86/quick_entrypoints_x86.S
index f4f9a68..fb405fa 100644
--- a/runtime/arch/x86/quick_entrypoints_x86.S
+++ b/runtime/arch/x86/quick_entrypoints_x86.S
@@ -1351,21 +1351,21 @@
     RETURN_IF_EAX_ZERO
 END_FUNCTION art_quick_unlock_object_no_inline
 
-DEFINE_FUNCTION art_quick_is_assignable
+DEFINE_FUNCTION art_quick_instance_of
     PUSH eax                              // alignment padding
     PUSH ecx                              // pass arg2 - obj->klass
     PUSH eax                              // pass arg1 - checked class
-    call SYMBOL(artIsAssignableFromCode)  // (Class* klass, Class* ref_klass)
+    call SYMBOL(artInstanceOfFromCode)    // (Object* obj, Class* ref_klass)
     addl LITERAL(12), %esp                // pop arguments
     CFI_ADJUST_CFA_OFFSET(-12)
     ret
-END_FUNCTION art_quick_is_assignable
+END_FUNCTION art_quick_instance_of
 
-DEFINE_FUNCTION art_quick_check_cast
+DEFINE_FUNCTION art_quick_check_instance_of
     PUSH eax                              // alignment padding
-    PUSH ecx                              // pass arg2 - obj->klass
-    PUSH eax                              // pass arg1 - checked class
-    call SYMBOL(artIsAssignableFromCode)  // (Class* klass, Class* ref_klass)
+    PUSH ecx                              // pass arg2 - checked class
+    PUSH eax                              // pass arg1 - obj
+    call SYMBOL(artInstanceOfFromCode)    // (Object* obj, Class* ref_klass)
     testl %eax, %eax
     jz 1f                                 // jump forward if not assignable
     addl LITERAL(12), %esp                // pop arguments
@@ -1385,9 +1385,9 @@
     CFI_ADJUST_CFA_OFFSET(4)
     PUSH ecx                              // pass arg2
     PUSH eax                              // pass arg1
-    call SYMBOL(artThrowClassCastException) // (Class* a, Class* b, Thread*)
+    call SYMBOL(artThrowClassCastExceptionForObject)  // (Object* src, Class* dest, Thread*)
     UNREACHABLE
-END_FUNCTION art_quick_check_cast
+END_FUNCTION art_quick_check_instance_of
 
 // Restore reg's value if reg is not the same as exclude_reg, otherwise just adjust stack.
 MACRO2(POP_REG_NE, reg, exclude_reg)
@@ -2035,15 +2035,14 @@
     lea MIRROR_STRING_VALUE_OFFSET(%ecx), %edi
 #if (STRING_COMPRESSION_FEATURE)
     /* Differ cases */
-    cmpl    LITERAL(0), %edx
-    jl      .Lstring_compareto_this_is_compressed
-    cmpl    LITERAL(0), %ebx
-    jl      .Lstring_compareto_that_is_compressed
+    shrl    LITERAL(1), %edx
+    jnc     .Lstring_compareto_this_is_compressed
+    shrl    LITERAL(1), %ebx
+    jnc     .Lstring_compareto_that_is_compressed
     jmp     .Lstring_compareto_both_not_compressed
 .Lstring_compareto_this_is_compressed:
-    andl    LITERAL(0x7FFFFFFF), %edx
-    cmpl    LITERAL(0), %ebx
-    jl      .Lstring_compareto_both_compressed
+    shrl    LITERAL(1), %ebx
+    jnc     .Lstring_compareto_both_compressed
     /* If (this->IsCompressed() && that->IsCompressed() == false) */
     mov     %edx, %eax
     subl    %ebx, %eax
@@ -2061,7 +2060,6 @@
     cmovne  %edx, %eax                        // return eax = *(this_cur_char) - *(that_cur_char)
     jmp     .Lstring_compareto_return
 .Lstring_compareto_that_is_compressed:
-    andl    LITERAL(0x7FFFFFFF), %ebx
     mov     %edx, %eax
     subl    %ebx, %eax
     mov     %edx, %ecx
@@ -2078,7 +2076,6 @@
     cmovne  %edx, %eax
     jmp     .Lstring_compareto_return         // return eax = *(this_cur_char) - *(that_cur_char)
 .Lstring_compareto_both_compressed:
-    andl    LITERAL(0x7FFFFFFF), %ebx
     /* Calculate min length and count diff */
     mov     %edx, %ecx
     mov     %edx, %eax
@@ -2155,8 +2152,15 @@
     jz .Lslow_rb_\name
     ret
 .Lslow_rb_\name:
-    // Save all potentially live caller-save core registers.
     PUSH eax
+    mov MIRROR_OBJECT_LOCK_WORD_OFFSET(REG_VAR(reg)), %eax
+    add LITERAL(LOCK_WORD_STATE_FORWARDING_ADDRESS_OVERFLOW), %eax
+    // Jump on overflow; the only case where it overflows should be the forwarding address one.
+    // Taken ~25% of the time.
+    jnae .Lret_forwarding_address\name
+
+    // Save all potentially live caller-save core registers.
+    mov 0(%esp), %eax
     PUSH ecx
     PUSH edx
     PUSH ebx
@@ -2204,6 +2208,12 @@
     POP_REG_NE eax, RAW_VAR(reg)
 .Lret_rb_\name:
     ret
+.Lret_forwarding_address\name:
+    // The overflow cleared the top bits.
+    sall LITERAL(LOCK_WORD_STATE_FORWARDING_ADDRESS_SHIFT), %eax
+    mov %eax, REG_VAR(reg)
+    POP_REG_NE eax, RAW_VAR(reg)
+    ret
     END_FUNCTION VAR(name)
 END_MACRO
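The new slow-path prologue in the read barrier mark stubs (here and again in the x86-64 file below) checks the object's lock word before saving all caller-save registers: adding LOCK_WORD_STATE_FORWARDING_ADDRESS_OVERFLOW wraps around exactly when the state field holds the forwarding-address value, and the wrapped result shifted left by LOCK_WORD_STATE_FORWARDING_ADDRESS_SHIFT is the forwarded reference. A hedged C++ model of that arithmetic; the concrete constant values below are illustrative, only their relationships matter:

    #include <cstdint>

    // Illustrative constants: the forwarding-address state is the largest state
    // value, so it is the only state that carries out of 32 bits when the
    // overflow addend is added.
    constexpr uint32_t kStateShift = 30;
    constexpr uint32_t kStateForwardingAddress = 3;
    constexpr uint32_t kOverflowAddend = (4u - kStateForwardingAddress) << kStateShift;
    constexpr uint32_t kForwardingAddressShift = 3;

    inline bool GetForwardingAddress(uint32_t lock_word, uint32_t* fwd_addr) {
      uint64_t sum = uint64_t{lock_word} + kOverflowAddend;
      if (sum <= UINT32_MAX) {
        return false;  // No unsigned overflow: some other lock-word state.
      }
      // The wrap-around cleared the state bits; the shift restores the address.
      *fwd_addr = static_cast<uint32_t>(sum) << kForwardingAddressShift;
      return true;
    }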
 
diff --git a/runtime/arch/x86_64/entrypoints_init_x86_64.cc b/runtime/arch/x86_64/entrypoints_init_x86_64.cc
index 8c425d5..a326b4e 100644
--- a/runtime/arch/x86_64/entrypoints_init_x86_64.cc
+++ b/runtime/arch/x86_64/entrypoints_init_x86_64.cc
@@ -30,8 +30,7 @@
 namespace art {
 
 // Cast entrypoints.
-extern "C" size_t art_quick_assignable_from_code(const mirror::Class* klass,
-                                                 const mirror::Class* ref_class);
+extern "C" size_t art_quick_instance_of(mirror::Object* obj, mirror::Class* ref_class);
 
 // Read barrier entrypoints.
 // art_quick_read_barrier_mark_regX uses an non-standard calling
@@ -56,6 +55,24 @@
 extern "C" mirror::Object* art_quick_read_barrier_slow(mirror::Object*, mirror::Object*, uint32_t);
 extern "C" mirror::Object* art_quick_read_barrier_for_root_slow(GcRoot<mirror::Object>*);
 
+void UpdateReadBarrierEntrypoints(QuickEntryPoints* qpoints, bool is_marking) {
+  qpoints->pReadBarrierMarkReg00 = is_marking ? art_quick_read_barrier_mark_reg00 : nullptr;
+  qpoints->pReadBarrierMarkReg01 = is_marking ? art_quick_read_barrier_mark_reg01 : nullptr;
+  qpoints->pReadBarrierMarkReg02 = is_marking ? art_quick_read_barrier_mark_reg02 : nullptr;
+  qpoints->pReadBarrierMarkReg03 = is_marking ? art_quick_read_barrier_mark_reg03 : nullptr;
+  qpoints->pReadBarrierMarkReg05 = is_marking ? art_quick_read_barrier_mark_reg05 : nullptr;
+  qpoints->pReadBarrierMarkReg06 = is_marking ? art_quick_read_barrier_mark_reg06 : nullptr;
+  qpoints->pReadBarrierMarkReg07 = is_marking ? art_quick_read_barrier_mark_reg07 : nullptr;
+  qpoints->pReadBarrierMarkReg08 = is_marking ? art_quick_read_barrier_mark_reg08 : nullptr;
+  qpoints->pReadBarrierMarkReg09 = is_marking ? art_quick_read_barrier_mark_reg09 : nullptr;
+  qpoints->pReadBarrierMarkReg10 = is_marking ? art_quick_read_barrier_mark_reg10 : nullptr;
+  qpoints->pReadBarrierMarkReg11 = is_marking ? art_quick_read_barrier_mark_reg11 : nullptr;
+  qpoints->pReadBarrierMarkReg12 = is_marking ? art_quick_read_barrier_mark_reg12 : nullptr;
+  qpoints->pReadBarrierMarkReg13 = is_marking ? art_quick_read_barrier_mark_reg13 : nullptr;
+  qpoints->pReadBarrierMarkReg14 = is_marking ? art_quick_read_barrier_mark_reg14 : nullptr;
+  qpoints->pReadBarrierMarkReg15 = is_marking ? art_quick_read_barrier_mark_reg15 : nullptr;
+}
+
 void InitEntryPoints(JniEntryPoints* jpoints, QuickEntryPoints* qpoints) {
 #if defined(__APPLE__)
   UNUSED(jpoints, qpoints);
@@ -64,8 +81,8 @@
   DefaultInitEntryPoints(jpoints, qpoints);
 
   // Cast
-  qpoints->pInstanceofNonTrivial = art_quick_assignable_from_code;
-  qpoints->pCheckCast = art_quick_check_cast;
+  qpoints->pInstanceofNonTrivial = art_quick_instance_of;
+  qpoints->pCheckInstanceOf = art_quick_check_instance_of;
 
   // More math.
   qpoints->pCos = cos;
@@ -102,22 +119,8 @@
 
   // Read barrier.
   qpoints->pReadBarrierJni = ReadBarrierJni;
-  qpoints->pReadBarrierMarkReg00 = art_quick_read_barrier_mark_reg00;
-  qpoints->pReadBarrierMarkReg01 = art_quick_read_barrier_mark_reg01;
-  qpoints->pReadBarrierMarkReg02 = art_quick_read_barrier_mark_reg02;
-  qpoints->pReadBarrierMarkReg03 = art_quick_read_barrier_mark_reg03;
+  UpdateReadBarrierEntrypoints(qpoints, /*is_marking*/ false);
   qpoints->pReadBarrierMarkReg04 = nullptr;  // Cannot use register 4 (RSP) to pass arguments.
-  qpoints->pReadBarrierMarkReg05 = art_quick_read_barrier_mark_reg05;
-  qpoints->pReadBarrierMarkReg06 = art_quick_read_barrier_mark_reg06;
-  qpoints->pReadBarrierMarkReg07 = art_quick_read_barrier_mark_reg07;
-  qpoints->pReadBarrierMarkReg08 = art_quick_read_barrier_mark_reg08;
-  qpoints->pReadBarrierMarkReg09 = art_quick_read_barrier_mark_reg09;
-  qpoints->pReadBarrierMarkReg10 = art_quick_read_barrier_mark_reg10;
-  qpoints->pReadBarrierMarkReg11 = art_quick_read_barrier_mark_reg11;
-  qpoints->pReadBarrierMarkReg12 = art_quick_read_barrier_mark_reg12;
-  qpoints->pReadBarrierMarkReg13 = art_quick_read_barrier_mark_reg13;
-  qpoints->pReadBarrierMarkReg14 = art_quick_read_barrier_mark_reg14;
-  qpoints->pReadBarrierMarkReg15 = art_quick_read_barrier_mark_reg15;
   // x86-64 has only 16 core registers.
   qpoints->pReadBarrierMarkReg16 = nullptr;
   qpoints->pReadBarrierMarkReg17 = nullptr;
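UpdateReadBarrierEntrypoints centralizes the per-register mark entrypoint assignments, and InitEntryPoints now installs them with marking disabled instead of unconditionally pointing at the mark stubs. A hypothetical caller sketch, not part of this patch and assuming the function is visible at the call site:

    // Illustrative helper: a collector phase change can flip all mark entrypoints
    // with one call instead of assigning each field at every transition.
    void SetReadBarrierMarking(art::QuickEntryPoints* qpoints, bool is_marking) {
      // With is_marking == false every pReadBarrierMarkRegNN reverts to nullptr,
      // matching the state InitEntryPoints installs above.
      art::UpdateReadBarrierEntrypoints(qpoints, is_marking);
      // Register 4 (RSP) is left alone: it is set to nullptr once in
      // InitEntryPoints and is never used to pass the reference.
    }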
diff --git a/runtime/arch/x86_64/instruction_set_features_x86_64.h b/runtime/arch/x86_64/instruction_set_features_x86_64.h
index 0840f89..bc0f708 100644
--- a/runtime/arch/x86_64/instruction_set_features_x86_64.h
+++ b/runtime/arch/x86_64/instruction_set_features_x86_64.h
@@ -21,41 +21,42 @@
 
 namespace art {
 
+class X86_64InstructionSetFeatures;
+using X86_64FeaturesUniquePtr = std::unique_ptr<const X86_64InstructionSetFeatures>;
+
 // Instruction set features relevant to the X86_64 architecture.
 class X86_64InstructionSetFeatures FINAL : public X86InstructionSetFeatures {
  public:
   // Process a CPU variant string like "atom" or "nehalem" and create InstructionSetFeatures.
-  static const X86_64InstructionSetFeatures* FromVariant(const std::string& variant,
-                                                         std::string* error_msg) {
-    return X86InstructionSetFeatures::FromVariant(variant, error_msg, true)
-        ->AsX86_64InstructionSetFeatures();
+  static X86_64FeaturesUniquePtr FromVariant(const std::string& variant, std::string* error_msg) {
+    return Convert(X86InstructionSetFeatures::FromVariant(variant, error_msg, true));
   }
 
   // Parse a bitmap and create an InstructionSetFeatures.
-  static const X86_64InstructionSetFeatures* FromBitmap(uint32_t bitmap) {
-    return X86InstructionSetFeatures::FromBitmap(bitmap, true)->AsX86_64InstructionSetFeatures();
+  static X86_64FeaturesUniquePtr FromBitmap(uint32_t bitmap) {
+    return Convert(X86InstructionSetFeatures::FromBitmap(bitmap, true));
   }
 
   // Turn C pre-processor #defines into the equivalent instruction set features.
-  static const X86_64InstructionSetFeatures* FromCppDefines() {
-    return X86InstructionSetFeatures::FromCppDefines(true)->AsX86_64InstructionSetFeatures();
+  static X86_64FeaturesUniquePtr FromCppDefines() {
+    return Convert(X86InstructionSetFeatures::FromCppDefines(true));
   }
 
   // Process /proc/cpuinfo and use kRuntimeISA to produce InstructionSetFeatures.
-  static const X86_64InstructionSetFeatures* FromCpuInfo() {
-    return X86InstructionSetFeatures::FromCpuInfo(true)->AsX86_64InstructionSetFeatures();
+  static X86_64FeaturesUniquePtr FromCpuInfo() {
+    return Convert(X86InstructionSetFeatures::FromCpuInfo(true));
   }
 
   // Process the auxiliary vector AT_HWCAP entry and use kRuntimeISA to produce
   // InstructionSetFeatures.
-  static const X86_64InstructionSetFeatures* FromHwcap() {
-    return X86InstructionSetFeatures::FromHwcap(true)->AsX86_64InstructionSetFeatures();
+  static X86_64FeaturesUniquePtr FromHwcap() {
+    return Convert(X86InstructionSetFeatures::FromHwcap(true));
   }
 
   // Use assembly tests of the current runtime (ie kRuntimeISA) to determine the
   // InstructionSetFeatures. This works around kernel bugs in AT_HWCAP and /proc/cpuinfo.
-  static const X86_64InstructionSetFeatures* FromAssembly() {
-    return X86InstructionSetFeatures::FromAssembly(true)->AsX86_64InstructionSetFeatures();
+  static X86_64FeaturesUniquePtr FromAssembly() {
+    return Convert(X86InstructionSetFeatures::FromAssembly(true));
   }
 
   InstructionSet GetInstructionSet() const OVERRIDE {
@@ -66,7 +67,7 @@
 
  protected:
   // Parse a string of the form "ssse3" adding these to a new InstructionSetFeatures.
-  const InstructionSetFeatures*
+  std::unique_ptr<const InstructionSetFeatures>
       AddFeaturesFromSplitString(const bool smp, const std::vector<std::string>& features,
                                  std::string* error_msg) const OVERRIDE {
     return X86InstructionSetFeatures::AddFeaturesFromSplitString(smp, features, true, error_msg);
@@ -79,6 +80,10 @@
                                   has_AVX2, has_POPCNT) {
   }
 
+  static X86_64FeaturesUniquePtr Convert(X86FeaturesUniquePtr&& in) {
+    return X86_64FeaturesUniquePtr(in.release()->AsX86_64InstructionSetFeatures());
+  }
+
   friend class X86InstructionSetFeatures;
 
   DISALLOW_COPY_AND_ASSIGN(X86_64InstructionSetFeatures);
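The Convert() helper above exists because std::unique_ptr offers no implicit conversion from unique_ptr<const X86InstructionSetFeatures> to unique_ptr<const X86_64InstructionSetFeatures>: ownership has to be released, downcast, and re-wrapped. A generic sketch of that pattern with placeholder types:

    #include <memory>

    struct Base { virtual ~Base() = default; };
    struct Derived : Base {};

    // Safe only when the source is known to own a Derived, which is what passing
    // x86_64 == true to the base-class factories guarantees in this patch.
    std::unique_ptr<const Derived> DowncastOwnership(std::unique_ptr<const Base>&& in) {
      return std::unique_ptr<const Derived>(static_cast<const Derived*>(in.release()));
    }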
diff --git a/runtime/arch/x86_64/quick_entrypoints_x86_64.S b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
index afa1c0f..860b77e 100644
--- a/runtime/arch/x86_64/quick_entrypoints_x86_64.S
+++ b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
@@ -1480,19 +1480,21 @@
     RETURN_IF_EAX_ZERO
 END_FUNCTION art_quick_unlock_object_no_inline
 
-DEFINE_FUNCTION art_quick_check_cast
+DEFINE_FUNCTION art_quick_check_instance_of
+    // We could check the super classes here but that is usually already checked in the caller.
     PUSH rdi                          // Save args for exc
     PUSH rsi
     subq LITERAL(8), %rsp             // Alignment padding.
     CFI_ADJUST_CFA_OFFSET(8)
     SETUP_FP_CALLEE_SAVE_FRAME
-    call SYMBOL(artIsAssignableFromCode)  // (Class* klass, Class* ref_klass)
+    call SYMBOL(artInstanceOfFromCode)  // (Object* obj, Class* ref_klass)
     testq %rax, %rax
     jz 1f                             // jump forward if not assignable
     RESTORE_FP_CALLEE_SAVE_FRAME
     addq LITERAL(24), %rsp            // pop arguments
     CFI_ADJUST_CFA_OFFSET(-24)
 
+.Lreturn:
     ret
 
     CFI_ADJUST_CFA_OFFSET(24 + 4 * 8)  // Reset unwind info so following code unwinds.
@@ -1504,9 +1506,9 @@
     POP rdi
     SETUP_SAVE_ALL_CALLEE_SAVES_FRAME // save all registers as basis for long jump context
     mov %gs:THREAD_SELF_OFFSET, %rdx  // pass Thread::Current()
-    call SYMBOL(artThrowClassCastException) // (Class* a, Class* b, Thread*)
+    call SYMBOL(artThrowClassCastExceptionForObject)  // (Object* src, Class* dest, Thread*)
     UNREACHABLE
-END_FUNCTION art_quick_check_cast
+END_FUNCTION art_quick_check_instance_of
 
 
 // Restore reg's value if reg is not the same as exclude_reg, otherwise just adjust stack.
@@ -2140,15 +2142,14 @@
     leal MIRROR_STRING_VALUE_OFFSET(%esi), %esi
 #if (STRING_COMPRESSION_FEATURE)
     /* Differ cases */
-    cmpl LITERAL(0), %r8d
-    jl      .Lstring_compareto_this_is_compressed
-    cmpl    LITERAL(0), %r9d
-    jl      .Lstring_compareto_that_is_compressed
+    shrl    LITERAL(1), %r8d
+    jnc     .Lstring_compareto_this_is_compressed
+    shrl    LITERAL(1), %r9d
+    jnc     .Lstring_compareto_that_is_compressed
     jmp     .Lstring_compareto_both_not_compressed
 .Lstring_compareto_this_is_compressed:
-    andl    LITERAL(0x7FFFFFFF), %r8d
-    cmpl    LITERAL(0), %r9d
-    jl      .Lstring_compareto_both_compressed
+    shrl    LITERAL(1), %r9d
+    jnc     .Lstring_compareto_both_compressed
     /* Comparison this (8-bit) and that (16-bit) */
     mov     %r8d, %eax
     subl    %r9d, %eax
@@ -2167,7 +2168,6 @@
 .Lstring_compareto_keep_length1:
     ret
 .Lstring_compareto_that_is_compressed:
-    andl    LITERAL(0x7FFFFFFF), %r9d
     movl    %r8d, %eax
     subl    %r9d, %eax
     mov     %r8d, %ecx
@@ -2185,7 +2185,6 @@
 .Lstring_compareto_keep_length2:
     ret
 .Lstring_compareto_both_compressed:
-    andl    LITERAL(0x7FFFFFFF), %r9d
     /* Calculate min length and count diff */
     movl    %r8d, %ecx
     movl    %r8d, %eax
@@ -2224,16 +2223,16 @@
 
 UNIMPLEMENTED art_quick_memcmp16
 
-DEFINE_FUNCTION art_quick_assignable_from_code
+DEFINE_FUNCTION art_quick_instance_of
     SETUP_FP_CALLEE_SAVE_FRAME
     subq LITERAL(8), %rsp                      // Alignment padding.
     CFI_ADJUST_CFA_OFFSET(8)
-    call SYMBOL(artIsAssignableFromCode)       // (const mirror::Class*, const mirror::Class*)
+    call SYMBOL(artInstanceOfFromCode)         // (mirror::Object*, mirror::Class*)
     addq LITERAL(8), %rsp
     CFI_ADJUST_CFA_OFFSET(-8)
     RESTORE_FP_CALLEE_SAVE_FRAME
     ret
-END_FUNCTION art_quick_assignable_from_code
+END_FUNCTION art_quick_instance_of
 
 
 // Return from a nested signal:
@@ -2274,8 +2273,16 @@
     jz .Lslow_rb_\name
     ret
 .Lslow_rb_\name:
-    // Save all potentially live caller-save core registers.
     PUSH rax
+    movl MIRROR_OBJECT_LOCK_WORD_OFFSET(REG_VAR(reg)), %eax
+    addl LITERAL(LOCK_WORD_STATE_FORWARDING_ADDRESS_OVERFLOW), %eax
+    // Jump if the addl caused an unsigned overflow in eax. The only case where it overflows is the
+    // forwarding address one.
+    // Taken ~25% of the time.
+    jnae .Lret_forwarding_address\name
+
+    // Save all potentially live caller-save core registers.
+    movq 0(%rsp), %rax
     PUSH rcx
     PUSH rdx
     PUSH rsi
@@ -2340,6 +2347,12 @@
     POP_REG_NE rax, RAW_VAR(reg)
 .Lret_rb_\name:
     ret
+.Lret_forwarding_address\name:
+    // The overflow cleared the top bits.
+    sall LITERAL(LOCK_WORD_STATE_FORWARDING_ADDRESS_SHIFT), %eax
+    movq %rax, REG_VAR(reg)
+    POP_REG_NE rax, RAW_VAR(reg)
+    ret
     END_FUNCTION VAR(name)
 END_MACRO
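The art_quick_string_compareto changes (in this file and the x86 one above) replace the old sign-bit compression test, a cmpl/jl pair followed by masking with 0x7FFFFFFF, with a single shrl: the shifted-out low bit lands in the carry flag and the remaining bits are already the character count. A hedged sketch of the count-field layout the new code implies; the exact encoding lives in the String class, so treat this as a model only:

    #include <cstdint>

    // Implied layout: bit 0 is the compression flag, where a cleared bit means the
    // string is compressed (8-bit chars); the higher bits hold the character count,
    // so one right shift yields the length.
    inline bool IsCompressed(uint32_t count_field) { return (count_field & 1u) == 0u; }
    inline uint32_t GetLength(uint32_t count_field) { return count_field >> 1; }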
 
diff --git a/runtime/asm_support.h b/runtime/asm_support.h
index cd8815b..5ef1f06 100644
--- a/runtime/asm_support.h
+++ b/runtime/asm_support.h
@@ -172,6 +172,9 @@
 #define MIRROR_CLASS_COMPONENT_TYPE_OFFSET (4 + MIRROR_OBJECT_HEADER_SIZE)
 ADD_TEST_EQ(MIRROR_CLASS_COMPONENT_TYPE_OFFSET,
             art::mirror::Class::ComponentTypeOffset().Int32Value())
+#define MIRROR_CLASS_IF_TABLE_OFFSET (16 + MIRROR_OBJECT_HEADER_SIZE)
+ADD_TEST_EQ(MIRROR_CLASS_IF_TABLE_OFFSET,
+            art::mirror::Class::IfTableOffset().Int32Value())
 #define MIRROR_CLASS_ACCESS_FLAGS_OFFSET (64 + MIRROR_OBJECT_HEADER_SIZE)
 ADD_TEST_EQ(MIRROR_CLASS_ACCESS_FLAGS_OFFSET,
             art::mirror::Class::AccessFlagsOffset().Int32Value())
diff --git a/runtime/base/iteration_range.h b/runtime/base/iteration_range.h
index 54ab174..9d45707 100644
--- a/runtime/base/iteration_range.h
+++ b/runtime/base/iteration_range.h
@@ -54,6 +54,17 @@
   return IterationRange<Iter>(it, it);
 }
 
+template <typename Container>
+inline auto ReverseRange(Container& c) {
+  typedef typename std::reverse_iterator<decltype(c.begin())> riter;
+  return MakeIterationRange(riter(c.end()), riter(c.begin()));
+}
+
+template <typename T, size_t size>
+inline auto ReverseRange(T (&array)[size]) {
+  return ReverseRange(MakeIterationRange<T*>(array, array+size));
+}
+
 }  // namespace art
 
 #endif  // ART_RUNTIME_BASE_ITERATION_RANGE_H_
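ReverseRange wraps a container, or a plain C array, in reverse iterators so it can be used directly in a range-based for loop. A possible usage sketch, not part of the patch; the include path is an assumption:

    #include <vector>
    #include "base/iteration_range.h"  // assumed include path

    int SumInReverse() {
      std::vector<int> values = {1, 2, 3};
      int sum = 0;
      for (int v : art::ReverseRange(values)) {
        sum += v;  // visits 3, then 2, then 1
      }
      int raw[] = {4, 5, 6};
      for (int v : art::ReverseRange(raw)) {
        sum += v;  // visits 6, then 5, then 4
      }
      return sum;
    }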
diff --git a/runtime/base/memory_tool.h b/runtime/base/memory_tool.h
index e1a2e07..42cbaa0 100644
--- a/runtime/base/memory_tool.h
+++ b/runtime/base/memory_tool.h
@@ -40,7 +40,10 @@
 constexpr bool kMemoryToolIsAvailable = false;
 #endif
 
+extern "C" void __asan_handle_no_return();
+
 #define ATTRIBUTE_NO_SANITIZE_ADDRESS __attribute__((no_sanitize_address))
+#define MEMORY_TOOL_HANDLE_NO_RETURN __asan_handle_no_return()
 #define RUNNING_ON_MEMORY_TOOL 1U
 constexpr bool kMemoryToolIsValgrind = false;
 constexpr bool kMemoryToolDetectsLeaks = true;
@@ -55,6 +58,7 @@
 #define MEMORY_TOOL_MAKE_UNDEFINED(p, s) VALGRIND_MAKE_MEM_UNDEFINED(p, s)
 #define MEMORY_TOOL_MAKE_DEFINED(p, s) VALGRIND_MAKE_MEM_DEFINED(p, s)
 #define ATTRIBUTE_NO_SANITIZE_ADDRESS
+#define MEMORY_TOOL_HANDLE_NO_RETURN do { } while (0)
 #define RUNNING_ON_MEMORY_TOOL RUNNING_ON_VALGRIND
 constexpr bool kMemoryToolIsAvailable = true;
 constexpr bool kMemoryToolIsValgrind = true;
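MEMORY_TOOL_HANDLE_NO_RETURN expands to __asan_handle_no_return() under AddressSanitizer and to a no-op otherwise. The ASan hook tells the tool that the current stack frames are about to be abandoned without running their epilogues, so it can unpoison them first. A sketch of the intended call pattern; the longjmp target and include path are illustrative:

    #include <csetjmp>
    #include "base/memory_tool.h"  // assumed include path

    static std::jmp_buf gBailOutEnv;  // illustrative

    void BailOut() {
      // Called immediately before a control transfer that skips normal unwinding.
      MEMORY_TOOL_HANDLE_NO_RETURN;
      std::longjmp(gBailOutEnv, 1);
    }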
diff --git a/runtime/base/mutex.cc b/runtime/base/mutex.cc
index bde0327..5d92298 100644
--- a/runtime/base/mutex.cc
+++ b/runtime/base/mutex.cc
@@ -42,7 +42,6 @@
 ReaderWriterMutex* Locks::heap_bitmap_lock_ = nullptr;
 Mutex* Locks::instrument_entrypoints_lock_ = nullptr;
 Mutex* Locks::intern_table_lock_ = nullptr;
-Mutex* Locks::interpreter_string_init_map_lock_ = nullptr;
 Mutex* Locks::jni_libraries_lock_ = nullptr;
 Mutex* Locks::logging_lock_ = nullptr;
 Mutex* Locks::mem_maps_lock_ = nullptr;
diff --git a/runtime/base/mutex.h b/runtime/base/mutex.h
index 3f2c5a9..74b786c 100644
--- a/runtime/base/mutex.h
+++ b/runtime/base/mutex.h
@@ -79,7 +79,6 @@
   kAllocSpaceLock,
   kBumpPointerSpaceBlockLock,
   kArenaPoolLock,
-  kDexFileToMethodInlinerMapLock,
   kInternTableLock,
   kOatFileSecondaryLookupLock,
   kHostDlOpenHandlesLock,
@@ -92,12 +91,10 @@
   kDefaultMutexLevel,
   kDexLock,
   kMarkSweepLargeObjectLock,
-  kPinTableLock,
   kJdwpObjectRegistryLock,
   kModifyLdtLock,
   kAllocatedThreadIdsLock,
   kMonitorPoolLock,
-  kMethodVerifiersLock,
   kClassLinkerClassesLock,  // TODO rename.
   kJitCodeCacheLock,
   kBreakpointLock,
@@ -630,12 +627,9 @@
   // TODO: improve name, perhaps instrumentation_update_lock_.
   static Mutex* deoptimization_lock_ ACQUIRED_AFTER(alloc_tracker_lock_);
 
-  // Guards String initializer register map in interpreter.
-  static Mutex* interpreter_string_init_map_lock_ ACQUIRED_AFTER(deoptimization_lock_);
-
   // The thread_list_lock_ guards ThreadList::list_. It is also commonly held to stop threads
   // attaching and detaching.
-  static Mutex* thread_list_lock_ ACQUIRED_AFTER(interpreter_string_init_map_lock_);
+  static Mutex* thread_list_lock_ ACQUIRED_AFTER(deoptimization_lock_);
 
   // Signaled when threads terminate. Used to determine when all non-daemons have terminated.
   static ConditionVariable* thread_exit_cond_ GUARDED_BY(Locks::thread_list_lock_);
diff --git a/runtime/base/time_utils.h b/runtime/base/time_utils.h
index 55d2764..383b52f 100644
--- a/runtime/base/time_utils.h
+++ b/runtime/base/time_utils.h
@@ -73,9 +73,11 @@
 }
 
 #if defined(__APPLE__)
-// No clocks to specify on OS/X, fake value to pass to routines that require a clock.
+#ifndef CLOCK_REALTIME
+// No clocks to specify on OS/X < 10.12, fake value to pass to routines that require a clock.
 #define CLOCK_REALTIME 0xebadf00d
 #endif
+#endif
 
 // Sleep for the given number of nanoseconds, a bad way to handle contention.
 void NanoSleep(uint64_t ns);
diff --git a/runtime/base/unix_file/fd_file.cc b/runtime/base/unix_file/fd_file.cc
index 4498198..ff2dd1b 100644
--- a/runtime/base/unix_file/fd_file.cc
+++ b/runtime/base/unix_file/fd_file.cc
@@ -339,22 +339,59 @@
   return true;
 }
 
-void FdFile::Erase() {
+bool FdFile::Unlink() {
+  if (file_path_.empty()) {
+    return false;
+  }
+
+  // Try to figure out whether this file is still referring to the one on disk.
+  bool is_current = false;
+  {
+    struct stat this_stat, current_stat;
+    int cur_fd = TEMP_FAILURE_RETRY(open(file_path_.c_str(), O_RDONLY));
+    if (cur_fd > 0) {
+      // File still exists.
+      if (fstat(fd_, &this_stat) == 0 && fstat(cur_fd, &current_stat) == 0) {
+        is_current = (this_stat.st_dev == current_stat.st_dev) &&
+                     (this_stat.st_ino == current_stat.st_ino);
+      }
+      close(cur_fd);
+    }
+  }
+
+  if (is_current) {
+    unlink(file_path_.c_str());
+  }
+
+  return is_current;
+}
+
+bool FdFile::Erase(bool unlink) {
   DCHECK(!read_only_mode_);
-  TEMP_FAILURE_RETRY(SetLength(0));
-  TEMP_FAILURE_RETRY(Flush());
-  TEMP_FAILURE_RETRY(Close());
+
+  bool ret_result = true;
+  if (unlink) {
+    ret_result = Unlink();
+  }
+
+  int result;
+  result = SetLength(0);
+  result = Flush();
+  result = Close();
+  // Ignore the errors.
+
+  return ret_result;
 }
 
 int FdFile::FlushCloseOrErase() {
   DCHECK(!read_only_mode_);
-  int flush_result = TEMP_FAILURE_RETRY(Flush());
+  int flush_result = Flush();
   if (flush_result != 0) {
     LOG(ERROR) << "CloseOrErase failed while flushing a file.";
     Erase();
     return flush_result;
   }
-  int close_result = TEMP_FAILURE_RETRY(Close());
+  int close_result = Close();
   if (close_result != 0) {
     LOG(ERROR) << "CloseOrErase failed while closing a file.";
     Erase();
@@ -365,11 +402,11 @@
 
 int FdFile::FlushClose() {
   DCHECK(!read_only_mode_);
-  int flush_result = TEMP_FAILURE_RETRY(Flush());
+  int flush_result = Flush();
   if (flush_result != 0) {
     LOG(ERROR) << "FlushClose failed while flushing a file.";
   }
-  int close_result = TEMP_FAILURE_RETRY(Close());
+  int close_result = Close();
   if (close_result != 0) {
     LOG(ERROR) << "FlushClose failed while closing a file.";
   }
diff --git a/runtime/base/unix_file/fd_file.h b/runtime/base/unix_file/fd_file.h
index d896ee9..eb85c4f 100644
--- a/runtime/base/unix_file/fd_file.h
+++ b/runtime/base/unix_file/fd_file.h
@@ -97,7 +97,14 @@
   int Flush() OVERRIDE WARN_UNUSED;
 
   // Short for SetLength(0); Flush(); Close();
-  void Erase();
+  // If the file was opened with a path name and unlink = true, also calls Unlink() on the path.
+  // Note that it is the caller's responsibility to avoid races.
+  bool Erase(bool unlink = false);
+
+  // Call unlink() if the file was opened with a path, and if open() with the name shows that
+  // the file descriptor of this file is still up-to-date. This is still racy, though, and it
+  // is up to the caller to ensure correctness in a multi-process setup.
+  bool Unlink();
 
   // Try to Flush(), then try to Close(); If either fails, call Erase().
   int FlushCloseOrErase() WARN_UNUSED;
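The new Erase(bool unlink) and Unlink() let a caller remove the file's directory entry along with truncating and closing it, but only after comparing the open descriptor's device and inode numbers against a fresh open() of the same path, so a file that has already been replaced on disk is not unlinked by mistake. A usage sketch mirroring the test added below; the path and include path are illustrative:

    #include <fcntl.h>
    #include "base/unix_file/fd_file.h"  // assumed include path

    void DiscardScratchFile() {
      // Assumes the file already exists at this (illustrative) path.
      unix_file::FdFile file("/tmp/scratch.tmp", O_RDWR, /*check_usage*/ false);
      if (file.IsOpened()) {
        // Truncates, flushes, closes, and, because unlink == true, also removes the
        // path when this descriptor still matches the file currently on disk.
        file.Erase(/*unlink*/ true);
      }
    }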
diff --git a/runtime/base/unix_file/fd_file_test.cc b/runtime/base/unix_file/fd_file_test.cc
index 99ef6f7..7657a38 100644
--- a/runtime/base/unix_file/fd_file_test.cc
+++ b/runtime/base/unix_file/fd_file_test.cc
@@ -186,4 +186,24 @@
   ASSERT_EQ(file2.Close(), 0);
 }
 
+TEST_F(FdFileTest, EraseWithPathUnlinks) {
+  // New scratch file, zero-length.
+  art::ScratchFile tmp;
+  std::string filename = tmp.GetFilename();
+  tmp.Close();  // This is required because of the unlink race between the scratch file and the
+                // FdFile, which leads to close-guard breakage.
+  FdFile file(filename, O_RDWR, false);
+  ASSERT_TRUE(file.IsOpened());
+  EXPECT_GE(file.Fd(), 0);
+  uint8_t buffer[16] = { 0 };
+  EXPECT_TRUE(file.WriteFully(&buffer, sizeof(buffer)));
+  EXPECT_EQ(file.Flush(), 0);
+
+  EXPECT_TRUE(file.Erase(true));
+
+  EXPECT_FALSE(file.IsOpened());
+
+  EXPECT_FALSE(art::OS::FileExists(filename.c_str())) << filename;
+}
+
 }  // namespace unix_file
diff --git a/runtime/base/variant_map_test.cc b/runtime/base/variant_map_test.cc
index ccb22eb..93336e0 100644
--- a/runtime/base/variant_map_test.cc
+++ b/runtime/base/variant_map_test.cc
@@ -107,8 +107,8 @@
   fmFilled.Set(FruitMap::Orange, 555.0);
   EXPECT_EQ(size_t(2), fmFilled.Size());
 
-  // Test copy constructor
-  FruitMap fmEmptyCopy(fmEmpty);
+  // Test copy constructor (NOLINT as a reference is suggested, instead)
+  FruitMap fmEmptyCopy(fmEmpty);  // NOLINT
   EXPECT_EQ(size_t(0), fmEmptyCopy.Size());
 
   // Test copy constructor
diff --git a/runtime/check_jni.cc b/runtime/check_jni.cc
index 5399dc5..6c27bc6 100644
--- a/runtime/check_jni.cc
+++ b/runtime/check_jni.cc
@@ -282,7 +282,7 @@
       return false;
     }
 
-    ArtField* f = CheckFieldID(soa, fid);
+    ArtField* f = CheckFieldID(fid);
     if (f == nullptr) {
       return false;
     }
@@ -313,7 +313,7 @@
   bool CheckMethodAndSig(ScopedObjectAccess& soa, jobject jobj, jclass jc,
                          jmethodID mid, Primitive::Type type, InvokeType invoke)
       REQUIRES_SHARED(Locks::mutator_lock_) {
-    ArtMethod* m = CheckMethodID(soa, mid);
+    ArtMethod* m = CheckMethodID(mid);
     if (m == nullptr) {
       return false;
     }
@@ -362,7 +362,7 @@
   bool CheckStaticFieldID(ScopedObjectAccess& soa, jclass java_class, jfieldID fid)
       REQUIRES_SHARED(Locks::mutator_lock_) {
     ObjPtr<mirror::Class> c = soa.Decode<mirror::Class>(java_class);
-    ArtField* f = CheckFieldID(soa, fid);
+    ArtField* f = CheckFieldID(fid);
     if (f == nullptr) {
       return false;
     }
@@ -385,7 +385,7 @@
    */
   bool CheckStaticMethod(ScopedObjectAccess& soa, jclass java_class, jmethodID mid)
       REQUIRES_SHARED(Locks::mutator_lock_) {
-    ArtMethod* m = CheckMethodID(soa, mid);
+    ArtMethod* m = CheckMethodID(mid);
     if (m == nullptr) {
       return false;
     }
@@ -407,7 +407,7 @@
    */
   bool CheckVirtualMethod(ScopedObjectAccess& soa, jobject java_object, jmethodID mid)
       REQUIRES_SHARED(Locks::mutator_lock_) {
-    ArtMethod* m = CheckMethodID(soa, mid);
+    ArtMethod* m = CheckMethodID(mid);
     if (m == nullptr) {
       return false;
     }
@@ -577,9 +577,8 @@
     return true;
   }
 
-  bool CheckConstructor(ScopedObjectAccess& soa, jmethodID mid)
-      REQUIRES_SHARED(Locks::mutator_lock_) {
-    ArtMethod* method = soa.DecodeMethod(mid);
+  bool CheckConstructor(jmethodID mid) REQUIRES_SHARED(Locks::mutator_lock_) {
+    ArtMethod* method = jni::DecodeArtMethod(mid);
     if (method == nullptr) {
       AbortF("expected non-null constructor");
       return false;
@@ -682,7 +681,7 @@
     if (!is_static && !CheckInstanceFieldID(soa, obj, fid)) {
       return false;
     }
-    ArtField* field = soa.DecodeField(fid);
+    ArtField* field = jni::DecodeArtField(fid);
     DCHECK(field != nullptr);  // Already checked by Check.
     if (is_static != field->IsStatic()) {
       AbortF("attempt to access %s field %s: %p",
@@ -844,9 +843,9 @@
       case 'c':  // jclass
         return CheckInstance(soa, kClass, arg.c, false);
       case 'f':  // jfieldID
-        return CheckFieldID(soa, arg.f) != nullptr;
+        return CheckFieldID(arg.f) != nullptr;
       case 'm':  // jmethodID
-        return CheckMethodID(soa, arg.m) != nullptr;
+        return CheckMethodID(arg.m) != nullptr;
       case 'r':  // release int
         return CheckReleaseMode(arg.r);
       case 's':  // jstring
@@ -868,7 +867,7 @@
       REQUIRES_SHARED(Locks::mutator_lock_) {
     CHECK(args_p != nullptr);
     VarArgs args(args_p->Clone());
-    ArtMethod* m = CheckMethodID(soa, args.GetMethodID());
+    ArtMethod* m = CheckMethodID(args.GetMethodID());
     if (m == nullptr) {
       return false;
     }
@@ -962,7 +961,7 @@
       }
       case 'f': {  // jfieldID
         jfieldID fid = arg.f;
-        ArtField* f = soa.DecodeField(fid);
+        ArtField* f = jni::DecodeArtField(fid);
         *msg += ArtField::PrettyField(f);
         if (!entry) {
           StringAppendF(msg, " (%p)", fid);
@@ -971,7 +970,7 @@
       }
       case 'm': {  // jmethodID
         jmethodID mid = arg.m;
-        ArtMethod* m = soa.DecodeMethod(mid);
+        ArtMethod* m = jni::DecodeArtMethod(mid);
         *msg += ArtMethod::PrettyMethod(m);
         if (!entry) {
           StringAppendF(msg, " (%p)", mid);
@@ -981,7 +980,7 @@
       case '.': {
         const VarArgs* va = arg.va;
         VarArgs args(va->Clone());
-        ArtMethod* m = soa.DecodeMethod(args.GetMethodID());
+        ArtMethod* m = jni::DecodeArtMethod(args.GetMethodID());
         uint32_t len;
         const char* shorty = m->GetShorty(&len);
         CHECK_GE(len, 1u);
@@ -1147,13 +1146,12 @@
     return true;
   }
 
-  ArtField* CheckFieldID(ScopedObjectAccess& soa, jfieldID fid)
-      REQUIRES_SHARED(Locks::mutator_lock_) {
+  ArtField* CheckFieldID(jfieldID fid) REQUIRES_SHARED(Locks::mutator_lock_) {
     if (fid == nullptr) {
       AbortF("jfieldID was NULL");
       return nullptr;
     }
-    ArtField* f = soa.DecodeField(fid);
+    ArtField* f = jni::DecodeArtField(fid);
     // TODO: Better check here.
     if (!Runtime::Current()->GetHeap()->IsValidObjectAddress(f->GetDeclaringClass().Ptr())) {
       Runtime::Current()->GetHeap()->DumpSpaces(LOG_STREAM(ERROR));
@@ -1163,13 +1161,12 @@
     return f;
   }
 
-  ArtMethod* CheckMethodID(ScopedObjectAccess& soa, jmethodID mid)
-      REQUIRES_SHARED(Locks::mutator_lock_) {
+  ArtMethod* CheckMethodID(jmethodID mid) REQUIRES_SHARED(Locks::mutator_lock_) {
     if (mid == nullptr) {
       AbortF("jmethodID was NULL");
       return nullptr;
     }
-    ArtMethod* m = soa.DecodeMethod(mid);
+    ArtMethod* m = jni::DecodeArtMethod(mid);
     // TODO: Better check here.
     if (!Runtime::Current()->GetHeap()->IsValidObjectAddress(m->GetDeclaringClass())) {
       Runtime::Current()->GetHeap()->DumpSpaces(LOG_STREAM(ERROR));
@@ -2005,7 +2002,7 @@
     VarArgs rest(mid, vargs);
     JniValueType args[4] = {{.E = env}, {.c = c}, {.m = mid}, {.va = &rest}};
     if (sc.Check(soa, true, "Ecm.", args) && sc.CheckInstantiableNonArray(soa, c) &&
-        sc.CheckConstructor(soa, mid)) {
+        sc.CheckConstructor(mid)) {
       JniValueType result;
       result.L = baseEnv(env)->NewObjectV(env, c, mid, vargs);
       if (sc.Check(soa, false, "L", &result)) {
@@ -2029,7 +2026,7 @@
     VarArgs rest(mid, vargs);
     JniValueType args[4] = {{.E = env}, {.c = c}, {.m = mid}, {.va = &rest}};
     if (sc.Check(soa, true, "Ecm.", args) && sc.CheckInstantiableNonArray(soa, c) &&
-        sc.CheckConstructor(soa, mid)) {
+        sc.CheckConstructor(mid)) {
       JniValueType result;
       result.L = baseEnv(env)->NewObjectA(env, c, mid, vargs);
       if (sc.Check(soa, false, "L", &result)) {
diff --git a/runtime/class_linker-inl.h b/runtime/class_linker-inl.h
index 350855b..7359243 100644
--- a/runtime/class_linker-inl.h
+++ b/runtime/class_linker-inl.h
@@ -69,7 +69,7 @@
   Thread::PoisonObjectPointersIfDebug();
   ObjPtr<mirror::Class> declaring_class = referrer->GetDeclaringClass();
   // MethodVerifier refuses methods with string_idx out of bounds.
-  DCHECK_LT(string_idx, declaring_class->GetDexFile().NumStringIds());;
+  DCHECK_LT(string_idx, declaring_class->GetDexFile().NumStringIds());
   ObjPtr<mirror::String> string =
         mirror::StringDexCachePair::Lookup(declaring_class->GetDexCacheStrings(),
                                            string_idx,
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index 3ae36de..4905514 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -63,13 +63,16 @@
 #include "jit/jit.h"
 #include "jit/jit_code_cache.h"
 #include "jit/offline_profiling_info.h"
+#include "jni_internal.h"
 #include "leb128.h"
 #include "linear_alloc.h"
 #include "mirror/class.h"
 #include "mirror/class-inl.h"
+#include "mirror/class_ext.h"
 #include "mirror/class_loader.h"
 #include "mirror/dex_cache.h"
 #include "mirror/dex_cache-inl.h"
+#include "mirror/emulated_stack_frame.h"
 #include "mirror/field.h"
 #include "mirror/iftable-inl.h"
 #include "mirror/method.h"
@@ -135,10 +138,22 @@
   return exception_init_method != nullptr;
 }
 
-// Helper for ThrowEarlierClassFailure. Throws the stored error.
-static void HandleEarlierVerifyError(Thread* self, ClassLinker* class_linker, ObjPtr<mirror::Class> c)
+static mirror::Object* GetVerifyError(ObjPtr<mirror::Class> c)
     REQUIRES_SHARED(Locks::mutator_lock_) {
-  ObjPtr<mirror::Object> obj = c->GetVerifyError();
+  ObjPtr<mirror::ClassExt> ext(c->GetExtData());
+  if (ext == nullptr) {
+    return nullptr;
+  } else {
+    return ext->GetVerifyError();
+  }
+}
+
+// Helper for ThrowEarlierClassFailure. Throws the stored error.
+static void HandleEarlierVerifyError(Thread* self,
+                                     ClassLinker* class_linker,
+                                     ObjPtr<mirror::Class> c)
+    REQUIRES_SHARED(Locks::mutator_lock_) {
+  ObjPtr<mirror::Object> obj = GetVerifyError(c);
   DCHECK(obj != nullptr);
   self->AssertNoPendingException();
   if (obj->IsClass()) {
@@ -172,8 +187,8 @@
   Runtime* const runtime = Runtime::Current();
   if (!runtime->IsAotCompiler()) {  // Give info if this occurs at runtime.
     std::string extra;
-    if (c->GetVerifyError() != nullptr) {
-      ObjPtr<mirror::Object> verify_error = c->GetVerifyError();
+    if (GetVerifyError(c) != nullptr) {
+      ObjPtr<mirror::Object> verify_error = GetVerifyError(c);
       if (verify_error->IsClass()) {
         extra = mirror::Class::PrettyDescriptor(verify_error->AsClass());
       } else {
@@ -191,11 +206,14 @@
     ObjPtr<mirror::Throwable> pre_allocated = runtime->GetPreAllocatedNoClassDefFoundError();
     self->SetException(pre_allocated);
   } else {
-    if (c->GetVerifyError() != nullptr) {
+    if (GetVerifyError(c) != nullptr) {
       // Rethrow stored error.
       HandleEarlierVerifyError(self, this, c);
     }
-    if (c->GetVerifyError() == nullptr || wrap_in_no_class_def) {
+    // TODO: This might be wrong if we hit an OOME while allocating the ClassExt. In that case
+    // we might have meant to take the earlier branch with the original error, but it was
+    // swallowed by the OOM, so we end up here.
+    if (GetVerifyError(c) == nullptr || wrap_in_no_class_def) {
       // If there isn't a recorded earlier error, or this is a repeat throw from initialization,
       // the top-level exception must be a NoClassDefFoundError. The potentially already pending
       // exception will be a cause.
@@ -377,8 +395,8 @@
   CHECK(java_lang_Class.Get() != nullptr);
   mirror::Class::SetClassClass(java_lang_Class.Get());
   java_lang_Class->SetClass(java_lang_Class.Get());
-  if (kUseBakerOrBrooksReadBarrier) {
-    java_lang_Class->AssertReadBarrierPointer();
+  if (kUseBakerReadBarrier) {
+    java_lang_Class->AssertReadBarrierState();
   }
   java_lang_Class->SetClassSize(class_class_size);
   java_lang_Class->SetPrimitiveType(Primitive::kPrimNot);
@@ -457,6 +475,9 @@
   SetClassRoot(kJavaLangString, java_lang_String.Get());
   SetClassRoot(kJavaLangRefReference, java_lang_ref_Reference.Get());
 
+  // Fill in the empty iftable. Needs to be done after the kObjectArrayClass root is set.
+  java_lang_Object->SetIfTable(AllocIfTable(self, 0));
+
   // Setup the primitive type classes.
   SetClassRoot(kPrimitiveBoolean, CreatePrimitiveClass(self, Primitive::kPrimBoolean));
   SetClassRoot(kPrimitiveByte, CreatePrimitiveClass(self, Primitive::kPrimByte));
@@ -494,6 +515,14 @@
   java_lang_DexCache->SetObjectSize(mirror::DexCache::InstanceSize());
   mirror::Class::SetStatus(java_lang_DexCache, mirror::Class::kStatusResolved, self);
 
+
+  // Setup dalvik.system.ClassExt
+  Handle<mirror::Class> dalvik_system_ClassExt(hs.NewHandle(
+      AllocClass(self, java_lang_Class.Get(), mirror::ClassExt::ClassSize(image_pointer_size_))));
+  SetClassRoot(kDalvikSystemClassExt, dalvik_system_ClassExt.Get());
+  mirror::ClassExt::SetClass(dalvik_system_ClassExt.Get());
+  mirror::Class::SetStatus(dalvik_system_ClassExt, mirror::Class::kStatusResolved, self);
+
   // Set up array classes for string, field, method
   Handle<mirror::Class> object_array_string(hs.NewHandle(
       AllocClass(self, java_lang_Class.Get(),
@@ -539,7 +568,7 @@
     quick_to_interpreter_bridge_trampoline_ = GetQuickToInterpreterBridge();
   }
 
-  // Object, String and DexCache need to be rerun through FindSystemClass to finish init
+  // Object, String, ClassExt and DexCache need to be rerun through FindSystemClass to finish init
   mirror::Class::SetStatus(java_lang_Object, mirror::Class::kStatusNotReady, self);
   CheckSystemClass(self, java_lang_Object, "Ljava/lang/Object;");
   CHECK_EQ(java_lang_Object->GetObjectSize(), mirror::Object::InstanceSize());
@@ -548,6 +577,9 @@
   mirror::Class::SetStatus(java_lang_DexCache, mirror::Class::kStatusNotReady, self);
   CheckSystemClass(self, java_lang_DexCache, "Ljava/lang/DexCache;");
   CHECK_EQ(java_lang_DexCache->GetObjectSize(), mirror::DexCache::InstanceSize());
+  mirror::Class::SetStatus(dalvik_system_ClassExt, mirror::Class::kStatusNotReady, self);
+  CheckSystemClass(self, dalvik_system_ClassExt, "Ldalvik/system/ClassExt;");
+  CHECK_EQ(dalvik_system_ClassExt->GetObjectSize(), mirror::ClassExt::InstanceSize());
 
   // Setup the primitive array type classes - can't be done until Object has a vtable.
   SetClassRoot(kBooleanArrayClass, FindSystemClass(self, "[Z"));
@@ -650,6 +682,11 @@
   SetClassRoot(kJavaLangInvokeMethodHandleImpl, class_root);
   mirror::MethodHandleImpl::SetClass(class_root);
 
+  class_root = FindSystemClass(self, "Ldalvik/system/EmulatedStackFrame;");
+  CHECK(class_root != nullptr);
+  SetClassRoot(kDalvikSystemEmulatedStackFrame, class_root);
+  mirror::EmulatedStackFrame::SetClass(class_root);
+
   // java.lang.ref classes need to be specially flagged, but otherwise are normal classes
   // finish initializing Reference class
   mirror::Class::SetStatus(java_lang_ref_Reference, mirror::Class::kStatusNotReady, self);
@@ -882,13 +919,11 @@
         SanityCheckArtMethod(klass->GetEmbeddedVTableEntry(i, pointer_size), nullptr, image_spaces);
       }
     }
-    auto* iftable = klass->GetIfTable();
-    if (iftable != nullptr) {
-      for (int32_t i = 0; i < klass->GetIfTableCount(); ++i) {
-        if (iftable->GetMethodArrayCount(i) > 0) {
-          SanityCheckArtMethodPointerArray(
-              iftable->GetMethodArray(i), nullptr, pointer_size, image_spaces);
-        }
+    mirror::IfTable* iftable = klass->GetIfTable();
+    for (int32_t i = 0; i < klass->GetIfTableCount(); ++i) {
+      if (iftable->GetMethodArrayCount(i) > 0) {
+        SanityCheckArtMethodPointerArray(
+            iftable->GetMethodArray(i), nullptr, pointer_size, image_spaces);
       }
     }
   }
@@ -1059,6 +1094,8 @@
   mirror::ShortArray::SetArrayClass(GetClassRoot(kShortArrayClass));
   mirror::Throwable::SetClass(GetClassRoot(kJavaLangThrowable));
   mirror::StackTraceElement::SetClass(GetClassRoot(kJavaLangStackTraceElement));
+  mirror::EmulatedStackFrame::SetClass(GetClassRoot(kDalvikSystemEmulatedStackFrame));
+  mirror::ClassExt::SetClass(GetClassRoot(kDalvikSystemClassExt));
 
   for (gc::space::ImageSpace* image_space : spaces) {
     // Boot class loader, use a null handle.
@@ -1089,13 +1126,12 @@
            class_loader->GetClass();
 }
 
-static mirror::String* GetDexPathListElementName(ScopedObjectAccessUnchecked& soa,
-                                                 ObjPtr<mirror::Object> element)
+static mirror::String* GetDexPathListElementName(ObjPtr<mirror::Object> element)
     REQUIRES_SHARED(Locks::mutator_lock_) {
   ArtField* const dex_file_field =
-      soa.DecodeField(WellKnownClasses::dalvik_system_DexPathList__Element_dexFile);
+      jni::DecodeArtField(WellKnownClasses::dalvik_system_DexPathList__Element_dexFile);
   ArtField* const dex_file_name_field =
-      soa.DecodeField(WellKnownClasses::dalvik_system_DexFile_fileName);
+      jni::DecodeArtField(WellKnownClasses::dalvik_system_DexFile_fileName);
   DCHECK(dex_file_field != nullptr);
   DCHECK(dex_file_name_field != nullptr);
   DCHECK(element != nullptr);
@@ -1119,9 +1155,9 @@
   DCHECK(error_msg != nullptr);
   ScopedObjectAccessUnchecked soa(Thread::Current());
   ArtField* const dex_path_list_field =
-      soa.DecodeField(WellKnownClasses::dalvik_system_PathClassLoader_pathList);
+      jni::DecodeArtField(WellKnownClasses::dalvik_system_BaseDexClassLoader_pathList);
   ArtField* const dex_elements_field =
-      soa.DecodeField(WellKnownClasses::dalvik_system_DexPathList_dexElements);
+      jni::DecodeArtField(WellKnownClasses::dalvik_system_DexPathList_dexElements);
   CHECK(dex_path_list_field != nullptr);
   CHECK(dex_elements_field != nullptr);
   while (!ClassLinker::IsBootClassLoader(soa, class_loader)) {
@@ -1148,7 +1184,7 @@
             *error_msg = StringPrintf("Null dex element at index %d", i);
             return false;
           }
-          ObjPtr<mirror::String> const name = GetDexPathListElementName(soa, element);
+          ObjPtr<mirror::String> const name = GetDexPathListElementName(element);
           if (name == nullptr) {
             *error_msg = StringPrintf("Null name for dex element at index %d", i);
             return false;
@@ -1309,12 +1345,9 @@
         const size_t num_types = dex_file->NumTypeIds();
         const size_t num_methods = dex_file->NumMethodIds();
         const size_t num_fields = dex_file->NumFieldIds();
-        size_t num_method_types = 0;
-        if (Runtime::Current()->IsMethodHandlesEnabled()) {
-          num_method_types = mirror::DexCache::kDexCacheMethodTypeCacheSize;
-          if (dex_file->NumProtoIds() < num_method_types) {
-            num_method_types = dex_file->NumProtoIds();
-          }
+        size_t num_method_types = mirror::DexCache::kDexCacheMethodTypeCacheSize;
+        if (dex_file->NumProtoIds() < num_method_types) {
+          num_method_types = dex_file->NumProtoIds();
         }
 
         CHECK_EQ(num_strings, dex_cache->NumStrings());
@@ -1701,7 +1734,7 @@
         ObjPtr<mirror::Object> element = elements->GetWithoutChecks(i);
         if (element != nullptr) {
           // If we are somewhere in the middle of the array, there may be nulls at the end.
-          loader_dex_file_names.push_back(GetDexPathListElementName(soa, element));
+          loader_dex_file_names.push_back(GetDexPathListElementName(element));
         }
       }
       // Ignore the number of image dex files since we are adding those to the class loader anyways.
@@ -1859,7 +1892,7 @@
     boot_class_table_.VisitRoots(buffered_visitor);
 
     // If tracing is enabled, then mark all the class loaders to prevent unloading.
-    if (tracing_enabled) {
+    if ((flags & kVisitRootFlagClassLoader) != 0 || tracing_enabled) {
       for (const ClassLoaderData& data : class_loaders_) {
         GcRoot<mirror::Object> root(GcRoot<mirror::Object>(self->DecodeJObject(data.weak_root)));
         root.VisitRoot(visitor, RootInfo(kRootVMInternal));
@@ -2041,6 +2074,7 @@
   mirror::ShortArray::ResetArrayClass();
   mirror::MethodType::ResetClass();
   mirror::MethodHandleImpl::ResetClass();
+  mirror::EmulatedStackFrame::ResetClass();
   Thread* const self = Thread::Current();
   for (const ClassLoaderData& data : class_loaders_) {
     DeleteClassLoader(self, data);
@@ -2110,21 +2144,18 @@
   //
   // If this needs to be mitigated in a production system running this code,
   // DexCache::kDexCacheMethodTypeCacheSize can be set to zero.
-  const bool is_method_handles_enabled = Runtime::Current()->IsMethodHandlesEnabled();
   mirror::MethodTypeDexCacheType* method_types = nullptr;
   size_t num_method_types = 0;
 
-  if (is_method_handles_enabled) {
-    if (dex_file.NumProtoIds() < mirror::DexCache::kDexCacheMethodTypeCacheSize) {
-      num_method_types = dex_file.NumProtoIds();
-    } else {
-      num_method_types = mirror::DexCache::kDexCacheMethodTypeCacheSize;
-    }
+  if (dex_file.NumProtoIds() < mirror::DexCache::kDexCacheMethodTypeCacheSize) {
+    num_method_types = dex_file.NumProtoIds();
+  } else {
+    num_method_types = mirror::DexCache::kDexCacheMethodTypeCacheSize;
+  }
 
-    if (num_method_types > 0) {
-      method_types = reinterpret_cast<mirror::MethodTypeDexCacheType*>(
-          raw_arrays + layout.MethodTypesOffset());
-    }
+  if (num_method_types > 0) {
+    method_types = reinterpret_cast<mirror::MethodTypeDexCacheType*>(
+        raw_arrays + layout.MethodTypesOffset());
   }
 
   DCHECK_ALIGNED(raw_arrays, alignof(mirror::StringDexCacheType)) <<
@@ -2327,12 +2358,12 @@
   return ClassPathEntry(nullptr, nullptr);
 }
 
-bool ClassLinker::FindClassInPathClassLoader(ScopedObjectAccessAlreadyRunnable& soa,
-                                             Thread* self,
-                                             const char* descriptor,
-                                             size_t hash,
-                                             Handle<mirror::ClassLoader> class_loader,
-                                             ObjPtr<mirror::Class>* result) {
+bool ClassLinker::FindClassInBaseDexClassLoader(ScopedObjectAccessAlreadyRunnable& soa,
+                                                Thread* self,
+                                                const char* descriptor,
+                                                size_t hash,
+                                                Handle<mirror::ClassLoader> class_loader,
+                                                ObjPtr<mirror::Class>* result) {
   // Termination case: boot class-loader.
   if (IsBootClassLoader(soa, class_loader.Get())) {
     // The boot class loader, search the boot class path.
@@ -2362,14 +2393,24 @@
   // Unsupported class-loader?
   if (soa.Decode<mirror::Class>(WellKnownClasses::dalvik_system_PathClassLoader) !=
       class_loader->GetClass()) {
-    *result = nullptr;
-    return false;
+    // PathClassLoader is the most common case, so it's the one we check first. For secondary dex
+    // files, we also check DexClassLoader here.
+    if (soa.Decode<mirror::Class>(WellKnownClasses::dalvik_system_DexClassLoader) !=
+        class_loader->GetClass()) {
+      *result = nullptr;
+      return false;
+    }
   }
 
   // Handles as RegisterDexFile may allocate dex caches (and cause thread suspension).
   StackHandleScope<4> hs(self);
   Handle<mirror::ClassLoader> h_parent(hs.NewHandle(class_loader->GetParent()));
-  bool recursive_result = FindClassInPathClassLoader(soa, self, descriptor, hash, h_parent, result);
+  bool recursive_result = FindClassInBaseDexClassLoader(soa,
+                                                        self,
+                                                        descriptor,
+                                                        hash,
+                                                        h_parent,
+                                                        result);
 
   if (!recursive_result) {
     // Something wrong up the chain.
@@ -2385,16 +2426,17 @@
   // Handle as if this is the child PathClassLoader.
   // The class loader is a PathClassLoader which inherits from BaseDexClassLoader.
   // We need to get the DexPathList and loop through it.
-  ArtField* const cookie_field = soa.DecodeField(WellKnownClasses::dalvik_system_DexFile_cookie);
+  ArtField* const cookie_field =
+      jni::DecodeArtField(WellKnownClasses::dalvik_system_DexFile_cookie);
   ArtField* const dex_file_field =
-      soa.DecodeField(WellKnownClasses::dalvik_system_DexPathList__Element_dexFile);
+      jni::DecodeArtField(WellKnownClasses::dalvik_system_DexPathList__Element_dexFile);
   ObjPtr<mirror::Object> dex_path_list =
-      soa.DecodeField(WellKnownClasses::dalvik_system_PathClassLoader_pathList)->
-      GetObject(class_loader.Get());
+      jni::DecodeArtField(WellKnownClasses::dalvik_system_BaseDexClassLoader_pathList)->
+          GetObject(class_loader.Get());
   if (dex_path_list != nullptr && dex_file_field != nullptr && cookie_field != nullptr) {
     // DexPathList has an array dexElements of Elements[] which each contain a dex file.
     ObjPtr<mirror::Object> dex_elements_obj =
-        soa.DecodeField(WellKnownClasses::dalvik_system_DexPathList_dexElements)->
+        jni::DecodeArtField(WellKnownClasses::dalvik_system_DexPathList_dexElements)->
         GetObject(dex_path_list);
     // Loop through each dalvik.system.DexPathList$Element's dalvik.system.DexFile and look
     // at the mCookie which is a DexFile vector.
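Both accepted loaders extend BaseDexClassLoader and share the pathList field read above, so the support check reduces to a two-way class comparison before the parent chain is walked. A minimal sketch of that acceptance test, using only the well-known-class handles referenced in this hunk (illustrative, not the exact control flow):

ObjPtr<mirror::Class> loader_class = class_loader->GetClass();
bool supported =
    loader_class == soa.Decode<mirror::Class>(WellKnownClasses::dalvik_system_PathClassLoader) ||
    loader_class == soa.Decode<mirror::Class>(WellKnownClasses::dalvik_system_DexClassLoader);
if (!supported) {
  *result = nullptr;  // Unknown loader: report "chain not understood" to the caller.
  return false;
}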
@@ -2491,14 +2533,14 @@
   } else {
     ScopedObjectAccessUnchecked soa(self);
     ObjPtr<mirror::Class> cp_klass;
-    if (FindClassInPathClassLoader(soa, self, descriptor, hash, class_loader, &cp_klass)) {
+    if (FindClassInBaseDexClassLoader(soa, self, descriptor, hash, class_loader, &cp_klass)) {
       // The chain was understood. So the value in cp_klass is either the class we were looking
       // for, or not found.
       if (cp_klass != nullptr) {
         return cp_klass.Ptr();
       }
-      // TODO: We handle the boot classpath loader in FindClassInPathClassLoader. Try to unify this
-      //       and the branch above. TODO: throw the right exception here.
+      // TODO: We handle the boot classpath loader in FindClassInBaseDexClassLoader. Try to unify
+      //       this and the branch above. TODO: throw the right exception here.
 
       // We'll let the Java-side rediscover all this and throw the exception with the right stack
       // trace.
@@ -2566,6 +2608,8 @@
       klass.Assign(GetClassRoot(kJavaLangRefReference));
     } else if (strcmp(descriptor, "Ljava/lang/DexCache;") == 0) {
       klass.Assign(GetClassRoot(kJavaLangDexCache));
+    } else if (strcmp(descriptor, "Ldalvik/system/ClassExt;") == 0) {
+      klass.Assign(GetClassRoot(kDalvikSystemClassExt));
     }
   }
 
@@ -2785,6 +2829,13 @@
     return true;
   }
 
+  if (runtime->IsFullyDeoptable()) {
+    // We need to be able to deoptimize at any time, so we should always ignore precompiled
+    // code and go to the interpreter, assuming we don't already have JIT-compiled code.
+    jit::Jit* jit = Runtime::Current()->GetJit();
+    return (jit == nullptr) || !jit->GetCodeCache()->ContainsPc(quick_code);
+  }
+
   if (runtime->IsNativeDebuggable()) {
     DCHECK(runtime->UseJitCompilation() && runtime->GetJit()->JitAtFirstUse());
     // If we are doing native debugging, ignore application's AOT code,
@@ -3351,7 +3402,8 @@
 }
 
 mirror::Class* ClassLinker::CreatePrimitiveClass(Thread* self, Primitive::Type type) {
-  ObjPtr<mirror::Class> klass = AllocClass(self, mirror::Class::PrimitiveClassSize(image_pointer_size_));
+  ObjPtr<mirror::Class> klass =
+      AllocClass(self, mirror::Class::PrimitiveClassSize(image_pointer_size_));
   if (UNLIKELY(klass == nullptr)) {
     self->AssertPendingOOMException();
     return nullptr;
@@ -3369,10 +3421,12 @@
   ObjectLock<mirror::Class> lock(self, h_class);
   h_class->SetAccessFlags(kAccPublic | kAccFinal | kAccAbstract);
   h_class->SetPrimitiveType(type);
+  h_class->SetIfTable(GetClassRoot(kJavaLangObject)->GetIfTable());
   mirror::Class::SetStatus(h_class, mirror::Class::kStatusInitialized, self);
   const char* descriptor = Primitive::Descriptor(type);
-  ObjPtr<mirror::Class> existing = InsertClass(descriptor, h_class.Get(),
-                                        ComputeModifiedUtf8Hash(descriptor));
+  ObjPtr<mirror::Class> existing = InsertClass(descriptor,
+                                               h_class.Get(),
+                                               ComputeModifiedUtf8Hash(descriptor));
   CHECK(existing == nullptr) << "InitPrimitiveClass(" << type << ") failed";
   return h_class.Get();
 }
@@ -3730,9 +3784,8 @@
   return false;
 }
 
-void ClassLinker::VerifyClass(Thread* self,
-                              Handle<mirror::Class> klass,
-                              verifier::HardFailLogMode log_level) {
+verifier::MethodVerifier::FailureKind ClassLinker::VerifyClass(
+    Thread* self, Handle<mirror::Class> klass, verifier::HardFailLogMode log_level) {
   {
     // TODO: assert that the monitor on the Class is held
     ObjectLock<mirror::Class> lock(self, klass);
@@ -3753,16 +3806,16 @@
     // this class as a parent to another.
     if (klass->IsErroneous()) {
       ThrowEarlierClassFailure(klass.Get());
-      return;
+      return verifier::MethodVerifier::kHardFailure;
     }
 
     // Don't attempt to re-verify if already sufficiently verified.
     if (klass->IsVerified()) {
       EnsureSkipAccessChecksMethods(klass);
-      return;
+      return verifier::MethodVerifier::kNoFailure;
     }
     if (klass->IsCompileTimeVerified() && Runtime::Current()->IsAotCompiler()) {
-      return;
+      return verifier::MethodVerifier::kNoFailure;
     }
 
     if (klass->GetStatus() == mirror::Class::kStatusResolved) {
@@ -3778,7 +3831,7 @@
     if (!Runtime::Current()->IsVerificationEnabled()) {
       mirror::Class::SetStatus(klass, mirror::Class::kStatusVerified, self);
       EnsureSkipAccessChecksMethods(klass);
-      return;
+      return verifier::MethodVerifier::kNoFailure;
     }
   }
 
@@ -3788,7 +3841,7 @@
   // If we have a superclass and we get a hard verification failure we can return immediately.
   if (supertype.Get() != nullptr && !AttemptSupertypeVerification(self, klass, supertype)) {
     CHECK(self->IsExceptionPending()) << "Verification error should be pending.";
-    return;
+    return verifier::MethodVerifier::kHardFailure;
   }
 
   // Verify all default super-interfaces.
@@ -3815,7 +3868,7 @@
       } else if (UNLIKELY(!AttemptSupertypeVerification(self, klass, iface))) {
         // We had a hard failure while verifying this interface. Just return immediately.
         CHECK(self->IsExceptionPending()) << "Verification error should be pending.";
-        return;
+        return verifier::MethodVerifier::kHardFailure;
       } else if (UNLIKELY(!iface->IsVerified())) {
         // We softly failed to verify the iface. Stop checking and clean up.
         // Put the iface into the supertype handle so we know what caused us to fail.
@@ -3841,8 +3894,8 @@
   //     oat_file_class_status == mirror::Class::kStatusError => !preverified
   DCHECK(!(oat_file_class_status == mirror::Class::kStatusError) || !preverified);
 
-  verifier::MethodVerifier::FailureKind verifier_failure = verifier::MethodVerifier::kNoFailure;
   std::string error_msg;
+  verifier::MethodVerifier::FailureKind verifier_failure = verifier::MethodVerifier::kNoFailure;
   if (!preverified) {
     Runtime* runtime = Runtime::Current();
     verifier_failure = verifier::MethodVerifier::VerifyClass(self,
@@ -3915,6 +3968,7 @@
       EnsureSkipAccessChecksMethods(klass);
     }
   }
+  return verifier_failure;
 }
 
 void ClassLinker::EnsureSkipAccessChecksMethods(Handle<mirror::Class> klass) {
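With VerifyClass now reporting its outcome, callers can branch on the verifier result instead of re-inspecting the class status afterwards. A minimal caller-side sketch, assuming a Thread* self, a ClassLinker* class_linker and a Handle<mirror::Class> h_klass are in scope:

verifier::MethodVerifier::FailureKind failure = class_linker->VerifyClass(self, h_klass);
if (failure == verifier::MethodVerifier::kHardFailure) {
  // Hard failures leave an exception pending on the calling thread.
  CHECK(self->IsExceptionPending());
} else if (failure == verifier::MethodVerifier::kSoftFailure) {
  // Soft failures: the class remains usable, but its methods keep runtime access checks.
}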
@@ -4071,6 +4125,8 @@
   DCHECK_EQ(klass->GetPrimitiveType(), Primitive::kPrimNot);
   klass->SetName(soa.Decode<mirror::String>(name));
   klass->SetDexCache(GetClassRoot(kJavaLangReflectProxy)->GetDexCache());
+  // Object has an empty iftable; copy it so the proxy class also has a valid (empty) iftable.
+  klass->SetIfTable(GetClassRoot(kJavaLangObject)->GetIfTable());
   mirror::Class::SetStatus(klass, mirror::Class::kStatusIdx, self);
   std::string descriptor(GetDescriptorForProxy(klass.Get()));
   const size_t hash = ComputeModifiedUtf8Hash(descriptor.c_str());
@@ -6331,16 +6387,18 @@
 bool ClassLinker::SetupInterfaceLookupTable(Thread* self, Handle<mirror::Class> klass,
                                             Handle<mirror::ObjectArray<mirror::Class>> interfaces) {
   StackHandleScope<1> hs(self);
-  const size_t super_ifcount =
-      klass->HasSuperClass() ? klass->GetSuperClass()->GetIfTableCount() : 0U;
+  const bool has_superclass = klass->HasSuperClass();
+  const size_t super_ifcount = has_superclass ? klass->GetSuperClass()->GetIfTableCount() : 0U;
   const bool have_interfaces = interfaces.Get() != nullptr;
   const size_t num_interfaces =
       have_interfaces ? interfaces->GetLength() : klass->NumDirectInterfaces();
   if (num_interfaces == 0) {
     if (super_ifcount == 0) {
+      if (LIKELY(has_superclass)) {
+        klass->SetIfTable(klass->GetSuperClass()->GetIfTable());
+      }
       // Class implements no interfaces.
       DCHECK_EQ(klass->GetIfTableCount(), 0);
-      DCHECK(klass->GetIfTable() == nullptr);
       return true;
     }
     // Class implements same interfaces as parent, are any of these not marker interfaces?
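The net effect of the iftable hunks in this change (here, in the primitive and proxy class setup above, and in FillIMTFromIfTable below) is that GetIfTable() is never null anymore; emptiness is expressed by a zero count. A minimal sketch of the invariant that the null-check-to-count-check rewrites rely on:

ObjPtr<mirror::IfTable> if_table = klass->GetIfTable();
DCHECK(if_table != nullptr);  // Always set, possibly to Object's empty table.
const bool implements_interfaces = (if_table->Count() != 0);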
@@ -6533,7 +6591,7 @@
   } else {
     // No imt in the super class, need to reconstruct from the iftable.
     ObjPtr<mirror::IfTable> if_table = super_class->GetIfTable();
-    if (if_table != nullptr) {
+    if (if_table->Count() != 0) {
       // Ignore copied methods since we will handle these in LinkInterfaceMethods.
       FillIMTFromIfTable(if_table,
                          unimplemented_method,
@@ -8056,6 +8114,7 @@
     "Ljava/lang/Throwable;",
     "Ljava/lang/ClassNotFoundException;",
     "Ljava/lang/StackTraceElement;",
+    "Ldalvik/system/EmulatedStackFrame;",
     "Z",
     "B",
     "C",
@@ -8074,6 +8133,7 @@
     "[J",
     "[S",
     "[Ljava/lang/StackTraceElement;",
+    "Ldalvik/system/ClassExt;",
   };
   static_assert(arraysize(class_roots_descriptors) == size_t(kClassRootsMax),
                 "Mismatch between class descriptors and class-root enum");
@@ -8093,7 +8153,7 @@
   StackHandleScope<11> hs(self);
 
   ArtField* dex_elements_field =
-      soa.DecodeField(WellKnownClasses::dalvik_system_DexPathList_dexElements);
+      jni::DecodeArtField(WellKnownClasses::dalvik_system_DexPathList_dexElements);
 
   Handle<mirror::Class> dex_elements_class(hs.NewHandle(dex_elements_field->GetType<true>()));
   DCHECK(dex_elements_class.Get() != nullptr);
@@ -8106,13 +8166,13 @@
       hs.NewHandle(dex_elements_class->GetComponentType());
 
   ArtField* element_file_field =
-      soa.DecodeField(WellKnownClasses::dalvik_system_DexPathList__Element_dexFile);
+      jni::DecodeArtField(WellKnownClasses::dalvik_system_DexPathList__Element_dexFile);
   DCHECK_EQ(h_dex_element_class.Get(), element_file_field->GetDeclaringClass());
 
-  ArtField* cookie_field = soa.DecodeField(WellKnownClasses::dalvik_system_DexFile_cookie);
+  ArtField* cookie_field = jni::DecodeArtField(WellKnownClasses::dalvik_system_DexFile_cookie);
   DCHECK_EQ(cookie_field->GetDeclaringClass(), element_file_field->GetType<false>());
 
-  ArtField* file_name_field = soa.DecodeField(WellKnownClasses::dalvik_system_DexFile_fileName);
+  ArtField* file_name_field = jni::DecodeArtField(WellKnownClasses::dalvik_system_DexFile_fileName);
   DCHECK_EQ(file_name_field->GetDeclaringClass(), element_file_field->GetType<false>());
 
   // Fill the elements array.
@@ -8162,7 +8222,7 @@
   DCHECK(h_path_class_loader.Get() != nullptr);
   // Set DexPathList.
   ArtField* path_list_field =
-      soa.DecodeField(WellKnownClasses::dalvik_system_PathClassLoader_pathList);
+      jni::DecodeArtField(WellKnownClasses::dalvik_system_BaseDexClassLoader_pathList);
   DCHECK(path_list_field != nullptr);
   path_list_field->SetObject<false>(h_path_class_loader.Get(), h_dex_path_list.Get());
 
@@ -8306,17 +8366,16 @@
       }
       ObjPtr<mirror::DexCache> klass_dex_cache = klass->GetDexCache();
       if (klass_dex_cache == dex_cache) {
-        const size_t class_def_idx = klass->GetDexClassDefIndex();
         DCHECK(klass->IsResolved());
-        CHECK_LT(class_def_idx, num_class_defs);
-        class_set.insert(class_def_idx);
+        CHECK_LT(klass->GetDexClassDefIndex(), num_class_defs);
+        class_set.insert(klass->GetDexTypeIndex());
       }
     }
 
     if (!class_set.empty()) {
       auto it = ret.find(resolved_classes);
       if (it != ret.end()) {
-        // Already have the key, union the class def idxs.
+        // Already have the key, union the class type indexes.
         it->AddClasses(class_set.begin(), class_set.end());
       } else {
         resolved_classes.AddClasses(class_set.begin(), class_set.end());
@@ -8359,13 +8418,8 @@
       VLOG(profiler) << "Found opened dex file for " << dex_file->GetLocation() << " with "
                      << info.GetClasses().size() << " classes";
       DCHECK_EQ(dex_file->GetLocationChecksum(), info.GetLocationChecksum());
-      for (uint16_t class_def_idx : info.GetClasses()) {
-        if (class_def_idx >= dex_file->NumClassDefs()) {
-          LOG(WARNING) << "Class def index " << class_def_idx << " >= " << dex_file->NumClassDefs();
-          continue;
-        }
-        const DexFile::TypeId& type_id = dex_file->GetTypeId(
-            dex_file->GetClassDef(class_def_idx).class_idx_);
+      for (uint16_t type_idx : info.GetClasses()) {
+        const DexFile::TypeId& type_id = dex_file->GetTypeId(type_idx);
         const char* descriptor = dex_file->GetTypeDescriptor(type_id);
         ret.insert(descriptor);
       }
diff --git a/runtime/class_linker.h b/runtime/class_linker.h
index f2bf581..1d29e31 100644
--- a/runtime/class_linker.h
+++ b/runtime/class_linker.h
@@ -36,6 +36,7 @@
 #include "jni.h"
 #include "mirror/class.h"
 #include "object_callbacks.h"
+#include "verifier/method_verifier.h"
 #include "verifier/verifier_log_mode.h"
 
 namespace art {
@@ -108,6 +109,7 @@
     kJavaLangThrowable,
     kJavaLangClassNotFoundException,
     kJavaLangStackTraceElement,
+    kDalvikSystemEmulatedStackFrame,
     kPrimitiveBoolean,
     kPrimitiveByte,
     kPrimitiveChar,
@@ -126,6 +128,7 @@
     kLongArrayClass,
     kShortArrayClass,
     kJavaLangStackTraceElementArrayClass,
+    kDalvikSystemClassExt,
     kClassRootsMax,
   };
 
@@ -415,10 +418,10 @@
       REQUIRES(!dex_lock_);
 
   void VisitClassRoots(RootVisitor* visitor, VisitRootFlags flags)
-      REQUIRES(!Locks::classlinker_classes_lock_)
+      REQUIRES(!Locks::classlinker_classes_lock_, !Locks::trace_lock_)
       REQUIRES_SHARED(Locks::mutator_lock_);
   void VisitRoots(RootVisitor* visitor, VisitRootFlags flags)
-      REQUIRES(!dex_lock_)
+      REQUIRES(!dex_lock_, !Locks::classlinker_classes_lock_, !Locks::trace_lock_)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
   mirror::DexCache* FindDexCache(Thread* self,
@@ -470,9 +473,10 @@
       REQUIRES_SHARED(Locks::mutator_lock_)
       REQUIRES(!Roles::uninterruptible_);
 
-  void VerifyClass(Thread* self,
-                   Handle<mirror::Class> klass,
-                   verifier::HardFailLogMode log_level = verifier::HardFailLogMode::kLogNone)
+  verifier::MethodVerifier::FailureKind VerifyClass(
+      Thread* self,
+      Handle<mirror::Class> klass,
+      verifier::HardFailLogMode log_level = verifier::HardFailLogMode::kLogNone)
       REQUIRES_SHARED(Locks::mutator_lock_)
       REQUIRES(!dex_lock_);
   bool VerifyClassUsingOatFile(const DexFile& dex_file,
@@ -791,17 +795,17 @@
 
   void FixupStaticTrampolines(ObjPtr<mirror::Class> klass) REQUIRES_SHARED(Locks::mutator_lock_);
 
-  // Finds a class in the path class loader, loading it if necessary without using JNI. Hash
+  // Finds a class in a Path- or DexClassLoader, loading it if necessary without using JNI. Hash
   // function is supposed to be ComputeModifiedUtf8Hash(descriptor). Returns true if the
   // class-loader chain could be handled, false otherwise, i.e., a non-supported class-loader
-  // was encountered while walking the parent chain (currently only BootClassLoader and
-  // PathClassLoader are supported).
+  // was encountered while walking the parent chain (currently only BootClassLoader,
+  // PathClassLoader and DexClassLoader are supported).
-  bool FindClassInPathClassLoader(ScopedObjectAccessAlreadyRunnable& soa,
-                                  Thread* self,
-                                  const char* descriptor,
-                                  size_t hash,
-                                  Handle<mirror::ClassLoader> class_loader,
-                                  ObjPtr<mirror::Class>* result)
+  bool FindClassInBaseDexClassLoader(ScopedObjectAccessAlreadyRunnable& soa,
+                                     Thread* self,
+                                     const char* descriptor,
+                                     size_t hash,
+                                     Handle<mirror::ClassLoader> class_loader,
+                                     ObjPtr<mirror::Class>* result)
       REQUIRES_SHARED(Locks::mutator_lock_)
       REQUIRES(!dex_lock_);
 
@@ -1197,7 +1201,7 @@
   friend struct CompilationHelper;  // For Compile in ImageTest.
   friend class ImageDumper;  // for DexLock
   friend class ImageWriter;  // for GetClassRoots
-  friend class VMClassLoader;  // for LookupClass and FindClassInPathClassLoader.
+  friend class VMClassLoader;  // for LookupClass and FindClassInBaseDexClassLoader.
   friend class JniCompilerTest;  // for GetRuntimeQuickGenericJniStub
   friend class JniInternalTest;  // for GetRuntimeQuickGenericJniStub
   ART_FRIEND_TEST(ClassLinkerTest, RegisterDexFileName);  // for DexLock, and RegisterDexFileLocked
diff --git a/runtime/class_linker_test.cc b/runtime/class_linker_test.cc
index ab18627..44590ba 100644
--- a/runtime/class_linker_test.cc
+++ b/runtime/class_linker_test.cc
@@ -30,7 +30,9 @@
 #include "gc/heap.h"
 #include "mirror/accessible_object.h"
 #include "mirror/class-inl.h"
+#include "mirror/class_ext.h"
 #include "mirror/dex_cache.h"
+#include "mirror/emulated_stack_frame.h"
 #include "mirror/executable.h"
 #include "mirror/field.h"
 #include "mirror/method_type.h"
@@ -99,7 +101,8 @@
     EXPECT_EQ(0U, primitive->NumDirectInterfaces());
     EXPECT_FALSE(primitive->HasVTable());
     EXPECT_EQ(0, primitive->GetIfTableCount());
-    EXPECT_TRUE(primitive->GetIfTable() == nullptr);
+    EXPECT_TRUE(primitive->GetIfTable() != nullptr);
+    EXPECT_EQ(primitive->GetIfTable()->Count(), 0u);
     EXPECT_EQ(kAccPublic | kAccFinal | kAccAbstract, primitive->GetAccessFlags());
   }
 
@@ -585,6 +588,7 @@
     addOffset(OFFSETOF_MEMBER(mirror::Class, dex_cache_strings_), "dexCacheStrings");
     addOffset(OFFSETOF_MEMBER(mirror::Class, dex_class_def_idx_), "dexClassDefIndex");
     addOffset(OFFSETOF_MEMBER(mirror::Class, dex_type_idx_), "dexTypeIndex");
+    addOffset(OFFSETOF_MEMBER(mirror::Class, ext_data_), "extData");
     addOffset(OFFSETOF_MEMBER(mirror::Class, ifields_), "iFields");
     addOffset(OFFSETOF_MEMBER(mirror::Class, iftable_), "ifTable");
     addOffset(OFFSETOF_MEMBER(mirror::Class, methods_), "methods");
@@ -602,12 +606,17 @@
     addOffset(OFFSETOF_MEMBER(mirror::Class, sfields_), "sFields");
     addOffset(OFFSETOF_MEMBER(mirror::Class, status_), "status");
     addOffset(OFFSETOF_MEMBER(mirror::Class, super_class_), "superClass");
-    addOffset(OFFSETOF_MEMBER(mirror::Class, verify_error_), "verifyError");
     addOffset(OFFSETOF_MEMBER(mirror::Class, virtual_methods_offset_), "virtualMethodsOffset");
     addOffset(OFFSETOF_MEMBER(mirror::Class, vtable_), "vtable");
   };
 };
 
+struct ClassExtOffsets : public CheckOffsets<mirror::ClassExt> {
+  ClassExtOffsets() : CheckOffsets<mirror::ClassExt>(false, "Ldalvik/system/ClassExt;") {
+    addOffset(OFFSETOF_MEMBER(mirror::ClassExt, verify_error_), "verifyError");
+  }
+};
+
 struct StringOffsets : public CheckOffsets<mirror::String> {
   StringOffsets() : CheckOffsets<mirror::String>(false, "Ljava/lang/String;") {
     addOffset(OFFSETOF_MEMBER(mirror::String, count_), "count");
@@ -734,12 +743,21 @@
   MethodHandleImplOffsets() : CheckOffsets<mirror::MethodHandleImpl>(
       false, "Ljava/lang/invoke/MethodHandle;") {
     addOffset(OFFSETOF_MEMBER(mirror::MethodHandleImpl, art_field_or_method_), "artFieldOrMethod");
-    addOffset(OFFSETOF_MEMBER(mirror::MethodHandleImpl, as_type_cache_), "asTypeCache");
     addOffset(OFFSETOF_MEMBER(mirror::MethodHandleImpl, handle_kind_), "handleKind");
+    addOffset(OFFSETOF_MEMBER(mirror::MethodHandleImpl, nominal_type_), "nominalType");
     addOffset(OFFSETOF_MEMBER(mirror::MethodHandleImpl, method_type_), "type");
   }
 };
 
+struct EmulatedStackFrameOffsets : public CheckOffsets<mirror::EmulatedStackFrame> {
+  EmulatedStackFrameOffsets() : CheckOffsets<mirror::EmulatedStackFrame>(
+      false, "Ldalvik/system/EmulatedStackFrame;") {
+    addOffset(OFFSETOF_MEMBER(mirror::EmulatedStackFrame, references_), "references");
+    addOffset(OFFSETOF_MEMBER(mirror::EmulatedStackFrame, stack_frame_), "stackFrame");
+    addOffset(OFFSETOF_MEMBER(mirror::EmulatedStackFrame, type_), "type");
+  }
+};
+
 // C++ fields must exactly match the fields in the Java classes. If this fails,
 // reorder the fields in the C++ class. Managed class fields are ordered by
 // ClassLinker::LinkFields.
@@ -747,6 +765,7 @@
   ScopedObjectAccess soa(Thread::Current());
   EXPECT_TRUE(ObjectOffsets().Check());
   EXPECT_TRUE(ClassOffsets().Check());
+  EXPECT_TRUE(ClassExtOffsets().Check());
   EXPECT_TRUE(StringOffsets().Check());
   EXPECT_TRUE(ThrowableOffsets().Check());
   EXPECT_TRUE(StackTraceElementOffsets().Check());
@@ -760,6 +779,7 @@
   EXPECT_TRUE(ExecutableOffsets().Check());
   EXPECT_TRUE(MethodTypeOffsets().Check());
   EXPECT_TRUE(MethodHandleImplOffsets().Check());
+  EXPECT_TRUE(EmulatedStackFrameOffsets().Check());
 }
 
 TEST_F(ClassLinkerTest, FindClassNonexistent) {
diff --git a/runtime/class_table.h b/runtime/class_table.h
index bc9eaf4..558c144 100644
--- a/runtime/class_table.h
+++ b/runtime/class_table.h
@@ -48,7 +48,7 @@
     uint32_t operator()(const GcRoot<mirror::Class>& root) const NO_THREAD_SAFETY_ANALYSIS;
     // Same class loader and descriptor.
     bool operator()(const GcRoot<mirror::Class>& a, const GcRoot<mirror::Class>& b) const
-        NO_THREAD_SAFETY_ANALYSIS;;
+        NO_THREAD_SAFETY_ANALYSIS;
     // Same descriptor.
     bool operator()(const GcRoot<mirror::Class>& a, const char* descriptor) const
         NO_THREAD_SAFETY_ANALYSIS;
diff --git a/runtime/common_runtime_test.cc b/runtime/common_runtime_test.cc
index 5409fcb..8226e60 100644
--- a/runtime/common_runtime_test.cc
+++ b/runtime/common_runtime_test.cc
@@ -520,17 +520,17 @@
 
   // The class loader is a PathClassLoader which inherits from BaseDexClassLoader.
   // We need to get the DexPathList and loop through it.
-  ArtField* cookie_field = soa.DecodeField(WellKnownClasses::dalvik_system_DexFile_cookie);
+  ArtField* cookie_field = jni::DecodeArtField(WellKnownClasses::dalvik_system_DexFile_cookie);
   ArtField* dex_file_field =
-      soa.DecodeField(WellKnownClasses::dalvik_system_DexPathList__Element_dexFile);
+      jni::DecodeArtField(WellKnownClasses::dalvik_system_DexPathList__Element_dexFile);
   ObjPtr<mirror::Object> dex_path_list =
-      soa.DecodeField(WellKnownClasses::dalvik_system_PathClassLoader_pathList)->
-      GetObject(class_loader.Get());
+      jni::DecodeArtField(WellKnownClasses::dalvik_system_BaseDexClassLoader_pathList)->
+          GetObject(class_loader.Get());
   if (dex_path_list != nullptr && dex_file_field!= nullptr && cookie_field != nullptr) {
     // DexPathList has an array dexElements of Elements[] which each contain a dex file.
     ObjPtr<mirror::Object> dex_elements_obj =
-        soa.DecodeField(WellKnownClasses::dalvik_system_DexPathList_dexElements)->
-        GetObject(dex_path_list);
+        jni::DecodeArtField(WellKnownClasses::dalvik_system_DexPathList_dexElements)->
+            GetObject(dex_path_list);
     // Loop through each dalvik.system.DexPathList$Element's dalvik.system.DexFile and look
     // at the mCookie which is a DexFile vector.
     if (dex_elements_obj != nullptr) {
@@ -572,6 +572,29 @@
   return ret;
 }
 
+jobject CommonRuntimeTestImpl::LoadMultiDex(const char* first_dex_name,
+                                            const char* second_dex_name) {
+  std::vector<std::unique_ptr<const DexFile>> first_dex_files = OpenTestDexFiles(first_dex_name);
+  std::vector<std::unique_ptr<const DexFile>> second_dex_files = OpenTestDexFiles(second_dex_name);
+  std::vector<const DexFile*> class_path;
+  CHECK_NE(0U, first_dex_files.size());
+  CHECK_NE(0U, second_dex_files.size());
+  for (auto& dex_file : first_dex_files) {
+    class_path.push_back(dex_file.get());
+    loaded_dex_files_.push_back(std::move(dex_file));
+  }
+  for (auto& dex_file : second_dex_files) {
+    class_path.push_back(dex_file.get());
+    loaded_dex_files_.push_back(std::move(dex_file));
+  }
+
+  Thread* self = Thread::Current();
+  jobject class_loader = Runtime::Current()->GetClassLinker()->CreatePathClassLoader(self,
+                                                                                     class_path);
+  self->SetClassLoaderOverride(class_loader);
+  return class_loader;
+}
+
 jobject CommonRuntimeTestImpl::LoadDex(const char* dex_name) {
   std::vector<std::unique_ptr<const DexFile>> dex_files = OpenTestDexFiles(dex_name);
   std::vector<const DexFile*> class_path;
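LoadMultiDex mirrors LoadDex but builds one PathClassLoader over two test dex files and installs it as the class-loader override. A minimal usage sketch from a test body; the dex names below are placeholders, not actual test data:

// Hypothetical test dex names ("Main" / "Extra") used purely for illustration.
ScopedObjectAccess soa(Thread::Current());
jobject class_loader = LoadMultiDex("Main", "Extra");
ASSERT_TRUE(class_loader != nullptr);
// Classes from either dex file can now be resolved through this single loader.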
diff --git a/runtime/common_runtime_test.h b/runtime/common_runtime_test.h
index 92934c6..17e3729 100644
--- a/runtime/common_runtime_test.h
+++ b/runtime/common_runtime_test.h
@@ -52,7 +52,7 @@
 
   ScratchFile(const ScratchFile& other, const char* suffix);
 
-  explicit ScratchFile(ScratchFile&& other);
+  ScratchFile(ScratchFile&& other);
 
   ScratchFile& operator=(ScratchFile&& other);
 
@@ -133,6 +133,8 @@
       REQUIRES_SHARED(Locks::mutator_lock_);
 
   jobject LoadDex(const char* dex_name) REQUIRES_SHARED(Locks::mutator_lock_);
+  jobject LoadMultiDex(const char* first_dex_name, const char* second_dex_name)
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   std::string android_data_;
   std::string dalvik_cache_;
diff --git a/runtime/common_throws.cc b/runtime/common_throws.cc
index 0251776..9f0dbbb 100644
--- a/runtime/common_throws.cc
+++ b/runtime/common_throws.cc
@@ -813,13 +813,11 @@
 
 void ThrowWrongMethodTypeException(mirror::MethodType* callee_type,
                                    mirror::MethodType* callsite_type) {
-  // TODO(narayan): Should we provide more detail here ? The RI doesn't bother.
-  UNUSED(callee_type);
-  UNUSED(callsite_type);
-
   ThrowException("Ljava/lang/invoke/WrongMethodTypeException;",
                  nullptr,
-                 "Invalid method type for signature polymorphic call");
+                 StringPrintf("Expected %s but was %s",
+                              callee_type->PrettyDescriptor().c_str(),
+                              callsite_type->PrettyDescriptor().c_str()).c_str());
 }
 
 }  // namespace art
diff --git a/runtime/compiler_callbacks.h b/runtime/compiler_callbacks.h
index 00dedef..806653a 100644
--- a/runtime/compiler_callbacks.h
+++ b/runtime/compiler_callbacks.h
@@ -47,6 +47,7 @@
   virtual bool IsRelocationPossible() = 0;
 
   virtual verifier::VerifierDeps* GetVerifierDeps() const = 0;
+  virtual void SetVerifierDeps(verifier::VerifierDeps* deps ATTRIBUTE_UNUSED) {}
 
   bool IsBootImage() {
     return mode_ == CallbackMode::kCompileBootImage;
diff --git a/runtime/debugger.cc b/runtime/debugger.cc
index 1da888e..dc2ae2e 100644
--- a/runtime/debugger.cc
+++ b/runtime/debugger.cc
@@ -39,6 +39,7 @@
 #include "handle_scope.h"
 #include "jdwp/jdwp_priv.h"
 #include "jdwp/object_registry.h"
+#include "jni_internal.h"
 #include "jvalue-inl.h"
 #include "mirror/class.h"
 #include "mirror/class-inl.h"
@@ -2007,7 +2008,7 @@
   mirror::Object* thread_object = gRegistry->Get<mirror::Object*>(thread_id, &error);
   CHECK(thread_object != nullptr) << error;
   ArtField* java_lang_Thread_name_field =
-      soa.DecodeField(WellKnownClasses::java_lang_Thread_name);
+      jni::DecodeArtField(WellKnownClasses::java_lang_Thread_name);
   ObjPtr<mirror::String> s(java_lang_Thread_name_field->GetObject(thread_object)->AsString());
   if (s != nullptr) {
     *name = s->ToModifiedUtf8();
@@ -2032,7 +2033,7 @@
   } else if (error == JDWP::ERR_NONE) {
     ObjPtr<mirror::Class> c = soa.Decode<mirror::Class>(WellKnownClasses::java_lang_Thread);
     CHECK(c != nullptr);
-    ArtField* f = soa.DecodeField(WellKnownClasses::java_lang_Thread_group);
+    ArtField* f = jni::DecodeArtField(WellKnownClasses::java_lang_Thread_group);
     CHECK(f != nullptr);
     ObjPtr<mirror::Object> group = f->GetObject(thread_object);
     CHECK(group != nullptr);
@@ -2074,7 +2075,7 @@
     return error;
   }
   ScopedAssertNoThreadSuspension ants("Debugger: GetThreadGroupName");
-  ArtField* f = soa.DecodeField(WellKnownClasses::java_lang_ThreadGroup_name);
+  ArtField* f = jni::DecodeArtField(WellKnownClasses::java_lang_ThreadGroup_name);
   CHECK(f != nullptr);
   ObjPtr<mirror::String> s = f->GetObject(thread_group)->AsString();
 
@@ -2093,7 +2094,7 @@
   ObjPtr<mirror::Object> parent;
   {
     ScopedAssertNoThreadSuspension ants("Debugger: GetThreadGroupParent");
-    ArtField* f = soa.DecodeField(WellKnownClasses::java_lang_ThreadGroup_parent);
+    ArtField* f = jni::DecodeArtField(WellKnownClasses::java_lang_ThreadGroup_parent);
     CHECK(f != nullptr);
     parent = f->GetObject(thread_group);
   }
@@ -2102,13 +2103,13 @@
   return JDWP::ERR_NONE;
 }
 
-static void GetChildThreadGroups(ScopedObjectAccessUnchecked& soa, mirror::Object* thread_group,
+static void GetChildThreadGroups(mirror::Object* thread_group,
                                  std::vector<JDWP::ObjectId>* child_thread_group_ids)
     REQUIRES_SHARED(Locks::mutator_lock_) {
   CHECK(thread_group != nullptr);
 
   // Get the int "ngroups" count of this thread group...
-  ArtField* ngroups_field = soa.DecodeField(WellKnownClasses::java_lang_ThreadGroup_ngroups);
+  ArtField* ngroups_field = jni::DecodeArtField(WellKnownClasses::java_lang_ThreadGroup_ngroups);
   CHECK(ngroups_field != nullptr);
   const int32_t size = ngroups_field->GetInt(thread_group);
   if (size == 0) {
@@ -2116,7 +2117,7 @@
   }
 
   // Get the ThreadGroup[] "groups" out of this thread group...
-  ArtField* groups_field = soa.DecodeField(WellKnownClasses::java_lang_ThreadGroup_groups);
+  ArtField* groups_field = jni::DecodeArtField(WellKnownClasses::java_lang_ThreadGroup_groups);
   ObjPtr<mirror::Object> groups_array = groups_field->GetObject(thread_group);
 
   CHECK(groups_array != nullptr);
@@ -2154,7 +2155,7 @@
   // Add child thread groups.
   {
     std::vector<JDWP::ObjectId> child_thread_groups_ids;
-    GetChildThreadGroups(soa, thread_group, &child_thread_groups_ids);
+    GetChildThreadGroups(thread_group, &child_thread_groups_ids);
     expandBufAdd4BE(pReply, child_thread_groups_ids.size());
     for (JDWP::ObjectId child_thread_group_id : child_thread_groups_ids) {
       expandBufAddObjectId(pReply, child_thread_group_id);
@@ -2166,7 +2167,7 @@
 
 JDWP::ObjectId Dbg::GetSystemThreadGroupId() {
   ScopedObjectAccessUnchecked soa(Thread::Current());
-  ArtField* f = soa.DecodeField(WellKnownClasses::java_lang_ThreadGroup_systemThreadGroup);
+  ArtField* f = jni::DecodeArtField(WellKnownClasses::java_lang_ThreadGroup_systemThreadGroup);
   ObjPtr<mirror::Object> group = f->GetObject(f->GetDeclaringClass());
   return gRegistry->Add(group);
 }
@@ -2256,14 +2257,13 @@
   return JDWP::ERR_NONE;
 }
 
-static bool IsInDesiredThreadGroup(ScopedObjectAccessUnchecked& soa,
-                                   mirror::Object* desired_thread_group, mirror::Object* peer)
+static bool IsInDesiredThreadGroup(mirror::Object* desired_thread_group, mirror::Object* peer)
     REQUIRES_SHARED(Locks::mutator_lock_) {
   // Do we want threads from all thread groups?
   if (desired_thread_group == nullptr) {
     return true;
   }
-  ArtField* thread_group_field = soa.DecodeField(WellKnownClasses::java_lang_Thread_group);
+  ArtField* thread_group_field = jni::DecodeArtField(WellKnownClasses::java_lang_Thread_group);
   DCHECK(thread_group_field != nullptr);
   ObjPtr<mirror::Object> group = thread_group_field->GetObject(peer);
   return (group == desired_thread_group);
@@ -2296,7 +2296,7 @@
       // Doing so might help us report ZOMBIE threads too.
       continue;
     }
-    if (IsInDesiredThreadGroup(soa, thread_group, peer)) {
+    if (IsInDesiredThreadGroup(thread_group, peer)) {
       thread_ids->push_back(gRegistry->Add(peer));
     }
   }
@@ -4093,7 +4093,7 @@
 
   // Invoke the method.
   ScopedLocalRef<jobject> ref(soa.Env(), soa.AddLocalReference<jobject>(pReq->receiver.Read()));
-  JValue result = InvokeWithJValues(soa, ref.get(), soa.EncodeMethod(m),
+  JValue result = InvokeWithJValues(soa, ref.get(), jni::EncodeArtMethod(m),
                                     reinterpret_cast<jvalue*>(pReq->arg_values.get()));
 
   // Prepare JDWP ids for the reply.
@@ -4371,7 +4371,7 @@
     CHECK(type == CHUNK_TYPE("THCR") || type == CHUNK_TYPE("THNM")) << type;
     ScopedObjectAccessUnchecked soa(Thread::Current());
     StackHandleScope<1> hs(soa.Self());
-    Handle<mirror::String> name(hs.NewHandle(t->GetThreadName(soa)));
+    Handle<mirror::String> name(hs.NewHandle(t->GetThreadName()));
     size_t char_count = (name.Get() != nullptr) ? name->GetLength() : 0;
     const jchar* chars = (name.Get() != nullptr) ? name->GetValue() : nullptr;
     bool is_compressed = (name.Get() != nullptr) ? name->IsCompressed() : false;
@@ -5117,13 +5117,11 @@
 }
 
 ArtMethod* DeoptimizationRequest::Method() const {
-  ScopedObjectAccessUnchecked soa(Thread::Current());
-  return soa.DecodeMethod(method_);
+  return jni::DecodeArtMethod(method_);
 }
 
 void DeoptimizationRequest::SetMethod(ArtMethod* m) {
-  ScopedObjectAccessUnchecked soa(Thread::Current());
-  method_ = soa.EncodeMethod(m);
+  method_ = jni::EncodeArtMethod(m);
 }
 
 void Dbg::VisitRoots(RootVisitor* visitor) {
diff --git a/runtime/dex_file.cc b/runtime/dex_file.cc
index b3317a5..2ef7509 100644
--- a/runtime/dex_file.cc
+++ b/runtime/dex_file.cc
@@ -29,25 +29,17 @@
 
 #include "base/enums.h"
 #include "base/file_magic.h"
-#include "base/hash_map.h"
 #include "base/logging.h"
-#include "base/stl_util.h"
 #include "base/stringprintf.h"
 #include "base/systrace.h"
 #include "base/unix_file/fd_file.h"
 #include "dex_file-inl.h"
 #include "dex_file_verifier.h"
-#include "globals.h"
 #include "jvalue.h"
 #include "leb128.h"
-#include "oat_file.h"
 #include "os.h"
-#include "safe_map.h"
-#include "thread.h"
-#include "type_lookup_table.h"
 #include "utf-inl.h"
 #include "utils.h"
-#include "well_known_classes.h"
 #include "zip_archive.h"
 
 namespace art {
diff --git a/runtime/dex_file.h b/runtime/dex_file.h
index da828dc..da9fa50 100644
--- a/runtime/dex_file.h
+++ b/runtime/dex_file.h
@@ -22,7 +22,6 @@
 #include <vector>
 
 #include "base/logging.h"
-#include "base/mutex.h"  // For Locks::mutator_lock_.
 #include "base/value_object.h"
 #include "globals.h"
 #include "invoke_type.h"
@@ -36,10 +35,8 @@
 class OatDexFile;
 class Signature;
 class StringPiece;
-class TypeLookupTable;
 class ZipArchive;
 
-// TODO: move all of the macro functionality into the DexCache class.
 class DexFile {
  public:
   // First Dex format version supporting default methods.
diff --git a/runtime/dex_file_annotations.cc b/runtime/dex_file_annotations.cc
index 0765465..835f456 100644
--- a/runtime/dex_file_annotations.cc
+++ b/runtime/dex_file_annotations.cc
@@ -22,6 +22,7 @@
 #include "art_method-inl.h"
 #include "class_linker-inl.h"
 #include "dex_file-inl.h"
+#include "jni_internal.h"
 #include "jvalue-inl.h"
 #include "mirror/field.h"
 #include "mirror/method.h"
@@ -281,7 +282,7 @@
 
   JValue result;
   ArtMethod* create_annotation_method =
-      soa.DecodeMethod(WellKnownClasses::libcore_reflect_AnnotationFactory_createAnnotation);
+      jni::DecodeArtMethod(WellKnownClasses::libcore_reflect_AnnotationFactory_createAnnotation);
   uint32_t args[2] = { static_cast<uint32_t>(reinterpret_cast<uintptr_t>(annotation_class.Get())),
                        static_cast<uint32_t>(reinterpret_cast<uintptr_t>(h_element_array.Get())) };
   create_annotation_method->Invoke(self, args, sizeof(args), &result, "LLL");
@@ -633,7 +634,7 @@
 
   JValue result;
   ArtMethod* annotation_member_init =
-      soa.DecodeMethod(WellKnownClasses::libcore_reflect_AnnotationMember_init);
+      jni::DecodeArtMethod(WellKnownClasses::libcore_reflect_AnnotationMember_init);
   uint32_t args[5] = { static_cast<uint32_t>(reinterpret_cast<uintptr_t>(new_member.Get())),
                        static_cast<uint32_t>(reinterpret_cast<uintptr_t>(string_name.Get())),
                        static_cast<uint32_t>(reinterpret_cast<uintptr_t>(value_object.Get())),
diff --git a/runtime/dex_file_verifier.cc b/runtime/dex_file_verifier.cc
index 5d70076..be25803 100644
--- a/runtime/dex_file_verifier.cc
+++ b/runtime/dex_file_verifier.cc
@@ -25,7 +25,6 @@
 #include "dex_file-inl.h"
 #include "experimental_flags.h"
 #include "leb128.h"
-#include "runtime.h"
 #include "safe_map.h"
 #include "utf-inl.h"
 #include "utils.h"
@@ -457,22 +456,22 @@
 
 #define DECODE_UNSIGNED_CHECKED_FROM_WITH_ERROR_VALUE(ptr, var, error_value)  \
   uint32_t var;                                                               \
-  if (!DecodeUnsignedLeb128Checked(&ptr, begin_ + size_, &var)) {             \
+  if (!DecodeUnsignedLeb128Checked(&(ptr), begin_ + size_, &(var))) {         \
     return error_value;                                                       \
   }
 
-#define DECODE_UNSIGNED_CHECKED_FROM(ptr, var)                      \
-  uint32_t var;                                                     \
-  if (!DecodeUnsignedLeb128Checked(&ptr, begin_ + size_, &var)) {   \
-    ErrorStringPrintf("Read out of bounds");                        \
-    return false;                                                   \
+#define DECODE_UNSIGNED_CHECKED_FROM(ptr, var)                        \
+  uint32_t var;                                                       \
+  if (!DecodeUnsignedLeb128Checked(&(ptr), begin_ + size_, &(var))) { \
+    ErrorStringPrintf("Read out of bounds");                          \
+    return false;                                                     \
   }
 
-#define DECODE_SIGNED_CHECKED_FROM(ptr, var)                      \
-  int32_t var;                                                    \
-  if (!DecodeSignedLeb128Checked(&ptr, begin_ + size_, &var)) {   \
-    ErrorStringPrintf("Read out of bounds");                      \
-    return false;                                                 \
+#define DECODE_SIGNED_CHECKED_FROM(ptr, var)                        \
+  int32_t var;                                                      \
+  if (!DecodeSignedLeb128Checked(&(ptr), begin_ + size_, &(var))) { \
+    ErrorStringPrintf("Read out of bounds");                        \
+    return false;                                                   \
   }
 
 bool DexFileVerifier::CheckAndGetHandlerOffsets(const DexFile::CodeItem* code_item,
diff --git a/runtime/dex_file_verifier_test.cc b/runtime/dex_file_verifier_test.cc
index e392870..3801c22 100644
--- a/runtime/dex_file_verifier_test.cc
+++ b/runtime/dex_file_verifier_test.cc
@@ -58,7 +58,7 @@
 
   void VerifyModification(const char* dex_file_base64_content,
                           const char* location,
-                          std::function<void(DexFile*)> f,
+                          const std::function<void(DexFile*)>& f,
                           const char* expected_error) {
     size_t length;
     std::unique_ptr<uint8_t[]> dex_bytes(DecodeBase64(dex_file_base64_content, &length));
diff --git a/runtime/dex_instruction.h b/runtime/dex_instruction.h
index 8eb1a79..99b9f9d 100644
--- a/runtime/dex_instruction.h
+++ b/runtime/dex_instruction.h
@@ -480,6 +480,18 @@
     insns[1] = val;
   }
 
+  void SetVRegA_21c(uint8_t val) {
+    DCHECK(FormatOf(Opcode()) == k21c);
+    uint16_t* insns = reinterpret_cast<uint16_t*>(this);
+    insns[0] = (val << 8) | (insns[0] & 0x00ff);
+  }
+
+  void SetVRegB_21c(uint16_t val) {
+    DCHECK(FormatOf(Opcode()) == k21c);
+    uint16_t* insns = reinterpret_cast<uint16_t*>(this);
+    insns[1] = val;
+  }
+
   // Returns the format of the given opcode.
   static Format FormatOf(Code opcode) {
     return kInstructionFormats[opcode];
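Format 21c ("AA|op BBBB") keeps the 8-bit vA register in the high byte of the first code unit and the 16-bit index in the second unit, which is exactly what the two setters above rewrite. A small worked example, assuming the Dex opcode value 0x1c for const-class:

uint16_t insns[2];
insns[0] = (5u << 8) | 0x1c;  // const-class v5, ...   -> first unit 0x051c
insns[1] = 0x0123;            // type@0x0123 fills the entire second unit (vB)
// SetVRegA_21c(7) would then turn insns[0] into 0x071c without touching the opcode byte.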
diff --git a/runtime/entrypoints/entrypoint_utils-inl.h b/runtime/entrypoints/entrypoint_utils-inl.h
index 31811fb..ed60f59 100644
--- a/runtime/entrypoints/entrypoint_utils-inl.h
+++ b/runtime/entrypoints/entrypoint_utils-inl.h
@@ -483,15 +483,15 @@
 
 template<InvokeType type, bool access_check>
 inline ArtMethod* FindMethodFromCode(uint32_t method_idx,
-                                     mirror::Object** this_object,
+                                     ObjPtr<mirror::Object>* this_object,
                                      ArtMethod* referrer,
                                      Thread* self) {
   ClassLinker* const class_linker = Runtime::Current()->GetClassLinker();
   ArtMethod* resolved_method = class_linker->GetResolvedMethod(method_idx, referrer);
   if (resolved_method == nullptr) {
     StackHandleScope<1> hs(self);
-    mirror::Object* null_this = nullptr;
-    HandleWrapper<mirror::Object> h_this(
+    ObjPtr<mirror::Object> null_this = nullptr;
+    HandleWrapperObjPtr<mirror::Object> h_this(
         hs.NewHandleWrapper(type == kStatic ? &null_this : this_object));
     constexpr ClassLinker::ResolveMode resolve_mode =
         access_check ? ClassLinker::kForceICCECheck
@@ -560,7 +560,7 @@
       //    defaults. What we actually need is a GetContainingClass that says which classes virtuals
       //    this method is coming from.
       StackHandleScope<2> hs2(self);
-      HandleWrapper<mirror::Object> h_this(hs2.NewHandleWrapper(this_object));
+      HandleWrapperObjPtr<mirror::Object> h_this(hs2.NewHandleWrapper(this_object));
       Handle<mirror::Class> h_referring_class(hs2.NewHandle(referrer->GetDeclaringClass()));
       const uint16_t method_type_idx =
           h_referring_class->GetDexFile().GetMethodId(method_idx).class_idx_;
@@ -652,7 +652,7 @@
 #define EXPLICIT_FIND_METHOD_FROM_CODE_TEMPLATE_DECL(_type, _access_check)                 \
   template REQUIRES_SHARED(Locks::mutator_lock_) ALWAYS_INLINE                       \
   ArtMethod* FindMethodFromCode<_type, _access_check>(uint32_t method_idx,         \
-                                                      mirror::Object** this_object, \
+                                                      ObjPtr<mirror::Object>* this_object, \
                                                       ArtMethod* referrer, \
                                                       Thread* self)
 #define EXPLICIT_FIND_METHOD_FROM_CODE_TYPED_TEMPLATE_DECL(_type) \
@@ -722,8 +722,11 @@
 }
 
 // Fast path method resolution that can't throw exceptions.
-inline ArtMethod* FindMethodFast(uint32_t method_idx, mirror::Object* this_object,
-                                 ArtMethod* referrer, bool access_check, InvokeType type) {
+inline ArtMethod* FindMethodFast(uint32_t method_idx,
+                                 ObjPtr<mirror::Object> this_object,
+                                 ArtMethod* referrer,
+                                 bool access_check,
+                                 InvokeType type) {
   ScopedAssertNoThreadSuspension ants(__FUNCTION__);
   if (UNLIKELY(this_object == nullptr && type != kStatic)) {
     return nullptr;
diff --git a/runtime/entrypoints/entrypoint_utils.cc b/runtime/entrypoints/entrypoint_utils.cc
index cbefbba..1ccb4b0 100644
--- a/runtime/entrypoints/entrypoint_utils.cc
+++ b/runtime/entrypoints/entrypoint_utils.cc
@@ -237,7 +237,7 @@
   }
 }
 
-bool FillArrayData(mirror::Object* obj, const Instruction::ArrayDataPayload* payload) {
+bool FillArrayData(ObjPtr<mirror::Object> obj, const Instruction::ArrayDataPayload* payload) {
   DCHECK_EQ(payload->ident, static_cast<uint16_t>(Instruction::kArrayDataSignature));
   if (UNLIKELY(obj == nullptr)) {
     ThrowNullPointerException("null array in FILL_ARRAY_DATA");
diff --git a/runtime/entrypoints/entrypoint_utils.h b/runtime/entrypoints/entrypoint_utils.h
index 20c8401..bcddfb0 100644
--- a/runtime/entrypoints/entrypoint_utils.h
+++ b/runtime/entrypoints/entrypoint_utils.h
@@ -156,7 +156,7 @@
 
 template<InvokeType type, bool access_check>
 inline ArtMethod* FindMethodFromCode(uint32_t method_idx,
-                                     mirror::Object** this_object,
+                                     ObjPtr<mirror::Object>* this_object,
                                      ArtMethod* referrer,
                                      Thread* self)
     REQUIRES_SHARED(Locks::mutator_lock_)
@@ -171,7 +171,7 @@
 
 // Fast path method resolution that can't throw exceptions.
 inline ArtMethod* FindMethodFast(uint32_t method_idx,
-                                 mirror::Object* this_object,
+                                 ObjPtr<mirror::Object> this_object,
                                  ArtMethod* referrer,
                                  bool access_check,
                                  InvokeType type)
@@ -203,7 +203,7 @@
     REQUIRES_SHARED(Locks::mutator_lock_)
     REQUIRES(!Roles::uninterruptible_);
 
-bool FillArrayData(mirror::Object* obj, const Instruction::ArrayDataPayload* payload)
+bool FillArrayData(ObjPtr<mirror::Object> obj, const Instruction::ArrayDataPayload* payload)
     REQUIRES_SHARED(Locks::mutator_lock_)
     REQUIRES(!Roles::uninterruptible_);
 
diff --git a/runtime/entrypoints/quick/quick_alloc_entrypoints.cc b/runtime/entrypoints/quick/quick_alloc_entrypoints.cc
index dc5fd07..515fcbf 100644
--- a/runtime/entrypoints/quick/quick_alloc_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_alloc_entrypoints.cc
@@ -43,11 +43,8 @@
         obj = self->AllocTlab(byte_count); \
         DCHECK(obj != nullptr) << "AllocTlab can't fail"; \
         obj->SetClass(klass); \
-        if (kUseBakerOrBrooksReadBarrier) { \
-          if (kUseBrooksReadBarrier) { \
-            obj->SetReadBarrierPointer(obj); \
-          } \
-          obj->AssertReadBarrierPointer(); \
+        if (kUseBakerReadBarrier) { \
+          obj->AssertReadBarrierState(); \
         } \
         QuasiAtomic::ThreadFenceForConstructor(); \
         return obj; \
@@ -69,11 +66,8 @@
         obj = self->AllocTlab(byte_count); \
         DCHECK(obj != nullptr) << "AllocTlab can't fail"; \
         obj->SetClass(klass); \
-        if (kUseBakerOrBrooksReadBarrier) { \
-          if (kUseBrooksReadBarrier) { \
-            obj->SetReadBarrierPointer(obj); \
-          } \
-          obj->AssertReadBarrierPointer(); \
+        if (kUseBakerReadBarrier) { \
+          obj->AssertReadBarrierState(); \
         } \
         QuasiAtomic::ThreadFenceForConstructor(); \
         return obj; \
@@ -94,11 +88,8 @@
       obj = self->AllocTlab(byte_count); \
       DCHECK(obj != nullptr) << "AllocTlab can't fail"; \
       obj->SetClass(klass); \
-      if (kUseBakerOrBrooksReadBarrier) { \
-        if (kUseBrooksReadBarrier) { \
-          obj->SetReadBarrierPointer(obj); \
-        } \
-        obj->AssertReadBarrierPointer(); \
+      if (kUseBakerReadBarrier) { \
+        obj->AssertReadBarrierState(); \
       } \
       QuasiAtomic::ThreadFenceForConstructor(); \
       return obj; \
diff --git a/runtime/entrypoints/quick/quick_cast_entrypoints.cc b/runtime/entrypoints/quick/quick_cast_entrypoints.cc
index 2732d68..083d578 100644
--- a/runtime/entrypoints/quick/quick_cast_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_cast_entrypoints.cc
@@ -27,4 +27,12 @@
   return klass->IsAssignableFrom(ref_class) ? 1 : 0;
 }
 
+// Instance-of test for code, won't throw.  Null and equality tests already performed.
+extern "C" size_t artInstanceOfFromCode(mirror::Object* obj, mirror::Class* ref_class)
+    REQUIRES_SHARED(Locks::mutator_lock_) {
+  DCHECK(obj != nullptr);
+  DCHECK(ref_class != nullptr);
+  return obj->InstanceOf(ref_class) ? 1 : 0;
+}
+
 }  // namespace art
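Together with the CheckCast -> CheckInstanceOf entrypoint rename and the new artThrowClassCastExceptionForObject below, the slow path can test the object directly and derive the source class from it when throwing. A minimal sketch of that flow, using only functions introduced in this change (the real callers are the assembly stubs, not shown here):

// Check-cast slow path sketch: `obj` is non-null and not trivially of `dest_type`.
if (artInstanceOfFromCode(obj, dest_type) == 0u) {
  artThrowClassCastExceptionForObject(obj, dest_type, self);  // Does not return.
}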
diff --git a/runtime/entrypoints/quick/quick_default_externs.h b/runtime/entrypoints/quick/quick_default_externs.h
index cfa5325..64030f3 100644
--- a/runtime/entrypoints/quick/quick_default_externs.h
+++ b/runtime/entrypoints/quick/quick_default_externs.h
@@ -31,7 +31,7 @@
 // These are extern declarations of assembly stubs with common names.
 
 // Cast entrypoints.
-extern "C" void art_quick_check_cast(const art::mirror::Class*, const art::mirror::Class*);
+extern "C" void art_quick_check_instance_of(art::mirror::Object*, art::mirror::Class*);
 
 // DexCache entrypoints.
 extern "C" void* art_quick_initialize_static_storage(uint32_t);
diff --git a/runtime/entrypoints/quick/quick_entrypoints_list.h b/runtime/entrypoints/quick/quick_entrypoints_list.h
index 3cfee45..a1c5082 100644
--- a/runtime/entrypoints/quick/quick_entrypoints_list.h
+++ b/runtime/entrypoints/quick/quick_entrypoints_list.h
@@ -33,8 +33,8 @@
   V(AllocStringFromChars, void*, int32_t, int32_t, void*) \
   V(AllocStringFromString, void*, void*) \
 \
-  V(InstanceofNonTrivial, size_t, const mirror::Class*, const mirror::Class*) \
-  V(CheckCast, void, const mirror::Class*, const mirror::Class*) \
+  V(InstanceofNonTrivial, size_t, mirror::Object*, mirror::Class*) \
+  V(CheckInstanceOf, void, mirror::Object*, mirror::Class*) \
 \
   V(InitializeStaticStorage, void*, uint32_t) \
   V(InitializeTypeAndVerifyAccess, void*, uint32_t) \
diff --git a/runtime/entrypoints/quick/quick_throw_entrypoints.cc b/runtime/entrypoints/quick/quick_throw_entrypoints.cc
index a205b17..c8ee99a 100644
--- a/runtime/entrypoints/quick/quick_throw_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_throw_entrypoints.cc
@@ -111,6 +111,14 @@
   self->QuickDeliverException();
 }
 
+extern "C" NO_RETURN void artThrowClassCastExceptionForObject(mirror::Object* obj,
+                                                              mirror::Class* dest_type,
+                                                              Thread* self)
+    REQUIRES_SHARED(Locks::mutator_lock_) {
+  DCHECK(obj != nullptr);
+  artThrowClassCastException(dest_type, obj->GetClass(), self);
+}
+
 extern "C" NO_RETURN void artThrowArrayStoreException(mirror::Object* array, mirror::Object* value,
                                                       Thread* self)
     REQUIRES_SHARED(Locks::mutator_lock_) {
diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
index 0bb6581..fe82878 100644
--- a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
@@ -2121,7 +2121,9 @@
 // to hold the mutator lock (see REQUIRES_SHARED(Locks::mutator_lock_) annotations).
 
 template<InvokeType type, bool access_check>
-static TwoWordReturn artInvokeCommon(uint32_t method_idx, mirror::Object* this_object, Thread* self,
+static TwoWordReturn artInvokeCommon(uint32_t method_idx,
+                                     ObjPtr<mirror::Object> this_object,
+                                     Thread* self,
                                      ArtMethod** sp) {
   ScopedQuickEntrypointChecks sqec(self);
   DCHECK_EQ(*sp, Runtime::Current()->GetCalleeSaveMethod(Runtime::kSaveRefsAndArgs));
@@ -2136,7 +2138,9 @@
       ScopedObjectAccessUnchecked soa(self->GetJniEnv());
       RememberForGcArgumentVisitor visitor(sp, type == kStatic, shorty, shorty_len, &soa);
       visitor.VisitArguments();
-      method = FindMethodFromCode<type, access_check>(method_idx, &this_object, caller_method,
+      method = FindMethodFromCode<type, access_check>(method_idx,
+                                                      &this_object,
+                                                      caller_method,
                                                       self);
       visitor.FixupReferences();
     }
@@ -2162,7 +2166,7 @@
 #define EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(type, access_check)                                \
   template REQUIRES_SHARED(Locks::mutator_lock_)                                          \
   TwoWordReturn artInvokeCommon<type, access_check>(                                            \
-      uint32_t method_idx, mirror::Object* this_object, Thread* self, ArtMethod** sp)
+      uint32_t method_idx, ObjPtr<mirror::Object> this_object, Thread* self, ArtMethod** sp)
 
 EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kVirtual, false);
 EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kVirtual, true);
@@ -2190,9 +2194,13 @@
 }
 
 extern "C" TwoWordReturn artInvokeStaticTrampolineWithAccessCheck(
-    uint32_t method_idx, mirror::Object* this_object, Thread* self, ArtMethod** sp)
-    REQUIRES_SHARED(Locks::mutator_lock_) {
-  return artInvokeCommon<kStatic, true>(method_idx, this_object, self, sp);
+    uint32_t method_idx,
+    mirror::Object* this_object ATTRIBUTE_UNUSED,
+    Thread* self,
+    ArtMethod** sp) REQUIRES_SHARED(Locks::mutator_lock_) {
+  // For static calls, this_object is not required and may be random garbage; don't pass it down,
+  // so that it cannot fail the ObjPtr alignment check.
+  return artInvokeCommon<kStatic, true>(method_idx, nullptr, self, sp);
 }
 
 extern "C" TwoWordReturn artInvokeSuperTrampolineWithAccessCheck(
@@ -2211,10 +2219,11 @@
 // is there for consistency but should not be used, as some architectures overwrite it
 // in the assembly trampoline.
 extern "C" TwoWordReturn artInvokeInterfaceTrampoline(uint32_t deadbeef ATTRIBUTE_UNUSED,
-                                                      mirror::Object* this_object,
+                                                      mirror::Object* raw_this_object,
                                                       Thread* self,
                                                       ArtMethod** sp)
     REQUIRES_SHARED(Locks::mutator_lock_) {
+  ObjPtr<mirror::Object> this_object(raw_this_object);
   ScopedQuickEntrypointChecks sqec(self);
   StackHandleScope<1> hs(self);
   Handle<mirror::Class> cls(hs.NewHandle(this_object->GetClass()));
@@ -2285,7 +2294,9 @@
       ScopedObjectAccessUnchecked soa(self->GetJniEnv());
       RememberForGcArgumentVisitor visitor(sp, false, shorty, shorty_len, &soa);
       visitor.VisitArguments();
-      method = FindMethodFromCode<kInterface, false>(dex_method_idx, &this_object, caller_method,
+      method = FindMethodFromCode<kInterface, false>(dex_method_idx,
+                                                     &this_object,
+                                                     caller_method,
                                                      self);
       visitor.FixupReferences();
     }
diff --git a/runtime/entrypoints_order_test.cc b/runtime/entrypoints_order_test.cc
index cdb1051..b0463d7 100644
--- a/runtime/entrypoints_order_test.cc
+++ b/runtime/entrypoints_order_test.cc
@@ -174,8 +174,9 @@
                          sizeof(void*));
     EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pAllocStringFromString, pInstanceofNonTrivial,
                          sizeof(void*));
-    EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pInstanceofNonTrivial, pCheckCast, sizeof(void*));
-    EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pCheckCast, pInitializeStaticStorage, sizeof(void*));
+    EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pInstanceofNonTrivial, pCheckInstanceOf, sizeof(void*));
+    EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pCheckInstanceOf, pInitializeStaticStorage,
+                         sizeof(void*));
     EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pInitializeStaticStorage, pInitializeTypeAndVerifyAccess,
                          sizeof(void*));
     EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pInitializeTypeAndVerifyAccess, pInitializeType,
diff --git a/runtime/gc/allocation_record.cc b/runtime/gc/allocation_record.cc
index d921900..e18a955 100644
--- a/runtime/gc/allocation_record.cc
+++ b/runtime/gc/allocation_record.cc
@@ -181,7 +181,6 @@
 }
 
 void AllocRecordObjectMap::BroadcastForNewAllocationRecords() {
-  CHECK(kUseReadBarrier);
   new_record_condition_.Broadcast(Thread::Current());
 }
 
@@ -291,6 +290,9 @@
   // Wait for GC's sweeping to complete and allow new records
   while (UNLIKELY((!kUseReadBarrier && !allow_new_record_) ||
                   (kUseReadBarrier && !self->GetWeakRefAccessEnabled()))) {
+    // Check and run the empty checkpoint before blocking so the empty checkpoint will work in the
+    // presence of threads blocking for weak ref access.
+    self->CheckEmptyCheckpoint();
     new_record_condition_.WaitHoldingLocks(self);
   }
 
diff --git a/runtime/gc/allocation_record.h b/runtime/gc/allocation_record.h
index c8b2b89..90cff6a 100644
--- a/runtime/gc/allocation_record.h
+++ b/runtime/gc/allocation_record.h
@@ -261,7 +261,6 @@
       REQUIRES_SHARED(Locks::mutator_lock_)
       REQUIRES(Locks::alloc_tracker_lock_);
   void BroadcastForNewAllocationRecords()
-      REQUIRES_SHARED(Locks::mutator_lock_)
       REQUIRES(Locks::alloc_tracker_lock_);
 
   // TODO: Is there a better way to hide the entries_'s type?
diff --git a/runtime/gc/collector/concurrent_copying-inl.h b/runtime/gc/collector/concurrent_copying-inl.h
index 76f500c..7c64952 100644
--- a/runtime/gc/collector/concurrent_copying-inl.h
+++ b/runtime/gc/collector/concurrent_copying-inl.h
@@ -32,7 +32,7 @@
     mirror::Object* ref, accounting::ContinuousSpaceBitmap* bitmap) {
   // For the Baker-style RB, in a rare case, we could incorrectly change the object from white
   // to gray even though the object has already been marked through. This happens if a mutator
-  // thread gets preempted before the AtomicSetReadBarrierPointer below, GC marks through the
+  // thread gets preempted before the AtomicSetReadBarrierState below, GC marks through the
   // object (changes it from white to gray and back to white), and the thread runs and
   // incorrectly changes it from white to gray. If this happens, the object will get added to the
   // mark stack again and get changed back to white after it is processed.
@@ -50,14 +50,14 @@
     // we can avoid an expensive CAS.
    // For the baker case, an object is marked if either the mark bit is set or the bitmap bit is
     // set.
-    success = ref->AtomicSetReadBarrierPointer(ReadBarrier::WhitePtr(), ReadBarrier::GrayPtr());
+    success = ref->AtomicSetReadBarrierState(ReadBarrier::WhiteState(), ReadBarrier::GrayState());
   } else {
     success = !bitmap->AtomicTestAndSet(ref);
   }
   if (success) {
     // Newly marked.
     if (kUseBakerReadBarrier) {
-      DCHECK_EQ(ref->GetReadBarrierPointer(), ReadBarrier::GrayPtr());
+      DCHECK_EQ(ref->GetReadBarrierState(), ReadBarrier::GrayState());
     }
     PushOntoMarkStack(ref);
   }
@@ -84,8 +84,8 @@
       return ref;
     }
     // This may or may not succeed, which is ok because the object may already be gray.
-    bool success = ref->AtomicSetReadBarrierPointer(ReadBarrier::WhitePtr(),
-                                                    ReadBarrier::GrayPtr());
+    bool success = ref->AtomicSetReadBarrierState(ReadBarrier::WhiteState(),
+                                                  ReadBarrier::GrayState());
     if (success) {
       MutexLock mu(Thread::Current(), immune_gray_stack_lock_);
       immune_gray_stack_.push_back(ref);
@@ -125,10 +125,6 @@
       return from_ref;
     case space::RegionSpace::RegionType::kRegionTypeFromSpace: {
       mirror::Object* to_ref = GetFwdPtr(from_ref);
-      if (kUseBakerReadBarrier) {
-        DCHECK_NE(to_ref, ReadBarrier::GrayPtr())
-            << "from_ref=" << from_ref << " to_ref=" << to_ref;
-      }
       if (to_ref == nullptr) {
         // It isn't marked yet. Mark it by copying it to the to-space.
         to_ref = Copy(from_ref);
@@ -153,9 +149,7 @@
 
 inline mirror::Object* ConcurrentCopying::MarkFromReadBarrier(mirror::Object* from_ref) {
   mirror::Object* ret;
-  // TODO: Delete GetMarkBit check when all of the callers properly check the bit. Remaining caller
-  // is array allocations.
-  if (from_ref == nullptr || from_ref->GetMarkBit()) {
+  if (from_ref == nullptr) {
     return from_ref;
   }
   // TODO: Consider removing this check when we are done investigating slow paths. b/30162165
@@ -192,9 +186,9 @@
 
 inline bool ConcurrentCopying::IsMarkedInUnevacFromSpace(mirror::Object* from_ref) {
   // Use load acquire on the read barrier pointer to ensure that we never see a white read barrier
-  // pointer with an unmarked bit due to reordering.
+  // state with an unmarked bit due to reordering.
   DCHECK(region_space_->IsInUnevacFromSpace(from_ref));
-  if (kUseBakerReadBarrier && from_ref->GetReadBarrierPointerAcquire() == ReadBarrier::GrayPtr()) {
+  if (kUseBakerReadBarrier && from_ref->GetReadBarrierStateAcquire() == ReadBarrier::GrayState()) {
     return true;
   }
   return region_space_bitmap_->Test(from_ref);
diff --git a/runtime/gc/collector/concurrent_copying.cc b/runtime/gc/collector/concurrent_copying.cc
index 8bb90e1..8353b26 100644
--- a/runtime/gc/collector/concurrent_copying.cc
+++ b/runtime/gc/collector/concurrent_copying.cc
@@ -246,7 +246,7 @@
     Thread* self = Thread::Current();
     CHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc)
         << thread->GetState() << " thread " << thread << " self " << self;
-    thread->SetIsGcMarking(true);
+    thread->SetIsGcMarkingAndUpdateEntrypoints(true);
     if (use_tlab_ && thread->HasTlab()) {
       if (ConcurrentCopying::kEnableFromSpaceAccountingCheck) {
         // This must come before the revoke.
@@ -418,7 +418,7 @@
                                   [&visitor](mirror::Object* obj)
         REQUIRES_SHARED(Locks::mutator_lock_) {
       // If an object is not gray, it should only have references to things in the immune spaces.
-      if (obj->GetReadBarrierPointer() != ReadBarrier::GrayPtr()) {
+      if (obj->GetReadBarrierState() != ReadBarrier::GrayState()) {
         obj->VisitReferences</*kVisitNativeRoots*/true,
                              kDefaultVerifyFlags,
                              kWithoutReadBarrier>(visitor, visitor);
@@ -463,7 +463,7 @@
       if (kIsDebugBuild) {
         Locks::mutator_lock_->AssertExclusiveHeld(Thread::Current());
       }
-      obj->SetReadBarrierPointer(ReadBarrier::GrayPtr());
+      obj->SetReadBarrierState(ReadBarrier::GrayState());
     }
   }
 
@@ -514,26 +514,6 @@
   live_stack_freeze_size_ = heap_->GetLiveStack()->Size();
 }
 
-class EmptyCheckpoint : public Closure {
- public:
-  explicit EmptyCheckpoint(ConcurrentCopying* concurrent_copying)
-      : concurrent_copying_(concurrent_copying) {
-  }
-
-  virtual void Run(Thread* thread) OVERRIDE NO_THREAD_SAFETY_ANALYSIS {
-    // Note: self is not necessarily equal to thread since thread may be suspended.
-    Thread* self = Thread::Current();
-    CHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc)
-        << thread->GetState() << " thread " << thread << " self " << self;
-    // If thread is a running mutator, then act on behalf of the garbage collector.
-    // See the code in ThreadList::RunCheckpoint.
-    concurrent_copying_->GetBarrier().Pass(self);
-  }
-
- private:
-  ConcurrentCopying* const concurrent_copying_;
-};
-
 // Used to visit objects in the immune spaces.
 inline void ConcurrentCopying::ScanImmuneObject(mirror::Object* obj) {
   DCHECK(obj != nullptr);
@@ -549,11 +529,11 @@
 
   ALWAYS_INLINE void operator()(mirror::Object* obj) const REQUIRES_SHARED(Locks::mutator_lock_) {
     if (kUseBakerReadBarrier && kGrayDirtyImmuneObjects) {
-      if (obj->GetReadBarrierPointer() == ReadBarrier::GrayPtr()) {
+      if (obj->GetReadBarrierState() == ReadBarrier::GrayState()) {
         collector_->ScanImmuneObject(obj);
         // Done scanning the object, go back to white.
-        bool success = obj->AtomicSetReadBarrierPointer(ReadBarrier::GrayPtr(),
-                                                        ReadBarrier::WhitePtr());
+        bool success = obj->AtomicSetReadBarrierState(ReadBarrier::GrayState(),
+                                                      ReadBarrier::WhiteState());
         CHECK(success);
       }
     } else {
@@ -620,9 +600,9 @@
       LOG(INFO) << "immune gray stack size=" << immune_gray_stack_.size();
     }
     for (mirror::Object* obj : immune_gray_stack_) {
-      DCHECK(obj->GetReadBarrierPointer() == ReadBarrier::GrayPtr());
-      bool success = obj->AtomicSetReadBarrierPointer(ReadBarrier::GrayPtr(),
-                                                      ReadBarrier::WhitePtr());
+      DCHECK(obj->GetReadBarrierState() == ReadBarrier::GrayState());
+      bool success = obj->AtomicSetReadBarrierState(ReadBarrier::GrayState(),
+                                                    ReadBarrier::WhiteState());
       DCHECK(success);
     }
     immune_gray_stack_.clear();
@@ -746,7 +726,7 @@
     // Disable the thread-local is_gc_marking flag.
    // Note a thread that has just started right before this checkpoint may already have this flag
     // set to false, which is ok.
-    thread->SetIsGcMarking(false);
+    thread->SetIsGcMarkingAndUpdateEntrypoints(false);
     // If thread is a running mutator, then act on behalf of the garbage collector.
     // See the code in ThreadList::RunCheckpoint.
     concurrent_copying_->GetBarrier().Pass(self);
@@ -821,11 +801,11 @@
   for (mirror::Object* obj : false_gray_stack_) {
     DCHECK(IsMarked(obj));
     // The object could be white here if a thread got preempted after a success at the
-    // AtomicSetReadBarrierPointer in Mark(), GC started marking through it (but not finished so
+    // AtomicSetReadBarrierState in Mark(), GC started marking through it (but not finished so
     // still gray), and the thread ran to register it onto the false gray stack.
-    if (obj->GetReadBarrierPointer() == ReadBarrier::GrayPtr()) {
-      bool success = obj->AtomicSetReadBarrierPointer(ReadBarrier::GrayPtr(),
-                                                      ReadBarrier::WhitePtr());
+    if (obj->GetReadBarrierState() == ReadBarrier::GrayState()) {
+      bool success = obj->AtomicSetReadBarrierState(ReadBarrier::GrayState(),
+                                                    ReadBarrier::WhiteState());
       DCHECK(success);
     }
   }
@@ -835,10 +815,10 @@
 
 void ConcurrentCopying::IssueEmptyCheckpoint() {
   Thread* self = Thread::Current();
-  EmptyCheckpoint check_point(this);
   ThreadList* thread_list = Runtime::Current()->GetThreadList();
-  gc_barrier_->Init(self, 0);
-  size_t barrier_count = thread_list->RunCheckpoint(&check_point);
+  Barrier* barrier = thread_list->EmptyCheckpointBarrier();
+  barrier->Init(self, 0);
+  size_t barrier_count = thread_list->RunEmptyCheckpoint();
  // If there are no threads to wait for, which implies that all the checkpoint functions have
  // finished, then there is no need to release the mutator lock.
   if (barrier_count == 0) {
@@ -848,7 +828,7 @@
   Locks::mutator_lock_->SharedUnlock(self);
   {
     ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun);
-    gc_barrier_->Increment(self, barrier_count);
+    barrier->Increment(self, barrier_count);
   }
   Locks::mutator_lock_->SharedLock(self);
 }
@@ -955,9 +935,9 @@
     }
     collector_->AssertToSpaceInvariant(nullptr, MemberOffset(0), ref);
     if (kUseBakerReadBarrier) {
-      CHECK_EQ(ref->GetReadBarrierPointer(), ReadBarrier::WhitePtr())
+      CHECK_EQ(ref->GetReadBarrierState(), ReadBarrier::WhiteState())
           << "Ref " << ref << " " << ref->PrettyTypeOf()
-          << " has non-white rb_ptr ";
+          << " has non-white rb_state ";
     }
   }
 
@@ -1026,8 +1006,8 @@
     VerifyNoFromSpaceRefsFieldVisitor visitor(collector);
     obj->VisitReferences(visitor, visitor);
     if (kUseBakerReadBarrier) {
-      CHECK_EQ(obj->GetReadBarrierPointer(), ReadBarrier::WhitePtr())
-          << "obj=" << obj << " non-white rb_ptr " << obj->GetReadBarrierPointer();
+      CHECK_EQ(obj->GetReadBarrierState(), ReadBarrier::WhiteState())
+          << "obj=" << obj << " non-white rb_state " << obj->GetReadBarrierState();
     }
   }
 
@@ -1253,6 +1233,10 @@
     }
     gc_mark_stack_->Reset();
   } else if (mark_stack_mode == kMarkStackModeShared) {
+    // Do an empty checkpoint to avoid a race with a mutator preempted in the middle of a read
+    // barrier but before pushing onto the mark stack. b/32508093. Note the weak ref access is
+    // disabled at this point.
+    IssueEmptyCheckpoint();
     // Process the shared GC mark stack with a lock.
     {
       MutexLock mu(self, mark_stack_lock_);
@@ -1333,8 +1317,8 @@
 inline void ConcurrentCopying::ProcessMarkStackRef(mirror::Object* to_ref) {
   DCHECK(!region_space_->IsInFromSpace(to_ref));
   if (kUseBakerReadBarrier) {
-    DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr())
-        << " " << to_ref << " " << to_ref->GetReadBarrierPointer()
+    DCHECK(to_ref->GetReadBarrierState() == ReadBarrier::GrayState())
+        << " " << to_ref << " " << to_ref->GetReadBarrierState()
         << " is_marked=" << IsMarked(to_ref);
   }
   bool add_to_live_bytes = false;
@@ -1351,8 +1335,8 @@
     Scan(to_ref);
   }
   if (kUseBakerReadBarrier) {
-    DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr())
-        << " " << to_ref << " " << to_ref->GetReadBarrierPointer()
+    DCHECK(to_ref->GetReadBarrierState() == ReadBarrier::GrayState())
+        << " " << to_ref << " " << to_ref->GetReadBarrierState()
         << " is_marked=" << IsMarked(to_ref);
   }
 #ifdef USE_BAKER_OR_BROOKS_READ_BARRIER
@@ -1368,9 +1352,9 @@
     // above IsInToSpace() evaluates to true and we change the color from gray to white here in this
     // else block.
     if (kUseBakerReadBarrier) {
-      bool success = to_ref->AtomicSetReadBarrierPointer</*kCasRelease*/true>(
-          ReadBarrier::GrayPtr(),
-          ReadBarrier::WhitePtr());
+      bool success = to_ref->AtomicSetReadBarrierState</*kCasRelease*/true>(
+          ReadBarrier::GrayState(),
+          ReadBarrier::WhiteState());
       DCHECK(success) << "Must succeed as we won the race.";
     }
   }
@@ -1458,9 +1442,9 @@
         while (!mark_stack->IsEmpty()) {
           mirror::Object* obj = mark_stack->PopBack();
           if (kUseBakerReadBarrier) {
-            mirror::Object* rb_ptr = obj->GetReadBarrierPointer();
-            LOG(INFO) << "On mark queue : " << obj << " " << obj->PrettyTypeOf() << " rb_ptr="
-                      << rb_ptr << " is_marked=" << IsMarked(obj);
+            uint32_t rb_state = obj->GetReadBarrierState();
+            LOG(INFO) << "On mark queue : " << obj << " " << obj->PrettyTypeOf() << " rb_state="
+                      << rb_state << " is_marked=" << IsMarked(obj);
           } else {
             LOG(INFO) << "On mark queue : " << obj << " " << obj->PrettyTypeOf()
                       << " is_marked=" << IsMarked(obj);
@@ -1707,7 +1691,7 @@
 void ConcurrentCopying::LogFromSpaceRefHolder(mirror::Object* obj, MemberOffset offset) {
   if (kUseBakerReadBarrier) {
     LOG(INFO) << "holder=" << obj << " " << obj->PrettyTypeOf()
-              << " holder rb_ptr=" << obj->GetReadBarrierPointer();
+              << " holder rb_state=" << obj->GetReadBarrierState();
   } else {
     LOG(INFO) << "holder=" << obj << " " << obj->PrettyTypeOf();
   }
@@ -1762,10 +1746,10 @@
         return;
       }
       bool updated_all_immune_objects = updated_all_immune_objects_.LoadSequentiallyConsistent();
-      CHECK(updated_all_immune_objects || ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr())
-          << "Unmarked immune space ref. obj=" << obj << " rb_ptr="
-          << (obj != nullptr ? obj->GetReadBarrierPointer() : nullptr)
-          << " ref=" << ref << " ref rb_ptr=" << ref->GetReadBarrierPointer()
+      CHECK(updated_all_immune_objects || ref->GetReadBarrierState() == ReadBarrier::GrayState())
+          << "Unmarked immune space ref. obj=" << obj << " rb_state="
+          << (obj != nullptr ? obj->GetReadBarrierState() : 0U)
+          << " ref=" << ref << " ref rb_state=" << ref->GetReadBarrierState()
           << " updated_all_immune_objects=" << updated_all_immune_objects;
     }
   } else {
@@ -2158,7 +2142,7 @@
     to_ref->SetLockWord(old_lock_word, false);
     // Set the gray ptr.
     if (kUseBakerReadBarrier) {
-      to_ref->SetReadBarrierPointer(ReadBarrier::GrayPtr());
+      to_ref->SetReadBarrierState(ReadBarrier::GrayState());
     }
 
     LockWord new_lock_word = LockWord::FromForwardingAddress(reinterpret_cast<size_t>(to_ref));
@@ -2176,7 +2160,7 @@
         DCHECK_EQ(bytes_allocated, non_moving_space_bytes_allocated);
       }
       if (kUseBakerReadBarrier) {
-        DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr());
+        DCHECK(to_ref->GetReadBarrierState() == ReadBarrier::GrayState());
       }
       DCHECK(GetFwdPtr(from_ref) == to_ref);
       CHECK_NE(to_ref->GetLockWord(false).GetState(), LockWord::kForwardingAddress);
@@ -2262,14 +2246,14 @@
   if (!is_los && mark_bitmap->Test(ref)) {
     // Already marked.
     if (kUseBakerReadBarrier) {
-      DCHECK(ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr() ||
-             ref->GetReadBarrierPointer() == ReadBarrier::WhitePtr());
+      DCHECK(ref->GetReadBarrierState() == ReadBarrier::GrayState() ||
+             ref->GetReadBarrierState() == ReadBarrier::WhiteState());
     }
   } else if (is_los && los_bitmap->Test(ref)) {
     // Already marked in LOS.
     if (kUseBakerReadBarrier) {
-      DCHECK(ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr() ||
-             ref->GetReadBarrierPointer() == ReadBarrier::WhitePtr());
+      DCHECK(ref->GetReadBarrierState() == ReadBarrier::GrayState() ||
+             ref->GetReadBarrierState() == ReadBarrier::WhiteState());
     }
   } else {
     // Not marked.
@@ -2282,7 +2266,7 @@
         DCHECK(!los_bitmap->Test(ref));
       }
       if (kUseBakerReadBarrier) {
-        DCHECK_EQ(ref->GetReadBarrierPointer(), ReadBarrier::WhitePtr());
+        DCHECK_EQ(ref->GetReadBarrierState(), ReadBarrier::WhiteState());
       }
     } else {
       // For the baker-style RB, we need to handle 'false-gray' cases. See the
@@ -2298,25 +2282,25 @@
       // This may or may not succeed, which is ok.
       bool cas_success = false;
       if (kUseBakerReadBarrier) {
-        cas_success = ref->AtomicSetReadBarrierPointer(ReadBarrier::WhitePtr(),
-                                                       ReadBarrier::GrayPtr());
+        cas_success = ref->AtomicSetReadBarrierState(ReadBarrier::WhiteState(),
+                                                     ReadBarrier::GrayState());
       }
       if (!is_los && mark_bitmap->AtomicTestAndSet(ref)) {
         // Already marked.
         if (kUseBakerReadBarrier && cas_success &&
-            ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr()) {
+            ref->GetReadBarrierState() == ReadBarrier::GrayState()) {
           PushOntoFalseGrayStack(ref);
         }
       } else if (is_los && los_bitmap->AtomicTestAndSet(ref)) {
         // Already marked in LOS.
         if (kUseBakerReadBarrier && cas_success &&
-            ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr()) {
+            ref->GetReadBarrierState() == ReadBarrier::GrayState()) {
           PushOntoFalseGrayStack(ref);
         }
       } else {
         // Newly marked.
         if (kUseBakerReadBarrier) {
-          DCHECK_EQ(ref->GetReadBarrierPointer(), ReadBarrier::GrayPtr());
+          DCHECK_EQ(ref->GetReadBarrierState(), ReadBarrier::GrayState());
         }
         PushOntoMarkStack(ref);
       }
@@ -2335,7 +2319,6 @@
     TimingLogger::ScopedTiming split("ClearRegionSpaceCards", GetTimings());
     // We do not currently use the region space cards at all, madvise them away to save ram.
     heap_->GetCardTable()->ClearCardRange(region_space_->Begin(), region_space_->Limit());
-    region_space_ = nullptr;
   }
   {
     MutexLock mu(self, skipped_blocks_lock_);
diff --git a/runtime/gc/collector/mark_compact.cc b/runtime/gc/collector/mark_compact.cc
index e0bf744..ddcb6c0 100644
--- a/runtime/gc/collector/mark_compact.cc
+++ b/runtime/gc/collector/mark_compact.cc
@@ -124,9 +124,9 @@
   if (obj == nullptr) {
     return nullptr;
   }
-  if (kUseBakerOrBrooksReadBarrier) {
-    // Verify all the objects have the correct forward pointer installed.
-    obj->AssertReadBarrierPointer();
+  if (kUseBakerReadBarrier) {
+    // Verify all the objects have the correct forward state installed.
+    obj->AssertReadBarrierState();
   }
   if (!immune_spaces_.IsInImmuneRegion(obj)) {
     if (objects_before_forwarding_->HasAddress(obj)) {
diff --git a/runtime/gc/collector/mark_sweep.cc b/runtime/gc/collector/mark_sweep.cc
index 77d7274..673a97e 100644
--- a/runtime/gc/collector/mark_sweep.cc
+++ b/runtime/gc/collector/mark_sweep.cc
@@ -462,9 +462,9 @@
                                          mirror::Object* holder,
                                          MemberOffset offset) {
   DCHECK(obj != nullptr);
-  if (kUseBakerOrBrooksReadBarrier) {
-    // Verify all the objects have the correct pointer installed.
-    obj->AssertReadBarrierPointer();
+  if (kUseBakerReadBarrier) {
+    // Verify all the objects have the correct state installed.
+    obj->AssertReadBarrierState();
   }
   if (immune_spaces_.IsInImmuneRegion(obj)) {
     if (kCountMarkedObjects) {
@@ -503,9 +503,9 @@
 
 inline bool MarkSweep::MarkObjectParallel(mirror::Object* obj) {
   DCHECK(obj != nullptr);
-  if (kUseBakerOrBrooksReadBarrier) {
-    // Verify all the objects have the correct pointer installed.
-    obj->AssertReadBarrierPointer();
+  if (kUseBakerReadBarrier) {
+    // Verify all the objects have the correct state installed.
+    obj->AssertReadBarrierState();
   }
   if (immune_spaces_.IsInImmuneRegion(obj)) {
     DCHECK(IsMarked(obj) != nullptr);
@@ -608,8 +608,7 @@
 void MarkSweep::MarkConcurrentRoots(VisitRootFlags flags) {
   TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
   // Visit all runtime roots and clear dirty flags.
-  Runtime::Current()->VisitConcurrentRoots(
-      this, static_cast<VisitRootFlags>(flags | kVisitRootFlagNonMoving));
+  Runtime::Current()->VisitConcurrentRoots(this, flags);
 }
 
 class MarkSweep::DelayReferenceReferentVisitor {
diff --git a/runtime/gc/collector/mark_sweep.h b/runtime/gc/collector/mark_sweep.h
index 19c2e9a..a94cb27 100644
--- a/runtime/gc/collector/mark_sweep.h
+++ b/runtime/gc/collector/mark_sweep.h
@@ -98,7 +98,7 @@
       REQUIRES(!mark_stack_lock_)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
-  void MarkConcurrentRoots(VisitRootFlags flags)
+  virtual void MarkConcurrentRoots(VisitRootFlags flags)
       REQUIRES(Locks::heap_bitmap_lock_)
       REQUIRES(!mark_stack_lock_)
       REQUIRES_SHARED(Locks::mutator_lock_);
diff --git a/runtime/gc/collector/semi_space.cc b/runtime/gc/collector/semi_space.cc
index 2ff4a3f..a815b83 100644
--- a/runtime/gc/collector/semi_space.cc
+++ b/runtime/gc/collector/semi_space.cc
@@ -589,13 +589,9 @@
   // references.
   saved_bytes_ +=
       CopyAvoidingDirtyingPages(reinterpret_cast<void*>(forward_address), obj, object_size);
-  if (kUseBakerOrBrooksReadBarrier) {
-    obj->AssertReadBarrierPointer();
-    if (kUseBrooksReadBarrier) {
-      DCHECK_EQ(forward_address->GetReadBarrierPointer(), obj);
-      forward_address->SetReadBarrierPointer(forward_address);
-    }
-    forward_address->AssertReadBarrierPointer();
+  if (kUseBakerReadBarrier) {
+    obj->AssertReadBarrierState();
+    forward_address->AssertReadBarrierState();
   }
   DCHECK(to_space_->HasAddress(forward_address) ||
          fallback_space_->HasAddress(forward_address) ||
diff --git a/runtime/gc/collector/sticky_mark_sweep.cc b/runtime/gc/collector/sticky_mark_sweep.cc
index bb7e854..a2dbe3f 100644
--- a/runtime/gc/collector/sticky_mark_sweep.cc
+++ b/runtime/gc/collector/sticky_mark_sweep.cc
@@ -56,6 +56,19 @@
   RecursiveMarkDirtyObjects(false, accounting::CardTable::kCardDirty - 1);
 }
 
+void StickyMarkSweep::MarkConcurrentRoots(VisitRootFlags flags) {
+  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
+  // Visit all runtime roots and clear dirty flags including class loader. This is done to prevent
+  // incorrect class unloading since the GC does not card mark when storing the class during
+  // object allocation. Doing this for each allocation would be slow.
+  // Since the card is not dirty, it means the object may not get scanned. This can cause class
+  // unloading to occur even though the class and class loader are reachable through the object's
+  // class.
+  Runtime::Current()->VisitConcurrentRoots(
+      this,
+      static_cast<VisitRootFlags>(flags | kVisitRootFlagClassLoader));
+}
+
 void StickyMarkSweep::Sweep(bool swap_bitmaps ATTRIBUTE_UNUSED) {
   SweepArray(GetHeap()->GetLiveStack(), false);
 }
diff --git a/runtime/gc/collector/sticky_mark_sweep.h b/runtime/gc/collector/sticky_mark_sweep.h
index 100ca64..45f912f 100644
--- a/runtime/gc/collector/sticky_mark_sweep.h
+++ b/runtime/gc/collector/sticky_mark_sweep.h
@@ -33,6 +33,12 @@
   StickyMarkSweep(Heap* heap, bool is_concurrent, const std::string& name_prefix = "");
   ~StickyMarkSweep() {}
 
+  virtual void MarkConcurrentRoots(VisitRootFlags flags)
+      OVERRIDE
+      REQUIRES(Locks::heap_bitmap_lock_)
+      REQUIRES(!mark_stack_lock_)
+      REQUIRES_SHARED(Locks::mutator_lock_);
+
  protected:
   // Bind the live bits to the mark bits of bitmaps for all spaces, all spaces other than the
   // alloc space will be marked as immune.
diff --git a/runtime/gc/gc_cause.cc b/runtime/gc/gc_cause.cc
index 1d377a4..7ff845d 100644
--- a/runtime/gc/gc_cause.cc
+++ b/runtime/gc/gc_cause.cc
@@ -38,10 +38,10 @@
     case kGcCauseDebugger: return "Debugger";
     case kGcCauseClassLinker: return "ClassLinker";
     case kGcCauseJitCodeCache: return "JitCodeCache";
-    default:
-      LOG(FATAL) << "Unreachable";
-      UNREACHABLE();
+    case kGcCauseAddRemoveSystemWeakHolder: return "SystemWeakHolder";
   }
+  LOG(FATAL) << "Unreachable";
+  UNREACHABLE();
 }
 
 std::ostream& operator<<(std::ostream& os, const GcCause& gc_cause) {
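
[Editor's note] Dropping the default case in the switch above makes the handling exhaustive over GcCause, so the compiler's -Wswitch diagnostic (enabled via -Wall in ART builds) can flag any newly added cause that lacks a string, while the fatal path after the switch still catches corrupted values at runtime. A small, self-contained illustration of that pattern, with a hypothetical enum and plain std::abort() standing in for ART's LOG(FATAL)/UNREACHABLE macros:

    #include <cstdlib>

    enum class Cause { kAlloc, kBackground, kExplicit };

    const char* CauseToString(Cause cause) {
      switch (cause) {
        case Cause::kAlloc:      return "Alloc";
        case Cause::kBackground: return "Background";
        case Cause::kExplicit:   return "Explicit";
        // No default: adding a new enumerator without a case now produces a
        // -Wswitch warning at compile time instead of only hitting the fatal
        // path at runtime.
      }
      std::abort();  // unreachable while the switch stays exhaustive
    }
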
diff --git a/runtime/gc/heap-inl.h b/runtime/gc/heap-inl.h
index 05ce9c7..97129e8 100644
--- a/runtime/gc/heap-inl.h
+++ b/runtime/gc/heap-inl.h
@@ -86,11 +86,8 @@
     obj = self->AllocTlab(byte_count);
     DCHECK(obj != nullptr) << "AllocTlab can't fail";
     obj->SetClass(klass);
-    if (kUseBakerOrBrooksReadBarrier) {
-      if (kUseBrooksReadBarrier) {
-        obj->SetReadBarrierPointer(obj.Ptr());
-      }
-      obj->AssertReadBarrierPointer();
+    if (kUseBakerReadBarrier) {
+      obj->AssertReadBarrierState();
     }
     bytes_allocated = byte_count;
     usable_size = bytes_allocated;
@@ -102,11 +99,8 @@
       LIKELY(obj != nullptr)) {
     DCHECK(!is_running_on_memory_tool_);
     obj->SetClass(klass);
-    if (kUseBakerOrBrooksReadBarrier) {
-      if (kUseBrooksReadBarrier) {
-        obj->SetReadBarrierPointer(obj.Ptr());
-      }
-      obj->AssertReadBarrierPointer();
+    if (kUseBakerReadBarrier) {
+      obj->AssertReadBarrierState();
     }
     usable_size = bytes_allocated;
     pre_fence_visitor(obj, usable_size);
@@ -143,11 +137,8 @@
     DCHECK_GT(bytes_allocated, 0u);
     DCHECK_GT(usable_size, 0u);
     obj->SetClass(klass);
-    if (kUseBakerOrBrooksReadBarrier) {
-      if (kUseBrooksReadBarrier) {
-        obj->SetReadBarrierPointer(obj.Ptr());
-      }
-      obj->AssertReadBarrierPointer();
+    if (kUseBakerReadBarrier) {
+      obj->AssertReadBarrierState();
     }
     if (collector::SemiSpace::kUseRememberedSet && UNLIKELY(allocator == kAllocatorTypeNonMoving)) {
       // (Note this if statement will be constant folded away for the
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index ffad80d..f0e619d 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -264,6 +264,10 @@
   if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
     LOG(INFO) << "Heap() entering";
   }
+  if (kUseReadBarrier) {
+    CHECK_EQ(foreground_collector_type_, kCollectorTypeCC);
+    CHECK_EQ(background_collector_type_, kCollectorTypeCCBackground);
+  }
   CHECK_GE(large_object_threshold, kMinLargeObjectThreshold);
   ScopedTrace trace(__FUNCTION__);
   Runtime* const runtime = Runtime::Current();
@@ -612,6 +616,8 @@
       concurrent_copying_collector_ = new collector::ConcurrentCopying(this,
                                                                        "",
                                                                        measure_gc_performance);
+      DCHECK(region_space_ != nullptr);
+      concurrent_copying_collector_->SetRegionSpace(region_space_);
       garbage_collectors_.push_back(concurrent_copying_collector_);
     }
     if (MayUseCollector(kCollectorTypeMC)) {
@@ -2404,13 +2410,9 @@
     }
     // Copy the object over to its new location. Don't use alloc_size to avoid valgrind error.
     memcpy(reinterpret_cast<void*>(forward_address), obj, obj_size);
-    if (kUseBakerOrBrooksReadBarrier) {
-      obj->AssertReadBarrierPointer();
-      if (kUseBrooksReadBarrier) {
-        DCHECK_EQ(forward_address->GetReadBarrierPointer(), obj);
-        forward_address->SetReadBarrierPointer(forward_address);
-      }
-      forward_address->AssertReadBarrierPointer();
+    if (kUseBakerReadBarrier) {
+      obj->AssertReadBarrierState();
+      forward_address->AssertReadBarrierState();
     }
     return forward_address;
   }
@@ -2708,7 +2710,6 @@
         collector = semi_space_collector_;
         break;
       case kCollectorTypeCC:
-        concurrent_copying_collector_->SetRegionSpace(region_space_);
         collector = concurrent_copying_collector_;
         break;
       case kCollectorTypeMC:
@@ -4068,7 +4069,6 @@
 }
 
 void Heap::BroadcastForNewAllocationRecords() const {
-  CHECK(kUseReadBarrier);
   // Always broadcast without checking IsAllocTrackingEnabled() because IsAllocTrackingEnabled() may
   // be set to false while some threads are waiting for system weak access in
   // AllocRecordObjectMap::RecordAllocation() and we may fail to wake them up. b/27467554.
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index e8eb69e..0c671d2 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -797,7 +797,6 @@
       REQUIRES(!Locks::alloc_tracker_lock_);
 
   void BroadcastForNewAllocationRecords() const
-      REQUIRES_SHARED(Locks::mutator_lock_)
       REQUIRES(!Locks::alloc_tracker_lock_);
 
   void DisableGCForShutdown() REQUIRES(!*gc_complete_lock_);
diff --git a/runtime/gc/reference_processor.cc b/runtime/gc/reference_processor.cc
index 798ecd3..2cde7d5 100644
--- a/runtime/gc/reference_processor.cc
+++ b/runtime/gc/reference_processor.cc
@@ -55,7 +55,6 @@
 }
 
 void ReferenceProcessor::BroadcastForSlowPath(Thread* self) {
-  CHECK(kUseReadBarrier);
   MutexLock mu(self, *Locks::reference_processor_lock_);
   condition_.Broadcast(self);
 }
@@ -99,6 +98,9 @@
         }
       }
     }
+    // Check and run the empty checkpoint before blocking so the empty checkpoint will work in the
+    // presence of threads blocking for weak ref access.
+    self->CheckEmptyCheckpoint();
     condition_.WaitHoldingLocks(self);
   }
   return reference->GetReferent();
@@ -270,6 +272,9 @@
  // Wait until we are done processing the reference.
   while ((!kUseReadBarrier && SlowPathEnabled()) ||
          (kUseReadBarrier && !self->GetWeakRefAccessEnabled())) {
+    // Check and run the empty checkpoint before blocking so the empty checkpoint will work in the
+    // presence of threads blocking for weak ref access.
+    self->CheckEmptyCheckpoint();
     condition_.WaitHoldingLocks(self);
   }
   // At this point, since the sentinel of the reference is live, it is guaranteed to not be
diff --git a/runtime/gc/reference_queue.cc b/runtime/gc/reference_queue.cc
index 4e6f7da..a0eb197 100644
--- a/runtime/gc/reference_queue.cc
+++ b/runtime/gc/reference_queue.cc
@@ -75,19 +75,19 @@
     // collector (SemiSpace) is running.
     CHECK(ref != nullptr);
     collector::ConcurrentCopying* concurrent_copying = heap->ConcurrentCopyingCollector();
-    mirror::Object* rb_ptr = ref->GetReadBarrierPointer();
-    if (rb_ptr == ReadBarrier::GrayPtr()) {
-      ref->AtomicSetReadBarrierPointer(ReadBarrier::GrayPtr(), ReadBarrier::WhitePtr());
-      CHECK_EQ(ref->GetReadBarrierPointer(), ReadBarrier::WhitePtr());
+    uint32_t rb_state = ref->GetReadBarrierState();
+    if (rb_state == ReadBarrier::GrayState()) {
+      ref->AtomicSetReadBarrierState(ReadBarrier::GrayState(), ReadBarrier::WhiteState());
+      CHECK_EQ(ref->GetReadBarrierState(), ReadBarrier::WhiteState());
     } else {
       // In ConcurrentCopying::ProcessMarkStackRef() we may leave a white reference in the queue and
       // find it here, which is OK.
-      CHECK_EQ(rb_ptr, ReadBarrier::WhitePtr()) << "ref=" << ref << " rb_ptr=" << rb_ptr;
+      CHECK_EQ(rb_state, ReadBarrier::WhiteState()) << "ref=" << ref << " rb_state=" << rb_state;
       ObjPtr<mirror::Object> referent = ref->GetReferent<kWithoutReadBarrier>();
       // The referent could be null if it's cleared by a mutator (Reference.clear()).
       if (referent != nullptr) {
         CHECK(concurrent_copying->IsInToSpace(referent.Ptr()))
-            << "ref=" << ref << " rb_ptr=" << ref->GetReadBarrierPointer()
+            << "ref=" << ref << " rb_state=" << ref->GetReadBarrierState()
             << " referent=" << referent;
       }
     }
diff --git a/runtime/gc/space/image_space.cc b/runtime/gc/space/image_space.cc
index 6035406..6019540 100644
--- a/runtime/gc/space/image_space.cc
+++ b/runtime/gc/space/image_space.cc
@@ -399,8 +399,8 @@
     auto* obj = reinterpret_cast<mirror::Object*>(current);
     CHECK(obj->GetClass() != nullptr) << "Image object at address " << obj << " has null class";
     CHECK(live_bitmap_->Test(obj)) << obj->PrettyTypeOf();
-    if (kUseBakerOrBrooksReadBarrier) {
-      obj->AssertReadBarrierPointer();
+    if (kUseBakerReadBarrier) {
+      obj->AssertReadBarrierState();
     }
     current += RoundUp(obj->SizeOf(), kObjectAlignment);
   }
@@ -1002,7 +1002,7 @@
         mirror::IfTable* iftable = as_klass->GetIfTable<kVerifyNone, kWithoutReadBarrier>();
         // Ensure iftable arrays are fixed up since we need GetMethodArray to return the valid
         // contents.
-        if (iftable != nullptr && IsInAppImage(iftable)) {
+        if (IsInAppImage(iftable)) {
           operator()(iftable);
           for (int32_t i = 0, count = iftable->Count(); i < count; ++i) {
             if (iftable->GetMethodArrayCount<kVerifyNone, kWithoutReadBarrier>(i) > 0) {
@@ -1606,7 +1606,7 @@
 
   std::ostringstream oss;
   bool first = true;
-  for (auto msg : error_msgs) {
+  for (const auto& msg : error_msgs) {
     if (!first) {
       oss << "\n    ";
     }
diff --git a/runtime/gc/space/region_space-inl.h b/runtime/gc/space/region_space-inl.h
index bbc634d..3e79223 100644
--- a/runtime/gc/space/region_space-inl.h
+++ b/runtime/gc/space/region_space-inl.h
@@ -116,18 +116,17 @@
                                                   size_t* bytes_tl_bulk_allocated) {
   DCHECK(IsAllocated() && IsInToSpace());
   DCHECK_ALIGNED(num_bytes, kAlignment);
-  Atomic<uint8_t*>* atomic_top = reinterpret_cast<Atomic<uint8_t*>*>(&top_);
   uint8_t* old_top;
   uint8_t* new_top;
   do {
-    old_top = atomic_top->LoadRelaxed();
+    old_top = top_.LoadRelaxed();
     new_top = old_top + num_bytes;
     if (UNLIKELY(new_top > end_)) {
       return nullptr;
     }
-  } while (!atomic_top->CompareExchangeWeakSequentiallyConsistent(old_top, new_top));
-  reinterpret_cast<Atomic<uint64_t>*>(&objects_allocated_)->FetchAndAddSequentiallyConsistent(1);
-  DCHECK_LE(atomic_top->LoadRelaxed(), end_);
+  } while (!top_.CompareExchangeWeakRelaxed(old_top, new_top));
+  objects_allocated_.FetchAndAddRelaxed(1);
+  DCHECK_LE(Top(), end_);
   DCHECK_LT(old_top, end_);
   DCHECK_LE(new_top, end_);
   *bytes_allocated = num_bytes;
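
[Editor's note] With top_ becoming an Atomic<uint8_t*> (see the region_space.h hunk below), the bump-pointer fast path above turns into a plain relaxed compare-and-swap loop instead of a reinterpret_cast over a raw pointer field. A minimal standalone sketch of the same scheme using std::atomic — the struct and field names are illustrative, not the real Region layout:

    #include <atomic>
    #include <cstddef>
    #include <cstdint>

    struct BumpRegion {
      std::atomic<uint8_t*> top;  // current allocation position
      uint8_t* end;               // exclusive end of the region

      // Reserves num_bytes and returns its start, or nullptr if it doesn't fit.
      uint8_t* Alloc(size_t num_bytes) {
        uint8_t* old_top = top.load(std::memory_order_relaxed);
        uint8_t* new_top;
        do {
          new_top = old_top + num_bytes;
          if (new_top > end) {
            return nullptr;  // caller falls back to allocating a fresh region
          }
          // compare_exchange_weak reloads old_top on failure, so each retry
          // races against the latest observed top value.
        } while (!top.compare_exchange_weak(old_top, new_top,
                                            std::memory_order_relaxed));
        return old_top;
      }
    };
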
diff --git a/runtime/gc/space/region_space.cc b/runtime/gc/space/region_space.cc
index 35bc369..8077319 100644
--- a/runtime/gc/space/region_space.cc
+++ b/runtime/gc/space/region_space.cc
@@ -57,7 +57,7 @@
   regions_.reset(new Region[num_regions_]);
   uint8_t* region_addr = mem_map->Begin();
   for (size_t i = 0; i < num_regions_; ++i, region_addr += kRegionSize) {
-    regions_[i] = Region(i, region_addr, region_addr + kRegionSize);
+    regions_[i].Init(i, region_addr, region_addr + kRegionSize);
   }
   mark_bitmap_.reset(
       accounting::ContinuousSpaceBitmap::Create("region space live bitmap", Begin(), Capacity()));
@@ -72,7 +72,6 @@
     }
     CHECK_EQ(regions_[num_regions_ - 1].End(), Limit());
   }
-  full_region_ = Region();
   DCHECK(!full_region_.IsFree());
   DCHECK(full_region_.IsAllocated());
   current_region_ = &full_region_;
@@ -346,7 +345,7 @@
 void RegionSpace::RecordAlloc(mirror::Object* ref) {
   CHECK(ref != nullptr);
   Region* r = RefToRegion(ref);
-  reinterpret_cast<Atomic<uint64_t>*>(&r->objects_allocated_)->FetchAndAddSequentiallyConsistent(1);
+  r->objects_allocated_.FetchAndAddSequentiallyConsistent(1);
 }
 
 bool RegionSpace::AllocNewTlab(Thread* self) {
@@ -424,7 +423,8 @@
 }
 
 void RegionSpace::Region::Dump(std::ostream& os) const {
-  os << "Region[" << idx_ << "]=" << reinterpret_cast<void*>(begin_) << "-" << reinterpret_cast<void*>(top_)
+  os << "Region[" << idx_ << "]=" << reinterpret_cast<void*>(begin_) << "-"
+     << reinterpret_cast<void*>(Top())
      << "-" << reinterpret_cast<void*>(end_)
      << " state=" << static_cast<uint>(state_) << " type=" << static_cast<uint>(type_)
      << " objects_allocated=" << objects_allocated_
diff --git a/runtime/gc/space/region_space.h b/runtime/gc/space/region_space.h
index 381ccfa..f3b9595 100644
--- a/runtime/gc/space/region_space.h
+++ b/runtime/gc/space/region_space.h
@@ -246,11 +246,19 @@
           objects_allocated_(0), alloc_time_(0), live_bytes_(static_cast<size_t>(-1)),
           is_newly_allocated_(false), is_a_tlab_(false), thread_(nullptr) {}
 
-    Region(size_t idx, uint8_t* begin, uint8_t* end)
-        : idx_(idx), begin_(begin), top_(begin), end_(end),
-          state_(RegionState::kRegionStateFree), type_(RegionType::kRegionTypeNone),
-          objects_allocated_(0), alloc_time_(0), live_bytes_(static_cast<size_t>(-1)),
-          is_newly_allocated_(false), is_a_tlab_(false), thread_(nullptr) {
+    void Init(size_t idx, uint8_t* begin, uint8_t* end) {
+      idx_ = idx;
+      begin_ = begin;
+      top_.StoreRelaxed(begin);
+      end_ = end;
+      state_ = RegionState::kRegionStateFree;
+      type_ = RegionType::kRegionTypeNone;
+      objects_allocated_.StoreRelaxed(0);
+      alloc_time_ = 0;
+      live_bytes_ = static_cast<size_t>(-1);
+      is_newly_allocated_ = false;
+      is_a_tlab_ = false;
+      thread_ = nullptr;
       DCHECK_LT(begin, end);
       DCHECK_EQ(static_cast<size_t>(end - begin), kRegionSize);
     }
@@ -264,16 +272,13 @@
     }
 
     void Clear() {
-      top_ = begin_;
+      top_.StoreRelaxed(begin_);
       state_ = RegionState::kRegionStateFree;
       type_ = RegionType::kRegionTypeNone;
-      objects_allocated_ = 0;
+      objects_allocated_.StoreRelaxed(0);
       alloc_time_ = 0;
       live_bytes_ = static_cast<size_t>(-1);
-      if (!kMadviseZeroes) {
-        memset(begin_, 0, end_ - begin_);
-      }
-      madvise(begin_, end_ - begin_, MADV_DONTNEED);
+      ZeroAndReleasePages(begin_, end_ - begin_);
       is_newly_allocated_ = false;
       is_a_tlab_ = false;
       thread_ = nullptr;
@@ -287,8 +292,8 @@
       bool is_free = state_ == RegionState::kRegionStateFree;
       if (is_free) {
         DCHECK(IsInNoSpace());
-        DCHECK_EQ(begin_, top_);
-        DCHECK_EQ(objects_allocated_, 0U);
+        DCHECK_EQ(begin_, Top());
+        DCHECK_EQ(objects_allocated_.LoadRelaxed(), 0U);
       }
       return is_free;
     }
@@ -328,7 +333,7 @@
     bool IsLarge() const {
       bool is_large = state_ == RegionState::kRegionStateLarge;
       if (is_large) {
-        DCHECK_LT(begin_ + 1 * MB, top_);
+        DCHECK_LT(begin_ + 1 * MB, Top());
       }
       return is_large;
     }
@@ -337,7 +342,7 @@
     bool IsLargeTail() const {
       bool is_large_tail = state_ == RegionState::kRegionStateLargeTail;
       if (is_large_tail) {
-        DCHECK_EQ(begin_, top_);
+        DCHECK_EQ(begin_, Top());
       }
       return is_large_tail;
     }
@@ -395,15 +400,15 @@
 
     size_t BytesAllocated() const {
       if (IsLarge()) {
-        DCHECK_LT(begin_ + kRegionSize, top_);
-        return static_cast<size_t>(top_ - begin_);
+        DCHECK_LT(begin_ + kRegionSize, Top());
+        return static_cast<size_t>(Top() - begin_);
       } else if (IsLargeTail()) {
-        DCHECK_EQ(begin_, top_);
+        DCHECK_EQ(begin_, Top());
         return 0;
       } else {
         DCHECK(IsAllocated()) << static_cast<uint>(state_);
-        DCHECK_LE(begin_, top_);
-        size_t bytes = static_cast<size_t>(top_ - begin_);
+        DCHECK_LE(begin_, Top());
+        size_t bytes = static_cast<size_t>(Top() - begin_);
         DCHECK_LE(bytes, kRegionSize);
         return bytes;
       }
@@ -411,12 +416,12 @@
 
     size_t ObjectsAllocated() const {
       if (IsLarge()) {
-        DCHECK_LT(begin_ + 1 * MB, top_);
-        DCHECK_EQ(objects_allocated_, 0U);
+        DCHECK_LT(begin_ + 1 * MB, Top());
+        DCHECK_EQ(objects_allocated_.LoadRelaxed(), 0U);
         return 1;
       } else if (IsLargeTail()) {
-        DCHECK_EQ(begin_, top_);
-        DCHECK_EQ(objects_allocated_, 0U);
+        DCHECK_EQ(begin_, Top());
+        DCHECK_EQ(objects_allocated_.LoadRelaxed(), 0U);
         return 0;
       } else {
         DCHECK(IsAllocated()) << static_cast<uint>(state_);
@@ -428,12 +433,12 @@
       return begin_;
     }
 
-    uint8_t* Top() const {
-      return top_;
+    ALWAYS_INLINE uint8_t* Top() const {
+      return top_.LoadRelaxed();
     }
 
     void SetTop(uint8_t* new_top) {
-      top_ = new_top;
+      top_.StoreRelaxed(new_top);
     }
 
     uint8_t* End() const {
@@ -448,27 +453,26 @@
 
     void RecordThreadLocalAllocations(size_t num_objects, size_t num_bytes) {
       DCHECK(IsAllocated());
-      DCHECK_EQ(objects_allocated_, 0U);
-      DCHECK_EQ(top_, end_);
-      objects_allocated_ = num_objects;
-      top_ = begin_ + num_bytes;
-      DCHECK_EQ(top_, end_);
+      DCHECK_EQ(objects_allocated_.LoadRelaxed(), 0U);
+      DCHECK_EQ(Top(), end_);
+      objects_allocated_.StoreRelaxed(num_objects);
+      top_.StoreRelaxed(begin_ + num_bytes);
+      DCHECK_EQ(Top(), end_);
     }
 
    private:
-    size_t idx_;                   // The region's index in the region space.
-    uint8_t* begin_;               // The begin address of the region.
-    // Can't use Atomic<uint8_t*> as Atomic's copy operator is implicitly deleted.
-    uint8_t* top_;                 // The current position of the allocation.
-    uint8_t* end_;                 // The end address of the region.
-    RegionState state_;            // The region state (see RegionState).
-    RegionType type_;              // The region type (see RegionType).
-    uint64_t objects_allocated_;   // The number of objects allocated.
-    uint32_t alloc_time_;          // The allocation time of the region.
-    size_t live_bytes_;            // The live bytes. Used to compute the live percent.
-    bool is_newly_allocated_;      // True if it's allocated after the last collection.
-    bool is_a_tlab_;               // True if it's a tlab.
-    Thread* thread_;               // The owning thread if it's a tlab.
+    size_t idx_;                        // The region's index in the region space.
+    uint8_t* begin_;                    // The begin address of the region.
+    Atomic<uint8_t*> top_;              // The current position of the allocation.
+    uint8_t* end_;                      // The end address of the region.
+    RegionState state_;                 // The region state (see RegionState).
+    RegionType type_;                   // The region type (see RegionType).
+    Atomic<size_t> objects_allocated_;  // The number of objects allocated.
+    uint32_t alloc_time_;               // The allocation time of the region.
+    size_t live_bytes_;                 // The live bytes. Used to compute the live percent.
+    bool is_newly_allocated_;           // True if it's allocated after the last collection.
+    bool is_a_tlab_;                    // True if it's a tlab.
+    Thread* thread_;                    // The owning thread if it's a tlab.
 
     friend class RegionSpace;
   };
diff --git a/runtime/gc/space/space_test.h b/runtime/gc/space/space_test.h
index 7778871..cbb3d73 100644
--- a/runtime/gc/space/space_test.h
+++ b/runtime/gc/space/space_test.h
@@ -108,13 +108,10 @@
     EXPECT_GE(size, SizeOfZeroLengthByteArray());
     EXPECT_TRUE(byte_array_class != nullptr);
     o->SetClass(byte_array_class);
-    if (kUseBakerOrBrooksReadBarrier) {
+    if (kUseBakerReadBarrier) {
       // Like the proper heap object allocation, install and verify
-      // the correct read barrier pointer.
-      if (kUseBrooksReadBarrier) {
-        o->SetReadBarrierPointer(o);
-      }
-      o->AssertReadBarrierPointer();
+      // the correct read barrier state.
+      o->AssertReadBarrierState();
     }
     mirror::Array* arr = o->AsArray<kVerifyNone>();
     size_t header_size = SizeOfZeroLengthByteArray();
diff --git a/runtime/gc/system_weak.h b/runtime/gc/system_weak.h
index 3910a28..e5cddfc 100644
--- a/runtime/gc/system_weak.h
+++ b/runtime/gc/system_weak.h
@@ -30,7 +30,8 @@
 
   virtual void Allow() REQUIRES_SHARED(Locks::mutator_lock_) = 0;
   virtual void Disallow() REQUIRES_SHARED(Locks::mutator_lock_) = 0;
-  virtual void Broadcast() REQUIRES_SHARED(Locks::mutator_lock_) = 0;
+  // See Runtime::BroadcastForNewSystemWeaks for the broadcast_for_checkpoint definition.
+  virtual void Broadcast(bool broadcast_for_checkpoint) = 0;
 
   virtual void Sweep(IsMarkedVisitor* visitor) REQUIRES_SHARED(Locks::mutator_lock_) = 0;
 };
@@ -61,19 +62,27 @@
     allow_new_system_weak_ = false;
   }
 
-  void Broadcast() OVERRIDE
-      REQUIRES_SHARED(Locks::mutator_lock_)
+  void Broadcast(bool broadcast_for_checkpoint ATTRIBUTE_UNUSED) OVERRIDE
       REQUIRES(!allow_disallow_lock_) {
-    CHECK(kUseReadBarrier);
     MutexLock mu(Thread::Current(), allow_disallow_lock_);
     new_weak_condition_.Broadcast(Thread::Current());
   }
 
+  // WARNING: For lock annotations only.
+  Mutex* GetAllowDisallowLock() const RETURN_CAPABILITY(allow_disallow_lock_) {
+    return nullptr;
+  }
+
  protected:
-  void Wait(Thread* self) REQUIRES_SHARED(allow_disallow_lock_) {
+  void Wait(Thread* self)
+      REQUIRES_SHARED(Locks::mutator_lock_)
+      REQUIRES(allow_disallow_lock_) {
     // Wait for GC's sweeping to complete and allow new records
     while (UNLIKELY((!kUseReadBarrier && !allow_new_system_weak_) ||
                     (kUseReadBarrier && !self->GetWeakRefAccessEnabled()))) {
+      // Check and run the empty checkpoint before blocking so the empty checkpoint will work in the
+      // presence of threads blocking for weak ref access.
+      self->CheckEmptyCheckpoint();
       new_weak_condition_.WaitHoldingLocks(self);
     }
   }
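
[Editor's note] The Wait() loop above, like the ones added to AllocRecordObjectMap and ReferenceProcessor earlier in this change, follows one pattern: a thread about to block while weak-ref access is disabled first services any pending empty-checkpoint request, so a collector running an empty checkpoint never stalls behind already-parked threads; the new Broadcast(broadcast_for_checkpoint) overload then wakes parked waiters when a request arrives. A minimal standalone sketch of that ordering with std::mutex/std::condition_variable — the names are illustrative, not the real ART Thread/ConditionVariable API:

    #include <condition_variable>
    #include <mutex>

    std::mutex lock;
    std::condition_variable cond;
    bool weak_ref_access_enabled = false;
    bool empty_checkpoint_requested = false;

    // Mutator side: run pending empty-checkpoint work before every block.
    void WaitForWeakRefAccess() {
      std::unique_lock<std::mutex> lk(lock);
      while (!weak_ref_access_enabled) {
        if (empty_checkpoint_requested) {
          empty_checkpoint_requested = false;  // "pass the barrier" in the real code
          cond.notify_all();                   // let the requester observe progress
        }
        cond.wait(lk);
      }
    }

    // Collector side: request the checkpoint and wake parked waiters so they comply.
    void RunEmptyCheckpoint() {
      std::unique_lock<std::mutex> lk(lock);
      empty_checkpoint_requested = true;
      cond.notify_all();
      cond.wait(lk, [] { return !empty_checkpoint_requested; });
    }

The real implementation counts threads through a Barrier rather than a single flag, but the essential ordering is the same: check the request, then block.
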
diff --git a/runtime/gc/system_weak_test.cc b/runtime/gc/system_weak_test.cc
index af8a444..9b601c0 100644
--- a/runtime/gc/system_weak_test.cc
+++ b/runtime/gc/system_weak_test.cc
@@ -58,12 +58,14 @@
     disallow_count_++;
   }
 
-  void Broadcast() OVERRIDE
-      REQUIRES_SHARED(Locks::mutator_lock_)
+  void Broadcast(bool broadcast_for_checkpoint) OVERRIDE
       REQUIRES(!allow_disallow_lock_) {
-    SystemWeakHolder::Broadcast();
+    SystemWeakHolder::Broadcast(broadcast_for_checkpoint);
 
-    allow_count_++;
+    if (!broadcast_for_checkpoint) {
+      // Don't count the broadcasts for running checkpoints.
+      allow_count_++;
+    }
   }
 
   void Sweep(IsMarkedVisitor* visitor) OVERRIDE
diff --git a/runtime/generate-operator-out.py b/runtime/generate-operator-out.py
new file mode 120000
index 0000000..cc291d2
--- /dev/null
+++ b/runtime/generate-operator-out.py
@@ -0,0 +1 @@
+../tools/generate-operator-out.py
\ No newline at end of file
diff --git a/runtime/generated/asm_support_gen.h b/runtime/generated/asm_support_gen.h
index 03f5bf6..f13ff8c 100644
--- a/runtime/generated/asm_support_gen.h
+++ b/runtime/generated/asm_support_gen.h
@@ -52,6 +52,8 @@
 DEFINE_CHECK_EQ(static_cast<uint32_t>(MIRROR_CLASS_STATUS_INITIALIZED), (static_cast<uint32_t>((art::mirror::Class::kStatusInitialized))))
 #define ACCESS_FLAGS_CLASS_IS_FINALIZABLE 0x80000000
 DEFINE_CHECK_EQ(static_cast<uint32_t>(ACCESS_FLAGS_CLASS_IS_FINALIZABLE), (static_cast<uint32_t>((art::kAccClassIsFinalizable))))
+#define ACCESS_FLAGS_CLASS_IS_INTERFACE 0x200
+DEFINE_CHECK_EQ(static_cast<uint32_t>(ACCESS_FLAGS_CLASS_IS_INTERFACE), (static_cast<uint32_t>((art::kAccInterface))))
 #define ACCESS_FLAGS_CLASS_IS_FINALIZABLE_BIT 0x1f
 DEFINE_CHECK_EQ(static_cast<uint32_t>(ACCESS_FLAGS_CLASS_IS_FINALIZABLE_BIT), (static_cast<uint32_t>((art::MostSignificantBit(art::kAccClassIsFinalizable)))))
 #define ART_METHOD_DEX_CACHE_METHODS_OFFSET_32 20
@@ -96,6 +98,12 @@
 DEFINE_CHECK_EQ(static_cast<uint32_t>(LOCK_WORD_READ_BARRIER_STATE_MASK_TOGGLED), (static_cast<uint32_t>(art::LockWord::kReadBarrierStateMaskShiftedToggled)))
 #define LOCK_WORD_THIN_LOCK_COUNT_ONE 65536
 DEFINE_CHECK_EQ(static_cast<int32_t>(LOCK_WORD_THIN_LOCK_COUNT_ONE), (static_cast<int32_t>(art::LockWord::kThinLockCountOne)))
+#define LOCK_WORD_STATE_FORWARDING_ADDRESS 0x3
+DEFINE_CHECK_EQ(static_cast<uint32_t>(LOCK_WORD_STATE_FORWARDING_ADDRESS), (static_cast<uint32_t>(art::LockWord::kStateForwardingAddress)))
+#define LOCK_WORD_STATE_FORWARDING_ADDRESS_OVERFLOW 0x40000000
+DEFINE_CHECK_EQ(static_cast<uint32_t>(LOCK_WORD_STATE_FORWARDING_ADDRESS_OVERFLOW), (static_cast<uint32_t>(art::LockWord::kStateForwardingAddressOverflow)))
+#define LOCK_WORD_STATE_FORWARDING_ADDRESS_SHIFT 0x3
+DEFINE_CHECK_EQ(static_cast<uint32_t>(LOCK_WORD_STATE_FORWARDING_ADDRESS_SHIFT), (static_cast<uint32_t>(art::LockWord::kForwardingAddressShift)))
 #define LOCK_WORD_GC_STATE_MASK_SHIFTED 0x30000000
 DEFINE_CHECK_EQ(static_cast<uint32_t>(LOCK_WORD_GC_STATE_MASK_SHIFTED), (static_cast<uint32_t>(art::LockWord::kGCStateMaskShifted)))
 #define LOCK_WORD_GC_STATE_MASK_SHIFTED_TOGGLED 0xcfffffff
@@ -134,6 +142,10 @@
 DEFINE_CHECK_EQ(static_cast<int32_t>(THREAD_SUSPEND_REQUEST), (static_cast<int32_t>((art::kSuspendRequest))))
 #define THREAD_CHECKPOINT_REQUEST 2
 DEFINE_CHECK_EQ(static_cast<int32_t>(THREAD_CHECKPOINT_REQUEST), (static_cast<int32_t>((art::kCheckpointRequest))))
+#define THREAD_EMPTY_CHECKPOINT_REQUEST 4
+DEFINE_CHECK_EQ(static_cast<int32_t>(THREAD_EMPTY_CHECKPOINT_REQUEST), (static_cast<int32_t>((art::kEmptyCheckpointRequest))))
+#define THREAD_SUSPEND_OR_CHECKPOINT_REQUEST 7
+DEFINE_CHECK_EQ(static_cast<int32_t>(THREAD_SUSPEND_OR_CHECKPOINT_REQUEST), (static_cast<int32_t>((art::kSuspendRequest | art::kCheckpointRequest | art::kEmptyCheckpointRequest))))
 #define JIT_CHECK_OSR (-1)
 DEFINE_CHECK_EQ(static_cast<int16_t>(JIT_CHECK_OSR), (static_cast<int16_t>((art::jit::kJitCheckForOSR))))
 #define JIT_HOTNESS_DISABLE (-2)
diff --git a/runtime/handle.h b/runtime/handle.h
index d33d4a6..3db3be2 100644
--- a/runtime/handle.h
+++ b/runtime/handle.h
@@ -42,13 +42,9 @@
   Handle() : reference_(nullptr) {
   }
 
-  ALWAYS_INLINE Handle(const Handle<T>& handle) : reference_(handle.reference_) {
-  }
+  ALWAYS_INLINE Handle(const Handle<T>& handle) = default;
 
-  ALWAYS_INLINE Handle<T>& operator=(const Handle<T>& handle) {
-    reference_ = handle.reference_;
-    return *this;
-  }
+  ALWAYS_INLINE Handle<T>& operator=(const Handle<T>& handle) = default;
 
   ALWAYS_INLINE explicit Handle(StackReference<T>* reference) : reference_(reference) {
   }
@@ -109,15 +105,10 @@
   }
 
   ALWAYS_INLINE MutableHandle(const MutableHandle<T>& handle)
-      REQUIRES_SHARED(Locks::mutator_lock_)
-      : Handle<T>(handle.reference_) {
-  }
+      REQUIRES_SHARED(Locks::mutator_lock_) = default;
 
   ALWAYS_INLINE MutableHandle<T>& operator=(const MutableHandle<T>& handle)
-      REQUIRES_SHARED(Locks::mutator_lock_) {
-    Handle<T>::operator=(handle);
-    return *this;
-  }
+      REQUIRES_SHARED(Locks::mutator_lock_) = default;
 
   ALWAYS_INLINE explicit MutableHandle(StackReference<T>* reference)
       REQUIRES_SHARED(Locks::mutator_lock_)
diff --git a/runtime/handle_scope.h b/runtime/handle_scope.h
index 8a0aba6..adb7d8a 100644
--- a/runtime/handle_scope.h
+++ b/runtime/handle_scope.h
@@ -69,7 +69,7 @@
         number_of_references_(num_references) {}
 
   // Variable sized constructor.
-  BaseHandleScope(BaseHandleScope* link)
+  explicit BaseHandleScope(BaseHandleScope* link)
       : link_(link),
         number_of_references_(kNumReferencesVariableSized) {}
 
diff --git a/runtime/handle_scope_test.cc b/runtime/handle_scope_test.cc
index 92063c4..aab1d9c 100644
--- a/runtime/handle_scope_test.cc
+++ b/runtime/handle_scope_test.cc
@@ -14,15 +14,27 @@
  * limitations under the License.
  */
 
+#include <type_traits>
+
 #include "base/enums.h"
 #include "common_runtime_test.h"
 #include "gtest/gtest.h"
+#include "handle.h"
 #include "handle_scope-inl.h"
+#include "mirror/object.h"
 #include "scoped_thread_state_change-inl.h"
 #include "thread.h"
 
 namespace art {
 
+// Handles are value objects and should be trivially copyable.
+static_assert(std::is_trivially_copyable<Handle<mirror::Object>>::value,
+              "Handle should be trivially copyable");
+static_assert(std::is_trivially_copyable<MutableHandle<mirror::Object>>::value,
+              "MutableHandle should be trivially copyable");
+static_assert(std::is_trivially_copyable<ScopedNullHandle<mirror::Object>>::value,
+              "ScopedNullHandle should be trivially copyable");
+
 class HandleScopeTest : public CommonRuntimeTest {};
 
 // Test the offsets computed for members of HandleScope. Because of cross-compiling
diff --git a/runtime/hprof/hprof.cc b/runtime/hprof/hprof.cc
index 2336759..8cbe491 100644
--- a/runtime/hprof/hprof.cc
+++ b/runtime/hprof/hprof.cc
@@ -574,9 +574,9 @@
   }
 
   void WriteStringTable() {
-    for (const std::pair<std::string, HprofStringId>& p : strings_) {
+    for (const auto& p : strings_) {
       const std::string& string = p.first;
-      const size_t id = p.second;
+      const HprofStringId id = p.second;
 
       output_->StartNewRecord(HPROF_TAG_STRING, kHprofTime);
 
diff --git a/runtime/image.cc b/runtime/image.cc
index 299d5fd..bd5ba93 100644
--- a/runtime/image.cc
+++ b/runtime/image.cc
@@ -25,7 +25,7 @@
 namespace art {
 
 const uint8_t ImageHeader::kImageMagic[] = { 'a', 'r', 't', '\n' };
-const uint8_t ImageHeader::kImageVersion[] = { '0', '3', '1', '\0' };
+const uint8_t ImageHeader::kImageVersion[] = { '0', '3', '2', '\0' };
 
 ImageHeader::ImageHeader(uint32_t image_begin,
                          uint32_t image_size,
diff --git a/runtime/intern_table.cc b/runtime/intern_table.cc
index a61a187..9c05d3c 100644
--- a/runtime/intern_table.cc
+++ b/runtime/intern_table.cc
@@ -63,9 +63,9 @@
     strong_interns_.VisitRoots(visitor);
   } else if ((flags & kVisitRootFlagNewRoots) != 0) {
     for (auto& root : new_strong_intern_roots_) {
-      mirror::String* old_ref = root.Read<kWithoutReadBarrier>();
+      ObjPtr<mirror::String> old_ref = root.Read<kWithoutReadBarrier>();
       root.VisitRoot(visitor, RootInfo(kRootInternedString));
-      mirror::String* new_ref = root.Read<kWithoutReadBarrier>();
+      ObjPtr<mirror::String> new_ref = root.Read<kWithoutReadBarrier>();
       if (new_ref != old_ref) {
         // The GC moved a root in the log. Need to search the strong interns and update the
         // corresponding object. This is slow, but luckily for us, this may only happen with a
@@ -86,17 +86,17 @@
   // Note: we deliberately don't visit the weak_interns_ table and the immutable image roots.
 }
 
-mirror::String* InternTable::LookupWeak(Thread* self, mirror::String* s) {
+ObjPtr<mirror::String> InternTable::LookupWeak(Thread* self, ObjPtr<mirror::String> s) {
   MutexLock mu(self, *Locks::intern_table_lock_);
   return LookupWeakLocked(s);
 }
 
-mirror::String* InternTable::LookupStrong(Thread* self, mirror::String* s) {
+ObjPtr<mirror::String> InternTable::LookupStrong(Thread* self, ObjPtr<mirror::String> s) {
   MutexLock mu(self, *Locks::intern_table_lock_);
   return LookupStrongLocked(s);
 }
 
-mirror::String* InternTable::LookupStrong(Thread* self,
+ObjPtr<mirror::String> InternTable::LookupStrong(Thread* self,
                                           uint32_t utf16_length,
                                           const char* utf8_data) {
   DCHECK_EQ(utf16_length, CountModifiedUtf8Chars(utf8_data));
@@ -107,11 +107,11 @@
   return strong_interns_.Find(string);
 }
 
-mirror::String* InternTable::LookupWeakLocked(mirror::String* s) {
+ObjPtr<mirror::String> InternTable::LookupWeakLocked(ObjPtr<mirror::String> s) {
   return weak_interns_.Find(s);
 }
 
-mirror::String* InternTable::LookupStrongLocked(mirror::String* s) {
+ObjPtr<mirror::String> InternTable::LookupStrongLocked(ObjPtr<mirror::String> s) {
   return strong_interns_.Find(s);
 }
 
@@ -121,7 +121,7 @@
   strong_interns_.AddNewTable();
 }
 
-mirror::String* InternTable::InsertStrong(mirror::String* s) {
+ObjPtr<mirror::String> InternTable::InsertStrong(ObjPtr<mirror::String> s) {
   Runtime* runtime = Runtime::Current();
   if (runtime->IsActiveTransaction()) {
     runtime->RecordStrongStringInsertion(s);
@@ -133,7 +133,7 @@
   return s;
 }
 
-mirror::String* InternTable::InsertWeak(mirror::String* s) {
+ObjPtr<mirror::String> InternTable::InsertWeak(ObjPtr<mirror::String> s) {
   Runtime* runtime = Runtime::Current();
   if (runtime->IsActiveTransaction()) {
     runtime->RecordWeakStringInsertion(s);
@@ -142,11 +142,11 @@
   return s;
 }
 
-void InternTable::RemoveStrong(mirror::String* s) {
+void InternTable::RemoveStrong(ObjPtr<mirror::String> s) {
   strong_interns_.Remove(s);
 }
 
-void InternTable::RemoveWeak(mirror::String* s) {
+void InternTable::RemoveWeak(ObjPtr<mirror::String> s) {
   Runtime* runtime = Runtime::Current();
   if (runtime->IsActiveTransaction()) {
     runtime->RecordWeakStringRemoval(s);
@@ -155,19 +155,22 @@
 }
 
 // Insert/remove methods used to undo changes made during an aborted transaction.
-mirror::String* InternTable::InsertStrongFromTransaction(mirror::String* s) {
+ObjPtr<mirror::String> InternTable::InsertStrongFromTransaction(ObjPtr<mirror::String> s) {
   DCHECK(!Runtime::Current()->IsActiveTransaction());
   return InsertStrong(s);
 }
-mirror::String* InternTable::InsertWeakFromTransaction(mirror::String* s) {
+
+ObjPtr<mirror::String> InternTable::InsertWeakFromTransaction(ObjPtr<mirror::String> s) {
   DCHECK(!Runtime::Current()->IsActiveTransaction());
   return InsertWeak(s);
 }
-void InternTable::RemoveStrongFromTransaction(mirror::String* s) {
+
+void InternTable::RemoveStrongFromTransaction(ObjPtr<mirror::String> s) {
   DCHECK(!Runtime::Current()->IsActiveTransaction());
   RemoveStrong(s);
 }
-void InternTable::RemoveWeakFromTransaction(mirror::String* s) {
+
+void InternTable::RemoveWeakFromTransaction(ObjPtr<mirror::String> s) {
   DCHECK(!Runtime::Current()->IsActiveTransaction());
   RemoveWeak(s);
 }
@@ -185,7 +188,6 @@
 }
 
 void InternTable::BroadcastForNewInterns() {
-  CHECK(kUseReadBarrier);
   Thread* self = Thread::Current();
   MutexLock mu(self, *Locks::intern_table_lock_);
   weak_intern_condition_.Broadcast(self);
@@ -196,14 +198,17 @@
   {
     ScopedThreadSuspension sts(self, kWaitingWeakGcRootRead);
     MutexLock mu(self, *Locks::intern_table_lock_);
-    while (weak_root_state_ == gc::kWeakRootStateNoReadsOrWrites) {
+    while ((!kUseReadBarrier && weak_root_state_ == gc::kWeakRootStateNoReadsOrWrites) ||
+           (kUseReadBarrier && !self->GetWeakRefAccessEnabled())) {
       weak_intern_condition_.Wait(self);
     }
   }
   Locks::intern_table_lock_->ExclusiveLock(self);
 }
 
-mirror::String* InternTable::Insert(mirror::String* s, bool is_strong, bool holding_locks) {
+ObjPtr<mirror::String> InternTable::Insert(ObjPtr<mirror::String> s,
+                                           bool is_strong,
+                                           bool holding_locks) {
   if (s == nullptr) {
     return nullptr;
   }
@@ -222,7 +227,7 @@
       }
     }
     // Check the strong table for a match.
-    mirror::String* strong = LookupStrongLocked(s);
+    ObjPtr<mirror::String> strong = LookupStrongLocked(s);
     if (strong != nullptr) {
       return strong;
     }
@@ -244,7 +249,7 @@
     CHECK(self->GetWeakRefAccessEnabled());
   }
   // There is no match in the strong table, check the weak table.
-  mirror::String* weak = LookupWeakLocked(s);
+  ObjPtr<mirror::String> weak = LookupWeakLocked(s);
   if (weak != nullptr) {
     if (is_strong) {
       // A match was found in the weak table. Promote to the strong table.
@@ -257,11 +262,11 @@
   return is_strong ? InsertStrong(s) : InsertWeak(s);
 }
 
-mirror::String* InternTable::InternStrong(int32_t utf16_length, const char* utf8_data) {
+ObjPtr<mirror::String> InternTable::InternStrong(int32_t utf16_length, const char* utf8_data) {
   DCHECK(utf8_data != nullptr);
   Thread* self = Thread::Current();
   // Try to avoid allocation.
-  mirror::String* s = LookupStrong(self, utf16_length, utf8_data);
+  ObjPtr<mirror::String> s = LookupStrong(self, utf16_length, utf8_data);
   if (s != nullptr) {
     return s;
   }
@@ -269,25 +274,25 @@
       self, utf16_length, utf8_data));
 }
 
-mirror::String* InternTable::InternStrong(const char* utf8_data) {
+ObjPtr<mirror::String> InternTable::InternStrong(const char* utf8_data) {
   DCHECK(utf8_data != nullptr);
   return InternStrong(mirror::String::AllocFromModifiedUtf8(Thread::Current(), utf8_data));
 }
 
-mirror::String* InternTable::InternStrongImageString(mirror::String* s) {
+ObjPtr<mirror::String> InternTable::InternStrongImageString(ObjPtr<mirror::String> s) {
   // May be holding the heap bitmap lock.
   return Insert(s, true, true);
 }
 
-mirror::String* InternTable::InternStrong(mirror::String* s) {
+ObjPtr<mirror::String> InternTable::InternStrong(ObjPtr<mirror::String> s) {
   return Insert(s, true, false);
 }
 
-mirror::String* InternTable::InternWeak(mirror::String* s) {
+ObjPtr<mirror::String> InternTable::InternWeak(ObjPtr<mirror::String> s) {
   return Insert(s, false, false);
 }
 
-bool InternTable::ContainsWeak(mirror::String* s) {
+bool InternTable::ContainsWeak(ObjPtr<mirror::String> s) {
   return LookupWeak(Thread::Current(), s) == s;
 }
 
@@ -314,7 +319,7 @@
   if (kIsDebugBuild) {
     Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
   }
-  return static_cast<size_t>(root.Read()->GetHashCode());
+  return static_cast<size_t>(root.Read<kWithoutReadBarrier>()->GetHashCode());
 }
 
 bool InternTable::StringHashEquals::operator()(const GcRoot<mirror::String>& a,
@@ -322,7 +327,7 @@
   if (kIsDebugBuild) {
     Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
   }
-  return a.Read()->Equals(b.Read());
+  return a.Read<kWithoutReadBarrier>()->Equals(b.Read<kWithoutReadBarrier>());
 }
 
 bool InternTable::StringHashEquals::operator()(const GcRoot<mirror::String>& a,
@@ -330,7 +335,7 @@
   if (kIsDebugBuild) {
     Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
   }
-  mirror::String* a_string = a.Read();
+  ObjPtr<mirror::String> a_string = a.Read<kWithoutReadBarrier>();
   uint32_t a_length = static_cast<uint32_t>(a_string->GetLength());
   if (a_length != b.GetUtf16Length()) {
     return false;
@@ -392,7 +397,7 @@
   return table_to_write->WriteToMemory(ptr);
 }
 
-void InternTable::Table::Remove(mirror::String* s) {
+void InternTable::Table::Remove(ObjPtr<mirror::String> s) {
   for (UnorderedSet& table : tables_) {
     auto it = table.Find(GcRoot<mirror::String>(s));
     if (it != table.end()) {
@@ -403,7 +408,7 @@
   LOG(FATAL) << "Attempting to remove non-interned string " << s->ToModifiedUtf8();
 }
 
-mirror::String* InternTable::Table::Find(mirror::String* s) {
+ObjPtr<mirror::String> InternTable::Table::Find(ObjPtr<mirror::String> s) {
   Locks::intern_table_lock_->AssertHeld(Thread::Current());
   for (UnorderedSet& table : tables_) {
     auto it = table.Find(GcRoot<mirror::String>(s));
@@ -414,7 +419,7 @@
   return nullptr;
 }
 
-mirror::String* InternTable::Table::Find(const Utf8String& string) {
+ObjPtr<mirror::String> InternTable::Table::Find(const Utf8String& string) {
   Locks::intern_table_lock_->AssertHeld(Thread::Current());
   for (UnorderedSet& table : tables_) {
     auto it = table.Find(string);
@@ -429,7 +434,7 @@
   tables_.push_back(UnorderedSet());
 }
 
-void InternTable::Table::Insert(mirror::String* s) {
+void InternTable::Table::Insert(ObjPtr<mirror::String> s) {
   // Always insert into the last table; the image tables come before it and we avoid inserting
   // into them to prevent dirty pages.
   DCHECK(!tables_.empty());
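The intern_table.cc hunks above replace raw mirror::String* with ObjPtr<mirror::String>, which reads like a raw pointer (nullptr comparisons, assignment from raw pointers) and is unwrapped with .Ptr() only where a raw pointer is still required. A self-contained toy sketch of just that usage shape; ToyObjPtr and the stand-in String type below are hypothetical and model only the interface visible in this diff, not the real ObjPtr:

    #include <cstddef>

    struct String {};  // stand-in for mirror::String

    // Toy wrapper modelling only the interface used above: implicit construction
    // from a raw pointer (intentionally non-explicit, matching how ObjPtr locals
    // are assigned from raw getters), nullptr comparison, and Ptr() to unwrap.
    template <typename T>
    class ToyObjPtr {
     public:
      ToyObjPtr(T* ptr = nullptr) : ptr_(ptr) {}
      T* Ptr() const { return ptr_; }
      bool operator==(std::nullptr_t) const { return ptr_ == nullptr; }
      bool operator!=(std::nullptr_t) const { return ptr_ != nullptr; }

     private:
      T* ptr_;
    };

    ToyObjPtr<String> PassThrough(ToyObjPtr<String> s) {
      if (s == nullptr) {
        return nullptr;
      }
      return s;  // the wrapper is passed and returned by value, as in the patch
    }
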
diff --git a/runtime/intern_table.h b/runtime/intern_table.h
index 30ff55d..f661d9f 100644
--- a/runtime/intern_table.h
+++ b/runtime/intern_table.h
@@ -57,43 +57,44 @@
   InternTable();
 
   // Interns a potentially new string in the 'strong' table. May cause thread suspension.
-  mirror::String* InternStrong(int32_t utf16_length, const char* utf8_data)
+  ObjPtr<mirror::String> InternStrong(int32_t utf16_length, const char* utf8_data)
       REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
 
   // Only used by image writer. Special version that may not cause thread suspension since the GC
   // cannot be running while we are doing image writing. May be called while holding a
   // lock since there will not be thread suspension.
-  mirror::String* InternStrongImageString(mirror::String* s)
+  ObjPtr<mirror::String> InternStrongImageString(ObjPtr<mirror::String> s)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Interns a potentially new string in the 'strong' table. May cause thread suspension.
-  mirror::String* InternStrong(const char* utf8_data) REQUIRES_SHARED(Locks::mutator_lock_)
+  ObjPtr<mirror::String> InternStrong(const char* utf8_data) REQUIRES_SHARED(Locks::mutator_lock_)
       REQUIRES(!Roles::uninterruptible_);
 
   // Interns a potentially new string in the 'strong' table. May cause thread suspension.
-  mirror::String* InternStrong(mirror::String* s) REQUIRES_SHARED(Locks::mutator_lock_)
+  ObjPtr<mirror::String> InternStrong(ObjPtr<mirror::String> s)
+      REQUIRES_SHARED(Locks::mutator_lock_)
       REQUIRES(!Roles::uninterruptible_);
 
   // Interns a potentially new string in the 'weak' table. May cause thread suspension.
-  mirror::String* InternWeak(mirror::String* s) REQUIRES_SHARED(Locks::mutator_lock_)
+  ObjPtr<mirror::String> InternWeak(ObjPtr<mirror::String> s) REQUIRES_SHARED(Locks::mutator_lock_)
       REQUIRES(!Roles::uninterruptible_);
 
   void SweepInternTableWeaks(IsMarkedVisitor* visitor) REQUIRES_SHARED(Locks::mutator_lock_)
       REQUIRES(!Locks::intern_table_lock_);
 
-  bool ContainsWeak(mirror::String* s) REQUIRES_SHARED(Locks::mutator_lock_)
+  bool ContainsWeak(ObjPtr<mirror::String> s) REQUIRES_SHARED(Locks::mutator_lock_)
       REQUIRES(!Locks::intern_table_lock_);
 
   // Lookup a strong intern, returns null if not found.
-  mirror::String* LookupStrong(Thread* self, mirror::String* s)
+  ObjPtr<mirror::String> LookupStrong(Thread* self, ObjPtr<mirror::String> s)
       REQUIRES(!Locks::intern_table_lock_)
       REQUIRES_SHARED(Locks::mutator_lock_);
-  mirror::String* LookupStrong(Thread* self, uint32_t utf16_length, const char* utf8_data)
+  ObjPtr<mirror::String> LookupStrong(Thread* self, uint32_t utf16_length, const char* utf8_data)
       REQUIRES(!Locks::intern_table_lock_)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Lookup a weak intern, returns null if not found.
-  mirror::String* LookupWeak(Thread* self, mirror::String* s)
+  ObjPtr<mirror::String> LookupWeak(Thread* self, ObjPtr<mirror::String> s)
       REQUIRES(!Locks::intern_table_lock_)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
@@ -111,7 +112,7 @@
 
   void DumpForSigQuit(std::ostream& os) const REQUIRES(!Locks::intern_table_lock_);
 
-  void BroadcastForNewInterns() REQUIRES_SHARED(Locks::mutator_lock_);
+  void BroadcastForNewInterns();
 
   // Adds all of the resolved image strings from the image spaces into the intern table. The
   // advantage of doing this is preventing expensive DexFile::FindStringId calls. Sets
@@ -181,13 +182,13 @@
   class Table {
    public:
     Table();
-    mirror::String* Find(mirror::String* s) REQUIRES_SHARED(Locks::mutator_lock_)
+    ObjPtr<mirror::String> Find(ObjPtr<mirror::String> s) REQUIRES_SHARED(Locks::mutator_lock_)
         REQUIRES(Locks::intern_table_lock_);
-    mirror::String* Find(const Utf8String& string) REQUIRES_SHARED(Locks::mutator_lock_)
+    ObjPtr<mirror::String> Find(const Utf8String& string) REQUIRES_SHARED(Locks::mutator_lock_)
         REQUIRES(Locks::intern_table_lock_);
-    void Insert(mirror::String* s) REQUIRES_SHARED(Locks::mutator_lock_)
+    void Insert(ObjPtr<mirror::String> s) REQUIRES_SHARED(Locks::mutator_lock_)
         REQUIRES(Locks::intern_table_lock_);
-    void Remove(mirror::String* s)
+    void Remove(ObjPtr<mirror::String> s)
         REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::intern_table_lock_);
     void VisitRoots(RootVisitor* visitor)
         REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::intern_table_lock_);
@@ -221,30 +222,30 @@
   // Insert if non-null, otherwise return null. Must be called holding the mutator lock.
   // If holding_locks is true, then we may also hold other locks and we require that the GC is
   // not running, since it is not safe to wait while holding locks.
-  mirror::String* Insert(mirror::String* s, bool is_strong, bool holding_locks)
+  ObjPtr<mirror::String> Insert(ObjPtr<mirror::String> s, bool is_strong, bool holding_locks)
       REQUIRES(!Locks::intern_table_lock_) REQUIRES_SHARED(Locks::mutator_lock_);
 
-  mirror::String* LookupStrongLocked(mirror::String* s)
+  ObjPtr<mirror::String> LookupStrongLocked(ObjPtr<mirror::String> s)
       REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::intern_table_lock_);
-  mirror::String* LookupWeakLocked(mirror::String* s)
+  ObjPtr<mirror::String> LookupWeakLocked(ObjPtr<mirror::String> s)
       REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::intern_table_lock_);
-  mirror::String* InsertStrong(mirror::String* s)
+  ObjPtr<mirror::String> InsertStrong(ObjPtr<mirror::String> s)
       REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::intern_table_lock_);
-  mirror::String* InsertWeak(mirror::String* s)
+  ObjPtr<mirror::String> InsertWeak(ObjPtr<mirror::String> s)
       REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::intern_table_lock_);
-  void RemoveStrong(mirror::String* s)
+  void RemoveStrong(ObjPtr<mirror::String> s)
       REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::intern_table_lock_);
-  void RemoveWeak(mirror::String* s)
+  void RemoveWeak(ObjPtr<mirror::String> s)
       REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::intern_table_lock_);
 
   // Transaction rollback access.
-  mirror::String* InsertStrongFromTransaction(mirror::String* s)
+  ObjPtr<mirror::String> InsertStrongFromTransaction(ObjPtr<mirror::String> s)
       REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::intern_table_lock_);
-  mirror::String* InsertWeakFromTransaction(mirror::String* s)
+  ObjPtr<mirror::String> InsertWeakFromTransaction(ObjPtr<mirror::String> s)
       REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::intern_table_lock_);
-  void RemoveStrongFromTransaction(mirror::String* s)
+  void RemoveStrongFromTransaction(ObjPtr<mirror::String> s)
       REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::intern_table_lock_);
-  void RemoveWeakFromTransaction(mirror::String* s)
+  void RemoveWeakFromTransaction(ObjPtr<mirror::String> s)
       REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::intern_table_lock_);
 
   size_t AddTableFromMemoryLocked(const uint8_t* ptr)
diff --git a/runtime/intern_table_test.cc b/runtime/intern_table_test.cc
index 74cec57..b91d946 100644
--- a/runtime/intern_table_test.cc
+++ b/runtime/intern_table_test.cc
@@ -193,22 +193,22 @@
   ASSERT_NE(foo.Get(), bar.Get());
   ASSERT_NE(foo.Get(), foobar.Get());
   ASSERT_NE(bar.Get(), foobar.Get());
-  mirror::String* lookup_foo = intern_table.LookupStrong(soa.Self(), 3, "foo");
-  EXPECT_EQ(lookup_foo, foo.Get());
-  mirror::String* lookup_bar = intern_table.LookupStrong(soa.Self(), 3, "bar");
-  EXPECT_EQ(lookup_bar, bar.Get());
-  mirror::String* lookup_foobar = intern_table.LookupStrong(soa.Self(), 6, "foobar");
-  EXPECT_EQ(lookup_foobar, foobar.Get());
-  mirror::String* lookup_foox = intern_table.LookupStrong(soa.Self(), 4, "foox");
+  ObjPtr<mirror::String> lookup_foo = intern_table.LookupStrong(soa.Self(), 3, "foo");
+  EXPECT_OBJ_PTR_EQ(lookup_foo, foo.Get());
+  ObjPtr<mirror::String> lookup_bar = intern_table.LookupStrong(soa.Self(), 3, "bar");
+  EXPECT_OBJ_PTR_EQ(lookup_bar, bar.Get());
+  ObjPtr<mirror::String> lookup_foobar = intern_table.LookupStrong(soa.Self(), 6, "foobar");
+  EXPECT_OBJ_PTR_EQ(lookup_foobar, foobar.Get());
+  ObjPtr<mirror::String> lookup_foox = intern_table.LookupStrong(soa.Self(), 4, "foox");
   EXPECT_TRUE(lookup_foox == nullptr);
-  mirror::String* lookup_fooba = intern_table.LookupStrong(soa.Self(), 5, "fooba");
+  ObjPtr<mirror::String> lookup_fooba = intern_table.LookupStrong(soa.Self(), 5, "fooba");
   EXPECT_TRUE(lookup_fooba == nullptr);
-  mirror::String* lookup_foobaR = intern_table.LookupStrong(soa.Self(), 6, "foobaR");
+  ObjPtr<mirror::String> lookup_foobaR = intern_table.LookupStrong(soa.Self(), 6, "foobaR");
   EXPECT_TRUE(lookup_foobaR == nullptr);
   // Try a hash conflict.
   ASSERT_EQ(ComputeUtf16HashFromModifiedUtf8("foobar", 6),
             ComputeUtf16HashFromModifiedUtf8("foobbS", 6));
-  mirror::String* lookup_foobbS = intern_table.LookupStrong(soa.Self(), 6, "foobbS");
+  ObjPtr<mirror::String> lookup_foobbS = intern_table.LookupStrong(soa.Self(), 6, "foobbS");
   EXPECT_TRUE(lookup_foobbS == nullptr);
 }
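
The collision pair in the test above is not arbitrary: assuming ComputeUtf16HashFromModifiedUtf8 computes the usual 31-multiplier string hash over ASCII input (an assumption of this sketch), replacing 'a' with 'b' at index 4 adds 31 to the hash while replacing 'r' with 'S' at index 5 subtracts 31, so the two changes cancel and the hashes are equal. A stand-alone check:

    #include <cassert>
    #include <cstdint>
    #include <string>

    // Java-style string hash: hash = hash * 31 + c for each code unit.
    static uint32_t Hash31(const std::string& s) {
      uint32_t hash = 0;
      for (char c : s) {
        hash = hash * 31u + static_cast<uint8_t>(c);
      }
      return hash;
    }

    int main() {
      // ('b' - 'a') * 31 == 31 and ('S' - 'r') == -31, so the hashes collide.
      assert(Hash31("foobar") == Hash31("foobbS"));
      return 0;
    }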
 
diff --git a/runtime/interpreter/interpreter.cc b/runtime/interpreter/interpreter.cc
index 2e00770..a32c800 100644
--- a/runtime/interpreter/interpreter.cc
+++ b/runtime/interpreter/interpreter.cc
@@ -35,8 +35,17 @@
 namespace art {
 namespace interpreter {
 
-static void InterpreterJni(Thread* self, ArtMethod* method, const StringPiece& shorty,
-                           Object* receiver, uint32_t* args, JValue* result)
+ALWAYS_INLINE static ObjPtr<mirror::Object> ObjArg(uint32_t arg)
+    REQUIRES_SHARED(Locks::mutator_lock_) {
+  return ObjPtr<mirror::Object>(reinterpret_cast<mirror::Object*>(arg));
+}
+
+static void InterpreterJni(Thread* self,
+                           ArtMethod* method,
+                           const StringPiece& shorty,
+                           ObjPtr<mirror::Object> receiver,
+                           uint32_t* args,
+                           JValue* result)
     REQUIRES_SHARED(Locks::mutator_lock_) {
   // TODO: The following enters JNI code using a typedef-ed function rather than the JNI compiler,
   //       it should be removed and JNI compiled stubs used instead.
@@ -52,7 +61,7 @@
         ScopedThreadStateChange tsc(self, kNative);
         jresult = fn(soa.Env(), klass.get());
       }
-      result->SetL(soa.Decode<Object>(jresult));
+      result->SetL(soa.Decode<mirror::Object>(jresult));
     } else if (shorty == "V") {
       typedef void (fntype)(JNIEnv*, jclass);
       fntype* const fn = reinterpret_cast<fntype*>(method->GetEntryPointFromJni());
@@ -87,14 +96,13 @@
       ScopedLocalRef<jclass> klass(soa.Env(),
                                    soa.AddLocalReference<jclass>(method->GetDeclaringClass()));
       ScopedLocalRef<jobject> arg0(soa.Env(),
-                                   soa.AddLocalReference<jobject>(
-                                       reinterpret_cast<Object*>(args[0])));
+                                   soa.AddLocalReference<jobject>(ObjArg(args[0])));
       jobject jresult;
       {
         ScopedThreadStateChange tsc(self, kNative);
         jresult = fn(soa.Env(), klass.get(), arg0.get());
       }
-      result->SetL(soa.Decode<Object>(jresult));
+      result->SetL(soa.Decode<mirror::Object>(jresult));
     } else if (shorty == "IIZ") {
       typedef jint (fntype)(JNIEnv*, jclass, jint, jboolean);
       fntype* const fn = reinterpret_cast<fntype*>(method->GetEntryPointFromJni());
@@ -109,8 +117,7 @@
       ScopedLocalRef<jclass> klass(soa.Env(),
                                    soa.AddLocalReference<jclass>(method->GetDeclaringClass()));
       ScopedLocalRef<jobject> arg0(soa.Env(),
-                                   soa.AddLocalReference<jobject>(
-                                       reinterpret_cast<Object*>(args[0])));
+                                   soa.AddLocalReference<jobject>(ObjArg(args[0])));
       ScopedThreadStateChange tsc(self, kNative);
       result->SetI(fn(soa.Env(), klass.get(), arg0.get(), args[1]));
     } else if (shorty == "SIZ") {
@@ -134,11 +141,9 @@
       ScopedLocalRef<jclass> klass(soa.Env(),
                                    soa.AddLocalReference<jclass>(method->GetDeclaringClass()));
       ScopedLocalRef<jobject> arg0(soa.Env(),
-                                   soa.AddLocalReference<jobject>(
-                                       reinterpret_cast<Object*>(args[0])));
+                                   soa.AddLocalReference<jobject>(ObjArg(args[0])));
       ScopedLocalRef<jobject> arg1(soa.Env(),
-                                   soa.AddLocalReference<jobject>(
-                                       reinterpret_cast<Object*>(args[1])));
+                                   soa.AddLocalReference<jobject>(ObjArg(args[1])));
       ScopedThreadStateChange tsc(self, kNative);
       result->SetZ(fn(soa.Env(), klass.get(), arg0.get(), arg1.get()));
     } else if (shorty == "ZILL") {
@@ -147,11 +152,9 @@
       ScopedLocalRef<jclass> klass(soa.Env(),
                                    soa.AddLocalReference<jclass>(method->GetDeclaringClass()));
       ScopedLocalRef<jobject> arg1(soa.Env(),
-                                   soa.AddLocalReference<jobject>(
-                                       reinterpret_cast<Object*>(args[1])));
+                                   soa.AddLocalReference<jobject>(ObjArg(args[1])));
       ScopedLocalRef<jobject> arg2(soa.Env(),
-                                   soa.AddLocalReference<jobject>(
-                                       reinterpret_cast<Object*>(args[2])));
+                                   soa.AddLocalReference<jobject>(ObjArg(args[2])));
       ScopedThreadStateChange tsc(self, kNative);
       result->SetZ(fn(soa.Env(), klass.get(), args[0], arg1.get(), arg2.get()));
     } else if (shorty == "VILII") {
@@ -160,8 +163,7 @@
       ScopedLocalRef<jclass> klass(soa.Env(),
                                    soa.AddLocalReference<jclass>(method->GetDeclaringClass()));
       ScopedLocalRef<jobject> arg1(soa.Env(),
-                                   soa.AddLocalReference<jobject>(
-                                       reinterpret_cast<Object*>(args[1])));
+                                   soa.AddLocalReference<jobject>(ObjArg(args[1])));
       ScopedThreadStateChange tsc(self, kNative);
       fn(soa.Env(), klass.get(), args[0], arg1.get(), args[2], args[3]);
     } else if (shorty == "VLILII") {
@@ -170,11 +172,9 @@
       ScopedLocalRef<jclass> klass(soa.Env(),
                                    soa.AddLocalReference<jclass>(method->GetDeclaringClass()));
       ScopedLocalRef<jobject> arg0(soa.Env(),
-                                   soa.AddLocalReference<jobject>(
-                                       reinterpret_cast<Object*>(args[0])));
+                                   soa.AddLocalReference<jobject>(ObjArg(args[0])));
       ScopedLocalRef<jobject> arg2(soa.Env(),
-                                   soa.AddLocalReference<jobject>(
-                                       reinterpret_cast<Object*>(args[2])));
+                                   soa.AddLocalReference<jobject>(ObjArg(args[2])));
       ScopedThreadStateChange tsc(self, kNative);
       fn(soa.Env(), klass.get(), arg0.get(), args[1], arg2.get(), args[3], args[4]);
     } else {
@@ -192,7 +192,7 @@
         ScopedThreadStateChange tsc(self, kNative);
         jresult = fn(soa.Env(), rcvr.get());
       }
-      result->SetL(soa.Decode<Object>(jresult));
+      result->SetL(soa.Decode<mirror::Object>(jresult));
     } else if (shorty == "V") {
       typedef void (fntype)(JNIEnv*, jobject);
       fntype* const fn = reinterpret_cast<fntype*>(method->GetEntryPointFromJni());
@@ -206,14 +206,13 @@
       ScopedLocalRef<jobject> rcvr(soa.Env(),
                                    soa.AddLocalReference<jobject>(receiver));
       ScopedLocalRef<jobject> arg0(soa.Env(),
-                                   soa.AddLocalReference<jobject>(
-                                       reinterpret_cast<Object*>(args[0])));
+                                   soa.AddLocalReference<jobject>(ObjArg(args[0])));
       jobject jresult;
       {
         ScopedThreadStateChange tsc(self, kNative);
         jresult = fn(soa.Env(), rcvr.get(), arg0.get());
       }
-      result->SetL(soa.Decode<Object>(jresult));
+      result->SetL(soa.Decode<mirror::Object>(jresult));
       ScopedThreadStateChange tsc(self, kNative);
     } else if (shorty == "III") {
       typedef jint (fntype)(JNIEnv*, jobject, jint, jint);
@@ -312,7 +311,7 @@
           } else {
             // Mterp didn't like that instruction.  Single-step it with the reference interpreter.
             result_register = ExecuteSwitchImpl<false, false>(self, code_item, shadow_frame,
-                                                               result_register, true);
+                                                              result_register, true);
             if (shadow_frame.GetDexPC() == DexFile::kDexNoIndex) {
               // Single-stepped a return or an exception not handled locally.  Return to caller.
               return result_register;
@@ -354,8 +353,11 @@
   }
 }
 
-void EnterInterpreterFromInvoke(Thread* self, ArtMethod* method, Object* receiver,
-                                uint32_t* args, JValue* result,
+void EnterInterpreterFromInvoke(Thread* self,
+                                ArtMethod* method,
+                                ObjPtr<mirror::Object> receiver,
+                                uint32_t* args,
+                                JValue* result,
                                 bool stay_in_interpreter) {
   DCHECK_EQ(self, Thread::Current());
   bool implicit_check = !Runtime::Current()->ExplicitStackOverflowChecks();
@@ -393,7 +395,7 @@
   size_t cur_reg = num_regs - num_ins;
   if (!method->IsStatic()) {
     CHECK(receiver != nullptr);
-    shadow_frame->SetVRegReference(cur_reg, receiver);
+    shadow_frame->SetVRegReference(cur_reg, receiver.Ptr());
     ++cur_reg;
   }
   uint32_t shorty_len = 0;
@@ -402,8 +404,9 @@
     DCHECK_LT(shorty_pos + 1, shorty_len);
     switch (shorty[shorty_pos + 1]) {
       case 'L': {
-        Object* o = reinterpret_cast<StackReference<Object>*>(&args[arg_pos])->AsMirrorPtr();
-        shadow_frame->SetVRegReference(cur_reg, o);
+        ObjPtr<mirror::Object> o =
+            reinterpret_cast<StackReference<mirror::Object>*>(&args[arg_pos])->AsMirrorPtr();
+        shadow_frame->SetVRegReference(cur_reg, o.Ptr());
         break;
       }
       case 'J': case 'D': {
@@ -442,7 +445,7 @@
     // references pointers due to moving GC.
     args = shadow_frame->GetVRegArgs(method->IsStatic() ? 0 : 1);
     if (!Runtime::Current()->IsStarted()) {
-      UnstartedRuntime::Jni(self, method, receiver, args, result);
+      UnstartedRuntime::Jni(self, method, receiver.Ptr(), args, result);
     } else {
       InterpreterJni(self, method, shorty, receiver, args, result);
     }
@@ -539,7 +542,7 @@
         if (kIsDebugBuild) {
           ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
           // This is a suspend point. But it's ok since value has been set into shadow_frame.
-          mirror::Class* klass = class_linker->ResolveType(
+          ObjPtr<mirror::Class> klass = class_linker->ResolveType(
               instr->VRegB_21c(), shadow_frame->GetMethod());
           DCHECK(klass->IsStringClass());
         }
@@ -582,8 +585,10 @@
   return Execute(self, code_item, *shadow_frame, JValue());
 }
 
-void ArtInterpreterToInterpreterBridge(Thread* self, const DexFile::CodeItem* code_item,
-                                       ShadowFrame* shadow_frame, JValue* result) {
+void ArtInterpreterToInterpreterBridge(Thread* self,
+                                       const DexFile::CodeItem* code_item,
+                                       ShadowFrame* shadow_frame,
+                                       JValue* result) {
   bool implicit_check = !Runtime::Current()->ExplicitStackOverflowChecks();
   if (UNLIKELY(__builtin_frame_address(0) < self->GetStackEndForInterpreter(implicit_check))) {
     ThrowStackOverflowError(self);
@@ -595,10 +600,10 @@
   // Ensure static methods are initialized.
   const bool is_static = method->IsStatic();
   if (is_static) {
-    mirror::Class* declaring_class = method->GetDeclaringClass();
+    ObjPtr<mirror::Class> declaring_class = method->GetDeclaringClass();
     if (UNLIKELY(!declaring_class->IsInitialized())) {
       StackHandleScope<1> hs(self);
-      HandleWrapper<Class> h_declaring_class(hs.NewHandleWrapper(&declaring_class));
+      HandleWrapperObjPtr<mirror::Class> h_declaring_class(hs.NewHandleWrapper(&declaring_class));
       if (UNLIKELY(!Runtime::Current()->GetClassLinker()->EnsureInitialized(
           self, h_declaring_class, true, true))) {
         DCHECK(self->IsExceptionPending());
@@ -615,9 +620,9 @@
     // We don't expect to be asked to interpret native code (which is entered via a JNI compiler
     // generated stub) except during testing and image writing.
     CHECK(!Runtime::Current()->IsStarted());
-    Object* receiver = is_static ? nullptr : shadow_frame->GetVRegReference(0);
+    ObjPtr<mirror::Object> receiver = is_static ? nullptr : shadow_frame->GetVRegReference(0);
     uint32_t* args = shadow_frame->GetVRegArgs(is_static ? 0 : 1);
-    UnstartedRuntime::Jni(self, shadow_frame->GetMethod(), receiver, args, result);
+    UnstartedRuntime::Jni(self, shadow_frame->GetMethod(), receiver.Ptr(), args, result);
   }
 
   self->PopShadowFrame();
diff --git a/runtime/interpreter/interpreter.h b/runtime/interpreter/interpreter.h
index 38ce851..65cfade 100644
--- a/runtime/interpreter/interpreter.h
+++ b/runtime/interpreter/interpreter.h
@@ -19,6 +19,7 @@
 
 #include "base/mutex.h"
 #include "dex_file.h"
+#include "obj_ptr.h"
 
 namespace art {
 namespace mirror {
@@ -36,7 +37,9 @@
 // The optional stay_in_interpreter parameter (false by default) can be used by clients to
 // explicitly force interpretation in the remaining path that implements method invocation.
 extern void EnterInterpreterFromInvoke(Thread* self, ArtMethod* method,
-                                       mirror::Object* receiver, uint32_t* args, JValue* result,
+                                       ObjPtr<mirror::Object> receiver,
+                                       uint32_t* args,
+                                       JValue* result,
                                        bool stay_in_interpreter = false)
     REQUIRES_SHARED(Locks::mutator_lock_);
 
diff --git a/runtime/interpreter/interpreter_common.cc b/runtime/interpreter/interpreter_common.cc
index a0d712e..8c63a9e 100644
--- a/runtime/interpreter/interpreter_common.cc
+++ b/runtime/interpreter/interpreter_common.cc
@@ -27,6 +27,7 @@
 #include "method_handles-inl.h"
 #include "mirror/array-inl.h"
 #include "mirror/class.h"
+#include "mirror/emulated_stack_frame.h"
 #include "mirror/method_handle_impl.h"
 #include "reflection.h"
 #include "reflection-inl.h"
@@ -42,6 +43,60 @@
   ThrowNullPointerExceptionFromDexPC();
 }
 
+template<Primitive::Type field_type>
+static ALWAYS_INLINE void DoFieldGetCommon(Thread* self,
+                                           const ShadowFrame& shadow_frame,
+                                           ObjPtr<mirror::Object>& obj,
+                                           ArtField* field,
+                                           JValue* result)
+    REQUIRES_SHARED(Locks::mutator_lock_) {
+  field->GetDeclaringClass()->AssertInitializedOrInitializingInThread(self);
+
+  // Report this field access to instrumentation if needed.
+  instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
+  if (UNLIKELY(instrumentation->HasFieldReadListeners())) {
+    StackHandleScope<1> hs(self);
+    // Wrap in handle wrapper in case the listener does thread suspension.
+    HandleWrapperObjPtr<mirror::Object> h(hs.NewHandleWrapper(&obj));
+    ObjPtr<mirror::Object> this_object;
+    if (!field->IsStatic()) {
+      this_object = obj;
+    }
+    instrumentation->FieldReadEvent(self,
+                                    this_object.Ptr(),
+                                    shadow_frame.GetMethod(),
+                                    shadow_frame.GetDexPC(),
+                                    field);
+  }
+
+  switch (field_type) {
+    case Primitive::kPrimBoolean:
+      result->SetZ(field->GetBoolean(obj));
+      break;
+    case Primitive::kPrimByte:
+      result->SetB(field->GetByte(obj));
+      break;
+    case Primitive::kPrimChar:
+      result->SetC(field->GetChar(obj));
+      break;
+    case Primitive::kPrimShort:
+      result->SetS(field->GetShort(obj));
+      break;
+    case Primitive::kPrimInt:
+      result->SetI(field->GetInt(obj));
+      break;
+    case Primitive::kPrimLong:
+      result->SetJ(field->GetLong(obj));
+      break;
+    case Primitive::kPrimNot:
+      result->SetL(field->GetObject(obj));
+      break;
+    default:
+      LOG(FATAL) << "Unreachable: " << field_type;
+      UNREACHABLE();
+  }
+}
+
 template<FindFieldType find_type, Primitive::Type field_type, bool do_access_check>
 bool DoFieldGet(Thread* self, ShadowFrame& shadow_frame, const Instruction* inst,
                 uint16_t inst_data) {
@@ -54,7 +109,7 @@
     CHECK(self->IsExceptionPending());
     return false;
   }
-  ObjPtr<Object> obj;
+  ObjPtr<mirror::Object> obj;
   if (is_static) {
     obj = f->GetDeclaringClass();
   } else {
@@ -64,45 +119,31 @@
       return false;
     }
   }
-  f->GetDeclaringClass()->AssertInitializedOrInitializingInThread(self);
-  // Report this field access to instrumentation if needed.
-  instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
-  if (UNLIKELY(instrumentation->HasFieldReadListeners())) {
-    StackHandleScope<1> hs(self);
-    // Wrap in handle wrapper in case the listener does thread suspension.
-    HandleWrapperObjPtr<mirror::Object> h(hs.NewHandleWrapper(&obj));
-    ObjPtr<Object> this_object;
-    if (!f->IsStatic()) {
-      this_object = obj;
-    }
-    instrumentation->FieldReadEvent(self,
-                                    this_object.Ptr(),
-                                    shadow_frame.GetMethod(),
-                                    shadow_frame.GetDexPC(),
-                                    f);
-  }
+
+  JValue result;
+  DoFieldGetCommon<field_type>(self, shadow_frame, obj, f, &result);
   uint32_t vregA = is_static ? inst->VRegA_21c(inst_data) : inst->VRegA_22c(inst_data);
   switch (field_type) {
     case Primitive::kPrimBoolean:
-      shadow_frame.SetVReg(vregA, f->GetBoolean(obj));
+      shadow_frame.SetVReg(vregA, result.GetZ());
       break;
     case Primitive::kPrimByte:
-      shadow_frame.SetVReg(vregA, f->GetByte(obj));
+      shadow_frame.SetVReg(vregA, result.GetB());
       break;
     case Primitive::kPrimChar:
-      shadow_frame.SetVReg(vregA, f->GetChar(obj));
+      shadow_frame.SetVReg(vregA, result.GetC());
       break;
     case Primitive::kPrimShort:
-      shadow_frame.SetVReg(vregA, f->GetShort(obj));
+      shadow_frame.SetVReg(vregA, result.GetS());
       break;
     case Primitive::kPrimInt:
-      shadow_frame.SetVReg(vregA, f->GetInt(obj));
+      shadow_frame.SetVReg(vregA, result.GetI());
       break;
     case Primitive::kPrimLong:
-      shadow_frame.SetVRegLong(vregA, f->GetLong(obj));
+      shadow_frame.SetVRegLong(vregA, result.GetJ());
       break;
     case Primitive::kPrimNot:
-      shadow_frame.SetVRegReference(vregA, f->GetObject(obj).Ptr());
+      shadow_frame.SetVRegReference(vregA, result.GetL());
       break;
     default:
       LOG(FATAL) << "Unreachable: " << field_type;
@@ -143,11 +184,53 @@
 #undef EXPLICIT_DO_FIELD_GET_ALL_TEMPLATE_DECL
 #undef EXPLICIT_DO_FIELD_GET_TEMPLATE_DECL
 
+// Helper for getters in invoke-polymorphic.
+inline static void DoFieldGetForInvokePolymorphic(Thread* self,
+                                                  const ShadowFrame& shadow_frame,
+                                                  ObjPtr<mirror::Object>& obj,
+                                                  ArtField* field,
+                                                  Primitive::Type field_type,
+                                                  JValue* result)
+    REQUIRES_SHARED(Locks::mutator_lock_) {
+  switch (field_type) {
+    case Primitive::kPrimBoolean:
+      DoFieldGetCommon<Primitive::kPrimBoolean>(self, shadow_frame, obj, field, result);
+      break;
+    case Primitive::kPrimByte:
+      DoFieldGetCommon<Primitive::kPrimByte>(self, shadow_frame, obj, field, result);
+      break;
+    case Primitive::kPrimChar:
+      DoFieldGetCommon<Primitive::kPrimChar>(self, shadow_frame, obj, field, result);
+      break;
+    case Primitive::kPrimShort:
+      DoFieldGetCommon<Primitive::kPrimShort>(self, shadow_frame, obj, field, result);
+      break;
+    case Primitive::kPrimInt:
+      DoFieldGetCommon<Primitive::kPrimInt>(self, shadow_frame, obj, field, result);
+      break;
+    case Primitive::kPrimLong:
+      DoFieldGetCommon<Primitive::kPrimLong>(self, shadow_frame, obj, field, result);
+      break;
+    case Primitive::kPrimFloat:
+      DoFieldGetCommon<Primitive::kPrimInt>(self, shadow_frame, obj, field, result);
+      break;
+    case Primitive::kPrimDouble:
+      DoFieldGetCommon<Primitive::kPrimLong>(self, shadow_frame, obj, field, result);
+      break;
+    case Primitive::kPrimNot:
+      DoFieldGetCommon<Primitive::kPrimNot>(self, shadow_frame, obj, field, result);
+      break;
+    case Primitive::kPrimVoid:
+      LOG(FATAL) << "Unreachable: " << field_type;
+      UNREACHABLE();
+  }
+}
+
 // Handles iget-quick, iget-wide-quick and iget-object-quick instructions.
 // Returns true on success, otherwise throws an exception and returns false.
 template<Primitive::Type field_type>
 bool DoIGetQuick(ShadowFrame& shadow_frame, const Instruction* inst, uint16_t inst_data) {
-  Object* obj = shadow_frame.GetVRegReference(inst->VRegB_22c(inst_data));
+  ObjPtr<mirror::Object> obj = shadow_frame.GetVRegReference(inst->VRegB_22c(inst_data));
   if (UNLIKELY(obj == nullptr)) {
     // We lost the reference to the field index so we cannot get a more
     // precised exception message.
@@ -163,8 +246,14 @@
                                                         field_offset.Uint32Value());
     DCHECK(f != nullptr);
     DCHECK(!f->IsStatic());
-    instrumentation->FieldReadEvent(Thread::Current(), obj, shadow_frame.GetMethod(),
-                                    shadow_frame.GetDexPC(), f);
+    StackHandleScope<1> hs(Thread::Current());
+    // Save obj in case the instrumentation event causes thread suspension.
+    HandleWrapperObjPtr<mirror::Object> h = hs.NewHandleWrapper(&obj);
+    instrumentation->FieldReadEvent(Thread::Current(),
+                                    obj.Ptr(),
+                                    shadow_frame.GetMethod(),
+                                    shadow_frame.GetDexPC(),
+                                    f);
   }
   // Note: iget-x-quick instructions are only for non-volatile fields.
   const uint32_t vregA = inst->VRegA_22c(inst_data);
@@ -211,6 +300,42 @@
 EXPLICIT_DO_IGET_QUICK_TEMPLATE_DECL(Primitive::kPrimNot);      // iget-object-quick.
 #undef EXPLICIT_DO_IGET_QUICK_TEMPLATE_DECL
 
+static JValue GetFieldValue(const ShadowFrame& shadow_frame,
+                            Primitive::Type field_type,
+                            uint32_t vreg)
+    REQUIRES_SHARED(Locks::mutator_lock_) {
+  JValue field_value;
+  switch (field_type) {
+    case Primitive::kPrimBoolean:
+      field_value.SetZ(static_cast<uint8_t>(shadow_frame.GetVReg(vreg)));
+      break;
+    case Primitive::kPrimByte:
+      field_value.SetB(static_cast<int8_t>(shadow_frame.GetVReg(vreg)));
+      break;
+    case Primitive::kPrimChar:
+      field_value.SetC(static_cast<uint16_t>(shadow_frame.GetVReg(vreg)));
+      break;
+    case Primitive::kPrimShort:
+      field_value.SetS(static_cast<int16_t>(shadow_frame.GetVReg(vreg)));
+      break;
+    case Primitive::kPrimInt:
+    case Primitive::kPrimFloat:
+      field_value.SetI(shadow_frame.GetVReg(vreg));
+      break;
+    case Primitive::kPrimLong:
+    case Primitive::kPrimDouble:
+      field_value.SetJ(shadow_frame.GetVRegLong(vreg));
+      break;
+    case Primitive::kPrimNot:
+      field_value.SetL(shadow_frame.GetVRegReference(vreg));
+      break;
+    case Primitive::kPrimVoid:
+      LOG(FATAL) << "Unreachable: " << field_type;
+      UNREACHABLE();
+  }
+  return field_value;
+}
+
 template<Primitive::Type field_type>
 static JValue GetFieldValue(const ShadowFrame& shadow_frame, uint32_t vreg)
     REQUIRES_SHARED(Locks::mutator_lock_) {
@@ -244,32 +369,15 @@
   return field_value;
 }
 
-template<FindFieldType find_type, Primitive::Type field_type, bool do_access_check,
-         bool transaction_active>
-bool DoFieldPut(Thread* self, const ShadowFrame& shadow_frame, const Instruction* inst,
-                uint16_t inst_data) {
-  bool do_assignability_check = do_access_check;
-  bool is_static = (find_type == StaticObjectWrite) || (find_type == StaticPrimitiveWrite);
-  uint32_t field_idx = is_static ? inst->VRegB_21c() : inst->VRegC_22c();
-  ArtField* f =
-      FindFieldFromCode<find_type, do_access_check>(field_idx, shadow_frame.GetMethod(), self,
-                                                    Primitive::ComponentSize(field_type));
-  if (UNLIKELY(f == nullptr)) {
-    CHECK(self->IsExceptionPending());
-    return false;
-  }
-  ObjPtr<Object> obj;
-  if (is_static) {
-    obj = f->GetDeclaringClass();
-  } else {
-    obj = shadow_frame.GetVRegReference(inst->VRegB_22c(inst_data));
-    if (UNLIKELY(obj == nullptr)) {
-      ThrowNullPointerExceptionForFieldAccess(f, false);
-      return false;
-    }
-  }
+template<Primitive::Type field_type, bool do_assignability_check, bool transaction_active>
+static inline bool DoFieldPutCommon(Thread* self,
+                                    const ShadowFrame& shadow_frame,
+                                    ObjPtr<mirror::Object>& obj,
+                                    ArtField* f,
+                                    const JValue& value)
+    REQUIRES_SHARED(Locks::mutator_lock_) {
   f->GetDeclaringClass()->AssertInitializedOrInitializingInThread(self);
-  uint32_t vregA = is_static ? inst->VRegA_21c(inst_data) : inst->VRegA_22c(inst_data);
+
   // Report this field access to instrumentation if needed. Since we only have the offset of
   // the field from the base of the object, we need to look for it first.
   instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
@@ -277,42 +385,42 @@
     StackHandleScope<1> hs(self);
     // Wrap in handle wrapper in case the listener does thread suspension.
     HandleWrapperObjPtr<mirror::Object> h(hs.NewHandleWrapper(&obj));
-    JValue field_value = GetFieldValue<field_type>(shadow_frame, vregA);
-    ObjPtr<Object> this_object = f->IsStatic() ? nullptr : obj;
+    ObjPtr<mirror::Object> this_object = f->IsStatic() ? nullptr : obj;
     instrumentation->FieldWriteEvent(self, this_object.Ptr(),
                                      shadow_frame.GetMethod(),
                                      shadow_frame.GetDexPC(),
                                      f,
-                                     field_value);
+                                     value);
   }
+
   switch (field_type) {
     case Primitive::kPrimBoolean:
-      f->SetBoolean<transaction_active>(obj, shadow_frame.GetVReg(vregA));
+      f->SetBoolean<transaction_active>(obj, value.GetZ());
       break;
     case Primitive::kPrimByte:
-      f->SetByte<transaction_active>(obj, shadow_frame.GetVReg(vregA));
+      f->SetByte<transaction_active>(obj, value.GetB());
       break;
     case Primitive::kPrimChar:
-      f->SetChar<transaction_active>(obj, shadow_frame.GetVReg(vregA));
+      f->SetChar<transaction_active>(obj, value.GetC());
       break;
     case Primitive::kPrimShort:
-      f->SetShort<transaction_active>(obj, shadow_frame.GetVReg(vregA));
+      f->SetShort<transaction_active>(obj, value.GetS());
       break;
     case Primitive::kPrimInt:
-      f->SetInt<transaction_active>(obj, shadow_frame.GetVReg(vregA));
+      f->SetInt<transaction_active>(obj, value.GetI());
       break;
     case Primitive::kPrimLong:
-      f->SetLong<transaction_active>(obj, shadow_frame.GetVRegLong(vregA));
+      f->SetLong<transaction_active>(obj, value.GetJ());
       break;
     case Primitive::kPrimNot: {
-      Object* reg = shadow_frame.GetVRegReference(vregA);
+      ObjPtr<mirror::Object> reg = value.GetL();
       if (do_assignability_check && reg != nullptr) {
         // FieldHelper::GetType can resolve classes, use a handle wrapper which will restore the
         // object in the destructor.
-        ObjPtr<Class> field_class;
+        ObjPtr<mirror::Class> field_class;
         {
           StackHandleScope<2> hs(self);
-          HandleWrapper<mirror::Object> h_reg(hs.NewHandleWrapper(&reg));
+          HandleWrapperObjPtr<mirror::Object> h_reg(hs.NewHandleWrapper(&reg));
           HandleWrapperObjPtr<mirror::Object> h_obj(hs.NewHandleWrapper(&obj));
           field_class = f->GetType<true>();
         }
@@ -337,6 +445,40 @@
   return true;
 }
 
+template<FindFieldType find_type, Primitive::Type field_type, bool do_access_check,
+         bool transaction_active>
+bool DoFieldPut(Thread* self, const ShadowFrame& shadow_frame, const Instruction* inst,
+                uint16_t inst_data) {
+  const bool do_assignability_check = do_access_check;
+  bool is_static = (find_type == StaticObjectWrite) || (find_type == StaticPrimitiveWrite);
+  uint32_t field_idx = is_static ? inst->VRegB_21c() : inst->VRegC_22c();
+  ArtField* f =
+      FindFieldFromCode<find_type, do_access_check>(field_idx, shadow_frame.GetMethod(), self,
+                                                    Primitive::ComponentSize(field_type));
+  if (UNLIKELY(f == nullptr)) {
+    CHECK(self->IsExceptionPending());
+    return false;
+  }
+  ObjPtr<mirror::Object> obj;
+  if (is_static) {
+    obj = f->GetDeclaringClass();
+  } else {
+    obj = shadow_frame.GetVRegReference(inst->VRegB_22c(inst_data));
+    if (UNLIKELY(obj == nullptr)) {
+      ThrowNullPointerExceptionForFieldAccess(f, false);
+      return false;
+    }
+  }
+
+  uint32_t vregA = is_static ? inst->VRegA_21c(inst_data) : inst->VRegA_22c(inst_data);
+  JValue value = GetFieldValue<field_type>(shadow_frame, vregA);
+  return DoFieldPutCommon<field_type, do_assignability_check, transaction_active>(self,
+                                                                                  shadow_frame,
+                                                                                  obj,
+                                                                                  f,
+                                                                                  value);
+}
+
 // Explicitly instantiate all DoFieldPut functions.
 #define EXPLICIT_DO_FIELD_PUT_TEMPLATE_DECL(_find_type, _field_type, _do_check, _transaction_active) \
   template bool DoFieldPut<_find_type, _field_type, _do_check, _transaction_active>(Thread* self, \
@@ -369,9 +511,49 @@
 #undef EXPLICIT_DO_FIELD_PUT_ALL_TEMPLATE_DECL
 #undef EXPLICIT_DO_FIELD_PUT_TEMPLATE_DECL
 
+// Helper for setters in invoke-polymorphic.
+bool DoFieldPutForInvokePolymorphic(Thread* self,
+                                    ShadowFrame& shadow_frame,
+                                    ObjPtr<mirror::Object>& obj,
+                                    ArtField* field,
+                                    Primitive::Type field_type,
+                                    const JValue& value)
+    REQUIRES_SHARED(Locks::mutator_lock_) {
+  static const bool kDoCheckAssignability = false;
+  static const bool kTransaction = false;
+  switch (field_type) {
+    case Primitive::kPrimBoolean:
+      return DoFieldPutCommon<Primitive::kPrimBoolean, kDoCheckAssignability, kTransaction>(
+          self, shadow_frame, obj, field, value);
+    case Primitive::kPrimByte:
+      return DoFieldPutCommon<Primitive::kPrimByte, kDoCheckAssignability, kTransaction>(
+          self, shadow_frame, obj, field, value);
+    case Primitive::kPrimChar:
+      return DoFieldPutCommon<Primitive::kPrimChar, kDoCheckAssignability, kTransaction>(
+          self, shadow_frame, obj, field, value);
+    case Primitive::kPrimShort:
+      return DoFieldPutCommon<Primitive::kPrimShort, kDoCheckAssignability, kTransaction>(
+          self, shadow_frame, obj, field, value);
+    case Primitive::kPrimInt:
+    case Primitive::kPrimFloat:
+      return DoFieldPutCommon<Primitive::kPrimInt, kDoCheckAssignability, kTransaction>(
+          self, shadow_frame, obj, field, value);
+    case Primitive::kPrimLong:
+    case Primitive::kPrimDouble:
+      return DoFieldPutCommon<Primitive::kPrimLong, kDoCheckAssignability, kTransaction>(
+          self, shadow_frame, obj, field, value);
+    case Primitive::kPrimNot:
+      return DoFieldPutCommon<Primitive::kPrimNot, kDoCheckAssignability, kTransaction>(
+          self, shadow_frame, obj, field, value);
+    case Primitive::kPrimVoid:
+      LOG(FATAL) << "Unreachable: " << field_type;
+      UNREACHABLE();
+  }
+}
+
 template<Primitive::Type field_type, bool transaction_active>
 bool DoIPutQuick(const ShadowFrame& shadow_frame, const Instruction* inst, uint16_t inst_data) {
-  Object* obj = shadow_frame.GetVRegReference(inst->VRegB_22c(inst_data));
+  ObjPtr<mirror::Object> obj = shadow_frame.GetVRegReference(inst->VRegB_22c(inst_data));
   if (UNLIKELY(obj == nullptr)) {
     // We lost the reference to the field index so we cannot get a more
     // precised exception message.
@@ -389,8 +571,15 @@
     DCHECK(f != nullptr);
     DCHECK(!f->IsStatic());
     JValue field_value = GetFieldValue<field_type>(shadow_frame, vregA);
-    instrumentation->FieldWriteEvent(Thread::Current(), obj, shadow_frame.GetMethod(),
-                                     shadow_frame.GetDexPC(), f, field_value);
+    StackHandleScope<1> hs(Thread::Current());
+    // Save obj in case the instrumentation event causes thread suspension.
+    HandleWrapperObjPtr<mirror::Object> h = hs.NewHandleWrapper(&obj);
+    instrumentation->FieldWriteEvent(Thread::Current(),
+                                     obj.Ptr(),
+                                     shadow_frame.GetMethod(),
+                                     shadow_frame.GetDexPC(),
+                                     f,
+                                     field_value);
   }
   // Note: iput-x-quick instructions are only for non-volatile fields.
   switch (field_type) {
@@ -499,49 +688,51 @@
 //
 
 template <bool is_range, bool do_assignability_check>
-    REQUIRES_SHARED(Locks::mutator_lock_)
-static inline bool DoCallCommon(ArtMethod* called_method,
-                                Thread* self,
-                                ShadowFrame& shadow_frame,
-                                JValue* result,
-                                uint16_t number_of_inputs,
-                                uint32_t (&arg)[Instruction::kMaxVarArgRegs],
-                                uint32_t vregC) ALWAYS_INLINE;
-
-template <bool is_range> REQUIRES_SHARED(Locks::mutator_lock_)
-static inline bool DoCallPolymorphic(ArtMethod* called_method,
-                                     Handle<mirror::MethodType> callsite_type,
-                                     Handle<mirror::MethodType> target_type,
-                                     Thread* self,
-                                     ShadowFrame& shadow_frame,
-                                     JValue* result,
-                                     uint32_t (&arg)[Instruction::kMaxVarArgRegs],
-                                     uint32_t vregC) ALWAYS_INLINE;
-
-REQUIRES_SHARED(Locks::mutator_lock_)
-static inline bool DoCallTransform(ArtMethod* called_method,
-                                   Handle<mirror::MethodType> callsite_type,
-                                   Thread* self,
-                                   ShadowFrame& shadow_frame,
-                                   Handle<mirror::MethodHandleImpl> receiver,
-                                   JValue* result) ALWAYS_INLINE;
-
-REQUIRES_SHARED(Locks::mutator_lock_)
-inline void PerformCall(Thread* self,
-                        const DexFile::CodeItem* code_item,
-                        ArtMethod* caller_method,
-                        const size_t first_dest_reg,
-                        ShadowFrame* callee_frame,
-                        JValue* result) ALWAYS_INLINE;
+static ALWAYS_INLINE bool DoCallCommon(ArtMethod* called_method,
+                                       Thread* self,
+                                       ShadowFrame& shadow_frame,
+                                       JValue* result,
+                                       uint16_t number_of_inputs,
+                                       uint32_t (&arg)[Instruction::kMaxVarArgRegs],
+                                       uint32_t vregC) REQUIRES_SHARED(Locks::mutator_lock_);
 
 template <bool is_range>
-REQUIRES_SHARED(Locks::mutator_lock_)
-inline void CopyRegisters(ShadowFrame& caller_frame,
-                          ShadowFrame* callee_frame,
-                          const uint32_t (&arg)[Instruction::kMaxVarArgRegs],
-                          const size_t first_src_reg,
-                          const size_t first_dest_reg,
-                          const size_t num_regs) ALWAYS_INLINE;
+static ALWAYS_INLINE bool DoCallPolymorphic(ArtMethod* called_method,
+                                            Handle<mirror::MethodType> callsite_type,
+                                            Handle<mirror::MethodType> target_type,
+                                            Thread* self,
+                                            ShadowFrame& shadow_frame,
+                                            JValue* result,
+                                            uint32_t (&arg)[Instruction::kMaxVarArgRegs],
+                                            uint32_t vregC,
+                                            const MethodHandleKind handle_kind)
+  REQUIRES_SHARED(Locks::mutator_lock_);
+
+template <bool is_range>
+static ALWAYS_INLINE bool DoCallTransform(ArtMethod* called_method,
+                                          Handle<mirror::MethodType> callsite_type,
+                                          Handle<mirror::MethodType> callee_type,
+                                          Thread* self,
+                                          ShadowFrame& shadow_frame,
+                                          Handle<mirror::MethodHandleImpl> receiver,
+                                          JValue* result,
+                                          uint32_t (&arg)[Instruction::kMaxVarArgRegs],
+                                          uint32_t vregC) REQUIRES_SHARED(Locks::mutator_lock_);
+
+ALWAYS_INLINE void PerformCall(Thread* self,
+                               const DexFile::CodeItem* code_item,
+                               ArtMethod* caller_method,
+                               const size_t first_dest_reg,
+                               ShadowFrame* callee_frame,
+                               JValue* result) REQUIRES_SHARED(Locks::mutator_lock_);
+
+template <bool is_range>
+ALWAYS_INLINE void CopyRegisters(ShadowFrame& caller_frame,
+                                 ShadowFrame* callee_frame,
+                                 const uint32_t (&arg)[Instruction::kMaxVarArgRegs],
+                                 const size_t first_src_reg,
+                                 const size_t first_dest_reg,
+                                 const size_t num_regs) REQUIRES_SHARED(Locks::mutator_lock_);
 
 // END DECLARATIONS.
 
@@ -554,7 +745,7 @@
   ArtMethod* method = shadow_frame->GetMethod();
   // Ensure static methods are initialized.
   if (method->IsStatic()) {
-    mirror::Class* declaringClass = method->GetDeclaringClass();
+    ObjPtr<mirror::Class> declaringClass = method->GetDeclaringClass();
     if (UNLIKELY(!declaringClass->IsInitialized())) {
       self->PushShadowFrame(shadow_frame);
       StackHandleScope<1> hs(self);
@@ -587,7 +778,7 @@
                                     uint16_t this_obj_vreg,
                                     JValue result)
     REQUIRES_SHARED(Locks::mutator_lock_) {
-  Object* existing = shadow_frame->GetVRegReference(this_obj_vreg);
+  ObjPtr<mirror::Object> existing = shadow_frame->GetVRegReference(this_obj_vreg);
   if (existing == nullptr) {
     // If it's null, we come from compiled code that was deoptimized. Nothing to do,
     // as the compiler verified there was no alias.
@@ -607,15 +798,71 @@
   }
 }
 
+inline static bool IsInvokeExact(const DexFile& dex_file, int invoke_method_idx) {
+  // This check uses string comparison as it requires less code and data
+  // than fetching the associated ArtMethod from the DexCache and checking
+  // against ArtMethods in the well known classes. The verifier needs to
+  // perform a more rigorous check.
+  const char* method_name = dex_file.GetMethodName(dex_file.GetMethodId(invoke_method_idx));
+  bool is_invoke_exact = (0 == strcmp(method_name, "invokeExact"));
+  DCHECK(is_invoke_exact || (0 == strcmp(method_name, "invoke")));
+  return is_invoke_exact;
+}
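// --- Illustrative Java sketch (not part of this patch): the behavioral difference between
// MethodHandle.invoke() and MethodHandle.invokeExact() that IsInvokeExact() keys off.
// invokeExact() requires the callsite type to match the handle's type exactly, while
// invoke() performs asType()-style conversions first. The class name below is hypothetical
// test scaffolding; only public java.lang.invoke API is used.
import java.lang.invoke.MethodHandle;
import java.lang.invoke.MethodHandles;
import java.lang.invoke.MethodType;

public class InvokeExactVsInvoke {
    public static void main(String[] args) throws Throwable {
        MethodHandle max = MethodHandles.lookup().findStatic(
                Math.class, "max", MethodType.methodType(int.class, int.class, int.class));

        // invoke() adapts the callsite type (boxing/widening here) before calling.
        Object viaInvoke = max.invoke(Integer.valueOf(1), 2);
        System.out.println(viaInvoke);  // 2

        // invokeExact() demands an exact (int, int) -> int callsite.
        int viaExact = (int) max.invokeExact(1, 2);
        System.out.println(viaExact);   // 2

        try {
            // A mismatched callsite type (Object result) throws WrongMethodTypeException.
            Object bad = max.invokeExact(1, 2);
        } catch (java.lang.invoke.WrongMethodTypeException expected) {
            System.out.println("invokeExact rejected an inexact callsite type");
        }
    }
}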
+
+inline static ObjPtr<mirror::Class> GetAndInitializeDeclaringClass(Thread* self, ArtField* field)
+    REQUIRES_SHARED(Locks::mutator_lock_) {
+  // Method handle invocations on static fields should ensure the class is
+  // initialized. This usually happens when an instance is constructed or
+  // class members are referenced, but it is not guaranteed when looking up
+  // method handles.
+  ObjPtr<mirror::Class> klass = field->GetDeclaringClass();
+  if (UNLIKELY(!klass->IsInitialized())) {
+    StackHandleScope<1> hs(self);
+    HandleWrapperObjPtr<mirror::Class> h(hs.NewHandleWrapper(&klass));
+    if (!Runtime::Current()->GetClassLinker()->EnsureInitialized(self, h, true, true)) {
+      DCHECK(self->IsExceptionPending());
+      return nullptr;
+    }
+  }
+  return klass;
+}
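// --- Illustrative Java sketch (not part of this patch): invoking a method handle that reads a
// static field must ensure the declaring class is initialized, which is what
// GetAndInitializeDeclaringClass() above guarantees on the runtime side. The Holder class and
// its field are hypothetical.
import java.lang.invoke.MethodHandle;
import java.lang.invoke.MethodHandles;

public class StaticGetterInit {
    static class Holder {
        static final int VALUE;
        static {
            System.out.println("Holder.<clinit> runs");
            VALUE = 42;
        }
    }

    public static void main(String[] args) throws Throwable {
        // Looking up the getter does not necessarily initialize Holder...
        MethodHandle getter = MethodHandles.lookup()
                .findStaticGetter(Holder.class, "VALUE", int.class);
        System.out.println("handle looked up");
        // ...but invoking it must behave like getstatic, so <clinit> has run by this point.
        int v = (int) getter.invokeExact();
        System.out.println(v);  // 42
    }
}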
+
+// Returns true iff the callsite type for a polymorphic invoke is
+// transformer-like, i.e. it has a single input argument whose type is
+// dalvik.system.EmulatedStackFrame.
+static inline bool IsCallerTransformer(Handle<mirror::MethodType> callsite_type)
+    REQUIRES_SHARED(Locks::mutator_lock_) {
+  ObjPtr<mirror::ObjectArray<mirror::Class>> param_types(callsite_type->GetPTypes());
+  if (param_types->GetLength() == 1) {
+    ObjPtr<mirror::Class> param(param_types->GetWithoutChecks(0));
+    return param == WellKnownClasses::ToClass(WellKnownClasses::dalvik_system_EmulatedStackFrame);
+  }
+
+  return false;
+}
+
 template<bool is_range, bool do_access_check>
-    REQUIRES_SHARED(Locks::mutator_lock_)
-inline bool DoInvokePolymorphic(Thread* self, ShadowFrame& shadow_frame,
-                                const Instruction* inst, uint16_t inst_data,
-                                JValue* result) {
+inline bool DoInvokePolymorphic(Thread* self,
+                                ShadowFrame& shadow_frame,
+                                const Instruction* inst,
+                                uint16_t inst_data,
+                                JValue* result)
+    REQUIRES_SHARED(Locks::mutator_lock_) {
   // Invoke-polymorphic instructions always take a receiver. i.e, they are never static.
   const uint32_t vRegC = (is_range) ? inst->VRegC_4rcc() : inst->VRegC_45cc();
+  const int invoke_method_idx = (is_range) ? inst->VRegB_4rcc() : inst->VRegB_45cc();
 
-  // The method_idx here is the name of the signature polymorphic method that
+  // Initialize |result| to 0 as this is the default return value for
+  // polymorphic invocations of method handle types with a void return,
+  // and it provides a sane return value in error cases.
+  result->SetJ(0);
+
+  // Determine if this invocation is MethodHandle.invoke() or
+  // MethodHandle.invokeExact().
+  bool is_invoke_exact = IsInvokeExact(shadow_frame.GetMethod()->GetDeclaringClass()->GetDexFile(),
+                                       invoke_method_idx);
+
+  // The invoke_method_idx here refers to the signature polymorphic method that
   // was symbolically invoked in bytecode (say MethodHandle.invoke or MethodHandle.invokeExact)
   // and not the method that we'll dispatch to in the end.
   //
@@ -625,14 +872,12 @@
   // that vRegC really is a reference type.
   StackHandleScope<6> hs(self);
   Handle<mirror::MethodHandleImpl> method_handle(hs.NewHandle(
-      reinterpret_cast<mirror::MethodHandleImpl*>(shadow_frame.GetVRegReference(vRegC))));
+      ObjPtr<mirror::MethodHandleImpl>::DownCast(
+          MakeObjPtr(shadow_frame.GetVRegReference(vRegC)))));
   if (UNLIKELY(method_handle.Get() == nullptr)) {
-    const int method_idx = (is_range) ? inst->VRegB_4rcc() : inst->VRegB_45cc();
     // Note that the invoke type is kVirtual here because a call to a signature
     // polymorphic method is shaped like a virtual call at the bytecode level.
-    ThrowNullPointerExceptionForMethodAccess(method_idx, InvokeType::kVirtual);
-
-    result->SetJ(0);
+    ThrowNullPointerExceptionForMethodAccess(invoke_method_idx, InvokeType::kVirtual);
     return false;
   }
 
@@ -653,24 +898,43 @@
   // This implies we couldn't resolve one or more types in this method handle.
   if (UNLIKELY(callsite_type.Get() == nullptr)) {
     CHECK(self->IsExceptionPending());
-    result->SetJ(0);
     return false;
   }
 
-  // Get the method we're actually invoking along with the kind of
-  // invoke that is desired. We don't need to perform access checks at this
-  // point because they would have been performed on our behalf at the point
-  // of creation of the method handle.
-  ArtMethod* called_method = method_handle->GetTargetMethod();
   const MethodHandleKind handle_kind = method_handle->GetHandleKind();
   Handle<mirror::MethodType> handle_type(hs.NewHandle(method_handle->GetMethodType()));
-  CHECK(called_method != nullptr);
   CHECK(handle_type.Get() != nullptr);
+  {
+    // We need to check the nominal type of the handle in addition to the
+    // real type. The "nominal" type is present when MethodHandle.asType is
+    // called on any handle, and results in the declared type of the handle
+    // changing.
+    ObjPtr<mirror::MethodType> nominal_type(method_handle->GetNominalType());
+    ObjPtr<mirror::MethodType> check_type(nullptr);
+    if (LIKELY(nominal_type.Ptr() == nullptr)) {
+      check_type.Assign(handle_type.Get());
+    } else {
+      check_type.Assign(nominal_type.Ptr());
+    }
+
+    if (is_invoke_exact) {
+      if (UNLIKELY(!callsite_type->IsExactMatch(check_type.Ptr()))) {
+        ThrowWrongMethodTypeException(check_type.Ptr(), callsite_type.Get());
+        return false;
+      }
+    } else {
+      if (UNLIKELY(!IsCallerTransformer(callsite_type) &&
+                   !callsite_type->IsConvertible(check_type.Ptr()))) {
+        ThrowWrongMethodTypeException(check_type.Ptr(), callsite_type.Get());
+        return false;
+      }
+    }
+  }
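// --- Illustrative Java sketch (not part of this patch): why the check above consults the
// "nominal" type set by MethodHandle.asType(). invokeExact() matches against the adapted
// (nominal) type rather than the original handle type, while invoke() only needs a
// convertible callsite type.
import java.lang.invoke.MethodHandle;
import java.lang.invoke.MethodHandles;
import java.lang.invoke.MethodType;
import java.lang.invoke.WrongMethodTypeException;

public class NominalTypeCheck {
    public static void main(String[] args) throws Throwable {
        MethodHandle max = MethodHandles.lookup().findStatic(
                Math.class, "max", MethodType.methodType(int.class, int.class, int.class));

        // asType() yields a handle whose declared (nominal) type is (Integer, Integer) -> Integer.
        MethodHandle boxedMax = max.asType(
                MethodType.methodType(Integer.class, Integer.class, Integer.class));

        // Exact invocation must now use the boxed signature...
        Integer ok = (Integer) boxedMax.invokeExact(Integer.valueOf(3), Integer.valueOf(5));
        System.out.println(ok);  // 5

        try {
            // ...and the original primitive signature is rejected.
            int bad = (int) boxedMax.invokeExact(3, 5);
        } catch (WrongMethodTypeException expected) {
            System.out.println("exact call against the nominal type failed as expected");
        }
    }
}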
 
   uint32_t arg[Instruction::kMaxVarArgRegs] = {};
-  uint32_t receiver_vregC = 0;
+  uint32_t first_src_reg = 0;
   if (is_range) {
-    receiver_vregC = (inst->VRegC_4rcc() + 1);
+    first_src_reg = (inst->VRegC_4rcc() + 1);
   } else {
     inst->GetVarArgs(arg, inst_data);
     arg[0] = arg[1];
@@ -678,41 +942,39 @@
     arg[2] = arg[3];
     arg[3] = arg[4];
     arg[4] = 0;
-    receiver_vregC = arg[0];
+    first_src_reg = arg[0];
   }
 
   if (IsInvoke(handle_kind)) {
-    if (handle_kind == kInvokeVirtual || handle_kind == kInvokeInterface) {
-      mirror::Object* receiver = shadow_frame.GetVRegReference(receiver_vregC);
-      mirror::Class* declaring_class = called_method->GetDeclaringClass();
-      // Verify that _vRegC is an object reference and of the type expected by
-      // the receiver.
-      called_method = receiver->GetClass()->FindVirtualMethodForVirtualOrInterface(
-          called_method, kRuntimePointerSize);
-      if (!VerifyObjectIsClass(receiver, declaring_class)) {
-        return false;
-      }
-    } else if (handle_kind == kInvokeDirect) {
-      if (called_method->IsConstructor()) {
-        // TODO(narayan) : We need to handle the case where the target method is a
-        // constructor here.
-        UNIMPLEMENTED(FATAL) << "Direct invokes for constructors are not implemented yet.";
-        return false;
-      }
+    // Get the method we're actually invoking along with the kind of
+    // invoke that is desired. We don't need to perform access checks at this
+    // point because they would have been performed on our behalf at the point
+    // of creation of the method handle.
+    ArtMethod* called_method = method_handle->GetTargetMethod();
+    CHECK(called_method != nullptr);
 
-      // Nothing special to do in the case where we're not dealing with a
-      // constructor. It's a private method, and we've already access checked at
-      // the point of creating the handle.
+    if (handle_kind == kInvokeVirtual || handle_kind == kInvokeInterface) {
+      // TODO: Unfortunately, we have to postpone dynamic receiver based checks
+      // because the receiver might be cast or might come from an emulated stack
+      // frame, which means that it is unknown at this point. We perform these
+      // checks inside DoCallPolymorphic right before we do the actual invoke.
+    } else if (handle_kind == kInvokeDirect) {
+      // String constructors are a special case, they are replaced with StringFactory
+      // methods.
+      if (called_method->IsConstructor() && called_method->GetDeclaringClass()->IsStringClass()) {
+        DCHECK(handle_type->GetRType()->IsStringClass());
+        called_method = WellKnownClasses::StringInitToStringFactory(called_method);
+      }
     } else if (handle_kind == kInvokeSuper) {
-      mirror::Class* declaring_class = called_method->GetDeclaringClass();
+      ObjPtr<mirror::Class> declaring_class = called_method->GetDeclaringClass();
 
       // Note that we're not dynamically dispatching on the type of the receiver
       // here. We use the static type of the "receiver" object that we've
       // recorded in the method handle's type, which will be the same as the
       // special caller that was specified at the point of lookup.
-      mirror::Class* referrer_class = handle_type->GetPTypes()->Get(0);
+      ObjPtr<mirror::Class> referrer_class = handle_type->GetPTypes()->Get(0);
       if (!declaring_class->IsInterface()) {
-        mirror::Class* super_class = referrer_class->GetSuperClass();
+        ObjPtr<mirror::Class> super_class = referrer_class->GetSuperClass();
         uint16_t vtable_index = called_method->GetMethodIndex();
         DCHECK(super_class != nullptr);
         DCHECK(super_class->HasVTable());
@@ -728,27 +990,83 @@
       CHECK(called_method != nullptr);
     }
 
+    bool call_success;
     if (handle_kind == kInvokeTransform) {
-      return DoCallTransform(called_method,
-                             callsite_type,
-                             self,
-                             shadow_frame,
-                             method_handle /* receiver */,
-                             result);
+      call_success = DoCallTransform<is_range>(called_method,
+                                               callsite_type,
+                                               handle_type,
+                                               self,
+                                               shadow_frame,
+                                               method_handle /* receiver */,
+                                               result,
+                                               arg,
+                                               first_src_reg);
     } else {
-      return DoCallPolymorphic<is_range>(called_method,
-                                         callsite_type,
-                                         handle_type,
-                                         self,
-                                         shadow_frame,
-                                         result,
-                                         arg,
-                                         receiver_vregC);
+      call_success = DoCallPolymorphic<is_range>(called_method,
+                                                 callsite_type,
+                                                 handle_type,
+                                                 self,
+                                                 shadow_frame,
+                                                 result,
+                                                 arg,
+                                                 first_src_reg,
+                                                 handle_kind);
     }
-  } else {
-    // TODO(narayan): Implement field getters and setters.
-    UNIMPLEMENTED(FATAL) << "Field references in method handles are not implemented yet.";
+    if (LIKELY(call_success && ConvertReturnValue(callsite_type, handle_type, result))) {
+      return true;
+    }
+    DCHECK(self->IsExceptionPending());
     return false;
+  } else {
+    DCHECK(!is_range);
+    ArtField* field = method_handle->GetTargetField();
+    Primitive::Type field_type = field->GetTypeAsPrimitiveType();
+
+    switch (handle_kind) {
+      case kInstanceGet: {
+        ObjPtr<mirror::Object> obj = shadow_frame.GetVRegReference(first_src_reg);
+        DoFieldGetForInvokePolymorphic(self, shadow_frame, obj, field, field_type, result);
+        if (!ConvertReturnValue(callsite_type, handle_type, result)) {
+          DCHECK(self->IsExceptionPending());
+          return false;
+        }
+        return true;
+      }
+      case kStaticGet: {
+        ObjPtr<mirror::Object> obj = GetAndInitializeDeclaringClass(self, field);
+        if (obj == nullptr) {
+          DCHECK(self->IsExceptionPending());
+          return false;
+        }
+        DoFieldGetForInvokePolymorphic(self, shadow_frame, obj, field, field_type, result);
+        if (!ConvertReturnValue(callsite_type, handle_type, result)) {
+          DCHECK(self->IsExceptionPending());
+          return false;
+        }
+        return true;
+      }
+      case kInstancePut: {
+        JValue value = GetFieldValue(shadow_frame, field_type, arg[1]);
+        if (!ConvertArgumentValue(callsite_type, handle_type, 1, &value)) {
+          DCHECK(self->IsExceptionPending());
+          return false;
+        }
+        ObjPtr<mirror::Object> obj = shadow_frame.GetVRegReference(first_src_reg);
+        return DoFieldPutForInvokePolymorphic(self, shadow_frame, obj, field, field_type, value);
+      }
+      case kStaticPut: {
+        JValue value = GetFieldValue(shadow_frame, field_type, arg[0]);
+        if (!ConvertArgumentValue(callsite_type, handle_type, 0, &value)) {
+          DCHECK(self->IsExceptionPending());
+          return false;
+        }
+        ObjPtr<mirror::Object> obj = field->GetDeclaringClass();
+        return DoFieldPutForInvokePolymorphic(self, shadow_frame, obj, field, field_type, value);
+      }
+      default:
+        LOG(FATAL) << "Unreachable: " << handle_kind;
+        UNREACHABLE();
+    }
   }
 }
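// --- Illustrative Java sketch (not part of this patch): the four field-accessor handle kinds
// handled above (kInstanceGet, kStaticGet, kInstancePut, kStaticPut) correspond to handles
// produced by findGetter/findStaticGetter/findSetter/findStaticSetter. The Counter class is
// hypothetical.
import java.lang.invoke.MethodHandle;
import java.lang.invoke.MethodHandles;

public class FieldAccessorHandles {
    static class Counter {
        int value;            // instance field -> kInstanceGet / kInstancePut
        static int total;     // static field   -> kStaticGet / kStaticPut
    }

    public static void main(String[] args) throws Throwable {
        MethodHandles.Lookup lookup = MethodHandles.lookup();
        MethodHandle getValue = lookup.findGetter(Counter.class, "value", int.class);
        MethodHandle setValue = lookup.findSetter(Counter.class, "value", int.class);
        MethodHandle getTotal = lookup.findStaticGetter(Counter.class, "total", int.class);
        MethodHandle setTotal = lookup.findStaticSetter(Counter.class, "total", int.class);

        Counter c = new Counter();
        setValue.invokeExact(c, 7);                  // kInstancePut
        setTotal.invokeExact(41);                    // kStaticPut
        int v = (int) getValue.invokeExact(c);       // kInstanceGet
        int t = (int) getTotal.invokeExact();        // kStaticGet
        System.out.println(v + " " + t);             // 7 41
    }
}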
 
@@ -823,20 +1141,6 @@
   }
 }
 
-// Returns true iff. the callsite type for a polymorphic invoke is transformer
-// like, i.e that it has a single input argument whose type is
-// dalvik.system.EmulatedStackFrame.
-static inline bool IsCallerTransformer(Handle<mirror::MethodType> callsite_type)
-    REQUIRES_SHARED(Locks::mutator_lock_) {
-  ObjPtr<mirror::ObjectArray<mirror::Class>> param_types(callsite_type->GetPTypes());
-  if (param_types->GetLength() == 1) {
-    ObjPtr<mirror::Class> param(param_types->GetWithoutChecks(0));
-    return param == WellKnownClasses::ToClass(WellKnownClasses::dalvik_system_EmulatedStackFrame);
-  }
-
-  return false;
-}
-
 template <bool is_range>
 static inline bool DoCallPolymorphic(ArtMethod* called_method,
                                      Handle<mirror::MethodType> callsite_type,
@@ -845,9 +1149,8 @@
                                      ShadowFrame& shadow_frame,
                                      JValue* result,
                                      uint32_t (&arg)[Instruction::kMaxVarArgRegs],
-                                     uint32_t first_src_reg) {
-  // TODO(narayan): Wire in the String.init hacks.
-
+                                     uint32_t first_src_reg,
+                                     const MethodHandleKind handle_kind) {
   // Compute method information.
   const DexFile::CodeItem* code_item = called_method->GetCodeItem();
 
@@ -878,6 +1181,8 @@
       CREATE_SHADOW_FRAME(num_regs, &shadow_frame, called_method, /* dex pc */ 0);
   ShadowFrame* new_shadow_frame = shadow_frame_unique_ptr.get();
 
+  // Whether this polymorphic invoke was issued by a transformer method.
+  bool is_caller_transformer = false;
   // Thread might be suspended during PerformArgumentConversions due to the
   // allocations performed during boxing.
   {
@@ -899,18 +1204,16 @@
       // case, we'll have to unmarshal the EmulatedStackFrame into the
       // new_shadow_frame and perform argument conversions on it.
       if (IsCallerTransformer(callsite_type)) {
-        // The emulated stack frame will be the first ahnd only argument
-        // when we're coming through from a transformer.
-        //
-        // TODO(narayan): This should be a mirror::EmulatedStackFrame after that
-        // type is introduced.
-        ObjPtr<mirror::Object> emulated_stack_frame(
-            shadow_frame.GetVRegReference(first_src_reg));
-        if (!ConvertAndCopyArgumentsFromEmulatedStackFrame<is_range>(self,
-                                                                     emulated_stack_frame,
-                                                                     target_type,
-                                                                     first_dest_reg,
-                                                                     new_shadow_frame)) {
+        is_caller_transformer = true;
+        // The emulated stack frame is the first and only argument when we're coming
+        // through from a transformer.
+        ObjPtr<mirror::EmulatedStackFrame> emulated_stack_frame(
+            reinterpret_cast<mirror::EmulatedStackFrame*>(
+                shadow_frame.GetVRegReference(first_src_reg)));
+        if (!emulated_stack_frame->WriteToShadowFrame(self,
+                                                      target_type,
+                                                      first_dest_reg,
+                                                      new_shadow_frame)) {
           DCHECK(self->IsExceptionPending());
           result->SetL(0);
           return false;
@@ -930,20 +1233,49 @@
     }
   }
 
+  // See TODO in DoInvokePolymorphic : We need to perform this dynamic, receiver
+  // based dispatch right before we perform the actual call, because the
+  // receiver isn't known very early.
+  if (handle_kind == kInvokeVirtual || handle_kind == kInvokeInterface) {
+    ObjPtr<mirror::Object> receiver(new_shadow_frame->GetVRegReference(first_dest_reg));
+    ObjPtr<mirror::Class> declaring_class(called_method->GetDeclaringClass());
+    // Verify that the receiver is an object reference of the type expected
+    // by the called method's declaring class.
+    if (!VerifyObjectIsClass(receiver, declaring_class)) {
+      DCHECK(self->IsExceptionPending());
+      return false;
+    }
+
+    called_method = receiver->GetClass()->FindVirtualMethodForVirtualOrInterface(
+        called_method, kRuntimePointerSize);
+  }
+
   PerformCall(self, code_item, shadow_frame.GetMethod(), first_dest_reg, new_shadow_frame, result);
 
-  // TODO(narayan): Perform return value conversions.
+  // If the caller of this signature polymorphic method was a transformer,
+  // we need to copy the result back out to the emulated stack frame.
+  if (is_caller_transformer && !self->IsExceptionPending()) {
+    ObjPtr<mirror::EmulatedStackFrame> emulated_stack_frame(
+        reinterpret_cast<mirror::EmulatedStackFrame*>(
+            shadow_frame.GetVRegReference(first_src_reg)));
+
+    emulated_stack_frame->SetReturnValue(self, *result);
+  }
 
   return !self->IsExceptionPending();
 }
 
+template <bool is_range>
 static inline bool DoCallTransform(ArtMethod* called_method,
                                    Handle<mirror::MethodType> callsite_type,
+                                   Handle<mirror::MethodType> callee_type,
                                    Thread* self,
                                    ShadowFrame& shadow_frame,
                                    Handle<mirror::MethodHandleImpl> receiver,
-                                   JValue* result) {
-  // This can be fixed, because the method we're calling here
+                                   JValue* result,
+                                   uint32_t (&arg)[Instruction::kMaxVarArgRegs],
+                                   uint32_t first_src_reg) {
+  // This can be fixed to two, because the method we're calling here
   // (MethodHandle.transformInternal) doesn't have any locals and the signature
   // is known :
   //
@@ -963,18 +1295,33 @@
       CREATE_SHADOW_FRAME(kNumRegsForTransform, &shadow_frame, called_method, /* dex pc */ 0);
   ShadowFrame* new_shadow_frame = shadow_frame_unique_ptr.get();
 
-  // TODO(narayan): Perform argument conversions first (if this is an inexact invoke), and
-  // then construct an argument list object that's passed through to the
-  // method. Note that the ArgumentList reference is currently a nullptr.
-  //
-  // NOTE(narayan): If the caller is a transformer method (i.e, there is only
-  // one argument and its type is EmulatedStackFrame), we can directly pass that
-  // through without having to do any additional work.
-  UNUSED(callsite_type);
+  StackHandleScope<1> hs(self);
+  MutableHandle<mirror::EmulatedStackFrame> sf(hs.NewHandle<mirror::EmulatedStackFrame>(nullptr));
+  if (IsCallerTransformer(callsite_type)) {
+    // If we're entering this transformer from another transformer, we can pass
+    // the emulated stack frame through to the callee directly, instead of
+    // having to instantiate a new one based on the shadow frame.
+    sf.Assign(reinterpret_cast<mirror::EmulatedStackFrame*>(
+        shadow_frame.GetVRegReference(first_src_reg)));
+  } else {
+    sf.Assign(mirror::EmulatedStackFrame::CreateFromShadowFrameAndArgs<is_range>(
+        self,
+        callsite_type,
+        callee_type,
+        shadow_frame,
+        first_src_reg,
+        arg));
+
+    // Something went wrong while creating the emulated stack frame, so we
+    // propagate the pending exception.
+    if (sf.Get() == nullptr) {
+      DCHECK(self->IsExceptionPending());
+      return false;
+    }
+  }
 
   new_shadow_frame->SetVRegReference(0, receiver.Get());
-  // TODO(narayan): This is the EmulatedStackFrame, currently nullptr.
-  new_shadow_frame->SetVRegReference(1, nullptr);
+  new_shadow_frame->SetVRegReference(1, sf.Get());
 
   PerformCall(self,
               code_item,
@@ -983,6 +1330,12 @@
               new_shadow_frame,
               result);
 
+  // If the transformer method we called has returned a value, then we
+  // need to copy it back to |result|.
+  if (!self->IsExceptionPending()) {
+    sf->GetReturnValue(self, result);
+  }
+
   return !self->IsExceptionPending();
 }
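// --- Illustrative Java sketch (not part of this patch): combinators such as
// MethodHandles.dropArguments() return adapter handles; to the best of my understanding these
// adapters are the "transformer" handles that DoCallTransform() above dispatches to through an
// emulated stack frame on Android. The example itself uses only public java.lang.invoke API.
import java.lang.invoke.MethodHandle;
import java.lang.invoke.MethodHandles;
import java.lang.invoke.MethodType;

public class TransformerExample {
    public static void main(String[] args) throws Throwable {
        MethodHandle parseInt = MethodHandles.lookup().findStatic(
                Integer.class, "parseInt", MethodType.methodType(int.class, String.class));

        // Adapter that ignores a leading String argument before calling parseInt.
        MethodHandle adapted = MethodHandles.dropArguments(parseInt, 0, String.class);

        int result = (int) adapted.invokeExact("ignored", "123");
        System.out.println(result);  // 123
    }
}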
 
@@ -1104,15 +1457,22 @@
       switch (shorty[shorty_pos + 1]) {
         // Handle Object references. 1 virtual register slot.
         case 'L': {
-          Object* o = shadow_frame.GetVRegReference(src_reg);
+          ObjPtr<mirror::Object> o = shadow_frame.GetVRegReference(src_reg);
           if (do_assignability_check && o != nullptr) {
             PointerSize pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
-            Class* arg_type =
-                method->GetClassFromTypeIndex(
-                    params->GetTypeItem(shorty_pos).type_idx_, true /* resolve */, pointer_size);
+            const uint32_t type_idx = params->GetTypeItem(shorty_pos).type_idx_;
+            ObjPtr<mirror::Class> arg_type = method->GetDexCacheResolvedType(type_idx,
+                                                                             pointer_size);
             if (arg_type == nullptr) {
-              CHECK(self->IsExceptionPending());
-              return false;
+              StackHandleScope<1> hs(self);
+              // Preserve o since it is used below and GetClassFromTypeIndex may cause thread
+              // suspension.
+              HandleWrapperObjPtr<mirror::Object> h = hs.NewHandleWrapper(&o);
+              arg_type = method->GetClassFromTypeIndex(type_idx, true /* resolve */, pointer_size);
+              if (arg_type == nullptr) {
+                CHECK(self->IsExceptionPending());
+                return false;
+              }
             }
             if (!o->VerifierInstanceOf(arg_type)) {
               // This should never happen.
@@ -1125,7 +1485,7 @@
               return false;
             }
           }
-          new_shadow_frame->SetVRegReference(dest_reg, o);
+          new_shadow_frame->SetVRegReference(dest_reg, o.Ptr());
           break;
         }
         // Handle doubles and longs. 2 consecutive virtual register slots.
@@ -1192,8 +1552,10 @@
 }
 
 template <bool is_range, bool do_access_check, bool transaction_active>
-bool DoFilledNewArray(const Instruction* inst, const ShadowFrame& shadow_frame,
-                      Thread* self, JValue* result) {
+bool DoFilledNewArray(const Instruction* inst,
+                      const ShadowFrame& shadow_frame,
+                      Thread* self,
+                      JValue* result) {
   DCHECK(inst->Opcode() == Instruction::FILLED_NEW_ARRAY ||
          inst->Opcode() == Instruction::FILLED_NEW_ARRAY_RANGE);
   const int32_t length = is_range ? inst->VRegA_3rc() : inst->VRegA_35c();
@@ -1206,14 +1568,17 @@
     return false;
   }
   uint16_t type_idx = is_range ? inst->VRegB_3rc() : inst->VRegB_35c();
-  Class* array_class = ResolveVerifyAndClinit(type_idx, shadow_frame.GetMethod(),
-                                              self, false, do_access_check);
+  ObjPtr<mirror::Class> array_class = ResolveVerifyAndClinit(type_idx,
+                                                             shadow_frame.GetMethod(),
+                                                             self,
+                                                             false,
+                                                             do_access_check);
   if (UNLIKELY(array_class == nullptr)) {
     DCHECK(self->IsExceptionPending());
     return false;
   }
   CHECK(array_class->IsArrayClass());
-  Class* component_class = array_class->GetComponentType();
+  ObjPtr<mirror::Class> component_class = array_class->GetComponentType();
   const bool is_primitive_int_component = component_class->IsPrimitiveInt();
   if (UNLIKELY(component_class->IsPrimitive() && !is_primitive_int_component)) {
     if (component_class->IsPrimitiveLong() || component_class->IsPrimitiveDouble()) {
@@ -1226,9 +1591,12 @@
     }
     return false;
   }
-  Object* new_array = Array::Alloc<true>(self, array_class, length,
-                                         array_class->GetComponentSizeShift(),
-                                         Runtime::Current()->GetHeap()->GetCurrentAllocator());
+  ObjPtr<mirror::Object> new_array = mirror::Array::Alloc<true>(
+      self,
+      array_class,
+      length,
+      array_class->GetComponentSizeShift(),
+      Runtime::Current()->GetHeap()->GetCurrentAllocator());
   if (UNLIKELY(new_array == nullptr)) {
     self->AssertPendingOOMException();
     return false;
@@ -1246,7 +1614,7 @@
       new_array->AsIntArray()->SetWithoutChecks<transaction_active>(
           i, shadow_frame.GetVReg(src_reg));
     } else {
-      new_array->AsObjectArray<Object>()->SetWithoutChecks<transaction_active>(
+      new_array->AsObjectArray<mirror::Object>()->SetWithoutChecks<transaction_active>(
           i, shadow_frame.GetVRegReference(src_reg));
     }
   }
@@ -1255,17 +1623,18 @@
   return true;
 }
 
-// TODO fix thread analysis: should be REQUIRES_SHARED(Locks::mutator_lock_).
+// TODO: Use ObjPtr here.
 template<typename T>
-static void RecordArrayElementsInTransactionImpl(mirror::PrimitiveArray<T>* array, int32_t count)
-    NO_THREAD_SAFETY_ANALYSIS {
+static void RecordArrayElementsInTransactionImpl(mirror::PrimitiveArray<T>* array,
+                                                 int32_t count)
+    REQUIRES_SHARED(Locks::mutator_lock_) {
   Runtime* runtime = Runtime::Current();
   for (int32_t i = 0; i < count; ++i) {
     runtime->RecordWriteArray(array, i, array->GetWithoutChecks(i));
   }
 }
 
-void RecordArrayElementsInTransaction(mirror::Array* array, int32_t count)
+void RecordArrayElementsInTransaction(ObjPtr<mirror::Array> array, int32_t count)
     REQUIRES_SHARED(Locks::mutator_lock_) {
   DCHECK(Runtime::Current()->IsActiveTransaction());
   DCHECK(array != nullptr);
diff --git a/runtime/interpreter/interpreter_common.h b/runtime/interpreter/interpreter_common.h
index 40d6f03..9c26d24 100644
--- a/runtime/interpreter/interpreter_common.h
+++ b/runtime/interpreter/interpreter_common.h
@@ -43,25 +43,11 @@
 #include "mirror/object-inl.h"
 #include "mirror/object_array-inl.h"
 #include "mirror/string-inl.h"
+#include "obj_ptr.h"
 #include "stack.h"
 #include "thread.h"
 #include "well_known_classes.h"
 
-using ::art::ArtMethod;
-using ::art::mirror::Array;
-using ::art::mirror::BooleanArray;
-using ::art::mirror::ByteArray;
-using ::art::mirror::CharArray;
-using ::art::mirror::Class;
-using ::art::mirror::ClassLoader;
-using ::art::mirror::IntArray;
-using ::art::mirror::LongArray;
-using ::art::mirror::Object;
-using ::art::mirror::ObjectArray;
-using ::art::mirror::ShortArray;
-using ::art::mirror::String;
-using ::art::mirror::Throwable;
-
 namespace art {
 namespace interpreter {
 
@@ -69,13 +55,11 @@
     REQUIRES_SHARED(Locks::mutator_lock_);
 
 template <bool kMonitorCounting>
-static inline void DoMonitorEnter(Thread* self,
-                                  ShadowFrame* frame,
-                                  Object* ref)
+static inline void DoMonitorEnter(Thread* self, ShadowFrame* frame, ObjPtr<mirror::Object> ref)
     NO_THREAD_SAFETY_ANALYSIS
     REQUIRES(!Roles::uninterruptible_) {
   StackHandleScope<1> hs(self);
-  Handle<Object> h_ref(hs.NewHandle(ref));
+  Handle<mirror::Object> h_ref(hs.NewHandle(ref));
   h_ref->MonitorEnter(self);
   if (kMonitorCounting && frame->GetMethod()->MustCountLocks()) {
     frame->GetLockCountData().AddMonitor(self, h_ref.Get());
@@ -83,13 +67,11 @@
 }
 
 template <bool kMonitorCounting>
-static inline void DoMonitorExit(Thread* self,
-                                 ShadowFrame* frame,
-                                 Object* ref)
+static inline void DoMonitorExit(Thread* self, ShadowFrame* frame, ObjPtr<mirror::Object> ref)
     NO_THREAD_SAFETY_ANALYSIS
     REQUIRES(!Roles::uninterruptible_) {
   StackHandleScope<1> hs(self);
-  Handle<Object> h_ref(hs.NewHandle(ref));
+  Handle<mirror::Object> h_ref(hs.NewHandle(ref));
   h_ref->MonitorExit(self);
   if (kMonitorCounting && frame->GetMethod()->MustCountLocks()) {
     frame->GetLockCountData().RemoveMonitorOrThrow(self, h_ref.Get());
@@ -113,7 +95,7 @@
 void AbortTransactionV(Thread* self, const char* fmt, va_list args)
     REQUIRES_SHARED(Locks::mutator_lock_);
 
-void RecordArrayElementsInTransaction(mirror::Array* array, int32_t count)
+void RecordArrayElementsInTransaction(ObjPtr<mirror::Array> array, int32_t count)
     REQUIRES_SHARED(Locks::mutator_lock_);
 
 // Invokes the given method. This is part of the invocation support and is used by DoInvoke and
@@ -126,11 +108,14 @@
 // Handles all invoke-XXX/range instructions except for invoke-polymorphic[/range].
 // Returns true on success, otherwise throws an exception and returns false.
 template<InvokeType type, bool is_range, bool do_access_check>
-static inline bool DoInvoke(Thread* self, ShadowFrame& shadow_frame, const Instruction* inst,
-                            uint16_t inst_data, JValue* result) {
+static inline bool DoInvoke(Thread* self,
+                            ShadowFrame& shadow_frame,
+                            const Instruction* inst,
+                            uint16_t inst_data,
+                            JValue* result) {
   const uint32_t method_idx = (is_range) ? inst->VRegB_3rc() : inst->VRegB_35c();
   const uint32_t vregC = (is_range) ? inst->VRegC_3rc() : inst->VRegC_35c();
-  Object* receiver = (type == kStatic) ? nullptr : shadow_frame.GetVRegReference(vregC);
+  ObjPtr<mirror::Object> receiver = (type == kStatic) ? nullptr : shadow_frame.GetVRegReference(vregC);
   ArtMethod* sf_method = shadow_frame.GetMethod();
   ArtMethod* const called_method = FindMethodFromCode<type, do_access_check>(
       method_idx, &receiver, sf_method, self);
@@ -156,7 +141,7 @@
       instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
       if (UNLIKELY(instrumentation->HasInvokeVirtualOrInterfaceListeners())) {
         instrumentation->InvokeVirtualOrInterface(
-            self, receiver, sf_method, shadow_frame.GetDexPC(), called_method);
+            self, receiver.Ptr(), sf_method, shadow_frame.GetDexPC(), called_method);
       }
     }
     return DoCall<is_range, do_access_check>(called_method, self, shadow_frame, inst, inst_data,
@@ -177,7 +162,7 @@
                                         const Instruction* inst, uint16_t inst_data,
                                         JValue* result) {
   const uint32_t vregC = (is_range) ? inst->VRegC_3rc() : inst->VRegC_35c();
-  Object* const receiver = shadow_frame.GetVRegReference(vregC);
+  ObjPtr<mirror::Object> const receiver = shadow_frame.GetVRegReference(vregC);
   if (UNLIKELY(receiver == nullptr)) {
     // We lost the reference to the method index so we cannot get a more
     // precised exception message.
@@ -190,7 +175,7 @@
     CHECK(receiver->GetClass() != nullptr)
         << "Null class found in object " << receiver << " in region type "
         << Runtime::Current()->GetHeap()->ConcurrentCopyingCollector()->
-            RegionSpace()->GetRegionType(receiver);
+            RegionSpace()->GetRegionType(receiver.Ptr());
   }
   CHECK(receiver->GetClass()->ShouldHaveEmbeddedVTable());
   ArtMethod* const called_method = receiver->GetClass()->GetEmbeddedVTableEntry(
@@ -214,7 +199,7 @@
     // TODO: Remove the InvokeVirtualOrInterface instrumentation, as it was only used by the JIT.
     if (UNLIKELY(instrumentation->HasInvokeVirtualOrInterfaceListeners())) {
       instrumentation->InvokeVirtualOrInterface(
-          self, receiver, shadow_frame.GetMethod(), shadow_frame.GetDexPC(), called_method);
+          self, receiver.Ptr(), shadow_frame.GetMethod(), shadow_frame.GetDexPC(), called_method);
     }
     // No need to check since we've been quickened.
     return DoCall<is_range, false>(called_method, self, shadow_frame, inst, inst_data, result);
@@ -249,9 +234,11 @@
 
 // Handles string resolution for const-string and const-string-jumbo instructions. Also ensures the
 // java.lang.String class is initialized.
-static inline String* ResolveString(Thread* self, ShadowFrame& shadow_frame, uint32_t string_idx)
+static inline ObjPtr<mirror::String> ResolveString(Thread* self,
+                                                   ShadowFrame& shadow_frame,
+                                                   uint32_t string_idx)
     REQUIRES_SHARED(Locks::mutator_lock_) {
-  Class* java_lang_string_class = String::GetJavaLangString();
+  ObjPtr<mirror::Class> java_lang_string_class = mirror::String::GetJavaLangString();
   if (UNLIKELY(!java_lang_string_class->IsInitialized())) {
     ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
     StackHandleScope<1> hs(self);
@@ -262,11 +249,11 @@
     }
   }
   ArtMethod* method = shadow_frame.GetMethod();
-  mirror::Class* declaring_class = method->GetDeclaringClass();
+  ObjPtr<mirror::Class> declaring_class = method->GetDeclaringClass();
   // MethodVerifier refuses methods with string_idx out of bounds.
   DCHECK_LT(string_idx % mirror::DexCache::kDexCacheStringCacheSize,
             declaring_class->GetDexFile().NumStringIds());
-  mirror::String* string_ptr =
+  ObjPtr<mirror::String> string_ptr =
       mirror::StringDexCachePair::Lookup(declaring_class->GetDexCacheStrings(),
                                          string_idx,
                                          mirror::DexCache::kDexCacheStringCacheSize).Read();
@@ -318,8 +305,10 @@
 
 // Handles div-long and div-long-2addr instructions.
 // Returns true on success, otherwise throws a java.lang.ArithmeticException and return false.
-static inline bool DoLongDivide(ShadowFrame& shadow_frame, size_t result_reg,
-                                int64_t dividend, int64_t divisor)
+static inline bool DoLongDivide(ShadowFrame& shadow_frame,
+                                size_t result_reg,
+                                int64_t dividend,
+                                int64_t divisor)
     REQUIRES_SHARED(Locks::mutator_lock_) {
   const int64_t kMinLong = std::numeric_limits<int64_t>::min();
   if (UNLIKELY(divisor == 0)) {
@@ -336,8 +325,10 @@
 
 // Handles rem-long and rem-long-2addr instructions.
 // Returns true on success, otherwise throws a java.lang.ArithmeticException and return false.
-static inline bool DoLongRemainder(ShadowFrame& shadow_frame, size_t result_reg,
-                                   int64_t dividend, int64_t divisor)
+static inline bool DoLongRemainder(ShadowFrame& shadow_frame,
+                                   size_t result_reg,
+                                   int64_t dividend,
+                                   int64_t divisor)
     REQUIRES_SHARED(Locks::mutator_lock_) {
   const int64_t kMinLong = std::numeric_limits<int64_t>::min();
   if (UNLIKELY(divisor == 0)) {
@@ -443,7 +434,7 @@
         << inst->DumpString(shadow_frame.GetMethod()->GetDexFile()) << "\n";
     for (uint32_t i = 0; i < shadow_frame.NumberOfVRegs(); ++i) {
       uint32_t raw_value = shadow_frame.GetVReg(i);
-      Object* ref_value = shadow_frame.GetVRegReference(i);
+      ObjPtr<mirror::Object> ref_value = shadow_frame.GetVRegReference(i);
       oss << StringPrintf(" vreg%u=0x%08X", i, raw_value);
       if (ref_value != nullptr) {
         if (ref_value->GetClass()->IsStringClass() &&
@@ -469,13 +460,13 @@
     REQUIRES_SHARED(Locks::mutator_lock_) {
   // Uint required, so that sign extension does not make this wrong on 64b systems
   uint32_t src_value = shadow_frame.GetVReg(src_reg);
-  mirror::Object* o = shadow_frame.GetVRegReference<kVerifyNone>(src_reg);
+  ObjPtr<mirror::Object> o = shadow_frame.GetVRegReference<kVerifyNone>(src_reg);
 
   // If both register locations contains the same value, the register probably holds a reference.
   // Note: As an optimization, non-moving collectors leave a stale reference value
   // in the references array even after the original vreg was overwritten to a non-reference.
-  if (src_value == reinterpret_cast<uintptr_t>(o)) {
-    new_shadow_frame->SetVRegReference(dest_reg, o);
+  if (src_value == reinterpret_cast<uintptr_t>(o.Ptr())) {
+    new_shadow_frame->SetVRegReference(dest_reg, o.Ptr());
   } else {
     new_shadow_frame->SetVReg(dest_reg, src_value);
   }
diff --git a/runtime/interpreter/interpreter_mterp_impl.h b/runtime/interpreter/interpreter_mterp_impl.h
index 90d9f89..1be20fa 100644
--- a/runtime/interpreter/interpreter_mterp_impl.h
+++ b/runtime/interpreter/interpreter_mterp_impl.h
@@ -21,6 +21,7 @@
 #include "base/mutex.h"
 #include "dex_file.h"
 #include "jvalue.h"
+#include "obj_ptr.h"
 
 namespace art {
 
diff --git a/runtime/interpreter/interpreter_switch_impl.cc b/runtime/interpreter/interpreter_switch_impl.cc
index 78afe56..435ac62 100644
--- a/runtime/interpreter/interpreter_switch_impl.cc
+++ b/runtime/interpreter/interpreter_switch_impl.cc
@@ -192,9 +192,9 @@
         break;
       case Instruction::MOVE_EXCEPTION: {
         PREAMBLE();
-        Throwable* exception = self->GetException();
+        ObjPtr<mirror::Throwable> exception = self->GetException();
         DCHECK(exception != nullptr) << "No pending exception on MOVE_EXCEPTION instruction";
-        shadow_frame.SetVRegReference(inst->VRegA_11x(inst_data), exception);
+        shadow_frame.SetVRegReference(inst->VRegA_11x(inst_data), exception.Ptr());
         self->ClearException();
         inst = inst->Next_1xx();
         break;
@@ -273,11 +273,11 @@
         self->AllowThreadSuspension();
         HANDLE_MONITOR_CHECKS();
         const size_t ref_idx = inst->VRegA_11x(inst_data);
-        Object* obj_result = shadow_frame.GetVRegReference(ref_idx);
+        ObjPtr<mirror::Object> obj_result = shadow_frame.GetVRegReference(ref_idx);
         if (do_assignability_check && obj_result != nullptr) {
           PointerSize pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
-          Class* return_type = shadow_frame.GetMethod()->GetReturnType(true /* resolve */,
-                                                                       pointer_size);
+          ObjPtr<mirror::Class> return_type = method->GetReturnType(true /* resolve */,
+                                                                    pointer_size);
           // Re-load since it might have moved.
           obj_result = shadow_frame.GetVRegReference(ref_idx);
           if (return_type == nullptr) {
@@ -373,41 +373,44 @@
         break;
       case Instruction::CONST_STRING: {
         PREAMBLE();
-        String* s = ResolveString(self, shadow_frame,  inst->VRegB_21c());
+        ObjPtr<mirror::String> s = ResolveString(self, shadow_frame,  inst->VRegB_21c());
         if (UNLIKELY(s == nullptr)) {
           HANDLE_PENDING_EXCEPTION();
         } else {
-          shadow_frame.SetVRegReference(inst->VRegA_21c(inst_data), s);
+          shadow_frame.SetVRegReference(inst->VRegA_21c(inst_data), s.Ptr());
           inst = inst->Next_2xx();
         }
         break;
       }
       case Instruction::CONST_STRING_JUMBO: {
         PREAMBLE();
-        String* s = ResolveString(self, shadow_frame,  inst->VRegB_31c());
+        ObjPtr<mirror::String> s = ResolveString(self, shadow_frame,  inst->VRegB_31c());
         if (UNLIKELY(s == nullptr)) {
           HANDLE_PENDING_EXCEPTION();
         } else {
-          shadow_frame.SetVRegReference(inst->VRegA_31c(inst_data), s);
+          shadow_frame.SetVRegReference(inst->VRegA_31c(inst_data), s.Ptr());
           inst = inst->Next_3xx();
         }
         break;
       }
       case Instruction::CONST_CLASS: {
         PREAMBLE();
-        Class* c = ResolveVerifyAndClinit(inst->VRegB_21c(), shadow_frame.GetMethod(),
-                                          self, false, do_access_check);
+        ObjPtr<mirror::Class> c = ResolveVerifyAndClinit(inst->VRegB_21c(),
+                                                         shadow_frame.GetMethod(),
+                                                         self,
+                                                         false,
+                                                         do_access_check);
         if (UNLIKELY(c == nullptr)) {
           HANDLE_PENDING_EXCEPTION();
         } else {
-          shadow_frame.SetVRegReference(inst->VRegA_21c(inst_data), c);
+          shadow_frame.SetVRegReference(inst->VRegA_21c(inst_data), c.Ptr());
           inst = inst->Next_2xx();
         }
         break;
       }
       case Instruction::MONITOR_ENTER: {
         PREAMBLE();
-        Object* obj = shadow_frame.GetVRegReference(inst->VRegA_11x(inst_data));
+        ObjPtr<mirror::Object> obj = shadow_frame.GetVRegReference(inst->VRegA_11x(inst_data));
         if (UNLIKELY(obj == nullptr)) {
           ThrowNullPointerExceptionFromInterpreter();
           HANDLE_PENDING_EXCEPTION();
@@ -419,7 +422,7 @@
       }
       case Instruction::MONITOR_EXIT: {
         PREAMBLE();
-        Object* obj = shadow_frame.GetVRegReference(inst->VRegA_11x(inst_data));
+        ObjPtr<mirror::Object> obj = shadow_frame.GetVRegReference(inst->VRegA_11x(inst_data));
         if (UNLIKELY(obj == nullptr)) {
           ThrowNullPointerExceptionFromInterpreter();
           HANDLE_PENDING_EXCEPTION();
@@ -431,12 +434,15 @@
       }
       case Instruction::CHECK_CAST: {
         PREAMBLE();
-        Class* c = ResolveVerifyAndClinit(inst->VRegB_21c(), shadow_frame.GetMethod(),
-                                          self, false, do_access_check);
+        ObjPtr<mirror::Class> c = ResolveVerifyAndClinit(inst->VRegB_21c(),
+                                                         shadow_frame.GetMethod(),
+                                                         self,
+                                                         false,
+                                                         do_access_check);
         if (UNLIKELY(c == nullptr)) {
           HANDLE_PENDING_EXCEPTION();
         } else {
-          Object* obj = shadow_frame.GetVRegReference(inst->VRegA_21c(inst_data));
+          ObjPtr<mirror::Object> obj = shadow_frame.GetVRegReference(inst->VRegA_21c(inst_data));
           if (UNLIKELY(obj != nullptr && !obj->InstanceOf(c))) {
             ThrowClassCastException(c, obj->GetClass());
             HANDLE_PENDING_EXCEPTION();
@@ -448,12 +454,15 @@
       }
       case Instruction::INSTANCE_OF: {
         PREAMBLE();
-        Class* c = ResolveVerifyAndClinit(inst->VRegC_22c(), shadow_frame.GetMethod(),
-                                          self, false, do_access_check);
+        ObjPtr<mirror::Class> c = ResolveVerifyAndClinit(inst->VRegC_22c(),
+                                                         shadow_frame.GetMethod(),
+                                                         self,
+                                                         false,
+                                                         do_access_check);
         if (UNLIKELY(c == nullptr)) {
           HANDLE_PENDING_EXCEPTION();
         } else {
-          Object* obj = shadow_frame.GetVRegReference(inst->VRegB_22c(inst_data));
+          ObjPtr<mirror::Object> obj = shadow_frame.GetVRegReference(inst->VRegB_22c(inst_data));
           shadow_frame.SetVReg(inst->VRegA_22c(inst_data),
                                (obj != nullptr && obj->InstanceOf(c)) ? 1 : 0);
           inst = inst->Next_2xx();
@@ -462,7 +471,7 @@
       }
       case Instruction::ARRAY_LENGTH:  {
         PREAMBLE();
-        Object* array = shadow_frame.GetVRegReference(inst->VRegB_12x(inst_data));
+        ObjPtr<mirror::Object> array = shadow_frame.GetVRegReference(inst->VRegB_12x(inst_data));
         if (UNLIKELY(array == nullptr)) {
           ThrowNullPointerExceptionFromInterpreter();
           HANDLE_PENDING_EXCEPTION();
@@ -474,9 +483,12 @@
       }
       case Instruction::NEW_INSTANCE: {
         PREAMBLE();
-        Object* obj = nullptr;
-        Class* c = ResolveVerifyAndClinit(inst->VRegB_21c(), shadow_frame.GetMethod(),
-                                          self, false, do_access_check);
+        ObjPtr<mirror::Object> obj = nullptr;
+        ObjPtr<mirror::Class> c = ResolveVerifyAndClinit(inst->VRegB_21c(),
+                                                         shadow_frame.GetMethod(),
+                                                         self,
+                                                         false,
+                                                         do_access_check);
         if (LIKELY(c != nullptr)) {
           if (UNLIKELY(c->IsStringClass())) {
             gc::AllocatorType allocator_type = Runtime::Current()->GetHeap()->GetCurrentAllocator();
@@ -499,7 +511,7 @@
             HANDLE_PENDING_EXCEPTION();
             break;
           }
-          shadow_frame.SetVRegReference(inst->VRegA_21c(inst_data), obj);
+          shadow_frame.SetVRegReference(inst->VRegA_21c(inst_data), obj.Ptr());
           inst = inst->Next_2xx();
         }
         break;
@@ -507,13 +519,13 @@
       case Instruction::NEW_ARRAY: {
         PREAMBLE();
         int32_t length = shadow_frame.GetVReg(inst->VRegB_22c(inst_data));
-        Object* obj = AllocArrayFromCode<do_access_check, true>(
+        ObjPtr<mirror::Object> obj = AllocArrayFromCode<do_access_check, true>(
             inst->VRegC_22c(), length, shadow_frame.GetMethod(), self,
             Runtime::Current()->GetHeap()->GetCurrentAllocator());
         if (UNLIKELY(obj == nullptr)) {
           HANDLE_PENDING_EXCEPTION();
         } else {
-          shadow_frame.SetVRegReference(inst->VRegA_22c(inst_data), obj);
+          shadow_frame.SetVRegReference(inst->VRegA_22c(inst_data), obj.Ptr());
           inst = inst->Next_2xx();
         }
         break;
@@ -539,7 +551,7 @@
         const uint16_t* payload_addr = reinterpret_cast<const uint16_t*>(inst) + inst->VRegB_31t();
         const Instruction::ArrayDataPayload* payload =
             reinterpret_cast<const Instruction::ArrayDataPayload*>(payload_addr);
-        Object* obj = shadow_frame.GetVRegReference(inst->VRegA_31t(inst_data));
+        ObjPtr<mirror::Object> obj = shadow_frame.GetVRegReference(inst->VRegA_31t(inst_data));
         bool success = FillArrayData(obj, payload);
         if (!success) {
           HANDLE_PENDING_EXCEPTION();
@@ -553,7 +565,8 @@
       }
       case Instruction::THROW: {
         PREAMBLE();
-        Object* exception = shadow_frame.GetVRegReference(inst->VRegA_11x(inst_data));
+        ObjPtr<mirror::Object> exception =
+            shadow_frame.GetVRegReference(inst->VRegA_11x(inst_data));
         if (UNLIKELY(exception == nullptr)) {
           ThrowNullPointerException("throw with null exception");
         } else if (do_assignability_check && !exception->GetClass()->IsThrowableClass()) {
@@ -911,14 +924,14 @@
       }
       case Instruction::AGET_BOOLEAN: {
         PREAMBLE();
-        Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
+        ObjPtr<mirror::Object> a = shadow_frame.GetVRegReference(inst->VRegB_23x());
         if (UNLIKELY(a == nullptr)) {
           ThrowNullPointerExceptionFromInterpreter();
           HANDLE_PENDING_EXCEPTION();
           break;
         }
         int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
-        BooleanArray* array = a->AsBooleanArray();
+        ObjPtr<mirror::BooleanArray> array = a->AsBooleanArray();
         if (array->CheckIsValidIndex(index)) {
           shadow_frame.SetVReg(inst->VRegA_23x(inst_data), array->GetWithoutChecks(index));
           inst = inst->Next_2xx();
@@ -929,14 +942,14 @@
       }
       case Instruction::AGET_BYTE: {
         PREAMBLE();
-        Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
+        ObjPtr<mirror::Object> a = shadow_frame.GetVRegReference(inst->VRegB_23x());
         if (UNLIKELY(a == nullptr)) {
           ThrowNullPointerExceptionFromInterpreter();
           HANDLE_PENDING_EXCEPTION();
           break;
         }
         int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
-        ByteArray* array = a->AsByteArray();
+        ObjPtr<mirror::ByteArray> array = a->AsByteArray();
         if (array->CheckIsValidIndex(index)) {
           shadow_frame.SetVReg(inst->VRegA_23x(inst_data), array->GetWithoutChecks(index));
           inst = inst->Next_2xx();
@@ -947,14 +960,14 @@
       }
       case Instruction::AGET_CHAR: {
         PREAMBLE();
-        Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
+        ObjPtr<mirror::Object> a = shadow_frame.GetVRegReference(inst->VRegB_23x());
         if (UNLIKELY(a == nullptr)) {
           ThrowNullPointerExceptionFromInterpreter();
           HANDLE_PENDING_EXCEPTION();
           break;
         }
         int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
-        CharArray* array = a->AsCharArray();
+        ObjPtr<mirror::CharArray> array = a->AsCharArray();
         if (array->CheckIsValidIndex(index)) {
           shadow_frame.SetVReg(inst->VRegA_23x(inst_data), array->GetWithoutChecks(index));
           inst = inst->Next_2xx();
@@ -965,14 +978,14 @@
       }
       case Instruction::AGET_SHORT: {
         PREAMBLE();
-        Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
+        ObjPtr<mirror::Object> a = shadow_frame.GetVRegReference(inst->VRegB_23x());
         if (UNLIKELY(a == nullptr)) {
           ThrowNullPointerExceptionFromInterpreter();
           HANDLE_PENDING_EXCEPTION();
           break;
         }
         int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
-        ShortArray* array = a->AsShortArray();
+        ObjPtr<mirror::ShortArray> array = a->AsShortArray();
         if (array->CheckIsValidIndex(index)) {
           shadow_frame.SetVReg(inst->VRegA_23x(inst_data), array->GetWithoutChecks(index));
           inst = inst->Next_2xx();
@@ -983,7 +996,7 @@
       }
       case Instruction::AGET: {
         PREAMBLE();
-        Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
+        ObjPtr<mirror::Object> a = shadow_frame.GetVRegReference(inst->VRegB_23x());
         if (UNLIKELY(a == nullptr)) {
           ThrowNullPointerExceptionFromInterpreter();
           HANDLE_PENDING_EXCEPTION();
@@ -991,7 +1004,7 @@
         }
         int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
         DCHECK(a->IsIntArray() || a->IsFloatArray()) << a->PrettyTypeOf();
-        auto* array = down_cast<IntArray*>(a);
+        ObjPtr<mirror::IntArray> array = ObjPtr<mirror::IntArray>::DownCast(a);
         if (array->CheckIsValidIndex(index)) {
           shadow_frame.SetVReg(inst->VRegA_23x(inst_data), array->GetWithoutChecks(index));
           inst = inst->Next_2xx();
@@ -1002,7 +1015,7 @@
       }
       case Instruction::AGET_WIDE:  {
         PREAMBLE();
-        Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
+        ObjPtr<mirror::Object> a = shadow_frame.GetVRegReference(inst->VRegB_23x());
         if (UNLIKELY(a == nullptr)) {
           ThrowNullPointerExceptionFromInterpreter();
           HANDLE_PENDING_EXCEPTION();
@@ -1010,7 +1023,7 @@
         }
         int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
         DCHECK(a->IsLongArray() || a->IsDoubleArray()) << a->PrettyTypeOf();
-        auto* array = down_cast<LongArray*>(a);
+        ObjPtr<mirror::LongArray> array = ObjPtr<mirror::LongArray>::DownCast(a);
         if (array->CheckIsValidIndex(index)) {
           shadow_frame.SetVRegLong(inst->VRegA_23x(inst_data), array->GetWithoutChecks(index));
           inst = inst->Next_2xx();
@@ -1021,14 +1034,14 @@
       }
       case Instruction::AGET_OBJECT: {
         PREAMBLE();
-        Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
+        ObjPtr<mirror::Object> a = shadow_frame.GetVRegReference(inst->VRegB_23x());
         if (UNLIKELY(a == nullptr)) {
           ThrowNullPointerExceptionFromInterpreter();
           HANDLE_PENDING_EXCEPTION();
           break;
         }
         int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
-        ObjectArray<Object>* array = a->AsObjectArray<Object>();
+        ObjPtr<mirror::ObjectArray<mirror::Object>> array = a->AsObjectArray<mirror::Object>();
         if (array->CheckIsValidIndex(index)) {
           shadow_frame.SetVRegReference(inst->VRegA_23x(inst_data), array->GetWithoutChecks(index));
           inst = inst->Next_2xx();
@@ -1039,7 +1052,7 @@
       }
       case Instruction::APUT_BOOLEAN: {
         PREAMBLE();
-        Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
+        ObjPtr<mirror::Object> a = shadow_frame.GetVRegReference(inst->VRegB_23x());
         if (UNLIKELY(a == nullptr)) {
           ThrowNullPointerExceptionFromInterpreter();
           HANDLE_PENDING_EXCEPTION();
@@ -1047,7 +1060,7 @@
         }
         uint8_t val = shadow_frame.GetVReg(inst->VRegA_23x(inst_data));
         int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
-        BooleanArray* array = a->AsBooleanArray();
+        ObjPtr<mirror::BooleanArray> array = a->AsBooleanArray();
         if (array->CheckIsValidIndex(index)) {
           array->SetWithoutChecks<transaction_active>(index, val);
           inst = inst->Next_2xx();
@@ -1058,7 +1071,7 @@
       }
       case Instruction::APUT_BYTE: {
         PREAMBLE();
-        Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
+        ObjPtr<mirror::Object> a = shadow_frame.GetVRegReference(inst->VRegB_23x());
         if (UNLIKELY(a == nullptr)) {
           ThrowNullPointerExceptionFromInterpreter();
           HANDLE_PENDING_EXCEPTION();
@@ -1066,7 +1079,7 @@
         }
         int8_t val = shadow_frame.GetVReg(inst->VRegA_23x(inst_data));
         int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
-        ByteArray* array = a->AsByteArray();
+        ObjPtr<mirror::ByteArray> array = a->AsByteArray();
         if (array->CheckIsValidIndex(index)) {
           array->SetWithoutChecks<transaction_active>(index, val);
           inst = inst->Next_2xx();
@@ -1077,7 +1090,7 @@
       }
       case Instruction::APUT_CHAR: {
         PREAMBLE();
-        Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
+        ObjPtr<mirror::Object> a = shadow_frame.GetVRegReference(inst->VRegB_23x());
         if (UNLIKELY(a == nullptr)) {
           ThrowNullPointerExceptionFromInterpreter();
           HANDLE_PENDING_EXCEPTION();
@@ -1085,7 +1098,7 @@
         }
         uint16_t val = shadow_frame.GetVReg(inst->VRegA_23x(inst_data));
         int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
-        CharArray* array = a->AsCharArray();
+        ObjPtr<mirror::CharArray> array = a->AsCharArray();
         if (array->CheckIsValidIndex(index)) {
           array->SetWithoutChecks<transaction_active>(index, val);
           inst = inst->Next_2xx();
@@ -1096,7 +1109,7 @@
       }
       case Instruction::APUT_SHORT: {
         PREAMBLE();
-        Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
+        ObjPtr<mirror::Object> a = shadow_frame.GetVRegReference(inst->VRegB_23x());
         if (UNLIKELY(a == nullptr)) {
           ThrowNullPointerExceptionFromInterpreter();
           HANDLE_PENDING_EXCEPTION();
@@ -1104,7 +1117,7 @@
         }
         int16_t val = shadow_frame.GetVReg(inst->VRegA_23x(inst_data));
         int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
-        ShortArray* array = a->AsShortArray();
+        ObjPtr<mirror::ShortArray> array = a->AsShortArray();
         if (array->CheckIsValidIndex(index)) {
           array->SetWithoutChecks<transaction_active>(index, val);
           inst = inst->Next_2xx();
@@ -1115,7 +1128,7 @@
       }
       case Instruction::APUT: {
         PREAMBLE();
-        Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
+        ObjPtr<mirror::Object> a = shadow_frame.GetVRegReference(inst->VRegB_23x());
         if (UNLIKELY(a == nullptr)) {
           ThrowNullPointerExceptionFromInterpreter();
           HANDLE_PENDING_EXCEPTION();
@@ -1124,7 +1137,7 @@
         int32_t val = shadow_frame.GetVReg(inst->VRegA_23x(inst_data));
         int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
         DCHECK(a->IsIntArray() || a->IsFloatArray()) << a->PrettyTypeOf();
-        auto* array = down_cast<IntArray*>(a);
+        ObjPtr<mirror::IntArray> array = ObjPtr<mirror::IntArray>::DownCast(a);
         if (array->CheckIsValidIndex(index)) {
           array->SetWithoutChecks<transaction_active>(index, val);
           inst = inst->Next_2xx();
@@ -1135,7 +1148,7 @@
       }
       case Instruction::APUT_WIDE: {
         PREAMBLE();
-        Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
+        ObjPtr<mirror::Object> a = shadow_frame.GetVRegReference(inst->VRegB_23x());
         if (UNLIKELY(a == nullptr)) {
           ThrowNullPointerExceptionFromInterpreter();
           HANDLE_PENDING_EXCEPTION();
@@ -1144,7 +1157,7 @@
         int64_t val = shadow_frame.GetVRegLong(inst->VRegA_23x(inst_data));
         int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
         DCHECK(a->IsLongArray() || a->IsDoubleArray()) << a->PrettyTypeOf();
-        LongArray* array = down_cast<LongArray*>(a);
+        ObjPtr<mirror::LongArray> array = ObjPtr<mirror::LongArray>::DownCast(a);
         if (array->CheckIsValidIndex(index)) {
           array->SetWithoutChecks<transaction_active>(index, val);
           inst = inst->Next_2xx();
@@ -1155,15 +1168,15 @@
       }
       case Instruction::APUT_OBJECT: {
         PREAMBLE();
-        Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
+        ObjPtr<mirror::Object> a = shadow_frame.GetVRegReference(inst->VRegB_23x());
         if (UNLIKELY(a == nullptr)) {
           ThrowNullPointerExceptionFromInterpreter();
           HANDLE_PENDING_EXCEPTION();
           break;
         }
         int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
-        Object* val = shadow_frame.GetVRegReference(inst->VRegA_23x(inst_data));
-        ObjectArray<Object>* array = a->AsObjectArray<Object>();
+        ObjPtr<mirror::Object> val = shadow_frame.GetVRegReference(inst->VRegA_23x(inst_data));
+        ObjPtr<mirror::ObjectArray<mirror::Object>> array = a->AsObjectArray<mirror::Object>();
         if (array->CheckIsValidIndex(index) && array->CheckAssignable(val)) {
           array->SetWithoutChecks<transaction_active>(index, val);
           inst = inst->Next_2xx();
@@ -1545,6 +1558,7 @@
       }
       case Instruction::INVOKE_POLYMORPHIC: {
         PREAMBLE();
+        DCHECK(Runtime::Current()->IsMethodHandlesEnabled());
         bool success = DoInvokePolymorphic<false, do_access_check>(
             self, shadow_frame, inst, inst_data, &result_register);
         POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_4xx);
@@ -1552,11 +1566,11 @@
       }
       case Instruction::INVOKE_POLYMORPHIC_RANGE: {
         PREAMBLE();
+        DCHECK(Runtime::Current()->IsMethodHandlesEnabled());
         bool success = DoInvokePolymorphic<true, do_access_check>(
             self, shadow_frame, inst, inst_data, &result_register);
         POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_4xx);
         break;
-        break;
       }
       case Instruction::NEG_INT:
         PREAMBLE();
diff --git a/runtime/interpreter/interpreter_switch_impl.h b/runtime/interpreter/interpreter_switch_impl.h
index d0c9386..267df2e 100644
--- a/runtime/interpreter/interpreter_switch_impl.h
+++ b/runtime/interpreter/interpreter_switch_impl.h
@@ -21,6 +21,7 @@
 #include "base/mutex.h"
 #include "dex_file.h"
 #include "jvalue.h"
+#include "obj_ptr.h"
 
 namespace art {
 
diff --git a/runtime/interpreter/mterp/arm/footer.S b/runtime/interpreter/mterp/arm/footer.S
index 62e573a..cd32ea2 100644
--- a/runtime/interpreter/mterp/arm/footer.S
+++ b/runtime/interpreter/mterp/arm/footer.S
@@ -156,7 +156,7 @@
     REFRESH_IBASE
     add     r2, rINST, rINST            @ r2<- byte offset
     FETCH_ADVANCE_INST_RB r2            @ update rPC, load rINST
-    ands    lr, #(THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST)
+    ands    lr, #THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
     bne     .L_suspend_request_pending
     GET_INST_OPCODE ip                  @ extract opcode from rINST
     GOTO_OPCODE ip                      @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/op_return.S b/runtime/interpreter/mterp/arm/op_return.S
index 1888373..f9c0f0f 100644
--- a/runtime/interpreter/mterp/arm/op_return.S
+++ b/runtime/interpreter/mterp/arm/op_return.S
@@ -8,7 +8,7 @@
     bl      MterpThreadFenceForConstructor
     ldr     lr, [rSELF, #THREAD_FLAGS_OFFSET]
     mov     r0, rSELF
-    ands    lr, #(THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST)
+    ands    lr, #THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
     blne    MterpSuspendCheck                       @ (self)
     mov     r2, rINST, lsr #8           @ r2<- AA
     GET_VREG r0, r2                     @ r0<- vAA
diff --git a/runtime/interpreter/mterp/arm/op_return_void.S b/runtime/interpreter/mterp/arm/op_return_void.S
index cbea2bf..a91ccb3 100644
--- a/runtime/interpreter/mterp/arm/op_return_void.S
+++ b/runtime/interpreter/mterp/arm/op_return_void.S
@@ -2,7 +2,7 @@
     bl      MterpThreadFenceForConstructor
     ldr     lr, [rSELF, #THREAD_FLAGS_OFFSET]
     mov     r0, rSELF
-    ands    lr, #(THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST)
+    ands    lr, #THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
     blne    MterpSuspendCheck                       @ (self)
     mov    r0, #0
     mov    r1, #0
diff --git a/runtime/interpreter/mterp/arm/op_return_void_no_barrier.S b/runtime/interpreter/mterp/arm/op_return_void_no_barrier.S
index 2dde7ae..b953f4c 100644
--- a/runtime/interpreter/mterp/arm/op_return_void_no_barrier.S
+++ b/runtime/interpreter/mterp/arm/op_return_void_no_barrier.S
@@ -1,6 +1,6 @@
     ldr     lr, [rSELF, #THREAD_FLAGS_OFFSET]
     mov     r0, rSELF
-    ands    lr, #(THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST)
+    ands    lr, #THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
     blne    MterpSuspendCheck                       @ (self)
     mov    r0, #0
     mov    r1, #0
diff --git a/runtime/interpreter/mterp/arm/op_return_wide.S b/runtime/interpreter/mterp/arm/op_return_wide.S
index ceae878..df582c0 100644
--- a/runtime/interpreter/mterp/arm/op_return_wide.S
+++ b/runtime/interpreter/mterp/arm/op_return_wide.S
@@ -6,7 +6,7 @@
     bl      MterpThreadFenceForConstructor
     ldr     lr, [rSELF, #THREAD_FLAGS_OFFSET]
     mov     r0, rSELF
-    ands    lr, #(THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST)
+    ands    lr, #THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
     blne    MterpSuspendCheck                       @ (self)
     mov     r2, rINST, lsr #8           @ r2<- AA
     VREG_INDEX_TO_ADDR r2, r2           @ r2<- &fp[AA]
diff --git a/runtime/interpreter/mterp/arm64/footer.S b/runtime/interpreter/mterp/arm64/footer.S
index 7628ed3..ada0326 100644
--- a/runtime/interpreter/mterp/arm64/footer.S
+++ b/runtime/interpreter/mterp/arm64/footer.S
@@ -141,7 +141,7 @@
     add     w2, wINST, wINST            // w2<- byte offset
     FETCH_ADVANCE_INST_RB w2            // update rPC, load wINST
     REFRESH_IBASE
-    ands    lr, lr, #(THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST)
+    ands    lr, lr, #THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
     b.ne    .L_suspend_request_pending
     GET_INST_OPCODE ip                  // extract opcode from wINST
     GOTO_OPCODE ip                      // jump to next instruction
@@ -215,7 +215,7 @@
  */
 MterpCheckSuspendAndContinue:
     ldr     xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]  // refresh xIBASE
-    ands    w7, w7, #(THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST)
+    ands    w7, w7, #THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
     b.ne    check1
     GET_INST_OPCODE ip                  // extract opcode from wINST
     GOTO_OPCODE ip                      // jump to next instruction
@@ -270,7 +270,7 @@
     ldr     lr, [xSELF, #THREAD_FLAGS_OFFSET]
     str     x0, [x2]
     mov     x0, xSELF
-    ands    lr, lr, #(THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST)
+    ands    lr, lr, #THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
     b.eq    check2
     bl      MterpSuspendCheck                       // (self)
 check2:
diff --git a/runtime/interpreter/mterp/arm64/op_return.S b/runtime/interpreter/mterp/arm64/op_return.S
index 28630ee..9f125c7 100644
--- a/runtime/interpreter/mterp/arm64/op_return.S
+++ b/runtime/interpreter/mterp/arm64/op_return.S
@@ -8,7 +8,7 @@
     bl      MterpThreadFenceForConstructor
     ldr     w7, [xSELF, #THREAD_FLAGS_OFFSET]
     mov     x0, xSELF
-    ands    w7, w7, #(THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST)
+    ands    w7, w7, #THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
     b.ne    .L${opcode}_check
 .L${opcode}_return:
     lsr     w2, wINST, #8               // r2<- AA
diff --git a/runtime/interpreter/mterp/arm64/op_return_void.S b/runtime/interpreter/mterp/arm64/op_return_void.S
index 3a5aa56..b253006 100644
--- a/runtime/interpreter/mterp/arm64/op_return_void.S
+++ b/runtime/interpreter/mterp/arm64/op_return_void.S
@@ -2,7 +2,7 @@
     bl      MterpThreadFenceForConstructor
     ldr     w7, [xSELF, #THREAD_FLAGS_OFFSET]
     mov     x0, xSELF
-    ands    w7, w7, #(THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST)
+    ands    w7, w7, #THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
     b.ne    .L${opcode}_check
 .L${opcode}_return:
     mov     x0, #0
diff --git a/runtime/interpreter/mterp/arm64/op_return_void_no_barrier.S b/runtime/interpreter/mterp/arm64/op_return_void_no_barrier.S
index 1e06953..c817169 100644
--- a/runtime/interpreter/mterp/arm64/op_return_void_no_barrier.S
+++ b/runtime/interpreter/mterp/arm64/op_return_void_no_barrier.S
@@ -1,6 +1,6 @@
     ldr     w7, [xSELF, #THREAD_FLAGS_OFFSET]
     mov     x0, xSELF
-    ands    w7, w7, #(THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST)
+    ands    w7, w7, #THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
     b.ne    .L${opcode}_check
 .L${opcode}_return:
     mov     x0, #0
diff --git a/runtime/interpreter/mterp/arm64/op_return_wide.S b/runtime/interpreter/mterp/arm64/op_return_wide.S
index c6e1d9d..c47661c 100644
--- a/runtime/interpreter/mterp/arm64/op_return_wide.S
+++ b/runtime/interpreter/mterp/arm64/op_return_wide.S
@@ -7,7 +7,7 @@
     bl      MterpThreadFenceForConstructor
     ldr     w7, [xSELF, #THREAD_FLAGS_OFFSET]
     mov     x0, xSELF
-    ands    w7, w7, #(THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST)
+    ands    w7, w7, #THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
     b.ne    .L${opcode}_check
 .L${opcode}_return:
     lsr     w2, wINST, #8               // w2<- AA
diff --git a/runtime/interpreter/mterp/mips/binop.S b/runtime/interpreter/mterp/mips/binop.S
index 66627e2..862d95a 100644
--- a/runtime/interpreter/mterp/mips/binop.S
+++ b/runtime/interpreter/mterp/mips/binop.S
@@ -30,4 +30,3 @@
     $instr                                 #  $result <- op, a0-a3 changed
     GET_INST_OPCODE(t0)                    #  extract opcode from rINST
     SET_VREG_GOTO($result, rOBJ, t0)       #  vAA <- $result
-    /* 11-14 instructions */
diff --git a/runtime/interpreter/mterp/mips/binop2addr.S b/runtime/interpreter/mterp/mips/binop2addr.S
index 548cbcb..17aa8eb 100644
--- a/runtime/interpreter/mterp/mips/binop2addr.S
+++ b/runtime/interpreter/mterp/mips/binop2addr.S
@@ -25,5 +25,4 @@
     $preinstr                              #  optional op
     $instr                                 #  $result <- op, a0-a3 changed
     GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG_GOTO($result, rOBJ, t0)       #  vAA <- $result
-    /* 10-13 instructions */
+    SET_VREG_GOTO($result, rOBJ, t0)       #  vA <- $result
diff --git a/runtime/interpreter/mterp/mips/binopLit16.S b/runtime/interpreter/mterp/mips/binopLit16.S
index fc0c9ff..0696e7a 100644
--- a/runtime/interpreter/mterp/mips/binopLit16.S
+++ b/runtime/interpreter/mterp/mips/binopLit16.S
@@ -11,12 +11,11 @@
      * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
      *      rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
      */
-    # binop/lit16 vA, vB,                  /* +CCCC */
+    /* binop/lit16 vA, vB, +CCCC */
     FETCH_S(a1, 1)                         #  a1 <- ssssCCCC (sign-extended)
     GET_OPB(a2)                            #  a2 <- B
-    GET_OPA(rOBJ)                          #  rOBJ <- A+
+    GET_OPA4(rOBJ)                         #  rOBJ <- A+
     GET_VREG(a0, a2)                       #  a0 <- vB
-    and       rOBJ, rOBJ, 15
     .if $chkzero
     # cmp a1, 0; is second operand zero?
     beqz      a1, common_errDivideByZero
@@ -26,5 +25,4 @@
     $preinstr                              #  optional op
     $instr                                 #  $result <- op, a0-a3 changed
     GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG_GOTO($result, rOBJ, t0)       #  vAA <- $result
-    /* 10-13 instructions */
+    SET_VREG_GOTO($result, rOBJ, t0)       #  vA <- $result
diff --git a/runtime/interpreter/mterp/mips/binopLit8.S b/runtime/interpreter/mterp/mips/binopLit8.S
index a591408..382dd2b 100644
--- a/runtime/interpreter/mterp/mips/binopLit8.S
+++ b/runtime/interpreter/mterp/mips/binopLit8.S
@@ -12,7 +12,7 @@
      *      rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
      *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
      */
-    # binop/lit8 vAA, vBB,                 /* +CC */
+    /* binop/lit8 vAA, vBB, +CC */
     FETCH_S(a3, 1)                         #  a3 <- ssssCCBB (sign-extended for CC)
     GET_OPA(rOBJ)                          #  rOBJ <- AA
     and       a2, a3, 255                  #  a2 <- BB
@@ -28,4 +28,3 @@
     $instr                                 #  $result <- op, a0-a3 changed
     GET_INST_OPCODE(t0)                    #  extract opcode from rINST
     SET_VREG_GOTO($result, rOBJ, t0)       #  vAA <- $result
-    /* 10-12 instructions */
diff --git a/runtime/interpreter/mterp/mips/binopWide.S b/runtime/interpreter/mterp/mips/binopWide.S
index 608525b..604134d 100644
--- a/runtime/interpreter/mterp/mips/binopWide.S
+++ b/runtime/interpreter/mterp/mips/binopWide.S
@@ -3,10 +3,10 @@
      * Generic 64-bit binary operation.  Provide an "instr" line that
      * specifies an instruction that performs "result = a0-a1 op a2-a3".
      * This could be a MIPS instruction or a function call.  (If the result
-     * comes back in a register other than a0, you can override "result".)
+     * comes back in a register pair other than a0-a1, you can override "result".)
      *
      * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (a1).  Useful for integer division and modulus.
+     * vCC (a2-a3).  Useful for integer division and modulus.
      *
      * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
      *      xor-long
@@ -32,4 +32,3 @@
     $instr                                 #  result <- op, a0-a3 changed
     GET_INST_OPCODE(t0)                    #  extract opcode from rINST
     SET_VREG64_GOTO($result0, $result1, rOBJ, t0)   #  vAA/vAA+1 <- $result0/$result1
-    /* 14-17 instructions */
diff --git a/runtime/interpreter/mterp/mips/binopWide2addr.S b/runtime/interpreter/mterp/mips/binopWide2addr.S
index cc92149..f96fdb2 100644
--- a/runtime/interpreter/mterp/mips/binopWide2addr.S
+++ b/runtime/interpreter/mterp/mips/binopWide2addr.S
@@ -3,22 +3,21 @@
      * Generic 64-bit "/2addr" binary operation.  Provide an "instr" line
      * that specifies an instruction that performs "result = a0-a1 op a2-a3".
      * This could be a MIPS instruction or a function call.  (If the result
-     * comes back in a register other than a0, you can override "result".)
+     * comes back in a register pair other than a0-a1, you can override "result".)
      *
      * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (a1).  Useful for integer division and modulus.
+     * vB (a2-a3).  Useful for integer division and modulus.
      *
      * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
      *      and-long/2addr, or-long/2addr, xor-long/2addr
-     *      rem-double/2addr
      */
     /* binop/2addr vA, vB */
     GET_OPA4(rOBJ)                         #  rOBJ <- A+
     GET_OPB(a1)                            #  a1 <- B
     EAS2(a1, rFP, a1)                      #  a1 <- &fp[B]
     EAS2(t0, rFP, rOBJ)                    #  t0 <- &fp[A]
-    LOAD64($arg2, $arg3, a1)               #  a2/a3 <- vBB/vBB+1
-    LOAD64($arg0, $arg1, t0)               #  a0/a1 <- vAA/vAA+1
+    LOAD64($arg2, $arg3, a1)               #  a2/a3 <- vB/vB+1
+    LOAD64($arg0, $arg1, t0)               #  a0/a1 <- vA/vA+1
     .if $chkzero
     or        t0, $arg2, $arg3             #  second arg (a2-a3) is zero?
     beqz      t0, common_errDivideByZero
@@ -28,6 +27,4 @@
     $preinstr                              #  optional op
     $instr                                 #  result <- op, a0-a3 changed
     GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG64($result0, $result1, rOBJ)   #  vAA/vAA+1 <- $result0/$result1
-    GOTO_OPCODE(t0)                        #  jump to next instruction
-    /* 12-15 instructions */
+    SET_VREG64_GOTO($result0, $result1, rOBJ, t0)   #  vA/vA+1 <- $result0/$result1
diff --git a/runtime/interpreter/mterp/mips/fbinop.S b/runtime/interpreter/mterp/mips/fbinop.S
index d0d39ae..6c1468c 100644
--- a/runtime/interpreter/mterp/mips/fbinop.S
+++ b/runtime/interpreter/mterp/mips/fbinop.S
@@ -6,7 +6,7 @@
 
     /* binop vAA, vBB, vCC */
     FETCH(a0, 1)                           #  a0 <- CCBB
-    GET_OPA(rOBJ)                          #  s5 <- AA
+    GET_OPA(rOBJ)                          #  rOBJ <- AA
     srl       a3, a0, 8                    #  a3 <- CC
     and       a2, a0, 255                  #  a2 <- BB
     GET_VREG_F(fa1, a3)                    #  a1 <- vCC
@@ -14,6 +14,5 @@
 
     FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
     $instr                                 #  f0 = result
-    SET_VREG_F(fv0, rOBJ)                  #  vAA <- fv0
     GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    GOTO_OPCODE(t0)                        #  jump to next instruction
+    SET_VREG_F_GOTO(fv0, rOBJ, t0)         #  vAA <- fv0
diff --git a/runtime/interpreter/mterp/mips/fbinop2addr.S b/runtime/interpreter/mterp/mips/fbinop2addr.S
index ccb67b1..2caaf9c 100644
--- a/runtime/interpreter/mterp/mips/fbinop2addr.S
+++ b/runtime/interpreter/mterp/mips/fbinop2addr.S
@@ -1,19 +1,18 @@
     /*
      * Generic 32-bit "/2addr" binary operation.  Provide an "instr"
-     * that specifies an instruction that performs "result = a0 op a1".
+     * that specifies an instruction that performs "fv0 = fa0 op fa1".
      * This could be an MIPS instruction or a function call.
      *
      * For: add-float/2addr, sub-float/2addr, mul-float/2addr,
-     * div-float/2addr, rem-float/2addr
+     *      div-float/2addr, rem-float/2addr
      */
     /* binop/2addr vA, vB */
-    GET_OPA4(rOBJ)                         #  t1 <- A+
+    GET_OPA4(rOBJ)                         #  rOBJ <- A+
     GET_OPB(a3)                            #  a3 <- B
     GET_VREG_F(fa0, rOBJ)
     GET_VREG_F(fa1, a3)
     FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
 
     $instr
-    SET_VREG_F(fv0, rOBJ)                  #  vAA <- result
     GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    GOTO_OPCODE(t0)                        #  jump to next instruction
+    SET_VREG_F_GOTO(fv0, rOBJ, t0)         #  vA <- result
diff --git a/runtime/interpreter/mterp/mips/fbinopWide.S b/runtime/interpreter/mterp/mips/fbinopWide.S
index 3be9325..a1fe91e 100644
--- a/runtime/interpreter/mterp/mips/fbinopWide.S
+++ b/runtime/interpreter/mterp/mips/fbinopWide.S
@@ -1,6 +1,6 @@
     /*
-     * Generic 64-bit binary operation.  Provide an "instr" line that
-     * specifies an instruction that performs "result = a0-a1 op a2-a3".
+     * Generic 64-bit floating-point binary operation.  Provide an "instr"
+     * line that specifies an instruction that performs "fv0 = fa0 op fa1".
      * This could be an MIPS instruction or a function call.
      *
      * for: add-double, sub-double, mul-double, div-double,
@@ -9,7 +9,7 @@
      */
     /* binop vAA, vBB, vCC */
     FETCH(a0, 1)                           #  a0 <- CCBB
-    GET_OPA(rOBJ)                          #  s5 <- AA
+    GET_OPA(rOBJ)                          #  rOBJ <- AA
     and       a2, a0, 255                  #  a2 <- BB
     srl       a3, a0, 8                    #  a3 <- CC
     EAS2(a2, rFP, a2)                      #  a2 <- &fp[BB]
@@ -19,10 +19,5 @@
 
     FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
     $instr
-    SET_VREG64_F(fv0, fv0f, rOBJ)
-    b         .L${opcode}_finish
-%break
-
-.L${opcode}_finish:
     GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    GOTO_OPCODE(t0)                        #  jump to next instruction
+    SET_VREG64_F_GOTO(fv0, fv0f, rOBJ, t0)  #  vAA/vAA+1 <- fv0
diff --git a/runtime/interpreter/mterp/mips/fbinopWide2addr.S b/runtime/interpreter/mterp/mips/fbinopWide2addr.S
index 8541f11..7303441 100644
--- a/runtime/interpreter/mterp/mips/fbinopWide2addr.S
+++ b/runtime/interpreter/mterp/mips/fbinopWide2addr.S
@@ -1,10 +1,11 @@
     /*
-     * Generic 64-bit "/2addr" binary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = a0-a1 op a2-a3".
+     * Generic 64-bit floating-point "/2addr" binary operation.
+     * Provide an "instr" line that specifies an instruction that
+     * performs "fv0 = fa0 op fa1".
      * This could be an MIPS instruction or a function call.
      *
      * For: add-double/2addr, sub-double/2addr, mul-double/2addr,
-     *  div-double/2addr, rem-double/2addr
+     *      div-double/2addr, rem-double/2addr
      */
     /* binop/2addr vA, vB */
     GET_OPA4(rOBJ)                         #  rOBJ <- A+
@@ -16,6 +17,5 @@
 
     FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
     $instr
-    SET_VREG64_F(fv0, fv0f, rOBJ)
     GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    GOTO_OPCODE(t0)                        #  jump to next instruction
+    SET_VREG64_F_GOTO(fv0, fv0f, rOBJ, t0)  #  vA/vA+1 <- fv0
diff --git a/runtime/interpreter/mterp/mips/footer.S b/runtime/interpreter/mterp/mips/footer.S
index 1363751..9909dfe 100644
--- a/runtime/interpreter/mterp/mips/footer.S
+++ b/runtime/interpreter/mterp/mips/footer.S
@@ -151,7 +151,7 @@
     REFRESH_IBASE()
     addu    a2, rINST, rINST            # a2<- byte offset
     FETCH_ADVANCE_INST_RB(a2)           # update rPC, load rINST
-    and     ra, (THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST)
+    and     ra, THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
     bnez    ra, .L_suspend_request_pending
     GET_INST_OPCODE(t0)                 # extract opcode from rINST
     GOTO_OPCODE(t0)                     # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips/funop.S b/runtime/interpreter/mterp/mips/funop.S
index bfb9346..b2b22c9 100644
--- a/runtime/interpreter/mterp/mips/funop.S
+++ b/runtime/interpreter/mterp/mips/funop.S
@@ -1,18 +1,15 @@
     /*
-     * Generic 32-bit unary operation.  Provide an "instr" line that
-     * specifies an instruction that performs "result = op a0".
+     * Generic 32-bit floating-point unary operation.  Provide an "instr"
+     * line that specifies an instruction that performs "fv0 = op fa0".
      * This could be a MIPS instruction or a function call.
      *
-     * for: int-to-float, float-to-int
+     * for: int-to-float
      */
     /* unop vA, vB */
     GET_OPB(a3)                            #  a3 <- B
-    GET_OPA4(rOBJ)                         #  t0 <- A+
+    GET_OPA4(rOBJ)                         #  rOBJ <- A+
     GET_VREG_F(fa0, a3)
     FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
     $instr
-
-.L${opcode}_set_vreg_f:
-    SET_VREG_F(fv0, rOBJ)
     GET_INST_OPCODE(t1)                    #  extract opcode from rINST
-    GOTO_OPCODE(t1)                        #  jump to next instruction
+    SET_VREG_F_GOTO(fv0, rOBJ, t1)         #  vA <- fv0
diff --git a/runtime/interpreter/mterp/mips/funopWide.S b/runtime/interpreter/mterp/mips/funopWide.S
deleted file mode 100644
index 3d4cf22..0000000
--- a/runtime/interpreter/mterp/mips/funopWide.S
+++ /dev/null
@@ -1,22 +0,0 @@
-%default {"preinstr":"", "ld_arg":"LOAD64_F(fa0, fa0f, a3)", "st_result":"SET_VREG64_F(fv0, fv0f, rOBJ)"}
-    /*
-     * Generic 64-bit unary operation.  Provide an "instr" line that
-     * specifies an instruction that performs "result = op a0/a1".
-     * This could be a MIPS instruction or a function call.
-     *
-     * long-to-double, double-to-long
-     */
-    /* unop vA, vB */
-    GET_OPA4(rOBJ)                         #  t1 <- A+
-    GET_OPB(a3)                            #  a3 <- B
-    EAS2(a3, rFP, a3)                      #  a3 <- &fp[B]
-    $ld_arg
-    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
-    $preinstr                              #  optional op
-    $instr                                 #  a0/a1 <- op, a2-a3 changed
-
-.L${opcode}_set_vreg:
-    $st_result                             #  vAA <- a0/a1
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    GOTO_OPCODE(t0)                        #  jump to next instruction
-    /* 12-13 instructions */
diff --git a/runtime/interpreter/mterp/mips/funopWider.S b/runtime/interpreter/mterp/mips/funopWider.S
index efb85f3..6862e24 100644
--- a/runtime/interpreter/mterp/mips/funopWider.S
+++ b/runtime/interpreter/mterp/mips/funopWider.S
@@ -1,10 +1,8 @@
-%default {"st_result":"SET_VREG64_F(fv0, fv0f, rOBJ)"}
     /*
-     * Generic 32bit-to-64bit unary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = op a0", where
-     * "result" is a 64-bit quantity in a0/a1.
+     * Generic 32bit-to-64bit floating-point unary operation.  Provide an "instr"
+     * line that specifies an instruction that performs "fv0 = op fa0".
      *
-     * For: int-to-double, float-to-long, float-to-double
+     * For: int-to-double, float-to-double
      */
     /* unop vA, vB */
     GET_OPA4(rOBJ)                         #  rOBJ <- A+
@@ -12,8 +10,5 @@
     GET_VREG_F(fa0, a3)
     FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
     $instr
-
-.L${opcode}_set_vreg:
-    $st_result                             #  vA/vA+1 <- a0/a1
     GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    GOTO_OPCODE(t0)                        #  jump to next instruction
+    SET_VREG64_F_GOTO(fv0, fv0f, rOBJ, t0) #  vA/vA+1 <- fv0
diff --git a/runtime/interpreter/mterp/mips/header.S b/runtime/interpreter/mterp/mips/header.S
index a3a6744..0ce7745 100644
--- a/runtime/interpreter/mterp/mips/header.S
+++ b/runtime/interpreter/mterp/mips/header.S
@@ -153,6 +153,58 @@
 #define fcc1   $$fcc1
 #endif
 
+#ifdef MIPS32REVGE2
+#define SEB(rd, rt) \
+    seb       rd, rt
+#define SEH(rd, rt) \
+    seh       rd, rt
+#define INSERT_HIGH_HALF(rd_lo, rt_hi) \
+    ins       rd_lo, rt_hi, 16, 16
+#else
+#define SEB(rd, rt) \
+    sll       rd, rt, 24; \
+    sra       rd, rd, 24
+#define SEH(rd, rt) \
+    sll       rd, rt, 16; \
+    sra       rd, rd, 16
+/* Clobbers rt_hi on pre-R2. */
+#define INSERT_HIGH_HALF(rd_lo, rt_hi) \
+    sll       rt_hi, rt_hi, 16; \
+    or        rd_lo, rt_hi
+#endif
+
+#ifdef FPU64
+#define MOVE_TO_FPU_HIGH(r, flo, fhi) \
+    mthc1     r, flo
+#else
+#define MOVE_TO_FPU_HIGH(r, flo, fhi) \
+    mtc1      r, fhi
+#endif
+
+#ifdef MIPS32REVGE6
+#define JR(rt) \
+    jic       rt, 0
+#define LSA(rd, rs, rt, sa) \
+    .if sa; \
+    lsa       rd, rs, rt, sa; \
+    .else; \
+    addu      rd, rs, rt; \
+    .endif
+#else
+#define JR(rt) \
+    jalr      zero, rt
+#define LSA(rd, rs, rt, sa) \
+    .if sa; \
+    .set      push; \
+    .set      noat; \
+    sll       AT, rs, sa; \
+    addu      rd, AT, rt; \
+    .set      pop; \
+    .else; \
+    addu      rd, rs, rt; \
+    .endif
+#endif
+
 /*
  * Instead of holding a pointer to the shadow frame, we keep rFP at the base of the vregs.  So,
  * to access other shadow frame fields, we need to use a backwards offset.  Define those here.
@@ -186,12 +238,12 @@
     sw        rPC, OFF_FP_DEX_PC_PTR(rFP)
 
 #define EXPORT_DEX_PC(tmp) \
-    lw   tmp, OFF_FP_CODE_ITEM(rFP) \
-    sw   rPC, OFF_FP_DEX_PC_PTR(rFP) \
-    addu tmp, CODEITEM_INSNS_OFFSET \
-    subu tmp, rPC, tmp \
-    sra  tmp, tmp, 1 \
-    sw   tmp, OFF_FP_DEX_PC(rFP)
+    lw        tmp, OFF_FP_CODE_ITEM(rFP); \
+    sw        rPC, OFF_FP_DEX_PC_PTR(rFP); \
+    addu      tmp, CODEITEM_INSNS_OFFSET; \
+    subu      tmp, rPC, tmp; \
+    sra       tmp, tmp, 1; \
+    sw        tmp, OFF_FP_DEX_PC(rFP)
 
 /*
  * Fetch the next instruction from rPC into rINST.  Does not advance rPC.
@@ -206,18 +258,11 @@
  * exception catch may miss.  (This also implies that it must come after
  * EXPORT_PC().)
  */
-#define FETCH_ADVANCE_INST(_count) lhu rINST, ((_count)*2)(rPC); \
+#define FETCH_ADVANCE_INST(_count) \
+    lhu       rINST, ((_count)*2)(rPC); \
     addu      rPC, rPC, ((_count) * 2)
 
 /*
- * The operation performed here is similar to FETCH_ADVANCE_INST, except the
- * src and dest registers are parameterized (not hard-wired to rPC and rINST).
- */
-#define PREFETCH_ADVANCE_INST(_dreg, _sreg, _count) \
-    lhu       _dreg, ((_count)*2)(_sreg) ;            \
-    addu      _sreg, _sreg, (_count)*2
-
-/*
  * Similar to FETCH_ADVANCE_INST, but does not update rPC.  Used to load
  * rINST ahead of possible exception point.  Be sure to manually advance rPC
  * later.
@@ -232,7 +277,8 @@
  * rPC to point to the next instruction.  "rd" must specify the distance
  * in bytes, *not* 16-bit code units, and may be a signed value.
  */
-#define FETCH_ADVANCE_INST_RB(rd) addu rPC, rPC, rd; \
+#define FETCH_ADVANCE_INST_RB(rd) \
+    addu      rPC, rPC, rd; \
     lhu       rINST, (rPC)
 
 /*
@@ -257,38 +303,75 @@
 #define GET_INST_OPCODE(rd) and rd, rINST, 0xFF
 
 /*
- * Put the prefetched instruction's opcode field into the specified register.
+ * Transform opcode into branch target address.
  */
-#define GET_PREFETCHED_OPCODE(dreg, sreg)   andi     dreg, sreg, 255
+#define GET_OPCODE_TARGET(rd) \
+    sll       rd, rd, ${handler_size_bits}; \
+    addu      rd, rIBASE, rd
 
 /*
  * Begin executing the opcode in rd.
  */
-#define GOTO_OPCODE(rd) sll rd, rd, ${handler_size_bits}; \
-    addu      rd, rIBASE, rd; \
-    jalr      zero, rd
-
-#define GOTO_OPCODE_BASE(_base, rd)  sll rd, rd, ${handler_size_bits}; \
-    addu      rd, _base, rd; \
-    jalr      zero, rd
+#define GOTO_OPCODE(rd) \
+    GET_OPCODE_TARGET(rd); \
+    JR(rd)
 
 /*
  * Get/set the 32-bit value from a Dalvik register.
  */
 #define GET_VREG(rd, rix) LOAD_eas2(rd, rFP, rix)
 
-#define GET_VREG_F(rd, rix) EAS2(AT, rFP, rix); \
-    .set noat; l.s rd, (AT); .set at
+#define GET_VREG_F(rd, rix) \
+    .set noat; \
+    EAS2(AT, rFP, rix); \
+    l.s       rd, (AT); \
+    .set at
 
-#define SET_VREG(rd, rix) .set noat; \
+#ifdef MIPS32REVGE6
+#define SET_VREG(rd, rix) \
+    lsa       t8, rix, rFP, 2; \
+    sw        rd, 0(t8); \
+    lsa       t8, rix, rREFS, 2; \
+    sw        zero, 0(t8)
+#else
+#define SET_VREG(rd, rix) \
+    .set noat; \
     sll       AT, rix, 2; \
     addu      t8, rFP, AT; \
     sw        rd, 0(t8); \
     addu      t8, rREFS, AT; \
     .set at; \
     sw        zero, 0(t8)
+#endif
 
-#define SET_VREG64(rlo, rhi, rix) .set noat; \
+#ifdef MIPS32REVGE6
+#define SET_VREG_OBJECT(rd, rix) \
+    lsa       t8, rix, rFP, 2; \
+    sw        rd, 0(t8); \
+    lsa       t8, rix, rREFS, 2; \
+    sw        rd, 0(t8)
+#else
+#define SET_VREG_OBJECT(rd, rix) \
+    .set noat; \
+    sll       AT, rix, 2; \
+    addu      t8, rFP, AT; \
+    sw        rd, 0(t8); \
+    addu      t8, rREFS, AT; \
+    .set at; \
+    sw        rd, 0(t8)
+#endif
+
+#ifdef MIPS32REVGE6
+#define SET_VREG64(rlo, rhi, rix) \
+    lsa       t8, rix, rFP, 2; \
+    sw        rlo, 0(t8); \
+    sw        rhi, 4(t8); \
+    lsa       t8, rix, rREFS, 2; \
+    sw        zero, 0(t8); \
+    sw        zero, 4(t8)
+#else
+#define SET_VREG64(rlo, rhi, rix) \
+    .set noat; \
     sll       AT, rix, 2; \
     addu      t8, rFP, AT; \
     sw        rlo, 0(t8); \
@@ -297,9 +380,39 @@
     .set at; \
     sw        zero, 0(t8); \
     sw        zero, 4(t8)
+#endif
 
-#ifdef FPU64
-#define SET_VREG64_F(rlo, rhi, rix) .set noat; \
+#ifdef MIPS32REVGE6
+#define SET_VREG_F(rd, rix) \
+    lsa       t8, rix, rFP, 2; \
+    s.s       rd, 0(t8); \
+    lsa       t8, rix, rREFS, 2; \
+    sw        zero, 0(t8)
+#else
+#define SET_VREG_F(rd, rix) \
+    .set noat; \
+    sll       AT, rix, 2; \
+    addu      t8, rFP, AT; \
+    s.s       rd, 0(t8); \
+    addu      t8, rREFS, AT; \
+    .set at; \
+    sw        zero, 0(t8)
+#endif
+
+#ifdef MIPS32REVGE6
+#define SET_VREG64_F(rlo, rhi, rix) \
+    lsa       t8, rix, rFP, 2; \
+    .set noat; \
+    mfhc1     AT, rlo; \
+    s.s       rlo, 0(t8); \
+    sw        AT, 4(t8); \
+    .set at; \
+    lsa       t8, rix, rREFS, 2; \
+    sw        zero, 0(t8); \
+    sw        zero, 4(t8)
+#elif defined(FPU64)
+#define SET_VREG64_F(rlo, rhi, rix) \
+    .set noat; \
     sll       AT, rix, 2; \
     addu      t8, rREFS, AT; \
     sw        zero, 0(t8); \
@@ -310,7 +423,8 @@
     .set at; \
     s.s       rlo, 0(t8)
 #else
-#define SET_VREG64_F(rlo, rhi, rix) .set noat; \
+#define SET_VREG64_F(rlo, rhi, rix) \
+    .set noat; \
     sll       AT, rix, 2; \
     addu      t8, rFP, AT; \
     s.s       rlo, 0(t8); \
@@ -321,18 +435,21 @@
     sw        zero, 4(t8)
 #endif
 
-#define SET_VREG_OBJECT(rd, rix) .set noat; \
-    sll       AT, rix, 2; \
-    addu      t8, rFP, AT; \
-    sw        rd, 0(t8); \
-    addu      t8, rREFS, AT; \
-    .set at; \
-    sw        rd, 0(t8)
-
 /* Combination of the SET_VREG and GOTO_OPCODE functions to save 1 instruction */
-#define SET_VREG_GOTO(rd, rix, dst) .set noreorder; \
-    sll       dst, dst, ${handler_size_bits}; \
-    addu      dst, rIBASE, dst; \
+#ifdef MIPS32REVGE6
+#define SET_VREG_GOTO(rd, rix, dst) \
+    .set noreorder; \
+    GET_OPCODE_TARGET(dst); \
+    lsa       t8, rix, rFP, 2; \
+    sw        rd, 0(t8); \
+    lsa       t8, rix, rREFS, 2; \
+    jalr      zero, dst; \
+    sw        zero, 0(t8); \
+    .set reorder
+#else
+#define SET_VREG_GOTO(rd, rix, dst) \
+    .set noreorder; \
+    GET_OPCODE_TARGET(dst); \
     .set noat; \
     sll       AT, rix, 2; \
     addu      t8, rFP, AT; \
@@ -342,11 +459,51 @@
     jalr      zero, dst; \
     sw        zero, 0(t8); \
     .set reorder
+#endif
+
+/* Combination of the SET_VREG_OBJECT and GOTO_OPCODE functions to save 1 instruction */
+#ifdef MIPS32REVGE6
+#define SET_VREG_OBJECT_GOTO(rd, rix, dst) \
+    .set noreorder; \
+    GET_OPCODE_TARGET(dst); \
+    lsa       t8, rix, rFP, 2; \
+    sw        rd, 0(t8); \
+    lsa       t8, rix, rREFS, 2; \
+    jalr      zero, dst; \
+    sw        rd, 0(t8); \
+    .set reorder
+#else
+#define SET_VREG_OBJECT_GOTO(rd, rix, dst) \
+    .set noreorder; \
+    GET_OPCODE_TARGET(dst); \
+    .set noat; \
+    sll       AT, rix, 2; \
+    addu      t8, rFP, AT; \
+    sw        rd, 0(t8); \
+    addu      t8, rREFS, AT; \
+    .set at; \
+    jalr      zero, dst; \
+    sw        rd, 0(t8); \
+    .set reorder
+#endif
 
 /* Combination of the SET_VREG64 and GOTO_OPCODE functions to save 1 instruction */
-#define SET_VREG64_GOTO(rlo, rhi, rix, dst) .set noreorder; \
-    sll       dst, dst, ${handler_size_bits}; \
-    addu      dst, rIBASE, dst; \
+#ifdef MIPS32REVGE6
+#define SET_VREG64_GOTO(rlo, rhi, rix, dst) \
+    .set noreorder; \
+    GET_OPCODE_TARGET(dst); \
+    lsa       t8, rix, rFP, 2; \
+    sw        rlo, 0(t8); \
+    sw        rhi, 4(t8); \
+    lsa       t8, rix, rREFS, 2; \
+    sw        zero, 0(t8); \
+    jalr      zero, dst; \
+    sw        zero, 4(t8); \
+    .set reorder
+#else
+#define SET_VREG64_GOTO(rlo, rhi, rix, dst) \
+    .set noreorder; \
+    GET_OPCODE_TARGET(dst); \
     .set noat; \
     sll       AT, rix, 2; \
     addu      t8, rFP, AT; \
@@ -358,14 +515,82 @@
     jalr      zero, dst; \
     sw        zero, 4(t8); \
     .set reorder
+#endif
 
-#define SET_VREG_F(rd, rix) .set noat; \
+/* Combination of the SET_VREG_F and GOTO_OPCODE functions to save 1 instruction */
+#ifdef MIPS32REVGE6
+#define SET_VREG_F_GOTO(rd, rix, dst) \
+    .set noreorder; \
+    GET_OPCODE_TARGET(dst); \
+    lsa       t8, rix, rFP, 2; \
+    s.s       rd, 0(t8); \
+    lsa       t8, rix, rREFS, 2; \
+    jalr      zero, dst; \
+    sw        zero, 0(t8); \
+    .set reorder
+#else
+#define SET_VREG_F_GOTO(rd, rix, dst) \
+    .set noreorder; \
+    GET_OPCODE_TARGET(dst); \
+    .set noat; \
     sll       AT, rix, 2; \
     addu      t8, rFP, AT; \
     s.s       rd, 0(t8); \
     addu      t8, rREFS, AT; \
     .set at; \
-    sw        zero, 0(t8)
+    jalr      zero, dst; \
+    sw        zero, 0(t8); \
+    .set reorder
+#endif
+
+/* Combination of the SET_VREG64_F and GOTO_OPCODE functions to save 1 instruction */
+#ifdef MIPS32REVGE6
+#define SET_VREG64_F_GOTO(rlo, rhi, rix, dst) \
+    .set noreorder; \
+    GET_OPCODE_TARGET(dst); \
+    lsa       t8, rix, rFP, 2; \
+    .set noat; \
+    mfhc1     AT, rlo; \
+    s.s       rlo, 0(t8); \
+    sw        AT, 4(t8); \
+    .set at; \
+    lsa       t8, rix, rREFS, 2; \
+    sw        zero, 0(t8); \
+    jalr      zero, dst; \
+    sw        zero, 4(t8); \
+    .set reorder
+#elif defined(FPU64)
+#define SET_VREG64_F_GOTO(rlo, rhi, rix, dst) \
+    .set noreorder; \
+    GET_OPCODE_TARGET(dst); \
+    .set noat; \
+    sll       AT, rix, 2; \
+    addu      t8, rREFS, AT; \
+    sw        zero, 0(t8); \
+    sw        zero, 4(t8); \
+    addu      t8, rFP, AT; \
+    mfhc1     AT, rlo; \
+    sw        AT, 4(t8); \
+    .set at; \
+    jalr      zero, dst; \
+    s.s       rlo, 0(t8); \
+    .set reorder
+#else
+#define SET_VREG64_F_GOTO(rlo, rhi, rix, dst) \
+    .set noreorder; \
+    GET_OPCODE_TARGET(dst); \
+    .set noat; \
+    sll       AT, rix, 2; \
+    addu      t8, rFP, AT; \
+    s.s       rlo, 0(t8); \
+    s.s       rhi, 4(t8); \
+    addu      t8, rREFS, AT; \
+    .set at; \
+    sw        zero, 0(t8); \
+    jalr      zero, dst; \
+    sw        zero, 4(t8); \
+    .set reorder
+#endif
 
 #define GET_OPA(rd) srl rd, rINST, 8
 #ifdef MIPS32REVGE2
@@ -376,60 +601,60 @@
 #define GET_OPB(rd) srl rd, rINST, 12
 
 /*
- * Form an Effective Address rd = rbase + roff<<n;
- * Uses reg AT
+ * Form an Effective Address rd = rbase + roff<<shift;
+ * Uses reg AT on pre-R6.
  */
-#define EASN(rd, rbase, roff, rshift) .set noat; \
-    sll       AT, roff, rshift; \
-    addu      rd, rbase, AT; \
-    .set at
+#define EASN(rd, rbase, roff, shift) LSA(rd, roff, rbase, shift)
 
 #define EAS1(rd, rbase, roff) EASN(rd, rbase, roff, 1)
 #define EAS2(rd, rbase, roff) EASN(rd, rbase, roff, 2)
 #define EAS3(rd, rbase, roff) EASN(rd, rbase, roff, 3)
 #define EAS4(rd, rbase, roff) EASN(rd, rbase, roff, 4)
 
-/*
- * Form an Effective Shift Right rd = rbase + roff>>n;
- * Uses reg AT
- */
-#define ESRN(rd, rbase, roff, rshift) .set noat; \
-    srl       AT, roff, rshift; \
-    addu      rd, rbase, AT; \
+#define LOAD_eas2(rd, rbase, roff) \
+    .set noat; \
+    EAS2(AT, rbase, roff); \
+    lw        rd, 0(AT); \
     .set at
 
-#define LOAD_eas2(rd, rbase, roff) EAS2(AT, rbase, roff); \
-    .set noat; lw rd, 0(AT); .set at
-
-#define STORE_eas2(rd, rbase, roff) EAS2(AT, rbase, roff); \
-    .set noat; sw rd, 0(AT); .set at
+#define STORE_eas2(rd, rbase, roff) \
+    .set noat; \
+    EAS2(AT, rbase, roff); \
+    sw        rd, 0(AT); \
+    .set at
 
 #define LOAD_RB_OFF(rd, rbase, off) lw rd, off(rbase)
 #define STORE_RB_OFF(rd, rbase, off) sw rd, off(rbase)
 
-#define STORE64_off(rlo, rhi, rbase, off) sw rlo, off(rbase); \
+#define STORE64_off(rlo, rhi, rbase, off) \
+    sw        rlo, off(rbase); \
     sw        rhi, (off+4)(rbase)
-#define LOAD64_off(rlo, rhi, rbase, off) lw rlo, off(rbase); \
+#define LOAD64_off(rlo, rhi, rbase, off) \
+    lw        rlo, off(rbase); \
     lw        rhi, (off+4)(rbase)
 
 #define STORE64(rlo, rhi, rbase) STORE64_off(rlo, rhi, rbase, 0)
 #define LOAD64(rlo, rhi, rbase) LOAD64_off(rlo, rhi, rbase, 0)
 
 #ifdef FPU64
-#define STORE64_off_F(rlo, rhi, rbase, off) s.s rlo, off(rbase); \
+#define STORE64_off_F(rlo, rhi, rbase, off) \
+    s.s       rlo, off(rbase); \
     .set noat; \
     mfhc1     AT, rlo; \
     sw        AT, (off+4)(rbase); \
     .set at
-#define LOAD64_off_F(rlo, rhi, rbase, off) l.s rlo, off(rbase); \
+#define LOAD64_off_F(rlo, rhi, rbase, off) \
+    l.s       rlo, off(rbase); \
     .set noat; \
     lw        AT, (off+4)(rbase); \
     mthc1     AT, rlo; \
     .set at
 #else
-#define STORE64_off_F(rlo, rhi, rbase, off) s.s rlo, off(rbase); \
+#define STORE64_off_F(rlo, rhi, rbase, off) \
+    s.s       rlo, off(rbase); \
     s.s       rhi, (off+4)(rbase)
-#define LOAD64_off_F(rlo, rhi, rbase, off) l.s rlo, off(rbase); \
+#define LOAD64_off_F(rlo, rhi, rbase, off) \
+    l.s       rlo, off(rbase); \
     l.s       rhi, (off+4)(rbase)
 #endif
 
@@ -490,3 +715,11 @@
 
 #define REFRESH_IBASE() \
     lw        rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)
+
+/* Constants for float/double_to_int/long conversions */
+#define INT_MIN                 0x80000000
+#define INT_MIN_AS_FLOAT        0xCF000000
+#define INT_MIN_AS_DOUBLE_HIGH  0xC1E00000
+#define LONG_MIN_HIGH           0x80000000
+#define LONG_MIN_AS_FLOAT       0xDF000000
+#define LONG_MIN_AS_DOUBLE_HIGH 0xC3E00000
diff --git a/runtime/interpreter/mterp/mips/invoke.S b/runtime/interpreter/mterp/mips/invoke.S
index bcd3a57..db3b8af 100644
--- a/runtime/interpreter/mterp/mips/invoke.S
+++ b/runtime/interpreter/mterp/mips/invoke.S
@@ -2,8 +2,8 @@
     /*
      * Generic invoke handler wrapper.
      */
-    # op vB, {vD, vE, vF, vG, vA}, class   /* CCCC */
-    # op {vCCCC..v(CCCC+AA-1)}, meth       /* BBBB */
+    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
     .extern $helper
     EXPORT_PC()
     move    a0, rSELF
diff --git a/runtime/interpreter/mterp/mips/op_aget.S b/runtime/interpreter/mterp/mips/op_aget.S
index 8aa8992..e88402c 100644
--- a/runtime/interpreter/mterp/mips/op_aget.S
+++ b/runtime/interpreter/mterp/mips/op_aget.S
@@ -19,11 +19,7 @@
     # null array object?
     beqz      a0, common_errNullObject     #  yes, bail
     LOAD_base_offMirrorArray_length(a3, a0) #  a3 <- arrayObj->length
-    .if $shift
     EASN(a0, a0, a1, $shift)               #  a0 <- arrayObj + index*width
-    .else
-    addu      a0, a0, a1
-    .endif
     # a1 >= a3; compare unsigned index
     bgeu      a1, a3, common_errArrayIndex #  index >= length, bail
     FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
diff --git a/runtime/interpreter/mterp/mips/op_aget_object.S b/runtime/interpreter/mterp/mips/op_aget_object.S
index e3ab9d8..9c49dfe 100644
--- a/runtime/interpreter/mterp/mips/op_aget_object.S
+++ b/runtime/interpreter/mterp/mips/op_aget_object.S
@@ -14,7 +14,6 @@
     lw   a1, THREAD_EXCEPTION_OFFSET(rSELF)
     PREFETCH_INST(2)                       #  load rINST
     bnez a1, MterpException
-    SET_VREG_OBJECT(v0, rOBJ)              #  vAA <- v0
     ADVANCE(2)                             #  advance rPC
     GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    GOTO_OPCODE(t0)                        #  jump to next instruction
+    SET_VREG_OBJECT_GOTO(v0, rOBJ, t0)     #  vAA <- v0
diff --git a/runtime/interpreter/mterp/mips/op_aput.S b/runtime/interpreter/mterp/mips/op_aput.S
index 53d6ae0..46dcaee 100644
--- a/runtime/interpreter/mterp/mips/op_aput.S
+++ b/runtime/interpreter/mterp/mips/op_aput.S
@@ -17,14 +17,11 @@
     # null array object?
     beqz      a0, common_errNullObject     #  yes, bail
     LOAD_base_offMirrorArray_length(a3, a0) #  a3 <- arrayObj->length
-    .if $shift
     EASN(a0, a0, a1, $shift)               #  a0 <- arrayObj + index*width
-    .else
-    addu      a0, a0, a1
-    .endif
     bgeu      a1, a3, common_errArrayIndex #  index >= length, bail
     FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
     GET_VREG(a2, rOBJ)                     #  a2 <- vAA
     GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GET_OPCODE_TARGET(t0)
     $store a2, $data_offset(a0)            #  vBB[vCC] <- a2
-    GOTO_OPCODE(t0)                        #  jump to next instruction
+    JR(t0)                                 #  jump to next instruction
diff --git a/runtime/interpreter/mterp/mips/op_aput_wide.S b/runtime/interpreter/mterp/mips/op_aput_wide.S
index ef99261..c3cff56 100644
--- a/runtime/interpreter/mterp/mips/op_aput_wide.S
+++ b/runtime/interpreter/mterp/mips/op_aput_wide.S
@@ -1,7 +1,5 @@
     /*
      * Array put, 64 bits.  vBB[vCC] <- vAA.
-     *
-     * Arrays of long/double are 64-bit aligned, so it's okay to use STRD.
      */
     /* aput-wide vAA, vBB, vCC */
     FETCH(a0, 1)                           #  a0 <- CCBB
@@ -21,5 +19,6 @@
     FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
     LOAD64(a2, a3, rOBJ)                   #  a2/a3 <- vAA/vAA+1
     GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GET_OPCODE_TARGET(t0)
     STORE64_off(a2, a3, a0, MIRROR_WIDE_ARRAY_DATA_OFFSET) #  a2/a3 <- vBB[vCC]
-    GOTO_OPCODE(t0)                        #  jump to next instruction
+    JR(t0)                                 #  jump to next instruction
diff --git a/runtime/interpreter/mterp/mips/op_array_length.S b/runtime/interpreter/mterp/mips/op_array_length.S
index 2b4a86f..ae2fe68 100644
--- a/runtime/interpreter/mterp/mips/op_array_length.S
+++ b/runtime/interpreter/mterp/mips/op_array_length.S
@@ -1,6 +1,7 @@
     /*
      * Return the length of an array.
      */
+    /* array-length vA, vB */
     GET_OPB(a1)                            #  a1 <- B
     GET_OPA4(a2)                           #  a2 <- A+
     GET_VREG(a0, a1)                       #  a0 <- vB (object ref)
diff --git a/runtime/interpreter/mterp/mips/op_check_cast.S b/runtime/interpreter/mterp/mips/op_check_cast.S
index 9a6cefa..3875ce6 100644
--- a/runtime/interpreter/mterp/mips/op_check_cast.S
+++ b/runtime/interpreter/mterp/mips/op_check_cast.S
@@ -1,7 +1,7 @@
     /*
      * Check to see if a cast from one class to another is allowed.
      */
-    # check-cast vAA, class                /* BBBB */
+    /* check-cast vAA, class@BBBB */
     EXPORT_PC()
     FETCH(a0, 1)                           #  a0 <- BBBB
     GET_OPA(a1)                            #  a1 <- AA
diff --git a/runtime/interpreter/mterp/mips/op_cmpg_double.S b/runtime/interpreter/mterp/mips/op_cmpg_double.S
index e7965a7..b2e7532 100644
--- a/runtime/interpreter/mterp/mips/op_cmpg_double.S
+++ b/runtime/interpreter/mterp/mips/op_cmpg_double.S
@@ -1 +1 @@
-%include "mips/op_cmpl_double.S" { "naninst":"li rTEMP, 1" }
+%include "mips/op_cmpl_double.S" { "gt_bias":"1" }
diff --git a/runtime/interpreter/mterp/mips/op_cmpg_float.S b/runtime/interpreter/mterp/mips/op_cmpg_float.S
index 53519a6..76550b5 100644
--- a/runtime/interpreter/mterp/mips/op_cmpg_float.S
+++ b/runtime/interpreter/mterp/mips/op_cmpg_float.S
@@ -1 +1 @@
-%include "mips/op_cmpl_float.S" { "naninst":"li rTEMP, 1" }
+%include "mips/op_cmpl_float.S" { "gt_bias":"1" }
diff --git a/runtime/interpreter/mterp/mips/op_cmpl_double.S b/runtime/interpreter/mterp/mips/op_cmpl_double.S
index db89242..369e5b3 100644
--- a/runtime/interpreter/mterp/mips/op_cmpl_double.S
+++ b/runtime/interpreter/mterp/mips/op_cmpl_double.S
@@ -1,53 +1,51 @@
-%default { "naninst":"li rTEMP, -1" }
+%default { "gt_bias":"0" }
     /*
      * Compare two floating-point values. Puts 0(==), 1(>), or -1(<)
-     * into the destination register (rTEMP) based on the comparison results.
-     *
-     * Provide a "naninst" instruction that puts 1 or -1 into rTEMP depending
-     * on what value we'd like to return when one of the operands is NaN.
-     *
-     * See op_cmpl_float for more details.
+     * into the destination register based on the comparison results.
      *
      * For: cmpl-double, cmpg-double
      */
     /* op vAA, vBB, vCC */
 
     FETCH(a0, 1)                           #  a0 <- CCBB
-    and       rOBJ, a0, 255                #  s5 <- BB
+    and       rOBJ, a0, 255                #  rOBJ <- BB
     srl       t0, a0, 8                    #  t0 <- CC
-    EAS2(rOBJ, rFP, rOBJ)                  #  s5 <- &fp[BB]
+    EAS2(rOBJ, rFP, rOBJ)                  #  rOBJ <- &fp[BB]
     EAS2(t0, rFP, t0)                      #  t0 <- &fp[CC]
     LOAD64_F(ft0, ft0f, rOBJ)
     LOAD64_F(ft1, ft1f, t0)
 #ifdef MIPS32REVGE6
-    cmp.lt.d  ft2, ft0, ft1
-    li        rTEMP, -1
-    bc1nez    ft2, .L${opcode}_finish
-    cmp.lt.d  ft2, ft1, ft0
-    li        rTEMP, 1
-    bc1nez    ft2, .L${opcode}_finish
     cmp.eq.d  ft2, ft0, ft1
     li        rTEMP, 0
-    bc1nez    ft2, .L${opcode}_finish
-    b         .L${opcode}_nan
-#else
-    c.olt.d   fcc0, ft0, ft1
+    bc1nez    ft2, 1f                      # done if vBB == vCC (ordered)
+    .if $gt_bias
+    cmp.lt.d  ft2, ft0, ft1
     li        rTEMP, -1
-    bc1t      fcc0, .L${opcode}_finish
-    c.olt.d   fcc0, ft1, ft0
+    bc1nez    ft2, 1f                      # done if vBB < vCC (ordered)
+    li        rTEMP, 1                     # vBB > vCC or unordered
+    .else
+    cmp.lt.d  ft2, ft1, ft0
     li        rTEMP, 1
-    bc1t      fcc0, .L${opcode}_finish
+    bc1nez    ft2, 1f                      # done if vBB > vCC (ordered)
+    li        rTEMP, -1                    # vBB < vCC or unordered
+    .endif
+#else
     c.eq.d    fcc0, ft0, ft1
     li        rTEMP, 0
-    bc1t      fcc0, .L${opcode}_finish
-    b         .L${opcode}_nan
+    bc1t      fcc0, 1f                     # done if vBB == vCC (ordered)
+    .if $gt_bias
+    c.olt.d   fcc0, ft0, ft1
+    li        rTEMP, -1
+    bc1t      fcc0, 1f                     # done if vBB < vCC (ordered)
+    li        rTEMP, 1                     # vBB > vCC or unordered
+    .else
+    c.olt.d   fcc0, ft1, ft0
+    li        rTEMP, 1
+    bc1t      fcc0, 1f                     # done if vBB > vCC (ordered)
+    li        rTEMP, -1                    # vBB < vCC or unordered
+    .endif
 #endif
-%break
-
-.L${opcode}_nan:
-    $naninst
-
-.L${opcode}_finish:
+1:
     GET_OPA(rOBJ)
     FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
     GET_INST_OPCODE(t0)                    #  extract opcode from rINST
diff --git a/runtime/interpreter/mterp/mips/op_cmpl_float.S b/runtime/interpreter/mterp/mips/op_cmpl_float.S
index b8c0961..1dd5506 100644
--- a/runtime/interpreter/mterp/mips/op_cmpl_float.S
+++ b/runtime/interpreter/mterp/mips/op_cmpl_float.S
@@ -1,60 +1,49 @@
-%default { "naninst":"li rTEMP, -1" }
+%default { "gt_bias":"0" }
     /*
-     * Compare two floating-point values.  Puts 0, 1, or -1 into the
-     * destination register rTEMP based on the results of the comparison.
-     *
-     * Provide a "naninst" instruction that puts 1 or -1 into rTEMP depending
-     * on what value we'd like to return when one of the operands is NaN.
-     *
-     * The operation we're implementing is:
-     *   if (x == y)
-     *     return 0;
-     *   else if (x < y)
-     *     return -1;
-     *   else if (x > y)
-     *     return 1;
-     *   else
-     *     return {-1 or 1};  // one or both operands was NaN
+     * Compare two floating-point values. Puts 0(==), 1(>), or -1(<)
+     * into the destination register based on the comparison results.
      *
      * for: cmpl-float, cmpg-float
      */
     /* op vAA, vBB, vCC */
 
-    /* "clasic" form */
     FETCH(a0, 1)                           #  a0 <- CCBB
     and       a2, a0, 255                  #  a2 <- BB
     srl       a3, a0, 8
     GET_VREG_F(ft0, a2)
     GET_VREG_F(ft1, a3)
 #ifdef MIPS32REVGE6
-    cmp.lt.s  ft2, ft0, ft1               # Is ft0 < ft1
-    li        rTEMP, -1
-    bc1nez    ft2, .L${opcode}_finish
-    cmp.lt.s  ft2, ft1, ft0
-    li        rTEMP, 1
-    bc1nez    ft2, .L${opcode}_finish
     cmp.eq.s  ft2, ft0, ft1
     li        rTEMP, 0
-    bc1nez    ft2, .L${opcode}_finish
-    b         .L${opcode}_nan
-#else
-    c.olt.s   fcc0, ft0, ft1               # Is ft0 < ft1
+    bc1nez    ft2, 1f                      # done if vBB == vCC (ordered)
+    .if $gt_bias
+    cmp.lt.s  ft2, ft0, ft1
     li        rTEMP, -1
-    bc1t      fcc0, .L${opcode}_finish
-    c.olt.s   fcc0, ft1, ft0
+    bc1nez    ft2, 1f                      # done if vBB < vCC (ordered)
+    li        rTEMP, 1                     # vBB > vCC or unordered
+    .else
+    cmp.lt.s  ft2, ft1, ft0
     li        rTEMP, 1
-    bc1t      fcc0, .L${opcode}_finish
+    bc1nez    ft2, 1f                      # done if vBB > vCC (ordered)
+    li        rTEMP, -1                    # vBB < vCC or unordered
+    .endif
+#else
     c.eq.s    fcc0, ft0, ft1
     li        rTEMP, 0
-    bc1t      fcc0, .L${opcode}_finish
-    b         .L${opcode}_nan
+    bc1t      fcc0, 1f                     # done if vBB == vCC (ordered)
+    .if $gt_bias
+    c.olt.s   fcc0, ft0, ft1
+    li        rTEMP, -1
+    bc1t      fcc0, 1f                     # done if vBB < vCC (ordered)
+    li        rTEMP, 1                     # vBB > vCC or unordered
+    .else
+    c.olt.s   fcc0, ft1, ft0
+    li        rTEMP, 1
+    bc1t      fcc0, 1f                     # done if vBB > vCC (ordered)
+    li        rTEMP, -1                    # vBB < vCC or unordered
+    .endif
 #endif
-%break
-
-.L${opcode}_nan:
-    $naninst
-
-.L${opcode}_finish:
+1:
     GET_OPA(rOBJ)
     FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
     GET_INST_OPCODE(t0)                    #  extract opcode from rINST
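
Note on the comparison semantics implemented above: the gt_bias template parameter distinguishes cmpg-* (an unordered comparison yields +1) from cmpl-* (an unordered comparison yields -1), which is what the removed naninst parameter used to express. A minimal Java sketch of the intended result, purely illustrative and not part of this patch:

    // Reference semantics for cmpl-float/cmpg-float (the double variants are analogous).
    // gtBias == true models cmpg-*, gtBias == false models cmpl-*.
    static int compareFloat(float vBB, float vCC, boolean gtBias) {
        if (vBB == vCC) return 0;            // ordered, equal
        if (vBB < vCC)  return -1;           // ordered, less than
        if (vBB > vCC)  return 1;            // ordered, greater than
        return gtBias ? 1 : -1;              // unordered: at least one operand is NaN
    }
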
diff --git a/runtime/interpreter/mterp/mips/op_const.S b/runtime/interpreter/mterp/mips/op_const.S
index c505761..bd9f873 100644
--- a/runtime/interpreter/mterp/mips/op_const.S
+++ b/runtime/interpreter/mterp/mips/op_const.S
@@ -1,9 +1,8 @@
-    # const vAA,                           /* +BBBBbbbb */
+    /* const vAA, +BBBBbbbb */
     GET_OPA(a3)                            #  a3 <- AA
     FETCH(a0, 1)                           #  a0 <- bbbb (low)
     FETCH(a1, 2)                           #  a1 <- BBBB (high)
     FETCH_ADVANCE_INST(3)                  #  advance rPC, load rINST
-    sll       a1, a1, 16
-    or        a0, a1, a0                   #  a0 <- BBBBbbbb
+    INSERT_HIGH_HALF(a0, a1)               #  a0 <- BBBBbbbb
     GET_INST_OPCODE(t0)                    #  extract opcode from rINST
     SET_VREG_GOTO(a0, a3, t0)              #  vAA <- a0
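
INSERT_HIGH_HALF(a0, a1) replaces the former sll/or pair: the high 16-bit code unit is shifted left by 16 and merged into the low code unit already held in a0. A hypothetical Java helper showing just that decoding, for illustration only:

    // Merge the high 16-bit code unit into the low one: a0 <- BBBBbbbb.
    static int insertHighHalf(int low16, int high16) {
        return (low16 & 0xFFFF) | (high16 << 16);
    }
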
diff --git a/runtime/interpreter/mterp/mips/op_const_16.S b/runtime/interpreter/mterp/mips/op_const_16.S
index 5e47633..2ffb30f 100644
--- a/runtime/interpreter/mterp/mips/op_const_16.S
+++ b/runtime/interpreter/mterp/mips/op_const_16.S
@@ -1,4 +1,4 @@
-    # const/16 vAA,                        /* +BBBB */
+    /* const/16 vAA, +BBBB */
     FETCH_S(a0, 1)                         #  a0 <- ssssBBBB (sign-extended)
     GET_OPA(a3)                            #  a3 <- AA
     FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
diff --git a/runtime/interpreter/mterp/mips/op_const_4.S b/runtime/interpreter/mterp/mips/op_const_4.S
index 8b662f9..6866c78 100644
--- a/runtime/interpreter/mterp/mips/op_const_4.S
+++ b/runtime/interpreter/mterp/mips/op_const_4.S
@@ -1,4 +1,4 @@
-    # const/4 vA,                          /* +B */
+    /* const/4 vA, +B */
     sll       a1, rINST, 16                #  a1 <- Bxxx0000
     GET_OPA(a0)                            #  a0 <- A+
     FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
diff --git a/runtime/interpreter/mterp/mips/op_const_class.S b/runtime/interpreter/mterp/mips/op_const_class.S
index 7202b11..9adea44 100644
--- a/runtime/interpreter/mterp/mips/op_const_class.S
+++ b/runtime/interpreter/mterp/mips/op_const_class.S
@@ -1,4 +1,4 @@
-    # const/class vAA, Class               /* BBBB */
+    /* const/class vAA, class@BBBB */
     EXPORT_PC()
     FETCH(a0, 1)                        # a0 <- BBBB
     GET_OPA(a1)                         # a1 <- AA
diff --git a/runtime/interpreter/mterp/mips/op_const_high16.S b/runtime/interpreter/mterp/mips/op_const_high16.S
index 36c1c35..5162402 100644
--- a/runtime/interpreter/mterp/mips/op_const_high16.S
+++ b/runtime/interpreter/mterp/mips/op_const_high16.S
@@ -1,4 +1,4 @@
-    # const/high16 vAA,                    /* +BBBB0000 */
+    /* const/high16 vAA, +BBBB0000 */
     FETCH(a0, 1)                           #  a0 <- 0000BBBB (zero-extended)
     GET_OPA(a3)                            #  a3 <- AA
     sll       a0, a0, 16                   #  a0 <- BBBB0000
diff --git a/runtime/interpreter/mterp/mips/op_const_string.S b/runtime/interpreter/mterp/mips/op_const_string.S
index d8eeb46..006e114 100644
--- a/runtime/interpreter/mterp/mips/op_const_string.S
+++ b/runtime/interpreter/mterp/mips/op_const_string.S
@@ -1,4 +1,4 @@
-    # const/string vAA, String             /* BBBB */
+    /* const/string vAA, string@BBBB */
     EXPORT_PC()
     FETCH(a0, 1)                        # a0 <- BBBB
     GET_OPA(a1)                         # a1 <- AA
diff --git a/runtime/interpreter/mterp/mips/op_const_string_jumbo.S b/runtime/interpreter/mterp/mips/op_const_string_jumbo.S
index d732ca1..54cec97 100644
--- a/runtime/interpreter/mterp/mips/op_const_string_jumbo.S
+++ b/runtime/interpreter/mterp/mips/op_const_string_jumbo.S
@@ -1,10 +1,9 @@
-    # const/string vAA, String          /* BBBBBBBB */
+    /* const/string vAA, string@BBBBBBBB */
     EXPORT_PC()
     FETCH(a0, 1)                        # a0 <- bbbb (low)
     FETCH(a2, 2)                        # a2 <- BBBB (high)
     GET_OPA(a1)                         # a1 <- AA
-    sll    a2, a2, 16
-    or     a0, a0, a2                   # a0 <- BBBBbbbb
+    INSERT_HIGH_HALF(a0, a2)            # a0 <- BBBBbbbb
     addu   a2, rFP, OFF_FP_SHADOWFRAME  # a2 <- shadow frame
     move   a3, rSELF
     JAL(MterpConstString)               # v0 <- Mterp(index, tgt_reg, shadow_frame, self)
diff --git a/runtime/interpreter/mterp/mips/op_const_wide.S b/runtime/interpreter/mterp/mips/op_const_wide.S
index 01d0f87..f8911e3 100644
--- a/runtime/interpreter/mterp/mips/op_const_wide.S
+++ b/runtime/interpreter/mterp/mips/op_const_wide.S
@@ -1,14 +1,11 @@
-    # const-wide vAA,                      /* +HHHHhhhhBBBBbbbb */
+    /* const-wide vAA, +HHHHhhhhBBBBbbbb */
     FETCH(a0, 1)                           #  a0 <- bbbb (low)
     FETCH(a1, 2)                           #  a1 <- BBBB (low middle)
     FETCH(a2, 3)                           #  a2 <- hhhh (high middle)
-    sll       a1, 16 #
-    or        a0, a1                       #  a0 <- BBBBbbbb (low word)
+    INSERT_HIGH_HALF(a0, a1)               #  a0 <- BBBBbbbb (low word)
     FETCH(a3, 4)                           #  a3 <- HHHH (high)
     GET_OPA(t1)                            #  t1 <- AA
-    sll       a3, 16
-    or        a1, a3, a2                   #  a1 <- HHHHhhhh (high word)
+    INSERT_HIGH_HALF(a2, a3)               #  a2 <- HHHHhhhh (high word)
     FETCH_ADVANCE_INST(5)                  #  advance rPC, load rINST
     GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG64(a0, a1, t1)                 #  vAA <- a0/a1
-    GOTO_OPCODE(t0)                        #  jump to next instruction
+    SET_VREG64_GOTO(a0, a2, t1, t0)        #  vAA/vAA+1 <- a0/a2
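
const-wide assembles its 64-bit literal from four consecutive 16-bit code units, pairing them into the low word BBBBbbbb and the high word HHHHhhhh before storing the register pair. A Java sketch of that decoding (the helper name is illustrative, not part of this change):

    // unit0..unit3 are the four 16-bit code units bbbb, BBBB, hhhh, HHHH.
    static long decodeConstWide(int unit0, int unit1, int unit2, int unit3) {
        long low  = (unit0 & 0xFFFFL) | ((unit1 & 0xFFFFL) << 16);  // BBBBbbbb
        long high = (unit2 & 0xFFFFL) | ((unit3 & 0xFFFFL) << 16);  // HHHHhhhh
        return (high << 32) | low;                                  // HHHHhhhhBBBBbbbb
    }
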
diff --git a/runtime/interpreter/mterp/mips/op_const_wide_16.S b/runtime/interpreter/mterp/mips/op_const_wide_16.S
index 583d9ef..2ca5ab9 100644
--- a/runtime/interpreter/mterp/mips/op_const_wide_16.S
+++ b/runtime/interpreter/mterp/mips/op_const_wide_16.S
@@ -1,8 +1,7 @@
-    # const-wide/16 vAA,                   /* +BBBB */
+    /* const-wide/16 vAA, +BBBB */
     FETCH_S(a0, 1)                         #  a0 <- ssssBBBB (sign-extended)
     GET_OPA(a3)                            #  a3 <- AA
     sra       a1, a0, 31                   #  a1 <- ssssssss
     FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
     GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG64(a0, a1, a3)                 #  vAA <- a0/a1
-    GOTO_OPCODE(t0)                        #  jump to next instruction
+    SET_VREG64_GOTO(a0, a1, a3, t0)        #  vAA/vAA+1 <- a0/a1
diff --git a/runtime/interpreter/mterp/mips/op_const_wide_32.S b/runtime/interpreter/mterp/mips/op_const_wide_32.S
index 3eb4574..bf802ca 100644
--- a/runtime/interpreter/mterp/mips/op_const_wide_32.S
+++ b/runtime/interpreter/mterp/mips/op_const_wide_32.S
@@ -1,11 +1,9 @@
-    # const-wide/32 vAA,                   /* +BBBBbbbb */
+    /* const-wide/32 vAA, +BBBBbbbb */
     FETCH(a0, 1)                           #  a0 <- 0000bbbb (low)
     GET_OPA(a3)                            #  a3 <- AA
     FETCH_S(a2, 2)                         #  a2 <- ssssBBBB (high)
     FETCH_ADVANCE_INST(3)                  #  advance rPC, load rINST
-    sll       a2, a2, 16
-    or        a0, a0, a2                   #  a0 <- BBBBbbbb
+    INSERT_HIGH_HALF(a0, a2)               #  a0 <- BBBBbbbb
     sra       a1, a0, 31                   #  a1 <- ssssssss
     GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG64(a0, a1, a3)                 #  vAA <- a0/a1
-    GOTO_OPCODE(t0)                        #  jump to next instruction
+    SET_VREG64_GOTO(a0, a1, a3, t0)        #  vAA/vAA+1 <- a0/a1
diff --git a/runtime/interpreter/mterp/mips/op_const_wide_high16.S b/runtime/interpreter/mterp/mips/op_const_wide_high16.S
index 88382c6..04b90fa 100644
--- a/runtime/interpreter/mterp/mips/op_const_wide_high16.S
+++ b/runtime/interpreter/mterp/mips/op_const_wide_high16.S
@@ -1,9 +1,8 @@
-    # const-wide/high16 vAA,               /* +BBBB000000000000 */
+    /* const-wide/high16 vAA, +BBBB000000000000 */
     FETCH(a1, 1)                           #  a1 <- 0000BBBB (zero-extended)
     GET_OPA(a3)                            #  a3 <- AA
     li        a0, 0                        #  a0 <- 00000000
     sll       a1, 16                       #  a1 <- BBBB0000
     FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
     GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG64(a0, a1, a3)                 #  vAA <- a0/a1
-    GOTO_OPCODE(t0)                        #  jump to next instruction
+    SET_VREG64_GOTO(a0, a1, a3, t0)        #  vAA/vAA+1 <- a0/a1
diff --git a/runtime/interpreter/mterp/mips/op_double_to_int.S b/runtime/interpreter/mterp/mips/op_double_to_int.S
index b1792ec..3b44964 100644
--- a/runtime/interpreter/mterp/mips/op_double_to_int.S
+++ b/runtime/interpreter/mterp/mips/op_double_to_int.S
@@ -1,58 +1,39 @@
-%include "mips/unopNarrower.S" {"instr":"b d2i_doconv"}
-/*
- * Convert the double in a0/a1 to an int in a0.
- *
- * We have to clip values to int min/max per the specification.  The
- * expected common case is a "reasonable" value that converts directly
- * to modest integer.  The EABI convert function isn't doing this for us.
- */
-%break
+    /*
+     * double-to-int
+     *
+     * We have to clip values to int min/max per the specification.  The
+     * expected common case is a "reasonable" value that converts directly
+     * to modest integer.  The EABI convert function isn't doing this for us.
+     */
+    /* unop vA, vB */
+    GET_OPB(a3)                            #  a3 <- B
+    GET_OPA4(rOBJ)                         #  rOBJ <- A+
+    EAS2(a3, rFP, a3)                      #  a3 <- &fp[B]
+    LOAD64_F(fa0, fa0f, a3)
+    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
 
-d2i_doconv:
+    li        t0, INT_MIN_AS_DOUBLE_HIGH
+    mtc1      zero, fa1
+    MOVE_TO_FPU_HIGH(t0, fa1, fa1f)
 #ifdef MIPS32REVGE6
-    la        t0, .LDOUBLE_TO_INT_max
-    LOAD64_F(fa1, fa1f, t0)
-    cmp.le.d  ft2, fa1, fa0
-    l.s       fv0, .LDOUBLE_TO_INT_maxret
-    bc1nez    ft2, .L${opcode}_set_vreg_f
-
-    la        t0, .LDOUBLE_TO_INT_min
-    LOAD64_F(fa1, fa1f, t0)
-    cmp.le.d  ft2, fa0, fa1
-    l.s       fv0, .LDOUBLE_TO_INT_minret
-    bc1nez    ft2, .L${opcode}_set_vreg_f
-
-    mov.d     fa1, fa0
-    cmp.un.d  ft2, fa0, fa1
-    li.s      fv0, 0
-    bc1nez    ft2, .L${opcode}_set_vreg_f
+    /*
+     * TODO: simplify this when the MIPS64R6 emulator
+     * supports NAN2008=1.
+     */
+    cmp.le.d  ft0, fa1, fa0
+    GET_INST_OPCODE(t1)                    #  extract opcode from rINST
+    bc1nez    ft0, 1f                      #  if INT_MIN <= vB, proceed to truncation
+    cmp.eq.d  ft0, fa0, fa0
+    selnez.d  fa0, fa1, ft0                #  fa0 = ordered(vB) ? INT_MIN_AS_DOUBLE : 0
 #else
-    la        t0, .LDOUBLE_TO_INT_max
-    LOAD64_F(fa1, fa1f, t0)
     c.ole.d   fcc0, fa1, fa0
-    l.s       fv0, .LDOUBLE_TO_INT_maxret
-    bc1t      .L${opcode}_set_vreg_f
-
-    la        t0, .LDOUBLE_TO_INT_min
-    LOAD64_F(fa1, fa1f, t0)
-    c.ole.d   fcc0, fa0, fa1
-    l.s       fv0, .LDOUBLE_TO_INT_minret
-    bc1t      .L${opcode}_set_vreg_f
-
-    mov.d     fa1, fa0
-    c.un.d    fcc0, fa0, fa1
-    li.s      fv0, 0
-    bc1t      .L${opcode}_set_vreg_f
+    GET_INST_OPCODE(t1)                    #  extract opcode from rINST
+    bc1t      fcc0, 1f                     #  if INT_MIN <= vB, proceed to truncation
+    c.eq.d    fcc0, fa0, fa0
+    mtc1      zero, fa0
+    MOVE_TO_FPU_HIGH(zero, fa0, fa0f)
+    movt.d    fa0, fa1, fcc0               #  fa0 = ordered(vB) ? INT_MIN_AS_DOUBLE : 0
 #endif
-
-    trunc.w.d  fv0, fa0
-    b         .L${opcode}_set_vreg_f
-
-.LDOUBLE_TO_INT_max:
-    .dword 0x41dfffffffc00000
-.LDOUBLE_TO_INT_min:
-    .dword 0xc1e0000000000000              #  minint, as a double (high word)
-.LDOUBLE_TO_INT_maxret:
-    .word 0x7fffffff
-.LDOUBLE_TO_INT_minret:
-    .word 0x80000000
+1:
+    trunc.w.d fa0, fa0
+    SET_VREG_F_GOTO(fa0, rOBJ, t1)         #  vA <- result
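
As the comment above states, the conversion must clip to the int range and turn NaN into 0; double-to-long and the float-to-int/long handlers below follow the same pattern with their respective limits. A Java sketch of that required result, which is also what a plain (int) cast produces in Java; illustrative only:

    // Dex double-to-int result: NaN -> 0, out-of-range values saturate,
    // in-range values truncate toward zero.
    static int doubleToInt(double v) {
        if (Double.isNaN(v))        return 0;
        if (v >= Integer.MAX_VALUE) return Integer.MAX_VALUE;
        if (v <= Integer.MIN_VALUE) return Integer.MIN_VALUE;
        return (int) v;
    }
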
diff --git a/runtime/interpreter/mterp/mips/op_double_to_long.S b/runtime/interpreter/mterp/mips/op_double_to_long.S
index 7f7a799..78d4a8f 100644
--- a/runtime/interpreter/mterp/mips/op_double_to_long.S
+++ b/runtime/interpreter/mterp/mips/op_double_to_long.S
@@ -1,56 +1,61 @@
-%include "mips/funopWide.S" {"instr":"b d2l_doconv", "st_result":"SET_VREG64(rRESULT0, rRESULT1, rOBJ)"}
+    /*
+     * double-to-long
+     *
+     * We have to clip values to long min/max per the specification.  The
+     * expected common case is a "reasonable" value that converts directly
+     * to modest integer.  The EABI convert function isn't doing this for us.
+     */
+    /* unop vA, vB */
+    GET_OPA4(rOBJ)                         #  rOBJ <- A+
+    GET_OPB(a3)                            #  a3 <- B
+    EAS2(a3, rFP, a3)                      #  a3 <- &fp[B]
+    LOAD64_F(fa0, fa0f, a3)
+    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
+
+#ifdef MIPS32REVGE6
+    /*
+     * TODO: simplify this when the MIPS64R6 emulator
+     * supports NAN2008=1.
+     */
+    li        t0, LONG_MIN_AS_DOUBLE_HIGH
+    mtc1      zero, fa1
+    mthc1     t0, fa1
+    cmp.le.d  ft0, fa1, fa0
+    GET_INST_OPCODE(t1)                    #  extract opcode from rINST
+    bc1nez    ft0, 1f                      #  if LONG_MIN <= vB, proceed to truncation
+    cmp.eq.d  ft0, fa0, fa0
+    selnez.d  fa0, fa1, ft0                #  fa0 = ordered(vB) ? LONG_MIN_AS_DOUBLE : 0
+1:
+    trunc.l.d fa0, fa0
+    SET_VREG64_F_GOTO(fa0, fa0f, rOBJ, t1) #  vA <- result
+#else
+    c.eq.d    fcc0, fa0, fa0
+    li        rRESULT0, 0
+    li        rRESULT1, 0
+    bc1f      fcc0, .L${opcode}_get_opcode
+
+    li        t0, LONG_MIN_AS_DOUBLE_HIGH
+    mtc1      zero, fa1
+    MOVE_TO_FPU_HIGH(t0, fa1, fa1f)
+    c.ole.d   fcc0, fa0, fa1
+    li        rRESULT1, LONG_MIN_HIGH
+    bc1t      fcc0, .L${opcode}_get_opcode
+
+    neg.d     fa1, fa1
+    c.ole.d   fcc0, fa1, fa0
+    nor       rRESULT0, rRESULT0, zero
+    nor       rRESULT1, rRESULT1, zero
+    bc1t      fcc0, .L${opcode}_get_opcode
+
+    JAL(__fixdfdi)
+    GET_INST_OPCODE(t1)                    #  extract opcode from rINST
+    b         .L${opcode}_set_vreg
+#endif
 %break
 
-d2l_doconv:
-#ifdef MIPS32REVGE6
-    la        t0, .LDOUBLE_TO_LONG_max
-    LOAD64_F(fa1, fa1f, t0)
-    cmp.le.d  ft2, fa1, fa0
-    la        t0, .LDOUBLE_TO_LONG_ret_max
-    LOAD64(rRESULT0, rRESULT1, t0)
-    bc1nez    ft2, .L${opcode}_set_vreg
-
-    la        t0, .LDOUBLE_TO_LONG_min
-    LOAD64_F(fa1, fa1f, t0)
-    cmp.le.d  ft2, fa0, fa1
-    la        t0, .LDOUBLE_TO_LONG_ret_min
-    LOAD64(rRESULT0, rRESULT1, t0)
-    bc1nez    ft2, .L${opcode}_set_vreg
-
-    mov.d     fa1, fa0
-    cmp.un.d  ft2, fa0, fa1
-    li        rRESULT0, 0
-    li        rRESULT1, 0
-    bc1nez    ft2, .L${opcode}_set_vreg
-#else
-    la        t0, .LDOUBLE_TO_LONG_max
-    LOAD64_F(fa1, fa1f, t0)
-    c.ole.d   fcc0, fa1, fa0
-    la        t0, .LDOUBLE_TO_LONG_ret_max
-    LOAD64(rRESULT0, rRESULT1, t0)
-    bc1t      .L${opcode}_set_vreg
-
-    la        t0, .LDOUBLE_TO_LONG_min
-    LOAD64_F(fa1, fa1f, t0)
-    c.ole.d   fcc0, fa0, fa1
-    la        t0, .LDOUBLE_TO_LONG_ret_min
-    LOAD64(rRESULT0, rRESULT1, t0)
-    bc1t      .L${opcode}_set_vreg
-
-    mov.d     fa1, fa0
-    c.un.d    fcc0, fa0, fa1
-    li        rRESULT0, 0
-    li        rRESULT1, 0
-    bc1t      .L${opcode}_set_vreg
+#ifndef MIPS32REVGE6
+.L${opcode}_get_opcode:
+    GET_INST_OPCODE(t1)                    #  extract opcode from rINST
+.L${opcode}_set_vreg:
+    SET_VREG64_GOTO(rRESULT0, rRESULT1, rOBJ, t1)   #  vA/vA+1 <- v0/v1
 #endif
-    JAL(__fixdfdi)
-    b         .L${opcode}_set_vreg
-
-.LDOUBLE_TO_LONG_max:
-    .dword 0x43e0000000000000              #  maxlong, as a double (high word)
-.LDOUBLE_TO_LONG_min:
-    .dword 0xc3e0000000000000              #  minlong, as a double (high word)
-.LDOUBLE_TO_LONG_ret_max:
-    .dword 0x7fffffffffffffff
-.LDOUBLE_TO_LONG_ret_min:
-    .dword 0x8000000000000000
diff --git a/runtime/interpreter/mterp/mips/op_fill_array_data.S b/runtime/interpreter/mterp/mips/op_fill_array_data.S
index 8605746..c3cd371 100644
--- a/runtime/interpreter/mterp/mips/op_fill_array_data.S
+++ b/runtime/interpreter/mterp/mips/op_fill_array_data.S
@@ -1,10 +1,9 @@
     /* fill-array-data vAA, +BBBBBBBB */
     EXPORT_PC()
-    FETCH(a0, 1)                           #  a0 <- bbbb (lo)
-    FETCH(a1, 2)                           #  a1 <- BBBB (hi)
+    FETCH(a1, 1)                           #  a1 <- bbbb (lo)
+    FETCH(a0, 2)                           #  a0 <- BBBB (hi)
     GET_OPA(a3)                            #  a3 <- AA
-    sll       a1, a1, 16                   #  a1 <- BBBBbbbb
-    or        a1, a0, a1                   #  a1 <- BBBBbbbb
+    INSERT_HIGH_HALF(a1, a0)               #  a1 <- BBBBbbbb
     GET_VREG(a0, a3)                       #  a0 <- vAA (array object)
     EAS1(a1, rPC, a1)                      #  a1 <- PC + BBBBbbbb*2 (array data off.)
     JAL(MterpFillArrayData)                #  v0 <- Mterp(obj, payload)
diff --git a/runtime/interpreter/mterp/mips/op_filled_new_array.S b/runtime/interpreter/mterp/mips/op_filled_new_array.S
index 3f62fae..9511578 100644
--- a/runtime/interpreter/mterp/mips/op_filled_new_array.S
+++ b/runtime/interpreter/mterp/mips/op_filled_new_array.S
@@ -4,8 +4,8 @@
      *
      * for: filled-new-array, filled-new-array/range
      */
-    # op vB, {vD, vE, vF, vG, vA}, class   /* CCCC */
-    # op {vCCCC..v(CCCC+AA-1)}, type       /* BBBB */
+    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+    /* op {vCCCC..v(CCCC+AA-1)}, type@BBBB */
     .extern $helper
     EXPORT_PC()
     addu   a0, rFP, OFF_FP_SHADOWFRAME     # a0 <- shadow frame
diff --git a/runtime/interpreter/mterp/mips/op_float_to_int.S b/runtime/interpreter/mterp/mips/op_float_to_int.S
index 8292652..087e50f 100644
--- a/runtime/interpreter/mterp/mips/op_float_to_int.S
+++ b/runtime/interpreter/mterp/mips/op_float_to_int.S
@@ -1,50 +1,36 @@
-%include "mips/funop.S" {"instr":"b f2i_doconv"}
-%break
+    /*
+     * float-to-int
+     *
+     * We have to clip values to int min/max per the specification.  The
+     * expected common case is a "reasonable" value that converts directly
+     * to modest integer.  The EABI convert function isn't doing this for us.
+     */
+    /* unop vA, vB */
+    GET_OPB(a3)                            #  a3 <- B
+    GET_OPA4(rOBJ)                         #  rOBJ <- A+
+    GET_VREG_F(fa0, a3)
+    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
 
-/*
- * Not an entry point as it is used only once !!
- */
-f2i_doconv:
+    li        t0, INT_MIN_AS_FLOAT
+    mtc1      t0, fa1
 #ifdef MIPS32REVGE6
-    l.s       fa1, .LFLOAT_TO_INT_max
-    cmp.le.s  ft2, fa1, fa0
-    l.s       fv0, .LFLOAT_TO_INT_ret_max
-    bc1nez    ft2, .L${opcode}_set_vreg_f
-
-    l.s       fa1, .LFLOAT_TO_INT_min
-    cmp.le.s  ft2, fa0, fa1
-    l.s       fv0, .LFLOAT_TO_INT_ret_min
-    bc1nez    ft2, .L${opcode}_set_vreg_f
-
-    mov.s     fa1, fa0
-    cmp.un.s  ft2, fa0, fa1
-    li.s      fv0, 0
-    bc1nez    ft2, .L${opcode}_set_vreg_f
+    /*
+     * TODO: simplify this when the MIPS64R6 emulator
+     * supports NAN2008=1.
+     */
+    cmp.le.s  ft0, fa1, fa0
+    GET_INST_OPCODE(t1)                    #  extract opcode from rINST
+    bc1nez    ft0, 1f                      #  if INT_MIN <= vB, proceed to truncation
+    cmp.eq.s  ft0, fa0, fa0
+    selnez.s  fa0, fa1, ft0                #  fa0 = ordered(vB) ? INT_MIN_AS_FLOAT : 0
 #else
-    l.s       fa1, .LFLOAT_TO_INT_max
     c.ole.s   fcc0, fa1, fa0
-    l.s       fv0, .LFLOAT_TO_INT_ret_max
-    bc1t      .L${opcode}_set_vreg_f
-
-    l.s       fa1, .LFLOAT_TO_INT_min
-    c.ole.s   fcc0, fa0, fa1
-    l.s       fv0, .LFLOAT_TO_INT_ret_min
-    bc1t      .L${opcode}_set_vreg_f
-
-    mov.s     fa1, fa0
-    c.un.s    fcc0, fa0, fa1
-    li.s      fv0, 0
-    bc1t      .L${opcode}_set_vreg_f
+    GET_INST_OPCODE(t1)                    #  extract opcode from rINST
+    bc1t      fcc0, 1f                     #  if INT_MIN <= vB, proceed to truncation
+    c.eq.s    fcc0, fa0, fa0
+    mtc1      zero, fa0
+    movt.s    fa0, fa1, fcc0               #  fa0 = ordered(vB) ? INT_MIN_AS_FLOAT : 0
 #endif
-
-    trunc.w.s  fv0, fa0
-    b         .L${opcode}_set_vreg_f
-
-.LFLOAT_TO_INT_max:
-    .word 0x4f000000
-.LFLOAT_TO_INT_min:
-    .word 0xcf000000
-.LFLOAT_TO_INT_ret_max:
-    .word 0x7fffffff
-.LFLOAT_TO_INT_ret_min:
-    .word 0x80000000
+1:
+    trunc.w.s fa0, fa0
+    SET_VREG_F_GOTO(fa0, rOBJ, t1)         #  vA <- result
diff --git a/runtime/interpreter/mterp/mips/op_float_to_long.S b/runtime/interpreter/mterp/mips/op_float_to_long.S
index a51384f..dc88a78 100644
--- a/runtime/interpreter/mterp/mips/op_float_to_long.S
+++ b/runtime/interpreter/mterp/mips/op_float_to_long.S
@@ -1,51 +1,58 @@
-%include "mips/funopWider.S" {"instr":"b f2l_doconv", "st_result":"SET_VREG64(rRESULT0, rRESULT1, rOBJ)"}
-%break
+    /*
+     * float-to-long
+     *
+     * We have to clip values to long min/max per the specification.  The
+     * expected common case is a "reasonable" value that converts directly
+     * to modest integer.  The EABI convert function isn't doing this for us.
+     */
+    /* unop vA, vB */
+    GET_OPA4(rOBJ)                         #  rOBJ <- A+
+    GET_OPB(a3)                            #  a3 <- B
+    GET_VREG_F(fa0, a3)
+    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
 
-f2l_doconv:
 #ifdef MIPS32REVGE6
-    l.s       fa1, .LLONG_TO_max
-    cmp.le.s  ft2, fa1, fa0
-    li        rRESULT0, ~0
-    li        rRESULT1, ~0x80000000
-    bc1nez    ft2, .L${opcode}_set_vreg
-
-    l.s       fa1, .LLONG_TO_min
-    cmp.le.s  ft2, fa0, fa1
-    li        rRESULT0, 0
-    li        rRESULT1, 0x80000000
-    bc1nez    ft2, .L${opcode}_set_vreg
-
-    mov.s     fa1, fa0
-    cmp.un.s  ft2, fa0, fa1
-    li        rRESULT0, 0
-    li        rRESULT1, 0
-    bc1nez    ft2, .L${opcode}_set_vreg
+    /*
+     * TODO: simplify this when the MIPS64R6 emulator
+     * supports NAN2008=1.
+     */
+    li        t0, LONG_MIN_AS_FLOAT
+    mtc1      t0, fa1
+    cmp.le.s  ft0, fa1, fa0
+    GET_INST_OPCODE(t1)                    #  extract opcode from rINST
+    bc1nez    ft0, 1f                      #  if LONG_MIN <= vB, proceed to truncation
+    cmp.eq.s  ft0, fa0, fa0
+    selnez.s  fa0, fa1, ft0                #  fa0 = ordered(vB) ? LONG_MIN_AS_FLOAT : 0
+1:
+    trunc.l.s fa0, fa0
+    SET_VREG64_F_GOTO(fa0, fa0f, rOBJ, t1) #  vA <- result
 #else
-    l.s       fa1, .LLONG_TO_max
-    c.ole.s   fcc0, fa1, fa0
-    li        rRESULT0, ~0
-    li        rRESULT1, ~0x80000000
-    bc1t      .L${opcode}_set_vreg
-
-    l.s       fa1, .LLONG_TO_min
-    c.ole.s   fcc0, fa0, fa1
-    li        rRESULT0, 0
-    li        rRESULT1, 0x80000000
-    bc1t      .L${opcode}_set_vreg
-
-    mov.s     fa1, fa0
-    c.un.s    fcc0, fa0, fa1
+    c.eq.s    fcc0, fa0, fa0
     li        rRESULT0, 0
     li        rRESULT1, 0
-    bc1t      .L${opcode}_set_vreg
-#endif
+    bc1f      fcc0, .L${opcode}_get_opcode
+
+    li        t0, LONG_MIN_AS_FLOAT
+    mtc1      t0, fa1
+    c.ole.s   fcc0, fa0, fa1
+    li        rRESULT1, LONG_MIN_HIGH
+    bc1t      fcc0, .L${opcode}_get_opcode
+
+    neg.s     fa1, fa1
+    c.ole.s   fcc0, fa1, fa0
+    nor       rRESULT0, rRESULT0, zero
+    nor       rRESULT1, rRESULT1, zero
+    bc1t      fcc0, .L${opcode}_get_opcode
 
     JAL(__fixsfdi)
-
+    GET_INST_OPCODE(t1)                    #  extract opcode from rINST
     b         .L${opcode}_set_vreg
+#endif
+%break
 
-.LLONG_TO_max:
-    .word 0x5f000000
-
-.LLONG_TO_min:
-    .word 0xdf000000
+#ifndef MIPS32REVGE6
+.L${opcode}_get_opcode:
+    GET_INST_OPCODE(t1)                    #  extract opcode from rINST
+.L${opcode}_set_vreg:
+    SET_VREG64_GOTO(rRESULT0, rRESULT1, rOBJ, t1)   #  vA/vA+1 <- v0/v1
+#endif
diff --git a/runtime/interpreter/mterp/mips/op_goto_32.S b/runtime/interpreter/mterp/mips/op_goto_32.S
index 67f52e9..ef5bf6b 100644
--- a/runtime/interpreter/mterp/mips/op_goto_32.S
+++ b/runtime/interpreter/mterp/mips/op_goto_32.S
@@ -8,8 +8,7 @@
      * our "backward branch" test must be "<=0" instead of "<0".
      */
     /* goto/32 +AAAAAAAA */
-    FETCH(a0, 1)                           #  a0 <- aaaa (lo)
+    FETCH(rINST, 1)                        #  rINST <- aaaa (lo)
     FETCH(a1, 2)                           #  a1 <- AAAA (hi)
-    sll       a1, a1, 16
-    or        rINST, a0, a1                #  rINST <- AAAAaaaa
+    INSERT_HIGH_HALF(rINST, a1)            #  rINST <- AAAAaaaa
     b         MterpCommonTakenBranchNoFlags
diff --git a/runtime/interpreter/mterp/mips/op_iget.S b/runtime/interpreter/mterp/mips/op_iget.S
index 86d44fa..01f42d9 100644
--- a/runtime/interpreter/mterp/mips/op_iget.S
+++ b/runtime/interpreter/mterp/mips/op_iget.S
@@ -4,6 +4,7 @@
      *
      * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
      */
+    /* op vA, vB, field@CCCC */
     EXPORT_PC()
     FETCH(a0, 1)                           # a0 <- field ref CCCC
     GET_OPB(a1)                            # a1 <- B
@@ -15,11 +16,10 @@
     GET_OPA4(a2)                           # a2<- A+
     PREFETCH_INST(2)                       # load rINST
     bnez  a3, MterpPossibleException        # bail out
-    .if $is_object
-    SET_VREG_OBJECT(v0, a2)                # fp[A] <- v0
-    .else
-    SET_VREG(v0, a2)                       # fp[A] <- v0
-    .endif
     ADVANCE(2)                             #  advance rPC
     GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    GOTO_OPCODE(t0)                        #  jump to next instruction
+    .if $is_object
+    SET_VREG_OBJECT_GOTO(v0, a2, t0)       # fp[A] <- v0
+    .else
+    SET_VREG_GOTO(v0, a2, t0)              # fp[A] <- v0
+    .endif
diff --git a/runtime/interpreter/mterp/mips/op_iget_object_quick.S b/runtime/interpreter/mterp/mips/op_iget_object_quick.S
index 31d94b9..95c34d7 100644
--- a/runtime/interpreter/mterp/mips/op_iget_object_quick.S
+++ b/runtime/interpreter/mterp/mips/op_iget_object_quick.S
@@ -9,7 +9,6 @@
     GET_OPA4(a2)                           #  a2<- A+
     PREFETCH_INST(2)                       #  load rINST
     bnez a3, MterpPossibleException        #  bail out
-    SET_VREG_OBJECT(v0, a2)                #  fp[A] <- v0
     ADVANCE(2)                             #  advance rPC
     GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    GOTO_OPCODE(t0)                        #  jump to next instruction
+    SET_VREG_OBJECT_GOTO(v0, a2, t0)       #  fp[A] <- v0
diff --git a/runtime/interpreter/mterp/mips/op_iget_quick.S b/runtime/interpreter/mterp/mips/op_iget_quick.S
index fbafa5b..46277d3 100644
--- a/runtime/interpreter/mterp/mips/op_iget_quick.S
+++ b/runtime/interpreter/mterp/mips/op_iget_quick.S
@@ -1,6 +1,6 @@
 %default { "load":"lw" }
     /* For: iget-quick, iget-boolean-quick, iget-byte-quick, iget-char-quick, iget-short-quick */
-    # op vA, vB, offset                    /* CCCC */
+    /* op vA, vB, offset@CCCC */
     GET_OPB(a2)                            #  a2 <- B
     GET_VREG(a3, a2)                       #  a3 <- object we're operating on
     FETCH(a1, 1)                           #  a1 <- field byte offset
diff --git a/runtime/interpreter/mterp/mips/op_iget_wide.S b/runtime/interpreter/mterp/mips/op_iget_wide.S
index 8fe3089..cf5019e 100644
--- a/runtime/interpreter/mterp/mips/op_iget_wide.S
+++ b/runtime/interpreter/mterp/mips/op_iget_wide.S
@@ -3,6 +3,7 @@
      *
      * for: iget-wide
      */
+    /* op vA, vB, field@CCCC */
     EXPORT_PC()
     FETCH(a0, 1)                           # a0 <- field byte offset
     GET_OPB(a1)                            # a1 <- B
@@ -14,7 +15,6 @@
     GET_OPA4(a2)                           # a2<- A+
     PREFETCH_INST(2)                       # load rINST
     bnez a3, MterpException                # bail out
-    SET_VREG64(v0, v1, a2)                 # fp[A] <- v0/v1
     ADVANCE(2)                             # advance rPC
     GET_INST_OPCODE(t0)                    # extract opcode from rINST
-    GOTO_OPCODE(t0)                        # jump to next instruction
+    SET_VREG64_GOTO(v0, v1, a2, t0)        # fp[A] <- v0/v1
diff --git a/runtime/interpreter/mterp/mips/op_iget_wide_quick.S b/runtime/interpreter/mterp/mips/op_iget_wide_quick.S
index 4d2f291..128be57 100644
--- a/runtime/interpreter/mterp/mips/op_iget_wide_quick.S
+++ b/runtime/interpreter/mterp/mips/op_iget_wide_quick.S
@@ -1,4 +1,4 @@
-    # iget-wide-quick vA, vB, offset       /* CCCC */
+    /* iget-wide-quick vA, vB, offset@CCCC */
     GET_OPB(a2)                            #  a2 <- B
     GET_VREG(a3, a2)                       #  a3 <- object we're operating on
     FETCH(a1, 1)                           #  a1 <- field byte offset
@@ -9,5 +9,4 @@
     LOAD64(a0, a1, t0)                     #  a0 <- obj.field (64 bits, aligned)
     FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
     GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG64(a0, a1, a2)                 #  fp[A] <- a0/a1
-    GOTO_OPCODE(t0)                        #  jump to next instruction
+    SET_VREG64_GOTO(a0, a1, a2, t0)        #  fp[A] <- a0/a1
diff --git a/runtime/interpreter/mterp/mips/op_instance_of.S b/runtime/interpreter/mterp/mips/op_instance_of.S
index d2679bd..706dcf3 100644
--- a/runtime/interpreter/mterp/mips/op_instance_of.S
+++ b/runtime/interpreter/mterp/mips/op_instance_of.S
@@ -4,7 +4,7 @@
      * Most common situation is a non-null object, being compared against
      * an already-resolved class.
      */
-    # instance-of vA, vB, class            /* CCCC */
+    /* instance-of vA, vB, class@CCCC */
     EXPORT_PC()
     FETCH(a0, 1)                           # a0 <- CCCC
     GET_OPB(a1)                            # a1 <- B
diff --git a/runtime/interpreter/mterp/mips/op_int_to_byte.S b/runtime/interpreter/mterp/mips/op_int_to_byte.S
index 77314c62..9266aab 100644
--- a/runtime/interpreter/mterp/mips/op_int_to_byte.S
+++ b/runtime/interpreter/mterp/mips/op_int_to_byte.S
@@ -1 +1 @@
-%include "mips/unop.S" {"preinstr":"sll a0, a0, 24", "instr":"sra a0, a0, 24"}
+%include "mips/unop.S" {"instr":"SEB(a0, a0)"}
diff --git a/runtime/interpreter/mterp/mips/op_int_to_short.S b/runtime/interpreter/mterp/mips/op_int_to_short.S
index 5649c2a..8749cd8 100644
--- a/runtime/interpreter/mterp/mips/op_int_to_short.S
+++ b/runtime/interpreter/mterp/mips/op_int_to_short.S
@@ -1 +1 @@
-%include "mips/unop.S" {"preinstr":"sll a0, 16", "instr":"sra a0, 16"}
+%include "mips/unop.S" {"instr":"SEH(a0, a0)"}
diff --git a/runtime/interpreter/mterp/mips/op_iput.S b/runtime/interpreter/mterp/mips/op_iput.S
index 732a9a4..9133d60 100644
--- a/runtime/interpreter/mterp/mips/op_iput.S
+++ b/runtime/interpreter/mterp/mips/op_iput.S
@@ -4,7 +4,7 @@
      *
      * for: iput, iput-boolean, iput-byte, iput-char, iput-short
      */
-    # op vA, vB, field                     /* CCCC */
+    /* op vA, vB, field@CCCC */
     .extern $handler
     EXPORT_PC()
     FETCH(a0, 1)                           # a0 <- field ref CCCC
diff --git a/runtime/interpreter/mterp/mips/op_iput_object.S b/runtime/interpreter/mterp/mips/op_iput_object.S
index 6b856e7..cfa56ec 100644
--- a/runtime/interpreter/mterp/mips/op_iput_object.S
+++ b/runtime/interpreter/mterp/mips/op_iput_object.S
@@ -3,7 +3,7 @@
      *
      * for: iput-object, iput-object-volatile
      */
-    # op vA, vB, field                     /* CCCC */
+    /* op vA, vB, field@CCCC */
     EXPORT_PC()
     addu   a0, rFP, OFF_FP_SHADOWFRAME
     move   a1, rPC
diff --git a/runtime/interpreter/mterp/mips/op_iput_object_quick.S b/runtime/interpreter/mterp/mips/op_iput_object_quick.S
index c3f1526..82044f5 100644
--- a/runtime/interpreter/mterp/mips/op_iput_object_quick.S
+++ b/runtime/interpreter/mterp/mips/op_iput_object_quick.S
@@ -1,5 +1,5 @@
     /* For: iput-object-quick */
-    # op vA, vB, offset                 /* CCCC */
+    /* op vA, vB, offset@CCCC */
     EXPORT_PC()
     addu   a0, rFP, OFF_FP_SHADOWFRAME
     move   a1, rPC
diff --git a/runtime/interpreter/mterp/mips/op_iput_quick.S b/runtime/interpreter/mterp/mips/op_iput_quick.S
index 0829666..d9753b1 100644
--- a/runtime/interpreter/mterp/mips/op_iput_quick.S
+++ b/runtime/interpreter/mterp/mips/op_iput_quick.S
@@ -1,6 +1,6 @@
 %default { "store":"sw" }
     /* For: iput-quick, iput-object-quick */
-    # op vA, vB, offset                    /* CCCC */
+    /* op vA, vB, offset@CCCC */
     GET_OPB(a2)                            #  a2 <- B
     GET_VREG(a3, a2)                       #  a3 <- fp[B], the object pointer
     FETCH(a1, 1)                           #  a1 <- field byte offset
@@ -9,6 +9,7 @@
     GET_VREG(a0, a2)                       #  a0 <- fp[A]
     FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
     addu      t0, a3, a1
+    GET_INST_OPCODE(t1)                    #  extract opcode from rINST
+    GET_OPCODE_TARGET(t1)
     $store    a0, 0(t0)                    #  obj.field (8/16/32 bits) <- a0
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    GOTO_OPCODE(t0)                        #  jump to next instruction
+    JR(t1)                                 #  jump to next instruction
diff --git a/runtime/interpreter/mterp/mips/op_iput_wide.S b/runtime/interpreter/mterp/mips/op_iput_wide.S
index 6d23f8c..bc3d758 100644
--- a/runtime/interpreter/mterp/mips/op_iput_wide.S
+++ b/runtime/interpreter/mterp/mips/op_iput_wide.S
@@ -1,4 +1,4 @@
-    # iput-wide vA, vB, field              /* CCCC */
+    /* iput-wide vA, vB, field@CCCC */
     .extern artSet64InstanceFromMterp
     EXPORT_PC()
     FETCH(a0, 1)                           # a0 <- field ref CCCC
diff --git a/runtime/interpreter/mterp/mips/op_iput_wide_quick.S b/runtime/interpreter/mterp/mips/op_iput_wide_quick.S
index 9fdb847..0eb228d 100644
--- a/runtime/interpreter/mterp/mips/op_iput_wide_quick.S
+++ b/runtime/interpreter/mterp/mips/op_iput_wide_quick.S
@@ -1,4 +1,4 @@
-    # iput-wide-quick vA, vB, offset       /* CCCC */
+    /* iput-wide-quick vA, vB, offset@CCCC */
     GET_OPA4(a0)                           #  a0 <- A(+)
     GET_OPB(a1)                            #  a1 <- B
     GET_VREG(a2, a1)                       #  a2 <- fp[B], the object pointer
@@ -9,6 +9,7 @@
     FETCH(a3, 1)                           #  a3 <- field byte offset
     FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
     addu      a2, a2, a3                   #  a2 <- &obj.field (64 bits, aligned)
-    STORE64(a0, a1, a2)                    #  obj.field (64 bits, aligned) <- a0/a1
     GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    GOTO_OPCODE(t0)                        #  jump to next instruction
+    GET_OPCODE_TARGET(t0)
+    STORE64(a0, a1, a2)                    #  obj.field (64 bits, aligned) <- a0/a1
+    JR(t0)                                 #  jump to next instruction
diff --git a/runtime/interpreter/mterp/mips/op_long_to_double.S b/runtime/interpreter/mterp/mips/op_long_to_double.S
index b83aaf4..153f582 100644
--- a/runtime/interpreter/mterp/mips/op_long_to_double.S
+++ b/runtime/interpreter/mterp/mips/op_long_to_double.S
@@ -1 +1,20 @@
-%include "mips/funopWide.S" {"instr":"JAL(__floatdidf)", "ld_arg":"LOAD64(rARG0, rARG1, a3)"}
+    /*
+     * long-to-double
+     */
+    /* unop vA, vB */
+    GET_OPA4(rOBJ)                         #  rOBJ <- A+
+    GET_OPB(a3)                            #  a3 <- B
+    EAS2(a3, rFP, a3)                      #  a3 <- &fp[B]
+
+#ifdef MIPS32REVGE6
+    LOAD64_F(fv0, fv0f, a3)
+    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
+    cvt.d.l   fv0, fv0
+#else
+    LOAD64(rARG0, rARG1, a3)
+    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
+    JAL(__floatdidf)                       #  a0/a1 <- op, a2-a3 changed
+#endif
+
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG64_F_GOTO(fv0, fv0f, rOBJ, t0) #  vA/vA+1 <- result
diff --git a/runtime/interpreter/mterp/mips/op_long_to_float.S b/runtime/interpreter/mterp/mips/op_long_to_float.S
index 27faba5..dd1ab81 100644
--- a/runtime/interpreter/mterp/mips/op_long_to_float.S
+++ b/runtime/interpreter/mterp/mips/op_long_to_float.S
@@ -1 +1,20 @@
-%include "mips/unopNarrower.S" {"instr":"JAL(__floatdisf)", "load":"LOAD64(rARG0, rARG1, a3)"}
+    /*
+     * long-to-float
+     */
+    /* unop vA, vB */
+    GET_OPB(a3)                            #  a3 <- B
+    GET_OPA4(rOBJ)                         #  rOBJ <- A+
+    EAS2(a3, rFP, a3)                      #  a3 <- &fp[B]
+
+#ifdef MIPS32REVGE6
+    LOAD64_F(fv0, fv0f, a3)
+    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
+    cvt.s.l   fv0, fv0
+#else
+    LOAD64(rARG0, rARG1, a3)
+    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
+    JAL(__floatdisf)
+#endif
+
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_F_GOTO(fv0, rOBJ, t0)         #  vA <- fv0
diff --git a/runtime/interpreter/mterp/mips/op_move.S b/runtime/interpreter/mterp/mips/op_move.S
index 76588ba..547ea3a 100644
--- a/runtime/interpreter/mterp/mips/op_move.S
+++ b/runtime/interpreter/mterp/mips/op_move.S
@@ -7,8 +7,7 @@
     GET_VREG(a2, a1)                       #  a2 <- fp[B]
     GET_INST_OPCODE(t0)                    #  t0 <- opcode from rINST
     .if $is_object
-    SET_VREG_OBJECT(a2, a0)                #  fp[A] <- a2
+    SET_VREG_OBJECT_GOTO(a2, a0, t0)       #  fp[A] <- a2
     .else
-    SET_VREG(a2, a0)                       #  fp[A] <- a2
+    SET_VREG_GOTO(a2, a0, t0)              #  fp[A] <- a2
     .endif
-    GOTO_OPCODE(t0)                        #  jump to next instruction
diff --git a/runtime/interpreter/mterp/mips/op_move_16.S b/runtime/interpreter/mterp/mips/op_move_16.S
index f7de6c2..91b7399 100644
--- a/runtime/interpreter/mterp/mips/op_move_16.S
+++ b/runtime/interpreter/mterp/mips/op_move_16.S
@@ -7,8 +7,7 @@
     GET_VREG(a2, a1)                       #  a2 <- fp[BBBB]
     GET_INST_OPCODE(t0)                    #  extract opcode from rINST
     .if $is_object
-    SET_VREG_OBJECT(a2, a0)                #  fp[AAAA] <- a2
+    SET_VREG_OBJECT_GOTO(a2, a0, t0)       #  fp[AAAA] <- a2
     .else
-    SET_VREG(a2, a0)                       #  fp[AAAA] <- a2
+    SET_VREG_GOTO(a2, a0, t0)              #  fp[AAAA] <- a2
     .endif
-    GOTO_OPCODE(t0)                        #  jump to next instruction
diff --git a/runtime/interpreter/mterp/mips/op_move_exception.S b/runtime/interpreter/mterp/mips/op_move_exception.S
index f04a035..f1bece7 100644
--- a/runtime/interpreter/mterp/mips/op_move_exception.S
+++ b/runtime/interpreter/mterp/mips/op_move_exception.S
@@ -2,7 +2,8 @@
     GET_OPA(a2)                                 #  a2 <- AA
     lw    a3, THREAD_EXCEPTION_OFFSET(rSELF)    #  get exception obj
     FETCH_ADVANCE_INST(1)                       #  advance rPC, load rINST
-    SET_VREG_OBJECT(a3, a2)                     #  fp[AA] <- exception obj
     GET_INST_OPCODE(t0)                         #  extract opcode from rINST
+    GET_OPCODE_TARGET(t0)
+    SET_VREG_OBJECT(a3, a2)                     #  fp[AA] <- exception obj
     sw    zero, THREAD_EXCEPTION_OFFSET(rSELF)  #  clear exception
-    GOTO_OPCODE(t0)                             #  jump to next instruction
+    JR(t0)                                      #  jump to next instruction
diff --git a/runtime/interpreter/mterp/mips/op_move_from16.S b/runtime/interpreter/mterp/mips/op_move_from16.S
index b8be741..90c25c9 100644
--- a/runtime/interpreter/mterp/mips/op_move_from16.S
+++ b/runtime/interpreter/mterp/mips/op_move_from16.S
@@ -7,8 +7,7 @@
     GET_VREG(a2, a1)                       #  a2 <- fp[BBBB]
     GET_INST_OPCODE(t0)                    #  extract opcode from rINST
     .if $is_object
-    SET_VREG_OBJECT(a2, a0)                #  fp[AA] <- a2
+    SET_VREG_OBJECT_GOTO(a2, a0, t0)       #  fp[AA] <- a2
     .else
-    SET_VREG(a2, a0)                       #  fp[AA] <- a2
+    SET_VREG_GOTO(a2, a0, t0)              #  fp[AA] <- a2
     .endif
-    GOTO_OPCODE(t0)                        #  jump to next instruction
diff --git a/runtime/interpreter/mterp/mips/op_move_result.S b/runtime/interpreter/mterp/mips/op_move_result.S
index 315c68e..a4d5bfe 100644
--- a/runtime/interpreter/mterp/mips/op_move_result.S
+++ b/runtime/interpreter/mterp/mips/op_move_result.S
@@ -7,8 +7,7 @@
     lw    a0, 0(a0)                        #  a0 <- result.i
     GET_INST_OPCODE(t0)                    #  extract opcode from rINST
     .if $is_object
-    SET_VREG_OBJECT(a0, a2)                #  fp[AA] <- a0
+    SET_VREG_OBJECT_GOTO(a0, a2, t0)       #  fp[AA] <- a0
     .else
-    SET_VREG(a0, a2)                       #  fp[AA] <- a0
+    SET_VREG_GOTO(a0, a2, t0)              #  fp[AA] <- a0
     .endif
-    GOTO_OPCODE(t0)                        #  jump to next instruction
diff --git a/runtime/interpreter/mterp/mips/op_move_result_wide.S b/runtime/interpreter/mterp/mips/op_move_result_wide.S
index 940c1ff..1259218 100644
--- a/runtime/interpreter/mterp/mips/op_move_result_wide.S
+++ b/runtime/interpreter/mterp/mips/op_move_result_wide.S
@@ -3,6 +3,5 @@
     lw    a3, OFF_FP_RESULT_REGISTER(rFP)  #  get pointer to result JType
     LOAD64(a0, a1, a3)                     #  a0/a1 <- retval.j
     FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
-    SET_VREG64(a0, a1, a2)                 #  fp[AA] <- a0/a1
     GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    GOTO_OPCODE(t0)                        #  jump to next instruction
+    SET_VREG64_GOTO(a0, a1, a2, t0)        #  fp[AA] <- a0/a1
diff --git a/runtime/interpreter/mterp/mips/op_move_wide.S b/runtime/interpreter/mterp/mips/op_move_wide.S
index dd224c3..01d0949 100644
--- a/runtime/interpreter/mterp/mips/op_move_wide.S
+++ b/runtime/interpreter/mterp/mips/op_move_wide.S
@@ -5,6 +5,5 @@
     EAS2(a3, rFP, a3)                      #  a3 <- &fp[B]
     LOAD64(a0, a1, a3)                     #  a0/a1 <- fp[B]
     FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
-    SET_VREG64(a0, a1, a2)                 #  fp[A] <- a0/a1
     GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    GOTO_OPCODE(t0)                        #  jump to next instruction
+    SET_VREG64_GOTO(a0, a1, a2, t0)        #  fp[A] <- a0/a1
diff --git a/runtime/interpreter/mterp/mips/op_move_wide_16.S b/runtime/interpreter/mterp/mips/op_move_wide_16.S
index d8761eb..587ba04 100644
--- a/runtime/interpreter/mterp/mips/op_move_wide_16.S
+++ b/runtime/interpreter/mterp/mips/op_move_wide_16.S
@@ -5,6 +5,5 @@
     EAS2(a3, rFP, a3)                      #  a3 <- &fp[BBBB]
     LOAD64(a0, a1, a3)                     #  a0/a1 <- fp[BBBB]
     FETCH_ADVANCE_INST(3)                  #  advance rPC, load rINST
-    SET_VREG64(a0, a1, a2)                 #  fp[AAAA] <- a0/a1
     GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    GOTO_OPCODE(t0)                        #  jump to next instruction
+    SET_VREG64_GOTO(a0, a1, a2, t0)        #  fp[AAAA] <- a0/a1
diff --git a/runtime/interpreter/mterp/mips/op_move_wide_from16.S b/runtime/interpreter/mterp/mips/op_move_wide_from16.S
index 2103fa1..5003fbd 100644
--- a/runtime/interpreter/mterp/mips/op_move_wide_from16.S
+++ b/runtime/interpreter/mterp/mips/op_move_wide_from16.S
@@ -5,6 +5,5 @@
     EAS2(a3, rFP, a3)                      #  a3 <- &fp[BBBB]
     LOAD64(a0, a1, a3)                     #  a0/a1 <- fp[BBBB]
     FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
-    SET_VREG64(a0, a1, a2)                 #  fp[AA] <- a0/a1
     GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    GOTO_OPCODE(t0)                        #  jump to next instruction
+    SET_VREG64_GOTO(a0, a1, a2, t0)        #  fp[AA] <- a0/a1
diff --git a/runtime/interpreter/mterp/mips/op_mul_long.S b/runtime/interpreter/mterp/mips/op_mul_long.S
index 803bbec..74b049a 100644
--- a/runtime/interpreter/mterp/mips/op_mul_long.S
+++ b/runtime/interpreter/mterp/mips/op_mul_long.S
@@ -39,5 +39,4 @@
 
 .L${opcode}_finish:
     GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG64(v0, v1, a0)                 #  vAA::vAA+1 <- v0(low) :: v1(high)
-    GOTO_OPCODE(t0)                        #  jump to next instruction
+    SET_VREG64_GOTO(v0, v1, a0, t0)        #  vAA/vAA+1 <- v0(low)/v1(high)
diff --git a/runtime/interpreter/mterp/mips/op_mul_long_2addr.S b/runtime/interpreter/mterp/mips/op_mul_long_2addr.S
index 6950b71..683b055 100644
--- a/runtime/interpreter/mterp/mips/op_mul_long_2addr.S
+++ b/runtime/interpreter/mterp/mips/op_mul_long_2addr.S
@@ -26,6 +26,4 @@
 
     FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
     GET_INST_OPCODE(t1)                    #  extract opcode from rINST
-    # vAA <- v0 (low)
-    SET_VREG64(v0, v1, rOBJ)               #  vAA+1 <- v1 (high)
-    GOTO_OPCODE(t1)                        #  jump to next instruction
+    SET_VREG64_GOTO(v0, v1, rOBJ, t1)      #  vA/vA+1 <- v0(low)/v1(high)
diff --git a/runtime/interpreter/mterp/mips/op_new_instance.S b/runtime/interpreter/mterp/mips/op_new_instance.S
index 51a09b2..3c9e83f 100644
--- a/runtime/interpreter/mterp/mips/op_new_instance.S
+++ b/runtime/interpreter/mterp/mips/op_new_instance.S
@@ -1,7 +1,7 @@
     /*
      * Create a new instance of a class.
      */
-    # new-instance vAA, class              /* BBBB */
+    /* new-instance vAA, class@BBBB */
     EXPORT_PC()
     addu   a0, rFP, OFF_FP_SHADOWFRAME
     move   a1, rSELF
diff --git a/runtime/interpreter/mterp/mips/op_packed_switch.S b/runtime/interpreter/mterp/mips/op_packed_switch.S
index ffa4f47..0a1ff98 100644
--- a/runtime/interpreter/mterp/mips/op_packed_switch.S
+++ b/runtime/interpreter/mterp/mips/op_packed_switch.S
@@ -12,8 +12,7 @@
     FETCH(a0, 1)                           #  a0 <- bbbb (lo)
     FETCH(a1, 2)                           #  a1 <- BBBB (hi)
     GET_OPA(a3)                            #  a3 <- AA
-    sll       t0, a1, 16
-    or        a0, a0, t0                   #  a0 <- BBBBbbbb
+    INSERT_HIGH_HALF(a0, a1)               #  a0 <- BBBBbbbb
     GET_VREG(a1, a3)                       #  a1 <- vAA
     EAS1(a0, rPC, a0)                      #  a0 <- PC + BBBBbbbb*2
     JAL($func)                             #  a0 <- code-unit branch offset
diff --git a/runtime/interpreter/mterp/mips/op_return.S b/runtime/interpreter/mterp/mips/op_return.S
index 894ae18..44b9395 100644
--- a/runtime/interpreter/mterp/mips/op_return.S
+++ b/runtime/interpreter/mterp/mips/op_return.S
@@ -8,7 +8,7 @@
     JAL(MterpThreadFenceForConstructor)
     lw        ra, THREAD_FLAGS_OFFSET(rSELF)
     move      a0, rSELF
-    and       ra, (THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST)
+    and       ra, THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
     beqz      ra, 1f
     JAL(MterpSuspendCheck)                 # (self)
 1:
diff --git a/runtime/interpreter/mterp/mips/op_return_void.S b/runtime/interpreter/mterp/mips/op_return_void.S
index 35c1326..1f616ea 100644
--- a/runtime/interpreter/mterp/mips/op_return_void.S
+++ b/runtime/interpreter/mterp/mips/op_return_void.S
@@ -2,7 +2,7 @@
     JAL(MterpThreadFenceForConstructor)
     lw        ra, THREAD_FLAGS_OFFSET(rSELF)
     move      a0, rSELF
-    and       ra, (THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST)
+    and       ra, THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
     beqz      ra, 1f
     JAL(MterpSuspendCheck)                 # (self)
 1:
diff --git a/runtime/interpreter/mterp/mips/op_return_void_no_barrier.S b/runtime/interpreter/mterp/mips/op_return_void_no_barrier.S
index 56968b5..e670c28 100644
--- a/runtime/interpreter/mterp/mips/op_return_void_no_barrier.S
+++ b/runtime/interpreter/mterp/mips/op_return_void_no_barrier.S
@@ -1,6 +1,6 @@
     lw     ra, THREAD_FLAGS_OFFSET(rSELF)
     move   a0, rSELF
-    and    ra, (THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST)
+    and    ra, THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
     beqz   ra, 1f
     JAL(MterpSuspendCheck)                 # (self)
 1:
diff --git a/runtime/interpreter/mterp/mips/op_return_wide.S b/runtime/interpreter/mterp/mips/op_return_wide.S
index 91d62bf..f0f679d 100644
--- a/runtime/interpreter/mterp/mips/op_return_wide.S
+++ b/runtime/interpreter/mterp/mips/op_return_wide.S
@@ -6,7 +6,7 @@
     JAL(MterpThreadFenceForConstructor)
     lw        ra, THREAD_FLAGS_OFFSET(rSELF)
     move      a0, rSELF
-    and       ra, (THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST)
+    and       ra, THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
     beqz      ra, 1f
     JAL(MterpSuspendCheck)                 # (self)
 1:
diff --git a/runtime/interpreter/mterp/mips/op_sget.S b/runtime/interpreter/mterp/mips/op_sget.S
index 3efcfbb..64ece1e 100644
--- a/runtime/interpreter/mterp/mips/op_sget.S
+++ b/runtime/interpreter/mterp/mips/op_sget.S
@@ -4,7 +4,7 @@
      *
      * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
      */
-    # op vAA, field                        /* BBBB */
+    /* op vAA, field@BBBB */
     .extern $helper
     EXPORT_PC()
     FETCH(a0, 1)                           # a0 <- field ref BBBB
@@ -15,11 +15,10 @@
     GET_OPA(a2)                            # a2 <- AA
     PREFETCH_INST(2)
     bnez  a3, MterpException               # bail out
-.if $is_object
-    SET_VREG_OBJECT(v0, a2)                # fp[AA] <- v0
-.else
-    SET_VREG(v0, a2)                       # fp[AA] <- v0
-.endif
     ADVANCE(2)
     GET_INST_OPCODE(t0)                    # extract opcode from rINST
-    GOTO_OPCODE(t0)                        # jump to next instruction
+.if $is_object
+    SET_VREG_OBJECT_GOTO(v0, a2, t0)       # fp[AA] <- v0
+.else
+    SET_VREG_GOTO(v0, a2, t0)              # fp[AA] <- v0
+.endif
diff --git a/runtime/interpreter/mterp/mips/op_sget_wide.S b/runtime/interpreter/mterp/mips/op_sget_wide.S
index 7aee386..c729250 100644
--- a/runtime/interpreter/mterp/mips/op_sget_wide.S
+++ b/runtime/interpreter/mterp/mips/op_sget_wide.S
@@ -1,7 +1,7 @@
     /*
      * 64-bit SGET handler.
      */
-    # sget-wide vAA, field                 /* BBBB */
+    /* sget-wide vAA, field@BBBB */
     .extern artGet64StaticFromCode
     EXPORT_PC()
     FETCH(a0, 1)                           # a0 <- field ref BBBB
@@ -12,6 +12,5 @@
     bnez  a3, MterpException
     GET_OPA(a1)                            # a1 <- AA
     FETCH_ADVANCE_INST(2)                  # advance rPC, load rINST
-    SET_VREG64(v0, v1, a1)                 # vAA/vAA+1 <- v0/v1
     GET_INST_OPCODE(t0)                    # extract opcode from rINST
-    GOTO_OPCODE(t0)                        # jump to next instruction
+    SET_VREG64_GOTO(v0, v1, a1, t0)        # vAA/vAA+1 <- v0/v1
diff --git a/runtime/interpreter/mterp/mips/op_shl_long.S b/runtime/interpreter/mterp/mips/op_shl_long.S
index 0121669..cc08112 100644
--- a/runtime/interpreter/mterp/mips/op_shl_long.S
+++ b/runtime/interpreter/mterp/mips/op_shl_long.S
@@ -24,7 +24,7 @@
     srl     a0, v1                         #  alo<- alo >> (32-(shift&31))
     sll     v1, a1, a2                     #  rhi<- ahi << (shift&31)
     or      v1, a0                         #  rhi<- rhi | alo
-    SET_VREG64_GOTO(v0, v1, t2, t0)        #  vAA/vAA+1 <- a0/a1
+    SET_VREG64_GOTO(v0, v1, t2, t0)        #  vAA/vAA+1 <- v0/v1
 %break
 
 .L${opcode}_finish:
diff --git a/runtime/interpreter/mterp/mips/op_shl_long_2addr.S b/runtime/interpreter/mterp/mips/op_shl_long_2addr.S
index 8ce6058..93c5783 100644
--- a/runtime/interpreter/mterp/mips/op_shl_long_2addr.S
+++ b/runtime/interpreter/mterp/mips/op_shl_long_2addr.S
@@ -7,7 +7,7 @@
     GET_OPB(a3)                            #  a3 <- B
     GET_VREG(a2, a3)                       #  a2 <- vB
     EAS2(t2, rFP, rOBJ)                    #  t2 <- &fp[A]
-    LOAD64(a0, a1, t2)                     #  a0/a1 <- vAA/vAA+1
+    LOAD64(a0, a1, t2)                     #  a0/a1 <- vA/vA+1
 
     FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
     GET_INST_OPCODE(t0)                    #  extract opcode from rINST
@@ -20,8 +20,8 @@
     srl     a0, v1                         #  alo<- alo >> (32-(shift&31))
     sll     v1, a1, a2                     #  rhi<- ahi << (shift&31)
     or      v1, a0                         #  rhi<- rhi | alo
-    SET_VREG64_GOTO(v0, v1, rOBJ, t0)      #  vAA/vAA+1 <- a0/a1
+    SET_VREG64_GOTO(v0, v1, rOBJ, t0)      #  vA/vA+1 <- v0/v1
 %break
 
 .L${opcode}_finish:
-    SET_VREG64_GOTO(zero, v0, rOBJ, t0)    #  vAA/vAA+1 <- rlo/rhi
+    SET_VREG64_GOTO(zero, v0, rOBJ, t0)    #  vA/vA+1 <- rlo/rhi
diff --git a/runtime/interpreter/mterp/mips/op_shr_long.S b/runtime/interpreter/mterp/mips/op_shr_long.S
index 4c42758..ea032fe 100644
--- a/runtime/interpreter/mterp/mips/op_shr_long.S
+++ b/runtime/interpreter/mterp/mips/op_shr_long.S
@@ -23,7 +23,7 @@
     sll     a1, 1
     sll     a1, a0                         #  ahi<- ahi << (32-(shift&31))
     or      v0, a1                         #  rlo<- rlo | ahi
-    SET_VREG64_GOTO(v0, v1, t3, t0)        #  vAA/VAA+1 <- v0/v0
+    SET_VREG64_GOTO(v0, v1, t3, t0)        #  vAA/vAA+1 <- v0/v1
 %break
 
 .L${opcode}_finish:
diff --git a/runtime/interpreter/mterp/mips/op_shr_long_2addr.S b/runtime/interpreter/mterp/mips/op_shr_long_2addr.S
index 3adc085..c805ea4 100644
--- a/runtime/interpreter/mterp/mips/op_shr_long_2addr.S
+++ b/runtime/interpreter/mterp/mips/op_shr_long_2addr.S
@@ -7,7 +7,7 @@
     GET_OPB(a3)                            #  a3 <- B
     GET_VREG(a2, a3)                       #  a2 <- vB
     EAS2(t0, rFP, t2)                      #  t0 <- &fp[A]
-    LOAD64(a0, a1, t0)                     #  a0/a1 <- vAA/vAA+1
+    LOAD64(a0, a1, t0)                     #  a0/a1 <- vA/vA+1
     FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
     GET_INST_OPCODE(t0)                    #  extract opcode from rINST
 
@@ -19,9 +19,9 @@
     sll     a1, 1
     sll     a1, a0                         #  ahi<- ahi << (32-(shift&31))
     or      v0, a1                         #  rlo<- rlo | ahi
-    SET_VREG64_GOTO(v0, v1, t2, t0)        #  vAA/vAA+1 <- a0/a1
+    SET_VREG64_GOTO(v0, v1, t2, t0)        #  vA/vA+1 <- v0/v1
 %break
 
 .L${opcode}_finish:
     sra     a3, a1, 31                     #  a3<- sign(ah)
-    SET_VREG64_GOTO(v1, a3, t2, t0)        #  vAA/vAA+1 <- rlo/rhi
+    SET_VREG64_GOTO(v1, a3, t2, t0)        #  vA/vA+1 <- rlo/rhi
diff --git a/runtime/interpreter/mterp/mips/op_sput.S b/runtime/interpreter/mterp/mips/op_sput.S
index ee313b9..7034a0e 100644
--- a/runtime/interpreter/mterp/mips/op_sput.S
+++ b/runtime/interpreter/mterp/mips/op_sput.S
@@ -4,7 +4,7 @@
      *
      * for: sput, sput-boolean, sput-byte, sput-char, sput-short
      */
-    # op vAA, field                        /* BBBB */
+    /* op vAA, field@BBBB */
     EXPORT_PC()
     FETCH(a0, 1)                           # a0 <- field ref BBBB
     GET_OPA(a3)                            # a3 <- AA
diff --git a/runtime/interpreter/mterp/mips/op_sput_wide.S b/runtime/interpreter/mterp/mips/op_sput_wide.S
index 1e11466..3b347fc 100644
--- a/runtime/interpreter/mterp/mips/op_sput_wide.S
+++ b/runtime/interpreter/mterp/mips/op_sput_wide.S
@@ -1,7 +1,7 @@
     /*
      * 64-bit SPUT handler.
      */
-    # sput-wide vAA, field                 /* BBBB */
+    /* sput-wide vAA, field@BBBB */
     .extern artSet64IndirectStaticFromMterp
     EXPORT_PC()
     FETCH(a0, 1)                           # a0 <- field ref CCCC
diff --git a/runtime/interpreter/mterp/mips/op_ushr_long_2addr.S b/runtime/interpreter/mterp/mips/op_ushr_long_2addr.S
index ccf1f7e..9e93f34 100644
--- a/runtime/interpreter/mterp/mips/op_ushr_long_2addr.S
+++ b/runtime/interpreter/mterp/mips/op_ushr_long_2addr.S
@@ -7,7 +7,7 @@
     GET_OPB(a3)                            #  a3 <- B
     GET_VREG(a2, a3)                       #  a2 <- vB
     EAS2(t0, rFP, t3)                      #  t0 <- &fp[A]
-    LOAD64(a0, a1, t0)                     #  a0/a1 <- vAA/vAA+1
+    LOAD64(a0, a1, t0)                     #  a0/a1 <- vA/vA+1
 
     FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
     GET_INST_OPCODE(t0)                    #  extract opcode from rINST
@@ -20,8 +20,8 @@
     sll       a1, 1
     sll       a1, a0                       #  ahi<- ahi << (32-(shift&31))
     or        v0, a1                       #  rlo<- rlo | ahi
-    SET_VREG64_GOTO(v0, v1, t3, t0)        #  vAA/vAA+1 <- a0/a1
+    SET_VREG64_GOTO(v0, v1, t3, t0)        #  vA/vA+1 <- v0/v1
 %break
 
 .L${opcode}_finish:
-    SET_VREG64_GOTO(v1, zero, t3, t0)      #  vAA/vAA+1 <- rlo/rhi
+    SET_VREG64_GOTO(v1, zero, t3, t0)      #  vA/vA+1 <- rlo/rhi
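
The 64-bit shift handlers above (shr-long, ushr-long and their /2addr forms) all use the same decomposition: the value lives in a 32-bit register pair, the main path handles shift amounts below 32, and the ${opcode}_finish path handles 32..63. A minimal C++ sketch of the unsigned variant (my own code, not ART's):

#include <cstdint>
#include <cassert>

// Sketch only: mirrors the ushr-long split into 32-bit halves.
static uint64_t UShrLongViaHalves(uint32_t lo, uint32_t hi, uint32_t shift) {
  shift &= 63;                                   // Dalvik long shifts use the low 6 bits
  uint32_t rlo, rhi;
  if (shift == 0) {
    rlo = lo;
    rhi = hi;
  } else if (shift < 32) {                       // main handler path
    rlo = (lo >> shift) | (hi << (32 - shift));  // low word borrows bits from the high word
    rhi = hi >> shift;
  } else {                                       // the _finish path (shift >= 32)
    rlo = hi >> (shift - 32);
    rhi = 0;                                     // shr-long would fill this with sign bits instead
  }
  return (static_cast<uint64_t>(rhi) << 32) | rlo;
}

int main() {
  const uint64_t v = 0xF0DEBC9A78563412ULL;
  for (uint32_t s = 0; s < 64; ++s) {
    assert(UShrLongViaHalves(static_cast<uint32_t>(v),
                             static_cast<uint32_t>(v >> 32), s) == (v >> s));
  }
  return 0;
}
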
diff --git a/runtime/interpreter/mterp/mips/unop.S b/runtime/interpreter/mterp/mips/unop.S
index 52a8f0a..bc99263 100644
--- a/runtime/interpreter/mterp/mips/unop.S
+++ b/runtime/interpreter/mterp/mips/unop.S
@@ -1,11 +1,11 @@
 %default {"preinstr":"", "result0":"a0"}
     /*
      * Generic 32-bit unary operation.  Provide an "instr" line that
-     * specifies an instruction that performs "result = op a0".
+     * specifies an instruction that performs "result0 = op a0".
      * This could be a MIPS instruction or a function call.
      *
-     * for: neg-int, not-int, neg-float, int-to-float, float-to-int,
-     *      int-to-byte, int-to-char, int-to-short
+     * for: int-to-byte, int-to-char, int-to-short,
+     *      neg-int, not-int, neg-float
      */
     /* unop vA, vB */
     GET_OPB(a3)                            #  a3 <- B
@@ -15,5 +15,4 @@
     $preinstr                              #  optional op
     $instr                                 #  a0 <- op, a0-a3 changed
     GET_INST_OPCODE(t1)                    #  extract opcode from rINST
-    SET_VREG_GOTO($result0, t0, t1)        #  vAA <- result0
-    /* 9-10 instructions */
+    SET_VREG_GOTO($result0, t0, t1)        #  vA <- result0
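
In this unop template, int-to-byte and int-to-short reduce to plain sign extension (the SEB/SEH macros, which fall back to a shift pair before MIPS32R2), while int-to-char is a zero extension. A small sketch of those fallbacks, written by me and assuming C++20 shift semantics:

#include <cstdint>
#include <cassert>

// Sketches only, not ART code.
int32_t IntToByteViaShifts(int32_t v) { return (v << 24) >> 24; }   // pre-R2 SEB: sll 24; sra 24
int32_t IntToShortViaShifts(int32_t v) { return (v << 16) >> 16; }  // pre-R2 SEH: sll 16; sra 16
int32_t IntToChar(int32_t v) { return v & 0xFFFF; }                 // char is an unsigned 16-bit value

int main() {
  assert(IntToByteViaShifts(0x123456F0) == static_cast<int8_t>(0xF0));
  assert(IntToShortViaShifts(0x1234F000) == static_cast<int16_t>(0xF000));
  assert(IntToChar(-1) == 0xFFFF);
  return 0;
}
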
diff --git a/runtime/interpreter/mterp/mips/unopNarrower.S b/runtime/interpreter/mterp/mips/unopNarrower.S
index 9c38bad..0196e27 100644
--- a/runtime/interpreter/mterp/mips/unopNarrower.S
+++ b/runtime/interpreter/mterp/mips/unopNarrower.S
@@ -1,24 +1,16 @@
 %default {"load":"LOAD64_F(fa0, fa0f, a3)"}
     /*
-     * Generic 64bit-to-32bit unary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = op a0/a1", where
-     * "result" is a 32-bit quantity in a0.
+     * Generic 64bit-to-32bit floating-point unary operation.  Provide an "instr"
+     * line that specifies an instruction that performs "fv0 = op fa0".
      *
-     * For: long-to-float, double-to-int, double-to-float
-     * If hard floating point support is available, use fa0 as the parameter,
-     * except for long-to-float opcode.
-     * (This would work for long-to-int, but that instruction is actually
-     * an exact match for OP_MOVE.)
+     * For: double-to-float
      */
     /* unop vA, vB */
     GET_OPB(a3)                            #  a3 <- B
-    GET_OPA4(rOBJ)                         #  t1 <- A+
+    GET_OPA4(rOBJ)                         #  rOBJ <- A+
     EAS2(a3, rFP, a3)                      #  a3 <- &fp[B]
     $load
     FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
     $instr
-
-.L${opcode}_set_vreg_f:
-    SET_VREG_F(fv0, rOBJ)                  #  vA <- result0
     GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    GOTO_OPCODE(t0)                        #  jump to next instruction
+    SET_VREG_F_GOTO(fv0, rOBJ, t0)         #  vA <- fv0
diff --git a/runtime/interpreter/mterp/mips/unopWide.S b/runtime/interpreter/mterp/mips/unopWide.S
index fd25dff..135d9fa 100644
--- a/runtime/interpreter/mterp/mips/unopWide.S
+++ b/runtime/interpreter/mterp/mips/unopWide.S
@@ -1,7 +1,7 @@
 %default {"preinstr":"", "result0":"a0", "result1":"a1"}
     /*
      * Generic 64-bit unary operation.  Provide an "instr" line that
-     * specifies an instruction that performs "result = op a0/a1".
+     * specifies an instruction that performs "result0/result1 = op a0/a1".
      * This could be MIPS instruction or a function call.
      *
      * For: neg-long, not-long, neg-double,
@@ -10,11 +10,9 @@
     GET_OPA4(rOBJ)                         #  rOBJ <- A+
     GET_OPB(a3)                            #  a3 <- B
     EAS2(a3, rFP, a3)                      #  a3 <- &fp[B]
-    LOAD64(a0, a1, a3)                     #  a0/a1 <- vAA
+    LOAD64(a0, a1, a3)                     #  a0/a1 <- vA
     FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
     $preinstr                              #  optional op
     $instr                                 #  a0/a1 <- op, a2-a3 changed
     GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG64($result0, $result1, rOBJ)   #  vAA <- a0/a1
-    GOTO_OPCODE(t0)                        #  jump to next instruction
-    /* 12-13 instructions */
+    SET_VREG64_GOTO($result0, $result1, rOBJ, t0)   #  vA/vA+1 <- a0/a1
diff --git a/runtime/interpreter/mterp/mips/unopWider.S b/runtime/interpreter/mterp/mips/unopWider.S
index 1c18837..ca888ad 100644
--- a/runtime/interpreter/mterp/mips/unopWider.S
+++ b/runtime/interpreter/mterp/mips/unopWider.S
@@ -1,8 +1,7 @@
 %default {"preinstr":"", "result0":"a0", "result1":"a1"}
     /*
      * Generic 32bit-to-64bit unary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = op a0", where
-     * "result" is a 64-bit quantity in a0/a1.
+     * that specifies an instruction that performs "result0/result1 = op a0".
      *
      * For: int-to-long
      */
@@ -14,6 +13,4 @@
     $preinstr                              #  optional op
     $instr                                 #  result <- op, a0-a3 changed
     GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG64($result0, $result1, rOBJ)   #  vA/vA+1 <- a0/a1
-    GOTO_OPCODE(t0)                        #  jump to next instruction
-    /* 10-11 instructions */
+    SET_VREG64_GOTO($result0, $result1, rOBJ, t0)   #  vA/vA+1 <- a0/a1
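
int-to-long, the only op listed for this wider template, just pairs the value with a sign word; roughly, in C++ (function name mine):

#include <cstdint>
#include <cassert>

// Sketch of the 32-to-64-bit widening done on a register pair.
void IntToLongHalves(int32_t v, uint32_t* lo, uint32_t* hi) {
  *lo = static_cast<uint32_t>(v);
  *hi = static_cast<uint32_t>(v >> 31);  // arithmetic shift by 31: all ones for negatives, zero otherwise
}

int main() {
  uint32_t lo, hi;
  IntToLongHalves(-2, &lo, &hi);
  assert(lo == 0xFFFFFFFEu && hi == 0xFFFFFFFFu);
  IntToLongHalves(7, &lo, &hi);
  assert(lo == 7u && hi == 0u);
  return 0;
}
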
diff --git a/runtime/interpreter/mterp/mips64/footer.S b/runtime/interpreter/mterp/mips64/footer.S
index 4063162..64772c8 100644
--- a/runtime/interpreter/mterp/mips64/footer.S
+++ b/runtime/interpreter/mterp/mips64/footer.S
@@ -108,7 +108,7 @@
     REFRESH_IBASE
     daddu   a2, rINST, rINST            # a2<- byte offset
     FETCH_ADVANCE_INST_RB a2            # update rPC, load rINST
-    and     ra, (THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST)
+    and     ra, THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
     bnezc   ra, .L_suspend_request_pending
     GET_INST_OPCODE v0                  # extract opcode from rINST
     GOTO_OPCODE v0                      # jump to next instruction
@@ -225,7 +225,7 @@
     lw      ra, THREAD_FLAGS_OFFSET(rSELF)
     sd      a0, 0(a2)
     move    a0, rSELF
-    and     ra, ra, (THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST)
+    and     ra, ra, THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
     beqzc   ra, check2
     jal     MterpSuspendCheck                       # (self)
 check2:
diff --git a/runtime/interpreter/mterp/mips64/op_return.S b/runtime/interpreter/mterp/mips64/op_return.S
index b10c03f..edd795f 100644
--- a/runtime/interpreter/mterp/mips64/op_return.S
+++ b/runtime/interpreter/mterp/mips64/op_return.S
@@ -10,7 +10,7 @@
     jal     MterpThreadFenceForConstructor
     lw      ra, THREAD_FLAGS_OFFSET(rSELF)
     move    a0, rSELF
-    and     ra, ra, (THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST)
+    and     ra, ra, THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
     beqzc   ra, 1f
     jal     MterpSuspendCheck           # (self)
 1:
diff --git a/runtime/interpreter/mterp/mips64/op_return_void.S b/runtime/interpreter/mterp/mips64/op_return_void.S
index 05253ae..f6eee91 100644
--- a/runtime/interpreter/mterp/mips64/op_return_void.S
+++ b/runtime/interpreter/mterp/mips64/op_return_void.S
@@ -3,7 +3,7 @@
     jal     MterpThreadFenceForConstructor
     lw      ra, THREAD_FLAGS_OFFSET(rSELF)
     move    a0, rSELF
-    and     ra, ra, (THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST)
+    and     ra, ra, THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
     beqzc   ra, 1f
     jal     MterpSuspendCheck           # (self)
 1:
diff --git a/runtime/interpreter/mterp/mips64/op_return_void_no_barrier.S b/runtime/interpreter/mterp/mips64/op_return_void_no_barrier.S
index f67e811..4e9b640 100644
--- a/runtime/interpreter/mterp/mips64/op_return_void_no_barrier.S
+++ b/runtime/interpreter/mterp/mips64/op_return_void_no_barrier.S
@@ -1,7 +1,7 @@
     .extern MterpSuspendCheck
     lw      ra, THREAD_FLAGS_OFFSET(rSELF)
     move    a0, rSELF
-    and     ra, ra, (THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST)
+    and     ra, ra, THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
     beqzc   ra, 1f
     jal     MterpSuspendCheck           # (self)
 1:
diff --git a/runtime/interpreter/mterp/mips64/op_return_wide.S b/runtime/interpreter/mterp/mips64/op_return_wide.S
index 544e027..91ca1fa 100644
--- a/runtime/interpreter/mterp/mips64/op_return_wide.S
+++ b/runtime/interpreter/mterp/mips64/op_return_wide.S
@@ -8,7 +8,7 @@
     jal     MterpThreadFenceForConstructor
     lw      ra, THREAD_FLAGS_OFFSET(rSELF)
     move    a0, rSELF
-    and     ra, ra, (THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST)
+    and     ra, ra, THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
     beqzc   ra, 1f
     jal     MterpSuspendCheck           # (self)
 1:
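
Each of these mips64 return/suspend sites now tests the single THREAD_SUSPEND_OR_CHECKPOINT_REQUEST mask instead of OR-ing two flag constants inline, so a newly added flag (such as the empty-checkpoint request handled in mterp.cc below) only requires updating the mask. An illustrative C++ sketch; the enumerator values are my assumptions, not ART's actual bit assignments:

#include <cstdint>
#include <cstdio>

enum ThreadFlag : uint32_t {
  kSuspendRequest         = 1u << 0,  // values assumed for illustration
  kCheckpointRequest      = 1u << 1,
  kEmptyCheckpointRequest = 1u << 2,
};

// One mask covers every "stop and talk to the runtime" condition.
constexpr uint32_t kSuspendOrCheckpointMask =
    kSuspendRequest | kCheckpointRequest | kEmptyCheckpointRequest;

bool NeedsSuspendCheck(uint32_t thread_flags) {
  return (thread_flags & kSuspendOrCheckpointMask) != 0;
}

int main() {
  printf("%d\n", NeedsSuspendCheck(kEmptyCheckpointRequest));  // prints 1
  return 0;
}
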
diff --git a/runtime/interpreter/mterp/mterp.cc b/runtime/interpreter/mterp/mterp.cc
index cf8d4bd..2bd47bb 100644
--- a/runtime/interpreter/mterp/mterp.cc
+++ b/runtime/interpreter/mterp/mterp.cc
@@ -291,11 +291,11 @@
                                    ShadowFrame* shadow_frame,
                                    Thread* self)
     REQUIRES_SHARED(Locks::mutator_lock_) {
-  String* s = ResolveString(self, *shadow_frame,  index);
+  ObjPtr<mirror::String> s = ResolveString(self, *shadow_frame, index);
   if (UNLIKELY(s == nullptr)) {
     return true;
   }
-  shadow_frame->SetVRegReference(tgt_vreg, s);
+  shadow_frame->SetVRegReference(tgt_vreg, s.Ptr());
   return false;
 }
 
@@ -304,7 +304,7 @@
                                   ShadowFrame* shadow_frame,
                                   Thread* self)
     REQUIRES_SHARED(Locks::mutator_lock_) {
-  Class* c = ResolveVerifyAndClinit(index, shadow_frame->GetMethod(), self, false, false);
+  mirror::Class* c = ResolveVerifyAndClinit(index, shadow_frame->GetMethod(), self, false, false);
   if (UNLIKELY(c == nullptr)) {
     return true;
   }
@@ -317,12 +317,12 @@
                                  art::ArtMethod* method,
                                  Thread* self)
     REQUIRES_SHARED(Locks::mutator_lock_) {
-  Class* c = ResolveVerifyAndClinit(index, method, self, false, false);
+  ObjPtr<mirror::Class> c = ResolveVerifyAndClinit(index, method, self, false, false);
   if (UNLIKELY(c == nullptr)) {
     return true;
   }
   // Must load obj from vreg following ResolveVerifyAndClinit due to moving gc.
-  Object* obj = vreg_addr->AsMirrorPtr();
+  mirror::Object* obj = vreg_addr->AsMirrorPtr();
   if (UNLIKELY(obj != nullptr && !obj->InstanceOf(c))) {
     ThrowClassCastException(c, obj->GetClass());
     return true;
@@ -335,16 +335,16 @@
                                   art::ArtMethod* method,
                                   Thread* self)
     REQUIRES_SHARED(Locks::mutator_lock_) {
-  Class* c = ResolveVerifyAndClinit(index, method, self, false, false);
+  ObjPtr<mirror::Class> c = ResolveVerifyAndClinit(index, method, self, false, false);
   if (UNLIKELY(c == nullptr)) {
     return false;  // Caller will check for pending exception.  Return value unimportant.
   }
   // Must load obj from vreg following ResolveVerifyAndClinit due to moving gc.
-  Object* obj = vreg_addr->AsMirrorPtr();
+  mirror::Object* obj = vreg_addr->AsMirrorPtr();
   return (obj != nullptr) && obj->InstanceOf(c);
 }
 
-extern "C" size_t MterpFillArrayData(Object* obj, const Instruction::ArrayDataPayload* payload)
+extern "C" size_t MterpFillArrayData(mirror::Object* obj, const Instruction::ArrayDataPayload* payload)
     REQUIRES_SHARED(Locks::mutator_lock_) {
   return FillArrayData(obj, payload);
 }
@@ -352,9 +352,12 @@
 extern "C" size_t MterpNewInstance(ShadowFrame* shadow_frame, Thread* self, uint32_t inst_data)
     REQUIRES_SHARED(Locks::mutator_lock_) {
   const Instruction* inst = Instruction::At(shadow_frame->GetDexPCPtr());
-  Object* obj = nullptr;
-  Class* c = ResolveVerifyAndClinit(inst->VRegB_21c(), shadow_frame->GetMethod(),
-                                    self, false, false);
+  mirror::Object* obj = nullptr;
+  mirror::Class* c = ResolveVerifyAndClinit(inst->VRegB_21c(),
+                                            shadow_frame->GetMethod(),
+                                            self,
+                                            false,
+                                            false);
   if (LIKELY(c != nullptr)) {
     if (UNLIKELY(c->IsStringClass())) {
       gc::AllocatorType allocator_type = Runtime::Current()->GetHeap()->GetCurrentAllocator();
@@ -404,13 +407,13 @@
                                   uint32_t inst_data)
     REQUIRES_SHARED(Locks::mutator_lock_) {
   const Instruction* inst = Instruction::At(dex_pc_ptr);
-  Object* a = shadow_frame->GetVRegReference(inst->VRegB_23x());
+  mirror::Object* a = shadow_frame->GetVRegReference(inst->VRegB_23x());
   if (UNLIKELY(a == nullptr)) {
     return false;
   }
   int32_t index = shadow_frame->GetVReg(inst->VRegC_23x());
-  Object* val = shadow_frame->GetVRegReference(inst->VRegA_23x(inst_data));
-  ObjectArray<Object>* array = a->AsObjectArray<Object>();
+  mirror::Object* val = shadow_frame->GetVRegReference(inst->VRegA_23x(inst_data));
+  mirror::ObjectArray<mirror::Object>* array = a->AsObjectArray<mirror::Object>();
   if (array->CheckIsValidIndex(index) && array->CheckAssignable(val)) {
     array->SetWithoutChecks<false>(index, val);
     return true;
@@ -442,7 +445,7 @@
     REQUIRES_SHARED(Locks::mutator_lock_) {
   const Instruction* inst = Instruction::At(dex_pc_ptr);
   int32_t length = shadow_frame->GetVReg(inst->VRegB_22c(inst_data));
-  Object* obj = AllocArrayFromCode<false, true>(
+  mirror::Object* obj = AllocArrayFromCode<false, true>(
       inst->VRegC_22c(), length, shadow_frame->GetMethod(), self,
       Runtime::Current()->GetHeap()->GetCurrentAllocator());
   if (UNLIKELY(obj == nullptr)) {
@@ -561,6 +564,8 @@
     LOG(INFO) << "Checkpoint fallback: " << inst->Opcode(inst_data);
   } else if (flags & kSuspendRequest) {
     LOG(INFO) << "Suspend fallback: " << inst->Opcode(inst_data);
+  } else if (flags & kEmptyCheckpointRequest) {
+    LOG(INFO) << "Empty checkpoint fallback: " << inst->Opcode(inst_data);
   }
 }
 
@@ -678,7 +683,7 @@
     ThrowNullPointerExceptionFromInterpreter();
     return nullptr;
   }
-  ObjectArray<Object>* array = arr->AsObjectArray<Object>();
+  mirror::ObjectArray<mirror::Object>* array = arr->AsObjectArray<mirror::Object>();
   if (LIKELY(array->CheckIsValidIndex(index))) {
     return array->GetWithoutChecks(index);
   } else {
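
The mterp.cc hunks above move the helper signatures from bare Object*/Class* to mirror::-qualified types and, for the resolved string/class locals, to ObjPtr<> with an explicit .Ptr() where a raw pointer is still required. As a rough, hypothetical illustration of that wrapper shape (this is not ART's ObjPtr implementation; names and behavior are mine):

#include <cstddef>
#include <cstdio>

// Toy value-type pointer wrapper; the real ObjPtr adds extra verification.
template <typename T>
class CheckedPtr {
 public:
  CheckedPtr(T* ptr = nullptr) : ptr_(ptr) {}   // implicit, so it accepts raw pointers
  T* Ptr() const { return ptr_; }               // escape hatch for APIs that still take T*
  T* operator->() const { return ptr_; }
  bool operator==(std::nullptr_t) const { return ptr_ == nullptr; }

 private:
  T* ptr_;
};

struct String { const char* data; };

// Stand-in for a legacy API that takes a raw pointer.
void SetReference(String* s) { printf("%s\n", s->data); }

int main() {
  String hello{"hello"};
  CheckedPtr<String> s(&hello);
  if (s == nullptr) {
    return 1;
  }
  SetReference(s.Ptr());   // mirrors the SetVRegReference(tgt_vreg, s.Ptr()) call above
  return 0;
}
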
diff --git a/runtime/interpreter/mterp/out/mterp_arm.S b/runtime/interpreter/mterp/out/mterp_arm.S
index 78a90af..4d540d7 100644
--- a/runtime/interpreter/mterp/out/mterp_arm.S
+++ b/runtime/interpreter/mterp/out/mterp_arm.S
@@ -619,7 +619,7 @@
     bl      MterpThreadFenceForConstructor
     ldr     lr, [rSELF, #THREAD_FLAGS_OFFSET]
     mov     r0, rSELF
-    ands    lr, #(THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST)
+    ands    lr, #THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
     blne    MterpSuspendCheck                       @ (self)
     mov    r0, #0
     mov    r1, #0
@@ -639,7 +639,7 @@
     bl      MterpThreadFenceForConstructor
     ldr     lr, [rSELF, #THREAD_FLAGS_OFFSET]
     mov     r0, rSELF
-    ands    lr, #(THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST)
+    ands    lr, #THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
     blne    MterpSuspendCheck                       @ (self)
     mov     r2, rINST, lsr #8           @ r2<- AA
     GET_VREG r0, r2                     @ r0<- vAA
@@ -658,7 +658,7 @@
     bl      MterpThreadFenceForConstructor
     ldr     lr, [rSELF, #THREAD_FLAGS_OFFSET]
     mov     r0, rSELF
-    ands    lr, #(THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST)
+    ands    lr, #THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
     blne    MterpSuspendCheck                       @ (self)
     mov     r2, rINST, lsr #8           @ r2<- AA
     VREG_INDEX_TO_ADDR r2, r2           @ r2<- &fp[AA]
@@ -680,7 +680,7 @@
     bl      MterpThreadFenceForConstructor
     ldr     lr, [rSELF, #THREAD_FLAGS_OFFSET]
     mov     r0, rSELF
-    ands    lr, #(THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST)
+    ands    lr, #THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
     blne    MterpSuspendCheck                       @ (self)
     mov     r2, rINST, lsr #8           @ r2<- AA
     GET_VREG r0, r2                     @ r0<- vAA
@@ -3149,7 +3149,7 @@
 /* File: arm/op_return_void_no_barrier.S */
     ldr     lr, [rSELF, #THREAD_FLAGS_OFFSET]
     mov     r0, rSELF
-    ands    lr, #(THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST)
+    ands    lr, #THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
     blne    MterpSuspendCheck                       @ (self)
     mov    r0, #0
     mov    r1, #0
@@ -11989,7 +11989,7 @@
     REFRESH_IBASE
     add     r2, rINST, rINST            @ r2<- byte offset
     FETCH_ADVANCE_INST_RB r2            @ update rPC, load rINST
-    ands    lr, #(THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST)
+    ands    lr, #THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
     bne     .L_suspend_request_pending
     GET_INST_OPCODE ip                  @ extract opcode from rINST
     GOTO_OPCODE ip                      @ jump to next instruction
diff --git a/runtime/interpreter/mterp/out/mterp_arm64.S b/runtime/interpreter/mterp/out/mterp_arm64.S
index dafcc3e..42f8c1b 100644
--- a/runtime/interpreter/mterp/out/mterp_arm64.S
+++ b/runtime/interpreter/mterp/out/mterp_arm64.S
@@ -616,7 +616,7 @@
     bl      MterpThreadFenceForConstructor
     ldr     w7, [xSELF, #THREAD_FLAGS_OFFSET]
     mov     x0, xSELF
-    ands    w7, w7, #(THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST)
+    ands    w7, w7, #THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
     b.ne    .Lop_return_void_check
 .Lop_return_void_return:
     mov     x0, #0
@@ -639,7 +639,7 @@
     bl      MterpThreadFenceForConstructor
     ldr     w7, [xSELF, #THREAD_FLAGS_OFFSET]
     mov     x0, xSELF
-    ands    w7, w7, #(THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST)
+    ands    w7, w7, #THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
     b.ne    .Lop_return_check
 .Lop_return_return:
     lsr     w2, wINST, #8               // r2<- AA
@@ -662,7 +662,7 @@
     bl      MterpThreadFenceForConstructor
     ldr     w7, [xSELF, #THREAD_FLAGS_OFFSET]
     mov     x0, xSELF
-    ands    w7, w7, #(THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST)
+    ands    w7, w7, #THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
     b.ne    .Lop_return_wide_check
 .Lop_return_wide_return:
     lsr     w2, wINST, #8               // w2<- AA
@@ -687,7 +687,7 @@
     bl      MterpThreadFenceForConstructor
     ldr     w7, [xSELF, #THREAD_FLAGS_OFFSET]
     mov     x0, xSELF
-    ands    w7, w7, #(THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST)
+    ands    w7, w7, #THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
     b.ne    .Lop_return_object_check
 .Lop_return_object_return:
     lsr     w2, wINST, #8               // r2<- AA
@@ -3033,7 +3033,7 @@
 /* File: arm64/op_return_void_no_barrier.S */
     ldr     w7, [xSELF, #THREAD_FLAGS_OFFSET]
     mov     x0, xSELF
-    ands    w7, w7, #(THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST)
+    ands    w7, w7, #THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
     b.ne    .Lop_return_void_no_barrier_check
 .Lop_return_void_no_barrier_return:
     mov     x0, #0
@@ -7082,7 +7082,7 @@
     add     w2, wINST, wINST            // w2<- byte offset
     FETCH_ADVANCE_INST_RB w2            // update rPC, load wINST
     REFRESH_IBASE
-    ands    lr, lr, #(THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST)
+    ands    lr, lr, #THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
     b.ne    .L_suspend_request_pending
     GET_INST_OPCODE ip                  // extract opcode from wINST
     GOTO_OPCODE ip                      // jump to next instruction
@@ -7156,7 +7156,7 @@
  */
 MterpCheckSuspendAndContinue:
     ldr     xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]  // refresh xIBASE
-    ands    w7, w7, #(THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST)
+    ands    w7, w7, #THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
     b.ne    check1
     GET_INST_OPCODE ip                  // extract opcode from wINST
     GOTO_OPCODE ip                      // jump to next instruction
@@ -7211,7 +7211,7 @@
     ldr     lr, [xSELF, #THREAD_FLAGS_OFFSET]
     str     x0, [x2]
     mov     x0, xSELF
-    ands    lr, lr, #(THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST)
+    ands    lr, lr, #THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
     b.eq    check2
     bl      MterpSuspendCheck                       // (self)
 check2:
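
The mterp_mips.S rewrite below reworks the dispatch macros: GET_OPCODE_TARGET computes rIBASE + (opcode << 7) (handlers are laid out 128 bytes apart), JR performs the indirect jump, and the SET_VREG*_GOTO family folds the result store into the dispatch so the store can sit in the jump's delay slot. A function-pointer table is a reasonable stand-in for that fixed-stride layout; the sketch below is my own, not ART's:

#include <cstdint>
#include <cstdio>

using Handler = const uint16_t* (*)(const uint16_t* pc, int32_t* vregs);

const uint16_t* NopHandler(const uint16_t* pc, int32_t*) { return pc + 1; }
const uint16_t* HaltHandler(const uint16_t*, int32_t*) { return nullptr; }  // made-up opcode for the sketch

int main() {
  Handler table[256] = {};
  table[0x00] = NopHandler;
  table[0xFF] = HaltHandler;
  const uint16_t code[] = {0x0000, 0x00FF};
  int32_t vregs[16] = {};
  for (const uint16_t* pc = code; pc != nullptr; ) {
    uint8_t opcode = *pc & 0xFF;       // GET_INST_OPCODE: low 8 bits of the current code unit
    pc = table[opcode](pc, vregs);     // GOTO_OPCODE: indirect jump to the handler
  }
  printf("done\n");
  return 0;
}
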
diff --git a/runtime/interpreter/mterp/out/mterp_mips.S b/runtime/interpreter/mterp/out/mterp_mips.S
index c1ba794..e154e6c 100644
--- a/runtime/interpreter/mterp/out/mterp_mips.S
+++ b/runtime/interpreter/mterp/out/mterp_mips.S
@@ -160,6 +160,58 @@
 #define fcc1   $fcc1
 #endif
 
+#ifdef MIPS32REVGE2
+#define SEB(rd, rt) \
+    seb       rd, rt
+#define SEH(rd, rt) \
+    seh       rd, rt
+#define INSERT_HIGH_HALF(rd_lo, rt_hi) \
+    ins       rd_lo, rt_hi, 16, 16
+#else
+#define SEB(rd, rt) \
+    sll       rd, rt, 24; \
+    sra       rd, rd, 24
+#define SEH(rd, rt) \
+    sll       rd, rt, 16; \
+    sra       rd, rd, 16
+/* Clobbers rt_hi on pre-R2. */
+#define INSERT_HIGH_HALF(rd_lo, rt_hi) \
+    sll       rt_hi, rt_hi, 16; \
+    or        rd_lo, rt_hi
+#endif
+
+#ifdef FPU64
+#define MOVE_TO_FPU_HIGH(r, flo, fhi) \
+    mthc1     r, flo
+#else
+#define MOVE_TO_FPU_HIGH(r, flo, fhi) \
+    mtc1      r, fhi
+#endif
+
+#ifdef MIPS32REVGE6
+#define JR(rt) \
+    jic       rt, 0
+#define LSA(rd, rs, rt, sa) \
+    .if sa; \
+    lsa       rd, rs, rt, sa; \
+    .else; \
+    addu      rd, rs, rt; \
+    .endif
+#else
+#define JR(rt) \
+    jalr      zero, rt
+#define LSA(rd, rs, rt, sa) \
+    .if sa; \
+    .set      push; \
+    .set      noat; \
+    sll       AT, rs, sa; \
+    addu      rd, AT, rt; \
+    .set      pop; \
+    .else; \
+    addu      rd, rs, rt; \
+    .endif
+#endif
+
 /*
  * Instead of holding a pointer to the shadow frame, we keep rFP at the base of the vregs.  So,
  * to access other shadow frame fields, we need to use a backwards offset.  Define those here.
@@ -193,12 +245,12 @@
     sw        rPC, OFF_FP_DEX_PC_PTR(rFP)
 
 #define EXPORT_DEX_PC(tmp) \
-    lw   tmp, OFF_FP_CODE_ITEM(rFP) \
-    sw   rPC, OFF_FP_DEX_PC_PTR(rFP) \
-    addu tmp, CODEITEM_INSNS_OFFSET \
-    subu tmp, rPC, tmp \
-    sra  tmp, tmp, 1 \
-    sw   tmp, OFF_FP_DEX_PC(rFP)
+    lw        tmp, OFF_FP_CODE_ITEM(rFP); \
+    sw        rPC, OFF_FP_DEX_PC_PTR(rFP); \
+    addu      tmp, CODEITEM_INSNS_OFFSET; \
+    subu      tmp, rPC, tmp; \
+    sra       tmp, tmp, 1; \
+    sw        tmp, OFF_FP_DEX_PC(rFP)
 
 /*
  * Fetch the next instruction from rPC into rINST.  Does not advance rPC.
@@ -213,18 +265,11 @@
  * exception catch may miss.  (This also implies that it must come after
  * EXPORT_PC().)
  */
-#define FETCH_ADVANCE_INST(_count) lhu rINST, ((_count)*2)(rPC); \
+#define FETCH_ADVANCE_INST(_count) \
+    lhu       rINST, ((_count)*2)(rPC); \
     addu      rPC, rPC, ((_count) * 2)
 
 /*
- * The operation performed here is similar to FETCH_ADVANCE_INST, except the
- * src and dest registers are parameterized (not hard-wired to rPC and rINST).
- */
-#define PREFETCH_ADVANCE_INST(_dreg, _sreg, _count) \
-    lhu       _dreg, ((_count)*2)(_sreg) ;            \
-    addu      _sreg, _sreg, (_count)*2
-
-/*
  * Similar to FETCH_ADVANCE_INST, but does not update rPC.  Used to load
  * rINST ahead of possible exception point.  Be sure to manually advance rPC
  * later.
@@ -239,7 +284,8 @@
  * rPC to point to the next instruction.  "rd" must specify the distance
  * in bytes, *not* 16-bit code units, and may be a signed value.
  */
-#define FETCH_ADVANCE_INST_RB(rd) addu rPC, rPC, rd; \
+#define FETCH_ADVANCE_INST_RB(rd) \
+    addu      rPC, rPC, rd; \
     lhu       rINST, (rPC)
 
 /*
@@ -264,38 +310,75 @@
 #define GET_INST_OPCODE(rd) and rd, rINST, 0xFF
 
 /*
- * Put the prefetched instruction's opcode field into the specified register.
+ * Transform opcode into branch target address.
  */
-#define GET_PREFETCHED_OPCODE(dreg, sreg)   andi     dreg, sreg, 255
+#define GET_OPCODE_TARGET(rd) \
+    sll       rd, rd, 7; \
+    addu      rd, rIBASE, rd
 
 /*
  * Begin executing the opcode in rd.
  */
-#define GOTO_OPCODE(rd) sll rd, rd, 7; \
-    addu      rd, rIBASE, rd; \
-    jalr      zero, rd
-
-#define GOTO_OPCODE_BASE(_base, rd)  sll rd, rd, 7; \
-    addu      rd, _base, rd; \
-    jalr      zero, rd
+#define GOTO_OPCODE(rd) \
+    GET_OPCODE_TARGET(rd); \
+    JR(rd)
 
 /*
  * Get/set the 32-bit value from a Dalvik register.
  */
 #define GET_VREG(rd, rix) LOAD_eas2(rd, rFP, rix)
 
-#define GET_VREG_F(rd, rix) EAS2(AT, rFP, rix); \
-    .set noat; l.s rd, (AT); .set at
+#define GET_VREG_F(rd, rix) \
+    .set noat; \
+    EAS2(AT, rFP, rix); \
+    l.s       rd, (AT); \
+    .set at
 
-#define SET_VREG(rd, rix) .set noat; \
+#ifdef MIPS32REVGE6
+#define SET_VREG(rd, rix) \
+    lsa       t8, rix, rFP, 2; \
+    sw        rd, 0(t8); \
+    lsa       t8, rix, rREFS, 2; \
+    sw        zero, 0(t8)
+#else
+#define SET_VREG(rd, rix) \
+    .set noat; \
     sll       AT, rix, 2; \
     addu      t8, rFP, AT; \
     sw        rd, 0(t8); \
     addu      t8, rREFS, AT; \
     .set at; \
     sw        zero, 0(t8)
+#endif
 
-#define SET_VREG64(rlo, rhi, rix) .set noat; \
+#ifdef MIPS32REVGE6
+#define SET_VREG_OBJECT(rd, rix) \
+    lsa       t8, rix, rFP, 2; \
+    sw        rd, 0(t8); \
+    lsa       t8, rix, rREFS, 2; \
+    sw        rd, 0(t8)
+#else
+#define SET_VREG_OBJECT(rd, rix) \
+    .set noat; \
+    sll       AT, rix, 2; \
+    addu      t8, rFP, AT; \
+    sw        rd, 0(t8); \
+    addu      t8, rREFS, AT; \
+    .set at; \
+    sw        rd, 0(t8)
+#endif
+
+#ifdef MIPS32REVGE6
+#define SET_VREG64(rlo, rhi, rix) \
+    lsa       t8, rix, rFP, 2; \
+    sw        rlo, 0(t8); \
+    sw        rhi, 4(t8); \
+    lsa       t8, rix, rREFS, 2; \
+    sw        zero, 0(t8); \
+    sw        zero, 4(t8)
+#else
+#define SET_VREG64(rlo, rhi, rix) \
+    .set noat; \
     sll       AT, rix, 2; \
     addu      t8, rFP, AT; \
     sw        rlo, 0(t8); \
@@ -304,9 +387,39 @@
     .set at; \
     sw        zero, 0(t8); \
     sw        zero, 4(t8)
+#endif
 
-#ifdef FPU64
-#define SET_VREG64_F(rlo, rhi, rix) .set noat; \
+#ifdef MIPS32REVGE6
+#define SET_VREG_F(rd, rix) \
+    lsa       t8, rix, rFP, 2; \
+    s.s       rd, 0(t8); \
+    lsa       t8, rix, rREFS, 2; \
+    sw        zero, 0(t8)
+#else
+#define SET_VREG_F(rd, rix) \
+    .set noat; \
+    sll       AT, rix, 2; \
+    addu      t8, rFP, AT; \
+    s.s       rd, 0(t8); \
+    addu      t8, rREFS, AT; \
+    .set at; \
+    sw        zero, 0(t8)
+#endif
+
+#ifdef MIPS32REVGE6
+#define SET_VREG64_F(rlo, rhi, rix) \
+    lsa       t8, rix, rFP, 2; \
+    .set noat; \
+    mfhc1     AT, rlo; \
+    s.s       rlo, 0(t8); \
+    sw        AT, 4(t8); \
+    .set at; \
+    lsa       t8, rix, rREFS, 2; \
+    sw        zero, 0(t8); \
+    sw        zero, 4(t8)
+#elif defined(FPU64)
+#define SET_VREG64_F(rlo, rhi, rix) \
+    .set noat; \
     sll       AT, rix, 2; \
     addu      t8, rREFS, AT; \
     sw        zero, 0(t8); \
@@ -317,7 +430,8 @@
     .set at; \
     s.s       rlo, 0(t8)
 #else
-#define SET_VREG64_F(rlo, rhi, rix) .set noat; \
+#define SET_VREG64_F(rlo, rhi, rix) \
+    .set noat; \
     sll       AT, rix, 2; \
     addu      t8, rFP, AT; \
     s.s       rlo, 0(t8); \
@@ -328,18 +442,21 @@
     sw        zero, 4(t8)
 #endif
 
-#define SET_VREG_OBJECT(rd, rix) .set noat; \
-    sll       AT, rix, 2; \
-    addu      t8, rFP, AT; \
-    sw        rd, 0(t8); \
-    addu      t8, rREFS, AT; \
-    .set at; \
-    sw        rd, 0(t8)
-
 /* Combination of the SET_VREG and GOTO_OPCODE functions to save 1 instruction */
-#define SET_VREG_GOTO(rd, rix, dst) .set noreorder; \
-    sll       dst, dst, 7; \
-    addu      dst, rIBASE, dst; \
+#ifdef MIPS32REVGE6
+#define SET_VREG_GOTO(rd, rix, dst) \
+    .set noreorder; \
+    GET_OPCODE_TARGET(dst); \
+    lsa       t8, rix, rFP, 2; \
+    sw        rd, 0(t8); \
+    lsa       t8, rix, rREFS, 2; \
+    jalr      zero, dst; \
+    sw        zero, 0(t8); \
+    .set reorder
+#else
+#define SET_VREG_GOTO(rd, rix, dst) \
+    .set noreorder; \
+    GET_OPCODE_TARGET(dst); \
     .set noat; \
     sll       AT, rix, 2; \
     addu      t8, rFP, AT; \
@@ -349,11 +466,51 @@
     jalr      zero, dst; \
     sw        zero, 0(t8); \
     .set reorder
+#endif
+
+/* Combination of the SET_VREG_OBJECT and GOTO_OPCODE functions to save 1 instruction */
+#ifdef MIPS32REVGE6
+#define SET_VREG_OBJECT_GOTO(rd, rix, dst) \
+    .set noreorder; \
+    GET_OPCODE_TARGET(dst); \
+    lsa       t8, rix, rFP, 2; \
+    sw        rd, 0(t8); \
+    lsa       t8, rix, rREFS, 2; \
+    jalr      zero, dst; \
+    sw        rd, 0(t8); \
+    .set reorder
+#else
+#define SET_VREG_OBJECT_GOTO(rd, rix, dst) \
+    .set noreorder; \
+    GET_OPCODE_TARGET(dst); \
+    .set noat; \
+    sll       AT, rix, 2; \
+    addu      t8, rFP, AT; \
+    sw        rd, 0(t8); \
+    addu      t8, rREFS, AT; \
+    .set at; \
+    jalr      zero, dst; \
+    sw        rd, 0(t8); \
+    .set reorder
+#endif
 
 /* Combination of the SET_VREG64 and GOTO_OPCODE functions to save 1 instruction */
-#define SET_VREG64_GOTO(rlo, rhi, rix, dst) .set noreorder; \
-    sll       dst, dst, 7; \
-    addu      dst, rIBASE, dst; \
+#ifdef MIPS32REVGE6
+#define SET_VREG64_GOTO(rlo, rhi, rix, dst) \
+    .set noreorder; \
+    GET_OPCODE_TARGET(dst); \
+    lsa       t8, rix, rFP, 2; \
+    sw        rlo, 0(t8); \
+    sw        rhi, 4(t8); \
+    lsa       t8, rix, rREFS, 2; \
+    sw        zero, 0(t8); \
+    jalr      zero, dst; \
+    sw        zero, 4(t8); \
+    .set reorder
+#else
+#define SET_VREG64_GOTO(rlo, rhi, rix, dst) \
+    .set noreorder; \
+    GET_OPCODE_TARGET(dst); \
     .set noat; \
     sll       AT, rix, 2; \
     addu      t8, rFP, AT; \
@@ -365,14 +522,82 @@
     jalr      zero, dst; \
     sw        zero, 4(t8); \
     .set reorder
+#endif
 
-#define SET_VREG_F(rd, rix) .set noat; \
+/* Combination of the SET_VREG_F and GOTO_OPCODE functions to save 1 instruction */
+#ifdef MIPS32REVGE6
+#define SET_VREG_F_GOTO(rd, rix, dst) \
+    .set noreorder; \
+    GET_OPCODE_TARGET(dst); \
+    lsa       t8, rix, rFP, 2; \
+    s.s       rd, 0(t8); \
+    lsa       t8, rix, rREFS, 2; \
+    jalr      zero, dst; \
+    sw        zero, 0(t8); \
+    .set reorder
+#else
+#define SET_VREG_F_GOTO(rd, rix, dst) \
+    .set noreorder; \
+    GET_OPCODE_TARGET(dst); \
+    .set noat; \
     sll       AT, rix, 2; \
     addu      t8, rFP, AT; \
     s.s       rd, 0(t8); \
     addu      t8, rREFS, AT; \
     .set at; \
-    sw        zero, 0(t8)
+    jalr      zero, dst; \
+    sw        zero, 0(t8); \
+    .set reorder
+#endif
+
+/* Combination of the SET_VREG64_F and GOTO_OPCODE functions to save 1 instruction */
+#ifdef MIPS32REVGE6
+#define SET_VREG64_F_GOTO(rlo, rhi, rix, dst) \
+    .set noreorder; \
+    GET_OPCODE_TARGET(dst); \
+    lsa       t8, rix, rFP, 2; \
+    .set noat; \
+    mfhc1     AT, rlo; \
+    s.s       rlo, 0(t8); \
+    sw        AT, 4(t8); \
+    .set at; \
+    lsa       t8, rix, rREFS, 2; \
+    sw        zero, 0(t8); \
+    jalr      zero, dst; \
+    sw        zero, 4(t8); \
+    .set reorder
+#elif defined(FPU64)
+#define SET_VREG64_F_GOTO(rlo, rhi, rix, dst) \
+    .set noreorder; \
+    GET_OPCODE_TARGET(dst); \
+    .set noat; \
+    sll       AT, rix, 2; \
+    addu      t8, rREFS, AT; \
+    sw        zero, 0(t8); \
+    sw        zero, 4(t8); \
+    addu      t8, rFP, AT; \
+    mfhc1     AT, rlo; \
+    sw        AT, 4(t8); \
+    .set at; \
+    jalr      zero, dst; \
+    s.s       rlo, 0(t8); \
+    .set reorder
+#else
+#define SET_VREG64_F_GOTO(rlo, rhi, rix, dst) \
+    .set noreorder; \
+    GET_OPCODE_TARGET(dst); \
+    .set noat; \
+    sll       AT, rix, 2; \
+    addu      t8, rFP, AT; \
+    s.s       rlo, 0(t8); \
+    s.s       rhi, 4(t8); \
+    addu      t8, rREFS, AT; \
+    .set at; \
+    sw        zero, 0(t8); \
+    jalr      zero, dst; \
+    sw        zero, 4(t8); \
+    .set reorder
+#endif
 
 #define GET_OPA(rd) srl rd, rINST, 8
 #ifdef MIPS32REVGE2
@@ -383,60 +608,60 @@
 #define GET_OPB(rd) srl rd, rINST, 12
 
 /*
- * Form an Effective Address rd = rbase + roff<<n;
- * Uses reg AT
+ * Form an Effective Address rd = rbase + roff<<shift;
+ * Uses reg AT on pre-R6.
  */
-#define EASN(rd, rbase, roff, rshift) .set noat; \
-    sll       AT, roff, rshift; \
-    addu      rd, rbase, AT; \
-    .set at
+#define EASN(rd, rbase, roff, shift) LSA(rd, roff, rbase, shift)
 
 #define EAS1(rd, rbase, roff) EASN(rd, rbase, roff, 1)
 #define EAS2(rd, rbase, roff) EASN(rd, rbase, roff, 2)
 #define EAS3(rd, rbase, roff) EASN(rd, rbase, roff, 3)
 #define EAS4(rd, rbase, roff) EASN(rd, rbase, roff, 4)
 
-/*
- * Form an Effective Shift Right rd = rbase + roff>>n;
- * Uses reg AT
- */
-#define ESRN(rd, rbase, roff, rshift) .set noat; \
-    srl       AT, roff, rshift; \
-    addu      rd, rbase, AT; \
+#define LOAD_eas2(rd, rbase, roff) \
+    .set noat; \
+    EAS2(AT, rbase, roff); \
+    lw        rd, 0(AT); \
     .set at
 
-#define LOAD_eas2(rd, rbase, roff) EAS2(AT, rbase, roff); \
-    .set noat; lw rd, 0(AT); .set at
-
-#define STORE_eas2(rd, rbase, roff) EAS2(AT, rbase, roff); \
-    .set noat; sw rd, 0(AT); .set at
+#define STORE_eas2(rd, rbase, roff) \
+    .set noat; \
+    EAS2(AT, rbase, roff); \
+    sw        rd, 0(AT); \
+    .set at
 
 #define LOAD_RB_OFF(rd, rbase, off) lw rd, off(rbase)
 #define STORE_RB_OFF(rd, rbase, off) sw rd, off(rbase)
 
-#define STORE64_off(rlo, rhi, rbase, off) sw rlo, off(rbase); \
+#define STORE64_off(rlo, rhi, rbase, off) \
+    sw        rlo, off(rbase); \
     sw        rhi, (off+4)(rbase)
-#define LOAD64_off(rlo, rhi, rbase, off) lw rlo, off(rbase); \
+#define LOAD64_off(rlo, rhi, rbase, off) \
+    lw        rlo, off(rbase); \
     lw        rhi, (off+4)(rbase)
 
 #define STORE64(rlo, rhi, rbase) STORE64_off(rlo, rhi, rbase, 0)
 #define LOAD64(rlo, rhi, rbase) LOAD64_off(rlo, rhi, rbase, 0)
 
 #ifdef FPU64
-#define STORE64_off_F(rlo, rhi, rbase, off) s.s rlo, off(rbase); \
+#define STORE64_off_F(rlo, rhi, rbase, off) \
+    s.s       rlo, off(rbase); \
     .set noat; \
     mfhc1     AT, rlo; \
     sw        AT, (off+4)(rbase); \
     .set at
-#define LOAD64_off_F(rlo, rhi, rbase, off) l.s rlo, off(rbase); \
+#define LOAD64_off_F(rlo, rhi, rbase, off) \
+    l.s       rlo, off(rbase); \
     .set noat; \
     lw        AT, (off+4)(rbase); \
     mthc1     AT, rlo; \
     .set at
 #else
-#define STORE64_off_F(rlo, rhi, rbase, off) s.s rlo, off(rbase); \
+#define STORE64_off_F(rlo, rhi, rbase, off) \
+    s.s       rlo, off(rbase); \
     s.s       rhi, (off+4)(rbase)
-#define LOAD64_off_F(rlo, rhi, rbase, off) l.s rlo, off(rbase); \
+#define LOAD64_off_F(rlo, rhi, rbase, off) \
+    l.s       rlo, off(rbase); \
     l.s       rhi, (off+4)(rbase)
 #endif
 
@@ -498,6 +723,14 @@
 #define REFRESH_IBASE() \
     lw        rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)
 
+/* Constants for float/double_to_int/long conversions */
+#define INT_MIN                 0x80000000
+#define INT_MIN_AS_FLOAT        0xCF000000
+#define INT_MIN_AS_DOUBLE_HIGH  0xC1E00000
+#define LONG_MIN_HIGH           0x80000000
+#define LONG_MIN_AS_FLOAT       0xDF000000
+#define LONG_MIN_AS_DOUBLE_HIGH 0xC3E00000
+
 /* File: mips/entry.S */
 /*
  * Copyright (C) 2016 The Android Open Source Project
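
The INT_MIN_AS_FLOAT/LONG_MIN_AS_FLOAT group of constants added above supports the Java-defined float/double-to-integer conversion rules (NaN becomes 0, out-of-range values saturate), which a bare hardware convert instruction does not give. A sketch of the intended semantics, in my own C++ rather than the handlers' assembly:

#include <cstdint>
#include <cmath>
#include <limits>
#include <cassert>

int32_t JavaFloatToInt(float f) {
  if (std::isnan(f)) return 0;                                          // NaN -> 0
  if (f >= static_cast<float>(std::numeric_limits<int32_t>::max())) {   // saturate high
    return std::numeric_limits<int32_t>::max();
  }
  if (f <= static_cast<float>(std::numeric_limits<int32_t>::min())) {   // saturate low
    return std::numeric_limits<int32_t>::min();
  }
  return static_cast<int32_t>(f);                                       // in range: truncate toward zero
}

int main() {
  assert(JavaFloatToInt(std::nanf("")) == 0);
  assert(JavaFloatToInt(1e20f) == std::numeric_limits<int32_t>::max());
  assert(JavaFloatToInt(-1e20f) == std::numeric_limits<int32_t>::min());
  assert(JavaFloatToInt(-2.75f) == -2);
  return 0;
}
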
@@ -599,11 +832,10 @@
     GET_VREG(a2, a1)                       #  a2 <- fp[B]
     GET_INST_OPCODE(t0)                    #  t0 <- opcode from rINST
     .if 0
-    SET_VREG_OBJECT(a2, a0)                #  fp[A] <- a2
+    SET_VREG_OBJECT_GOTO(a2, a0, t0)       #  fp[A] <- a2
     .else
-    SET_VREG(a2, a0)                       #  fp[A] <- a2
+    SET_VREG_GOTO(a2, a0, t0)              #  fp[A] <- a2
     .endif
-    GOTO_OPCODE(t0)                        #  jump to next instruction
 
 /* ------------------------------ */
     .balign 128
@@ -617,11 +849,10 @@
     GET_VREG(a2, a1)                       #  a2 <- fp[BBBB]
     GET_INST_OPCODE(t0)                    #  extract opcode from rINST
     .if 0
-    SET_VREG_OBJECT(a2, a0)                #  fp[AA] <- a2
+    SET_VREG_OBJECT_GOTO(a2, a0, t0)       #  fp[AA] <- a2
     .else
-    SET_VREG(a2, a0)                       #  fp[AA] <- a2
+    SET_VREG_GOTO(a2, a0, t0)              #  fp[AA] <- a2
     .endif
-    GOTO_OPCODE(t0)                        #  jump to next instruction
 
 /* ------------------------------ */
     .balign 128
@@ -635,11 +866,10 @@
     GET_VREG(a2, a1)                       #  a2 <- fp[BBBB]
     GET_INST_OPCODE(t0)                    #  extract opcode from rINST
     .if 0
-    SET_VREG_OBJECT(a2, a0)                #  fp[AAAA] <- a2
+    SET_VREG_OBJECT_GOTO(a2, a0, t0)       #  fp[AAAA] <- a2
     .else
-    SET_VREG(a2, a0)                       #  fp[AAAA] <- a2
+    SET_VREG_GOTO(a2, a0, t0)              #  fp[AAAA] <- a2
     .endif
-    GOTO_OPCODE(t0)                        #  jump to next instruction
 
 /* ------------------------------ */
     .balign 128
@@ -652,9 +882,8 @@
     EAS2(a3, rFP, a3)                      #  a3 <- &fp[B]
     LOAD64(a0, a1, a3)                     #  a0/a1 <- fp[B]
     FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
-    SET_VREG64(a0, a1, a2)                 #  fp[A] <- a0/a1
     GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    GOTO_OPCODE(t0)                        #  jump to next instruction
+    SET_VREG64_GOTO(a0, a1, a2, t0)        #  fp[A] <- a0/a1
 
 /* ------------------------------ */
     .balign 128
@@ -667,9 +896,8 @@
     EAS2(a3, rFP, a3)                      #  a3 <- &fp[BBBB]
     LOAD64(a0, a1, a3)                     #  a0/a1 <- fp[BBBB]
     FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
-    SET_VREG64(a0, a1, a2)                 #  fp[AA] <- a0/a1
     GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    GOTO_OPCODE(t0)                        #  jump to next instruction
+    SET_VREG64_GOTO(a0, a1, a2, t0)        #  fp[AA] <- a0/a1
 
 /* ------------------------------ */
     .balign 128
@@ -682,9 +910,8 @@
     EAS2(a3, rFP, a3)                      #  a3 <- &fp[BBBB]
     LOAD64(a0, a1, a3)                     #  a0/a1 <- fp[BBBB]
     FETCH_ADVANCE_INST(3)                  #  advance rPC, load rINST
-    SET_VREG64(a0, a1, a2)                 #  fp[AAAA] <- a0/a1
     GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    GOTO_OPCODE(t0)                        #  jump to next instruction
+    SET_VREG64_GOTO(a0, a1, a2, t0)        #  fp[AAAA] <- a0/a1
 
 /* ------------------------------ */
     .balign 128
@@ -699,11 +926,10 @@
     GET_VREG(a2, a1)                       #  a2 <- fp[B]
     GET_INST_OPCODE(t0)                    #  t0 <- opcode from rINST
     .if 1
-    SET_VREG_OBJECT(a2, a0)                #  fp[A] <- a2
+    SET_VREG_OBJECT_GOTO(a2, a0, t0)       #  fp[A] <- a2
     .else
-    SET_VREG(a2, a0)                       #  fp[A] <- a2
+    SET_VREG_GOTO(a2, a0, t0)              #  fp[A] <- a2
     .endif
-    GOTO_OPCODE(t0)                        #  jump to next instruction
 
 
 /* ------------------------------ */
@@ -719,11 +945,10 @@
     GET_VREG(a2, a1)                       #  a2 <- fp[BBBB]
     GET_INST_OPCODE(t0)                    #  extract opcode from rINST
     .if 1
-    SET_VREG_OBJECT(a2, a0)                #  fp[AA] <- a2
+    SET_VREG_OBJECT_GOTO(a2, a0, t0)       #  fp[AA] <- a2
     .else
-    SET_VREG(a2, a0)                       #  fp[AA] <- a2
+    SET_VREG_GOTO(a2, a0, t0)              #  fp[AA] <- a2
     .endif
-    GOTO_OPCODE(t0)                        #  jump to next instruction
 
 
 /* ------------------------------ */
@@ -739,11 +964,10 @@
     GET_VREG(a2, a1)                       #  a2 <- fp[BBBB]
     GET_INST_OPCODE(t0)                    #  extract opcode from rINST
     .if 1
-    SET_VREG_OBJECT(a2, a0)                #  fp[AAAA] <- a2
+    SET_VREG_OBJECT_GOTO(a2, a0, t0)       #  fp[AAAA] <- a2
     .else
-    SET_VREG(a2, a0)                       #  fp[AAAA] <- a2
+    SET_VREG_GOTO(a2, a0, t0)              #  fp[AAAA] <- a2
     .endif
-    GOTO_OPCODE(t0)                        #  jump to next instruction
 
 
 /* ------------------------------ */
@@ -758,11 +982,10 @@
     lw    a0, 0(a0)                        #  a0 <- result.i
     GET_INST_OPCODE(t0)                    #  extract opcode from rINST
     .if 0
-    SET_VREG_OBJECT(a0, a2)                #  fp[AA] <- a0
+    SET_VREG_OBJECT_GOTO(a0, a2, t0)       #  fp[AA] <- a0
     .else
-    SET_VREG(a0, a2)                       #  fp[AA] <- a0
+    SET_VREG_GOTO(a0, a2, t0)              #  fp[AA] <- a0
     .endif
-    GOTO_OPCODE(t0)                        #  jump to next instruction
 
 /* ------------------------------ */
     .balign 128
@@ -773,9 +996,8 @@
     lw    a3, OFF_FP_RESULT_REGISTER(rFP)  #  get pointer to result JType
     LOAD64(a0, a1, a3)                     #  a0/a1 <- retval.j
     FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
-    SET_VREG64(a0, a1, a2)                 #  fp[AA] <- a0/a1
     GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    GOTO_OPCODE(t0)                        #  jump to next instruction
+    SET_VREG64_GOTO(a0, a1, a2, t0)        #  fp[AA] <- a0/a1
 
 /* ------------------------------ */
     .balign 128
@@ -790,11 +1012,10 @@
     lw    a0, 0(a0)                        #  a0 <- result.i
     GET_INST_OPCODE(t0)                    #  extract opcode from rINST
     .if 1
-    SET_VREG_OBJECT(a0, a2)                #  fp[AA] <- a0
+    SET_VREG_OBJECT_GOTO(a0, a2, t0)       #  fp[AA] <- a0
     .else
-    SET_VREG(a0, a2)                       #  fp[AA] <- a0
+    SET_VREG_GOTO(a0, a2, t0)              #  fp[AA] <- a0
     .endif
-    GOTO_OPCODE(t0)                        #  jump to next instruction
 
 
 /* ------------------------------ */
@@ -805,10 +1026,11 @@
     GET_OPA(a2)                                 #  a2 <- AA
     lw    a3, THREAD_EXCEPTION_OFFSET(rSELF)    #  get exception obj
     FETCH_ADVANCE_INST(1)                       #  advance rPC, load rINST
-    SET_VREG_OBJECT(a3, a2)                     #  fp[AA] <- exception obj
     GET_INST_OPCODE(t0)                         #  extract opcode from rINST
+    GET_OPCODE_TARGET(t0)
+    SET_VREG_OBJECT(a3, a2)                     #  fp[AA] <- exception obj
     sw    zero, THREAD_EXCEPTION_OFFSET(rSELF)  #  clear exception
-    GOTO_OPCODE(t0)                             #  jump to next instruction
+    JR(t0)                                      #  jump to next instruction
 
 /* ------------------------------ */
     .balign 128
@@ -818,7 +1040,7 @@
     JAL(MterpThreadFenceForConstructor)
     lw        ra, THREAD_FLAGS_OFFSET(rSELF)
     move      a0, rSELF
-    and       ra, (THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST)
+    and       ra, THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
     beqz      ra, 1f
     JAL(MterpSuspendCheck)                 # (self)
 1:
@@ -840,7 +1062,7 @@
     JAL(MterpThreadFenceForConstructor)
     lw        ra, THREAD_FLAGS_OFFSET(rSELF)
     move      a0, rSELF
-    and       ra, (THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST)
+    and       ra, THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
     beqz      ra, 1f
     JAL(MterpSuspendCheck)                 # (self)
 1:
@@ -861,7 +1083,7 @@
     JAL(MterpThreadFenceForConstructor)
     lw        ra, THREAD_FLAGS_OFFSET(rSELF)
     move      a0, rSELF
-    and       ra, (THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST)
+    and       ra, THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
     beqz      ra, 1f
     JAL(MterpSuspendCheck)                 # (self)
 1:
@@ -885,7 +1107,7 @@
     JAL(MterpThreadFenceForConstructor)
     lw        ra, THREAD_FLAGS_OFFSET(rSELF)
     move      a0, rSELF
-    and       ra, (THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST)
+    and       ra, THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
     beqz      ra, 1f
     JAL(MterpSuspendCheck)                 # (self)
 1:
@@ -899,7 +1121,7 @@
     .balign 128
 .L_op_const_4: /* 0x12 */
 /* File: mips/op_const_4.S */
-    # const/4 vA,                          /* +B */
+    /* const/4 vA, +B */
     sll       a1, rINST, 16                #  a1 <- Bxxx0000
     GET_OPA(a0)                            #  a0 <- A+
     FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
@@ -912,7 +1134,7 @@
     .balign 128
 .L_op_const_16: /* 0x13 */
 /* File: mips/op_const_16.S */
-    # const/16 vAA,                        /* +BBBB */
+    /* const/16 vAA, +BBBB */
     FETCH_S(a0, 1)                         #  a0 <- ssssBBBB (sign-extended)
     GET_OPA(a3)                            #  a3 <- AA
     FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
@@ -923,13 +1145,12 @@
     .balign 128
 .L_op_const: /* 0x14 */
 /* File: mips/op_const.S */
-    # const vAA,                           /* +BBBBbbbb */
+    /* const vAA, +BBBBbbbb */
     GET_OPA(a3)                            #  a3 <- AA
     FETCH(a0, 1)                           #  a0 <- bbbb (low)
     FETCH(a1, 2)                           #  a1 <- BBBB (high)
     FETCH_ADVANCE_INST(3)                  #  advance rPC, load rINST
-    sll       a1, a1, 16
-    or        a0, a1, a0                   #  a0 <- BBBBbbbb
+    INSERT_HIGH_HALF(a0, a1)               #  a0 <- BBBBbbbb
     GET_INST_OPCODE(t0)                    #  extract opcode from rINST
     SET_VREG_GOTO(a0, a3, t0)              #  vAA <- a0
 
@@ -937,7 +1158,7 @@
     .balign 128
 .L_op_const_high16: /* 0x15 */
 /* File: mips/op_const_high16.S */
-    # const/high16 vAA,                    /* +BBBB0000 */
+    /* const/high16 vAA, +BBBB0000 */
     FETCH(a0, 1)                           #  a0 <- 0000BBBB (zero-extended)
     GET_OPA(a3)                            #  a3 <- AA
     sll       a0, a0, 16                   #  a0 <- BBBB0000
@@ -949,69 +1170,62 @@
     .balign 128
 .L_op_const_wide_16: /* 0x16 */
 /* File: mips/op_const_wide_16.S */
-    # const-wide/16 vAA,                   /* +BBBB */
+    /* const-wide/16 vAA, +BBBB */
     FETCH_S(a0, 1)                         #  a0 <- ssssBBBB (sign-extended)
     GET_OPA(a3)                            #  a3 <- AA
     sra       a1, a0, 31                   #  a1 <- ssssssss
     FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
     GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG64(a0, a1, a3)                 #  vAA <- a0/a1
-    GOTO_OPCODE(t0)                        #  jump to next instruction
+    SET_VREG64_GOTO(a0, a1, a3, t0)        #  vAA/vAA+1 <- a0/a1
 
 /* ------------------------------ */
     .balign 128
 .L_op_const_wide_32: /* 0x17 */
 /* File: mips/op_const_wide_32.S */
-    # const-wide/32 vAA,                   /* +BBBBbbbb */
+    /* const-wide/32 vAA, +BBBBbbbb */
     FETCH(a0, 1)                           #  a0 <- 0000bbbb (low)
     GET_OPA(a3)                            #  a3 <- AA
     FETCH_S(a2, 2)                         #  a2 <- ssssBBBB (high)
     FETCH_ADVANCE_INST(3)                  #  advance rPC, load rINST
-    sll       a2, a2, 16
-    or        a0, a0, a2                   #  a0 <- BBBBbbbb
+    INSERT_HIGH_HALF(a0, a2)               #  a0 <- BBBBbbbb
     sra       a1, a0, 31                   #  a1 <- ssssssss
     GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG64(a0, a1, a3)                 #  vAA <- a0/a1
-    GOTO_OPCODE(t0)                        #  jump to next instruction
+    SET_VREG64_GOTO(a0, a1, a3, t0)        #  vAA/vAA+1 <- a0/a1
 
 /* ------------------------------ */
     .balign 128
 .L_op_const_wide: /* 0x18 */
 /* File: mips/op_const_wide.S */
-    # const-wide vAA,                      /* +HHHHhhhhBBBBbbbb */
+    /* const-wide vAA, +HHHHhhhhBBBBbbbb */
     FETCH(a0, 1)                           #  a0 <- bbbb (low)
     FETCH(a1, 2)                           #  a1 <- BBBB (low middle)
     FETCH(a2, 3)                           #  a2 <- hhhh (high middle)
-    sll       a1, 16 #
-    or        a0, a1                       #  a0 <- BBBBbbbb (low word)
+    INSERT_HIGH_HALF(a0, a1)               #  a0 <- BBBBbbbb (low word)
     FETCH(a3, 4)                           #  a3 <- HHHH (high)
     GET_OPA(t1)                            #  t1 <- AA
-    sll       a3, 16
-    or        a1, a3, a2                   #  a1 <- HHHHhhhh (high word)
+    INSERT_HIGH_HALF(a2, a3)               #  a2 <- HHHHhhhh (high word)
     FETCH_ADVANCE_INST(5)                  #  advance rPC, load rINST
     GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG64(a0, a1, t1)                 #  vAA <- a0/a1
-    GOTO_OPCODE(t0)                        #  jump to next instruction
+    SET_VREG64_GOTO(a0, a2, t1, t0)        #  vAA/vAA+1 <- a0/a2
 
 /* ------------------------------ */
     .balign 128
 .L_op_const_wide_high16: /* 0x19 */
 /* File: mips/op_const_wide_high16.S */
-    # const-wide/high16 vAA,               /* +BBBB000000000000 */
+    /* const-wide/high16 vAA, +BBBB000000000000 */
     FETCH(a1, 1)                           #  a1 <- 0000BBBB (zero-extended)
     GET_OPA(a3)                            #  a3 <- AA
     li        a0, 0                        #  a0 <- 00000000
     sll       a1, 16                       #  a1 <- BBBB0000
     FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
     GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG64(a0, a1, a3)                 #  vAA <- a0/a1
-    GOTO_OPCODE(t0)                        #  jump to next instruction
+    SET_VREG64_GOTO(a0, a1, a3, t0)        #  vAA/vAA+1 <- a0/a1
 
 /* ------------------------------ */
     .balign 128
 .L_op_const_string: /* 0x1a */
 /* File: mips/op_const_string.S */
-    # const/string vAA, String             /* BBBB */
+    /* const/string vAA, string@BBBB */
     EXPORT_PC()
     FETCH(a0, 1)                        # a0 <- BBBB
     GET_OPA(a1)                         # a1 <- AA
@@ -1028,13 +1242,12 @@
     .balign 128
 .L_op_const_string_jumbo: /* 0x1b */
 /* File: mips/op_const_string_jumbo.S */
-    # const/string vAA, String          /* BBBBBBBB */
+    /* const/string vAA, string@BBBBBBBB */
     EXPORT_PC()
     FETCH(a0, 1)                        # a0 <- bbbb (low)
     FETCH(a2, 2)                        # a2 <- BBBB (high)
     GET_OPA(a1)                         # a1 <- AA
-    sll    a2, a2, 16
-    or     a0, a0, a2                   # a0 <- BBBBbbbb
+    INSERT_HIGH_HALF(a0, a2)            # a0 <- BBBBbbbb
     addu   a2, rFP, OFF_FP_SHADOWFRAME  # a2 <- shadow frame
     move   a3, rSELF
     JAL(MterpConstString)               # v0 <- Mterp(index, tgt_reg, shadow_frame, self)
@@ -1048,7 +1261,7 @@
     .balign 128
 .L_op_const_class: /* 0x1c */
 /* File: mips/op_const_class.S */
-    # const/class vAA, Class               /* BBBB */
+    /* const/class vAA, class@BBBB */
     EXPORT_PC()
     FETCH(a0, 1)                        # a0 <- BBBB
     GET_OPA(a1)                         # a1 <- AA
@@ -1108,7 +1321,7 @@
     /*
      * Check to see if a cast from one class to another is allowed.
      */
-    # check-cast vAA, class                /* BBBB */
+    /* check-cast vAA, class@BBBB */
     EXPORT_PC()
     FETCH(a0, 1)                           #  a0 <- BBBB
     GET_OPA(a1)                            #  a1 <- AA
@@ -1132,7 +1345,7 @@
      * Most common situation is a non-null object, being compared against
      * an already-resolved class.
      */
-    # instance-of vA, vB, class            /* CCCC */
+    /* instance-of vA, vB, class@CCCC */
     EXPORT_PC()
     FETCH(a0, 1)                           # a0 <- CCCC
     GET_OPB(a1)                            # a1 <- B
@@ -1155,6 +1368,7 @@
     /*
      * Return the length of an array.
      */
+    /* array-length vA, vB */
     GET_OPB(a1)                            #  a1 <- B
     GET_OPA4(a2)                           #  a2 <- A+
     GET_VREG(a0, a1)                       #  a0 <- vB (object ref)
@@ -1172,7 +1386,7 @@
     /*
      * Create a new instance of a class.
      */
-    # new-instance vAA, class              /* BBBB */
+    /* new-instance vAA, class@BBBB */
     EXPORT_PC()
     addu   a0, rFP, OFF_FP_SHADOWFRAME
     move   a1, rSELF
@@ -1215,8 +1429,8 @@
      *
      * for: filled-new-array, filled-new-array/range
      */
-    # op vB, {vD, vE, vF, vG, vA}, class   /* CCCC */
-    # op {vCCCC..v(CCCC+AA-1)}, type       /* BBBB */
+    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+    /* op {vCCCC..v(CCCC+AA-1)}, type@BBBB */
     .extern MterpFilledNewArray
     EXPORT_PC()
     addu   a0, rFP, OFF_FP_SHADOWFRAME     # a0 <- shadow frame
@@ -1238,8 +1452,8 @@
      *
      * for: filled-new-array, filled-new-array/range
      */
-    # op vB, {vD, vE, vF, vG, vA}, class   /* CCCC */
-    # op {vCCCC..v(CCCC+AA-1)}, type       /* BBBB */
+    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+    /* op {vCCCC..v(CCCC+AA-1)}, type@BBBB */
     .extern MterpFilledNewArrayRange
     EXPORT_PC()
     addu   a0, rFP, OFF_FP_SHADOWFRAME     # a0 <- shadow frame
@@ -1258,11 +1472,10 @@
 /* File: mips/op_fill_array_data.S */
     /* fill-array-data vAA, +BBBBBBBB */
     EXPORT_PC()
-    FETCH(a0, 1)                           #  a0 <- bbbb (lo)
-    FETCH(a1, 2)                           #  a1 <- BBBB (hi)
+    FETCH(a1, 1)                           #  a1 <- bbbb (lo)
+    FETCH(a0, 2)                           #  a0 <- BBBB (hi)
     GET_OPA(a3)                            #  a3 <- AA
-    sll       a1, a1, 16                   #  a1 <- BBBBbbbb
-    or        a1, a0, a1                   #  a1 <- BBBBbbbb
+    INSERT_HIGH_HALF(a1, a0)               #  a1 <- BBBBbbbb
     GET_VREG(a0, a3)                       #  a0 <- vAA (array object)
     EAS1(a1, rPC, a1)                      #  a1 <- PC + BBBBbbbb*2 (array data off.)
     JAL(MterpFillArrayData)                #  v0 <- Mterp(obj, payload)
@@ -1330,10 +1543,9 @@
      * our "backward branch" test must be "<=0" instead of "<0".
      */
     /* goto/32 +AAAAAAAA */
-    FETCH(a0, 1)                           #  a0 <- aaaa (lo)
+    FETCH(rINST, 1)                        #  rINST <- aaaa (lo)
     FETCH(a1, 2)                           #  a1 <- AAAA (hi)
-    sll       a1, a1, 16
-    or        rINST, a0, a1                #  rINST <- AAAAaaaa
+    INSERT_HIGH_HALF(rINST, a1)            #  rINST <- AAAAaaaa
     b         MterpCommonTakenBranchNoFlags
 
 /* ------------------------------ */
@@ -1353,8 +1565,7 @@
     FETCH(a0, 1)                           #  a0 <- bbbb (lo)
     FETCH(a1, 2)                           #  a1 <- BBBB (hi)
     GET_OPA(a3)                            #  a3 <- AA
-    sll       t0, a1, 16
-    or        a0, a0, t0                   #  a0 <- BBBBbbbb
+    INSERT_HIGH_HALF(a0, a1)               #  a0 <- BBBBbbbb
     GET_VREG(a1, a3)                       #  a1 <- vAA
     EAS1(a0, rPC, a0)                      #  a0 <- PC + BBBBbbbb*2
     JAL(MterpDoPackedSwitch)                             #  a0 <- code-unit branch offset
@@ -1379,8 +1590,7 @@
     FETCH(a0, 1)                           #  a0 <- bbbb (lo)
     FETCH(a1, 2)                           #  a1 <- BBBB (hi)
     GET_OPA(a3)                            #  a3 <- AA
-    sll       t0, a1, 16
-    or        a0, a0, t0                   #  a0 <- BBBBbbbb
+    INSERT_HIGH_HALF(a0, a1)               #  a0 <- BBBBbbbb
     GET_VREG(a1, a3)                       #  a1 <- vAA
     EAS1(a0, rPC, a0)                      #  a0 <- PC + BBBBbbbb*2
     JAL(MterpDoSparseSwitch)                             #  a0 <- code-unit branch offset
@@ -1393,55 +1603,54 @@
 .L_op_cmpl_float: /* 0x2d */
 /* File: mips/op_cmpl_float.S */
     /*
-     * Compare two floating-point values.  Puts 0, 1, or -1 into the
-     * destination register rTEMP based on the results of the comparison.
-     *
-     * Provide a "naninst" instruction that puts 1 or -1 into rTEMP depending
-     * on what value we'd like to return when one of the operands is NaN.
-     *
-     * The operation we're implementing is:
-     *   if (x == y)
-     *     return 0;
-     *   else if (x < y)
-     *     return -1;
-     *   else if (x > y)
-     *     return 1;
-     *   else
-     *     return {-1 or 1};  // one or both operands was NaN
+     * Compare two floating-point values. Puts 0(==), 1(>), or -1(<)
+     * into the destination register based on the comparison results.
      *
      * for: cmpl-float, cmpg-float
      */
     /* op vAA, vBB, vCC */
 
-    /* "clasic" form */
     FETCH(a0, 1)                           #  a0 <- CCBB
     and       a2, a0, 255                  #  a2 <- BB
     srl       a3, a0, 8
     GET_VREG_F(ft0, a2)
     GET_VREG_F(ft1, a3)
 #ifdef MIPS32REVGE6
-    cmp.lt.s  ft2, ft0, ft1               # Is ft0 < ft1
-    li        rTEMP, -1
-    bc1nez    ft2, .Lop_cmpl_float_finish
-    cmp.lt.s  ft2, ft1, ft0
-    li        rTEMP, 1
-    bc1nez    ft2, .Lop_cmpl_float_finish
     cmp.eq.s  ft2, ft0, ft1
     li        rTEMP, 0
-    bc1nez    ft2, .Lop_cmpl_float_finish
-    b         .Lop_cmpl_float_nan
-#else
-    c.olt.s   fcc0, ft0, ft1               # Is ft0 < ft1
+    bc1nez    ft2, 1f                      # done if vBB == vCC (ordered)
+    .if 0
+    cmp.lt.s  ft2, ft0, ft1
     li        rTEMP, -1
-    bc1t      fcc0, .Lop_cmpl_float_finish
-    c.olt.s   fcc0, ft1, ft0
+    bc1nez    ft2, 1f                      # done if vBB < vCC (ordered)
+    li        rTEMP, 1                     # vBB > vCC or unordered
+    .else
+    cmp.lt.s  ft2, ft1, ft0
     li        rTEMP, 1
-    bc1t      fcc0, .Lop_cmpl_float_finish
+    bc1nez    ft2, 1f                      # done if vBB > vCC (ordered)
+    li        rTEMP, -1                    # vBB < vCC or unordered
+    .endif
+#else
     c.eq.s    fcc0, ft0, ft1
     li        rTEMP, 0
-    bc1t      fcc0, .Lop_cmpl_float_finish
-    b         .Lop_cmpl_float_nan
+    bc1t      fcc0, 1f                     # done if vBB == vCC (ordered)
+    .if 0
+    c.olt.s   fcc0, ft0, ft1
+    li        rTEMP, -1
+    bc1t      fcc0, 1f                     # done if vBB < vCC (ordered)
+    li        rTEMP, 1                     # vBB > vCC or unordered
+    .else
+    c.olt.s   fcc0, ft1, ft0
+    li        rTEMP, 1
+    bc1t      fcc0, 1f                     # done if vBB > vCC (ordered)
+    li        rTEMP, -1                    # vBB < vCC or unordered
+    .endif
 #endif
+1:
+    GET_OPA(rOBJ)
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO(rTEMP, rOBJ, t0)         #  vAA <- rTEMP
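
The rewritten compare handlers select the NaN result with the generated .if/.else flag: the cmpl variants yield -1 when either operand is NaN, the cmpg variants yield 1, and ordered operands give -1/0/1 as usual. A small Java sketch of the semantics being implemented (the double variants behave identically):

    class FpCompareSketch {
        // cmpl-float: NaN biases the result toward -1.
        static int cmplFloat(float x, float y) {
            if (x == y) return 0;
            if (x < y) return -1;
            if (x > y) return 1;
            return -1;                  // at least one operand is NaN
        }

        // cmpg-float: NaN biases the result toward +1.
        static int cmpgFloat(float x, float y) {
            if (x == y) return 0;
            if (x < y) return -1;
            if (x > y) return 1;
            return 1;                   // at least one operand is NaN
        }
    }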
 
 /* ------------------------------ */
     .balign 128
@@ -1449,55 +1658,54 @@
 /* File: mips/op_cmpg_float.S */
 /* File: mips/op_cmpl_float.S */
     /*
-     * Compare two floating-point values.  Puts 0, 1, or -1 into the
-     * destination register rTEMP based on the results of the comparison.
-     *
-     * Provide a "naninst" instruction that puts 1 or -1 into rTEMP depending
-     * on what value we'd like to return when one of the operands is NaN.
-     *
-     * The operation we're implementing is:
-     *   if (x == y)
-     *     return 0;
-     *   else if (x < y)
-     *     return -1;
-     *   else if (x > y)
-     *     return 1;
-     *   else
-     *     return {-1 or 1};  // one or both operands was NaN
+     * Compare two floating-point values. Puts 0(==), 1(>), or -1(<)
+     * into the destination register based on the comparison results.
      *
      * for: cmpl-float, cmpg-float
      */
     /* op vAA, vBB, vCC */
 
-    /* "clasic" form */
     FETCH(a0, 1)                           #  a0 <- CCBB
     and       a2, a0, 255                  #  a2 <- BB
     srl       a3, a0, 8
     GET_VREG_F(ft0, a2)
     GET_VREG_F(ft1, a3)
 #ifdef MIPS32REVGE6
-    cmp.lt.s  ft2, ft0, ft1               # Is ft0 < ft1
-    li        rTEMP, -1
-    bc1nez    ft2, .Lop_cmpg_float_finish
-    cmp.lt.s  ft2, ft1, ft0
-    li        rTEMP, 1
-    bc1nez    ft2, .Lop_cmpg_float_finish
     cmp.eq.s  ft2, ft0, ft1
     li        rTEMP, 0
-    bc1nez    ft2, .Lop_cmpg_float_finish
-    b         .Lop_cmpg_float_nan
-#else
-    c.olt.s   fcc0, ft0, ft1               # Is ft0 < ft1
+    bc1nez    ft2, 1f                      # done if vBB == vCC (ordered)
+    .if 1
+    cmp.lt.s  ft2, ft0, ft1
     li        rTEMP, -1
-    bc1t      fcc0, .Lop_cmpg_float_finish
-    c.olt.s   fcc0, ft1, ft0
+    bc1nez    ft2, 1f                      # done if vBB < vCC (ordered)
+    li        rTEMP, 1                     # vBB > vCC or unordered
+    .else
+    cmp.lt.s  ft2, ft1, ft0
     li        rTEMP, 1
-    bc1t      fcc0, .Lop_cmpg_float_finish
+    bc1nez    ft2, 1f                      # done if vBB > vCC (ordered)
+    li        rTEMP, -1                    # vBB < vCC or unordered
+    .endif
+#else
     c.eq.s    fcc0, ft0, ft1
     li        rTEMP, 0
-    bc1t      fcc0, .Lop_cmpg_float_finish
-    b         .Lop_cmpg_float_nan
+    bc1t      fcc0, 1f                     # done if vBB == vCC (ordered)
+    .if 1
+    c.olt.s   fcc0, ft0, ft1
+    li        rTEMP, -1
+    bc1t      fcc0, 1f                     # done if vBB < vCC (ordered)
+    li        rTEMP, 1                     # vBB > vCC or unordered
+    .else
+    c.olt.s   fcc0, ft1, ft0
+    li        rTEMP, 1
+    bc1t      fcc0, 1f                     # done if vBB > vCC (ordered)
+    li        rTEMP, -1                    # vBB < vCC or unordered
+    .endif
 #endif
+1:
+    GET_OPA(rOBJ)
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO(rTEMP, rOBJ, t0)         #  vAA <- rTEMP
 
 
 /* ------------------------------ */
@@ -1506,47 +1714,55 @@
 /* File: mips/op_cmpl_double.S */
     /*
      * Compare two floating-point values. Puts 0(==), 1(>), or -1(<)
-     * into the destination register (rTEMP) based on the comparison results.
-     *
-     * Provide a "naninst" instruction that puts 1 or -1 into rTEMP depending
-     * on what value we'd like to return when one of the operands is NaN.
-     *
-     * See op_cmpl_float for more details.
+     * into the destination register based on the comparison results.
      *
      * For: cmpl-double, cmpg-double
      */
     /* op vAA, vBB, vCC */
 
     FETCH(a0, 1)                           #  a0 <- CCBB
-    and       rOBJ, a0, 255                #  s5 <- BB
+    and       rOBJ, a0, 255                #  rOBJ <- BB
     srl       t0, a0, 8                    #  t0 <- CC
-    EAS2(rOBJ, rFP, rOBJ)                  #  s5 <- &fp[BB]
+    EAS2(rOBJ, rFP, rOBJ)                  #  rOBJ <- &fp[BB]
     EAS2(t0, rFP, t0)                      #  t0 <- &fp[CC]
     LOAD64_F(ft0, ft0f, rOBJ)
     LOAD64_F(ft1, ft1f, t0)
 #ifdef MIPS32REVGE6
-    cmp.lt.d  ft2, ft0, ft1
-    li        rTEMP, -1
-    bc1nez    ft2, .Lop_cmpl_double_finish
-    cmp.lt.d  ft2, ft1, ft0
-    li        rTEMP, 1
-    bc1nez    ft2, .Lop_cmpl_double_finish
     cmp.eq.d  ft2, ft0, ft1
     li        rTEMP, 0
-    bc1nez    ft2, .Lop_cmpl_double_finish
-    b         .Lop_cmpl_double_nan
-#else
-    c.olt.d   fcc0, ft0, ft1
+    bc1nez    ft2, 1f                      # done if vBB == vCC (ordered)
+    .if 0
+    cmp.lt.d  ft2, ft0, ft1
     li        rTEMP, -1
-    bc1t      fcc0, .Lop_cmpl_double_finish
-    c.olt.d   fcc0, ft1, ft0
+    bc1nez    ft2, 1f                      # done if vBB < vCC (ordered)
+    li        rTEMP, 1                     # vBB > vCC or unordered
+    .else
+    cmp.lt.d  ft2, ft1, ft0
     li        rTEMP, 1
-    bc1t      fcc0, .Lop_cmpl_double_finish
+    bc1nez    ft2, 1f                      # done if vBB > vCC (ordered)
+    li        rTEMP, -1                    # vBB < vCC or unordered
+    .endif
+#else
     c.eq.d    fcc0, ft0, ft1
     li        rTEMP, 0
-    bc1t      fcc0, .Lop_cmpl_double_finish
-    b         .Lop_cmpl_double_nan
+    bc1t      fcc0, 1f                     # done if vBB == vCC (ordered)
+    .if 0
+    c.olt.d   fcc0, ft0, ft1
+    li        rTEMP, -1
+    bc1t      fcc0, 1f                     # done if vBB < vCC (ordered)
+    li        rTEMP, 1                     # vBB > vCC or unordered
+    .else
+    c.olt.d   fcc0, ft1, ft0
+    li        rTEMP, 1
+    bc1t      fcc0, 1f                     # done if vBB > vCC (ordered)
+    li        rTEMP, -1                    # vBB < vCC or unordered
+    .endif
 #endif
+1:
+    GET_OPA(rOBJ)
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO(rTEMP, rOBJ, t0)         #  vAA <- rTEMP
 
 /* ------------------------------ */
     .balign 128
@@ -1555,47 +1771,55 @@
 /* File: mips/op_cmpl_double.S */
     /*
      * Compare two floating-point values. Puts 0(==), 1(>), or -1(<)
-     * into the destination register (rTEMP) based on the comparison results.
-     *
-     * Provide a "naninst" instruction that puts 1 or -1 into rTEMP depending
-     * on what value we'd like to return when one of the operands is NaN.
-     *
-     * See op_cmpl_float for more details.
+     * into the destination register based on the comparison results.
      *
      * For: cmpl-double, cmpg-double
      */
     /* op vAA, vBB, vCC */
 
     FETCH(a0, 1)                           #  a0 <- CCBB
-    and       rOBJ, a0, 255                #  s5 <- BB
+    and       rOBJ, a0, 255                #  rOBJ <- BB
     srl       t0, a0, 8                    #  t0 <- CC
-    EAS2(rOBJ, rFP, rOBJ)                  #  s5 <- &fp[BB]
+    EAS2(rOBJ, rFP, rOBJ)                  #  rOBJ <- &fp[BB]
     EAS2(t0, rFP, t0)                      #  t0 <- &fp[CC]
     LOAD64_F(ft0, ft0f, rOBJ)
     LOAD64_F(ft1, ft1f, t0)
 #ifdef MIPS32REVGE6
-    cmp.lt.d  ft2, ft0, ft1
-    li        rTEMP, -1
-    bc1nez    ft2, .Lop_cmpg_double_finish
-    cmp.lt.d  ft2, ft1, ft0
-    li        rTEMP, 1
-    bc1nez    ft2, .Lop_cmpg_double_finish
     cmp.eq.d  ft2, ft0, ft1
     li        rTEMP, 0
-    bc1nez    ft2, .Lop_cmpg_double_finish
-    b         .Lop_cmpg_double_nan
-#else
-    c.olt.d   fcc0, ft0, ft1
+    bc1nez    ft2, 1f                      # done if vBB == vCC (ordered)
+    .if 1
+    cmp.lt.d  ft2, ft0, ft1
     li        rTEMP, -1
-    bc1t      fcc0, .Lop_cmpg_double_finish
-    c.olt.d   fcc0, ft1, ft0
+    bc1nez    ft2, 1f                      # done if vBB < vCC (ordered)
+    li        rTEMP, 1                     # vBB > vCC or unordered
+    .else
+    cmp.lt.d  ft2, ft1, ft0
     li        rTEMP, 1
-    bc1t      fcc0, .Lop_cmpg_double_finish
+    bc1nez    ft2, 1f                      # done if vBB > vCC (ordered)
+    li        rTEMP, -1                    # vBB < vCC or unordered
+    .endif
+#else
     c.eq.d    fcc0, ft0, ft1
     li        rTEMP, 0
-    bc1t      fcc0, .Lop_cmpg_double_finish
-    b         .Lop_cmpg_double_nan
+    bc1t      fcc0, 1f                     # done if vBB == vCC (ordered)
+    .if 1
+    c.olt.d   fcc0, ft0, ft1
+    li        rTEMP, -1
+    bc1t      fcc0, 1f                     # done if vBB < vCC (ordered)
+    li        rTEMP, 1                     # vBB > vCC or unordered
+    .else
+    c.olt.d   fcc0, ft1, ft0
+    li        rTEMP, 1
+    bc1t      fcc0, 1f                     # done if vBB > vCC (ordered)
+    li        rTEMP, -1                    # vBB < vCC or unordered
+    .endif
 #endif
+1:
+    GET_OPA(rOBJ)
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO(rTEMP, rOBJ, t0)         #  vAA <- rTEMP
 
 
 /* ------------------------------ */
@@ -2015,11 +2239,7 @@
     # null array object?
     beqz      a0, common_errNullObject     #  yes, bail
     LOAD_base_offMirrorArray_length(a3, a0) #  a3 <- arrayObj->length
-    .if 2
     EASN(a0, a0, a1, 2)               #  a0 <- arrayObj + index*width
-    .else
-    addu      a0, a0, a1
-    .endif
     # a1 >= a3; compare unsigned index
     bgeu      a1, a3, common_errArrayIndex #  index >= length, bail
     FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
@@ -2074,10 +2294,9 @@
     lw   a1, THREAD_EXCEPTION_OFFSET(rSELF)
     PREFETCH_INST(2)                       #  load rINST
     bnez a1, MterpException
-    SET_VREG_OBJECT(v0, rOBJ)              #  vAA <- v0
     ADVANCE(2)                             #  advance rPC
     GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    GOTO_OPCODE(t0)                        #  jump to next instruction
+    SET_VREG_OBJECT_GOTO(v0, rOBJ, t0)     #  vAA <- v0
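
The aget/aput family shares one guard sequence: a null check on the array reference, then a single unsigned compare of the index against the length (bgeu), which also rejects negative indexes, before EASN forms the element address. An equivalent Java sketch (Integer.compareUnsigned stands in for the unsigned branch):

    class AgetSketch {
        static int agetInt(int[] array, int index) {
            if (array == null) {
                throw new NullPointerException();                 // common_errNullObject
            }
            if (Integer.compareUnsigned(index, array.length) >= 0) {
                throw new ArrayIndexOutOfBoundsException(index);  // common_errArrayIndex
            }
            return array[index];                                  // data offset + index * element width
        }
    }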
 
 /* ------------------------------ */
     .balign 128
@@ -2104,11 +2323,7 @@
     # null array object?
     beqz      a0, common_errNullObject     #  yes, bail
     LOAD_base_offMirrorArray_length(a3, a0) #  a3 <- arrayObj->length
-    .if 0
     EASN(a0, a0, a1, 0)               #  a0 <- arrayObj + index*width
-    .else
-    addu      a0, a0, a1
-    .endif
     # a1 >= a3; compare unsigned index
     bgeu      a1, a3, common_errArrayIndex #  index >= length, bail
     FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
@@ -2142,11 +2357,7 @@
     # null array object?
     beqz      a0, common_errNullObject     #  yes, bail
     LOAD_base_offMirrorArray_length(a3, a0) #  a3 <- arrayObj->length
-    .if 0
     EASN(a0, a0, a1, 0)               #  a0 <- arrayObj + index*width
-    .else
-    addu      a0, a0, a1
-    .endif
     # a1 >= a3; compare unsigned index
     bgeu      a1, a3, common_errArrayIndex #  index >= length, bail
     FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
@@ -2180,11 +2391,7 @@
     # null array object?
     beqz      a0, common_errNullObject     #  yes, bail
     LOAD_base_offMirrorArray_length(a3, a0) #  a3 <- arrayObj->length
-    .if 1
     EASN(a0, a0, a1, 1)               #  a0 <- arrayObj + index*width
-    .else
-    addu      a0, a0, a1
-    .endif
     # a1 >= a3; compare unsigned index
     bgeu      a1, a3, common_errArrayIndex #  index >= length, bail
     FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
@@ -2218,11 +2425,7 @@
     # null array object?
     beqz      a0, common_errNullObject     #  yes, bail
     LOAD_base_offMirrorArray_length(a3, a0) #  a3 <- arrayObj->length
-    .if 1
     EASN(a0, a0, a1, 1)               #  a0 <- arrayObj + index*width
-    .else
-    addu      a0, a0, a1
-    .endif
     # a1 >= a3; compare unsigned index
     bgeu      a1, a3, common_errArrayIndex #  index >= length, bail
     FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
@@ -2253,17 +2456,14 @@
     # null array object?
     beqz      a0, common_errNullObject     #  yes, bail
     LOAD_base_offMirrorArray_length(a3, a0) #  a3 <- arrayObj->length
-    .if 2
     EASN(a0, a0, a1, 2)               #  a0 <- arrayObj + index*width
-    .else
-    addu      a0, a0, a1
-    .endif
     bgeu      a1, a3, common_errArrayIndex #  index >= length, bail
     FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
     GET_VREG(a2, rOBJ)                     #  a2 <- vAA
     GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GET_OPCODE_TARGET(t0)
     sw a2, MIRROR_INT_ARRAY_DATA_OFFSET(a0)            #  vBB[vCC] <- a2
-    GOTO_OPCODE(t0)                        #  jump to next instruction
+    JR(t0)                                 #  jump to next instruction
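
The aput handlers now resolve the next handler's address with GET_OPCODE_TARGET before the memory store, so the store is immediately followed by the indirect jump (JR) rather than a separate GOTO_OPCODE expansion. A loose Java analogue of that table-driven dispatch shape, with a made-up opcode stream and handler table (not ART code):

    class ThreadedDispatchSketch {
        interface Handler { void run(); }

        static int pc;
        static final int[] insns = {1, 1, 0};              // toy stream: work, work, halt
        static final Handler[] table = {
            () -> { /* halt: fall out of the chain */ },
            () -> { System.out.println("work"); dispatch(); }  // do the work, then jump on
        };

        static void dispatch() {
            table[insns[pc++]].run();                      // analogue of GET_OPCODE_TARGET + JR
        }

        public static void main(String[] args) {
            dispatch();
        }
    }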
 
 /* ------------------------------ */
     .balign 128
@@ -2271,8 +2471,6 @@
 /* File: mips/op_aput_wide.S */
     /*
      * Array put, 64 bits.  vBB[vCC] <- vAA.
-     *
-     * Arrays of long/double are 64-bit aligned, so it's okay to use STRD.
      */
     /* aput-wide vAA, vBB, vCC */
     FETCH(a0, 1)                           #  a0 <- CCBB
@@ -2292,8 +2490,9 @@
     FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
     LOAD64(a2, a3, rOBJ)                   #  a2/a3 <- vAA/vAA+1
     GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GET_OPCODE_TARGET(t0)
     STORE64_off(a2, a3, a0, MIRROR_WIDE_ARRAY_DATA_OFFSET) #  a2/a3 <- vBB[vCC]
-    GOTO_OPCODE(t0)                        #  jump to next instruction
+    JR(t0)                                 #  jump to next instruction
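
aput-wide moves a long/double through a pair of 32-bit core registers (LOAD64 / STORE64_off), with the low word in vAA and the high word in vAA+1. A Java sketch of the split and recombination (method names are illustrative only):

    class WidePairSketch {
        // Split a 64-bit value into the two 32-bit halves held in vAA/vAA+1.
        static int[] split(long value) {
            return new int[] { (int) value, (int) (value >>> 32) };   // {low, high}
        }

        // Recombine the pair, as LOAD64 does before the 64-bit array store.
        static long join(int low, int high) {
            return (low & 0xffffffffL) | ((long) high << 32);
        }

        public static void main(String[] args) {
            int[] halves = split(0x1122334455667788L);
            System.out.println(Long.toHexString(join(halves[0], halves[1])));
        }
    }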
 
 /* ------------------------------ */
     .balign 128
@@ -2337,17 +2536,14 @@
     # null array object?
     beqz      a0, common_errNullObject     #  yes, bail
     LOAD_base_offMirrorArray_length(a3, a0) #  a3 <- arrayObj->length
-    .if 0
     EASN(a0, a0, a1, 0)               #  a0 <- arrayObj + index*width
-    .else
-    addu      a0, a0, a1
-    .endif
     bgeu      a1, a3, common_errArrayIndex #  index >= length, bail
     FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
     GET_VREG(a2, rOBJ)                     #  a2 <- vAA
     GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GET_OPCODE_TARGET(t0)
     sb a2, MIRROR_BOOLEAN_ARRAY_DATA_OFFSET(a0)            #  vBB[vCC] <- a2
-    GOTO_OPCODE(t0)                        #  jump to next instruction
+    JR(t0)                                 #  jump to next instruction
 
 
 /* ------------------------------ */
@@ -2373,17 +2569,14 @@
     # null array object?
     beqz      a0, common_errNullObject     #  yes, bail
     LOAD_base_offMirrorArray_length(a3, a0) #  a3 <- arrayObj->length
-    .if 0
     EASN(a0, a0, a1, 0)               #  a0 <- arrayObj + index*width
-    .else
-    addu      a0, a0, a1
-    .endif
     bgeu      a1, a3, common_errArrayIndex #  index >= length, bail
     FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
     GET_VREG(a2, rOBJ)                     #  a2 <- vAA
     GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GET_OPCODE_TARGET(t0)
     sb a2, MIRROR_BYTE_ARRAY_DATA_OFFSET(a0)            #  vBB[vCC] <- a2
-    GOTO_OPCODE(t0)                        #  jump to next instruction
+    JR(t0)                                 #  jump to next instruction
 
 
 /* ------------------------------ */
@@ -2409,17 +2602,14 @@
     # null array object?
     beqz      a0, common_errNullObject     #  yes, bail
     LOAD_base_offMirrorArray_length(a3, a0) #  a3 <- arrayObj->length
-    .if 1
     EASN(a0, a0, a1, 1)               #  a0 <- arrayObj + index*width
-    .else
-    addu      a0, a0, a1
-    .endif
     bgeu      a1, a3, common_errArrayIndex #  index >= length, bail
     FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
     GET_VREG(a2, rOBJ)                     #  a2 <- vAA
     GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GET_OPCODE_TARGET(t0)
     sh a2, MIRROR_CHAR_ARRAY_DATA_OFFSET(a0)            #  vBB[vCC] <- a2
-    GOTO_OPCODE(t0)                        #  jump to next instruction
+    JR(t0)                                 #  jump to next instruction
 
 
 /* ------------------------------ */
@@ -2445,17 +2635,14 @@
     # null array object?
     beqz      a0, common_errNullObject     #  yes, bail
     LOAD_base_offMirrorArray_length(a3, a0) #  a3 <- arrayObj->length
-    .if 1
     EASN(a0, a0, a1, 1)               #  a0 <- arrayObj + index*width
-    .else
-    addu      a0, a0, a1
-    .endif
     bgeu      a1, a3, common_errArrayIndex #  index >= length, bail
     FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
     GET_VREG(a2, rOBJ)                     #  a2 <- vAA
     GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GET_OPCODE_TARGET(t0)
     sh a2, MIRROR_SHORT_ARRAY_DATA_OFFSET(a0)            #  vBB[vCC] <- a2
-    GOTO_OPCODE(t0)                        #  jump to next instruction
+    JR(t0)                                 #  jump to next instruction
 
 
 /* ------------------------------ */
@@ -2467,6 +2654,7 @@
      *
      * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
      */
+    /* op vA, vB, field@CCCC */
     EXPORT_PC()
     FETCH(a0, 1)                           # a0 <- field ref CCCC
     GET_OPB(a1)                            # a1 <- B
@@ -2478,14 +2666,13 @@
     GET_OPA4(a2)                           # a2<- A+
     PREFETCH_INST(2)                       # load rINST
     bnez  a3, MterpPossibleException        # bail out
-    .if 0
-    SET_VREG_OBJECT(v0, a2)                # fp[A] <- v0
-    .else
-    SET_VREG(v0, a2)                       # fp[A] <- v0
-    .endif
     ADVANCE(2)                             #  advance rPC
     GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    GOTO_OPCODE(t0)                        #  jump to next instruction
+    .if 0
+    SET_VREG_OBJECT_GOTO(v0, a2, t0)       # fp[A] <- v0
+    .else
+    SET_VREG_GOTO(v0, a2, t0)              # fp[A] <- v0
+    .endif
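
Each iget variant calls an out-of-line field-get helper, bails to the exception path if the helper raised, and only then writes the result, using the object flavour of the store macro for reference fields. What the helper plus the exception check amount to, sketched in Java (Point and igetX are illustrative names):

    class IgetSketch {
        static class Point { int x; }

        // iget semantics: a null receiver throws; otherwise the field value
        // lands in the destination vreg.
        static int igetX(Point obj) {
            if (obj == null) {
                throw new NullPointerException();   // handler branches to MterpPossibleException
            }
            return obj.x;
        }
    }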
 
 /* ------------------------------ */
     .balign 128
@@ -2496,6 +2683,7 @@
      *
      * for: iget-wide
      */
+    /* op vA, vB, field@CCCC */
     EXPORT_PC()
     FETCH(a0, 1)                           # a0 <- field byte offset
     GET_OPB(a1)                            # a1 <- B
@@ -2507,10 +2695,9 @@
     GET_OPA4(a2)                           # a2<- A+
     PREFETCH_INST(2)                       # load rINST
     bnez a3, MterpException                # bail out
-    SET_VREG64(v0, v1, a2)                 # fp[A] <- v0/v1
     ADVANCE(2)                             # advance rPC
     GET_INST_OPCODE(t0)                    # extract opcode from rINST
-    GOTO_OPCODE(t0)                        # jump to next instruction
+    SET_VREG64_GOTO(v0, v1, a2, t0)        # fp[A] <- v0/v1
 
 /* ------------------------------ */
     .balign 128
@@ -2522,6 +2709,7 @@
      *
      * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
      */
+    /* op vA, vB, field@CCCC */
     EXPORT_PC()
     FETCH(a0, 1)                           # a0 <- field ref CCCC
     GET_OPB(a1)                            # a1 <- B
@@ -2533,14 +2721,13 @@
     GET_OPA4(a2)                           # a2<- A+
     PREFETCH_INST(2)                       # load rINST
     bnez  a3, MterpPossibleException        # bail out
-    .if 1
-    SET_VREG_OBJECT(v0, a2)                # fp[A] <- v0
-    .else
-    SET_VREG(v0, a2)                       # fp[A] <- v0
-    .endif
     ADVANCE(2)                             #  advance rPC
     GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    GOTO_OPCODE(t0)                        #  jump to next instruction
+    .if 1
+    SET_VREG_OBJECT_GOTO(v0, a2, t0)       # fp[A] <- v0
+    .else
+    SET_VREG_GOTO(v0, a2, t0)              # fp[A] <- v0
+    .endif
 
 
 /* ------------------------------ */
@@ -2553,6 +2740,7 @@
      *
      * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
      */
+    /* op vA, vB, field@CCCC */
     EXPORT_PC()
     FETCH(a0, 1)                           # a0 <- field ref CCCC
     GET_OPB(a1)                            # a1 <- B
@@ -2564,14 +2752,13 @@
     GET_OPA4(a2)                           # a2<- A+
     PREFETCH_INST(2)                       # load rINST
     bnez  a3, MterpPossibleException        # bail out
-    .if 0
-    SET_VREG_OBJECT(v0, a2)                # fp[A] <- v0
-    .else
-    SET_VREG(v0, a2)                       # fp[A] <- v0
-    .endif
     ADVANCE(2)                             #  advance rPC
     GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    GOTO_OPCODE(t0)                        #  jump to next instruction
+    .if 0
+    SET_VREG_OBJECT_GOTO(v0, a2, t0)       # fp[A] <- v0
+    .else
+    SET_VREG_GOTO(v0, a2, t0)              # fp[A] <- v0
+    .endif
 
 
 /* ------------------------------ */
@@ -2584,6 +2771,7 @@
      *
      * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
      */
+    /* op vA, vB, field@CCCC */
     EXPORT_PC()
     FETCH(a0, 1)                           # a0 <- field ref CCCC
     GET_OPB(a1)                            # a1 <- B
@@ -2595,14 +2783,13 @@
     GET_OPA4(a2)                           # a2<- A+
     PREFETCH_INST(2)                       # load rINST
     bnez  a3, MterpPossibleException        # bail out
-    .if 0
-    SET_VREG_OBJECT(v0, a2)                # fp[A] <- v0
-    .else
-    SET_VREG(v0, a2)                       # fp[A] <- v0
-    .endif
     ADVANCE(2)                             #  advance rPC
     GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    GOTO_OPCODE(t0)                        #  jump to next instruction
+    .if 0
+    SET_VREG_OBJECT_GOTO(v0, a2, t0)       # fp[A] <- v0
+    .else
+    SET_VREG_GOTO(v0, a2, t0)              # fp[A] <- v0
+    .endif
 
 
 /* ------------------------------ */
@@ -2615,6 +2802,7 @@
      *
      * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
      */
+    /* op vA, vB, field@CCCC */
     EXPORT_PC()
     FETCH(a0, 1)                           # a0 <- field ref CCCC
     GET_OPB(a1)                            # a1 <- B
@@ -2626,14 +2814,13 @@
     GET_OPA4(a2)                           # a2<- A+
     PREFETCH_INST(2)                       # load rINST
     bnez  a3, MterpPossibleException        # bail out
-    .if 0
-    SET_VREG_OBJECT(v0, a2)                # fp[A] <- v0
-    .else
-    SET_VREG(v0, a2)                       # fp[A] <- v0
-    .endif
     ADVANCE(2)                             #  advance rPC
     GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    GOTO_OPCODE(t0)                        #  jump to next instruction
+    .if 0
+    SET_VREG_OBJECT_GOTO(v0, a2, t0)       # fp[A] <- v0
+    .else
+    SET_VREG_GOTO(v0, a2, t0)              # fp[A] <- v0
+    .endif
 
 
 /* ------------------------------ */
@@ -2646,6 +2833,7 @@
      *
      * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
      */
+    /* op vA, vB, field@CCCC */
     EXPORT_PC()
     FETCH(a0, 1)                           # a0 <- field ref CCCC
     GET_OPB(a1)                            # a1 <- B
@@ -2657,14 +2845,13 @@
     GET_OPA4(a2)                           # a2<- A+
     PREFETCH_INST(2)                       # load rINST
     bnez  a3, MterpPossibleException        # bail out
-    .if 0
-    SET_VREG_OBJECT(v0, a2)                # fp[A] <- v0
-    .else
-    SET_VREG(v0, a2)                       # fp[A] <- v0
-    .endif
     ADVANCE(2)                             #  advance rPC
     GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    GOTO_OPCODE(t0)                        #  jump to next instruction
+    .if 0
+    SET_VREG_OBJECT_GOTO(v0, a2, t0)       # fp[A] <- v0
+    .else
+    SET_VREG_GOTO(v0, a2, t0)              # fp[A] <- v0
+    .endif
 
 
 /* ------------------------------ */
@@ -2676,7 +2863,7 @@
      *
      * for: iput, iput-boolean, iput-byte, iput-char, iput-short
      */
-    # op vA, vB, field                     /* CCCC */
+    /* op vA, vB, field@CCCC */
     .extern artSet32InstanceFromMterp
     EXPORT_PC()
     FETCH(a0, 1)                           # a0 <- field ref CCCC
@@ -2696,7 +2883,7 @@
     .balign 128
 .L_op_iput_wide: /* 0x5a */
 /* File: mips/op_iput_wide.S */
-    # iput-wide vA, vB, field              /* CCCC */
+    /* iput-wide vA, vB, field@CCCC */
     .extern artSet64InstanceFromMterp
     EXPORT_PC()
     FETCH(a0, 1)                           # a0 <- field ref CCCC
@@ -2721,7 +2908,7 @@
      *
      * for: iput-object, iput-object-volatile
      */
-    # op vA, vB, field                     /* CCCC */
+    /* op vA, vB, field@CCCC */
     EXPORT_PC()
     addu   a0, rFP, OFF_FP_SHADOWFRAME
     move   a1, rPC
@@ -2743,7 +2930,7 @@
      *
      * for: iput, iput-boolean, iput-byte, iput-char, iput-short
      */
-    # op vA, vB, field                     /* CCCC */
+    /* op vA, vB, field@CCCC */
     .extern artSet8InstanceFromMterp
     EXPORT_PC()
     FETCH(a0, 1)                           # a0 <- field ref CCCC
@@ -2770,7 +2957,7 @@
      *
      * for: iput, iput-boolean, iput-byte, iput-char, iput-short
      */
-    # op vA, vB, field                     /* CCCC */
+    /* op vA, vB, field@CCCC */
     .extern artSet8InstanceFromMterp
     EXPORT_PC()
     FETCH(a0, 1)                           # a0 <- field ref CCCC
@@ -2797,7 +2984,7 @@
      *
      * for: iput, iput-boolean, iput-byte, iput-char, iput-short
      */
-    # op vA, vB, field                     /* CCCC */
+    /* op vA, vB, field@CCCC */
     .extern artSet16InstanceFromMterp
     EXPORT_PC()
     FETCH(a0, 1)                           # a0 <- field ref CCCC
@@ -2824,7 +3011,7 @@
      *
      * for: iput, iput-boolean, iput-byte, iput-char, iput-short
      */
-    # op vA, vB, field                     /* CCCC */
+    /* op vA, vB, field@CCCC */
     .extern artSet16InstanceFromMterp
     EXPORT_PC()
     FETCH(a0, 1)                           # a0 <- field ref CCCC
@@ -2850,7 +3037,7 @@
      *
      * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
      */
-    # op vAA, field                        /* BBBB */
+    /* op vAA, field@BBBB */
     .extern artGet32StaticFromCode
     EXPORT_PC()
     FETCH(a0, 1)                           # a0 <- field ref BBBB
@@ -2861,14 +3048,13 @@
     GET_OPA(a2)                            # a2 <- AA
     PREFETCH_INST(2)
     bnez  a3, MterpException               # bail out
-.if 0
-    SET_VREG_OBJECT(v0, a2)                # fp[AA] <- v0
-.else
-    SET_VREG(v0, a2)                       # fp[AA] <- v0
-.endif
     ADVANCE(2)
     GET_INST_OPCODE(t0)                    # extract opcode from rINST
-    GOTO_OPCODE(t0)                        # jump to next instruction
+.if 0
+    SET_VREG_OBJECT_GOTO(v0, a2, t0)       # fp[AA] <- v0
+.else
+    SET_VREG_GOTO(v0, a2, t0)              # fp[AA] <- v0
+.endif
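
The sget handlers follow the same shape: the artGet*StaticFromCode helper resolves the field (running the declaring class's initializer on first use if needed), the handler bails to MterpException if the helper raised, and the value is written with the object or non-object variant of the store macro. A rough Java sketch of the first-access behaviour (Config is an illustrative class):

    class SgetSketch {
        static class Config {
            static final long LOADED_AT = System.nanoTime();  // assigned in <clinit>
        }

        public static void main(String[] args) {
            // The first static read triggers <clinit>, which is the slow path the
            // helper covers; subsequent reads are plain loads.
            System.out.println(Config.LOADED_AT);
        }
    }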
 
 /* ------------------------------ */
     .balign 128
@@ -2877,7 +3063,7 @@
     /*
      * 64-bit SGET handler.
      */
-    # sget-wide vAA, field                 /* BBBB */
+    /* sget-wide vAA, field@BBBB */
     .extern artGet64StaticFromCode
     EXPORT_PC()
     FETCH(a0, 1)                           # a0 <- field ref BBBB
@@ -2888,9 +3074,8 @@
     bnez  a3, MterpException
     GET_OPA(a1)                            # a1 <- AA
     FETCH_ADVANCE_INST(2)                  # advance rPC, load rINST
-    SET_VREG64(v0, v1, a1)                 # vAA/vAA+1 <- v0/v1
     GET_INST_OPCODE(t0)                    # extract opcode from rINST
-    GOTO_OPCODE(t0)                        # jump to next instruction
+    SET_VREG64_GOTO(v0, v1, a1, t0)        # vAA/vAA+1 <- v0/v1
 
 /* ------------------------------ */
     .balign 128
@@ -2902,7 +3087,7 @@
      *
      * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
      */
-    # op vAA, field                        /* BBBB */
+    /* op vAA, field@BBBB */
     .extern artGetObjStaticFromCode
     EXPORT_PC()
     FETCH(a0, 1)                           # a0 <- field ref BBBB
@@ -2913,14 +3098,13 @@
     GET_OPA(a2)                            # a2 <- AA
     PREFETCH_INST(2)
     bnez  a3, MterpException               # bail out
-.if 1
-    SET_VREG_OBJECT(v0, a2)                # fp[AA] <- v0
-.else
-    SET_VREG(v0, a2)                       # fp[AA] <- v0
-.endif
     ADVANCE(2)
     GET_INST_OPCODE(t0)                    # extract opcode from rINST
-    GOTO_OPCODE(t0)                        # jump to next instruction
+.if 1
+    SET_VREG_OBJECT_GOTO(v0, a2, t0)       # fp[AA] <- v0
+.else
+    SET_VREG_GOTO(v0, a2, t0)              # fp[AA] <- v0
+.endif
 
 
 /* ------------------------------ */
@@ -2933,7 +3117,7 @@
      *
      * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
      */
-    # op vAA, field                        /* BBBB */
+    /* op vAA, field@BBBB */
     .extern artGetBooleanStaticFromCode
     EXPORT_PC()
     FETCH(a0, 1)                           # a0 <- field ref BBBB
@@ -2944,14 +3128,13 @@
     GET_OPA(a2)                            # a2 <- AA
     PREFETCH_INST(2)
     bnez  a3, MterpException               # bail out
-.if 0
-    SET_VREG_OBJECT(v0, a2)                # fp[AA] <- v0
-.else
-    SET_VREG(v0, a2)                       # fp[AA] <- v0
-.endif
     ADVANCE(2)
     GET_INST_OPCODE(t0)                    # extract opcode from rINST
-    GOTO_OPCODE(t0)                        # jump to next instruction
+.if 0
+    SET_VREG_OBJECT_GOTO(v0, a2, t0)       # fp[AA] <- v0
+.else
+    SET_VREG_GOTO(v0, a2, t0)              # fp[AA] <- v0
+.endif
 
 
 /* ------------------------------ */
@@ -2964,7 +3147,7 @@
      *
      * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
      */
-    # op vAA, field                        /* BBBB */
+    /* op vAA, field@BBBB */
     .extern artGetByteStaticFromCode
     EXPORT_PC()
     FETCH(a0, 1)                           # a0 <- field ref BBBB
@@ -2975,14 +3158,13 @@
     GET_OPA(a2)                            # a2 <- AA
     PREFETCH_INST(2)
     bnez  a3, MterpException               # bail out
-.if 0
-    SET_VREG_OBJECT(v0, a2)                # fp[AA] <- v0
-.else
-    SET_VREG(v0, a2)                       # fp[AA] <- v0
-.endif
     ADVANCE(2)
     GET_INST_OPCODE(t0)                    # extract opcode from rINST
-    GOTO_OPCODE(t0)                        # jump to next instruction
+.if 0
+    SET_VREG_OBJECT_GOTO(v0, a2, t0)       # fp[AA] <- v0
+.else
+    SET_VREG_GOTO(v0, a2, t0)              # fp[AA] <- v0
+.endif
 
 
 /* ------------------------------ */
@@ -2995,7 +3177,7 @@
      *
      * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
      */
-    # op vAA, field                        /* BBBB */
+    /* op vAA, field@BBBB */
     .extern artGetCharStaticFromCode
     EXPORT_PC()
     FETCH(a0, 1)                           # a0 <- field ref BBBB
@@ -3006,14 +3188,13 @@
     GET_OPA(a2)                            # a2 <- AA
     PREFETCH_INST(2)
     bnez  a3, MterpException               # bail out
-.if 0
-    SET_VREG_OBJECT(v0, a2)                # fp[AA] <- v0
-.else
-    SET_VREG(v0, a2)                       # fp[AA] <- v0
-.endif
     ADVANCE(2)
     GET_INST_OPCODE(t0)                    # extract opcode from rINST
-    GOTO_OPCODE(t0)                        # jump to next instruction
+.if 0
+    SET_VREG_OBJECT_GOTO(v0, a2, t0)       # fp[AA] <- v0
+.else
+    SET_VREG_GOTO(v0, a2, t0)              # fp[AA] <- v0
+.endif
 
 
 /* ------------------------------ */
@@ -3026,7 +3207,7 @@
      *
      * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
      */
-    # op vAA, field                        /* BBBB */
+    /* op vAA, field@BBBB */
     .extern artGetShortStaticFromCode
     EXPORT_PC()
     FETCH(a0, 1)                           # a0 <- field ref BBBB
@@ -3037,14 +3218,13 @@
     GET_OPA(a2)                            # a2 <- AA
     PREFETCH_INST(2)
     bnez  a3, MterpException               # bail out
-.if 0
-    SET_VREG_OBJECT(v0, a2)                # fp[AA] <- v0
-.else
-    SET_VREG(v0, a2)                       # fp[AA] <- v0
-.endif
     ADVANCE(2)
     GET_INST_OPCODE(t0)                    # extract opcode from rINST
-    GOTO_OPCODE(t0)                        # jump to next instruction
+.if 0
+    SET_VREG_OBJECT_GOTO(v0, a2, t0)       # fp[AA] <- v0
+.else
+    SET_VREG_GOTO(v0, a2, t0)              # fp[AA] <- v0
+.endif
 
 
 /* ------------------------------ */
@@ -3056,7 +3236,7 @@
      *
      * for: sput, sput-boolean, sput-byte, sput-char, sput-short
      */
-    # op vAA, field                        /* BBBB */
+    /* op vAA, field@BBBB */
     EXPORT_PC()
     FETCH(a0, 1)                           # a0 <- field ref BBBB
     GET_OPA(a3)                            # a3 <- AA
@@ -3077,7 +3257,7 @@
     /*
      * 64-bit SPUT handler.
      */
-    # sput-wide vAA, field                 /* BBBB */
+    /* sput-wide vAA, field@BBBB */
     .extern artSet64IndirectStaticFromMterp
     EXPORT_PC()
     FETCH(a0, 1)                           # a0 <- field ref CCCC
@@ -3123,7 +3303,7 @@
      *
      * for: sput, sput-boolean, sput-byte, sput-char, sput-short
      */
-    # op vAA, field                        /* BBBB */
+    /* op vAA, field@BBBB */
     EXPORT_PC()
     FETCH(a0, 1)                           # a0 <- field ref BBBB
     GET_OPA(a3)                            # a3 <- AA
@@ -3148,7 +3328,7 @@
      *
      * for: sput, sput-boolean, sput-byte, sput-char, sput-short
      */
-    # op vAA, field                        /* BBBB */
+    /* op vAA, field@BBBB */
     EXPORT_PC()
     FETCH(a0, 1)                           # a0 <- field ref BBBB
     GET_OPA(a3)                            # a3 <- AA
@@ -3173,7 +3353,7 @@
      *
      * for: sput, sput-boolean, sput-byte, sput-char, sput-short
      */
-    # op vAA, field                        /* BBBB */
+    /* op vAA, field@BBBB */
     EXPORT_PC()
     FETCH(a0, 1)                           # a0 <- field ref BBBB
     GET_OPA(a3)                            # a3 <- AA
@@ -3198,7 +3378,7 @@
      *
      * for: sput, sput-boolean, sput-byte, sput-char, sput-short
      */
-    # op vAA, field                        /* BBBB */
+    /* op vAA, field@BBBB */
     EXPORT_PC()
     FETCH(a0, 1)                           # a0 <- field ref BBBB
     GET_OPA(a3)                            # a3 <- AA
@@ -3221,8 +3401,8 @@
     /*
      * Generic invoke handler wrapper.
      */
-    # op vB, {vD, vE, vF, vG, vA}, class   /* CCCC */
-    # op {vCCCC..v(CCCC+AA-1)}, meth       /* BBBB */
+    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
     .extern MterpInvokeVirtual
     EXPORT_PC()
     move    a0, rSELF
@@ -3246,8 +3426,8 @@
     /*
      * Generic invoke handler wrapper.
      */
-    # op vB, {vD, vE, vF, vG, vA}, class   /* CCCC */
-    # op {vCCCC..v(CCCC+AA-1)}, meth       /* BBBB */
+    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
     .extern MterpInvokeSuper
     EXPORT_PC()
     move    a0, rSELF
@@ -3271,8 +3451,8 @@
     /*
      * Generic invoke handler wrapper.
      */
-    # op vB, {vD, vE, vF, vG, vA}, class   /* CCCC */
-    # op {vCCCC..v(CCCC+AA-1)}, meth       /* BBBB */
+    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
     .extern MterpInvokeDirect
     EXPORT_PC()
     move    a0, rSELF
@@ -3296,8 +3476,8 @@
     /*
      * Generic invoke handler wrapper.
      */
-    # op vB, {vD, vE, vF, vG, vA}, class   /* CCCC */
-    # op {vCCCC..v(CCCC+AA-1)}, meth       /* BBBB */
+    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
     .extern MterpInvokeStatic
     EXPORT_PC()
     move    a0, rSELF
@@ -3321,8 +3501,8 @@
     /*
      * Generic invoke handler wrapper.
      */
-    # op vB, {vD, vE, vF, vG, vA}, class   /* CCCC */
-    # op {vCCCC..v(CCCC+AA-1)}, meth       /* BBBB */
+    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
     .extern MterpInvokeInterface
     EXPORT_PC()
     move    a0, rSELF
@@ -3344,7 +3524,7 @@
 /* File: mips/op_return_void_no_barrier.S */
     lw     ra, THREAD_FLAGS_OFFSET(rSELF)
     move   a0, rSELF
-    and    ra, (THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST)
+    and    ra, THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
     beqz   ra, 1f
     JAL(MterpSuspendCheck)                 # (self)
 1:
@@ -3360,8 +3540,8 @@
     /*
      * Generic invoke handler wrapper.
      */
-    # op vB, {vD, vE, vF, vG, vA}, class   /* CCCC */
-    # op {vCCCC..v(CCCC+AA-1)}, meth       /* BBBB */
+    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
     .extern MterpInvokeVirtualRange
     EXPORT_PC()
     move    a0, rSELF
@@ -3385,8 +3565,8 @@
     /*
      * Generic invoke handler wrapper.
      */
-    # op vB, {vD, vE, vF, vG, vA}, class   /* CCCC */
-    # op {vCCCC..v(CCCC+AA-1)}, meth       /* BBBB */
+    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
     .extern MterpInvokeSuperRange
     EXPORT_PC()
     move    a0, rSELF
@@ -3410,8 +3590,8 @@
     /*
      * Generic invoke handler wrapper.
      */
-    # op vB, {vD, vE, vF, vG, vA}, class   /* CCCC */
-    # op {vCCCC..v(CCCC+AA-1)}, meth       /* BBBB */
+    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
     .extern MterpInvokeDirectRange
     EXPORT_PC()
     move    a0, rSELF
@@ -3435,8 +3615,8 @@
     /*
      * Generic invoke handler wrapper.
      */
-    # op vB, {vD, vE, vF, vG, vA}, class   /* CCCC */
-    # op {vCCCC..v(CCCC+AA-1)}, meth       /* BBBB */
+    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
     .extern MterpInvokeStaticRange
     EXPORT_PC()
     move    a0, rSELF
@@ -3460,8 +3640,8 @@
     /*
      * Generic invoke handler wrapper.
      */
-    # op vB, {vD, vE, vF, vG, vA}, class   /* CCCC */
-    # op {vCCCC..v(CCCC+AA-1)}, meth       /* BBBB */
+    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
     .extern MterpInvokeInterfaceRange
     EXPORT_PC()
     move    a0, rSELF
@@ -3506,11 +3686,11 @@
 /* File: mips/unop.S */
     /*
      * Generic 32-bit unary operation.  Provide an "instr" line that
-     * specifies an instruction that performs "result = op a0".
+     * specifies an instruction that performs "result0 = op a0".
      * This could be a MIPS instruction or a function call.
      *
-     * for: neg-int, not-int, neg-float, int-to-float, float-to-int,
-     *      int-to-byte, int-to-char, int-to-short
+     * for: int-to-byte, int-to-char, int-to-short,
+     *      neg-int, not-int, neg-float
      */
     /* unop vA, vB */
     GET_OPB(a3)                            #  a3 <- B
@@ -3520,8 +3700,7 @@
                                   #  optional op
     negu a0, a0                                 #  a0 <- op, a0-a3 changed
     GET_INST_OPCODE(t1)                    #  extract opcode from rINST
-    SET_VREG_GOTO(a0, t0, t1)        #  vAA <- result0
-    /* 9-10 instructions */
+    SET_VREG_GOTO(a0, t0, t1)        #  vA <- result0
 
 
 /* ------------------------------ */
@@ -3531,11 +3710,11 @@
 /* File: mips/unop.S */
     /*
      * Generic 32-bit unary operation.  Provide an "instr" line that
-     * specifies an instruction that performs "result = op a0".
+     * specifies an instruction that performs "result0 = op a0".
      * This could be a MIPS instruction or a function call.
      *
-     * for: neg-int, not-int, neg-float, int-to-float, float-to-int,
-     *      int-to-byte, int-to-char, int-to-short
+     * for: int-to-byte, int-to-char, int-to-short,
+     *      neg-int, not-int, neg-float
      */
     /* unop vA, vB */
     GET_OPB(a3)                            #  a3 <- B
@@ -3545,8 +3724,7 @@
                                   #  optional op
     not a0, a0                                 #  a0 <- op, a0-a3 changed
     GET_INST_OPCODE(t1)                    #  extract opcode from rINST
-    SET_VREG_GOTO(a0, t0, t1)        #  vAA <- result0
-    /* 9-10 instructions */
+    SET_VREG_GOTO(a0, t0, t1)        #  vA <- result0
 
 
 /* ------------------------------ */
@@ -3556,7 +3734,7 @@
 /* File: mips/unopWide.S */
     /*
      * Generic 64-bit unary operation.  Provide an "instr" line that
-     * specifies an instruction that performs "result = op a0/a1".
+     * specifies an instruction that performs "result0/result1 = op a0/a1".
      * This could be a MIPS instruction or a function call.
      *
      * For: neg-long, not-long, neg-double,
@@ -3565,14 +3743,12 @@
     GET_OPA4(rOBJ)                         #  rOBJ <- A+
     GET_OPB(a3)                            #  a3 <- B
     EAS2(a3, rFP, a3)                      #  a3 <- &fp[B]
-    LOAD64(a0, a1, a3)                     #  a0/a1 <- vAA
+    LOAD64(a0, a1, a3)                     #  a0/a1 <- vA
     FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
     negu v0, a0                              #  optional op
     negu v1, a1; sltu a0, zero, v0; subu v1, v1, a0                                 #  a0/a1 <- op, a2-a3 changed
     GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG64(v0, v1, rOBJ)   #  vAA <- a0/a1
-    GOTO_OPCODE(t0)                        #  jump to next instruction
-    /* 12-13 instructions */
+    SET_VREG64_GOTO(v0, v1, rOBJ, t0)   #  vA/vA+1 <- a0/a1
 
 
 /* ------------------------------ */
@@ -3582,7 +3758,7 @@
 /* File: mips/unopWide.S */
     /*
      * Generic 64-bit unary operation.  Provide an "instr" line that
-     * specifies an instruction that performs "result = op a0/a1".
+     * specifies an instruction that performs "result0/result1 = op a0/a1".
      * This could be a MIPS instruction or a function call.
      *
      * For: neg-long, not-long, neg-double,
@@ -3591,14 +3767,12 @@
     GET_OPA4(rOBJ)                         #  rOBJ <- A+
     GET_OPB(a3)                            #  a3 <- B
     EAS2(a3, rFP, a3)                      #  a3 <- &fp[B]
-    LOAD64(a0, a1, a3)                     #  a0/a1 <- vAA
+    LOAD64(a0, a1, a3)                     #  a0/a1 <- vA
     FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
     not a0, a0                              #  optional op
     not a1, a1                                 #  a0/a1 <- op, a2-a3 changed
     GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG64(a0, a1, rOBJ)   #  vAA <- a0/a1
-    GOTO_OPCODE(t0)                        #  jump to next instruction
-    /* 12-13 instructions */
+    SET_VREG64_GOTO(a0, a1, rOBJ, t0)   #  vA/vA+1 <- a0/a1
 
 
 /* ------------------------------ */
@@ -3608,11 +3782,11 @@
 /* File: mips/unop.S */
     /*
      * Generic 32-bit unary operation.  Provide an "instr" line that
-     * specifies an instruction that performs "result = op a0".
+     * specifies an instruction that performs "result0 = op a0".
      * This could be a MIPS instruction or a function call.
      *
-     * for: neg-int, not-int, neg-float, int-to-float, float-to-int,
-     *      int-to-byte, int-to-char, int-to-short
+     * for: int-to-byte, int-to-char, int-to-short,
+     *      neg-int, not-int, neg-float
      */
     /* unop vA, vB */
     GET_OPB(a3)                            #  a3 <- B
@@ -3622,8 +3796,7 @@
                                   #  optional op
     addu a0, a0, 0x80000000                                 #  a0 <- op, a0-a3 changed
     GET_INST_OPCODE(t1)                    #  extract opcode from rINST
-    SET_VREG_GOTO(a0, t0, t1)        #  vAA <- result0
-    /* 9-10 instructions */
+    SET_VREG_GOTO(a0, t0, t1)        #  vA <- result0
 
 
 /* ------------------------------ */
@@ -3633,7 +3806,7 @@
 /* File: mips/unopWide.S */
     /*
      * Generic 64-bit unary operation.  Provide an "instr" line that
-     * specifies an instruction that performs "result = op a0/a1".
+     * specifies an instruction that performs "result0/result1 = op a0/a1".
      * This could be a MIPS instruction or a function call.
      *
      * For: neg-long, not-long, neg-double,
@@ -3642,14 +3815,12 @@
     GET_OPA4(rOBJ)                         #  rOBJ <- A+
     GET_OPB(a3)                            #  a3 <- B
     EAS2(a3, rFP, a3)                      #  a3 <- &fp[B]
-    LOAD64(a0, a1, a3)                     #  a0/a1 <- vAA
+    LOAD64(a0, a1, a3)                     #  a0/a1 <- vA
     FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
                                   #  optional op
     addu a1, a1, 0x80000000                                 #  a0/a1 <- op, a2-a3 changed
     GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG64(a0, a1, rOBJ)   #  vAA <- a0/a1
-    GOTO_OPCODE(t0)                        #  jump to next instruction
-    /* 12-13 instructions */
+    SET_VREG64_GOTO(a0, a1, rOBJ, t0)   #  vA/vA+1 <- a0/a1
 
 
 /* ------------------------------ */
@@ -3659,8 +3830,7 @@
 /* File: mips/unopWider.S */
     /*
      * Generic 32bit-to-64bit unary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = op a0", where
-     * "result" is a 64-bit quantity in a0/a1.
+     * that specifies an instruction that performs "result0/result1 = op a0".
      *
      * For: int-to-long
      */
@@ -3672,9 +3842,7 @@
                                   #  optional op
     sra a1, a0, 31                                 #  result <- op, a0-a3 changed
     GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG64(a0, a1, rOBJ)   #  vA/vA+1 <- a0/a1
-    GOTO_OPCODE(t0)                        #  jump to next instruction
-    /* 10-11 instructions */
+    SET_VREG64_GOTO(a0, a1, rOBJ, t0)   #  vA/vA+1 <- a0/a1
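
int-to-long widens by sign extension: the 32-bit value stays as the low word and the high word is derived with an arithmetic shift right by 31 (sra a1, a0, 31), giving all zero or all one bits. Equivalent Java, spelled out next to the plain widening cast:

    class IntToLongSketch {
        static long intToLong(int v) {
            int low = v;
            int high = v >> 31;                          // sra by 31: 0 or -1
            long widened = ((long) high << 32) | (low & 0xffffffffL);
            assert widened == (long) v;                  // same as the ordinary cast
            return widened;
        }
    }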
 
 
 /* ------------------------------ */
@@ -3683,23 +3851,20 @@
 /* File: mips/op_int_to_float.S */
 /* File: mips/funop.S */
     /*
-     * Generic 32-bit unary operation.  Provide an "instr" line that
-     * specifies an instruction that performs "result = op a0".
+     * Generic 32-bit floating-point unary operation.  Provide an "instr"
+     * line that specifies an instruction that performs "fv0 = op fa0".
      * This could be a MIPS instruction or a function call.
      *
-     * for: int-to-float, float-to-int
+     * for: int-to-float
      */
     /* unop vA, vB */
     GET_OPB(a3)                            #  a3 <- B
-    GET_OPA4(rOBJ)                         #  t0 <- A+
+    GET_OPA4(rOBJ)                         #  rOBJ <- A+
     GET_VREG_F(fa0, a3)
     FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
     cvt.s.w fv0, fa0
-
-.Lop_int_to_float_set_vreg_f:
-    SET_VREG_F(fv0, rOBJ)
     GET_INST_OPCODE(t1)                    #  extract opcode from rINST
-    GOTO_OPCODE(t1)                        #  jump to next instruction
+    SET_VREG_F_GOTO(fv0, rOBJ, t1)         #  vA <- fv0
 
 
 /* ------------------------------ */
@@ -3708,11 +3873,10 @@
 /* File: mips/op_int_to_double.S */
 /* File: mips/funopWider.S */
     /*
-     * Generic 32bit-to-64bit unary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = op a0", where
-     * "result" is a 64-bit quantity in a0/a1.
+     * Generic 32bit-to-64bit floating-point unary operation.  Provide an "instr"
+     * line that specifies an instruction that performs "fv0 = op fa0".
      *
-     * For: int-to-double, float-to-long, float-to-double
+     * For: int-to-double, float-to-double
      */
     /* unop vA, vB */
     GET_OPA4(rOBJ)                         #  rOBJ <- A+
@@ -3720,11 +3884,8 @@
     GET_VREG_F(fa0, a3)
     FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
     cvt.d.w fv0, fa0
-
-.Lop_int_to_double_set_vreg:
-    SET_VREG64_F(fv0, fv0f, rOBJ)                             #  vA/vA+1 <- a0/a1
     GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    GOTO_OPCODE(t0)                        #  jump to next instruction
+    SET_VREG64_F_GOTO(fv0, fv0f, rOBJ, t0) #  vA/vA+1 <- fv0
 
 
 /* ------------------------------ */
@@ -3741,120 +3902,157 @@
     GET_VREG(a2, a1)                       #  a2 <- fp[B]
     GET_INST_OPCODE(t0)                    #  t0 <- opcode from rINST
     .if 0
-    SET_VREG_OBJECT(a2, a0)                #  fp[A] <- a2
+    SET_VREG_OBJECT_GOTO(a2, a0, t0)       #  fp[A] <- a2
     .else
-    SET_VREG(a2, a0)                       #  fp[A] <- a2
+    SET_VREG_GOTO(a2, a0, t0)              #  fp[A] <- a2
     .endif
-    GOTO_OPCODE(t0)                        #  jump to next instruction
 
 
 /* ------------------------------ */
     .balign 128
 .L_op_long_to_float: /* 0x85 */
 /* File: mips/op_long_to_float.S */
-/* File: mips/unopNarrower.S */
     /*
-     * Generic 64bit-to-32bit unary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = op a0/a1", where
-     * "result" is a 32-bit quantity in a0.
-     *
-     * For: long-to-float, double-to-int, double-to-float
-     * If hard floating point support is available, use fa0 as the parameter,
-     * except for long-to-float opcode.
-     * (This would work for long-to-int, but that instruction is actually
-     * an exact match for OP_MOVE.)
+     * long-to-float
      */
     /* unop vA, vB */
     GET_OPB(a3)                            #  a3 <- B
-    GET_OPA4(rOBJ)                         #  t1 <- A+
+    GET_OPA4(rOBJ)                         #  rOBJ <- A+
     EAS2(a3, rFP, a3)                      #  a3 <- &fp[B]
+
+#ifdef MIPS32REVGE6
+    LOAD64_F(fv0, fv0f, a3)
+    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
+    cvt.s.l   fv0, fv0
+#else
     LOAD64(rARG0, rARG1, a3)
     FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
     JAL(__floatdisf)
+#endif
 
-.Lop_long_to_float_set_vreg_f:
-    SET_VREG_F(fv0, rOBJ)                  #  vA <- result0
     GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    GOTO_OPCODE(t0)                        #  jump to next instruction
-
+    SET_VREG_F_GOTO(fv0, rOBJ, t0)         #  vA <- fv0
 
 /* ------------------------------ */
     .balign 128
 .L_op_long_to_double: /* 0x86 */
 /* File: mips/op_long_to_double.S */
-/* File: mips/funopWide.S */
     /*
-     * Generic 64-bit unary operation.  Provide an "instr" line that
-     * specifies an instruction that performs "result = op a0/a1".
-     * This could be a MIPS instruction or a function call.
-     *
-     * long-to-double, double-to-long
+     * long-to-double
      */
     /* unop vA, vB */
-    GET_OPA4(rOBJ)                         #  t1 <- A+
+    GET_OPA4(rOBJ)                         #  rOBJ <- A+
     GET_OPB(a3)                            #  a3 <- B
     EAS2(a3, rFP, a3)                      #  a3 <- &fp[B]
+
+#ifdef MIPS32REVGE6
+    LOAD64_F(fv0, fv0f, a3)
+    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
+    cvt.d.l   fv0, fv0
+#else
     LOAD64(rARG0, rARG1, a3)
     FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
-                                  #  optional op
-    JAL(__floatdidf)                                 #  a0/a1 <- op, a2-a3 changed
+    JAL(__floatdidf)                       #  a0/a1 <- op, a2-a3 changed
+#endif
 
-.Lop_long_to_double_set_vreg:
-    SET_VREG64_F(fv0, fv0f, rOBJ)                             #  vAA <- a0/a1
     GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    GOTO_OPCODE(t0)                        #  jump to next instruction
-    /* 12-13 instructions */
-
+    SET_VREG64_F_GOTO(fv0, fv0f, rOBJ, t0) #  vA/vA+1 <- result
 
 /* ------------------------------ */
     .balign 128
 .L_op_float_to_int: /* 0x87 */
 /* File: mips/op_float_to_int.S */
-/* File: mips/funop.S */
     /*
-     * Generic 32-bit unary operation.  Provide an "instr" line that
-     * specifies an instruction that performs "result = op a0".
-     * This could be a MIPS instruction or a function call.
+     * float-to-int
      *
-     * for: int-to-float, float-to-int
+     * We have to clip values to int min/max per the specification.  The
+     * expected common case is a "reasonable" value that converts directly
+     * to a modest integer.  The EABI convert function isn't doing this for us.
      */
     /* unop vA, vB */
     GET_OPB(a3)                            #  a3 <- B
-    GET_OPA4(rOBJ)                         #  t0 <- A+
+    GET_OPA4(rOBJ)                         #  rOBJ <- A+
     GET_VREG_F(fa0, a3)
     FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
-    b f2i_doconv
 
-.Lop_float_to_int_set_vreg_f:
-    SET_VREG_F(fv0, rOBJ)
+    li        t0, INT_MIN_AS_FLOAT
+    mtc1      t0, fa1
+#ifdef MIPS32REVGE6
+    /*
+     * TODO: simplify this when the MIPS64R6 emulator
+     * supports NAN2008=1.
+     */
+    cmp.le.s  ft0, fa1, fa0
     GET_INST_OPCODE(t1)                    #  extract opcode from rINST
-    GOTO_OPCODE(t1)                        #  jump to next instruction
-
+    bc1nez    ft0, 1f                      #  if INT_MIN <= vB, proceed to truncation
+    cmp.eq.s  ft0, fa0, fa0
+    selnez.s  fa0, fa1, ft0                #  fa0 = ordered(vB) ? INT_MIN_AS_FLOAT : 0
+#else
+    c.ole.s   fcc0, fa1, fa0
+    GET_INST_OPCODE(t1)                    #  extract opcode from rINST
+    bc1t      fcc0, 1f                     #  if INT_MIN <= vB, proceed to truncation
+    c.eq.s    fcc0, fa0, fa0
+    mtc1      zero, fa0
+    movt.s    fa0, fa1, fcc0               #  fa0 = ordered(vB) ? INT_MIN_AS_FLOAT : 0
+#endif
+1:
+    trunc.w.s fa0, fa0
+    SET_VREG_F_GOTO(fa0, rOBJ, t1)         #  vA <- result
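
For reference, a minimal Java sketch (illustrative class name, not part of the patch) of the clipping rules the handler above implements: NaN becomes 0, out-of-range values saturate at the int limits, and ordinary values truncate toward zero:

    // Illustrative only: Java's float-to-int narrowing conversion.
    public class FloatToIntDemo {
        public static void main(String[] args) {
            System.out.println((int) 12.9f);       // 12   (truncate toward zero)
            System.out.println((int) -12.9f);      // -12
            System.out.println((int) Float.NaN);   // 0
            System.out.println((int) 1e30f);       // 2147483647  (Integer.MAX_VALUE)
            System.out.println((int) -1e30f);      // -2147483648 (Integer.MIN_VALUE)
        }
    }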
 
 /* ------------------------------ */
     .balign 128
 .L_op_float_to_long: /* 0x88 */
 /* File: mips/op_float_to_long.S */
-/* File: mips/funopWider.S */
     /*
-     * Generic 32bit-to-64bit unary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = op a0", where
-     * "result" is a 64-bit quantity in a0/a1.
+     * float-to-long
      *
-     * For: int-to-double, float-to-long, float-to-double
+     * We have to clip values to long min/max per the specification.  The
+     * expected common case is a "reasonable" value that converts directly
+     * to a modest integer.  The EABI convert function isn't doing this for us.
      */
     /* unop vA, vB */
     GET_OPA4(rOBJ)                         #  rOBJ <- A+
     GET_OPB(a3)                            #  a3 <- B
     GET_VREG_F(fa0, a3)
     FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
-    b f2l_doconv
 
-.Lop_float_to_long_set_vreg:
-    SET_VREG64(rRESULT0, rRESULT1, rOBJ)                             #  vA/vA+1 <- a0/a1
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    GOTO_OPCODE(t0)                        #  jump to next instruction
+#ifdef MIPS32REVGE6
+    /*
+     * TODO: simplify this when the MIPS64R6 emulator
+     * supports NAN2008=1.
+     */
+    li        t0, LONG_MIN_AS_FLOAT
+    mtc1      t0, fa1
+    cmp.le.s  ft0, fa1, fa0
+    GET_INST_OPCODE(t1)                    #  extract opcode from rINST
+    bc1nez    ft0, 1f                      #  if LONG_MIN <= vB, proceed to truncation
+    cmp.eq.s  ft0, fa0, fa0
+    selnez.s  fa0, fa1, ft0                #  fa0 = ordered(vB) ? LONG_MIN_AS_FLOAT : 0
+1:
+    trunc.l.s fa0, fa0
+    SET_VREG64_F_GOTO(fa0, fa0f, rOBJ, t1) #  vA <- result
+#else
+    c.eq.s    fcc0, fa0, fa0
+    li        rRESULT0, 0
+    li        rRESULT1, 0
+    bc1f      fcc0, .Lop_float_to_long_get_opcode
 
+    li        t0, LONG_MIN_AS_FLOAT
+    mtc1      t0, fa1
+    c.ole.s   fcc0, fa0, fa1
+    li        rRESULT1, LONG_MIN_HIGH
+    bc1t      fcc0, .Lop_float_to_long_get_opcode
+
+    neg.s     fa1, fa1
+    c.ole.s   fcc0, fa1, fa0
+    nor       rRESULT0, rRESULT0, zero
+    nor       rRESULT1, rRESULT1, zero
+    bc1t      fcc0, .Lop_float_to_long_get_opcode
+
+    JAL(__fixsfdi)
+    GET_INST_OPCODE(t1)                    #  extract opcode from rINST
+    b         .Lop_float_to_long_set_vreg
+#endif
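
The soft-float path above produces Long.MIN_VALUE/MAX_VALUE for out-of-range inputs and 0 for NaN before falling back to __fixsfdi. A minimal Java sketch (illustrative class name, not part of the patch) of the expected results:

    // Illustrative only: the same clipping rules applied to long.
    public class FloatToLongDemo {
        public static void main(String[] args) {
            System.out.println((long) 123.9f);     // 123
            System.out.println((long) Float.NaN);  // 0
            System.out.println((long) 1e30f);      // 9223372036854775807  (Long.MAX_VALUE)
            System.out.println((long) -1e30f);     // -9223372036854775808 (Long.MIN_VALUE)
        }
    }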
 
 /* ------------------------------ */
     .balign 128
@@ -3862,11 +4060,10 @@
 /* File: mips/op_float_to_double.S */
 /* File: mips/funopWider.S */
     /*
-     * Generic 32bit-to-64bit unary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = op a0", where
-     * "result" is a 64-bit quantity in a0/a1.
+     * Generic 32bit-to-64bit floating-point unary operation.  Provide an "instr"
+     * line that specifies an instruction that performs "fv0 = op fa0".
      *
-     * For: int-to-double, float-to-long, float-to-double
+     * For: int-to-double, float-to-double
      */
     /* unop vA, vB */
     GET_OPA4(rOBJ)                         #  rOBJ <- A+
@@ -3874,77 +4071,111 @@
     GET_VREG_F(fa0, a3)
     FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
     cvt.d.s fv0, fa0
-
-.Lop_float_to_double_set_vreg:
-    SET_VREG64_F(fv0, fv0f, rOBJ)                             #  vA/vA+1 <- a0/a1
     GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    GOTO_OPCODE(t0)                        #  jump to next instruction
+    SET_VREG64_F_GOTO(fv0, fv0f, rOBJ, t0) #  vA/vA+1 <- fv0
 
 
 /* ------------------------------ */
     .balign 128
 .L_op_double_to_int: /* 0x8a */
 /* File: mips/op_double_to_int.S */
-/* File: mips/unopNarrower.S */
     /*
-     * Generic 64bit-to-32bit unary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = op a0/a1", where
-     * "result" is a 32-bit quantity in a0.
+     * double-to-int
      *
-     * For: long-to-float, double-to-int, double-to-float
-     * If hard floating point support is available, use fa0 as the parameter,
-     * except for long-to-float opcode.
-     * (This would work for long-to-int, but that instruction is actually
-     * an exact match for OP_MOVE.)
+     * We have to clip values to int min/max per the specification.  The
+     * expected common case is a "reasonable" value that converts directly
+     * to a modest integer.  The EABI convert function isn't doing this for us.
      */
     /* unop vA, vB */
     GET_OPB(a3)                            #  a3 <- B
-    GET_OPA4(rOBJ)                         #  t1 <- A+
+    GET_OPA4(rOBJ)                         #  rOBJ <- A+
     EAS2(a3, rFP, a3)                      #  a3 <- &fp[B]
     LOAD64_F(fa0, fa0f, a3)
     FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
-    b d2i_doconv
 
-.Lop_double_to_int_set_vreg_f:
-    SET_VREG_F(fv0, rOBJ)                  #  vA <- result0
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    GOTO_OPCODE(t0)                        #  jump to next instruction
-
-/*
- * Convert the double in a0/a1 to an int in a0.
- *
- * We have to clip values to int min/max per the specification.  The
- * expected common case is a "reasonable" value that converts directly
- * to modest integer.  The EABI convert function isn't doing this for us.
- */
+    li        t0, INT_MIN_AS_DOUBLE_HIGH
+    mtc1      zero, fa1
+    MOVE_TO_FPU_HIGH(t0, fa1, fa1f)
+#ifdef MIPS32REVGE6
+    /*
+     * TODO: simplify this when the MIPS64R6 emulator
+     * supports NAN2008=1.
+     */
+    cmp.le.d  ft0, fa1, fa0
+    GET_INST_OPCODE(t1)                    #  extract opcode from rINST
+    bc1nez    ft0, 1f                      #  if INT_MIN <= vB, proceed to truncation
+    cmp.eq.d  ft0, fa0, fa0
+    selnez.d  fa0, fa1, ft0                #  fa0 = ordered(vB) ? INT_MIN_AS_DOUBLE : 0
+#else
+    c.ole.d   fcc0, fa1, fa0
+    GET_INST_OPCODE(t1)                    #  extract opcode from rINST
+    bc1t      fcc0, 1f                     #  if INT_MIN <= vB, proceed to truncation
+    c.eq.d    fcc0, fa0, fa0
+    mtc1      zero, fa0
+    MOVE_TO_FPU_HIGH(zero, fa0, fa0f)
+    movt.d    fa0, fa1, fcc0               #  fa0 = ordered(vB) ? INT_MIN_AS_DOUBLE : 0
+#endif
+1:
+    trunc.w.d fa0, fa0
+    SET_VREG_F_GOTO(fa0, rOBJ, t1)         #  vA <- result
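
The INT_MIN_AS_DOUBLE constant materialized above is just (double) Integer.MIN_VALUE, whose encoding has a non-zero high word only. A minimal Java sketch (illustrative class name, not part of the patch) showing that word and the clipping behavior:

    // Illustrative only: the constant's bit pattern and double-to-int narrowing.
    public class DoubleToIntDemo {
        public static void main(String[] args) {
            long bits = Double.doubleToRawLongBits((double) Integer.MIN_VALUE);
            System.out.printf("high word = 0x%08x, low word = 0x%08x%n",
                              (int) (bits >>> 32), (int) bits);   // 0xc1e00000, 0x00000000
            System.out.println((int) -3.7);        // -3 (truncate toward zero)
            System.out.println((int) Double.NaN);  // 0
            System.out.println((int) 1e300);       // 2147483647
        }
    }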
 
 /* ------------------------------ */
     .balign 128
 .L_op_double_to_long: /* 0x8b */
 /* File: mips/op_double_to_long.S */
-/* File: mips/funopWide.S */
     /*
-     * Generic 64-bit unary operation.  Provide an "instr" line that
-     * specifies an instruction that performs "result = op a0/a1".
-     * This could be a MIPS instruction or a function call.
+     * double-to-long
      *
-     * long-to-double, double-to-long
+     * We have to clip values to long min/max per the specification.  The
+     * expected common case is a "reasonable" value that converts directly
+     * to a modest integer.  The EABI convert function isn't doing this for us.
      */
     /* unop vA, vB */
-    GET_OPA4(rOBJ)                         #  t1 <- A+
+    GET_OPA4(rOBJ)                         #  rOBJ <- A+
     GET_OPB(a3)                            #  a3 <- B
     EAS2(a3, rFP, a3)                      #  a3 <- &fp[B]
     LOAD64_F(fa0, fa0f, a3)
     FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
-                                  #  optional op
-    b d2l_doconv                                 #  a0/a1 <- op, a2-a3 changed
 
-.Lop_double_to_long_set_vreg:
-    SET_VREG64(rRESULT0, rRESULT1, rOBJ)                             #  vAA <- a0/a1
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    GOTO_OPCODE(t0)                        #  jump to next instruction
-    /* 12-13 instructions */
+#ifdef MIPS32REVGE6
+    /*
+     * TODO: simplify this when the MIPS64R6 emulator
+     * supports NAN2008=1.
+     */
+    li        t0, LONG_MIN_AS_DOUBLE_HIGH
+    mtc1      zero, fa1
+    mthc1     t0, fa1
+    cmp.le.d  ft0, fa1, fa0
+    GET_INST_OPCODE(t1)                    #  extract opcode from rINST
+    bc1nez    ft0, 1f                      #  if LONG_MIN <= vB, proceed to truncation
+    cmp.eq.d  ft0, fa0, fa0
+    selnez.d  fa0, fa1, ft0                #  fa0 = ordered(vB) ? LONG_MIN_AS_DOUBLE : 0
+1:
+    trunc.l.d fa0, fa0
+    SET_VREG64_F_GOTO(fa0, fa0f, rOBJ, t1) #  vA <- result
+#else
+    c.eq.d    fcc0, fa0, fa0
+    li        rRESULT0, 0
+    li        rRESULT1, 0
+    bc1f      fcc0, .Lop_double_to_long_get_opcode
 
+    li        t0, LONG_MIN_AS_DOUBLE_HIGH
+    mtc1      zero, fa1
+    MOVE_TO_FPU_HIGH(t0, fa1, fa1f)
+    c.ole.d   fcc0, fa0, fa1
+    li        rRESULT1, LONG_MIN_HIGH
+    bc1t      fcc0, .Lop_double_to_long_get_opcode
+
+    neg.d     fa1, fa1
+    c.ole.d   fcc0, fa1, fa0
+    nor       rRESULT0, rRESULT0, zero
+    nor       rRESULT1, rRESULT1, zero
+    bc1t      fcc0, .Lop_double_to_long_get_opcode
+
+    JAL(__fixdfdi)
+    GET_INST_OPCODE(t1)                    #  extract opcode from rINST
+    b         .Lop_double_to_long_set_vreg
+#endif
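
In the soft-float path above, Long.MAX_VALUE is obtained by bitwise-inverting the Long.MIN_VALUE register pair (the two nor instructions). A minimal Java sketch (illustrative class name, not part of the patch) of that identity and the clipping rules:

    // Illustrative only: ~Long.MIN_VALUE == Long.MAX_VALUE, plus the clip rules.
    public class DoubleToLongDemo {
        public static void main(String[] args) {
            System.out.println(~Long.MIN_VALUE == Long.MAX_VALUE);  // true
            System.out.println((long) -0.9);        // 0
            System.out.println((long) Double.NaN);  // 0
            System.out.println((long) 1e300);       // 9223372036854775807
        }
    }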
 
 /* ------------------------------ */
     .balign 128
@@ -3952,28 +4183,20 @@
 /* File: mips/op_double_to_float.S */
 /* File: mips/unopNarrower.S */
     /*
-     * Generic 64bit-to-32bit unary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = op a0/a1", where
-     * "result" is a 32-bit quantity in a0.
+     * Generic 64bit-to-32bit floating-point unary operation.  Provide an "instr"
+     * line that specifies an instruction that performs "fv0 = op fa0".
      *
-     * For: long-to-float, double-to-int, double-to-float
-     * If hard floating point support is available, use fa0 as the parameter,
-     * except for long-to-float opcode.
-     * (This would work for long-to-int, but that instruction is actually
-     * an exact match for OP_MOVE.)
+     * For: double-to-float
      */
     /* unop vA, vB */
     GET_OPB(a3)                            #  a3 <- B
-    GET_OPA4(rOBJ)                         #  t1 <- A+
+    GET_OPA4(rOBJ)                         #  rOBJ <- A+
     EAS2(a3, rFP, a3)                      #  a3 <- &fp[B]
     LOAD64_F(fa0, fa0f, a3)
     FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
     cvt.s.d fv0, fa0
-
-.Lop_double_to_float_set_vreg_f:
-    SET_VREG_F(fv0, rOBJ)                  #  vA <- result0
     GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    GOTO_OPCODE(t0)                        #  jump to next instruction
+    SET_VREG_F_GOTO(fv0, rOBJ, t0)         #  vA <- fv0
 
 
 /* ------------------------------ */
@@ -3983,22 +4206,21 @@
 /* File: mips/unop.S */
     /*
      * Generic 32-bit unary operation.  Provide an "instr" line that
-     * specifies an instruction that performs "result = op a0".
+     * specifies an instruction that performs "result0 = op a0".
      * This could be a MIPS instruction or a function call.
      *
-     * for: neg-int, not-int, neg-float, int-to-float, float-to-int,
-     *      int-to-byte, int-to-char, int-to-short
+     * for: int-to-byte, int-to-char, int-to-short,
+     *      neg-int, not-int, neg-float
      */
     /* unop vA, vB */
     GET_OPB(a3)                            #  a3 <- B
     GET_OPA4(t0)                           #  t0 <- A+
     GET_VREG(a0, a3)                       #  a0 <- vB
     FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
-    sll a0, a0, 24                              #  optional op
-    sra a0, a0, 24                                 #  a0 <- op, a0-a3 changed
+                                  #  optional op
+    SEB(a0, a0)                                 #  a0 <- op, a0-a3 changed
     GET_INST_OPCODE(t1)                    #  extract opcode from rINST
-    SET_VREG_GOTO(a0, t0, t1)        #  vAA <- result0
-    /* 9-10 instructions */
+    SET_VREG_GOTO(a0, t0, t1)        #  vA <- result0
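
The SEB macro replaces the explicit sll/sra-by-24 pair; both compute Java's int-to-byte narrowing, i.e. sign-extension of the low 8 bits. A minimal Java sketch (illustrative names, not part of the patch):

    // Illustrative only: the shift pair agrees with Java's (byte) cast.
    public class IntToByteDemo {
        static int viaShifts(int v) { return (v << 24) >> 24; }  // the old two-instruction sequence

        public static void main(String[] args) {
            System.out.println((byte) 0x1FF);      // -1
            System.out.println(viaShifts(0x1FF));  // -1
            System.out.println((byte) 0x7F);       // 127
        }
    }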
 
 
 /* ------------------------------ */
@@ -4008,11 +4230,11 @@
 /* File: mips/unop.S */
     /*
      * Generic 32-bit unary operation.  Provide an "instr" line that
-     * specifies an instruction that performs "result = op a0".
+     * specifies an instruction that performs "result0 = op a0".
      * This could be a MIPS instruction or a function call.
      *
-     * for: neg-int, not-int, neg-float, int-to-float, float-to-int,
-     *      int-to-byte, int-to-char, int-to-short
+     * for: int-to-byte, int-to-char, int-to-short,
+     *      neg-int, not-int, neg-float
      */
     /* unop vA, vB */
     GET_OPB(a3)                            #  a3 <- B
@@ -4022,8 +4244,7 @@
                                   #  optional op
     and a0, 0xffff                                 #  a0 <- op, a0-a3 changed
     GET_INST_OPCODE(t1)                    #  extract opcode from rINST
-    SET_VREG_GOTO(a0, t0, t1)        #  vAA <- result0
-    /* 9-10 instructions */
+    SET_VREG_GOTO(a0, t0, t1)        #  vA <- result0
 
 
 /* ------------------------------ */
@@ -4033,22 +4254,21 @@
 /* File: mips/unop.S */
     /*
      * Generic 32-bit unary operation.  Provide an "instr" line that
-     * specifies an instruction that performs "result = op a0".
+     * specifies an instruction that performs "result0 = op a0".
      * This could be a MIPS instruction or a function call.
      *
-     * for: neg-int, not-int, neg-float, int-to-float, float-to-int,
-     *      int-to-byte, int-to-char, int-to-short
+     * for: int-to-byte, int-to-char, int-to-short,
+     *      neg-int, not-int, neg-float
      */
     /* unop vA, vB */
     GET_OPB(a3)                            #  a3 <- B
     GET_OPA4(t0)                           #  t0 <- A+
     GET_VREG(a0, a3)                       #  a0 <- vB
     FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
-    sll a0, 16                              #  optional op
-    sra a0, 16                                 #  a0 <- op, a0-a3 changed
+                                  #  optional op
+    SEH(a0, a0)                                 #  a0 <- op, a0-a3 changed
     GET_INST_OPCODE(t1)                    #  extract opcode from rINST
-    SET_VREG_GOTO(a0, t0, t1)        #  vAA <- result0
-    /* 9-10 instructions */
+    SET_VREG_GOTO(a0, t0, t1)        #  vA <- result0
 
 
 /* ------------------------------ */
@@ -4087,7 +4307,6 @@
     addu a0, a0, a1                                 #  a0 <- op, a0-a3 changed
     GET_INST_OPCODE(t0)                    #  extract opcode from rINST
     SET_VREG_GOTO(a0, rOBJ, t0)       #  vAA <- a0
-    /* 11-14 instructions */
 
 
 /* ------------------------------ */
@@ -4126,7 +4345,6 @@
     subu a0, a0, a1                                 #  a0 <- op, a0-a3 changed
     GET_INST_OPCODE(t0)                    #  extract opcode from rINST
     SET_VREG_GOTO(a0, rOBJ, t0)       #  vAA <- a0
-    /* 11-14 instructions */
 
 
 /* ------------------------------ */
@@ -4165,7 +4383,6 @@
     mul a0, a0, a1                                 #  a0 <- op, a0-a3 changed
     GET_INST_OPCODE(t0)                    #  extract opcode from rINST
     SET_VREG_GOTO(a0, rOBJ, t0)       #  vAA <- a0
-    /* 11-14 instructions */
 
 
 /* ------------------------------ */
@@ -4205,7 +4422,6 @@
     div a0, a0, a1                                 #  a0 <- op, a0-a3 changed
     GET_INST_OPCODE(t0)                    #  extract opcode from rINST
     SET_VREG_GOTO(a0, rOBJ, t0)       #  vAA <- a0
-    /* 11-14 instructions */
 
 #else
 /* File: mips/binop.S */
@@ -4240,7 +4456,6 @@
     mflo a0                                 #  a0 <- op, a0-a3 changed
     GET_INST_OPCODE(t0)                    #  extract opcode from rINST
     SET_VREG_GOTO(a0, rOBJ, t0)       #  vAA <- a0
-    /* 11-14 instructions */
 
 #endif
 
@@ -4281,7 +4496,6 @@
     mod a0, a0, a1                                 #  a0 <- op, a0-a3 changed
     GET_INST_OPCODE(t0)                    #  extract opcode from rINST
     SET_VREG_GOTO(a0, rOBJ, t0)       #  vAA <- a0
-    /* 11-14 instructions */
 
 #else
 /* File: mips/binop.S */
@@ -4316,7 +4530,6 @@
     mfhi a0                                 #  a0 <- op, a0-a3 changed
     GET_INST_OPCODE(t0)                    #  extract opcode from rINST
     SET_VREG_GOTO(a0, rOBJ, t0)       #  vAA <- a0
-    /* 11-14 instructions */
 
 #endif
 
@@ -4356,7 +4569,6 @@
     and a0, a0, a1                                 #  a0 <- op, a0-a3 changed
     GET_INST_OPCODE(t0)                    #  extract opcode from rINST
     SET_VREG_GOTO(a0, rOBJ, t0)       #  vAA <- a0
-    /* 11-14 instructions */
 
 
 /* ------------------------------ */
@@ -4395,7 +4607,6 @@
     or a0, a0, a1                                 #  a0 <- op, a0-a3 changed
     GET_INST_OPCODE(t0)                    #  extract opcode from rINST
     SET_VREG_GOTO(a0, rOBJ, t0)       #  vAA <- a0
-    /* 11-14 instructions */
 
 
 /* ------------------------------ */
@@ -4434,7 +4645,6 @@
     xor a0, a0, a1                                 #  a0 <- op, a0-a3 changed
     GET_INST_OPCODE(t0)                    #  extract opcode from rINST
     SET_VREG_GOTO(a0, rOBJ, t0)       #  vAA <- a0
-    /* 11-14 instructions */
 
 
 /* ------------------------------ */
@@ -4473,7 +4683,6 @@
     sll a0, a0, a1                                 #  a0 <- op, a0-a3 changed
     GET_INST_OPCODE(t0)                    #  extract opcode from rINST
     SET_VREG_GOTO(a0, rOBJ, t0)       #  vAA <- a0
-    /* 11-14 instructions */
 
 
 /* ------------------------------ */
@@ -4512,7 +4721,6 @@
     sra a0, a0, a1                                 #  a0 <- op, a0-a3 changed
     GET_INST_OPCODE(t0)                    #  extract opcode from rINST
     SET_VREG_GOTO(a0, rOBJ, t0)       #  vAA <- a0
-    /* 11-14 instructions */
 
 
 /* ------------------------------ */
@@ -4551,7 +4759,6 @@
     srl a0, a0, a1                                 #  a0 <- op, a0-a3 changed
     GET_INST_OPCODE(t0)                    #  extract opcode from rINST
     SET_VREG_GOTO(a0, rOBJ, t0)       #  vAA <- a0
-    /* 11-14 instructions */
 
 
 /* ------------------------------ */
@@ -4571,10 +4778,10 @@
      * Generic 64-bit binary operation.  Provide an "instr" line that
      * specifies an instruction that performs "result = a0-a1 op a2-a3".
      * This could be a MIPS instruction or a function call.  (If the result
-     * comes back in a register other than a0, you can override "result".)
+     * comes back in a register pair other than a0-a1, you can override "result".)
      *
      * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (a1).  Useful for integer division and modulus.
+     * vCC (a2-a3).  Useful for integer division and modulus.
      *
      * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
      *      xor-long
@@ -4600,7 +4807,6 @@
     addu a1, a3, a1; sltu v1, v0, a2; addu v1, v1, a1                                 #  result <- op, a0-a3 changed
     GET_INST_OPCODE(t0)                    #  extract opcode from rINST
     SET_VREG64_GOTO(v0, v1, rOBJ, t0)   #  vAA/vAA+1 <- v0/v1
-    /* 14-17 instructions */
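
The addu/sltu/addu sequence above adds two 64-bit values with 32-bit registers; the carry out of the low word is recovered by an unsigned compare of the sum against one addend, which is exactly what sltu computes. A minimal Java sketch (illustrative names, not part of the patch) of the same trick:

    // Illustrative only: 64-bit add composed from 32-bit halves.
    public class AddLongDemo {
        static long add(int alo, int ahi, int blo, int bhi) {
            int lo = alo + blo;
            int carry = Integer.compareUnsigned(lo, alo) < 0 ? 1 : 0;  // sltu
            int hi = ahi + bhi + carry;
            return ((long) hi << 32) | (lo & 0xFFFFFFFFL);
        }

        public static void main(String[] args) {
            long a = 0x00000001_FFFFFFFFL, b = 1L;
            long r = add((int) a, (int) (a >>> 32), (int) b, (int) (b >>> 32));
            System.out.println(Long.toHexString(r) + " == " + Long.toHexString(a + b));  // 200000000 == 200000000
        }
    }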
 
 
 /* ------------------------------ */
@@ -4619,10 +4825,10 @@
      * Generic 64-bit binary operation.  Provide an "instr" line that
      * specifies an instruction that performs "result = a0-a1 op a2-a3".
      * This could be a MIPS instruction or a function call.  (If the result
-     * comes back in a register other than a0, you can override "result".)
+     * comes back in a register pair other than a0-a1, you can override "result".)
      *
      * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (a1).  Useful for integer division and modulus.
+     * vCC (a2-a3).  Useful for integer division and modulus.
      *
      * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
      *      xor-long
@@ -4648,7 +4854,6 @@
     subu v1, a1, a3; sltu a0, a0, v0; subu v1, v1, a0                                 #  result <- op, a0-a3 changed
     GET_INST_OPCODE(t0)                    #  extract opcode from rINST
     SET_VREG64_GOTO(v0, v1, rOBJ, t0)   #  vAA/vAA+1 <- v0/v1
-    /* 14-17 instructions */
 
 
 /* ------------------------------ */
@@ -4702,10 +4907,10 @@
      * Generic 64-bit binary operation.  Provide an "instr" line that
      * specifies an instruction that performs "result = a0-a1 op a2-a3".
      * This could be a MIPS instruction or a function call.  (If the result
-     * comes back in a register other than a0, you can override "result".)
+     * comes back in a register pair other than a0-a1, you can override "result".)
      *
      * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (a1).  Useful for integer division and modulus.
+     * vCC (a2-a3).  Useful for integer division and modulus.
      *
      * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
      *      xor-long
@@ -4731,7 +4936,6 @@
     JAL(__divdi3)                                 #  result <- op, a0-a3 changed
     GET_INST_OPCODE(t0)                    #  extract opcode from rINST
     SET_VREG64_GOTO(v0, v1, rOBJ, t0)   #  vAA/vAA+1 <- v0/v1
-    /* 14-17 instructions */
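
When chkzero is 1, the handler branches to common_errDivideByZero before calling __divdi3, matching Java's requirement that long division by zero throws. A minimal Java sketch (illustrative class name, not part of the patch):

    // Illustrative only: division by zero on longs throws ArithmeticException.
    public class DivLongDemo {
        public static void main(String[] args) {
            long divisor = 0L;
            try {
                System.out.println(42L / divisor);
            } catch (ArithmeticException e) {
                System.out.println("caught ArithmeticException");  // this path is taken
            }
        }
    }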
 
 
 /* ------------------------------ */
@@ -4743,10 +4947,10 @@
      * Generic 64-bit binary operation.  Provide an "instr" line that
      * specifies an instruction that performs "result = a0-a1 op a2-a3".
      * This could be a MIPS instruction or a function call.  (If the result
-     * comes back in a register other than a0, you can override "result".)
+     * comes back in a register pair other than a0-a1, you can override "result".)
      *
      * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (a1).  Useful for integer division and modulus.
+     * vCC (a2-a3).  Useful for integer division and modulus.
      *
      * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
      *      xor-long
@@ -4772,7 +4976,6 @@
     JAL(__moddi3)                                 #  result <- op, a0-a3 changed
     GET_INST_OPCODE(t0)                    #  extract opcode from rINST
     SET_VREG64_GOTO(v0, v1, rOBJ, t0)   #  vAA/vAA+1 <- v0/v1
-    /* 14-17 instructions */
 
 
 /* ------------------------------ */
@@ -4784,10 +4987,10 @@
      * Generic 64-bit binary operation.  Provide an "instr" line that
      * specifies an instruction that performs "result = a0-a1 op a2-a3".
      * This could be a MIPS instruction or a function call.  (If the result
-     * comes back in a register other than a0, you can override "result".)
+     * comes back in a register pair other than a0-a1, you can override "result".)
      *
      * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (a1).  Useful for integer division and modulus.
+     * vCC (a2-a3).  Useful for integer division and modulus.
      *
      * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
      *      xor-long
@@ -4813,7 +5016,6 @@
     and a1, a1, a3                                 #  result <- op, a0-a3 changed
     GET_INST_OPCODE(t0)                    #  extract opcode from rINST
     SET_VREG64_GOTO(a0, a1, rOBJ, t0)   #  vAA/vAA+1 <- a0/a1
-    /* 14-17 instructions */
 
 
 /* ------------------------------ */
@@ -4825,10 +5027,10 @@
      * Generic 64-bit binary operation.  Provide an "instr" line that
      * specifies an instruction that performs "result = a0-a1 op a2-a3".
      * This could be a MIPS instruction or a function call.  (If the result
-     * comes back in a register other than a0, you can override "result".)
+     * comes back in a register pair other than a0-a1, you can override "result".)
      *
      * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (a1).  Useful for integer division and modulus.
+     * vCC (a2-a3).  Useful for integer division and modulus.
      *
      * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
      *      xor-long
@@ -4854,7 +5056,6 @@
     or a1, a1, a3                                 #  result <- op, a0-a3 changed
     GET_INST_OPCODE(t0)                    #  extract opcode from rINST
     SET_VREG64_GOTO(a0, a1, rOBJ, t0)   #  vAA/vAA+1 <- a0/a1
-    /* 14-17 instructions */
 
 
 /* ------------------------------ */
@@ -4866,10 +5067,10 @@
      * Generic 64-bit binary operation.  Provide an "instr" line that
      * specifies an instruction that performs "result = a0-a1 op a2-a3".
      * This could be a MIPS instruction or a function call.  (If the result
-     * comes back in a register other than a0, you can override "result".)
+     * comes back in a register pair other than a0-a1, you can override "result".)
      *
      * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (a1).  Useful for integer division and modulus.
+     * vCC (a2-a3).  Useful for integer division and modulus.
      *
      * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
      *      xor-long
@@ -4895,7 +5096,6 @@
     xor a1, a1, a3                                 #  result <- op, a0-a3 changed
     GET_INST_OPCODE(t0)                    #  extract opcode from rINST
     SET_VREG64_GOTO(a0, a1, rOBJ, t0)   #  vAA/vAA+1 <- a0/a1
-    /* 14-17 instructions */
 
 
 /* ------------------------------ */
@@ -4928,7 +5128,7 @@
     srl     a0, v1                         #  alo<- alo >> (32-(shift&31))
     sll     v1, a1, a2                     #  rhi<- ahi << (shift&31)
     or      v1, a0                         #  rhi<- rhi | alo
-    SET_VREG64_GOTO(v0, v1, t2, t0)        #  vAA/vAA+1 <- a0/a1
+    SET_VREG64_GOTO(v0, v1, t2, t0)        #  vAA/vAA+1 <- v0/v1
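
The srl/sll/or sequence above builds the high word of a long left shift from the two 32-bit halves. A minimal Java sketch (illustrative names, not part of the patch) of the composition for shift amounts below 32:

    // Illustrative only: high word = (hi << s) | (lo >>> (32 - s)).
    public class ShlLongDemo {
        static long shl(int lo, int hi, int s) {
            int rlo = lo << s;
            int rhi = (hi << s) | (s == 0 ? 0 : lo >>> (32 - s));
            return ((long) rhi << 32) | (rlo & 0xFFFFFFFFL);
        }

        public static void main(String[] args) {
            long v = 0x0123456789ABCDEFL;
            System.out.println(Long.toHexString(shl((int) v, (int) (v >>> 32), 8)));  // 23456789abcdef00
            System.out.println(Long.toHexString(v << 8));                             // 23456789abcdef00
        }
    }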
 
 /* ------------------------------ */
     .balign 128
@@ -4959,7 +5159,7 @@
     sll     a1, 1
     sll     a1, a0                         #  ahi<- ahi << (32-(shift&31))
     or      v0, a1                         #  rlo<- rlo | ahi
-    SET_VREG64_GOTO(v0, v1, t3, t0)        #  vAA/VAA+1 <- v0/v0
+    SET_VREG64_GOTO(v0, v1, t3, t0)        #  vAA/vAA+1 <- v0/v1
 
 /* ------------------------------ */
     .balign 128
@@ -5006,7 +5206,7 @@
 
     /* binop vAA, vBB, vCC */
     FETCH(a0, 1)                           #  a0 <- CCBB
-    GET_OPA(rOBJ)                          #  s5 <- AA
+    GET_OPA(rOBJ)                          #  rOBJ <- AA
     srl       a3, a0, 8                    #  a3 <- CC
     and       a2, a0, 255                  #  a2 <- BB
     GET_VREG_F(fa1, a3)                    #  a1 <- vCC
@@ -5014,9 +5214,8 @@
 
     FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
     add.s fv0, fa0, fa1                                 #  f0 = result
-    SET_VREG_F(fv0, rOBJ)                  #  vAA <- fv0
     GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    GOTO_OPCODE(t0)                        #  jump to next instruction
+    SET_VREG_F_GOTO(fv0, rOBJ, t0)         #  vAA <- fv0
 
 
 /* ------------------------------ */
@@ -5032,7 +5231,7 @@
 
     /* binop vAA, vBB, vCC */
     FETCH(a0, 1)                           #  a0 <- CCBB
-    GET_OPA(rOBJ)                          #  s5 <- AA
+    GET_OPA(rOBJ)                          #  rOBJ <- AA
     srl       a3, a0, 8                    #  a3 <- CC
     and       a2, a0, 255                  #  a2 <- BB
     GET_VREG_F(fa1, a3)                    #  a1 <- vCC
@@ -5040,9 +5239,8 @@
 
     FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
     sub.s fv0, fa0, fa1                                 #  f0 = result
-    SET_VREG_F(fv0, rOBJ)                  #  vAA <- fv0
     GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    GOTO_OPCODE(t0)                        #  jump to next instruction
+    SET_VREG_F_GOTO(fv0, rOBJ, t0)         #  vAA <- fv0
 
 
 /* ------------------------------ */
@@ -5058,7 +5256,7 @@
 
     /* binop vAA, vBB, vCC */
     FETCH(a0, 1)                           #  a0 <- CCBB
-    GET_OPA(rOBJ)                          #  s5 <- AA
+    GET_OPA(rOBJ)                          #  rOBJ <- AA
     srl       a3, a0, 8                    #  a3 <- CC
     and       a2, a0, 255                  #  a2 <- BB
     GET_VREG_F(fa1, a3)                    #  a1 <- vCC
@@ -5066,9 +5264,8 @@
 
     FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
     mul.s fv0, fa0, fa1                                 #  f0 = result
-    SET_VREG_F(fv0, rOBJ)                  #  vAA <- fv0
     GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    GOTO_OPCODE(t0)                        #  jump to next instruction
+    SET_VREG_F_GOTO(fv0, rOBJ, t0)         #  vAA <- fv0
 
 
 /* ------------------------------ */
@@ -5084,7 +5281,7 @@
 
     /* binop vAA, vBB, vCC */
     FETCH(a0, 1)                           #  a0 <- CCBB
-    GET_OPA(rOBJ)                          #  s5 <- AA
+    GET_OPA(rOBJ)                          #  rOBJ <- AA
     srl       a3, a0, 8                    #  a3 <- CC
     and       a2, a0, 255                  #  a2 <- BB
     GET_VREG_F(fa1, a3)                    #  a1 <- vCC
@@ -5092,9 +5289,8 @@
 
     FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
     div.s fv0, fa0, fa1                                 #  f0 = result
-    SET_VREG_F(fv0, rOBJ)                  #  vAA <- fv0
     GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    GOTO_OPCODE(t0)                        #  jump to next instruction
+    SET_VREG_F_GOTO(fv0, rOBJ, t0)         #  vAA <- fv0
 
 
 /* ------------------------------ */
@@ -5110,7 +5306,7 @@
 
     /* binop vAA, vBB, vCC */
     FETCH(a0, 1)                           #  a0 <- CCBB
-    GET_OPA(rOBJ)                          #  s5 <- AA
+    GET_OPA(rOBJ)                          #  rOBJ <- AA
     srl       a3, a0, 8                    #  a3 <- CC
     and       a2, a0, 255                  #  a2 <- BB
     GET_VREG_F(fa1, a3)                    #  a1 <- vCC
@@ -5118,9 +5314,8 @@
 
     FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
     JAL(fmodf)                                 #  f0 = result
-    SET_VREG_F(fv0, rOBJ)                  #  vAA <- fv0
     GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    GOTO_OPCODE(t0)                        #  jump to next instruction
+    SET_VREG_F_GOTO(fv0, rOBJ, t0)         #  vAA <- fv0
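
rem-float is implemented with fmodf, i.e. Java's % on floats: a truncated remainder that keeps the sign of the dividend, which is not Math.IEEEremainder. A minimal Java sketch (illustrative class name, not part of the patch):

    // Illustrative only: % on floats versus the IEEE remainder.
    public class RemFloatDemo {
        public static void main(String[] args) {
            System.out.println(5.5f % 2.0f);                   // 1.5
            System.out.println(-5.5f % 2.0f);                  // -1.5 (sign of dividend)
            System.out.println(Math.IEEEremainder(5.5, 2.0));  // -0.5
        }
    }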
 
 
 /* ------------------------------ */
@@ -5129,8 +5324,8 @@
 /* File: mips/op_add_double.S */
 /* File: mips/fbinopWide.S */
     /*
-     * Generic 64-bit binary operation.  Provide an "instr" line that
-     * specifies an instruction that performs "result = a0-a1 op a2-a3".
+     * Generic 64-bit floating-point binary operation.  Provide an "instr"
+     * line that specifies an instruction that performs "fv0 = fa0 op fa1".
      * This could be an MIPS instruction or a function call.
      *
      * for: add-double, sub-double, mul-double, div-double,
@@ -5139,7 +5334,7 @@
      */
     /* binop vAA, vBB, vCC */
     FETCH(a0, 1)                           #  a0 <- CCBB
-    GET_OPA(rOBJ)                          #  s5 <- AA
+    GET_OPA(rOBJ)                          #  rOBJ <- AA
     and       a2, a0, 255                  #  a2 <- BB
     srl       a3, a0, 8                    #  a3 <- CC
     EAS2(a2, rFP, a2)                      #  a2 <- &fp[BB]
@@ -5149,8 +5344,8 @@
 
     FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
     add.d fv0, fa0, fa1
-    SET_VREG64_F(fv0, fv0f, rOBJ)
-    b         .Lop_add_double_finish
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG64_F_GOTO(fv0, fv0f, rOBJ, t0)  #  vAA/vAA+1 <- fv0
 
 
 /* ------------------------------ */
@@ -5159,8 +5354,8 @@
 /* File: mips/op_sub_double.S */
 /* File: mips/fbinopWide.S */
     /*
-     * Generic 64-bit binary operation.  Provide an "instr" line that
-     * specifies an instruction that performs "result = a0-a1 op a2-a3".
+     * Generic 64-bit floating-point binary operation.  Provide an "instr"
+     * line that specifies an instruction that performs "fv0 = fa0 op fa1".
      * This could be an MIPS instruction or a function call.
      *
      * for: add-double, sub-double, mul-double, div-double,
@@ -5169,7 +5364,7 @@
      */
     /* binop vAA, vBB, vCC */
     FETCH(a0, 1)                           #  a0 <- CCBB
-    GET_OPA(rOBJ)                          #  s5 <- AA
+    GET_OPA(rOBJ)                          #  rOBJ <- AA
     and       a2, a0, 255                  #  a2 <- BB
     srl       a3, a0, 8                    #  a3 <- CC
     EAS2(a2, rFP, a2)                      #  a2 <- &fp[BB]
@@ -5179,8 +5374,8 @@
 
     FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
     sub.d fv0, fa0, fa1
-    SET_VREG64_F(fv0, fv0f, rOBJ)
-    b         .Lop_sub_double_finish
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG64_F_GOTO(fv0, fv0f, rOBJ, t0)  #  vAA/vAA+1 <- fv0
 
 
 /* ------------------------------ */
@@ -5189,8 +5384,8 @@
 /* File: mips/op_mul_double.S */
 /* File: mips/fbinopWide.S */
     /*
-     * Generic 64-bit binary operation.  Provide an "instr" line that
-     * specifies an instruction that performs "result = a0-a1 op a2-a3".
+     * Generic 64-bit floating-point binary operation.  Provide an "instr"
+     * line that specifies an instruction that performs "fv0 = fa0 op fa1".
      * This could be an MIPS instruction or a function call.
      *
      * for: add-double, sub-double, mul-double, div-double,
@@ -5199,7 +5394,7 @@
      */
     /* binop vAA, vBB, vCC */
     FETCH(a0, 1)                           #  a0 <- CCBB
-    GET_OPA(rOBJ)                          #  s5 <- AA
+    GET_OPA(rOBJ)                          #  rOBJ <- AA
     and       a2, a0, 255                  #  a2 <- BB
     srl       a3, a0, 8                    #  a3 <- CC
     EAS2(a2, rFP, a2)                      #  a2 <- &fp[BB]
@@ -5209,8 +5404,8 @@
 
     FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
     mul.d fv0, fa0, fa1
-    SET_VREG64_F(fv0, fv0f, rOBJ)
-    b         .Lop_mul_double_finish
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG64_F_GOTO(fv0, fv0f, rOBJ, t0)  #  vAA/vAA+1 <- fv0
 
 
 /* ------------------------------ */
@@ -5219,8 +5414,8 @@
 /* File: mips/op_div_double.S */
 /* File: mips/fbinopWide.S */
     /*
-     * Generic 64-bit binary operation.  Provide an "instr" line that
-     * specifies an instruction that performs "result = a0-a1 op a2-a3".
+     * Generic 64-bit floating-point binary operation.  Provide an "instr"
+     * line that specifies an instruction that performs "fv0 = fa0 op fa1".
      * This could be an MIPS instruction or a function call.
      *
      * for: add-double, sub-double, mul-double, div-double,
@@ -5229,7 +5424,7 @@
      */
     /* binop vAA, vBB, vCC */
     FETCH(a0, 1)                           #  a0 <- CCBB
-    GET_OPA(rOBJ)                          #  s5 <- AA
+    GET_OPA(rOBJ)                          #  rOBJ <- AA
     and       a2, a0, 255                  #  a2 <- BB
     srl       a3, a0, 8                    #  a3 <- CC
     EAS2(a2, rFP, a2)                      #  a2 <- &fp[BB]
@@ -5239,8 +5434,8 @@
 
     FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
     div.d fv0, fa0, fa1
-    SET_VREG64_F(fv0, fv0f, rOBJ)
-    b         .Lop_div_double_finish
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG64_F_GOTO(fv0, fv0f, rOBJ, t0)  #  vAA/vAA+1 <- fv0
 
 
 /* ------------------------------ */
@@ -5249,8 +5444,8 @@
 /* File: mips/op_rem_double.S */
 /* File: mips/fbinopWide.S */
     /*
-     * Generic 64-bit binary operation.  Provide an "instr" line that
-     * specifies an instruction that performs "result = a0-a1 op a2-a3".
+     * Generic 64-bit floating-point binary operation.  Provide an "instr"
+     * line that specifies an instruction that performs "fv0 = fa0 op fa1".
      * This could be an MIPS instruction or a function call.
      *
      * for: add-double, sub-double, mul-double, div-double,
@@ -5259,7 +5454,7 @@
      */
     /* binop vAA, vBB, vCC */
     FETCH(a0, 1)                           #  a0 <- CCBB
-    GET_OPA(rOBJ)                          #  s5 <- AA
+    GET_OPA(rOBJ)                          #  rOBJ <- AA
     and       a2, a0, 255                  #  a2 <- BB
     srl       a3, a0, 8                    #  a3 <- CC
     EAS2(a2, rFP, a2)                      #  a2 <- &fp[BB]
@@ -5269,8 +5464,8 @@
 
     FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
     JAL(fmod)
-    SET_VREG64_F(fv0, fv0f, rOBJ)
-    b         .Lop_rem_double_finish
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG64_F_GOTO(fv0, fv0f, rOBJ, t0)  #  vAA/vAA+1 <- fv0
 
 
 /* ------------------------------ */
@@ -5304,8 +5499,7 @@
                                   #  optional op
     addu a0, a0, a1                                 #  a0 <- op, a0-a3 changed
     GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG_GOTO(a0, rOBJ, t0)       #  vAA <- a0
-    /* 10-13 instructions */
+    SET_VREG_GOTO(a0, rOBJ, t0)       #  vA <- a0
 
 
 /* ------------------------------ */
@@ -5339,8 +5533,7 @@
                                   #  optional op
     subu a0, a0, a1                                 #  a0 <- op, a0-a3 changed
     GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG_GOTO(a0, rOBJ, t0)       #  vAA <- a0
-    /* 10-13 instructions */
+    SET_VREG_GOTO(a0, rOBJ, t0)       #  vA <- a0
 
 
 /* ------------------------------ */
@@ -5374,8 +5567,7 @@
                                   #  optional op
     mul a0, a0, a1                                 #  a0 <- op, a0-a3 changed
     GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG_GOTO(a0, rOBJ, t0)       #  vAA <- a0
-    /* 10-13 instructions */
+    SET_VREG_GOTO(a0, rOBJ, t0)       #  vA <- a0
 
 
 /* ------------------------------ */
@@ -5410,8 +5602,7 @@
                                   #  optional op
     div a0, a0, a1                                 #  a0 <- op, a0-a3 changed
     GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG_GOTO(a0, rOBJ, t0)       #  vAA <- a0
-    /* 10-13 instructions */
+    SET_VREG_GOTO(a0, rOBJ, t0)       #  vA <- a0
 
 #else
 /* File: mips/binop2addr.S */
@@ -5441,8 +5632,7 @@
     div zero, a0, a1                              #  optional op
     mflo a0                                 #  a0 <- op, a0-a3 changed
     GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG_GOTO(a0, rOBJ, t0)       #  vAA <- a0
-    /* 10-13 instructions */
+    SET_VREG_GOTO(a0, rOBJ, t0)       #  vA <- a0
 
 #endif
 
@@ -5478,8 +5668,7 @@
                                   #  optional op
     mod a0, a0, a1                                 #  a0 <- op, a0-a3 changed
     GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG_GOTO(a0, rOBJ, t0)       #  vAA <- a0
-    /* 10-13 instructions */
+    SET_VREG_GOTO(a0, rOBJ, t0)       #  vA <- a0
 
 #else
 /* File: mips/binop2addr.S */
@@ -5509,8 +5698,7 @@
     div zero, a0, a1                              #  optional op
     mfhi a0                                 #  a0 <- op, a0-a3 changed
     GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG_GOTO(a0, rOBJ, t0)       #  vAA <- a0
-    /* 10-13 instructions */
+    SET_VREG_GOTO(a0, rOBJ, t0)       #  vA <- a0
 
 #endif
 
@@ -5545,8 +5733,7 @@
                                   #  optional op
     and a0, a0, a1                                 #  a0 <- op, a0-a3 changed
     GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG_GOTO(a0, rOBJ, t0)       #  vAA <- a0
-    /* 10-13 instructions */
+    SET_VREG_GOTO(a0, rOBJ, t0)       #  vA <- a0
 
 
 /* ------------------------------ */
@@ -5580,8 +5767,7 @@
                                   #  optional op
     or a0, a0, a1                                 #  a0 <- op, a0-a3 changed
     GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG_GOTO(a0, rOBJ, t0)       #  vAA <- a0
-    /* 10-13 instructions */
+    SET_VREG_GOTO(a0, rOBJ, t0)       #  vA <- a0
 
 
 /* ------------------------------ */
@@ -5615,8 +5801,7 @@
                                   #  optional op
     xor a0, a0, a1                                 #  a0 <- op, a0-a3 changed
     GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG_GOTO(a0, rOBJ, t0)       #  vAA <- a0
-    /* 10-13 instructions */
+    SET_VREG_GOTO(a0, rOBJ, t0)       #  vA <- a0
 
 
 /* ------------------------------ */
@@ -5650,8 +5835,7 @@
                                   #  optional op
     sll a0, a0, a1                                 #  a0 <- op, a0-a3 changed
     GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG_GOTO(a0, rOBJ, t0)       #  vAA <- a0
-    /* 10-13 instructions */
+    SET_VREG_GOTO(a0, rOBJ, t0)       #  vA <- a0
 
 
 /* ------------------------------ */
@@ -5685,8 +5869,7 @@
                                   #  optional op
     sra a0, a0, a1                                 #  a0 <- op, a0-a3 changed
     GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG_GOTO(a0, rOBJ, t0)       #  vAA <- a0
-    /* 10-13 instructions */
+    SET_VREG_GOTO(a0, rOBJ, t0)       #  vA <- a0
 
 
 /* ------------------------------ */
@@ -5720,8 +5903,7 @@
                                   #  optional op
     srl a0, a0, a1                                  #  a0 <- op, a0-a3 changed
     GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG_GOTO(a0, rOBJ, t0)       #  vAA <- a0
-    /* 10-13 instructions */
+    SET_VREG_GOTO(a0, rOBJ, t0)       #  vA <- a0
 
 
 /* ------------------------------ */
@@ -5736,22 +5918,21 @@
      * Generic 64-bit "/2addr" binary operation.  Provide an "instr" line
      * that specifies an instruction that performs "result = a0-a1 op a2-a3".
      * This could be a MIPS instruction or a function call.  (If the result
-     * comes back in a register other than a0, you can override "result".)
+     * comes back in a register pair other than a0-a1, you can override "result".)
      *
      * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (a1).  Useful for integer division and modulus.
+     * vB (a2-a3).  Useful for integer division and modulus.
      *
      * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
      *      and-long/2addr, or-long/2addr, xor-long/2addr
-     *      rem-double/2addr
      */
     /* binop/2addr vA, vB */
     GET_OPA4(rOBJ)                         #  rOBJ <- A+
     GET_OPB(a1)                            #  a1 <- B
     EAS2(a1, rFP, a1)                      #  a1 <- &fp[B]
     EAS2(t0, rFP, rOBJ)                    #  t0 <- &fp[A]
-    LOAD64(a2, a3, a1)               #  a2/a3 <- vBB/vBB+1
-    LOAD64(a0, a1, t0)               #  a0/a1 <- vAA/vAA+1
+    LOAD64(a2, a3, a1)               #  a2/a3 <- vB/vB+1
+    LOAD64(a0, a1, t0)               #  a0/a1 <- vA/vA+1
     .if 0
     or        t0, a2, a3             #  second arg (a2-a3) is zero?
     beqz      t0, common_errDivideByZero
@@ -5761,9 +5942,7 @@
     addu v0, a2, a0                              #  optional op
     addu a1, a3, a1; sltu v1, v0, a2; addu v1, v1, a1                                 #  result <- op, a0-a3 changed
     GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG64(v0, v1, rOBJ)   #  vAA/vAA+1 <- v0/v1
-    GOTO_OPCODE(t0)                        #  jump to next instruction
-    /* 12-15 instructions */
+    SET_VREG64_GOTO(v0, v1, rOBJ, t0)   #  vA/vA+1 <- v0/v1
 
 
 /* ------------------------------ */
@@ -5778,22 +5957,21 @@
      * Generic 64-bit "/2addr" binary operation.  Provide an "instr" line
      * that specifies an instruction that performs "result = a0-a1 op a2-a3".
      * This could be a MIPS instruction or a function call.  (If the result
-     * comes back in a register other than a0, you can override "result".)
+     * comes back in a register pair other than a0-a1, you can override "result".)
      *
      * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (a1).  Useful for integer division and modulus.
+     * vB (a2-a3).  Useful for integer division and modulus.
      *
      * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
      *      and-long/2addr, or-long/2addr, xor-long/2addr
-     *      rem-double/2addr
      */
     /* binop/2addr vA, vB */
     GET_OPA4(rOBJ)                         #  rOBJ <- A+
     GET_OPB(a1)                            #  a1 <- B
     EAS2(a1, rFP, a1)                      #  a1 <- &fp[B]
     EAS2(t0, rFP, rOBJ)                    #  t0 <- &fp[A]
-    LOAD64(a2, a3, a1)               #  a2/a3 <- vBB/vBB+1
-    LOAD64(a0, a1, t0)               #  a0/a1 <- vAA/vAA+1
+    LOAD64(a2, a3, a1)               #  a2/a3 <- vB/vB+1
+    LOAD64(a0, a1, t0)               #  a0/a1 <- vA/vA+1
     .if 0
     or        t0, a2, a3             #  second arg (a2-a3) is zero?
     beqz      t0, common_errDivideByZero
@@ -5803,9 +5981,7 @@
     subu v0, a0, a2                              #  optional op
     subu v1, a1, a3; sltu a0, a0, v0; subu v1, v1, a0                                 #  result <- op, a0-a3 changed
     GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG64(v0, v1, rOBJ)   #  vAA/vAA+1 <- v0/v1
-    GOTO_OPCODE(t0)                        #  jump to next instruction
-    /* 12-15 instructions */
+    SET_VREG64_GOTO(v0, v1, rOBJ, t0)   #  vA/vA+1 <- v0/v1
 
 
 /* ------------------------------ */
@@ -5840,9 +6016,7 @@
 
     FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
     GET_INST_OPCODE(t1)                    #  extract opcode from rINST
-    # vAA <- v0 (low)
-    SET_VREG64(v0, v1, rOBJ)               #  vAA+1 <- v1 (high)
-    GOTO_OPCODE(t1)                        #  jump to next instruction
+    SET_VREG64_GOTO(v0, v1, rOBJ, t1)      #  vA/vA+1 <- v0(low)/v1(high)
 
 /* ------------------------------ */
     .balign 128
@@ -5853,22 +6027,21 @@
      * Generic 64-bit "/2addr" binary operation.  Provide an "instr" line
      * that specifies an instruction that performs "result = a0-a1 op a2-a3".
      * This could be a MIPS instruction or a function call.  (If the result
-     * comes back in a register other than a0, you can override "result".)
+     * comes back in a register pair other than a0-a1, you can override "result".)
      *
      * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (a1).  Useful for integer division and modulus.
+     * vB (a2-a3).  Useful for integer division and modulus.
      *
      * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
      *      and-long/2addr, or-long/2addr, xor-long/2addr
-     *      rem-double/2addr
      */
     /* binop/2addr vA, vB */
     GET_OPA4(rOBJ)                         #  rOBJ <- A+
     GET_OPB(a1)                            #  a1 <- B
     EAS2(a1, rFP, a1)                      #  a1 <- &fp[B]
     EAS2(t0, rFP, rOBJ)                    #  t0 <- &fp[A]
-    LOAD64(a2, a3, a1)               #  a2/a3 <- vBB/vBB+1
-    LOAD64(a0, a1, t0)               #  a0/a1 <- vAA/vAA+1
+    LOAD64(a2, a3, a1)               #  a2/a3 <- vB/vB+1
+    LOAD64(a0, a1, t0)               #  a0/a1 <- vA/vA+1
     .if 1
     or        t0, a2, a3             #  second arg (a2-a3) is zero?
     beqz      t0, common_errDivideByZero
@@ -5878,9 +6051,7 @@
                                   #  optional op
     JAL(__divdi3)                                 #  result <- op, a0-a3 changed
     GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG64(v0, v1, rOBJ)   #  vAA/vAA+1 <- v0/v1
-    GOTO_OPCODE(t0)                        #  jump to next instruction
-    /* 12-15 instructions */
+    SET_VREG64_GOTO(v0, v1, rOBJ, t0)   #  vA/vA+1 <- v0/v1
 
 
 /* ------------------------------ */
@@ -5892,22 +6063,21 @@
      * Generic 64-bit "/2addr" binary operation.  Provide an "instr" line
      * that specifies an instruction that performs "result = a0-a1 op a2-a3".
      * This could be a MIPS instruction or a function call.  (If the result
-     * comes back in a register other than a0, you can override "result".)
+     * comes back in a register pair other than a0-a1, you can override "result".)
      *
      * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (a1).  Useful for integer division and modulus.
+     * vB (a2-a3).  Useful for integer division and modulus.
      *
      * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
      *      and-long/2addr, or-long/2addr, xor-long/2addr
-     *      rem-double/2addr
      */
     /* binop/2addr vA, vB */
     GET_OPA4(rOBJ)                         #  rOBJ <- A+
     GET_OPB(a1)                            #  a1 <- B
     EAS2(a1, rFP, a1)                      #  a1 <- &fp[B]
     EAS2(t0, rFP, rOBJ)                    #  t0 <- &fp[A]
-    LOAD64(a2, a3, a1)               #  a2/a3 <- vBB/vBB+1
-    LOAD64(a0, a1, t0)               #  a0/a1 <- vAA/vAA+1
+    LOAD64(a2, a3, a1)               #  a2/a3 <- vB/vB+1
+    LOAD64(a0, a1, t0)               #  a0/a1 <- vA/vA+1
     .if 1
     or        t0, a2, a3             #  second arg (a2-a3) is zero?
     beqz      t0, common_errDivideByZero
@@ -5917,9 +6087,7 @@
                                   #  optional op
     JAL(__moddi3)                                 #  result <- op, a0-a3 changed
     GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG64(v0, v1, rOBJ)   #  vAA/vAA+1 <- v0/v1
-    GOTO_OPCODE(t0)                        #  jump to next instruction
-    /* 12-15 instructions */
+    SET_VREG64_GOTO(v0, v1, rOBJ, t0)   #  vA/vA+1 <- v0/v1
 
 
 /* ------------------------------ */
@@ -5931,22 +6099,21 @@
      * Generic 64-bit "/2addr" binary operation.  Provide an "instr" line
      * that specifies an instruction that performs "result = a0-a1 op a2-a3".
      * This could be a MIPS instruction or a function call.  (If the result
-     * comes back in a register other than a0, you can override "result".)
+     * comes back in a register pair other than a0-a1, you can override "result".)
      *
      * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (a1).  Useful for integer division and modulus.
+     * vB (a2-a3).  Useful for integer division and modulus.
      *
      * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
      *      and-long/2addr, or-long/2addr, xor-long/2addr
-     *      rem-double/2addr
      */
     /* binop/2addr vA, vB */
     GET_OPA4(rOBJ)                         #  rOBJ <- A+
     GET_OPB(a1)                            #  a1 <- B
     EAS2(a1, rFP, a1)                      #  a1 <- &fp[B]
     EAS2(t0, rFP, rOBJ)                    #  t0 <- &fp[A]
-    LOAD64(a2, a3, a1)               #  a2/a3 <- vBB/vBB+1
-    LOAD64(a0, a1, t0)               #  a0/a1 <- vAA/vAA+1
+    LOAD64(a2, a3, a1)               #  a2/a3 <- vB/vB+1
+    LOAD64(a0, a1, t0)               #  a0/a1 <- vA/vA+1
     .if 0
     or        t0, a2, a3             #  second arg (a2-a3) is zero?
     beqz      t0, common_errDivideByZero
@@ -5956,9 +6123,7 @@
     and a0, a0, a2                              #  optional op
     and a1, a1, a3                                 #  result <- op, a0-a3 changed
     GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG64(a0, a1, rOBJ)   #  vAA/vAA+1 <- a0/a1
-    GOTO_OPCODE(t0)                        #  jump to next instruction
-    /* 12-15 instructions */
+    SET_VREG64_GOTO(a0, a1, rOBJ, t0)   #  vA/vA+1 <- a0/a1
 
 
 /* ------------------------------ */
@@ -5970,22 +6135,21 @@
      * Generic 64-bit "/2addr" binary operation.  Provide an "instr" line
      * that specifies an instruction that performs "result = a0-a1 op a2-a3".
      * This could be a MIPS instruction or a function call.  (If the result
-     * comes back in a register other than a0, you can override "result".)
+     * comes back in a register pair other than a0-a1, you can override "result".)
      *
      * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (a1).  Useful for integer division and modulus.
+     * vB (a2-a3).  Useful for integer division and modulus.
      *
      * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
      *      and-long/2addr, or-long/2addr, xor-long/2addr
-     *      rem-double/2addr
      */
     /* binop/2addr vA, vB */
     GET_OPA4(rOBJ)                         #  rOBJ <- A+
     GET_OPB(a1)                            #  a1 <- B
     EAS2(a1, rFP, a1)                      #  a1 <- &fp[B]
     EAS2(t0, rFP, rOBJ)                    #  t0 <- &fp[A]
-    LOAD64(a2, a3, a1)               #  a2/a3 <- vBB/vBB+1
-    LOAD64(a0, a1, t0)               #  a0/a1 <- vAA/vAA+1
+    LOAD64(a2, a3, a1)               #  a2/a3 <- vB/vB+1
+    LOAD64(a0, a1, t0)               #  a0/a1 <- vA/vA+1
     .if 0
     or        t0, a2, a3             #  second arg (a2-a3) is zero?
     beqz      t0, common_errDivideByZero
@@ -5995,9 +6159,7 @@
     or a0, a0, a2                              #  optional op
     or a1, a1, a3                                 #  result <- op, a0-a3 changed
     GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG64(a0, a1, rOBJ)   #  vAA/vAA+1 <- a0/a1
-    GOTO_OPCODE(t0)                        #  jump to next instruction
-    /* 12-15 instructions */
+    SET_VREG64_GOTO(a0, a1, rOBJ, t0)   #  vA/vA+1 <- a0/a1
 
 
 /* ------------------------------ */
@@ -6009,22 +6171,21 @@
      * Generic 64-bit "/2addr" binary operation.  Provide an "instr" line
      * that specifies an instruction that performs "result = a0-a1 op a2-a3".
      * This could be a MIPS instruction or a function call.  (If the result
-     * comes back in a register other than a0, you can override "result".)
+     * comes back in a register pair other than a0-a1, you can override "result".)
      *
      * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (a1).  Useful for integer division and modulus.
+     * vB (a2-a3).  Useful for integer division and modulus.
      *
      * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
      *      and-long/2addr, or-long/2addr, xor-long/2addr
-     *      rem-double/2addr
      */
     /* binop/2addr vA, vB */
     GET_OPA4(rOBJ)                         #  rOBJ <- A+
     GET_OPB(a1)                            #  a1 <- B
     EAS2(a1, rFP, a1)                      #  a1 <- &fp[B]
     EAS2(t0, rFP, rOBJ)                    #  t0 <- &fp[A]
-    LOAD64(a2, a3, a1)               #  a2/a3 <- vBB/vBB+1
-    LOAD64(a0, a1, t0)               #  a0/a1 <- vAA/vAA+1
+    LOAD64(a2, a3, a1)               #  a2/a3 <- vB/vB+1
+    LOAD64(a0, a1, t0)               #  a0/a1 <- vA/vA+1
     .if 0
     or        t0, a2, a3             #  second arg (a2-a3) is zero?
     beqz      t0, common_errDivideByZero
@@ -6034,9 +6195,7 @@
     xor a0, a0, a2                              #  optional op
     xor a1, a1, a3                                 #  result <- op, a0-a3 changed
     GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG64(a0, a1, rOBJ)   #  vAA/vAA+1 <- a0/a1
-    GOTO_OPCODE(t0)                        #  jump to next instruction
-    /* 12-15 instructions */
+    SET_VREG64_GOTO(a0, a1, rOBJ, t0)   #  vA/vA+1 <- a0/a1
 
 
 /* ------------------------------ */
@@ -6052,7 +6211,7 @@
     GET_OPB(a3)                            #  a3 <- B
     GET_VREG(a2, a3)                       #  a2 <- vB
     EAS2(t2, rFP, rOBJ)                    #  t2 <- &fp[A]
-    LOAD64(a0, a1, t2)                     #  a0/a1 <- vAA/vAA+1
+    LOAD64(a0, a1, t2)                     #  a0/a1 <- vA/vA+1
 
     FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
     GET_INST_OPCODE(t0)                    #  extract opcode from rINST
@@ -6065,7 +6224,7 @@
     srl     a0, v1                         #  alo<- alo >> (32-(shift&31))
     sll     v1, a1, a2                     #  rhi<- ahi << (shift&31)
     or      v1, a0                         #  rhi<- rhi | alo
-    SET_VREG64_GOTO(v0, v1, rOBJ, t0)      #  vAA/vAA+1 <- a0/a1
+    SET_VREG64_GOTO(v0, v1, rOBJ, t0)      #  vA/vA+1 <- v0/v1
 
 /* ------------------------------ */
     .balign 128
@@ -6080,7 +6239,7 @@
     GET_OPB(a3)                            #  a3 <- B
     GET_VREG(a2, a3)                       #  a2 <- vB
     EAS2(t0, rFP, t2)                      #  t0 <- &fp[A]
-    LOAD64(a0, a1, t0)                     #  a0/a1 <- vAA/vAA+1
+    LOAD64(a0, a1, t0)                     #  a0/a1 <- vA/vA+1
     FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
     GET_INST_OPCODE(t0)                    #  extract opcode from rINST
 
@@ -6092,7 +6251,7 @@
     sll     a1, 1
     sll     a1, a0                         #  ahi<- ahi << (32-(shift&31))
     or      v0, a1                         #  rlo<- rlo | ahi
-    SET_VREG64_GOTO(v0, v1, t2, t0)        #  vAA/vAA+1 <- a0/a1
+    SET_VREG64_GOTO(v0, v1, t2, t0)        #  vA/vA+1 <- v0/v1
 
 /* ------------------------------ */
     .balign 128
@@ -6107,7 +6266,7 @@
     GET_OPB(a3)                            #  a3 <- B
     GET_VREG(a2, a3)                       #  a2 <- vB
     EAS2(t0, rFP, t3)                      #  t0 <- &fp[A]
-    LOAD64(a0, a1, t0)                     #  a0/a1 <- vAA/vAA+1
+    LOAD64(a0, a1, t0)                     #  a0/a1 <- vA/vA+1
 
     FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
     GET_INST_OPCODE(t0)                    #  extract opcode from rINST
@@ -6120,7 +6279,7 @@
     sll       a1, 1
     sll       a1, a0                       #  ahi<- ahi << (32-(shift&31))
     or        v0, a1                       #  rlo<- rlo | ahi
-    SET_VREG64_GOTO(v0, v1, t3, t0)        #  vAA/vAA+1 <- a0/a1
+    SET_VREG64_GOTO(v0, v1, t3, t0)        #  vA/vA+1 <- v0/v1
 
 /* ------------------------------ */
     .balign 128
@@ -6129,23 +6288,22 @@
 /* File: mips/fbinop2addr.S */
     /*
      * Generic 32-bit "/2addr" binary operation.  Provide an "instr"
-     * that specifies an instruction that performs "result = a0 op a1".
+     * that specifies an instruction that performs "fv0 = fa0 op fa1".
      * This could be a MIPS instruction or a function call.
      *
      * For: add-float/2addr, sub-float/2addr, mul-float/2addr,
-     * div-float/2addr, rem-float/2addr
+     *      div-float/2addr, rem-float/2addr
      */
     /* binop/2addr vA, vB */
-    GET_OPA4(rOBJ)                         #  t1 <- A+
+    GET_OPA4(rOBJ)                         #  rOBJ <- A+
     GET_OPB(a3)                            #  a3 <- B
     GET_VREG_F(fa0, rOBJ)
     GET_VREG_F(fa1, a3)
     FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
 
     add.s fv0, fa0, fa1
-    SET_VREG_F(fv0, rOBJ)                  #  vAA <- result
     GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    GOTO_OPCODE(t0)                        #  jump to next instruction
+    SET_VREG_F_GOTO(fv0, rOBJ, t0)         #  vA <- result
 
 
 /* ------------------------------ */
@@ -6155,23 +6313,22 @@
 /* File: mips/fbinop2addr.S */
     /*
      * Generic 32-bit "/2addr" binary operation.  Provide an "instr"
-     * that specifies an instruction that performs "result = a0 op a1".
+     * that specifies an instruction that performs "fv0 = fa0 op fa1".
      * This could be a MIPS instruction or a function call.
      *
      * For: add-float/2addr, sub-float/2addr, mul-float/2addr,
-     * div-float/2addr, rem-float/2addr
+     *      div-float/2addr, rem-float/2addr
      */
     /* binop/2addr vA, vB */
-    GET_OPA4(rOBJ)                         #  t1 <- A+
+    GET_OPA4(rOBJ)                         #  rOBJ <- A+
     GET_OPB(a3)                            #  a3 <- B
     GET_VREG_F(fa0, rOBJ)
     GET_VREG_F(fa1, a3)
     FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
 
     sub.s fv0, fa0, fa1
-    SET_VREG_F(fv0, rOBJ)                  #  vAA <- result
     GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    GOTO_OPCODE(t0)                        #  jump to next instruction
+    SET_VREG_F_GOTO(fv0, rOBJ, t0)         #  vA <- result
 
 
 /* ------------------------------ */
@@ -6181,23 +6338,22 @@
 /* File: mips/fbinop2addr.S */
     /*
      * Generic 32-bit "/2addr" binary operation.  Provide an "instr"
-     * that specifies an instruction that performs "result = a0 op a1".
+     * that specifies an instruction that performs "fv0 = fa0 op fa1".
      * This could be a MIPS instruction or a function call.
      *
      * For: add-float/2addr, sub-float/2addr, mul-float/2addr,
-     * div-float/2addr, rem-float/2addr
+     *      div-float/2addr, rem-float/2addr
      */
     /* binop/2addr vA, vB */
-    GET_OPA4(rOBJ)                         #  t1 <- A+
+    GET_OPA4(rOBJ)                         #  rOBJ <- A+
     GET_OPB(a3)                            #  a3 <- B
     GET_VREG_F(fa0, rOBJ)
     GET_VREG_F(fa1, a3)
     FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
 
     mul.s fv0, fa0, fa1
-    SET_VREG_F(fv0, rOBJ)                  #  vAA <- result
     GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    GOTO_OPCODE(t0)                        #  jump to next instruction
+    SET_VREG_F_GOTO(fv0, rOBJ, t0)         #  vA <- result
 
 
 /* ------------------------------ */
@@ -6207,23 +6363,22 @@
 /* File: mips/fbinop2addr.S */
     /*
      * Generic 32-bit "/2addr" binary operation.  Provide an "instr"
-     * that specifies an instruction that performs "result = a0 op a1".
+     * that specifies an instruction that performs "fv0 = fa0 op fa1".
      * This could be a MIPS instruction or a function call.
      *
      * For: add-float/2addr, sub-float/2addr, mul-float/2addr,
-     * div-float/2addr, rem-float/2addr
+     *      div-float/2addr, rem-float/2addr
      */
     /* binop/2addr vA, vB */
-    GET_OPA4(rOBJ)                         #  t1 <- A+
+    GET_OPA4(rOBJ)                         #  rOBJ <- A+
     GET_OPB(a3)                            #  a3 <- B
     GET_VREG_F(fa0, rOBJ)
     GET_VREG_F(fa1, a3)
     FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
 
     div.s fv0, fa0, fa1
-    SET_VREG_F(fv0, rOBJ)                  #  vAA <- result
     GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    GOTO_OPCODE(t0)                        #  jump to next instruction
+    SET_VREG_F_GOTO(fv0, rOBJ, t0)         #  vA <- result
 
 
 /* ------------------------------ */
@@ -6233,23 +6388,22 @@
 /* File: mips/fbinop2addr.S */
     /*
      * Generic 32-bit "/2addr" binary operation.  Provide an "instr"
-     * that specifies an instruction that performs "result = a0 op a1".
+     * that specifies an instruction that performs "fv0 = fa0 op fa1".
      * This could be a MIPS instruction or a function call.
      *
      * For: add-float/2addr, sub-float/2addr, mul-float/2addr,
-     * div-float/2addr, rem-float/2addr
+     *      div-float/2addr, rem-float/2addr
      */
     /* binop/2addr vA, vB */
-    GET_OPA4(rOBJ)                         #  t1 <- A+
+    GET_OPA4(rOBJ)                         #  rOBJ <- A+
     GET_OPB(a3)                            #  a3 <- B
     GET_VREG_F(fa0, rOBJ)
     GET_VREG_F(fa1, a3)
     FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
 
     JAL(fmodf)
-    SET_VREG_F(fv0, rOBJ)                  #  vAA <- result
     GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    GOTO_OPCODE(t0)                        #  jump to next instruction
+    SET_VREG_F_GOTO(fv0, rOBJ, t0)         #  vA <- result
 
 
 /* ------------------------------ */
@@ -6258,12 +6412,13 @@
 /* File: mips/op_add_double_2addr.S */
 /* File: mips/fbinopWide2addr.S */
     /*
-     * Generic 64-bit "/2addr" binary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = a0-a1 op a2-a3".
+     * Generic 64-bit floating-point "/2addr" binary operation.
+     * Provide an "instr" line that specifies an instruction that
+     * performs "fv0 = fa0 op fa1".
      * This could be a MIPS instruction or a function call.
      *
      * For: add-double/2addr, sub-double/2addr, mul-double/2addr,
-     *  div-double/2addr, rem-double/2addr
+     *      div-double/2addr, rem-double/2addr
      */
     /* binop/2addr vA, vB */
     GET_OPA4(rOBJ)                         #  rOBJ <- A+
@@ -6275,9 +6430,8 @@
 
     FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
     add.d fv0, fa0, fa1
-    SET_VREG64_F(fv0, fv0f, rOBJ)
     GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    GOTO_OPCODE(t0)                        #  jump to next instruction
+    SET_VREG64_F_GOTO(fv0, fv0f, rOBJ, t0)  #  vA/vA+1 <- fv0
 
 
 /* ------------------------------ */
@@ -6286,12 +6440,13 @@
 /* File: mips/op_sub_double_2addr.S */
 /* File: mips/fbinopWide2addr.S */
     /*
-     * Generic 64-bit "/2addr" binary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = a0-a1 op a2-a3".
+     * Generic 64-bit floating-point "/2addr" binary operation.
+     * Provide an "instr" line that specifies an instruction that
+     * performs "fv0 = fa0 op fa1".
      * This could be a MIPS instruction or a function call.
      *
      * For: add-double/2addr, sub-double/2addr, mul-double/2addr,
-     *  div-double/2addr, rem-double/2addr
+     *      div-double/2addr, rem-double/2addr
      */
     /* binop/2addr vA, vB */
     GET_OPA4(rOBJ)                         #  rOBJ <- A+
@@ -6303,9 +6458,8 @@
 
     FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
     sub.d fv0, fa0, fa1
-    SET_VREG64_F(fv0, fv0f, rOBJ)
     GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    GOTO_OPCODE(t0)                        #  jump to next instruction
+    SET_VREG64_F_GOTO(fv0, fv0f, rOBJ, t0)  #  vA/vA+1 <- fv0
 
 
 /* ------------------------------ */
@@ -6314,12 +6468,13 @@
 /* File: mips/op_mul_double_2addr.S */
 /* File: mips/fbinopWide2addr.S */
     /*
-     * Generic 64-bit "/2addr" binary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = a0-a1 op a2-a3".
+     * Generic 64-bit floating-point "/2addr" binary operation.
+     * Provide an "instr" line that specifies an instruction that
+     * performs "fv0 = fa0 op fa1".
      * This could be a MIPS instruction or a function call.
      *
      * For: add-double/2addr, sub-double/2addr, mul-double/2addr,
-     *  div-double/2addr, rem-double/2addr
+     *      div-double/2addr, rem-double/2addr
      */
     /* binop/2addr vA, vB */
     GET_OPA4(rOBJ)                         #  rOBJ <- A+
@@ -6331,9 +6486,8 @@
 
     FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
     mul.d fv0, fa0, fa1
-    SET_VREG64_F(fv0, fv0f, rOBJ)
     GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    GOTO_OPCODE(t0)                        #  jump to next instruction
+    SET_VREG64_F_GOTO(fv0, fv0f, rOBJ, t0)  #  vA/vA+1 <- fv0
 
 
 /* ------------------------------ */
@@ -6342,12 +6496,13 @@
 /* File: mips/op_div_double_2addr.S */
 /* File: mips/fbinopWide2addr.S */
     /*
-     * Generic 64-bit "/2addr" binary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = a0-a1 op a2-a3".
+     * Generic 64-bit floating-point "/2addr" binary operation.
+     * Provide an "instr" line that specifies an instruction that
+     * performs "fv0 = fa0 op fa1".
      * This could be a MIPS instruction or a function call.
      *
      * For: add-double/2addr, sub-double/2addr, mul-double/2addr,
-     *  div-double/2addr, rem-double/2addr
+     *      div-double/2addr, rem-double/2addr
      */
     /* binop/2addr vA, vB */
     GET_OPA4(rOBJ)                         #  rOBJ <- A+
@@ -6359,9 +6514,8 @@
 
     FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
     div.d fv0, fa0, fa1
-    SET_VREG64_F(fv0, fv0f, rOBJ)
     GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    GOTO_OPCODE(t0)                        #  jump to next instruction
+    SET_VREG64_F_GOTO(fv0, fv0f, rOBJ, t0)  #  vA/vA+1 <- fv0
 
 
 /* ------------------------------ */
@@ -6370,12 +6524,13 @@
 /* File: mips/op_rem_double_2addr.S */
 /* File: mips/fbinopWide2addr.S */
     /*
-     * Generic 64-bit "/2addr" binary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = a0-a1 op a2-a3".
+     * Generic 64-bit floating-point "/2addr" binary operation.
+     * Provide an "instr" line that specifies an instruction that
+     * performs "fv0 = fa0 op fa1".
      * This could be a MIPS instruction or a function call.
      *
      * For: add-double/2addr, sub-double/2addr, mul-double/2addr,
-     *  div-double/2addr, rem-double/2addr
+     *      div-double/2addr, rem-double/2addr
      */
     /* binop/2addr vA, vB */
     GET_OPA4(rOBJ)                         #  rOBJ <- A+
@@ -6387,9 +6542,8 @@
 
     FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
     JAL(fmod)
-    SET_VREG64_F(fv0, fv0f, rOBJ)
     GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    GOTO_OPCODE(t0)                        #  jump to next instruction
+    SET_VREG64_F_GOTO(fv0, fv0f, rOBJ, t0)  #  vA/vA+1 <- fv0
 
 
 /* ------------------------------ */
@@ -6409,12 +6563,11 @@
      * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
      *      rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
      */
-    # binop/lit16 vA, vB,                  /* +CCCC */
+    /* binop/lit16 vA, vB, +CCCC */
     FETCH_S(a1, 1)                         #  a1 <- ssssCCCC (sign-extended)
     GET_OPB(a2)                            #  a2 <- B
-    GET_OPA(rOBJ)                          #  rOBJ <- A+
+    GET_OPA4(rOBJ)                         #  rOBJ <- A+
     GET_VREG(a0, a2)                       #  a0 <- vB
-    and       rOBJ, rOBJ, 15
     .if 0
     # cmp a1, 0; is second operand zero?
     beqz      a1, common_errDivideByZero
@@ -6424,8 +6577,7 @@
                                   #  optional op
     addu a0, a0, a1                                 #  a0 <- op, a0-a3 changed
     GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG_GOTO(a0, rOBJ, t0)       #  vAA <- a0
-    /* 10-13 instructions */
+    SET_VREG_GOTO(a0, rOBJ, t0)       #  vA <- a0
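In the binop/lit16 handlers, GET_OPA followed by an explicit `and rOBJ, rOBJ, 15` is replaced by GET_OPA4, which extracts only the 4-bit A field and makes the separate mask redundant. Assuming the usual layout of the first Dalvik code unit (opcode in the low byte, A in bits 8-11, B in bits 12-15), the two extractors correspond roughly to the following illustrative helpers (not the real assembler macros):

    #include <stdint.h>

    static inline unsigned get_opa(uint16_t inst)  { return (inst >> 8) & 0xffu; } /* AA: whole high byte */
    static inline unsigned get_opa4(uint16_t inst) { return (inst >> 8) & 0x0fu; } /* A: its low 4 bits   */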
 
 
 /* ------------------------------ */
@@ -6446,12 +6598,11 @@
      * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
      *      rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
      */
-    # binop/lit16 vA, vB,                  /* +CCCC */
+    /* binop/lit16 vA, vB, +CCCC */
     FETCH_S(a1, 1)                         #  a1 <- ssssCCCC (sign-extended)
     GET_OPB(a2)                            #  a2 <- B
-    GET_OPA(rOBJ)                          #  rOBJ <- A+
+    GET_OPA4(rOBJ)                         #  rOBJ <- A+
     GET_VREG(a0, a2)                       #  a0 <- vB
-    and       rOBJ, rOBJ, 15
     .if 0
     # cmp a1, 0; is second operand zero?
     beqz      a1, common_errDivideByZero
@@ -6461,8 +6612,7 @@
                                   #  optional op
     subu a0, a1, a0                                 #  a0 <- op, a0-a3 changed
     GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG_GOTO(a0, rOBJ, t0)       #  vAA <- a0
-    /* 10-13 instructions */
+    SET_VREG_GOTO(a0, rOBJ, t0)       #  vA <- a0
 
 
 /* ------------------------------ */
@@ -6482,12 +6632,11 @@
      * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
      *      rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
      */
-    # binop/lit16 vA, vB,                  /* +CCCC */
+    /* binop/lit16 vA, vB, +CCCC */
     FETCH_S(a1, 1)                         #  a1 <- ssssCCCC (sign-extended)
     GET_OPB(a2)                            #  a2 <- B
-    GET_OPA(rOBJ)                          #  rOBJ <- A+
+    GET_OPA4(rOBJ)                         #  rOBJ <- A+
     GET_VREG(a0, a2)                       #  a0 <- vB
-    and       rOBJ, rOBJ, 15
     .if 0
     # cmp a1, 0; is second operand zero?
     beqz      a1, common_errDivideByZero
@@ -6497,8 +6646,7 @@
                                   #  optional op
     mul a0, a0, a1                                 #  a0 <- op, a0-a3 changed
     GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG_GOTO(a0, rOBJ, t0)       #  vAA <- a0
-    /* 10-13 instructions */
+    SET_VREG_GOTO(a0, rOBJ, t0)       #  vA <- a0
 
 
 /* ------------------------------ */
@@ -6519,12 +6667,11 @@
      * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
      *      rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
      */
-    # binop/lit16 vA, vB,                  /* +CCCC */
+    /* binop/lit16 vA, vB, +CCCC */
     FETCH_S(a1, 1)                         #  a1 <- ssssCCCC (sign-extended)
     GET_OPB(a2)                            #  a2 <- B
-    GET_OPA(rOBJ)                          #  rOBJ <- A+
+    GET_OPA4(rOBJ)                         #  rOBJ <- A+
     GET_VREG(a0, a2)                       #  a0 <- vB
-    and       rOBJ, rOBJ, 15
     .if 1
     # cmp a1, 0; is second operand zero?
     beqz      a1, common_errDivideByZero
@@ -6534,8 +6681,7 @@
                                   #  optional op
     div a0, a0, a1                                 #  a0 <- op, a0-a3 changed
     GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG_GOTO(a0, rOBJ, t0)       #  vAA <- a0
-    /* 10-13 instructions */
+    SET_VREG_GOTO(a0, rOBJ, t0)       #  vA <- a0
 
 #else
 /* File: mips/binopLit16.S */
@@ -6551,12 +6697,11 @@
      * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
      *      rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
      */
-    # binop/lit16 vA, vB,                  /* +CCCC */
+    /* binop/lit16 vA, vB, +CCCC */
     FETCH_S(a1, 1)                         #  a1 <- ssssCCCC (sign-extended)
     GET_OPB(a2)                            #  a2 <- B
-    GET_OPA(rOBJ)                          #  rOBJ <- A+
+    GET_OPA4(rOBJ)                         #  rOBJ <- A+
     GET_VREG(a0, a2)                       #  a0 <- vB
-    and       rOBJ, rOBJ, 15
     .if 1
     # cmp a1, 0; is second operand zero?
     beqz      a1, common_errDivideByZero
@@ -6566,8 +6711,7 @@
     div zero, a0, a1                              #  optional op
     mflo a0                                 #  a0 <- op, a0-a3 changed
     GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG_GOTO(a0, rOBJ, t0)       #  vAA <- a0
-    /* 10-13 instructions */
+    SET_VREG_GOTO(a0, rOBJ, t0)       #  vA <- a0
 
 #endif
 
@@ -6589,12 +6733,11 @@
      * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
      *      rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
      */
-    # binop/lit16 vA, vB,                  /* +CCCC */
+    /* binop/lit16 vA, vB, +CCCC */
     FETCH_S(a1, 1)                         #  a1 <- ssssCCCC (sign-extended)
     GET_OPB(a2)                            #  a2 <- B
-    GET_OPA(rOBJ)                          #  rOBJ <- A+
+    GET_OPA4(rOBJ)                         #  rOBJ <- A+
     GET_VREG(a0, a2)                       #  a0 <- vB
-    and       rOBJ, rOBJ, 15
     .if 1
     # cmp a1, 0; is second operand zero?
     beqz      a1, common_errDivideByZero
@@ -6604,8 +6747,7 @@
                                   #  optional op
     mod a0, a0, a1                                 #  a0 <- op, a0-a3 changed
     GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG_GOTO(a0, rOBJ, t0)       #  vAA <- a0
-    /* 10-13 instructions */
+    SET_VREG_GOTO(a0, rOBJ, t0)       #  vA <- a0
 
 #else
 /* File: mips/binopLit16.S */
@@ -6621,12 +6763,11 @@
      * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
      *      rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
      */
-    # binop/lit16 vA, vB,                  /* +CCCC */
+    /* binop/lit16 vA, vB, +CCCC */
     FETCH_S(a1, 1)                         #  a1 <- ssssCCCC (sign-extended)
     GET_OPB(a2)                            #  a2 <- B
-    GET_OPA(rOBJ)                          #  rOBJ <- A+
+    GET_OPA4(rOBJ)                         #  rOBJ <- A+
     GET_VREG(a0, a2)                       #  a0 <- vB
-    and       rOBJ, rOBJ, 15
     .if 1
     # cmp a1, 0; is second operand zero?
     beqz      a1, common_errDivideByZero
@@ -6636,8 +6777,7 @@
     div zero, a0, a1                              #  optional op
     mfhi a0                                 #  a0 <- op, a0-a3 changed
     GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG_GOTO(a0, rOBJ, t0)       #  vAA <- a0
-    /* 10-13 instructions */
+    SET_VREG_GOTO(a0, rOBJ, t0)       #  vA <- a0
 
 #endif
 
@@ -6658,12 +6798,11 @@
      * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
      *      rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
      */
-    # binop/lit16 vA, vB,                  /* +CCCC */
+    /* binop/lit16 vA, vB, +CCCC */
     FETCH_S(a1, 1)                         #  a1 <- ssssCCCC (sign-extended)
     GET_OPB(a2)                            #  a2 <- B
-    GET_OPA(rOBJ)                          #  rOBJ <- A+
+    GET_OPA4(rOBJ)                         #  rOBJ <- A+
     GET_VREG(a0, a2)                       #  a0 <- vB
-    and       rOBJ, rOBJ, 15
     .if 0
     # cmp a1, 0; is second operand zero?
     beqz      a1, common_errDivideByZero
@@ -6673,8 +6812,7 @@
                                   #  optional op
     and a0, a0, a1                                 #  a0 <- op, a0-a3 changed
     GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG_GOTO(a0, rOBJ, t0)       #  vAA <- a0
-    /* 10-13 instructions */
+    SET_VREG_GOTO(a0, rOBJ, t0)       #  vA <- a0
 
 
 /* ------------------------------ */
@@ -6694,12 +6832,11 @@
      * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
      *      rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
      */
-    # binop/lit16 vA, vB,                  /* +CCCC */
+    /* binop/lit16 vA, vB, +CCCC */
     FETCH_S(a1, 1)                         #  a1 <- ssssCCCC (sign-extended)
     GET_OPB(a2)                            #  a2 <- B
-    GET_OPA(rOBJ)                          #  rOBJ <- A+
+    GET_OPA4(rOBJ)                         #  rOBJ <- A+
     GET_VREG(a0, a2)                       #  a0 <- vB
-    and       rOBJ, rOBJ, 15
     .if 0
     # cmp a1, 0; is second operand zero?
     beqz      a1, common_errDivideByZero
@@ -6709,8 +6846,7 @@
                                   #  optional op
     or a0, a0, a1                                 #  a0 <- op, a0-a3 changed
     GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG_GOTO(a0, rOBJ, t0)       #  vAA <- a0
-    /* 10-13 instructions */
+    SET_VREG_GOTO(a0, rOBJ, t0)       #  vA <- a0
 
 
 /* ------------------------------ */
@@ -6730,12 +6866,11 @@
      * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
      *      rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
      */
-    # binop/lit16 vA, vB,                  /* +CCCC */
+    /* binop/lit16 vA, vB, +CCCC */
     FETCH_S(a1, 1)                         #  a1 <- ssssCCCC (sign-extended)
     GET_OPB(a2)                            #  a2 <- B
-    GET_OPA(rOBJ)                          #  rOBJ <- A+
+    GET_OPA4(rOBJ)                         #  rOBJ <- A+
     GET_VREG(a0, a2)                       #  a0 <- vB
-    and       rOBJ, rOBJ, 15
     .if 0
     # cmp a1, 0; is second operand zero?
     beqz      a1, common_errDivideByZero
@@ -6745,8 +6880,7 @@
                                   #  optional op
     xor a0, a0, a1                                 #  a0 <- op, a0-a3 changed
     GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG_GOTO(a0, rOBJ, t0)       #  vAA <- a0
-    /* 10-13 instructions */
+    SET_VREG_GOTO(a0, rOBJ, t0)       #  vA <- a0
 
 
 /* ------------------------------ */
@@ -6767,7 +6901,7 @@
      *      rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
      *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
      */
-    # binop/lit8 vAA, vBB,                 /* +CC */
+    /* binop/lit8 vAA, vBB, +CC */
     FETCH_S(a3, 1)                         #  a3 <- ssssCCBB (sign-extended for CC)
     GET_OPA(rOBJ)                          #  rOBJ <- AA
     and       a2, a3, 255                  #  a2 <- BB
@@ -6783,7 +6917,6 @@
     addu a0, a0, a1                                 #  a0 <- op, a0-a3 changed
     GET_INST_OPCODE(t0)                    #  extract opcode from rINST
     SET_VREG_GOTO(a0, rOBJ, t0)       #  vAA <- a0
-    /* 10-12 instructions */
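The binop/lit8 (22b) handlers fetch the second code unit as ssssCCBB and split it: the low byte is the source register index BB (the visible `and a2, a3, 255`), while the high byte is the signed literal CC, which FETCH_S keeps sign-extended. A small C sketch of that unpacking, under the same assumption about the unit layout:

    #include <stdint.h>

    static inline unsigned get_bb(uint16_t unit) { return unit & 0xffu; }        /* BB: register index */
    static inline int      get_cc(uint16_t unit) { return (int8_t)(unit >> 8); } /* CC: signed literal */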
 
 
 /* ------------------------------ */
@@ -6804,7 +6937,7 @@
      *      rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
      *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
      */
-    # binop/lit8 vAA, vBB,                 /* +CC */
+    /* binop/lit8 vAA, vBB, +CC */
     FETCH_S(a3, 1)                         #  a3 <- ssssCCBB (sign-extended for CC)
     GET_OPA(rOBJ)                          #  rOBJ <- AA
     and       a2, a3, 255                  #  a2 <- BB
@@ -6820,7 +6953,6 @@
     subu a0, a1, a0                                 #  a0 <- op, a0-a3 changed
     GET_INST_OPCODE(t0)                    #  extract opcode from rINST
     SET_VREG_GOTO(a0, rOBJ, t0)       #  vAA <- a0
-    /* 10-12 instructions */
 
 
 /* ------------------------------ */
@@ -6841,7 +6973,7 @@
      *      rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
      *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
      */
-    # binop/lit8 vAA, vBB,                 /* +CC */
+    /* binop/lit8 vAA, vBB, +CC */
     FETCH_S(a3, 1)                         #  a3 <- ssssCCBB (sign-extended for CC)
     GET_OPA(rOBJ)                          #  rOBJ <- AA
     and       a2, a3, 255                  #  a2 <- BB
@@ -6857,7 +6989,6 @@
     mul a0, a0, a1                                 #  a0 <- op, a0-a3 changed
     GET_INST_OPCODE(t0)                    #  extract opcode from rINST
     SET_VREG_GOTO(a0, rOBJ, t0)       #  vAA <- a0
-    /* 10-12 instructions */
 
 
 /* ------------------------------ */
@@ -6879,7 +7010,7 @@
      *      rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
      *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
      */
-    # binop/lit8 vAA, vBB,                 /* +CC */
+    /* binop/lit8 vAA, vBB, +CC */
     FETCH_S(a3, 1)                         #  a3 <- ssssCCBB (sign-extended for CC)
     GET_OPA(rOBJ)                          #  rOBJ <- AA
     and       a2, a3, 255                  #  a2 <- BB
@@ -6895,7 +7026,6 @@
     div a0, a0, a1                                 #  a0 <- op, a0-a3 changed
     GET_INST_OPCODE(t0)                    #  extract opcode from rINST
     SET_VREG_GOTO(a0, rOBJ, t0)       #  vAA <- a0
-    /* 10-12 instructions */
 
 #else
 /* File: mips/binopLit8.S */
@@ -6912,7 +7042,7 @@
      *      rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
      *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
      */
-    # binop/lit8 vAA, vBB,                 /* +CC */
+    /* binop/lit8 vAA, vBB, +CC */
     FETCH_S(a3, 1)                         #  a3 <- ssssCCBB (sign-extended for CC)
     GET_OPA(rOBJ)                          #  rOBJ <- AA
     and       a2, a3, 255                  #  a2 <- BB
@@ -6928,7 +7058,6 @@
     mflo a0                                 #  a0 <- op, a0-a3 changed
     GET_INST_OPCODE(t0)                    #  extract opcode from rINST
     SET_VREG_GOTO(a0, rOBJ, t0)       #  vAA <- a0
-    /* 10-12 instructions */
 
 #endif
 
@@ -6951,7 +7080,7 @@
      *      rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
      *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
      */
-    # binop/lit8 vAA, vBB,                 /* +CC */
+    /* binop/lit8 vAA, vBB, +CC */
     FETCH_S(a3, 1)                         #  a3 <- ssssCCBB (sign-extended for CC)
     GET_OPA(rOBJ)                          #  rOBJ <- AA
     and       a2, a3, 255                  #  a2 <- BB
@@ -6967,7 +7096,6 @@
     mod a0, a0, a1                                 #  a0 <- op, a0-a3 changed
     GET_INST_OPCODE(t0)                    #  extract opcode from rINST
     SET_VREG_GOTO(a0, rOBJ, t0)       #  vAA <- a0
-    /* 10-12 instructions */
 
 #else
 /* File: mips/binopLit8.S */
@@ -6984,7 +7112,7 @@
      *      rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
      *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
      */
-    # binop/lit8 vAA, vBB,                 /* +CC */
+    /* binop/lit8 vAA, vBB, +CC */
     FETCH_S(a3, 1)                         #  a3 <- ssssCCBB (sign-extended for CC)
     GET_OPA(rOBJ)                          #  rOBJ <- AA
     and       a2, a3, 255                  #  a2 <- BB
@@ -7000,7 +7128,6 @@
     mfhi a0                                 #  a0 <- op, a0-a3 changed
     GET_INST_OPCODE(t0)                    #  extract opcode from rINST
     SET_VREG_GOTO(a0, rOBJ, t0)       #  vAA <- a0
-    /* 10-12 instructions */
 
 #endif
 
@@ -7022,7 +7149,7 @@
      *      rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
      *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
      */
-    # binop/lit8 vAA, vBB,                 /* +CC */
+    /* binop/lit8 vAA, vBB, +CC */
     FETCH_S(a3, 1)                         #  a3 <- ssssCCBB (sign-extended for CC)
     GET_OPA(rOBJ)                          #  rOBJ <- AA
     and       a2, a3, 255                  #  a2 <- BB
@@ -7038,7 +7165,6 @@
     and a0, a0, a1                                 #  a0 <- op, a0-a3 changed
     GET_INST_OPCODE(t0)                    #  extract opcode from rINST
     SET_VREG_GOTO(a0, rOBJ, t0)       #  vAA <- a0
-    /* 10-12 instructions */
 
 
 /* ------------------------------ */
@@ -7059,7 +7185,7 @@
      *      rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
      *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
      */
-    # binop/lit8 vAA, vBB,                 /* +CC */
+    /* binop/lit8 vAA, vBB, +CC */
     FETCH_S(a3, 1)                         #  a3 <- ssssCCBB (sign-extended for CC)
     GET_OPA(rOBJ)                          #  rOBJ <- AA
     and       a2, a3, 255                  #  a2 <- BB
@@ -7075,7 +7201,6 @@
     or a0, a0, a1                                 #  a0 <- op, a0-a3 changed
     GET_INST_OPCODE(t0)                    #  extract opcode from rINST
     SET_VREG_GOTO(a0, rOBJ, t0)       #  vAA <- a0
-    /* 10-12 instructions */
 
 
 /* ------------------------------ */
@@ -7096,7 +7221,7 @@
      *      rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
      *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
      */
-    # binop/lit8 vAA, vBB,                 /* +CC */
+    /* binop/lit8 vAA, vBB, +CC */
     FETCH_S(a3, 1)                         #  a3 <- ssssCCBB (sign-extended for CC)
     GET_OPA(rOBJ)                          #  rOBJ <- AA
     and       a2, a3, 255                  #  a2 <- BB
@@ -7112,7 +7237,6 @@
     xor a0, a0, a1                                 #  a0 <- op, a0-a3 changed
     GET_INST_OPCODE(t0)                    #  extract opcode from rINST
     SET_VREG_GOTO(a0, rOBJ, t0)       #  vAA <- a0
-    /* 10-12 instructions */
 
 
 /* ------------------------------ */
@@ -7133,7 +7257,7 @@
      *      rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
      *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
      */
-    # binop/lit8 vAA, vBB,                 /* +CC */
+    /* binop/lit8 vAA, vBB, +CC */
     FETCH_S(a3, 1)                         #  a3 <- ssssCCBB (sign-extended for CC)
     GET_OPA(rOBJ)                          #  rOBJ <- AA
     and       a2, a3, 255                  #  a2 <- BB
@@ -7149,7 +7273,6 @@
     sll a0, a0, a1                                 #  a0 <- op, a0-a3 changed
     GET_INST_OPCODE(t0)                    #  extract opcode from rINST
     SET_VREG_GOTO(a0, rOBJ, t0)       #  vAA <- a0
-    /* 10-12 instructions */
 
 
 /* ------------------------------ */
@@ -7170,7 +7293,7 @@
      *      rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
      *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
      */
-    # binop/lit8 vAA, vBB,                 /* +CC */
+    /* binop/lit8 vAA, vBB, +CC */
     FETCH_S(a3, 1)                         #  a3 <- ssssCCBB (sign-extended for CC)
     GET_OPA(rOBJ)                          #  rOBJ <- AA
     and       a2, a3, 255                  #  a2 <- BB
@@ -7186,7 +7309,6 @@
     sra a0, a0, a1                                 #  a0 <- op, a0-a3 changed
     GET_INST_OPCODE(t0)                    #  extract opcode from rINST
     SET_VREG_GOTO(a0, rOBJ, t0)       #  vAA <- a0
-    /* 10-12 instructions */
 
 
 /* ------------------------------ */
@@ -7207,7 +7329,7 @@
      *      rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
      *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
      */
-    # binop/lit8 vAA, vBB,                 /* +CC */
+    /* binop/lit8 vAA, vBB, +CC */
     FETCH_S(a3, 1)                         #  a3 <- ssssCCBB (sign-extended for CC)
     GET_OPA(rOBJ)                          #  rOBJ <- AA
     and       a2, a3, 255                  #  a2 <- BB
@@ -7223,7 +7345,6 @@
     srl a0, a0, a1                                 #  a0 <- op, a0-a3 changed
     GET_INST_OPCODE(t0)                    #  extract opcode from rINST
     SET_VREG_GOTO(a0, rOBJ, t0)       #  vAA <- a0
-    /* 10-12 instructions */
 
 
 /* ------------------------------ */
@@ -7231,7 +7352,7 @@
 .L_op_iget_quick: /* 0xe3 */
 /* File: mips/op_iget_quick.S */
     /* For: iget-quick, iget-boolean-quick, iget-byte-quick, iget-char-quick, iget-short-quick */
-    # op vA, vB, offset                    /* CCCC */
+    /* op vA, vB, offset@CCCC */
     GET_OPB(a2)                            #  a2 <- B
     GET_VREG(a3, a2)                       #  a3 <- object we're operating on
     FETCH(a1, 1)                           #  a1 <- field byte offset
@@ -7248,7 +7369,7 @@
     .balign 128
 .L_op_iget_wide_quick: /* 0xe4 */
 /* File: mips/op_iget_wide_quick.S */
-    # iget-wide-quick vA, vB, offset       /* CCCC */
+    /* iget-wide-quick vA, vB, offset@CCCC */
     GET_OPB(a2)                            #  a2 <- B
     GET_VREG(a3, a2)                       #  a3 <- object we're operating on
     FETCH(a1, 1)                           #  a1 <- field byte offset
@@ -7259,8 +7380,7 @@
     LOAD64(a0, a1, t0)                     #  a0 <- obj.field (64 bits, aligned)
     FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
     GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG64(a0, a1, a2)                 #  fp[A] <- a0/a1
-    GOTO_OPCODE(t0)                        #  jump to next instruction
+    SET_VREG64_GOTO(a0, a1, a2, t0)        #  fp[A] <- a0/a1
 
 /* ------------------------------ */
     .balign 128
@@ -7277,17 +7397,16 @@
     GET_OPA4(a2)                           #  a2<- A+
     PREFETCH_INST(2)                       #  load rINST
     bnez a3, MterpPossibleException        #  bail out
-    SET_VREG_OBJECT(v0, a2)                #  fp[A] <- v0
     ADVANCE(2)                             #  advance rPC
     GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    GOTO_OPCODE(t0)                        #  jump to next instruction
+    SET_VREG_OBJECT_GOTO(v0, a2, t0)       #  fp[A] <- v0
 
 /* ------------------------------ */
     .balign 128
 .L_op_iput_quick: /* 0xe6 */
 /* File: mips/op_iput_quick.S */
     /* For: iput-quick, iput-object-quick */
-    # op vA, vB, offset                    /* CCCC */
+    /* op vA, vB, offset@CCCC */
     GET_OPB(a2)                            #  a2 <- B
     GET_VREG(a3, a2)                       #  a3 <- fp[B], the object pointer
     FETCH(a1, 1)                           #  a1 <- field byte offset
@@ -7296,15 +7415,16 @@
     GET_VREG(a0, a2)                       #  a0 <- fp[A]
     FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
     addu      t0, a3, a1
+    GET_INST_OPCODE(t1)                    #  extract opcode from rINST
+    GET_OPCODE_TARGET(t1)
     sw    a0, 0(t0)                    #  obj.field (8/16/32 bits) <- a0
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    GOTO_OPCODE(t0)                        #  jump to next instruction
+    JR(t1)                                 #  jump to next instruction
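In the iput-*-quick handlers the dispatch target is now computed into t1 (GET_INST_OPCODE then GET_OPCODE_TARGET) before the field store, so t0 stays free for the field address and the store lands right next to the final JR, presumably so it can be scheduled into the jump's delay slot. Assuming handlers sit at a fixed 128-byte stride from the handler base, as the `.balign 128` directives suggest, the target computation is roughly (illustrative, not the real macro):

    #include <stdint.h>

    static inline uintptr_t opcode_target(uintptr_t ibase, unsigned opcode) {
        return ibase + opcode * 128u;   /* rIBASE + (opcode << 7) */
    }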
 
 /* ------------------------------ */
     .balign 128
 .L_op_iput_wide_quick: /* 0xe7 */
 /* File: mips/op_iput_wide_quick.S */
-    # iput-wide-quick vA, vB, offset       /* CCCC */
+    /* iput-wide-quick vA, vB, offset@CCCC */
     GET_OPA4(a0)                           #  a0 <- A(+)
     GET_OPB(a1)                            #  a1 <- B
     GET_VREG(a2, a1)                       #  a2 <- fp[B], the object pointer
@@ -7315,16 +7435,17 @@
     FETCH(a3, 1)                           #  a3 <- field byte offset
     FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
     addu      a2, a2, a3                   #  a2 <- &obj.field (64 bits, aligned)
-    STORE64(a0, a1, a2)                    #  obj.field (64 bits, aligned) <- a0/a1
     GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    GOTO_OPCODE(t0)                        #  jump to next instruction
+    GET_OPCODE_TARGET(t0)
+    STORE64(a0, a1, a2)                    #  obj.field (64 bits, aligned) <- a0/a1
+    JR(t0)                                 #  jump to next instruction
 
 /* ------------------------------ */
     .balign 128
 .L_op_iput_object_quick: /* 0xe8 */
 /* File: mips/op_iput_object_quick.S */
     /* For: iput-object-quick */
-    # op vA, vB, offset                 /* CCCC */
+    /* op vA, vB, offset@CCCC */
     EXPORT_PC()
     addu   a0, rFP, OFF_FP_SHADOWFRAME
     move   a1, rPC
@@ -7343,8 +7464,8 @@
     /*
      * Generic invoke handler wrapper.
      */
-    # op vB, {vD, vE, vF, vG, vA}, class   /* CCCC */
-    # op {vCCCC..v(CCCC+AA-1)}, meth       /* BBBB */
+    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
     .extern MterpInvokeVirtualQuick
     EXPORT_PC()
     move    a0, rSELF
@@ -7368,8 +7489,8 @@
     /*
      * Generic invoke handler wrapper.
      */
-    # op vB, {vD, vE, vF, vG, vA}, class   /* CCCC */
-    # op {vCCCC..v(CCCC+AA-1)}, meth       /* BBBB */
+    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
     .extern MterpInvokeVirtualQuickRange
     EXPORT_PC()
     move    a0, rSELF
@@ -7391,7 +7512,7 @@
 /* File: mips/op_iput_boolean_quick.S */
 /* File: mips/op_iput_quick.S */
     /* For: iput-quick, iput-object-quick */
-    # op vA, vB, offset                    /* CCCC */
+    /* op vA, vB, offset@CCCC */
     GET_OPB(a2)                            #  a2 <- B
     GET_VREG(a3, a2)                       #  a3 <- fp[B], the object pointer
     FETCH(a1, 1)                           #  a1 <- field byte offset
@@ -7400,9 +7521,10 @@
     GET_VREG(a0, a2)                       #  a0 <- fp[A]
     FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
     addu      t0, a3, a1
+    GET_INST_OPCODE(t1)                    #  extract opcode from rINST
+    GET_OPCODE_TARGET(t1)
     sb    a0, 0(t0)                    #  obj.field (8/16/32 bits) <- a0
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    GOTO_OPCODE(t0)                        #  jump to next instruction
+    JR(t1)                                 #  jump to next instruction
 
 
 /* ------------------------------ */
@@ -7411,7 +7533,7 @@
 /* File: mips/op_iput_byte_quick.S */
 /* File: mips/op_iput_quick.S */
     /* For: iput-quick, iput-object-quick */
-    # op vA, vB, offset                    /* CCCC */
+    /* op vA, vB, offset@CCCC */
     GET_OPB(a2)                            #  a2 <- B
     GET_VREG(a3, a2)                       #  a3 <- fp[B], the object pointer
     FETCH(a1, 1)                           #  a1 <- field byte offset
@@ -7420,9 +7542,10 @@
     GET_VREG(a0, a2)                       #  a0 <- fp[A]
     FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
     addu      t0, a3, a1
+    GET_INST_OPCODE(t1)                    #  extract opcode from rINST
+    GET_OPCODE_TARGET(t1)
     sb    a0, 0(t0)                    #  obj.field (8/16/32 bits) <- a0
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    GOTO_OPCODE(t0)                        #  jump to next instruction
+    JR(t1)                                 #  jump to next instruction
 
 
 /* ------------------------------ */
@@ -7431,7 +7554,7 @@
 /* File: mips/op_iput_char_quick.S */
 /* File: mips/op_iput_quick.S */
     /* For: iput-quick, iput-object-quick */
-    # op vA, vB, offset                    /* CCCC */
+    /* op vA, vB, offset@CCCC */
     GET_OPB(a2)                            #  a2 <- B
     GET_VREG(a3, a2)                       #  a3 <- fp[B], the object pointer
     FETCH(a1, 1)                           #  a1 <- field byte offset
@@ -7440,9 +7563,10 @@
     GET_VREG(a0, a2)                       #  a0 <- fp[A]
     FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
     addu      t0, a3, a1
+    GET_INST_OPCODE(t1)                    #  extract opcode from rINST
+    GET_OPCODE_TARGET(t1)
     sh    a0, 0(t0)                    #  obj.field (8/16/32 bits) <- a0
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    GOTO_OPCODE(t0)                        #  jump to next instruction
+    JR(t1)                                 #  jump to next instruction
 
 
 /* ------------------------------ */
@@ -7451,7 +7575,7 @@
 /* File: mips/op_iput_short_quick.S */
 /* File: mips/op_iput_quick.S */
     /* For: iput-quick, iput-object-quick */
-    # op vA, vB, offset                    /* CCCC */
+    /* op vA, vB, offset@CCCC */
     GET_OPB(a2)                            #  a2 <- B
     GET_VREG(a3, a2)                       #  a3 <- fp[B], the object pointer
     FETCH(a1, 1)                           #  a1 <- field byte offset
@@ -7460,9 +7584,10 @@
     GET_VREG(a0, a2)                       #  a0 <- fp[A]
     FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
     addu      t0, a3, a1
+    GET_INST_OPCODE(t1)                    #  extract opcode from rINST
+    GET_OPCODE_TARGET(t1)
     sh    a0, 0(t0)                    #  obj.field (8/16/32 bits) <- a0
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    GOTO_OPCODE(t0)                        #  jump to next instruction
+    JR(t1)                                 #  jump to next instruction
 
 
 /* ------------------------------ */
@@ -7471,7 +7596,7 @@
 /* File: mips/op_iget_boolean_quick.S */
 /* File: mips/op_iget_quick.S */
     /* For: iget-quick, iget-boolean-quick, iget-byte-quick, iget-char-quick, iget-short-quick */
-    # op vA, vB, offset                    /* CCCC */
+    /* op vA, vB, offset@CCCC */
     GET_OPB(a2)                            #  a2 <- B
     GET_VREG(a3, a2)                       #  a3 <- object we're operating on
     FETCH(a1, 1)                           #  a1 <- field byte offset
@@ -7491,7 +7616,7 @@
 /* File: mips/op_iget_byte_quick.S */
 /* File: mips/op_iget_quick.S */
     /* For: iget-quick, iget-boolean-quick, iget-byte-quick, iget-char-quick, iget-short-quick */
-    # op vA, vB, offset                    /* CCCC */
+    /* op vA, vB, offset@CCCC */
     GET_OPB(a2)                            #  a2 <- B
     GET_VREG(a3, a2)                       #  a3 <- object we're operating on
     FETCH(a1, 1)                           #  a1 <- field byte offset
@@ -7511,7 +7636,7 @@
 /* File: mips/op_iget_char_quick.S */
 /* File: mips/op_iget_quick.S */
     /* For: iget-quick, iget-boolean-quick, iget-byte-quick, iget-char-quick, iget-short-quick */
-    # op vA, vB, offset                    /* CCCC */
+    /* op vA, vB, offset@CCCC */
     GET_OPB(a2)                            #  a2 <- B
     GET_VREG(a3, a2)                       #  a3 <- object we're operating on
     FETCH(a1, 1)                           #  a1 <- field byte offset
@@ -7531,7 +7656,7 @@
 /* File: mips/op_iget_short_quick.S */
 /* File: mips/op_iget_quick.S */
     /* For: iget-quick, iget-boolean-quick, iget-byte-quick, iget-char-quick, iget-short-quick */
-    # op vA, vB, offset                    /* CCCC */
+    /* op vA, vB, offset@CCCC */
     GET_OPB(a2)                            #  a2 <- B
     GET_VREG(a3, a2)                       #  a3 <- object we're operating on
     FETCH(a1, 1)                           #  a1 <- field byte offset
@@ -7694,264 +7819,29 @@
     .balign 4
 artMterpAsmSisterStart:
 
-/* continuation for op_cmpl_float */
-
-.Lop_cmpl_float_nan:
-    li rTEMP, -1
-
-.Lop_cmpl_float_finish:
-    GET_OPA(rOBJ)
-    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG_GOTO(rTEMP, rOBJ, t0)         #  vAA <- rTEMP
-
-/* continuation for op_cmpg_float */
-
-.Lop_cmpg_float_nan:
-    li rTEMP, 1
-
-.Lop_cmpg_float_finish:
-    GET_OPA(rOBJ)
-    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG_GOTO(rTEMP, rOBJ, t0)         #  vAA <- rTEMP
-
-/* continuation for op_cmpl_double */
-
-.Lop_cmpl_double_nan:
-    li rTEMP, -1
-
-.Lop_cmpl_double_finish:
-    GET_OPA(rOBJ)
-    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG_GOTO(rTEMP, rOBJ, t0)         #  vAA <- rTEMP
-
-/* continuation for op_cmpg_double */
-
-.Lop_cmpg_double_nan:
-    li rTEMP, 1
-
-.Lop_cmpg_double_finish:
-    GET_OPA(rOBJ)
-    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG_GOTO(rTEMP, rOBJ, t0)         #  vAA <- rTEMP
-
-/* continuation for op_float_to_int */
-
-/*
- * Not an entry point as it is used only once !!
- */
-f2i_doconv:
-#ifdef MIPS32REVGE6
-    l.s       fa1, .LFLOAT_TO_INT_max
-    cmp.le.s  ft2, fa1, fa0
-    l.s       fv0, .LFLOAT_TO_INT_ret_max
-    bc1nez    ft2, .Lop_float_to_int_set_vreg_f
-
-    l.s       fa1, .LFLOAT_TO_INT_min
-    cmp.le.s  ft2, fa0, fa1
-    l.s       fv0, .LFLOAT_TO_INT_ret_min
-    bc1nez    ft2, .Lop_float_to_int_set_vreg_f
-
-    mov.s     fa1, fa0
-    cmp.un.s  ft2, fa0, fa1
-    li.s      fv0, 0
-    bc1nez    ft2, .Lop_float_to_int_set_vreg_f
-#else
-    l.s       fa1, .LFLOAT_TO_INT_max
-    c.ole.s   fcc0, fa1, fa0
-    l.s       fv0, .LFLOAT_TO_INT_ret_max
-    bc1t      .Lop_float_to_int_set_vreg_f
-
-    l.s       fa1, .LFLOAT_TO_INT_min
-    c.ole.s   fcc0, fa0, fa1
-    l.s       fv0, .LFLOAT_TO_INT_ret_min
-    bc1t      .Lop_float_to_int_set_vreg_f
-
-    mov.s     fa1, fa0
-    c.un.s    fcc0, fa0, fa1
-    li.s      fv0, 0
-    bc1t      .Lop_float_to_int_set_vreg_f
-#endif
-
-    trunc.w.s  fv0, fa0
-    b         .Lop_float_to_int_set_vreg_f
-
-.LFLOAT_TO_INT_max:
-    .word 0x4f000000
-.LFLOAT_TO_INT_min:
-    .word 0xcf000000
-.LFLOAT_TO_INT_ret_max:
-    .word 0x7fffffff
-.LFLOAT_TO_INT_ret_min:
-    .word 0x80000000
-
 /* continuation for op_float_to_long */
 
-f2l_doconv:
-#ifdef MIPS32REVGE6
-    l.s       fa1, .LLONG_TO_max
-    cmp.le.s  ft2, fa1, fa0
-    li        rRESULT0, ~0
-    li        rRESULT1, ~0x80000000
-    bc1nez    ft2, .Lop_float_to_long_set_vreg
-
-    l.s       fa1, .LLONG_TO_min
-    cmp.le.s  ft2, fa0, fa1
-    li        rRESULT0, 0
-    li        rRESULT1, 0x80000000
-    bc1nez    ft2, .Lop_float_to_long_set_vreg
-
-    mov.s     fa1, fa0
-    cmp.un.s  ft2, fa0, fa1
-    li        rRESULT0, 0
-    li        rRESULT1, 0
-    bc1nez    ft2, .Lop_float_to_long_set_vreg
-#else
-    l.s       fa1, .LLONG_TO_max
-    c.ole.s   fcc0, fa1, fa0
-    li        rRESULT0, ~0
-    li        rRESULT1, ~0x80000000
-    bc1t      .Lop_float_to_long_set_vreg
-
-    l.s       fa1, .LLONG_TO_min
-    c.ole.s   fcc0, fa0, fa1
-    li        rRESULT0, 0
-    li        rRESULT1, 0x80000000
-    bc1t      .Lop_float_to_long_set_vreg
-
-    mov.s     fa1, fa0
-    c.un.s    fcc0, fa0, fa1
-    li        rRESULT0, 0
-    li        rRESULT1, 0
-    bc1t      .Lop_float_to_long_set_vreg
+#ifndef MIPS32REVGE6
+.Lop_float_to_long_get_opcode:
+    GET_INST_OPCODE(t1)                    #  extract opcode from rINST
+.Lop_float_to_long_set_vreg:
+    SET_VREG64_GOTO(rRESULT0, rRESULT1, rOBJ, t1)   #  vA/vA+1 <- v0/v1
 #endif
 
-    JAL(__fixsfdi)
-
-    b         .Lop_float_to_long_set_vreg
-
-.LLONG_TO_max:
-    .word 0x5f000000
-
-.LLONG_TO_min:
-    .word 0xdf000000
-
-/* continuation for op_double_to_int */
-
-d2i_doconv:
-#ifdef MIPS32REVGE6
-    la        t0, .LDOUBLE_TO_INT_max
-    LOAD64_F(fa1, fa1f, t0)
-    cmp.le.d  ft2, fa1, fa0
-    l.s       fv0, .LDOUBLE_TO_INT_maxret
-    bc1nez    ft2, .Lop_double_to_int_set_vreg_f
-
-    la        t0, .LDOUBLE_TO_INT_min
-    LOAD64_F(fa1, fa1f, t0)
-    cmp.le.d  ft2, fa0, fa1
-    l.s       fv0, .LDOUBLE_TO_INT_minret
-    bc1nez    ft2, .Lop_double_to_int_set_vreg_f
-
-    mov.d     fa1, fa0
-    cmp.un.d  ft2, fa0, fa1
-    li.s      fv0, 0
-    bc1nez    ft2, .Lop_double_to_int_set_vreg_f
-#else
-    la        t0, .LDOUBLE_TO_INT_max
-    LOAD64_F(fa1, fa1f, t0)
-    c.ole.d   fcc0, fa1, fa0
-    l.s       fv0, .LDOUBLE_TO_INT_maxret
-    bc1t      .Lop_double_to_int_set_vreg_f
-
-    la        t0, .LDOUBLE_TO_INT_min
-    LOAD64_F(fa1, fa1f, t0)
-    c.ole.d   fcc0, fa0, fa1
-    l.s       fv0, .LDOUBLE_TO_INT_minret
-    bc1t      .Lop_double_to_int_set_vreg_f
-
-    mov.d     fa1, fa0
-    c.un.d    fcc0, fa0, fa1
-    li.s      fv0, 0
-    bc1t      .Lop_double_to_int_set_vreg_f
-#endif
-
-    trunc.w.d  fv0, fa0
-    b         .Lop_double_to_int_set_vreg_f
-
-.LDOUBLE_TO_INT_max:
-    .dword 0x41dfffffffc00000
-.LDOUBLE_TO_INT_min:
-    .dword 0xc1e0000000000000              #  minint, as a double (high word)
-.LDOUBLE_TO_INT_maxret:
-    .word 0x7fffffff
-.LDOUBLE_TO_INT_minret:
-    .word 0x80000000
-
 /* continuation for op_double_to_long */
 
-d2l_doconv:
-#ifdef MIPS32REVGE6
-    la        t0, .LDOUBLE_TO_LONG_max
-    LOAD64_F(fa1, fa1f, t0)
-    cmp.le.d  ft2, fa1, fa0
-    la        t0, .LDOUBLE_TO_LONG_ret_max
-    LOAD64(rRESULT0, rRESULT1, t0)
-    bc1nez    ft2, .Lop_double_to_long_set_vreg
-
-    la        t0, .LDOUBLE_TO_LONG_min
-    LOAD64_F(fa1, fa1f, t0)
-    cmp.le.d  ft2, fa0, fa1
-    la        t0, .LDOUBLE_TO_LONG_ret_min
-    LOAD64(rRESULT0, rRESULT1, t0)
-    bc1nez    ft2, .Lop_double_to_long_set_vreg
-
-    mov.d     fa1, fa0
-    cmp.un.d  ft2, fa0, fa1
-    li        rRESULT0, 0
-    li        rRESULT1, 0
-    bc1nez    ft2, .Lop_double_to_long_set_vreg
-#else
-    la        t0, .LDOUBLE_TO_LONG_max
-    LOAD64_F(fa1, fa1f, t0)
-    c.ole.d   fcc0, fa1, fa0
-    la        t0, .LDOUBLE_TO_LONG_ret_max
-    LOAD64(rRESULT0, rRESULT1, t0)
-    bc1t      .Lop_double_to_long_set_vreg
-
-    la        t0, .LDOUBLE_TO_LONG_min
-    LOAD64_F(fa1, fa1f, t0)
-    c.ole.d   fcc0, fa0, fa1
-    la        t0, .LDOUBLE_TO_LONG_ret_min
-    LOAD64(rRESULT0, rRESULT1, t0)
-    bc1t      .Lop_double_to_long_set_vreg
-
-    mov.d     fa1, fa0
-    c.un.d    fcc0, fa0, fa1
-    li        rRESULT0, 0
-    li        rRESULT1, 0
-    bc1t      .Lop_double_to_long_set_vreg
+#ifndef MIPS32REVGE6
+.Lop_double_to_long_get_opcode:
+    GET_INST_OPCODE(t1)                    #  extract opcode from rINST
+.Lop_double_to_long_set_vreg:
+    SET_VREG64_GOTO(rRESULT0, rRESULT1, rOBJ, t1)   #  vA/vA+1 <- v0/v1
 #endif
-    JAL(__fixdfdi)
-    b         .Lop_double_to_long_set_vreg
-
-.LDOUBLE_TO_LONG_max:
-    .dword 0x43e0000000000000              #  maxlong, as a double (high word)
-.LDOUBLE_TO_LONG_min:
-    .dword 0xc3e0000000000000              #  minlong, as a double (high word)
-.LDOUBLE_TO_LONG_ret_max:
-    .dword 0x7fffffffffffffff
-.LDOUBLE_TO_LONG_ret_min:
-    .dword 0x8000000000000000
 
 /* continuation for op_mul_long */
 
 .Lop_mul_long_finish:
     GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG64(v0, v1, a0)                 #  vAA::vAA+1 <- v0(low) :: v1(high)
-    GOTO_OPCODE(t0)                        #  jump to next instruction
+    SET_VREG64_GOTO(v0, v1, a0, t0)        #  vAA/vAA+1 <- v0(low)/v1(high)
 
 /* continuation for op_shl_long */
 
@@ -7969,51 +7859,21 @@
 .Lop_ushr_long_finish:
     SET_VREG64_GOTO(v1, zero, rOBJ, t0)    #  vAA/vAA+1 <- rlo/rhi
 
-/* continuation for op_add_double */
-
-.Lop_add_double_finish:
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    GOTO_OPCODE(t0)                        #  jump to next instruction
-
-/* continuation for op_sub_double */
-
-.Lop_sub_double_finish:
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    GOTO_OPCODE(t0)                        #  jump to next instruction
-
-/* continuation for op_mul_double */
-
-.Lop_mul_double_finish:
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    GOTO_OPCODE(t0)                        #  jump to next instruction
-
-/* continuation for op_div_double */
-
-.Lop_div_double_finish:
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    GOTO_OPCODE(t0)                        #  jump to next instruction
-
-/* continuation for op_rem_double */
-
-.Lop_rem_double_finish:
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    GOTO_OPCODE(t0)                        #  jump to next instruction
-
 /* continuation for op_shl_long_2addr */
 
 .Lop_shl_long_2addr_finish:
-    SET_VREG64_GOTO(zero, v0, rOBJ, t0)    #  vAA/vAA+1 <- rlo/rhi
+    SET_VREG64_GOTO(zero, v0, rOBJ, t0)    #  vA/vA+1 <- rlo/rhi
 
 /* continuation for op_shr_long_2addr */
 
 .Lop_shr_long_2addr_finish:
     sra     a3, a1, 31                     #  a3<- sign(ah)
-    SET_VREG64_GOTO(v1, a3, t2, t0)        #  vAA/vAA+1 <- rlo/rhi
+    SET_VREG64_GOTO(v1, a3, t2, t0)        #  vA/vA+1 <- rlo/rhi
 
 /* continuation for op_ushr_long_2addr */
 
 .Lop_ushr_long_2addr_finish:
-    SET_VREG64_GOTO(v1, zero, t3, t0)      #  vAA/vAA+1 <- rlo/rhi
+    SET_VREG64_GOTO(v1, zero, t3, t0)      #  vA/vA+1 <- rlo/rhi
 
     .size   artMterpAsmSisterStart, .-artMterpAsmSisterStart
     .global artMterpAsmSisterEnd
@@ -12791,7 +12651,7 @@
     REFRESH_IBASE()
     addu    a2, rINST, rINST            # a2<- byte offset
     FETCH_ADVANCE_INST_RB(a2)           # update rPC, load rINST
-    and     ra, (THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST)
+    and     ra, THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
     bnez    ra, .L_suspend_request_pending
     GET_INST_OPCODE(t0)                 # extract opcode from rINST
     GOTO_OPCODE(t0)                     # jump to next instruction
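The suspend-check hunks here and in the mips64/x86 listings that follow replace the explicit `(THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST)` mask with a single THREAD_SUSPEND_OR_CHECKPOINT_REQUEST constant. The relationship is presumably just the bitwise OR of the two flags; the values below are illustrative, and the real ones come from ART's generated assembly constants:

    /* Hypothetical definitions; only the OR relationship matters here. */
    #define THREAD_SUSPEND_REQUEST               (1u << 0)
    #define THREAD_CHECKPOINT_REQUEST            (1u << 1)
    #define THREAD_SUSPEND_OR_CHECKPOINT_REQUEST \
            (THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST)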
diff --git a/runtime/interpreter/mterp/out/mterp_mips64.S b/runtime/interpreter/mterp/out/mterp_mips64.S
index 143aeb0..037787f 100644
--- a/runtime/interpreter/mterp/out/mterp_mips64.S
+++ b/runtime/interpreter/mterp/out/mterp_mips64.S
@@ -637,7 +637,7 @@
     jal     MterpThreadFenceForConstructor
     lw      ra, THREAD_FLAGS_OFFSET(rSELF)
     move    a0, rSELF
-    and     ra, ra, (THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST)
+    and     ra, ra, THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
     beqzc   ra, 1f
     jal     MterpSuspendCheck           # (self)
 1:
@@ -659,7 +659,7 @@
     jal     MterpThreadFenceForConstructor
     lw      ra, THREAD_FLAGS_OFFSET(rSELF)
     move    a0, rSELF
-    and     ra, ra, (THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST)
+    and     ra, ra, THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
     beqzc   ra, 1f
     jal     MterpSuspendCheck           # (self)
 1:
@@ -681,7 +681,7 @@
     jal     MterpThreadFenceForConstructor
     lw      ra, THREAD_FLAGS_OFFSET(rSELF)
     move    a0, rSELF
-    and     ra, ra, (THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST)
+    and     ra, ra, THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
     beqzc   ra, 1f
     jal     MterpSuspendCheck           # (self)
 1:
@@ -705,7 +705,7 @@
     jal     MterpThreadFenceForConstructor
     lw      ra, THREAD_FLAGS_OFFSET(rSELF)
     move    a0, rSELF
-    and     ra, ra, (THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST)
+    and     ra, ra, THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
     beqzc   ra, 1f
     jal     MterpSuspendCheck           # (self)
 1:
@@ -3121,7 +3121,7 @@
     .extern MterpSuspendCheck
     lw      ra, THREAD_FLAGS_OFFSET(rSELF)
     move    a0, rSELF
-    and     ra, ra, (THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST)
+    and     ra, ra, THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
     beqzc   ra, 1f
     jal     MterpSuspendCheck           # (self)
 1:
@@ -12179,7 +12179,7 @@
     REFRESH_IBASE
     daddu   a2, rINST, rINST            # a2<- byte offset
     FETCH_ADVANCE_INST_RB a2            # update rPC, load rINST
-    and     ra, (THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST)
+    and     ra, THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
     bnezc   ra, .L_suspend_request_pending
     GET_INST_OPCODE v0                  # extract opcode from rINST
     GOTO_OPCODE v0                      # jump to next instruction
@@ -12296,7 +12296,7 @@
     lw      ra, THREAD_FLAGS_OFFSET(rSELF)
     sd      a0, 0(a2)
     move    a0, rSELF
-    and     ra, ra, (THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST)
+    and     ra, ra, THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
     beqzc   ra, check2
     jal     MterpSuspendCheck                       # (self)
 check2:
diff --git a/runtime/interpreter/mterp/out/mterp_x86.S b/runtime/interpreter/mterp/out/mterp_x86.S
index d676fda..695d1e4 100644
--- a/runtime/interpreter/mterp/out/mterp_x86.S
+++ b/runtime/interpreter/mterp/out/mterp_x86.S
@@ -612,7 +612,7 @@
     .extern MterpThreadFenceForConstructor
     call    SYMBOL(MterpThreadFenceForConstructor)
     movl    rSELF, %eax
-    testl   $(THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(%eax)
+    testl   $(THREAD_SUSPEND_OR_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(%eax)
     jz      1f
     movl    %eax, OUT_ARG0(%esp)
     call    SYMBOL(MterpSuspendCheck)
@@ -634,7 +634,7 @@
     .extern MterpThreadFenceForConstructor
     call    SYMBOL(MterpThreadFenceForConstructor)
     movl    rSELF, %eax
-    testl   $(THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(%eax)
+    testl   $(THREAD_SUSPEND_OR_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(%eax)
     jz      1f
     movl    %eax, OUT_ARG0(%esp)
     call    SYMBOL(MterpSuspendCheck)
@@ -654,7 +654,7 @@
     .extern MterpThreadFenceForConstructor
     call    SYMBOL(MterpThreadFenceForConstructor)
     movl    rSELF, %eax
-    testl   $(THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(%eax)
+    testl   $(THREAD_SUSPEND_OR_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(%eax)
     jz      1f
     movl    %eax, OUT_ARG0(%esp)
     call    SYMBOL(MterpSuspendCheck)
@@ -677,7 +677,7 @@
     .extern MterpThreadFenceForConstructor
     call    SYMBOL(MterpThreadFenceForConstructor)
     movl    rSELF, %eax
-    testl   $(THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(%eax)
+    testl   $(THREAD_SUSPEND_OR_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(%eax)
     jz      1f
     movl    %eax, OUT_ARG0(%esp)
     call    SYMBOL(MterpSuspendCheck)
@@ -3104,7 +3104,7 @@
 .L_op_return_void_no_barrier: /* 0x73 */
 /* File: x86/op_return_void_no_barrier.S */
     movl    rSELF, %eax
-    testl   $(THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(%eax)
+    testl   $(THREAD_SUSPEND_OR_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(%eax)
     jz      1f
     movl    %eax, OUT_ARG0(%esp)
     call    SYMBOL(MterpSuspendCheck)
@@ -12678,7 +12678,7 @@
     je      .L_add_batch                    # counted down to zero - report
 .L_resume_backward_branch:
     movl    rSELF, %eax
-    testl   $(THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(%eax)
+    testl   $(THREAD_SUSPEND_OR_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(%eax)
     leal    (rPC, rINST, 2), rPC
     FETCH_INST
     jnz     .L_suspend_request_pending
diff --git a/runtime/interpreter/mterp/out/mterp_x86_64.S b/runtime/interpreter/mterp/out/mterp_x86_64.S
index df88499..2eab58c 100644
--- a/runtime/interpreter/mterp/out/mterp_x86_64.S
+++ b/runtime/interpreter/mterp/out/mterp_x86_64.S
@@ -587,7 +587,7 @@
     .extern MterpThreadFenceForConstructor
     call    SYMBOL(MterpThreadFenceForConstructor)
     movq    rSELF, OUT_ARG0
-    testl   $(THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(OUT_ARG0)
+    testl   $(THREAD_SUSPEND_OR_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(OUT_ARG0)
     jz      1f
     call    SYMBOL(MterpSuspendCheck)
 1:
@@ -607,7 +607,7 @@
     .extern MterpThreadFenceForConstructor
     call    SYMBOL(MterpThreadFenceForConstructor)
     movq    rSELF, OUT_ARG0
-    testl   $(THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(OUT_ARG0)
+    testl   $(THREAD_SUSPEND_OR_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(OUT_ARG0)
     jz      1f
     call    SYMBOL(MterpSuspendCheck)
 1:
@@ -625,7 +625,7 @@
     .extern MterpThreadFenceForConstructor
     call    SYMBOL(MterpThreadFenceForConstructor)
     movq    rSELF, OUT_ARG0
-    testl   $(THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(OUT_ARG0)
+    testl   $(THREAD_SUSPEND_OR_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(OUT_ARG0)
     jz      1f
     call    SYMBOL(MterpSuspendCheck)
 1:
@@ -646,7 +646,7 @@
     .extern MterpThreadFenceForConstructor
     call    SYMBOL(MterpThreadFenceForConstructor)
     movq    rSELF, OUT_ARG0
-    testl   $(THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(OUT_ARG0)
+    testl   $(THREAD_SUSPEND_OR_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(OUT_ARG0)
     jz      1f
     call    SYMBOL(MterpSuspendCheck)
 1:
@@ -2972,7 +2972,7 @@
 .L_op_return_void_no_barrier: /* 0x73 */
 /* File: x86_64/op_return_void_no_barrier.S */
     movq    rSELF, OUT_ARG0
-    testl   $(THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(OUT_ARG0)
+    testl   $(THREAD_SUSPEND_OR_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(OUT_ARG0)
     jz      1f
     call    SYMBOL(MterpSuspendCheck)
 1:
@@ -11915,7 +11915,7 @@
     je      .L_add_batch                    # counted down to zero - report
 .L_resume_backward_branch:
     movq    rSELF, %rax
-    testl   $(THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(%rax)
+    testl   $(THREAD_SUSPEND_OR_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(%rax)
     REFRESH_IBASE
     leaq    (rPC, rINSTq, 2), rPC
     FETCH_INST
diff --git a/runtime/interpreter/mterp/x86/footer.S b/runtime/interpreter/mterp/x86/footer.S
index e8c8ca8..088cb12 100644
--- a/runtime/interpreter/mterp/x86/footer.S
+++ b/runtime/interpreter/mterp/x86/footer.S
@@ -167,7 +167,7 @@
     je      .L_add_batch                    # counted down to zero - report
 .L_resume_backward_branch:
     movl    rSELF, %eax
-    testl   $$(THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(%eax)
+    testl   $$(THREAD_SUSPEND_OR_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(%eax)
     leal    (rPC, rINST, 2), rPC
     FETCH_INST
     jnz     .L_suspend_request_pending
diff --git a/runtime/interpreter/mterp/x86/op_return.S b/runtime/interpreter/mterp/x86/op_return.S
index 8e3cfad..a8ebbed 100644
--- a/runtime/interpreter/mterp/x86/op_return.S
+++ b/runtime/interpreter/mterp/x86/op_return.S
@@ -7,7 +7,7 @@
     .extern MterpThreadFenceForConstructor
     call    SYMBOL(MterpThreadFenceForConstructor)
     movl    rSELF, %eax
-    testl   $$(THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(%eax)
+    testl   $$(THREAD_SUSPEND_OR_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(%eax)
     jz      1f
     movl    %eax, OUT_ARG0(%esp)
     call    SYMBOL(MterpSuspendCheck)
diff --git a/runtime/interpreter/mterp/x86/op_return_void.S b/runtime/interpreter/mterp/x86/op_return_void.S
index a14a4f6..d9eddf3 100644
--- a/runtime/interpreter/mterp/x86/op_return_void.S
+++ b/runtime/interpreter/mterp/x86/op_return_void.S
@@ -1,7 +1,7 @@
     .extern MterpThreadFenceForConstructor
     call    SYMBOL(MterpThreadFenceForConstructor)
     movl    rSELF, %eax
-    testl   $$(THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(%eax)
+    testl   $$(THREAD_SUSPEND_OR_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(%eax)
     jz      1f
     movl    %eax, OUT_ARG0(%esp)
     call    SYMBOL(MterpSuspendCheck)
diff --git a/runtime/interpreter/mterp/x86/op_return_void_no_barrier.S b/runtime/interpreter/mterp/x86/op_return_void_no_barrier.S
index 1d0e933..2fbda6b 100644
--- a/runtime/interpreter/mterp/x86/op_return_void_no_barrier.S
+++ b/runtime/interpreter/mterp/x86/op_return_void_no_barrier.S
@@ -1,5 +1,5 @@
     movl    rSELF, %eax
-    testl   $$(THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(%eax)
+    testl   $$(THREAD_SUSPEND_OR_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(%eax)
     jz      1f
     movl    %eax, OUT_ARG0(%esp)
     call    SYMBOL(MterpSuspendCheck)
diff --git a/runtime/interpreter/mterp/x86/op_return_wide.S b/runtime/interpreter/mterp/x86/op_return_wide.S
index 7d1850a..5fff626 100644
--- a/runtime/interpreter/mterp/x86/op_return_wide.S
+++ b/runtime/interpreter/mterp/x86/op_return_wide.S
@@ -5,7 +5,7 @@
     .extern MterpThreadFenceForConstructor
     call    SYMBOL(MterpThreadFenceForConstructor)
     movl    rSELF, %eax
-    testl   $$(THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(%eax)
+    testl   $$(THREAD_SUSPEND_OR_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(%eax)
     jz      1f
     movl    %eax, OUT_ARG0(%esp)
     call    SYMBOL(MterpSuspendCheck)
diff --git a/runtime/interpreter/mterp/x86_64/footer.S b/runtime/interpreter/mterp/x86_64/footer.S
index f78f163..ed5e5ea 100644
--- a/runtime/interpreter/mterp/x86_64/footer.S
+++ b/runtime/interpreter/mterp/x86_64/footer.S
@@ -151,7 +151,7 @@
     je      .L_add_batch                    # counted down to zero - report
 .L_resume_backward_branch:
     movq    rSELF, %rax
-    testl   $$(THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(%rax)
+    testl   $$(THREAD_SUSPEND_OR_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(%rax)
     REFRESH_IBASE
     leaq    (rPC, rINSTq, 2), rPC
     FETCH_INST
diff --git a/runtime/interpreter/mterp/x86_64/op_return.S b/runtime/interpreter/mterp/x86_64/op_return.S
index 07e0e53..8cb6cba 100644
--- a/runtime/interpreter/mterp/x86_64/op_return.S
+++ b/runtime/interpreter/mterp/x86_64/op_return.S
@@ -7,7 +7,7 @@
     .extern MterpThreadFenceForConstructor
     call    SYMBOL(MterpThreadFenceForConstructor)
     movq    rSELF, OUT_ARG0
-    testl   $$(THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(OUT_ARG0)
+    testl   $$(THREAD_SUSPEND_OR_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(OUT_ARG0)
     jz      1f
     call    SYMBOL(MterpSuspendCheck)
 1:
diff --git a/runtime/interpreter/mterp/x86_64/op_return_void.S b/runtime/interpreter/mterp/x86_64/op_return_void.S
index 6a12df3..ba68e7e 100644
--- a/runtime/interpreter/mterp/x86_64/op_return_void.S
+++ b/runtime/interpreter/mterp/x86_64/op_return_void.S
@@ -1,7 +1,7 @@
     .extern MterpThreadFenceForConstructor
     call    SYMBOL(MterpThreadFenceForConstructor)
     movq    rSELF, OUT_ARG0
-    testl   $$(THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(OUT_ARG0)
+    testl   $$(THREAD_SUSPEND_OR_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(OUT_ARG0)
     jz      1f
     call    SYMBOL(MterpSuspendCheck)
 1:
diff --git a/runtime/interpreter/mterp/x86_64/op_return_void_no_barrier.S b/runtime/interpreter/mterp/x86_64/op_return_void_no_barrier.S
index 822b2e8..6799da1 100644
--- a/runtime/interpreter/mterp/x86_64/op_return_void_no_barrier.S
+++ b/runtime/interpreter/mterp/x86_64/op_return_void_no_barrier.S
@@ -1,5 +1,5 @@
     movq    rSELF, OUT_ARG0
-    testl   $$(THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(OUT_ARG0)
+    testl   $$(THREAD_SUSPEND_OR_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(OUT_ARG0)
     jz      1f
     call    SYMBOL(MterpSuspendCheck)
 1:
diff --git a/runtime/interpreter/mterp/x86_64/op_return_wide.S b/runtime/interpreter/mterp/x86_64/op_return_wide.S
index 288eb96..d6d6d1b 100644
--- a/runtime/interpreter/mterp/x86_64/op_return_wide.S
+++ b/runtime/interpreter/mterp/x86_64/op_return_wide.S
@@ -5,7 +5,7 @@
     .extern MterpThreadFenceForConstructor
     call    SYMBOL(MterpThreadFenceForConstructor)
     movq    rSELF, OUT_ARG0
-    testl   $$(THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(OUT_ARG0)
+    testl   $$(THREAD_SUSPEND_OR_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(OUT_ARG0)
     jz      1f
     call    SYMBOL(MterpSuspendCheck)
 1:
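
Note on the mterp changes above: the open-coded mask (THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST) is replaced by the single exported constant THREAD_SUSPEND_OR_CHECKPOINT_REQUEST. A minimal C++ sketch of how such a combined constant could be defined and tested; the flag values and the asm_support plumbing are assumptions, not shown in this patch:

    #include <cstdint>

    // Hypothetical flag values for illustration; in ART these come from the
    // ThreadFlag enum and are exported to assembly via generated constants.
    constexpr uint32_t kSuspendRequest = 1u << 0;
    constexpr uint32_t kCheckpointRequest = 1u << 1;

    // Combined mask corresponding to THREAD_SUSPEND_OR_CHECKPOINT_REQUEST.
    constexpr uint32_t kSuspendOrCheckpointRequest = kSuspendRequest | kCheckpointRequest;

    // Mirrors the "testl $(THREAD_SUSPEND_OR_CHECKPOINT_REQUEST), flags" checks above.
    inline bool SuspendOrCheckpointPending(uint32_t thread_flags) {
      return (thread_flags & kSuspendOrCheckpointRequest) != 0;
    }
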
diff --git a/runtime/interpreter/unstarted_runtime.cc b/runtime/interpreter/unstarted_runtime.cc
index 5a62bd7..a5b1038 100644
--- a/runtime/interpreter/unstarted_runtime.cc
+++ b/runtime/interpreter/unstarted_runtime.cc
@@ -424,7 +424,7 @@
 
   std::unique_ptr<ZipArchive> zip_archive(ZipArchive::Open(jar_file.c_str(), error_msg));
   if (zip_archive == nullptr) {
-    return nullptr;;
+    return nullptr;
   }
   std::unique_ptr<ZipEntry> zip_entry(zip_archive->Find(entry_name, error_msg));
   if (zip_entry == nullptr) {
@@ -564,7 +564,7 @@
             this_classloader_class.Get()) {
       AbortTransactionOrFail(self,
                              "Unsupported classloader type %s for getResourceAsStream",
-                             Class::PrettyClass(this_classloader_class.Get()).c_str());
+                             mirror::Class::PrettyClass(this_classloader_class.Get()).c_str());
       return;
     }
   }
@@ -608,10 +608,11 @@
                                int32_t length)
     REQUIRES_SHARED(Locks::mutator_lock_) {
   if (src_array->GetClass()->GetComponentType() != dst_array->GetClass()->GetComponentType()) {
-    AbortTransactionOrFail(self, "Types mismatched in arraycopy: %s vs %s.",
-                           Class::PrettyDescriptor(
+    AbortTransactionOrFail(self,
+                           "Types mismatched in arraycopy: %s vs %s.",
+                           mirror::Class::PrettyDescriptor(
                                src_array->GetClass()->GetComponentType()).c_str(),
-                           Class::PrettyDescriptor(
+                           mirror::Class::PrettyDescriptor(
                                dst_array->GetClass()->GetComponentType()).c_str());
     return;
   }
@@ -677,9 +678,9 @@
         GetComponentType();
     if (trg_type->IsPrimitiveInt()) {
       AbortTransactionOrFail(self, "Type mismatch in arraycopy: %s vs %s",
-                             Class::PrettyDescriptor(
+                             mirror::Class::PrettyDescriptor(
                                  src_array->GetClass()->GetComponentType()).c_str(),
-                             Class::PrettyDescriptor(
+                             mirror::Class::PrettyDescriptor(
                                  dst_array->GetClass()->GetComponentType()).c_str());
       return;
     }
@@ -1096,10 +1097,12 @@
     return;
   }
   DCHECK_GE(start, 0);
-  DCHECK_GE(end, string->GetLength());
+  DCHECK_LE(start, end);
+  DCHECK_LE(end, string->GetLength());
   StackHandleScope<1> hs(self);
   Handle<mirror::CharArray> h_char_array(
       hs.NewHandle(shadow_frame->GetVRegReference(arg_offset + 3)->AsCharArray()));
+  DCHECK_GE(index, 0);
   DCHECK_LE(index, h_char_array->GetLength());
   DCHECK_LE(end - start, h_char_array->GetLength() - index);
   string->GetChars(start, end, h_char_array, index);
@@ -1271,7 +1274,7 @@
     mirror::HeapReference<mirror::Object>* field_addr =
         reinterpret_cast<mirror::HeapReference<mirror::Object>*>(
             reinterpret_cast<uint8_t*>(obj) + static_cast<size_t>(offset));
-    ReadBarrier::Barrier<mirror::Object, kWithReadBarrier, /*kAlwaysUpdateField*/true>(
+    ReadBarrier::Barrier<mirror::Object, kWithReadBarrier, /* kAlwaysUpdateField */ true>(
         obj,
         MemberOffset(offset),
         field_addr);
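
Note on the corrected DCHECKs for String.getChars() above: they encode the usual bounds for copying [start, end) into the destination array at index. A standalone restatement of that invariant as a sketch (the helper name is illustrative, not from the patch):

    #include <cstdint>

    // True iff copying src[start, end) into dst starting at index stays in bounds,
    // matching the DCHECK_GE/DCHECK_LE chain in UnstartedStringCharAt's neighbour.
    bool GetCharsArgsValid(int32_t start, int32_t end, int32_t src_len,
                           int32_t index, int32_t dst_len) {
      return 0 <= start && start <= end && end <= src_len &&
             0 <= index && end - start <= dst_len - index;
    }
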
diff --git a/runtime/java_vm_ext.cc b/runtime/java_vm_ext.cc
index a1ed470..caf705a 100644
--- a/runtime/java_vm_ext.cc
+++ b/runtime/java_vm_ext.cc
@@ -54,10 +54,10 @@
 class SharedLibrary {
  public:
   SharedLibrary(JNIEnv* env, Thread* self, const std::string& path, void* handle,
-                jobject class_loader, void* class_loader_allocator)
+                bool needs_native_bridge, jobject class_loader, void* class_loader_allocator)
       : path_(path),
         handle_(handle),
-        needs_native_bridge_(false),
+        needs_native_bridge_(needs_native_bridge),
         class_loader_(env->NewWeakGlobalRef(class_loader)),
         class_loader_allocator_(class_loader_allocator),
         jni_on_load_lock_("JNI_OnLoad lock"),
@@ -73,9 +73,7 @@
       self->GetJniEnv()->DeleteWeakGlobalRef(class_loader_);
     }
 
-    if (!needs_native_bridge_) {
-      android::CloseNativeLibrary(handle_);
-    }
+    android::CloseNativeLibrary(handle_, needs_native_bridge_);
   }
 
   jweak GetClassLoader() const {
@@ -131,8 +129,8 @@
     jni_on_load_cond_.Broadcast(self);
   }
 
-  void SetNeedsNativeBridge() {
-    needs_native_bridge_ = true;
+  void SetNeedsNativeBridge(bool needs) {
+    needs_native_bridge_ = needs;
   }
 
   bool NeedsNativeBridge() const {
@@ -564,6 +562,9 @@
   }
   MutexLock mu(self, *Locks::jni_weak_globals_lock_);
   while (UNLIKELY(!MayAccessWeakGlobals(self))) {
+    // Check and run the empty checkpoint before blocking so the empty checkpoint will work in the
+    // presence of threads blocking for weak ref access.
+    self->CheckEmptyCheckpoint();
     weak_globals_add_condition_.WaitHoldingLocks(self);
   }
   IndirectRef ref = weak_globals_.Add(kIRTFirstSegment, obj);
@@ -650,7 +651,6 @@
 }
 
 void JavaVMExt::BroadcastForNewWeakGlobals() {
-  CHECK(kUseReadBarrier);
   Thread* self = Thread::Current();
   MutexLock mu(self, *Locks::jni_weak_globals_lock_);
   weak_globals_add_condition_.Broadcast(self);
@@ -696,6 +696,9 @@
     Locks::jni_weak_globals_lock_->AssertHeld(self);
   }
   while (UNLIKELY(!MayAccessWeakGlobals(self))) {
+    // Check and run the empty checkpoint before blocking so the empty checkpoint will work in the
+    // presence of threads blocking for weak ref access.
+    self->CheckEmptyCheckpoint();
     weak_globals_add_condition_.WaitHoldingLocks(self);
   }
   return weak_globals_.Get(ref);
@@ -718,6 +721,9 @@
   DCHECK_EQ(IndirectReferenceTable::GetIndirectRefKind(ref), kWeakGlobal);
   MutexLock mu(self, *Locks::jni_weak_globals_lock_);
   while (UNLIKELY(!MayAccessWeakGlobals(self))) {
+    // Check and run the empty checkpoint before blocking so the empty checkpoint will work in the
+    // presence of threads blocking for weak ref access.
+    self->CheckEmptyCheckpoint();
     weak_globals_add_condition_.WaitHoldingLocks(self);
   }
   // When just checking a weak ref has been cleared, avoid triggering the read barrier in decode
@@ -817,24 +823,18 @@
 
   Locks::mutator_lock_->AssertNotHeld(self);
   const char* path_str = path.empty() ? nullptr : path.c_str();
+  bool needs_native_bridge = false;
   void* handle = android::OpenNativeLibrary(env,
                                             runtime_->GetTargetSdkVersion(),
                                             path_str,
                                             class_loader,
-                                            library_path);
-
-  bool needs_native_bridge = false;
-  if (handle == nullptr) {
-    if (android::NativeBridgeIsSupported(path_str)) {
-      handle = android::NativeBridgeLoadLibrary(path_str, RTLD_NOW);
-      needs_native_bridge = true;
-    }
-  }
+                                            library_path,
+                                            &needs_native_bridge,
+                                            error_msg);
 
   VLOG(jni) << "[Call to dlopen(\"" << path << "\", RTLD_NOW) returned " << handle << "]";
 
   if (handle == nullptr) {
-    *error_msg = dlerror();
     VLOG(jni) << "dlopen(\"" << path << "\", RTLD_NOW) failed: " << *error_msg;
     return false;
   }
@@ -850,7 +850,14 @@
   {
     // Create SharedLibrary ahead of taking the libraries lock to maintain lock ordering.
     std::unique_ptr<SharedLibrary> new_library(
-        new SharedLibrary(env, self, path, handle, class_loader, class_loader_allocator));
+        new SharedLibrary(env,
+                          self,
+                          path,
+                          handle,
+                          needs_native_bridge,
+                          class_loader,
+                          class_loader_allocator));
+
     MutexLock mu(self, *Locks::jni_libraries_lock_);
     library = libraries_->Get(path);
     if (library == nullptr) {  // We won race to get libraries_lock.
@@ -867,11 +874,7 @@
   VLOG(jni) << "[Added shared library \"" << path << "\" for ClassLoader " << class_loader << "]";
 
   bool was_successful = false;
-  void* sym;
-  if (needs_native_bridge) {
-    library->SetNeedsNativeBridge();
-  }
-  sym = library->FindSymbol("JNI_OnLoad", nullptr);
+  void* sym = library->FindSymbol("JNI_OnLoad", nullptr);
   if (sym == nullptr) {
     VLOG(jni) << "[No JNI_OnLoad found in \"" << path << "\"]";
     was_successful = true;
diff --git a/runtime/java_vm_ext.h b/runtime/java_vm_ext.h
index 9e37f11..7374920 100644
--- a/runtime/java_vm_ext.h
+++ b/runtime/java_vm_ext.h
@@ -136,7 +136,6 @@
       REQUIRES_SHARED(Locks::mutator_lock_)
       REQUIRES(!Locks::jni_weak_globals_lock_);
   void BroadcastForNewWeakGlobals()
-      REQUIRES_SHARED(Locks::mutator_lock_)
       REQUIRES(!Locks::jni_weak_globals_lock_);
 
   jobject AddGlobalRef(Thread* self, ObjPtr<mirror::Object> obj)
diff --git a/runtime/jdwp/jdwp_event.cc b/runtime/jdwp/jdwp_event.cc
index 85bfd17..fad7d90 100644
--- a/runtime/jdwp/jdwp_event.cc
+++ b/runtime/jdwp/jdwp_event.cc
@@ -781,7 +781,7 @@
   SendRequestAndPossiblySuspend(pReq, suspend_policy, threadId);
 }
 
-static void LogMatchingEventsAndThread(const std::vector<JdwpEvent*> match_list,
+static void LogMatchingEventsAndThread(const std::vector<JdwpEvent*>& match_list,
                                        ObjectId thread_id)
     REQUIRES_SHARED(Locks::mutator_lock_) {
   for (size_t i = 0, e = match_list.size(); i < e; ++i) {
diff --git a/runtime/jit/jit.cc b/runtime/jit/jit.cc
index 953b1c0..803e9d5 100644
--- a/runtime/jit/jit.cc
+++ b/runtime/jit/jit.cc
@@ -114,7 +114,7 @@
   } else {
     jit_options->invoke_transition_weight_ = std::max(
         jit_options->warmup_threshold_ / Jit::kDefaultInvokeTransitionWeightRatio,
-        static_cast<size_t>(1));;
+        static_cast<size_t>(1));
   }
 
   return jit_options;
@@ -683,7 +683,7 @@
   }
 }
 
-void Jit::InvokeVirtualOrInterface(mirror::Object* this_object,
+void Jit::InvokeVirtualOrInterface(ObjPtr<mirror::Object> this_object,
                                    ArtMethod* caller,
                                    uint32_t dex_pc,
                                    ArtMethod* callee ATTRIBUTE_UNUSED) {
@@ -701,5 +701,24 @@
   }
 }
 
+ScopedJitSuspend::ScopedJitSuspend() {
+  jit::Jit* jit = Runtime::Current()->GetJit();
+  was_on_ = (jit != nullptr) && (jit->GetThreadPool() != nullptr);
+  if (was_on_) {
+    Thread* self = Thread::Current();
+    jit->WaitForCompilationToFinish(self);
+    jit->GetThreadPool()->StopWorkers(self);
+    jit->WaitForCompilationToFinish(self);
+  }
+}
+
+ScopedJitSuspend::~ScopedJitSuspend() {
+  if (was_on_) {
+    DCHECK(Runtime::Current()->GetJit() != nullptr);
+    DCHECK(Runtime::Current()->GetJit()->GetThreadPool() != nullptr);
+    Runtime::Current()->GetJit()->GetThreadPool()->StartWorkers(Thread::Current());
+  }
+}
+
 }  // namespace jit
 }  // namespace art
diff --git a/runtime/jit/jit.h b/runtime/jit/jit.h
index d3178b0..a230c78 100644
--- a/runtime/jit/jit.h
+++ b/runtime/jit/jit.h
@@ -22,9 +22,10 @@
 #include "base/macros.h"
 #include "base/mutex.h"
 #include "base/timing_logger.h"
+#include "jit/profile_saver_options.h"
+#include "obj_ptr.h"
 #include "object_callbacks.h"
 #include "offline_profiling_info.h"
-#include "jit/profile_saver_options.h"
 #include "thread_pool.h"
 
 namespace art {
@@ -114,7 +115,7 @@
   void AddSamples(Thread* self, ArtMethod* method, uint16_t samples, bool with_backedges)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
-  void InvokeVirtualOrInterface(mirror::Object* this_object,
+  void InvokeVirtualOrInterface(ObjPtr<mirror::Object> this_object,
                                 ArtMethod* caller,
                                 uint32_t dex_pc,
                                 ArtMethod* callee)
@@ -174,6 +175,10 @@
 
   static bool LoadCompilerLibrary(std::string* error_msg);
 
+  ThreadPool* GetThreadPool() const {
+    return thread_pool_.get();
+  }
+
  private:
   Jit();
 
@@ -277,6 +282,16 @@
   DISALLOW_COPY_AND_ASSIGN(JitOptions);
 };
 
+// Helper class to stop the JIT for a given scope. This will wait for the JIT to quiesce.
+class ScopedJitSuspend {
+ public:
+  ScopedJitSuspend();
+  ~ScopedJitSuspend();
+
+ private:
+  bool was_on_;
+};
+
 }  // namespace jit
 }  // namespace art
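
Note on the new ScopedJitSuspend helper above: it is RAII-style, so the constructor stops the JIT thread pool (after waiting for in-flight compilations) and the destructor restarts it. An illustrative usage sketch; the surrounding function is assumed, not part of the patch:

    void PatchCodeWhileJitIsQuiet() {  // hypothetical caller, for illustration only
      // Stops JIT worker threads for the duration of this scope; a no-op if the
      // JIT or its thread pool is not running (was_on_ is false in that case).
      jit::ScopedJitSuspend suspend_jit;

      // ... perform work that must not race with JIT compilation ...

    }  // JIT workers are restarted here by ~ScopedJitSuspend().
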
 
diff --git a/runtime/jit/offline_profiling_info.cc b/runtime/jit/offline_profiling_info.cc
index f535151..b9f5981 100644
--- a/runtime/jit/offline_profiling_info.cc
+++ b/runtime/jit/offline_profiling_info.cc
@@ -37,7 +37,7 @@
 namespace art {
 
 const uint8_t ProfileCompilationInfo::kProfileMagic[] = { 'p', 'r', 'o', '\0' };
-const uint8_t ProfileCompilationInfo::kProfileVersion[] = { '0', '0', '1', '\0' };
+const uint8_t ProfileCompilationInfo::kProfileVersion[] = { '0', '0', '2', '\0' };
 
 static constexpr uint16_t kMaxDexFileKeyLength = PATH_MAX;
 
@@ -282,12 +282,12 @@
 
 bool ProfileCompilationInfo::AddClassIndex(const std::string& dex_location,
                                            uint32_t checksum,
-                                           uint16_t class_idx) {
+                                           uint16_t type_idx) {
   DexFileData* const data = GetOrAddDexFileData(dex_location, checksum);
   if (data == nullptr) {
     return false;
   }
-  data->class_set.insert(class_idx);
+  data->class_set.insert(type_idx);
   return true;
 }
 
@@ -304,8 +304,8 @@
   }
 
   for (uint16_t i = 0; i < class_set_size; i++) {
-    uint16_t class_def_idx = line_buffer.ReadUintAndAdvance<uint16_t>();
-    if (!AddClassIndex(dex_location, checksum, class_def_idx)) {
+    uint16_t type_idx = line_buffer.ReadUintAndAdvance<uint16_t>();
+    if (!AddClassIndex(dex_location, checksum, type_idx)) {
       return false;
     }
   }
@@ -569,14 +569,14 @@
   return false;
 }
 
-bool ProfileCompilationInfo::ContainsClass(const DexFile& dex_file, uint16_t class_def_idx) const {
+bool ProfileCompilationInfo::ContainsClass(const DexFile& dex_file, uint16_t type_idx) const {
   auto info_it = info_.find(GetProfileDexFileKey(dex_file.GetLocation()));
   if (info_it != info_.end()) {
     if (!ChecksumMatch(dex_file, info_it->second.checksum)) {
       return false;
     }
     const std::set<uint16_t>& classes = info_it->second.class_set;
-    return classes.find(class_def_idx) != classes.end();
+    return classes.find(type_idx) != classes.end();
   }
   return false;
 }
@@ -637,7 +637,7 @@
     os << "\n\tclasses: ";
     for (const auto class_it : dex_data.class_set) {
       if (dex_file != nullptr) {
-        os << "\n\t\t" << dex_file->GetClassDescriptor(dex_file->GetClassDef(class_it));
+        os << "\n\t\t" << dex_file->PrettyType(class_it);
       } else {
         os << class_it << ",";
       }
@@ -702,11 +702,11 @@
     }
 
     for (uint16_t c = 0; c < number_of_classes; c++) {
-      uint16_t class_idx = rand() % max_classes;
+      uint16_t type_idx = rand() % max_classes;
       if (c < (number_of_classes / kFavorSplit)) {
-        class_idx %= kFavorFirstN;
+        type_idx %= kFavorFirstN;
       }
-      info.AddClassIndex(profile_key, 0, class_idx);
+      info.AddClassIndex(profile_key, 0, type_idx);
     }
   }
   return info.Save(fd);
diff --git a/runtime/jit/offline_profiling_info.h b/runtime/jit/offline_profiling_info.h
index 0b26f9b..f8ed573 100644
--- a/runtime/jit/offline_profiling_info.h
+++ b/runtime/jit/offline_profiling_info.h
@@ -65,8 +65,8 @@
   // Returns true if the method reference is present in the profiling info.
   bool ContainsMethod(const MethodReference& method_ref) const;
 
-  // Returns true if the class is present in the profiling info.
-  bool ContainsClass(const DexFile& dex_file, uint16_t class_def_idx) const;
+  // Returns true if the class's type is present in the profiling info.
+  bool ContainsClass(const DexFile& dex_file, uint16_t type_idx) const;
 
   // Dumps all the loaded profile info into a string and returns it.
   // If dex_files is not null then the method indices will be resolved to their
@@ -115,7 +115,7 @@
 
   DexFileData* GetOrAddDexFileData(const std::string& dex_location, uint32_t checksum);
   bool AddMethodIndex(const std::string& dex_location, uint32_t checksum, uint16_t method_idx);
-  bool AddClassIndex(const std::string& dex_location, uint32_t checksum, uint16_t class_idx);
+  bool AddClassIndex(const std::string& dex_location, uint32_t checksum, uint16_t type_idx);
   bool AddResolvedClasses(const DexCacheResolvedClasses& classes);
 
   // Parsing functionality.
@@ -152,7 +152,7 @@
     uint8_t* Get() { return storage_.get(); }
 
    private:
-    std::unique_ptr<uint8_t> storage_;
+    std::unique_ptr<uint8_t[]> storage_;
     uint8_t* ptr_current_;
     uint8_t* ptr_end_;
   };
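
Note on the storage_ change above: switching from std::unique_ptr<uint8_t> to std::unique_ptr<uint8_t[]> makes the deleter match the array allocation. A small standalone illustration (names are illustrative, not from the patch):

    #include <memory>

    void AllocateProfileBuffer() {
      // With std::unique_ptr<uint8_t> (the old declaration), destruction would
      // call plain `delete` on memory obtained with `new[]`, which is undefined
      // behaviour. The array specialization below calls `delete[]` instead.
      std::unique_ptr<uint8_t[]> storage(new uint8_t[64]);
      storage[0] = 0;  // operator[] is also only available on the array form
    }
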
diff --git a/runtime/jni_internal.cc b/runtime/jni_internal.cc
index 0217a67..01a2ad8 100644
--- a/runtime/jni_internal.cc
+++ b/runtime/jni_internal.cc
@@ -157,14 +157,14 @@
     ThrowNoSuchMethodError(soa, c, name, sig, is_static ? "static" : "non-static");
     return nullptr;
   }
-  return soa.EncodeMethod(method);
+  return jni::EncodeArtMethod(method);
 }
 
 static ObjPtr<mirror::ClassLoader> GetClassLoader(const ScopedObjectAccess& soa)
     REQUIRES_SHARED(Locks::mutator_lock_) {
   ArtMethod* method = soa.Self()->GetCurrentMethod(nullptr);
   // If we are running Runtime.nativeLoad, use the overriding ClassLoader it set.
-  if (method == soa.DecodeMethod(WellKnownClasses::java_lang_Runtime_nativeLoad)) {
+  if (method == jni::DecodeArtMethod(WellKnownClasses::java_lang_Runtime_nativeLoad)) {
     return soa.Decode<mirror::ClassLoader>(soa.Self()->GetClassLoaderOverride());
   }
   // If we have a method, use its ClassLoader for context.
@@ -235,7 +235,7 @@
                                    sig, name, c->GetDescriptor(&temp));
     return nullptr;
   }
-  return soa.EncodeField(field);
+  return jni::EncodeArtField(field);
 }
 
 static void ThrowAIOOBE(ScopedObjectAccess& soa, mirror::Array* array, jsize start,
@@ -368,7 +368,7 @@
   static jmethodID FromReflectedMethod(JNIEnv* env, jobject jlr_method) {
     CHECK_NON_NULL_ARGUMENT(jlr_method);
     ScopedObjectAccess soa(env);
-    return soa.EncodeMethod(ArtMethod::FromReflectedMethod(soa, jlr_method));
+    return jni::EncodeArtMethod(ArtMethod::FromReflectedMethod(soa, jlr_method));
   }
 
   static jfieldID FromReflectedField(JNIEnv* env, jobject jlr_field) {
@@ -380,13 +380,13 @@
       return nullptr;
     }
     ObjPtr<mirror::Field> field = ObjPtr<mirror::Field>::DownCast(obj_field);
-    return soa.EncodeField(field->GetArtField());
+    return jni::EncodeArtField(field->GetArtField());
   }
 
   static jobject ToReflectedMethod(JNIEnv* env, jclass, jmethodID mid, jboolean) {
     CHECK_NON_NULL_ARGUMENT(mid);
     ScopedObjectAccess soa(env);
-    ArtMethod* m = soa.DecodeMethod(mid);
+    ArtMethod* m = jni::DecodeArtMethod(mid);
     mirror::Executable* method;
     DCHECK_EQ(Runtime::Current()->GetClassLinker()->GetImagePointerSize(), kRuntimePointerSize);
     DCHECK(!Runtime::Current()->IsActiveTransaction());
@@ -401,7 +401,7 @@
   static jobject ToReflectedField(JNIEnv* env, jclass, jfieldID fid, jboolean) {
     CHECK_NON_NULL_ARGUMENT(fid);
     ScopedObjectAccess soa(env);
-    ArtField* f = soa.DecodeField(fid);
+    ArtField* f = jni::DecodeArtField(fid);
     return soa.AddLocalReference<jobject>(
         mirror::Field::CreateFromArtField<kRuntimePointerSize>(soa.Self(), f, true));
   }
@@ -631,8 +631,8 @@
     }
     if (c->IsStringClass()) {
       // Replace calls to String.<init> with equivalent StringFactory call.
-      jmethodID sf_mid = soa.EncodeMethod(
-          WellKnownClasses::StringInitToStringFactory(soa.DecodeMethod(mid)));
+      jmethodID sf_mid = jni::EncodeArtMethod(
+          WellKnownClasses::StringInitToStringFactory(jni::DecodeArtMethod(mid)));
       return CallStaticObjectMethodV(env, WellKnownClasses::java_lang_StringFactory, sf_mid, args);
     }
     ObjPtr<mirror::Object> result = c->AllocObject(soa.Self());
@@ -658,8 +658,8 @@
     }
     if (c->IsStringClass()) {
       // Replace calls to String.<init> with equivalent StringFactory call.
-      jmethodID sf_mid = soa.EncodeMethod(
-          WellKnownClasses::StringInitToStringFactory(soa.DecodeMethod(mid)));
+      jmethodID sf_mid = jni::EncodeArtMethod(
+          WellKnownClasses::StringInitToStringFactory(jni::DecodeArtMethod(mid)));
       return CallStaticObjectMethodA(env, WellKnownClasses::java_lang_StringFactory, sf_mid, args);
     }
     ObjPtr<mirror::Object> result = c->AllocObject(soa.Self());
@@ -1237,14 +1237,14 @@
     CHECK_NON_NULL_ARGUMENT(fid);
     ScopedObjectAccess soa(env);
     ObjPtr<mirror::Object> o = soa.Decode<mirror::Object>(obj);
-    ArtField* f = soa.DecodeField(fid);
+    ArtField* f = jni::DecodeArtField(fid);
     return soa.AddLocalReference<jobject>(f->GetObject(o));
   }
 
   static jobject GetStaticObjectField(JNIEnv* env, jclass, jfieldID fid) {
     CHECK_NON_NULL_ARGUMENT(fid);
     ScopedObjectAccess soa(env);
-    ArtField* f = soa.DecodeField(fid);
+    ArtField* f = jni::DecodeArtField(fid);
     return soa.AddLocalReference<jobject>(f->GetObject(f->GetDeclaringClass()));
   }
 
@@ -1254,7 +1254,7 @@
     ScopedObjectAccess soa(env);
     ObjPtr<mirror::Object> o = soa.Decode<mirror::Object>(java_object);
     ObjPtr<mirror::Object> v = soa.Decode<mirror::Object>(java_value);
-    ArtField* f = soa.DecodeField(fid);
+    ArtField* f = jni::DecodeArtField(fid);
     f->SetObject<false>(o, v);
   }
 
@@ -1262,7 +1262,7 @@
     CHECK_NON_NULL_ARGUMENT_RETURN_VOID(fid);
     ScopedObjectAccess soa(env);
     ObjPtr<mirror::Object> v = soa.Decode<mirror::Object>(java_value);
-    ArtField* f = soa.DecodeField(fid);
+    ArtField* f = jni::DecodeArtField(fid);
     f->SetObject<false>(f->GetDeclaringClass(), v);
   }
 
@@ -1271,13 +1271,13 @@
   CHECK_NON_NULL_ARGUMENT_RETURN_ZERO(fid); \
   ScopedObjectAccess soa(env); \
   ObjPtr<mirror::Object> o = soa.Decode<mirror::Object>(instance); \
-  ArtField* f = soa.DecodeField(fid); \
+  ArtField* f = jni::DecodeArtField(fid); \
   return f->Get ##fn (o)
 
 #define GET_STATIC_PRIMITIVE_FIELD(fn) \
   CHECK_NON_NULL_ARGUMENT_RETURN_ZERO(fid); \
   ScopedObjectAccess soa(env); \
-  ArtField* f = soa.DecodeField(fid); \
+  ArtField* f = jni::DecodeArtField(fid); \
   return f->Get ##fn (f->GetDeclaringClass())
 
 #define SET_PRIMITIVE_FIELD(fn, instance, value) \
@@ -1285,13 +1285,13 @@
   CHECK_NON_NULL_ARGUMENT_RETURN_VOID(fid); \
   ScopedObjectAccess soa(env); \
   ObjPtr<mirror::Object> o = soa.Decode<mirror::Object>(instance); \
-  ArtField* f = soa.DecodeField(fid); \
+  ArtField* f = jni::DecodeArtField(fid); \
   f->Set ##fn <false>(o, value)
 
 #define SET_STATIC_PRIMITIVE_FIELD(fn, value) \
   CHECK_NON_NULL_ARGUMENT_RETURN_VOID(fid); \
   ScopedObjectAccess soa(env); \
-  ArtField* f = soa.DecodeField(fid); \
+  ArtField* f = jni::DecodeArtField(fid); \
   f->Set ##fn <false>(f->GetDeclaringClass(), value)
 
   static jboolean GetBooleanField(JNIEnv* env, jobject obj, jfieldID fid) {
diff --git a/runtime/jni_internal.h b/runtime/jni_internal.h
index b829934..b3837c4 100644
--- a/runtime/jni_internal.h
+++ b/runtime/jni_internal.h
@@ -20,6 +20,8 @@
 #include <jni.h>
 #include <iosfwd>
 
+#include "base/macros.h"
+
 #ifndef NATIVE_METHOD
 #define NATIVE_METHOD(className, functionName, signature) \
   { #functionName, signature, reinterpret_cast<void*>(className ## _ ## functionName) }
@@ -36,6 +38,9 @@
 
 namespace art {
 
+class ArtField;
+class ArtMethod;
+
 const JNINativeInterface* GetJniNativeInterface();
 const JNINativeInterface* GetRuntimeShutdownNativeInterface();
 
@@ -46,6 +51,29 @@
 
 int ThrowNewException(JNIEnv* env, jclass exception_class, const char* msg, jobject cause);
 
+namespace jni {
+
+ALWAYS_INLINE
+static inline ArtField* DecodeArtField(jfieldID fid) {
+  return reinterpret_cast<ArtField*>(fid);
+}
+
+ALWAYS_INLINE
+static inline jfieldID EncodeArtField(ArtField* field) {
+  return reinterpret_cast<jfieldID>(field);
+}
+
+ALWAYS_INLINE
+static inline jmethodID EncodeArtMethod(ArtMethod* art_method) {
+  return reinterpret_cast<jmethodID>(art_method);
+}
+
+ALWAYS_INLINE
+static inline ArtMethod* DecodeArtMethod(jmethodID method_id) {
+  return reinterpret_cast<ArtMethod*>(method_id);
+}
+
+}  // namespace jni
 }  // namespace art
 
 std::ostream& operator<<(std::ostream& os, const jobjectRefType& rhs);
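
Note on the new art::jni helpers above: they make explicit that jfieldID/jmethodID are reinterpreted ArtField*/ArtMethod* pointers, replacing the ScopedObjectAccess Encode/Decode members used elsewhere in this patch. A hedged usage sketch, assuming jni_internal.h is included and an ArtField* is already in hand:

    // Illustrative wrappers only; the lookup that produced `field` is assumed.
    jfieldID ToFieldId(art::ArtField* field) {
      return art::jni::EncodeArtField(field);   // reinterpret_cast under the hood
    }

    art::ArtField* FromFieldId(jfieldID fid) {
      return art::jni::DecodeArtField(fid);     // inverse of EncodeArtField
    }
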
diff --git a/runtime/jni_internal_test.cc b/runtime/jni_internal_test.cc
index e990935..a421c34 100644
--- a/runtime/jni_internal_test.cc
+++ b/runtime/jni_internal_test.cc
@@ -679,12 +679,8 @@
   ASSERT_TRUE(env_->IsInstanceOf(o, c));
   // ...whose fields haven't been initialized because
   // we didn't call a constructor.
-  if (art::mirror::kUseStringCompression) {
-    // Zero-length string is compressed, so the length internally will be -(1 << 31).
-    ASSERT_EQ(-2147483648, env_->GetIntField(o, env_->GetFieldID(c, "count", "I")));
-  } else {
-    ASSERT_EQ(0, env_->GetIntField(o, env_->GetFieldID(c, "count", "I")));
-  }
+  // Even with string compression, the empty string has `count == 0`.
+  ASSERT_EQ(0, env_->GetIntField(o, env_->GetFieldID(c, "count", "I")));
 }
 
 TEST_F(JniInternalTest, GetVersion) {
@@ -895,11 +891,12 @@
   // Make sure we can actually use it.
   jstring s = env_->NewStringUTF("poop");
   if (mirror::kUseStringCompression) {
-    // Negative because s is compressed (first bit is 1)
-    ASSERT_EQ(-2147483644, env_->GetIntField(s, fid2));
+    ASSERT_EQ(mirror::String::GetFlaggedCount(4, /* compressible */ true),
+              env_->GetIntField(s, fid2));
     // Create incompressible string
     jstring s_16 = env_->NewStringUTF("\u0444\u0444");
-    ASSERT_EQ(2, env_->GetIntField(s_16, fid2));
+    ASSERT_EQ(mirror::String::GetFlaggedCount(2, /* compressible */ false),
+              env_->GetIntField(s_16, fid2));
   } else {
     ASSERT_EQ(4, env_->GetIntField(s, fid2));
   }
diff --git a/runtime/lock_word.h b/runtime/lock_word.h
index 538b6eb..2f2565b 100644
--- a/runtime/lock_word.h
+++ b/runtime/lock_word.h
@@ -61,7 +61,7 @@
  */
 class LockWord {
  public:
-  enum SizeShiftsAndMasks {  // private marker to avoid generate-operator-out.py from processing.
+  enum SizeShiftsAndMasks : uint32_t {  // private marker to avoid generate-operator-out.py from processing.
     // Number of bits to encode the state, currently just fat or thin/unlocked or hash code.
     kStateSize = 2,
     kReadBarrierStateSize = 1,
@@ -91,6 +91,8 @@
     kStateFat = 1,
     kStateHash = 2,
     kStateForwardingAddress = 3,
+    kStateForwardingAddressShifted = kStateForwardingAddress << kStateShift,
+    kStateForwardingAddressOverflow = (1 + kStateMask - kStateForwardingAddress) << kStateShift,
 
     // Read barrier bit.
     kReadBarrierStateShift = kThinLockCountSize + kThinLockCountShift,
@@ -140,7 +142,7 @@
 
   static LockWord FromForwardingAddress(size_t target) {
     DCHECK_ALIGNED(target, (1 << kStateSize));
-    return LockWord((target >> kForwardingAddressShift) | (kStateForwardingAddress << kStateShift));
+    return LockWord((target >> kForwardingAddressShift) | kStateForwardingAddressShifted);
   }
 
   static LockWord FromHashCode(uint32_t hash_code, uint32_t gc_state) {
@@ -202,6 +204,8 @@
 
   void SetReadBarrierState(uint32_t rb_state) {
     DCHECK_EQ(rb_state & ~kReadBarrierStateMask, 0U);
+    DCHECK(rb_state == ReadBarrier::WhiteState() ||
+           rb_state == ReadBarrier::GrayState()) << rb_state;
     DCHECK_NE(static_cast<uint32_t>(GetState()), static_cast<uint32_t>(kForwardingAddress));
     // Clear and or the bits.
     value_ &= ~(kReadBarrierStateMask << kReadBarrierStateShift);
@@ -256,6 +260,14 @@
   LockWord();
 
   explicit LockWord(uint32_t val) : value_(val) {
+    // Make sure adding the overflow causes an overflow.
+    constexpr uint64_t overflow = static_cast<uint64_t>(kStateForwardingAddressShifted) +
+        static_cast<uint64_t>(kStateForwardingAddressOverflow);
+    constexpr bool is_larger = overflow > static_cast<uint64_t>(0xFFFFFFFF);
+    static_assert(is_larger, "should have overflowed");
+    static_assert(
+        (~kStateForwardingAddress & kStateMask) == 0,
+        "READ_BARRIER_MARK_REG relies on the forwarding address state being all one bits");
     CheckReadBarrierState();
   }
 
@@ -270,9 +282,8 @@
       if (!kUseReadBarrier) {
         DCHECK_EQ(rb_state, 0U);
       } else {
-        DCHECK(rb_state == ReadBarrier::white_ptr_ ||
-               rb_state == ReadBarrier::gray_ptr_ ||
-               rb_state == ReadBarrier::black_ptr_) << rb_state;
+        DCHECK(rb_state == ReadBarrier::WhiteState() ||
+               rb_state == ReadBarrier::GrayState()) << rb_state;
       }
     }
   }
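
Note on the new LockWord constants above: kStateForwardingAddressShifted places the forwarding-address state in the top state bits, and the constructor statically checks that adding kStateForwardingAddressOverflow wraps past 2^32. A worked example of the arithmetic, assuming the state occupies the two most significant bits (kStateShift == 30, kStateMask == 3); those values are an assumption, not shown in this hunk:

    #include <cstdint>

    // Assumed values, for illustration only.
    constexpr uint32_t kStateShift = 30;
    constexpr uint32_t kStateMask = 3;
    constexpr uint32_t kStateForwardingAddress = 3;

    constexpr uint32_t kShifted  = kStateForwardingAddress << kStateShift;                    // 0xC0000000
    constexpr uint32_t kOverflow = (1 + kStateMask - kStateForwardingAddress) << kStateShift;  // 0x40000000

    // 0xC0000000 + 0x40000000 == 0x100000000 does not fit in 32 bits, so adding
    // kOverflow to a forwarding-address lock word always carries out of the word,
    // which is exactly what the static_assert in the constructor verifies.
    static_assert(uint64_t{kShifted} + uint64_t{kOverflow} > 0xFFFFFFFFu, "must overflow");
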
diff --git a/runtime/method_handles-inl.h b/runtime/method_handles-inl.h
index b488133..1240792 100644
--- a/runtime/method_handles-inl.h
+++ b/runtime/method_handles-inl.h
@@ -31,130 +31,103 @@
 
 namespace art {
 
-// Assigns |type| to the primitive type associated with |dst_class|. Returns
-// true iff. |dst_class| was a boxed type (Integer, Long etc.), false otherwise.
-REQUIRES_SHARED(Locks::mutator_lock_)
-static inline bool GetPrimitiveType(ObjPtr<mirror::Class> dst_class, Primitive::Type* type) {
-  if (dst_class->DescriptorEquals("Ljava/lang/Boolean;")) {
-    (*type) = Primitive::kPrimBoolean;
+inline bool ConvertArgumentValue(Handle<mirror::MethodType> callsite_type,
+                                 Handle<mirror::MethodType> callee_type,
+                                 int index,
+                                 JValue* value) REQUIRES_SHARED(Locks::mutator_lock_) {
+  ObjPtr<mirror::Class> from_class(callsite_type->GetPTypes()->GetWithoutChecks(index));
+  ObjPtr<mirror::Class> to_class(callee_type->GetPTypes()->GetWithoutChecks(index));
+  if (from_class == to_class) {
     return true;
-  } else if (dst_class->DescriptorEquals("Ljava/lang/Byte;")) {
-    (*type) = Primitive::kPrimByte;
-    return true;
-  } else if (dst_class->DescriptorEquals("Ljava/lang/Character;")) {
-    (*type) = Primitive::kPrimChar;
-    return true;
-  } else if (dst_class->DescriptorEquals("Ljava/lang/Float;")) {
-    (*type) = Primitive::kPrimFloat;
-    return true;
-  } else if (dst_class->DescriptorEquals("Ljava/lang/Double;")) {
-    (*type) = Primitive::kPrimDouble;
-    return true;
-  } else if (dst_class->DescriptorEquals("Ljava/lang/Integer;")) {
-    (*type) = Primitive::kPrimInt;
-    return true;
-  } else if (dst_class->DescriptorEquals("Ljava/lang/Long;")) {
-    (*type) = Primitive::kPrimLong;
-    return true;
-  } else if (dst_class->DescriptorEquals("Ljava/lang/Short;")) {
-    (*type) = Primitive::kPrimShort;
+  }
+
+  // |value| may contain a bare heap pointer which is generally
+  // unsafe. ConvertJValueCommon() saves |value|, |from_class|, and
+  // |to_class| to Handles where necessary to avoid issues if the heap
+  // changes.
+  if (ConvertJValueCommon(callsite_type, callee_type, from_class, to_class, value)) {
+    DCHECK(!Thread::Current()->IsExceptionPending());
     return true;
   } else {
+    DCHECK(Thread::Current()->IsExceptionPending());
+    value->SetJ(0);
     return false;
   }
 }
 
-// A convenience class that allows for iteration through a list of
-// input argument registers |arg| for non-range invokes or a list of
-// consecutive registers starting with a given based for range
-// invokes.
-template <bool is_range> class ArgIterator {
- public:
-  ArgIterator(size_t first_src_reg,
-              const uint32_t (&arg)[Instruction::kMaxVarArgRegs]) :
-      first_src_reg_(first_src_reg),
-      arg_(arg),
-      arg_index_(0) {
+inline bool ConvertReturnValue(Handle<mirror::MethodType> callsite_type,
+                               Handle<mirror::MethodType> callee_type,
+                               JValue* value)  REQUIRES_SHARED(Locks::mutator_lock_) {
+  ObjPtr<mirror::Class> from_class(callee_type->GetRType());
+  ObjPtr<mirror::Class> to_class(callsite_type->GetRType());
+  if (to_class->GetPrimitiveType() == Primitive::kPrimVoid || from_class == to_class) {
+    return true;
   }
 
-  uint32_t Next() {
-    const uint32_t next = (is_range ? first_src_reg_ + arg_index_ : arg_[arg_index_]);
-    ++arg_index_;
-
-    return next;
-  }
-
-  uint32_t NextPair() {
-    const uint32_t next = (is_range ? first_src_reg_ + arg_index_ : arg_[arg_index_]);
-    arg_index_ += 2;
-
-    return next;
-  }
-
- private:
-  const size_t first_src_reg_;
-  const uint32_t (&arg_)[Instruction::kMaxVarArgRegs];
-  size_t arg_index_;
-};
-
-REQUIRES_SHARED(Locks::mutator_lock_)
-bool ConvertJValue(Handle<mirror::Class> from,
-                   Handle<mirror::Class> to,
-                   const JValue& from_value,
-                   JValue* to_value) {
-  const Primitive::Type from_type = from->GetPrimitiveType();
-  const Primitive::Type to_type = to->GetPrimitiveType();
-
-  // This method must be called only when the types don't match.
-  DCHECK(from.Get() != to.Get());
-
-  if ((from_type != Primitive::kPrimNot) && (to_type != Primitive::kPrimNot)) {
-    // Throws a ClassCastException if we're unable to convert a primitive value.
-    return ConvertPrimitiveValue(false, from_type, to_type, from_value, to_value);
-  } else if ((from_type == Primitive::kPrimNot) && (to_type == Primitive::kPrimNot)) {
-    // They're both reference types. If "from" is null, we can pass it
-    // through unchanged. If not, we must generate a cast exception if
-    // |to| is not assignable from the dynamic type of |ref|.
-    mirror::Object* const ref = from_value.GetL();
-    if (ref == nullptr || to->IsAssignableFrom(ref->GetClass())) {
-      to_value->SetL(ref);
-      return true;
-    } else {
-      ThrowClassCastException(to.Get(), ref->GetClass());
-      return false;
-    }
+  // |value| may contain a bare heap pointer which is generally
+  // unsafe. ConvertJValueCommon() saves |value|, |from_class|, and
+  // |to_class| to Handles where necessary to avoid issues if the heap
+  // changes.
+  if (ConvertJValueCommon(callsite_type, callee_type, from_class, to_class, value)) {
+    DCHECK(!Thread::Current()->IsExceptionPending());
+    return true;
   } else {
-    // Precisely one of the source or the destination are reference types.
-    // We must box or unbox.
-    if (to_type == Primitive::kPrimNot) {
-      // The target type is a reference, we must box.
-      Primitive::Type type;
-      // TODO(narayan): This is a CHECK for now. There might be a few corner cases
-      // here that we might not have handled yet. For exmple, if |to| is java/lang/Number;,
-      // we will need to box this "naturally".
-      CHECK(GetPrimitiveType(to.Get(), &type));
-      // First perform a primitive conversion to the unboxed equivalent of the target,
-      // if necessary. This should be for the rarer cases like (int->Long) etc.
-      if (UNLIKELY(from_type != type)) {
-         if (!ConvertPrimitiveValue(false, from_type, type, from_value, to_value)) {
-           return false;
-         }
+    DCHECK(Thread::Current()->IsExceptionPending());
+    value->SetJ(0);
+    return false;
+  }
+}
+
+template <typename G, typename S>
+bool PerformConversions(Thread* self,
+                        Handle<mirror::MethodType> callsite_type,
+                        Handle<mirror::MethodType> callee_type,
+                        G* getter,
+                        S* setter,
+                        int32_t num_conversions) REQUIRES_SHARED(Locks::mutator_lock_) {
+  StackHandleScope<2> hs(self);
+  Handle<mirror::ObjectArray<mirror::Class>> from_types(hs.NewHandle(callsite_type->GetPTypes()));
+  Handle<mirror::ObjectArray<mirror::Class>> to_types(hs.NewHandle(callee_type->GetPTypes()));
+
+  for (int32_t i = 0; i < num_conversions; ++i) {
+    ObjPtr<mirror::Class> from(from_types->GetWithoutChecks(i));
+    ObjPtr<mirror::Class> to(to_types->GetWithoutChecks(i));
+    const Primitive::Type from_type = from_types->GetWithoutChecks(i)->GetPrimitiveType();
+    const Primitive::Type to_type = to_types->GetWithoutChecks(i)->GetPrimitiveType();
+    if (from == to) {
+      // Easy case - the types are identical. Nothing left to do except to pass
+      // the arguments along verbatim.
+      if (Primitive::Is64BitType(from_type)) {
+        setter->SetLong(getter->GetLong());
+      } else if (from_type == Primitive::kPrimNot) {
+        setter->SetReference(getter->GetReference());
       } else {
-        *to_value = from_value;
+        setter->Set(getter->Get());
+      }
+    } else {
+      JValue value;
+
+      if (Primitive::Is64BitType(from_type)) {
+        value.SetJ(getter->GetLong());
+      } else if (from_type == Primitive::kPrimNot) {
+        value.SetL(getter->GetReference());
+      } else {
+        value.SetI(getter->Get());
       }
 
-      // Then perform the actual boxing, and then set the reference.
-      ObjPtr<mirror::Object> boxed = BoxPrimitive(type, from_value);
-      to_value->SetL(boxed.Ptr());
-      return true;
-    } else {
-      // The target type is a primitive, we must unbox.
-      ObjPtr<mirror::Object> ref(from_value.GetL());
+      // Caveat emptor: ObjPtrs are not guaranteed to remain valid after this call.
+      if (!ConvertArgumentValue(callsite_type, callee_type, i, &value)) {
+        DCHECK(self->IsExceptionPending());
+        return false;
+      }
 
-      // Note that UnboxPrimitiveForResult already performs all of the type
-      // conversions that we want, based on |to|.
-      JValue unboxed_value;
-      return UnboxPrimitiveForResult(ref, to.Get(), to_value);
+      if (Primitive::Is64BitType(to_type)) {
+        setter->SetLong(value.GetJ());
+      } else if (to_type == Primitive::kPrimNot) {
+        setter->SetReference(value.GetL());
+      } else {
+        setter->Set(value.GetI());
+      }
     }
   }
 
@@ -169,10 +142,10 @@
                                             uint32_t first_src_reg,
                                             uint32_t first_dest_reg,
                                             const uint32_t (&arg)[Instruction::kMaxVarArgRegs],
-                                            ShadowFrame* callee_frame) {
-  StackHandleScope<4> hs(self);
-  Handle<mirror::ObjectArray<mirror::Class>> from_types(hs.NewHandle(callsite_type->GetPTypes()));
-  Handle<mirror::ObjectArray<mirror::Class>> to_types(hs.NewHandle(callee_type->GetPTypes()));
+                                            ShadowFrame* callee_frame)
+    REQUIRES_SHARED(Locks::mutator_lock_) {
+  ObjPtr<mirror::ObjectArray<mirror::Class>> from_types(callsite_type->GetPTypes());
+  ObjPtr<mirror::ObjectArray<mirror::Class>> to_types(callee_type->GetPTypes());
 
   const int32_t num_method_params = from_types->GetLength();
   if (to_types->GetLength() != num_method_params) {
@@ -180,89 +153,17 @@
     return false;
   }
 
-  ArgIterator<is_range> input_args(first_src_reg, arg);
-  size_t to_arg_index = 0;
-  MutableHandle<mirror::Class> from(hs.NewHandle<mirror::Class>(nullptr));
-  MutableHandle<mirror::Class> to(hs.NewHandle<mirror::Class>(nullptr));
-  for (int32_t i = 0; i < num_method_params; ++i) {
-    from.Assign(from_types->GetWithoutChecks(i));
-    to.Assign(to_types->GetWithoutChecks(i));
+  ShadowFrameGetter<is_range> getter(first_src_reg, arg, caller_frame);
+  ShadowFrameSetter setter(callee_frame, first_dest_reg);
 
-    const Primitive::Type from_type = from->GetPrimitiveType();
-    const Primitive::Type to_type = to->GetPrimitiveType();
-
-    // Easy case - the types are identical. Nothing left to do except to pass
-    // the arguments along verbatim.
-    if (from.Get() == to.Get()) {
-      interpreter::AssignRegister(callee_frame,
-                                  caller_frame,
-                                  first_dest_reg + to_arg_index,
-                                  input_args.Next());
-      ++to_arg_index;
-
-      // This is a wide argument, we must use the second half of the register
-      // pair as well.
-      if (Primitive::Is64BitType(from_type)) {
-        interpreter::AssignRegister(callee_frame,
-                                    caller_frame,
-                                    first_dest_reg + to_arg_index,
-                                    input_args.Next());
-        ++to_arg_index;
-      }
-
-      continue;
-    } else {
-      JValue from_value;
-      JValue to_value;
-
-      if (Primitive::Is64BitType(from_type)) {
-        from_value.SetJ(caller_frame.GetVRegLong(input_args.NextPair()));
-      } else if (from_type == Primitive::kPrimNot) {
-        from_value.SetL(caller_frame.GetVRegReference(input_args.Next()));
-      } else {
-        from_value.SetI(caller_frame.GetVReg(input_args.Next()));
-      }
-
-      if (!ConvertJValue(from, to, from_value, &to_value)) {
-        DCHECK(self->IsExceptionPending());
-        return false;
-      }
-
-      if (Primitive::Is64BitType(to_type)) {
-        callee_frame->SetVRegLong(first_dest_reg + to_arg_index, to_value.GetJ());
-        to_arg_index += 2;
-      } else if (to_type == Primitive::kPrimNot) {
-        callee_frame->SetVRegReference(first_dest_reg + to_arg_index, to_value.GetL());
-        ++to_arg_index;
-      } else {
-        callee_frame->SetVReg(first_dest_reg + to_arg_index, to_value.GetI());
-        ++to_arg_index;
-      }
-    }
-  }
-
-  return true;
+  return PerformConversions<ShadowFrameGetter<is_range>, ShadowFrameSetter>(self,
+                                                                            callsite_type,
+                                                                            callee_type,
+                                                                            &getter,
+                                                                            &setter,
+                                                                            num_method_params);
 }
 
-// Similar to |ConvertAndCopyArgumentsFromCallerFrame|, except that the
-// arguments are copied from an |EmulatedStackFrame|.
-template <bool is_range>
-bool ConvertAndCopyArgumentsFromEmulatedStackFrame(Thread* self,
-                                                   ObjPtr<mirror::Object> emulated_stack_frame,
-                                                   Handle<mirror::MethodType> callee_type,
-                                                   const uint32_t first_dest_reg,
-                                                   ShadowFrame* callee_frame) {
-  UNUSED(self);
-  UNUSED(emulated_stack_frame);
-  UNUSED(callee_type);
-  UNUSED(first_dest_reg);
-  UNUSED(callee_frame);
-
-  UNIMPLEMENTED(FATAL) << "ConvertAndCopyArgumentsFromEmulatedStackFrame is unimplemented";
-  return false;
-}
-
-
 }  // namespace art
 
 #endif  // ART_RUNTIME_METHOD_HANDLES_INL_H_
diff --git a/runtime/method_handles.cc b/runtime/method_handles.cc
new file mode 100644
index 0000000..3c22d7f
--- /dev/null
+++ b/runtime/method_handles.cc
@@ -0,0 +1,282 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "method_handles.h"
+
+#include "method_handles-inl.h"
+#include "jvalue.h"
+#include "jvalue-inl.h"
+#include "reflection.h"
+#include "reflection-inl.h"
+#include "well_known_classes.h"
+
+namespace art {
+
+namespace {
+
+#define PRIMITIVES_LIST(V) \
+  V(Primitive::kPrimBoolean, Boolean, Boolean, Z) \
+  V(Primitive::kPrimByte, Byte, Byte, B)          \
+  V(Primitive::kPrimChar, Char, Character, C)     \
+  V(Primitive::kPrimShort, Short, Short, S)       \
+  V(Primitive::kPrimInt, Int, Integer, I)         \
+  V(Primitive::kPrimLong, Long, Long, J)          \
+  V(Primitive::kPrimFloat, Float, Float, F)       \
+  V(Primitive::kPrimDouble, Double, Double, D)
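+
+// Each row of PRIMITIVES_LIST is V(primitive type, ArtField getter suffix,
+// boxed java.lang class name, JValue setter shorthand); for example, the
+// kPrimInt row yields GetInt, java.lang.Integer and SetI respectively.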
+
+// Assigns |type| to the primitive type associated with |klass|. Returns
+// true iff. |klass| was a boxed type (Integer, Long etc.), false otherwise.
+bool GetUnboxedPrimitiveType(ObjPtr<mirror::Class> klass, Primitive::Type* type)
+    REQUIRES_SHARED(Locks::mutator_lock_) {
+  ScopedAssertNoThreadSuspension ants(__FUNCTION__);
+#define LOOKUP_PRIMITIVE(primitive, _, __, ___)                         \
+  if (klass->DescriptorEquals(Primitive::BoxedDescriptor(primitive))) { \
+    *type = primitive;                                                  \
+    return true;                                                        \
+  }
+
+  PRIMITIVES_LIST(LOOKUP_PRIMITIVE);
+#undef LOOKUP_PRIMITIVE
+  return false;
+}
+
+ObjPtr<mirror::Class> GetBoxedPrimitiveClass(Primitive::Type type)
+    REQUIRES_SHARED(Locks::mutator_lock_) {
+  ScopedAssertNoThreadSuspension ants(__FUNCTION__);
+  jmethodID m = nullptr;
+  switch (type) {
+#define CASE_PRIMITIVE(primitive, _, java_name, __)              \
+    case primitive:                                              \
+      m = WellKnownClasses::java_lang_ ## java_name ## _valueOf; \
+      break;
+    PRIMITIVES_LIST(CASE_PRIMITIVE);
+#undef CASE_PRIMITIVE
+    case Primitive::Type::kPrimNot:
+    case Primitive::Type::kPrimVoid:
+      return nullptr;
+  }
+  return jni::DecodeArtMethod(m)->GetDeclaringClass();
+}
+
+bool GetUnboxedTypeAndValue(ObjPtr<mirror::Object> o, Primitive::Type* type, JValue* value)
+    REQUIRES_SHARED(Locks::mutator_lock_) {
+  ScopedAssertNoThreadSuspension ants(__FUNCTION__);
+  ObjPtr<mirror::Class> klass = o->GetClass();
+  ArtField* primitive_field = &klass->GetIFieldsPtr()->At(0);
+#define CASE_PRIMITIVE(primitive, abbrev, _, shorthand)         \
+  if (klass == GetBoxedPrimitiveClass(primitive)) {             \
+    *type = primitive;                                          \
+    value->Set ## shorthand(primitive_field->Get ## abbrev(o)); \
+    return true;                                                \
+  }
+  PRIMITIVES_LIST(CASE_PRIMITIVE)
+#undef CASE_PRIMITIVE
+  return false;
+}
+
+inline bool IsReferenceType(Primitive::Type type) {
+  return type == Primitive::kPrimNot;
+}
+
+inline bool IsPrimitiveType(Primitive::Type type) {
+  return !IsReferenceType(type);
+}
+
+}  // namespace
+
+bool IsParameterTypeConvertible(ObjPtr<mirror::Class> from, ObjPtr<mirror::Class> to)
+    REQUIRES_SHARED(Locks::mutator_lock_) {
+  // This function returns true if there's any conceivable conversion
+  // between |from| and |to|. It's expected this method will be used
+  // to determine if a WrongMethodTypeException should be raised. The
+  // decision logic follows the documentation for MethodType.asType().
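+  //
+  // For example: int -> long is convertible (widening), int -> Integer and
+  // int -> Number are convertible (boxing to an assignable reference type),
+  // Integer -> long is convertible (unboxing followed by widening), while
+  // long -> int is not, since it would require a narrowing conversion.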
+  if (from == to) {
+    return true;
+  }
+
+  Primitive::Type from_primitive = from->GetPrimitiveType();
+  Primitive::Type to_primitive = to->GetPrimitiveType();
+  DCHECK(from_primitive != Primitive::Type::kPrimVoid);
+  DCHECK(to_primitive != Primitive::Type::kPrimVoid);
+
+  // If |to| and |from| are references.
+  if (IsReferenceType(from_primitive) && IsReferenceType(to_primitive)) {
+    // Assignability is determined during parameter conversion when
+    // invoking the associated method handle.
+    return true;
+  }
+
+  // If |to| and |from| are primitives and a widening conversion exists.
+  if (Primitive::IsWidenable(from_primitive, to_primitive)) {
+    return true;
+  }
+
+  // If |to| is a reference and |from| is a primitive, then boxing conversion.
+  if (IsReferenceType(to_primitive) && IsPrimitiveType(from_primitive)) {
+    return to->IsAssignableFrom(GetBoxedPrimitiveClass(from_primitive));
+  }
+
+  // If |from| is a reference and |to| is a primitive, then unboxing conversion.
+  if (IsPrimitiveType(to_primitive) && IsReferenceType(from_primitive)) {
+    if (from->DescriptorEquals("Ljava/lang/Object;")) {
+      // Object might be converted into a primitive during unboxing.
+      return true;
+    } else if (Primitive::IsNumericType(to_primitive) &&
+               from->DescriptorEquals("Ljava/lang/Number;")) {
+      // Number might be unboxed into any of the number primitive types.
+      return true;
+    }
+    Primitive::Type unboxed_type;
+    if (GetUnboxedPrimitiveType(from, &unboxed_type)) {
+      if (unboxed_type == to_primitive) {
+        // Straightforward unboxing conversion such as Boolean => boolean.
+        return true;
+      } else {
+        // Check if widening operations for numeric primitives would work,
+        // such as Byte => byte => long.
+        return Primitive::IsWidenable(unboxed_type, to_primitive);
+      }
+    }
+  }
+
+  return false;
+}
+
+bool IsReturnTypeConvertible(ObjPtr<mirror::Class> from, ObjPtr<mirror::Class> to)
+    REQUIRES_SHARED(Locks::mutator_lock_) {
+  if (to->GetPrimitiveType() == Primitive::Type::kPrimVoid) {
+    // Result will be ignored.
+    return true;
+  } else if (from->GetPrimitiveType() == Primitive::Type::kPrimVoid) {
+    // Returned value will be 0 / null.
+    return true;
+  } else {
+    // Otherwise apply usual parameter conversion rules.
+    return IsParameterTypeConvertible(from, to);
+  }
+}
+
+bool ConvertJValueCommon(
+    Handle<mirror::MethodType> callsite_type,
+    Handle<mirror::MethodType> callee_type,
+    ObjPtr<mirror::Class> from,
+    ObjPtr<mirror::Class> to,
+    JValue* value) {
+  // The reader may be concerned about the safety of the heap object
+  // that may be in |value|. There is only one case where allocation
+  // is obviously needed and that's for boxing. However, in the case
+  // of boxing |value| contains a non-reference type.
+
+  const Primitive::Type from_type = from->GetPrimitiveType();
+  const Primitive::Type to_type = to->GetPrimitiveType();
+
+  // Put incoming value into |src_value| and set return value to 0.
+  // Errors and conversions from void require the return value to be 0.
+  const JValue src_value(*value);
+  value->SetJ(0);
+
+  // A conversion from void sets the result to zero.
+  if (from_type == Primitive::kPrimVoid) {
+    return true;
+  }
+
+  // This method must be called only when the types don't match.
+  DCHECK(from != to);
+
+  if (IsPrimitiveType(from_type) && IsPrimitiveType(to_type)) {
+    // The source and target types are both primitives.
+    if (UNLIKELY(!ConvertPrimitiveValueNoThrow(from_type, to_type, src_value, value))) {
+      ThrowWrongMethodTypeException(callee_type.Get(), callsite_type.Get());
+      return false;
+    }
+    return true;
+  } else if (IsReferenceType(from_type) && IsReferenceType(to_type)) {
+    // They're both reference types. If "from" is null, we can pass it
+    // through unchanged. If not, we must generate a cast exception if
+    // |to| is not assignable from the dynamic type of |ref|.
+    //
+    // Playing it safe with StackHandleScope here, not expecting any allocation
+    // in mirror::Class::IsAssignableFrom().
+    StackHandleScope<2> hs(Thread::Current());
+    Handle<mirror::Class> h_to(hs.NewHandle(to));
+    Handle<mirror::Object> h_obj(hs.NewHandle(src_value.GetL()));
+    if (h_obj.Get() != nullptr && !to->IsAssignableFrom(h_obj->GetClass())) {
+      ThrowClassCastException(h_to.Get(), h_obj->GetClass());
+      return false;
+    }
+    value->SetL(h_obj.Get());
+    return true;
+  } else if (IsReferenceType(to_type)) {
+    DCHECK(IsPrimitiveType(from_type));
+    // The source type is a primitive and the target type is a reference, so we must box.
+    // The target type may be a superclass of the boxed source type, for example,
+    // if the source type is int, its boxed type is java.lang.Integer, and the target
+    // type could be java.lang.Number.
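+    // In that case GetUnboxedPrimitiveType(to) fails below, but the boxed class
+    // of the source (java.lang.Integer for an int) is a subclass of the target,
+    // so the value is boxed to an Integer and stored as a reference.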
+    Primitive::Type type;
+    if (!GetUnboxedPrimitiveType(to, &type)) {
+      ObjPtr<mirror::Class> boxed_from_class = GetBoxedPrimitiveClass(from_type);
+      if (boxed_from_class->IsSubClass(to)) {
+        type = from_type;
+      } else {
+        ThrowWrongMethodTypeException(callee_type.Get(), callsite_type.Get());
+        return false;
+      }
+    }
+
+    if (UNLIKELY(from_type != type)) {
+      ThrowWrongMethodTypeException(callee_type.Get(), callsite_type.Get());
+      return false;
+    }
+
+    if (!ConvertPrimitiveValueNoThrow(from_type, type, src_value, value)) {
+      ThrowWrongMethodTypeException(callee_type.Get(), callsite_type.Get());
+      return false;
+    }
+
+    // Then perform the actual boxing, and then set the reference.
+    ObjPtr<mirror::Object> boxed = BoxPrimitive(type, src_value);
+    value->SetL(boxed.Ptr());
+    return true;
+  } else {
+    // The source type is a reference and the target type is a primitive, so we must unbox.
+    DCHECK(IsReferenceType(from_type));
+    DCHECK(IsPrimitiveType(to_type));
+
+    ObjPtr<mirror::Object> from_obj(src_value.GetL());
+    if (UNLIKELY(from_obj == nullptr)) {
+      ThrowNullPointerException(
+          StringPrintf("Expected to unbox a '%s' primitive type but was returned null",
+                       from->PrettyDescriptor().c_str()).c_str());
+      return false;
+    }
+
+    Primitive::Type unboxed_type;
+    JValue unboxed_value;
+    if (UNLIKELY(!GetUnboxedTypeAndValue(from_obj, &unboxed_type, &unboxed_value))) {
+      ThrowWrongMethodTypeException(callee_type.Get(), callsite_type.Get());
+      return false;
+    }
+
+    if (UNLIKELY(!ConvertPrimitiveValueNoThrow(unboxed_type, to_type, unboxed_value, value))) {
+      ThrowClassCastException(from, to);
+      return false;
+    }
+
+    return true;
+  }
+}
+
+}  // namespace art
diff --git a/runtime/method_handles.h b/runtime/method_handles.h
index 5175dce..54c772a 100644
--- a/runtime/method_handles.h
+++ b/runtime/method_handles.h
@@ -20,7 +20,10 @@
 #include <ostream>
 
 #include "dex_instruction.h"
+#include "handle.h"
 #include "jvalue.h"
+#include "mirror/class.h"
+#include "mirror/method_type.h"
 
 namespace art {
 
@@ -56,20 +59,102 @@
   return handle_kind <= kLastInvokeKind;
 }
 
-// Performs a single argument conversion from type |from| to a distinct
-// type |to|. Returns true on success, false otherwise.
-REQUIRES_SHARED(Locks::mutator_lock_)
-bool ConvertJValue(Handle<mirror::Class> from,
-                   Handle<mirror::Class> to,
-                   const JValue& from_value,
-                   JValue* to_value) ALWAYS_INLINE;
+// Returns true if there is a possible conversion from |from| to |to|
+// for a MethodHandle parameter.
+bool IsParameterTypeConvertible(ObjPtr<mirror::Class> from,
+                                ObjPtr<mirror::Class> to);
+
+// Returns true if there is a possible conversion from |from| to |to|
+// for the return type of a MethodHandle.
+bool IsReturnTypeConvertible(ObjPtr<mirror::Class> from,
+                             ObjPtr<mirror::Class> to);
+
+// Performs a conversion from type |from| to a distinct type |to| as
+// part of the conversion of |callsite_type| to |callee_type|. The value to
+// be converted is in |value|. Returns true on success and updates
+// |value| with the converted value, false otherwise.
+bool ConvertJValueCommon(Handle<mirror::MethodType> callsite_type,
+                         Handle<mirror::MethodType> callee_type,
+                         ObjPtr<mirror::Class> from,
+                         ObjPtr<mirror::Class> to,
+                         JValue* value)
+    REQUIRES_SHARED(Locks::mutator_lock_);
+
+// Converts the value of the argument at position |index| from the type
+// used by |callsite_type| to the type expected by |callee_type|. |value|
+// represents the value to be converted. Returns true on success and
+// updates |value|, false otherwise.
+ALWAYS_INLINE bool ConvertArgumentValue(Handle<mirror::MethodType> callsite_type,
+                                        Handle<mirror::MethodType> callee_type,
+                                        int index,
+                                        JValue* value)
+    REQUIRES_SHARED(Locks::mutator_lock_);
+
+// Converts the return value from the return type yielded by
+// |callee_type| to the return type yielded by
+// |callsite_type|. |value| represents the value to be
+// converted. Returns true on success and updates |value|, false
+// otherwise.
+ALWAYS_INLINE bool ConvertReturnValue(Handle<mirror::MethodType> callsite_type,
+                                      Handle<mirror::MethodType> callee_type,
+                                      JValue* value)
+    REQUIRES_SHARED(Locks::mutator_lock_);
 
 // Perform argument conversions between |callsite_type| (the type of the
 // incoming arguments) and |callee_type| (the type of the method being
 // invoked). These include widening and narrowing conversions as well as
 // boxing and unboxing. Returns true on success, false on failure. A
 // pending exception will always be set on failure.
-template <bool is_range> REQUIRES_SHARED(Locks::mutator_lock_)
+//
+// The values to be converted are read from an input source (of type G)
+// that provides three methods:
+//
+// class G {
+//   // Used to read the next boolean/short/int or float value from the
+//   // source.
+//   uint32_t Get();
+//
+//   // Used to read the next reference value from the source.
+//   ObjPtr<mirror::Object> GetReference();
+//
+//   // Used to read the next double or long value from the source.
+//   int64_t GetLong();
+// }
+//
+// After conversion, the values are written to an output sink (of type S)
+// that provides three methods:
+//
+// class S {
+//   void Set(uint32_t);
+//   void SetReference(ObjPtr<mirror::Object>);
+//   void SetLong(int64_t);
+// }
+//
+// The semantics and usage of the Set methods are analogous to the getter
+// class.
+//
+// This method is instantiated in three different scenarios:
+// - <S = ShadowFrameSetter, G = ShadowFrameGetter> : copying from shadow
+//   frame to shadow frame, used in a regular polymorphic non-exact invoke.
+// - <S = EmulatedStackFrameAccessor, G = ShadowFrameGetter> : entering into
+//   a transformer method from a polymorphic invoke.
+// - <S = ShadowFrameSetter, G = EmulatedStackFrameAccessor> : entering into
+//   a regular polymorphic invoke from a transformer method.
+//
+// TODO(narayan): If we find that the instantiations of this function take
+// up too much space, we can make G / S abstract base classes that are
+// overridden by concrete classes.
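+//
+// A minimal usage sketch for the shadow-frame-to-shadow-frame case (the
+// surrounding names are illustrative only):
+//
+//   ShadowFrameGetter<is_range> getter(first_src_reg, arg, caller_frame);
+//   ShadowFrameSetter setter(callee_frame, first_dest_reg);
+//   if (!PerformConversions<ShadowFrameGetter<is_range>, ShadowFrameSetter>(
+//           self, callsite_type, callee_type, &getter, &setter, num_method_params)) {
+//     return false;  // A pending exception has been set.
+//   }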
+template <typename G, typename S>
+bool PerformConversions(Thread* self,
+                        Handle<mirror::MethodType> callsite_type,
+                        Handle<mirror::MethodType> callee_type,
+                        G* getter,
+                        S* setter,
+                        int32_t num_conversions) REQUIRES_SHARED(Locks::mutator_lock_);
+
+// A convenience wrapper around |PerformConversions|, for the case where
+// the setter and getter are both ShadowFrame based.
+template <bool is_range>
 bool ConvertAndCopyArgumentsFromCallerFrame(Thread* self,
                                             Handle<mirror::MethodType> callsite_type,
                                             Handle<mirror::MethodType> callee_type,
@@ -77,17 +162,83 @@
                                             uint32_t first_src_reg,
                                             uint32_t first_dest_reg,
                                             const uint32_t (&arg)[Instruction::kMaxVarArgRegs],
-                                            ShadowFrame* callee_frame);
+                                            ShadowFrame* callee_frame)
+    REQUIRES_SHARED(Locks::mutator_lock_);
 
-// Similar to |ConvertAndCopyArgumentsFromCallerFrame|, except that the
-// arguments are copied from an |EmulatedStackFrame|.
-template <bool is_range> REQUIRES_SHARED(Locks::mutator_lock_)
-bool ConvertAndCopyArgumentsFromEmulatedStackFrame(Thread* self,
-                                                   ObjPtr<mirror::Object> emulated_stack_frame,
-                                                   Handle<mirror::MethodType> callee_type,
-                                                   const uint32_t first_dest_reg,
-                                                   ShadowFrame* callee_frame);
+// A convenience class that allows for iteration through a list of
+// input argument registers |arg| for non-range invokes or a list of
+// consecutive registers starting with a given base for range
+// invokes.
+//
+// This is used to iterate over input arguments while performing standard
+// argument conversions.
+template <bool is_range> class ShadowFrameGetter {
+ public:
+  ShadowFrameGetter(size_t first_src_reg,
+                    const uint32_t (&arg)[Instruction::kMaxVarArgRegs],
+                    const ShadowFrame& shadow_frame) :
+      first_src_reg_(first_src_reg),
+      arg_(arg),
+      shadow_frame_(shadow_frame),
+      arg_index_(0) {
+  }
 
+  ALWAYS_INLINE uint32_t Get() REQUIRES_SHARED(Locks::mutator_lock_) {
+    const uint32_t next = (is_range ? first_src_reg_ + arg_index_ : arg_[arg_index_]);
+    ++arg_index_;
+
+    return shadow_frame_.GetVReg(next);
+  }
+
+  ALWAYS_INLINE int64_t GetLong() REQUIRES_SHARED(Locks::mutator_lock_) {
+    const uint32_t next = (is_range ? first_src_reg_ + arg_index_ : arg_[arg_index_]);
+    arg_index_ += 2;
+
+    return shadow_frame_.GetVRegLong(next);
+  }
+
+  ALWAYS_INLINE ObjPtr<mirror::Object> GetReference() REQUIRES_SHARED(Locks::mutator_lock_) {
+    const uint32_t next = (is_range ? first_src_reg_ + arg_index_ : arg_[arg_index_]);
+    ++arg_index_;
+
+    return shadow_frame_.GetVRegReference(next);
+  }
+
+ private:
+  const size_t first_src_reg_;
+  const uint32_t (&arg_)[Instruction::kMaxVarArgRegs];
+  const ShadowFrame& shadow_frame_;
+  size_t arg_index_;
+};
+
+// A convenience class that allows values to be written to a given shadow frame,
+// starting at location |first_dst_reg|.
+class ShadowFrameSetter {
+ public:
+  ShadowFrameSetter(ShadowFrame* shadow_frame,
+                    size_t first_dst_reg) :
+    shadow_frame_(shadow_frame),
+    arg_index_(first_dst_reg) {
+  }
+
+  ALWAYS_INLINE void Set(uint32_t value) REQUIRES_SHARED(Locks::mutator_lock_) {
+    shadow_frame_->SetVReg(arg_index_++, value);
+  }
+
+  ALWAYS_INLINE void SetReference(ObjPtr<mirror::Object> value)
+      REQUIRES_SHARED(Locks::mutator_lock_) {
+    shadow_frame_->SetVRegReference(arg_index_++, value.Ptr());
+  }
+
+  ALWAYS_INLINE void SetLong(int64_t value) REQUIRES_SHARED(Locks::mutator_lock_) {
+    shadow_frame_->SetVRegLong(arg_index_, value);
+    arg_index_ += 2;
+  }
+
+ private:
+  ShadowFrame* shadow_frame_;
+  size_t arg_index_;
+};
 
 }  // namespace art
 
diff --git a/runtime/mirror/class-inl.h b/runtime/mirror/class-inl.h
index 9992a9e..9a6d60e 100644
--- a/runtime/mirror/class-inl.h
+++ b/runtime/mirror/class-inl.h
@@ -526,20 +526,18 @@
 template<VerifyObjectFlags kVerifyFlags,
          ReadBarrierOption kReadBarrierOption>
 inline IfTable* Class::GetIfTable() {
-  return GetFieldObject<IfTable, kVerifyFlags, kReadBarrierOption>(
-      OFFSET_OF_OBJECT_MEMBER(Class, iftable_));
+  ObjPtr<IfTable> ret = GetFieldObject<IfTable, kVerifyFlags, kReadBarrierOption>(IfTableOffset());
+  DCHECK(ret != nullptr) << PrettyClass(this);
+  return ret.Ptr();
 }
 
 inline int32_t Class::GetIfTableCount() {
-  ObjPtr<IfTable> iftable = GetIfTable();
-  if (iftable == nullptr) {
-    return 0;
-  }
-  return iftable->Count();
+  return GetIfTable()->Count();
 }
 
 inline void Class::SetIfTable(ObjPtr<IfTable> new_iftable) {
-  SetFieldObject<false>(OFFSET_OF_OBJECT_MEMBER(Class, iftable_), new_iftable);
+  DCHECK(new_iftable != nullptr) << PrettyClass(this);
+  SetFieldObject<false>(IfTableOffset(), new_iftable);
 }
 
 inline LengthPrefixedArray<ArtField>* Class::GetIFieldsPtr() {
diff --git a/runtime/mirror/class.cc b/runtime/mirror/class.cc
index 6a357b3..db46027 100644
--- a/runtime/mirror/class.cc
+++ b/runtime/mirror/class.cc
@@ -18,6 +18,7 @@
 
 #include "art_field-inl.h"
 #include "art_method-inl.h"
+#include "class_ext.h"
 #include "class_linker-inl.h"
 #include "class_loader.h"
 #include "class-inl.h"
@@ -29,6 +30,7 @@
 #include "method.h"
 #include "object_array-inl.h"
 #include "object-inl.h"
+#include "object_lock.h"
 #include "runtime.h"
 #include "thread.h"
 #include "throwable.h"
@@ -58,12 +60,49 @@
   java_lang_Class_.VisitRootIfNonNull(visitor, RootInfo(kRootStickyClass));
 }
 
-inline void Class::SetVerifyError(ObjPtr<Object> error) {
-  CHECK(error != nullptr) << PrettyClass();
-  if (Runtime::Current()->IsActiveTransaction()) {
-    SetFieldObject<true>(OFFSET_OF_OBJECT_MEMBER(Class, verify_error_), error);
+ClassExt* Class::GetExtData() {
+  return GetFieldObject<ClassExt>(OFFSET_OF_OBJECT_MEMBER(Class, ext_data_));
+}
+
+ClassExt* Class::EnsureExtDataPresent(Thread* self) {
+  ObjPtr<ClassExt> existing(GetExtData());
+  if (!existing.IsNull()) {
+    return existing.Ptr();
+  }
+  StackHandleScope<3> hs(self);
+  // Handlerize 'this' since we are allocating here.
+  Handle<Class> h_this(hs.NewHandle(this));
+  // Clear exception so we can allocate.
+  Handle<Throwable> throwable(hs.NewHandle(self->GetException()));
+  self->ClearException();
+  // Allocate the ClassExt
+  Handle<ClassExt> new_ext(hs.NewHandle(ClassExt::Alloc(self)));
+  if (new_ext.Get() == nullptr) {
+    // OOM allocating the ClassExt.
+    // TODO Should we restore the suppressed exception?
+    self->AssertPendingOOMException();
+    return nullptr;
   } else {
-    SetFieldObject<false>(OFFSET_OF_OBJECT_MEMBER(Class, verify_error_), error);
+    MemberOffset ext_offset(OFFSET_OF_OBJECT_MEMBER(Class, ext_data_));
+    bool set;
+    // Set the ext_data_ field using CAS semantics.
+    if (Runtime::Current()->IsActiveTransaction()) {
+      set = h_this->CasFieldStrongSequentiallyConsistentObject<true>(ext_offset,
+                                                                     ObjPtr<ClassExt>(nullptr),
+                                                                     new_ext.Get());
+    } else {
+      set = h_this->CasFieldStrongSequentiallyConsistentObject<false>(ext_offset,
+                                                                      ObjPtr<ClassExt>(nullptr),
+                                                                      new_ext.Get());
+    }
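+    // If the CAS failed, another thread installed a ClassExt first; return
+    // that one instead of the instance allocated above.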
+    ObjPtr<ClassExt> ret(set ? new_ext.Get() : h_this->GetExtData());
+    DCHECK(!set || h_this->GetExtData() == new_ext.Get());
+    CHECK(!ret.IsNull());
+    // Restore the exception if there was one.
+    if (throwable.Get() != nullptr) {
+      self->SetException(throwable.Get());
+    }
+    return ret.Ptr();
   }
 }
 
@@ -95,10 +134,16 @@
       }
     }
 
-    // Remember the current exception.
-    CHECK(self->GetException() != nullptr);
-    h_this->SetVerifyError(self->GetException());
+    ObjPtr<ClassExt> ext(h_this->EnsureExtDataPresent(self));
+    if (!ext.IsNull()) {
+      self->AssertPendingException();
+      ext->SetVerifyError(self->GetException());
+    } else {
+      self->AssertPendingOOMException();
+    }
+    self->AssertPendingException();
   }
+
   static_assert(sizeof(Status) == sizeof(uint32_t), "Size of status not equal to uint32");
   if (Runtime::Current()->IsActiveTransaction()) {
     h_this->SetField32Volatile<true>(StatusOffset(), new_status);
diff --git a/runtime/mirror/class.h b/runtime/mirror/class.h
index 5793795..711914d 100644
--- a/runtime/mirror/class.h
+++ b/runtime/mirror/class.h
@@ -49,6 +49,7 @@
 
 namespace mirror {
 
+class ClassExt;
 class ClassLoader;
 class Constructor;
 class DexCache;
@@ -561,7 +562,7 @@
   // The size of java.lang.Class.class.
   static uint32_t ClassClassSize(PointerSize pointer_size) {
     // The number of vtable entries in java.lang.Class.
-    uint32_t vtable_entries = Object::kVTableLength + 72;
+    uint32_t vtable_entries = Object::kVTableLength + 73;
     return ComputeClassSize(true, vtable_entries, 0, 0, 4, 1, 0, pointer_size);
   }
 
@@ -680,6 +681,10 @@
     return MemberOffset(OFFSETOF_MEMBER(Class, dex_cache_));
   }
 
+  static MemberOffset IfTableOffset() {
+    return MemberOffset(OFFSETOF_MEMBER(Class, iftable_));
+  }
+
   enum {
     kDumpClassFullDetail = 1,
     kDumpClassClassLoader = (1 << 1),
@@ -1126,10 +1131,13 @@
 
   void SetClinitThreadId(pid_t new_clinit_thread_id) REQUIRES_SHARED(Locks::mutator_lock_);
 
-  Object* GetVerifyError() REQUIRES_SHARED(Locks::mutator_lock_) {
-    // DCHECK(IsErroneous());
-    return GetFieldObject<Class>(OFFSET_OF_OBJECT_MEMBER(Class, verify_error_));
-  }
+  ClassExt* GetExtData() REQUIRES_SHARED(Locks::mutator_lock_);
+
+  // Returns the ExtData for this class, allocating one if necessary. This should be the only way
+  // to force ext_data_ to be set. No functions are available for changing an already set ext_data_
+  // since doing so is not allowed.
+  ClassExt* EnsureExtDataPresent(Thread* self)
+      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
 
   uint16_t GetDexClassDefIndex() REQUIRES_SHARED(Locks::mutator_lock_) {
     return GetField32(OFFSET_OF_OBJECT_MEMBER(Class, dex_class_def_idx_));
@@ -1318,8 +1326,6 @@
   ALWAYS_INLINE void SetMethodsPtrInternal(LengthPrefixedArray<ArtMethod>* new_methods)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
-  void SetVerifyError(ObjPtr<Object> klass) REQUIRES_SHARED(Locks::mutator_lock_);
-
   template <bool throw_on_failure, bool use_referrers_cache>
   bool ResolvedFieldAccessTest(ObjPtr<Class> access_to,
                                ArtField* field,
@@ -1384,6 +1390,12 @@
   // runtime such as arrays and primitive classes).
   HeapReference<DexCache> dex_cache_;
 
+  // Extraneous class data that is not always needed. This field is allocated lazily and may
+  // only be set with 'this' locked. This is synchronized on 'this'.
+  // TODO(allight) We should probably synchronize it on something external or handle allocation in
+  // some other (safe) way to prevent possible deadlocks.
+  HeapReference<ClassExt> ext_data_;
+
   // The interface table (iftable_) contains pairs of an interface class and an array of the
   // interface methods. There is one pair per interface supported by this class.  That means one
   // pair for each interface we support directly, indirectly via superclass, or indirectly via a
@@ -1408,10 +1420,6 @@
   // check for interfaces and return null.
   HeapReference<Class> super_class_;
 
-  // If class verify fails, we must return same error on subsequent tries. We may store either
-  // the class of the error, or an actual instance of Throwable here.
-  HeapReference<Object> verify_error_;
-
   // Virtual method table (vtable), for use by "invoke-virtual".  The vtable from the superclass is
   // copied in, and virtual methods from our class either replace those from the super or are
   // appended. For abstract classes, methods may be created in the vtable that aren't in
diff --git a/runtime/mirror/class_ext.cc b/runtime/mirror/class_ext.cc
new file mode 100644
index 0000000..cc208e4
--- /dev/null
+++ b/runtime/mirror/class_ext.cc
@@ -0,0 +1,65 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "class_ext.h"
+
+#include "art_method-inl.h"
+#include "base/casts.h"
+#include "base/enums.h"
+#include "class-inl.h"
+#include "dex_file-inl.h"
+#include "gc/accounting/card_table-inl.h"
+#include "object-inl.h"
+#include "object_array.h"
+#include "object_array-inl.h"
+#include "stack_trace_element.h"
+#include "utils.h"
+#include "well_known_classes.h"
+
+namespace art {
+namespace mirror {
+
+GcRoot<Class> ClassExt::dalvik_system_ClassExt_;
+
+ClassExt* ClassExt::Alloc(Thread* self) {
+  DCHECK(dalvik_system_ClassExt_.Read() != nullptr);
+  return down_cast<ClassExt*>(dalvik_system_ClassExt_.Read()->AllocObject(self).Ptr());
+}
+
+void ClassExt::SetVerifyError(ObjPtr<Object> err) {
+  if (Runtime::Current()->IsActiveTransaction()) {
+    SetFieldObject<true>(OFFSET_OF_OBJECT_MEMBER(ClassExt, verify_error_), err);
+  } else {
+    SetFieldObject<false>(OFFSET_OF_OBJECT_MEMBER(ClassExt, verify_error_), err);
+  }
+}
+
+void ClassExt::SetClass(ObjPtr<Class> dalvik_system_ClassExt) {
+  CHECK(dalvik_system_ClassExt != nullptr);
+  dalvik_system_ClassExt_ = GcRoot<Class>(dalvik_system_ClassExt);
+}
+
+void ClassExt::ResetClass() {
+  CHECK(!dalvik_system_ClassExt_.IsNull());
+  dalvik_system_ClassExt_ = GcRoot<Class>(nullptr);
+}
+
+void ClassExt::VisitRoots(RootVisitor* visitor) {
+  dalvik_system_ClassExt_.VisitRootIfNonNull(visitor, RootInfo(kRootStickyClass));
+}
+
+}  // namespace mirror
+}  // namespace art
diff --git a/runtime/mirror/class_ext.h b/runtime/mirror/class_ext.h
new file mode 100644
index 0000000..35eaae1
--- /dev/null
+++ b/runtime/mirror/class_ext.h
@@ -0,0 +1,71 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_MIRROR_CLASS_EXT_H_
+#define ART_RUNTIME_MIRROR_CLASS_EXT_H_
+
+#include "class-inl.h"
+
+#include "gc_root.h"
+#include "object.h"
+#include "object_callbacks.h"
+#include "string.h"
+
+namespace art {
+
+struct ClassExtOffsets;
+
+namespace mirror {
+
+// C++ mirror of dalvik.system.ClassExt
+class MANAGED ClassExt : public Object {
+ public:
+  static uint32_t ClassSize(PointerSize pointer_size) {
+    uint32_t vtable_entries = Object::kVTableLength;
+    return Class::ComputeClassSize(true, vtable_entries, 0, 0, 0, 0, 0, pointer_size);
+  }
+
+  // Size of an instance of dalvik.system.ClassExt.
+  static constexpr uint32_t InstanceSize() {
+    return sizeof(ClassExt);
+  }
+
+  void SetVerifyError(ObjPtr<Object> obj) REQUIRES_SHARED(Locks::mutator_lock_);
+
+  Object* GetVerifyError() REQUIRES_SHARED(Locks::mutator_lock_) {
+    return GetFieldObject<ClassExt>(OFFSET_OF_OBJECT_MEMBER(ClassExt, verify_error_));
+  }
+
+  static void SetClass(ObjPtr<Class> dalvik_system_ClassExt);
+  static void ResetClass();
+  static void VisitRoots(RootVisitor* visitor) REQUIRES_SHARED(Locks::mutator_lock_);
+
+  static ClassExt* Alloc(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_);
+
+ private:
+  // Field order required by test "ValidateFieldOrderOfJavaCppUnionClasses".
+  HeapReference<Object> verify_error_;
+
+  static GcRoot<Class> dalvik_system_ClassExt_;
+
+  friend struct art::ClassExtOffsets;  // for verifying offset information
+  DISALLOW_IMPLICIT_CONSTRUCTORS(ClassExt);
+};
+
+}  // namespace mirror
+}  // namespace art
+
+#endif  // ART_RUNTIME_MIRROR_CLASS_EXT_H_
diff --git a/runtime/mirror/dex_cache-inl.h b/runtime/mirror/dex_cache-inl.h
index df3865b..c7a123b 100644
--- a/runtime/mirror/dex_cache-inl.h
+++ b/runtime/mirror/dex_cache-inl.h
@@ -174,14 +174,9 @@
     // tell the compiler to treat "Read" as a template rather than a field or
     // function. Otherwise, on encountering the "<" token, the compiler would
     // treat "Read" as a field.
-    T* before = source.object.template Read<kReadBarrierOption>();
-    // TODO(narayan): This additional GC root construction and assignment
-    // is unnecessary. We're already operating on a copy of the DexCachePair
-    // that's in the cache.
-    GcRoot<T> root(before);
-    visitor.VisitRootIfNonNull(root.AddressWithoutBarrier());
-    if (root.Read() != before) {
-      source.object = GcRoot<T>(root.Read());
+    T* const before = source.object.template Read<kReadBarrierOption>();
+    visitor.VisitRootIfNonNull(source.object.AddressWithoutBarrier());
+    if (source.object.template Read<kReadBarrierOption>() != before) {
       pairs[i].store(source, std::memory_order_relaxed);
     }
   }
diff --git a/runtime/mirror/dex_cache.h b/runtime/mirror/dex_cache.h
index f5d1b80..1ae694d 100644
--- a/runtime/mirror/dex_cache.h
+++ b/runtime/mirror/dex_cache.h
@@ -74,6 +74,7 @@
   static GcRoot<T> Lookup(std::atomic<DexCachePair<T>>* dex_cache,
                           uint32_t idx,
                           uint32_t cache_size) {
+    DCHECK_NE(cache_size, 0u);
     DexCachePair<T> element = dex_cache[idx % cache_size].load(std::memory_order_relaxed);
     if (idx != element.index) {
       return GcRoot<T>(nullptr);
diff --git a/runtime/mirror/dex_cache_test.cc b/runtime/mirror/dex_cache_test.cc
index e95ca21..916f1cf 100644
--- a/runtime/mirror/dex_cache_test.cc
+++ b/runtime/mirror/dex_cache_test.cc
@@ -55,9 +55,8 @@
   EXPECT_EQ(java_lang_dex_file_->NumTypeIds(),   dex_cache->NumResolvedTypes());
   EXPECT_EQ(java_lang_dex_file_->NumMethodIds(), dex_cache->NumResolvedMethods());
   EXPECT_EQ(java_lang_dex_file_->NumFieldIds(),  dex_cache->NumResolvedFields());
-  // This should always be zero because the -Xexperimental:method-handles isn't
-  // set.
-  EXPECT_EQ(0u, dex_cache->NumResolvedMethodTypes());
+  EXPECT_TRUE(dex_cache->StaticMethodTypeSize() == dex_cache->NumResolvedMethodTypes()
+      || java_lang_dex_file_->NumProtoIds() == dex_cache->NumResolvedMethodTypes());
 }
 
 TEST_F(DexCacheMethodHandlesTest, Open) {
diff --git a/runtime/mirror/emulated_stack_frame.cc b/runtime/mirror/emulated_stack_frame.cc
new file mode 100644
index 0000000..d607040
--- /dev/null
+++ b/runtime/mirror/emulated_stack_frame.cc
@@ -0,0 +1,308 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "emulated_stack_frame.h"
+
+#include "class-inl.h"
+#include "gc_root-inl.h"
+#include "jvalue-inl.h"
+#include "method_handles.h"
+#include "method_handles-inl.h"
+#include "reflection-inl.h"
+
+namespace art {
+namespace mirror {
+
+GcRoot<mirror::Class> EmulatedStackFrame::static_class_;
+
+// Calculates the size of a stack frame based on the size of its argument
+// types and return types.
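+// For example, a method type of (int, long, Object)double yields a frame size
+// of 4 + 8 + 8 = 20 bytes (the 8-byte return slot included) and a references
+// size of 1 (only the Object parameter; a reference return type would add one).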
+static void CalculateFrameAndReferencesSize(ObjPtr<mirror::ObjectArray<mirror::Class>> p_types,
+                                            ObjPtr<mirror::Class> r_type,
+                                            size_t* frame_size_out,
+                                            size_t* references_size_out)
+    REQUIRES_SHARED(Locks::mutator_lock_) {
+  const size_t length = p_types->GetLength();
+  size_t frame_size = 0;
+  size_t references_size = 0;
+  for (size_t i = 0; i < length; ++i) {
+    ObjPtr<mirror::Class> type = p_types->GetWithoutChecks(i);
+    const Primitive::Type primitive_type = type->GetPrimitiveType();
+    if (primitive_type == Primitive::kPrimNot) {
+      references_size++;
+    } else if (Primitive::Is64BitType(primitive_type)) {
+      frame_size += 8;
+    } else {
+      frame_size += 4;
+    }
+  }
+
+  const Primitive::Type return_type = r_type->GetPrimitiveType();
+  if (return_type == Primitive::kPrimNot) {
+    references_size++;
+  } else if (Primitive::Is64BitType(return_type)) {
+    frame_size += 8;
+  } else {
+    frame_size += 4;
+  }
+
+  (*frame_size_out) = frame_size;
+  (*references_size_out) = references_size;
+}
+
+// Allows for read or write access to an emulated stack frame. Each
+// accessor index has an associated index into the references / stack frame
+// arrays which is incremented on every read or write to the frame.
+//
+// This class is used in conjunction with PerformConversions, either as a setter
+// or as a getter.
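+//
+// Primitive values are packed back to back into the byte array (4 bytes for
+// narrow types, 8 bytes for long/double) in argument order, while references
+// are stored separately in the reference array, matching the sizes computed
+// by CalculateFrameAndReferencesSize above.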
+class EmulatedStackFrameAccessor {
+ public:
+  EmulatedStackFrameAccessor(Handle<mirror::ObjectArray<mirror::Object>> references,
+                             Handle<mirror::ByteArray> stack_frame,
+                             size_t stack_frame_size) :
+    references_(references),
+    stack_frame_(stack_frame),
+    stack_frame_size_(stack_frame_size),
+    reference_idx_(0u),
+    stack_frame_idx_(0u) {
+  }
+
+  ALWAYS_INLINE void SetReference(ObjPtr<mirror::Object> reference)
+      REQUIRES_SHARED(Locks::mutator_lock_) {
+    references_->Set(reference_idx_++, reference);
+  }
+
+  ALWAYS_INLINE void Set(const uint32_t value) REQUIRES_SHARED(Locks::mutator_lock_) {
+    int8_t* array = stack_frame_->GetData();
+
+    CHECK_LE((stack_frame_idx_ + 4u), stack_frame_size_);
+    memcpy(array + stack_frame_idx_, &value, sizeof(uint32_t));
+    stack_frame_idx_ += 4u;
+  }
+
+  ALWAYS_INLINE void SetLong(const int64_t value) REQUIRES_SHARED(Locks::mutator_lock_) {
+    int8_t* array = stack_frame_->GetData();
+
+    CHECK_LE((stack_frame_idx_ + 8u), stack_frame_size_);
+    memcpy(array + stack_frame_idx_, &value, sizeof(int64_t));
+    stack_frame_idx_ += 8u;
+  }
+
+  ALWAYS_INLINE ObjPtr<mirror::Object> GetReference() REQUIRES_SHARED(Locks::mutator_lock_) {
+    return ObjPtr<mirror::Object>(references_->Get(reference_idx_++));
+  }
+
+  ALWAYS_INLINE uint32_t Get() REQUIRES_SHARED(Locks::mutator_lock_) {
+    const int8_t* array = stack_frame_->GetData();
+
+    CHECK_LE((stack_frame_idx_ + 4u), stack_frame_size_);
+    uint32_t val = 0;
+
+    memcpy(&val, array + stack_frame_idx_, sizeof(uint32_t));
+    stack_frame_idx_ += 4u;
+    return val;
+  }
+
+  ALWAYS_INLINE int64_t GetLong() REQUIRES_SHARED(Locks::mutator_lock_) {
+    const int8_t* array = stack_frame_->GetData();
+
+    CHECK_LE((stack_frame_idx_ + 8u), stack_frame_size_);
+    int64_t val = 0;
+
+    memcpy(&val, array + stack_frame_idx_, sizeof(int64_t));
+    stack_frame_idx_ += 8u;
+    return val;
+  }
+
+ private:
+  Handle<mirror::ObjectArray<mirror::Object>> references_;
+  Handle<mirror::ByteArray> stack_frame_;
+  const size_t stack_frame_size_;
+
+  size_t reference_idx_;
+  size_t stack_frame_idx_;
+
+  DISALLOW_COPY_AND_ASSIGN(EmulatedStackFrameAccessor);
+};
+
+template <bool is_range>
+mirror::EmulatedStackFrame* EmulatedStackFrame::CreateFromShadowFrameAndArgs(
+    Thread* self,
+    Handle<mirror::MethodType> caller_type,
+    Handle<mirror::MethodType> callee_type,
+    const ShadowFrame& caller_frame,
+    const uint32_t first_src_reg,
+    const uint32_t (&arg)[Instruction::kMaxVarArgRegs]) {
+  StackHandleScope<6> hs(self);
+
+  // Step 1: We must throw a WrongMethodTypeException if there's a mismatch in the
+  // number of arguments between the caller and the callee.
+  Handle<mirror::ObjectArray<mirror::Class>> from_types(hs.NewHandle(caller_type->GetPTypes()));
+  Handle<mirror::ObjectArray<mirror::Class>> to_types(hs.NewHandle(callee_type->GetPTypes()));
+
+  const int32_t num_method_params = from_types->GetLength();
+  if (to_types->GetLength() != num_method_params) {
+    ThrowWrongMethodTypeException(callee_type.Get(), caller_type.Get());
+    return nullptr;
+  }
+
+  // Step 2: Calculate the size of the reference / byte arrays in the emulated
+  // stack frame.
+  size_t frame_size = 0;
+  size_t refs_size = 0;
+  Handle<mirror::Class> r_type(hs.NewHandle(callee_type->GetRType()));
+  CalculateFrameAndReferencesSize(to_types.Get(), r_type.Get(), &frame_size, &refs_size);
+
+  // Step 3 : Allocate the arrays.
+  ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
+  ObjPtr<mirror::Class> array_class(class_linker->GetClassRoot(ClassLinker::kObjectArrayClass));
+
+  Handle<mirror::ObjectArray<mirror::Object>> references(hs.NewHandle(
+      mirror::ObjectArray<mirror::Object>::Alloc(self, array_class, refs_size)));
+  if (references.Get() == nullptr) {
+    DCHECK(self->IsExceptionPending());
+    return nullptr;
+  }
+
+  Handle<ByteArray> stack_frame(hs.NewHandle(ByteArray::Alloc(self, frame_size)));
+  if (stack_frame.Get() == nullptr) {
+    DCHECK(self->IsExceptionPending());
+    return nullptr;
+  }
+
+  // Step 4 : Perform argument conversions (if required).
+  ShadowFrameGetter<is_range> getter(first_src_reg, arg, caller_frame);
+  EmulatedStackFrameAccessor setter(references, stack_frame, stack_frame->GetLength());
+  if (!PerformConversions<ShadowFrameGetter<is_range>, EmulatedStackFrameAccessor>(
+          self, caller_type, callee_type, &getter, &setter, num_method_params)) {
+    return nullptr;
+  }
+
+  // Step 5: Construct the EmulatedStackFrame object.
+  Handle<EmulatedStackFrame> sf(hs.NewHandle(
+      ObjPtr<EmulatedStackFrame>::DownCast(StaticClass()->AllocObject(self))));
+  sf->SetFieldObject<false>(TypeOffset(), callee_type.Get());
+  sf->SetFieldObject<false>(ReferencesOffset(), references.Get());
+  sf->SetFieldObject<false>(StackFrameOffset(), stack_frame.Get());
+
+  return sf.Get();
+}
+
+bool EmulatedStackFrame::WriteToShadowFrame(Thread* self,
+                                            Handle<mirror::MethodType> callee_type,
+                                            const uint32_t first_dest_reg,
+                                            ShadowFrame* callee_frame) {
+  ObjPtr<mirror::ObjectArray<mirror::Class>> from_types(GetType()->GetPTypes());
+  ObjPtr<mirror::ObjectArray<mirror::Class>> to_types(callee_type->GetPTypes());
+
+  const int32_t num_method_params = from_types->GetLength();
+  if (to_types->GetLength() != num_method_params) {
+    ThrowWrongMethodTypeException(callee_type.Get(), GetType());
+    return false;
+  }
+
+  StackHandleScope<3> hs(self);
+  Handle<mirror::MethodType> frame_callsite_type(hs.NewHandle(GetType()));
+  Handle<mirror::ObjectArray<mirror::Object>> references(hs.NewHandle(GetReferences()));
+  Handle<ByteArray> stack_frame(hs.NewHandle(GetStackFrame()));
+
+  EmulatedStackFrameAccessor getter(references, stack_frame, stack_frame->GetLength());
+  ShadowFrameSetter setter(callee_frame, first_dest_reg);
+
+  return PerformConversions<EmulatedStackFrameAccessor, ShadowFrameSetter>(
+      self, frame_callsite_type, callee_type, &getter, &setter, num_method_params);
+}
+
+void EmulatedStackFrame::GetReturnValue(Thread* self, JValue* value) {
+  StackHandleScope<2> hs(self);
+  Handle<mirror::Class> r_type(hs.NewHandle(GetType()->GetRType()));
+
+  const Primitive::Type type = r_type->GetPrimitiveType();
+  if (type == Primitive::kPrimNot) {
+    Handle<mirror::ObjectArray<mirror::Object>> references(hs.NewHandle(GetReferences()));
+    value->SetL(references->GetWithoutChecks(references->GetLength() - 1));
+  } else {
+    Handle<ByteArray> stack_frame(hs.NewHandle(GetStackFrame()));
+    const int8_t* array = stack_frame->GetData();
+    const size_t length = stack_frame->GetLength();
+    if (Primitive::Is64BitType(type)) {
+      int64_t primitive = 0;
+      memcpy(&primitive, array + length - sizeof(int64_t), sizeof(int64_t));
+      value->SetJ(primitive);
+    } else {
+      uint32_t primitive = 0;
+      memcpy(&primitive, array + length - sizeof(uint32_t), sizeof(uint32_t));
+      value->SetI(primitive);
+    }
+  }
+}
+
+void EmulatedStackFrame::SetReturnValue(Thread* self, const JValue& value) {
+  StackHandleScope<2> hs(self);
+  Handle<mirror::Class> r_type(hs.NewHandle(GetType()->GetRType()));
+
+  const Primitive::Type type = r_type->GetPrimitiveType();
+  if (type == Primitive::kPrimNot) {
+    Handle<mirror::ObjectArray<mirror::Object>> references(hs.NewHandle(GetReferences()));
+    references->SetWithoutChecks<false>(references->GetLength() - 1, value.GetL());
+  } else {
+    Handle<ByteArray> stack_frame(hs.NewHandle(GetStackFrame()));
+    int8_t* array = stack_frame->GetData();
+    const size_t length = stack_frame->GetLength();
+    if (Primitive::Is64BitType(type)) {
+      const int64_t primitive = value.GetJ();
+      memcpy(array + length - sizeof(int64_t), &primitive, sizeof(int64_t));
+    } else {
+      const uint32_t primitive = value.GetI();
+      memcpy(array + length - sizeof(uint32_t), &primitive, sizeof(uint32_t));
+    }
+  }
+}
+
+void EmulatedStackFrame::SetClass(Class* klass) {
+  CHECK(static_class_.IsNull()) << static_class_.Read() << " " << klass;
+  CHECK(klass != nullptr);
+  static_class_ = GcRoot<Class>(klass);
+}
+
+void EmulatedStackFrame::ResetClass() {
+  CHECK(!static_class_.IsNull());
+  static_class_ = GcRoot<Class>(nullptr);
+}
+
+void EmulatedStackFrame::VisitRoots(RootVisitor* visitor) {
+  static_class_.VisitRootIfNonNull(visitor, RootInfo(kRootStickyClass));
+}
+
+// Explicit CreateFromShadowFrameAndArgs template function declarations.
+#define EXPLICIT_CREATE_FROM_SHADOW_FRAME_AND_ARGS_DECL(_is_range)                         \
+  template REQUIRES_SHARED(Locks::mutator_lock_)                                           \
+  mirror::EmulatedStackFrame* EmulatedStackFrame::CreateFromShadowFrameAndArgs<_is_range>( \
+    Thread* self,                                                                          \
+    Handle<mirror::MethodType> caller_type,                                                \
+    Handle<mirror::MethodType> callee_type,                                                \
+    const ShadowFrame& caller_frame,                                                       \
+    const uint32_t first_src_reg,                                                          \
+    const uint32_t (&arg)[Instruction::kMaxVarArgRegs])                                    \
+
+EXPLICIT_CREATE_FROM_SHADOW_FRAME_AND_ARGS_DECL(true);
+EXPLICIT_CREATE_FROM_SHADOW_FRAME_AND_ARGS_DECL(false);
+#undef EXPLICIT_CREATE_FROM_SHADOW_FRAME_AND_ARGS_DECL
+
+
+}  // namespace mirror
+}  // namespace art
diff --git a/runtime/mirror/emulated_stack_frame.h b/runtime/mirror/emulated_stack_frame.h
new file mode 100644
index 0000000..9fa06b7
--- /dev/null
+++ b/runtime/mirror/emulated_stack_frame.h
@@ -0,0 +1,109 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_MIRROR_EMULATED_STACK_FRAME_H_
+#define ART_RUNTIME_MIRROR_EMULATED_STACK_FRAME_H_
+
+#include "dex_instruction.h"
+#include "method_type.h"
+#include "object.h"
+#include "stack.h"
+#include "string.h"
+#include "utils.h"
+
+namespace art {
+
+struct EmulatedStackFrameOffsets;
+
+namespace mirror {
+
+// C++ mirror of dalvik.system.EmulatedStackFrame
+class MANAGED EmulatedStackFrame : public Object {
+ public:
+  // Creates an emulated stack frame whose type is |frame_type| from
+  // a shadow frame.
+  template <bool is_range>
+  static mirror::EmulatedStackFrame* CreateFromShadowFrameAndArgs(
+      Thread* self,
+      Handle<mirror::MethodType> args_type,
+      Handle<mirror::MethodType> frame_type,
+      const ShadowFrame& caller_frame,
+      const uint32_t first_src_reg,
+      const uint32_t (&arg)[Instruction::kMaxVarArgRegs]) REQUIRES_SHARED(Locks::mutator_lock_);
+
+  // Writes the contents of this emulated stack frame to the |callee_frame|
+  // whose type is |callee_type|, starting at |first_dest_reg|.
+  bool WriteToShadowFrame(
+      Thread* self,
+      Handle<mirror::MethodType> callee_type,
+      const uint32_t first_dest_reg,
+      ShadowFrame* callee_frame) REQUIRES_SHARED(Locks::mutator_lock_);
+
+  // Sets |value| to the return value written to this emulated stack frame (if any).
+  void GetReturnValue(Thread* self, JValue* value) REQUIRES_SHARED(Locks::mutator_lock_);
+
+  // Sets the return value slot of this emulated stack frame to |value|.
+  void SetReturnValue(Thread* self, const JValue& value) REQUIRES_SHARED(Locks::mutator_lock_);
+
+  static void SetClass(Class* klass) REQUIRES_SHARED(Locks::mutator_lock_);
+  static void ResetClass() REQUIRES_SHARED(Locks::mutator_lock_);
+  static void VisitRoots(RootVisitor* visitor) REQUIRES_SHARED(Locks::mutator_lock_);
+
+ private:
+  static mirror::Class* StaticClass() REQUIRES_SHARED(Locks::mutator_lock_) {
+    return static_class_.Read();
+  }
+
+  mirror::MethodType* GetType() REQUIRES_SHARED(Locks::mutator_lock_) {
+    return GetFieldObject<MethodType>(OFFSET_OF_OBJECT_MEMBER(EmulatedStackFrame, type_));
+  }
+
+  mirror::ObjectArray<mirror::Object>* GetReferences() REQUIRES_SHARED(Locks::mutator_lock_) {
+    return GetFieldObject<mirror::ObjectArray<mirror::Object>>(
+        OFFSET_OF_OBJECT_MEMBER(EmulatedStackFrame, references_));
+  }
+
+  mirror::ByteArray* GetStackFrame() REQUIRES_SHARED(Locks::mutator_lock_) {
+    return GetFieldObject<mirror::ByteArray>(
+        OFFSET_OF_OBJECT_MEMBER(EmulatedStackFrame, stack_frame_));
+  }
+
+  static MemberOffset TypeOffset() {
+    return MemberOffset(OFFSETOF_MEMBER(EmulatedStackFrame, type_));
+  }
+
+  static MemberOffset ReferencesOffset() {
+    return MemberOffset(OFFSETOF_MEMBER(EmulatedStackFrame, references_));
+  }
+
+  static MemberOffset StackFrameOffset() {
+    return MemberOffset(OFFSETOF_MEMBER(EmulatedStackFrame, stack_frame_));
+  }
+
+  HeapReference<mirror::ObjectArray<mirror::Object>> references_;
+  HeapReference<mirror::ByteArray> stack_frame_;
+  HeapReference<mirror::MethodType> type_;
+
+  static GcRoot<mirror::Class> static_class_;  // dalvik.system.EmulatedStackFrame.class
+
+  friend struct art::EmulatedStackFrameOffsets;  // for verifying offset information
+  DISALLOW_IMPLICIT_CONSTRUCTORS(EmulatedStackFrame);
+};
+
+}  // namespace mirror
+}  // namespace art
+
+#endif  // ART_RUNTIME_MIRROR_EMULATED_STACK_FRAME_H_
diff --git a/runtime/mirror/method_handle_impl.h b/runtime/mirror/method_handle_impl.h
index 40716ad..9054216 100644
--- a/runtime/mirror/method_handle_impl.h
+++ b/runtime/mirror/method_handle_impl.h
@@ -36,6 +36,15 @@
     return GetFieldObject<mirror::MethodType>(OFFSET_OF_OBJECT_MEMBER(MethodHandle, method_type_));
   }
 
+  mirror::MethodType* GetNominalType() REQUIRES_SHARED(Locks::mutator_lock_) {
+    return GetFieldObject<mirror::MethodType>(OFFSET_OF_OBJECT_MEMBER(MethodHandle, nominal_type_));
+  }
+
+  ArtField* GetTargetField() REQUIRES_SHARED(Locks::mutator_lock_) {
+    return reinterpret_cast<ArtField*>(
+        GetField64(OFFSET_OF_OBJECT_MEMBER(MethodHandle, art_field_or_method_)));
+  }
+
   ArtMethod* GetTargetMethod() REQUIRES_SHARED(Locks::mutator_lock_) {
     return reinterpret_cast<ArtMethod*>(
         GetField64(OFFSET_OF_OBJECT_MEMBER(MethodHandle, art_field_or_method_)));
@@ -49,14 +58,14 @@
   }
 
  private:
-  HeapReference<mirror::Object> as_type_cache_;
+  HeapReference<mirror::MethodType> nominal_type_;
   HeapReference<mirror::MethodType> method_type_;
   uint64_t art_field_or_method_;
   uint32_t handle_kind_;
 
  private:
-  static MemberOffset AsTypeCacheOffset() {
-    return MemberOffset(OFFSETOF_MEMBER(MethodHandle, as_type_cache_));
+  static MemberOffset NominalTypeOffset() {
+    return MemberOffset(OFFSETOF_MEMBER(MethodHandle, nominal_type_));
   }
   static MemberOffset MethodTypeOffset() {
     return MemberOffset(OFFSETOF_MEMBER(MethodHandle, method_type_));
diff --git a/runtime/mirror/method_type.cc b/runtime/mirror/method_type.cc
index 0b52931..5d77a16 100644
--- a/runtime/mirror/method_type.cc
+++ b/runtime/mirror/method_type.cc
@@ -18,6 +18,7 @@
 
 #include "class-inl.h"
 #include "gc_root-inl.h"
+#include "method_handles.h"
 
 namespace art {
 namespace mirror {
@@ -43,28 +44,66 @@
   return mt.Get();
 }
 
-bool MethodType::IsExactMatch(mirror::MethodType* other) REQUIRES_SHARED(Locks::mutator_lock_) {
-  if (GetRType() != other->GetRType()) {
-    return false;
-  }
-
+bool MethodType::IsExactMatch(mirror::MethodType* target) REQUIRES_SHARED(Locks::mutator_lock_) {
   mirror::ObjectArray<Class>* const p_types = GetPTypes();
   const int32_t params_length = p_types->GetLength();
 
-  mirror::ObjectArray<Class>* const other_p_types = other->GetPTypes();
-  if (params_length != other_p_types->GetLength()) {
+  mirror::ObjectArray<Class>* const target_p_types = target->GetPTypes();
+  if (params_length != target_p_types->GetLength()) {
+    return false;
+  }
+  for (int32_t i = 0; i < params_length; ++i) {
+    if (p_types->GetWithoutChecks(i) != target_p_types->GetWithoutChecks(i)) {
+      return false;
+    }
+  }
+  return GetRType() == target->GetRType();
+}
+
+bool MethodType::IsConvertible(mirror::MethodType* target) REQUIRES_SHARED(Locks::mutator_lock_) {
+  mirror::ObjectArray<Class>* const p_types = GetPTypes();
+  const int32_t params_length = p_types->GetLength();
+
+  mirror::ObjectArray<Class>* const target_p_types = target->GetPTypes();
+  if (params_length != target_p_types->GetLength()) {
+    return false;
+  }
+
+  // Perform the return-type check before invoking the method handle; otherwise
+  // side effects from the invocation may be observable before
+  // WrongMethodTypeException is raised.
+  if (!IsReturnTypeConvertible(target->GetRType(), GetRType())) {
     return false;
   }
 
   for (int32_t i = 0; i < params_length; ++i) {
-    if (p_types->GetWithoutChecks(i) != other_p_types->GetWithoutChecks(i)) {
+    if (!IsParameterTypeConvertible(p_types->GetWithoutChecks(i),
+                                    target_p_types->GetWithoutChecks(i))) {
       return false;
     }
   }
-
   return true;
 }
 
+std::string MethodType::PrettyDescriptor() REQUIRES_SHARED(Locks::mutator_lock_) {
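+  // Produces, for example, "(java.lang.String, int)void" for a method type
+  // with parameter types (String, int) and return type void.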
+  std::ostringstream ss;
+  ss << "(";
+
+  mirror::ObjectArray<Class>* const p_types = GetPTypes();
+  const int32_t params_length = p_types->GetLength();
+  for (int32_t i = 0; i < params_length; ++i) {
+    ss << p_types->GetWithoutChecks(i)->PrettyDescriptor();
+    if (i != (params_length - 1)) {
+      ss << ", ";
+    }
+  }
+
+  ss << ")";
+  ss << GetRType()->PrettyDescriptor();
+
+  return ss.str();
+}
+
 void MethodType::SetClass(Class* klass) {
   CHECK(static_class_.IsNull()) << static_class_.Read() << " " << klass;
   CHECK(klass != nullptr);
diff --git a/runtime/mirror/method_type.h b/runtime/mirror/method_type.h
index 5b50409..9a98143 100644
--- a/runtime/mirror/method_type.h
+++ b/runtime/mirror/method_type.h
@@ -52,9 +52,17 @@
   static void ResetClass() REQUIRES_SHARED(Locks::mutator_lock_);
   static void VisitRoots(RootVisitor* visitor) REQUIRES_SHARED(Locks::mutator_lock_);
 
-  // Returns true iff. |other| is an exact match for this method type, i.e
+  // Returns true iff. |this| is an exact match for the method type |target|, i.e.
   // iff. they have the same return types and parameter types.
-  bool IsExactMatch(mirror::MethodType* other) REQUIRES_SHARED(Locks::mutator_lock_);
+  bool IsExactMatch(mirror::MethodType* target) REQUIRES_SHARED(Locks::mutator_lock_);
+
+  // Returns true iff. |this| can be converted to match the |target| method type, i.e.
+  // iff. they have convertible return types and parameter types.
+  bool IsConvertible(mirror::MethodType* target) REQUIRES_SHARED(Locks::mutator_lock_);
+
+  // Returns the pretty descriptor for this method type, suitable for display in
+  // exception messages and the like.
+  std::string PrettyDescriptor() REQUIRES_SHARED(Locks::mutator_lock_);
 
  private:
   static MemberOffset FormOffset() {
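
The two checks above are deliberately ordered differently: IsExactMatch() compares the parameter types element by element and then the return type for identity, while IsConvertible() validates the return type before the parameters so a WrongMethodTypeException can be raised before any side effects of the invocation become visible. The following is a minimal standalone sketch of that ordering (not ART code); TypeId and TypeIsConvertible() are hypothetical stand-ins for mirror::Class and the IsParameterTypeConvertible()/IsReturnTypeConvertible() helpers from method_handles.h.

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <vector>

using TypeId = uint32_t;
constexpr TypeId kObject = 0u;  // hypothetical: everything converts to this

struct SimpleMethodType {
  TypeId return_type;
  std::vector<TypeId> param_types;
};

// Hypothetical convertibility rule standing in for the real checks.
bool TypeIsConvertible(TypeId from, TypeId to) {
  return from == to || to == kObject;
}

bool IsExactMatch(const SimpleMethodType& self, const SimpleMethodType& target) {
  if (self.param_types.size() != target.param_types.size()) {
    return false;
  }
  for (std::size_t i = 0; i < self.param_types.size(); ++i) {
    if (self.param_types[i] != target.param_types[i]) {
      return false;
    }
  }
  return self.return_type == target.return_type;
}

bool IsConvertibleMatch(const SimpleMethodType& self, const SimpleMethodType& target) {
  if (self.param_types.size() != target.param_types.size()) {
    return false;
  }
  // Return type first: fail before any invocation side effects could be seen.
  if (!TypeIsConvertible(target.return_type, self.return_type)) {
    return false;
  }
  for (std::size_t i = 0; i < self.param_types.size(); ++i) {
    if (!TypeIsConvertible(self.param_types[i], target.param_types[i])) {
      return false;
    }
  }
  return true;
}

int main() {
  SimpleMethodType a{/* return */ 1u, /* params */ {2u, 3u}};
  SimpleMethodType b{kObject, {2u, 3u}};
  assert(IsExactMatch(a, a));
  assert(!IsExactMatch(a, b));
  assert(IsConvertibleMatch(b, a));  // a's return converts to b's Object return
  return 0;
}
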
diff --git a/runtime/mirror/object-inl.h b/runtime/mirror/object-inl.h
index 2e70c9b..6d29ed3 100644
--- a/runtime/mirror/object-inl.h
+++ b/runtime/mirror/object-inl.h
@@ -135,25 +135,82 @@
   Monitor::Wait(self, this, ms, ns, true, kTimedWaiting);
 }
 
-inline Object* Object::GetReadBarrierPointer() {
+inline uint32_t Object::GetReadBarrierState(uintptr_t* fake_address_dependency) {
+#ifdef USE_BAKER_READ_BARRIER
+  CHECK(kUseBakerReadBarrier);
+#if defined(__arm__)
+  uintptr_t obj = reinterpret_cast<uintptr_t>(this);
+  uintptr_t result;
+  DCHECK_EQ(OFFSETOF_MEMBER(Object, monitor_), 4U);
+  // Use inline assembly to prevent the compiler from optimizing away the false dependency.
+  __asm__ __volatile__(
+      "ldr %[result], [%[obj], #4]\n\t"
+      // This instruction is enough to "fool the compiler and the CPU" by having `fad` always be
+      // zero, without them being able to assume that fact.
+      "eor %[fad], %[result], %[result]\n\t"
+      : [result] "+r" (result), [fad] "=r" (*fake_address_dependency)
+      : [obj] "r" (obj));
+  DCHECK_EQ(*fake_address_dependency, 0U);
+  LockWord lw(static_cast<uint32_t>(result));
+  uint32_t rb_state = lw.ReadBarrierState();
+  return rb_state;
+#elif defined(__aarch64__)
+  uintptr_t obj = reinterpret_cast<uintptr_t>(this);
+  uintptr_t result;
+  DCHECK_EQ(OFFSETOF_MEMBER(Object, monitor_), 4U);
+  // Use inline assembly to prevent the compiler from optimizing away the false dependency.
+  __asm__ __volatile__(
+      "ldr %w[result], [%[obj], #4]\n\t"
+      // This instruction is enough to "fool the compiler and the CPU" by having `fad` always be
+      // zero, without them being able to assume that fact.
+      "eor %[fad], %[result], %[result]\n\t"
+      : [result] "+r" (result), [fad] "=r" (*fake_address_dependency)
+      : [obj] "r" (obj));
+  DCHECK_EQ(*fake_address_dependency, 0U);
+  LockWord lw(static_cast<uint32_t>(result));
+  uint32_t rb_state = lw.ReadBarrierState();
+  return rb_state;
+#elif defined(__i386__) || defined(__x86_64__)
+  LockWord lw = GetLockWord(false);
+  // i386/x86_64 don't need a fake address dependency. Use a compiler fence to avoid compiler
+  // reordering.
+  *fake_address_dependency = 0;
+  std::atomic_signal_fence(std::memory_order_acquire);
+  uint32_t rb_state = lw.ReadBarrierState();
+  return rb_state;
+#else
+  // mips/mips64
+  LOG(FATAL) << "Unreachable";
+  UNREACHABLE();
+  UNUSED(fake_address_dependency);
+#endif
+#else  // !USE_BAKER_READ_BARRIER
+  LOG(FATAL) << "Unreachable";
+  UNREACHABLE();
+  UNUSED(fake_address_dependency);
+#endif
+}
+
+inline uint32_t Object::GetReadBarrierState() {
 #ifdef USE_BAKER_READ_BARRIER
   DCHECK(kUseBakerReadBarrier);
-  return reinterpret_cast<Object*>(GetLockWord(false).ReadBarrierState());
-#elif USE_BROOKS_READ_BARRIER
-  DCHECK(kUseBrooksReadBarrier);
-  return GetFieldObject<Object, kVerifyNone, kWithoutReadBarrier>(
-      OFFSET_OF_OBJECT_MEMBER(Object, x_rb_ptr_));
+  LockWord lw(GetField<uint32_t, /*kIsVolatile*/false>(OFFSET_OF_OBJECT_MEMBER(Object, monitor_)));
+  uint32_t rb_state = lw.ReadBarrierState();
+  DCHECK(ReadBarrier::IsValidReadBarrierState(rb_state)) << rb_state;
+  return rb_state;
 #else
   LOG(FATAL) << "Unreachable";
   UNREACHABLE();
 #endif
 }
 
-inline Object* Object::GetReadBarrierPointerAcquire() {
+inline uint32_t Object::GetReadBarrierStateAcquire() {
 #ifdef USE_BAKER_READ_BARRIER
   DCHECK(kUseBakerReadBarrier);
   LockWord lw(GetFieldAcquire<uint32_t>(OFFSET_OF_OBJECT_MEMBER(Object, monitor_)));
-  return reinterpret_cast<Object*>(lw.ReadBarrierState());
+  uint32_t rb_state = lw.ReadBarrierState();
+  DCHECK(ReadBarrier::IsValidReadBarrierState(rb_state)) << rb_state;
+  return rb_state;
 #else
   LOG(FATAL) << "Unreachable";
   UNREACHABLE();
@@ -169,48 +226,38 @@
 #endif
 }
 
-inline void Object::SetReadBarrierPointer(Object* rb_ptr) {
+inline void Object::SetReadBarrierState(uint32_t rb_state) {
 #ifdef USE_BAKER_READ_BARRIER
   DCHECK(kUseBakerReadBarrier);
-  DCHECK_EQ(reinterpret_cast<uint64_t>(rb_ptr) >> 32, 0U);
-  DCHECK_NE(rb_ptr, ReadBarrier::BlackPtr()) << "Setting to black is not supported";
+  DCHECK(ReadBarrier::IsValidReadBarrierState(rb_state)) << rb_state;
   LockWord lw = GetLockWord(false);
-  lw.SetReadBarrierState(static_cast<uint32_t>(reinterpret_cast<uintptr_t>(rb_ptr)));
+  lw.SetReadBarrierState(rb_state);
   SetLockWord(lw, false);
-#elif USE_BROOKS_READ_BARRIER
-  DCHECK(kUseBrooksReadBarrier);
-  // We don't mark the card as this occurs as part of object allocation. Not all objects have
-  // backing cards, such as large objects.
-  SetFieldObjectWithoutWriteBarrier<false, false, kVerifyNone>(
-      OFFSET_OF_OBJECT_MEMBER(Object, x_rb_ptr_), rb_ptr);
 #else
   LOG(FATAL) << "Unreachable";
   UNREACHABLE();
-  UNUSED(rb_ptr);
+  UNUSED(rb_state);
 #endif
 }
 
 template<bool kCasRelease>
-inline bool Object::AtomicSetReadBarrierPointer(Object* expected_rb_ptr, Object* rb_ptr) {
+inline bool Object::AtomicSetReadBarrierState(uint32_t expected_rb_state, uint32_t rb_state) {
 #ifdef USE_BAKER_READ_BARRIER
   DCHECK(kUseBakerReadBarrier);
-  DCHECK_EQ(reinterpret_cast<uint64_t>(expected_rb_ptr) >> 32, 0U);
-  DCHECK_EQ(reinterpret_cast<uint64_t>(rb_ptr) >> 32, 0U);
-  DCHECK_NE(expected_rb_ptr, ReadBarrier::BlackPtr()) << "Setting to black is not supported";
-  DCHECK_NE(rb_ptr, ReadBarrier::BlackPtr()) << "Setting to black is not supported";
+  DCHECK(ReadBarrier::IsValidReadBarrierState(expected_rb_state)) << expected_rb_state;
+  DCHECK(ReadBarrier::IsValidReadBarrierState(rb_state)) << rb_state;
   LockWord expected_lw;
   LockWord new_lw;
   do {
     LockWord lw = GetLockWord(false);
-    if (UNLIKELY(reinterpret_cast<Object*>(lw.ReadBarrierState()) != expected_rb_ptr)) {
+    if (UNLIKELY(lw.ReadBarrierState() != expected_rb_state)) {
       // Lost the race.
       return false;
     }
     expected_lw = lw;
-    expected_lw.SetReadBarrierState(
-        static_cast<uint32_t>(reinterpret_cast<uintptr_t>(expected_rb_ptr)));
+    expected_lw.SetReadBarrierState(expected_rb_state);
     new_lw = lw;
-    new_lw.SetReadBarrierState(static_cast<uint32_t>(reinterpret_cast<uintptr_t>(rb_ptr)));
+    new_lw.SetReadBarrierState(rb_state);
     // ConcurrentCopying::ProcessMarkStackRef uses this with kCasRelease == true.
     // If kCasRelease == true, use a CAS release so that when GC updates all the fields of
     // an object and then changes the object from gray to black, the field updates (stores) will be
@@ -219,23 +266,8 @@
              CasLockWordWeakRelease(expected_lw, new_lw) :
              CasLockWordWeakRelaxed(expected_lw, new_lw)));
   return true;
-#elif USE_BROOKS_READ_BARRIER
-  DCHECK(kUseBrooksReadBarrier);
-  MemberOffset offset = OFFSET_OF_OBJECT_MEMBER(Object, x_rb_ptr_);
-  uint8_t* raw_addr = reinterpret_cast<uint8_t*>(this) + offset.SizeValue();
-  Atomic<uint32_t>* atomic_rb_ptr = reinterpret_cast<Atomic<uint32_t>*>(raw_addr);
-  HeapReference<Object> expected_ref(HeapReference<Object>::FromMirrorPtr(expected_rb_ptr));
-  HeapReference<Object> new_ref(HeapReference<Object>::FromMirrorPtr(rb_ptr));
-  do {
-    if (UNLIKELY(atomic_rb_ptr->LoadRelaxed() != expected_ref.reference_)) {
-      // Lost the race.
-      return false;
-    }
-  } while (!atomic_rb_ptr->CompareExchangeWeakSequentiallyConsistent(expected_ref.reference_,
-                                                                     new_ref.reference_));
-  return true;
 #else
-  UNUSED(expected_rb_ptr, rb_ptr);
+  UNUSED(expected_rb_state, rb_state);
   LOG(FATAL) << "Unreachable";
   UNREACHABLE();
 #endif
@@ -259,23 +291,16 @@
 }
 
 
-inline void Object::AssertReadBarrierPointer() const {
-  if (kUseBakerReadBarrier) {
-    Object* obj = const_cast<Object*>(this);
-    DCHECK(obj->GetReadBarrierPointer() == nullptr)
-        << "Bad Baker pointer: obj=" << reinterpret_cast<void*>(obj)
-        << " ptr=" << reinterpret_cast<void*>(obj->GetReadBarrierPointer());
-  } else {
-    CHECK(kUseBrooksReadBarrier);
-    Object* obj = const_cast<Object*>(this);
-    DCHECK_EQ(obj, obj->GetReadBarrierPointer())
-        << "Bad Brooks pointer: obj=" << reinterpret_cast<void*>(obj)
-        << " ptr=" << reinterpret_cast<void*>(obj->GetReadBarrierPointer());
-  }
+inline void Object::AssertReadBarrierState() const {
+  CHECK(kUseBakerReadBarrier);
+  Object* obj = const_cast<Object*>(this);
+  DCHECK(obj->GetReadBarrierState() == ReadBarrier::WhiteState())
+      << "Bad Baker pointer: obj=" << reinterpret_cast<void*>(obj)
+      << " rb_state" << reinterpret_cast<void*>(obj->GetReadBarrierState());
 }
 
 template<VerifyObjectFlags kVerifyFlags>
-inline bool Object::VerifierInstanceOf(Class* klass) {
+inline bool Object::VerifierInstanceOf(ObjPtr<Class> klass) {
   DCHECK(klass != nullptr);
   DCHECK(GetClass<kVerifyFlags>() != nullptr);
   return klass->IsInterface() || InstanceOf(klass);
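
GetReadBarrierState() above loads the 32-bit lock word (monitor_) once and extracts the Baker state bits from it, and AtomicSetReadBarrierState() rewrites only those bits inside a CAS loop so concurrent changes to the rest of the lock word are preserved. Below is a standalone sketch of that bits-in-a-lock-word pattern (not ART code); the bit position and the white/gray values are placeholders, the real layout lives in LockWord and ReadBarrier.

#include <atomic>
#include <cassert>
#include <cstdint>

constexpr uint32_t kRbStateShift = 28;                // placeholder position
constexpr uint32_t kRbStateMask = 1u << kRbStateShift;
constexpr uint32_t kWhiteState = 0u;                  // no barrier work needed
constexpr uint32_t kGrayState = 1u;                   // may hold from-space refs

uint32_t ReadBarrierState(uint32_t lock_word) {
  return (lock_word & kRbStateMask) >> kRbStateShift;
}

uint32_t WithReadBarrierState(uint32_t lock_word, uint32_t rb_state) {
  return (lock_word & ~kRbStateMask) | (rb_state << kRbStateShift);
}

// Mirrors the shape of AtomicSetReadBarrierState(): retry until the CAS
// succeeds, bail out if another thread already changed the state.
bool AtomicSetReadBarrierState(std::atomic<uint32_t>& monitor,
                               uint32_t expected_rb_state,
                               uint32_t rb_state) {
  uint32_t lw = monitor.load(std::memory_order_relaxed);
  do {
    if (ReadBarrierState(lw) != expected_rb_state) {
      return false;  // lost the race
    }
  } while (!monitor.compare_exchange_weak(lw,
                                          WithReadBarrierState(lw, rb_state),
                                          std::memory_order_release,
                                          std::memory_order_relaxed));
  return true;
}

int main() {
  std::atomic<uint32_t> monitor{0x12345678u & ~kRbStateMask};  // white
  assert(ReadBarrierState(monitor.load()) == kWhiteState);
  assert(AtomicSetReadBarrierState(monitor, kWhiteState, kGrayState));
  assert(ReadBarrierState(monitor.load()) == kGrayState);
  assert(!AtomicSetReadBarrierState(monitor, kWhiteState, kGrayState));
  return 0;
}
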
diff --git a/runtime/mirror/object.cc b/runtime/mirror/object.cc
index 8cfb60e..f5b9ab3 100644
--- a/runtime/mirror/object.cc
+++ b/runtime/mirror/object.cc
@@ -235,8 +235,6 @@
   }
   for (ObjPtr<Class> cur = c; cur != nullptr; cur = cur->GetSuperClass()) {
     for (ArtField& field : cur->GetIFields()) {
-      StackHandleScope<1> hs(Thread::Current());
-      Handle<Object> h_object(hs.NewHandle(new_value));
       if (field.GetOffset().Int32Value() == field_offset.Int32Value()) {
         CHECK_NE(field.GetTypeAsPrimitiveType(), Primitive::kPrimNot);
         // TODO: resolve the field type for moving GC.
diff --git a/runtime/mirror/object.h b/runtime/mirror/object.h
index f1ab72a..67b5ddb 100644
--- a/runtime/mirror/object.h
+++ b/runtime/mirror/object.h
@@ -94,19 +94,22 @@
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
   void SetClass(ObjPtr<Class> new_klass) REQUIRES_SHARED(Locks::mutator_lock_);
 
-  // TODO: Clean these up and change to return int32_t
-  Object* GetReadBarrierPointer() REQUIRES_SHARED(Locks::mutator_lock_);
-
-  // Get the read barrier pointer with release semantics, only supported for baker.
-  Object* GetReadBarrierPointerAcquire() REQUIRES_SHARED(Locks::mutator_lock_);
+  // Get the read barrier state with a fake address dependency.
+  // '*fake_address_dependency' will be set to 0.
+  ALWAYS_INLINE uint32_t GetReadBarrierState(uintptr_t* fake_address_dependency)
+      REQUIRES_SHARED(Locks::mutator_lock_);
+  // This version does not offer any special mechanism to prevent load-load reordering.
+  ALWAYS_INLINE uint32_t GetReadBarrierState() REQUIRES_SHARED(Locks::mutator_lock_);
+  // Get the read barrier state with a load-acquire.
+  ALWAYS_INLINE uint32_t GetReadBarrierStateAcquire() REQUIRES_SHARED(Locks::mutator_lock_);
 
 #ifndef USE_BAKER_OR_BROOKS_READ_BARRIER
   NO_RETURN
 #endif
-  void SetReadBarrierPointer(Object* rb_ptr) REQUIRES_SHARED(Locks::mutator_lock_);
+  ALWAYS_INLINE void SetReadBarrierState(uint32_t rb_state) REQUIRES_SHARED(Locks::mutator_lock_);
 
   template<bool kCasRelease = false>
-  ALWAYS_INLINE bool AtomicSetReadBarrierPointer(Object* expected_rb_ptr, Object* rb_ptr)
+  ALWAYS_INLINE bool AtomicSetReadBarrierState(uint32_t expected_rb_state, uint32_t rb_state)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
   ALWAYS_INLINE uint32_t GetMarkBit() REQUIRES_SHARED(Locks::mutator_lock_);
@@ -114,12 +117,13 @@
   ALWAYS_INLINE bool AtomicSetMarkBit(uint32_t expected_mark_bit, uint32_t mark_bit)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
-  void AssertReadBarrierPointer() const REQUIRES_SHARED(Locks::mutator_lock_);
+  // Assert that the read barrier state is the default (white) state.
+  ALWAYS_INLINE void AssertReadBarrierState() const REQUIRES_SHARED(Locks::mutator_lock_);
 
   // The verifier treats all interfaces as java.lang.Object and relies on runtime checks in
   // invoke-interface to detect incompatible interface types.
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
-  bool VerifierInstanceOf(Class* klass) REQUIRES_SHARED(Locks::mutator_lock_);
+  bool VerifierInstanceOf(ObjPtr<Class> klass) REQUIRES_SHARED(Locks::mutator_lock_);
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
   ALWAYS_INLINE bool InstanceOf(ObjPtr<Class> klass) REQUIRES_SHARED(Locks::mutator_lock_);
 
diff --git a/runtime/mirror/object_array-inl.h b/runtime/mirror/object_array-inl.h
index 5fb9459..0fdf132 100644
--- a/runtime/mirror/object_array-inl.h
+++ b/runtime/mirror/object_array-inl.h
@@ -119,10 +119,10 @@
       OffsetOfElement(i), object);
 }
 
-template<class T>
+template<class T> template<VerifyObjectFlags kVerifyFlags, ReadBarrierOption kReadBarrierOption>
 inline T* ObjectArray<T>::GetWithoutChecks(int32_t i) {
   DCHECK(CheckIsValidIndex(i));
-  return GetFieldObject<T>(OffsetOfElement(i));
+  return GetFieldObject<T, kVerifyFlags, kReadBarrierOption>(OffsetOfElement(i));
 }
 
 template<class T>
@@ -145,17 +145,53 @@
   const bool copy_forward = (src != this) || (dst_pos < src_pos) || (dst_pos - src_pos >= count);
   if (copy_forward) {
     // Forward copy.
-    for (int i = 0; i < count; ++i) {
-      // We need a RB here. ObjectArray::GetWithoutChecks() contains a RB.
-      Object* obj = src->GetWithoutChecks(src_pos + i);
-      SetWithoutChecksAndWriteBarrier<false>(dst_pos + i, obj);
+    bool baker_non_gray_case = false;
+    if (kUseReadBarrier && kUseBakerReadBarrier) {
+      uintptr_t fake_address_dependency;
+      if (!ReadBarrier::IsGray(src.Ptr(), &fake_address_dependency)) {
+        baker_non_gray_case = true;
+        DCHECK_EQ(fake_address_dependency, 0U);
+        src.Assign(reinterpret_cast<ObjectArray<T>*>(
+            reinterpret_cast<uintptr_t>(src.Ptr()) | fake_address_dependency));
+        for (int i = 0; i < count; ++i) {
+          // We can skip the RB here because 'src' isn't gray.
+          T* obj = src->template GetWithoutChecks<kDefaultVerifyFlags, kWithoutReadBarrier>(
+              src_pos + i);
+          SetWithoutChecksAndWriteBarrier<false>(dst_pos + i, obj);
+        }
+      }
+    }
+    if (!baker_non_gray_case) {
+      for (int i = 0; i < count; ++i) {
+        // We need a RB here. ObjectArray::GetWithoutChecks() contains a RB.
+        T* obj = src->GetWithoutChecks(src_pos + i);
+        SetWithoutChecksAndWriteBarrier<false>(dst_pos + i, obj);
+      }
     }
   } else {
     // Backward copy.
-    for (int i = count - 1; i >= 0; --i) {
-      // We need a RB here. ObjectArray::GetWithoutChecks() contains a RB.
-      Object* obj = src->GetWithoutChecks(src_pos + i);
-      SetWithoutChecksAndWriteBarrier<false>(dst_pos + i, obj);
+    bool baker_non_gray_case = false;
+    if (kUseReadBarrier && kUseBakerReadBarrier) {
+      uintptr_t fake_address_dependency;
+      if (!ReadBarrier::IsGray(src.Ptr(), &fake_address_dependency)) {
+        baker_non_gray_case = true;
+        DCHECK_EQ(fake_address_dependency, 0U);
+        src.Assign(reinterpret_cast<ObjectArray<T>*>(
+            reinterpret_cast<uintptr_t>(src.Ptr()) | fake_address_dependency));
+        for (int i = count - 1; i >= 0; --i) {
+          // We can skip the RB here because 'src' isn't gray.
+          T* obj = src->template GetWithoutChecks<kDefaultVerifyFlags, kWithoutReadBarrier>(
+              src_pos + i);
+          SetWithoutChecksAndWriteBarrier<false>(dst_pos + i, obj);
+        }
+      }
+    }
+    if (!baker_non_gray_case) {
+      for (int i = count - 1; i >= 0; --i) {
+        // We need a RB here. ObjectArray::GetWithoutChecks() contains a RB.
+        T* obj = src->GetWithoutChecks(src_pos + i);
+        SetWithoutChecksAndWriteBarrier<false>(dst_pos + i, obj);
+      }
     }
   }
   Runtime::Current()->GetHeap()->WriteBarrierArray(this, dst_pos, count);
@@ -184,10 +220,28 @@
   // TODO: Optimize this later?
   // We can't use memmove since it does not handle read barriers and may do by per byte copying.
   // See b/32012820.
-  for (int i = 0; i < count; ++i) {
-    // We need a RB here. ObjectArray::GetWithoutChecks() contains a RB.
-    T* obj = src->GetWithoutChecks(src_pos + i);
-    SetWithoutChecksAndWriteBarrier<false>(dst_pos + i, obj);
+  bool baker_non_gray_case = false;
+  if (kUseReadBarrier && kUseBakerReadBarrier) {
+    uintptr_t fake_address_dependency;
+    if (!ReadBarrier::IsGray(src.Ptr(), &fake_address_dependency)) {
+      baker_non_gray_case = true;
+      DCHECK_EQ(fake_address_dependency, 0U);
+      src.Assign(reinterpret_cast<ObjectArray<T>*>(
+          reinterpret_cast<uintptr_t>(src.Ptr()) | fake_address_dependency));
+      for (int i = 0; i < count; ++i) {
+        // We can skip the RB here because 'src' isn't gray.
+        T* obj = src->template GetWithoutChecks<kDefaultVerifyFlags, kWithoutReadBarrier>(
+            src_pos + i);
+        SetWithoutChecksAndWriteBarrier<false>(dst_pos + i, obj);
+      }
+    }
+  }
+  if (!baker_non_gray_case) {
+    for (int i = 0; i < count; ++i) {
+      // We need a RB here. ObjectArray::GetWithoutChecks() contains a RB.
+      T* obj = src->GetWithoutChecks(src_pos + i);
+      SetWithoutChecksAndWriteBarrier<false>(dst_pos + i, obj);
+    }
   }
   Runtime::Current()->GetHeap()->WriteBarrierArray(this, dst_pos, count);
   if (kIsDebugBuild) {
@@ -212,27 +266,62 @@
   Class* dst_class = GetClass()->GetComponentType();
   Class* lastAssignableElementClass = dst_class;
 
-  Object* o = nullptr;
+  T* o = nullptr;
   int i = 0;
-  for (; i < count; ++i) {
-    // The follow get operations force the objects to be verified.
-    // We need a RB here. ObjectArray::GetWithoutChecks() contains a RB.
-    o = src->GetWithoutChecks(src_pos + i);
-    if (o == nullptr) {
-      // Null is always assignable.
-      SetWithoutChecks<kTransactionActive>(dst_pos + i, nullptr);
-    } else {
-      // TODO: use the underlying class reference to avoid uncompression when not necessary.
-      Class* o_class = o->GetClass();
-      if (LIKELY(lastAssignableElementClass == o_class)) {
-        SetWithoutChecks<kTransactionActive>(dst_pos + i, o);
-      } else if (LIKELY(dst_class->IsAssignableFrom(o_class))) {
-        lastAssignableElementClass = o_class;
-        SetWithoutChecks<kTransactionActive>(dst_pos + i, o);
+  bool baker_non_gray_case = false;
+  if (kUseReadBarrier && kUseBakerReadBarrier) {
+    uintptr_t fake_address_dependency;
+    if (!ReadBarrier::IsGray(src.Ptr(), &fake_address_dependency)) {
+      baker_non_gray_case = true;
+      DCHECK_EQ(fake_address_dependency, 0U);
+      src.Assign(reinterpret_cast<ObjectArray<T>*>(
+          reinterpret_cast<uintptr_t>(src.Ptr()) | fake_address_dependency));
+      for (; i < count; ++i) {
+        // The following get operations force the objects to be verified.
+        // We can skip the RB here because 'src' isn't gray.
+        o = src->template GetWithoutChecks<kDefaultVerifyFlags, kWithoutReadBarrier>(
+            src_pos + i);
+        if (o == nullptr) {
+          // Null is always assignable.
+          SetWithoutChecks<kTransactionActive>(dst_pos + i, nullptr);
+        } else {
+          // TODO: use the underlying class reference to avoid uncompression when not necessary.
+          Class* o_class = o->GetClass();
+          if (LIKELY(lastAssignableElementClass == o_class)) {
+            SetWithoutChecks<kTransactionActive>(dst_pos + i, o);
+          } else if (LIKELY(dst_class->IsAssignableFrom(o_class))) {
+            lastAssignableElementClass = o_class;
+            SetWithoutChecks<kTransactionActive>(dst_pos + i, o);
+          } else {
+            // Can't put this element into the array, break to perform write-barrier and throw
+            // exception.
+            break;
+          }
+        }
+      }
+    }
+  }
+  if (!baker_non_gray_case) {
+    for (; i < count; ++i) {
+      // The following get operations force the objects to be verified.
+      // We need a RB here. ObjectArray::GetWithoutChecks() contains a RB.
+      o = src->GetWithoutChecks(src_pos + i);
+      if (o == nullptr) {
+        // Null is always assignable.
+        SetWithoutChecks<kTransactionActive>(dst_pos + i, nullptr);
       } else {
-        // Can't put this element into the array, break to perform write-barrier and throw
-        // exception.
-        break;
+        // TODO: use the underlying class reference to avoid uncompression when not necessary.
+        Class* o_class = o->GetClass();
+        if (LIKELY(lastAssignableElementClass == o_class)) {
+          SetWithoutChecks<kTransactionActive>(dst_pos + i, o);
+        } else if (LIKELY(dst_class->IsAssignableFrom(o_class))) {
+          lastAssignableElementClass = o_class;
+          SetWithoutChecks<kTransactionActive>(dst_pos + i, o);
+        } else {
+          // Can't put this element into the array, break to perform write-barrier and throw
+          // exception.
+          break;
+        }
       }
     }
   }
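
All three copy loops above follow the same shape: test once whether the source array is gray, and if it is not, copy every element through GetWithoutChecks<kDefaultVerifyFlags, kWithoutReadBarrier> with no per-element barrier; only a gray source falls back to the original barrier-per-element loop. A standalone sketch of that control flow (not ART code), with IsGray() and ReadBarrierOnElement() as stand-ins for ReadBarrier::IsGray() and the barrier inside GetWithoutChecks():

#include <cstddef>
#include <vector>

struct Obj {};

// Hypothetical: in ART this is a single load of the source array's lock word.
bool IsGray(const std::vector<Obj*>& /* src */) { return false; }

// Hypothetical per-element barrier; a no-op here.
Obj* ReadBarrierOnElement(Obj* ref) { return ref; }

void CopyRefs(std::vector<Obj*>& dst, const std::vector<Obj*>& src) {
  const std::size_t count = src.size();
  dst.resize(count);
  if (!IsGray(src)) {
    // Fast path: the source isn't gray, so no element can be a from-space
    // reference and the per-element barrier can be skipped.
    for (std::size_t i = 0; i < count; ++i) {
      dst[i] = src[i];
    }
  } else {
    // Slow path: apply the barrier to every element read.
    for (std::size_t i = 0; i < count; ++i) {
      dst[i] = ReadBarrierOnElement(src[i]);
    }
  }
}

int main() {
  Obj a, b;
  std::vector<Obj*> src = {&a, &b};
  std::vector<Obj*> dst;
  CopyRefs(dst, src);
  return dst.size() == 2 ? 0 : 1;
}
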
diff --git a/runtime/mirror/object_array.h b/runtime/mirror/object_array.h
index e4e954e..b7a9561 100644
--- a/runtime/mirror/object_array.h
+++ b/runtime/mirror/object_array.h
@@ -72,6 +72,8 @@
   ALWAYS_INLINE void SetWithoutChecksAndWriteBarrier(int32_t i, ObjPtr<T> object)
       NO_THREAD_SAFETY_ANALYSIS;
 
+  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
+           ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
   ALWAYS_INLINE T* GetWithoutChecks(int32_t i) REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Copy src into this array (dealing with overlaps as memmove does) without assignability checks.
diff --git a/runtime/mirror/string-inl.h b/runtime/mirror/string-inl.h
index d42bb92..6870fda 100644
--- a/runtime/mirror/string-inl.h
+++ b/runtime/mirror/string-inl.h
@@ -106,9 +106,7 @@
     string->SetCount(count_);
     const uint16_t* const src = src_array_->GetData() + offset_;
     const int32_t length = String::GetLengthFromCount(count_);
-    bool compressible = kUseStringCompression && String::GetCompressionFlagFromCount(count_);
-    DCHECK(!compressible || kUseStringCompression);
-    if (compressible) {
+    if (kUseStringCompression && String::IsCompressed(count_)) {
       for (int i = 0; i < length; ++i) {
         string->GetValueCompressed()[i] = static_cast<uint8_t>(src[i]);
       }
@@ -126,7 +124,8 @@
 // Sets string count and value in the allocation code path to ensure it is guarded by a CAS.
 class SetStringCountAndValueVisitorFromString {
  public:
-  SetStringCountAndValueVisitorFromString(int32_t count, Handle<String> src_string,
+  SetStringCountAndValueVisitorFromString(int32_t count,
+                                          Handle<String> src_string,
                                           int32_t offset) :
     count_(count), src_string_(src_string), offset_(offset) {
   }
@@ -137,8 +136,7 @@
     ObjPtr<String> string = ObjPtr<String>::DownCast(obj);
     string->SetCount(count_);
     const int32_t length = String::GetLengthFromCount(count_);
-    bool compressible = kUseStringCompression && String::GetCompressionFlagFromCount(count_);
-    DCHECK(!compressible || kUseStringCompression);
+    bool compressible = kUseStringCompression && String::IsCompressed(count_);
     if (src_string_->IsCompressed()) {
       const uint8_t* const src = src_string_->GetValueCompressed() + offset_;
       memcpy(string->GetValueCompressed(), src, length * sizeof(uint8_t));
@@ -160,7 +158,7 @@
   const int32_t offset_;
 };
 
-inline String* String::Intern() {
+inline ObjPtr<String> String::Intern() {
   return Runtime::Current()->GetInternTable()->InternWeak(this);
 }
 
@@ -209,8 +207,7 @@
                              gc::AllocatorType allocator_type,
                              const PreFenceVisitor& pre_fence_visitor) {
   constexpr size_t header_size = sizeof(String);
-  const bool compressible = kUseStringCompression &&
-                            String::GetCompressionFlagFromCount(utf16_length_with_flag);
+  const bool compressible = kUseStringCompression && String::IsCompressed(utf16_length_with_flag);
   const size_t block_size = (compressible) ? sizeof(uint8_t) : sizeof(uint16_t);
   size_t length = String::GetLengthFromCount(utf16_length_with_flag);
   static_assert(sizeof(length) <= sizeof(size_t),
@@ -245,7 +242,7 @@
 
 template <bool kIsInstrumented>
 inline String* String::AllocEmptyString(Thread* self, gc::AllocatorType allocator_type) {
-  const int32_t length_with_flag = String::GetFlaggedCount(0);
+  const int32_t length_with_flag = String::GetFlaggedCount(0, /* compressible */ true);
   SetStringCountVisitor visitor(length_with_flag);
   return Alloc<kIsInstrumented>(self, length_with_flag, allocator_type, visitor);
 }
@@ -255,10 +252,9 @@
                                           Handle<ByteArray> array, int32_t offset,
                                           int32_t high_byte, gc::AllocatorType allocator_type) {
   const uint8_t* const src = reinterpret_cast<uint8_t*>(array->GetData()) + offset;
-  const bool compressible = kUseStringCompression && String::AllASCII<uint8_t>(src, byte_length)
-                                            && (high_byte == 0);
-  const int32_t length_with_flag = (compressible) ? String::GetFlaggedCount(byte_length)
-                                                  : byte_length;
+  const bool compressible =
+      kUseStringCompression && String::AllASCII<uint8_t>(src, byte_length) && (high_byte == 0);
+  const int32_t length_with_flag = String::GetFlaggedCount(byte_length, compressible);
   SetStringCountAndBytesVisitor visitor(length_with_flag, array, offset, high_byte << 8);
   String* string = Alloc<kIsInstrumented>(self, length_with_flag, allocator_type, visitor);
   return string;
@@ -272,7 +268,7 @@
   DCHECK_GE(array->GetLength(), count);
   const bool compressible = kUseStringCompression &&
                             String::AllASCII<uint16_t>(array->GetData() + offset, count);
-  const int32_t length_with_flag = (compressible) ? String::GetFlaggedCount(count) : count;
+  const int32_t length_with_flag = String::GetFlaggedCount(count, compressible);
   SetStringCountAndValueVisitorFromCharArray visitor(length_with_flag, array, offset);
   String* new_string = Alloc<kIsInstrumented>(self, length_with_flag, allocator_type, visitor);
   return new_string;
@@ -284,8 +280,7 @@
   const bool compressible = kUseStringCompression &&
       ((string->IsCompressed()) ? true : String::AllASCII<uint16_t>(string->GetValue() + offset,
                                                                     string_length));
-  const int32_t length_with_flag = (compressible) ? String::GetFlaggedCount(string_length)
-                                                  : string_length;
+  const int32_t length_with_flag = String::GetFlaggedCount(string_length, compressible);
   SetStringCountAndValueVisitorFromString visitor(length_with_flag, string, offset);
   String* new_string = Alloc<kIsInstrumented>(self, length_with_flag, allocator_type, visitor);
   return new_string;
@@ -311,7 +306,7 @@
 template<typename MemoryType>
 bool String::AllASCII(const MemoryType* const chars, const int length) {
   for (int i = 0; i < length; ++i) {
-    if (chars[i] > 0x80) {
+    if (chars[i] >= 0x80) {
       return false;
     }
   }
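
The AllASCII() change above tightens the predicate from "chars[i] > 0x80" to "chars[i] >= 0x80": ASCII covers 0x00 through 0x7F, so 0x80 itself is already non-ASCII and must not be treated as compressible. A small self-contained check of that boundary (not ART code):

#include <cassert>
#include <cstdint>

template <typename MemoryType>
bool AllAscii(const MemoryType* chars, int length) {
  for (int i = 0; i < length; ++i) {
    if (chars[i] >= 0x80) {  // 0x80 is the first non-ASCII code point
      return false;
    }
  }
  return true;
}

int main() {
  const uint16_t ascii[] = {'f', 'o', 'o', 0x7F};
  const uint16_t not_ascii[] = {'f', 0x80};  // rejected only with >=
  assert(AllAscii(ascii, 4));
  assert(!AllAscii(not_ascii, 2));
  return 0;
}
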
diff --git a/runtime/mirror/string.cc b/runtime/mirror/string.cc
index 4336aa1..0ab0bd6 100644
--- a/runtime/mirror/string.cc
+++ b/runtime/mirror/string.cc
@@ -95,8 +95,7 @@
   gc::AllocatorType allocator_type = Runtime::Current()->GetHeap()->GetCurrentAllocator();
   const bool compressible = kUseStringCompression &&
       (string->IsCompressed() && string2->IsCompressed());
-  const int32_t length_with_flag = compressible ? String::GetFlaggedCount(length + length2)
-                                                : (length + length2);
+  const int32_t length_with_flag = String::GetFlaggedCount(length + length2, compressible);
 
   SetStringCountVisitor visitor(length_with_flag);
   ObjPtr<String> new_string = Alloc<true>(self, length_with_flag, allocator_type, visitor);
@@ -132,8 +131,7 @@
   gc::AllocatorType allocator_type = Runtime::Current()->GetHeap()->GetCurrentAllocator();
   const bool compressible = kUseStringCompression &&
                             String::AllASCII<uint16_t>(utf16_data_in, utf16_length);
-  int32_t length_with_flag = (compressible) ? String::GetFlaggedCount(utf16_length)
-                                            : utf16_length;
+  int32_t length_with_flag = String::GetFlaggedCount(utf16_length, compressible);
   SetStringCountVisitor visitor(length_with_flag);
   ObjPtr<String> string = Alloc<true>(self, length_with_flag, allocator_type, visitor);
   if (UNLIKELY(string == nullptr)) {
@@ -169,8 +167,7 @@
                                       int32_t utf8_length) {
   gc::AllocatorType allocator_type = Runtime::Current()->GetHeap()->GetCurrentAllocator();
   const bool compressible = kUseStringCompression && (utf16_length == utf8_length);
-  const int32_t utf16_length_with_flag = (compressible) ? String::GetFlaggedCount(utf16_length)
-                                                        : utf16_length;
+  const int32_t utf16_length_with_flag = String::GetFlaggedCount(utf16_length, compressible);
   SetStringCountVisitor visitor(utf16_length_with_flag);
   ObjPtr<String> string = Alloc<true>(self, utf16_length_with_flag, allocator_type, visitor);
   if (UNLIKELY(string == nullptr)) {
diff --git a/runtime/mirror/string.h b/runtime/mirror/string.h
index a1b674a..95b6c3e 100644
--- a/runtime/mirror/string.h
+++ b/runtime/mirror/string.h
@@ -33,6 +33,10 @@
 
 // String Compression
 static constexpr bool kUseStringCompression = false;
+enum class StringCompressionFlag : uint32_t {
+  kCompressed = 0u,
+  kUncompressed = 1u
+};
 
 // C++ mirror of java.lang.String
 class MANAGED String FINAL : public Object {
@@ -78,7 +82,6 @@
   void SetCount(int32_t new_count) REQUIRES_SHARED(Locks::mutator_lock_) {
     // Count is invariant so use non-transactional mode. Also disable check as we may run inside
     // a transaction.
-    DCHECK_LE(0, (new_count & INT32_MAX));
     SetField32<false, false>(OFFSET_OF_OBJECT_MEMBER(String, count_), new_count);
   }
 
@@ -93,7 +96,7 @@
 
   void SetCharAt(int32_t index, uint16_t c) REQUIRES_SHARED(Locks::mutator_lock_);
 
-  String* Intern() REQUIRES_SHARED(Locks::mutator_lock_);
+  ObjPtr<String> Intern() REQUIRES_SHARED(Locks::mutator_lock_);
 
   template <bool kIsInstrumented>
   ALWAYS_INLINE static String* AllocFromByteArray(Thread* self, int32_t byte_length,
@@ -175,7 +178,7 @@
 
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
   bool IsCompressed() REQUIRES_SHARED(Locks::mutator_lock_) {
-    return kUseStringCompression && GetCompressionFlagFromCount(GetCount());
+    return kUseStringCompression && IsCompressed(GetCount());
   }
 
   bool IsValueNull() REQUIRES_SHARED(Locks::mutator_lock_);
@@ -183,16 +186,27 @@
   template<typename MemoryType>
   static bool AllASCII(const MemoryType* const chars, const int length);
 
-  ALWAYS_INLINE static bool GetCompressionFlagFromCount(const int32_t count) {
-    return kUseStringCompression && ((count & (1u << 31)) != 0);
+  ALWAYS_INLINE static bool IsCompressed(int32_t count) {
+    return GetCompressionFlagFromCount(count) == StringCompressionFlag::kCompressed;
   }
 
-  ALWAYS_INLINE static int32_t GetLengthFromCount(const int32_t count) {
-    return kUseStringCompression ? (count & INT32_MAX) : count;
+  ALWAYS_INLINE static StringCompressionFlag GetCompressionFlagFromCount(int32_t count) {
+    return kUseStringCompression
+        ? static_cast<StringCompressionFlag>(static_cast<uint32_t>(count) & 1u)
+        : StringCompressionFlag::kUncompressed;
   }
 
-  ALWAYS_INLINE static int32_t GetFlaggedCount(const int32_t count) {
-    return kUseStringCompression ? (count | (1u << 31)) : count;
+  ALWAYS_INLINE static int32_t GetLengthFromCount(int32_t count) {
+    return kUseStringCompression ? static_cast<int32_t>(static_cast<uint32_t>(count) >> 1) : count;
+  }
+
+  ALWAYS_INLINE static int32_t GetFlaggedCount(int32_t length, bool compressible) {
+    return kUseStringCompression
+        ? static_cast<int32_t>((static_cast<uint32_t>(length) << 1) |
+                               (static_cast<uint32_t>(compressible
+                                                          ? StringCompressionFlag::kCompressed
+                                                          : StringCompressionFlag::kUncompressed)))
+        : length;
   }
 
   static Class* GetJavaLangString() REQUIRES_SHARED(Locks::mutator_lock_) {
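
With this change the count field packs the string length into the upper 31 bits and the compression flag into bit 0 (kCompressed == 0, kUncompressed == 1), replacing the old sign-bit flag. A standalone sketch of the encoding and decoding (not ART code, and ignoring the kUseStringCompression fallback):

#include <cassert>
#include <cstdint>

enum class CompressionFlag : uint32_t { kCompressed = 0u, kUncompressed = 1u };

int32_t GetFlaggedCount(int32_t length, bool compressible) {
  const uint32_t flag = static_cast<uint32_t>(
      compressible ? CompressionFlag::kCompressed : CompressionFlag::kUncompressed);
  return static_cast<int32_t>((static_cast<uint32_t>(length) << 1) | flag);
}

int32_t GetLengthFromCount(int32_t count) {
  return static_cast<int32_t>(static_cast<uint32_t>(count) >> 1);
}

bool IsCompressed(int32_t count) {
  return (static_cast<uint32_t>(count) & 1u) ==
         static_cast<uint32_t>(CompressionFlag::kCompressed);
}

int main() {
  const int32_t compressed = GetFlaggedCount(/* length */ 5, /* compressible */ true);
  assert(GetLengthFromCount(compressed) == 5);
  assert(IsCompressed(compressed));
  const int32_t uncompressed = GetFlaggedCount(5, /* compressible */ false);
  assert(GetLengthFromCount(uncompressed) == 5);
  assert(!IsCompressed(uncompressed));
  return 0;
}
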
diff --git a/runtime/monitor.cc b/runtime/monitor.cc
index eb74fcf..e7de7e6 100644
--- a/runtime/monitor.cc
+++ b/runtime/monitor.cc
@@ -771,7 +771,7 @@
         return false;
       }
       // Can't deflate if our lock count is too high.
-      if (monitor->lock_count_ > LockWord::kThinLockMaxCount) {
+      if (static_cast<uint32_t>(monitor->lock_count_) > LockWord::kThinLockMaxCount) {
         return false;
       }
       // Deflate to a thin lock.
@@ -1330,7 +1330,6 @@
 }
 
 void MonitorList::BroadcastForNewMonitors() {
-  CHECK(kUseReadBarrier);
   Thread* self = Thread::Current();
   MutexLock mu(self, monitor_list_lock_);
   monitor_add_condition_.Broadcast(self);
@@ -1341,6 +1340,9 @@
   MutexLock mu(self, monitor_list_lock_);
   while (UNLIKELY((!kUseReadBarrier && !allow_new_monitors_) ||
                   (kUseReadBarrier && !self->GetWeakRefAccessEnabled()))) {
+    // Check and run the empty checkpoint before blocking so the empty checkpoint will work in the
+    // presence of threads blocking for weak ref access.
+    self->CheckEmptyCheckpoint();
     monitor_add_condition_.WaitHoldingLocks(self);
   }
   list_.push_front(m);
diff --git a/runtime/monitor_test.cc b/runtime/monitor_test.cc
index 4ee46dc..4fbfe47 100644
--- a/runtime/monitor_test.cc
+++ b/runtime/monitor_test.cc
@@ -401,14 +401,11 @@
   Thread* const self = Thread::Current();
   ThreadPool thread_pool("the pool", 2);
   ScopedObjectAccess soa(self);
-  StackHandleScope<3> hs(self);
+  StackHandleScope<1> hs(self);
   Handle<mirror::Object> obj1(
       hs.NewHandle<mirror::Object>(mirror::String::AllocFromModifiedUtf8(self, "hello, world!")));
-  Handle<mirror::Object> obj2(
-      hs.NewHandle<mirror::Object>(mirror::String::AllocFromModifiedUtf8(self, "hello, world!")));
   {
     ObjectLock<mirror::Object> lock1(self, obj1);
-    ObjectLock<mirror::Object> lock2(self, obj1);
     {
       ObjectTryLock<mirror::Object> trylock(self, obj1);
       EXPECT_TRUE(trylock.Acquired());
diff --git a/runtime/native/dalvik_system_VMDebug.cc b/runtime/native/dalvik_system_VMDebug.cc
index 8d85425..adf35b6 100644
--- a/runtime/native/dalvik_system_VMDebug.cc
+++ b/runtime/native/dalvik_system_VMDebug.cc
@@ -420,8 +420,10 @@
   }
 }
 
-static bool SetRuntimeStatValue(JNIEnv* env, jobjectArray result, VMDebugRuntimeStatId id,
-                                std::string value) {
+static bool SetRuntimeStatValue(JNIEnv* env,
+                                jobjectArray result,
+                                VMDebugRuntimeStatId id,
+                                const std::string& value) {
   ScopedLocalRef<jstring> jvalue(env, env->NewStringUTF(value.c_str()));
   if (jvalue.get() == nullptr) {
     return false;
diff --git a/runtime/native/java_lang_VMClassLoader.cc b/runtime/native/java_lang_VMClassLoader.cc
index e5bab36..284d2d1 100644
--- a/runtime/native/java_lang_VMClassLoader.cc
+++ b/runtime/native/java_lang_VMClassLoader.cc
@@ -48,7 +48,7 @@
                                                           Handle<mirror::ClassLoader> class_loader)
       REQUIRES_SHARED(Locks::mutator_lock_) {
     ObjPtr<mirror::Class> result;
-    if (cl->FindClassInPathClassLoader(soa, self, descriptor, hash, class_loader, &result)) {
+    if (cl->FindClassInBaseDexClassLoader(soa, self, descriptor, hash, class_loader, &result)) {
       return result;
     }
     return nullptr;
diff --git a/runtime/native/java_lang_reflect_Executable.cc b/runtime/native/java_lang_reflect_Executable.cc
index 1b128fb..73b81a7 100644
--- a/runtime/native/java_lang_reflect_Executable.cc
+++ b/runtime/native/java_lang_reflect_Executable.cc
@@ -136,7 +136,7 @@
   Handle<mirror::Class> parameter_class =
       hs.NewHandle(soa.Decode<mirror::Class>(WellKnownClasses::java_lang_reflect_Parameter));
   ArtMethod* parameter_init =
-      soa.DecodeMethod(WellKnownClasses::java_lang_reflect_Parameter_init);
+      jni::DecodeArtMethod(WellKnownClasses::java_lang_reflect_Parameter_init);
 
   // Mutable handles used in the loop below to ensure cleanup without scaling the number of
   // handles by the number of parameters.
diff --git a/runtime/native/java_lang_reflect_Field.cc b/runtime/native/java_lang_reflect_Field.cc
index 329aae9..6206948 100644
--- a/runtime/native/java_lang_reflect_Field.cc
+++ b/runtime/native/java_lang_reflect_Field.cc
@@ -446,6 +446,12 @@
   return soa.AddLocalReference<jobject>(annotations::GetAnnotationForField(field, klass));
 }
 
+static jlong Field_getArtField(JNIEnv* env, jobject javaField) {
+  ScopedFastNativeObjectAccess soa(env);
+  ArtField* field = soa.Decode<mirror::Field>(javaField)->GetArtField();
+  return reinterpret_cast<jlong>(field);
+}
+
 static jobjectArray Field_getDeclaredAnnotations(JNIEnv* env, jobject javaField) {
   ScopedFastNativeObjectAccess soa(env);
   ArtField* field = soa.Decode<mirror::Field>(javaField)->GetArtField();
@@ -489,6 +495,7 @@
   NATIVE_METHOD(Field, getChar,    "!(Ljava/lang/Object;)C"),
   NATIVE_METHOD(Field, getAnnotationNative,
                 "!(Ljava/lang/Class;)Ljava/lang/annotation/Annotation;"),
+  NATIVE_METHOD(Field, getArtField, "!()J"),
   NATIVE_METHOD(Field, getDeclaredAnnotations, "!()[Ljava/lang/annotation/Annotation;"),
   NATIVE_METHOD(Field, getSignatureAnnotation, "!()[Ljava/lang/String;"),
   NATIVE_METHOD(Field, getDouble,  "!(Ljava/lang/Object;)D"),
diff --git a/runtime/native/sun_misc_Unsafe.cc b/runtime/native/sun_misc_Unsafe.cc
index cdf4b14..644df07 100644
--- a/runtime/native/sun_misc_Unsafe.cc
+++ b/runtime/native/sun_misc_Unsafe.cc
@@ -65,7 +65,7 @@
     mirror::HeapReference<mirror::Object>* field_addr =
         reinterpret_cast<mirror::HeapReference<mirror::Object>*>(
             reinterpret_cast<uint8_t*>(obj.Ptr()) + static_cast<size_t>(offset));
-    ReadBarrier::Barrier<mirror::Object, kWithReadBarrier, /*kAlwaysUpdateField*/true>(
+    ReadBarrier::Barrier<mirror::Object, kWithReadBarrier, /* kAlwaysUpdateField */ true>(
         obj.Ptr(),
         MemberOffset(offset),
         field_addr);
diff --git a/runtime/native_bridge_art_interface.cc b/runtime/native_bridge_art_interface.cc
index 5ab6097..c58854b 100644
--- a/runtime/native_bridge_art_interface.cc
+++ b/runtime/native_bridge_art_interface.cc
@@ -25,6 +25,7 @@
 #include "base/logging.h"
 #include "base/macros.h"
 #include "dex_file-inl.h"
+#include "jni_internal.h"
 #include "mirror/class-inl.h"
 #include "scoped_thread_state_change-inl.h"
 #include "sigchain.h"
@@ -33,7 +34,7 @@
 
 static const char* GetMethodShorty(JNIEnv* env, jmethodID mid) {
   ScopedObjectAccess soa(env);
-  ArtMethod* m = soa.DecodeMethod(mid);
+  ArtMethod* m = jni::DecodeArtMethod(mid);
   return m->GetShorty();
 }
 
@@ -90,14 +91,14 @@
   GetMethodShorty, GetNativeMethodCount, GetNativeMethods
 };
 
-bool LoadNativeBridge(std::string& native_bridge_library_filename) {
+bool LoadNativeBridge(const std::string& native_bridge_library_filename) {
   VLOG(startup) << "Runtime::Setup native bridge library: "
       << (native_bridge_library_filename.empty() ? "(empty)" : native_bridge_library_filename);
   return android::LoadNativeBridge(native_bridge_library_filename.c_str(),
                                    &native_bridge_art_callbacks_);
 }
 
-void PreInitializeNativeBridge(std::string dir) {
+void PreInitializeNativeBridge(const std::string& dir) {
   VLOG(startup) << "Runtime::Pre-initialize native bridge";
 #ifndef __APPLE__  // Mac OS does not support CLONE_NEWNS.
   if (unshare(CLONE_NEWNS) == -1) {
diff --git a/runtime/native_bridge_art_interface.h b/runtime/native_bridge_art_interface.h
index 090cddb..c86e5da 100644
--- a/runtime/native_bridge_art_interface.h
+++ b/runtime/native_bridge_art_interface.h
@@ -26,10 +26,10 @@
 // Mirror libnativebridge interface. Done to have the ART callbacks out of line, and not require
 // the system/core header file in other files.
 
-bool LoadNativeBridge(std::string& native_bridge_library_filename);
+bool LoadNativeBridge(const std::string& native_bridge_library_filename);
 
 // This is mostly for testing purposes, as in a full system this is called by Zygote code.
-void PreInitializeNativeBridge(std::string dir);
+void PreInitializeNativeBridge(const std::string& dir);
 
 void InitializeNativeBridge(JNIEnv* env, const char* instruction_set);
 
diff --git a/runtime/native_stack_dump.cc b/runtime/native_stack_dump.cc
index 00ab577..2376889 100644
--- a/runtime/native_stack_dump.cc
+++ b/runtime/native_stack_dump.cc
@@ -256,7 +256,7 @@
   Drain(2U, prefix, pipe, os);
 }
 
-static bool RunCommand(std::string cmd) {
+static bool RunCommand(const std::string& cmd) {
   FILE* stream = popen(cmd.c_str(), "r");
   if (stream) {
     pclose(stream);
diff --git a/runtime/oat.h b/runtime/oat.h
index 814a493..8c84d42 100644
--- a/runtime/oat.h
+++ b/runtime/oat.h
@@ -32,7 +32,7 @@
 class PACKED(4) OatHeader {
  public:
   static constexpr uint8_t kOatMagic[] = { 'o', 'a', 't', '\n' };
-  static constexpr uint8_t kOatVersion[] = { '0', '9', '0', '\0' };
+  static constexpr uint8_t kOatVersion[] = { '0', '9', '2', '\0' };
 
   static constexpr const char* kImageLocationKey = "image-location";
   static constexpr const char* kDex2OatCmdLineKey = "dex2oat-cmdline";
diff --git a/runtime/oat_file_assistant.cc b/runtime/oat_file_assistant.cc
index ff00451..0679360 100644
--- a/runtime/oat_file_assistant.cc
+++ b/runtime/oat_file_assistant.cc
@@ -595,7 +595,7 @@
 
   std::vector<std::string> args;
   args.push_back("--dex-file=" + dex_location_);
-  args.push_back("--vdex-fd=" + std::to_string(vdex_file->Fd()));
+  args.push_back("--output-vdex-fd=" + std::to_string(vdex_file->Fd()));
   args.push_back("--oat-fd=" + std::to_string(oat_file->Fd()));
   args.push_back("--oat-location=" + oat_file_name);
 
diff --git a/runtime/oat_file_assistant_test.cc b/runtime/oat_file_assistant_test.cc
index d18e946..d4337b9 100644
--- a/runtime/oat_file_assistant_test.cc
+++ b/runtime/oat_file_assistant_test.cc
@@ -999,7 +999,7 @@
 
 // Turn an absolute path into a path relative to the current working
 // directory.
-static std::string MakePathRelative(std::string target) {
+static std::string MakePathRelative(const std::string& target) {
   char buf[MAXPATHLEN];
   std::string cwd = getcwd(buf, MAXPATHLEN);
 
diff --git a/runtime/oat_file_manager.cc b/runtime/oat_file_manager.cc
index 68f71f7..5641459 100644
--- a/runtime/oat_file_manager.cc
+++ b/runtime/oat_file_manager.cc
@@ -28,6 +28,7 @@
 #include "gc/scoped_gc_critical_section.h"
 #include "gc/space/image_space.h"
 #include "handle_scope-inl.h"
+#include "jni_internal.h"
 #include "mirror/class_loader.h"
 #include "oat_file_assistant.h"
 #include "obj_ptr-inl.h"
@@ -224,9 +225,10 @@
   }
 }
 
+template <typename T>
 static void IterateOverJavaDexFile(ObjPtr<mirror::Object> dex_file,
                                    ArtField* const cookie_field,
-                                   std::function<bool(const DexFile*)> fn)
+                                   const T& fn)
     REQUIRES_SHARED(Locks::mutator_lock_) {
   if (dex_file != nullptr) {
     mirror::LongArray* long_array = cookie_field->GetObject(dex_file)->AsLongArray();
@@ -247,26 +249,27 @@
   }
 }
 
+template <typename T>
 static void IterateOverPathClassLoader(
-    ScopedObjectAccessAlreadyRunnable& soa,
     Handle<mirror::ClassLoader> class_loader,
     MutableHandle<mirror::ObjectArray<mirror::Object>> dex_elements,
-    std::function<bool(const DexFile*)> fn) REQUIRES_SHARED(Locks::mutator_lock_) {
+    const T& fn) REQUIRES_SHARED(Locks::mutator_lock_) {
   // Handle this step.
   // Handle as if this is the child PathClassLoader.
   // The class loader is a PathClassLoader which inherits from BaseDexClassLoader.
   // We need to get the DexPathList and loop through it.
-  ArtField* const cookie_field = soa.DecodeField(WellKnownClasses::dalvik_system_DexFile_cookie);
+  ArtField* const cookie_field =
+      jni::DecodeArtField(WellKnownClasses::dalvik_system_DexFile_cookie);
   ArtField* const dex_file_field =
-      soa.DecodeField(WellKnownClasses::dalvik_system_DexPathList__Element_dexFile);
+      jni::DecodeArtField(WellKnownClasses::dalvik_system_DexPathList__Element_dexFile);
   ObjPtr<mirror::Object> dex_path_list =
-      soa.DecodeField(WellKnownClasses::dalvik_system_PathClassLoader_pathList)->
-      GetObject(class_loader.Get());
+      jni::DecodeArtField(WellKnownClasses::dalvik_system_BaseDexClassLoader_pathList)->
+          GetObject(class_loader.Get());
   if (dex_path_list != nullptr && dex_file_field != nullptr && cookie_field != nullptr) {
     // DexPathList has an array dexElements of Elements[] which each contain a dex file.
     ObjPtr<mirror::Object> dex_elements_obj =
-        soa.DecodeField(WellKnownClasses::dalvik_system_DexPathList_dexElements)->
-        GetObject(dex_path_list);
+        jni::DecodeArtField(WellKnownClasses::dalvik_system_DexPathList_dexElements)->
+            GetObject(dex_path_list);
     // Loop through each dalvik.system.DexPathList$Element's dalvik.system.DexFile and look
     // at the mCookie which is a DexFile vector.
     if (dex_elements_obj != nullptr) {
@@ -323,7 +326,7 @@
       hs.NewHandle<mirror::ObjectArray<mirror::Object>>(nullptr));
   Handle<mirror::ClassLoader> h_class_loader(hs.NewHandle(class_loader));
 
-  IterateOverPathClassLoader(soa, h_class_loader, dex_elements, GetDexFilesFn);
+  IterateOverPathClassLoader(h_class_loader, dex_elements, GetDexFilesFn);
 
   return true;
 }
@@ -337,9 +340,10 @@
     return;
   }
 
-  ArtField* const cookie_field = soa.DecodeField(WellKnownClasses::dalvik_system_DexFile_cookie);
+  ArtField* const cookie_field =
+      jni::DecodeArtField(WellKnownClasses::dalvik_system_DexFile_cookie);
   ArtField* const dex_file_field =
-      soa.DecodeField(WellKnownClasses::dalvik_system_DexPathList__Element_dexFile);
+      jni::DecodeArtField(WellKnownClasses::dalvik_system_DexPathList__Element_dexFile);
   ObjPtr<mirror::Class> const element_class = soa.Decode<mirror::Class>(
       WellKnownClasses::dalvik_system_DexPathList__Element);
   ObjPtr<mirror::Class> const dexfile_class = soa.Decode<mirror::Class>(
@@ -377,7 +381,7 @@
   }
 }
 
-static bool AreSharedLibrariesOk(const std::string shared_libraries,
+static bool AreSharedLibrariesOk(const std::string& shared_libraries,
                                  std::priority_queue<DexFileAndClassPair>& queue) {
   if (shared_libraries.empty()) {
     if (queue.empty()) {
@@ -398,10 +402,14 @@
     while (!temp.empty() && index < shared_libraries_split.size() - 1) {
       DexFileAndClassPair pair(temp.top());
       const DexFile* dex_file = pair.GetDexFile();
-      std::string dex_filename(dex_file->GetLocation());
+      const std::string& dex_filename = dex_file->GetLocation();
+      if (dex_filename != shared_libraries_split[index]) {
+        break;
+      }
+      char* end;
+      size_t shared_lib_checksum = strtoul(shared_libraries_split[index + 1].c_str(), &end, 10);
       uint32_t dex_checksum = dex_file->GetLocationChecksum();
-      if (dex_filename != shared_libraries_split[index] ||
-          dex_checksum != std::stoul(shared_libraries_split[index + 1])) {
+      if (*end != '\0' || dex_checksum != shared_lib_checksum) {
         break;
       }
       temp.pop();
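
The AreSharedLibrariesOk() change above replaces std::stoul, which throws on malformed input, with strtoul plus an end-pointer check, so a bad checksum token in the shared-libraries string is treated as a mismatch instead of raising an exception. A standalone sketch of that parsing pattern (ParseChecksum is a hypothetical helper, not part of the change):

#include <cstdint>
#include <cstdio>
#include <cstdlib>
#include <string>

// Returns true and stores the value only if `text` is a complete decimal
// number. (Overflow handling omitted for brevity.)
bool ParseChecksum(const std::string& text, uint32_t* out) {
  if (text.empty()) {
    return false;
  }
  char* end = nullptr;
  const unsigned long value = std::strtoul(text.c_str(), &end, 10);
  if (*end != '\0') {
    return false;  // trailing junk, e.g. "123abc"
  }
  *out = static_cast<uint32_t>(value);
  return true;
}

int main() {
  uint32_t checksum = 0;
  std::printf("%d %u\n", ParseChecksum("305419896", &checksum), checksum);  // 1 305419896
  std::printf("%d\n", ParseChecksum("not-a-number", &checksum));            // 0
  return 0;
}
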
diff --git a/runtime/obj_ptr.h b/runtime/obj_ptr.h
index 9318232..d24c6fb 100644
--- a/runtime/obj_ptr.h
+++ b/runtime/obj_ptr.h
@@ -20,6 +20,7 @@
 #include <ostream>
 #include <type_traits>
 
+#include "base/macros.h"
 #include "base/mutex.h"  // For Locks::mutator_lock_.
 #include "globals.h"
 
@@ -41,17 +42,26 @@
  public:
   ALWAYS_INLINE ObjPtr() REQUIRES_SHARED(Locks::mutator_lock_) : reference_(0u) {}
 
-  ALWAYS_INLINE ObjPtr(std::nullptr_t) REQUIRES_SHARED(Locks::mutator_lock_) : reference_(0u) {}
+  // Note: The following constructors allow implicit conversion. This simplifies code that uses
+  //       them, e.g., for parameter passing. However, in general, implicit-conversion constructors
+  //       are discouraged and detected by cpplint and clang-tidy. So mark these constructors
+  //       as NOLINT (without category, as the categories are different).
+
+  ALWAYS_INLINE ObjPtr(std::nullptr_t)  // NOLINT
+      REQUIRES_SHARED(Locks::mutator_lock_)
+      : reference_(0u) {}
 
   template <typename Type>
-  ALWAYS_INLINE ObjPtr(Type* ptr) REQUIRES_SHARED(Locks::mutator_lock_)
+  ALWAYS_INLINE ObjPtr(Type* ptr)  // NOLINT
+      REQUIRES_SHARED(Locks::mutator_lock_)
       : reference_(Encode(static_cast<MirrorType*>(ptr))) {
     static_assert(std::is_base_of<MirrorType, Type>::value,
                   "Input type must be a subtype of the ObjPtr type");
   }
 
   template <typename Type>
-  ALWAYS_INLINE ObjPtr(const ObjPtr<Type, kPoison>& other) REQUIRES_SHARED(Locks::mutator_lock_)
+  ALWAYS_INLINE ObjPtr(const ObjPtr<Type, kPoison>& other)  // NOLINT
+      REQUIRES_SHARED(Locks::mutator_lock_)
       : reference_(Encode(static_cast<MirrorType*>(other.Ptr()))) {
     static_assert(std::is_base_of<MirrorType, Type>::value,
                   "Input type must be a subtype of the ObjPtr type");
@@ -154,6 +164,9 @@
   uintptr_t reference_;
 };
 
+static_assert(std::is_trivially_copyable<ObjPtr<void>>::value,
+              "ObjPtr should be trivially copyable");
+
 // Hash function for stl data structures.
 class HashObjPtr {
  public:
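
The ObjPtr changes above keep the converting constructors implicit on purpose (hence the NOLINT markers) so an ObjPtr can be passed wherever a raw mirror pointer is expected, and the new static_assert pins down that the wrapper stays trivially copyable and therefore cheap to pass by value. A standalone sketch of that pattern (not ART code) with a hypothetical Ptr<T> wrapper:

#include <cstddef>
#include <type_traits>

template <typename T>
class Ptr {
 public:
  Ptr() : ptr_(nullptr) {}
  Ptr(std::nullptr_t) : ptr_(nullptr) {}  // NOLINT: implicit on purpose
  template <typename U>
  Ptr(U* ptr) : ptr_(ptr) {               // NOLINT: implicit on purpose
    static_assert(std::is_base_of<T, U>::value, "U must derive from T");
  }
  T* Get() const { return ptr_; }

 private:
  T* ptr_;
};

// Trivially copyable despite the user-provided constructors: copy/move/destroy
// are all implicit and trivial, so passing Ptr by value stays cheap.
static_assert(std::is_trivially_copyable<Ptr<int>>::value,
              "Ptr should be trivially copyable");

struct Base {};
struct Derived : Base {};

void Use(Ptr<Base> p) { (void)p; }

int main() {
  Derived d;
  Use(&d);       // implicit Derived* -> Ptr<Base>
  Use(nullptr);  // implicit nullptr -> Ptr<Base>
  return 0;
}
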
diff --git a/runtime/openjdkjvmti/Android.bp b/runtime/openjdkjvmti/Android.bp
index de6683c..b323aef 100644
--- a/runtime/openjdkjvmti/Android.bp
+++ b/runtime/openjdkjvmti/Android.bp
@@ -18,9 +18,12 @@
     defaults: ["art_defaults"],
     host_supported: true,
     srcs: ["events.cc",
-           "heap.cc",
            "object_tagging.cc",
            "OpenjdkJvmTi.cc",
+           "ti_class.cc",
+           "ti_heap.cc",
+           "ti_method.cc",
+           "ti_stack.cc",
            "transform.cc"],
     include_dirs: ["art/runtime"],
     shared_libs: [
diff --git a/runtime/openjdkjvmti/OpenjdkJvmTi.cc b/runtime/openjdkjvmti/OpenjdkJvmTi.cc
index 7292946..6480843 100644
--- a/runtime/openjdkjvmti/OpenjdkJvmTi.cc
+++ b/runtime/openjdkjvmti/OpenjdkJvmTi.cc
@@ -39,7 +39,6 @@
 #include "art_jvmti.h"
 #include "base/mutex.h"
 #include "events-inl.h"
-#include "heap.h"
 #include "jni_env_ext-inl.h"
 #include "object_tagging.h"
 #include "obj_ptr-inl.h"
@@ -47,6 +46,10 @@
 #include "scoped_thread_state_change-inl.h"
 #include "thread_list.h"
 #include "thread-inl.h"
+#include "ti_class.h"
+#include "ti_heap.h"
+#include "ti_method.h"
+#include "ti_stack.h"
 #include "transform.h"
 
 // TODO Remove this at some point by annotating all the methods. It was put in to make the skeleton
@@ -58,20 +61,38 @@
 EventHandler gEventHandler;
 ObjectTagTable gObjectTagTable(&gEventHandler);
 
+#define ENSURE_NON_NULL(n)      \
+  do {                          \
+    if ((n) == nullptr) {       \
+      return ERR(NULL_POINTER); \
+    }                           \
+  } while (false)
+
 class JvmtiFunctions {
  private:
   static bool IsValidEnv(jvmtiEnv* env) {
     return env != nullptr;
   }
 
+#define ENSURE_VALID_ENV(env)          \
+  do {                                 \
+    if (!IsValidEnv(env)) {            \
+      return ERR(INVALID_ENVIRONMENT); \
+    }                                  \
+  } while (false)
+
+#define ENSURE_HAS_CAP(env, cap) \
+  do { \
+    ENSURE_VALID_ENV(env); \
+    if (ArtJvmTiEnv::AsArtJvmTiEnv(env)->capabilities.cap != 1) { \
+      return ERR(MUST_POSSESS_CAPABILITY); \
+    } \
+  } while (false)
+
  public:
   static jvmtiError Allocate(jvmtiEnv* env, jlong size, unsigned char** mem_ptr) {
-    if (!IsValidEnv(env)) {
-      return ERR(INVALID_ENVIRONMENT);
-    }
-    if (mem_ptr == nullptr) {
-      return ERR(NULL_POINTER);
-    }
+    ENSURE_VALID_ENV(env);
+    ENSURE_NON_NULL(mem_ptr);
     if (size < 0) {
       return ERR(ILLEGAL_ARGUMENT);
     } else if (size == 0) {
@@ -83,9 +104,7 @@
   }
 
   static jvmtiError Deallocate(jvmtiEnv* env, unsigned char* mem) {
-    if (!IsValidEnv(env)) {
-      return ERR(INVALID_ENVIRONMENT);
-    }
+    ENSURE_VALID_ENV(env);
     if (mem != nullptr) {
       free(mem);
     }
@@ -155,7 +174,7 @@
   static jvmtiError GetCurrentContendedMonitor(jvmtiEnv* env,
                                                jthread thread,
                                                jobject* monitor_ptr) {
-  return ERR(NOT_IMPLEMENTED);
+    return ERR(NOT_IMPLEMENTED);
   }
 
   static jvmtiError RunAgentThread(jvmtiEnv* env,
@@ -201,7 +220,12 @@
                                   jint max_frame_count,
                                   jvmtiFrameInfo* frame_buffer,
                                   jint* count_ptr) {
-    return ERR(NOT_IMPLEMENTED);
+    return StackUtil::GetStackTrace(env,
+                                    thread,
+                                    start_depth,
+                                    max_frame_count,
+                                    frame_buffer,
+                                    count_ptr);
   }
 
   static jvmtiError GetAllStackTraces(jvmtiEnv* env,
@@ -269,7 +293,13 @@
                                      jobject initial_object,
                                      const jvmtiHeapCallbacks* callbacks,
                                      const void* user_data) {
-    return ERR(NOT_IMPLEMENTED);
+    HeapUtil heap_util(&gObjectTagTable);
+    return heap_util.FollowReferences(env,
+                                      heap_filter,
+                                      klass,
+                                      initial_object,
+                                      callbacks,
+                                      user_data);
   }
 
   static jvmtiError IterateThroughHeap(jvmtiEnv* env,
@@ -277,14 +307,13 @@
                                        jclass klass,
                                        const jvmtiHeapCallbacks* callbacks,
                                        const void* user_data) {
+    ENSURE_HAS_CAP(env, can_tag_objects);
     HeapUtil heap_util(&gObjectTagTable);
     return heap_util.IterateThroughHeap(env, heap_filter, klass, callbacks, user_data);
   }
 
   static jvmtiError GetTag(jvmtiEnv* env, jobject object, jlong* tag_ptr) {
-    if (object == nullptr || tag_ptr == nullptr) {
-      return ERR(NULL_POINTER);
-    }
+    ENSURE_HAS_CAP(env, can_tag_objects);
 
     JNIEnv* jni_env = GetJniEnv(env);
     if (jni_env == nullptr) {
@@ -301,6 +330,8 @@
   }
 
   static jvmtiError SetTag(jvmtiEnv* env, jobject object, jlong tag) {
+    ENSURE_HAS_CAP(env, can_tag_objects);
+
     if (object == nullptr) {
       return ERR(NULL_POINTER);
     }
@@ -323,11 +354,24 @@
                                        jint* count_ptr,
                                        jobject** object_result_ptr,
                                        jlong** tag_result_ptr) {
-    return ERR(NOT_IMPLEMENTED);
+    ENSURE_HAS_CAP(env, can_tag_objects);
+
+    JNIEnv* jni_env = GetJniEnv(env);
+    if (jni_env == nullptr) {
+      return ERR(INTERNAL);
+    }
+
+    art::ScopedObjectAccess soa(jni_env);
+    return gObjectTagTable.GetTaggedObjects(env,
+                                            tag_count,
+                                            tags,
+                                            count_ptr,
+                                            object_result_ptr,
+                                            tag_result_ptr);
   }
 
   static jvmtiError ForceGarbageCollection(jvmtiEnv* env) {
-    return ERR(NOT_IMPLEMENTED);
+    return HeapUtil::ForceGarbageCollection(env);
   }
 
   static jvmtiError IterateOverObjectsReachableFromObject(
@@ -488,7 +532,7 @@
                                       jclass klass,
                                       char** signature_ptr,
                                       char** generic_ptr) {
-    return ERR(NOT_IMPLEMENTED);
+    return ClassUtil::GetClassSignature(env, klass, signature_ptr, generic_ptr);
   }
 
   static jvmtiError GetClassStatus(jvmtiEnv* env, jclass klass, jint* status_ptr) {
@@ -624,19 +668,19 @@
                                   char** name_ptr,
                                   char** signature_ptr,
                                   char** generic_ptr) {
-    return ERR(NOT_IMPLEMENTED);
+    return MethodUtil::GetMethodName(env, method, name_ptr, signature_ptr, generic_ptr);
   }
 
   static jvmtiError GetMethodDeclaringClass(jvmtiEnv* env,
                                             jmethodID method,
                                             jclass* declaring_class_ptr) {
-    return ERR(NOT_IMPLEMENTED);
+    return MethodUtil::GetMethodDeclaringClass(env, method, declaring_class_ptr);
   }
 
   static jvmtiError GetMethodModifiers(jvmtiEnv* env,
                                        jmethodID method,
                                        jint* modifiers_ptr) {
-    return ERR(NOT_IMPLEMENTED);
+    return MethodUtil::GetMethodModifiers(env, method, modifiers_ptr);
   }
 
   static jvmtiError GetMaxLocals(jvmtiEnv* env,
@@ -740,9 +784,7 @@
   static jvmtiError SetEventCallbacks(jvmtiEnv* env,
                                       const jvmtiEventCallbacks* callbacks,
                                       jint size_of_callbacks) {
-    if (env == nullptr) {
-      return ERR(NULL_POINTER);
-    }
+    ENSURE_VALID_ENV(env);
     if (size_of_callbacks < 0) {
       return ERR(ILLEGAL_ARGUMENT);
     }
@@ -769,6 +811,8 @@
                                              jvmtiEvent event_type,
                                              jthread event_thread,
                                              ...) {
+    ENSURE_VALID_ENV(env);
+    // TODO: Check for capabilities.
     art::Thread* art_thread = nullptr;
     if (event_thread != nullptr) {
       // TODO: Need non-aborting call here, to return JVMTI_ERROR_INVALID_THREAD.
@@ -809,20 +853,136 @@
   }
 
   static jvmtiError GetPotentialCapabilities(jvmtiEnv* env, jvmtiCapabilities* capabilities_ptr) {
-    return ERR(NOT_IMPLEMENTED);
+    ENSURE_VALID_ENV(env);
+    ENSURE_NON_NULL(capabilities_ptr);
+    *capabilities_ptr = kPotentialCapabilities;
+    return OK;
   }
 
   static jvmtiError AddCapabilities(jvmtiEnv* env, const jvmtiCapabilities* capabilities_ptr) {
-    return ERR(NOT_IMPLEMENTED);
+    ENSURE_VALID_ENV(env);
+    ENSURE_NON_NULL(capabilities_ptr);
+    ArtJvmTiEnv* art_env = static_cast<ArtJvmTiEnv*>(env);
+    jvmtiError ret = OK;
+#define ADD_CAPABILITY(e) \
+    do { \
+      if (capabilities_ptr->e == 1) { \
+        if (kPotentialCapabilities.e == 1) { \
+          art_env->capabilities.e = 1;\
+        } else { \
+          ret = ERR(NOT_AVAILABLE); \
+        } \
+      } \
+    } while (false)
+
+    ADD_CAPABILITY(can_tag_objects);
+    ADD_CAPABILITY(can_generate_field_modification_events);
+    ADD_CAPABILITY(can_generate_field_access_events);
+    ADD_CAPABILITY(can_get_bytecodes);
+    ADD_CAPABILITY(can_get_synthetic_attribute);
+    ADD_CAPABILITY(can_get_owned_monitor_info);
+    ADD_CAPABILITY(can_get_current_contended_monitor);
+    ADD_CAPABILITY(can_get_monitor_info);
+    ADD_CAPABILITY(can_pop_frame);
+    ADD_CAPABILITY(can_redefine_classes);
+    ADD_CAPABILITY(can_signal_thread);
+    ADD_CAPABILITY(can_get_source_file_name);
+    ADD_CAPABILITY(can_get_line_numbers);
+    ADD_CAPABILITY(can_get_source_debug_extension);
+    ADD_CAPABILITY(can_access_local_variables);
+    ADD_CAPABILITY(can_maintain_original_method_order);
+    ADD_CAPABILITY(can_generate_single_step_events);
+    ADD_CAPABILITY(can_generate_exception_events);
+    ADD_CAPABILITY(can_generate_frame_pop_events);
+    ADD_CAPABILITY(can_generate_breakpoint_events);
+    ADD_CAPABILITY(can_suspend);
+    ADD_CAPABILITY(can_redefine_any_class);
+    ADD_CAPABILITY(can_get_current_thread_cpu_time);
+    ADD_CAPABILITY(can_get_thread_cpu_time);
+    ADD_CAPABILITY(can_generate_method_entry_events);
+    ADD_CAPABILITY(can_generate_method_exit_events);
+    ADD_CAPABILITY(can_generate_all_class_hook_events);
+    ADD_CAPABILITY(can_generate_compiled_method_load_events);
+    ADD_CAPABILITY(can_generate_monitor_events);
+    ADD_CAPABILITY(can_generate_vm_object_alloc_events);
+    ADD_CAPABILITY(can_generate_native_method_bind_events);
+    ADD_CAPABILITY(can_generate_garbage_collection_events);
+    ADD_CAPABILITY(can_generate_object_free_events);
+    ADD_CAPABILITY(can_force_early_return);
+    ADD_CAPABILITY(can_get_owned_monitor_stack_depth_info);
+    ADD_CAPABILITY(can_get_constant_pool);
+    ADD_CAPABILITY(can_set_native_method_prefix);
+    ADD_CAPABILITY(can_retransform_classes);
+    ADD_CAPABILITY(can_retransform_any_class);
+    ADD_CAPABILITY(can_generate_resource_exhaustion_heap_events);
+    ADD_CAPABILITY(can_generate_resource_exhaustion_threads_events);
+#undef ADD_CAPABILITY
+    return ret;
   }
 
   static jvmtiError RelinquishCapabilities(jvmtiEnv* env,
                                            const jvmtiCapabilities* capabilities_ptr) {
-    return ERR(NOT_IMPLEMENTED);
+    ENSURE_VALID_ENV(env);
+    ENSURE_NON_NULL(capabilities_ptr);
+    ArtJvmTiEnv* art_env = reinterpret_cast<ArtJvmTiEnv*>(env);
+#define DEL_CAPABILITY(e) \
+    do { \
+      if (capabilities_ptr->e == 1) { \
+        art_env->capabilities.e = 0;\
+      } \
+    } while (false)
+
+    DEL_CAPABILITY(can_tag_objects);
+    DEL_CAPABILITY(can_generate_field_modification_events);
+    DEL_CAPABILITY(can_generate_field_access_events);
+    DEL_CAPABILITY(can_get_bytecodes);
+    DEL_CAPABILITY(can_get_synthetic_attribute);
+    DEL_CAPABILITY(can_get_owned_monitor_info);
+    DEL_CAPABILITY(can_get_current_contended_monitor);
+    DEL_CAPABILITY(can_get_monitor_info);
+    DEL_CAPABILITY(can_pop_frame);
+    DEL_CAPABILITY(can_redefine_classes);
+    DEL_CAPABILITY(can_signal_thread);
+    DEL_CAPABILITY(can_get_source_file_name);
+    DEL_CAPABILITY(can_get_line_numbers);
+    DEL_CAPABILITY(can_get_source_debug_extension);
+    DEL_CAPABILITY(can_access_local_variables);
+    DEL_CAPABILITY(can_maintain_original_method_order);
+    DEL_CAPABILITY(can_generate_single_step_events);
+    DEL_CAPABILITY(can_generate_exception_events);
+    DEL_CAPABILITY(can_generate_frame_pop_events);
+    DEL_CAPABILITY(can_generate_breakpoint_events);
+    DEL_CAPABILITY(can_suspend);
+    DEL_CAPABILITY(can_redefine_any_class);
+    DEL_CAPABILITY(can_get_current_thread_cpu_time);
+    DEL_CAPABILITY(can_get_thread_cpu_time);
+    DEL_CAPABILITY(can_generate_method_entry_events);
+    DEL_CAPABILITY(can_generate_method_exit_events);
+    DEL_CAPABILITY(can_generate_all_class_hook_events);
+    DEL_CAPABILITY(can_generate_compiled_method_load_events);
+    DEL_CAPABILITY(can_generate_monitor_events);
+    DEL_CAPABILITY(can_generate_vm_object_alloc_events);
+    DEL_CAPABILITY(can_generate_native_method_bind_events);
+    DEL_CAPABILITY(can_generate_garbage_collection_events);
+    DEL_CAPABILITY(can_generate_object_free_events);
+    DEL_CAPABILITY(can_force_early_return);
+    DEL_CAPABILITY(can_get_owned_monitor_stack_depth_info);
+    DEL_CAPABILITY(can_get_constant_pool);
+    DEL_CAPABILITY(can_set_native_method_prefix);
+    DEL_CAPABILITY(can_retransform_classes);
+    DEL_CAPABILITY(can_retransform_any_class);
+    DEL_CAPABILITY(can_generate_resource_exhaustion_heap_events);
+    DEL_CAPABILITY(can_generate_resource_exhaustion_threads_events);
+#undef DEL_CAPABILITY
+    return OK;
   }
 
   static jvmtiError GetCapabilities(jvmtiEnv* env, jvmtiCapabilities* capabilities_ptr) {
-    return ERR(NOT_IMPLEMENTED);
+    ENSURE_VALID_ENV(env);
+    ENSURE_NON_NULL(capabilities_ptr);
+    ArtJvmTiEnv* artenv = reinterpret_cast<ArtJvmTiEnv*>(env);
+    *capabilities_ptr = artenv->capabilities;
+    return OK;
   }
 
   static jvmtiError GetCurrentThreadCpuTimerInfo(jvmtiEnv* env, jvmtiTimerInfo* info_ptr) {
@@ -878,44 +1038,31 @@
   }
 
   static jvmtiError DisposeEnvironment(jvmtiEnv* env) {
-    if (!IsValidEnv(env)) {
-      return ERR(INVALID_ENVIRONMENT);
-    }
+    ENSURE_VALID_ENV(env);
     delete env;
     return OK;
   }
 
   static jvmtiError SetEnvironmentLocalStorage(jvmtiEnv* env, const void* data) {
-    if (!IsValidEnv(env)) {
-      return ERR(INVALID_ENVIRONMENT);
-    }
+    ENSURE_VALID_ENV(env);
     reinterpret_cast<ArtJvmTiEnv*>(env)->local_data = const_cast<void*>(data);
     return OK;
   }
 
   static jvmtiError GetEnvironmentLocalStorage(jvmtiEnv* env, void** data_ptr) {
-    if (!IsValidEnv(env)) {
-      return ERR(INVALID_ENVIRONMENT);
-    }
+    ENSURE_VALID_ENV(env);
     *data_ptr = reinterpret_cast<ArtJvmTiEnv*>(env)->local_data;
     return OK;
   }
 
   static jvmtiError GetVersionNumber(jvmtiEnv* env, jint* version_ptr) {
-    if (!IsValidEnv(env)) {
-      return ERR(INVALID_ENVIRONMENT);
-    }
+    ENSURE_VALID_ENV(env);
     *version_ptr = JVMTI_VERSION;
     return OK;
   }
 
   static jvmtiError GetErrorName(jvmtiEnv* env, jvmtiError error,  char** name_ptr) {
-    if (!IsValidEnv(env)) {
-      return ERR(INVALID_ENVIRONMENT);
-    }
-    if (name_ptr == nullptr) {
-      return ERR(NULL_POINTER);
-    }
+    ENSURE_VALID_ENV(env);
+    ENSURE_NON_NULL(name_ptr);
     switch (error) {
 #define ERROR_CASE(e) case (JVMTI_ERROR_ ## e) : do { \
           *name_ptr = const_cast<char*>("JVMTI_ERROR_"#e); \
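
For reference (illustrative sketch, not part of the patch): the capability plumbing added above is what an agent negotiates against at load time. A minimal agent-side sketch using only the standard jvmti.h interface; Agent_OnLoad is the standard JVMTI entry point and error handling is abbreviated.

    #include <jvmti.h>

    extern "C" JNIEXPORT jint JNICALL Agent_OnLoad(JavaVM* vm, char* /* options */, void* /* reserved */) {
      jvmtiEnv* jvmti = nullptr;
      if (vm->GetEnv(reinterpret_cast<void**>(&jvmti), JVMTI_VERSION_1_2) != JNI_OK) {
        return 1;
      }
      jvmtiCapabilities potential;
      if (jvmti->GetPotentialCapabilities(&potential) != JVMTI_ERROR_NONE) {
        return 1;
      }
      jvmtiCapabilities wanted = {};
      // Request only what the runtime offers; with this patch that is can_tag_objects.
      wanted.can_tag_objects = potential.can_tag_objects;
      // AddCapabilities returns JVMTI_ERROR_NOT_AVAILABLE for bits outside the potential set.
      return jvmti->AddCapabilities(&wanted) == JVMTI_ERROR_NONE ? JNI_OK : 1;
    }
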
diff --git a/runtime/openjdkjvmti/art_jvmti.h b/runtime/openjdkjvmti/art_jvmti.h
index 66d0937..48b29a3 100644
--- a/runtime/openjdkjvmti/art_jvmti.h
+++ b/runtime/openjdkjvmti/art_jvmti.h
@@ -37,6 +37,8 @@
 #include <jni.h>
 
 #include "base/casts.h"
+#include "base/logging.h"
+#include "base/macros.h"
 #include "events.h"
 #include "java_vm_ext.h"
 #include "jni_env_ext.h"
@@ -50,11 +52,13 @@
 struct ArtJvmTiEnv : public jvmtiEnv {
   art::JavaVMExt* art_vm;
   void* local_data;
+  jvmtiCapabilities capabilities;
 
   EventMasks event_masks;
   std::unique_ptr<jvmtiEventCallbacks> event_callbacks;
 
-  explicit ArtJvmTiEnv(art::JavaVMExt* runtime) : art_vm(runtime), local_data(nullptr) {
+  explicit ArtJvmTiEnv(art::JavaVMExt* runtime)
+      : art_vm(runtime), local_data(nullptr), capabilities() {
     functions = &gJvmtiInterface;
   }
 
@@ -80,6 +84,89 @@
   return ret_value;
 }
 
+class JvmtiDeleter {
+ public:
+  JvmtiDeleter() : env_(nullptr) {}
+  explicit JvmtiDeleter(jvmtiEnv* env) : env_(env) {}
+
+  JvmtiDeleter(JvmtiDeleter&) = default;
+  JvmtiDeleter(JvmtiDeleter&&) = default;
+  JvmtiDeleter& operator=(const JvmtiDeleter&) = default;
+
+  void operator()(unsigned char* ptr) const {
+    CHECK(env_ != nullptr);
+    jvmtiError ret = env_->Deallocate(ptr);
+    CHECK(ret == ERR(NONE));
+  }
+
+ private:
+  mutable jvmtiEnv* env_;
+};
+
+using JvmtiUniquePtr = std::unique_ptr<unsigned char, JvmtiDeleter>;
+
+ALWAYS_INLINE
+static inline JvmtiUniquePtr MakeJvmtiUniquePtr(jvmtiEnv* env, unsigned char* mem) {
+  return JvmtiUniquePtr(mem, JvmtiDeleter(env));
+}
+
+ALWAYS_INLINE
+static inline jvmtiError CopyString(jvmtiEnv* env, const char* src, unsigned char** copy) {
+  size_t len = strlen(src) + 1;
+  unsigned char* buf;
+  jvmtiError ret = env->Allocate(len, &buf);
+  if (ret != ERR(NONE)) {
+    return ret;
+  }
+  strcpy(reinterpret_cast<char*>(buf), src);
+  *copy = buf;
+  return ret;
+}
+
+const jvmtiCapabilities kPotentialCapabilities = {
+    .can_tag_objects                                 = 1,
+    .can_generate_field_modification_events          = 0,
+    .can_generate_field_access_events                = 0,
+    .can_get_bytecodes                               = 0,
+    .can_get_synthetic_attribute                     = 0,
+    .can_get_owned_monitor_info                      = 0,
+    .can_get_current_contended_monitor               = 0,
+    .can_get_monitor_info                            = 0,
+    .can_pop_frame                                   = 0,
+    .can_redefine_classes                            = 0,
+    .can_signal_thread                               = 0,
+    .can_get_source_file_name                        = 0,
+    .can_get_line_numbers                            = 0,
+    .can_get_source_debug_extension                  = 0,
+    .can_access_local_variables                      = 0,
+    .can_maintain_original_method_order              = 0,
+    .can_generate_single_step_events                 = 0,
+    .can_generate_exception_events                   = 0,
+    .can_generate_frame_pop_events                   = 0,
+    .can_generate_breakpoint_events                  = 0,
+    .can_suspend                                     = 0,
+    .can_redefine_any_class                          = 0,
+    .can_get_current_thread_cpu_time                 = 0,
+    .can_get_thread_cpu_time                         = 0,
+    .can_generate_method_entry_events                = 0,
+    .can_generate_method_exit_events                 = 0,
+    .can_generate_all_class_hook_events              = 0,
+    .can_generate_compiled_method_load_events        = 0,
+    .can_generate_monitor_events                     = 0,
+    .can_generate_vm_object_alloc_events             = 0,
+    .can_generate_native_method_bind_events          = 0,
+    .can_generate_garbage_collection_events          = 0,
+    .can_generate_object_free_events                 = 0,
+    .can_force_early_return                          = 0,
+    .can_get_owned_monitor_stack_depth_info          = 0,
+    .can_get_constant_pool                           = 0,
+    .can_set_native_method_prefix                    = 0,
+    .can_retransform_classes                         = 0,
+    .can_retransform_any_class                       = 0,
+    .can_generate_resource_exhaustion_heap_events    = 0,
+    .can_generate_resource_exhaustion_threads_events = 0,
+};
+
 }  // namespace openjdkjvmti
 
 #endif  // ART_RUNTIME_OPENJDKJVMTI_ART_JVMTI_H_
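
For reference (illustrative sketch, not part of the patch): CopyString and JvmtiUniquePtr are intended to be used together so that an Allocate()d buffer is reclaimed if a later step fails. GetSomeString below is a hypothetical function showing the pattern that the new ti_class.cc follows.

    jvmtiError GetSomeString(jvmtiEnv* env, char** out) {
      unsigned char* buf;
      jvmtiError ret = CopyString(env, "LExample;", &buf);
      if (ret != ERR(NONE)) {
        return ret;
      }
      // The guard frees buf through env->Deallocate() on any early return.
      JvmtiUniquePtr guard = MakeJvmtiUniquePtr(env, buf);
      *out = reinterpret_cast<char*>(buf);
      guard.release();  // Success: ownership passes to the caller, who must Deallocate().
      return ERR(NONE);
    }
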
diff --git a/runtime/openjdkjvmti/heap.cc b/runtime/openjdkjvmti/heap.cc
deleted file mode 100644
index 1799e19..0000000
--- a/runtime/openjdkjvmti/heap.cc
+++ /dev/null
@@ -1,213 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "heap.h"
-
-#include "art_jvmti.h"
-#include "base/macros.h"
-#include "base/mutex.h"
-#include "class_linker.h"
-#include "gc/heap.h"
-#include "jni_env_ext.h"
-#include "mirror/class.h"
-#include "object_callbacks.h"
-#include "object_tagging.h"
-#include "obj_ptr-inl.h"
-#include "runtime.h"
-#include "scoped_thread_state_change-inl.h"
-#include "thread-inl.h"
-
-namespace openjdkjvmti {
-
-struct IterateThroughHeapData {
-  IterateThroughHeapData(HeapUtil* _heap_util,
-                         jint heap_filter,
-                         art::ObjPtr<art::mirror::Class> klass,
-                         const jvmtiHeapCallbacks* _callbacks,
-                         const void* _user_data)
-      : heap_util(_heap_util),
-        filter_klass(klass),
-        callbacks(_callbacks),
-        user_data(_user_data),
-        filter_out_tagged((heap_filter & JVMTI_HEAP_FILTER_TAGGED) != 0),
-        filter_out_untagged((heap_filter & JVMTI_HEAP_FILTER_UNTAGGED) != 0),
-        filter_out_class_tagged((heap_filter & JVMTI_HEAP_FILTER_CLASS_TAGGED) != 0),
-        filter_out_class_untagged((heap_filter & JVMTI_HEAP_FILTER_CLASS_UNTAGGED) != 0),
-        any_filter(filter_out_tagged ||
-                   filter_out_untagged ||
-                   filter_out_class_tagged ||
-                   filter_out_class_untagged),
-        stop_reports(false) {
-  }
-
-  bool ShouldReportByHeapFilter(jlong tag, jlong class_tag) {
-    if (!any_filter) {
-      return true;
-    }
-
-    if ((tag == 0 && filter_out_untagged) || (tag != 0 && filter_out_tagged)) {
-      return false;
-    }
-
-    if ((class_tag == 0 && filter_out_class_untagged) ||
-        (class_tag != 0 && filter_out_class_tagged)) {
-      return false;
-    }
-
-    return true;
-  }
-
-  HeapUtil* heap_util;
-  art::ObjPtr<art::mirror::Class> filter_klass;
-  const jvmtiHeapCallbacks* callbacks;
-  const void* user_data;
-  const bool filter_out_tagged;
-  const bool filter_out_untagged;
-  const bool filter_out_class_tagged;
-  const bool filter_out_class_untagged;
-  const bool any_filter;
-
-  bool stop_reports;
-};
-
-static void IterateThroughHeapObjectCallback(art::mirror::Object* obj, void* arg)
-    REQUIRES_SHARED(art::Locks::mutator_lock_) {
-  IterateThroughHeapData* ithd = reinterpret_cast<IterateThroughHeapData*>(arg);
-  // Early return, as we can't really stop visiting.
-  if (ithd->stop_reports) {
-    return;
-  }
-
-  art::ScopedAssertNoThreadSuspension no_suspension("IterateThroughHeapCallback");
-
-  jlong tag = 0;
-  ithd->heap_util->GetTags()->GetTag(obj, &tag);
-
-  jlong class_tag = 0;
-  art::ObjPtr<art::mirror::Class> klass = obj->GetClass();
-  ithd->heap_util->GetTags()->GetTag(klass.Ptr(), &class_tag);
-  // For simplicity, even if we find a tag = 0, assume 0 = not tagged.
-
-  if (!ithd->ShouldReportByHeapFilter(tag, class_tag)) {
-    return;
-  }
-
-  // TODO: Handle array_primitive_value_callback.
-
-  if (ithd->filter_klass != nullptr) {
-    if (ithd->filter_klass != klass) {
-      return;
-    }
-  }
-
-  jlong size = obj->SizeOf();
-
-  jint length = -1;
-  if (obj->IsArrayInstance()) {
-    length = obj->AsArray()->GetLength();
-  }
-
-  jlong saved_tag = tag;
-  jint ret = ithd->callbacks->heap_iteration_callback(class_tag,
-                                                      size,
-                                                      &tag,
-                                                      length,
-                                                      const_cast<void*>(ithd->user_data));
-
-  if (tag != saved_tag) {
-    ithd->heap_util->GetTags()->Set(obj, tag);
-  }
-
-  ithd->stop_reports = (ret & JVMTI_VISIT_ABORT) != 0;
-
-  // TODO Implement array primitive and string primitive callback.
-  // TODO Implement primitive field callback.
-}
-
-jvmtiError HeapUtil::IterateThroughHeap(jvmtiEnv* env ATTRIBUTE_UNUSED,
-                                        jint heap_filter,
-                                        jclass klass,
-                                        const jvmtiHeapCallbacks* callbacks,
-                                        const void* user_data) {
-  if (callbacks == nullptr) {
-    return ERR(NULL_POINTER);
-  }
-
-  if (callbacks->array_primitive_value_callback != nullptr) {
-    // TODO: Implement.
-    return ERR(NOT_IMPLEMENTED);
-  }
-
-  art::Thread* self = art::Thread::Current();
-  art::ScopedObjectAccess soa(self);      // Now we know we have the shared lock.
-
-  IterateThroughHeapData ithd(this,
-                              heap_filter,
-                              soa.Decode<art::mirror::Class>(klass),
-                              callbacks,
-                              user_data);
-
-  art::Runtime::Current()->GetHeap()->VisitObjects(IterateThroughHeapObjectCallback, &ithd);
-
-  return ERR(NONE);
-}
-
-jvmtiError HeapUtil::GetLoadedClasses(jvmtiEnv* env,
-                                      jint* class_count_ptr,
-                                      jclass** classes_ptr) {
-  if (class_count_ptr == nullptr || classes_ptr == nullptr) {
-    return ERR(NULL_POINTER);
-  }
-
-  class ReportClassVisitor : public art::ClassVisitor {
-   public:
-    explicit ReportClassVisitor(art::Thread* self) : self_(self) {}
-
-    bool operator()(art::ObjPtr<art::mirror::Class> klass)
-        OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
-      classes_.push_back(self_->GetJniEnv()->AddLocalReference<jclass>(klass));
-      return true;
-    }
-
-    art::Thread* self_;
-    std::vector<jclass> classes_;
-  };
-
-  art::Thread* self = art::Thread::Current();
-  ReportClassVisitor rcv(self);
-  {
-    art::ScopedObjectAccess soa(self);
-    art::Runtime::Current()->GetClassLinker()->VisitClasses(&rcv);
-  }
-
-  size_t size = rcv.classes_.size();
-  jclass* classes = nullptr;
-  jvmtiError alloc_ret = env->Allocate(static_cast<jlong>(size * sizeof(jclass)),
-                                       reinterpret_cast<unsigned char**>(&classes));
-  if (alloc_ret != ERR(NONE)) {
-    return alloc_ret;
-  }
-
-  for (size_t i = 0; i < size; ++i) {
-    classes[i] = rcv.classes_[i];
-  }
-  *classes_ptr = classes;
-  *class_count_ptr = static_cast<jint>(size);
-
-  return ERR(NONE);
-}
-
-}  // namespace openjdkjvmti
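
The deleted heap.cc is moved, essentially verbatim, into the new ti_heap.cc further below. For reference (illustrative sketch, not part of the patch), the heap-filter test it carries over can be summarized as a free function: each JVMTI_HEAP_FILTER_* bit excludes a group of objects, and the _CLASS_ variants apply the same test to the tag of the object's class.

    #include <jvmti.h>

    static bool ShouldReport(jint heap_filter, jlong tag, jlong class_tag) {
      if ((heap_filter & JVMTI_HEAP_FILTER_TAGGED) != 0 && tag != 0) return false;
      if ((heap_filter & JVMTI_HEAP_FILTER_UNTAGGED) != 0 && tag == 0) return false;
      if ((heap_filter & JVMTI_HEAP_FILTER_CLASS_TAGGED) != 0 && class_tag != 0) return false;
      if ((heap_filter & JVMTI_HEAP_FILTER_CLASS_UNTAGGED) != 0 && class_tag == 0) return false;
      return true;
    }
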
diff --git a/runtime/openjdkjvmti/jvmti_allocator.h b/runtime/openjdkjvmti/jvmti_allocator.h
new file mode 100644
index 0000000..1225c14
--- /dev/null
+++ b/runtime/openjdkjvmti/jvmti_allocator.h
@@ -0,0 +1,170 @@
+/* Copyright (C) 2016 The Android Open Source Project
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This file implements interfaces from the file jvmti.h. This implementation
+ * is licensed under the same terms as the file jvmti.h.  The
+ * copyright and license information for the file jvmti.h follows.
+ *
+ * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.  Oracle designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Oracle in the LICENSE file that accompanied this code.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef ART_RUNTIME_OPENJDKJVMTI_JVMTI_ALLOCATOR_H_
+#define ART_RUNTIME_OPENJDKJVMTI_JVMTI_ALLOCATOR_H_
+
+#include "base/logging.h"
+#include "base/macros.h"
+#include "jvmti.h"
+
+namespace openjdkjvmti {
+
+template <typename T> class JvmtiAllocator;
+
+template <>
+class JvmtiAllocator<void> {
+ public:
+  typedef void value_type;
+  typedef void* pointer;
+  typedef const void* const_pointer;
+
+  template <typename U>
+  struct rebind {
+    typedef JvmtiAllocator<U> other;
+  };
+
+  explicit JvmtiAllocator(jvmtiEnv* env) : env_(env) {}
+
+  template <typename U>
+  JvmtiAllocator(const JvmtiAllocator<U>& other)  // NOLINT, implicit
+      : env_(other.env_) {}
+
+  JvmtiAllocator(const JvmtiAllocator& other) = default;
+  JvmtiAllocator& operator=(const JvmtiAllocator& other) = default;
+  ~JvmtiAllocator() = default;
+
+ private:
+  jvmtiEnv* env_;
+
+  template <typename U>
+  friend class JvmtiAllocator;
+
+  template <typename U>
+  friend bool operator==(const JvmtiAllocator<U>& lhs, const JvmtiAllocator<U>& rhs);
+};
+
+template <typename T>
+class JvmtiAllocator {
+ public:
+  typedef T value_type;
+  typedef T* pointer;
+  typedef T& reference;
+  typedef const T* const_pointer;
+  typedef const T& const_reference;
+  typedef size_t size_type;
+  typedef ptrdiff_t difference_type;
+
+  template <typename U>
+  struct rebind {
+    typedef JvmtiAllocator<U> other;
+  };
+
+  explicit JvmtiAllocator(jvmtiEnv* env) : env_(env) {}
+
+  template <typename U>
+  JvmtiAllocator(const JvmtiAllocator<U>& other)  // NOLINT, implicit
+      : env_(other.env_) {}
+
+  JvmtiAllocator(const JvmtiAllocator& other) = default;
+  JvmtiAllocator& operator=(const JvmtiAllocator& other) = default;
+  ~JvmtiAllocator() = default;
+
+  size_type max_size() const {
+    return static_cast<size_type>(-1) / sizeof(T);
+  }
+
+  pointer address(reference x) const { return &x; }
+  const_pointer address(const_reference x) const { return &x; }
+
+  pointer allocate(size_type n, JvmtiAllocator<void>::pointer hint ATTRIBUTE_UNUSED = nullptr) {
+    DCHECK_LE(n, max_size());
+    if (env_ == nullptr) {
+      T* result = reinterpret_cast<T*>(malloc(n * sizeof(T)));
+      CHECK(result != nullptr || n == 0u);  // Abort if malloc() fails.
+      return result;
+    } else {
+      unsigned char* result;
+      jvmtiError alloc_error = env_->Allocate(n * sizeof(T), &result);
+      CHECK(alloc_error == JVMTI_ERROR_NONE);
+      return reinterpret_cast<T*>(result);
+    }
+  }
+  void deallocate(pointer p, size_type n ATTRIBUTE_UNUSED) {
+    if (env_ == nullptr) {
+      free(p);
+    } else {
+      jvmtiError dealloc_error = env_->Deallocate(reinterpret_cast<unsigned char*>(p));
+      CHECK(dealloc_error == JVMTI_ERROR_NONE);
+    }
+  }
+
+  void construct(pointer p, const_reference val) {
+    new (static_cast<void*>(p)) value_type(val);
+  }
+  template <class U, class... Args>
+  void construct(U* p, Args&&... args) {
+    ::new (static_cast<void*>(p)) U(std::forward<Args>(args)...);
+  }
+  void destroy(pointer p) {
+    p->~value_type();
+  }
+
+  inline bool operator==(JvmtiAllocator const& other) {
+    return env_ == other.env_;
+  }
+  inline bool operator!=(JvmtiAllocator const& other) {
+    return !operator==(other);
+  }
+
+ private:
+  jvmtiEnv* env_;
+
+  template <typename U>
+  friend class JvmtiAllocator;
+
+  template <typename U>
+  friend bool operator==(const JvmtiAllocator<U>& lhs, const JvmtiAllocator<U>& rhs);
+};
+
+template <typename T>
+inline bool operator==(const JvmtiAllocator<T>& lhs, const JvmtiAllocator<T>& rhs) {
+  return lhs.env_ == rhs.env_;
+}
+
+template <typename T>
+inline bool operator!=(const JvmtiAllocator<T>& lhs, const JvmtiAllocator<T>& rhs) {
+  return !(lhs == rhs);
+}
+
+}  // namespace openjdkjvmti
+
+#endif  // ART_RUNTIME_OPENJDKJVMTI_JVMTI_ALLOCATOR_H_
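
For reference (illustrative sketch, not part of the patch): JvmtiAllocator satisfies the standard allocator requirements, so JVMTI-side code can keep temporary storage in ordinary containers while still routing memory through the environment. jvmti_env below is assumed to be a valid environment.

    #include <vector>
    #include "jvmti_allocator.h"

    void CollectExampleTags(jvmtiEnv* jvmti_env) {
      openjdkjvmti::JvmtiAllocator<jlong> allocator(jvmti_env);
      // Backing storage comes from jvmti_env->Allocate() and is returned via Deallocate().
      std::vector<jlong, openjdkjvmti::JvmtiAllocator<jlong>> tags(allocator);
      tags.push_back(1);
      tags.push_back(2);
    }
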
diff --git a/runtime/openjdkjvmti/object_tagging.cc b/runtime/openjdkjvmti/object_tagging.cc
index f16b023..b983e79 100644
--- a/runtime/openjdkjvmti/object_tagging.cc
+++ b/runtime/openjdkjvmti/object_tagging.cc
@@ -39,6 +39,7 @@
 #include "gc/allocation_listener.h"
 #include "instrumentation.h"
 #include "jni_env_ext-inl.h"
+#include "jvmti_allocator.h"
 #include "mirror/class.h"
 #include "mirror/object.h"
 #include "runtime.h"
@@ -46,6 +47,16 @@
 
 namespace openjdkjvmti {
 
+void ObjectTagTable::Lock() {
+  allow_disallow_lock_.ExclusiveLock(art::Thread::Current());
+}
+void ObjectTagTable::Unlock() {
+  allow_disallow_lock_.ExclusiveUnlock(art::Thread::Current());
+}
+void ObjectTagTable::AssertLocked() {
+  allow_disallow_lock_.AssertHeld(art::Thread::Current());
+}
+
 void ObjectTagTable::UpdateTableWithReadBarrier() {
   update_since_last_sweep_ = true;
 
@@ -79,6 +90,13 @@
 
   return RemoveLocked(self, obj, tag);
 }
+bool ObjectTagTable::RemoveLocked(art::mirror::Object* obj, jlong* tag) {
+  art::Thread* self = art::Thread::Current();
+  allow_disallow_lock_.AssertHeld(self);
+  Wait(self);
+
+  return RemoveLocked(self, obj, tag);
+}
 
 bool ObjectTagTable::RemoveLocked(art::Thread* self, art::mirror::Object* obj, jlong* tag) {
   auto it = tagged_objects_.find(art::GcRoot<art::mirror::Object>(obj));
@@ -108,12 +126,29 @@
 }
 
 bool ObjectTagTable::Set(art::mirror::Object* obj, jlong new_tag) {
+  if (new_tag == 0) {
+    jlong tmp;
+    return Remove(obj, &tmp);
+  }
+
   art::Thread* self = art::Thread::Current();
   art::MutexLock mu(self, allow_disallow_lock_);
   Wait(self);
 
   return SetLocked(self, obj, new_tag);
 }
+bool ObjectTagTable::SetLocked(art::mirror::Object* obj, jlong new_tag) {
+  if (new_tag == 0) {
+    jlong tmp;
+    return RemoveLocked(obj, &tmp);
+  }
+
+  art::Thread* self = art::Thread::Current();
+  allow_disallow_lock_.AssertHeld(self);
+  Wait(self);
+
+  return SetLocked(self, obj, new_tag);
+}
 
 bool ObjectTagTable::SetLocked(art::Thread* self, art::mirror::Object* obj, jlong new_tag) {
   auto it = tagged_objects_.find(art::GcRoot<art::mirror::Object>(obj));
@@ -211,4 +246,142 @@
   // TODO: consider rehash here.
 }
 
+template <typename T, class Allocator = std::allocator<T>>
+struct ReleasableContainer {
+  using allocator_type = Allocator;
+
+  explicit ReleasableContainer(const allocator_type& alloc, size_t reserve = 10)
+      : allocator(alloc),
+        data(reserve > 0 ? allocator.allocate(reserve) : nullptr),
+        size(0),
+        capacity(reserve) {
+  }
+
+  ~ReleasableContainer() {
+    if (data != nullptr) {
+      allocator.deallocate(data, capacity);
+      capacity = 0;
+      size = 0;
+    }
+  }
+
+  T* Release() {
+    T* tmp = data;
+
+    data = nullptr;
+    size = 0;
+    capacity = 0;
+
+    return tmp;
+  }
+
+  void Resize(size_t new_capacity) {
+    CHECK_GT(new_capacity, capacity);
+
+    T* tmp = allocator.allocate(new_capacity);
+    DCHECK(tmp != nullptr);
+    if (data != nullptr) {
+      memcpy(tmp, data, sizeof(T) * size);
+    }
+    T* old = data;
+    data = tmp;
+    allocator.deallocate(old, capacity);
+    capacity = new_capacity;
+  }
+
+  void Pushback(const T& elem) {
+    if (size == capacity) {
+      size_t new_capacity = 2 * capacity + 1;
+      Resize(new_capacity);
+    }
+    data[size++] = elem;
+  }
+
+  Allocator allocator;
+  T* data;
+  size_t size;
+  size_t capacity;
+};
+
+jvmtiError ObjectTagTable::GetTaggedObjects(jvmtiEnv* jvmti_env,
+                                            jint tag_count,
+                                            const jlong* tags,
+                                            jint* count_ptr,
+                                            jobject** object_result_ptr,
+                                            jlong** tag_result_ptr) {
+  if (tag_count < 0) {
+    return ERR(ILLEGAL_ARGUMENT);
+  }
+  // Check tags for null before it is dereferenced in the loop below.
+  if (tags == nullptr) {
+    return ERR(NULL_POINTER);
+  }
+  if (tag_count > 0) {
+    for (size_t i = 0; i != static_cast<size_t>(tag_count); ++i) {
+      if (tags[i] == 0) {
+        return ERR(ILLEGAL_ARGUMENT);
+      }
+    }
+  }
+  if (count_ptr == nullptr) {
+    return ERR(NULL_POINTER);
+  }
+
+  art::Thread* self = art::Thread::Current();
+  art::MutexLock mu(self, allow_disallow_lock_);
+  Wait(self);
+
+  art::JNIEnvExt* jni_env = self->GetJniEnv();
+
+  constexpr size_t kDefaultSize = 10;
+  size_t initial_object_size;
+  size_t initial_tag_size;
+  if (tag_count == 0) {
+    initial_object_size = (object_result_ptr != nullptr) ? tagged_objects_.size() : 0;
+    initial_tag_size = (tag_result_ptr != nullptr) ? tagged_objects_.size() : 0;
+  } else {
+    initial_object_size = initial_tag_size = kDefaultSize;
+  }
+  JvmtiAllocator<void> allocator(jvmti_env);
+  ReleasableContainer<jobject, JvmtiAllocator<jobject>> selected_objects(
+      allocator, initial_object_size);
+  ReleasableContainer<jlong, JvmtiAllocator<jlong>> selected_tags(allocator, initial_tag_size);
+
+  size_t count = 0;
+  for (auto& pair : tagged_objects_) {
+    bool select;
+    if (tag_count > 0) {
+      select = false;
+      for (size_t i = 0; i != static_cast<size_t>(tag_count); ++i) {
+        if (tags[i] == pair.second) {
+          select = true;
+          break;
+        }
+      }
+    } else {
+      select = true;
+    }
+
+    if (select) {
+      art::mirror::Object* obj = pair.first.Read<art::kWithReadBarrier>();
+      if (obj != nullptr) {
+        count++;
+        if (object_result_ptr != nullptr) {
+          selected_objects.Pushback(jni_env->AddLocalReference<jobject>(obj));
+        }
+        if (tag_result_ptr != nullptr) {
+          selected_tags.Pushback(pair.second);
+        }
+      }
+    }
+  }
+
+  if (object_result_ptr != nullptr) {
+    *object_result_ptr = selected_objects.Release();
+  }
+  if (tag_result_ptr != nullptr) {
+    *tag_result_ptr = selected_tags.Release();
+  }
+  *count_ptr = static_cast<jint>(count);
+  return ERR(NONE);
+}
+
 }  // namespace openjdkjvmti
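
GetTaggedObjects backs the GetObjectsWithTags entry point wired up in OpenjdkJvmTi.cc above. For reference (illustrative sketch, not part of the patch), the agent-side contract is that both result arrays come from Allocate() and must be released by the caller:

    #include <jvmti.h>

    // Count heap objects carrying a specific tag; returns -1 on error.
    jint CountObjectsWithTag(jvmtiEnv* jvmti, jlong tag) {
      jlong wanted[] = { tag };
      jint count = 0;
      jobject* objects = nullptr;
      jlong* tags_out = nullptr;
      if (jvmti->GetObjectsWithTags(1, wanted, &count, &objects, &tags_out) != JVMTI_ERROR_NONE) {
        return -1;
      }
      jvmti->Deallocate(reinterpret_cast<unsigned char*>(objects));
      jvmti->Deallocate(reinterpret_cast<unsigned char*>(tags_out));
      return count;
    }
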
diff --git a/runtime/openjdkjvmti/object_tagging.h b/runtime/openjdkjvmti/object_tagging.h
index 579dc22..0296f1a 100644
--- a/runtime/openjdkjvmti/object_tagging.h
+++ b/runtime/openjdkjvmti/object_tagging.h
@@ -23,6 +23,7 @@
 #include "gc/system_weak.h"
 #include "gc_root-inl.h"
 #include "globals.h"
+#include "jvmti.h"
 #include "mirror/object.h"
 #include "thread-inl.h"
 
@@ -33,7 +34,7 @@
 class ObjectTagTable : public art::gc::SystemWeakHolder {
  public:
   explicit ObjectTagTable(EventHandler* event_handler)
-      : art::gc::SystemWeakHolder(art::LockLevel::kAllocTrackerLock),
+      : art::gc::SystemWeakHolder(kTaggingLockLevel),
         update_since_last_sweep_(false),
         event_handler_(event_handler) {
   }
@@ -45,10 +46,16 @@
   bool Remove(art::mirror::Object* obj, jlong* tag)
       REQUIRES_SHARED(art::Locks::mutator_lock_)
       REQUIRES(!allow_disallow_lock_);
+  bool RemoveLocked(art::mirror::Object* obj, jlong* tag)
+      REQUIRES_SHARED(art::Locks::mutator_lock_)
+      REQUIRES(allow_disallow_lock_);
 
   bool Set(art::mirror::Object* obj, jlong tag)
       REQUIRES_SHARED(art::Locks::mutator_lock_)
       REQUIRES(!allow_disallow_lock_);
+  bool SetLocked(art::mirror::Object* obj, jlong tag)
+      REQUIRES_SHARED(art::Locks::mutator_lock_)
+      REQUIRES(allow_disallow_lock_);
 
   bool GetTag(art::mirror::Object* obj, jlong* result)
       REQUIRES_SHARED(art::Locks::mutator_lock_)
@@ -59,11 +66,48 @@
 
     return GetTagLocked(self, obj, result);
   }
+  bool GetTagLocked(art::mirror::Object* obj, jlong* result)
+      REQUIRES_SHARED(art::Locks::mutator_lock_)
+      REQUIRES(allow_disallow_lock_) {
+    art::Thread* self = art::Thread::Current();
+    allow_disallow_lock_.AssertHeld(self);
+    Wait(self);
+
+    return GetTagLocked(self, obj, result);
+  }
+
+  jlong GetTagOrZero(art::mirror::Object* obj)
+      REQUIRES_SHARED(art::Locks::mutator_lock_)
+      REQUIRES(!allow_disallow_lock_) {
+    jlong tmp = 0;
+    GetTag(obj, &tmp);
+    return tmp;
+  }
+  jlong GetTagOrZeroLocked(art::mirror::Object* obj)
+      REQUIRES_SHARED(art::Locks::mutator_lock_)
+      REQUIRES(allow_disallow_lock_) {
+    jlong tmp = 0;
+    GetTagLocked(obj, &tmp);
+    return tmp;
+  }
 
   void Sweep(art::IsMarkedVisitor* visitor)
       REQUIRES_SHARED(art::Locks::mutator_lock_)
       REQUIRES(!allow_disallow_lock_);
 
+  jvmtiError GetTaggedObjects(jvmtiEnv* jvmti_env,
+                              jint tag_count,
+                              const jlong* tags,
+                              jint* count_ptr,
+                              jobject** object_result_ptr,
+                              jlong** tag_result_ptr)
+      REQUIRES_SHARED(art::Locks::mutator_lock_)
+      REQUIRES(!allow_disallow_lock_);
+
+  void Lock() ACQUIRE(allow_disallow_lock_);
+  void Unlock() RELEASE(allow_disallow_lock_);
+  void AssertLocked() ASSERT_CAPABILITY(allow_disallow_lock_);
+
  private:
   bool SetLocked(art::Thread* self, art::mirror::Object* obj, jlong tag)
       REQUIRES_SHARED(art::Locks::mutator_lock_)
@@ -136,6 +180,10 @@
     }
   };
 
+  // The tag table is used when visiting roots. So it needs to have a low lock level.
+  static constexpr art::LockLevel kTaggingLockLevel =
+      static_cast<art::LockLevel>(art::LockLevel::kAbortLock + 1);
+
   std::unordered_map<art::GcRoot<art::mirror::Object>,
                      jlong,
                      HashGcRoot,
diff --git a/runtime/openjdkjvmti/ti_class.cc b/runtime/openjdkjvmti/ti_class.cc
new file mode 100644
index 0000000..de2076a
--- /dev/null
+++ b/runtime/openjdkjvmti/ti_class.cc
@@ -0,0 +1,73 @@
+/* Copyright (C) 2016 The Android Open Source Project
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This file implements interfaces from the file jvmti.h. This implementation
+ * is licensed under the same terms as the file jvmti.h.  The
+ * copyright and license information for the file jvmti.h follows.
+ *
+ * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.  Oracle designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Oracle in the LICENSE file that accompanied this code.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include "ti_class.h"
+
+#include "art_jvmti.h"
+#include "scoped_thread_state_change-inl.h"
+#include "thread-inl.h"
+
+namespace openjdkjvmti {
+
+jvmtiError ClassUtil::GetClassSignature(jvmtiEnv* env,
+                                         jclass jklass,
+                                         char** signature_ptr,
+                                         char** generic_ptr) {
+  art::ScopedObjectAccess soa(art::Thread::Current());
+  art::ObjPtr<art::mirror::Class> klass = soa.Decode<art::mirror::Class>(jklass);
+  if (klass == nullptr) {
+    return ERR(INVALID_CLASS);
+  }
+
+  JvmtiUniquePtr sig_copy;
+  if (signature_ptr != nullptr) {
+    std::string storage;
+    const char* descriptor = klass->GetDescriptor(&storage);
+
+    unsigned char* tmp;
+    jvmtiError ret = CopyString(env, descriptor, &tmp);
+    if (ret != ERR(NONE)) {
+      return ret;
+    }
+    sig_copy = MakeJvmtiUniquePtr(env, tmp);
+    *signature_ptr = reinterpret_cast<char*>(tmp);
+  }
+
+  // TODO: Support generic signature.
+  if (generic_ptr != nullptr) {
+    *generic_ptr = nullptr;
+  }
+
+  // Everything is fine, release the buffers.
+  sig_copy.release();
+
+  return ERR(NONE);
+}
+
+}  // namespace openjdkjvmti
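
For reference (illustrative sketch, not part of the patch): an agent-side call into the new GetClassSignature. Both output buffers are owned by the agent and released through Deallocate(); the generic signature is not supported yet and comes back null.

    #include <cstdio>
    #include <jvmti.h>

    void PrintSignature(jvmtiEnv* jvmti, jclass klass) {
      char* signature = nullptr;
      char* generic = nullptr;
      if (jvmti->GetClassSignature(klass, &signature, &generic) != JVMTI_ERROR_NONE) {
        return;
      }
      std::printf("%s\n", signature);
      jvmti->Deallocate(reinterpret_cast<unsigned char*>(signature));
      if (generic != nullptr) {
        jvmti->Deallocate(reinterpret_cast<unsigned char*>(generic));
      }
    }
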
diff --git a/runtime/openjdkjvmti/ti_class.h b/runtime/openjdkjvmti/ti_class.h
new file mode 100644
index 0000000..caa77d4
--- /dev/null
+++ b/runtime/openjdkjvmti/ti_class.h
@@ -0,0 +1,50 @@
+/* Copyright (C) 2016 The Android Open Source Project
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This file implements interfaces from the file jvmti.h. This implementation
+ * is licensed under the same terms as the file jvmti.h.  The
+ * copyright and license information for the file jvmti.h follows.
+ *
+ * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.  Oracle designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Oracle in the LICENSE file that accompanied this code.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef ART_RUNTIME_OPENJDKJVMTI_TI_CLASS_H_
+#define ART_RUNTIME_OPENJDKJVMTI_TI_CLASS_H_
+
+#include "jni.h"
+#include "jvmti.h"
+
+namespace openjdkjvmti {
+
+class ClassUtil {
+ public:
+  static jvmtiError GetClassSignature(jvmtiEnv* env,
+                                      jclass klass,
+                                      char** signature_ptr,
+                                      char** generic_ptr);
+};
+
+}  // namespace openjdkjvmti
+
+#endif  // ART_RUNTIME_OPENJDKJVMTI_TI_CLASS_H_
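
The ti_heap.cc that follows re-homes the heap iteration code and adds FollowReferences. Its Work() loop consumes a worklist from the front but only compacts it in batches, keeping erase() amortized cheap; reduced to its core (illustrative sketch, not part of the patch; max_start stands in for the class's kMaxStart constant):

    #include <vector>

    template <typename T, typename Visit>
    void DrainWorklist(std::vector<T>* worklist, Visit visit, size_t max_start) {
      size_t start = 0;
      while (start < worklist->size()) {
        T current = (*worklist)[start];  // Copy out first: visit() may grow the vector.
        ++start;
        if (start >= max_start) {
          // Drop the processed prefix in one batch rather than per element.
          worklist->erase(worklist->begin(), worklist->begin() + start);
          start = 0;
        }
        visit(current);  // May push_back() newly discovered elements.
      }
    }
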
diff --git a/runtime/openjdkjvmti/ti_heap.cc b/runtime/openjdkjvmti/ti_heap.cc
new file mode 100644
index 0000000..0eff469
--- /dev/null
+++ b/runtime/openjdkjvmti/ti_heap.cc
@@ -0,0 +1,684 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ti_heap.h"
+
+#include "art_field-inl.h"
+#include "art_jvmti.h"
+#include "base/macros.h"
+#include "base/mutex.h"
+#include "class_linker.h"
+#include "gc/heap.h"
+#include "gc_root-inl.h"
+#include "jni_env_ext.h"
+#include "jni_internal.h"
+#include "mirror/class.h"
+#include "mirror/object-inl.h"
+#include "mirror/object_array-inl.h"
+#include "object_callbacks.h"
+#include "object_tagging.h"
+#include "obj_ptr-inl.h"
+#include "runtime.h"
+#include "scoped_thread_state_change-inl.h"
+#include "thread-inl.h"
+#include "thread_list.h"
+
+namespace openjdkjvmti {
+
+struct IterateThroughHeapData {
+  IterateThroughHeapData(HeapUtil* _heap_util,
+                         jint heap_filter,
+                         art::ObjPtr<art::mirror::Class> klass,
+                         const jvmtiHeapCallbacks* _callbacks,
+                         const void* _user_data)
+      : heap_util(_heap_util),
+        filter_klass(klass),
+        callbacks(_callbacks),
+        user_data(_user_data),
+        filter_out_tagged((heap_filter & JVMTI_HEAP_FILTER_TAGGED) != 0),
+        filter_out_untagged((heap_filter & JVMTI_HEAP_FILTER_UNTAGGED) != 0),
+        filter_out_class_tagged((heap_filter & JVMTI_HEAP_FILTER_CLASS_TAGGED) != 0),
+        filter_out_class_untagged((heap_filter & JVMTI_HEAP_FILTER_CLASS_UNTAGGED) != 0),
+        any_filter(filter_out_tagged ||
+                   filter_out_untagged ||
+                   filter_out_class_tagged ||
+                   filter_out_class_untagged),
+        stop_reports(false) {
+  }
+
+  bool ShouldReportByHeapFilter(jlong tag, jlong class_tag) {
+    if (!any_filter) {
+      return true;
+    }
+
+    if ((tag == 0 && filter_out_untagged) || (tag != 0 && filter_out_tagged)) {
+      return false;
+    }
+
+    if ((class_tag == 0 && filter_out_class_untagged) ||
+        (class_tag != 0 && filter_out_class_tagged)) {
+      return false;
+    }
+
+    return true;
+  }
+
+  HeapUtil* heap_util;
+  art::ObjPtr<art::mirror::Class> filter_klass;
+  const jvmtiHeapCallbacks* callbacks;
+  const void* user_data;
+  const bool filter_out_tagged;
+  const bool filter_out_untagged;
+  const bool filter_out_class_tagged;
+  const bool filter_out_class_untagged;
+  const bool any_filter;
+
+  bool stop_reports;
+};
+
+static void IterateThroughHeapObjectCallback(art::mirror::Object* obj, void* arg)
+    REQUIRES_SHARED(art::Locks::mutator_lock_) {
+  IterateThroughHeapData* ithd = reinterpret_cast<IterateThroughHeapData*>(arg);
+  // Early return, as we can't really stop visiting.
+  if (ithd->stop_reports) {
+    return;
+  }
+
+  art::ScopedAssertNoThreadSuspension no_suspension("IterateThroughHeapCallback");
+
+  jlong tag = 0;
+  ithd->heap_util->GetTags()->GetTag(obj, &tag);
+
+  jlong class_tag = 0;
+  art::ObjPtr<art::mirror::Class> klass = obj->GetClass();
+  ithd->heap_util->GetTags()->GetTag(klass.Ptr(), &class_tag);
+  // For simplicity, even if we find a tag = 0, assume 0 = not tagged.
+
+  if (!ithd->ShouldReportByHeapFilter(tag, class_tag)) {
+    return;
+  }
+
+  // TODO: Handle array_primitive_value_callback.
+
+  if (ithd->filter_klass != nullptr) {
+    if (ithd->filter_klass != klass) {
+      return;
+    }
+  }
+
+  jlong size = obj->SizeOf();
+
+  jint length = -1;
+  if (obj->IsArrayInstance()) {
+    length = obj->AsArray()->GetLength();
+  }
+
+  jlong saved_tag = tag;
+  jint ret = ithd->callbacks->heap_iteration_callback(class_tag,
+                                                      size,
+                                                      &tag,
+                                                      length,
+                                                      const_cast<void*>(ithd->user_data));
+
+  if (tag != saved_tag) {
+    ithd->heap_util->GetTags()->Set(obj, tag);
+  }
+
+  ithd->stop_reports = (ret & JVMTI_VISIT_ABORT) != 0;
+
+  // TODO Implement array primitive and string primitive callback.
+  // TODO Implement primitive field callback.
+}
+
+jvmtiError HeapUtil::IterateThroughHeap(jvmtiEnv* env ATTRIBUTE_UNUSED,
+                                        jint heap_filter,
+                                        jclass klass,
+                                        const jvmtiHeapCallbacks* callbacks,
+                                        const void* user_data) {
+  if (callbacks == nullptr) {
+    return ERR(NULL_POINTER);
+  }
+
+  if (callbacks->array_primitive_value_callback != nullptr) {
+    // TODO: Implement.
+    return ERR(NOT_IMPLEMENTED);
+  }
+
+  art::Thread* self = art::Thread::Current();
+  art::ScopedObjectAccess soa(self);      // Now we know we have the shared lock.
+
+  IterateThroughHeapData ithd(this,
+                              heap_filter,
+                              soa.Decode<art::mirror::Class>(klass),
+                              callbacks,
+                              user_data);
+
+  art::Runtime::Current()->GetHeap()->VisitObjects(IterateThroughHeapObjectCallback, &ithd);
+
+  return ERR(NONE);
+}
+
+class FollowReferencesHelper FINAL {
+ public:
+  FollowReferencesHelper(HeapUtil* h,
+                         art::ObjPtr<art::mirror::Object> initial_object ATTRIBUTE_UNUSED,
+                         const jvmtiHeapCallbacks* callbacks,
+                         const void* user_data)
+      : tag_table_(h->GetTags()),
+        callbacks_(callbacks),
+        user_data_(user_data),
+        start_(0),
+        stop_reports_(false) {
+  }
+
+  void Init()
+      REQUIRES_SHARED(art::Locks::mutator_lock_)
+      REQUIRES(!*tag_table_->GetAllowDisallowLock()) {
+    CollectAndReportRootsVisitor carrv(this, tag_table_, &worklist_, &visited_);
+    art::Runtime::Current()->VisitRoots(&carrv);
+    art::Runtime::Current()->VisitImageRoots(&carrv);
+    stop_reports_ = carrv.IsStopReports();
+
+    if (stop_reports_) {
+      worklist_.clear();
+    }
+  }
+
+  void Work()
+      REQUIRES_SHARED(art::Locks::mutator_lock_)
+      REQUIRES(!*tag_table_->GetAllowDisallowLock()) {
+    // Currently implemented as a BFS. To lower overhead, we don't erase elements immediately
+    // from the head of the work list, instead postponing until there's a gap that's "large."
+    //
+    // Alternatively, we can implement a DFS and use the work list as a stack.
+    while (start_ < worklist_.size()) {
+      art::mirror::Object* cur_obj = worklist_[start_];
+      start_++;
+
+      if (start_ >= kMaxStart) {
+        worklist_.erase(worklist_.begin(), worklist_.begin() + start_);
+        start_ = 0;
+      }
+
+      VisitObject(cur_obj);
+
+      if (stop_reports_) {
+        break;
+      }
+    }
+  }
+
+ private:
+  class CollectAndReportRootsVisitor FINAL : public art::RootVisitor {
+   public:
+    CollectAndReportRootsVisitor(FollowReferencesHelper* helper,
+                                 ObjectTagTable* tag_table,
+                                 std::vector<art::mirror::Object*>* worklist,
+                                 std::unordered_set<art::mirror::Object*>* visited)
+        : helper_(helper),
+          tag_table_(tag_table),
+          worklist_(worklist),
+          visited_(visited),
+          stop_reports_(false) {}
+
+    void VisitRoots(art::mirror::Object*** roots, size_t count, const art::RootInfo& info)
+        OVERRIDE
+        REQUIRES_SHARED(art::Locks::mutator_lock_)
+        REQUIRES(!*helper_->tag_table_->GetAllowDisallowLock()) {
+      for (size_t i = 0; i != count; ++i) {
+        AddRoot(*roots[i], info);
+      }
+    }
+
+    void VisitRoots(art::mirror::CompressedReference<art::mirror::Object>** roots,
+                    size_t count,
+                    const art::RootInfo& info)
+        OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_)
+        REQUIRES(!*helper_->tag_table_->GetAllowDisallowLock()) {
+      for (size_t i = 0; i != count; ++i) {
+        AddRoot(roots[i]->AsMirrorPtr(), info);
+      }
+    }
+
+    bool IsStopReports() {
+      return stop_reports_;
+    }
+
+   private:
+    void AddRoot(art::mirror::Object* root_obj, const art::RootInfo& info)
+        REQUIRES_SHARED(art::Locks::mutator_lock_)
+        REQUIRES(!*tag_table_->GetAllowDisallowLock()) {
+      // We use visited_ to mark roots already so we do not need another set.
+      if (visited_->find(root_obj) == visited_->end()) {
+        visited_->insert(root_obj);
+        worklist_->push_back(root_obj);
+      }
+      ReportRoot(root_obj, info);
+    }
+
+    jvmtiHeapReferenceKind GetReferenceKind(const art::RootInfo& info,
+                                            jvmtiHeapReferenceInfo* ref_info)
+        REQUIRES_SHARED(art::Locks::mutator_lock_) {
+      // TODO: Fill in ref_info.
+      memset(ref_info, 0, sizeof(jvmtiHeapReferenceInfo));
+
+      switch (info.GetType()) {
+        case art::RootType::kRootJNIGlobal:
+          return JVMTI_HEAP_REFERENCE_JNI_GLOBAL;
+
+        case art::RootType::kRootJNILocal:
+          return JVMTI_HEAP_REFERENCE_JNI_LOCAL;
+
+        case art::RootType::kRootJavaFrame:
+          return JVMTI_HEAP_REFERENCE_STACK_LOCAL;
+
+        case art::RootType::kRootNativeStack:
+        case art::RootType::kRootThreadBlock:
+        case art::RootType::kRootThreadObject:
+          return JVMTI_HEAP_REFERENCE_THREAD;
+
+        case art::RootType::kRootStickyClass:
+        case art::RootType::kRootInternedString:
+          // Note: this isn't a root in the RI.
+          return JVMTI_HEAP_REFERENCE_SYSTEM_CLASS;
+
+        case art::RootType::kRootMonitorUsed:
+        case art::RootType::kRootJNIMonitor:
+          return JVMTI_HEAP_REFERENCE_MONITOR;
+
+        case art::RootType::kRootFinalizing:
+        case art::RootType::kRootDebugger:
+        case art::RootType::kRootReferenceCleanup:
+        case art::RootType::kRootVMInternal:
+        case art::RootType::kRootUnknown:
+          return JVMTI_HEAP_REFERENCE_OTHER;
+      }
+      LOG(FATAL) << "Unreachable";
+      UNREACHABLE();
+    }
+
+    void ReportRoot(art::mirror::Object* root_obj, const art::RootInfo& info)
+        REQUIRES_SHARED(art::Locks::mutator_lock_)
+        REQUIRES(!*tag_table_->GetAllowDisallowLock()) {
+      jvmtiHeapReferenceInfo ref_info;
+      jvmtiHeapReferenceKind kind = GetReferenceKind(info, &ref_info);
+      jint result = helper_->ReportReference(kind, &ref_info, nullptr, root_obj);
+      if ((result & JVMTI_VISIT_ABORT) != 0) {
+        stop_reports_ = true;
+      }
+    }
+
+   private:
+    FollowReferencesHelper* helper_;
+    ObjectTagTable* tag_table_;
+    std::vector<art::mirror::Object*>* worklist_;
+    std::unordered_set<art::mirror::Object*>* visited_;
+    bool stop_reports_;
+  };
+
+  void VisitObject(art::mirror::Object* obj)
+      REQUIRES_SHARED(art::Locks::mutator_lock_)
+      REQUIRES(!*tag_table_->GetAllowDisallowLock()) {
+    if (obj->IsClass()) {
+      VisitClass(obj->AsClass());
+      return;
+    }
+    if (obj->IsArrayInstance()) {
+      VisitArray(obj);
+      return;
+    }
+
+    // TODO: We'll probably have to rewrite this completely with our own visiting logic, if we
+    //       want to have a chance of getting the field indices computed halfway efficiently. For
+    //       now, ignore them altogether.
+
+    struct InstanceReferenceVisitor {
+      explicit InstanceReferenceVisitor(FollowReferencesHelper* helper_)
+          : helper(helper_), stop_reports(false) {}
+
+      void operator()(art::mirror::Object* src,
+                      art::MemberOffset field_offset,
+                      bool is_static ATTRIBUTE_UNUSED) const
+          REQUIRES_SHARED(art::Locks::mutator_lock_)
+          REQUIRES(!*helper->tag_table_->GetAllowDisallowLock()) {
+        if (stop_reports) {
+          return;
+        }
+
+        art::mirror::Object* trg = src->GetFieldObjectReferenceAddr(field_offset)->AsMirrorPtr();
+        jvmtiHeapReferenceInfo reference_info;
+        memset(&reference_info, 0, sizeof(reference_info));
+
+        // TODO: Implement spec-compliant numbering.
+        reference_info.field.index = field_offset.Int32Value();
+
+        jvmtiHeapReferenceKind kind =
+            field_offset.Int32Value() == art::mirror::Object::ClassOffset().Int32Value()
+                ? JVMTI_HEAP_REFERENCE_CLASS
+                : JVMTI_HEAP_REFERENCE_FIELD;
+        const jvmtiHeapReferenceInfo* reference_info_ptr =
+            kind == JVMTI_HEAP_REFERENCE_CLASS ? nullptr : &reference_info;
+
+        stop_reports = !helper->ReportReferenceMaybeEnqueue(kind, reference_info_ptr, src, trg);
+      }
+
+      void VisitRoot(art::mirror::CompressedReference<art::mirror::Object>* root ATTRIBUTE_UNUSED)
+          const {
+        LOG(FATAL) << "Unreachable";
+      }
+      void VisitRootIfNonNull(
+          art::mirror::CompressedReference<art::mirror::Object>* root ATTRIBUTE_UNUSED) const {
+        LOG(FATAL) << "Unreachable";
+      }
+
+      // "mutable" required by the visitor API.
+      mutable FollowReferencesHelper* helper;
+      mutable bool stop_reports;
+    };
+
+    InstanceReferenceVisitor visitor(this);
+    // Visit references, not native roots.
+    obj->VisitReferences<false>(visitor, art::VoidFunctor());
+
+    stop_reports_ = visitor.stop_reports;
+  }
+
+  void VisitArray(art::mirror::Object* array)
+      REQUIRES_SHARED(art::Locks::mutator_lock_)
+      REQUIRES(!*tag_table_->GetAllowDisallowLock()) {
+    stop_reports_ = !ReportReferenceMaybeEnqueue(JVMTI_HEAP_REFERENCE_CLASS,
+                                                 nullptr,
+                                                 array,
+                                                 array->GetClass());
+    if (stop_reports_) {
+      return;
+    }
+
+    if (array->IsObjectArray()) {
+      art::mirror::ObjectArray<art::mirror::Object>* obj_array =
+          array->AsObjectArray<art::mirror::Object>();
+      int32_t length = obj_array->GetLength();
+      for (int32_t i = 0; i != length; ++i) {
+        art::mirror::Object* elem = obj_array->GetWithoutChecks(i);
+        if (elem != nullptr) {
+          jvmtiHeapReferenceInfo reference_info;
+          reference_info.array.index = i;
+          stop_reports_ = !ReportReferenceMaybeEnqueue(JVMTI_HEAP_REFERENCE_ARRAY_ELEMENT,
+                                                       &reference_info,
+                                                       array,
+                                                       elem);
+          if (stop_reports_) {
+            break;
+          }
+        }
+      }
+    }
+  }
+
+  void VisitClass(art::mirror::Class* klass)
+      REQUIRES_SHARED(art::Locks::mutator_lock_)
+      REQUIRES(!*tag_table_->GetAllowDisallowLock()) {
+    // TODO: Are erroneous classes reported? Are non-prepared ones? For now, just use resolved ones.
+    if (!klass->IsResolved()) {
+      return;
+    }
+
+    // Superclass.
+    stop_reports_ = !ReportReferenceMaybeEnqueue(JVMTI_HEAP_REFERENCE_SUPERCLASS,
+                                                 nullptr,
+                                                 klass,
+                                                 klass->GetSuperClass());
+    if (stop_reports_) {
+      return;
+    }
+
+    // Directly implemented or extended interfaces.
+    art::Thread* self = art::Thread::Current();
+    art::StackHandleScope<1> hs(self);
+    art::Handle<art::mirror::Class> h_klass(hs.NewHandle<art::mirror::Class>(klass));
+    for (size_t i = 0; i < h_klass->NumDirectInterfaces(); ++i) {
+      art::ObjPtr<art::mirror::Class> inf_klass =
+          art::mirror::Class::GetDirectInterface(self, h_klass, i);
+      if (inf_klass == nullptr) {
+        // TODO: With a resolved class this should not happen...
+        self->ClearException();
+        break;
+      }
+
+      stop_reports_ = !ReportReferenceMaybeEnqueue(JVMTI_HEAP_REFERENCE_INTERFACE,
+                                                   nullptr,
+                                                   klass,
+                                                   inf_klass.Ptr());
+      if (stop_reports_) {
+        return;
+      }
+    }
+
+    // Classloader.
+    // TODO: What about the boot classpath loader? We'll skip for now, but do we have to find the
+    //       fake BootClassLoader?
+    if (klass->GetClassLoader() != nullptr) {
+      stop_reports_ = !ReportReferenceMaybeEnqueue(JVMTI_HEAP_REFERENCE_CLASS_LOADER,
+                                                   nullptr,
+                                                   klass,
+                                                   klass->GetClassLoader());
+      if (stop_reports_) {
+        return;
+      }
+    }
+    DCHECK_EQ(h_klass.Get(), klass);
+
+    // Declared static fields.
+    for (auto& field : klass->GetSFields()) {
+      if (!field.IsPrimitiveType()) {
+        art::ObjPtr<art::mirror::Object> field_value = field.GetObject(klass);
+        if (field_value != nullptr) {
+          jvmtiHeapReferenceInfo reference_info;
+          memset(&reference_info, 0, sizeof(reference_info));
+
+          // TODO: Implement spec-compliant numbering.
+          reference_info.field.index = field.GetOffset().Int32Value();
+
+          stop_reports_ = !ReportReferenceMaybeEnqueue(JVMTI_HEAP_REFERENCE_STATIC_FIELD,
+                                                       &reference_info,
+                                                       klass,
+                                                       field_value.Ptr());
+          if (stop_reports_) {
+            return;
+          }
+        }
+      }
+    }
+  }
+
+  void MaybeEnqueue(art::mirror::Object* obj) REQUIRES_SHARED(art::Locks::mutator_lock_) {
+    if (visited_.find(obj) == visited_.end()) {
+      worklist_.push_back(obj);
+      visited_.insert(obj);
+    }
+  }
+
+  bool ReportReferenceMaybeEnqueue(jvmtiHeapReferenceKind kind,
+                                   const jvmtiHeapReferenceInfo* reference_info,
+                                   art::mirror::Object* referrer,
+                                   art::mirror::Object* referree)
+      REQUIRES_SHARED(art::Locks::mutator_lock_)
+      REQUIRES(!*tag_table_->GetAllowDisallowLock()) {
+    jint result = ReportReference(kind, reference_info, referrer, referree);
+    if ((result & JVMTI_VISIT_ABORT) == 0) {
+      if ((result & JVMTI_VISIT_OBJECTS) != 0) {
+        MaybeEnqueue(referree);
+      }
+      return true;
+    } else {
+      return false;
+    }
+  }
+
+  jint ReportReference(jvmtiHeapReferenceKind kind,
+                       const jvmtiHeapReferenceInfo* reference_info,
+                       art::mirror::Object* referrer,
+                       art::mirror::Object* referree)
+      REQUIRES_SHARED(art::Locks::mutator_lock_)
+      REQUIRES(!*tag_table_->GetAllowDisallowLock()) {
+    if (referree == nullptr || stop_reports_) {
+      return 0;
+    }
+
+    const jlong class_tag = tag_table_->GetTagOrZero(referree->GetClass());
+    const jlong referrer_class_tag =
+        referrer == nullptr ? 0 : tag_table_->GetTagOrZero(referrer->GetClass());
+    const jlong size = static_cast<jlong>(referree->SizeOf());
+    jlong tag = tag_table_->GetTagOrZero(referree);
+    jlong saved_tag = tag;
+    jlong referrer_tag = 0;
+    jlong saved_referrer_tag = 0;
+    jlong* referrer_tag_ptr;
+    if (referrer == nullptr) {
+      referrer_tag_ptr = nullptr;
+    } else {
+      if (referrer == referree) {
+        referrer_tag_ptr = &tag;
+      } else {
+        referrer_tag = saved_referrer_tag = tag_table_->GetTagOrZero(referrer);
+        referrer_tag_ptr = &referrer_tag;
+      }
+    }
+    jint length = -1;
+    if (referree->IsArrayInstance()) {
+      length = referree->AsArray()->GetLength();
+    }
+
+    jint result = callbacks_->heap_reference_callback(kind,
+                                                      reference_info,
+                                                      class_tag,
+                                                      referrer_class_tag,
+                                                      size,
+                                                      &tag,
+                                                      referrer_tag_ptr,
+                                                      length,
+                                                      const_cast<void*>(user_data_));
+
+    if (tag != saved_tag) {
+      tag_table_->Set(referree, tag);
+    }
+    if (referrer_tag != saved_referrer_tag) {
+      tag_table_->Set(referrer, referrer_tag);
+    }
+
+    return result;
+  }
+
+  ObjectTagTable* tag_table_;
+  const jvmtiHeapCallbacks* callbacks_;
+  const void* user_data_;
+
+  std::vector<art::mirror::Object*> worklist_;
+  size_t start_;
+  static constexpr size_t kMaxStart = 1000000U;
+
+  std::unordered_set<art::mirror::Object*> visited_;
+
+  bool stop_reports_;
+
+  friend class CollectAndReportRootsVisitor;
+};
+
+jvmtiError HeapUtil::FollowReferences(jvmtiEnv* env ATTRIBUTE_UNUSED,
+                                      jint heap_filter ATTRIBUTE_UNUSED,
+                                      jclass klass ATTRIBUTE_UNUSED,
+                                      jobject initial_object,
+                                      const jvmtiHeapCallbacks* callbacks,
+                                      const void* user_data) {
+  if (callbacks == nullptr) {
+    return ERR(NULL_POINTER);
+  }
+
+  if (callbacks->array_primitive_value_callback != nullptr) {
+    // TODO: Implement.
+    return ERR(NOT_IMPLEMENTED);
+  }
+
+  art::Thread* self = art::Thread::Current();
+  art::ScopedObjectAccess soa(self);      // Now we know we have the shared lock.
+
+  art::Runtime::Current()->GetHeap()->IncrementDisableMovingGC(self);
+  {
+    art::ObjPtr<art::mirror::Object> o_initial = soa.Decode<art::mirror::Object>(initial_object);
+
+    art::ScopedThreadSuspension sts(self, art::kWaitingForVisitObjects);
+    art::ScopedSuspendAll ssa("FollowReferences");
+
+    FollowReferencesHelper frh(this, o_initial, callbacks, user_data);
+    frh.Init();
+    frh.Work();
+  }
+  art::Runtime::Current()->GetHeap()->DecrementDisableMovingGC(self);
+
+  return ERR(NONE);
+}
+
+jvmtiError HeapUtil::GetLoadedClasses(jvmtiEnv* env,
+                                      jint* class_count_ptr,
+                                      jclass** classes_ptr) {
+  if (class_count_ptr == nullptr || classes_ptr == nullptr) {
+    return ERR(NULL_POINTER);
+  }
+
+  class ReportClassVisitor : public art::ClassVisitor {
+   public:
+    explicit ReportClassVisitor(art::Thread* self) : self_(self) {}
+
+    bool operator()(art::ObjPtr<art::mirror::Class> klass)
+        OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
+      classes_.push_back(self_->GetJniEnv()->AddLocalReference<jclass>(klass));
+      return true;
+    }
+
+    art::Thread* self_;
+    std::vector<jclass> classes_;
+  };
+
+  art::Thread* self = art::Thread::Current();
+  ReportClassVisitor rcv(self);
+  {
+    art::ScopedObjectAccess soa(self);
+    art::Runtime::Current()->GetClassLinker()->VisitClasses(&rcv);
+  }
+
+  size_t size = rcv.classes_.size();
+  jclass* classes = nullptr;
+  jvmtiError alloc_ret = env->Allocate(static_cast<jlong>(size * sizeof(jclass)),
+                                       reinterpret_cast<unsigned char**>(&classes));
+  if (alloc_ret != ERR(NONE)) {
+    return alloc_ret;
+  }
+
+  for (size_t i = 0; i < size; ++i) {
+    classes[i] = rcv.classes_[i];
+  }
+  *classes_ptr = classes;
+  *class_count_ptr = static_cast<jint>(size);
+
+  return ERR(NONE);
+}
+
+jvmtiError HeapUtil::ForceGarbageCollection(jvmtiEnv* env ATTRIBUTE_UNUSED) {
+  art::Runtime::Current()->GetHeap()->CollectGarbage(false);
+
+  return ERR(NONE);
+}
+}  // namespace openjdkjvmti
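The FollowReferences entry point above is driven entirely through the jvmtiHeapCallbacks table and the JVMTI_VISIT_* flags returned from the callback. A minimal agent-side sketch of a caller, assuming a jvmtiEnv* obtained in Agent_OnLoad (the names below are illustrative and not part of this change), which counts every reported reference:

#include <cstring>
#include <jvmti.h>

// Counts every reference reported back by FollowReferences and keeps traversing.
static jint JNICALL CountReference(jvmtiHeapReferenceKind /* kind */,
                                   const jvmtiHeapReferenceInfo* /* info */,
                                   jlong /* class_tag */,
                                   jlong /* referrer_class_tag */,
                                   jlong /* size */,
                                   jlong* /* tag_ptr */,
                                   jlong* /* referrer_tag_ptr */,
                                   jint /* length */,
                                   void* user_data) {
  ++*static_cast<size_t*>(user_data);
  return JVMTI_VISIT_OBJECTS;  // Ask the walker to follow the referred-to object as well.
}

static jvmtiError CountReachableReferences(jvmtiEnv* jvmti, jobject root, size_t* out) {
  jvmtiHeapCallbacks callbacks;
  std::memset(&callbacks, 0, sizeof(callbacks));
  callbacks.heap_reference_callback = CountReference;
  *out = 0;
  // heap_filter and klass are ignored by the implementation above, so pass 0/nullptr.
  return jvmti->FollowReferences(0, nullptr, root, &callbacks, out);
}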
diff --git a/runtime/openjdkjvmti/heap.h b/runtime/openjdkjvmti/ti_heap.h
similarity index 71%
rename from runtime/openjdkjvmti/heap.h
rename to runtime/openjdkjvmti/ti_heap.h
index b6becb9..72ee097 100644
--- a/runtime/openjdkjvmti/heap.h
+++ b/runtime/openjdkjvmti/ti_heap.h
@@ -14,8 +14,8 @@
  * limitations under the License.
  */
 
-#ifndef ART_RUNTIME_OPENJDKJVMTI_HEAP_H_
-#define ART_RUNTIME_OPENJDKJVMTI_HEAP_H_
+#ifndef ART_RUNTIME_OPENJDKJVMTI_TI_HEAP_H_
+#define ART_RUNTIME_OPENJDKJVMTI_TI_HEAP_H_
 
 #include "jvmti.h"
 
@@ -36,6 +36,15 @@
                                 const jvmtiHeapCallbacks* callbacks,
                                 const void* user_data);
 
+  jvmtiError FollowReferences(jvmtiEnv* env,
+                              jint heap_filter,
+                              jclass klass,
+                              jobject initial_object,
+                              const jvmtiHeapCallbacks* callbacks,
+                              const void* user_data);
+
+  static jvmtiError ForceGarbageCollection(jvmtiEnv* env);
+
   ObjectTagTable* GetTags() {
     return tags_;
   }
@@ -46,4 +55,4 @@
 
 }  // namespace openjdkjvmti
 
-#endif  // ART_RUNTIME_OPENJDKJVMTI_HEAP_H_
+#endif  // ART_RUNTIME_OPENJDKJVMTI_TI_HEAP_H_
diff --git a/runtime/openjdkjvmti/ti_method.cc b/runtime/openjdkjvmti/ti_method.cc
new file mode 100644
index 0000000..e391a9d
--- /dev/null
+++ b/runtime/openjdkjvmti/ti_method.cc
@@ -0,0 +1,131 @@
+/* Copyright (C) 2016 The Android Open Source Project
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This file implements interfaces from the file jvmti.h. This implementation
+ * is licensed under the same terms as the file jvmti.h.  The
+ * copyright and license information for the file jvmti.h follows.
+ *
+ * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.  Oracle designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Oracle in the LICENSE file that accompanied this code.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include "ti_method.h"
+
+#include "art_jvmti.h"
+#include "art_method-inl.h"
+#include "base/enums.h"
+#include "jni_internal.h"
+#include "modifiers.h"
+#include "scoped_thread_state_change-inl.h"
+
+namespace openjdkjvmti {
+
+jvmtiError MethodUtil::GetMethodName(jvmtiEnv* env,
+                                     jmethodID method,
+                                     char** name_ptr,
+                                     char** signature_ptr,
+                                     char** generic_ptr) {
+  art::ScopedObjectAccess soa(art::Thread::Current());
+  art::ArtMethod* art_method = art::jni::DecodeArtMethod(method);
+  art_method = art_method->GetInterfaceMethodIfProxy(art::kRuntimePointerSize);
+
+  JvmtiUniquePtr name_copy;
+  if (name_ptr != nullptr) {
+    const char* method_name = art_method->GetName();
+    if (method_name == nullptr) {
+      method_name = "<error>";
+    }
+    unsigned char* tmp;
+    jvmtiError ret = CopyString(env, method_name, &tmp);
+    if (ret != ERR(NONE)) {
+      return ret;
+    }
+    name_copy = MakeJvmtiUniquePtr(env, tmp);
+    *name_ptr = reinterpret_cast<char*>(tmp);
+  }
+
+  JvmtiUniquePtr signature_copy;
+  if (signature_ptr != nullptr) {
+    const art::Signature sig = art_method->GetSignature();
+    std::string str = sig.ToString();
+    unsigned char* tmp;
+    jvmtiError ret = CopyString(env, str.c_str(), &tmp);
+    if (ret != ERR(NONE)) {
+      return ret;
+    }
+    signature_copy = MakeJvmtiUniquePtr(env, tmp);
+    *signature_ptr = reinterpret_cast<char*>(tmp);
+  }
+
+  // TODO: Support generic signature.
+  if (generic_ptr != nullptr) {
+    *generic_ptr = nullptr;
+  }
+
+  // Everything is fine, release the buffers.
+  name_copy.release();
+  signature_copy.release();
+
+  return ERR(NONE);
+}
+
+jvmtiError MethodUtil::GetMethodDeclaringClass(jvmtiEnv* env ATTRIBUTE_UNUSED,
+                                               jmethodID method,
+                                               jclass* declaring_class_ptr) {
+  if (declaring_class_ptr == nullptr) {
+    return ERR(NULL_POINTER);
+  }
+
+  art::ArtMethod* art_method = art::jni::DecodeArtMethod(method);
+  // Note: No GetInterfaceMethodIfProxy, we want the actual class.
+
+  art::ScopedObjectAccess soa(art::Thread::Current());
+  art::mirror::Class* klass = art_method->GetDeclaringClass();
+  *declaring_class_ptr = soa.AddLocalReference<jclass>(klass);
+
+  return ERR(NONE);
+}
+
+jvmtiError MethodUtil::GetMethodModifiers(jvmtiEnv* env ATTRIBUTE_UNUSED,
+                                          jmethodID method,
+                                          jint* modifiers_ptr) {
+  if (modifiers_ptr == nullptr) {
+    return ERR(NULL_POINTER);
+  }
+
+  art::ArtMethod* art_method = art::jni::DecodeArtMethod(method);
+  uint32_t modifiers = art_method->GetAccessFlags();
+
+  // Note: Keep this code in sync with Executable.fixMethodFlags.
+  if ((modifiers & art::kAccAbstract) != 0) {
+    modifiers &= ~art::kAccNative;
+  }
+  modifiers &= ~art::kAccSynchronized;
+  if ((modifiers & art::kAccDeclaredSynchronized) != 0) {
+    modifiers |= art::kAccSynchronized;
+  }
+  modifiers &= art::kAccJavaFlagsMask;
+
+  *modifiers_ptr = modifiers;
+  return ERR(NONE);
+}
+
+}  // namespace openjdkjvmti
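On the agent side, the strings returned by GetMethodName above are allocated through the environment's Allocate and must be released with Deallocate. A small sketch of a consumer (jvmti is an assumed jvmtiEnv*, not part of this change):

#include <cstdio>
#include <jvmti.h>

// Prints "name(signature) flags=0x..." for a method and releases the JVMTI-allocated buffers.
static void DumpMethod(jvmtiEnv* jvmti, jmethodID method) {
  char* name = nullptr;
  char* signature = nullptr;
  char* generic = nullptr;
  if (jvmti->GetMethodName(method, &name, &signature, &generic) != JVMTI_ERROR_NONE) {
    return;
  }
  jint modifiers = 0;
  jvmti->GetMethodModifiers(method, &modifiers);
  std::printf("%s%s flags=0x%x\n", name, signature, static_cast<unsigned int>(modifiers));
  jvmti->Deallocate(reinterpret_cast<unsigned char*>(name));
  jvmti->Deallocate(reinterpret_cast<unsigned char*>(signature));
  if (generic != nullptr) {  // The implementation above currently always returns null here.
    jvmti->Deallocate(reinterpret_cast<unsigned char*>(generic));
  }
}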
diff --git a/runtime/openjdkjvmti/ti_method.h b/runtime/openjdkjvmti/ti_method.h
new file mode 100644
index 0000000..43f11f9
--- /dev/null
+++ b/runtime/openjdkjvmti/ti_method.h
@@ -0,0 +1,59 @@
+/* Copyright (C) 2016 The Android Open Source Project
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This file implements interfaces from the file jvmti.h. This implementation
+ * is licensed under the same terms as the file jvmti.h.  The
+ * copyright and license information for the file jvmti.h follows.
+ *
+ * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.  Oracle designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Oracle in the LICENSE file that accompanied this code.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef ART_RUNTIME_OPENJDKJVMTI_TI_METHOD_H_
+#define ART_RUNTIME_OPENJDKJVMTI_TI_METHOD_H_
+
+#include "jni.h"
+#include "jvmti.h"
+
+namespace openjdkjvmti {
+
+class MethodUtil {
+ public:
+  static jvmtiError GetMethodName(jvmtiEnv* env,
+                                  jmethodID method,
+                                  char** name_ptr,
+                                  char** signature_ptr,
+                                  char** generic_ptr);
+
+  static jvmtiError GetMethodDeclaringClass(jvmtiEnv* env,
+                                            jmethodID method,
+                                            jclass* declaring_class_ptr);
+
+  static jvmtiError GetMethodModifiers(jvmtiEnv* env,
+                                       jmethodID method,
+                                       jint* modifiers_ptr);
+};
+
+}  // namespace openjdkjvmti
+
+#endif  // ART_RUNTIME_OPENJDKJVMTI_TI_METHOD_H_
diff --git a/runtime/openjdkjvmti/ti_stack.cc b/runtime/openjdkjvmti/ti_stack.cc
new file mode 100644
index 0000000..6f8976f
--- /dev/null
+++ b/runtime/openjdkjvmti/ti_stack.cc
@@ -0,0 +1,196 @@
+/* Copyright (C) 2016 The Android Open Source Project
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This file implements interfaces from the file jvmti.h. This implementation
+ * is licensed under the same terms as the file jvmti.h.  The
+ * copyright and license information for the file jvmti.h follows.
+ *
+ * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.  Oracle designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Oracle in the LICENSE file that accompanied this code.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include "ti_stack.h"
+
+#include "art_jvmti.h"
+#include "art_method-inl.h"
+#include "base/enums.h"
+#include "dex_file.h"
+#include "dex_file_annotations.h"
+#include "jni_env_ext.h"
+#include "jni_internal.h"
+#include "mirror/class.h"
+#include "mirror/dex_cache.h"
+#include "scoped_thread_state_change-inl.h"
+#include "stack.h"
+#include "thread.h"
+#include "thread_pool.h"
+
+namespace openjdkjvmti {
+
+struct GetStackTraceVisitor : public art::StackVisitor {
+  GetStackTraceVisitor(art::Thread* thread_in,
+                       art::ScopedObjectAccessAlreadyRunnable& soa_,
+                       size_t start_,
+                       size_t stop_)
+      : StackVisitor(thread_in, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
+        soa(soa_),
+        start(start_),
+        stop(stop_) {}
+
+  bool VisitFrame() REQUIRES_SHARED(art::Locks::mutator_lock_) {
+    art::ArtMethod* m = GetMethod();
+    if (m->IsRuntimeMethod()) {
+      return true;
+    }
+
+    if (start == 0) {
+      m = m->GetInterfaceMethodIfProxy(art::kRuntimePointerSize);
+      jmethodID id = art::jni::EncodeArtMethod(m);
+
+      art::mirror::DexCache* dex_cache = m->GetDexCache();
+      int32_t line_number = -1;
+      if (dex_cache != nullptr) {  // be tolerant of bad input
+        const art::DexFile* dex_file = dex_cache->GetDexFile();
+        line_number = art::annotations::GetLineNumFromPC(dex_file, m, GetDexPc(false));
+      }
+
+      jvmtiFrameInfo info = { id, static_cast<jlong>(line_number) };
+      frames.push_back(info);
+
+      if (stop == 1) {
+        return false;  // We're done.
+      } else if (stop > 0) {
+        stop--;
+      }
+    } else {
+      start--;
+    }
+
+    return true;
+  }
+
+  art::ScopedObjectAccessAlreadyRunnable& soa;
+  std::vector<jvmtiFrameInfo> frames;
+  size_t start;
+  size_t stop;
+};
+
+struct GetStackTraceClosure : public art::Closure {
+ public:
+  GetStackTraceClosure(size_t start, size_t stop)
+      : start_input(start),
+        stop_input(stop),
+        start_result(0),
+        stop_result(0) {}
+
+  void Run(art::Thread* self) OVERRIDE {
+    art::ScopedObjectAccess soa(art::Thread::Current());
+
+    GetStackTraceVisitor visitor(self, soa, start_input, stop_input);
+    visitor.WalkStack(false);
+
+    frames.swap(visitor.frames);
+    start_result = visitor.start;
+    stop_result = visitor.stop;
+  }
+
+  const size_t start_input;
+  const size_t stop_input;
+
+  std::vector<jvmtiFrameInfo> frames;
+  size_t start_result;
+  size_t stop_result;
+};
+
+jvmtiError StackUtil::GetStackTrace(jvmtiEnv* jvmti_env ATTRIBUTE_UNUSED,
+                                    jthread java_thread,
+                                    jint start_depth,
+                                    jint max_frame_count,
+                                    jvmtiFrameInfo* frame_buffer,
+                                    jint* count_ptr) {
+  if (java_thread == nullptr) {
+    return ERR(INVALID_THREAD);
+  }
+
+  art::Thread* thread;
+  {
+    // TODO: Need non-aborting call here, to return JVMTI_ERROR_INVALID_THREAD.
+    art::ScopedObjectAccess soa(art::Thread::Current());
+    art::MutexLock mu(soa.Self(), *art::Locks::thread_list_lock_);
+    thread = art::Thread::FromManagedThread(soa, java_thread);
+    DCHECK(thread != nullptr);
+  }
+
+  art::ThreadState state = thread->GetState();
+  if (state == art::ThreadState::kStarting ||
+      state == art::ThreadState::kTerminated ||
+      thread->IsStillStarting()) {
+    return ERR(THREAD_NOT_ALIVE);
+  }
+
+  if (max_frame_count < 0) {
+    return ERR(ILLEGAL_ARGUMENT);
+  }
+  if (frame_buffer == nullptr || count_ptr == nullptr) {
+    return ERR(NULL_POINTER);
+  }
+
+  if (max_frame_count == 0) {
+    *count_ptr = 0;
+    return ERR(NONE);
+  }
+
+  GetStackTraceClosure closure(start_depth >= 0 ? static_cast<size_t>(start_depth) : 0,
+                               start_depth >= 0 ? static_cast<size_t>(max_frame_count) : 0);
+  thread->RequestSynchronousCheckpoint(&closure);
+
+  size_t collected_frames = closure.frames.size();
+
+  // Frames from the top.
+  if (start_depth >= 0) {
+    if (closure.start_result != 0) {
+      // Not enough frames.
+      return ERR(ILLEGAL_ARGUMENT);
+    }
+    DCHECK_LE(collected_frames, static_cast<size_t>(max_frame_count));
+    if (closure.frames.size() > 0) {
+      memcpy(frame_buffer, closure.frames.data(), collected_frames * sizeof(jvmtiFrameInfo));
+    }
+    *count_ptr = static_cast<jint>(closure.frames.size());
+    return ERR(NONE);
+  }
+
+  // Frames from the bottom.
+  if (collected_frames < static_cast<size_t>(-start_depth)) {
+    return ERR(ILLEGAL_ARGUMENT);
+  }
+
+  size_t count = std::min(static_cast<size_t>(-start_depth), static_cast<size_t>(max_frame_count));
+  memcpy(frame_buffer,
+         &closure.frames.data()[collected_frames + start_depth],
+         count * sizeof(jvmtiFrameInfo));
+  *count_ptr = static_cast<jint>(count);
+  return ERR(NONE);
+}
+
+}  // namespace openjdkjvmti
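For reference, the start_depth convention handled above follows the JVMTI spec: a non-negative value skips that many frames from the top of the stack, while a negative value asks for the |start_depth| bottom-most frames. A minimal agent-side sketch (jvmti is an assumed jvmtiEnv*, not part of this change):

#include <jvmti.h>

// Fetches up to `capacity` frames starting at the top of `thread`'s stack.
// Returns the number of frames written, or -1 on error.
static jint TopFrames(jvmtiEnv* jvmti, jthread thread, jvmtiFrameInfo* buffer, jint capacity) {
  jint count = 0;
  if (jvmti->GetStackTrace(thread, /* start_depth */ 0, capacity, buffer, &count) !=
      JVMTI_ERROR_NONE) {
    return -1;
  }
  return count;  // Each entry carries a jmethodID and a jlocation.
}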
diff --git a/runtime/openjdkjvmti/ti_stack.h b/runtime/openjdkjvmti/ti_stack.h
new file mode 100644
index 0000000..1931ed3
--- /dev/null
+++ b/runtime/openjdkjvmti/ti_stack.h
@@ -0,0 +1,51 @@
+/* Copyright (C) 2016 The Android Open Source Project
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This file implements interfaces from the file jvmti.h. This implementation
+ * is licensed under the same terms as the file jvmti.h.  The
+ * copyright and license information for the file jvmti.h follows.
+ *
+ * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.  Oracle designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Oracle in the LICENSE file that accompanied this code.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef ART_RUNTIME_OPENJDKJVMTI_TI_STACK_H_
+#define ART_RUNTIME_OPENJDKJVMTI_TI_STACK_H_
+
+#include "jvmti.h"
+
+namespace openjdkjvmti {
+
+class StackUtil {
+ public:
+  static jvmtiError GetStackTrace(jvmtiEnv* env,
+                                  jthread thread,
+                                  jint start_depth,
+                                  jint max_frame_count,
+                                  jvmtiFrameInfo* frame_buffer,
+                                  jint* count_ptr);
+};
+
+}  // namespace openjdkjvmti
+
+#endif  // ART_RUNTIME_OPENJDKJVMTI_TI_STACK_H_
diff --git a/runtime/openjdkjvmti/transform.cc b/runtime/openjdkjvmti/transform.cc
index 3443aea..fa2983c 100644
--- a/runtime/openjdkjvmti/transform.cc
+++ b/runtime/openjdkjvmti/transform.cc
@@ -283,7 +283,7 @@
 // Install the new dex file.
 // TODO do error checks for bad state (method in a stack, changes to number of methods/fields/etc).
 jvmtiError MoveTransformedFileIntoRuntime(jclass jklass,
-                                          std::string original_location,
+                                          const std::string& original_location,
                                           jint data_len,
                                           unsigned char* dex_data) {
   const char* dex_file_name = "Ldalvik/system/DexFile;";
diff --git a/runtime/openjdkjvmti/transform.h b/runtime/openjdkjvmti/transform.h
index 85bcb00..a76ed93 100644
--- a/runtime/openjdkjvmti/transform.h
+++ b/runtime/openjdkjvmti/transform.h
@@ -54,7 +54,7 @@
 
 // Install the new dex file.
 jvmtiError MoveTransformedFileIntoRuntime(jclass jklass,
-                                          std::string original_location,
+                                          const std::string& original_location,
                                           jint data_len,
                                           unsigned char* dex_data);
 
diff --git a/runtime/parsed_options.cc b/runtime/parsed_options.cc
index 56eab5e..e1022b0 100644
--- a/runtime/parsed_options.cc
+++ b/runtime/parsed_options.cc
@@ -300,6 +300,8 @@
       .Define("-Xplugin:_")
           .WithType<std::vector<Plugin>>().AppendValues()
           .IntoKey(M::Plugins)
+      .Define("-Xfully-deoptable")
+          .IntoKey(M::FullyDeoptable)
       .Ignore({
           "-ea", "-da", "-enableassertions", "-disableassertions", "--runtime-arg", "-esa",
           "-dsa", "-enablesystemassertions", "-disablesystemassertions", "-Xrs", "-Xint:_",
diff --git a/runtime/plugin.h b/runtime/plugin.h
index 18f3977..f077aaf 100644
--- a/runtime/plugin.h
+++ b/runtime/plugin.h
@@ -34,7 +34,7 @@
 // single-threaded fashion so not much need
 class Plugin {
  public:
-  static Plugin Create(std::string lib) {
+  static Plugin Create(const std::string& lib) {
     return Plugin(lib);
   }
 
@@ -66,7 +66,7 @@
   }
 
  private:
-  explicit Plugin(std::string library) : library_(library), dlopen_handle_(nullptr) { }
+  explicit Plugin(const std::string& library) : library_(library), dlopen_handle_(nullptr) { }
 
   std::string library_;
   void* dlopen_handle_;
diff --git a/runtime/primitive.cc b/runtime/primitive.cc
index d29a060..2380284 100644
--- a/runtime/primitive.cc
+++ b/runtime/primitive.cc
@@ -31,11 +31,35 @@
   "PrimVoid",
 };
 
+static const char* kBoxedDescriptors[] = {
+  "Ljava/lang/Object;",
+  "Ljava/lang/Boolean;",
+  "Ljava/lang/Byte;",
+  "Ljava/lang/Character;",
+  "Ljava/lang/Short;",
+  "Ljava/lang/Integer;",
+  "Ljava/lang/Long;",
+  "Ljava/lang/Float;",
+  "Ljava/lang/Double;",
+  "Ljava/lang/Void;",
+};
+
+#define COUNT_OF(x) (sizeof(x) / sizeof(x[0]))
+
 const char* Primitive::PrettyDescriptor(Primitive::Type type) {
+  static_assert(COUNT_OF(kTypeNames) == static_cast<size_t>(Primitive::kPrimLast) + 1,
+                "Missing element");
   CHECK(Primitive::kPrimNot <= type && type <= Primitive::kPrimVoid) << static_cast<int>(type);
   return kTypeNames[type];
 }
 
+const char* Primitive::BoxedDescriptor(Primitive::Type type) {
+  static_assert(COUNT_OF(kBoxedDescriptors) == static_cast<size_t>(Primitive::kPrimLast) + 1,
+                "Missing element");
+  CHECK(Primitive::kPrimNot <= type && type <= Primitive::kPrimVoid) << static_cast<int>(type);
+  return kBoxedDescriptors[type];
+}
+
 std::ostream& operator<<(std::ostream& os, const Primitive::Type& type) {
   int32_t int_type = static_cast<int32_t>(type);
   if (type >= Primitive::kPrimNot && type <= Primitive::kPrimVoid) {
diff --git a/runtime/primitive.h b/runtime/primitive.h
index 18f45ff..a0edaee 100644
--- a/runtime/primitive.h
+++ b/runtime/primitive.h
@@ -138,6 +138,9 @@
 
   static const char* PrettyDescriptor(Type type);
 
+  // Returns the descriptor corresponding to the boxed type of |type|.
+  static const char* BoxedDescriptor(Type type);
+
   static bool IsFloatingPointType(Type type) {
     return type == kPrimFloat || type == kPrimDouble;
   }
@@ -158,6 +161,35 @@
     }
   }
 
+  // Returns true if |type| is a numeric type.
+  static constexpr bool IsNumericType(Type type) {
+    switch (type) {
+      case Primitive::Type::kPrimNot: return false;
+      case Primitive::Type::kPrimBoolean: return false;
+      case Primitive::Type::kPrimByte: return true;
+      case Primitive::Type::kPrimChar: return false;
+      case Primitive::Type::kPrimShort: return true;
+      case Primitive::Type::kPrimInt: return true;
+      case Primitive::Type::kPrimLong: return true;
+      case Primitive::Type::kPrimFloat: return true;
+      case Primitive::Type::kPrimDouble: return true;
+      case Primitive::Type::kPrimVoid: return false;
+    }
+  }
+
+  // Returns true if it is possible to widen type |from| to type |to|. Both |from| and
+  // |to| should be numeric primitive types.
+  static bool IsWidenable(Type from, Type to) {
+    static_assert(Primitive::Type::kPrimByte < Primitive::Type::kPrimShort, "Bad ordering");
+    static_assert(Primitive::Type::kPrimShort < Primitive::Type::kPrimInt, "Bad ordering");
+    static_assert(Primitive::Type::kPrimInt < Primitive::Type::kPrimLong, "Bad ordering");
+    static_assert(Primitive::Type::kPrimLong < Primitive::Type::kPrimFloat, "Bad ordering");
+    static_assert(Primitive::Type::kPrimFloat < Primitive::Type::kPrimDouble, "Bad ordering");
+    // Widening is only applicable between numeric types, like byte
+    // and int. Non-numeric types, such as boolean, cannot be widened.
+    return IsNumericType(from) && IsNumericType(to) && from <= to;
+  }
+
   static bool IsIntOrLongType(Type type) {
     return type == kPrimInt || type == kPrimLong;
   }
diff --git a/runtime/proxy_test.cc b/runtime/proxy_test.cc
index 32a5582..fd7e56d 100644
--- a/runtime/proxy_test.cc
+++ b/runtime/proxy_test.cc
@@ -199,8 +199,6 @@
   ScopedObjectAccess soa(Thread::Current());
   jobject jclass_loader = LoadDex("Interfaces");
   StackHandleScope<7> hs(soa.Self());
-  Handle<mirror::ClassLoader> class_loader(
-      hs.NewHandle(soa.Decode<mirror::ClassLoader>(jclass_loader)));
 
   Handle<mirror::Class> proxyClass0;
   Handle<mirror::Class> proxyClass1;
diff --git a/runtime/read_barrier-inl.h b/runtime/read_barrier-inl.h
index 67e5a81..37cf257 100644
--- a/runtime/read_barrier-inl.h
+++ b/runtime/read_barrier-inl.h
@@ -40,29 +40,28 @@
       }
     }
     if (kUseBakerReadBarrier) {
-      // The higher bits of the rb_ptr, rb_ptr_high_bits (must be zero)
-      // is used to create artificial data dependency from the is_gray
-      // load to the ref field (ptr) load to avoid needing a load-load
-      // barrier between the two.
-      uintptr_t rb_ptr_high_bits;
-      bool is_gray = HasGrayReadBarrierPointer(obj, &rb_ptr_high_bits);
+      // fake_address_dependency (must be zero) is used to create artificial data dependency from
+      // the is_gray load to the ref field (ptr) load to avoid needing a load-load barrier between
+      // the two.
+      uintptr_t fake_address_dependency;
+      bool is_gray = IsGray(obj, &fake_address_dependency);
+      if (kEnableReadBarrierInvariantChecks) {
+        CHECK_EQ(fake_address_dependency, 0U) << obj << " rb_state=" << obj->GetReadBarrierState();
+      }
       ref_addr = reinterpret_cast<mirror::HeapReference<MirrorType>*>(
-          rb_ptr_high_bits | reinterpret_cast<uintptr_t>(ref_addr));
+          fake_address_dependency | reinterpret_cast<uintptr_t>(ref_addr));
       MirrorType* ref = ref_addr->AsMirrorPtr();
       MirrorType* old_ref = ref;
       if (is_gray) {
         // Slow-path.
         ref = reinterpret_cast<MirrorType*>(Mark(ref));
         // If kAlwaysUpdateField is true, update the field atomically. This may fail if mutator
-        // updates before us, but it's ok.
+        // updates before us, but it's OK.
         if (kAlwaysUpdateField && ref != old_ref) {
           obj->CasFieldStrongRelaxedObjectWithoutWriteBarrier<false, false>(
               offset, old_ref, ref);
         }
       }
-      if (kEnableReadBarrierInvariantChecks) {
-        CHECK_EQ(rb_ptr_high_bits, 0U) << obj << " rb_ptr=" << obj->GetReadBarrierPointer();
-      }
       AssertToSpaceInvariant(obj, offset, ref);
       return ref;
     } else if (kUseBrooksReadBarrier) {
@@ -223,20 +222,14 @@
   return Runtime::Current()->GetHeap()->ConcurrentCopyingCollector()->MarkFromReadBarrier(obj);
 }
 
-inline bool ReadBarrier::HasGrayReadBarrierPointer(mirror::Object* obj,
-                                                   uintptr_t* out_rb_ptr_high_bits) {
-  mirror::Object* rb_ptr = obj->GetReadBarrierPointer();
-  uintptr_t rb_ptr_bits = reinterpret_cast<uintptr_t>(rb_ptr);
-  uintptr_t rb_ptr_low_bits = rb_ptr_bits & rb_ptr_mask_;
-  if (kEnableReadBarrierInvariantChecks) {
-    CHECK(rb_ptr_low_bits == white_ptr_ || rb_ptr_low_bits == gray_ptr_ ||
-          rb_ptr_low_bits == black_ptr_)
-        << "obj=" << obj << " rb_ptr=" << rb_ptr << " " << obj->PrettyTypeOf();
-  }
-  bool is_gray = rb_ptr_low_bits == gray_ptr_;
-  // The high bits are supposed to be zero. We check this on the caller side.
-  *out_rb_ptr_high_bits = rb_ptr_bits & ~rb_ptr_mask_;
-  return is_gray;
+inline bool ReadBarrier::IsGray(mirror::Object* obj, uintptr_t* fake_address_dependency) {
+  return obj->GetReadBarrierState(fake_address_dependency) == gray_state_;
+}
+
+inline bool ReadBarrier::IsGray(mirror::Object* obj) {
+  // Use a load-acquire to load the read barrier bit to avoid reordering with the subsequent load.
+  // GetReadBarrierStateAcquire() has load-acquire semantics.
+  return obj->GetReadBarrierStateAcquire() == gray_state_;
 }
 
 }  // namespace art
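The comments above describe the idiom in prose; below is a self-contained sketch of the same trick with illustrative names (not ART code). The key point is that the zero OR'ed into the address must be produced in a way the optimizer cannot see through, otherwise the data dependency from the state load to the reference load is lost; ART hides that inside Object::GetReadBarrierState, while here an empty asm constraint plays the same role.

#include <atomic>
#include <cstdint>

struct Slot {
  std::atomic<uint32_t> state;  // Stands in for the read barrier state bits.
  int* payload;                 // Stands in for the object reference field.
};

// Loads `payload` with an address dependency on the `state` load, so the two
// loads cannot be reordered on weakly ordered CPUs without a load-load fence.
inline int* LoadPayloadAfterState(Slot* s, bool* is_gray) {
  uint32_t state = s->state.load(std::memory_order_relaxed);
  *is_gray = (state & 1u) != 0;
  uintptr_t opaque = state;
  asm volatile("" : "+r"(opaque));  // The compiler can no longer prove opaque == state.
  uintptr_t fake_dependency = opaque ^ state;  // Always zero at run time, but data-dependent on `state`.
  int** addr = reinterpret_cast<int**>(
      fake_dependency | reinterpret_cast<uintptr_t>(&s->payload));
  return *addr;
}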
diff --git a/runtime/read_barrier.h b/runtime/read_barrier.h
index a861861..cbc2697 100644
--- a/runtime/read_barrier.h
+++ b/runtime/read_barrier.h
@@ -82,26 +82,32 @@
   // ALWAYS_INLINE on this caused a performance regression b/26744236.
   static mirror::Object* Mark(mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_);
 
-  static mirror::Object* WhitePtr() {
-    return reinterpret_cast<mirror::Object*>(white_ptr_);
+  static constexpr uint32_t WhiteState() {
+    return white_state_;
   }
-  static mirror::Object* GrayPtr() {
-    return reinterpret_cast<mirror::Object*>(gray_ptr_);
-  }
-  static mirror::Object* BlackPtr() {
-    return reinterpret_cast<mirror::Object*>(black_ptr_);
+  static constexpr uint32_t GrayState() {
+    return gray_state_;
   }
 
-  ALWAYS_INLINE static bool HasGrayReadBarrierPointer(mirror::Object* obj,
-                                                      uintptr_t* out_rb_ptr_high_bits)
+  // *fake_address_dependency is set to zero; the caller should bitwise-or it into the address of
+  // the subsequent load to prevent the read barrier state load and the subsequent object
+  // reference load (from one of `obj`'s fields) from being reordered.
+  ALWAYS_INLINE static bool IsGray(mirror::Object* obj, uintptr_t* fake_address_dependency)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
-  // Note: These couldn't be constexpr pointers as reinterpret_cast isn't compatible with them.
-  static constexpr uintptr_t white_ptr_ = 0x0;    // Not marked.
-  static constexpr uintptr_t gray_ptr_ = 0x1;     // Marked, but not marked through. On mark stack.
-  // TODO: black_ptr_ is unused, we should remove it.
-  static constexpr uintptr_t black_ptr_ = 0x2;    // Marked through. Used for non-moving objects.
-  static constexpr uintptr_t rb_ptr_mask_ = 0x1;  // The low bits for white|gray.
+  // This uses a load-acquire to load the read barrier bit internally to prevent the reordering of
+  // the read barrier bit load and the subsequent load.
+  ALWAYS_INLINE static bool IsGray(mirror::Object* obj)
+      REQUIRES_SHARED(Locks::mutator_lock_);
+
+  static bool IsValidReadBarrierState(uint32_t rb_state) {
+    return rb_state == white_state_ || rb_state == gray_state_;
+  }
+
+  static constexpr uint32_t white_state_ = 0x0;    // Not marked.
+  static constexpr uint32_t gray_state_ = 0x1;     // Marked, but not marked through. On mark stack.
+  static constexpr uint32_t rb_state_mask_ = 0x1;  // The low bits for white|gray.
 };
 
 }  // namespace art
diff --git a/runtime/reference_table.cc b/runtime/reference_table.cc
index 16ed7fb..1c975a4 100644
--- a/runtime/reference_table.cc
+++ b/runtime/reference_table.cc
@@ -215,33 +215,87 @@
   }
   std::sort(sorted_entries.begin(), sorted_entries.end(), GcRootComparator());
 
+  class SummaryElement {
+   public:
+    GcRoot<mirror::Object> root;
+    size_t equiv;
+    size_t identical;
+
+    SummaryElement() : equiv(0), identical(0) {}
+    SummaryElement(SummaryElement&& ref) {
+      root = ref.root;
+      equiv = ref.equiv;
+      identical = ref.identical;
+    }
+    SummaryElement(const SummaryElement&) = default;
+    SummaryElement& operator=(SummaryElement&&) = default;
+
+    void Reset(GcRoot<mirror::Object>& _root) {
+      root = _root;
+      equiv = 0;
+      identical = 0;
+    }
+  };
+  std::vector<SummaryElement> sorted_summaries;
+  {
+    SummaryElement prev;
+
+    for (GcRoot<mirror::Object>& root : sorted_entries) {
+      ObjPtr<mirror::Object> current = root.Read<kWithoutReadBarrier>();
+
+      if (UNLIKELY(prev.root.IsNull())) {
+        prev.Reset(root);
+        continue;
+      }
+
+      ObjPtr<mirror::Object> prevObj = prev.root.Read<kWithoutReadBarrier>();
+      if (current == prevObj) {
+        // Same reference, added more than once.
+        ++prev.identical;
+      } else if (current->GetClass() == prevObj->GetClass() &&
+          GetElementCount(current) == GetElementCount(prevObj)) {
+        // Same class / element count, different object.
+        ++prev.equiv;
+      } else {
+        sorted_summaries.push_back(prev);
+        prev.Reset(root);
+      }
+      prev.root = root;
+    }
+    sorted_summaries.push_back(prev);
+
+    // Compare summary elements, first by combined count, then by identical (indicating leaks),
+    // then by class (and size and address).
+    struct SummaryElementComparator {
+      GcRootComparator gc_root_cmp;
+
+      bool operator()(SummaryElement& elem1, SummaryElement& elem2) const
+          NO_THREAD_SAFETY_ANALYSIS {
+        Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
+
+        size_t count1 = elem1.equiv + elem1.identical;
+        size_t count2 = elem2.equiv + elem2.identical;
+        if (count1 != count2) {
+          return count1 > count2;
+        }
+
+        if (elem1.identical != elem2.identical) {
+          return elem1.identical > elem2.identical;
+        }
+
+        // Otherwise, compare the GC roots as before.
+        return gc_root_cmp(elem1.root, elem2.root);
+      }
+    };
+    std::sort(sorted_summaries.begin(), sorted_summaries.end(), SummaryElementComparator());
+  }
+
   // Dump a summary of the whole table.
   os << "  Summary:\n";
-  size_t equiv = 0;
-  size_t identical = 0;
-  ObjPtr<mirror::Object> prev = nullptr;
-  for (GcRoot<mirror::Object>& root : sorted_entries) {
-    ObjPtr<mirror::Object> current = root.Read<kWithoutReadBarrier>();
-    if (prev != nullptr) {
-      const size_t element_count = GetElementCount(prev);
-      if (current == prev) {
-        // Same reference, added more than once.
-        ++identical;
-      } else if (current->GetClass() == prev->GetClass() &&
-          GetElementCount(current) == element_count) {
-        // Same class / element count, different object.
-        ++equiv;
-      } else {
-        // Different class.
-        DumpSummaryLine(os, prev, element_count, identical, equiv);
-        equiv = 0;
-        identical = 0;
-      }
-    }
-    prev = current;
+  for (SummaryElement& elem : sorted_summaries) {
+    ObjPtr<mirror::Object> elemObj = elem.root.Read<kWithoutReadBarrier>();
+    DumpSummaryLine(os, elemObj, GetElementCount(elemObj), elem.identical, elem.equiv);
   }
-  // Handle the last entry.
-  DumpSummaryLine(os, prev, GetElementCount(prev), identical, equiv);
 }
 
 void ReferenceTable::VisitRoots(RootVisitor* visitor, const RootInfo& root_info) {
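The new summary ordering above is: descending by total occurrence count, then descending by the number of identical (same-object) entries, then by the existing GC-root comparison. A tiny standalone sketch of the same tie-break structure, using plain data rather than ART types:

#include <algorithm>
#include <vector>

struct Summary {
  int equiv;      // Same class/size, but different objects.
  int identical;  // Exactly the same object added repeatedly (a likely leak).
};

void SortSummaries(std::vector<Summary>* summaries) {
  std::sort(summaries->begin(), summaries->end(), [](const Summary& a, const Summary& b) {
    int total_a = a.equiv + a.identical;
    int total_b = b.equiv + b.identical;
    if (total_a != total_b) {
      return total_a > total_b;        // Most frequent entries first.
    }
    return a.identical > b.identical;  // Then the entries most likely to be leaks.
  });
}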
diff --git a/runtime/reference_table_test.cc b/runtime/reference_table_test.cc
index 489db9a..d80a9b3 100644
--- a/runtime/reference_table_test.cc
+++ b/runtime/reference_table_test.cc
@@ -166,4 +166,77 @@
   }
 }
 
+static std::vector<size_t> FindAll(const std::string& haystack, const char* needle) {
+  std::vector<size_t> res;
+  size_t start = 0;
+  do {
+    size_t pos = haystack.find(needle, start);
+    if (pos == std::string::npos) {
+      break;
+    }
+    res.push_back(pos);
+    start = pos + 1;
+  } while (start < haystack.size());
+  return res;
+}
+
+TEST_F(ReferenceTableTest, SummaryOrder) {
+  // Check that the summary statistics are sorted.
+  ScopedObjectAccess soa(Thread::Current());
+
+  ReferenceTable rt("test", 0, 20);
+
+  {
+    mirror::Object* s1 = mirror::String::AllocFromModifiedUtf8(soa.Self(), "hello");
+    mirror::Object* s2 = mirror::String::AllocFromModifiedUtf8(soa.Self(), "world");
+
+    // 3 copies of s1, 2 copies of s2, interleaved.
+    for (size_t i = 0; i != 2; ++i) {
+      rt.Add(s1);
+      rt.Add(s2);
+    }
+    rt.Add(s1);
+  }
+
+  {
+    // Differently sized byte arrays. Should be sorted by identical (non-unique count).
+    mirror::Object* b1_1 = mirror::ByteArray::Alloc(soa.Self(), 1);
+    rt.Add(b1_1);
+    rt.Add(mirror::ByteArray::Alloc(soa.Self(), 2));
+    rt.Add(b1_1);
+    rt.Add(mirror::ByteArray::Alloc(soa.Self(), 2));
+    rt.Add(mirror::ByteArray::Alloc(soa.Self(), 1));
+    rt.Add(mirror::ByteArray::Alloc(soa.Self(), 2));
+  }
+
+  rt.Add(mirror::CharArray::Alloc(soa.Self(), 0));
+
+  // Now dump, and ensure order.
+  std::ostringstream oss;
+  rt.Dump(oss);
+
+  // Only do this on the part after Summary.
+  std::string base = oss.str();
+  size_t summary_pos = base.find("Summary:");
+  ASSERT_NE(summary_pos, std::string::npos);
+
+  std::string haystack = base.substr(summary_pos);
+
+  std::vector<size_t> strCounts = FindAll(haystack, "java.lang.String");
+  std::vector<size_t> b1Counts = FindAll(haystack, "byte[] (1 elements)");
+  std::vector<size_t> b2Counts = FindAll(haystack, "byte[] (2 elements)");
+  std::vector<size_t> cCounts = FindAll(haystack, "char[]");
+
+  // Only one each.
+  EXPECT_EQ(1u, strCounts.size());
+  EXPECT_EQ(1u, b1Counts.size());
+  EXPECT_EQ(1u, b2Counts.size());
+  EXPECT_EQ(1u, cCounts.size());
+
+  // Expect them to be in order.
+  EXPECT_LT(strCounts[0], b1Counts[0]);
+  EXPECT_LT(b1Counts[0], b2Counts[0]);
+  EXPECT_LT(b2Counts[0], cCounts[0]);
+}
+
 }  // namespace art
diff --git a/runtime/reflection-inl.h b/runtime/reflection-inl.h
index c4d4fae..68e7a10 100644
--- a/runtime/reflection-inl.h
+++ b/runtime/reflection-inl.h
@@ -29,11 +29,10 @@
 
 namespace art {
 
-inline bool ConvertPrimitiveValue(bool unbox_for_result,
-                                  Primitive::Type srcType,
-                                  Primitive::Type dstType,
-                                  const JValue& src,
-                                  JValue* dst) {
+inline bool ConvertPrimitiveValueNoThrow(Primitive::Type srcType,
+                                         Primitive::Type dstType,
+                                         const JValue& src,
+                                         JValue* dst) {
   DCHECK(srcType != Primitive::kPrimNot && dstType != Primitive::kPrimNot);
   if (LIKELY(srcType == dstType)) {
     dst->SetJ(src.GetJ());
@@ -91,6 +90,18 @@
   default:
     break;
   }
+  return false;
+}
+
+inline bool ConvertPrimitiveValue(bool unbox_for_result,
+                                  Primitive::Type srcType,
+                                  Primitive::Type dstType,
+                                  const JValue& src,
+                                  JValue* dst) {
+  if (ConvertPrimitiveValueNoThrow(srcType, dstType, src, dst)) {
+    return true;
+  }
+
   if (!unbox_for_result) {
     ThrowIllegalArgumentException(StringPrintf("Invalid primitive conversion from %s to %s",
                                                PrettyDescriptor(srcType).c_str(),
diff --git a/runtime/reflection.cc b/runtime/reflection.cc
index f88309b..3128380 100644
--- a/runtime/reflection.cc
+++ b/runtime/reflection.cc
@@ -453,7 +453,7 @@
     return JValue();
   }
 
-  ArtMethod* method = soa.DecodeMethod(mid);
+  ArtMethod* method = jni::DecodeArtMethod(mid);
   bool is_string_init = method->GetDeclaringClass()->IsStringClass() && method->IsConstructor();
   if (is_string_init) {
     // Replace calls to String.<init> with equivalent StringFactory call.
@@ -484,7 +484,7 @@
     return JValue();
   }
 
-  ArtMethod* method = soa.DecodeMethod(mid);
+  ArtMethod* method = jni::DecodeArtMethod(mid);
   bool is_string_init = method->GetDeclaringClass()->IsStringClass() && method->IsConstructor();
   if (is_string_init) {
     // Replace calls to String.<init> with equivalent StringFactory call.
@@ -516,7 +516,7 @@
   }
 
   ObjPtr<mirror::Object> receiver = soa.Decode<mirror::Object>(obj);
-  ArtMethod* method = FindVirtualMethod(receiver, soa.DecodeMethod(mid));
+  ArtMethod* method = FindVirtualMethod(receiver, jni::DecodeArtMethod(mid));
   bool is_string_init = method->GetDeclaringClass()->IsStringClass() && method->IsConstructor();
   if (is_string_init) {
     // Replace calls to String.<init> with equivalent StringFactory call.
@@ -548,7 +548,7 @@
   }
 
   ObjPtr<mirror::Object> receiver = soa.Decode<mirror::Object>(obj);
-  ArtMethod* method = FindVirtualMethod(receiver, soa.DecodeMethod(mid));
+  ArtMethod* method = FindVirtualMethod(receiver, jni::DecodeArtMethod(mid));
   bool is_string_init = method->GetDeclaringClass()->IsStringClass() && method->IsConstructor();
   if (is_string_init) {
     // Replace calls to String.<init> with equivalent StringFactory call.
@@ -739,8 +739,11 @@
     arg_array.Append(value.GetI());
   }
 
-  soa.DecodeMethod(m)->Invoke(soa.Self(), arg_array.GetArray(), arg_array.GetNumBytes(),
-                              &result, shorty);
+  jni::DecodeArtMethod(m)->Invoke(soa.Self(),
+                                  arg_array.GetArray(),
+                                  arg_array.GetNumBytes(),
+                                  &result,
+                                  shorty);
   return result.GetL();
 }
 
diff --git a/runtime/reflection.h b/runtime/reflection.h
index 6e5ef71..f2652fd 100644
--- a/runtime/reflection.h
+++ b/runtime/reflection.h
@@ -47,6 +47,12 @@
                              JValue* unboxed_value)
     REQUIRES_SHARED(Locks::mutator_lock_);
 
+ALWAYS_INLINE bool ConvertPrimitiveValueNoThrow(Primitive::Type src_class,
+                                                Primitive::Type dst_class,
+                                                const JValue& src,
+                                                JValue* dst)
+    REQUIRES_SHARED(Locks::mutator_lock_);
+
 ALWAYS_INLINE bool ConvertPrimitiveValue(bool unbox_for_result,
                                          Primitive::Type src_class,
                                          Primitive::Type dst_class,
diff --git a/runtime/reflection_test.cc b/runtime/reflection_test.cc
index 22076bb..e254dfe 100644
--- a/runtime/reflection_test.cc
+++ b/runtime/reflection_test.cc
@@ -23,6 +23,7 @@
 #include "art_method-inl.h"
 #include "base/enums.h"
 #include "common_compiler_test.h"
+#include "jni_internal.h"
 #include "scoped_thread_state_change-inl.h"
 
 namespace art {
@@ -136,7 +137,7 @@
     ObjPtr<mirror::Object> receiver;
     ReflectionTestMakeExecutable(&method, &receiver, is_static, "nop", "()V");
     ScopedLocalRef<jobject> receiver_ref(soa.Env(), soa.AddLocalReference<jobject>(receiver));
-    InvokeWithJValues(soa, receiver_ref.get(), soa.EncodeMethod(method), nullptr);
+    InvokeWithJValues(soa, receiver_ref.get(), jni::EncodeArtMethod(method), nullptr);
   }
 
   void InvokeIdentityByteMethod(bool is_static) {
@@ -148,20 +149,20 @@
     jvalue args[1];
 
     args[0].b = 0;
-    JValue result = InvokeWithJValues(soa, receiver_ref.get(), soa.EncodeMethod(method), args);
+    JValue result = InvokeWithJValues(soa, receiver_ref.get(), jni::EncodeArtMethod(method), args);
     EXPECT_EQ(0, result.GetB());
 
     args[0].b = -1;
-    result = InvokeWithJValues(soa, receiver_ref.get(), soa.EncodeMethod(method), args);
+    result = InvokeWithJValues(soa, receiver_ref.get(), jni::EncodeArtMethod(method), args);
     EXPECT_EQ(-1, result.GetB());
 
     args[0].b = SCHAR_MAX;
-    result = InvokeWithJValues(soa, receiver_ref.get(), soa.EncodeMethod(method), args);
+    result = InvokeWithJValues(soa, receiver_ref.get(), jni::EncodeArtMethod(method), args);
     EXPECT_EQ(SCHAR_MAX, result.GetB());
 
     static_assert(SCHAR_MIN == -128, "SCHAR_MIN unexpected");
     args[0].b = SCHAR_MIN;
-    result = InvokeWithJValues(soa, receiver_ref.get(), soa.EncodeMethod(method), args);
+    result = InvokeWithJValues(soa, receiver_ref.get(), jni::EncodeArtMethod(method), args);
     EXPECT_EQ(SCHAR_MIN, result.GetB());
   }
 
@@ -174,19 +175,19 @@
     jvalue args[1];
 
     args[0].i = 0;
-    JValue result = InvokeWithJValues(soa, receiver_ref.get(), soa.EncodeMethod(method), args);
+    JValue result = InvokeWithJValues(soa, receiver_ref.get(), jni::EncodeArtMethod(method), args);
     EXPECT_EQ(0, result.GetI());
 
     args[0].i = -1;
-    result = InvokeWithJValues(soa, receiver_ref.get(), soa.EncodeMethod(method), args);
+    result = InvokeWithJValues(soa, receiver_ref.get(), jni::EncodeArtMethod(method), args);
     EXPECT_EQ(-1, result.GetI());
 
     args[0].i = INT_MAX;
-    result = InvokeWithJValues(soa, receiver_ref.get(), soa.EncodeMethod(method), args);
+    result = InvokeWithJValues(soa, receiver_ref.get(), jni::EncodeArtMethod(method), args);
     EXPECT_EQ(INT_MAX, result.GetI());
 
     args[0].i = INT_MIN;
-    result = InvokeWithJValues(soa, receiver_ref.get(), soa.EncodeMethod(method), args);
+    result = InvokeWithJValues(soa, receiver_ref.get(), jni::EncodeArtMethod(method), args);
     EXPECT_EQ(INT_MIN, result.GetI());
   }
 
@@ -199,19 +200,19 @@
     jvalue args[1];
 
     args[0].d = 0.0;
-    JValue result = InvokeWithJValues(soa, receiver_ref.get(), soa.EncodeMethod(method), args);
+    JValue result = InvokeWithJValues(soa, receiver_ref.get(), jni::EncodeArtMethod(method), args);
     EXPECT_DOUBLE_EQ(0.0, result.GetD());
 
     args[0].d = -1.0;
-    result = InvokeWithJValues(soa, receiver_ref.get(), soa.EncodeMethod(method), args);
+    result = InvokeWithJValues(soa, receiver_ref.get(), jni::EncodeArtMethod(method), args);
     EXPECT_DOUBLE_EQ(-1.0, result.GetD());
 
     args[0].d = DBL_MAX;
-    result = InvokeWithJValues(soa, receiver_ref.get(), soa.EncodeMethod(method), args);
+    result = InvokeWithJValues(soa, receiver_ref.get(), jni::EncodeArtMethod(method), args);
     EXPECT_DOUBLE_EQ(DBL_MAX, result.GetD());
 
     args[0].d = DBL_MIN;
-    result = InvokeWithJValues(soa, receiver_ref.get(), soa.EncodeMethod(method), args);
+    result = InvokeWithJValues(soa, receiver_ref.get(), jni::EncodeArtMethod(method), args);
     EXPECT_DOUBLE_EQ(DBL_MIN, result.GetD());
   }
 
@@ -225,22 +226,22 @@
 
     args[0].i = 1;
     args[1].i = 2;
-    JValue result = InvokeWithJValues(soa, receiver_ref.get(), soa.EncodeMethod(method), args);
+    JValue result = InvokeWithJValues(soa, receiver_ref.get(), jni::EncodeArtMethod(method), args);
     EXPECT_EQ(3, result.GetI());
 
     args[0].i = -2;
     args[1].i = 5;
-    result = InvokeWithJValues(soa, receiver_ref.get(), soa.EncodeMethod(method), args);
+    result = InvokeWithJValues(soa, receiver_ref.get(), jni::EncodeArtMethod(method), args);
     EXPECT_EQ(3, result.GetI());
 
     args[0].i = INT_MAX;
     args[1].i = INT_MIN;
-    result = InvokeWithJValues(soa, receiver_ref.get(), soa.EncodeMethod(method), args);
+    result = InvokeWithJValues(soa, receiver_ref.get(), jni::EncodeArtMethod(method), args);
     EXPECT_EQ(-1, result.GetI());
 
     args[0].i = INT_MAX;
     args[1].i = INT_MAX;
-    result = InvokeWithJValues(soa, receiver_ref.get(), soa.EncodeMethod(method), args);
+    result = InvokeWithJValues(soa, receiver_ref.get(), jni::EncodeArtMethod(method), args);
     EXPECT_EQ(-2, result.GetI());
   }
 
@@ -255,31 +256,31 @@
     args[0].i = 0;
     args[1].i = 0;
     args[2].i = 0;
-    JValue result = InvokeWithJValues(soa, receiver_ref.get(), soa.EncodeMethod(method), args);
+    JValue result = InvokeWithJValues(soa, receiver_ref.get(), jni::EncodeArtMethod(method), args);
     EXPECT_EQ(0, result.GetI());
 
     args[0].i = 1;
     args[1].i = 2;
     args[2].i = 3;
-    result = InvokeWithJValues(soa, receiver_ref.get(), soa.EncodeMethod(method), args);
+    result = InvokeWithJValues(soa, receiver_ref.get(), jni::EncodeArtMethod(method), args);
     EXPECT_EQ(6, result.GetI());
 
     args[0].i = -1;
     args[1].i = 2;
     args[2].i = -3;
-    result = InvokeWithJValues(soa, receiver_ref.get(), soa.EncodeMethod(method), args);
+    result = InvokeWithJValues(soa, receiver_ref.get(), jni::EncodeArtMethod(method), args);
     EXPECT_EQ(-2, result.GetI());
 
     args[0].i = INT_MAX;
     args[1].i = INT_MIN;
     args[2].i = INT_MAX;
-    result = InvokeWithJValues(soa, receiver_ref.get(), soa.EncodeMethod(method), args);
+    result = InvokeWithJValues(soa, receiver_ref.get(), jni::EncodeArtMethod(method), args);
     EXPECT_EQ(2147483646, result.GetI());
 
     args[0].i = INT_MAX;
     args[1].i = INT_MAX;
     args[2].i = INT_MAX;
-    result = InvokeWithJValues(soa, receiver_ref.get(), soa.EncodeMethod(method), args);
+    result = InvokeWithJValues(soa, receiver_ref.get(), jni::EncodeArtMethod(method), args);
     EXPECT_EQ(2147483645, result.GetI());
   }
 
@@ -295,35 +296,35 @@
     args[1].i = 0;
     args[2].i = 0;
     args[3].i = 0;
-    JValue result = InvokeWithJValues(soa, receiver_ref.get(), soa.EncodeMethod(method), args);
+    JValue result = InvokeWithJValues(soa, receiver_ref.get(), jni::EncodeArtMethod(method), args);
     EXPECT_EQ(0, result.GetI());
 
     args[0].i = 1;
     args[1].i = 2;
     args[2].i = 3;
     args[3].i = 4;
-    result = InvokeWithJValues(soa, receiver_ref.get(), soa.EncodeMethod(method), args);
+    result = InvokeWithJValues(soa, receiver_ref.get(), jni::EncodeArtMethod(method), args);
     EXPECT_EQ(10, result.GetI());
 
     args[0].i = -1;
     args[1].i = 2;
     args[2].i = -3;
     args[3].i = 4;
-    result = InvokeWithJValues(soa, receiver_ref.get(), soa.EncodeMethod(method), args);
+    result = InvokeWithJValues(soa, receiver_ref.get(), jni::EncodeArtMethod(method), args);
     EXPECT_EQ(2, result.GetI());
 
     args[0].i = INT_MAX;
     args[1].i = INT_MIN;
     args[2].i = INT_MAX;
     args[3].i = INT_MIN;
-    result = InvokeWithJValues(soa, receiver_ref.get(), soa.EncodeMethod(method), args);
+    result = InvokeWithJValues(soa, receiver_ref.get(), jni::EncodeArtMethod(method), args);
     EXPECT_EQ(-2, result.GetI());
 
     args[0].i = INT_MAX;
     args[1].i = INT_MAX;
     args[2].i = INT_MAX;
     args[3].i = INT_MAX;
-    result = InvokeWithJValues(soa, receiver_ref.get(), soa.EncodeMethod(method), args);
+    result = InvokeWithJValues(soa, receiver_ref.get(), jni::EncodeArtMethod(method), args);
     EXPECT_EQ(-4, result.GetI());
   }
 
@@ -340,7 +341,7 @@
     args[2].i = 0;
     args[3].i = 0;
     args[4].i = 0;
-    JValue result = InvokeWithJValues(soa, receiver_ref.get(), soa.EncodeMethod(method), args);
+    JValue result = InvokeWithJValues(soa, receiver_ref.get(), jni::EncodeArtMethod(method), args);
     EXPECT_EQ(0, result.GetI());
 
     args[0].i = 1;
@@ -348,7 +349,7 @@
     args[2].i = 3;
     args[3].i = 4;
     args[4].i = 5;
-    result = InvokeWithJValues(soa, receiver_ref.get(), soa.EncodeMethod(method), args);
+    result = InvokeWithJValues(soa, receiver_ref.get(), jni::EncodeArtMethod(method), args);
     EXPECT_EQ(15, result.GetI());
 
     args[0].i = -1;
@@ -356,7 +357,7 @@
     args[2].i = -3;
     args[3].i = 4;
     args[4].i = -5;
-    result = InvokeWithJValues(soa, receiver_ref.get(), soa.EncodeMethod(method), args);
+    result = InvokeWithJValues(soa, receiver_ref.get(), jni::EncodeArtMethod(method), args);
     EXPECT_EQ(-3, result.GetI());
 
     args[0].i = INT_MAX;
@@ -364,7 +365,7 @@
     args[2].i = INT_MAX;
     args[3].i = INT_MIN;
     args[4].i = INT_MAX;
-    result = InvokeWithJValues(soa, receiver_ref.get(), soa.EncodeMethod(method), args);
+    result = InvokeWithJValues(soa, receiver_ref.get(), jni::EncodeArtMethod(method), args);
     EXPECT_EQ(2147483645, result.GetI());
 
     args[0].i = INT_MAX;
@@ -372,7 +373,7 @@
     args[2].i = INT_MAX;
     args[3].i = INT_MAX;
     args[4].i = INT_MAX;
-    result = InvokeWithJValues(soa, receiver_ref.get(), soa.EncodeMethod(method), args);
+    result = InvokeWithJValues(soa, receiver_ref.get(), jni::EncodeArtMethod(method), args);
     EXPECT_EQ(2147483643, result.GetI());
   }
 
@@ -386,27 +387,27 @@
 
     args[0].d = 0.0;
     args[1].d = 0.0;
-    JValue result = InvokeWithJValues(soa, receiver_ref.get(), soa.EncodeMethod(method), args);
+    JValue result = InvokeWithJValues(soa, receiver_ref.get(), jni::EncodeArtMethod(method), args);
     EXPECT_DOUBLE_EQ(0.0, result.GetD());
 
     args[0].d = 1.0;
     args[1].d = 2.0;
-    result = InvokeWithJValues(soa, receiver_ref.get(), soa.EncodeMethod(method), args);
+    result = InvokeWithJValues(soa, receiver_ref.get(), jni::EncodeArtMethod(method), args);
     EXPECT_DOUBLE_EQ(3.0, result.GetD());
 
     args[0].d = 1.0;
     args[1].d = -2.0;
-    result = InvokeWithJValues(soa, receiver_ref.get(), soa.EncodeMethod(method), args);
+    result = InvokeWithJValues(soa, receiver_ref.get(), jni::EncodeArtMethod(method), args);
     EXPECT_DOUBLE_EQ(-1.0, result.GetD());
 
     args[0].d = DBL_MAX;
     args[1].d = DBL_MIN;
-    result = InvokeWithJValues(soa, receiver_ref.get(), soa.EncodeMethod(method), args);
+    result = InvokeWithJValues(soa, receiver_ref.get(), jni::EncodeArtMethod(method), args);
     EXPECT_DOUBLE_EQ(1.7976931348623157e308, result.GetD());
 
     args[0].d = DBL_MAX;
     args[1].d = DBL_MAX;
-    result = InvokeWithJValues(soa, receiver_ref.get(), soa.EncodeMethod(method), args);
+    result = InvokeWithJValues(soa, receiver_ref.get(), jni::EncodeArtMethod(method), args);
     EXPECT_DOUBLE_EQ(INFINITY, result.GetD());
   }
 
@@ -421,19 +422,19 @@
     args[0].d = 0.0;
     args[1].d = 0.0;
     args[2].d = 0.0;
-    JValue result = InvokeWithJValues(soa, receiver_ref.get(), soa.EncodeMethod(method), args);
+    JValue result = InvokeWithJValues(soa, receiver_ref.get(), jni::EncodeArtMethod(method), args);
     EXPECT_DOUBLE_EQ(0.0, result.GetD());
 
     args[0].d = 1.0;
     args[1].d = 2.0;
     args[2].d = 3.0;
-    result = InvokeWithJValues(soa, receiver_ref.get(), soa.EncodeMethod(method), args);
+    result = InvokeWithJValues(soa, receiver_ref.get(), jni::EncodeArtMethod(method), args);
     EXPECT_DOUBLE_EQ(6.0, result.GetD());
 
     args[0].d = 1.0;
     args[1].d = -2.0;
     args[2].d = 3.0;
-    result = InvokeWithJValues(soa, receiver_ref.get(), soa.EncodeMethod(method), args);
+    result = InvokeWithJValues(soa, receiver_ref.get(), jni::EncodeArtMethod(method), args);
     EXPECT_DOUBLE_EQ(2.0, result.GetD());
   }
 
@@ -449,21 +450,21 @@
     args[1].d = 0.0;
     args[2].d = 0.0;
     args[3].d = 0.0;
-    JValue result = InvokeWithJValues(soa, receiver_ref.get(), soa.EncodeMethod(method), args);
+    JValue result = InvokeWithJValues(soa, receiver_ref.get(), jni::EncodeArtMethod(method), args);
     EXPECT_DOUBLE_EQ(0.0, result.GetD());
 
     args[0].d = 1.0;
     args[1].d = 2.0;
     args[2].d = 3.0;
     args[3].d = 4.0;
-    result = InvokeWithJValues(soa, receiver_ref.get(), soa.EncodeMethod(method), args);
+    result = InvokeWithJValues(soa, receiver_ref.get(), jni::EncodeArtMethod(method), args);
     EXPECT_DOUBLE_EQ(10.0, result.GetD());
 
     args[0].d = 1.0;
     args[1].d = -2.0;
     args[2].d = 3.0;
     args[3].d = -4.0;
-    result = InvokeWithJValues(soa, receiver_ref.get(), soa.EncodeMethod(method), args);
+    result = InvokeWithJValues(soa, receiver_ref.get(), jni::EncodeArtMethod(method), args);
     EXPECT_DOUBLE_EQ(-2.0, result.GetD());
   }
 
@@ -480,7 +481,7 @@
     args[2].d = 0.0;
     args[3].d = 0.0;
     args[4].d = 0.0;
-    JValue result = InvokeWithJValues(soa, receiver_ref.get(), soa.EncodeMethod(method), args);
+    JValue result = InvokeWithJValues(soa, receiver_ref.get(), jni::EncodeArtMethod(method), args);
     EXPECT_DOUBLE_EQ(0.0, result.GetD());
 
     args[0].d = 1.0;
@@ -488,7 +489,7 @@
     args[2].d = 3.0;
     args[3].d = 4.0;
     args[4].d = 5.0;
-    result = InvokeWithJValues(soa, receiver_ref.get(), soa.EncodeMethod(method), args);
+    result = InvokeWithJValues(soa, receiver_ref.get(), jni::EncodeArtMethod(method), args);
     EXPECT_DOUBLE_EQ(15.0, result.GetD());
 
     args[0].d = 1.0;
@@ -496,7 +497,7 @@
     args[2].d = 3.0;
     args[3].d = -4.0;
     args[4].d = 5.0;
-    result = InvokeWithJValues(soa, receiver_ref.get(), soa.EncodeMethod(method), args);
+    result = InvokeWithJValues(soa, receiver_ref.get(), jni::EncodeArtMethod(method), args);
     EXPECT_DOUBLE_EQ(3.0, result.GetD());
   }
 
@@ -531,7 +532,7 @@
 
   jvalue args[1];
   args[0].l = nullptr;
-  InvokeWithJValues(soa, nullptr, soa.EncodeMethod(method), args);
+  InvokeWithJValues(soa, nullptr, jni::EncodeArtMethod(method), args);
 }
 
 TEST_F(ReflectionTest, StaticNopMethod) {
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index 4e600ae..09a0462 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -85,7 +85,9 @@
 #include "linear_alloc.h"
 #include "mirror/array.h"
 #include "mirror/class-inl.h"
+#include "mirror/class_ext.h"
 #include "mirror/class_loader.h"
+#include "mirror/emulated_stack_frame.h"
 #include "mirror/field.h"
 #include "mirror/method.h"
 #include "mirror/method_handle_impl.h"
@@ -237,6 +239,7 @@
       force_native_bridge_(false),
       is_native_bridge_loaded_(false),
       is_native_debuggable_(false),
+      is_fully_deoptable_(false),
       zygote_max_failed_boots_(0),
       experimental_flags_(ExperimentalFlags::kNone),
       oat_file_manager_(nullptr),
@@ -557,7 +560,10 @@
       "getSystemClassLoader", "()Ljava/lang/ClassLoader;", pointer_size);
   CHECK(getSystemClassLoader != nullptr);
 
-  JValue result = InvokeWithJValues(soa, nullptr, soa.EncodeMethod(getSystemClassLoader), nullptr);
+  JValue result = InvokeWithJValues(soa,
+                                    nullptr,
+                                    jni::EncodeArtMethod(getSystemClassLoader),
+                                    nullptr);
   JNIEnv* env = soa.Self()->GetJniEnv();
   ScopedLocalRef<jobject> system_class_loader(env, soa.AddLocalReference<jobject>(result.GetL()));
   CHECK(system_class_loader.get() != nullptr);
@@ -759,6 +765,9 @@
 }
 
 bool Runtime::IsDebuggable() const {
+  if (IsFullyDeoptable()) {
+    return true;
+  }
   const OatFile* oat_file = GetOatFileManager().GetPrimaryOatFile();
   return oat_file != nullptr && oat_file->IsDebuggable();
 }
@@ -982,6 +991,8 @@
   verify_ = runtime_options.GetOrDefault(Opt::Verify);
   allow_dex_file_fallback_ = !runtime_options.Exists(Opt::NoDexFileFallback);
 
+  is_fully_deoptable_ = runtime_options.Exists(Opt::FullyDeoptable);
+
   no_sig_chain_ = runtime_options.Exists(Opt::NoSigChain);
   force_native_bridge_ = runtime_options.Exists(Opt::ForceNativeBridge);
 
@@ -1018,8 +1029,10 @@
                        runtime_options.GetOrDefault(Opt::NonMovingSpaceCapacity),
                        runtime_options.GetOrDefault(Opt::Image),
                        runtime_options.GetOrDefault(Opt::ImageInstructionSet),
-                       xgc_option.collector_type_,
-                       runtime_options.GetOrDefault(Opt::BackgroundGc),
+                       // Override the collector type to CC if read barriers are enabled.
+                       kUseReadBarrier ? gc::kCollectorTypeCC : xgc_option.collector_type_,
+                       kUseReadBarrier ? BackgroundGcOption(gc::kCollectorTypeCCBackground)
+                                       : runtime_options.GetOrDefault(Opt::BackgroundGc),
                        runtime_options.GetOrDefault(Opt::LargeObjectSpace),
                        runtime_options.GetOrDefault(Opt::LargeObjectThreshold),
                        runtime_options.GetOrDefault(Opt::ParallelGCThreads),
@@ -1593,6 +1606,8 @@
   mirror::Field::VisitRoots(visitor);
   mirror::MethodType::VisitRoots(visitor);
   mirror::MethodHandleImpl::VisitRoots(visitor);
+  mirror::EmulatedStackFrame::VisitRoots(visitor);
+  mirror::ClassExt::VisitRoots(visitor);
   // Visit all the primitive array types classes.
   mirror::PrimitiveArray<uint8_t>::VisitRoots(visitor);   // BooleanArray
   mirror::PrimitiveArray<int8_t>::VisitRoots(visitor);    // ByteArray
@@ -1754,10 +1769,10 @@
   }
 }
 
-void Runtime::BroadcastForNewSystemWeaks() {
+void Runtime::BroadcastForNewSystemWeaks(bool broadcast_for_checkpoint) {
   // This is used for the read barrier case that uses the thread-local
-  // Thread::GetWeakRefAccessEnabled() flag.
-  CHECK(kUseReadBarrier);
+  // Thread::GetWeakRefAccessEnabled() flag and the checkpoint while weak ref access is disabled
+  // (see ThreadList::RunCheckpoint).
   monitor_list_->BroadcastForNewMonitors();
   intern_table_->BroadcastForNewInterns();
   java_vm_->BroadcastForNewWeakGlobals();
@@ -1765,7 +1780,7 @@
 
   // All other generic system-weak holders.
   for (gc::AbstractSystemWeakHolder* holder : system_weak_holders_) {
-    holder->Broadcast();
+    holder->Broadcast(broadcast_for_checkpoint);
   }
 }
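
For context, the waiting side is what makes the broadcast_for_checkpoint distinction matter: a thread blocked waiting for system-weak access has to wake up on a checkpoint broadcast, service the pending (empty) checkpoint, and then go back to waiting. Below is a rough sketch of such a holder; it is illustrative only (member names, include paths, and the constructor details are assumptions, and it is not one of the holders touched by this change):

// Sketch only -- illustrative holder; other required overrides of the holder
// interface are omitted. Assumed headers: base/mutex.h, gc/system_weak.h.
class ExampleWeakHolder : public art::gc::AbstractSystemWeakHolder {
 public:
  ExampleWeakHolder()
      : lock_("example weak holder lock"),
        cond_("example weak holder condition variable", lock_) {}

  void Broadcast(bool broadcast_for_checkpoint ATTRIBUTE_UNUSED) OVERRIDE {
    // Wake all waiters; they decide below whether this was a checkpoint wake-up.
    art::MutexLock mu(art::Thread::Current(), lock_);
    cond_.Broadcast(art::Thread::Current());
  }

  void WaitUntilAccessible(art::Thread* self) {
    art::MutexLock mu(self, lock_);
    while (!self->GetWeakRefAccessEnabled()) {
      cond_.WaitHoldingLocks(self);
      // Respond to a possible empty checkpoint before blocking again, so that
      // ThreadList::RunEmptyCheckpoint() does not hang waiting on this thread.
      self->CheckEmptyCheckpoint();
    }
  }

 private:
  art::Mutex lock_;
  art::ConditionVariable cond_ GUARDED_BY(lock_);
};
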
 
@@ -1951,31 +1966,31 @@
   preinitialization_transaction_->RecordWriteArray(array, index, value);
 }
 
-void Runtime::RecordStrongStringInsertion(mirror::String* s) const {
+void Runtime::RecordStrongStringInsertion(ObjPtr<mirror::String> s) const {
   DCHECK(IsAotCompiler());
   DCHECK(IsActiveTransaction());
   preinitialization_transaction_->RecordStrongStringInsertion(s);
 }
 
-void Runtime::RecordWeakStringInsertion(mirror::String* s) const {
+void Runtime::RecordWeakStringInsertion(ObjPtr<mirror::String> s) const {
   DCHECK(IsAotCompiler());
   DCHECK(IsActiveTransaction());
   preinitialization_transaction_->RecordWeakStringInsertion(s);
 }
 
-void Runtime::RecordStrongStringRemoval(mirror::String* s) const {
+void Runtime::RecordStrongStringRemoval(ObjPtr<mirror::String> s) const {
   DCHECK(IsAotCompiler());
   DCHECK(IsActiveTransaction());
   preinitialization_transaction_->RecordStrongStringRemoval(s);
 }
 
-void Runtime::RecordWeakStringRemoval(mirror::String* s) const {
+void Runtime::RecordWeakStringRemoval(ObjPtr<mirror::String> s) const {
   DCHECK(IsAotCompiler());
   DCHECK(IsActiveTransaction());
   preinitialization_transaction_->RecordWeakStringRemoval(s);
 }
 
-void Runtime::RecordResolveString(mirror::DexCache* dex_cache, uint32_t string_idx) const {
+void Runtime::RecordResolveString(ObjPtr<mirror::DexCache> dex_cache, uint32_t string_idx) const {
   DCHECK(IsAotCompiler());
   DCHECK(IsActiveTransaction());
   preinitialization_transaction_->RecordResolveString(dex_cache, string_idx);
diff --git a/runtime/runtime.h b/runtime/runtime.h
index b25ec23..de5a356 100644
--- a/runtime/runtime.h
+++ b/runtime/runtime.h
@@ -107,9 +107,7 @@
   kVisitRootFlagStartLoggingNewRoots = 0x4,
   kVisitRootFlagStopLoggingNewRoots = 0x8,
   kVisitRootFlagClearRootLog = 0x10,
-  // Non moving means we can have optimizations where we don't visit some roots if they are
-  // definitely reachable from another location. E.g. ArtMethod and ArtField roots.
-  kVisitRootFlagNonMoving = 0x20,
+  kVisitRootFlagClassLoader = 0x20,
 };
 
 class Runtime {
@@ -182,7 +180,7 @@
     return compiler_options_;
   }
 
-  void AddCompilerOption(std::string option) {
+  void AddCompilerOption(const std::string& option) {
     compiler_options_.push_back(option);
   }
 
@@ -321,11 +319,15 @@
 
   void DisallowNewSystemWeaks() REQUIRES_SHARED(Locks::mutator_lock_);
   void AllowNewSystemWeaks() REQUIRES_SHARED(Locks::mutator_lock_);
-  void BroadcastForNewSystemWeaks() REQUIRES_SHARED(Locks::mutator_lock_);
+  // broadcast_for_checkpoint is true when we broadcast to make blocked threads respond to
+  // checkpoint requests. It's false when we broadcast to unblock blocked threads after system
+  // weak access is re-enabled.
+  void BroadcastForNewSystemWeaks(bool broadcast_for_checkpoint = false);
 
   // Visit all the roots. If only_dirty is true then non-dirty roots won't be visited. If
   // clean_dirty is true then dirty roots will be marked as non-dirty after visiting.
   void VisitRoots(RootVisitor* visitor, VisitRootFlags flags = kVisitRootFlagAllRoots)
+      REQUIRES(!Locks::classlinker_classes_lock_, !Locks::trace_lock_)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Visit image roots, only used for hprof since the GC uses the image space mod union table
@@ -335,6 +337,7 @@
   // Visit all of the roots we can do safely do concurrently.
   void VisitConcurrentRoots(RootVisitor* visitor,
                             VisitRootFlags flags = kVisitRootFlagAllRoots)
+      REQUIRES(!Locks::classlinker_classes_lock_, !Locks::trace_lock_)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Visit all of the non thread roots, we can do this with mutators unpaused.
@@ -509,15 +512,15 @@
       REQUIRES_SHARED(Locks::mutator_lock_);
   void RecordWriteArray(mirror::Array* array, size_t index, uint64_t value) const
       REQUIRES_SHARED(Locks::mutator_lock_);
-  void RecordStrongStringInsertion(mirror::String* s) const
+  void RecordStrongStringInsertion(ObjPtr<mirror::String> s) const
       REQUIRES(Locks::intern_table_lock_);
-  void RecordWeakStringInsertion(mirror::String* s) const
+  void RecordWeakStringInsertion(ObjPtr<mirror::String> s) const
       REQUIRES(Locks::intern_table_lock_);
-  void RecordStrongStringRemoval(mirror::String* s) const
+  void RecordStrongStringRemoval(ObjPtr<mirror::String> s) const
       REQUIRES(Locks::intern_table_lock_);
-  void RecordWeakStringRemoval(mirror::String* s) const
+  void RecordWeakStringRemoval(ObjPtr<mirror::String> s) const
       REQUIRES(Locks::intern_table_lock_);
-  void RecordResolveString(mirror::DexCache* dex_cache, uint32_t string_idx) const
+  void RecordResolveString(ObjPtr<mirror::DexCache> dex_cache, uint32_t string_idx) const
       REQUIRES_SHARED(Locks::mutator_lock_);
 
   void SetFaultMessage(const std::string& message) REQUIRES(!fault_message_lock_);
@@ -589,6 +592,14 @@
 
   bool IsDebuggable() const;
 
+  bool IsFullyDeoptable() const {
+    return is_fully_deoptable_;
+  }
+
+  void SetFullyDeoptable(bool value) {
+    is_fully_deoptable_ = value;
+  }
+
   bool IsNativeDebuggable() const {
     return is_native_debuggable_;
   }
@@ -854,6 +865,9 @@
   // Whether we are running under native debugger.
   bool is_native_debuggable_;
 
+  // Whether we are expected to be deoptable at all points.
+  bool is_fully_deoptable_;
+
   // The maximum number of failed boots we allow before pruning the dalvik cache
   // and trying again. This option is only inspected when we're running as a
   // zygote.
diff --git a/runtime/runtime_options.def b/runtime/runtime_options.def
index b01a570..d1970fe 100644
--- a/runtime/runtime_options.def
+++ b/runtime/runtime_options.def
@@ -121,6 +121,7 @@
 RUNTIME_OPTIONS_KEY (std::vector<ti::Agent>,         AgentLib)  // -agentlib:<libname>=<options>, Requires -Xexperimental:agents
 RUNTIME_OPTIONS_KEY (std::vector<ti::Agent>,         AgentPath)  // -agentpath:<libname>=<options>, Requires -Xexperimental:agents
 RUNTIME_OPTIONS_KEY (std::vector<Plugin>,            Plugins)  // -Xplugin:<library> Requires -Xexperimental:runtime-plugins
+RUNTIME_OPTIONS_KEY (Unit,                           FullyDeoptable)  // -Xfully-deoptable
 
 // Not parse-able from command line, but can be provided explicitly.
 // (Do not add anything here that is defined in ParsedOptions::MakeParser)
diff --git a/runtime/scoped_thread_state_change-inl.h b/runtime/scoped_thread_state_change-inl.h
index bde23c8..d4469f4 100644
--- a/runtime/scoped_thread_state_change-inl.h
+++ b/runtime/scoped_thread_state_change-inl.h
@@ -86,30 +86,6 @@
   return ObjPtr<T, kPoison>::DownCast(Self()->DecodeJObject(obj));
 }
 
-inline ArtField* ScopedObjectAccessAlreadyRunnable::DecodeField(jfieldID fid) const {
-  Locks::mutator_lock_->AssertSharedHeld(Self());
-  DCHECK(IsRunnable());  // Don't work with raw objects in non-runnable states.
-  return reinterpret_cast<ArtField*>(fid);
-}
-
-inline jfieldID ScopedObjectAccessAlreadyRunnable::EncodeField(ArtField* field) const {
-  Locks::mutator_lock_->AssertSharedHeld(Self());
-  DCHECK(IsRunnable());  // Don't work with raw objects in non-runnable states.
-  return reinterpret_cast<jfieldID>(field);
-}
-
-inline ArtMethod* ScopedObjectAccessAlreadyRunnable::DecodeMethod(jmethodID mid) const {
-  Locks::mutator_lock_->AssertSharedHeld(Self());
-  DCHECK(IsRunnable());  // Don't work with raw objects in non-runnable states.
-  return reinterpret_cast<ArtMethod*>(mid);
-}
-
-inline jmethodID ScopedObjectAccessAlreadyRunnable::EncodeMethod(ArtMethod* method) const {
-  Locks::mutator_lock_->AssertSharedHeld(Self());
-  DCHECK(IsRunnable());  // Don't work with raw objects in non-runnable states.
-  return reinterpret_cast<jmethodID>(method);
-}
-
 inline bool ScopedObjectAccessAlreadyRunnable::IsRunnable() const {
   return self_->GetState() == kRunnable;
 }
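
The deleted EncodeMethod/DecodeField-style members are replaced throughout this change with free helpers in the jni namespace (jni::EncodeArtMethod, jni::DecodeArtField, ...), presumably declared in jni_internal.h. A minimal sketch of the assumed shape of those helpers, mirroring the reinterpret_casts removed above (templates and lock annotations of the real versions are omitted):

// Sketch only: assumed shape of the replacement helpers.
namespace art {
namespace jni {

inline ArtField* DecodeArtField(jfieldID fid) {
  // ART backs the opaque jfieldID handle directly with an ArtField pointer.
  return reinterpret_cast<ArtField*>(fid);
}

inline jfieldID EncodeArtField(ArtField* field) {
  return reinterpret_cast<jfieldID>(field);
}

inline ArtMethod* DecodeArtMethod(jmethodID mid) {
  // Likewise, jmethodID is an encoded ArtMethod pointer.
  return reinterpret_cast<ArtMethod*>(mid);
}

inline jmethodID EncodeArtMethod(ArtMethod* method) {
  return reinterpret_cast<jmethodID>(method);
}

}  // namespace jni
}  // namespace art
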
diff --git a/runtime/scoped_thread_state_change.h b/runtime/scoped_thread_state_change.h
index 04fd914..b499258 100644
--- a/runtime/scoped_thread_state_change.h
+++ b/runtime/scoped_thread_state_change.h
@@ -94,14 +94,6 @@
   template<typename T, bool kPoison = kIsDebugBuild>
   ObjPtr<T, kPoison> Decode(jobject obj) const REQUIRES_SHARED(Locks::mutator_lock_);
 
-  ArtField* DecodeField(jfieldID fid) const REQUIRES_SHARED(Locks::mutator_lock_);
-
-  jfieldID EncodeField(ArtField* field) const REQUIRES_SHARED(Locks::mutator_lock_);
-
-  ArtMethod* DecodeMethod(jmethodID mid) const REQUIRES_SHARED(Locks::mutator_lock_);
-
-  jmethodID EncodeMethod(ArtMethod* method) const REQUIRES_SHARED(Locks::mutator_lock_);
-
   ALWAYS_INLINE bool IsRunnable() const;
 
  protected:
diff --git a/runtime/stack.h b/runtime/stack.h
index e9ed497..992bda5 100644
--- a/runtime/stack.h
+++ b/runtime/stack.h
@@ -469,14 +469,21 @@
   }
 };
 
-class JavaFrameRootInfo : public RootInfo {
+class JavaFrameRootInfo FINAL : public RootInfo {
  public:
   JavaFrameRootInfo(uint32_t thread_id, const StackVisitor* stack_visitor, size_t vreg)
      : RootInfo(kRootJavaFrame, thread_id), stack_visitor_(stack_visitor), vreg_(vreg) {
   }
-  virtual void Describe(std::ostream& os) const OVERRIDE
+  void Describe(std::ostream& os) const OVERRIDE
       REQUIRES_SHARED(Locks::mutator_lock_);
 
+  size_t GetVReg() const {
+    return vreg_;
+  }
+  const StackVisitor* GetVisitor() const {
+    return stack_visitor_;
+  }
+
  private:
   const StackVisitor* const stack_visitor_;
   const size_t vreg_;
@@ -572,8 +579,7 @@
   };
 
  protected:
-  StackVisitor(Thread* thread, Context* context, StackWalkKind walk_kind)
-      REQUIRES_SHARED(Locks::mutator_lock_);
+  StackVisitor(Thread* thread, Context* context, StackWalkKind walk_kind);
 
   bool GetRegisterIfAccessible(uint32_t reg, VRegKind kind, uint32_t* val) const
       REQUIRES_SHARED(Locks::mutator_lock_);
@@ -624,7 +630,7 @@
     return num_frames_;
   }
 
-  size_t GetFrameDepth() REQUIRES_SHARED(Locks::mutator_lock_) {
+  size_t GetFrameDepth() const REQUIRES_SHARED(Locks::mutator_lock_) {
     return cur_depth_;
   }
 
diff --git a/runtime/thread-inl.h b/runtime/thread-inl.h
index 5fa9353..c92305f 100644
--- a/runtime/thread-inl.h
+++ b/runtime/thread-inl.h
@@ -72,6 +72,19 @@
       RunCheckpointFunction();
     } else if (ReadFlag(kSuspendRequest)) {
       FullSuspendCheck();
+    } else if (ReadFlag(kEmptyCheckpointRequest)) {
+      RunEmptyCheckpoint();
+    } else {
+      break;
+    }
+  }
+}
+
+inline void Thread::CheckEmptyCheckpoint() {
+  DCHECK_EQ(Thread::Current(), this);
+  for (;;) {
+    if (ReadFlag(kEmptyCheckpointRequest)) {
+      RunEmptyCheckpoint();
     } else {
       break;
     }
@@ -145,8 +158,13 @@
       RunCheckpointFunction();
       continue;
     }
+    if (UNLIKELY((old_state_and_flags.as_struct.flags & kEmptyCheckpointRequest) != 0)) {
+      RunEmptyCheckpoint();
+      continue;
+    }
     // Change the state but keep the current flags (kCheckpointRequest is clear).
     DCHECK_EQ((old_state_and_flags.as_struct.flags & kCheckpointRequest), 0);
+    DCHECK_EQ((old_state_and_flags.as_struct.flags & kEmptyCheckpointRequest), 0);
     new_state_and_flags.as_struct.flags = old_state_and_flags.as_struct.flags;
     new_state_and_flags.as_struct.state = new_state;
 
@@ -163,7 +181,8 @@
 inline void Thread::PassActiveSuspendBarriers() {
   while (true) {
     uint16_t current_flags = tls32_.state_and_flags.as_struct.flags;
-    if (LIKELY((current_flags & (kCheckpointRequest | kActiveSuspendBarrier)) == 0)) {
+    if (LIKELY((current_flags &
+                (kCheckpointRequest | kEmptyCheckpointRequest | kActiveSuspendBarrier)) == 0)) {
       break;
     } else if ((current_flags & kActiveSuspendBarrier) != 0) {
       PassActiveSuspendBarriers(this);
@@ -211,7 +230,8 @@
       }
     } else if ((old_state_and_flags.as_struct.flags & kActiveSuspendBarrier) != 0) {
       PassActiveSuspendBarriers(this);
-    } else if ((old_state_and_flags.as_struct.flags & kCheckpointRequest) != 0) {
+    } else if ((old_state_and_flags.as_struct.flags &
+                (kCheckpointRequest | kEmptyCheckpointRequest)) != 0) {
       // Impossible
       LOG(FATAL) << "Transitioning to runnable with checkpoint flag, "
                  << " flags=" << old_state_and_flags.as_struct.flags
diff --git a/runtime/thread.cc b/runtime/thread.cc
index ace5e67..b99df26 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -116,6 +116,13 @@
 }
 
 void InitEntryPoints(JniEntryPoints* jpoints, QuickEntryPoints* qpoints);
+void UpdateReadBarrierEntrypoints(QuickEntryPoints* qpoints, bool is_marking);
+
+void Thread::SetIsGcMarkingAndUpdateEntrypoints(bool is_marking) {
+  CHECK(kUseReadBarrier);
+  tls32_.is_gc_marking = is_marking;
+  UpdateReadBarrierEntrypoints(&tlsPtr_.quick_entrypoints, is_marking);
+}
 
 void Thread::InitTlsEntryPoints() {
   // Insert a placeholder so we can easily tell if we call an unimplemented entry point.
@@ -410,9 +417,9 @@
     self->tlsPtr_.opeer = soa.Decode<mirror::Object>(self->tlsPtr_.jpeer).Ptr();
     self->GetJniEnv()->DeleteGlobalRef(self->tlsPtr_.jpeer);
     self->tlsPtr_.jpeer = nullptr;
-    self->SetThreadName(self->GetThreadName(soa)->ToModifiedUtf8().c_str());
+    self->SetThreadName(self->GetThreadName()->ToModifiedUtf8().c_str());
 
-    ArtField* priorityField = soa.DecodeField(WellKnownClasses::java_lang_Thread_priority);
+    ArtField* priorityField = jni::DecodeArtField(WellKnownClasses::java_lang_Thread_priority);
     self->SetNativePriority(priorityField->GetInt(self->tlsPtr_.opeer));
     Dbg::PostThreadStart(self);
 
@@ -430,7 +437,7 @@
 
 Thread* Thread::FromManagedThread(const ScopedObjectAccessAlreadyRunnable& soa,
                                   mirror::Object* thread_peer) {
-  ArtField* f = soa.DecodeField(WellKnownClasses::java_lang_Thread_nativePeer);
+  ArtField* f = jni::DecodeArtField(WellKnownClasses::java_lang_Thread_nativePeer);
   Thread* result = reinterpret_cast<Thread*>(static_cast<uintptr_t>(f->GetLong(thread_peer)));
   // Sanity check that if we have a result it is either suspended or we hold the thread_list_lock_
   // to stop it from going away.
@@ -562,7 +569,7 @@
   if (VLOG_IS_ON(threads)) {
     ScopedObjectAccess soa(env);
 
-    ArtField* f = soa.DecodeField(WellKnownClasses::java_lang_Thread_name);
+    ArtField* f = jni::DecodeArtField(WellKnownClasses::java_lang_Thread_name);
     ObjPtr<mirror::String> java_name =
         f->GetObject(soa.Decode<mirror::Object>(java_peer))->AsString();
     std::string thread_name;
@@ -823,7 +830,7 @@
 
   ScopedObjectAccess soa(self);
   StackHandleScope<1> hs(self);
-  MutableHandle<mirror::String> peer_thread_name(hs.NewHandle(GetThreadName(soa)));
+  MutableHandle<mirror::String> peer_thread_name(hs.NewHandle(GetThreadName()));
   if (peer_thread_name.Get() == nullptr) {
     // The Thread constructor should have set the Thread.name to a
     // non-null value. However, because we can run without code
@@ -834,7 +841,7 @@
     } else {
       InitPeer<false>(soa, thread_is_daemon, thread_group, thread_name.get(), thread_priority);
     }
-    peer_thread_name.Assign(GetThreadName(soa));
+    peer_thread_name.Assign(GetThreadName());
   }
   // 'thread_name' may have been null, so don't trust 'peer_thread_name' to be non-null.
   if (peer_thread_name.Get() != nullptr) {
@@ -845,13 +852,13 @@
 template<bool kTransactionActive>
 void Thread::InitPeer(ScopedObjectAccess& soa, jboolean thread_is_daemon, jobject thread_group,
                       jobject thread_name, jint thread_priority) {
-  soa.DecodeField(WellKnownClasses::java_lang_Thread_daemon)->
+  jni::DecodeArtField(WellKnownClasses::java_lang_Thread_daemon)->
       SetBoolean<kTransactionActive>(tlsPtr_.opeer, thread_is_daemon);
-  soa.DecodeField(WellKnownClasses::java_lang_Thread_group)->
+  jni::DecodeArtField(WellKnownClasses::java_lang_Thread_group)->
       SetObject<kTransactionActive>(tlsPtr_.opeer, soa.Decode<mirror::Object>(thread_group));
-  soa.DecodeField(WellKnownClasses::java_lang_Thread_name)->
+  jni::DecodeArtField(WellKnownClasses::java_lang_Thread_name)->
       SetObject<kTransactionActive>(tlsPtr_.opeer, soa.Decode<mirror::Object>(thread_name));
-  soa.DecodeField(WellKnownClasses::java_lang_Thread_priority)->
+  jni::DecodeArtField(WellKnownClasses::java_lang_Thread_priority)->
       SetInt<kTransactionActive>(tlsPtr_.opeer, thread_priority);
 }
 
@@ -947,8 +954,8 @@
   DumpStack(os, dump_native_stack, backtrace_map);
 }
 
-mirror::String* Thread::GetThreadName(const ScopedObjectAccessAlreadyRunnable& soa) const {
-  ArtField* f = soa.DecodeField(WellKnownClasses::java_lang_Thread_name);
+mirror::String* Thread::GetThreadName() const {
+  ArtField* f = jni::DecodeArtField(WellKnownClasses::java_lang_Thread_name);
   if (tlsPtr_.opeer == nullptr) {
     return nullptr;
   }
@@ -1148,6 +1155,12 @@
   } while (!done);
 }
 
+void Thread::RunEmptyCheckpoint() {
+  DCHECK_EQ(Thread::Current(), this);
+  AtomicClearFlag(kEmptyCheckpointRequest);
+  Runtime::Current()->GetThreadList()->EmptyCheckpointBarrier()->Pass(this);
+}
+
 bool Thread::RequestCheckpoint(Closure* function) {
   union StateAndFlags old_state_and_flags;
   old_state_and_flags.as_int = tls32_.state_and_flags.as_int;
@@ -1175,6 +1188,107 @@
   return success;
 }
 
+bool Thread::RequestEmptyCheckpoint() {
+  union StateAndFlags old_state_and_flags;
+  old_state_and_flags.as_int = tls32_.state_and_flags.as_int;
+  if (old_state_and_flags.as_struct.state != kRunnable) {
+    // If it's not runnable, we don't need to do anything because it won't be in the middle of a
+    // heap access (e.g. the read barrier).
+    return false;
+  }
+
+  // We must be runnable to request a checkpoint.
+  DCHECK_EQ(old_state_and_flags.as_struct.state, kRunnable);
+  union StateAndFlags new_state_and_flags;
+  new_state_and_flags.as_int = old_state_and_flags.as_int;
+  new_state_and_flags.as_struct.flags |= kEmptyCheckpointRequest;
+  bool success = tls32_.state_and_flags.as_atomic_int.CompareExchangeStrongSequentiallyConsistent(
+      old_state_and_flags.as_int, new_state_and_flags.as_int);
+  if (success) {
+    TriggerSuspend();
+  }
+  return success;
+}
+
+class BarrierClosure : public Closure {
+ public:
+  explicit BarrierClosure(Closure* wrapped) : wrapped_(wrapped), barrier_(0) {}
+
+  void Run(Thread* self) OVERRIDE {
+    wrapped_->Run(self);
+    barrier_.Pass(self);
+  }
+
+  void Wait(Thread* self) {
+    barrier_.Increment(self, 1);
+  }
+
+ private:
+  Closure* wrapped_;
+  Barrier barrier_;
+};
+
+void Thread::RequestSynchronousCheckpoint(Closure* function) {
+  if (this == Thread::Current()) {
+    // Asked to run on this thread. Just run.
+    function->Run(this);
+    return;
+  }
+  Thread* self = Thread::Current();
+
+  // The current thread is not this thread.
+
+  for (;;) {
+    // If this thread is runnable, try to schedule a checkpoint. Do some gymnastics to not hold the
+    // suspend-count lock for too long.
+    if (GetState() == ThreadState::kRunnable) {
+      BarrierClosure barrier_closure(function);
+      bool installed = false;
+      {
+        MutexLock mu(self, *Locks::thread_suspend_count_lock_);
+        installed = RequestCheckpoint(&barrier_closure);
+      }
+      if (installed) {
+        barrier_closure.Wait(self);
+        return;
+      }
+      // Fall-through.
+    }
+
+    // This thread is not runnable, make sure we stay suspended, then run the checkpoint.
+    // Note: ModifySuspendCountInternal also expects the thread_list_lock to be held in
+    //       certain situations.
+    {
+      MutexLock mu(self, *Locks::thread_list_lock_);
+      MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
+
+      if (!ModifySuspendCount(self, +1, nullptr, false)) {
+        // Just retry the loop.
+        sched_yield();
+        continue;
+      }
+    }
+
+    while (GetState() == ThreadState::kRunnable) {
+      // We became runnable again. Wait till the suspend triggered in ModifySuspendCount
+      // moves us to suspended.
+      sched_yield();
+    }
+
+    function->Run(this);
+
+    {
+      MutexLock mu(self, *Locks::thread_list_lock_);
+      MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
+
+      DCHECK_NE(GetState(), ThreadState::kRunnable);
+      CHECK(ModifySuspendCount(self, -1, nullptr, false));
+    }
+
+    return;  // We're done, break out of the loop.
+  }
+}
+
 Closure* Thread::GetFlipFunction() {
   Atomic<Closure*>* atomic_func = reinterpret_cast<Atomic<Closure*>*>(&tlsPtr_.flip_function);
   Closure* func;
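
As a usage illustration of the new synchronous variant: a caller wraps the work in a Closure, and RequestSynchronousCheckpoint() only returns once Run() has executed, either on the target thread at its next suspend point (checkpoint path) or on the requesting thread while the target is held suspended (fallback path). A hedged sketch with an illustrative closure (not part of this change):

// Sketch only: running work in the context of another thread and waiting for it.
class DumpStateClosure : public art::Closure {
 public:
  void Run(art::Thread* thread) OVERRIDE {
    // 'thread' is the target; this body runs either on the target itself or,
    // if the target is suspended, on the thread that issued the request.
    LOG(INFO) << "Target thread state: " << thread->GetState();
  }
};

void DumpOtherThread(art::Thread* target) {
  DumpStateClosure closure;
  target->RequestSynchronousCheckpoint(&closure);  // blocks until Run() has finished
}
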
@@ -1227,17 +1341,18 @@
   // cause ScopedObjectAccessUnchecked to deadlock.
   if (gAborting == 0 && self != nullptr && thread != nullptr && thread->tlsPtr_.opeer != nullptr) {
     ScopedObjectAccessUnchecked soa(self);
-    priority = soa.DecodeField(WellKnownClasses::java_lang_Thread_priority)
+    priority = jni::DecodeArtField(WellKnownClasses::java_lang_Thread_priority)
         ->GetInt(thread->tlsPtr_.opeer);
-    is_daemon = soa.DecodeField(WellKnownClasses::java_lang_Thread_daemon)
+    is_daemon = jni::DecodeArtField(WellKnownClasses::java_lang_Thread_daemon)
         ->GetBoolean(thread->tlsPtr_.opeer);
 
     ObjPtr<mirror::Object> thread_group =
-        soa.DecodeField(WellKnownClasses::java_lang_Thread_group)->GetObject(thread->tlsPtr_.opeer);
+        jni::DecodeArtField(WellKnownClasses::java_lang_Thread_group)
+            ->GetObject(thread->tlsPtr_.opeer);
 
     if (thread_group != nullptr) {
       ArtField* group_name_field =
-          soa.DecodeField(WellKnownClasses::java_lang_ThreadGroup_name);
+          jni::DecodeArtField(WellKnownClasses::java_lang_ThreadGroup_name);
       ObjPtr<mirror::String> group_name_string =
           group_name_field->GetObject(thread_group)->AsString();
       group_name = (group_name_string != nullptr) ? group_name_string->ToModifiedUtf8() : "<null>";
@@ -1713,10 +1828,10 @@
 
     // this.nativePeer = 0;
     if (Runtime::Current()->IsActiveTransaction()) {
-      soa.DecodeField(WellKnownClasses::java_lang_Thread_nativePeer)
+      jni::DecodeArtField(WellKnownClasses::java_lang_Thread_nativePeer)
           ->SetLong<true>(tlsPtr_.opeer, 0);
     } else {
-      soa.DecodeField(WellKnownClasses::java_lang_Thread_nativePeer)
+      jni::DecodeArtField(WellKnownClasses::java_lang_Thread_nativePeer)
           ->SetLong<false>(tlsPtr_.opeer, 0);
     }
     Dbg::PostThreadDeath(self);
@@ -1724,7 +1839,7 @@
     // Thread.join() is implemented as an Object.wait() on the Thread.lock object. Signal anyone
     // who is waiting.
     ObjPtr<mirror::Object> lock =
-        soa.DecodeField(WellKnownClasses::java_lang_Thread_lock)->GetObject(tlsPtr_.opeer);
+        jni::DecodeArtField(WellKnownClasses::java_lang_Thread_lock)->GetObject(tlsPtr_.opeer);
     // (This conditional is only needed for tests, where Thread.lock won't have been set.)
     if (lock != nullptr) {
       StackHandleScope<1> hs(self);
@@ -1754,7 +1869,8 @@
     tlsPtr_.jni_env = nullptr;
   }
   CHECK_NE(GetState(), kRunnable);
-  CHECK_NE(ReadFlag(kCheckpointRequest), true);
+  CHECK(!ReadFlag(kCheckpointRequest));
+  CHECK(!ReadFlag(kEmptyCheckpointRequest));
   CHECK(tlsPtr_.checkpoint_function == nullptr);
   CHECK_EQ(checkpoint_overflow_.size(), 0u);
   CHECK(tlsPtr_.flip_function == nullptr);
@@ -1815,7 +1931,7 @@
 void Thread::RemoveFromThreadGroup(ScopedObjectAccess& soa) {
   // this.group.removeThread(this);
   // group can be null if we're in the compiler or a test.
-  ObjPtr<mirror::Object> ogroup = soa.DecodeField(WellKnownClasses::java_lang_Thread_group)
+  ObjPtr<mirror::Object> ogroup = jni::DecodeArtField(WellKnownClasses::java_lang_Thread_group)
       ->GetObject(tlsPtr_.opeer);
   if (ogroup != nullptr) {
     ScopedLocalRef<jobject> group(soa.Env(), soa.AddLocalReference<jobject>(ogroup));
@@ -1827,14 +1943,6 @@
   }
 }
 
-size_t Thread::NumHandleReferences() {
-  size_t count = 0;
-  for (BaseHandleScope* cur = tlsPtr_.top_handle_scope; cur != nullptr; cur = cur->GetLink()) {
-    count += cur->NumberOfReferences();
-  }
-  return count;
-}
-
 bool Thread::HandleScopeContains(jobject obj) const {
   StackReference<mirror::Object>* hs_entry =
       reinterpret_cast<StackReference<mirror::Object>*>(obj);
@@ -2343,7 +2451,7 @@
       ++i;
     }
     ScopedLocalRef<jobject> ref(soa.Env(), soa.AddLocalReference<jobject>(exception.Get()));
-    InvokeWithJValues(soa, ref.get(), soa.EncodeMethod(exception_init_method), jv_args);
+    InvokeWithJValues(soa, ref.get(), jni::EncodeArtMethod(exception_init_method), jv_args);
     if (LIKELY(!IsExceptionPending())) {
       SetException(exception.Get());
     }
@@ -2432,7 +2540,7 @@
   QUICK_ENTRY_POINT_INFO(pAllocStringFromChars)
   QUICK_ENTRY_POINT_INFO(pAllocStringFromString)
   QUICK_ENTRY_POINT_INFO(pInstanceofNonTrivial)
-  QUICK_ENTRY_POINT_INFO(pCheckCast)
+  QUICK_ENTRY_POINT_INFO(pCheckInstanceOf)
   QUICK_ENTRY_POINT_INFO(pInitializeStaticStorage)
   QUICK_ENTRY_POINT_INFO(pInitializeTypeAndVerifyAccess)
   QUICK_ENTRY_POINT_INFO(pInitializeType)
diff --git a/runtime/thread.h b/runtime/thread.h
index 24038f5..b2983cc 100644
--- a/runtime/thread.h
+++ b/runtime/thread.h
@@ -105,7 +105,8 @@
   kSuspendRequest   = 1,  // If set implies that suspend_count_ > 0 and the Thread should enter the
                           // safepoint handler.
   kCheckpointRequest = 2,  // Request that the thread do some checkpoint work and then continue.
-  kActiveSuspendBarrier = 4  // Register that at least 1 suspend barrier needs to be passed.
+  kEmptyCheckpointRequest = 4,  // Request that the thread do an empty checkpoint and then continue.
+  kActiveSuspendBarrier = 8,  // Register that at least 1 suspend barrier needs to be passed.
 };
 
 enum class StackedShadowFrameType {
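
Keeping every ThreadFlag a distinct power of two is what lets the transition code test several requests with a single mask on the packed 16-bit flags word, as the thread-inl.h changes above do. A small illustrative helper (HasPendingCheckpointWork is hypothetical, not part of this change):

// Sketch only: distinct bits allow combined mask tests on the flags word.
static_assert((kSuspendRequest & kCheckpointRequest) == 0, "flag bits must not overlap");
static_assert((kCheckpointRequest & kEmptyCheckpointRequest) == 0, "flag bits must not overlap");
static_assert((kEmptyCheckpointRequest & kActiveSuspendBarrier) == 0, "flag bits must not overlap");

inline bool HasPendingCheckpointWork(uint16_t flags) {
  // True if either a regular or an empty checkpoint request is pending.
  return (flags & (kCheckpointRequest | kEmptyCheckpointRequest)) != 0;
}
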
@@ -171,6 +172,9 @@
   // Process pending thread suspension request and handle if pending.
   void CheckSuspend() REQUIRES_SHARED(Locks::mutator_lock_);
 
+  // Process a pending empty checkpoint if pending.
+  void CheckEmptyCheckpoint() REQUIRES_SHARED(Locks::mutator_lock_);
+
   static Thread* FromManagedThread(const ScopedObjectAccessAlreadyRunnable& ts,
                                    mirror::Object* thread_peer)
       REQUIRES(Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_)
@@ -237,6 +241,10 @@
 
   bool RequestCheckpoint(Closure* function)
       REQUIRES(Locks::thread_suspend_count_lock_);
+  void RequestSynchronousCheckpoint(Closure* function)
+      REQUIRES(!Locks::thread_suspend_count_lock_, !Locks::thread_list_lock_);
+  bool RequestEmptyCheckpoint()
+      REQUIRES(Locks::thread_suspend_count_lock_);
 
   void SetFlipFunction(Closure* function);
   Closure* GetFlipFunction();
@@ -331,8 +339,7 @@
   }
 
   // Returns the java.lang.Thread's name, or null if this Thread* doesn't have a peer.
-  mirror::String* GetThreadName(const ScopedObjectAccessAlreadyRunnable& ts) const
-      REQUIRES_SHARED(Locks::mutator_lock_);
+  mirror::String* GetThreadName() const REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Sets 'name' to the java.lang.Thread's name. This requires no transition to managed code,
   // allocation, or locking.
@@ -731,9 +738,6 @@
     tlsPtr_.stack_end = tlsPtr_.stack_begin + GetStackOverflowReservedBytes(kRuntimeISA);
   }
 
-  // Install the protected region for implicit stack checks.
-  void InstallImplicitProtection();
-
   bool IsHandlingStackOverflow() const {
     return tlsPtr_.stack_end == tlsPtr_.stack_begin;
   }
@@ -784,19 +788,6 @@
         ManagedStack::TopShadowFrameOffset());
   }
 
-  // Number of references allocated in JNI ShadowFrames on this thread.
-  size_t NumJniShadowFrameReferences() const REQUIRES_SHARED(Locks::mutator_lock_) {
-    return tlsPtr_.managed_stack.NumJniShadowFrameReferences();
-  }
-
-  // Number of references in handle scope on this thread.
-  size_t NumHandleReferences();
-
-  // Number of references allocated in handle scopes & JNI shadow frames on this thread.
-  size_t NumStackReferences() REQUIRES_SHARED(Locks::mutator_lock_) {
-    return NumHandleReferences() + NumJniShadowFrameReferences();
-  }
-
   // Is the given obj in this thread's stack indirect reference table?
   bool HandleScopeContains(jobject obj) const;
 
@@ -860,10 +851,7 @@
     return tls32_.is_gc_marking;
   }
 
-  void SetIsGcMarking(bool is_marking) {
-    CHECK(kUseReadBarrier);
-    tls32_.is_gc_marking = is_marking;
-  }
+  void SetIsGcMarkingAndUpdateEntrypoints(bool is_marking);
 
   bool GetWeakRefAccessEnabled() const {
     CHECK(kUseReadBarrier);
@@ -982,11 +970,6 @@
     tlsPtr_.held_mutexes[level] = mutex;
   }
 
-  void RunCheckpointFunction();
-
-  bool PassActiveSuspendBarriers(Thread* self)
-      REQUIRES(!Locks::thread_suspend_count_lock_);
-
   void ClearSuspendBarrier(AtomicInteger* target)
       REQUIRES(Locks::thread_suspend_count_lock_);
 
@@ -1236,6 +1219,15 @@
                                   bool for_debugger)
       REQUIRES(Locks::thread_suspend_count_lock_);
 
+  void RunCheckpointFunction();
+  void RunEmptyCheckpoint();
+
+  bool PassActiveSuspendBarriers(Thread* self)
+      REQUIRES(!Locks::thread_suspend_count_lock_);
+
+  // Install the protected region for implicit stack checks.
+  void InstallImplicitProtection();
+
   // 32 bits of atomically changed state and flags. Keeping as 32 bits allows an atomic CAS to
   // change from being Suspended to Runnable without a suspend request occurring.
   union PACKED(4) StateAndFlags {
@@ -1578,7 +1570,8 @@
 
 class SCOPED_CAPABILITY ScopedAssertNoThreadSuspension {
  public:
-  ALWAYS_INLINE ScopedAssertNoThreadSuspension(const char* cause) ACQUIRE(Roles::uninterruptible_) {
+  ALWAYS_INLINE explicit ScopedAssertNoThreadSuspension(const char* cause)
+      ACQUIRE(Roles::uninterruptible_) {
     if (kIsDebugBuild) {
       self_ = Thread::Current();
       old_cause_ = self_->StartAssertNoThreadSuspension(cause);
diff --git a/runtime/thread_list.cc b/runtime/thread_list.cc
index eba6666..27fb37a 100644
--- a/runtime/thread_list.cc
+++ b/runtime/thread_list.cc
@@ -32,6 +32,7 @@
 #include "base/timing_logger.h"
 #include "debugger.h"
 #include "gc/collector/concurrent_copying.h"
+#include "gc/reference_processor.h"
 #include "jni_internal.h"
 #include "lock_word.h"
 #include "monitor.h"
@@ -68,7 +69,8 @@
       debug_suspend_all_count_(0),
       unregistering_count_(0),
       suspend_all_historam_("suspend all histogram", 16, 64),
-      long_suspend_(false) {
+      long_suspend_(false),
+      empty_checkpoint_barrier_(new Barrier(0)) {
   CHECK(Monitor::IsValidLockWord(LockWord::FromThinLockId(kMaxThreadId, 1, 0U)));
 }
 
@@ -373,6 +375,43 @@
   return count;
 }
 
+size_t ThreadList::RunEmptyCheckpoint() {
+  Thread* self = Thread::Current();
+  Locks::mutator_lock_->AssertNotExclusiveHeld(self);
+  Locks::thread_list_lock_->AssertNotHeld(self);
+  Locks::thread_suspend_count_lock_->AssertNotHeld(self);
+
+  size_t count = 0;
+  {
+    MutexLock mu(self, *Locks::thread_list_lock_);
+    MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
+    for (Thread* thread : list_) {
+      if (thread != self) {
+        while (true) {
+          if (thread->RequestEmptyCheckpoint()) {
+            // This thread will run an empty checkpoint (decrement the empty checkpoint barrier)
+            // some time in the near future.
+            ++count;
+            break;
+          }
+          if (thread->GetState() != kRunnable) {
+            // The thread was seen suspended, so we are done: it cannot be in the middle of a
+            // mutator heap access.
+            break;
+          }
+        }
+      }
+    }
+  }
+
+  // Wake up the threads blocked on weak ref access so that they will respond to the empty
+  // checkpoint request. Otherwise we would hang, as they block while still in the kRunnable state.
+  Runtime::Current()->GetHeap()->GetReferenceProcessor()->BroadcastForSlowPath(self);
+  Runtime::Current()->BroadcastForNewSystemWeaks(/*broadcast_for_checkpoint*/true);
+
+  return count;
+}
+
 // Request that a checkpoint function be run on all active (non-suspended)
 // threads.  Returns the number of successful requests.
 size_t ThreadList::RunCheckpointOnRunnableThreads(Closure* checkpoint_function) {
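
RunEmptyCheckpoint() only installs the requests; it returns how many runnable threads will eventually call Pass() on the barrier, and the caller is expected to wait for exactly that count on EmptyCheckpointBarrier(). A hedged sketch of the expected calling pattern (for example from the concurrent copying collector; the function name is illustrative):

// Sketch only: issue an empty checkpoint and wait for all runnable threads to
// pass their next suspend point.
void WaitForInFlightMutatorAccessesToFinish() {
  art::Thread* self = art::Thread::Current();
  art::ThreadList* thread_list = art::Runtime::Current()->GetThreadList();
  size_t barrier_count = thread_list->RunEmptyCheckpoint();
  // Each requested thread decrements the barrier once in Thread::RunEmptyCheckpoint();
  // Increment() blocks here until all of them have done so.
  thread_list->EmptyCheckpointBarrier()->Increment(self, barrier_count);
}
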
@@ -1242,7 +1281,7 @@
     // Initialize according to the state of the CC collector.
     bool is_gc_marking =
         Runtime::Current()->GetHeap()->ConcurrentCopyingCollector()->IsMarking();
-    self->SetIsGcMarking(is_gc_marking);
+    self->SetIsGcMarkingAndUpdateEntrypoints(is_gc_marking);
     bool weak_ref_access_enabled =
         Runtime::Current()->GetHeap()->ConcurrentCopyingCollector()->IsWeakRefAccessEnabled();
     self->SetWeakRefAccessEnabled(weak_ref_access_enabled);
diff --git a/runtime/thread_list.h b/runtime/thread_list.h
index b455e31..133d430 100644
--- a/runtime/thread_list.h
+++ b/runtime/thread_list.h
@@ -17,6 +17,7 @@
 #ifndef ART_RUNTIME_THREAD_LIST_H_
 #define ART_RUNTIME_THREAD_LIST_H_
 
+#include "barrier.h"
 #include "base/histogram.h"
 #include "base/mutex.h"
 #include "base/value_object.h"
@@ -100,6 +101,14 @@
   size_t RunCheckpoint(Closure* checkpoint_function, Closure* callback = nullptr)
       REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_);
 
+  // Run an empty checkpoint on threads. Wait until threads pass the next suspend point or are
+  // suspended. This is used to ensure that the threads have finished, or are not in the middle
+  // of, an in-flight mutator heap access (e.g. a read barrier). Runnable threads will respond by
+  // decrementing the empty checkpoint barrier count. This works even when weak ref access is
+  // disabled. Only one concurrent use is currently supported.
+  size_t RunEmptyCheckpoint()
+      REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_);
+
   size_t RunCheckpointOnRunnableThreads(Closure* checkpoint_function)
       REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_);
 
@@ -158,6 +167,10 @@
   void DumpNativeStacks(std::ostream& os)
       REQUIRES(!Locks::thread_list_lock_);
 
+  Barrier* EmptyCheckpointBarrier() {
+    return empty_checkpoint_barrier_.get();
+  }
+
  private:
   uint32_t AllocThreadId(Thread* self);
   void ReleaseThreadId(Thread* self, uint32_t id) REQUIRES(!Locks::allocated_thread_ids_lock_);
@@ -203,6 +216,8 @@
   // Whether or not the current thread suspension is long.
   bool long_suspend_;
 
+  std::unique_ptr<Barrier> empty_checkpoint_barrier_;
+
   friend class Thread;
 
   DISALLOW_COPY_AND_ASSIGN(ThreadList);
diff --git a/runtime/thread_pool.cc b/runtime/thread_pool.cc
index b14f340..65fd999 100644
--- a/runtime/thread_pool.cc
+++ b/runtime/thread_pool.cc
@@ -177,7 +177,7 @@
     }
 
     ++waiting_count_;
-    if (waiting_count_ == GetThreadCount() && tasks_.empty()) {
+    if (waiting_count_ == GetThreadCount() && !HasOutstandingTasks()) {
       // We may be done, lets broadcast to the completion condition.
       completion_condition_.Broadcast(self);
     }
@@ -200,7 +200,7 @@
 }
 
 Task* ThreadPool::TryGetTaskLocked() {
-  if (started_ && !tasks_.empty()) {
+  if (HasOutstandingTasks()) {
     Task* task = tasks_.front();
     tasks_.pop_front();
     return task;
@@ -218,7 +218,7 @@
   }
   // Wait until each thread is waiting and the task list is empty.
   MutexLock mu(self, task_queue_lock_);
-  while (!shutting_down_ && (waiting_count_ != GetThreadCount() || !tasks_.empty())) {
+  while (!shutting_down_ && (waiting_count_ != GetThreadCount() || HasOutstandingTasks())) {
     if (!may_hold_locks) {
       completion_condition_.Wait(self);
     } else {
diff --git a/runtime/thread_pool.h b/runtime/thread_pool.h
index b6c6f02..2ff33a6 100644
--- a/runtime/thread_pool.h
+++ b/runtime/thread_pool.h
@@ -100,7 +100,8 @@
   ThreadPool(const char* name, size_t num_threads);
   virtual ~ThreadPool();
 
-  // Wait for all tasks currently on queue to get completed.
+  // Wait for all tasks currently on the queue to complete. If the pool has been stopped, only
+  // wait until the already running tasks are done.
   void Wait(Thread* self, bool do_work, bool may_hold_locks) REQUIRES(!task_queue_lock_);
 
   size_t GetTaskCount(Thread* self) REQUIRES(!task_queue_lock_);
@@ -130,6 +131,10 @@
     return shutting_down_;
   }
 
+  bool HasOutstandingTasks() const REQUIRES(task_queue_lock_) {
+    return started_ && !tasks_.empty();
+  }
+
   const std::string name_;
   Mutex task_queue_lock_;
   ConditionVariable task_queue_condition_ GUARDED_BY(task_queue_lock_);
diff --git a/runtime/thread_pool_test.cc b/runtime/thread_pool_test.cc
index d5f17d1..89e9005 100644
--- a/runtime/thread_pool_test.cc
+++ b/runtime/thread_pool_test.cc
@@ -98,6 +98,26 @@
   thread_pool.Wait(self, false, false);
 }
 
+TEST_F(ThreadPoolTest, StopWait) {
+  Thread* self = Thread::Current();
+  ThreadPool thread_pool("Thread pool test thread pool", num_threads);
+
+  AtomicInteger count(0);
+  static const int32_t num_tasks = num_threads * 100;
+  for (int32_t i = 0; i < num_tasks; ++i) {
+    thread_pool.AddTask(self, new CountTask(&count));
+  }
+
+  // Signal the threads to start processing tasks.
+  thread_pool.StartWorkers(self);
+  usleep(200);
+  thread_pool.StopWorkers(self);
+
+  thread_pool.Wait(self, false, false);  // We should not deadlock here.
+  // Drain the task list.
+  thread_pool.Wait(self, /* do_work */ true, false);  // We should not deadlock here.
+}
+
 class TreeTask : public Task {
  public:
   TreeTask(ThreadPool* const thread_pool, AtomicInteger* count, int depth)
diff --git a/runtime/transaction.cc b/runtime/transaction.cc
index 9f8d981..c5da5d2 100644
--- a/runtime/transaction.cc
+++ b/runtime/transaction.cc
@@ -167,29 +167,29 @@
   array_log.LogValue(index, value);
 }
 
-void Transaction::RecordResolveString(mirror::DexCache* dex_cache, uint32_t string_idx) {
+void Transaction::RecordResolveString(ObjPtr<mirror::DexCache> dex_cache, uint32_t string_idx) {
   DCHECK(dex_cache != nullptr);
   DCHECK_LT(string_idx, dex_cache->GetDexFile()->NumStringIds());
   MutexLock mu(Thread::Current(), log_lock_);
   resolve_string_logs_.push_back(ResolveStringLog(dex_cache, string_idx));
 }
 
-void Transaction::RecordStrongStringInsertion(mirror::String* s) {
+void Transaction::RecordStrongStringInsertion(ObjPtr<mirror::String> s) {
   InternStringLog log(s, InternStringLog::kStrongString, InternStringLog::kInsert);
   LogInternedString(log);
 }
 
-void Transaction::RecordWeakStringInsertion(mirror::String* s) {
+void Transaction::RecordWeakStringInsertion(ObjPtr<mirror::String> s) {
   InternStringLog log(s, InternStringLog::kWeakString, InternStringLog::kInsert);
   LogInternedString(log);
 }
 
-void Transaction::RecordStrongStringRemoval(mirror::String* s) {
+void Transaction::RecordStrongStringRemoval(ObjPtr<mirror::String> s) {
   InternStringLog log(s, InternStringLog::kStrongString, InternStringLog::kRemove);
   LogInternedString(log);
 }
 
-void Transaction::RecordWeakStringRemoval(mirror::String* s) {
+void Transaction::RecordWeakStringRemoval(ObjPtr<mirror::String> s) {
   InternStringLog log(s, InternStringLog::kWeakString, InternStringLog::kRemove);
   LogInternedString(log);
 }
@@ -470,10 +470,10 @@
     case InternStringLog::kInsert: {
       switch (string_kind_) {
         case InternStringLog::kStrongString:
-          intern_table->RemoveStrongFromTransaction(str_);
+          intern_table->RemoveStrongFromTransaction(str_.Read());
           break;
         case InternStringLog::kWeakString:
-          intern_table->RemoveWeakFromTransaction(str_);
+          intern_table->RemoveWeakFromTransaction(str_.Read());
           break;
         default:
           LOG(FATAL) << "Unknown interned string kind";
@@ -484,10 +484,10 @@
     case InternStringLog::kRemove: {
       switch (string_kind_) {
         case InternStringLog::kStrongString:
-          intern_table->InsertStrongFromTransaction(str_);
+          intern_table->InsertStrongFromTransaction(str_.Read());
           break;
         case InternStringLog::kWeakString:
-          intern_table->InsertWeakFromTransaction(str_);
+          intern_table->InsertWeakFromTransaction(str_.Read());
           break;
         default:
           LOG(FATAL) << "Unknown interned string kind";
@@ -502,14 +502,15 @@
 }
 
 void Transaction::InternStringLog::VisitRoots(RootVisitor* visitor) {
-  visitor->VisitRoot(reinterpret_cast<mirror::Object**>(&str_), RootInfo(kRootInternedString));
+  str_.VisitRoot(visitor, RootInfo(kRootInternedString));
 }
 
 void Transaction::ResolveStringLog::Undo() {
   dex_cache_.Read()->ClearString(string_idx_);
 }
 
-Transaction::ResolveStringLog::ResolveStringLog(mirror::DexCache* dex_cache, uint32_t string_idx)
+Transaction::ResolveStringLog::ResolveStringLog(ObjPtr<mirror::DexCache> dex_cache,
+                                                uint32_t string_idx)
     : dex_cache_(dex_cache),
       string_idx_(string_idx) {
   DCHECK(dex_cache != nullptr);
@@ -520,6 +521,15 @@
   dex_cache_.VisitRoot(visitor, RootInfo(kRootVMInternal));
 }
 
+Transaction::InternStringLog::InternStringLog(ObjPtr<mirror::String> s,
+                                              StringKind kind,
+                                              StringOp op)
+    : str_(s),
+      string_kind_(kind),
+      string_op_(op) {
+  DCHECK(s != nullptr);
+}
+
 void Transaction::ArrayLog::LogValue(size_t index, uint64_t value) {
   auto it = array_values_.find(index);
   if (it == array_values_.end()) {
diff --git a/runtime/transaction.h b/runtime/transaction.h
index 584dfb8..2ec2f50 100644
--- a/runtime/transaction.h
+++ b/runtime/transaction.h
@@ -83,21 +83,21 @@
       REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Record intern string table changes.
-  void RecordStrongStringInsertion(mirror::String* s)
+  void RecordStrongStringInsertion(ObjPtr<mirror::String> s)
       REQUIRES(Locks::intern_table_lock_)
       REQUIRES(!log_lock_);
-  void RecordWeakStringInsertion(mirror::String* s)
+  void RecordWeakStringInsertion(ObjPtr<mirror::String> s)
       REQUIRES(Locks::intern_table_lock_)
       REQUIRES(!log_lock_);
-  void RecordStrongStringRemoval(mirror::String* s)
+  void RecordStrongStringRemoval(ObjPtr<mirror::String> s)
       REQUIRES(Locks::intern_table_lock_)
       REQUIRES(!log_lock_);
-  void RecordWeakStringRemoval(mirror::String* s)
+  void RecordWeakStringRemoval(ObjPtr<mirror::String> s)
       REQUIRES(Locks::intern_table_lock_)
       REQUIRES(!log_lock_);
 
   // Record resolve string.
-  void RecordResolveString(mirror::DexCache* dex_cache, uint32_t string_idx)
+  void RecordResolveString(ObjPtr<mirror::DexCache> dex_cache, uint32_t string_idx)
       REQUIRES_SHARED(Locks::mutator_lock_)
       REQUIRES(!log_lock_);
 
@@ -182,10 +182,7 @@
       kInsert,
       kRemove
     };
-    InternStringLog(mirror::String* s, StringKind kind, StringOp op)
-      : str_(s), string_kind_(kind), string_op_(op) {
-      DCHECK(s != nullptr);
-    }
+    InternStringLog(ObjPtr<mirror::String> s, StringKind kind, StringOp op);
 
     void Undo(InternTable* intern_table)
         REQUIRES_SHARED(Locks::mutator_lock_)
@@ -193,14 +190,14 @@
     void VisitRoots(RootVisitor* visitor) REQUIRES_SHARED(Locks::mutator_lock_);
 
    private:
-    mirror::String* str_;
+    GcRoot<mirror::String> str_;
     const StringKind string_kind_;
     const StringOp string_op_;
   };
 
   class ResolveStringLog : public ValueObject {
    public:
-    ResolveStringLog(mirror::DexCache* dex_cache, uint32_t string_idx);
+    ResolveStringLog(ObjPtr<mirror::DexCache> dex_cache, uint32_t string_idx);
 
     void Undo() REQUIRES_SHARED(Locks::mutator_lock_);
 
diff --git a/runtime/utf_test.cc b/runtime/utf_test.cc
index 3284925..d1e9751 100644
--- a/runtime/utf_test.cc
+++ b/runtime/utf_test.cc
@@ -113,8 +113,8 @@
   EXPECT_EQ(2u, CountModifiedUtf8Chars(reinterpret_cast<const char *>(kSurrogateEncoding)));
 }
 
-static void AssertConversion(const std::vector<uint16_t> input,
-                             const std::vector<uint8_t> expected) {
+static void AssertConversion(const std::vector<uint16_t>& input,
+                             const std::vector<uint8_t>& expected) {
   ASSERT_EQ(expected.size(), CountUtf8Bytes(&input[0], input.size()));
 
   std::vector<uint8_t> output(expected.size());
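The AssertConversion change above switches the vector parameters from pass-by-value to pass-by-const-reference, which avoids copying the test data on every call. A tiny self-contained illustration of the difference, with hypothetical helper names rather than code from utf_test.cc:

#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <vector>

// Copies the whole vector on every call.
static size_t SumByValue(const std::vector<uint8_t> bytes) {
  size_t sum = 0;
  for (uint8_t b : bytes) sum += b;
  return sum;
}

// Binds directly to the caller's vector; no copy.
static size_t SumByRef(const std::vector<uint8_t>& bytes) {
  size_t sum = 0;
  for (uint8_t b : bytes) sum += b;
  return sum;
}

int main() {
  std::vector<uint8_t> data(1024, 1);
  std::printf("%zu %zu\n", SumByValue(data), SumByRef(data));  // same result, one avoidable copy
  return 0;
}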
diff --git a/runtime/utils.h b/runtime/utils.h
index 94738d2..1e98057 100644
--- a/runtime/utils.h
+++ b/runtime/utils.h
@@ -30,10 +30,8 @@
 #include "arch/instruction_set.h"
 #include "base/casts.h"
 #include "base/logging.h"
-#include "base/mutex.h"
 #include "base/stringpiece.h"
 #include "globals.h"
-#include "obj_ptr.h"
 #include "primitive.h"
 
 namespace art {
@@ -281,24 +279,34 @@
 using UsageFn = void (*)(const char*, ...);
 
 template <typename T>
-static void ParseUintOption(const StringPiece& option,
+static void ParseIntOption(const StringPiece& option,
                             const std::string& option_name,
                             T* out,
-                            UsageFn Usage,
+                            UsageFn usage,
                             bool is_long_option = true) {
   std::string option_prefix = option_name + (is_long_option ? "=" : "");
   DCHECK(option.starts_with(option_prefix)) << option << " " << option_prefix;
   const char* value_string = option.substr(option_prefix.size()).data();
   int64_t parsed_integer_value = 0;
   if (!ParseInt(value_string, &parsed_integer_value)) {
-    Usage("Failed to parse %s '%s' as an integer", option_name.c_str(), value_string);
-  }
-  if (parsed_integer_value < 0) {
-    Usage("%s passed a negative value %d", option_name.c_str(), parsed_integer_value);
+    usage("Failed to parse %s '%s' as an integer", option_name.c_str(), value_string);
   }
   *out = dchecked_integral_cast<T>(parsed_integer_value);
 }
 
+template <typename T>
+static void ParseUintOption(const StringPiece& option,
+                            const std::string& option_name,
+                            T* out,
+                            UsageFn usage,
+                            bool is_long_option = true) {
+  ParseIntOption(option, option_name, out, usage, is_long_option);
+  if (*out < 0) {
+    usage("%s passed a negative value %d", option_name.c_str(), *out);
+    *out = 0;
+  }
+}
+
 void ParseDouble(const std::string& option,
                  char after_char,
                  double min,
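The utils.h change above splits the old helper into a generic ParseIntOption plus a ParseUintOption wrapper that delegates to it and then rejects negative results. The sketch below shows that delegate-then-validate shape in a standalone form; plain std::string and strtoll stand in for ART's StringPiece and ParseInt, and all names ending in Sketch are hypothetical, not the actual utils.h code.

#include <cstdint>
#include <cstdio>
#include <cstdlib>
#include <string>

using UsageFn = void (*)(const char*, ...);

static void Usage(const char* msg, ...) { std::puts(msg); }

template <typename T>
void ParseIntOptionSketch(const std::string& option, const std::string& name, T* out, UsageFn usage) {
  const std::string prefix = name + "=";
  // Assumes `option` starts with "<name>="; the real helper DCHECKs this and reports parse errors.
  const char* value = option.c_str() + prefix.size();
  *out = static_cast<T>(std::strtoll(value, nullptr, 10));
}

template <typename T>
void ParseUintOptionSketch(const std::string& option, const std::string& name, T* out, UsageFn usage) {
  ParseIntOptionSketch(option, name, out, usage);  // reuse the signed parser
  if (*out < 0) {
    usage("%s passed a negative value", name.c_str());
    *out = 0;  // clamp, mirroring the wrapper above
  }
}

int main() {
  int64_t threads = 0;
  ParseUintOptionSketch(std::string("--threads=-3"), std::string("--threads"), &threads, Usage);
  std::printf("threads=%lld\n", static_cast<long long>(threads));  // prints 0 after clamping
  return 0;
}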
diff --git a/runtime/vdex_file.h b/runtime/vdex_file.h
index 28f9bb3..edd6ffe 100644
--- a/runtime/vdex_file.h
+++ b/runtime/vdex_file.h
@@ -20,6 +20,7 @@
 #include <stdint.h>
 #include <string>
 
+#include "base/array_ref.h"
 #include "base/macros.h"
 #include "mem_map.h"
 #include "os.h"
@@ -44,8 +45,11 @@
    public:
     Header(uint32_t dex_size, uint32_t verifier_deps_size, uint32_t quickening_info_size);
 
+    const char* GetMagic() const { return reinterpret_cast<const char*>(magic_); }
+    const char* GetVersion() const { return reinterpret_cast<const char*>(version_); }
     bool IsMagicValid() const;
     bool IsVersionValid() const;
+    bool IsValid() const { return IsMagicValid() && IsVersionValid(); }
 
     uint32_t GetDexSize() const { return dex_size_; }
     uint32_t GetVerifierDepsSize() const { return verifier_deps_size_; }
@@ -71,6 +75,15 @@
   const uint8_t* End() const { return mmap_->End(); }
   size_t Size() const { return mmap_->Size(); }
 
+  const Header& GetHeader() const {
+    return *reinterpret_cast<const Header*>(Begin());
+  }
+
+  ArrayRef<const uint8_t> GetVerifierDepsData() const {
+    return ArrayRef<const uint8_t>(
+        Begin() + sizeof(Header) + GetHeader().GetDexSize(), GetHeader().GetVerifierDepsSize());
+  }
+
  private:
   explicit VdexFile(MemMap* mmap) : mmap_(mmap) {}
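GetVerifierDepsData() above locates the verifier dependencies by skipping the header and the dex bytes, which reflects the vdex layout of header, dex data, verifier deps, then quickening info. The sketch below only performs that offset arithmetic with a hypothetical plain struct; the field widths and the example magic/version bytes are illustrative, not the authoritative VdexFile::Header definition.

#include <cstddef>
#include <cstdint>
#include <cstdio>

struct VdexHeaderSketch {
  uint8_t magic[4];
  uint8_t version[4];
  uint32_t dex_size;
  uint32_t verifier_deps_size;
  uint32_t quickening_info_size;
};

int main() {
  VdexHeaderSketch h = {{'v', 'd', 'e', 'x'}, {'0', '0', '3', '\0'}, 4096, 512, 128};
  size_t deps_offset = sizeof(VdexHeaderSketch) + h.dex_size;     // Begin() + sizeof(Header) + dex size
  size_t quickening_offset = deps_offset + h.verifier_deps_size;  // quickening info follows the deps
  std::printf("deps at %zu (%u bytes), quickening at %zu (%u bytes)\n",
              deps_offset, h.verifier_deps_size, quickening_offset, h.quickening_info_size);
  return 0;
}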
 
diff --git a/runtime/verifier/method_verifier.cc b/runtime/verifier/method_verifier.cc
index 97bc79c..d9e3ea7 100644
--- a/runtime/verifier/method_verifier.cc
+++ b/runtime/verifier/method_verifier.cc
@@ -956,7 +956,7 @@
   delete last_fail_message;
 }
 
-void MethodVerifier::AppendToLastFailMessage(std::string append) {
+void MethodVerifier::AppendToLastFailMessage(const std::string& append) {
   size_t failure_num = failure_messages_.size();
   DCHECK_NE(failure_num, 0U);
   std::ostringstream* last_fail_message = failure_messages_[failure_num - 1];
diff --git a/runtime/verifier/method_verifier.h b/runtime/verifier/method_verifier.h
index eb8b7a6..c6ce583 100644
--- a/runtime/verifier/method_verifier.h
+++ b/runtime/verifier/method_verifier.h
@@ -318,7 +318,7 @@
   void PrependToLastFailMessage(std::string);
 
   // Adds the given string to the end of the last failure message.
-  void AppendToLastFailMessage(std::string);
+  void AppendToLastFailMessage(const std::string& append);
 
   // Verification result for method(s). Includes a (maximum) failure kind, and (the union of)
   // all failure types.
diff --git a/runtime/verifier/method_verifier_test.cc b/runtime/verifier/method_verifier_test.cc
index 837ee2d..52be2df 100644
--- a/runtime/verifier/method_verifier_test.cc
+++ b/runtime/verifier/method_verifier_test.cc
@@ -23,6 +23,7 @@
 #include "common_runtime_test.h"
 #include "dex_file.h"
 #include "scoped_thread_state_change-inl.h"
+#include "utils.h"
 #include "verifier_log_mode.h"
 
 namespace art {
@@ -40,7 +41,14 @@
     std::string error_msg;
     MethodVerifier::FailureKind failure = MethodVerifier::VerifyClass(
         self, klass, nullptr, true, HardFailLogMode::kLogWarning, &error_msg);
-    ASSERT_TRUE(failure == MethodVerifier::kNoFailure) << error_msg;
+
+    if (StartsWith(descriptor, "Ljava/lang/invoke")) {
+      ASSERT_TRUE(failure == MethodVerifier::kSoftFailure ||
+                  failure == MethodVerifier::kNoFailure) << error_msg;
+
+    } else {
+      ASSERT_TRUE(failure == MethodVerifier::kNoFailure) << error_msg;
+    }
   }
 
   void VerifyDexFile(const DexFile& dex)
diff --git a/runtime/verifier/register_line.cc b/runtime/verifier/register_line.cc
index 4ec2da6..da3d946 100644
--- a/runtime/verifier/register_line.cc
+++ b/runtime/verifier/register_line.cc
@@ -411,7 +411,7 @@
   }
 
   // Scan the map for the same value.
-  for (const std::pair<uint32_t, uint32_t>& pair : search_map) {
+  for (const std::pair<const uint32_t, uint32_t>& pair : search_map) {
     if (pair.first != src && pair.second == src_lock_levels) {
       return true;
     }
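The register_line.cc change above matters because std::map<K, V>::value_type is std::pair<const K, V>: a range-for over const std::pair<uint32_t, uint32_t>& must materialize a temporary copy of each element, while the const-key form binds directly. A minimal standalone demonstration, not the verifier's actual lock-level map:

#include <cstdint>
#include <cstdio>
#include <map>

int main() {
  std::map<uint32_t, uint32_t> search_map = {{1, 10}, {2, 20}};
  uint32_t hits = 0;
  for (const std::pair<const uint32_t, uint32_t>& pair : search_map) {  // binds without a per-element copy
    if (pair.second == 20) {
      ++hits;
    }
  }
  std::printf("hits=%u\n", hits);
  return 0;
}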
diff --git a/runtime/verifier/verifier_deps.cc b/runtime/verifier/verifier_deps.cc
index 3c7fb7a..c395612 100644
--- a/runtime/verifier/verifier_deps.cc
+++ b/runtime/verifier/verifier_deps.cc
@@ -39,6 +39,11 @@
   return (it == dex_deps_.end()) ? nullptr : it->second.get();
 }
 
+const VerifierDeps::DexFileDeps* VerifierDeps::GetDexFileDeps(const DexFile& dex_file) const {
+  auto it = dex_deps_.find(&dex_file);
+  return (it == dex_deps_.end()) ? nullptr : it->second.get();
+}
+
 template <typename T>
 uint16_t VerifierDeps::GetAccessFlags(T* element) {
   static_assert(kAccJavaFlagsMask == 0xFFFF, "Unexpected value of a constant");
@@ -95,12 +100,12 @@
   return new_id;
 }
 
-std::string VerifierDeps::GetStringFromId(const DexFile& dex_file, uint32_t string_id) {
+std::string VerifierDeps::GetStringFromId(const DexFile& dex_file, uint32_t string_id) const {
   uint32_t num_ids_in_dex = dex_file.NumStringIds();
   if (string_id < num_ids_in_dex) {
     return std::string(dex_file.StringDataByIdx(string_id));
   } else {
-    DexFileDeps* deps = GetDexFileDeps(dex_file);
+    const DexFileDeps* deps = GetDexFileDeps(dex_file);
     DCHECK(deps != nullptr);
     string_id -= num_ids_in_dex;
     CHECK_LT(string_id, deps->strings_.size());
@@ -108,7 +113,7 @@
   }
 }
 
-bool VerifierDeps::IsInClassPath(ObjPtr<mirror::Class> klass) {
+bool VerifierDeps::IsInClassPath(ObjPtr<mirror::Class> klass) const {
   DCHECK(klass != nullptr);
 
   ObjPtr<mirror::DexCache> dex_cache = klass->GetDexCache();
@@ -280,6 +285,22 @@
   return callbacks->GetVerifierDeps();
 }
 
+void VerifierDeps::MaybeRecordVerificationStatus(const DexFile& dex_file,
+                                                 uint16_t type_idx,
+                                                 MethodVerifier::FailureKind failure_kind) {
+  if (failure_kind == MethodVerifier::kNoFailure) {
+    // We only record classes that did not fully verify at compile time.
+    return;
+  }
+
+  VerifierDeps* singleton = GetVerifierDepsSingleton();
+  if (singleton != nullptr) {
+    DexFileDeps* dex_deps = singleton->GetDexFileDeps(dex_file);
+    MutexLock mu(Thread::Current(), *Locks::verifier_deps_lock_);
+    dex_deps->unverified_classes_.push_back(type_idx);
+  }
+}
+
 void VerifierDeps::MaybeRecordClassResolution(const DexFile& dex_file,
                                               uint16_t type_idx,
                                               mirror::Class* klass) {
@@ -360,6 +381,14 @@
   }
 }
 
+static inline void EncodeUint16Vector(std::vector<uint8_t>* out,
+                                      const std::vector<uint16_t>& vector) {
+  EncodeUnsignedLeb128(out, vector.size());
+  for (uint16_t entry : vector) {
+    EncodeUnsignedLeb128(out, entry);
+  }
+}
+
 template<typename T>
 static inline void DecodeSet(const uint8_t** in, const uint8_t* end, std::set<T>* set) {
   DCHECK(set->empty());
@@ -371,6 +400,17 @@
   }
 }
 
+static inline void DecodeUint16Vector(const uint8_t** in,
+                                      const uint8_t* end,
+                                      std::vector<uint16_t>* vector) {
+  DCHECK(vector->empty());
+  size_t num_entries = DecodeUint32WithOverflowCheck(in, end);
+  vector->reserve(num_entries);
+  for (size_t i = 0; i < num_entries; ++i) {
+    vector->push_back(dchecked_integral_cast<uint16_t>(DecodeUint32WithOverflowCheck(in, end)));
+  }
+}
+
 static inline void EncodeStringVector(std::vector<uint8_t>* out,
                                       const std::vector<std::string>& strings) {
   EncodeUnsignedLeb128(out, strings.size());
@@ -396,33 +436,45 @@
   }
 }
 
-void VerifierDeps::Encode(std::vector<uint8_t>* buffer) const {
+void VerifierDeps::Encode(const std::vector<const DexFile*>& dex_files,
+                          std::vector<uint8_t>* buffer) const {
   MutexLock mu(Thread::Current(), *Locks::verifier_deps_lock_);
-  for (auto& entry : dex_deps_) {
-    EncodeStringVector(buffer, entry.second->strings_);
-    EncodeSet(buffer, entry.second->assignable_types_);
-    EncodeSet(buffer, entry.second->unassignable_types_);
-    EncodeSet(buffer, entry.second->classes_);
-    EncodeSet(buffer, entry.second->fields_);
-    EncodeSet(buffer, entry.second->direct_methods_);
-    EncodeSet(buffer, entry.second->virtual_methods_);
-    EncodeSet(buffer, entry.second->interface_methods_);
+  for (const DexFile* dex_file : dex_files) {
+    const DexFileDeps& deps = *GetDexFileDeps(*dex_file);
+    EncodeStringVector(buffer, deps.strings_);
+    EncodeSet(buffer, deps.assignable_types_);
+    EncodeSet(buffer, deps.unassignable_types_);
+    EncodeSet(buffer, deps.classes_);
+    EncodeSet(buffer, deps.fields_);
+    EncodeSet(buffer, deps.direct_methods_);
+    EncodeSet(buffer, deps.virtual_methods_);
+    EncodeSet(buffer, deps.interface_methods_);
+    EncodeUint16Vector(buffer, deps.unverified_classes_);
   }
 }
 
-VerifierDeps::VerifierDeps(const std::vector<const DexFile*>& dex_files, ArrayRef<uint8_t> data)
+VerifierDeps::VerifierDeps(const std::vector<const DexFile*>& dex_files,
+                           ArrayRef<const uint8_t> data)
     : VerifierDeps(dex_files) {
+  if (data.empty()) {
+    // Return early, as the first thing we expect from VerifierDeps data is
+    // the number of created strings, even if there is no dependency.
+    // Currently, only the boot image does not have any VerifierDeps data.
+    return;
+  }
   const uint8_t* data_start = data.data();
   const uint8_t* data_end = data_start + data.size();
-  for (auto& entry : dex_deps_) {
-    DecodeStringVector(&data_start, data_end, &entry.second->strings_);
-    DecodeSet(&data_start, data_end, &entry.second->assignable_types_);
-    DecodeSet(&data_start, data_end, &entry.second->unassignable_types_);
-    DecodeSet(&data_start, data_end, &entry.second->classes_);
-    DecodeSet(&data_start, data_end, &entry.second->fields_);
-    DecodeSet(&data_start, data_end, &entry.second->direct_methods_);
-    DecodeSet(&data_start, data_end, &entry.second->virtual_methods_);
-    DecodeSet(&data_start, data_end, &entry.second->interface_methods_);
+  for (const DexFile* dex_file : dex_files) {
+    DexFileDeps* deps = GetDexFileDeps(*dex_file);
+    DecodeStringVector(&data_start, data_end, &deps->strings_);
+    DecodeSet(&data_start, data_end, &deps->assignable_types_);
+    DecodeSet(&data_start, data_end, &deps->unassignable_types_);
+    DecodeSet(&data_start, data_end, &deps->classes_);
+    DecodeSet(&data_start, data_end, &deps->fields_);
+    DecodeSet(&data_start, data_end, &deps->direct_methods_);
+    DecodeSet(&data_start, data_end, &deps->virtual_methods_);
+    DecodeSet(&data_start, data_end, &deps->interface_methods_);
+    DecodeUint16Vector(&data_start, data_end, &deps->unverified_classes_);
   }
   CHECK_LE(data_start, data_end);
 }
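The new EncodeUint16Vector/DecodeUint16Vector above serialize the unverified_classes_ list as a ULEB128 count followed by one ULEB128 value per entry. The round trip below sketches that wire format with simplified encoder/decoder helpers; they stand in for ART's EncodeUnsignedLeb128 and DecodeUint32WithOverflowCheck and omit overflow and bounds checking.

#include <cassert>
#include <cstdint>
#include <vector>

static void EncodeUleb128(std::vector<uint8_t>* out, uint32_t value) {
  do {
    uint8_t byte = value & 0x7f;
    value >>= 7;
    out->push_back(value != 0 ? (byte | 0x80) : byte);
  } while (value != 0);
}

static uint32_t DecodeUleb128(const uint8_t** in) {
  uint32_t result = 0;
  int shift = 0;
  uint8_t byte;
  do {
    byte = *(*in)++;
    result |= static_cast<uint32_t>(byte & 0x7f) << shift;
    shift += 7;
  } while ((byte & 0x80) != 0);
  return result;
}

int main() {
  std::vector<uint16_t> unverified = {3, 500, 1024};
  std::vector<uint8_t> buffer;
  EncodeUleb128(&buffer, static_cast<uint32_t>(unverified.size()));  // count first, like EncodeUint16Vector
  for (uint16_t entry : unverified) EncodeUleb128(&buffer, entry);

  const uint8_t* in = buffer.data();
  std::vector<uint16_t> decoded;
  uint32_t num_entries = DecodeUleb128(&in);                         // like DecodeUint16Vector
  for (uint32_t i = 0; i < num_entries; ++i) {
    decoded.push_back(static_cast<uint16_t>(DecodeUleb128(&in)));
  }
  assert(decoded == unverified);
  return 0;
}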
@@ -463,7 +515,358 @@
          (fields_ == rhs.fields_) &&
          (direct_methods_ == rhs.direct_methods_) &&
          (virtual_methods_ == rhs.virtual_methods_) &&
-         (interface_methods_ == rhs.interface_methods_);
+         (interface_methods_ == rhs.interface_methods_) &&
+         (unverified_classes_ == rhs.unverified_classes_);
+}
+
+void VerifierDeps::Dump(VariableIndentationOutputStream* vios) const {
+  for (const auto& dep : dex_deps_) {
+    const DexFile& dex_file = *dep.first;
+    vios->Stream()
+        << "Dependencies of "
+        << dex_file.GetLocation()
+        << ":\n";
+
+    ScopedIndentation indent(vios);
+
+    for (const std::string& str : dep.second->strings_) {
+      vios->Stream() << "Extra string: " << str << "\n";
+    }
+
+    for (const TypeAssignability& entry : dep.second->assignable_types_) {
+      vios->Stream()
+        << GetStringFromId(dex_file, entry.GetSource())
+        << " must be assignable to "
+        << GetStringFromId(dex_file, entry.GetDestination())
+        << "\n";
+    }
+
+    for (const TypeAssignability& entry : dep.second->unassignable_types_) {
+      vios->Stream()
+        << GetStringFromId(dex_file, entry.GetSource())
+        << " must not be assignable to "
+        << GetStringFromId(dex_file, entry.GetDestination())
+        << "\n";
+    }
+
+    for (const ClassResolution& entry : dep.second->classes_) {
+      vios->Stream()
+          << dex_file.StringByTypeIdx(entry.GetDexTypeIndex())
+          << (entry.IsResolved() ? " must be resolved" : " must not be resolved")
+          << " with access flags " << std::hex << entry.GetAccessFlags() << std::dec
+          << "\n";
+    }
+
+    for (const FieldResolution& entry : dep.second->fields_) {
+      const DexFile::FieldId& field_id = dex_file.GetFieldId(entry.GetDexFieldIndex());
+      vios->Stream()
+          << dex_file.GetFieldDeclaringClassDescriptor(field_id) << "->"
+          << dex_file.GetFieldName(field_id) << ":"
+          << dex_file.GetFieldTypeDescriptor(field_id)
+          << " is expected to be ";
+      if (!entry.IsResolved()) {
+        vios->Stream() << "unresolved\n";
+      } else {
+        vios->Stream()
+          << "in class "
+          << GetStringFromId(dex_file, entry.GetDeclaringClassIndex())
+          << ", and have the access flags " << std::hex << entry.GetAccessFlags() << std::dec
+          << "\n";
+      }
+    }
+
+    for (const auto& entry :
+            { std::make_pair(kDirectMethodResolution, dep.second->direct_methods_),
+              std::make_pair(kVirtualMethodResolution, dep.second->virtual_methods_),
+              std::make_pair(kInterfaceMethodResolution, dep.second->interface_methods_) }) {
+      for (const MethodResolution& method : entry.second) {
+        const DexFile::MethodId& method_id = dex_file.GetMethodId(method.GetDexMethodIndex());
+        vios->Stream()
+            << dex_file.GetMethodDeclaringClassDescriptor(method_id) << "->"
+            << dex_file.GetMethodName(method_id)
+            << dex_file.GetMethodSignature(method_id).ToString()
+            << " is expected to be ";
+        if (!method.IsResolved()) {
+          vios->Stream() << "unresolved\n";
+        } else {
+          vios->Stream()
+            << "in class "
+            << GetStringFromId(dex_file, method.GetDeclaringClassIndex())
+            << ", have the access flags " << std::hex << method.GetAccessFlags() << std::dec
+            << ", and be of kind " << entry.first
+            << "\n";
+        }
+      }
+    }
+
+    for (uint16_t type_index : dep.second->unverified_classes_) {
+      vios->Stream()
+          << dex_file.StringByTypeIdx(type_index)
+          << " is expected to be verified at runtime\n";
+    }
+  }
+}
+
+bool VerifierDeps::ValidateDependencies(Handle<mirror::ClassLoader> class_loader,
+                                        Thread* self) const {
+  for (const auto& entry : dex_deps_) {
+    if (!VerifyDexFile(class_loader, *entry.first, *entry.second, self)) {
+      return false;
+    }
+  }
+  return true;
+}
+
+// TODO: share that helper with other parts of the compiler that have
+// the same lookup pattern.
+static mirror::Class* FindClassAndClearException(ClassLinker* class_linker,
+                                                 Thread* self,
+                                                 const char* name,
+                                                 Handle<mirror::ClassLoader> class_loader)
+    REQUIRES_SHARED(Locks::mutator_lock_) {
+  mirror::Class* result = class_linker->FindClass(self, name, class_loader);
+  if (result == nullptr) {
+    DCHECK(self->IsExceptionPending());
+    self->ClearException();
+  }
+  return result;
+}
+
+bool VerifierDeps::VerifyAssignability(Handle<mirror::ClassLoader> class_loader,
+                                       const DexFile& dex_file,
+                                       const std::set<TypeAssignability>& assignables,
+                                       bool expected_assignability,
+                                       Thread* self) const {
+  StackHandleScope<2> hs(self);
+  ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
+  MutableHandle<mirror::Class> source(hs.NewHandle<mirror::Class>(nullptr));
+  MutableHandle<mirror::Class> destination(hs.NewHandle<mirror::Class>(nullptr));
+
+  for (const auto& entry : assignables) {
+    const std::string& destination_desc = GetStringFromId(dex_file, entry.GetDestination());
+    destination.Assign(
+        FindClassAndClearException(class_linker, self, destination_desc.c_str(), class_loader));
+    const std::string& source_desc = GetStringFromId(dex_file, entry.GetSource());
+    source.Assign(
+        FindClassAndClearException(class_linker, self, source_desc.c_str(), class_loader));
+
+    if (destination.Get() == nullptr) {
+      LOG(INFO) << "VerifierDeps: Could not resolve class " << destination_desc;
+      return false;
+    }
+
+    if (source.Get() == nullptr) {
+      LOG(INFO) << "VerifierDeps: Could not resolve class " << source_desc;
+      return false;
+    }
+
+    DCHECK(destination->IsResolved() && source->IsResolved());
+    if (destination->IsAssignableFrom(source.Get()) != expected_assignability) {
+      LOG(INFO) << "VerifierDeps: Class "
+                << destination_desc
+                << (expected_assignability ? " not " : " ")
+                << "assignable from "
+                << source_desc;
+      return false;
+    }
+  }
+  return true;
+}
+
+bool VerifierDeps::VerifyClasses(Handle<mirror::ClassLoader> class_loader,
+                                 const DexFile& dex_file,
+                                 const std::set<ClassResolution>& classes,
+                                 Thread* self) const {
+  StackHandleScope<1> hs(self);
+  ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
+  MutableHandle<mirror::Class> cls(hs.NewHandle<mirror::Class>(nullptr));
+  for (const auto& entry : classes) {
+    const char* descriptor = dex_file.StringByTypeIdx(entry.GetDexTypeIndex());
+    cls.Assign(FindClassAndClearException(class_linker, self, descriptor, class_loader));
+
+    if (entry.IsResolved()) {
+      if (cls.Get() == nullptr) {
+        LOG(INFO) << "VerifierDeps: Could not resolve class " << descriptor;
+        return false;
+      } else if (entry.GetAccessFlags() != GetAccessFlags(cls.Get())) {
+        LOG(INFO) << "VerifierDeps: Unexpected access flags on class "
+                  << descriptor
+                  << std::hex
+                  << " (expected="
+                  << entry.GetAccessFlags()
+                  << ", actual="
+                  << GetAccessFlags(cls.Get()) << ")"
+                  << std::dec;
+        return false;
+      }
+    } else if (cls.Get() != nullptr) {
+      LOG(INFO) << "VerifierDeps: Unexpected successful resolution of class " << descriptor;
+      return false;
+    }
+  }
+  return true;
+}
+
+static std::string GetFieldDescription(const DexFile& dex_file, uint32_t index) {
+  const DexFile::FieldId& field_id = dex_file.GetFieldId(index);
+  return std::string(dex_file.GetFieldDeclaringClassDescriptor(field_id))
+      + "->"
+      + dex_file.GetFieldName(field_id)
+      + ":"
+      + dex_file.GetFieldTypeDescriptor(field_id);
+}
+
+bool VerifierDeps::VerifyFields(Handle<mirror::ClassLoader> class_loader,
+                                const DexFile& dex_file,
+                                const std::set<FieldResolution>& fields,
+                                Thread* self) const {
+  // Check recorded fields are resolved the same way, have the same recorded class,
+  // and have the same recorded flags.
+  ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
+  StackHandleScope<1> hs(self);
+  Handle<mirror::DexCache> dex_cache(
+      hs.NewHandle(class_linker->FindDexCache(self, dex_file, /* allow_failure */ false)));
+  for (const auto& entry : fields) {
+    ArtField* field = class_linker->ResolveFieldJLS(
+        dex_file, entry.GetDexFieldIndex(), dex_cache, class_loader);
+
+    if (field == nullptr) {
+      DCHECK(self->IsExceptionPending());
+      self->ClearException();
+    }
+
+    if (entry.IsResolved()) {
+      std::string expected_decl_klass = GetStringFromId(dex_file, entry.GetDeclaringClassIndex());
+      std::string temp;
+      if (field == nullptr) {
+        LOG(INFO) << "VerifierDeps: Could not resolve field "
+                  << GetFieldDescription(dex_file, entry.GetDexFieldIndex());
+        return false;
+      } else if (expected_decl_klass != field->GetDeclaringClass()->GetDescriptor(&temp)) {
+        LOG(INFO) << "VerifierDeps: Unexpected declaring class for field resolution "
+                  << GetFieldDescription(dex_file, entry.GetDexFieldIndex())
+                  << " (expected=" << expected_decl_klass
+                  << ", actual=" << field->GetDeclaringClass()->GetDescriptor(&temp) << ")";
+        return false;
+      } else if (entry.GetAccessFlags() != GetAccessFlags(field)) {
+        LOG(INFO) << "VerifierDeps: Unexpected access flags for resolved field "
+                  << GetFieldDescription(dex_file, entry.GetDexFieldIndex())
+                  << std::hex << " (expected=" << entry.GetAccessFlags()
+                  << ", actual=" << GetAccessFlags(field) << ")" << std::dec;
+        return false;
+      }
+    } else if (field != nullptr) {
+      LOG(INFO) << "VerifierDeps: Unexpected successful resolution of field "
+                << GetFieldDescription(dex_file, entry.GetDexFieldIndex());
+      return false;
+    }
+  }
+  return true;
+}
+
+static std::string GetMethodDescription(const DexFile& dex_file, uint32_t index) {
+  const DexFile::MethodId& method_id = dex_file.GetMethodId(index);
+  return std::string(dex_file.GetMethodDeclaringClassDescriptor(method_id))
+      + "->"
+      + dex_file.GetMethodName(method_id)
+      + dex_file.GetMethodSignature(method_id).ToString();
+}
+
+bool VerifierDeps::VerifyMethods(Handle<mirror::ClassLoader> class_loader,
+                                 const DexFile& dex_file,
+                                 const std::set<MethodResolution>& methods,
+                                 MethodResolutionKind kind,
+                                 Thread* self) const {
+  ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
+  PointerSize pointer_size = class_linker->GetImagePointerSize();
+
+  for (const auto& entry : methods) {
+    const DexFile::MethodId& method_id = dex_file.GetMethodId(entry.GetDexMethodIndex());
+
+    const char* name = dex_file.GetMethodName(method_id);
+    const Signature signature = dex_file.GetMethodSignature(method_id);
+    const char* descriptor = dex_file.GetMethodDeclaringClassDescriptor(method_id);
+
+    mirror::Class* cls = FindClassAndClearException(class_linker, self, descriptor, class_loader);
+    if (cls == nullptr) {
+      LOG(INFO) << "VerifierDeps: Could not resolve class " << descriptor;
+      return false;
+    }
+    DCHECK(cls->IsResolved());
+    ArtMethod* method = nullptr;
+    if (kind == kDirectMethodResolution) {
+      method = cls->FindDirectMethod(name, signature, pointer_size);
+    } else if (kind == kVirtualMethodResolution) {
+      method = cls->FindVirtualMethod(name, signature, pointer_size);
+    } else {
+      DCHECK_EQ(kind, kInterfaceMethodResolution);
+      method = cls->FindInterfaceMethod(name, signature, pointer_size);
+    }
+
+    if (entry.IsResolved()) {
+      std::string temp;
+      std::string expected_decl_klass = GetStringFromId(dex_file, entry.GetDeclaringClassIndex());
+      if (method == nullptr) {
+        LOG(INFO) << "VerifierDeps: Could not resolve "
+                  << kind
+                  << " method "
+                  << GetMethodDescription(dex_file, entry.GetDexMethodIndex());
+        return false;
+      } else if (expected_decl_klass != method->GetDeclaringClass()->GetDescriptor(&temp)) {
+        LOG(INFO) << "VerifierDeps: Unexpected declaring class for "
+                  << kind
+                  << " method resolution "
+                  << GetMethodDescription(dex_file, entry.GetDexMethodIndex())
+                  << " (expected="
+                  << expected_decl_klass
+                  << ", actual="
+                  << method->GetDeclaringClass()->GetDescriptor(&temp)
+                  << ")";
+        return false;
+      } else if (entry.GetAccessFlags() != GetAccessFlags(method)) {
+        LOG(INFO) << "VerifierDeps: Unexpected access flags for resolved "
+                  << kind
+                  << " method resolution "
+                  << GetMethodDescription(dex_file, entry.GetDexMethodIndex())
+                  << std::hex
+                  << " (expected="
+                  << entry.GetAccessFlags()
+                  << ", actual="
+                  << GetAccessFlags(method) << ")"
+                  << std::dec;
+        return false;
+      }
+    } else if (method != nullptr) {
+      LOG(INFO) << "VerifierDeps: Unexpected successful resolution of "
+                << kind
+                << " method "
+                << GetMethodDescription(dex_file, entry.GetDexMethodIndex());
+      return false;
+    }
+  }
+  return true;
+}
+
+bool VerifierDeps::VerifyDexFile(Handle<mirror::ClassLoader> class_loader,
+                                 const DexFile& dex_file,
+                                 const DexFileDeps& deps,
+                                 Thread* self) const {
+  bool result = VerifyAssignability(
+      class_loader, dex_file, deps.assignable_types_, /* expected_assignability */ true, self);
+  result = result && VerifyAssignability(
+      class_loader, dex_file, deps.unassignable_types_, /* expected_assignability */ false, self);
+
+  result = result && VerifyClasses(class_loader, dex_file, deps.classes_, self);
+  result = result && VerifyFields(class_loader, dex_file, deps.fields_, self);
+
+  result = result && VerifyMethods(
+      class_loader, dex_file, deps.direct_methods_, kDirectMethodResolution, self);
+  result = result && VerifyMethods(
+      class_loader, dex_file, deps.virtual_methods_, kVirtualMethodResolution, self);
+  result = result && VerifyMethods(
+      class_loader, dex_file, deps.interface_methods_, kInterfaceMethodResolution, self);
+
+  return result;
 }
 
 }  // namespace verifier
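One detail worth noting in VerifyDexFile above: each `result = result && Verify...` line relies on && short-circuiting, so once one dependency category fails the remaining checks are skipped while the function still returns a single boolean. A tiny standalone illustration of that shape, with a hypothetical Check function rather than the verifier's API:

#include <cstdio>

static bool Check(const char* what, bool ok) {
  std::printf("checking %s\n", what);
  return ok;
}

int main() {
  bool result = Check("assignability", true);
  result = result && Check("classes", false);  // fails here
  result = result && Check("fields", true);    // never evaluated: && short-circuits
  std::printf("valid=%d\n", result);
  return 0;
}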
diff --git a/runtime/verifier/verifier_deps.h b/runtime/verifier/verifier_deps.h
index 3223f6f..7b419d4 100644
--- a/runtime/verifier/verifier_deps.h
+++ b/runtime/verifier/verifier_deps.h
@@ -25,7 +25,9 @@
 #include "art_method.h"
 #include "base/array_ref.h"
 #include "base/mutex.h"
+#include "indenter.h"
 #include "method_resolution_kind.h"
+#include "method_verifier.h"  // For MethodVerifier::FailureKind.
 #include "obj_ptr.h"
 #include "os.h"
 
@@ -49,6 +51,16 @@
   explicit VerifierDeps(const std::vector<const DexFile*>& dex_files)
       REQUIRES(!Locks::verifier_deps_lock_);
 
+  VerifierDeps(const std::vector<const DexFile*>& dex_files,
+               ArrayRef<const uint8_t> data)
+      REQUIRES(!Locks::verifier_deps_lock_);
+
+  // Record the verification status of the class at `type_idx`.
+  static void MaybeRecordVerificationStatus(const DexFile& dex_file,
+                                            uint16_t type_idx,
+                                            MethodVerifier::FailureKind failure_kind)
+      REQUIRES(!Locks::verifier_deps_lock_);
+
   // Record the outcome `klass` of resolving type `type_idx` from `dex_file`.
   // If `klass` is null, the class is assumed unresolved.
   static void MaybeRecordClassResolution(const DexFile& dex_file,
@@ -87,16 +99,31 @@
       REQUIRES(!Locks::verifier_deps_lock_);
 
   // Serialize the recorded dependencies and store the data into `buffer`.
-  void Encode(std::vector<uint8_t>* buffer) const
+  // `dex_files` provides the order of the dex files in which the dependencies
+  // should be emitted.
+  void Encode(const std::vector<const DexFile*>& dex_files, std::vector<uint8_t>* buffer) const
       REQUIRES(!Locks::verifier_deps_lock_);
 
+  // NO_THREAD_SAFETY_ANALYSIS as Dump iterates over dex_deps_, which is guarded by
+  // verifier_deps_lock_, but we expect Dump to be called once the deps collection is done.
+  void Dump(VariableIndentationOutputStream* vios) const
+      NO_THREAD_SAFETY_ANALYSIS;
+
+  // Verify the encoded dependencies of this `VerifierDeps` are still valid.
+  // NO_THREAD_SAFETY_ANALYSIS, as this must be called on a read-only `VerifierDeps`.
+  bool ValidateDependencies(Handle<mirror::ClassLoader> class_loader, Thread* self) const
+      NO_THREAD_SAFETY_ANALYSIS;
+
+  // NO_THREAD_SAFETY_ANALYSIS, as this is queried when the VerifierDeps are
+  // fully created.
+  const std::vector<uint16_t>& GetUnverifiedClasses(const DexFile& dex_file) const
+      NO_THREAD_SAFETY_ANALYSIS {
+    return GetDexFileDeps(dex_file)->unverified_classes_;
+  }
+
  private:
   static constexpr uint16_t kUnresolvedMarker = static_cast<uint16_t>(-1);
 
-  // Only used in tests to reconstruct the data structure from serialized data.
-  VerifierDeps(const std::vector<const DexFile*>& dex_files, ArrayRef<uint8_t> data)
-      REQUIRES(!Locks::verifier_deps_lock_);
-
   using ClassResolutionBase = std::tuple<uint32_t, uint16_t>;
   struct ClassResolution : public ClassResolutionBase {
     ClassResolution() = default;
@@ -136,7 +163,7 @@
   };
 
   using TypeAssignabilityBase = std::tuple<uint32_t, uint32_t>;
-  struct TypeAssignability : public std::tuple<uint32_t, uint32_t> {
+  struct TypeAssignability : public TypeAssignabilityBase {
     TypeAssignability() = default;
     TypeAssignability(const TypeAssignability&) = default;
     TypeAssignability(uint32_t destination_idx, uint32_t source_idx)
@@ -165,6 +192,9 @@
     std::set<MethodResolution> virtual_methods_;
     std::set<MethodResolution> interface_methods_;
 
+    // List of classes that were not fully verified in that dex file.
+    std::vector<uint16_t> unverified_classes_;
+
     bool Equals(const DexFileDeps& rhs) const;
   };
 
@@ -175,9 +205,12 @@
   DexFileDeps* GetDexFileDeps(const DexFile& dex_file)
       NO_THREAD_SAFETY_ANALYSIS;
 
+  const DexFileDeps* GetDexFileDeps(const DexFile& dex_file) const
+      NO_THREAD_SAFETY_ANALYSIS;
+
   // Returns true if `klass` is null or not defined in any of dex files which
   // were reported as being compiled.
-  bool IsInClassPath(ObjPtr<mirror::Class> klass)
+  bool IsInClassPath(ObjPtr<mirror::Class> klass) const
       REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Returns the index of `str`. If it is defined in `dex_file_`, this is the dex
@@ -188,13 +221,13 @@
       REQUIRES(Locks::verifier_deps_lock_);
 
   // Returns the string represented by `id`.
-  std::string GetStringFromId(const DexFile& dex_file, uint32_t string_id)
+  std::string GetStringFromId(const DexFile& dex_file, uint32_t string_id) const
       REQUIRES(Locks::verifier_deps_lock_);
 
   // Returns the bytecode access flags of `element` (bottom 16 bits), or
   // `kUnresolvedMarker` if `element` is null.
   template <typename T>
-  uint16_t GetAccessFlags(T* element)
+  static uint16_t GetAccessFlags(T* element)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Returns a string ID of the descriptor of the declaring class of `element`,
@@ -234,6 +267,54 @@
   bool Equals(const VerifierDeps& rhs) const
       REQUIRES(!Locks::verifier_deps_lock_);
 
+  // Verify `dex_file` against `deps`, that is, go over each
+  // `DexFileDeps` field and check that the recorded information still
+  // holds.
+  bool VerifyDexFile(Handle<mirror::ClassLoader> class_loader,
+                     const DexFile& dex_file,
+                     const DexFileDeps& deps,
+                     Thread* self) const
+      REQUIRES_SHARED(Locks::mutator_lock_)
+      REQUIRES(Locks::verifier_deps_lock_);
+
+  bool VerifyAssignability(Handle<mirror::ClassLoader> class_loader,
+                           const DexFile& dex_file,
+                           const std::set<TypeAssignability>& assignables,
+                           bool expected_assignability,
+                           Thread* self) const
+      REQUIRES_SHARED(Locks::mutator_lock_)
+      REQUIRES(Locks::verifier_deps_lock_);
+
+  // Verify that the set of resolved classes at the point of creation
+  // of this `VerifierDeps` is still the same.
+  bool VerifyClasses(Handle<mirror::ClassLoader> class_loader,
+                     const DexFile& dex_file,
+                     const std::set<ClassResolution>& classes,
+                     Thread* self) const
+      REQUIRES_SHARED(Locks::mutator_lock_)
+      REQUIRES(Locks::verifier_deps_lock_);
+
+  // Verify that the set of resolved fields at the point of creation
+  // of this `VerifierDeps` is still the same, and each field resolves to the
+  // same field holder and access flags.
+  bool VerifyFields(Handle<mirror::ClassLoader> class_loader,
+                    const DexFile& dex_file,
+                    const std::set<FieldResolution>& fields,
+                    Thread* self) const
+      REQUIRES_SHARED(Locks::mutator_lock_)
+      REQUIRES(Locks::verifier_deps_lock_);
+
+  // Verify that the set of resolved methods at the point of creation
+  // of this `VerifierDeps` is still the same, and each method resolves to the
+  // same method holder, access flags, and invocation kind.
+  bool VerifyMethods(Handle<mirror::ClassLoader> class_loader,
+                     const DexFile& dex_file,
+                     const std::set<MethodResolution>& methods,
+                     MethodResolutionKind kind,
+                     Thread* self) const
+      REQUIRES_SHARED(Locks::mutator_lock_)
+      REQUIRES(Locks::verifier_deps_lock_);
+
   // Map from DexFiles into dependencies collected from verification of their methods.
   std::map<const DexFile*, std::unique_ptr<DexFileDeps>> dex_deps_
       GUARDED_BY(Locks::verifier_deps_lock_);
@@ -241,6 +322,9 @@
   friend class VerifierDepsTest;
   ART_FRIEND_TEST(VerifierDepsTest, StringToId);
   ART_FRIEND_TEST(VerifierDepsTest, EncodeDecode);
+  ART_FRIEND_TEST(VerifierDepsTest, EncodeDecodeMulti);
+  ART_FRIEND_TEST(VerifierDepsTest, VerifyDeps);
+  ART_FRIEND_TEST(VerifierDepsTest, CompilerDriver);
 };
 
 }  // namespace verifier
diff --git a/runtime/well_known_classes.cc b/runtime/well_known_classes.cc
index 153c7ef..3549586 100644
--- a/runtime/well_known_classes.cc
+++ b/runtime/well_known_classes.cc
@@ -22,6 +22,7 @@
 
 #include "base/logging.h"
 #include "entrypoints/quick/quick_entrypoints_enum.h"
+#include "jni_internal.h"
 #include "mirror/class.h"
 #include "mirror/throwable.h"
 #include "obj_ptr-inl.h"
@@ -34,6 +35,8 @@
 jclass WellKnownClasses::com_android_dex_Dex;
 jclass WellKnownClasses::dalvik_annotation_optimization_CriticalNative;
 jclass WellKnownClasses::dalvik_annotation_optimization_FastNative;
+jclass WellKnownClasses::dalvik_system_BaseDexClassLoader;
+jclass WellKnownClasses::dalvik_system_DexClassLoader;
 jclass WellKnownClasses::dalvik_system_DexFile;
 jclass WellKnownClasses::dalvik_system_DexPathList;
 jclass WellKnownClasses::dalvik_system_DexPathList__Element;
@@ -108,7 +111,7 @@
 
 jfieldID WellKnownClasses::dalvik_system_DexFile_cookie;
 jfieldID WellKnownClasses::dalvik_system_DexFile_fileName;
-jfieldID WellKnownClasses::dalvik_system_PathClassLoader_pathList;
+jfieldID WellKnownClasses::dalvik_system_BaseDexClassLoader_pathList;
 jfieldID WellKnownClasses::dalvik_system_DexPathList_dexElements;
 jfieldID WellKnownClasses::dalvik_system_DexPathList__Element_dexFile;
 jfieldID WellKnownClasses::java_lang_Thread_daemon;
@@ -217,9 +220,9 @@
   ScopedObjectAccess soa(Thread::Current());
   #define LOAD_STRING_INIT(init_runtime_name, init_signature, new_runtime_name,             \
                            new_java_name, new_signature, ...)                               \
-      init_runtime_name = soa.DecodeMethod(                                                 \
+      init_runtime_name = jni::DecodeArtMethod(                                             \
           CacheMethod(env, java_lang_String, false, "<init>", init_signature));             \
-      new_runtime_name = soa.DecodeMethod(                                                  \
+      new_runtime_name = jni::DecodeArtMethod(                                              \
           CacheMethod(env, java_lang_StringFactory, true, new_java_name, new_signature));
       STRING_INIT_LIST(LOAD_STRING_INIT)
   #undef LOAD_STRING_INIT
@@ -237,8 +240,8 @@
 ArtMethod* WellKnownClasses::StringInitToStringFactory(ArtMethod* string_init) {
   #define TO_STRING_FACTORY(init_runtime_name, init_signature, new_runtime_name,            \
                             new_java_name, new_signature, entry_point_name)                 \
-      if (string_init == init_runtime_name) {                                               \
-        return new_runtime_name;                                                            \
+      if (string_init == (init_runtime_name)) {                                             \
+        return (new_runtime_name);                                                          \
       }
       STRING_INIT_LIST(TO_STRING_FACTORY)
   #undef TO_STRING_FACTORY
@@ -249,7 +252,7 @@
 uint32_t WellKnownClasses::StringInitToEntryPoint(ArtMethod* string_init) {
   #define TO_ENTRY_POINT(init_runtime_name, init_signature, new_runtime_name,               \
                          new_java_name, new_signature, entry_point_name)                    \
-      if (string_init == init_runtime_name) {                                               \
+      if (string_init == (init_runtime_name)) {                                             \
         return kQuick ## entry_point_name;                                                  \
       }
       STRING_INIT_LIST(TO_ENTRY_POINT)
@@ -264,6 +267,8 @@
   dalvik_annotation_optimization_CriticalNative =
       CacheClass(env, "dalvik/annotation/optimization/CriticalNative");
   dalvik_annotation_optimization_FastNative = CacheClass(env, "dalvik/annotation/optimization/FastNative");
+  dalvik_system_BaseDexClassLoader = CacheClass(env, "dalvik/system/BaseDexClassLoader");
+  dalvik_system_DexClassLoader = CacheClass(env, "dalvik/system/DexClassLoader");
   dalvik_system_DexFile = CacheClass(env, "dalvik/system/DexFile");
   dalvik_system_DexPathList = CacheClass(env, "dalvik/system/DexPathList");
   dalvik_system_DexPathList__Element = CacheClass(env, "dalvik/system/DexPathList$Element");
@@ -332,9 +337,9 @@
   org_apache_harmony_dalvik_ddmc_DdmServer_broadcast = CacheMethod(env, org_apache_harmony_dalvik_ddmc_DdmServer, true, "broadcast", "(I)V");
   org_apache_harmony_dalvik_ddmc_DdmServer_dispatch = CacheMethod(env, org_apache_harmony_dalvik_ddmc_DdmServer, true, "dispatch", "(I[BII)Lorg/apache/harmony/dalvik/ddmc/Chunk;");
 
+  dalvik_system_BaseDexClassLoader_pathList = CacheField(env, dalvik_system_BaseDexClassLoader, false, "pathList", "Ldalvik/system/DexPathList;");
   dalvik_system_DexFile_cookie = CacheField(env, dalvik_system_DexFile, false, "mCookie", "Ljava/lang/Object;");
   dalvik_system_DexFile_fileName = CacheField(env, dalvik_system_DexFile, false, "mFileName", "Ljava/lang/String;");
-  dalvik_system_PathClassLoader_pathList = CacheField(env, dalvik_system_PathClassLoader, false, "pathList", "Ldalvik/system/DexPathList;");
   dalvik_system_DexPathList_dexElements = CacheField(env, dalvik_system_DexPathList, false, "dexElements", "[Ldalvik/system/DexPathList$Element;");
   dalvik_system_DexPathList__Element_dexFile = CacheField(env, dalvik_system_DexPathList__Element, false, "dexFile", "Ldalvik/system/DexFile;");
   java_lang_Thread_daemon = CacheField(env, java_lang_Thread, false, "daemon", "Z");
@@ -389,7 +394,9 @@
 }
 
 ObjPtr<mirror::Class> WellKnownClasses::ToClass(jclass global_jclass) {
-  return ObjPtr<mirror::Class>::DownCast(Thread::Current()->DecodeJObject(global_jclass));
+  auto ret = ObjPtr<mirror::Class>::DownCast(Thread::Current()->DecodeJObject(global_jclass));
+  DCHECK(!ret.IsNull());
+  return ret;
 }
 
 }  // namespace art
diff --git a/runtime/well_known_classes.h b/runtime/well_known_classes.h
index 2fb5bb4..227996a 100644
--- a/runtime/well_known_classes.h
+++ b/runtime/well_known_classes.h
@@ -47,6 +47,8 @@
   static jclass com_android_dex_Dex;
   static jclass dalvik_annotation_optimization_CriticalNative;
   static jclass dalvik_annotation_optimization_FastNative;
+  static jclass dalvik_system_BaseDexClassLoader;
+  static jclass dalvik_system_DexClassLoader;
   static jclass dalvik_system_DexFile;
   static jclass dalvik_system_DexPathList;
   static jclass dalvik_system_DexPathList__Element;
@@ -119,11 +121,11 @@
   static jmethodID org_apache_harmony_dalvik_ddmc_DdmServer_broadcast;
   static jmethodID org_apache_harmony_dalvik_ddmc_DdmServer_dispatch;
 
+  static jfieldID dalvik_system_BaseDexClassLoader_pathList;
   static jfieldID dalvik_system_DexFile_cookie;
   static jfieldID dalvik_system_DexFile_fileName;
   static jfieldID dalvik_system_DexPathList_dexElements;
   static jfieldID dalvik_system_DexPathList__Element_dexFile;
-  static jfieldID dalvik_system_PathClassLoader_pathList;
   static jfieldID java_lang_reflect_Executable_artMethod;
   static jfieldID java_lang_reflect_Proxy_h;
   static jfieldID java_lang_Thread_daemon;
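The well_known_classes change above re-homes the cached pathList field from PathClassLoader to BaseDexClassLoader. This works because a JNI field ID obtained from a class is valid for instances of that class and of its subclasses, so one cached ID now serves PathClassLoader, DexClassLoader, and any other BaseDexClassLoader. A plain-JNI sketch of that caching pattern, outside ART's CacheClass/CacheField helpers, with error handling omitted:

#include <jni.h>

static jfieldID g_path_list_field = nullptr;

// Cache once, e.g. from JNI_OnLoad; the ID obtained from the base class is
// usable on any BaseDexClassLoader subclass instance.
void CachePathListField(JNIEnv* env) {
  jclass base = env->FindClass("dalvik/system/BaseDexClassLoader");
  g_path_list_field = env->GetFieldID(base, "pathList", "Ldalvik/system/DexPathList;");
}

// Works for PathClassLoader and DexClassLoader objects alike.
jobject GetPathList(JNIEnv* env, jobject class_loader) {
  return env->GetObjectField(class_loader, g_path_list_field);
}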
diff --git a/test/015-switch/src/Main.java b/test/015-switch/src/Main.java
index 2a7995a..2b724a1 100644
--- a/test/015-switch/src/Main.java
+++ b/test/015-switch/src/Main.java
@@ -113,7 +113,7 @@
     }
 
     // Long packed-switch that might lead to not creating chained-ifs.
-    public static void packedSwitch7(int value) {
+    public static long packedSwitch7(int value) {
         switch (value) {
             case 1:
                 System.out.println(1); break;
@@ -148,6 +148,113 @@
             default:
                 System.out.println("default"); break;
         }
+
+        // Jump tables were previously emitted at the end of the method's code buffer. The
+        // following boilerplate code aims to fill the emitted code buffer extensively and
+        // to check that, even for a big method, the jump table is emitted correctly and its
+        // address stays within the range of the corresponding pc-relative instructions
+        // (this applies mainly to ARM).
+        long temp = value;
+        temp = Long.rotateLeft(temp, value);
+        temp = Long.rotateLeft(temp, value);
+        temp = Long.rotateLeft(temp, value);
+        temp = Long.rotateLeft(temp, value);
+        temp = Long.rotateLeft(temp, value);
+        temp = Long.rotateLeft(temp, value);
+        temp = Long.rotateLeft(temp, value);
+        temp = Long.rotateLeft(temp, value);
+        temp = Long.rotateLeft(temp, value);
+        temp = Long.rotateLeft(temp, value);
+        temp = Long.rotateLeft(temp, value);
+        temp = Long.rotateLeft(temp, value);
+        temp = Long.rotateLeft(temp, value);
+        temp = Long.rotateLeft(temp, value);
+        temp = Long.rotateLeft(temp, value);
+        temp = Long.rotateLeft(temp, value);
+        temp = Long.rotateLeft(temp, value);
+        temp = Long.rotateLeft(temp, value);
+        temp = Long.rotateLeft(temp, value);
+        temp = Long.rotateLeft(temp, value);
+        temp = Long.rotateLeft(temp, value);
+        temp = Long.rotateLeft(temp, value);
+        temp = Long.rotateLeft(temp, value);
+        temp = Long.rotateLeft(temp, value);
+        temp = Long.rotateLeft(temp, value);
+        temp = Long.rotateLeft(temp, value);
+        temp = Long.rotateLeft(temp, value);
+        temp = Long.rotateLeft(temp, value);
+        temp = Long.rotateLeft(temp, value);
+        temp = Long.rotateLeft(temp, value);
+        temp = Long.rotateLeft(temp, value);
+        temp = Long.rotateLeft(temp, value);
+        temp = Long.rotateLeft(temp, value);
+        temp = Long.rotateLeft(temp, value);
+        temp = Long.rotateLeft(temp, value);
+        temp = Long.rotateLeft(temp, value);
+        temp = Long.rotateLeft(temp, value);
+        temp = Long.rotateLeft(temp, value);
+        temp = Long.rotateLeft(temp, value);
+        temp = Long.rotateLeft(temp, value);
+        temp = Long.rotateLeft(temp, value);
+        temp = Long.rotateLeft(temp, value);
+        temp = Long.rotateLeft(temp, value);
+        temp = Long.rotateLeft(temp, value);
+        temp = Long.rotateLeft(temp, value);
+        temp = Long.rotateLeft(temp, value);
+        temp = Long.rotateLeft(temp, value);
+        temp = Long.rotateLeft(temp, value);
+        temp = Long.rotateLeft(temp, value);
+        temp = Long.rotateLeft(temp, value);
+        temp = Long.rotateLeft(temp, value);
+        temp = Long.rotateLeft(temp, value);
+        temp = Long.rotateLeft(temp, value);
+        temp = Long.rotateLeft(temp, value);
+        temp = Long.rotateLeft(temp, value);
+        temp = Long.rotateLeft(temp, value);
+        temp = Long.rotateLeft(temp, value);
+        temp = Long.rotateLeft(temp, value);
+        temp = Long.rotateLeft(temp, value);
+        temp = Long.rotateLeft(temp, value);
+        temp = Long.rotateLeft(temp, value);
+        temp = Long.rotateLeft(temp, value);
+        temp = Long.rotateLeft(temp, value);
+        temp = Long.rotateLeft(temp, value);
+        temp = Long.rotateLeft(temp, value);
+        temp = Long.rotateLeft(temp, value);
+        temp = Long.rotateLeft(temp, value);
+        temp = Long.rotateLeft(temp, value);
+        temp = Long.rotateLeft(temp, value);
+        temp = Long.rotateLeft(temp, value);
+        temp = Long.rotateLeft(temp, value);
+        temp = Long.rotateLeft(temp, value);
+        temp = Long.rotateLeft(temp, value);
+        temp = Long.rotateLeft(temp, value);
+        temp = Long.rotateLeft(temp, value);
+        temp = Long.rotateLeft(temp, value);
+        temp = Long.rotateLeft(temp, value);
+        temp = Long.rotateLeft(temp, value);
+        temp = Long.rotateLeft(temp, value);
+        temp = Long.rotateLeft(temp, value);
+        temp = Long.rotateLeft(temp, value);
+        temp = Long.rotateLeft(temp, value);
+        temp = Long.rotateLeft(temp, value);
+        temp = Long.rotateLeft(temp, value);
+        temp = Long.rotateLeft(temp, value);
+        temp = Long.rotateLeft(temp, value);
+        temp = Long.rotateLeft(temp, value);
+        temp = Long.rotateLeft(temp, value);
+        temp = Long.rotateLeft(temp, value);
+        temp = Long.rotateLeft(temp, value);
+        temp = Long.rotateLeft(temp, value);
+        temp = Long.rotateLeft(temp, value);
+        temp = Long.rotateLeft(temp, value);
+        temp = Long.rotateLeft(temp, value);
+        temp = Long.rotateLeft(temp, value);
+        temp = Long.rotateLeft(temp, value);
+        temp = Long.rotateLeft(temp, value);
+        temp = Long.rotateLeft(temp, value);
+
+        return temp;
     }
 
     // Sparse switch, just leave a gap.
diff --git a/test/021-string2/src/Main.java b/test/021-string2/src/Main.java
index a848fba..51351e1 100644
--- a/test/021-string2/src/Main.java
+++ b/test/021-string2/src/Main.java
@@ -431,6 +431,22 @@
                 "\u0440\u0440\u0440\u0440\u0440\u0440z\u0440",
                 "\u0440\u0440\u0440\u0440\u0440\u0440\u0440z\u0440",
                 "\u0440\u0440\u0440\u0440\u0440\u0440\u0440\u0440z\u0440",
+                "\u0000",
+                "\u0000\u0000",
+                "\u0000\u0000\u0000",
+                "\u0000\u0000\u0000\u0000",
+                "\u0000\u0000\u0000\u0000\u0000",
+                "\u0000\u0000\u0000\u0000\u0000\u0000",
+                "\u0000\u0000\u0000\u0000\u0000\u0000\u0000",
+                "\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000",
+                "\u0000z\u0000",
+                "\u0000\u0000z\u0000",
+                "\u0000\u0000\u0000z\u0000",
+                "\u0000\u0000\u0000\u0000z\u0000",
+                "\u0000\u0000\u0000\u0000\u0000z\u0000",
+                "\u0000\u0000\u0000\u0000\u0000\u0000z\u0000",
+                "\u0000\u0000\u0000\u0000\u0000\u0000\u0000z\u0000",
+                "\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000z\u0000",
         };
         String[] suffixes = {
                 "",
@@ -458,30 +474,40 @@
                     String full = p + c + s;
                     int expX = (c.isEmpty() || c.charAt(0) != 'x') ? -1 : p.length();
                     int exp0440 = (c.isEmpty() || c.charAt(0) != '\u0440') ? -1 : p.length();
+                    int exp0000 = (c.isEmpty() || c.charAt(0) != '\u0000') ? -1 : p.length();
                     Assert.assertEquals(expX, $noinline$indexOf(full, 'x'));
                     Assert.assertEquals(exp0440, $noinline$indexOf(full, '\u0440'));
+                    Assert.assertEquals(exp0000, $noinline$indexOf(full, '\u0000'));
                     Assert.assertEquals(expX, $noinline$indexOf(full, 'x', -1));
                     Assert.assertEquals(exp0440, $noinline$indexOf(full, '\u0440', -1));
+                    Assert.assertEquals(exp0000, $noinline$indexOf(full, '\u0000', -1));
                     Assert.assertEquals(-1, $noinline$indexOf(full, 'x', full.length() + 1));
                     Assert.assertEquals(-1, $noinline$indexOf(full, '\u0440', full.length() + 1));
+                    Assert.assertEquals(-1, $noinline$indexOf(full, '\u0000', full.length() + 1));
                     for (int from = 0; from != full.length(); ++from) {
                         final int eX;
                         final int e0440;
+                        final int e0000;
                         if (from <= p.length()) {
                             eX = expX;
                             e0440 = exp0440;
+                            e0000 = exp0000;
                         } else if (from >= p.length() + c.length()) {
                             eX = -1;
                             e0440 = -1;
+                            e0000 = -1;
                         } else if (full.charAt(from) == 'z') {
                             eX = (full.charAt(from + 1) != 'x') ? -1 : from + 1;
                             e0440 = (full.charAt(from + 1) != '\u0440') ? -1 : from + 1;
+                            e0000 = (full.charAt(from + 1) != '\u0000') ? -1 : from + 1;
                         } else {
                             eX = (full.charAt(from) != 'x') ? -1 : from;
                             e0440 = (full.charAt(from) != '\u0440') ? -1 : from;
+                            e0000 = (full.charAt(from) != '\u0000') ? -1 : from;
                         }
                         Assert.assertEquals(eX, $noinline$indexOf(full, 'x', from));
                         Assert.assertEquals(e0440, $noinline$indexOf(full, '\u0440', from));
+                        Assert.assertEquals(e0000, $noinline$indexOf(full, '\u0000', from));
                     }
                 }
             }
diff --git a/test/030-bad-finalizer/expected.txt b/test/030-bad-finalizer/expected.txt
index ee9cfff..74e208c 100644
--- a/test/030-bad-finalizer/expected.txt
+++ b/test/030-bad-finalizer/expected.txt
@@ -1,4 +1,4 @@
-About to null reference and request GC.
+About to null reference.
 Finalizer started and spinning...
 Finalizer done spinning.
 Finalizer sleeping forever now.
diff --git a/test/030-bad-finalizer/src/Main.java b/test/030-bad-finalizer/src/Main.java
index 942ee25..0e69a96 100644
--- a/test/030-bad-finalizer/src/Main.java
+++ b/test/030-bad-finalizer/src/Main.java
@@ -14,26 +14,60 @@
  * limitations under the License.
  */
 
+import java.util.concurrent.CountDownLatch;
+import static java.util.concurrent.TimeUnit.MINUTES;
+
 /**
  * Test a class with a bad finalizer.
+ *
+ * This test is inherently flaky. It assumes that the system will schedule the finalizer daemon
+ * and finalizer watchdog daemon often enough to reach the timeout and throw the fatal exception.
  */
 public class Main {
-    public static void main(String[] args) {
-        BadFinalizer bf = new BadFinalizer();
+    public static void main(String[] args) throws Exception {
+        CountDownLatch finalizerWait = new CountDownLatch(1);
 
-        System.out.println("About to null reference and request GC.");
-        bf = null;
-        Runtime.getRuntime().gc();
+        // A separate method to ensure no dex register keeps the object alive.
+        createBadFinalizer(finalizerWait);
 
-        for (int i = 0; i < 8; i++) {
-            snooze(4000);
+        // At least two GC iterations should be enough to trigger finalization, but run a
+        // few more just to make sure.
+        for (int i = 0; i < 5; i++) {
             Runtime.getRuntime().gc();
         }
 
+        // Now wait for the finalizer to start running. Give it a minute.
+        finalizerWait.await(1, MINUTES);
+
+        // Now fall asleep with a timeout. The timeout is large enough that we expect the
+        // finalizer daemon to have killed the process before the deadline elapses.
+        // Note: the timeout is here (instead of an infinite sleep) to protect the test
+        //       environment (e.g., in case this is run without a timeout wrapper).
+        final long timeout = 60 * 1000;  // 1 minute.
+        long remainingWait = timeout;
+        final long waitStart = System.currentTimeMillis();
+        while (remainingWait > 0) {
+            synchronized (args) {  // Just use an already existing object for simplicity...
+                try {
+                    args.wait(remainingWait);
+                } catch (Exception e) {
+                }
+            }
+            remainingWait = timeout - (System.currentTimeMillis() - waitStart);
+        }
+
+        // We should not get here.
         System.out.println("UNREACHABLE");
         System.exit(0);
     }
 
+    private static void createBadFinalizer(CountDownLatch finalizerWait) {
+        BadFinalizer bf = new BadFinalizer(finalizerWait);
+
+        System.out.println("About to null reference.");
+        bf = null;  // Not that this makes a difference; it could be eliminated earlier anyway.
+    }
+
     public static void snooze(int ms) {
         try {
             Thread.sleep(ms);
@@ -45,9 +79,17 @@
      * Class with a bad finalizer.
      */
     public static class BadFinalizer {
+        private CountDownLatch finalizerWait;
+        private volatile int j = 0;  // Volatile in an effort to curb loop optimization.
+
+        public BadFinalizer(CountDownLatch finalizerWait) {
+            this.finalizerWait = finalizerWait;
+        }
+
         protected void finalize() {
+            finalizerWait.countDown();
+
             System.out.println("Finalizer started and spinning...");
-            int j = 0;
 
             /* spin for a bit */
             long start, end;
diff --git a/test/039-join-main/src/Main.java b/test/039-join-main/src/Main.java
index 2373221..60791e4 100644
--- a/test/039-join-main/src/Main.java
+++ b/test/039-join-main/src/Main.java
@@ -14,35 +14,48 @@
  * limitations under the License.
  */
 
+import java.util.concurrent.CountDownLatch;
+
 /**
  * Make sure that a sub-thread can join the main thread.
  */
 public class Main {
-    public static void main(String[] args) {
+    public static void main(String[] args) throws Exception {
         Thread t;
+        CountDownLatch waitLatch = new CountDownLatch(1);
+        CountDownLatch progressLatch = new CountDownLatch(1);
 
-        t = new Thread(new JoinMainSub(Thread.currentThread()), "Joiner");
+        t = new Thread(new JoinMainSub(Thread.currentThread(), waitLatch, progressLatch), "Joiner");
         System.out.print("Starting thread '" + t.getName() + "'\n");
         t.start();
 
-        try { Thread.sleep(1000); }
-        catch (InterruptedException ie) {}
-
+        waitLatch.await();
         System.out.print("JoinMain starter returning\n");
+        progressLatch.countDown();
+
+        // Keep the thread alive a little longer, giving the other thread a chance to join on a
+        // live thread (though that isn't critically important for the test).
+        Thread.sleep(500);
     }
 }
 
 class JoinMainSub implements Runnable {
     private Thread mJoinMe;
+    private CountDownLatch waitLatch;
+    private CountDownLatch progressLatch;
 
-    public JoinMainSub(Thread joinMe) {
+    public JoinMainSub(Thread joinMe, CountDownLatch waitLatch, CountDownLatch progressLatch) {
         mJoinMe = joinMe;
+        this.waitLatch = waitLatch;
+        this.progressLatch = progressLatch;
     }
 
     public void run() {
         System.out.print("@ JoinMainSub running\n");
 
         try {
+            waitLatch.countDown();
+            progressLatch.await();
             mJoinMe.join();
             System.out.print("@ JoinMainSub successfully joined main\n");
         } catch (InterruptedException ie) {
diff --git a/test/115-native-bridge/nativebridge.cc b/test/115-native-bridge/nativebridge.cc
index aca356b..41329af 100644
--- a/test/115-native-bridge/nativebridge.cc
+++ b/test/115-native-bridge/nativebridge.cc
@@ -370,7 +370,7 @@
 
 // v2 parts.
 
-extern "C" bool nb_is_compatible(uint32_t bridge_version ATTRIBUTE_UNUSED) {
+extern "C" bool native_bridge_isCompatibleWith(uint32_t bridge_version ATTRIBUTE_UNUSED) {
   return true;
 }
 
@@ -453,7 +453,7 @@
   return true;
 }
 
-static ::android::NativeBridgeSignalHandlerFn native_bridge_get_signal_handler(int signal) {
+static ::android::NativeBridgeSignalHandlerFn native_bridge_getSignalHandler(int signal) {
   // Test segv for already claimed signal, and sigill for not claimed signal
   if ((signal == SIGSEGV) || (signal == SIGILL)) {
     return &nb_signalhandler;
@@ -461,16 +461,63 @@
   return nullptr;
 }
 
+extern "C" int native_bridge_unloadLibrary(void* handle ATTRIBUTE_UNUSED) {
+  printf("dlclose() in native bridge.\n");
+  return 0;
+}
+
+extern "C" const char* native_bridge_getError() {
+  printf("dlerror() in native bridge.\n");
+  return nullptr;
+}
+
+extern "C" bool native_bridge_isPathSupported(const char* library_path ATTRIBUTE_UNUSED) {
+  printf("Checking for path support in native bridge.\n");
+  return false;
+}
+
+extern "C" bool native_bridge_initNamespace(const char*  public_ns_sonames ATTRIBUTE_UNUSED,
+                                            const char*  anon_ns_library_path ATTRIBUTE_UNUSED) {
+  printf("Initializing namespaces in native bridge.\n");
+  return false;
+}
+
+extern "C" android::native_bridge_namespace_t*
+native_bridge_createNamespace(const char* name ATTRIBUTE_UNUSED,
+                              const char* ld_library_path ATTRIBUTE_UNUSED,
+                              const char* default_library_path ATTRIBUTE_UNUSED,
+                              uint64_t type ATTRIBUTE_UNUSED,
+                              const char* permitted_when_isolated_path ATTRIBUTE_UNUSED,
+                              android::native_bridge_namespace_t* parent_ns ATTRIBUTE_UNUSED) {
+  printf("Creating namespace in native bridge.\n");
+  return nullptr;
+}
+
+extern "C" void* native_bridge_loadLibraryExt(const char* libpath ATTRIBUTE_UNUSED,
+                                               int flag ATTRIBUTE_UNUSED,
+                                               android::native_bridge_namespace_t* ns ATTRIBUTE_UNUSED) {
+    printf("Loading library with Extension in native bridge.\n");
+    return nullptr;
+}
 
 // "NativeBridgeItf" is effectively an API (it is the name of the symbol that will be loaded
 // by the native bridge library).
 android::NativeBridgeCallbacks NativeBridgeItf {
-  .version = 2,
+  // v1
+  .version = 3,
   .initialize = &native_bridge_initialize,
   .loadLibrary = &native_bridge_loadLibrary,
   .getTrampoline = &native_bridge_getTrampoline,
   .isSupported = &native_bridge_isSupported,
   .getAppEnv = &native_bridge_getAppEnv,
-  .isCompatibleWith = &nb_is_compatible,
-  .getSignalHandler = &native_bridge_get_signal_handler
+  // v2
+  .isCompatibleWith = &native_bridge_isCompatibleWith,
+  .getSignalHandler = &native_bridge_getSignalHandler,
+  // v3
+  .unloadLibrary = &native_bridge_unloadLibrary,
+  .getError = &native_bridge_getError,
+  .isPathSupported = &native_bridge_isPathSupported,
+  .initNamespace = &native_bridge_initNamespace,
+  .createNamespace = &native_bridge_createNamespace,
+  .loadLibraryExt = &native_bridge_loadLibraryExt
 };
diff --git a/test/141-class-unload/expected.txt b/test/141-class-unload/expected.txt
index 2b77b29..0a03ecb 100644
--- a/test/141-class-unload/expected.txt
+++ b/test/141-class-unload/expected.txt
@@ -21,3 +21,4 @@
 class null false test
 JNI_OnUnload called
 Number of loaded unload-ex maps 0
+Too small false
diff --git a/test/141-class-unload/src/Main.java b/test/141-class-unload/src/Main.java
index f9b6180..2a6e944 100644
--- a/test/141-class-unload/src/Main.java
+++ b/test/141-class-unload/src/Main.java
@@ -47,6 +47,8 @@
             stressTest(constructor);
             // Test that the oat files are unloaded.
             testOatFilesUnloaded(getPid());
+            // Test that objects keep class loader live for sticky GC.
+            testStickyUnload(constructor);
         } catch (Exception e) {
             e.printStackTrace();
         }
@@ -161,6 +163,30 @@
         return intHolder;
     }
 
+    private static Object allocObjectInOtherClassLoader(Constructor<?> constructor)
+            throws Exception {
+        ClassLoader loader = (ClassLoader) constructor.newInstance(
+                DEX_FILE, LIBRARY_SEARCH_PATH, ClassLoader.getSystemClassLoader());
+        return loader.loadClass("IntHolder").newInstance();
+    }
+
+    // Regression test for public issue 227182.
+    private static void testStickyUnload(Constructor<?> constructor) throws Exception {
+        String s = "";
+        for (int i = 0; i < 10; ++i) {
+            s = "";
+            // The object is the only thing preventing the class loader from being unloaded.
+            Object o = allocObjectInOtherClassLoader(constructor);
+            for (int j = 0; j < 1000; ++j) {
+                s += j + " ";
+            }
+            // Make sure the object still has a valid class (hasn't been incorrectly unloaded).
+            s += o.getClass().getName();
+            o = null;
+        }
+        System.out.println("Too small " + (s.length() < 1000));
+    }
+
     private static WeakReference<Class> setUpUnloadClassWeak(Constructor<?> constructor)
             throws Exception {
         return new WeakReference<Class>(setUpUnloadClass(constructor));
diff --git a/test/151-OpenFileLimit/run b/test/151-OpenFileLimit/run
new file mode 100755
index 0000000..5c83fd0
--- /dev/null
+++ b/test/151-OpenFileLimit/run
@@ -0,0 +1,21 @@
+#!/bin/bash
+#
+# Copyright (C) 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+flags="$@"
+
+# Reduce the file descriptor limit so the test will reach the limit sooner.
+ulimit -n 512
+${RUN} ${flags}
diff --git a/test/445-checker-licm/expected.txt b/test/445-checker-licm/expected.txt
index e69de29..b0aad4d 100644
--- a/test/445-checker-licm/expected.txt
+++ b/test/445-checker-licm/expected.txt
@@ -0,0 +1 @@
+passed
diff --git a/test/445-checker-licm/src/Main.java b/test/445-checker-licm/src/Main.java
index 061fe6e..00ce3a9 100644
--- a/test/445-checker-licm/src/Main.java
+++ b/test/445-checker-licm/src/Main.java
@@ -164,8 +164,43 @@
     return result;
   }
 
+  //
+  // All operations up to the null check can be hoisted out of the
+  // loop. The null check itself sees the induction in its environment.
+  //
+  /// CHECK-START: int Main.doWhile(int) licm (before)
+  /// CHECK-DAG: <<Add:i\d+>> Add                 loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG:              LoadClass           loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Get:l\d+>> StaticFieldGet      loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:              NullCheck [<<Get>>] env:[[<<Add>>,<<Get>>,{{i\d+}}]] loop:<<Loop>> outer_loop:none
+  /// CHECK-DAG:              ArrayLength         loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:              BoundsCheck         loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:              ArrayGet            loop:<<Loop>>      outer_loop:none
+  //
+  /// CHECK-START: int Main.doWhile(int) licm (after)
+  /// CHECK-NOT: LoadClass      loop:{{B\d+}}
+  /// CHECK-NOT: StaticFieldGet loop:{{B\d+}}
+  //
+  /// CHECK-START: int Main.doWhile(int) licm (after)
+  /// CHECK-DAG:              LoadClass           loop:none
+  /// CHECK-DAG: <<Get:l\d+>> StaticFieldGet      loop:none
+  /// CHECK-DAG: <<Add:i\d+>> Add                 loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG:              NullCheck [<<Get>>] env:[[<<Add>>,<<Get>>,{{i\d+}}]] loop:<<Loop>> outer_loop:none
+  /// CHECK-DAG:              ArrayLength         loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:              BoundsCheck         loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:              ArrayGet            loop:<<Loop>>      outer_loop:none
+  public static int doWhile(int k) {
+    int i = k;
+    do {
+      i += 2;
+    } while (staticArray[i] == 0);
+    return i;
+  }
+
   public static int staticField = 42;
 
+  public static int[] staticArray = null;
+
   public static void assertEquals(int expected, int actual) {
     if (expected != actual) {
       throw new Error("Expected " + expected + ", got " + actual);
@@ -181,5 +216,24 @@
     assertEquals(21, divAndIntrinsic(new int[] { 4, -2, 8, -3 }));
     assertEquals(45, invariantBoundIntrinsic(-10));
     assertEquals(30, invariantBodyIntrinsic(2, 3));
+
+    staticArray = null;
+    try {
+      doWhile(0);
+      throw new Error("Expected NPE");
+    } catch (NullPointerException e) {
+    }
+    staticArray = new int[5];
+    staticArray[4] = 1;
+    assertEquals(4, doWhile(-2));
+    assertEquals(4, doWhile(0));
+    assertEquals(4, doWhile(2));
+    try {
+      doWhile(1);
+      throw new Error("Expected IOOBE");
+    } catch (IndexOutOfBoundsException e) {
+    }
+
+    System.out.println("passed");
   }
 }
diff --git a/test/562-no-intermediate/expected.txt b/test/478-checker-inline-noreturn/expected.txt
similarity index 100%
copy from test/562-no-intermediate/expected.txt
copy to test/478-checker-inline-noreturn/expected.txt
diff --git a/test/478-checker-inline-noreturn/info.txt b/test/478-checker-inline-noreturn/info.txt
new file mode 100644
index 0000000..64f42ed
--- /dev/null
+++ b/test/478-checker-inline-noreturn/info.txt
@@ -0,0 +1,3 @@
+Tests inlining a function with a no-exit loop into a loop. LinearOrder
+computation fails because of incorrect HLoopInformation if we inline
+a loop without an exit.
diff --git a/test/478-checker-inline-noreturn/src/Main.java b/test/478-checker-inline-noreturn/src/Main.java
new file mode 100644
index 0000000..7aaeac0
--- /dev/null
+++ b/test/478-checker-inline-noreturn/src/Main.java
@@ -0,0 +1,60 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+/*
+ * A test that checks that the inliner does not inline functions that contain
+ * a loop with no exit.  This is because the incremental update to
+ * HLoopInformation done by the inliner does not work with the LinearOrder
+ * computation if the inlined function does not always return.
+ */
+
+public class Main {
+
+  public static void assertIntEquals(int expected, int result) {
+    if (expected != result) {
+      throw new Error("Expected: " + expected + ", found: " + result);
+    }
+  }
+
+  public static int $opt$noinline$Function(int x, int y) {
+    int result;
+    if (x <= y) {
+      result = 42;
+    } else {
+      while (true);
+    }
+    return result;
+  }
+
+  /// CHECK-START: int Main.callerLoop(int, int) inliner (before)
+  /// CHECK:         InvokeStaticOrDirect method_name:Main.$opt$noinline$Function  loop:{{B\d+}}
+
+  /// CHECK-START: int Main.callerLoop(int, int) inliner (after)
+  /// CHECK:         InvokeStaticOrDirect method_name:Main.$opt$noinline$Function  loop:{{B\d+}}
+
+  public static int callerLoop(int max_x, int max_y) {
+    int total = 0;
+    for (int x = 0; x < max_x; ++x) {
+      total += $opt$noinline$Function(x, max_y);
+    }
+    return total;
+  }
+
+  public static void main(String[] args) {
+    assertIntEquals(42, callerLoop(1, 1));
+  }
+}
diff --git a/test/480-checker-dead-blocks/src/Main.java b/test/480-checker-dead-blocks/src/Main.java
index 141054d..0ca822f 100644
--- a/test/480-checker-dead-blocks/src/Main.java
+++ b/test/480-checker-dead-blocks/src/Main.java
@@ -30,7 +30,7 @@
     return false;
   }
 
-  /// CHECK-START: int Main.testTrueBranch(int, int) dead_code_elimination$final (before)
+  /// CHECK-START: int Main.testTrueBranch(int, int) dead_code_elimination$after_inlining (before)
   /// CHECK-DAG:     <<ArgX:i\d+>>    ParameterValue
   /// CHECK-DAG:     <<ArgY:i\d+>>    ParameterValue
   /// CHECK-DAG:                      If
@@ -39,13 +39,13 @@
   /// CHECK-DAG:     <<Phi:i\d+>>     Phi [<<Add>>,<<Sub>>]
   /// CHECK-DAG:                      Return [<<Phi>>]
 
-  /// CHECK-START: int Main.testTrueBranch(int, int) dead_code_elimination$final (after)
+  /// CHECK-START: int Main.testTrueBranch(int, int) dead_code_elimination$after_inlining (after)
   /// CHECK-DAG:     <<ArgX:i\d+>>    ParameterValue
   /// CHECK-DAG:     <<ArgY:i\d+>>    ParameterValue
   /// CHECK-DAG:     <<Add:i\d+>>     Add [<<ArgX>>,<<ArgY>>]
   /// CHECK-DAG:                      Return [<<Add>>]
 
-  /// CHECK-START: int Main.testTrueBranch(int, int) dead_code_elimination$final (after)
+  /// CHECK-START: int Main.testTrueBranch(int, int) dead_code_elimination$after_inlining (after)
   /// CHECK-NOT:                      If
   /// CHECK-NOT:                      Sub
   /// CHECK-NOT:                      Phi
@@ -62,7 +62,7 @@
     return z;
   }
 
-  /// CHECK-START: int Main.testFalseBranch(int, int) dead_code_elimination$final (before)
+  /// CHECK-START: int Main.testFalseBranch(int, int) dead_code_elimination$after_inlining (before)
   /// CHECK-DAG:     <<ArgX:i\d+>>    ParameterValue
   /// CHECK-DAG:     <<ArgY:i\d+>>    ParameterValue
   /// CHECK-DAG:                      If
@@ -71,13 +71,13 @@
   /// CHECK-DAG:     <<Phi:i\d+>>     Phi [<<Add>>,<<Sub>>]
   /// CHECK-DAG:                      Return [<<Phi>>]
 
-  /// CHECK-START: int Main.testFalseBranch(int, int) dead_code_elimination$final (after)
+  /// CHECK-START: int Main.testFalseBranch(int, int) dead_code_elimination$after_inlining (after)
   /// CHECK-DAG:     <<ArgX:i\d+>>    ParameterValue
   /// CHECK-DAG:     <<ArgY:i\d+>>    ParameterValue
   /// CHECK-DAG:     <<Sub:i\d+>>     Sub [<<ArgX>>,<<ArgY>>]
   /// CHECK-DAG:                      Return [<<Sub>>]
 
-  /// CHECK-START: int Main.testFalseBranch(int, int) dead_code_elimination$final (after)
+  /// CHECK-START: int Main.testFalseBranch(int, int) dead_code_elimination$after_inlining (after)
   /// CHECK-NOT:                      If
   /// CHECK-NOT:                      Add
   /// CHECK-NOT:                      Phi
@@ -94,10 +94,10 @@
     return z;
   }
 
-  /// CHECK-START: int Main.testRemoveLoop(int) dead_code_elimination$final (before)
+  /// CHECK-START: int Main.testRemoveLoop(int) dead_code_elimination$after_inlining (before)
   /// CHECK:                          Mul
 
-  /// CHECK-START: int Main.testRemoveLoop(int) dead_code_elimination$final (after)
+  /// CHECK-START: int Main.testRemoveLoop(int) dead_code_elimination$after_inlining (after)
   /// CHECK-NOT:                      Mul
 
   public static int testRemoveLoop(int x) {
@@ -109,11 +109,11 @@
     return x;
   }
 
-  /// CHECK-START: int Main.testInfiniteLoop(int) dead_code_elimination$final (before)
+  /// CHECK-START: int Main.testInfiniteLoop(int) dead_code_elimination$after_inlining (before)
   /// CHECK-DAG:                      Return
   /// CHECK-DAG:                      Exit
 
-  /// CHECK-START: int Main.testInfiniteLoop(int) dead_code_elimination$final (after)
+  /// CHECK-START: int Main.testInfiniteLoop(int) dead_code_elimination$after_inlining (after)
   /// CHECK-NOT:                      Return
   /// CHECK-NOT:                      Exit
 
@@ -124,15 +124,15 @@
     return x;
   }
 
-  /// CHECK-START: int Main.testDeadLoop(int) dead_code_elimination$final (before)
+  /// CHECK-START: int Main.testDeadLoop(int) dead_code_elimination$after_inlining (before)
   /// CHECK-DAG:                      If
   /// CHECK-DAG:                      Add
 
-  /// CHECK-START: int Main.testDeadLoop(int) dead_code_elimination$final (after)
+  /// CHECK-START: int Main.testDeadLoop(int) dead_code_elimination$after_inlining (after)
   /// CHECK-DAG:     <<Arg:i\d+>>     ParameterValue
   /// CHECK-DAG:                      Return [<<Arg>>]
 
-  /// CHECK-START: int Main.testDeadLoop(int) dead_code_elimination$final (after)
+  /// CHECK-START: int Main.testDeadLoop(int) dead_code_elimination$after_inlining (after)
   /// CHECK-NOT:                      If
   /// CHECK-NOT:                      Add
 
@@ -143,16 +143,16 @@
     return x;
   }
 
-  /// CHECK-START: int Main.testUpdateLoopInformation(int) dead_code_elimination$final (before)
+  /// CHECK-START: int Main.testUpdateLoopInformation(int) dead_code_elimination$after_inlining (before)
   /// CHECK-DAG:                      If
   /// CHECK-DAG:                      If
   /// CHECK-DAG:                      Add
 
-  /// CHECK-START: int Main.testUpdateLoopInformation(int) dead_code_elimination$final (after)
+  /// CHECK-START: int Main.testUpdateLoopInformation(int) dead_code_elimination$after_inlining (after)
   /// CHECK-DAG:     <<Arg:i\d+>>     ParameterValue
   /// CHECK-DAG:                      Return [<<Arg>>]
 
-  /// CHECK-START: int Main.testUpdateLoopInformation(int) dead_code_elimination$final (after)
+  /// CHECK-START: int Main.testUpdateLoopInformation(int) dead_code_elimination$after_inlining (after)
   /// CHECK-NOT:                      If
   /// CHECK-NOT:                      Add
 
@@ -165,13 +165,13 @@
     return x;
   }
 
-  /// CHECK-START: int Main.testRemoveSuspendCheck(int, int) dead_code_elimination$final (before)
+  /// CHECK-START: int Main.testRemoveSuspendCheck(int, int) dead_code_elimination$after_inlining (before)
   /// CHECK:                          SuspendCheck
   /// CHECK:                          SuspendCheck
   /// CHECK:                          SuspendCheck
   /// CHECK-NOT:                      SuspendCheck
 
-  /// CHECK-START: int Main.testRemoveSuspendCheck(int, int) dead_code_elimination$final (after)
+  /// CHECK-START: int Main.testRemoveSuspendCheck(int, int) dead_code_elimination$after_inlining (after)
   /// CHECK:                          SuspendCheck
   /// CHECK:                          SuspendCheck
   /// CHECK-NOT:                      SuspendCheck
diff --git a/test/485-checker-dce-loop-update/smali/TestCase.smali b/test/485-checker-dce-loop-update/smali/TestCase.smali
index e3617c7..cda6f73 100644
--- a/test/485-checker-dce-loop-update/smali/TestCase.smali
+++ b/test/485-checker-dce-loop-update/smali/TestCase.smali
@@ -23,7 +23,7 @@
 .end method
 
 
-## CHECK-START: int TestCase.testSingleExit(int, boolean) dead_code_elimination$final (before)
+## CHECK-START: int TestCase.testSingleExit(int, boolean) dead_code_elimination$after_inlining (before)
 ## CHECK-DAG:     <<ArgX:i\d+>>  ParameterValue
 ## CHECK-DAG:     <<ArgY:z\d+>>  ParameterValue
 ## CHECK-DAG:     <<Cst1:i\d+>>  IntConstant 1
@@ -36,7 +36,7 @@
 ## CHECK-DAG:     <<Add7>>       Add [<<PhiX>>,<<Cst7>>]                    loop:<<HeaderY>>
 ## CHECK-DAG:                    Return [<<PhiX>>]                          loop:none
 
-## CHECK-START: int TestCase.testSingleExit(int, boolean) dead_code_elimination$final (after)
+## CHECK-START: int TestCase.testSingleExit(int, boolean) dead_code_elimination$after_inlining (after)
 ## CHECK-DAG:     <<ArgX:i\d+>>  ParameterValue
 ## CHECK-DAG:     <<ArgY:z\d+>>  ParameterValue
 ## CHECK-DAG:     <<Cst7:i\d+>>  IntConstant 7
@@ -73,7 +73,7 @@
 .end method
 
 
-## CHECK-START: int TestCase.testMultipleExits(int, boolean, boolean) dead_code_elimination$final (before)
+## CHECK-START: int TestCase.testMultipleExits(int, boolean, boolean) dead_code_elimination$after_inlining (before)
 ## CHECK-DAG:     <<ArgX:i\d+>>  ParameterValue
 ## CHECK-DAG:     <<ArgY:z\d+>>  ParameterValue
 ## CHECK-DAG:     <<ArgZ:z\d+>>  ParameterValue
@@ -88,7 +88,7 @@
 ## CHECK-DAG:     <<Add7>>       Add [<<PhiX>>,<<Cst7>>]                    loop:<<HeaderY>>
 ## CHECK-DAG:                    Return [<<PhiX>>]                          loop:none
 
-## CHECK-START: int TestCase.testMultipleExits(int, boolean, boolean) dead_code_elimination$final (after)
+## CHECK-START: int TestCase.testMultipleExits(int, boolean, boolean) dead_code_elimination$after_inlining (after)
 ## CHECK-DAG:     <<ArgX:i\d+>>  ParameterValue
 ## CHECK-DAG:     <<ArgY:z\d+>>  ParameterValue
 ## CHECK-DAG:     <<ArgZ:z\d+>>  ParameterValue
@@ -129,7 +129,7 @@
 .end method
 
 
-## CHECK-START: int TestCase.testExitPredecessors(int, boolean, boolean) dead_code_elimination$final (before)
+## CHECK-START: int TestCase.testExitPredecessors(int, boolean, boolean) dead_code_elimination$after_inlining (before)
 ## CHECK-DAG:     <<ArgX:i\d+>>  ParameterValue
 ## CHECK-DAG:     <<ArgY:z\d+>>  ParameterValue
 ## CHECK-DAG:     <<ArgZ:z\d+>>  ParameterValue
@@ -146,7 +146,7 @@
 ## CHECK-DAG:     <<Add7>>       Add [<<PhiX>>,<<Cst7>>]                    loop:<<HeaderY>>
 ## CHECK-DAG:                    Return [<<SelX>>]                          loop:none
 
-## CHECK-START: int TestCase.testExitPredecessors(int, boolean, boolean) dead_code_elimination$final (after)
+## CHECK-START: int TestCase.testExitPredecessors(int, boolean, boolean) dead_code_elimination$after_inlining (after)
 ## CHECK-DAG:     <<ArgX:i\d+>>  ParameterValue
 ## CHECK-DAG:     <<ArgY:z\d+>>  ParameterValue
 ## CHECK-DAG:     <<ArgZ:z\d+>>  ParameterValue
@@ -194,7 +194,7 @@
 .end method
 
 
-## CHECK-START: int TestCase.testInnerLoop(int, boolean, boolean) dead_code_elimination$final (before)
+## CHECK-START: int TestCase.testInnerLoop(int, boolean, boolean) dead_code_elimination$after_inlining (before)
 ## CHECK-DAG:     <<ArgX:i\d+>>  ParameterValue
 ## CHECK-DAG:     <<ArgY:z\d+>>  ParameterValue
 ## CHECK-DAG:     <<ArgZ:z\d+>>  ParameterValue
@@ -217,7 +217,7 @@
 ## CHECK-DAG:     <<Add7>>       Add [<<PhiX>>,<<Cst7>>]                    loop:<<HeaderY>>
 ## CHECK-DAG:                    Return [<<PhiX>>]                          loop:none
 
-## CHECK-START: int TestCase.testInnerLoop(int, boolean, boolean) dead_code_elimination$final (after)
+## CHECK-START: int TestCase.testInnerLoop(int, boolean, boolean) dead_code_elimination$after_inlining (after)
 ## CHECK-DAG:     <<ArgX:i\d+>>  ParameterValue
 ## CHECK-DAG:     <<ArgY:z\d+>>  ParameterValue
 ## CHECK-DAG:     <<ArgZ:z\d+>>  ParameterValue
diff --git a/test/485-checker-dce-switch/src/Main.java b/test/485-checker-dce-switch/src/Main.java
index 7d5fd4f..95b1a93 100644
--- a/test/485-checker-dce-switch/src/Main.java
+++ b/test/485-checker-dce-switch/src/Main.java
@@ -20,14 +20,14 @@
     return 5;
   }
 
-  /// CHECK-START: int Main.wholeSwitchDead(int) dead_code_elimination$final (before)
+  /// CHECK-START: int Main.wholeSwitchDead(int) dead_code_elimination$after_inlining (before)
   /// CHECK-DAG:                      PackedSwitch
 
-  /// CHECK-START: int Main.wholeSwitchDead(int) dead_code_elimination$final (after)
+  /// CHECK-START: int Main.wholeSwitchDead(int) dead_code_elimination$after_inlining (after)
   /// CHECK-DAG:    <<Const100:i\d+>> IntConstant 100
   /// CHECK-DAG:                      Return [<<Const100>>]
 
-  /// CHECK-START: int Main.wholeSwitchDead(int) dead_code_elimination$final (after)
+  /// CHECK-START: int Main.wholeSwitchDead(int) dead_code_elimination$after_inlining (after)
   /// CHECK-NOT:                      PackedSwitch
 
   public static int wholeSwitchDead(int j) {
@@ -60,14 +60,14 @@
     return l;
   }
 
-  /// CHECK-START: int Main.constantSwitch_InRange() dead_code_elimination$final (before)
+  /// CHECK-START: int Main.constantSwitch_InRange() dead_code_elimination$after_inlining (before)
   /// CHECK-DAG:                      PackedSwitch
 
-  /// CHECK-START: int Main.constantSwitch_InRange() dead_code_elimination$final (after)
+  /// CHECK-START: int Main.constantSwitch_InRange() dead_code_elimination$after_inlining (after)
   /// CHECK-DAG:     <<Const7:i\d+>>  IntConstant 7
   /// CHECK-DAG:                      Return [<<Const7>>]
 
-  /// CHECK-START: int Main.constantSwitch_InRange() dead_code_elimination$final (after)
+  /// CHECK-START: int Main.constantSwitch_InRange() dead_code_elimination$after_inlining (after)
   /// CHECK-NOT:                      PackedSwitch
 
   public static int constantSwitch_InRange() {
@@ -96,14 +96,14 @@
     return i;
   }
 
-  /// CHECK-START: int Main.constantSwitch_AboveRange() dead_code_elimination$final (before)
+  /// CHECK-START: int Main.constantSwitch_AboveRange() dead_code_elimination$after_inlining (before)
   /// CHECK-DAG:                      PackedSwitch
 
-  /// CHECK-START: int Main.constantSwitch_AboveRange() dead_code_elimination$final (after)
+  /// CHECK-START: int Main.constantSwitch_AboveRange() dead_code_elimination$after_inlining (after)
   /// CHECK-DAG:     <<Const15:i\d+>> IntConstant 15
   /// CHECK-DAG:                      Return [<<Const15>>]
 
-  /// CHECK-START: int Main.constantSwitch_AboveRange() dead_code_elimination$final (after)
+  /// CHECK-START: int Main.constantSwitch_AboveRange() dead_code_elimination$after_inlining (after)
   /// CHECK-NOT:                      PackedSwitch
 
   public static int constantSwitch_AboveRange() {
@@ -132,14 +132,14 @@
     return i;
   }
 
-  /// CHECK-START: int Main.constantSwitch_BelowRange() dead_code_elimination$final (before)
+  /// CHECK-START: int Main.constantSwitch_BelowRange() dead_code_elimination$after_inlining (before)
   /// CHECK-DAG:                      PackedSwitch
 
-  /// CHECK-START: int Main.constantSwitch_BelowRange() dead_code_elimination$final (after)
+  /// CHECK-START: int Main.constantSwitch_BelowRange() dead_code_elimination$after_inlining (after)
   /// CHECK-DAG:     <<ConstM5:i\d+>> IntConstant -5
   /// CHECK-DAG:                      Return [<<ConstM5>>]
 
-  /// CHECK-START: int Main.constantSwitch_BelowRange() dead_code_elimination$final (after)
+  /// CHECK-START: int Main.constantSwitch_BelowRange() dead_code_elimination$after_inlining (after)
   /// CHECK-NOT:                      PackedSwitch
 
   public static int constantSwitch_BelowRange() {
diff --git a/test/527-checker-array-access-split/info.txt b/test/527-checker-array-access-split/info.txt
index 9206804..a39bea3 100644
--- a/test/527-checker-array-access-split/info.txt
+++ b/test/527-checker-array-access-split/info.txt
@@ -1 +1 @@
-Test arm64-specific array access optimization.
+Test arm- and arm64-specific array access optimization.
diff --git a/test/530-checker-loops2/src/Main.java b/test/530-checker-loops2/src/Main.java
index 23d6438..47b6475 100644
--- a/test/530-checker-loops2/src/Main.java
+++ b/test/530-checker-loops2/src/Main.java
@@ -890,11 +890,19 @@
     return result;
   }
 
+  /// CHECK-START: int Main.shortIndex(int[]) BCE (before)
+  /// CHECK-DAG: BoundsCheck loop:<<Loop:B\d+>>
+  /// CHECK-DAG: BoundsCheck loop:<<Loop>>
+  //
+  /// CHECK-START: int Main.shortIndex(int[]) BCE (after)
+  /// CHECK-DAG: BoundsCheck loop:<<Loop:B\d+>>
+  /// CHECK-DAG: BoundsCheck loop:<<Loop>>
+  //
+  /// CHECK-START: int Main.shortIndex(int[]) BCE (after)
+  /// CHECK-NOT: Deoptimize
   static int shortIndex(int[] a) {
     int r = 0;
     // Make sure short/int conversions compile well (b/32193474).
-    // TODO: investigate type implications and whether we can use
-    //       constant range to apply dyn BCE on all subscripts.
     for (short i = 1; i < 10; i++) {
       int ki = i - 1;
       r += a[ki] + a[i];
diff --git a/test/530-checker-loops3/src/Main.java b/test/530-checker-loops3/src/Main.java
index 6b5c657..209786a 100644
--- a/test/530-checker-loops3/src/Main.java
+++ b/test/530-checker-loops3/src/Main.java
@@ -246,7 +246,7 @@
 
     oneConstantIndex(a, b);
     for (int i = 0; i < a.length; i++) {
-      expectEquals(2, a[i]);;
+      expectEquals(2, a[i]);
     }
     try {
       oneConstantIndex(a, b1);
@@ -256,7 +256,7 @@
 
     multipleConstantIndices(a, b);
     for (int i = 0; i < a.length; i++) {
-      expectEquals(6, a[i]);;
+      expectEquals(6, a[i]);
     }
     try {
       multipleConstantIndices(a, b1);
@@ -266,7 +266,7 @@
 
     oneInvariantIndex(a, b, 1);
     for (int i = 0; i < a.length; i++) {
-      expectEquals(2, a[i]);;
+      expectEquals(2, a[i]);
     }
     try {
       oneInvariantIndex(a, b1, 1);
@@ -276,7 +276,7 @@
 
     multipleInvariantIndices(a, b, 1);
     for (int i = 0; i < a.length; i++) {
-      expectEquals(6, a[i]);;
+      expectEquals(6, a[i]);
     }
     try {
       multipleInvariantIndices(a, b1, 1);
@@ -286,18 +286,18 @@
 
     oneUnitStride(a, b);
     for (int i = 0; i < a.length; i++) {
-      expectEquals(i + 1, a[i]);;
+      expectEquals(i + 1, a[i]);
     }
     try {
       oneUnitStride(a, b1);
       throw new Error("Should throw AIOOBE");
     } catch (ArrayIndexOutOfBoundsException e) {
-      expectEquals(100, a[0]);;
+      expectEquals(100, a[0]);
     }
 
     multipleUnitStrides(a, b);
     for (int i = 1; i < a.length - 1; i++) {
-      expectEquals(3 * i + 3, a[i]);;
+      expectEquals(3 * i + 3, a[i]);
     }
     try {
       multipleUnitStrides(a, b1);
@@ -308,7 +308,7 @@
     multipleUnitStridesConditional(a, b);
     for (int i = 2; i < a.length - 2; i++) {
       int e = 3 * i + 3 + (((i & 1) == 0) ? i + 2 : i);
-      expectEquals(e, a[i]);;
+      expectEquals(e, a[i]);
     }
     try {
       multipleUnitStridesConditional(a, b1);
diff --git a/test/530-checker-lse/expected.txt b/test/530-checker-lse/expected.txt
index e69de29..ddae16a 100644
--- a/test/530-checker-lse/expected.txt
+++ b/test/530-checker-lse/expected.txt
@@ -0,0 +1 @@
+java.lang.ArrayIndexOutOfBoundsException: length=3; index=3
diff --git a/test/530-checker-lse/src/Main.java b/test/530-checker-lse/src/Main.java
index 6b0dedf..9f4be6c 100644
--- a/test/530-checker-lse/src/Main.java
+++ b/test/530-checker-lse/src/Main.java
@@ -18,6 +18,9 @@
   Circle(double radius) {
     this.radius = radius;
   }
+  public double getRadius() {
+    return radius;
+  }
   public double getArea() {
     return radius * radius * Math.PI;
   }
@@ -744,6 +747,44 @@
     return 1.0f;
   }
 
+  /// CHECK-START: double Main.getCircleArea(double, boolean) load_store_elimination (before)
+  /// CHECK: NewInstance
+
+  /// CHECK-START: double Main.getCircleArea(double, boolean) load_store_elimination (after)
+  /// CHECK-NOT: NewInstance
+
+  private static double getCircleArea(double radius, boolean b) {
+    double area = 0d;
+    if (b) {
+      area = new Circle(radius).getArea();
+    }
+    return area;
+  }
+
+  /// CHECK-START: double Main.testDeoptimize(int[], double[], double) load_store_elimination (before)
+  /// CHECK: Deoptimize
+  /// CHECK: NewInstance
+  /// CHECK: Deoptimize
+  /// CHECK: NewInstance
+
+  /// CHECK-START: double Main.testDeoptimize(int[], double[], double) load_store_elimination (after)
+  /// CHECK: Deoptimize
+  /// CHECK: NewInstance
+  /// CHECK: Deoptimize
+  /// CHECK-NOT: NewInstance
+
+  private static double testDeoptimize(int[] iarr, double[] darr, double radius) {
+    iarr[0] = 1;  // One HDeoptimize here. Not triggered.
+    iarr[1] = 1;
+    Circle circle1 = new Circle(radius);
+    iarr[2] = 1;
+    darr[0] = circle1.getRadius();  // One HDeoptimize here, which holds circle1 live. Triggered.
+    darr[1] = circle1.getRadius();
+    darr[2] = circle1.getRadius();
+    darr[3] = circle1.getRadius();
+    return new Circle(Math.PI).getArea();
+  }
+
   static void assertIntEquals(int result, int expected) {
     if (expected != result) {
       throw new Error("Expected: " + expected + ", found: " + result);
@@ -808,6 +849,22 @@
     assertIntEquals(sumWithinRange(array, 1, 5), 11);
     assertFloatEquals(testAllocationEliminationWithLoops(), 1.0f);
     assertFloatEquals(mF, 0f);
+    assertDoubleEquals(Math.PI * Math.PI * Math.PI, getCircleArea(Math.PI, true));
+    assertDoubleEquals(0d, getCircleArea(Math.PI, false));
+
+    int[] iarray = {0, 0, 0};
+    double[] darray = {0d, 0d, 0d};
+    try {
+      assertDoubleEquals(Math.PI * Math.PI * Math.PI, testDeoptimize(iarray, darray, Math.PI));
+    } catch (Exception e) {
+      System.out.println(e);
+    }
+    assertIntEquals(iarray[0], 1);
+    assertIntEquals(iarray[1], 1);
+    assertIntEquals(iarray[2], 1);
+    assertDoubleEquals(darray[0], Math.PI);
+    assertDoubleEquals(darray[1], Math.PI);
+    assertDoubleEquals(darray[2], Math.PI);
   }
 
   static boolean sFlag;
diff --git a/test/530-checker-lse2/expected.txt b/test/530-checker-lse2/expected.txt
new file mode 100644
index 0000000..e18fc7e
--- /dev/null
+++ b/test/530-checker-lse2/expected.txt
@@ -0,0 +1,8 @@
+Start....
+r  = 9.649776E8
+mZ = false
+mI = 0
+mJ = -576460752303423488
+mF = NaN
+mD = NaN
+Done....
diff --git a/test/530-checker-lse2/info.txt b/test/530-checker-lse2/info.txt
new file mode 100644
index 0000000..8dd3f50
--- /dev/null
+++ b/test/530-checker-lse2/info.txt
@@ -0,0 +1,2 @@
+Checker test for store/allocation elimination in the presence of
+HDeoptimize.
diff --git a/test/530-checker-lse2/src/Main.java b/test/530-checker-lse2/src/Main.java
new file mode 100644
index 0000000..0fe3d87
--- /dev/null
+++ b/test/530-checker-lse2/src/Main.java
@@ -0,0 +1,208 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.util.Arrays;
+
+// Modified from a fuzz test.
+public class Main {
+
+  private interface X {
+    int x();
+  }
+
+  private class A {
+    public int a() {
+      return (+ (Math.multiplyExact(mI, mI)));
+    }
+  }
+
+  private class B extends A implements X {
+    public int a() {
+      return super.a() + ((int) (Math.max(364746077.0f, ((float) mD))));
+    }
+    public int x() {
+      return (mI >> (mI++));
+    }
+  }
+
+  private static class C implements X {
+    public static int s() {
+      return 671468641;
+    }
+    public int c() {
+      return -383762838;
+    }
+    public int x() {
+      return -138813312;
+    }
+  }
+
+  private A mA  = new B();
+  private B mB  = new B();
+  private X mBX = new B();
+  private C mC  = new C();
+  private X mCX = new C();
+
+  private boolean mZ = false;
+  private int     mI = 0;
+  private long    mJ = 0;
+  private float   mF = 0;
+  private double  mD = 0;
+
+  private boolean[] mArray = new boolean[576];
+
+  private Main() {
+    boolean a = false;
+    for (int i0 = 0; i0 < 576; i0++) {
+      mArray[i0] = a;
+      a = !a;
+    }
+  }
+
+  /// CHECK-START: float Main.testMethod() load_store_elimination (before)
+  /// CHECK-DAG: Deoptimize
+  /// CHECK-DAG: Deoptimize
+  /// CHECK-DAG: NewInstance
+  /// CHECK-DAG: NewInstance
+  /// CHECK-DAG: NewInstance
+  /// CHECK-DAG: NewInstance
+  /// CHECK-DAG: NewInstance
+  /// CHECK-DAG: NewInstance
+  /// CHECK-DAG: NewInstance
+  /// CHECK-DAG: NewInstance
+  /// CHECK-DAG: NewInstance
+  /// CHECK-DAG: NewInstance
+  /// CHECK-DAG: NewInstance
+  /// CHECK-DAG: NewInstance
+  /// CHECK-DAG: NewInstance
+  /// CHECK-DAG: NewInstance
+  /// CHECK-NOT: NewInstance
+
+  /// CHECK-START: float Main.testMethod() load_store_elimination (after)
+  /// CHECK-DAG: Deoptimize
+  /// CHECK-DAG: Deoptimize
+  /// CHECK-NOT: NewInstance
+
+  private float testMethod() {
+    {
+      int lI0 = (-1456058746 << mI);
+      mD = ((double)(int)(double) mD);
+      for (int i0 = 56 - 1; i0 >= 0; i0--) {
+        mArray[i0] &= (Boolean.logicalOr(((true ? ((boolean) new Boolean((mZ))) : mZ) || mArray[i0]), (mZ)));
+        mF *= (mF * mF);
+        if ((mZ ^ true)) {
+          mF *= ((float)(int)(float) 267827331.0f);
+          mZ ^= ((false & ((boolean) new Boolean(false))) | mZ);
+          for (int i1 = 576 - 1; i1 >= 0; i1--) {
+            mZ &= ((mArray[279]) | ((boolean) new Boolean(true)));
+            mD -= (--mD);
+            for (int i2 = 56 - 1; i2 >= 0; i2--) {
+              mF /= (mF - mF);
+              mI = (Math.min(((int) new Integer(mI)), (766538816 * (++mI))));
+              mF += (mZ ? (mB.a()) : ((! mZ) ? -752042357.0f : (++mF)));
+              mJ |= ((long) new Long((-2084191070L + (mJ | mJ))));
+              lI0 |= ((int) new Integer(((int) new Integer(mI))));
+              if (((boolean) new Boolean(false))) {
+                mZ &= (mZ);
+                mF *= (mF--);
+                mD = (Double.POSITIVE_INFINITY);
+                mF += ((float)(int)(float) (-2026938813.0f * 638401585.0f));
+                mJ = (--mJ);
+                for (int i3 = 56 - 1; i3 >= 0; i3--) {
+                  mI &= (- mI);
+                  mD = (--mD);
+                  mArray[426] = (mZ || false);
+                  mF -= (((this instanceof Main) ? mF : mF) + 976981405.0f);
+                  mZ &= ((mZ) & (this instanceof Main));
+                }
+                mZ ^= (Float.isFinite(-1975953895.0f));
+              } else {
+                mJ /= ((long) (Math.nextDown(-1519600008.0f)));
+                mJ <<= (Math.round(1237681786.0));
+              }
+            }
+            mArray[i0] &= (false || ((1256071300.0f != -353296391.0f) ? false : (mZ ^ mArray[i0])));
+            mF *= (+ ((float) mD));
+            for (int i2 = 0; i2 < 576; i2++) {
+              mD *= ((double) lI0);
+              lI0 = (lI0 & (Integer.MIN_VALUE));
+              mF -= (--mF);
+            }
+            if ((this instanceof Main)) {
+              mZ ^= ((boolean) new Boolean(true));
+            } else {
+              {
+                int lI1 = (mZ ? (--lI0) : 1099574344);
+                mJ >>= (Math.incrementExact(mJ));
+                mJ = (~ -2103354070L);
+              }
+            }
+          }
+        } else {
+          mJ *= (- ((long) new Long(479832084L)));
+          mJ %= (Long.MAX_VALUE);
+          mD /= (--mD);
+          if ((mI > ((mBX.x()) << mI))) {
+            {
+              long lJ0 = (mJ--);
+              mI >>>= (mBX.x());
+            }
+            mF = (+ 505094603.0f);
+            mD *= (((boolean) new Boolean((! false))) ? mD : 1808773781.0);
+            mI *= (Integer.MIN_VALUE);
+            for (int i1 = 576 - 1; i1 >= 0; i1--) {
+              if (((boolean) new Boolean(false))) {
+                mD += ((double)(float)(double) -1051436901.0);
+              } else {
+                mF -= ((float)(int)(float) (Float.min(mF, (mF--))));
+              }
+              for (int i2 = 0; i2 < 576; i2++) {
+                mJ -= ((long) new Long(-1968644857L));
+                mJ ^= (+ (mC.s()));
+              }
+            }
+          } else {
+            mF -= ((- mF) + -2145489966.0f);
+          }
+          mD -= (mD++);
+          mD = (949112777.0 * 1209996119.0);
+        }
+        mZ &= (Boolean.logicalAnd(true, ((mZ) & (((boolean) new Boolean(true)) && true))));
+      }
+    }
+    return ((float) 964977619L);
+  }
+
+  public static void main(String[] args) {
+    System.out.println("Start....");
+    Main t = new Main();
+    float r = 1883600237.0f;
+    try {
+      r = t.testMethod();
+    } catch (Exception e) {
+      // Arithmetic, null pointer, index out of bounds, etc.
+      System.out.println("An exception was caught.");
+    }
+    System.out.println("r  = " + r);
+    System.out.println("mZ = " + t.mZ);
+    System.out.println("mI = " + t.mI);
+    System.out.println("mJ = " + t.mJ);
+    System.out.println("mF = " + t.mF);
+    System.out.println("mD = " + t.mD);
+    System.out.println("Done....");
+  }
+}
+
diff --git a/test/543-checker-dce-trycatch/smali/TestCase.smali b/test/543-checker-dce-trycatch/smali/TestCase.smali
index 5557c7b..f50e01e 100644
--- a/test/543-checker-dce-trycatch/smali/TestCase.smali
+++ b/test/543-checker-dce-trycatch/smali/TestCase.smali
@@ -26,18 +26,18 @@
 # Test a case when one entering TryBoundary is dead but the rest of the try
 # block remains live.
 
-## CHECK-START: int TestCase.testDeadEntry(int, int, int, int) dead_code_elimination$final (before)
+## CHECK-START: int TestCase.testDeadEntry(int, int, int, int) dead_code_elimination$after_inlining (before)
 ## CHECK: Add
 
-## CHECK-START: int TestCase.testDeadEntry(int, int, int, int) dead_code_elimination$final (before)
+## CHECK-START: int TestCase.testDeadEntry(int, int, int, int) dead_code_elimination$after_inlining (before)
 ## CHECK:     TryBoundary kind:entry
 ## CHECK:     TryBoundary kind:entry
 ## CHECK-NOT: TryBoundary kind:entry
 
-## CHECK-START: int TestCase.testDeadEntry(int, int, int, int) dead_code_elimination$final (after)
+## CHECK-START: int TestCase.testDeadEntry(int, int, int, int) dead_code_elimination$after_inlining (after)
 ## CHECK-NOT: Add
 
-## CHECK-START: int TestCase.testDeadEntry(int, int, int, int) dead_code_elimination$final (after)
+## CHECK-START: int TestCase.testDeadEntry(int, int, int, int) dead_code_elimination$after_inlining (after)
 ## CHECK:     TryBoundary kind:entry
 ## CHECK-NOT: TryBoundary kind:entry
 
@@ -71,18 +71,18 @@
 # Test a case when one exiting TryBoundary is dead but the rest of the try
 # block remains live.
 
-## CHECK-START: int TestCase.testDeadExit(int, int, int, int) dead_code_elimination$final (before)
+## CHECK-START: int TestCase.testDeadExit(int, int, int, int) dead_code_elimination$after_inlining (before)
 ## CHECK: Add
 
-## CHECK-START: int TestCase.testDeadExit(int, int, int, int) dead_code_elimination$final (before)
+## CHECK-START: int TestCase.testDeadExit(int, int, int, int) dead_code_elimination$after_inlining (before)
 ## CHECK:     TryBoundary kind:exit
 ## CHECK:     TryBoundary kind:exit
 ## CHECK-NOT: TryBoundary kind:exit
 
-## CHECK-START: int TestCase.testDeadExit(int, int, int, int) dead_code_elimination$final (after)
+## CHECK-START: int TestCase.testDeadExit(int, int, int, int) dead_code_elimination$after_inlining (after)
 ## CHECK-NOT: Add
 
-## CHECK-START: int TestCase.testDeadExit(int, int, int, int) dead_code_elimination$final (after)
+## CHECK-START: int TestCase.testDeadExit(int, int, int, int) dead_code_elimination$after_inlining (after)
 ## CHECK:     TryBoundary kind:exit
 ## CHECK-NOT: TryBoundary kind:exit
 
@@ -117,21 +117,21 @@
 # Test that a catch block remains live and consistent if some of try blocks
 # throwing into it are removed.
 
-## CHECK-START: int TestCase.testOneTryBlockDead(int, int, int, int) dead_code_elimination$final (before)
+## CHECK-START: int TestCase.testOneTryBlockDead(int, int, int, int) dead_code_elimination$after_inlining (before)
 ## CHECK:     TryBoundary kind:entry
 ## CHECK:     TryBoundary kind:entry
 ## CHECK-NOT: TryBoundary kind:entry
 
-## CHECK-START: int TestCase.testOneTryBlockDead(int, int, int, int) dead_code_elimination$final (before)
+## CHECK-START: int TestCase.testOneTryBlockDead(int, int, int, int) dead_code_elimination$after_inlining (before)
 ## CHECK:     TryBoundary kind:exit
 ## CHECK:     TryBoundary kind:exit
 ## CHECK-NOT: TryBoundary kind:exit
 
-## CHECK-START: int TestCase.testOneTryBlockDead(int, int, int, int) dead_code_elimination$final (after)
+## CHECK-START: int TestCase.testOneTryBlockDead(int, int, int, int) dead_code_elimination$after_inlining (after)
 ## CHECK:     TryBoundary kind:entry
 ## CHECK-NOT: TryBoundary kind:entry
 
-## CHECK-START: int TestCase.testOneTryBlockDead(int, int, int, int) dead_code_elimination$final (after)
+## CHECK-START: int TestCase.testOneTryBlockDead(int, int, int, int) dead_code_elimination$after_inlining (after)
 ## CHECK:     TryBoundary kind:exit
 ## CHECK-NOT: TryBoundary kind:exit
 
@@ -203,7 +203,7 @@
 
 # Test that DCE removes catch phi uses of instructions defined in dead try blocks.
 
-## CHECK-START: int TestCase.testCatchPhiInputs_DefinedInTryBlock(int, int, int, int) dead_code_elimination$final (before)
+## CHECK-START: int TestCase.testCatchPhiInputs_DefinedInTryBlock(int, int, int, int) dead_code_elimination$after_inlining (before)
 ## CHECK-DAG:     <<Arg0:i\d+>>      ParameterValue
 ## CHECK-DAG:     <<Arg1:i\d+>>      ParameterValue
 ## CHECK-DAG:     <<Const0xa:i\d+>>  IntConstant 10
@@ -220,7 +220,7 @@
 ## CHECK-DAG:                        Phi [<<Add>>,<<Const0xc>>,<<Const0xe>>] reg:2 is_catch_phi:true
 ## CHECK-DAG:                        Phi [<<Select>>,<<Const0x10>>,<<Const0x11>>] reg:3 is_catch_phi:true
 
-## CHECK-START: int TestCase.testCatchPhiInputs_DefinedInTryBlock(int, int, int, int) dead_code_elimination$final (after)
+## CHECK-START: int TestCase.testCatchPhiInputs_DefinedInTryBlock(int, int, int, int) dead_code_elimination$after_inlining (after)
 ## CHECK-DAG:     <<Const0xb:i\d+>>  IntConstant 11
 ## CHECK-DAG:     <<Const0xc:i\d+>>  IntConstant 12
 ## CHECK-DAG:     <<Const0xd:i\d+>>  IntConstant 13
@@ -277,7 +277,7 @@
 # Test that DCE does not remove catch phi uses of instructions defined outside
 # dead try blocks.
 
-## CHECK-START: int TestCase.testCatchPhiInputs_DefinedOutsideTryBlock(int, int, int, int) dead_code_elimination$final (before)
+## CHECK-START: int TestCase.testCatchPhiInputs_DefinedOutsideTryBlock(int, int, int, int) dead_code_elimination$after_inlining (before)
 ## CHECK-DAG:     <<Const0xa:i\d+>> IntConstant 10
 ## CHECK-DAG:     <<Const0xb:i\d+>> IntConstant 11
 ## CHECK-DAG:     <<Const0xc:i\d+>> IntConstant 12
@@ -287,7 +287,7 @@
 ## CHECK-DAG:                       Phi [<<Const0xa>>,<<Const0xb>>,<<Const0xd>>] reg:1 is_catch_phi:true
 ## CHECK-DAG:                       Phi [<<Const0xf>>,<<Const0xc>>,<<Const0xe>>] reg:2 is_catch_phi:true
 
-## CHECK-START: int TestCase.testCatchPhiInputs_DefinedOutsideTryBlock(int, int, int, int) dead_code_elimination$final (after)
+## CHECK-START: int TestCase.testCatchPhiInputs_DefinedOutsideTryBlock(int, int, int, int) dead_code_elimination$after_inlining (after)
 ## CHECK-DAG:     <<Const0xa:i\d+>> IntConstant 10
 ## CHECK-DAG:     <<Const0xb:i\d+>> IntConstant 11
 ## CHECK-DAG:     <<Const0xc:i\d+>> IntConstant 12
diff --git a/test/543-checker-dce-trycatch/src/Main.java b/test/543-checker-dce-trycatch/src/Main.java
index 19587e7..0d7596a 100644
--- a/test/543-checker-dce-trycatch/src/Main.java
+++ b/test/543-checker-dce-trycatch/src/Main.java
@@ -35,10 +35,10 @@
   // where TryBoundary still has exception handler successors after having removed
   // some already.
 
-  /// CHECK-START: void Main.testDeadTryCatch(boolean) dead_code_elimination$final (after)
+  /// CHECK-START: void Main.testDeadTryCatch(boolean) dead_code_elimination$after_inlining (after)
   /// CHECK-NOT: TryBoundary
 
-  /// CHECK-START: void Main.testDeadTryCatch(boolean) dead_code_elimination$final (after)
+  /// CHECK-START: void Main.testDeadTryCatch(boolean) dead_code_elimination$after_inlining (after)
   /// CHECK: begin_block
   /// CHECK: begin_block
   /// CHECK: begin_block
@@ -63,4 +63,4 @@
   public static void main(String[] args) {
 
   }
-}
\ No newline at end of file
+}
diff --git a/test/552-checker-sharpening/src/Main.java b/test/552-checker-sharpening/src/Main.java
index 3c053cf..9e475ab 100644
--- a/test/552-checker-sharpening/src/Main.java
+++ b/test/552-checker-sharpening/src/Main.java
@@ -252,27 +252,27 @@
   /// CHECK-START-X86: java.lang.String Main.$noinline$getBootImageString() sharpening (after)
   // Note: load kind depends on PIC/non-PIC
   // TODO: Remove DexCacheViaMethod when read barrier config supports BootImageAddress.
-  /// CHECK:                LoadString load_kind:{{BootImageAddress|DexCachePcRelative|DexCacheViaMethod}}
+  /// CHECK:                LoadString load_kind:{{BootImageAddress|BssEntry|DexCacheViaMethod}}
 
   /// CHECK-START-X86_64: java.lang.String Main.$noinline$getBootImageString() sharpening (after)
   // Note: load kind depends on PIC/non-PIC
   // TODO: Remove DexCacheViaMethod when read barrier config supports BootImageAddress.
-  /// CHECK:                LoadString load_kind:{{BootImageAddress|DexCachePcRelative|DexCacheViaMethod}}
+  /// CHECK:                LoadString load_kind:{{BootImageAddress|BssEntry|DexCacheViaMethod}}
 
   /// CHECK-START-ARM: java.lang.String Main.$noinline$getBootImageString() sharpening (after)
   // Note: load kind depends on PIC/non-PIC
   // TODO: Remove DexCacheViaMethod when read barrier config supports BootImageAddress.
-  /// CHECK:                LoadString load_kind:{{BootImageAddress|DexCachePcRelative|DexCacheViaMethod}}
+  /// CHECK:                LoadString load_kind:{{BootImageAddress|BssEntry|DexCacheViaMethod}}
 
   /// CHECK-START-ARM64: java.lang.String Main.$noinline$getBootImageString() sharpening (after)
   // Note: load kind depends on PIC/non-PIC
   // TODO: Remove DexCacheViaMethod when read barrier config supports BootImageAddress.
-  /// CHECK:                LoadString load_kind:{{BootImageAddress|DexCachePcRelative|DexCacheViaMethod}}
+  /// CHECK:                LoadString load_kind:{{BootImageAddress|BssEntry|DexCacheViaMethod}}
 
   /// CHECK-START-MIPS: java.lang.String Main.$noinline$getBootImageString() sharpening (after)
   // Note: load kind depends on PIC/non-PIC
   // TODO: Remove DexCacheViaMethod when read barrier config supports BootImageAddress.
-  /// CHECK:                LoadString load_kind:{{BootImageAddress|DexCachePcRelative|DexCacheViaMethod}}
+  /// CHECK:                LoadString load_kind:{{BootImageAddress|BssEntry|DexCacheViaMethod}}
 
   public static String $noinline$getBootImageString() {
     // Prevent inlining to avoid the string comparison being optimized away.
@@ -303,10 +303,6 @@
   /// CHECK-START-MIPS: java.lang.String Main.$noinline$getNonBootImageString() sharpening (after)
   /// CHECK:                LoadString load_kind:BssEntry
 
-  /// CHECK-START-MIPS: java.lang.String Main.$noinline$getNonBootImageString() pc_relative_fixups_mips (after)
-  /// CHECK-DAG:            MipsComputeBaseMethodAddress
-  /// CHECK-DAG:            LoadString load_kind:BssEntry
-
   public static String $noinline$getNonBootImageString() {
     // Prevent inlining to avoid the string comparison being optimized away.
     if (doThrow) { throw new Error(); }
diff --git a/test/559-checker-irreducible-loop/smali/IrreducibleLoop.smali b/test/559-checker-irreducible-loop/smali/IrreducibleLoop.smali
index 5d4aa56..af43973 100644
--- a/test/559-checker-irreducible-loop/smali/IrreducibleLoop.smali
+++ b/test/559-checker-irreducible-loop/smali/IrreducibleLoop.smali
@@ -196,7 +196,7 @@
   const-class v0, LMain;
   if-ne v0, v2, :exit
   :other_loop_entry
-  const-class v1, LIrreducibleLoop;
+  const-class v1, Ljava/lang/Class;  # LoadClass that can throw
   goto :loop_entry
   :exit
   return-object v0
@@ -250,7 +250,7 @@
   const/4 v0, 0
   if-ne p0, v0, :other_loop_entry
   :loop_entry
-  const-class v1, LIrreducibleLoop;
+  const-class v1, Ljava/lang/Class;  # LoadClass that can throw
   if-ne v0, p0, :exit
   :other_loop_entry
   sub-int v1, p0, p0
@@ -286,7 +286,7 @@
 .method public static licm3(III)I
   .registers 4
   :loop_entry
-  const-class v0, LIrreducibleLoop;
+  const-class v0, Ljava/lang/Class;  # LoadClass that can throw
   if-ne p1, p2, :exit
   goto :loop_body
 
diff --git a/test/562-no-intermediate/expected.txt b/test/562-checker-no-intermediate/expected.txt
similarity index 100%
rename from test/562-no-intermediate/expected.txt
rename to test/562-checker-no-intermediate/expected.txt
diff --git a/test/562-checker-no-intermediate/info.txt b/test/562-checker-no-intermediate/info.txt
new file mode 100644
index 0000000..38f1f65
--- /dev/null
+++ b/test/562-checker-no-intermediate/info.txt
@@ -0,0 +1,2 @@
+Regression test for optimizing, checking that there is no
+intermediate address live across a Java call.
diff --git a/test/562-checker-no-intermediate/src/Main.java b/test/562-checker-no-intermediate/src/Main.java
new file mode 100644
index 0000000..104ba8b
--- /dev/null
+++ b/test/562-checker-no-intermediate/src/Main.java
@@ -0,0 +1,97 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+
+  /**
+   * Check that the intermediate address computation is not reordered or merged
+   * across the call to Math.abs().
+   */
+
+  /// CHECK-START-ARM: void Main.main(java.lang.String[]) instruction_simplifier_arm (before)
+  /// CHECK-DAG:           <<ConstM42:i\d+>>      IntConstant -42
+  /// CHECK-DAG:           <<Array:l\d+>>         NullCheck
+  /// CHECK-DAG:           <<Index:i\d+>>         BoundsCheck
+  /// CHECK-DAG:           <<ArrayGet:i\d+>>      ArrayGet [<<Array>>,<<Index>>]
+  /// CHECK-DAG:           <<AbsM42:i\d+>>        InvokeStaticOrDirect [<<ConstM42>>] intrinsic:MathAbsInt
+  /// CHECK-DAG:           <<Add:i\d+>>           Add [<<ArrayGet>>,<<AbsM42>>]
+  /// CHECK-DAG:                                  ArraySet [<<Array>>,<<Index>>,<<Add>>]
+
+  /// CHECK-START-ARM: void Main.main(java.lang.String[]) instruction_simplifier_arm (after)
+  /// CHECK-DAG:           <<ConstM42:i\d+>>      IntConstant -42
+  /// CHECK-DAG:           <<DataOffset:i\d+>>    IntConstant
+  /// CHECK-DAG:           <<Array:l\d+>>         NullCheck
+  /// CHECK-DAG:           <<Index:i\d+>>         BoundsCheck
+  /// CHECK-DAG:           <<Address1:i\d+>>      IntermediateAddress [<<Array>>,<<DataOffset>>]
+  /// CHECK-DAG:           <<ArrayGet:i\d+>>      ArrayGet [<<Address1>>,<<Index>>]
+  /// CHECK-DAG:           <<AbsM42:i\d+>>        InvokeStaticOrDirect [<<ConstM42>>] intrinsic:MathAbsInt
+  /// CHECK-DAG:           <<Add:i\d+>>           Add [<<ArrayGet>>,<<AbsM42>>]
+  /// CHECK-DAG:           <<Address2:i\d+>>      IntermediateAddress [<<Array>>,<<DataOffset>>]
+  /// CHECK-DAG:                                  ArraySet [<<Address2>>,<<Index>>,<<Add>>]
+
+  /// CHECK-START-ARM: void Main.main(java.lang.String[]) GVN$after_arch (after)
+  /// CHECK-DAG:           <<ConstM42:i\d+>>      IntConstant -42
+  /// CHECK-DAG:           <<DataOffset:i\d+>>    IntConstant
+  /// CHECK-DAG:           <<Array:l\d+>>         NullCheck
+  /// CHECK-DAG:           <<Index:i\d+>>         BoundsCheck
+  /// CHECK-DAG:           <<Address1:i\d+>>      IntermediateAddress [<<Array>>,<<DataOffset>>]
+  /// CHECK-DAG:           <<ArrayGet:i\d+>>      ArrayGet [<<Address1>>,<<Index>>]
+  /// CHECK-DAG:           <<AbsM42:i\d+>>        InvokeStaticOrDirect [<<ConstM42>>] intrinsic:MathAbsInt
+  /// CHECK-DAG:           <<Add:i\d+>>           Add [<<ArrayGet>>,<<AbsM42>>]
+  /// CHECK-DAG:           <<Address2:i\d+>>      IntermediateAddress [<<Array>>,<<DataOffset>>]
+  /// CHECK-DAG:                                  ArraySet [<<Address2>>,<<Index>>,<<Add>>]
+
+
+  /// CHECK-START-ARM64: void Main.main(java.lang.String[]) instruction_simplifier_arm64 (before)
+  /// CHECK-DAG:           <<ConstM42:i\d+>>      IntConstant -42
+  /// CHECK-DAG:           <<Array:l\d+>>         NullCheck
+  /// CHECK-DAG:           <<Index:i\d+>>         BoundsCheck
+  /// CHECK-DAG:           <<ArrayGet:i\d+>>      ArrayGet [<<Array>>,<<Index>>]
+  /// CHECK-DAG:           <<AbsM42:i\d+>>        InvokeStaticOrDirect [<<ConstM42>>] intrinsic:MathAbsInt
+  /// CHECK-DAG:           <<Add:i\d+>>           Add [<<ArrayGet>>,<<AbsM42>>]
+  /// CHECK-DAG:                                  ArraySet [<<Array>>,<<Index>>,<<Add>>]
+
+  /// CHECK-START-ARM64: void Main.main(java.lang.String[]) instruction_simplifier_arm64 (after)
+  /// CHECK-DAG:           <<ConstM42:i\d+>>      IntConstant -42
+  /// CHECK-DAG:           <<DataOffset:i\d+>>    IntConstant
+  /// CHECK-DAG:           <<Array:l\d+>>         NullCheck
+  /// CHECK-DAG:           <<Index:i\d+>>         BoundsCheck
+  /// CHECK-DAG:           <<Address1:i\d+>>      IntermediateAddress [<<Array>>,<<DataOffset>>]
+  /// CHECK-DAG:           <<ArrayGet:i\d+>>      ArrayGet [<<Address1>>,<<Index>>]
+  /// CHECK-DAG:           <<AbsM42:i\d+>>        InvokeStaticOrDirect [<<ConstM42>>] intrinsic:MathAbsInt
+  /// CHECK-DAG:           <<Add:i\d+>>           Add [<<ArrayGet>>,<<AbsM42>>]
+  /// CHECK-DAG:           <<Address2:i\d+>>      IntermediateAddress [<<Array>>,<<DataOffset>>]
+  /// CHECK-DAG:                                  ArraySet [<<Address2>>,<<Index>>,<<Add>>]
+
+  /// CHECK-START-ARM64: void Main.main(java.lang.String[]) GVN$after_arch (after)
+  /// CHECK-DAG:           <<ConstM42:i\d+>>      IntConstant -42
+  /// CHECK-DAG:           <<DataOffset:i\d+>>    IntConstant
+  /// CHECK-DAG:           <<Array:l\d+>>         NullCheck
+  /// CHECK-DAG:           <<Index:i\d+>>         BoundsCheck
+  /// CHECK-DAG:           <<Address1:i\d+>>      IntermediateAddress [<<Array>>,<<DataOffset>>]
+  /// CHECK-DAG:           <<ArrayGet:i\d+>>      ArrayGet [<<Address1>>,<<Index>>]
+  /// CHECK-DAG:           <<AbsM42:i\d+>>        InvokeStaticOrDirect [<<ConstM42>>] intrinsic:MathAbsInt
+  /// CHECK-DAG:           <<Add:i\d+>>           Add [<<ArrayGet>>,<<AbsM42>>]
+  /// CHECK-DAG:           <<Address2:i\d+>>      IntermediateAddress [<<Array>>,<<DataOffset>>]
+  /// CHECK-DAG:                                  ArraySet [<<Address2>>,<<Index>>,<<Add>>]
+
+  public static void main(String[] args) {
+    array[index] += Math.abs(-42);
+  }
+
+  static int index = 0;
+  static int[] array = new int[2];
+}
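
The checks above expect two separate IntermediateAddress instructions to survive even GVN, rather than a single value kept live across the Math.abs() call. Presumably this is because the call is a safepoint at which the garbage collector may move the array, so the raw "array base + data offset" pointer must be recomputed afterwards; a rough sketch of the intended shape (illustrative comments only, not compiler output):

    // addr1 = array + data_offset        -- raw interior pointer, not a GC root
    // tmp   = load(addr1 + index * 4)    -- ArrayGet
    // abs   = Math.abs(-42)              -- a call: the array may move here
    // addr2 = array + data_offset        -- recomputed, never reused across the call
    // store(addr2 + index * 4, tmp + abs)  -- ArraySet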
diff --git a/test/562-no-intermediate/info.txt b/test/562-no-intermediate/info.txt
deleted file mode 100644
index 4f21aeb..0000000
--- a/test/562-no-intermediate/info.txt
+++ /dev/null
@@ -1,2 +0,0 @@
-Regression test for optimizing, checking that there is no
-intermediate address between a Java call.
diff --git a/test/570-checker-osr/src/Main.java b/test/570-checker-osr/src/Main.java
index 8af3894..4de5634 100644
--- a/test/570-checker-osr/src/Main.java
+++ b/test/570-checker-osr/src/Main.java
@@ -17,26 +17,6 @@
 public class Main {
   public static void main(String[] args) {
     System.loadLibrary(args[0]);
-    Thread testThread = new Thread() {
-      public void run() {
-        performTest();
-      }
-    };
-    testThread.start();
-    try {
-      testThread.join(20 * 1000);  // 20s timeout.
-    } catch (InterruptedException ie) {
-      System.out.println("Interrupted.");
-      System.exit(1);
-    }
-    Thread.State state = testThread.getState();
-    if (state != Thread.State.TERMINATED) {
-      System.out.println("Test timed out, current state: " + state);
-      System.exit(1);
-    }
-  }
-
-  public static void performTest() {
     new SubMain();
     if ($noinline$returnInt() != 53) {
       throw new Error("Unexpected return value");
diff --git a/test/586-checker-null-array-get/src/Main.java b/test/586-checker-null-array-get/src/Main.java
index e0782bc..0ea7d34 100644
--- a/test/586-checker-null-array-get/src/Main.java
+++ b/test/586-checker-null-array-get/src/Main.java
@@ -100,7 +100,7 @@
   /// CHECK-DAG:                     Return [<<ArrayGet2>>]
   public static float test1() {
     Test1 test1 = getNullTest1();
-    Test2 test2 = getNullTest2();;
+    Test2 test2 = getNullTest2();
     int[] iarr = test1.iarr;
     float[] farr = test2.farr;
     iarr[0] = iarr[1];
diff --git a/test/611-checker-simplify-if/src/Main.java b/test/611-checker-simplify-if/src/Main.java
index 7dac007..c1d75ec 100644
--- a/test/611-checker-simplify-if/src/Main.java
+++ b/test/611-checker-simplify-if/src/Main.java
@@ -64,13 +64,13 @@
 
   // Test when the phi is the input of the if.
 
-  /// CHECK-START: void Main.testInline(java.lang.String[]) dead_code_elimination$final (before)
+  /// CHECK-START: void Main.testInline(java.lang.String[]) dead_code_elimination$after_inlining (before)
   /// CHECK-DAG: <<Const0:i\d+>>   IntConstant 0
   /// CHECK-DAG:                   If
   /// CHECK-DAG: <<Phi:i\d+>>      Phi
   /// CHECK-DAG:                   If [<<Phi>>]
 
-  /// CHECK-START: void Main.testInline(java.lang.String[]) dead_code_elimination$final (after)
+  /// CHECK-START: void Main.testInline(java.lang.String[]) dead_code_elimination$after_inlining (after)
   /// CHECK:      If
   /// CHECK-NOT:  Phi
   /// CHECK-NOT:  If
@@ -144,7 +144,7 @@
   /// CHECK-NOT:                          GreaterThanOrEqual
   /// CHECK-NOT:                          If
   public static void testGreaterCondition(String[] args) {
-    int a = 42;;
+    int a = 42;
     if (args.length == 42) {
       a = 34;
     } else {
diff --git a/test/618-checker-induction/src/Main.java b/test/618-checker-induction/src/Main.java
index d8bc611..f85479a 100644
--- a/test/618-checker-induction/src/Main.java
+++ b/test/618-checker-induction/src/Main.java
@@ -92,6 +92,43 @@
     }
   }
 
+  /// CHECK-START: void Main.deadConditional(int) loop_optimization (before)
+  /// CHECK-DAG: Phi loop:{{B\d+}} outer_loop:none
+  //
+  /// CHECK-START: void Main.deadConditional(int) loop_optimization (after)
+  /// CHECK-NOT: Phi loop:{{B\d+}}
+  public static void deadConditional(int n) {
+    int k = 0;
+    int m = 0;
+    for (int i = 0; i < n; i++) {
+      if (i == 3)
+        k = i;
+      else
+        m = i;
+    }
+  }
+
+  /// CHECK-START: void Main.deadConditionalCycle(int) loop_optimization (before)
+  /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: Phi loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: Phi loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: Phi loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: Phi loop:<<Loop>>      outer_loop:none
+  //
+  /// CHECK-START: void Main.deadConditionalCycle(int) loop_optimization (after)
+  /// CHECK-NOT: Phi loop:{{B\d+}}
+  public static void deadConditionalCycle(int n) {
+    int k = 0;
+    int m = 0;
+    for (int i = 0; i < n; i++) {
+      if (i == 3)
+        k--;
+      else
+        m++;
+    }
+  }
+
+
   /// CHECK-START: void Main.deadInduction() loop_optimization (before)
   /// CHECK-DAG: Phi      loop:<<Loop:B\d+>> outer_loop:none
   /// CHECK-DAG: Phi      loop:<<Loop>>      outer_loop:none
@@ -668,6 +705,8 @@
     potentialInfiniteLoop(4);
     deadNestedLoops();
     deadNestedAndFollowingLoops();
+    deadConditional(4);
+    deadConditionalCycle(4);
 
     deadInduction();
     for (int i = 0; i < a.length; i++) {
diff --git a/test/620-checker-bce-intrinsics/expected.txt b/test/620-checker-bce-intrinsics/expected.txt
new file mode 100644
index 0000000..b0aad4d
--- /dev/null
+++ b/test/620-checker-bce-intrinsics/expected.txt
@@ -0,0 +1 @@
+passed
diff --git a/test/620-checker-bce-intrinsics/info.txt b/test/620-checker-bce-intrinsics/info.txt
new file mode 100644
index 0000000..a868845
--- /dev/null
+++ b/test/620-checker-bce-intrinsics/info.txt
@@ -0,0 +1 @@
+Test on bounds check elimination in loops using intrinsics.
diff --git a/test/620-checker-bce-intrinsics/src/Main.java b/test/620-checker-bce-intrinsics/src/Main.java
new file mode 100644
index 0000000..afc3c65
--- /dev/null
+++ b/test/620-checker-bce-intrinsics/src/Main.java
@@ -0,0 +1,285 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Tests on bounds check elimination in loops that use intrinsics.
+ * All bounds checks below should be statically eliminated.
+ */
+public class Main {
+
+  /// CHECK-START: int Main.oneArray(int[]) BCE (before)
+  /// CHECK-DAG: BoundsCheck loop:<<Loop:B\d+>> outer_loop:none
+  //
+  /// CHECK-START: int Main.oneArray(int[]) BCE (after)
+  /// CHECK-NOT: BoundsCheck
+  /// CHECK-NOT: Deoptimize
+  static int oneArray(int[] a) {
+    int x = 0;
+    for (int i = 0; i < a.length; i++) {
+      x += a[i];
+    }
+    return x;
+  }
+
+  /// CHECK-START: int Main.oneArrayAbs(int[], int) BCE (before)
+  /// CHECK-DAG: BoundsCheck loop:<<Loop:B\d+>> outer_loop:none
+  //
+  /// CHECK-START: int Main.oneArrayAbs(int[], int) BCE (after)
+  /// CHECK-NOT: BoundsCheck
+  /// CHECK-NOT: Deoptimize
+  static int oneArrayAbs(int[] a, int lo) {
+    int x = 0;
+    for (int i = Math.abs(lo); i < a.length; i++) {
+      x += a[i];
+    }
+    return x;
+  }
+
+
+  /// CHECK-START: int Main.twoArrays(int[], int[]) BCE (before)
+  /// CHECK-DAG: BoundsCheck loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: BoundsCheck loop:<<Loop>>      outer_loop:none
+  //
+  /// CHECK-START: int Main.twoArrays(int[], int[]) BCE (after)
+  /// CHECK-NOT: BoundsCheck
+  /// CHECK-NOT: Deoptimize
+  static int twoArrays(int[] a, int[] b) {
+    int x = 0;
+    for (int i = 0; i < Math.min(a.length, b.length); i++) {
+      x += a[i] + b[i];
+    }
+    return x;
+  }
+
+  /// CHECK-START: int Main.threeArrays(int[], int[], int[]) BCE (before)
+  /// CHECK-DAG: BoundsCheck loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: BoundsCheck loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: BoundsCheck loop:<<Loop>>      outer_loop:none
+  //
+  /// CHECK-START: int Main.threeArrays(int[], int[], int[]) BCE (after)
+  /// CHECK-NOT: BoundsCheck
+  /// CHECK-NOT: Deoptimize
+  static int threeArrays(int[] a, int[] b, int[] c) {
+    int x = 0;
+    for (int i = 0; i < Math.min(Math.min(a.length, b.length), c.length); i++) {
+      x += a[i] + b[i] + c[i];
+    }
+    return x;
+  }
+
+  /// CHECK-START: int Main.fourArrays(int[], int[], int[], int[]) BCE (before)
+  /// CHECK-DAG: BoundsCheck loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: BoundsCheck loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: BoundsCheck loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: BoundsCheck loop:<<Loop>>      outer_loop:none
+  //
+  /// CHECK-START: int Main.fourArrays(int[], int[], int[], int[]) BCE (after)
+  /// CHECK-NOT: BoundsCheck
+  /// CHECK-NOT: Deoptimize
+  static int fourArrays(int[] a, int[] b, int[] c, int[] d) {
+    int x = 0;
+    for (int i = 0; i < Math.min(Math.min(a.length, b.length), Math.min(c.length, d.length)); i++) {
+      x += a[i] + b[i] + c[i] + d[i];
+    }
+    return x;
+  }
+
+  /// CHECK-START: int Main.oneArrayWithCleanup(int[]) BCE (before)
+  /// CHECK-DAG: BoundsCheck loop:<<Loop1:B\d+>> outer_loop:none
+  /// CHECK-DAG: BoundsCheck loop:<<Loop2:B\d+>> outer_loop:none
+  //
+  /// CHECK-EVAL: "<<Loop1>>" != "<<Loop2>>"
+  //
+  /// CHECK-START: int Main.oneArrayWithCleanup(int[]) BCE (after)
+  /// CHECK-NOT: BoundsCheck
+  /// CHECK-NOT: Deoptimize
+  static int oneArrayWithCleanup(int[] a) {
+    int x = 0;
+    int n = Math.min(4, a.length);
+    for (int i = 0; i < n; i++) {
+      x += a[i];
+    }
+    for (int i = n; i < a.length; i++) {
+      x += a[i] * 10;
+    }
+    return x;
+  }
+
+  /// CHECK-START: int Main.twoArraysWithCleanup(int[], int[]) BCE (before)
+  /// CHECK-DAG: BoundsCheck loop:<<Loop1:B\d+>> outer_loop:none
+  /// CHECK-DAG: BoundsCheck loop:<<Loop1>>      outer_loop:none
+  /// CHECK-DAG: BoundsCheck loop:<<Loop2:B\d+>> outer_loop:none
+  //
+  /// CHECK-EVAL: "<<Loop1>>" != "<<Loop2>>"
+  //
+  /// CHECK-START: int Main.twoArraysWithCleanup(int[], int[]) BCE (after)
+  /// CHECK-NOT: BoundsCheck
+  /// CHECK-NOT: Deoptimize
+  static int twoArraysWithCleanup(int[] a, int[] b) {
+    int x = 0;
+    int n = Math.min(a.length, b.length);
+    for (int i = n - 1; i >= 0; i--) {
+      x += a[i] + b[i];
+    }
+    for (int i = n; i < a.length; i++) {
+      x += a[i];
+    }
+    return x;
+  }
+
+  /// CHECK-START: int Main.threeArraysWithCleanup(int[], int[], int[]) BCE (before)
+  /// CHECK-DAG: BoundsCheck loop:<<Loop1:B\d+>> outer_loop:none
+  /// CHECK-DAG: BoundsCheck loop:<<Loop1>>      outer_loop:none
+  /// CHECK-DAG: BoundsCheck loop:<<Loop1>>      outer_loop:none
+  /// CHECK-DAG: BoundsCheck loop:<<Loop2:B\d+>> outer_loop:none
+  //
+  /// CHECK-EVAL: "<<Loop1>>" != "<<Loop2>>"
+  //
+  /// CHECK-START: int Main.threeArraysWithCleanup(int[], int[], int[]) BCE (after)
+  /// CHECK-NOT: BoundsCheck
+  /// CHECK-NOT: Deoptimize
+  static int threeArraysWithCleanup(int[] a, int[] b, int[] c) {
+    int x = 0;
+    int n = Math.min(a.length, Math.min(b.length, c.length));
+    for (int i = n - 1; i >= 0; i--) {
+      x += a[i] + b[i] + c[i];
+    }
+    for (int i = n; i < a.length; i++) {
+      x += a[i];
+    }
+    return x;
+  }
+
+  /// CHECK-START: int Main.altLoopLogic(int[], int[]) BCE (before)
+  /// CHECK-DAG: BoundsCheck loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: BoundsCheck loop:<<Loop>>      outer_loop:none
+  //
+  /// CHECK-START: int Main.altLoopLogic(int[], int[]) BCE (after)
+  /// CHECK-NOT: BoundsCheck
+  /// CHECK-NOT: Deoptimize
+  static int altLoopLogic(int[] a, int[] b) {
+    int x = 0;
+    int n = Math.min(a.length, b.length);
+    for (int i = n; i-- > 0;) {
+      x += a[i] + b[i];
+    }
+    return x;
+  }
+
+  /// CHECK-START: int Main.hiddenMin(int[], int[]) BCE (before)
+  /// CHECK-DAG: BoundsCheck loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: BoundsCheck loop:<<Loop>>      outer_loop:none
+  //
+  /// CHECK-START: int Main.hiddenMin(int[], int[]) BCE (after)
+  //
+  // TODO: make this so
+  static int hiddenMin(int[] a, int[] b) {
+    int x = 0;
+    for (int i = 0; i < a.length && i < b.length; i++) {
+      x += a[i] + b[i];
+    }
+    return x;
+  }
+
+  /// CHECK-START: int Main.hiddenMinWithCleanup(int[], int[]) BCE (before)
+  /// CHECK-DAG: BoundsCheck loop:<<Loop1:B\d+>> outer_loop:none
+  /// CHECK-DAG: BoundsCheck loop:<<Loop1>>      outer_loop:none
+  /// CHECK-DAG: BoundsCheck loop:<<Loop2:B\d+>> outer_loop:none
+  //
+  /// CHECK-EVAL: "<<Loop1>>" != "<<Loop2>>"
+  //
+  /// CHECK-START: int Main.hiddenMinWithCleanup(int[], int[]) BCE (after)
+  //
+  // TODO: make this so
+  static int hiddenMinWithCleanup(int[] a, int[] b) {
+    int x = 0;
+    int i = 0;
+    for (; i < a.length && i < b.length; i++) {
+      x += a[i] + b[i];
+    }
+    for (; i < a.length; i++) {
+      x += a[i];
+    }
+    return x;
+  }
+
+  public static void main(String[] args) {
+    int[] a = { 1, 2, 3, 4, 5 };
+    int[] b = { 6, 7, 8, 9, 4, 2 };
+    int[] c = { 1, 2, 3 };
+    int[] d = { 8, 5, 3, 2 };
+
+    expectEquals(15, oneArray(a));
+    expectEquals(36, oneArray(b));
+    expectEquals(6,  oneArray(c));
+    expectEquals(18, oneArray(d));
+
+    expectEquals(5,  oneArrayAbs(a, -4));
+    expectEquals(15, oneArrayAbs(a, 0));
+    expectEquals(5,  oneArrayAbs(a, 4));
+
+    expectEquals(30, twoArrays(a, a));
+    expectEquals(49, twoArrays(a, b));
+    expectEquals(12, twoArrays(a, c));
+    expectEquals(28, twoArrays(a, d));
+
+    expectEquals(45, threeArrays(a, a, a));
+    expectEquals(33, threeArrays(a, b, c));
+    expectEquals(58, threeArrays(a, b, d));
+    expectEquals(28, threeArrays(a, c, d));
+
+    expectEquals(60, fourArrays(a, a, a, a));
+    expectEquals(49, fourArrays(a, b, c, d));
+
+    expectEquals(60, oneArrayWithCleanup(a));
+    expectEquals(90, oneArrayWithCleanup(b));
+    expectEquals(6,  oneArrayWithCleanup(c));
+    expectEquals(18, oneArrayWithCleanup(d));
+
+    expectEquals(30, twoArraysWithCleanup(a, a));
+    expectEquals(49, twoArraysWithCleanup(a, b));
+    expectEquals(21, twoArraysWithCleanup(a, c));
+    expectEquals(33, twoArraysWithCleanup(a, d));
+
+    expectEquals(45, threeArraysWithCleanup(a, a, a));
+    expectEquals(42, threeArraysWithCleanup(a, b, c));
+    expectEquals(63, threeArraysWithCleanup(a, b, d));
+    expectEquals(37, threeArraysWithCleanup(a, c, d));
+
+    expectEquals(30, altLoopLogic(a, a));
+    expectEquals(49, altLoopLogic(a, b));
+    expectEquals(12, altLoopLogic(a, c));
+    expectEquals(28, altLoopLogic(a, d));
+
+    expectEquals(30, hiddenMin(a, a));
+    expectEquals(49, hiddenMin(a, b));
+    expectEquals(12, hiddenMin(a, c));
+    expectEquals(28, hiddenMin(a, d));
+
+    expectEquals(30, hiddenMinWithCleanup(a, a));
+    expectEquals(49, hiddenMinWithCleanup(a, b));
+    expectEquals(21, hiddenMinWithCleanup(a, c));
+    expectEquals(33, hiddenMinWithCleanup(a, d));
+
+    System.out.println("passed");
+  }
+
+  private static void expectEquals(int expected, int result) {
+    if (expected != result) {
+      throw new Error("Expected: " + expected + ", found: " + result);
+    }
+  }
+}
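
As a quick spot check of one expected value above (a worked example only, not part of the test): with a = {1, 2, 3, 4, 5} and b = {6, 7, 8, 9, 4, 2}, twoArrays(a, b) iterates i over [0, min(5, 6)) and sums the pairs:

    // (1+6) + (2+7) + (3+8) + (4+9) + (5+4)
    //   = 7 + 9 + 11 + 13 + 9
    //   = 49    -> expectEquals(49, twoArrays(a, b))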
diff --git a/test/622-checker-bce-regressions/expected.txt b/test/622-checker-bce-regressions/expected.txt
new file mode 100644
index 0000000..b0aad4d
--- /dev/null
+++ b/test/622-checker-bce-regressions/expected.txt
@@ -0,0 +1 @@
+passed
diff --git a/test/622-checker-bce-regressions/info.txt b/test/622-checker-bce-regressions/info.txt
new file mode 100644
index 0000000..a753dfa
--- /dev/null
+++ b/test/622-checker-bce-regressions/info.txt
@@ -0,0 +1 @@
+Regression tests on BCE.
diff --git a/test/622-checker-bce-regressions/src/Main.java b/test/622-checker-bce-regressions/src/Main.java
new file mode 100644
index 0000000..6ba2644
--- /dev/null
+++ b/test/622-checker-bce-regressions/src/Main.java
@@ -0,0 +1,55 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Regression tests for BCE.
+ */
+public class Main {
+
+  static int[] array = new int[10];
+
+  /// CHECK-START: int Main.doNotVisitAfterForwardBCE(int[]) BCE (before)
+  /// CHECK-DAG: BoundsCheck loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: BoundsCheck loop:<<Loop>>      outer_loop:none
+  //
+  /// CHECK-START: int Main.doNotVisitAfterForwardBCE(int[]) BCE (after)
+  /// CHECK-NOT: BoundsCheck
+  static int doNotVisitAfterForwardBCE(int[] a) {
+    if (a == null) {
+      throw new Error("Null");
+    }
+    int k = 0;
+    int j = 0;
+    for (int i = 1; i < 10; i++) {
+      j = i - 1;
+      // b/32547652: after DCE, bounds checks become consecutive,
+      // and second should not be revisited after forward BCE.
+      k = a[i] + a[i - 1];
+    }
+    return j;
+  }
+
+  public static void main(String[] args) {
+    expectEquals(8, doNotVisitAfterForwardBCE(array));
+    System.out.println("passed");
+  }
+
+  private static void expectEquals(int expected, int result) {
+    if (expected != result) {
+      throw new Error("Expected: " + expected + ", found: " + result);
+    }
+  }
+}
diff --git a/test/562-no-intermediate/expected.txt b/test/622-simplifyifs-exception-edges/expected.txt
similarity index 100%
copy from test/562-no-intermediate/expected.txt
copy to test/622-simplifyifs-exception-edges/expected.txt
diff --git a/test/622-simplifyifs-exception-edges/info.txt b/test/622-simplifyifs-exception-edges/info.txt
new file mode 100644
index 0000000..58c4bfb
--- /dev/null
+++ b/test/622-simplifyifs-exception-edges/info.txt
@@ -0,0 +1,2 @@
+Regression test for the SimplifyIfs() graph simplification erroneously trying
+to redirect exception handler edges.
\ No newline at end of file
diff --git a/test/622-simplifyifs-exception-edges/smali/Test.smali b/test/622-simplifyifs-exception-edges/smali/Test.smali
new file mode 100644
index 0000000..5e91258
--- /dev/null
+++ b/test/622-simplifyifs-exception-edges/smali/Test.smali
@@ -0,0 +1,76 @@
+# Copyright (C) 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+.class public LTest;
+
+.super Ljava/lang/Object;
+
+.method public static test([I)I
+    .locals 2
+    const/4 v0, 0
+    :try1_begin
+    array-length v1, p0
+    :try1_end
+    add-int/lit8 v0, v1, -1
+    :try2_begin
+    aget v0, p0, v0
+    :try2_end
+    :end
+    return v0
+
+    :catch_all
+    # Regression test for bug 32545860:
+    #     SimplifyIfs() would have redirected exception handler edges leading here.
+    # Note: There is no move-exception here to prevent matching the SimplifyIfs() pattern.
+    if-eqz v0, :is_zero
+    const/4 v0, -1
+    goto :end
+    :is_zero
+    const/4 v0, -2
+    goto :end
+
+    .catchall {:try1_begin .. :try1_end } :catch_all
+    .catchall {:try2_begin .. :try2_end } :catch_all
+.end method
+
+.method public static test2([II)I
+    .locals 3
+    move v0, p1
+    :try_begin
+    array-length v1, p0
+    add-int/lit8 v1, v1, -1
+    add-int/lit8 v0, v0, 1
+    aget v1, p0, v1
+    const/4 v0, 2
+    aget v2, p0, p1
+    const/4 v0, 3
+    :try_end
+    :end
+    return v0
+
+    :catch_all
+    # Regression test for bug 32546110:
+    #     SimplifyIfs() would have looked at predecessors of this block based on the indexes
+    #     of the catch Phi's inputs. For catch blocks these two arrays are unrelated, so
+    #     this caused out-of-range access triggering a DCHECK() in dchecked_vector<>.
+    # Note: There is no move-exception here to prevent matching the SimplifyIfs() pattern.
+    if-eqz v0, :is_zero
+    const/4 v0, -1
+    goto :end
+    :is_zero
+    const/4 v0, -2
+    goto :end
+
+    .catchall {:try_begin .. :try_end } :catch_all
+.end method
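
For readers less familiar with smali, test() above corresponds roughly to the Java shape below (a sketch with illustrative names; the test has to stay in smali because, as the comments note, the handler deliberately omits move-exception, which compiled Java code would not do):

    static int test(int[] a) {
      int v = 0;
      try {
        int len = a.length;   // :try1 -- throws NullPointerException when a == null (v still 0)
        v = len - 1;          // outside both try ranges in the smali, but cannot throw
        v = a[v];             // :try2 -- throws ArrayIndexOutOfBoundsException when a.length == 0 (v == -1)
        return v;
      } catch (Throwable t) { // shared :catch_all handler
        return (v == 0) ? -2 : -1;
      }
    }

This matches the assertions in the companion Main.java below: null yields -2, an empty array yields -1, and { 42 } returns 42.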
diff --git a/test/622-simplifyifs-exception-edges/src/Main.java b/test/622-simplifyifs-exception-edges/src/Main.java
new file mode 100644
index 0000000..636f047
--- /dev/null
+++ b/test/622-simplifyifs-exception-edges/src/Main.java
@@ -0,0 +1,43 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.lang.reflect.Method;
+import java.lang.reflect.InvocationTargetException;
+
+public class Main {
+    public static void main(String[] args) throws Exception {
+        Class<?> c = Class.forName("Test");
+        Method test = c.getDeclaredMethod("test", int[].class);
+        assertIntEquals(-2, (int)test.invoke(null, new Object[] { null }));
+        assertIntEquals(-1, (int)test.invoke(null, new Object[] { new int[0] }));
+        assertIntEquals(42, (int)test.invoke(null, new Object[] { new int[] { 42 } }));
+
+        Method test2 = c.getDeclaredMethod("test2", int[].class, int.class);
+        assertIntEquals(-2, (int)test2.invoke(null, new Object[] { null, 0 }));
+        assertIntEquals(-1, (int)test2.invoke(null, new Object[] { new int[0], 0 }));
+        assertIntEquals(-1, (int)test2.invoke(null, new Object[] { new int[0], 1 }));
+        assertIntEquals(3, (int)test2.invoke(null, new Object[] { new int[] { 42 }, 0 }));
+    }
+
+    public static void assertIntEquals(int expected, int result) {
+        if (expected != result) {
+            throw new Error("Expected: " + expected + ", found: " + result);
+        }
+    }
+
+    // Workaround for non-zero field ids offset in dex file with no fields. Bug: 18051191
+    static final boolean dummy = false;
+}
diff --git a/test/623-checker-loop-regressions/expected.txt b/test/623-checker-loop-regressions/expected.txt
new file mode 100644
index 0000000..b0aad4d
--- /dev/null
+++ b/test/623-checker-loop-regressions/expected.txt
@@ -0,0 +1 @@
+passed
diff --git a/test/623-checker-loop-regressions/info.txt b/test/623-checker-loop-regressions/info.txt
new file mode 100644
index 0000000..6271600
--- /dev/null
+++ b/test/623-checker-loop-regressions/info.txt
@@ -0,0 +1 @@
+Regression tests on loop optimizations.
diff --git a/test/623-checker-loop-regressions/src/Main.java b/test/623-checker-loop-regressions/src/Main.java
new file mode 100644
index 0000000..ce5bda1
--- /dev/null
+++ b/test/623-checker-loop-regressions/src/Main.java
@@ -0,0 +1,109 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Regression tests for loop optimizations.
+ */
+public class Main {
+
+  /// CHECK-START: int Main.earlyExitFirst(int) loop_optimization (before)
+  /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: Phi loop:<<Loop>>      outer_loop:none
+  //
+  /// CHECK-START: int Main.earlyExitFirst(int) loop_optimization (after)
+  /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: Phi loop:<<Loop>>      outer_loop:none
+  static int earlyExitFirst(int m) {
+    int k = 0;
+    for (int i = 0; i < 10; i++) {
+      if (i == m) {
+        return k;
+      }
+      k++;
+    }
+    return k;
+  }
+
+  /// CHECK-START: int Main.earlyExitLast(int) loop_optimization (before)
+  /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: Phi loop:<<Loop>>      outer_loop:none
+  //
+  /// CHECK-START: int Main.earlyExitLast(int) loop_optimization (after)
+  /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: Phi loop:<<Loop>>      outer_loop:none
+  static int earlyExitLast(int m) {
+    int k = 0;
+    for (int i = 0; i < 10; i++) {
+      k++;
+      if (i == m) {
+        return k;
+      }
+    }
+    return k;
+  }
+
+  /// CHECK-START: int Main.earlyExitNested() loop_optimization (before)
+  /// CHECK-DAG: Phi loop:<<Loop1:B\d+>> outer_loop:none
+  /// CHECK-DAG: Phi loop:<<Loop1>>      outer_loop:none
+  /// CHECK-DAG: Phi loop:<<Loop2:B\d+>> outer_loop:<<Loop1>>
+  /// CHECK-DAG: Phi loop:<<Loop2>>      outer_loop:<<Loop1>>
+  //
+  /// CHECK-START: int Main.earlyExitNested() loop_optimization (after)
+  /// CHECK-DAG: Phi loop:<<Loop1:B\d+>> outer_loop:none
+  /// CHECK-DAG: Phi loop:<<Loop1>>      outer_loop:none
+  //
+  /// CHECK-START: int Main.earlyExitNested() loop_optimization (after)
+  /// CHECK-NOT: Phi loop:{{B\d+}} outer_loop:{{B\d+}}
+  static int earlyExitNested() {
+    int offset = 0;
+    for (int i = 0; i < 2; i++) {
+      int start = offset;
+      // This loop can be removed.
+      for (int j = 0; j < 2; j++) {
+        offset++;
+      }
+      if (i == 1) {
+        return start;
+      }
+    }
+    return 0;
+  }
+
+  public static void main(String[] args) {
+    expectEquals(10, earlyExitFirst(-1));
+    for (int i = 0; i <= 10; i++) {
+      expectEquals(i, earlyExitFirst(i));
+    }
+    expectEquals(10, earlyExitFirst(11));
+
+    expectEquals(10, earlyExitLast(-1));
+    for (int i = 0; i < 10; i++) {
+      expectEquals(i + 1, earlyExitLast(i));
+    }
+    expectEquals(10, earlyExitLast(10));
+    expectEquals(10, earlyExitLast(11));
+
+    expectEquals(2, earlyExitNested());
+
+    System.out.println("passed");
+  }
+
+  private static void expectEquals(int expected, int result) {
+    if (expected != result) {
+      throw new Error("Expected: " + expected + ", found: " + result);
+    }
+  }
+}
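
In earlyExitNested() above, the inner loop marked "This loop can be removed" has a constant trip count of two and only advances offset, so loop optimization can fold it into its closed form, which is why the checker expects no inner-loop Phi after the pass. A hedged sketch of the simplified shape (illustrative only, not the compiler's actual output):

    static int earlyExitNestedSimplified() {
      int offset = 0;
      for (int i = 0; i < 2; i++) {
        int start = offset;
        offset += 2;  // two iterations of "offset++" folded into one step
        if (i == 1) {
          return start;  // returns 2, matching expectEquals(2, earlyExitNested())
        }
      }
      return 0;
    }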
diff --git a/test/624-checker-stringops/expected.txt b/test/624-checker-stringops/expected.txt
new file mode 100644
index 0000000..b0aad4d
--- /dev/null
+++ b/test/624-checker-stringops/expected.txt
@@ -0,0 +1 @@
+passed
diff --git a/test/624-checker-stringops/info.txt b/test/624-checker-stringops/info.txt
new file mode 100644
index 0000000..64344ac
--- /dev/null
+++ b/test/624-checker-stringops/info.txt
@@ -0,0 +1 @@
+Verify some properties of string operations represented by intrinsics.
diff --git a/test/624-checker-stringops/src/Main.java b/test/624-checker-stringops/src/Main.java
new file mode 100644
index 0000000..34e8283
--- /dev/null
+++ b/test/624-checker-stringops/src/Main.java
@@ -0,0 +1,124 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Tests properties of some string operations represented by intrinsics.
+ */
+public class Main {
+
+  static final String ABC = "ABCDEFGHIJKLMNOPQRSTUVWXYZ";
+  static final String XYZ = "XYZ";
+
+  //
+  // Variant intrinsics remain in the loop, but invariant references are hoisted out of the loop.
+  //
+  /// CHECK-START: int Main.liveIndexOf() licm (before)
+  /// CHECK-DAG: InvokeVirtual intrinsic:StringIndexOf            loop:{{B\d+}} outer_loop:none
+  /// CHECK-DAG: InvokeVirtual intrinsic:StringIndexOfAfter       loop:{{B\d+}} outer_loop:none
+  /// CHECK-DAG: InvokeVirtual intrinsic:StringStringIndexOf      loop:{{B\d+}} outer_loop:none
+  /// CHECK-DAG: InvokeVirtual intrinsic:StringStringIndexOfAfter loop:{{B\d+}} outer_loop:none
+  //
+  /// CHECK-START: int Main.liveIndexOf() licm (after)
+  /// CHECK-DAG: InvokeVirtual intrinsic:StringIndexOf            loop:{{B\d+}} outer_loop:none
+  /// CHECK-DAG: InvokeVirtual intrinsic:StringIndexOfAfter       loop:{{B\d+}} outer_loop:none
+  /// CHECK-DAG: InvokeVirtual intrinsic:StringStringIndexOf      loop:none
+  /// CHECK-DAG: InvokeVirtual intrinsic:StringStringIndexOfAfter loop:none
+  static int liveIndexOf() {
+    int k = ABC.length() + XYZ.length();  // does LoadString before loops
+    for (char c = 'A'; c <= 'Z'; c++) {
+      k += ABC.indexOf(c);
+    }
+    for (char c = 'A'; c <= 'Z'; c++) {
+      k += ABC.indexOf(c, 4);
+    }
+    for (char c = 'A'; c <= 'Z'; c++) {
+      k += ABC.indexOf(XYZ);
+    }
+    for (char c = 'A'; c <= 'Z'; c++) {
+      k += ABC.indexOf(XYZ, 2);
+    }
+    return k;
+  }
+
+  //
+  // All dead intrinsics can be removed completely.
+  //
+  /// CHECK-START: int Main.deadIndexOf() dead_code_elimination$initial (before)
+  /// CHECK-DAG: InvokeVirtual intrinsic:StringIndexOf            loop:{{B\d+}} outer_loop:none
+  /// CHECK-DAG: InvokeVirtual intrinsic:StringIndexOfAfter       loop:{{B\d+}} outer_loop:none
+  /// CHECK-DAG: InvokeVirtual intrinsic:StringStringIndexOf      loop:{{B\d+}} outer_loop:none
+  /// CHECK-DAG: InvokeVirtual intrinsic:StringStringIndexOfAfter loop:{{B\d+}} outer_loop:none
+  //
+  /// CHECK-START: int Main.deadIndexOf() dead_code_elimination$initial (after)
+  /// CHECK-NOT: InvokeVirtual intrinsic:StringIndexOf
+  /// CHECK-NOT: InvokeVirtual intrinsic:StringIndexOfAfter
+  /// CHECK-NOT: InvokeVirtual intrinsic:StringStringIndexOf
+  /// CHECK-NOT: InvokeVirtual intrinsic:StringStringIndexOfAfter
+  static int deadIndexOf() {
+    int k = ABC.length() + XYZ.length();  // does LoadString before loops
+    for (char c = 'A'; c <= 'Z'; c++) {
+      int d = ABC.indexOf(c);
+    }
+    for (char c = 'A'; c <= 'Z'; c++) {
+      int d = ABC.indexOf(c, 4);
+    }
+    for (char c = 'A'; c <= 'Z'; c++) {
+      int d = ABC.indexOf(XYZ);
+    }
+    for (char c = 'A'; c <= 'Z'; c++) {
+      int d = ABC.indexOf(XYZ, 2);
+    }
+    return k;
+  }
+
+  //
+  // Explicit null check on receiver and implicit null check on argument prevent hoisting.
+  //
+  /// CHECK-START: int Main.indexOfExceptions(java.lang.String, java.lang.String) licm (after)
+  /// CHECK-DAG: <<String:l\d+>> NullCheck                                                         loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG:                 InvokeVirtual [<<String>>,{{l\d+}}] intrinsic:StringStringIndexOf loop:<<Loop>>      outer_loop:none
+  static int indexOfExceptions(String s, String t) {
+    int k = 0;
+    for (char c = 'A'; c <= 'Z'; c++) {
+      k += s.indexOf(t);
+    }
+    return k;
+  }
+
+  public static void main(String[] args) {
+    expectEquals(1865, liveIndexOf());
+    expectEquals(29, deadIndexOf());
+    try {
+      indexOfExceptions(null, XYZ);
+      throw new Error("Expected: NPE");
+    } catch (NullPointerException e) {
+    }
+    try {
+      indexOfExceptions(ABC, null);
+      throw new Error("Expected: NPE");
+    } catch (NullPointerException e) {
+    }
+    expectEquals(598, indexOfExceptions(ABC, XYZ));
+
+    System.out.println("passed");
+  }
+
+  private static void expectEquals(int expected, int result) {
+    if (expected != result) {
+      throw new Error("Expected: " + expected + ", found: " + result);
+    }
+  }
+}
diff --git a/test/625-checker-licm-regressions/expected.txt b/test/625-checker-licm-regressions/expected.txt
new file mode 100644
index 0000000..b0aad4d
--- /dev/null
+++ b/test/625-checker-licm-regressions/expected.txt
@@ -0,0 +1 @@
+passed
diff --git a/test/625-checker-licm-regressions/info.txt b/test/625-checker-licm-regressions/info.txt
new file mode 100644
index 0000000..10480df
--- /dev/null
+++ b/test/625-checker-licm-regressions/info.txt
@@ -0,0 +1 @@
+Regression tests on LICM.
diff --git a/test/625-checker-licm-regressions/src/Main.java b/test/625-checker-licm-regressions/src/Main.java
new file mode 100644
index 0000000..f372b1c
--- /dev/null
+++ b/test/625-checker-licm-regressions/src/Main.java
@@ -0,0 +1,135 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Regression tests for LICM.
+ */
+public class Main {
+
+  static int sA;
+
+  //
+  // We cannot hoist the null check (can throw) above the field
+  // assignment (has write side effects) because that would result
+  // in throwing an exception before the assignment is done.
+  //
+  /// CHECK-START: void Main.foo(int[]) licm (before)
+  /// CHECK-DAG: LoadClass      loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: StaticFieldSet loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: NullCheck      loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: ArrayLength    loop:<<Loop>>      outer_loop:none
+  //
+  /// CHECK-START: void Main.foo(int[]) licm (after)
+  /// CHECK-DAG: LoadClass      loop:none
+  /// CHECK-DAG: StaticFieldSet loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: NullCheck      loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: ArrayLength    loop:<<Loop>>      outer_loop:none
+  //
+  /// CHECK-START: void Main.foo(int[]) licm (after)
+  /// CHECK-NOT: LoadClass      loop:{{B\d+}} outer_loop:none
+  static void foo(int[] arr) {
+    int j = 0;
+    do {
+      sA = 1;
+    } while (j < arr.length);
+  }
+
+  //
+  // Similar situation as in foo(), but now a proper induction value
+  // is assigned to the field inside the do-while loop.
+  //
+  /// CHECK-START: void Main.bar(int[]) licm (before)
+  /// CHECK-DAG: LoadClass      loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: StaticFieldSet loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: NullCheck      loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: ArrayLength    loop:<<Loop>>      outer_loop:none
+  //
+  /// CHECK-START: void Main.bar(int[]) licm (after)
+  /// CHECK-DAG: LoadClass      loop:none
+  /// CHECK-DAG: StaticFieldSet loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: NullCheck      loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: ArrayLength    loop:<<Loop>>      outer_loop:none
+  //
+  /// CHECK-START: void Main.bar(int[]) licm (after)
+  /// CHECK-NOT: LoadClass      loop:{{B\d+}} outer_loop:none
+  static void bar(int[] arr) {
+    int j = 0;
+    do {
+      j++;
+      sA = j;
+    } while (j < arr.length);
+  }
+
+  //
+  // Similar situation as in bar(), but now an explicit catch
+  // statement may need the latest value of local j.
+  //
+  /// CHECK-START: int Main.catcher(int[]) licm (before)
+  /// CHECK-DAG: NullCheck   loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: ArrayLength loop:<<Loop>>      outer_loop:none
+  //
+  /// CHECK-START: int Main.catcher(int[]) licm (after)
+  /// CHECK-DAG: NullCheck   loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: ArrayLength loop:<<Loop>>      outer_loop:none
+  static int catcher(int[] arr) {
+    int j = 0;
+    try {
+      do {
+        j++;
+      } while (j < arr.length);
+    } catch (NullPointerException e) {
+      return -j;  // flag exception with negative value
+    }
+    return j;
+  }
+
+  public static void main(String[] args) {
+    sA = 0;
+    try {
+      foo(null);
+      throw new Error("Expected NPE");
+    } catch (NullPointerException e) {
+    }
+    expectEquals(1, sA);
+
+    sA = 0;
+    try {
+      bar(null);
+      throw new Error("Expected NPE");
+    } catch (NullPointerException e) {
+    }
+    expectEquals(1, sA);
+
+    for (int i = 0; i < 5; i++) {
+      sA = 0;
+      bar(new int[i]);
+      expectEquals(i == 0 ? 1 : i, sA);
+    }
+
+    expectEquals(-1, catcher(null));
+    for (int i = 0; i < 5; i++) {
+      expectEquals(i == 0 ? 1 : i, catcher(new int[i]));
+    }
+
+    System.out.println("passed");
+  }
+
+  private static void expectEquals(int expected, int result) {
+    if (expected != result) {
+      throw new Error("Expected: " + expected + ", found: " + result);
+    }
+  }
+}
diff --git a/test/902-hello-transformation/expected.txt b/test/902-hello-transformation/expected.txt
index e86e814..a826f93 100644
--- a/test/902-hello-transformation/expected.txt
+++ b/test/902-hello-transformation/expected.txt
@@ -1,3 +1,3 @@
-Hello
+hello
 modifying class 'Transform'
 Goodbye
diff --git a/test/902-hello-transformation/run b/test/902-hello-transformation/run
index 204e4cc..3755d1d 100755
--- a/test/902-hello-transformation/run
+++ b/test/902-hello-transformation/run
@@ -39,5 +39,6 @@
                    --experimental runtime-plugins \
                    --runtime-option -agentpath:${agent}=902-hello-transformation,${arg} \
                    --android-runtime-option -Xplugin:${plugin} \
+                   --android-runtime-option -Xfully-deoptable \
                    ${other_args} \
                    --args ${lib}
diff --git a/test/902-hello-transformation/src/Transform.java b/test/902-hello-transformation/src/Transform.java
index dc0a0c4..8e8af35 100644
--- a/test/902-hello-transformation/src/Transform.java
+++ b/test/902-hello-transformation/src/Transform.java
@@ -16,6 +16,13 @@
 
 class Transform {
   public void sayHi() {
-    System.out.println("Hello");
+    // Use a lowercase 'h' to make sure the string will have a different string
+    // id than the transformation (the transformation code is the same except
+    // for the actual printed String, which was making the test inaccurately
+    // pass in JIT mode when loading the string from the dex cache, as the
+    // string ids of the two different strings were the same).
+    // We know the string ids will be different because lexicographically:
+    // "Goodbye" < "LTransform;" < "hello".
+    System.out.println("hello");
   }
 }
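
The lexicographic claim in the comment above is easy to spot-check: in ASCII/UTF-16, 'G' (0x47) < 'L' (0x4C) < 'h' (0x68), which is exactly the ordering the comment relies on. A minimal check (illustrative only):

    // Both lines print "true": "Goodbye" sorts before "LTransform;",
    // which sorts before "hello".
    System.out.println("Goodbye".compareTo("LTransform;") < 0);
    System.out.println("LTransform;".compareTo("hello") < 0);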
diff --git a/test/902-hello-transformation/transform.cc b/test/902-hello-transformation/transform.cc
index 5b0d219..3369dd4 100644
--- a/test/902-hello-transformation/transform.cc
+++ b/test/902-hello-transformation/transform.cc
@@ -23,6 +23,7 @@
 #include "base/logging.h"
 #include "jni.h"
 #include "openjdkjvmti/jvmti.h"
+#include "ti-agent/common_helper.h"
 #include "ti-agent/common_load.h"
 #include "utils.h"
 
@@ -132,15 +133,13 @@
 jint OnLoad(JavaVM* vm,
             char* options,
             void* reserved ATTRIBUTE_UNUSED) {
-  jvmtiCapabilities caps;
   RuntimeIsJvm = (strcmp("jvm", options) == 0);
   if (vm->GetEnv(reinterpret_cast<void**>(&jvmti_env), JVMTI_VERSION_1_0)) {
     printf("Unable to get jvmti env!\n");
     return 1;
   }
+  SetAllCapabilities(jvmti_env);
   if (IsJVM()) {
-    jvmti_env->GetPotentialCapabilities(&caps);
-    jvmti_env->AddCapabilities(&caps);
     jvmtiEventCallbacks cbs;
     memset(&cbs, 0, sizeof(cbs));
     cbs.ClassFileLoadHook = transformationHook;
diff --git a/test/903-hello-tagging/expected.txt b/test/903-hello-tagging/expected.txt
index e69de29..872b79b 100644
--- a/test/903-hello-tagging/expected.txt
+++ b/test/903-hello-tagging/expected.txt
@@ -0,0 +1,10 @@
+18
+<nothing>
+18
+[<1;1>, <11;1>, <2;2>, <12;2>, <3;3>, <13;3>, <4;4>, <14;4>, <5;5>, <15;5>, <6;6>, <16;6>, <7;7>, <17;7>, <8;8>, <18;8>, <9;9>, <19;9>]
+4
+[<2;2>, <12;2>, <5;5>, <15;5>]
+18
+[<null;1>, <null;1>, <null;2>, <null;2>, <null;3>, <null;3>, <null;4>, <null;4>, <null;5>, <null;5>, <null;6>, <null;6>, <null;7>, <null;7>, <null;8>, <null;8>, <null;9>, <null;9>]
+18
+[<1;0>, <2;0>, <3;0>, <4;0>, <5;0>, <6;0>, <7;0>, <8;0>, <9;0>, <11;0>, <12;0>, <13;0>, <14;0>, <15;0>, <16;0>, <17;0>, <18;0>, <19;0>]
diff --git a/test/903-hello-tagging/src/Main.java b/test/903-hello-tagging/src/Main.java
index 2856a39..a8aedb4 100644
--- a/test/903-hello-tagging/src/Main.java
+++ b/test/903-hello-tagging/src/Main.java
@@ -15,11 +15,14 @@
  */
 
 import java.lang.ref.WeakReference;
+import java.util.ArrayList;
+import java.util.Arrays;
 
 public class Main {
   public static void main(String[] args) {
     System.loadLibrary(args[1]);
     doTest();
+    testGetTaggedObjects();
   }
 
   public static void doTest() {
@@ -68,6 +71,100 @@
     }
   }
 
+  private static void testGetTaggedObjects() {
+    // Use an array list to ensure that the objects stay live for a bit. Also gives us a source
+    // to compare to. We use index % 10 as the tag.
+    ArrayList<Object> l = new ArrayList<>();
+
+    for (int i = 0; i < 20; i++) {
+      Integer o = new Integer(i);
+      l.add(o);
+      if (i % 10 != 0) {
+        setTag(o, i % 10);
+      }
+    }
+
+    testGetTaggedObjectsRun(l, null, false, false);
+    testGetTaggedObjectsRun(l, null, true, true);
+    testGetTaggedObjectsRun(l, new long[] { 2, 5 }, true, true);
+    testGetTaggedObjectsRun(l, null, false, true);
+    testGetTaggedObjectsRun(l, null, true, false);
+  }
+
+  private static void testGetTaggedObjectsRun(ArrayList<Object> l, long[] searchTags,
+      boolean returnObjects, boolean returnTags) {
+    Object[] result = getTaggedObjects(searchTags, returnObjects, returnTags);
+
+    Object[] objects = (Object[])result[0];
+    long[] tags = (long[])result[1];
+    int count = (int)result[2];
+
+    System.out.println(count);
+    printArraysSorted(objects, tags);
+  }
+
+  private static void printArraysSorted(Object[] objects, long[] tags) {
+    if (objects == null && tags == null) {
+      System.out.println("<nothing>");
+      return;
+    }
+
+    int l1 = objects == null ? 0 : objects.length;
+    int l2 = tags == null ? 0 : tags.length;
+    int l = Math.max(l1, l2);
+    Pair[] tmp = new Pair[l];
+    for (int i = 0; i < l; i++) {
+      tmp[i] = new Pair(objects == null ? null : objects[i], tags == null ? 0 : tags[i]);
+    }
+
+    Arrays.sort(tmp);
+
+    System.out.println(Arrays.toString(tmp));
+  }
+
+  private static class Pair implements Comparable<Pair> {
+    Object obj;
+    long tag;
+    public Pair(Object o, long t) {
+      obj = o;
+      tag = t;
+    }
+
+    public int compareTo(Pair p) {
+      if (tag != p.tag) {
+        return Long.compare(tag, p.tag);
+      }
+
+      if ((obj instanceof Comparable) && (p.obj instanceof Comparable)) {
+        // Not strictly correct, but a best-effort ordering is fine here.
+        int result = ((Comparable<Object>)obj).compareTo(p.obj);
+        if (result != 0) {
+          return result;
+        }
+      }
+
+      if (obj != null && p.obj != null) {
+        return obj.hashCode() - p.obj.hashCode();
+      }
+
+      if (obj != null) {
+        return 1;
+      }
+
+      if (p.obj != null) {
+        return -1;
+      }
+
+      return hashCode() - p.hashCode();
+    }
+
+    public String toString() {
+      return "<" + obj + ";" + tag + ">";
+    }
+  }
+
   private static native void setTag(Object o, long tag);
   private static native long getTag(Object o);
+  private static native Object[] getTaggedObjects(long[] searchTags, boolean returnObjects,
+      boolean returnTags);
 }
diff --git a/test/903-hello-tagging/tagging.cc b/test/903-hello-tagging/tagging.cc
index 7d692fb..1557d45 100644
--- a/test/903-hello-tagging/tagging.cc
+++ b/test/903-hello-tagging/tagging.cc
@@ -21,10 +21,14 @@
 #include <stdio.h>
 #include <vector>
 
+#include "jni.h"
+#include "ScopedLocalRef.h"
+#include "ScopedPrimitiveArray.h"
+
 #include "art_method-inl.h"
 #include "base/logging.h"
-#include "jni.h"
 #include "openjdkjvmti/jvmti.h"
+#include "ti-agent/common_helper.h"
 #include "ti-agent/common_load.h"
 #include "utils.h"
 
@@ -56,6 +60,84 @@
   return tag;
 }
 
+extern "C" JNIEXPORT jobjectArray JNICALL Java_Main_getTaggedObjects(JNIEnv* env,
+                                                                     jclass,
+                                                                     jlongArray searchTags,
+                                                                     jboolean returnObjects,
+                                                                     jboolean returnTags) {
+  ScopedLongArrayRO scoped_array(env);
+  if (searchTags != nullptr) {
+    scoped_array.reset(searchTags);
+  }
+  const jlong* tag_ptr = scoped_array.get();
+  if (tag_ptr == nullptr) {
+    // Can never pass null.
+    tag_ptr = reinterpret_cast<const jlong*>(1);
+  }
+
+  jint result_count;
+  jobject* result_object_array;
+  jobject** result_object_array_ptr = returnObjects == JNI_TRUE ? &result_object_array : nullptr;
+  jlong* result_tag_array;
+  jlong** result_tag_array_ptr = returnTags == JNI_TRUE ? &result_tag_array : nullptr;
+
+  jvmtiError ret = jvmti_env->GetObjectsWithTags(scoped_array.size(),
+                                                 tag_ptr,
+                                                 &result_count,
+                                                 result_object_array_ptr,
+                                                 result_tag_array_ptr);
+  if (ret != JVMTI_ERROR_NONE) {
+    char* err;
+    jvmti_env->GetErrorName(ret, &err);
+    printf("Failure running GetLoadedClasses: %s\n", err);
+    return nullptr;
+  }
+
+  CHECK_GE(result_count, 0);
+
+  ScopedLocalRef<jclass> obj_class(env, env->FindClass("java/lang/Object"));
+  if (obj_class.get() == nullptr) {
+    return nullptr;
+  }
+
+  jobjectArray resultObjectArray = nullptr;
+  if (returnObjects == JNI_TRUE) {
+    resultObjectArray = env->NewObjectArray(result_count, obj_class.get(), nullptr);
+    if (resultObjectArray == nullptr) {
+      return nullptr;
+    }
+    for (jint i = 0; i < result_count; ++i) {
+      env->SetObjectArrayElement(resultObjectArray, i, result_object_array[i]);
+    }
+  }
+
+  jlongArray resultTagArray = nullptr;
+  if (returnTags == JNI_TRUE) {
+    resultTagArray = env->NewLongArray(result_count);
+    env->SetLongArrayRegion(resultTagArray, 0, result_count, result_tag_array);
+  }
+
+  jobject count_integer;
+  {
+    ScopedLocalRef<jclass> integer_class(env, env->FindClass("java/lang/Integer"));
+    jmethodID methodID = env->GetMethodID(integer_class.get(), "<init>", "(I)V");
+    count_integer = env->NewObject(integer_class.get(), methodID, result_count);
+    if (count_integer == nullptr) {
+      return nullptr;
+    }
+  }
+
+  jobjectArray resultArray = env->NewObjectArray(3, obj_class.get(), nullptr);
+  if (resultArray == nullptr) {
+    return nullptr;
+  }
+  env->SetObjectArrayElement(resultArray, 0, resultObjectArray);
+  env->SetObjectArrayElement(resultArray, 1, resultTagArray);
+  env->SetObjectArrayElement(resultArray, 2, count_integer);
+
+  return resultArray;
+}
+
 // Don't do anything
 jint OnLoad(JavaVM* vm,
             char* options ATTRIBUTE_UNUSED,
@@ -64,6 +146,7 @@
     printf("Unable to get jvmti env!\n");
     return 1;
   }
+  SetAllCapabilities(jvmti_env);
   return 0;
 }
 
diff --git a/test/904-object-allocation/tracking.cc b/test/904-object-allocation/tracking.cc
index 57bfed5..9261a9f 100644
--- a/test/904-object-allocation/tracking.cc
+++ b/test/904-object-allocation/tracking.cc
@@ -26,6 +26,7 @@
 #include "openjdkjvmti/jvmti.h"
 #include "ScopedLocalRef.h"
 #include "ScopedUtfChars.h"
+#include "ti-agent/common_helper.h"
 #include "ti-agent/common_load.h"
 #include "utils.h"
 
@@ -95,6 +96,7 @@
     return 1;
   }
   jvmti_env->SetEventNotificationMode(JVMTI_ENABLE, JVMTI_EVENT_VM_OBJECT_ALLOC, nullptr);
+  SetAllCapabilities(jvmti_env);
   return 0;
 }
 
diff --git a/test/905-object-free/tracking_free.cc b/test/905-object-free/tracking_free.cc
index b41a914..fc43acc 100644
--- a/test/905-object-free/tracking_free.cc
+++ b/test/905-object-free/tracking_free.cc
@@ -26,6 +26,7 @@
 #include "openjdkjvmti/jvmti.h"
 #include "ScopedLocalRef.h"
 #include "ScopedUtfChars.h"
+#include "ti-agent/common_helper.h"
 #include "ti-agent/common_load.h"
 #include "utils.h"
 
@@ -87,6 +88,7 @@
     printf("Unable to get jvmti env!\n");
     return 1;
   }
+  SetAllCapabilities(jvmti_env);
   return 0;
 }
 
diff --git a/test/906-iterate-heap/iterate_heap.cc b/test/906-iterate-heap/iterate_heap.cc
index ab1d8d8..8dac89d 100644
--- a/test/906-iterate-heap/iterate_heap.cc
+++ b/test/906-iterate-heap/iterate_heap.cc
@@ -25,6 +25,7 @@
 #include "jni.h"
 #include "openjdkjvmti/jvmti.h"
 #include "ScopedPrimitiveArray.h"
+#include "ti-agent/common_helper.h"
 #include "ti-agent/common_load.h"
 
 namespace art {
@@ -180,6 +181,7 @@
     printf("Unable to get jvmti env!\n");
     return 1;
   }
+  SetAllCapabilities(jvmti_env);
   return 0;
 }
 
diff --git a/test/907-get-loaded-classes/get_loaded_classes.cc b/test/907-get-loaded-classes/get_loaded_classes.cc
index 0e09d1b..afbb774 100644
--- a/test/907-get-loaded-classes/get_loaded_classes.cc
+++ b/test/907-get-loaded-classes/get_loaded_classes.cc
@@ -27,6 +27,7 @@
 #include "ScopedLocalRef.h"
 #include "ScopedUtfChars.h"
 
+#include "ti-agent/common_helper.h"
 #include "ti-agent/common_load.h"
 
 namespace art {
@@ -50,28 +51,14 @@
     return nullptr;
   }
 
-  ScopedLocalRef<jclass> obj_class(env, env->FindClass("java/lang/String"));
-  if (obj_class.get() == nullptr) {
-    return nullptr;
-  }
-
-  jobjectArray ret = env->NewObjectArray(count, obj_class.get(), nullptr);
-  if (ret == nullptr) {
-    return ret;
-  }
-
-  for (size_t i = 0; i < static_cast<size_t>(count); ++i) {
+  auto callback = [&](jint i) {
     jstring class_name = GetClassName(env, classes[i]);
-    env->SetObjectArrayElement(ret, static_cast<jint>(i), class_name);
-    env->DeleteLocalRef(class_name);
-  }
-
-  // Need to:
-  // 1) Free the local references.
-  // 2) Deallocate.
-  for (size_t i = 0; i < static_cast<size_t>(count); ++i) {
     env->DeleteLocalRef(classes[i]);
-  }
+    return class_name;
+  };
+  jobjectArray ret = CreateObjectArray(env, count, "java/lang/String", callback);
+
+  // Still need to deallocate the classes array.
   jvmti_env->Deallocate(reinterpret_cast<unsigned char*>(classes));
 
   return ret;
@@ -85,6 +72,7 @@
     printf("Unable to get jvmti env!\n");
     return 1;
   }
+  SetAllCapabilities(jvmti_env);
   return 0;
 }
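
The rewrite above replaces the hand-rolled object-array construction with a CreateObjectArray helper driven by a per-index callback; the same helper is reused by the new 910, 911 and 912 tests below. Its definition also lives in ti-agent/common_helper.h and is not shown in this diff; a plausible sketch, assuming a callable that returns a local reference (or nullptr) for each index:

    // Sketch only, not the actual helper: CreateObjectArray as it is used above.
    // T is any callable taking a jint index and returning a jobject local reference.
    #include "jni.h"
    #include "ScopedLocalRef.h"

    template <typename T>
    static jobjectArray CreateObjectArray(JNIEnv* env,
                                          jint size,
                                          const char* component_type_descriptor,
                                          T src) {
      if (size < 0) {
        return nullptr;
      }
      ScopedLocalRef<jclass> obj_class(env, env->FindClass(component_type_descriptor));
      if (obj_class.get() == nullptr) {
        return nullptr;
      }
      jobjectArray ret = env->NewObjectArray(size, obj_class.get(), nullptr);
      if (ret == nullptr) {
        return nullptr;
      }
      for (jint i = 0; i != size; ++i) {
        jobject element = src(i);
        env->SetObjectArrayElement(ret, i, element);
        if (element != nullptr) {
          env->DeleteLocalRef(element);
        }
      }
      return ret;
    }
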
 
diff --git a/test/908-gc-start-finish/gc_callbacks.cc b/test/908-gc-start-finish/gc_callbacks.cc
index d546513..771d1ad 100644
--- a/test/908-gc-start-finish/gc_callbacks.cc
+++ b/test/908-gc-start-finish/gc_callbacks.cc
@@ -22,6 +22,7 @@
 #include "base/macros.h"
 #include "jni.h"
 #include "openjdkjvmti/jvmti.h"
+#include "ti-agent/common_helper.h"
 #include "ti-agent/common_load.h"
 
 namespace art {
@@ -98,6 +99,7 @@
     printf("Unable to get jvmti env!\n");
     return 1;
   }
+  SetAllCapabilities(jvmti_env);
   return 0;
 }
 
diff --git a/test/910-methods/build b/test/910-methods/build
new file mode 100755
index 0000000..898e2e5
--- /dev/null
+++ b/test/910-methods/build
@@ -0,0 +1,17 @@
+#!/bin/bash
+#
+# Copyright 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+./default-build "$@" --experimental agents
diff --git a/test/910-methods/expected.txt b/test/910-methods/expected.txt
new file mode 100644
index 0000000..9a74799
--- /dev/null
+++ b/test/910-methods/expected.txt
@@ -0,0 +1,15 @@
+[toString, ()Ljava/lang/String;, null]
+class java.lang.Object
+1
+[charAt, (I)C, null]
+class java.lang.String
+257
+[sqrt, (D)D, null]
+class java.lang.Math
+265
+[add, (Ljava/lang/Object;)Z, null]
+interface java.util.List
+1025
+[run, ()V, null]
+class $Proxy0
+17
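
The trailing integer in each triple of the golden output above is the raw value returned by GetMethodModifiers, i.e. the java.lang.reflect.Modifier bit mask of the method. A small illustrative decoding of the values recorded in this file (flag constants as defined by the JVM specification):

    // Illustrative only: decode the modifier values that appear in the golden file.
    #include <cstdio>

    int main() {
      constexpr int kPublic = 0x0001;
      constexpr int kStatic = 0x0008;
      constexpr int kFinal = 0x0010;
      constexpr int kNative = 0x0100;
      constexpr int kAbstract = 0x0400;
      struct Row { const char* method; int modifiers; };
      const Row rows[] = {
          {"Object.toString", kPublic},                      // 1
          {"String.charAt",   kPublic | kNative},            // 257
          {"Math.sqrt",       kPublic | kStatic | kNative},  // 265
          {"List.add",        kPublic | kAbstract},          // 1025
          {"$Proxy0.run",     kPublic | kFinal},             // 17
      };
      for (const Row& row : rows) {
        std::printf("%-16s -> %d\n", row.method, row.modifiers);
      }
      return 0;
    }
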
diff --git a/test/910-methods/info.txt b/test/910-methods/info.txt
new file mode 100644
index 0000000..875a5f6
--- /dev/null
+++ b/test/910-methods/info.txt
@@ -0,0 +1 @@
+Tests basic functions in the jvmti plugin.
diff --git a/test/910-methods/methods.cc b/test/910-methods/methods.cc
new file mode 100644
index 0000000..8f0850b
--- /dev/null
+++ b/test/910-methods/methods.cc
@@ -0,0 +1,117 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "methods.h"
+
+#include <stdio.h>
+
+#include "base/macros.h"
+#include "jni.h"
+#include "openjdkjvmti/jvmti.h"
+#include "ScopedLocalRef.h"
+
+#include "ti-agent/common_helper.h"
+#include "ti-agent/common_load.h"
+
+namespace art {
+namespace Test910Methods {
+
+extern "C" JNIEXPORT jobjectArray JNICALL Java_Main_getMethodName(
+    JNIEnv* env, jclass klass ATTRIBUTE_UNUSED, jobject method) {
+  jmethodID id = env->FromReflectedMethod(method);
+
+  char* name;
+  char* sig;
+  char* gen;
+  jvmtiError result = jvmti_env->GetMethodName(id, &name, &sig, &gen);
+  if (result != JVMTI_ERROR_NONE) {
+    char* err;
+    jvmti_env->GetErrorName(result, &err);
+    printf("Failure running GetMethodName: %s\n", err);
+    return nullptr;
+  }
+
+  auto callback = [&](jint i) {
+    if (i == 0) {
+      return name == nullptr ? nullptr : env->NewStringUTF(name);
+    } else if (i == 1) {
+      return sig == nullptr ? nullptr : env->NewStringUTF(sig);
+    } else {
+      return gen == nullptr ? nullptr : env->NewStringUTF(gen);
+    }
+  };
+  jobjectArray ret = CreateObjectArray(env, 3, "java/lang/String", callback);
+
+  // Need to deallocate the strings.
+  if (name != nullptr) {
+    jvmti_env->Deallocate(reinterpret_cast<unsigned char*>(name));
+  }
+  if (sig != nullptr) {
+    jvmti_env->Deallocate(reinterpret_cast<unsigned char*>(sig));
+  }
+  if (gen != nullptr) {
+    jvmti_env->Deallocate(reinterpret_cast<unsigned char*>(gen));
+  }
+
+  return ret;
+}
+
+extern "C" JNIEXPORT jclass JNICALL Java_Main_getMethodDeclaringClass(
+    JNIEnv* env, jclass klass ATTRIBUTE_UNUSED, jobject method) {
+  jmethodID id = env->FromReflectedMethod(method);
+
+  jclass declaring_class;
+  jvmtiError result = jvmti_env->GetMethodDeclaringClass(id, &declaring_class);
+  if (result != JVMTI_ERROR_NONE) {
+    char* err;
+    jvmti_env->GetErrorName(result, &err);
+    printf("Failure running GetMethodDeclaringClass: %s\n", err);
+    return nullptr;
+  }
+
+  return declaring_class;
+}
+
+extern "C" JNIEXPORT jint JNICALL Java_Main_getMethodModifiers(
+    JNIEnv* env, jclass klass ATTRIBUTE_UNUSED, jobject method) {
+  jmethodID id = env->FromReflectedMethod(method);
+
+  jint modifiers;
+  jvmtiError result = jvmti_env->GetMethodModifiers(id, &modifiers);
+  if (result != JVMTI_ERROR_NONE) {
+    char* err;
+    jvmti_env->GetErrorName(result, &err);
+    printf("Failure running GetMethodModifiers: %s\n", err);
+    return 0;
+  }
+
+  return modifiers;
+}
+
+// Acquire the jvmti env and request all capabilities.
+jint OnLoad(JavaVM* vm,
+            char* options ATTRIBUTE_UNUSED,
+            void* reserved ATTRIBUTE_UNUSED) {
+  if (vm->GetEnv(reinterpret_cast<void**>(&jvmti_env), JVMTI_VERSION_1_0)) {
+    printf("Unable to get jvmti env!\n");
+    return 1;
+  }
+  SetAllCapabilities(jvmti_env);
+  return 0;
+}
+
+}  // namespace Test910Methods
+}  // namespace art
diff --git a/test/562-no-intermediate/src/Main.java b/test/910-methods/methods.h
similarity index 67%
copy from test/562-no-intermediate/src/Main.java
copy to test/910-methods/methods.h
index 3b74d6f..93d1874 100644
--- a/test/562-no-intermediate/src/Main.java
+++ b/test/910-methods/methods.h
@@ -14,14 +14,17 @@
  * limitations under the License.
  */
 
-public class Main {
+#ifndef ART_TEST_910_METHODS_METHODS_H_
+#define ART_TEST_910_METHODS_METHODS_H_
 
-  /// CHECK-START-ARM64: int Main.main(String[]) register_allocator (after)
-  /// CHECK-NOT: IntermediateAddress
-  public static void main(String[] args) {
-    array[index] += Math.cos(42);
-  }
+#include <jni.h>
 
-  static int index = 0;
-  static double[] array = new double[2];
-}
+namespace art {
+namespace Test910Methods {
+
+jint OnLoad(JavaVM* vm, char* options, void* reserved);
+
+}  // namespace Test910Methods
+}  // namespace art
+
+#endif  // ART_TEST_910_METHODS_METHODS_H_
diff --git a/test/910-methods/run b/test/910-methods/run
new file mode 100755
index 0000000..4dd2555
--- /dev/null
+++ b/test/910-methods/run
@@ -0,0 +1,43 @@
+#!/bin/bash
+#
+# Copyright 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+plugin=libopenjdkjvmtid.so
+agent=libtiagentd.so
+lib=tiagentd
+if [[ "$@" == *"-O"* ]]; then
+  agent=libtiagent.so
+  plugin=libopenjdkjvmti.so
+  lib=tiagent
+fi
+
+if [[ "$@" == *"--jvm"* ]]; then
+  arg="jvm"
+else
+  arg="art"
+fi
+
+if [[ "$@" != *"--debuggable"* ]]; then
+  other_args=" -Xcompiler-option --debuggable "
+else
+  other_args=""
+fi
+
+./default-run "$@" --experimental agents \
+                   --experimental runtime-plugins \
+                   --runtime-option -agentpath:${agent}=910-methods,${arg} \
+                   --android-runtime-option -Xplugin:${plugin} \
+                   ${other_args} \
+                   --args ${lib}
diff --git a/test/910-methods/src/Main.java b/test/910-methods/src/Main.java
new file mode 100644
index 0000000..3459134
--- /dev/null
+++ b/test/910-methods/src/Main.java
@@ -0,0 +1,76 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.lang.reflect.Method;
+import java.lang.reflect.Proxy;
+import java.util.Arrays;
+
+public class Main {
+  public static void main(String[] args) throws Exception {
+    System.loadLibrary(args[1]);
+
+    doTest();
+  }
+
+  public static void doTest() throws Exception {
+    testMethod("java.lang.Object", "toString");
+    testMethod("java.lang.String", "charAt", int.class);
+    testMethod("java.lang.Math", "sqrt", double.class);
+    testMethod("java.util.List", "add", Object.class);
+
+    testMethod(getProxyClass(), "run");
+  }
+
+  private static Class<?> proxyClass = null;
+
+  private static Class<?> getProxyClass() throws Exception {
+    if (proxyClass != null) {
+      return proxyClass;
+    }
+
+    proxyClass = Proxy.getProxyClass(Main.class.getClassLoader(), new Class[] { Runnable.class });
+    return proxyClass;
+  }
+
+  private static void testMethod(String className, String methodName, Class<?>... types)
+      throws Exception {
+    Class<?> base = Class.forName(className);
+    testMethod(base, methodName, types);
+  }
+
+  private static void testMethod(Class<?> base, String methodName, Class<?>... types)
+      throws Exception {
+    Method m = base.getDeclaredMethod(methodName, types);
+    String[] result = getMethodName(m);
+    System.out.println(Arrays.toString(result));
+
+    Class<?> declClass = getMethodDeclaringClass(m);
+    if (base != declClass) {
+      throw new RuntimeException("Declaring class not equal: " + base + " vs " + declClass);
+    }
+    System.out.println(declClass);
+
+    int modifiers = getMethodModifiers(m);
+    if (modifiers != m.getModifiers()) {
+      throw new RuntimeException("Modifiers not equal: " + m.getModifiers() + " vs " + modifiers);
+    }
+    System.out.println(modifiers);
+  }
+
+  private static native String[] getMethodName(Method m);
+  private static native Class<?> getMethodDeclaringClass(Method m);
+  private static native int getMethodModifiers(Method m);
+}
diff --git a/test/911-get-stack-trace/build b/test/911-get-stack-trace/build
new file mode 100755
index 0000000..898e2e5
--- /dev/null
+++ b/test/911-get-stack-trace/build
@@ -0,0 +1,17 @@
+#!/bin/bash
+#
+# Copyright 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+./default-build "$@" --experimental agents
diff --git a/test/911-get-stack-trace/expected.txt b/test/911-get-stack-trace/expected.txt
new file mode 100644
index 0000000..20bab78
--- /dev/null
+++ b/test/911-get-stack-trace/expected.txt
@@ -0,0 +1,208 @@
+###################
+### Same thread ###
+###################
+From top
+---------
+ getStackTrace (Ljava/lang/Thread;II)[Ljava/lang/String;
+ print (Ljava/lang/Thread;II)V
+ printOrWait (IILMain$ControlData;)V
+ baz (IIILMain$ControlData;)Ljava/lang/Object;
+ bar (IIILMain$ControlData;)J
+ foo (IIILMain$ControlData;)I
+ baz (IIILMain$ControlData;)Ljava/lang/Object;
+ bar (IIILMain$ControlData;)J
+ foo (IIILMain$ControlData;)I
+ baz (IIILMain$ControlData;)Ljava/lang/Object;
+ bar (IIILMain$ControlData;)J
+ foo (IIILMain$ControlData;)I
+ baz (IIILMain$ControlData;)Ljava/lang/Object;
+ bar (IIILMain$ControlData;)J
+ foo (IIILMain$ControlData;)I
+ baz (IIILMain$ControlData;)Ljava/lang/Object;
+ bar (IIILMain$ControlData;)J
+ foo (IIILMain$ControlData;)I
+ doTest ()V
+ main ([Ljava/lang/String;)V
+---------
+ print (Ljava/lang/Thread;II)V
+ printOrWait (IILMain$ControlData;)V
+ baz (IIILMain$ControlData;)Ljava/lang/Object;
+ bar (IIILMain$ControlData;)J
+ foo (IIILMain$ControlData;)I
+ baz (IIILMain$ControlData;)Ljava/lang/Object;
+ bar (IIILMain$ControlData;)J
+ foo (IIILMain$ControlData;)I
+ baz (IIILMain$ControlData;)Ljava/lang/Object;
+ bar (IIILMain$ControlData;)J
+ foo (IIILMain$ControlData;)I
+ baz (IIILMain$ControlData;)Ljava/lang/Object;
+ bar (IIILMain$ControlData;)J
+ foo (IIILMain$ControlData;)I
+ baz (IIILMain$ControlData;)Ljava/lang/Object;
+ bar (IIILMain$ControlData;)J
+ foo (IIILMain$ControlData;)I
+ doTest ()V
+ main ([Ljava/lang/String;)V
+---------
+ getStackTrace (Ljava/lang/Thread;II)[Ljava/lang/String;
+ print (Ljava/lang/Thread;II)V
+ printOrWait (IILMain$ControlData;)V
+ baz (IIILMain$ControlData;)Ljava/lang/Object;
+ bar (IIILMain$ControlData;)J
+---------
+ printOrWait (IILMain$ControlData;)V
+ baz (IIILMain$ControlData;)Ljava/lang/Object;
+ bar (IIILMain$ControlData;)J
+ foo (IIILMain$ControlData;)I
+ baz (IIILMain$ControlData;)Ljava/lang/Object;
+From bottom
+---------
+ main ([Ljava/lang/String;)V
+---------
+ baz (IIILMain$ControlData;)Ljava/lang/Object;
+ bar (IIILMain$ControlData;)J
+ foo (IIILMain$ControlData;)I
+ doTest ()V
+ main ([Ljava/lang/String;)V
+---------
+ bar (IIILMain$ControlData;)J
+ foo (IIILMain$ControlData;)I
+ baz (IIILMain$ControlData;)Ljava/lang/Object;
+ bar (IIILMain$ControlData;)J
+ foo (IIILMain$ControlData;)I
+
+################################
+### Other thread (suspended) ###
+################################
+From top
+---------
+ wait ()V
+ printOrWait (IILMain$ControlData;)V
+ baz (IIILMain$ControlData;)Ljava/lang/Object;
+ bar (IIILMain$ControlData;)J
+ foo (IIILMain$ControlData;)I
+ baz (IIILMain$ControlData;)Ljava/lang/Object;
+ bar (IIILMain$ControlData;)J
+ foo (IIILMain$ControlData;)I
+ baz (IIILMain$ControlData;)Ljava/lang/Object;
+ bar (IIILMain$ControlData;)J
+ foo (IIILMain$ControlData;)I
+ baz (IIILMain$ControlData;)Ljava/lang/Object;
+ bar (IIILMain$ControlData;)J
+ foo (IIILMain$ControlData;)I
+ baz (IIILMain$ControlData;)Ljava/lang/Object;
+ bar (IIILMain$ControlData;)J
+ foo (IIILMain$ControlData;)I
+ run ()V
+---------
+ printOrWait (IILMain$ControlData;)V
+ baz (IIILMain$ControlData;)Ljava/lang/Object;
+ bar (IIILMain$ControlData;)J
+ foo (IIILMain$ControlData;)I
+ baz (IIILMain$ControlData;)Ljava/lang/Object;
+ bar (IIILMain$ControlData;)J
+ foo (IIILMain$ControlData;)I
+ baz (IIILMain$ControlData;)Ljava/lang/Object;
+ bar (IIILMain$ControlData;)J
+ foo (IIILMain$ControlData;)I
+ baz (IIILMain$ControlData;)Ljava/lang/Object;
+ bar (IIILMain$ControlData;)J
+ foo (IIILMain$ControlData;)I
+ baz (IIILMain$ControlData;)Ljava/lang/Object;
+ bar (IIILMain$ControlData;)J
+ foo (IIILMain$ControlData;)I
+ run ()V
+---------
+ wait ()V
+ printOrWait (IILMain$ControlData;)V
+ baz (IIILMain$ControlData;)Ljava/lang/Object;
+ bar (IIILMain$ControlData;)J
+ foo (IIILMain$ControlData;)I
+---------
+ baz (IIILMain$ControlData;)Ljava/lang/Object;
+ bar (IIILMain$ControlData;)J
+ foo (IIILMain$ControlData;)I
+ baz (IIILMain$ControlData;)Ljava/lang/Object;
+ bar (IIILMain$ControlData;)J
+From bottom
+---------
+ run ()V
+---------
+ foo (IIILMain$ControlData;)I
+ baz (IIILMain$ControlData;)Ljava/lang/Object;
+ bar (IIILMain$ControlData;)J
+ foo (IIILMain$ControlData;)I
+ run ()V
+---------
+ baz (IIILMain$ControlData;)Ljava/lang/Object;
+ bar (IIILMain$ControlData;)J
+ foo (IIILMain$ControlData;)I
+ baz (IIILMain$ControlData;)Ljava/lang/Object;
+ bar (IIILMain$ControlData;)J
+
+###########################
+### Other thread (live) ###
+###########################
+From top
+---------
+ printOrWait (IILMain$ControlData;)V
+ baz (IIILMain$ControlData;)Ljava/lang/Object;
+ bar (IIILMain$ControlData;)J
+ foo (IIILMain$ControlData;)I
+ baz (IIILMain$ControlData;)Ljava/lang/Object;
+ bar (IIILMain$ControlData;)J
+ foo (IIILMain$ControlData;)I
+ baz (IIILMain$ControlData;)Ljava/lang/Object;
+ bar (IIILMain$ControlData;)J
+ foo (IIILMain$ControlData;)I
+ baz (IIILMain$ControlData;)Ljava/lang/Object;
+ bar (IIILMain$ControlData;)J
+ foo (IIILMain$ControlData;)I
+ baz (IIILMain$ControlData;)Ljava/lang/Object;
+ bar (IIILMain$ControlData;)J
+ foo (IIILMain$ControlData;)I
+ run ()V
+---------
+ baz (IIILMain$ControlData;)Ljava/lang/Object;
+ bar (IIILMain$ControlData;)J
+ foo (IIILMain$ControlData;)I
+ baz (IIILMain$ControlData;)Ljava/lang/Object;
+ bar (IIILMain$ControlData;)J
+ foo (IIILMain$ControlData;)I
+ baz (IIILMain$ControlData;)Ljava/lang/Object;
+ bar (IIILMain$ControlData;)J
+ foo (IIILMain$ControlData;)I
+ baz (IIILMain$ControlData;)Ljava/lang/Object;
+ bar (IIILMain$ControlData;)J
+ foo (IIILMain$ControlData;)I
+ baz (IIILMain$ControlData;)Ljava/lang/Object;
+ bar (IIILMain$ControlData;)J
+ foo (IIILMain$ControlData;)I
+ run ()V
+---------
+ printOrWait (IILMain$ControlData;)V
+ baz (IIILMain$ControlData;)Ljava/lang/Object;
+ bar (IIILMain$ControlData;)J
+ foo (IIILMain$ControlData;)I
+ baz (IIILMain$ControlData;)Ljava/lang/Object;
+---------
+ bar (IIILMain$ControlData;)J
+ foo (IIILMain$ControlData;)I
+ baz (IIILMain$ControlData;)Ljava/lang/Object;
+ bar (IIILMain$ControlData;)J
+ foo (IIILMain$ControlData;)I
+From bottom
+---------
+ run ()V
+---------
+ foo (IIILMain$ControlData;)I
+ baz (IIILMain$ControlData;)Ljava/lang/Object;
+ bar (IIILMain$ControlData;)J
+ foo (IIILMain$ControlData;)I
+ run ()V
+---------
+ baz (IIILMain$ControlData;)Ljava/lang/Object;
+ bar (IIILMain$ControlData;)J
+ foo (IIILMain$ControlData;)I
+ baz (IIILMain$ControlData;)Ljava/lang/Object;
+ bar (IIILMain$ControlData;)J
diff --git a/test/911-get-stack-trace/info.txt b/test/911-get-stack-trace/info.txt
new file mode 100644
index 0000000..875a5f6
--- /dev/null
+++ b/test/911-get-stack-trace/info.txt
@@ -0,0 +1 @@
+Tests basic functions in the jvmti plugin.
diff --git a/test/911-get-stack-trace/run b/test/911-get-stack-trace/run
new file mode 100755
index 0000000..43fc325
--- /dev/null
+++ b/test/911-get-stack-trace/run
@@ -0,0 +1,43 @@
+#!/bin/bash
+#
+# Copyright 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+plugin=libopenjdkjvmtid.so
+agent=libtiagentd.so
+lib=tiagentd
+if [[ "$@" == *"-O"* ]]; then
+  agent=libtiagent.so
+  plugin=libopenjdkjvmti.so
+  lib=tiagent
+fi
+
+if [[ "$@" == *"--jvm"* ]]; then
+  arg="jvm"
+else
+  arg="art"
+fi
+
+if [[ "$@" != *"--debuggable"* ]]; then
+  other_args=" -Xcompiler-option --debuggable "
+else
+  other_args=""
+fi
+
+./default-run "$@" --experimental agents \
+                   --experimental runtime-plugins \
+                   --runtime-option -agentpath:${agent}=911-get-stack-trace,${arg} \
+                   --android-runtime-option -Xplugin:${plugin} \
+                   ${other_args} \
+                   --args ${lib}
diff --git a/test/911-get-stack-trace/src/Main.java b/test/911-get-stack-trace/src/Main.java
new file mode 100644
index 0000000..df4501d
--- /dev/null
+++ b/test/911-get-stack-trace/src/Main.java
@@ -0,0 +1,178 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.util.Arrays;
+import java.util.concurrent.CountDownLatch;
+
+public class Main {
+  public static void main(String[] args) throws Exception {
+    System.loadLibrary(args[1]);
+
+    doTest();
+    doTestOtherThreadWait();
+    doTestOtherThreadBusyLoop();
+  }
+
+  public static void doTest() throws Exception {
+    System.out.println("###################");
+    System.out.println("### Same thread ###");
+    System.out.println("###################");
+    System.out.println("From top");
+    Recurse.foo(4, 0, 25, null);
+    Recurse.foo(4, 1, 25, null);
+    Recurse.foo(4, 0, 5, null);
+    Recurse.foo(4, 2, 5, null);
+
+    System.out.println("From bottom");
+    Recurse.foo(4, -1, 25, null);
+    Recurse.foo(4, -5, 5, null);
+    Recurse.foo(4, -7, 5, null);
+  }
+
+  public static void doTestOtherThreadWait() throws Exception {
+    System.out.println();
+    System.out.println("################################");
+    System.out.println("### Other thread (suspended) ###");
+    System.out.println("################################");
+    final ControlData data = new ControlData();
+    data.waitFor = new Object();
+    Thread t = new Thread() {
+      public void run() {
+        Recurse.foo(4, 0, 0, data);
+      }
+    };
+    t.start();
+    data.reached.await();
+    Thread.yield();
+    Thread.sleep(500);  // A little bit of time...
+
+    System.out.println("From top");
+    print(t, 0, 25);
+    print(t, 1, 25);
+    print(t, 0, 5);
+    print(t, 2, 5);
+
+    System.out.println("From bottom");
+    print(t, -1, 25);
+    print(t, -5, 5);
+    print(t, -7, 5);
+
+    // Let the thread make progress and die.
+    synchronized(data.waitFor) {
+      data.waitFor.notifyAll();
+    }
+    t.join();
+  }
+
+  public static void doTestOtherThreadBusyLoop() throws Exception {
+    System.out.println();
+    System.out.println("###########################");
+    System.out.println("### Other thread (live) ###");
+    System.out.println("###########################");
+    final ControlData data = new ControlData();
+    Thread t = new Thread() {
+      public void run() {
+        Recurse.foo(4, 0, 0, data);
+      }
+    };
+    t.start();
+    data.reached.await();
+    Thread.yield();
+    Thread.sleep(500);  // A little bit of time...
+
+    System.out.println("From top");
+    print(t, 0, 25);
+    print(t, 1, 25);
+    print(t, 0, 5);
+    print(t, 2, 5);
+
+    System.out.println("From bottom");
+    print(t, -1, 25);
+    print(t, -5, 5);
+    print(t, -7, 5);
+
+    // Let the thread stop looping and die.
+    data.stop = true;
+    t.join();
+  }
+
+  public static void print(String[] stack) {
+    System.out.println("---------");
+    for (int i = 0; i < stack.length; i += 2) {
+      System.out.print(' ');
+      System.out.print(stack[i]);
+      System.out.print(' ');
+      System.out.println(stack[i + 1]);
+    }
+  }
+
+  public static void print(Thread t, int start, int max) {
+    print(getStackTrace(t, start, max));
+  }
+
+  // Wrap generated stack traces into a class to separate them nicely.
+  public static class Recurse {
+
+    public static int foo(int x, int start, int max, ControlData data) {
+      bar(x, start, max, data);
+      return 0;
+    }
+
+    private static long bar(int x, int start, int max, ControlData data) {
+      baz(x, start, max, data);
+      return 0;
+    }
+
+    private static Object baz(int x, int start, int max, ControlData data) {
+      if (x == 0) {
+        printOrWait(start, max, data);
+      } else {
+        foo(x - 1, start, max, data);
+      }
+      return null;
+    }
+
+    private static void printOrWait(int start, int max, ControlData data) {
+      if (data == null) {
+        print(Thread.currentThread(), start, max);
+      } else {
+        if (data.waitFor != null) {
+          synchronized (data.waitFor) {
+            data.reached.countDown();
+            try {
+              data.waitFor.wait();  // Use wait() as it doesn't have a "hidden" Java call-graph.
+            } catch (Throwable t) {
+              throw new RuntimeException(t);
+            }
+          }
+        } else {
+          data.reached.countDown();
+          while (!data.stop) {
+            // Busy-loop.
+          }
+        }
+      }
+    }
+  }
+
+  public static class ControlData {
+    CountDownLatch reached = new CountDownLatch(1);
+    Object waitFor = null;
+    volatile boolean stop = false;
+  }
+
+  public static native String[] getStackTrace(Thread thread, int start, int max);
+}
diff --git a/test/911-get-stack-trace/stack_trace.cc b/test/911-get-stack-trace/stack_trace.cc
new file mode 100644
index 0000000..e7d9380
--- /dev/null
+++ b/test/911-get-stack-trace/stack_trace.cc
@@ -0,0 +1,95 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "stack_trace.h"
+
+#include <memory>
+#include <stdio.h>
+
+#include "base/logging.h"
+#include "jni.h"
+#include "openjdkjvmti/jvmti.h"
+#include "ScopedLocalRef.h"
+#include "ti-agent/common_helper.h"
+#include "ti-agent/common_load.h"
+
+namespace art {
+namespace Test911GetStackTrace {
+
+extern "C" JNIEXPORT jobjectArray JNICALL Java_Main_getStackTrace(
+    JNIEnv* env, jclass klass ATTRIBUTE_UNUSED, jthread thread, jint start, jint max) {
+  std::unique_ptr<jvmtiFrameInfo[]> frames(new jvmtiFrameInfo[max]);
+
+  jint count;
+  {
+    jvmtiError result = jvmti_env->GetStackTrace(thread, start, max, frames.get(), &count);
+    if (result != JVMTI_ERROR_NONE) {
+      char* err;
+      jvmti_env->GetErrorName(result, &err);
+      printf("Failure running GetStackTrace: %s\n", err);
+      return nullptr;
+    }
+  }
+
+  auto callback = [&](jint i) -> jstring {
+    size_t method_index = static_cast<size_t>(i) / 2;
+    char* name;
+    char* sig;
+    char* gen;
+    {
+      jvmtiError result2 = jvmti_env->GetMethodName(frames[method_index].method, &name, &sig, &gen);
+      if (result2 != JVMTI_ERROR_NONE) {
+        char* err;
+        jvmti_env->GetErrorName(result2, &err);
+        printf("Failure running GetMethodName: %s\n", err);
+        return nullptr;
+      }
+    }
+    jstring callback_result;
+    if (i % 2 == 0) {
+      callback_result = name == nullptr ? nullptr : env->NewStringUTF(name);
+    } else {
+      callback_result = sig == nullptr ? nullptr : env->NewStringUTF(sig);
+    }
+
+    if (name != nullptr) {
+      jvmti_env->Deallocate(reinterpret_cast<unsigned char*>(name));
+    }
+    if (sig != nullptr) {
+      jvmti_env->Deallocate(reinterpret_cast<unsigned char*>(sig));
+    }
+    if (gen != nullptr) {
+      jvmti_env->Deallocate(reinterpret_cast<unsigned char*>(gen));
+    }
+    return callback_result;
+  };
+  return CreateObjectArray(env, 2 * count, "java/lang/String", callback);
+}
+
+// Acquire the jvmti env and request all capabilities.
+jint OnLoad(JavaVM* vm,
+            char* options ATTRIBUTE_UNUSED,
+            void* reserved ATTRIBUTE_UNUSED) {
+  if (vm->GetEnv(reinterpret_cast<void**>(&jvmti_env), JVMTI_VERSION_1_0)) {
+    printf("Unable to get jvmti env!\n");
+    return 1;
+  }
+  SetAllCapabilities(jvmti_env);
+  return 0;
+}
+
+}  // namespace Test911GetStackTrace
+}  // namespace art
diff --git a/test/562-no-intermediate/src/Main.java b/test/911-get-stack-trace/stack_trace.h
similarity index 64%
copy from test/562-no-intermediate/src/Main.java
copy to test/911-get-stack-trace/stack_trace.h
index 3b74d6f..eba2a91 100644
--- a/test/562-no-intermediate/src/Main.java
+++ b/test/911-get-stack-trace/stack_trace.h
@@ -14,14 +14,17 @@
  * limitations under the License.
  */
 
-public class Main {
+#ifndef ART_TEST_911_GET_STACK_TRACE_STACK_TRACE_H_
+#define ART_TEST_911_GET_STACK_TRACE_STACK_TRACE_H_
 
-  /// CHECK-START-ARM64: int Main.main(String[]) register_allocator (after)
-  /// CHECK-NOT: IntermediateAddress
-  public static void main(String[] args) {
-    array[index] += Math.cos(42);
-  }
+#include <jni.h>
 
-  static int index = 0;
-  static double[] array = new double[2];
-}
+namespace art {
+namespace Test911GetStackTrace {
+
+jint OnLoad(JavaVM* vm, char* options, void* reserved);
+
+}  // namespace Test911GetStackTrace
+}  // namespace art
+
+#endif  // ART_TEST_911_GET_STACK_TRACE_STACK_TRACE_H_
diff --git a/test/912-classes/build b/test/912-classes/build
new file mode 100755
index 0000000..898e2e5
--- /dev/null
+++ b/test/912-classes/build
@@ -0,0 +1,17 @@
+#!/bin/bash
+#
+# Copyright 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+./default-build "$@" --experimental agents
diff --git a/test/912-classes/classes.cc b/test/912-classes/classes.cc
new file mode 100644
index 0000000..838a92a
--- /dev/null
+++ b/test/912-classes/classes.cc
@@ -0,0 +1,77 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "classes.h"
+
+#include <stdio.h>
+
+#include "base/macros.h"
+#include "jni.h"
+#include "openjdkjvmti/jvmti.h"
+#include "ScopedLocalRef.h"
+
+#include "ti-agent/common_helper.h"
+#include "ti-agent/common_load.h"
+
+namespace art {
+namespace Test912Classes {
+
+extern "C" JNIEXPORT jobjectArray JNICALL Java_Main_getClassSignature(
+    JNIEnv* env, jclass Main_klass ATTRIBUTE_UNUSED, jclass klass) {
+  char* sig;
+  char* gen;
+  jvmtiError result = jvmti_env->GetClassSignature(klass, &sig, &gen);
+  if (result != JVMTI_ERROR_NONE) {
+    char* err;
+    jvmti_env->GetErrorName(result, &err);
+    printf("Failure running GetClassSignature: %s\n", err);
+    return nullptr;
+  }
+
+  auto callback = [&](jint i) {
+    if (i == 0) {
+      return sig == nullptr ? nullptr : env->NewStringUTF(sig);
+    } else {
+      return gen == nullptr ? nullptr : env->NewStringUTF(gen);
+    }
+  };
+  jobjectArray ret = CreateObjectArray(env, 2, "java/lang/String", callback);
+
+  // Need to deallocate the strings.
+  if (sig != nullptr) {
+    jvmti_env->Deallocate(reinterpret_cast<unsigned char*>(sig));
+  }
+  if (gen != nullptr) {
+    jvmti_env->Deallocate(reinterpret_cast<unsigned char*>(gen));
+  }
+
+  return ret;
+}
+
+// Acquire the jvmti env and request all capabilities.
+jint OnLoad(JavaVM* vm,
+            char* options ATTRIBUTE_UNUSED,
+            void* reserved ATTRIBUTE_UNUSED) {
+  if (vm->GetEnv(reinterpret_cast<void**>(&jvmti_env), JVMTI_VERSION_1_0)) {
+    printf("Unable to get jvmti env!\n");
+    return 1;
+  }
+  SetAllCapabilities(jvmti_env);
+  return 0;
+}
+
+}  // namespace Test912Classes
+}  // namespace art
diff --git a/test/562-no-intermediate/src/Main.java b/test/912-classes/classes.h
similarity index 67%
copy from test/562-no-intermediate/src/Main.java
copy to test/912-classes/classes.h
index 3b74d6f..62fb203 100644
--- a/test/562-no-intermediate/src/Main.java
+++ b/test/912-classes/classes.h
@@ -14,14 +14,17 @@
  * limitations under the License.
  */
 
-public class Main {
+#ifndef ART_TEST_912_CLASSES_CLASSES_H_
+#define ART_TEST_912_CLASSES_CLASSES_H_
 
-  /// CHECK-START-ARM64: int Main.main(String[]) register_allocator (after)
-  /// CHECK-NOT: IntermediateAddress
-  public static void main(String[] args) {
-    array[index] += Math.cos(42);
-  }
+#include <jni.h>
 
-  static int index = 0;
-  static double[] array = new double[2];
-}
+namespace art {
+namespace Test912Classes {
+
+jint OnLoad(JavaVM* vm, char* options, void* reserved);
+
+}  // namespace Test912Classes
+}  // namespace art
+
+#endif  // ART_TEST_912_CLASSES_CLASSES_H_
diff --git a/test/912-classes/expected.txt b/test/912-classes/expected.txt
new file mode 100644
index 0000000..71b22f4
--- /dev/null
+++ b/test/912-classes/expected.txt
@@ -0,0 +1,7 @@
+[Ljava/lang/Object;, null]
+[Ljava/lang/String;, null]
+[Ljava/lang/Math;, null]
+[Ljava/util/List;, null]
+[L$Proxy0;, null]
+[I, null]
+[[D, null]
diff --git a/test/912-classes/info.txt b/test/912-classes/info.txt
new file mode 100644
index 0000000..875a5f6
--- /dev/null
+++ b/test/912-classes/info.txt
@@ -0,0 +1 @@
+Tests basic functions in the jvmti plugin.
diff --git a/test/912-classes/run b/test/912-classes/run
new file mode 100755
index 0000000..64bbb98
--- /dev/null
+++ b/test/912-classes/run
@@ -0,0 +1,43 @@
+#!/bin/bash
+#
+# Copyright 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+plugin=libopenjdkjvmtid.so
+agent=libtiagentd.so
+lib=tiagentd
+if [[ "$@" == *"-O"* ]]; then
+  agent=libtiagent.so
+  plugin=libopenjdkjvmti.so
+  lib=tiagent
+fi
+
+if [[ "$@" == *"--jvm"* ]]; then
+  arg="jvm"
+else
+  arg="art"
+fi
+
+if [[ "$@" != *"--debuggable"* ]]; then
+  other_args=" -Xcompiler-option --debuggable "
+else
+  other_args=""
+fi
+
+./default-run "$@" --experimental agents \
+                   --experimental runtime-plugins \
+                   --runtime-option -agentpath:${agent}=912-classes,${arg} \
+                   --android-runtime-option -Xplugin:${plugin} \
+                   ${other_args} \
+                   --args ${lib}
diff --git a/test/912-classes/src/Main.java b/test/912-classes/src/Main.java
new file mode 100644
index 0000000..025584e
--- /dev/null
+++ b/test/912-classes/src/Main.java
@@ -0,0 +1,61 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.lang.reflect.Proxy;
+import java.util.Arrays;
+
+public class Main {
+  public static void main(String[] args) throws Exception {
+    System.loadLibrary(args[1]);
+
+    doTest();
+  }
+
+  public static void doTest() throws Exception {
+    testClass("java.lang.Object");
+    testClass("java.lang.String");
+    testClass("java.lang.Math");
+    testClass("java.util.List");
+
+    testClass(getProxyClass());
+
+    testClass(int.class);
+    testClass(double[].class);
+  }
+
+  private static Class<?> proxyClass = null;
+
+  private static Class<?> getProxyClass() throws Exception {
+    if (proxyClass != null) {
+      return proxyClass;
+    }
+
+    proxyClass = Proxy.getProxyClass(Main.class.getClassLoader(), new Class[] { Runnable.class });
+    return proxyClass;
+  }
+
+  private static void testClass(String className) throws Exception {
+    Class<?> base = Class.forName(className);
+    testClass(base);
+  }
+
+  private static void testClass(Class<?> base) throws Exception {
+    String[] result = getClassSignature(base);
+    System.out.println(Arrays.toString(result));
+  }
+
+  private static native String[] getClassSignature(Class<?> c);
+}
diff --git a/test/913-heaps/build b/test/913-heaps/build
new file mode 100755
index 0000000..898e2e5
--- /dev/null
+++ b/test/913-heaps/build
@@ -0,0 +1,17 @@
+#!/bin/bash
+#
+# Copyright 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+./default-build "$@" --experimental agents
diff --git a/test/913-heaps/expected.txt b/test/913-heaps/expected.txt
new file mode 100644
index 0000000..d1ddbae
--- /dev/null
+++ b/test/913-heaps/expected.txt
@@ -0,0 +1,92 @@
+---
+true true
+root@root --(stack-local)--> 1@1000 [size=16, length=-1]
+root@root --(stack-local)--> 3000@0 [size=132, length=-1]
+root@root --(thread)--> 3000@0 [size=132, length=-1]
+0@0 --(array-element@0)--> 1@1000 [size=16, length=-1]
+1001@0 --(superclass)--> 1000@0 [size=123, length=-1]
+1002@0 --(interface)--> 2001@0 [size=132, length=-1]
+1002@0 --(superclass)--> 1001@0 [size=123, length=-1]
+1@1000 --(class)--> 1000@0 [size=123, length=-1]
+1@1000 --(field@12)--> 3@1001 [size=24, length=-1]
+1@1000 --(field@8)--> 2@1000 [size=16, length=-1]
+2001@0 --(interface)--> 2000@0 [size=132, length=-1]
+2@1000 --(class)--> 1000@0 [size=123, length=-1]
+3@1001 --(class)--> 1001@0 [size=123, length=-1]
+3@1001 --(field@16)--> 4@1000 [size=16, length=-1]
+3@1001 --(field@20)--> 5@1002 [size=32, length=-1]
+4@1000 --(class)--> 1000@0 [size=123, length=-1]
+5@1002 --(class)--> 1002@0 [size=123, length=-1]
+5@1002 --(field@24)--> 6@1000 [size=16, length=-1]
+5@1002 --(field@28)--> 1@1000 [size=16, length=-1]
+6@1000 --(class)--> 1000@0 [size=123, length=-1]
+---
+root@root --(stack-local)--> 1@1000 [size=16, length=-1]
+root@root --(stack-local)--> 2@1000 [size=16, length=-1]
+root@root --(stack-local)--> 3000@0 [size=132, length=-1]
+root@root --(thread)--> 2@1000 [size=16, length=-1]
+root@root --(thread)--> 3000@0 [size=132, length=-1]
+0@0 --(array-element@0)--> 1@1000 [size=16, length=-1]
+1001@0 --(superclass)--> 1000@0 [size=123, length=-1]
+1002@0 --(interface)--> 2001@0 [size=132, length=-1]
+1002@0 --(superclass)--> 1001@0 [size=123, length=-1]
+1@1000 --(class)--> 1000@0 [size=123, length=-1]
+1@1000 --(field@12)--> 3@1001 [size=24, length=-1]
+1@1000 --(field@8)--> 2@1000 [size=16, length=-1]
+2001@0 --(interface)--> 2000@0 [size=132, length=-1]
+2@1000 --(class)--> 1000@0 [size=123, length=-1]
+3@1001 --(class)--> 1001@0 [size=123, length=-1]
+3@1001 --(field@16)--> 4@1000 [size=16, length=-1]
+3@1001 --(field@20)--> 5@1002 [size=32, length=-1]
+4@1000 --(class)--> 1000@0 [size=123, length=-1]
+5@1002 --(class)--> 1002@0 [size=123, length=-1]
+5@1002 --(field@24)--> 6@1000 [size=16, length=-1]
+5@1002 --(field@28)--> 1@1000 [size=16, length=-1]
+6@1000 --(class)--> 1000@0 [size=123, length=-1]
+---
+root@root --(jni-global)--> 1@1000 [size=16, length=-1]
+root@root --(jni-local)--> 1@1000 [size=16, length=-1]
+root@root --(stack-local)--> 1@1000 [size=16, length=-1]
+root@root --(thread)--> 1@1000 [size=16, length=-1]
+root@root --(thread)--> 3000@0 [size=132, length=-1]
+1001@0 --(superclass)--> 1000@0 [size=123, length=-1]
+1002@0 --(interface)--> 2001@0 [size=132, length=-1]
+1002@0 --(superclass)--> 1001@0 [size=123, length=-1]
+1@1000 --(class)--> 1000@0 [size=123, length=-1]
+1@1000 --(field@12)--> 3@1001 [size=24, length=-1]
+1@1000 --(field@8)--> 2@1000 [size=16, length=-1]
+2001@0 --(interface)--> 2000@0 [size=132, length=-1]
+2@1000 --(class)--> 1000@0 [size=123, length=-1]
+3@1001 --(class)--> 1001@0 [size=123, length=-1]
+3@1001 --(field@16)--> 4@1000 [size=16, length=-1]
+3@1001 --(field@20)--> 5@1002 [size=32, length=-1]
+4@1000 --(class)--> 1000@0 [size=123, length=-1]
+5@1002 --(class)--> 1002@0 [size=123, length=-1]
+5@1002 --(field@24)--> 6@1000 [size=16, length=-1]
+5@1002 --(field@28)--> 1@1000 [size=16, length=-1]
+6@1000 --(class)--> 1000@0 [size=123, length=-1]
+---
+root@root --(jni-global)--> 1@1000 [size=16, length=-1]
+root@root --(jni-local)--> 1@1000 [size=16, length=-1]
+root@root --(stack-local)--> 1@1000 [size=16, length=-1]
+root@root --(stack-local)--> 2@1000 [size=16, length=-1]
+root@root --(thread)--> 1@1000 [size=16, length=-1]
+root@root --(thread)--> 2@1000 [size=16, length=-1]
+root@root --(thread)--> 3000@0 [size=132, length=-1]
+1001@0 --(superclass)--> 1000@0 [size=123, length=-1]
+1002@0 --(interface)--> 2001@0 [size=132, length=-1]
+1002@0 --(superclass)--> 1001@0 [size=123, length=-1]
+1@1000 --(class)--> 1000@0 [size=123, length=-1]
+1@1000 --(field@12)--> 3@1001 [size=24, length=-1]
+1@1000 --(field@8)--> 2@1000 [size=16, length=-1]
+2001@0 --(interface)--> 2000@0 [size=132, length=-1]
+2@1000 --(class)--> 1000@0 [size=123, length=-1]
+3@1001 --(class)--> 1001@0 [size=123, length=-1]
+3@1001 --(field@16)--> 4@1000 [size=16, length=-1]
+3@1001 --(field@20)--> 5@1002 [size=32, length=-1]
+4@1000 --(class)--> 1000@0 [size=123, length=-1]
+5@1002 --(class)--> 1002@0 [size=123, length=-1]
+5@1002 --(field@24)--> 6@1000 [size=16, length=-1]
+5@1002 --(field@28)--> 1@1000 [size=16, length=-1]
+6@1000 --(class)--> 1000@0 [size=123, length=-1]
+---
diff --git a/test/913-heaps/heaps.cc b/test/913-heaps/heaps.cc
new file mode 100644
index 0000000..871902e
--- /dev/null
+++ b/test/913-heaps/heaps.cc
@@ -0,0 +1,434 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "heaps.h"
+
+#include <inttypes.h>
+#include <stdio.h>
+#include <string.h>
+
+#include <vector>
+
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/stringprintf.h"
+#include "jit/jit.h"
+#include "jni.h"
+#include "native_stack_dump.h"
+#include "openjdkjvmti/jvmti.h"
+#include "runtime.h"
+#include "thread-inl.h"
+#include "thread_list.h"
+
+#include "ti-agent/common_helper.h"
+#include "ti-agent/common_load.h"
+
+namespace art {
+namespace Test913Heaps {
+
+extern "C" JNIEXPORT void JNICALL Java_Main_forceGarbageCollection(JNIEnv* env ATTRIBUTE_UNUSED,
+                                                                   jclass klass ATTRIBUTE_UNUSED) {
+  jvmtiError ret = jvmti_env->ForceGarbageCollection();
+  if (ret != JVMTI_ERROR_NONE) {
+    char* err;
+    jvmti_env->GetErrorName(ret, &err);
+    printf("Error forcing a garbage collection: %s\n", err);
+  }
+}
+
+class IterationConfig {
+ public:
+  IterationConfig() {}
+  virtual ~IterationConfig() {}
+
+  virtual jint Handle(jvmtiHeapReferenceKind reference_kind,
+                      const jvmtiHeapReferenceInfo* reference_info,
+                      jlong class_tag,
+                      jlong referrer_class_tag,
+                      jlong size,
+                      jlong* tag_ptr,
+                      jlong* referrer_tag_ptr,
+                      jint length,
+                      void* user_data) = 0;
+};
+
+static jint JNICALL HeapReferenceCallback(jvmtiHeapReferenceKind reference_kind,
+                                          const jvmtiHeapReferenceInfo* reference_info,
+                                          jlong class_tag,
+                                          jlong referrer_class_tag,
+                                          jlong size,
+                                          jlong* tag_ptr,
+                                          jlong* referrer_tag_ptr,
+                                          jint length,
+                                          void* user_data) {
+  IterationConfig* config = reinterpret_cast<IterationConfig*>(user_data);
+  return config->Handle(reference_kind,
+                        reference_info,
+                        class_tag,
+                        referrer_class_tag,
+                        size,
+                        tag_ptr,
+                        referrer_tag_ptr,
+                        length,
+                        user_data);
+}
+
+static bool Run(jint heap_filter,
+                jclass klass_filter,
+                jobject initial_object,
+                IterationConfig* config) {
+  jvmtiHeapCallbacks callbacks;
+  memset(&callbacks, 0, sizeof(jvmtiHeapCallbacks));
+  callbacks.heap_reference_callback = HeapReferenceCallback;
+
+  jvmtiError ret = jvmti_env->FollowReferences(heap_filter,
+                                               klass_filter,
+                                               initial_object,
+                                               &callbacks,
+                                               config);
+  if (ret != JVMTI_ERROR_NONE) {
+    char* err;
+    jvmti_env->GetErrorName(ret, &err);
+    printf("Failure running FollowReferences: %s\n", err);
+    return false;
+  }
+  return true;
+}
+
+extern "C" JNIEXPORT jobjectArray JNICALL Java_Main_followReferences(JNIEnv* env,
+                                                                     jclass klass ATTRIBUTE_UNUSED,
+                                                                     jint heap_filter,
+                                                                     jclass klass_filter,
+                                                                     jobject initial_object,
+                                                                     jint stop_after,
+                                                                     jint follow_set,
+                                                                     jobject jniRef) {
+  class PrintIterationConfig FINAL : public IterationConfig {
+   public:
+    PrintIterationConfig(jint _stop_after, jint _follow_set)
+        : counter_(0),
+          stop_after_(_stop_after),
+          follow_set_(_follow_set) {
+    }
+
+    jint Handle(jvmtiHeapReferenceKind reference_kind,
+                const jvmtiHeapReferenceInfo* reference_info,
+                jlong class_tag,
+                jlong referrer_class_tag,
+                jlong size,
+                jlong* tag_ptr,
+                jlong* referrer_tag_ptr,
+                jint length,
+                void* user_data ATTRIBUTE_UNUSED) OVERRIDE {
+      jlong tag = *tag_ptr;
+      // Only check tagged objects.
+      if (tag == 0) {
+        return JVMTI_VISIT_OBJECTS;
+      }
+
+      Print(reference_kind,
+            reference_info,
+            class_tag,
+            referrer_class_tag,
+            size,
+            tag_ptr,
+            referrer_tag_ptr,
+            length);
+
+      counter_++;
+      if (counter_ == stop_after_) {
+        return JVMTI_VISIT_ABORT;
+      }
+
+      if (tag > 0 && tag < 32) {
+        bool should_visit_references = (follow_set_ & (1 << static_cast<int32_t>(tag))) != 0;
+        return should_visit_references ? JVMTI_VISIT_OBJECTS : 0;
+      }
+
+      return JVMTI_VISIT_OBJECTS;
+    }
+
+    void Print(jvmtiHeapReferenceKind reference_kind,
+               const jvmtiHeapReferenceInfo* reference_info,
+               jlong class_tag,
+               jlong referrer_class_tag,
+               jlong size,
+               jlong* tag_ptr,
+               jlong* referrer_tag_ptr,
+               jint length) {
+      std::string referrer_str;
+      if (referrer_tag_ptr == nullptr) {
+        referrer_str = "root@root";
+      } else {
+        referrer_str = StringPrintf("%" PRId64 "@%" PRId64, *referrer_tag_ptr, referrer_class_tag);
+      }
+
+      jlong adapted_size = size;
+      if (*tag_ptr >= 1000) {
+        // This is a class or interface, the size of which will be dependent on the architecture.
+        // Do not print the size, but detect known values and "normalize" for the golden file.
+        if ((sizeof(void*) == 4 && size == 180) || (sizeof(void*) == 8 && size == 232)) {
+          adapted_size = 123;
+        }
+      }
+
+      std::string referree_str = StringPrintf("%" PRId64 "@%" PRId64, *tag_ptr, class_tag);
+
+      lines_.push_back(CreateElem(referrer_str,
+                                  referree_str,
+                                  reference_kind,
+                                  reference_info,
+                                  adapted_size,
+                                  length));
+
+      if (reference_kind == JVMTI_HEAP_REFERENCE_THREAD && *tag_ptr == 1000) {
+        DumpStacks();
+      }
+    }
+
+    std::vector<std::string> GetLines() const {
+      std::vector<std::string> ret;
+      for (const std::unique_ptr<Elem>& e : lines_) {
+        ret.push_back(e->Print());
+      }
+      return ret;
+    }
+
+   private:
+    // We need to postpone some printing, as required functions are not callback-safe.
+    class Elem {
+     public:
+      Elem(const std::string& referrer, const std::string& referree, jlong size, jint length)
+          : referrer_(referrer), referree_(referree), size_(size), length_(length) {}
+      virtual ~Elem() {}
+
+      std::string Print() const {
+        return StringPrintf("%s --(%s)--> %s [size=%" PRId64 ", length=%d]",
+                            referrer_.c_str(),
+                            PrintArrowType().c_str(),
+                            referree_.c_str(),
+                            size_,
+                            length_);
+      }
+
+     protected:
+      virtual std::string PrintArrowType() const = 0;
+
+     private:
+      std::string referrer_;
+      std::string referree_;
+      jlong size_;
+      jint length_;
+    };
+
+    // For simple or unimplemented cases.
+    class StringElement : public Elem {
+     public:
+      StringElement(const std::string& referrer,
+                   const std::string& referree,
+                   jlong size,
+                   jint length,
+                   const std::string& string)
+          : Elem(referrer, referree, size, length), string_(string) {}
+
+     protected:
+      std::string PrintArrowType() const OVERRIDE {
+        return string_;
+      }
+
+     private:
+      const std::string string_;
+    };
+
+    static std::unique_ptr<Elem> CreateElem(const std::string& referrer,
+                                            const std::string& referree,
+                                            jvmtiHeapReferenceKind reference_kind,
+                                            const jvmtiHeapReferenceInfo* reference_info,
+                                            jlong size,
+                                            jint length) {
+      switch (reference_kind) {
+        case JVMTI_HEAP_REFERENCE_CLASS:
+          return std::unique_ptr<Elem>(new StringElement(referrer,
+                                                         referree,
+                                                         size,
+                                                         length,
+                                                         "class"));
+        case JVMTI_HEAP_REFERENCE_FIELD: {
+          std::string tmp = StringPrintf("field@%d", reference_info->field.index);
+          return std::unique_ptr<Elem>(new StringElement(referrer,
+                                                         referree,
+                                                         size,
+                                                         length,
+                                                         tmp));
+        }
+        case JVMTI_HEAP_REFERENCE_ARRAY_ELEMENT: {
+          std::string tmp = StringPrintf("array-element@%d", reference_info->array.index);
+          return std::unique_ptr<Elem>(new StringElement(referrer,
+                                                         referree,
+                                                         size,
+                                                         length,
+                                                         tmp));
+        }
+        case JVMTI_HEAP_REFERENCE_CLASS_LOADER:
+          return std::unique_ptr<Elem>(new StringElement(referrer,
+                                                         referree,
+                                                         size,
+                                                         length,
+                                                         "classloader"));
+        case JVMTI_HEAP_REFERENCE_SIGNERS:
+          return std::unique_ptr<Elem>(new StringElement(referrer,
+                                                         referree,
+                                                         size,
+                                                         length,
+                                                         "signers"));
+        case JVMTI_HEAP_REFERENCE_PROTECTION_DOMAIN:
+          return std::unique_ptr<Elem>(new StringElement(referrer,
+                                                         referree,
+                                                         size,
+                                                         length,
+                                                         "protection-domain"));
+        case JVMTI_HEAP_REFERENCE_INTERFACE:
+          return std::unique_ptr<Elem>(new StringElement(referrer,
+                                                         referree,
+                                                         size,
+                                                         length,
+                                                         "interface"));
+        case JVMTI_HEAP_REFERENCE_STATIC_FIELD: {
+          std::string tmp = StringPrintf("static-field@%d", reference_info->field.index);
+          return std::unique_ptr<Elem>(new StringElement(referrer,
+                                                         referree,
+                                                         size,
+                                                         length,
+                                                         tmp));
+        }
+        case JVMTI_HEAP_REFERENCE_CONSTANT_POOL:
+          return std::unique_ptr<Elem>(new StringElement(referrer,
+                                                         referree,
+                                                         size,
+                                                         length,
+                                                         "constant-pool"));
+        case JVMTI_HEAP_REFERENCE_SUPERCLASS:
+          return std::unique_ptr<Elem>(new StringElement(referrer,
+                                                         referree,
+                                                         size,
+                                                         length,
+                                                         "superclass"));
+        case JVMTI_HEAP_REFERENCE_JNI_GLOBAL:
+          return std::unique_ptr<Elem>(new StringElement(referrer,
+                                                         referree,
+                                                         size,
+                                                         length,
+                                                         "jni-global"));
+        case JVMTI_HEAP_REFERENCE_SYSTEM_CLASS:
+          return std::unique_ptr<Elem>(new StringElement(referrer,
+                                                         referree,
+                                                         size,
+                                                         length,
+                                                         "system-class"));
+        case JVMTI_HEAP_REFERENCE_MONITOR:
+          return std::unique_ptr<Elem>(new StringElement(referrer,
+                                                         referree,
+                                                         size,
+                                                         length,
+                                                         "monitor"));
+        case JVMTI_HEAP_REFERENCE_STACK_LOCAL:
+          return std::unique_ptr<Elem>(new StringElement(referrer,
+                                                         referree,
+                                                         size,
+                                                         length,
+                                                         "stack-local"));
+        case JVMTI_HEAP_REFERENCE_JNI_LOCAL:
+          return std::unique_ptr<Elem>(new StringElement(referrer,
+                                                         referree,
+                                                         size,
+                                                         length,
+                                                         "jni-local"));
+        case JVMTI_HEAP_REFERENCE_THREAD:
+          return std::unique_ptr<Elem>(new StringElement(referrer,
+                                                         referree,
+                                                         size,
+                                                         length,
+                                                         "thread"));
+        case JVMTI_HEAP_REFERENCE_OTHER:
+          return std::unique_ptr<Elem>(new StringElement(referrer,
+                                                         referree,
+                                                         size,
+                                                         length,
+                                                         "other"));
+      }
+      LOG(FATAL) << "Unknown kind";
+      UNREACHABLE();
+    }
+
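+    // Debugging helper: log the name and native stack of every runtime thread.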
+    static void DumpStacks() NO_THREAD_SAFETY_ANALYSIS {
+      auto dump_function = [](art::Thread* t, void* data ATTRIBUTE_UNUSED) {
+        std::string name;
+        t->GetThreadName(name);
+        LOG(ERROR) << name;
+        art::DumpNativeStack(LOG_STREAM(ERROR), t->GetTid());
+      };
+      art::Runtime::Current()->GetThreadList()->ForEach(dump_function, nullptr);
+    }
+
+    jint counter_;
+    const jint stop_after_;
+    const jint follow_set_;
+
+    std::vector<std::unique_ptr<Elem>> lines_;
+  };
+
+  jit::ScopedJitSuspend sjs;  // Wait to avoid JIT influence (e.g., JNI globals).
+
+  // If jniRef isn't null, add a local and a global ref.
+  ScopedLocalRef<jobject> jni_local_ref(env, nullptr);
+  jobject jni_global_ref = nullptr;
+  if (jniRef != nullptr) {
+    jni_local_ref.reset(env->NewLocalRef(jniRef));
+    jni_global_ref = env->NewGlobalRef(jniRef);
+  }
+
+  PrintIterationConfig config(stop_after, follow_set);
+  Run(heap_filter, klass_filter, initial_object, &config);
+
+  std::vector<std::string> lines = config.GetLines();
+  jobjectArray ret = CreateObjectArray(env,
+                                       static_cast<jint>(lines.size()),
+                                       "java/lang/String",
+                                       [&](jint i) {
+                                         return env->NewStringUTF(lines[i].c_str());
+                                       });
+
+  if (jni_global_ref != nullptr) {
+    env->DeleteGlobalRef(jni_global_ref);
+  }
+
+  return ret;
+}
+
+// OnLoad: just grab the jvmti env and request all capabilities; no other setup is needed here.
+jint OnLoad(JavaVM* vm,
+            char* options ATTRIBUTE_UNUSED,
+            void* reserved ATTRIBUTE_UNUSED) {
+  if (vm->GetEnv(reinterpret_cast<void**>(&jvmti_env), JVMTI_VERSION_1_0)) {
+    printf("Unable to get jvmti env!\n");
+    return 1;
+  }
+  SetAllCapabilities(jvmti_env);
+  return 0;
+}
+
+}  // namespace Test913Heaps
+}  // namespace art
diff --git a/test/562-no-intermediate/src/Main.java b/test/913-heaps/heaps.h
similarity index 68%
rename from test/562-no-intermediate/src/Main.java
rename to test/913-heaps/heaps.h
index 3b74d6f..bd828ac 100644
--- a/test/562-no-intermediate/src/Main.java
+++ b/test/913-heaps/heaps.h
@@ -14,14 +14,17 @@
  * limitations under the License.
  */
 
-public class Main {
+#ifndef ART_TEST_913_HEAPS_HEAPS_H_
+#define ART_TEST_913_HEAPS_HEAPS_H_
 
-  /// CHECK-START-ARM64: int Main.main(String[]) register_allocator (after)
-  /// CHECK-NOT: IntermediateAddress
-  public static void main(String[] args) {
-    array[index] += Math.cos(42);
-  }
+#include <jni.h>
 
-  static int index = 0;
-  static double[] array = new double[2];
-}
+namespace art {
+namespace Test913Heaps {
+
+jint OnLoad(JavaVM* vm, char* options, void* reserved);
+
+}  // namespace Test913Heaps
+}  // namespace art
+
+#endif  // ART_TEST_913_HEAPS_HEAPS_H_
diff --git a/test/913-heaps/info.txt b/test/913-heaps/info.txt
new file mode 100644
index 0000000..875a5f6
--- /dev/null
+++ b/test/913-heaps/info.txt
@@ -0,0 +1 @@
+Tests basic functions in the jvmti plugin.
diff --git a/test/913-heaps/run b/test/913-heaps/run
new file mode 100755
index 0000000..7bd8cbd
--- /dev/null
+++ b/test/913-heaps/run
@@ -0,0 +1,43 @@
+#!/bin/bash
+#
+# Copyright 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
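+# Use the debug ("d") agent and jvmti plugin by default; switch to the non-debug variants
+# when -O is passed.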
+plugin=libopenjdkjvmtid.so
+agent=libtiagentd.so
+lib=tiagentd
+if [[ "$@" == *"-O"* ]]; then
+  agent=libtiagent.so
+  plugin=libopenjdkjvmti.so
+  lib=tiagent
+fi
+
+if [[ "$@" == *"--jvm"* ]]; then
+  arg="jvm"
+else
+  arg="art"
+fi
+
+if [[ "$@" != *"--debuggable"* ]]; then
+  other_args=" -Xcompiler-option --debuggable "
+else
+  other_args=""
+fi
+
+./default-run "$@" --experimental agents \
+                   --experimental runtime-plugins \
+                   --runtime-option -agentpath:${agent}=913-heaps,${arg} \
+                   --android-runtime-option -Xplugin:${plugin} \
+                   ${other_args} \
+                   --args ${lib}
diff --git a/test/913-heaps/src/Main.java b/test/913-heaps/src/Main.java
new file mode 100644
index 0000000..a6ace9a
--- /dev/null
+++ b/test/913-heaps/src/Main.java
@@ -0,0 +1,389 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+
+public class Main {
+  public static void main(String[] args) throws Exception {
+    System.loadLibrary(args[1]);
+
+    doTest();
+    doFollowReferencesTest();
+  }
+
+  public static void doTest() throws Exception {
+    setupGcCallback();
+
+    enableGcTracking(true);
+    run();
+    enableGcTracking(false);
+  }
+
+  private static void run() {
+    clearStats();
+    forceGarbageCollection();
+    printStats();
+  }
+
+  private static void clearStats() {
+    getGcStarts();
+    getGcFinishes();
+  }
+
+  private static void printStats() {
+    System.out.println("---");
+    int s = getGcStarts();
+    int f = getGcFinishes();
+    System.out.println((s > 0) + " " + (f > 0));
+  }
+
+  public static void doFollowReferencesTest() throws Exception {
+    // Force GCs to clean up dirt.
+    Runtime.getRuntime().gc();
+    Runtime.getRuntime().gc();
+
+    setTag(Thread.currentThread(), 3000);
+
+    {
+      ArrayList<Object> tmpStorage = new ArrayList<>();
+      doFollowReferencesTestNonRoot(tmpStorage);
+      tmpStorage = null;
+    }
+
+    // Force GCs to clean up dirt.
+    Runtime.getRuntime().gc();
+    Runtime.getRuntime().gc();
+
+    doFollowReferencesTestRoot();
+
+    // Force GCs to clean up dirt.
+    Runtime.getRuntime().gc();
+    Runtime.getRuntime().gc();
+  }
+
+  private static void doFollowReferencesTestNonRoot(ArrayList<Object> tmpStorage) {
+    Verifier v = new Verifier();
+    tagClasses(v);
+    A a = createTree(v);
+    tmpStorage.add(a);
+    v.add("0@0", "1@1000");  // tmpStorage[0] --(array-element)--> a.
+
+    doFollowReferencesTestImpl(null, Integer.MAX_VALUE, -1, null, v, null);
+    doFollowReferencesTestImpl(a.foo, Integer.MAX_VALUE, -1, null, v, "2@1000");
+
+    tmpStorage.clear();
+  }
+
+  private static void doFollowReferencesTestRoot() {
+    Verifier v = new Verifier();
+    tagClasses(v);
+    A a = createTree(v);
+
+    doFollowReferencesTestImpl(null, Integer.MAX_VALUE, -1, a, v, null);
+    doFollowReferencesTestImpl(a.foo, Integer.MAX_VALUE, -1, a, v, "2@1000");
+  }
+
+  private static void doFollowReferencesTestImpl(A root, int stopAfter, int followSet,
+      Object asRoot, Verifier v, String additionalEnabled) {
+    String[] lines =
+        followReferences(0, null, root, stopAfter, followSet, asRoot);
+
+    v.process(lines, additionalEnabled);
+
+    // TODO: Test filters.
+  }
+
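+  // Tag conventions: classes get tags >= 1000 (A=1000, B=1001, C=1002, I1=2000, I2=2001)
+  // and the instances created in createTree() get small tags (1..6).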
+  private static void tagClasses(Verifier v) {
+    setTag(A.class, 1000);
+
+    setTag(B.class, 1001);
+    v.add("1001@0", "1000@0");  // B.class --(superclass)--> A.class.
+
+    setTag(C.class, 1002);
+    v.add("1002@0", "1001@0");  // C.class --(superclass)--> B.class.
+    v.add("1002@0", "2001@0");  // C.class --(interface)--> I2.class.
+
+    setTag(I1.class, 2000);
+
+    setTag(I2.class, 2001);
+    v.add("2001@0", "2000@0");  // I2.class --(interface)--> I1.class.
+  }
+
+  private static A createTree(Verifier v) {
+    A aInst = new A();
+    setTag(aInst, 1);
+    String aInstStr = "1@1000";
+    String aClassStr = "1000@0";
+    v.add(aInstStr, aClassStr);  // A -->(class) --> A.class.
+
+    A a2Inst = new A();
+    setTag(a2Inst, 2);
+    aInst.foo = a2Inst;
+    String a2InstStr = "2@1000";
+    v.add(a2InstStr, aClassStr);  // A2 -->(class) --> A.class.
+    v.add(aInstStr, a2InstStr);   // A -->(field) --> A2.
+
+    B bInst = new B();
+    setTag(bInst, 3);
+    aInst.foo2 = bInst;
+    String bInstStr = "3@1001";
+    String bClassStr = "1001@0";
+    v.add(bInstStr, bClassStr);  // B -->(class) --> B.class.
+    v.add(aInstStr, bInstStr);   // A -->(field) --> B.
+
+    A a3Inst = new A();
+    setTag(a3Inst, 4);
+    bInst.bar = a3Inst;
+    String a3InstStr = "4@1000";
+    v.add(a3InstStr, aClassStr);  // A3 -->(class) --> A.class.
+    v.add(bInstStr, a3InstStr);   // B -->(field) --> A3.
+
+    C cInst = new C();
+    setTag(cInst, 5);
+    bInst.bar2 = cInst;
+    String cInstStr = "5@1000";
+    String cClassStr = "1002@0";
+    v.add(cInstStr, cClassStr);  // C -->(class) --> C.class.
+    v.add(bInstStr, cInstStr);   // B -->(field) --> C.
+
+    A a4Inst = new A();
+    setTag(a4Inst, 6);
+    cInst.baz = a4Inst;
+    String a4InstStr = "6@1000";
+    v.add(a4InstStr, aClassStr);  // A4 -->(class) --> A.class.
+    v.add(cInstStr, a4InstStr);   // C -->(field) --> A4.
+
+    cInst.baz2 = aInst;
+    v.add(cInstStr, aInstStr);  // C -->(field) --> A.
+
+    return aInst;
+  }
+
+  public static class A {
+    public A foo;
+    public A foo2;
+
+    public A() {}
+    public A(A a, A b) {
+      foo = a;
+      foo2 = b;
+    }
+  }
+
+  public static class B extends A {
+    public A bar;
+    public A bar2;
+
+    public B() {}
+    public B(A a, A b) {
+      bar = a;
+      bar2 = b;
+    }
+  }
+
+  public static interface I1 {
+    public final static int i1Field = 1;
+  }
+
+  public static interface I2 extends I1 {
+    public final static int i2Field = 2;
+  }
+
+  public static class C extends B implements I2 {
+    public A baz;
+    public A baz2;
+
+    public C() {}
+    public C(A a, A b) {
+      baz = a;
+      baz2 = b;
+    }
+  }
+
+  public static class Verifier {
+    public static class Node {
+      public String referrer;
+
+      public HashSet<String> referrees = new HashSet<>();
+
+      public Node(String r) {
+        referrer = r;
+      }
+
+      public boolean isRoot() {
+        return referrer.startsWith("root@");
+      }
+    }
+
+    HashMap<String, Node> nodes = new HashMap<>();
+
+    public Verifier() {
+    }
+
+    public void add(String referrer, String referree) {
+      if (!nodes.containsKey(referrer)) {
+        nodes.put(referrer, new Node(referrer));
+      }
+      if (referree != null) {
+        nodes.get(referrer).referrees.add(referree);
+      }
+    }
+
+    public void process(String[] lines, String additionalEnabledReferrer) {
+      // This method isn't optimal. The loops could be merged. However, it's more readable if
+      // the different parts are separated.
+
+      ArrayList<String> rootLines = new ArrayList<>();
+      ArrayList<String> nonRootLines = new ArrayList<>();
+
+      // Check for consecutive chunks of referrers. Also ensure roots come first.
+      {
+        String currentHead = null;
+        boolean rootsDone = false;
+        HashSet<String> completedReferrers = new HashSet<>();
+        for (String l : lines) {
+          String referrer = getReferrer(l);
+
+          if (isRoot(referrer)) {
+            if (rootsDone) {
+              System.out.println("ERROR: Late root " + l);
+              print(lines);
+              return;
+            }
+            rootLines.add(l);
+            continue;
+          }
+
+          rootsDone = true;
+
+          if (currentHead == null) {
+            currentHead = referrer;
+          } else {
+            if (!currentHead.equals(referrer)) {
+              completedReferrers.add(currentHead);
+              currentHead = referrer;
+              if (completedReferrers.contains(referrer)) {
+                System.out.println("Non-contiguous referrer " + l);
+                print(lines);
+                return;
+              }
+            }
+          }
+          nonRootLines.add(l);
+        }
+      }
+
+      // Sort (root order is not specified) and print the roots.
+      // TODO: What about extra roots? JNI and the interpreter seem to introduce those (though it
+      //       isn't clear why a debuggable-AoT test doesn't have the same, at least for locals).
+      //       For now, swallow duplicates, and resolve once we have the metadata for the roots.
+      {
+        Collections.sort(rootLines);
+        String lastRoot = null;
+        for (String l : rootLines) {
+          if (lastRoot != null && lastRoot.equals(l)) {
+            continue;
+          }
+          lastRoot = l;
+          System.out.println(l);
+        }
+      }
+
+      // Iterate through the lines, keeping track of which referrers are visited, to ensure the
+      // order is acceptable.
+      HashSet<String> enabled = new HashSet<>();
+      if (additionalEnabledReferrer != null) {
+        enabled.add(additionalEnabledReferrer);
+      }
+      // Always add "0@0".
+      enabled.add("0@0");
+
+      for (String l : lines) {
+        String referrer = getReferrer(l);
+        String referree = getReferree(l);
+        if (isRoot(referrer)) {
+          // For a root src, just enable the referree.
+          enabled.add(referree);
+        } else {
+          // Check that the referrer is enabled (may be visited).
+          if (!enabled.contains(referrer)) {
+            System.out.println("Referrer " + referrer + " not enabled: " + l);
+            print(lines);
+            return;
+          }
+          enabled.add(referree);
+        }
+      }
+
+      // Now just sort the non-root lines and output them
+      Collections.sort(nonRootLines);
+      for (String l : nonRootLines) {
+        System.out.println(l);
+      }
+
+      System.out.println("---");
+    }
+
+    public static boolean isRoot(String ref) {
+      return ref.startsWith("root@");
+    }
+
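+    // Reference lines have the form "<referrer> --(<kind>)--> <referree> ...": the referrer is
+    // everything before the first space, and the referree is the token right after "--> ".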
+    private static String getReferrer(String line) {
+      int i = line.indexOf(" --");
+      if (i <= 0) {
+        throw new IllegalArgumentException(line);
+      }
+      int j = line.indexOf(' ');
+      if (i != j) {
+        throw new IllegalArgumentException(line);
+      }
+      return line.substring(0, i);
+    }
+
+    private static String getReferree(String line) {
+      int i = line.indexOf("--> ");
+      if (i <= 0) {
+        throw new IllegalArgumentException(line);
+      }
+      int j = line.indexOf(' ', i + 4);
+      if (j < 0) {
+        throw new IllegalArgumentException(line);
+      }
+      return line.substring(i + 4, j);
+    }
+
+    private static void print(String[] lines) {
+      for (String l : lines) {
+        System.out.println(l);
+      }
+    }
+  }
+
+  private static native void setupGcCallback();
+  private static native void enableGcTracking(boolean enable);
+  private static native int getGcStarts();
+  private static native int getGcFinishes();
+  private static native void forceGarbageCollection();
+
+  private static native void setTag(Object o, long tag);
+  private static native long getTag(Object o);
+
+  private static native String[] followReferences(int heapFilter, Class<?> klassFilter,
+      Object initialObject, int stopAfter, int followSet, Object jniRef);
+}
diff --git a/test/955-methodhandles-smali/expected.txt b/test/955-methodhandles-smali/expected.txt
index 047a287..5de1274 100644
--- a/test/955-methodhandles-smali/expected.txt
+++ b/test/955-methodhandles-smali/expected.txt
@@ -5,4 +5,5 @@
 40
 43
 44
-0-11
+0
+-1
diff --git a/test/955-methodhandles-smali/smali/Main.smali b/test/955-methodhandles-smali/smali/Main.smali
index 9681d56..52460a8 100644
--- a/test/955-methodhandles-smali/smali/Main.smali
+++ b/test/955-methodhandles-smali/smali/Main.smali
@@ -220,24 +220,22 @@
     invoke-polymorphic {v0, v1, v1}, Ljava/lang/invoke/MethodHandle;->invoke([Ljava/lang/Object;)Ljava/lang/Object;, (Ljava/lang/Long;Ljava/lang/Long;)I
     move-result v3
     sget-object v4, Ljava/lang/System;->out:Ljava/io/PrintStream;
-    invoke-virtual {v4, v3}, Ljava/io/PrintStream;->print(I)V
+    invoke-virtual {v4, v3}, Ljava/io/PrintStream;->println(I)V
 
     # Call compareTo(long) - this is an implicit box.
     const-wide v2, 44
     invoke-polymorphic {v0, v1, v2, v3}, Ljava/lang/invoke/MethodHandle;->invoke([Ljava/lang/Object;)Ljava/lang/Object;, (Ljava/lang/Long;J)I
     move-result v3
     sget-object v4, Ljava/lang/System;->out:Ljava/io/PrintStream;
-    invoke-virtual {v4, v3}, Ljava/io/PrintStream;->print(I)V
+    invoke-virtual {v4, v3}, Ljava/io/PrintStream;->println(I)V
 
     # Call compareTo(int) - this is an implicit box.
-    const v2, 40
-    invoke-polymorphic {v0, v1, v2}, Ljava/lang/invoke/MethodHandle;->invoke([Ljava/lang/Object;)Ljava/lang/Object;, (Ljava/lang/Long;I)I
-    move-result v3
-    sget-object v4, Ljava/lang/System;->out:Ljava/io/PrintStream;
-    invoke-virtual {v4, v3}, Ljava/io/PrintStream;->print(I)V
-
-    # Add a newline at the end of file.
-    invoke-virtual {v4}, Ljava/io/PrintStream;->println()V
+# This throws WrongMethodTypeException, as it is a two-step conversion: int->long->Long or int->Integer->Long.
+#    const v2, 40
+#    invoke-polymorphic {v0, v1, v2}, Ljava/lang/invoke/MethodHandle;->invoke([Ljava/lang/Object;)Ljava/lang/Object;, (Ljava/lang/Long;I)I
+#    move-result v3
+#    sget-object v4, Ljava/lang/System;->out:Ljava/io/PrintStream;
+#    invoke-virtual {v4, v3}, Ljava/io/PrintStream;->print(I)V
 
     return-void
 .end method
diff --git a/test/956-methodhandles/build b/test/956-methodhandles/build
index 613e97c..a423ca6 100755
--- a/test/956-methodhandles/build
+++ b/test/956-methodhandles/build
@@ -20,9 +20,6 @@
 if [[ $@ != *"--jvm"* ]]; then
   # Don't do anything with jvm.
   export USE_JACK=true
-  export JACK_SERVER=false
-  export JACK_REPOSITORY="${ANDROID_BUILD_TOP}/prebuilts/sdk/tools/jacks"
-  export JACK_VERSION=4.11.BETA
 fi
 
 ./default-build "$@" --experimental method-handles
diff --git a/test/956-methodhandles/expected.txt b/test/956-methodhandles/expected.txt
index ddc1cb0..0a5caa1 100644
--- a/test/956-methodhandles/expected.txt
+++ b/test/956-methodhandles/expected.txt
@@ -3,3 +3,7 @@
 foo_A
 foo_B
 privateRyan_D
+Received exception: Expected (java.lang.String, java.lang.String)java.lang.String but was (java.lang.String, java.lang.Object)void
+String constructors done.
+testReferenceReturnValueConversions done.
+testPrimitiveReturnValueConversions done.
diff --git a/test/956-methodhandles/src/Main.java b/test/956-methodhandles/src/Main.java
index badea53..8713caa 100644
--- a/test/956-methodhandles/src/Main.java
+++ b/test/956-methodhandles/src/Main.java
@@ -19,10 +19,19 @@
 import java.lang.invoke.MethodHandles.Lookup;
 import java.lang.invoke.MethodType;
 import java.lang.invoke.WrongMethodTypeException;
+import java.nio.charset.Charset;
+import java.nio.charset.StandardCharsets;
+
+import java.lang.reflect.Constructor;
+import java.lang.reflect.Field;
+import java.lang.reflect.Method;
+import java.util.Arrays;
 
 public class Main {
 
   public static class A {
+    public A() {}
+
     public void foo() {
       System.out.println("foo_A");
     }
@@ -57,8 +66,14 @@
   public static void main(String[] args) throws Throwable {
     testfindSpecial_invokeSuperBehaviour();
     testfindSpecial_invokeDirectBehaviour();
-
-    testThrowException();
+    testExceptionDetailMessages();
+    testfindVirtual();
+    testfindStatic();
+    testUnreflects();
+    testAsType();
+    testConstructors();
+    testStringConstructors();
+    testReturnValueConversions();
   }
 
   public static void testfindSpecial_invokeSuperBehaviour() throws Throwable {
@@ -87,10 +102,6 @@
       mh1.invokeExact(aInstance);
       System.out.println("mh1.invoke(aInstance) should not succeeed");
     } catch (WrongMethodTypeException expected) {
-    } catch (ClassCastException workaround) {
-      // TODO(narayan): ART treats all invokes as if they were non-exact. We
-      // should throw a WMTE if we execute an invoke-polymorphic instruction whose
-      // target method is MethodHandle.invokeExact.
     }
 
     // This should *still* be as if an invoke-super was called from one of C's
@@ -109,6 +120,19 @@
       System.out.println("findSpecial(A.class, foo, .. D.class) unexpectedly succeeded.");
     } catch (IllegalAccessException expected) {
     }
+
+    // Check return type matches for find.
+    try {
+      B.lookup.findSpecial(A.class /* refC */, "foo",
+                           MethodType.methodType(int.class), B.class /* specialCaller */);
+      fail();
+    } catch (NoSuchMethodException e) {}
+    // Check constructors
+    try {
+      B.lookup.findSpecial(A.class /* refC */, "<init>",
+                           MethodType.methodType(void.class), B.class /* specialCaller */);
+      fail();
+    } catch (NoSuchMethodException e) {}
   }
 
   public static void testfindSpecial_invokeDirectBehaviour() throws Throwable {
@@ -134,20 +158,806 @@
     }
   }
 
-  public static void testThrowException() throws Throwable {
-    MethodHandle handle = MethodHandles.throwException(String.class,
-        IllegalArgumentException.class);
-    if (handle.type().returnType() != String.class) {
-      System.out.println("Unexpected return type for handle: " + handle
-          + " [ " + handle.type() + "]");
+  public static void testExceptionDetailMessages() throws Throwable {
+    MethodHandle handle = MethodHandles.lookup().findVirtual(String.class, "concat",
+        MethodType.methodType(String.class, String.class));
+
+    try {
+      handle.invokeExact("a", new Object());
+      System.out.println("invokeExact(\"a\", new Object()) unexpectedly succeeded.");
+    } catch (WrongMethodTypeException ex) {
+      System.out.println("Received exception: " + ex.getMessage());
+    }
+  }
+
+  public interface Foo {
+    public String foo();
+  }
+
+  public interface Bar extends Foo {
+    public String bar();
+  }
+
+  public static class BarSuper {
+    public String superPublicMethod() {
+      return "superPublicMethod";
+    }
+
+    public String superProtectedMethod() {
+      return "superProtectedMethod";
+    }
+
+    String superPackageMethod() {
+      return "superPackageMethod";
+    }
+  }
+
+  public static class BarImpl extends BarSuper implements Bar {
+    public BarImpl() {
+    }
+
+    @Override
+    public String foo() {
+      return "foo";
+    }
+
+    @Override
+    public String bar() {
+      return "bar";
+    }
+
+    public String add(int x, int y) {
+      return Arrays.toString(new int[] { x, y });
+    }
+
+    private String privateMethod() { return "privateMethod"; }
+
+    public static String staticMethod() { return staticString; }
+
+    private static String staticString;
+
+    static {
+      // Static initializer: make sure staticString is set before staticMethod() can be called.
+      staticString = Long.toString(System.currentTimeMillis());
+    }
+
+    static final MethodHandles.Lookup lookup = MethodHandles.lookup();
+  }
+
+  public static void testfindVirtual() throws Throwable {
+    // Virtual lookups on static methods should not succeed.
+    try {
+        MethodHandles.lookup().findVirtual(
+            BarImpl.class,  "staticMethod", MethodType.methodType(String.class));
+        System.out.println("findVirtual(staticMethod) unexpectedly succeeded");
+    } catch (IllegalAccessException expected) {
+    }
+
+    // Virtual lookups on private methods should not succeed, unless the Lookup
+    // context had sufficient privileges.
+    try {
+        MethodHandles.lookup().findVirtual(
+            BarImpl.class,  "privateMethod", MethodType.methodType(String.class));
+        System.out.println("findVirtual(privateMethod) unexpectedly succeeded");
+    } catch (IllegalAccessException expected) {
+    }
+
+    // Virtual lookup on a private method with a context that *does* have sufficient
+    // privileges.
+    MethodHandle mh = BarImpl.lookup.findVirtual(
+            BarImpl.class,  "privateMethod", MethodType.methodType(String.class));
+    String str = (String) mh.invoke(new BarImpl());
+    if (!"privateMethod".equals(str)) {
+      System.out.println("Unexpected return value for BarImpl#privateMethod: " + str);
+    }
+
+    // Find virtual must find interface methods defined by interfaces implemented
+    // by the class.
+    mh = MethodHandles.lookup().findVirtual(BarImpl.class, "foo",
+        MethodType.methodType(String.class));
+    str = (String) mh.invoke(new BarImpl());
+    if (!"foo".equals(str)) {
+      System.out.println("Unexpected return value for BarImpl#foo: " + str);
+    }
+
+    // Find virtual should check rtype.
+    try {
+      mh = MethodHandles.lookup().findVirtual(BarImpl.class, "foo",
+                                              MethodType.methodType(void.class));
+      fail();
+    } catch (NoSuchMethodException e) {}
+
+    // And ptypes
+    mh = MethodHandles.lookup().findVirtual(
+        BarImpl.class, "add", MethodType.methodType(String.class, int.class, int.class));
+    try {
+      mh = MethodHandles.lookup().findVirtual(
+          BarImpl.class, "add", MethodType.methodType(String.class, Integer.class, int.class));
+      fail();
+    } catch (NoSuchMethodException e) {}
+
+    // .. and their super-interfaces.
+    mh = MethodHandles.lookup().findVirtual(BarImpl.class, "bar",
+        MethodType.methodType(String.class));
+    str = (String) mh.invoke(new BarImpl());
+    if (!"bar".equals(str)) {
+      System.out.println("Unexpected return value for BarImpl#bar: " + str);
+    }
+
+    // TODO(narayan): Fix this case, we're using the wrong ArtMethod for the
+    // invoke resulting in a failing check in the interpreter.
+    //
+    // mh = MethodHandles.lookup().findVirtual(Bar.class, "bar",
+    //    MethodType.methodType(String.class));
+    // str = (String) mh.invoke(new BarImpl());
+    // if (!"bar".equals(str)) {
+    //   System.out.println("Unexpected return value for BarImpl#bar: " + str);
+    // }
+
+    // We should also be able to lookup public / protected / package methods in
+    // the super class, given sufficient access privileges.
+    mh = MethodHandles.lookup().findVirtual(BarImpl.class, "superPublicMethod",
+        MethodType.methodType(String.class));
+    str = (String) mh.invoke(new BarImpl());
+    if (!"superPublicMethod".equals(str)) {
+      System.out.println("Unexpected return value for BarImpl#superPublicMethod: " + str);
+    }
+
+    mh = MethodHandles.lookup().findVirtual(BarImpl.class, "superProtectedMethod",
+        MethodType.methodType(String.class));
+    str = (String) mh.invoke(new BarImpl());
+    if (!"superProtectedMethod".equals(str)) {
+      System.out.println("Unexpected return value for BarImpl#superProtectedMethod: " + str);
+    }
+
+    mh = MethodHandles.lookup().findVirtual(BarImpl.class, "superPackageMethod",
+        MethodType.methodType(String.class));
+    str = (String) mh.invoke(new BarImpl());
+    if (!"superPackageMethod".equals(str)) {
+      System.out.println("Unexpected return value for BarImpl#superPackageMethod: " + str);
     }
 
     try {
-      handle.invoke();
-      System.out.println("Expected an exception of type: java.lang.IllegalArgumentException");
-    } catch (IllegalArgumentException expected) {
+      MethodHandles.lookup().findVirtual(BarImpl.class, "<init>",
+                                        MethodType.methodType(void.class));
+      fail();
+    } catch (NoSuchMethodException e) {}
+  }
+
+  public static void testfindStatic() throws Throwable {
+    MethodHandles.lookup().findStatic(BarImpl.class, "staticMethod",
+                                      MethodType.methodType(String.class));
+    try {
+      MethodHandles.lookup().findStatic(BarImpl.class, "staticMethod",
+                                        MethodType.methodType(void.class));
+      fail();
+    } catch (NoSuchMethodException e) {}
+    try {
+      MethodHandles.lookup().findStatic(BarImpl.class, "staticMethod",
+                                        MethodType.methodType(String.class, int.class));
+      fail();
+    } catch (NoSuchMethodException e) {}
+    try {
+      MethodHandles.lookup().findStatic(BarImpl.class, "<clinit>",
+                                        MethodType.methodType(void.class));
+      fail();
+    } catch (NoSuchMethodException e) {}
+    try {
+      MethodHandles.lookup().findStatic(BarImpl.class, "<init>",
+                                        MethodType.methodType(void.class));
+      fail();
+    } catch (NoSuchMethodException e) {}
+  }
+
+  static class UnreflectTester {
+    public String publicField;
+    private String privateField;
+
+    public static String publicStaticField = "publicStaticValue";
+    private static String privateStaticField = "privateStaticValue";
+
+    private UnreflectTester(String val) {
+      publicField = val;
+      privateField = val;
+    }
+
+    // NOTE: The boolean constructor argument only exists to give this a
+    // different signature.
+    public UnreflectTester(String val, boolean unused) {
+      this(val);
+    }
+
+    private static String privateStaticMethod() {
+      return "privateStaticMethod";
+    }
+
+    private String privateMethod() {
+      return "privateMethod";
+    }
+
+    public static String publicStaticMethod() {
+      return "publicStaticMethod";
+    }
+
+    public String publicMethod() {
+      return "publicMethod";
     }
   }
+
+  public static void testUnreflects() throws Throwable {
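+    // unreflect/unreflectConstructor/unreflectGetter/unreflectSetter turn java.lang.reflect
+    // objects into method handles; inaccessible members must be setAccessible(true) first.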
+    UnreflectTester instance = new UnreflectTester("unused");
+    Method publicMethod = UnreflectTester.class.getMethod("publicMethod");
+
+    MethodHandle mh = MethodHandles.lookup().unreflect(publicMethod);
+    assertEquals("publicMethod", (String) mh.invoke(instance));
+    assertEquals("publicMethod", (String) mh.invokeExact(instance));
+
+    Method publicStaticMethod = UnreflectTester.class.getMethod("publicStaticMethod");
+    mh = MethodHandles.lookup().unreflect(publicStaticMethod);
+    assertEquals("publicStaticMethod", (String) mh.invoke());
+    assertEquals("publicStaticMethod", (String) mh.invokeExact());
+
+    Method privateMethod = UnreflectTester.class.getDeclaredMethod("privateMethod");
+    try {
+      mh = MethodHandles.lookup().unreflect(privateMethod);
+      fail();
+    } catch (IllegalAccessException expected) {}
+
+    privateMethod.setAccessible(true);
+    mh = MethodHandles.lookup().unreflect(privateMethod);
+    assertEquals("privateMethod", (String) mh.invoke(instance));
+    assertEquals("privateMethod", (String) mh.invokeExact(instance));
+
+    Method privateStaticMethod = UnreflectTester.class.getDeclaredMethod("privateStaticMethod");
+    try {
+      mh = MethodHandles.lookup().unreflect(privateStaticMethod);
+      fail();
+    } catch (IllegalAccessException expected) {}
+
+    privateStaticMethod.setAccessible(true);
+    mh = MethodHandles.lookup().unreflect(privateStaticMethod);
+    assertEquals("privateStaticMethod", (String) mh.invoke());
+    assertEquals("privateStaticMethod", (String) mh.invokeExact());
+
+    Constructor privateConstructor = UnreflectTester.class.getDeclaredConstructor(String.class);
+    try {
+      mh = MethodHandles.lookup().unreflectConstructor(privateConstructor);
+      fail();
+    } catch (IllegalAccessException expected) {}
+
+    privateConstructor.setAccessible(true);
+    mh = MethodHandles.lookup().unreflectConstructor(privateConstructor);
+    instance = (UnreflectTester) mh.invokeExact("abc");
+    assertEquals("abc", instance.publicField);
+    instance = (UnreflectTester) mh.invoke("def");
+    assertEquals("def", instance.publicField);
+    Constructor publicConstructor = UnreflectTester.class.getConstructor(String.class,
+        boolean.class);
+    mh = MethodHandles.lookup().unreflectConstructor(publicConstructor);
+    instance = (UnreflectTester) mh.invokeExact("abc", false);
+    assertEquals("abc", instance.publicField);
+    instance = (UnreflectTester) mh.invoke("def", true);
+    assertEquals("def", instance.publicField);
+
+    // TODO(narayan): Non exact invokes for field sets/gets are not implemented yet.
+    //
+    // assertEquals("instanceValue", (String) mh.invoke(new UnreflectTester("instanceValue")));
+    Field publicField = UnreflectTester.class.getField("publicField");
+    mh = MethodHandles.lookup().unreflectGetter(publicField);
+    instance = new UnreflectTester("instanceValue");
+    assertEquals("instanceValue", (String) mh.invokeExact(instance));
+
+    mh = MethodHandles.lookup().unreflectSetter(publicField);
+    instance = new UnreflectTester("instanceValue");
+    mh.invokeExact(instance, "updatedInstanceValue");
+    assertEquals("updatedInstanceValue", instance.publicField);
+
+    Field publicStaticField = UnreflectTester.class.getField("publicStaticField");
+    mh = MethodHandles.lookup().unreflectGetter(publicStaticField);
+    UnreflectTester.publicStaticField = "updatedStaticValue";
+    assertEquals("updatedStaticValue", (String) mh.invokeExact());
+
+    mh = MethodHandles.lookup().unreflectSetter(publicStaticField);
+    UnreflectTester.publicStaticField = "updatedStaticValue";
+    mh.invokeExact("updatedStaticValue2");
+    assertEquals("updatedStaticValue2", UnreflectTester.publicStaticField);
+
+    Field privateField = UnreflectTester.class.getDeclaredField("privateField");
+    try {
+      mh = MethodHandles.lookup().unreflectGetter(privateField);
+      fail();
+    } catch (IllegalAccessException expected) {
+    }
+    try {
+      mh = MethodHandles.lookup().unreflectSetter(privateField);
+      fail();
+    } catch (IllegalAccessException expected) {
+    }
+
+    privateField.setAccessible(true);
+
+    mh = MethodHandles.lookup().unreflectGetter(privateField);
+    instance = new UnreflectTester("instanceValue");
+    assertEquals("instanceValue", (String) mh.invokeExact(instance));
+
+    mh = MethodHandles.lookup().unreflectSetter(privateField);
+    instance = new UnreflectTester("instanceValue");
+    mh.invokeExact(instance, "updatedInstanceValue");
+    assertEquals("updatedInstanceValue", instance.privateField);
+
+    Field privateStaticField = UnreflectTester.class.getDeclaredField("privateStaticField");
+    try {
+      mh = MethodHandles.lookup().unreflectGetter(privateStaticField);
+      fail();
+    } catch (IllegalAccessException expected) {
+    }
+    try {
+      mh = MethodHandles.lookup().unreflectSetter(privateStaticField);
+      fail();
+    } catch (IllegalAccessException expected) {
+    }
+
+    privateStaticField.setAccessible(true);
+    mh = MethodHandles.lookup().unreflectGetter(privateStaticField);
+    privateStaticField.set(null, "updatedStaticValue");
+    assertEquals("updatedStaticValue", (String) mh.invokeExact());
+
+    mh = MethodHandles.lookup().unreflectSetter(privateStaticField);
+    privateStaticField.set(null, "updatedStaticValue");
+    mh.invokeExact("updatedStaticValue2");
+    assertEquals("updatedStaticValue2", (String) privateStaticField.get(null));
+  }
+
+  // This method only exists to fool Jack's handling of types. See b/32536744.
+  public static CharSequence getSequence() {
+    return "foo";
+  }
+
+  public static void testAsType() throws Throwable {
+    // The type of this handle is (String, String)String.
+    MethodHandle mh = MethodHandles.lookup().findVirtual(String.class,
+        "concat", MethodType.methodType(String.class, String.class));
+
+    // Change it to (CharSequence, String)Object.
+    MethodHandle asType = mh.asType(
+        MethodType.methodType(Object.class, CharSequence.class, String.class));
+
+    Object obj = asType.invokeExact((CharSequence) getSequence(), "bar");
+    assertEquals("foobar", (String) obj);
+
+    // Should fail due to a wrong return type.
+    try {
+      String str = (String) asType.invokeExact((CharSequence) getSequence(), "bar");
+      fail();
+    } catch (WrongMethodTypeException expected) {
+    }
+
+    // Should fail due to a wrong argument type (String instead of CharSequence).
+    try {
+      String str = (String) asType.invokeExact("baz", "bar");
+      fail();
+    } catch (WrongMethodTypeException expected) {
+    }
+
+    // Calls to asType should fail if the types are not convertible.
+    //
+    // Bad return type conversion.
+    try {
+      mh.asType(MethodType.methodType(int.class, String.class, String.class));
+      fail();
+    } catch (WrongMethodTypeException expected) {
+    }
+
+    // Bad argument conversion.
+    try {
+      mh.asType(MethodType.methodType(String.class, int.class, String.class));
+      fail();
+    } catch (WrongMethodTypeException expected) {
+    }
+  }
+
+  public static void assertEquals(String s1, String s2) {
+    if (s1 == s2) {
+      return;
+    }
+
+    if (s1 != null && s2 != null && s1.equals(s2)) {
+      return;
+    }
+
+    throw new AssertionError("assertEquals s1: " + s1 + ", s2: " + s2);
+  }
+
+  public static void fail() {
+    System.out.println("fail");
+    Thread.dumpStack();
+  }
+
+  public static void fail(String message) {
+    System.out.println("fail: " + message);
+    Thread.dumpStack();
+  }
+
+  public static void testConstructors() throws Throwable {
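+    // findConstructor returns a handle that allocates a new instance and runs <init>;
+    // its type is (constructor args) -> declaring class, e.g. (float)Float below.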
+    MethodHandle mh =
+        MethodHandles.lookup().findConstructor(Float.class,
+                                               MethodType.methodType(void.class,
+                                                                     float.class));
+    Float value = (Float) mh.invokeExact(0.33f);
+    if (value.floatValue() != 0.33f) {
+      fail("Unexpected float value from invokeExact " + value.floatValue());
+    }
+
+    value = (Float) mh.invoke(3.34f);
+    if (value.floatValue() != 3.34f) {
+      fail("Unexpected float value from invoke " + value.floatValue());
+    }
+
+    mh = MethodHandles.lookup().findConstructor(Double.class,
+                                                MethodType.methodType(void.class, String.class));
+    Double d = (Double) mh.invoke("8.45e3");
+    if (d.doubleValue() != 8.45e3) {
+      fail("Unexpected double value from Double(String) " + d.doubleValue());
+    }
+
+    mh = MethodHandles.lookup().findConstructor(Double.class,
+                                                MethodType.methodType(void.class, double.class));
+    d = (Double) mh.invoke(8.45e3);
+    if (d.doubleValue() != 8.45e3) {
+      fail("Unexpected double value from Double(double) " + d.doubleValue());
+    }
+
+    // Primitive type
+    try {
+      mh = MethodHandles.lookup().findConstructor(int.class, MethodType.methodType(void.class));
+      fail("Unexpected lookup success for primitive constructor");
+    } catch (NoSuchMethodException e) {}
+
+    // Interface
+    try {
+      mh = MethodHandles.lookup().findConstructor(Readable.class,
+                                                  MethodType.methodType(void.class));
+      fail("Unexpected lookup success for interface constructor");
+    } catch (NoSuchMethodException e) {}
+
+    // Abstract
+    mh = MethodHandles.lookup().findConstructor(Process.class, MethodType.methodType(void.class));
+    try {
+      mh.invoke();
+      fail("Unexpected ability to instantiate an abstract class");
+    } catch (InstantiationException e) {}
+
+    // Non-existent
+    try {
+        MethodHandle bad = MethodHandles.lookup().findConstructor(
+            String.class, MethodType.methodType(String.class, Float.class));
+        fail("Unexpected success for non-existent constructor");
+    } catch (NoSuchMethodException e) {}
+
+    // Non-void constructor search. (I)I instead of (I)V.
+    try {
+        MethodHandle foo = MethodHandles.lookup().findConstructor(
+            Integer.class, MethodType.methodType(Integer.class, Integer.class));
+        fail("Unexpected success for non-void type for findConstructor");
+    } catch (NoSuchMethodException e) {}
+  }
+
+  public static void testStringConstructors() throws Throwable {
+    final String testPattern = "The system as we know it is broken";
+
+    // String()
+    MethodHandle mh = MethodHandles.lookup().findConstructor(
+        String.class, MethodType.methodType(void.class));
+    String s = (String) mh.invokeExact();
+    if (!s.equals("")) {
+      fail("Unexpected empty string constructor result: '" + s + "'");
+    }
+
+    // String(String)
+    mh = MethodHandles.lookup().findConstructor(
+        String.class, MethodType.methodType(void.class, String.class));
+    s = (String) mh.invokeExact(testPattern);
+    if (!s.equals(testPattern)) {
+      fail("Unexpected string constructor result: '" + s + "'");
+    }
+
+    // String(char[])
+    mh = MethodHandles.lookup().findConstructor(
+        String.class, MethodType.methodType(void.class, char[].class));
+    s = (String) mh.invokeExact(testPattern.toCharArray());
+    if (!s.equals(testPattern)) {
+      fail("Unexpected string constructor result: '" + s + "'");
+    }
+
+    // String(char[], int, int)
+    mh = MethodHandles.lookup().findConstructor(
+        String.class, MethodType.methodType(void.class, char[].class, int.class, int.class));
+    s = (String) mh.invokeExact(new char [] { 'a', 'b', 'c', 'd', 'e'}, 2, 3);
+    if (!s.equals("cde")) {
+      fail("Unexpected string constructor result: '" + s + "'");
+    }
+
+    // String(int[] codePoints, int offset, int count)
+    StringBuffer sb = new StringBuffer(testPattern);
+    int[] codePoints = new int[sb.codePointCount(0, sb.length())];
+    for (int i = 0; i < sb.length(); ++i) {
+      codePoints[i] = sb.codePointAt(i);
+    }
+    mh = MethodHandles.lookup().findConstructor(
+        String.class, MethodType.methodType(void.class, int[].class, int.class, int.class));
+    s = (String) mh.invokeExact(codePoints, 0, codePoints.length);
+    if (!s.equals(testPattern)) {
+      fail("Unexpected string constructor result: '" + s + "'");
+    }
+
+    // String(byte bytes[], int offset, int length) using the US-ASCII bytes of testPattern.
+    byte [] ascii = testPattern.getBytes(StandardCharsets.US_ASCII);
+    mh = MethodHandles.lookup().findConstructor(
+        String.class, MethodType.methodType(void.class, byte[].class, int.class, int.class));
+    s = (String) mh.invokeExact(ascii, 0, ascii.length);
+    if (!s.equals(testPattern)) {
+      fail("Unexpected string constructor result: '" + s + "'");
+    }
+
+    // String(byte bytes[], int offset, int length, String charsetName)
+    mh = MethodHandles.lookup().findConstructor(
+        String.class,
+        MethodType.methodType(void.class, byte[].class, int.class, int.class, String.class));
+    s = (String) mh.invokeExact(ascii, 0, 5, StandardCharsets.US_ASCII.name());
+    if (!s.equals(testPattern.substring(0, 5))) {
+      fail("Unexpected string constructor result: '" + s + "'");
+    }
+
+    // String(byte bytes[], int offset, int length, Charset charset)
+    mh = MethodHandles.lookup().findConstructor(
+        String.class,
+        MethodType.methodType(void.class, byte[].class, int.class, int.class, Charset.class));
+    s = (String) mh.invokeExact(ascii, 0, 5, StandardCharsets.US_ASCII);
+    if (!s.equals(testPattern.substring(0, 5))) {
+      fail("Unexpected string constructor result: '" + s + "'");
+    }
+
+    // String(byte bytes[], String charsetName)
+    mh = MethodHandles.lookup().findConstructor(
+        String.class,
+        MethodType.methodType(void.class, byte[].class, String.class));
+    s = (String) mh.invokeExact(ascii, StandardCharsets.US_ASCII.name());
+    if (!s.equals(testPattern)) {
+      fail("Unexpected string constructor result: '" + s + "'");
+    }
+
+    // String(byte bytes[], Charset charset)
+    mh = MethodHandles.lookup().findConstructor(
+        String.class, MethodType.methodType(void.class, byte[].class, Charset.class));
+    s = (String) mh.invokeExact(ascii, StandardCharsets.US_ASCII);
+    if (!s.equals(testPattern)) {
+      fail("Unexpected string constructor result: '" + s + "'");
+    }
+
+    // String(byte bytes[], int offset, int length)
+    mh = MethodHandles.lookup().findConstructor(
+        String.class, MethodType.methodType(void.class, byte[].class, int.class, int.class));
+    s = (String) mh.invokeExact(ascii, 1, ascii.length - 2);
+    s = testPattern.charAt(0) + s + testPattern.charAt(testPattern.length() - 1);
+    if (!s.equals(testPattern)) {
+      fail("Unexpected string constructor result: '" + s + "'");
+    }
+
+    // String(byte bytes[])
+    mh = MethodHandles.lookup().findConstructor(
+        String.class, MethodType.methodType(void.class, byte[].class));
+    s = (String) mh.invokeExact(ascii);
+    if (!s.equals(testPattern)) {
+      fail("Unexpected string constructor result: '" + s + "'");
+    }
+
+    // String(StringBuffer buffer)
+    mh = MethodHandles.lookup().findConstructor(
+        String.class, MethodType.methodType(void.class, StringBuffer.class));
+    s = (String) mh.invokeExact(sb);
+    if (!s.equals(testPattern)) {
+      fail("Unexpected string constructor result: '" + s + "'");
+    }
+
+    System.out.println("String constructors done.");
+  }
+
+  private static void testReferenceReturnValueConversions() throws Throwable {
+    MethodHandle mh = MethodHandles.lookup().findStatic(
+        Float.class, "valueOf", MethodType.methodType(Float.class, String.class));
+
+    // No conversion
+    Float f = (Float) mh.invokeExact("1.375");
+    if (f.floatValue() != 1.375) {
+      fail();
+    }
+    f = (Float) mh.invoke("1.875");
+    if (f.floatValue() != 1.875) {
+      fail();
+    }
+
+    // Bad conversion
+    try {
+      int i = (int) mh.invokeExact("7.77");
+      fail();
+    } catch (WrongMethodTypeException e) {}
+
+    try {
+      int i = (int) mh.invoke("7.77");
+      fail();
+    } catch (WrongMethodTypeException e) {}
+
+    // Assignment to super-class.
+    Number n = (Number) mh.invoke("1.11");
+    try {
+      Number o = (Number) mh.invokeExact("1.11");
+      fail();
+    } catch (WrongMethodTypeException e) {}
+
+    // Assignment to widened boxed primitive class.
+    try {
+      Double u = (Double) mh.invoke("1.11");
+      fail();
+    } catch (ClassCastException e) {}
+
+    try {
+      Double v = (Double) mh.invokeExact("1.11");
+      fail();
+    } catch (WrongMethodTypeException e) {}
+
+    // Unboxed
+    float p = (float) mh.invoke("1.11");
+    if (p != 1.11f) {
+      fail();
+    }
+
+    // Unboxed and widened
+    double d = (double) mh.invoke("2.5");
+    if (d != 2.5) {
+      fail();
+    }
+
+    // Interface
+    Comparable<Float> c = (Comparable<Float>) mh.invoke("2.125");
+    if (c.compareTo(new Float(2.125f)) != 0) {
+      fail();
+    }
+
+    System.out.println("testReferenceReturnValueConversions done.");
+  }
+
+  private static void testPrimitiveReturnValueConversions() throws Throwable {
+    MethodHandle mh = MethodHandles.lookup().findStatic(
+        Math.class, "min", MethodType.methodType(int.class, int.class, int.class));
+
+    final int SMALL = -8972;
+    final int LARGE = 7932529;
+
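+    // invokeExact requires the call-site signature to match (int, int)int exactly, while
+    // invoke() first applies asType() conversions; both paths are checked below.
+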
+    // No conversion
+    if ((int) mh.invokeExact(LARGE, SMALL) != SMALL) {
+      fail();
+    } else if ((int) mh.invoke(LARGE, SMALL) != SMALL) {
+      fail();
+    } else if ((int) mh.invokeExact(SMALL, LARGE) != SMALL) {
+      fail();
+    } else if ((int) mh.invoke(SMALL, LARGE) != SMALL) {
+      fail();
+    }
+
+    // int -> long
+    try {
+      long unused = (long) mh.invokeExact(LARGE, SMALL);
+      fail();
+    } catch (WrongMethodTypeException e) {}
+
+    if ((long) mh.invoke(LARGE, SMALL) != (long) SMALL) {
+      fail();
+    }
+
+    // int -> short
+    try {
+      short unused = (short) mh.invokeExact(LARGE, SMALL);
+      fail();
+    } catch (WrongMethodTypeException e) {}
+
+    try {
+      if ((short) mh.invoke(LARGE, SMALL) != (short) SMALL) {
+        fail();
+      }
+    } catch (WrongMethodTypeException e) {}
+
+    // int -> Integer
+    try {
+      Integer unused = (Integer) mh.invokeExact(LARGE, SMALL);
+      fail();
+    } catch (WrongMethodTypeException e) {}
+
+    if (!((Integer) mh.invoke(LARGE, SMALL)).equals(new Integer(SMALL))) {
+      fail();
+    }
+
+    // int -> Long
+    try {
+      Long l = (Long) mh.invokeExact(LARGE, SMALL);
+      fail();
+    } catch (WrongMethodTypeException e) {}
+
+    try {
+      Long l = (Long) mh.invoke(LARGE, SMALL);
+      fail();
+    } catch (WrongMethodTypeException e) {}
+
+    // int -> Short
+    try {
+      Short s = (Short) mh.invokeExact(LARGE, SMALL);
+      fail();
+    } catch (WrongMethodTypeException e) {}
+
+    try {
+      Short s = (Short) mh.invoke(LARGE, SMALL);
+      fail();
+    } catch (WrongMethodTypeException e) {}
+
+    // int -> Process
+    try {
+      Process p = (Process) mh.invokeExact(LARGE, SMALL);
+      fail();
+    } catch (WrongMethodTypeException e) {}
+
+    try {
+      Process p = (Process) mh.invoke(LARGE, SMALL);
+      fail();
+    } catch (WrongMethodTypeException e) {}
+
+    // void -> Object
+    mh = MethodHandles.lookup().findStatic(System.class, "gc", MethodType.methodType(void.class));
+    Object o = (Object) mh.invoke();
+    if (o != null) fail();
+
+    // void -> long
+    long l = (long) mh.invoke();
+    if (l != 0) fail();
+
+    // boolean -> Boolean
+    mh = MethodHandles.lookup().findStatic(Boolean.class, "parseBoolean",
+                                           MethodType.methodType(boolean.class, String.class));
+    Boolean z = (Boolean) mh.invoke("True");
+    if (!z.booleanValue()) fail();
+
+    // boolean -> int
+    try {
+        int dummy = (int) mh.invoke("True");
+        fail();
+    } catch (WrongMethodTypeException e) {}
+
+    // boolean -> Integer
+    try {
+        Integer dummy = (Integer) mh.invoke("True");
+        fail();
+    } catch (WrongMethodTypeException e) {}
+
+    // Boolean -> boolean
+    mh = MethodHandles.lookup().findStatic(Boolean.class, "valueOf",
+                                           MethodType.methodType(Boolean.class, boolean.class));
+    boolean w = (boolean) mh.invoke(false);
+    if (w) fail();
+
+    // Boolean -> int
+    try {
+        int dummy = (int) mh.invoke(false);
+        fail();
+    } catch (WrongMethodTypeException e) {}
+
+    // Boolean -> Integer
+    try {
+        Integer dummy = (Integer) mh.invoke(false);
+        fail();
+    } catch (WrongMethodTypeException e) {}
+
+    System.out.println("testPrimitiveReturnValueConversions done.");
+  }
+
+  public static void testReturnValueConversions() throws Throwable {
+    testReferenceReturnValueConversions();
+    testPrimitiveReturnValueConversions();
+  }
 }
-
-
diff --git a/test/957-methodhandle-transforms/build b/test/957-methodhandle-transforms/build
new file mode 100755
index 0000000..a423ca6
--- /dev/null
+++ b/test/957-methodhandle-transforms/build
@@ -0,0 +1,25 @@
+#!/bin/bash
+#
+# Copyright 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# make us exit on a failure
+set -e
+
+if [[ $@ != *"--jvm"* ]]; then
+  # Don't do anything with jvm.
+  export USE_JACK=true
+fi
+
+./default-build "$@" --experimental method-handles
diff --git a/test/957-methodhandle-transforms/expected.txt b/test/957-methodhandle-transforms/expected.txt
new file mode 100644
index 0000000..7540ef7
--- /dev/null
+++ b/test/957-methodhandle-transforms/expected.txt
@@ -0,0 +1,18 @@
+Message: foo, Message2: 42
+Message: foo, Message2: 42
+Message: foo, Message2: 42
+Message: foo, Message2: 42
+Message: foo, Message2: 42
+Message: foo, Message2: 42
+Message: foo, Message2: 42
+Target: Arg1: foo, Arg2: 42
+Target: Arg1: foo, Arg2: 42
+Handler: java.lang.IllegalArgumentException: exceptionMessage, Arg1: foo, Arg2: 42, ExMsg: exceptionMessage
+Handler: java.lang.IllegalArgumentException: exceptionMessage, Arg1: foo, Arg2: 42, ExMsg: exceptionMessage
+Handler: java.lang.IllegalArgumentException: exceptionMessage, Arg1: foo
+Handler: java.lang.IllegalArgumentException: exceptionMessage, Arg1: foo
+target: target, 42, 56
+target: target, 42, 56
+fallback: fallback, 42, 56
+target: target, 42, 56
+target: target, 42, 56
diff --git a/test/957-methodhandle-transforms/info.txt b/test/957-methodhandle-transforms/info.txt
new file mode 100644
index 0000000..bc50e85
--- /dev/null
+++ b/test/957-methodhandle-transforms/info.txt
@@ -0,0 +1,3 @@
+Tests for method handle transformations.
+
+NOTE: needs to run under ART or a Java 8 Language runtime and compiler.
diff --git a/test/957-methodhandle-transforms/run b/test/957-methodhandle-transforms/run
new file mode 100755
index 0000000..a9f1822
--- /dev/null
+++ b/test/957-methodhandle-transforms/run
@@ -0,0 +1,20 @@
+#!/bin/bash
+#
+# Copyright 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# make us exit on a failure
+set -e
+
+./default-run "$@" --experimental method-handles
diff --git a/test/957-methodhandle-transforms/src/Main.java b/test/957-methodhandle-transforms/src/Main.java
new file mode 100644
index 0000000..5806509
--- /dev/null
+++ b/test/957-methodhandle-transforms/src/Main.java
@@ -0,0 +1,907 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.lang.invoke.MethodHandle;
+import java.lang.invoke.MethodHandles;
+import java.lang.invoke.MethodHandles.Lookup;
+import java.lang.invoke.MethodType;
+import java.lang.invoke.WrongMethodTypeException;
+
+public class Main {
+  public static void main(String[] args) throws Throwable {
+    testThrowException();
+    testDropArguments();
+    testCatchException();
+    testGuardWithTest();
+    testArrayElementGetter();
+    testArrayElementSetter();
+    testIdentity();
+    testConstant();
+    testBindTo();
+    testFilterReturnValue();
+    testPermuteArguments();
+  }
+
+  public static void testThrowException() throws Throwable {
+    MethodHandle handle = MethodHandles.throwException(String.class,
+        IllegalArgumentException.class);
+
+    if (handle.type().returnType() != String.class) {
+      System.out.println("Unexpected return type for handle: " + handle +
+          " [ " + handle.type() + "]");
+    }
+
+    final IllegalArgumentException iae = new IllegalArgumentException("boo!");
+    try {
+      handle.invoke(iae);
+      System.out.println("Expected an exception of type: java.lang.IllegalArgumentException");
+    } catch (IllegalArgumentException expected) {
+      if (expected != iae) {
+        System.out.println("Wrong exception: expected " + iae + " but was " + expected);
+      }
+    }
+  }
+
+  public static void dropArguments_delegate(String message, long message2) {
+    System.out.println("Message: " + message + ", Message2: " + message2);
+  }
+
+  public static void testDropArguments() throws Throwable {
+    MethodHandle delegate = MethodHandles.lookup().findStatic(Main.class,
+        "dropArguments_delegate",
+        MethodType.methodType(void.class, new Class<?>[] { String.class, long.class }));
+
+    MethodHandle transform = MethodHandles.dropArguments(delegate, 0, int.class, Object.class);
+
+    // The transformer will accept two additional arguments at position zero.
+    try {
+      transform.invokeExact("foo", 42l);
+      fail();
+    } catch (WrongMethodTypeException expected) {
+    }
+
+    transform.invokeExact(45, new Object(), "foo", 42l);
+    transform.invoke(45, new Object(), "foo", 42l);
+
+    // Additional arguments at position 1.
+    transform = MethodHandles.dropArguments(delegate, 1, int.class, Object.class);
+    transform.invokeExact("foo", 45, new Object(), 42l);
+    transform.invoke("foo", 45, new Object(), 42l);
+
+    // Additional arguments at position 2.
+    transform = MethodHandles.dropArguments(delegate, 2, int.class, Object.class);
+    transform.invokeExact("foo", 42l, 45, new Object());
+    transform.invoke("foo", 42l, 45, new Object());
+
+    // Note that we still perform argument conversions even for the arguments that
+    // are subsequently dropped.
+    try {
+      transform.invoke("foo", 42l, 45l, new Object());
+      fail();
+    } catch (WrongMethodTypeException expected) {
+    } catch (IllegalArgumentException expected) {
+      // TODO(narayan): We currently throw the wrong type of exception here:
+      // it is IAE but should be WMTE instead.
+    }
+
+    // Check that asType works as expected.
+    transform = MethodHandles.dropArguments(delegate, 0, int.class, Object.class);
+    transform = transform.asType(MethodType.methodType(void.class,
+          new Class<?>[] { short.class, Object.class, String.class, long.class }));
+    transform.invokeExact((short) 45, new Object(), "foo", 42l);
+
+    // Invalid argument location, should not be allowed.
+    try {
+      MethodHandles.dropArguments(delegate, -1, int.class, Object.class);
+      fail();
+    } catch (IllegalArgumentException expected) {
+    }
+
+    // Invalid argument location, should not be allowed.
+    try {
+      MethodHandles.dropArguments(delegate, 3, int.class, Object.class);
+      fail();
+    } catch (IllegalArgumentException expected) {
+    }
+
+    try {
+      MethodHandles.dropArguments(delegate, 1, void.class);
+      fail();
+    } catch (IllegalArgumentException expected) {
+    }
+  }
+
+  public static String testCatchException_target(String arg1, long arg2, String exceptionMessage)
+      throws Throwable {
+    if (exceptionMessage != null) {
+      throw new IllegalArgumentException(exceptionMessage);
+    }
+
+    System.out.println("Target: Arg1: " + arg1 + ", Arg2: " + arg2);
+    return "target";
+  }
+
+  public static String testCatchException_handler(IllegalArgumentException iae, String arg1, long arg2,
+      String exMsg) {
+    System.out.println("Handler: " + iae + ", Arg1: " + arg1 + ", Arg2: " + arg2 + ", ExMsg: " + exMsg);
+    return "handler1";
+  }
+
+  public static String testCatchException_handler2(IllegalArgumentException iae, String arg1) {
+    System.out.println("Handler: " + iae + ", Arg1: " + arg1);
+    return "handler2";
+  }
+
+  public static void testCatchException() throws Throwable {
+    MethodHandle target = MethodHandles.lookup().findStatic(Main.class,
+        "testCatchException_target",
+        MethodType.methodType(String.class, new Class<?>[] { String.class, long.class, String.class }));
+
+    MethodHandle handler = MethodHandles.lookup().findStatic(Main.class,
+        "testCatchException_handler",
+        MethodType.methodType(String.class, new Class<?>[] { IllegalArgumentException.class,
+            String.class, long.class, String.class }));
+
+    MethodHandle adapter = MethodHandles.catchException(target, IllegalArgumentException.class,
+        handler);
+
+    String returnVal = null;
+
+    // These two should always end up calling the target. We're passing a null exception
+    // message here, which means the target will not throw.
+    returnVal = (String) adapter.invoke("foo", 42, null);
+    assertEquals("target", returnVal);
+    returnVal = (String) adapter.invokeExact("foo", 42l, (String) null);
+    assertEquals("target", returnVal);
+
+    // We're passing a non-null exception message here, which means the target will throw,
+    // which in turn means that the handler must be called for the next two invokes.
+    returnVal = (String) adapter.invoke("foo", 42, "exceptionMessage");
+    assertEquals("handler1", returnVal);
+    returnVal = (String) adapter.invokeExact("foo", 42l, "exceptionMessage");
+    assertEquals("handler1", returnVal);
+
+    handler = MethodHandles.lookup().findStatic(Main.class,
+        "testCatchException_handler2",
+        MethodType.methodType(String.class, new Class<?>[] { IllegalArgumentException.class,
+            String.class }));
+    adapter = MethodHandles.catchException(target, IllegalArgumentException.class, handler);
+
+    returnVal = (String) adapter.invoke("foo", 42, "exceptionMessage");
+    assertEquals("handler2", returnVal);
+    returnVal = (String) adapter.invokeExact("foo", 42l, "exceptionMessage");
+    assertEquals("handler2", returnVal);
+
+    // Test that the type of the invoke doesn't matter. Here we call
+    // IllegalArgumentException.toString() on the exception that was thrown by
+    // the target.
+    handler = MethodHandles.lookup().findVirtual(IllegalArgumentException.class,
+        "toString", MethodType.methodType(String.class));
+    adapter = MethodHandles.catchException(target, IllegalArgumentException.class, handler);
+
+    returnVal = (String) adapter.invoke("foo", 42, "exceptionMessage");
+    assertEquals("java.lang.IllegalArgumentException: exceptionMessage", returnVal);
+    returnVal = (String) adapter.invokeExact("foo", 42l, "exceptionMessage2");
+    assertEquals("java.lang.IllegalArgumentException: exceptionMessage2", returnVal);
+
+    // Check that asType works as expected.
+    adapter = MethodHandles.catchException(target, IllegalArgumentException.class,
+        handler);
+    adapter = adapter.asType(MethodType.methodType(String.class,
+          new Class<?>[] { String.class, int.class, String.class }));
+    returnVal = (String) adapter.invokeExact("foo", 42, "exceptionMessage");
+    assertEquals("java.lang.IllegalArgumentException: exceptionMessage", returnVal);
+  }
+
+  public static boolean testGuardWithTest_test(String arg1, long arg2) {
+    return "target".equals(arg1) && 42 == arg2;
+  }
+
+  public static String testGuardWithTest_target(String arg1, long arg2, int arg3) {
+    System.out.println("target: " + arg1 + ", " + arg2  + ", " + arg3);
+    return "target";
+  }
+
+  public static String testGuardWithTest_fallback(String arg1, long arg2, int arg3) {
+    System.out.println("fallback: " + arg1 + ", " + arg2  + ", " + arg3);
+    return "fallback";
+  }
+
+  public static void testGuardWithTest() throws Throwable {
+    MethodHandle test = MethodHandles.lookup().findStatic(Main.class,
+        "testGuardWithTest_test",
+        MethodType.methodType(boolean.class, new Class<?>[] { String.class, long.class }));
+
+    final MethodType type = MethodType.methodType(String.class,
+        new Class<?>[] { String.class, long.class, int.class });
+
+    final MethodHandle target = MethodHandles.lookup().findStatic(Main.class,
+        "testGuardWithTest_target", type);
+    final MethodHandle fallback = MethodHandles.lookup().findStatic(Main.class,
+        "testGuardWithTest_fallback", type);
+
+    MethodHandle adapter = MethodHandles.guardWithTest(test, target, fallback);
+
+    String returnVal = null;
+
+    returnVal = (String) adapter.invoke("target", 42, 56);
+    assertEquals("target", returnVal);
+    returnVal = (String) adapter.invokeExact("target", 42l, 56);
+    assertEquals("target", returnVal);
+
+    returnVal = (String) adapter.invoke("fallback", 42l, 56);
+    assertEquals("fallback", returnVal);
+    returnVal = (String) adapter.invokeExact("target", 42l, 56);
+    assertEquals("target", returnVal);
+
+    // Check that asType works as expected.
+    adapter = adapter.asType(MethodType.methodType(String.class,
+          new Class<?>[] { String.class, int.class, int.class }));
+    returnVal = (String) adapter.invokeExact("target", 42, 56);
+    assertEquals("target", returnVal);
+  }
+
+  public static void testArrayElementGetter() throws Throwable {
+    MethodHandle getter = MethodHandles.arrayElementGetter(int[].class);
+
+    {
+      int[] array = new int[1];
+      array[0] = 42;
+      int value = (int) getter.invoke(array, 0);
+      if (value != 42) {
+        System.out.println("Unexpected value: " + value);
+      }
+
+      try {
+        value = (int) getter.invoke(array, -1);
+        fail();
+      } catch (ArrayIndexOutOfBoundsException expected) {
+      }
+
+      try {
+        value = (int) getter.invoke(null, -1);
+        fail();
+      } catch (NullPointerException expected) {
+      }
+    }
+
+    {
+      getter = MethodHandles.arrayElementGetter(long[].class);
+      long[] array = new long[1];
+      array[0] = 42;
+      long value = (long) getter.invoke(array, 0);
+      if (value != 42l) {
+        System.out.println("Unexpected value: " + value);
+      }
+    }
+
+    {
+      getter = MethodHandles.arrayElementGetter(short[].class);
+      short[] array = new short[1];
+      array[0] = 42;
+      short value = (short) getter.invoke(array, 0);
+      if (value != 42l) {
+        System.out.println("Unexpected value: " + value);
+      }
+    }
+
+    {
+      getter = MethodHandles.arrayElementGetter(char[].class);
+      char[] array = new char[1];
+      array[0] = 42;
+      char value = (char) getter.invoke(array, 0);
+      if (value != 42l) {
+        System.out.println("Unexpected value: " + value);
+      }
+    }
+
+    {
+      getter = MethodHandles.arrayElementGetter(byte[].class);
+      byte[] array = new byte[1];
+      array[0] = (byte) 0x8;
+      byte value = (byte) getter.invoke(array, 0);
+      if (value != (byte) 0x8) {
+        System.out.println("Unexpected value: " + value);
+      }
+    }
+
+    {
+      getter = MethodHandles.arrayElementGetter(boolean[].class);
+      boolean[] array = new boolean[1];
+      array[0] = true;
+      boolean value = (boolean) getter.invoke(array, 0);
+      if (!value) {
+        System.out.println("Unexpected value: " + value);
+      }
+    }
+
+    {
+      getter = MethodHandles.arrayElementGetter(float[].class);
+      float[] array = new float[1];
+      array[0] = 42.0f;
+      float value = (float) getter.invoke(array, 0);
+      if (value != 42.0f) {
+        System.out.println("Unexpected value: " + value);
+      }
+    }
+
+    {
+      getter = MethodHandles.arrayElementGetter(double[].class);
+      double[] array = new double[1];
+      array[0] = 42.0;
+      double value = (double) getter.invoke(array, 0);
+      if (value != 42.0) {
+        System.out.println("Unexpected value: " + value);
+      }
+    }
+
+    {
+      getter = MethodHandles.arrayElementGetter(String[].class);
+      String[] array = new String[3];
+      array[0] = "42";
+      array[1] = "48";
+      array[2] = "54";
+      String value = (String) getter.invoke(array, 0);
+      assertEquals("42", value);
+      value = (String) getter.invoke(array, 1);
+      assertEquals("48", value);
+      value = (String) getter.invoke(array, 2);
+      assertEquals("54", value);
+    }
+  }
+
+  public static void testArrayElementSetter() throws Throwable {
+    MethodHandle setter = MethodHandles.arrayElementSetter(int[].class);
+
+    {
+      int[] array = new int[2];
+      setter.invoke(array, 0, 42);
+      setter.invoke(array, 1, 43);
+
+      if (array[0] != 42) {
+        System.out.println("Unexpected value: " + array[0]);
+      }
+      if (array[1] != 43) {
+        System.out.println("Unexpected value: " + array[1]);
+      }
+
+      try {
+        setter.invoke(array, -1, 42);
+        fail();
+      } catch (ArrayIndexOutOfBoundsException expected) {
+      }
+
+      try {
+        setter.invoke(null, 0, 42);
+        fail();
+      } catch (NullPointerException expected) {
+      }
+    }
+
+    {
+      setter = MethodHandles.arrayElementSetter(long[].class);
+      long[] array = new long[1];
+      setter.invoke(array, 0, 42l);
+      if (array[0] != 42l) {
+        System.out.println("Unexpected value: " + array[0]);
+      }
+    }
+
+    {
+      setter = MethodHandles.arrayElementSetter(short[].class);
+      short[] array = new short[1];
+      setter.invoke(array, 0, (short) 42);
+      if (array[0] != 42l) {
+        System.out.println("Unexpected value: " + array[0]);
+      }
+    }
+
+    {
+      setter = MethodHandles.arrayElementSetter(char[].class);
+      char[] array = new char[1];
+      setter.invoke(array, 0, (char) 42);
+      if (array[0] != 42) {
+        System.out.println("Unexpected value: " + array[0]);
+      }
+    }
+
+    {
+      setter = MethodHandles.arrayElementSetter(byte[].class);
+      byte[] array = new byte[1];
+      setter.invoke(array, 0, (byte) 0x8);
+      if (array[0] != (byte) 0x8) {
+        System.out.println("Unexpected value: " + array[0]);
+      }
+    }
+
+    {
+      setter = MethodHandles.arrayElementSetter(boolean[].class);
+      boolean[] array = new boolean[1];
+      setter.invoke(array, 0, true);
+      if (!array[0]) {
+        System.out.println("Unexpected value: " + array[0]);
+      }
+    }
+
+    {
+      setter = MethodHandles.arrayElementSetter(float[].class);
+      float[] array = new float[1];
+      setter.invoke(array, 0, 42.0f);
+      if (array[0] != 42.0f) {
+        System.out.println("Unexpected value: " + array[0]);
+      }
+    }
+
+    {
+      setter = MethodHandles.arrayElementSetter(double[].class);
+      double[] array = new double[1];
+      setter.invoke(array, 0, 42.0);
+      if (array[0] != 42.0) {
+        System.out.println("Unexpected value: " + array[0]);
+      }
+    }
+
+    {
+      setter = MethodHandles.arrayElementSetter(String[].class);
+      String[] array = new String[3];
+      setter.invoke(array, 0, "42");
+      setter.invoke(array, 1, "48");
+      setter.invoke(array, 2, "54");
+      assertEquals("42", array[0]);
+      assertEquals("48", array[1]);
+      assertEquals("54", array[2]);
+    }
+  }
+
+  public static void testIdentity() throws Throwable {
+    {
+      MethodHandle identity = MethodHandles.identity(boolean.class);
+      boolean value = (boolean) identity.invoke(false);
+      if (value) {
+        System.out.println("Unexpected value: " + value);
+      }
+    }
+
+    {
+      MethodHandle identity = MethodHandles.identity(byte.class);
+      byte value = (byte) identity.invoke((byte) 0x8);
+      if (value != (byte) 0x8) {
+        System.out.println("Unexpected value: " + value);
+      }
+    }
+
+    {
+      MethodHandle identity = MethodHandles.identity(char.class);
+      char value = (char) identity.invoke((char) -56);
+      if (value != (char) -56) {
+        System.out.println("Unexpected value: " + value);
+      }
+    }
+
+    {
+      MethodHandle identity = MethodHandles.identity(short.class);
+      short value = (short) identity.invoke((short) -59);
+      if (value != (short) -59) {
+        System.out.println("Unexpected value: " + value);
+      }
+    }
+
+    {
+      MethodHandle identity = MethodHandles.identity(int.class);
+      int value = (int) identity.invoke(52);
+      if (value != 52) {
+        System.out.println("Unexpected value: " + value);
+      }
+    }
+
+    {
+      MethodHandle identity = MethodHandles.identity(long.class);
+      long value = (long) identity.invoke(-76l);
+      if (value != (long) -76) {
+        System.out.println("Unexpected value: " + value);
+      }
+    }
+
+    {
+      MethodHandle identity = MethodHandles.identity(float.class);
+      float value = (float) identity.invoke(56.0f);
+      if (value != (float) 56.0f) {
+        System.out.println("Unexpected value: " + value);
+      }
+    }
+
+    {
+      MethodHandle identity = MethodHandles.identity(double.class);
+      double value = (double) identity.invoke((double) 72.0);
+      if (value != (double) 72.0) {
+        System.out.println("Unexpected value: " + value);
+      }
+    }
+
+    {
+      MethodHandle identity = MethodHandles.identity(String.class);
+      String value = (String) identity.invoke("bazman");
+      assertEquals("bazman", value);
+    }
+  }
+
+  public static void testConstant() throws Throwable {
+    // int constants.
+    {
+      MethodHandle constant = MethodHandles.constant(int.class, 56);
+      int value = (int) constant.invoke();
+      if (value != 56) {
+        System.out.println("Unexpected value: " + value);
+      }
+
+      // short constant values are converted to int.
+      constant = MethodHandles.constant(int.class, (short) 52);
+      value = (int) constant.invoke();
+      if (value != 52) {
+        System.out.println("Unexpected value: " + value);
+      }
+
+      // char constant values are converted to int.
+      constant = MethodHandles.constant(int.class, (char) 'b');
+      value = (int) constant.invoke();
+      if (value != (int) 'b') {
+        System.out.println("Unexpected value: " + value);
+      }
+
+      // byte constant values are converted to int.
+      constant = MethodHandles.constant(int.class, (byte) 0x1);
+      value = (int) constant.invoke();
+      if (value != 1) {
+        System.out.println("Unexpected value: " + value);
+      }
+
+      // boolean, float, double and long primitive constants are not convertible
+      // to int, so the handle creation must fail with a CCE.
+      try {
+        MethodHandles.constant(int.class, false);
+        fail();
+      } catch (ClassCastException expected) {
+      }
+
+      try {
+        MethodHandles.constant(int.class, 0.1f);
+        fail();
+      } catch (ClassCastException expected) {
+      }
+
+      try {
+        MethodHandles.constant(int.class, 0.2);
+        fail();
+      } catch (ClassCastException expected) {
+      }
+
+      try {
+        MethodHandles.constant(int.class, 73l);
+        fail();
+      } catch (ClassCastException expected) {
+      }
+    }
+
+    // long constants.
+    {
+      MethodHandle constant = MethodHandles.constant(long.class, 56l);
+      long value = (long) constant.invoke();
+      if (value != 56l) {
+        System.out.println("Unexpected value: " + value);
+      }
+
+      constant = MethodHandles.constant(long.class, (int) 56);
+      value = (long) constant.invoke();
+      if (value != 56l) {
+        System.out.println("Unexpected value: " + value);
+      }
+    }
+
+    // byte constants.
+    {
+      MethodHandle constant = MethodHandles.constant(byte.class, (byte) 0x12);
+      byte value = (byte) constant.invoke();
+      if (value != (byte) 0x12) {
+        System.out.println("Unexpected value: " + value);
+      }
+    }
+
+    // boolean constants.
+    {
+      MethodHandle constant = MethodHandles.constant(boolean.class, true);
+      boolean value = (boolean) constant.invoke();
+      if (!value) {
+        System.out.println("Unexpected value: " + value);
+      }
+    }
+
+    // char constants.
+    {
+      MethodHandle constant = MethodHandles.constant(char.class, 'f');
+      char value = (char) constant.invoke();
+      if (value != 'f') {
+        System.out.println("Unexpected value: " + value);
+      }
+    }
+
+    // short constants.
+    {
+      MethodHandle constant = MethodHandles.constant(short.class, (short) 123);
+      short value = (short) constant.invoke();
+      if (value != (short) 123) {
+        System.out.println("Unexpected value: " + value);
+      }
+    }
+
+    // float constants.
+    {
+      MethodHandle constant = MethodHandles.constant(float.class, 56.0f);
+      float value = (float) constant.invoke();
+      if (value != 56.0f) {
+        System.out.println("Unexpected value: " + value);
+      }
+    }
+
+    // double constants.
+    {
+      MethodHandle constant = MethodHandles.constant(double.class, 256.0);
+      double value = (double) constant.invoke();
+      if (value != 256.0) {
+        System.out.println("Unexpected value: " + value);
+      }
+    }
+
+    // reference constants.
+    {
+      MethodHandle constant = MethodHandles.constant(String.class, "256.0");
+      String value = (String) constant.invoke();
+      assertEquals("256.0", value);
+    }
+  }
+
+  public static void testBindTo() throws Throwable {
+    MethodHandle stringCharAt = MethodHandles.lookup().findVirtual(
+        String.class, "charAt", MethodType.methodType(char.class, int.class));
+
+    char value = (char) stringCharAt.invoke("foo", 0);
+    if (value != 'f') {
+      System.out.println("Unexpected value: " + value);
+    }
+
+    MethodHandle bound = stringCharAt.bindTo("foo");
+    value = (char) bound.invoke(0);
+    if (value != 'f') {
+      System.out.println("Unexpected value: " + value);
+    }
+
+    try {
+      stringCharAt.bindTo(new Object());
+      fail();
+    } catch (ClassCastException expected) {
+    }
+
+    bound = stringCharAt.bindTo(null);
+    try {
+      bound.invoke(0);
+      fail();
+    } catch (NullPointerException expected) {
+    }
+
+    MethodHandle integerParseInt = MethodHandles.lookup().findStatic(
+        Integer.class, "parseInt", MethodType.methodType(int.class, String.class));
+
+    bound = integerParseInt.bindTo("78452");
+    int intValue = (int) bound.invoke();
+    if (intValue != 78452) {
+      System.out.println("Unexpected value: " + intValue);
+    }
+  }
+
+  public static String filterReturnValue_target(int a) {
+    return "ReturnValue" + a;
+  }
+
+  public static boolean filterReturnValue_filter(String value) {
+    return value.indexOf("42") != -1;
+  }
+
+  public static int filterReturnValue_intTarget(String a) {
+    return Integer.parseInt(a);
+  }
+
+  public static int filterReturnValue_intFilter(int b) {
+    return b + 1;
+  }
+
+  public static void filterReturnValue_voidTarget() {
+  }
+
+  public static int filterReturnValue_voidFilter() {
+    return 42;
+  }
+
+  public static void testFilterReturnValue() throws Throwable {
+    // A target that returns a reference.
+    {
+      final MethodHandle target = MethodHandles.lookup().findStatic(Main.class,
+          "filterReturnValue_target", MethodType.methodType(String.class, int.class));
+      final MethodHandle filter = MethodHandles.lookup().findStatic(Main.class,
+          "filterReturnValue_filter", MethodType.methodType(boolean.class, String.class));
+
+      MethodHandle adapter = MethodHandles.filterReturnValue(target, filter);
+
+      boolean value = (boolean) adapter.invoke((int) 42);
+      if (!value) {
+        System.out.println("Unexpected value: " + value);
+      }
+      value = (boolean) adapter.invoke((int) 43);
+      if (value) {
+        System.out.println("Unexpected value: " + value);
+      }
+    }
+
+    // A target that returns a primitive.
+    {
+      final MethodHandle target = MethodHandles.lookup().findStatic(Main.class,
+          "filterReturnValue_intTarget", MethodType.methodType(int.class, String.class));
+      final MethodHandle filter = MethodHandles.lookup().findStatic(Main.class,
+          "filterReturnValue_intFilter", MethodType.methodType(int.class, int.class));
+
+      MethodHandle adapter = MethodHandles.filterReturnValue(target, filter);
+
+      int value = (int) adapter.invoke("56");
+      if (value != 57) {
+        System.out.println("Unexpected value: " + value);
+      }
+    }
+
+    // A target that returns void.
+    {
+      final MethodHandle target = MethodHandles.lookup().findStatic(Main.class,
+          "filterReturnValue_voidTarget", MethodType.methodType(void.class));
+      final MethodHandle filter = MethodHandles.lookup().findStatic(Main.class,
+          "filterReturnValue_voidFilter", MethodType.methodType(int.class));
+
+      MethodHandle adapter = MethodHandles.filterReturnValue(target, filter);
+
+      int value = (int) adapter.invoke();
+      if (value != 42) {
+        System.out.println("Unexpected value: " + value);
+      }
+    }
+  }
+
+  public static void permuteArguments_callee(boolean a, byte b, char c,
+      short d, int e, long f, float g, double h) {
+    if (a == true && b == (byte) 'b' && c == 'c' && d == (short) 56 &&
+        e == 78 && f == (long) 97 && g == 98.0f && h == 97.0) {
+      return;
+    }
+
+    System.out.println("Unexpected arguments: " + a + ", " + b + ", " + c
+        + ", " + d + ", " + e + ", " + f + ", " + g + ", " + h);
+  }
+
+  public static void permuteArguments_boxingCallee(boolean a, Integer b) {
+    if (a && b.intValue() == 42) {
+      return;
+    }
+
+    System.out.println("Unexpected arguments: " + a + ", " + b);
+  }
+
+  public static void testPermuteArguments() throws Throwable {
+    {
+      final MethodHandle target = MethodHandles.lookup().findStatic(
+          Main.class, "permuteArguments_callee",
+          MethodType.methodType(void.class, new Class<?>[] {
+            boolean.class, byte.class, char.class, short.class, int.class,
+            long.class, float.class, double.class }));
+
+      final MethodType newType = MethodType.methodType(void.class, new Class<?>[] {
+        double.class, float.class, long.class, int.class, short.class, char.class,
+        byte.class, boolean.class });
+
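+      // Note: reorder[i] selects which incoming (newType) argument feeds target argument i,
+      // so { 7, 6, 5, 4, 3, 2, 1, 0 } simply reverses the argument order here.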
+      final MethodHandle permutation = MethodHandles.permuteArguments(
+          target, newType, new int[] { 7, 6, 5, 4, 3, 2, 1, 0 });
+
+      permutation.invoke((double) 97.0, (float) 98.0f, (long) 97, 78,
+          (short) 56, 'c', (byte) 'b', (boolean) true);
+
+      // The permutation array is not of the right length.
+      try {
+        MethodHandles.permuteArguments(target, newType,
+            new int[] { 7 });
+        fail();
+      } catch (IllegalArgumentException expected) {
+      }
+
+      // The permutation array has an element that's out of bounds
+      // (there's no argument with idx == 8).
+      try {
+        MethodHandles.permuteArguments(target, newType,
+            new int[] { 8, 6, 5, 4, 3, 2, 1, 0 });
+        fail();
+      } catch (IllegalArgumentException expected) {
+      }
+
+      // The permutation array maps to an incorrect type.
+      try {
+        MethodHandles.permuteArguments(target, newType,
+            new int[] { 7, 7, 5, 4, 3, 2, 1, 0 });
+        fail();
+      } catch (IllegalArgumentException expected) {
+      }
+    }
+
+    // Tests for reference arguments as well as permutations that
+    // repeat arguments.
+    {
+      final MethodHandle target = MethodHandles.lookup().findVirtual(
+          String.class, "concat", MethodType.methodType(String.class, String.class));
+
+      final MethodType newType = MethodType.methodType(String.class, String.class,
+          String.class);
+
+      assertEquals("foobar", (String) target.invoke("foo", "bar"));
+
+      MethodHandle permutation = MethodHandles.permuteArguments(target,
+          newType, new int[] { 1, 0 });
+      assertEquals("barfoo", (String) permutation.invoke("foo", "bar"));
+
+      permutation = MethodHandles.permuteArguments(target, newType, new int[] { 0, 0 });
+      assertEquals("foofoo", (String) permutation.invoke("foo", "bar"));
+
+      permutation = MethodHandles.permuteArguments(target, newType, new int[] { 1, 1 });
+      assertEquals("barbar", (String) permutation.invoke("foo", "bar"));
+    }
+
+    // Tests for boxing and unboxing.
+    {
+      final MethodHandle target = MethodHandles.lookup().findStatic(
+          Main.class, "permuteArguments_boxingCallee",
+          MethodType.methodType(void.class, new Class<?>[] { boolean.class, Integer.class }));
+
+      final MethodType newType = MethodType.methodType(void.class,
+          new Class<?>[] { Integer.class, boolean.class });
+
+      MethodHandle permutation = MethodHandles.permuteArguments(target,
+          newType, new int[] { 1, 0 });
+
+      permutation.invoke(42, true);
+      permutation.invoke(42, Boolean.TRUE);
+      permutation.invoke(Integer.valueOf(42), true);
+      permutation.invoke(Integer.valueOf(42), Boolean.TRUE);
+    }
+  }
+
+  public static void fail() {
+    System.out.println("FAIL");
+    Thread.dumpStack();
+  }
+
+  public static void assertEquals(String s1, String s2) {
+    if (s1 == s2) {
+      return;
+    }
+
+    if (s1 != null && s2 != null && s1.equals(s2)) {
+      return;
+    }
+
+    throw new AssertionError("assertEquals s1: " + s1 + ", s2: " + s2);
+  }
+}
diff --git a/test/958-methodhandle-emulated-stackframe/build b/test/958-methodhandle-emulated-stackframe/build
new file mode 100755
index 0000000..a423ca6
--- /dev/null
+++ b/test/958-methodhandle-emulated-stackframe/build
@@ -0,0 +1,25 @@
+#!/bin/bash
+#
+# Copyright 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# make us exit on a failure
+set -e
+
+if [[ $@ != *"--jvm"* ]]; then
+  # Use Jack to build the test unless we are running with --jvm.
+  export USE_JACK=true
+fi
+
+./default-build "$@" --experimental method-handles
diff --git a/test/958-methodhandle-emulated-stackframe/expected.txt b/test/958-methodhandle-emulated-stackframe/expected.txt
new file mode 100644
index 0000000..5f38259
--- /dev/null
+++ b/test/958-methodhandle-emulated-stackframe/expected.txt
@@ -0,0 +1,32 @@
+boolean: false
+char: h
+short: 56
+int: 72
+long: 2147483689
+float: 0.56
+double: 100.0
+String: hello
+Object: goodbye
+boolean: false
+char: h
+short: 56
+int: 72
+long: 73
+float: 0.56
+double: 100.0
+String: hello
+Object: goodbye
+true
+true
+a
+a
+42
+42
+43
+43
+43.0
+43.0
+43.0
+43.0
+plank
+plank
diff --git a/test/958-methodhandle-emulated-stackframe/info.txt b/test/958-methodhandle-emulated-stackframe/info.txt
new file mode 100644
index 0000000..bec2324
--- /dev/null
+++ b/test/958-methodhandle-emulated-stackframe/info.txt
@@ -0,0 +1,5 @@
+Tests for dalvik.system.EmulatedStackFrame, which is used to implement
+MethodHandle transformations. This is a separate test because it tests
+an implementation detail and hence cannot be used with --mode=jvm.
+
+NOTE: needs to run under ART or a Java 8 Language runtime and compiler.
diff --git a/test/958-methodhandle-emulated-stackframe/run b/test/958-methodhandle-emulated-stackframe/run
new file mode 100755
index 0000000..a9f1822
--- /dev/null
+++ b/test/958-methodhandle-emulated-stackframe/run
@@ -0,0 +1,20 @@
+#!/bin/bash
+#
+# Copyright 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# make us exit on a failure
+set -e
+
+./default-run "$@" --experimental method-handles
diff --git a/test/958-methodhandle-emulated-stackframe/src/Main.java b/test/958-methodhandle-emulated-stackframe/src/Main.java
new file mode 100644
index 0000000..f739d47
--- /dev/null
+++ b/test/958-methodhandle-emulated-stackframe/src/Main.java
@@ -0,0 +1,175 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.lang.invoke.MethodHandle;
+import java.lang.invoke.MethodHandles;
+import java.lang.invoke.MethodHandles.Lookup;
+import java.lang.invoke.MethodType;
+import java.lang.invoke.WrongMethodTypeException;
+import java.lang.invoke.Transformers.Transformer;
+
+import dalvik.system.EmulatedStackFrame;
+
+public class Main {
+
+  public static void testDelegate_allTypes(boolean z, char a, short b, int c, long d,
+                                           float e, double f, String g, Object h) {
+    System.out.println("boolean: " + z);
+    System.out.println("char: " + a);
+    System.out.println("short: " + b);
+    System.out.println("int: " + c);
+    System.out.println("long: " + d);
+    System.out.println("float: " + e);
+    System.out.println("double: " + f);
+    System.out.println("String: " + g);
+    System.out.println("Object: " + h);
+  }
+
+  public static boolean testDelegate_returnBoolean() {
+    return true;
+  }
+
+  public static char testDelegate_returnChar() {
+    return 'a';
+  }
+
+  public static int testDelegate_returnInt() {
+    return 42;
+  }
+
+  public static long testDelegate_returnLong() {
+    return 43;
+  }
+
+  public static float testDelegate_returnFloat() {
+    return 43.0f;
+  }
+
+  public static double testDelegate_returnDouble() {
+    return 43.0;
+  }
+
+  public static String testDelegate_returnString() {
+    return "plank";
+  }
+
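+  // A minimal Transformer that simply hands the caller's emulated stack frame over to a
+  // delegate MethodHandle; the test uses it to check that arguments and return values
+  // round-trip through dalvik.system.EmulatedStackFrame correctly.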
+  public static class DelegatingTransformer extends Transformer {
+    private final MethodHandle delegate;
+
+    public DelegatingTransformer(MethodHandle delegate) {
+      super(delegate.type());
+      this.delegate = delegate;
+    }
+
+    @Override
+    public void transform(EmulatedStackFrame stackFrame) throws Throwable {
+      delegate.invoke(stackFrame);
+    }
+  }
+
+  public static void main(String[] args) throws Throwable {
+    MethodHandle specialFunctionHandle = MethodHandles.lookup().findStatic(
+        Main.class, "testDelegate_allTypes", MethodType.methodType(void.class,
+          new Class<?>[] { boolean.class, char.class, short.class, int.class, long.class,
+            float.class, double.class, String.class, Object.class }));
+
+    DelegatingTransformer delegate = new DelegatingTransformer(specialFunctionHandle);
+
+    // Test an exact invoke.
+    //
+    // Note that the shorter form below doesn't work and must be
+    // investigated on the Jack side: b/32536744
+    //
+    // delegate.invokeExact(false, 'h', (short) 56, 72, Integer.MAX_VALUE + 42l,
+    //    0.56f, 100.0d, "hello", (Object) "goodbye");
+
+    Object obj = "goodbye";
+    delegate.invokeExact(false, 'h', (short) 56, 72, Integer.MAX_VALUE + 42l,
+        0.56f, 100.0d, "hello", obj);
+
+    // Test a non-exact invoke with one int -> long conversion and a float -> double
+    // conversion.
+    delegate.invoke(false, 'h', (short) 56, 72, 73,
+        0.56f, 100.0f, "hello", "goodbye");
+
+    // Should throw a WrongMethodTypeException if the types don't align.
+    try {
+      delegate.invoke(false);
+      throw new AssertionError("Call to invoke unexpectedly succeeded");
+    } catch (WrongMethodTypeException expected) {
+    }
+
+    // Test return values.
+
+    // boolean.
+    MethodHandle returner = MethodHandles.lookup().findStatic(
+        Main.class, "testDelegate_returnBoolean", MethodType.methodType(boolean.class));
+    delegate = new DelegatingTransformer(returner);
+
+    System.out.println((boolean) delegate.invoke());
+    System.out.println((boolean) delegate.invokeExact());
+
+    // char.
+    returner = MethodHandles.lookup().findStatic(
+        Main.class, "testDelegate_returnChar", MethodType.methodType(char.class));
+    delegate = new DelegatingTransformer(returner);
+
+    System.out.println((char) delegate.invoke());
+    System.out.println((char) delegate.invokeExact());
+
+    // int.
+    returner = MethodHandles.lookup().findStatic(
+        Main.class, "testDelegate_returnInt", MethodType.methodType(int.class));
+    delegate = new DelegatingTransformer(returner);
+
+    System.out.println((int) delegate.invoke());
+    System.out.println((int) delegate.invokeExact());
+
+    // long.
+    returner = MethodHandles.lookup().findStatic(
+        Main.class, "testDelegate_returnLong", MethodType.methodType(long.class));
+    delegate = new DelegatingTransformer(returner);
+
+    System.out.println((long) delegate.invoke());
+    System.out.println((long) delegate.invokeExact());
+
+    // float.
+    returner = MethodHandles.lookup().findStatic(
+        Main.class, "testDelegate_returnFloat", MethodType.methodType(float.class));
+    delegate = new DelegatingTransformer(returner);
+
+    System.out.println((float) delegate.invoke());
+    System.out.println((float) delegate.invokeExact());
+
+    // double.
+    returner = MethodHandles.lookup().findStatic(
+        Main.class, "testDelegate_returnDouble", MethodType.methodType(double.class));
+    delegate = new DelegatingTransformer(returner);
+
+    System.out.println((double) delegate.invoke());
+    System.out.println((double) delegate.invokeExact());
+
+    // references.
+    returner = MethodHandles.lookup().findStatic(
+        Main.class, "testDelegate_returnString", MethodType.methodType(String.class));
+    delegate = new DelegatingTransformer(returner);
+
+    System.out.println((String) delegate.invoke());
+    System.out.println((String) delegate.invokeExact());
+  }
+}
+
+
diff --git a/test/959-invoke-polymorphic-accessors/build b/test/959-invoke-polymorphic-accessors/build
new file mode 100644
index 0000000..a423ca6
--- /dev/null
+++ b/test/959-invoke-polymorphic-accessors/build
@@ -0,0 +1,25 @@
+#!/bin/bash
+#
+# Copyright 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# make us exit on a failure
+set -e
+
+if [[ $@ != *"--jvm"* ]]; then
+  # Use Jack to build the test unless we are running with --jvm.
+  export USE_JACK=true
+fi
+
+./default-build "$@" --experimental method-handles
diff --git a/test/959-invoke-polymorphic-accessors/expected.txt b/test/959-invoke-polymorphic-accessors/expected.txt
new file mode 100644
index 0000000..de2916b
--- /dev/null
+++ b/test/959-invoke-polymorphic-accessors/expected.txt
@@ -0,0 +1,4 @@
+1515870810
+Passed MethodHandles.Lookup tests for accessors.
+Passed MethodHandle.invokeExact() tests for accessors.
+Passed MethodHandle.invoke() tests for accessors.
diff --git a/test/959-invoke-polymorphic-accessors/info.txt b/test/959-invoke-polymorphic-accessors/info.txt
new file mode 100644
index 0000000..b2f55f0
--- /dev/null
+++ b/test/959-invoke-polymorphic-accessors/info.txt
@@ -0,0 +1 @@
+This test requires Jack with invoke-polymorphic support.
diff --git a/test/959-invoke-polymorphic-accessors/run b/test/959-invoke-polymorphic-accessors/run
new file mode 100644
index 0000000..a9f1822
--- /dev/null
+++ b/test/959-invoke-polymorphic-accessors/run
@@ -0,0 +1,20 @@
+#!/bin/bash
+#
+# Copyright 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# make us exit on a failure
+set -e
+
+./default-run "$@" --experimental method-handles
diff --git a/test/959-invoke-polymorphic-accessors/src/Main.java b/test/959-invoke-polymorphic-accessors/src/Main.java
new file mode 100644
index 0000000..b7ecf8e
--- /dev/null
+++ b/test/959-invoke-polymorphic-accessors/src/Main.java
@@ -0,0 +1,919 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+import java.lang.invoke.MethodHandle;
+import java.lang.invoke.MethodHandles;
+import java.lang.invoke.WrongMethodTypeException;
+
+public class Main {
+
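+    // Instance and static fields of every primitive type plus String; the accessor
+    // tests below presumably create getter/setter MethodHandles for these fields.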
+    public static class ValueHolder {
+        public boolean m_z = false;
+        public byte m_b = 0;
+        public char m_c = 'a';
+        public short m_s = 0;
+        public int m_i = 0;
+        public float m_f = 0.0f;
+        public double m_d = 0.0;
+        public long m_j = 0;
+        public String m_l = "a";
+
+        public static boolean s_z;
+        public static byte s_b;
+        public static char s_c;
+        public static short s_s;
+        public static int s_i;
+        public static float s_f;
+        public static double s_d;
+        public static long s_j;
+        public static String s_l;
+
+        public final int m_fi = 0xa5a5a5a5;
+        public static final int s_fi = 0x5a5a5a5a;
+    }
+
+    public static class Tester {
+        public static void assertActualAndExpectedMatch(boolean actual, boolean expected)
+                throws AssertionError {
+            if (actual != expected) {
+                throw new AssertionError("Actual != Expected (" + actual + " != " + expected + ")");
+            }
+        }
+
+        public static void assertTrue(boolean value) throws AssertionError {
+            if (!value) {
+                throw new AssertionError("Value is not true");
+            }
+        }
+
+        public static void unreachable() throws Throwable {
+            throw new Error("unreachable");
+        }
+    }
+
+    public static class InvokeExactTester extends Tester {
+        private enum PrimitiveType {
+            Boolean,
+            Byte,
+            Char,
+            Short,
+            Int,
+            Long,
+            Float,
+            Double,
+            String,
+        }
+
+        private enum AccessorType {
+            IPUT,
+            SPUT,
+            IGET,
+            SGET,
+        }
+
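+        // Each setX/getX helper below performs an invokeExact() on the supplied accessor
+        // handle and records whether a WrongMethodTypeException was thrown; the outcome is
+        // then checked against the caller's expectFailure flag.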
+        static void setByte(MethodHandle m, ValueHolder v, byte value, boolean expectFailure)
+                throws Throwable {
+            boolean exceptionThrown = false;
+            try {
+                if (v == null) {
+                    m.invokeExact(value);
+                }
+                else {
+                    m.invokeExact(v, value);
+                }
+            }
+            catch (WrongMethodTypeException e) {
+                exceptionThrown = true;
+            }
+            assertActualAndExpectedMatch(exceptionThrown, expectFailure);
+        }
+
+        static void setByte(MethodHandle m, byte value, boolean expectFailure) throws Throwable {
+            setByte(m, null, value, expectFailure);
+        }
+
+        static void getByte(MethodHandle m, ValueHolder v, byte value, boolean expectFailure)
+                throws Throwable {
+            boolean exceptionThrown = false;
+            try {
+                final byte got;
+                if (v == null) {
+                    got = (byte)m.invokeExact();
+                } else {
+                    got = (byte)m.invokeExact(v);
+                }
+                assertTrue(got == value);
+            }
+            catch (WrongMethodTypeException e) {
+                exceptionThrown = true;
+            }
+            assertActualAndExpectedMatch(exceptionThrown, expectFailure);
+        }
+
+        static void getByte(MethodHandle m, byte value, boolean expectFailure) throws Throwable {
+            getByte(m, null, value, expectFailure);
+        }
+
+        static void setChar(MethodHandle m, ValueHolder v, char value, boolean expectFailure)
+                throws Throwable {
+            boolean exceptionThrown = false;
+            try {
+                if (v == null) {
+                    m.invokeExact(value);
+                }
+                else {
+                    m.invokeExact(v, value);
+                }
+            }
+            catch (WrongMethodTypeException e) {
+                exceptionThrown = true;
+            }
+            assertActualAndExpectedMatch(exceptionThrown, expectFailure);
+        }
+
+        static void setChar(MethodHandle m, char value, boolean expectFailure) throws Throwable {
+            setChar(m, null, value, expectFailure);
+        }
+
+        static void getChar(MethodHandle m, ValueHolder v, char value, boolean expectFailure)
+                throws Throwable {
+            boolean exceptionThrown = false;
+            try {
+                final char got;
+                if (v == null) {
+                    got = (char)m.invokeExact();
+                } else {
+                    got = (char)m.invokeExact(v);
+                }
+                assertTrue(got == value);
+            }
+            catch (WrongMethodTypeException e) {
+                exceptionThrown = true;
+            }
+            assertActualAndExpectedMatch(exceptionThrown, expectFailure);
+        }
+
+        static void getChar(MethodHandle m, char value, boolean expectFailure) throws Throwable {
+            getChar(m, null, value, expectFailure);
+        }
+
+        static void setShort(MethodHandle m, ValueHolder v, short value, boolean expectFailure)
+                throws Throwable {
+            boolean exceptionThrown = false;
+            try {
+                if (v == null) {
+                    m.invokeExact(value);
+                }
+                else {
+                    m.invokeExact(v, value);
+                }
+            }
+            catch (WrongMethodTypeException e) {
+                exceptionThrown = true;
+            }
+            assertActualAndExpectedMatch(exceptionThrown, expectFailure);
+        }
+
+        static void setShort(MethodHandle m, short value, boolean expectFailure) throws Throwable {
+            setShort(m, null, value, expectFailure);
+        }
+
+        static void getShort(MethodHandle m, ValueHolder v, short value, boolean expectFailure)
+                throws Throwable {
+            boolean exceptionThrown = false;
+            try {
+                final short got = (v == null) ? (short)m.invokeExact() : (short)m.invokeExact(v);
+                assertTrue(got == value);
+            }
+            catch (WrongMethodTypeException e) {
+                exceptionThrown = true;
+            }
+            assertActualAndExpectedMatch(exceptionThrown, expectFailure);
+        }
+
+        static void getShort(MethodHandle m, short value, boolean expectFailure) throws Throwable {
+            getShort(m, null, value, expectFailure);
+        }
+
+        static void setInt(MethodHandle m, ValueHolder v, int value, boolean expectFailure)
+                throws Throwable {
+            boolean exceptionThrown = false;
+            try {
+                if (v == null) {
+                    m.invokeExact(value);
+                }
+                else {
+                    m.invokeExact(v, value);
+                }
+            }
+            catch (WrongMethodTypeException e) {
+                exceptionThrown = true;
+            }
+            assertActualAndExpectedMatch(exceptionThrown, expectFailure);
+        }
+
+        static void setInt(MethodHandle m, int value, boolean expectFailure) throws Throwable {
+            setInt(m, null, value, expectFailure);
+        }
+
+        static void getInt(MethodHandle m, ValueHolder v, int value, boolean expectFailure)
+                throws Throwable {
+            boolean exceptionThrown = false;
+            try {
+                final int got = (v == null) ? (int)m.invokeExact() : (int)m.invokeExact(v);
+                assertTrue(got == value);
+            }
+            catch (WrongMethodTypeException e) {
+                exceptionThrown = true;
+            }
+            assertActualAndExpectedMatch(exceptionThrown, expectFailure);
+        }
+
+        static void getInt(MethodHandle m, int value, boolean expectFailure) throws Throwable {
+            getInt(m, null, value, expectFailure);
+        }
+
+        static void setLong(MethodHandle m, ValueHolder v, long value, boolean expectFailure)
+                throws Throwable {
+            boolean exceptionThrown = false;
+            try {
+                if (v == null) {
+                    m.invokeExact(value);
+                }
+                else {
+                    m.invokeExact(v, value);
+                }
+            }
+            catch (WrongMethodTypeException e) {
+                exceptionThrown = true;
+            }
+            assertActualAndExpectedMatch(exceptionThrown, expectFailure);
+        }
+
+        static void setLong(MethodHandle m, long value, boolean expectFailure) throws Throwable {
+            setLong(m, null, value, expectFailure);
+        }
+
+        static void getLong(MethodHandle m, ValueHolder v, long value, boolean expectFailure)
+                throws Throwable {
+            boolean exceptionThrown = false;
+            try {
+                final long got = (v == null) ? (long)m.invokeExact() : (long)m.invokeExact(v);
+                assertTrue(got == value);
+            }
+            catch (WrongMethodTypeException e) {
+                exceptionThrown = true;
+            }
+            assertActualAndExpectedMatch(exceptionThrown, expectFailure);
+        }
+
+        static void getLong(MethodHandle m, long value, boolean expectFailure) throws Throwable {
+            getLong(m, null, value, expectFailure);
+        }
+
+        static void setFloat(MethodHandle m, ValueHolder v, float value, boolean expectFailure)
+                throws Throwable {
+            boolean exceptionThrown = false;
+            try {
+                if (v == null) {
+                    m.invokeExact(value);
+                }
+                else {
+                    m.invokeExact(v, value);
+                }
+            }
+            catch (WrongMethodTypeException e) {
+                exceptionThrown = true;
+            }
+            assertActualAndExpectedMatch(exceptionThrown, expectFailure);
+        }
+
+        static void setFloat(MethodHandle m, float value, boolean expectFailure) throws Throwable {
+            setFloat(m, null, value, expectFailure);
+        }
+
+        static void getFloat(MethodHandle m, ValueHolder v, float value, boolean expectFailure)
+                throws Throwable {
+            boolean exceptionThrown = false;
+            try {
+                final float got = (v == null) ? (float)m.invokeExact() : (float)m.invokeExact(v);
+                assertTrue(got == value);
+            }
+            catch (WrongMethodTypeException e) {
+                exceptionThrown = true;
+            }
+            assertActualAndExpectedMatch(exceptionThrown, expectFailure);
+        }
+
+        static void getFloat(MethodHandle m, float value, boolean expectFailure) throws Throwable {
+            getFloat(m, null, value, expectFailure);
+        }
+
+        static void setDouble(MethodHandle m, ValueHolder v, double value, boolean expectFailure)
+                throws Throwable {
+            boolean exceptionThrown = false;
+            try {
+                if (v == null) {
+                    m.invokeExact(value);
+                }
+                else {
+                    m.invokeExact(v, value);
+                }
+            }
+            catch (WrongMethodTypeException e) {
+                exceptionThrown = true;
+            }
+            assertActualAndExpectedMatch(exceptionThrown, expectFailure);
+        }
+
+        static void setDouble(MethodHandle m, double value, boolean expectFailure)
+                throws Throwable {
+            setDouble(m, null, value, expectFailure);
+        }
+
+        static void getDouble(MethodHandle m, ValueHolder v, double value, boolean expectFailure)
+                throws Throwable {
+            boolean exceptionThrown = false;
+            try {
+                final double got = (v == null) ? (double)m.invokeExact() : (double)m.invokeExact(v);
+                assertTrue(got == value);
+            }
+            catch (WrongMethodTypeException e) {
+                exceptionThrown = true;
+            }
+            assertActualAndExpectedMatch(exceptionThrown, expectFailure);
+        }
+
+        static void getDouble(MethodHandle m, double value, boolean expectFailure)
+                throws Throwable {
+            getDouble(m, null, value, expectFailure);
+        }
+
+        static void setString(MethodHandle m, ValueHolder v, String value, boolean expectFailure)
+                throws Throwable {
+            boolean exceptionThrown = false;
+            try {
+                if (v == null) {
+                    m.invokeExact(value);
+                }
+                else {
+                    m.invokeExact(v, value);
+                }
+            }
+            catch (WrongMethodTypeException e) {
+                exceptionThrown = true;
+            }
+            assertActualAndExpectedMatch(exceptionThrown, expectFailure);
+        }
+
+        static void setString(MethodHandle m, String value, boolean expectFailure)
+                throws Throwable {
+            setString(m, null, value, expectFailure);
+        }
+
+        static void getString(MethodHandle m, ValueHolder v, String value, boolean expectFailure)
+                throws Throwable {
+            boolean exceptionThrown = false;
+            try {
+                final String got = (v == null) ? (String)m.invokeExact() : (String)m.invokeExact(v);
+                assertTrue(got.equals(value));
+            }
+            catch (WrongMethodTypeException e) {
+                exceptionThrown = true;
+            }
+            assertActualAndExpectedMatch(exceptionThrown, expectFailure);
+        }
+
+        static void getString(MethodHandle m, String value, boolean expectFailure)
+                throws Throwable {
+            getString(m, null, value, expectFailure);
+        }
+
+        static void setBoolean(MethodHandle m, ValueHolder v, boolean value, boolean expectFailure)
+                throws Throwable {
+            boolean exceptionThrown = false;
+            try {
+                if (v == null) {
+                    m.invokeExact(value);
+                }
+                else {
+                    m.invokeExact(v, value);
+                }
+            }
+            catch (WrongMethodTypeException e) {
+                exceptionThrown = true;
+            }
+            assertActualAndExpectedMatch(exceptionThrown, expectFailure);
+        }
+
+        static void setBoolean(MethodHandle m, boolean value, boolean expectFailure)
+                throws Throwable {
+            setBoolean(m, null, value, expectFailure);
+        }
+
+        static void getBoolean(MethodHandle m, ValueHolder v, boolean value, boolean expectFailure)
+                throws Throwable {
+            boolean exceptionThrown = false;
+            try {
+                final boolean got =
+                        (v == null) ? (boolean)m.invokeExact() : (boolean)m.invokeExact(v);
+                assertTrue(got == value);
+            }
+            catch (WrongMethodTypeException e) {
+                exceptionThrown = true;
+            }
+            assertActualAndExpectedMatch(exceptionThrown, expectFailure);
+        }
+
+        static void getBoolean(MethodHandle m, boolean value, boolean expectFailure)
+                throws Throwable {
+            getBoolean(m, null, value, expectFailure);
+        }
+
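+        // Expected outcome of an access attempt: true (WrongMethodTypeException
+        // expected) whenever the attempted primitive type or accessor kind
+        // differs from the ones the MethodHandle was created for.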
+        static boolean resultFor(PrimitiveType actualType, PrimitiveType expectedType,
+                                 AccessorType actualAccessor,
+                                 AccessorType expectedAccessor) {
+            return (actualType != expectedType) || (actualAccessor != expectedAccessor);
+        }
+
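+        // Exercises every typed getter/setter helper against the handle; only
+        // the combination matching 'primitive' and 'accessor' is expected to
+        // succeed, all others should throw WrongMethodTypeException.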
+        static void tryAccessor(MethodHandle methodHandle,
+                                ValueHolder valueHolder,
+                                PrimitiveType primitive,
+                                Object value,
+                                AccessorType accessor) throws Throwable {
+            boolean booleanValue =
+                    value instanceof Boolean ? ((Boolean)value).booleanValue() : false;
+            setBoolean(methodHandle, valueHolder, booleanValue,
+                       resultFor(primitive, PrimitiveType.Boolean, accessor, AccessorType.IPUT));
+            setBoolean(methodHandle, booleanValue,
+                       resultFor(primitive, PrimitiveType.Boolean, accessor, AccessorType.SPUT));
+            getBoolean(methodHandle, valueHolder, booleanValue,
+                       resultFor(primitive, PrimitiveType.Boolean, accessor, AccessorType.IGET));
+            getBoolean(methodHandle, booleanValue,
+                       resultFor(primitive, PrimitiveType.Boolean, accessor, AccessorType.SGET));
+
+            byte byteValue = value instanceof Byte ? ((Byte)value).byteValue() : (byte)0;
+            setByte(methodHandle, valueHolder, byteValue,
+                    resultFor(primitive, PrimitiveType.Byte, accessor, AccessorType.IPUT));
+            setByte(methodHandle, byteValue,
+                    resultFor(primitive, PrimitiveType.Byte, accessor, AccessorType.SPUT));
+            getByte(methodHandle, valueHolder, byteValue,
+                    resultFor(primitive, PrimitiveType.Byte, accessor, AccessorType.IGET));
+            getByte(methodHandle, byteValue,
+                    resultFor(primitive, PrimitiveType.Byte, accessor, AccessorType.SGET));
+
+            char charValue = value instanceof Character ? ((Character)value).charValue() : 'z';
+            setChar(methodHandle, valueHolder, charValue,
+                    resultFor(primitive, PrimitiveType.Char, accessor, AccessorType.IPUT));
+            setChar(methodHandle, charValue,
+                    resultFor(primitive, PrimitiveType.Char, accessor, AccessorType.SPUT));
+            getChar(methodHandle, valueHolder, charValue,
+                    resultFor(primitive, PrimitiveType.Char, accessor, AccessorType.IGET));
+            getChar(methodHandle, charValue,
+                    resultFor(primitive, PrimitiveType.Char, accessor, AccessorType.SGET));
+
+            short shortValue = value instanceof Short ? ((Short)value).shortValue() : (short)0;
+            setShort(methodHandle, valueHolder, shortValue,
+                     resultFor(primitive, PrimitiveType.Short, accessor, AccessorType.IPUT));
+            setShort(methodHandle, shortValue,
+                    resultFor(primitive, PrimitiveType.Short, accessor, AccessorType.SPUT));
+            getShort(methodHandle, valueHolder, shortValue,
+                     resultFor(primitive, PrimitiveType.Short, accessor, AccessorType.IGET));
+            getShort(methodHandle, shortValue,
+                    resultFor(primitive, PrimitiveType.Short, accessor, AccessorType.SGET));
+
+            int intValue = value instanceof Integer ? ((Integer)value).intValue() : -1;
+            setInt(methodHandle, valueHolder, intValue,
+                   resultFor(primitive, PrimitiveType.Int, accessor, AccessorType.IPUT));
+            setInt(methodHandle, intValue,
+                   resultFor(primitive, PrimitiveType.Int, accessor, AccessorType.SPUT));
+            getInt(methodHandle, valueHolder, intValue,
+                   resultFor(primitive, PrimitiveType.Int, accessor, AccessorType.IGET));
+            getInt(methodHandle, intValue,
+                   resultFor(primitive, PrimitiveType.Int, accessor, AccessorType.SGET));
+
+            long longValue = value instanceof Long ? ((Long)value).longValue() : (long)-1;
+            setLong(methodHandle, valueHolder, longValue,
+                    resultFor(primitive, PrimitiveType.Long, accessor, AccessorType.IPUT));
+            setLong(methodHandle, longValue,
+                    resultFor(primitive, PrimitiveType.Long, accessor, AccessorType.SPUT));
+            getLong(methodHandle, valueHolder, longValue,
+                    resultFor(primitive, PrimitiveType.Long, accessor, AccessorType.IGET));
+            getLong(methodHandle, longValue,
+                    resultFor(primitive, PrimitiveType.Long, accessor, AccessorType.SGET));
+
+            float floatValue = value instanceof Float ? ((Float)value).floatValue() : -1.0f;
+            setFloat(methodHandle, valueHolder, floatValue,
+                    resultFor(primitive, PrimitiveType.Float, accessor, AccessorType.IPUT));
+            setFloat(methodHandle, floatValue,
+                    resultFor(primitive, PrimitiveType.Float, accessor, AccessorType.SPUT));
+            getFloat(methodHandle, valueHolder, floatValue,
+                    resultFor(primitive, PrimitiveType.Float, accessor, AccessorType.IGET));
+            getFloat(methodHandle, floatValue,
+                     resultFor(primitive, PrimitiveType.Float, accessor, AccessorType.SGET));
+
+            double doubleValue = value instanceof Double ? ((Double)value).doubleValue() : -1.0;
+            setDouble(methodHandle, valueHolder, doubleValue,
+                      resultFor(primitive, PrimitiveType.Double, accessor, AccessorType.IPUT));
+            setDouble(methodHandle, doubleValue,
+                      resultFor(primitive, PrimitiveType.Double, accessor, AccessorType.SPUT));
+            getDouble(methodHandle, valueHolder, doubleValue,
+                      resultFor(primitive, PrimitiveType.Double, accessor, AccessorType.IGET));
+            getDouble(methodHandle, doubleValue,
+                      resultFor(primitive, PrimitiveType.Double, accessor, AccessorType.SGET));
+
+            String stringValue = value instanceof String ? ((String) value) : "No Spock, no";
+            setString(methodHandle, valueHolder, stringValue,
+                      resultFor(primitive, PrimitiveType.String, accessor, AccessorType.IPUT));
+            setString(methodHandle, stringValue,
+                      resultFor(primitive, PrimitiveType.String, accessor, AccessorType.SPUT));
+            getString(methodHandle, valueHolder, stringValue,
+                      resultFor(primitive, PrimitiveType.String, accessor, AccessorType.IGET));
+            getString(methodHandle, stringValue,
+                      resultFor(primitive, PrimitiveType.String, accessor, AccessorType.SGET));
+        }
+
+        public static void main() throws Throwable {
+            ValueHolder valueHolder = new ValueHolder();
+            MethodHandles.Lookup lookup = MethodHandles.lookup();
+
+            boolean [] booleans = { false, true, false };
+            for (boolean b : booleans) {
+                Boolean boxed = new Boolean(b);
+                tryAccessor(lookup.findSetter(ValueHolder.class, "m_z", boolean.class),
+                            valueHolder, PrimitiveType.Boolean, boxed, AccessorType.IPUT);
+                tryAccessor(lookup.findGetter(ValueHolder.class, "m_z", boolean.class),
+                            valueHolder, PrimitiveType.Boolean, boxed, AccessorType.IGET);
+                assertTrue(valueHolder.m_z == b);
+                tryAccessor(lookup.findStaticSetter(ValueHolder.class, "s_z", boolean.class),
+                            valueHolder, PrimitiveType.Boolean, boxed, AccessorType.SPUT);
+                tryAccessor(lookup.findStaticGetter(ValueHolder.class, "s_z", boolean.class),
+                            valueHolder, PrimitiveType.Boolean, boxed, AccessorType.SGET);
+                assertTrue(ValueHolder.s_z == b);
+            }
+
+            byte [] bytes = { (byte)0x73, (byte)0xfe };
+            for (byte b : bytes) {
+                Byte boxed = new Byte(b);
+                tryAccessor(lookup.findSetter(ValueHolder.class, "m_b", byte.class),
+                            valueHolder, PrimitiveType.Byte, boxed, AccessorType.IPUT);
+                tryAccessor(lookup.findGetter(ValueHolder.class, "m_b", byte.class),
+                            valueHolder, PrimitiveType.Byte, boxed, AccessorType.IGET);
+                assertTrue(valueHolder.m_b == b);
+                tryAccessor(lookup.findStaticSetter(ValueHolder.class, "s_b", byte.class),
+                            valueHolder, PrimitiveType.Byte, boxed, AccessorType.SPUT);
+                tryAccessor(lookup.findStaticGetter(ValueHolder.class, "s_b", byte.class),
+                            valueHolder, PrimitiveType.Byte, boxed, AccessorType.SGET);
+                assertTrue(ValueHolder.s_b == b);
+            }
+
+            char [] chars = { 'a', 'b', 'c' };
+            for (char c : chars) {
+                Character boxed = new Character(c);
+                tryAccessor(lookup.findSetter(ValueHolder.class, "m_c", char.class),
+                            valueHolder, PrimitiveType.Char, boxed, AccessorType.IPUT);
+                tryAccessor(lookup.findGetter(ValueHolder.class, "m_c", char.class),
+                            valueHolder, PrimitiveType.Char, boxed, AccessorType.IGET);
+                assertTrue(valueHolder.m_c == c);
+                tryAccessor(lookup.findStaticSetter(ValueHolder.class, "s_c", char.class),
+                            valueHolder, PrimitiveType.Char, boxed, AccessorType.SPUT);
+                tryAccessor(lookup.findStaticGetter(ValueHolder.class, "s_c", char.class),
+                            valueHolder, PrimitiveType.Char, boxed, AccessorType.SGET);
+                assertTrue(ValueHolder.s_c == c);
+            }
+
+            short [] shorts = { (short)0x1234, (short)0x4321 };
+            for (short s : shorts) {
+                Short boxed = new Short(s);
+                tryAccessor(lookup.findSetter(ValueHolder.class, "m_s", short.class),
+                            valueHolder, PrimitiveType.Short, boxed, AccessorType.IPUT);
+                tryAccessor(lookup.findGetter(ValueHolder.class, "m_s", short.class),
+                            valueHolder, PrimitiveType.Short, boxed, AccessorType.IGET);
+                assertTrue(valueHolder.m_s == s);
+                tryAccessor(lookup.findStaticSetter(ValueHolder.class, "s_s", short.class),
+                            valueHolder, PrimitiveType.Short, boxed, AccessorType.SPUT);
+                tryAccessor(lookup.findStaticGetter(ValueHolder.class, "s_s", short.class),
+                            valueHolder, PrimitiveType.Short, boxed, AccessorType.SGET);
+                assertTrue(ValueHolder.s_s == s);
+            }
+
+            int [] ints = { -100000000, 10000000 };
+            for (int i : ints) {
+                Integer boxed = new Integer(i);
+                tryAccessor(lookup.findSetter(ValueHolder.class, "m_i", int.class),
+                            valueHolder, PrimitiveType.Int, boxed, AccessorType.IPUT);
+                tryAccessor(lookup.findGetter(ValueHolder.class, "m_i", int.class),
+                            valueHolder, PrimitiveType.Int, boxed, AccessorType.IGET);
+                assertTrue(valueHolder.m_i == i);
+                tryAccessor(lookup.findStaticSetter(ValueHolder.class, "s_i", int.class),
+                            valueHolder, PrimitiveType.Int, boxed, AccessorType.SPUT);
+                tryAccessor(lookup.findStaticGetter(ValueHolder.class, "s_i", int.class),
+                            valueHolder, PrimitiveType.Int, boxed, AccessorType.SGET);
+                assertTrue(ValueHolder.s_i == i);
+            }
+
+            float [] floats = { 0.99f, -1.23e-17f };
+            for (float f : floats) {
+                Float boxed = new Float(f);
+                tryAccessor(lookup.findSetter(ValueHolder.class, "m_f", float.class),
+                            valueHolder, PrimitiveType.Float, boxed, AccessorType.IPUT);
+                tryAccessor(lookup.findGetter(ValueHolder.class, "m_f", float.class),
+                            valueHolder, PrimitiveType.Float, boxed, AccessorType.IGET);
+                assertTrue(valueHolder.m_f == f);
+                tryAccessor(lookup.findStaticSetter(ValueHolder.class, "s_f", float.class),
+                            valueHolder, PrimitiveType.Float, boxed, AccessorType.SPUT);
+                tryAccessor(lookup.findStaticGetter(ValueHolder.class, "s_f", float.class),
+                            valueHolder, PrimitiveType.Float, boxed, AccessorType.SGET);
+                assertTrue(ValueHolder.s_f == f);
+            }
+
+            double [] doubles = { 0.44444444444e37, -0.555555555e-37 };
+            for (double d : doubles) {
+                Double boxed = new Double(d);
+                tryAccessor(lookup.findSetter(ValueHolder.class, "m_d", double.class),
+                            valueHolder, PrimitiveType.Double, boxed, AccessorType.IPUT);
+                tryAccessor(lookup.findGetter(ValueHolder.class, "m_d", double.class),
+                            valueHolder, PrimitiveType.Double, boxed, AccessorType.IGET);
+                assertTrue(valueHolder.m_d == d);
+                tryAccessor(lookup.findStaticSetter(ValueHolder.class, "s_d", double.class),
+                            valueHolder, PrimitiveType.Double, boxed, AccessorType.SPUT);
+                tryAccessor(lookup.findStaticGetter(ValueHolder.class, "s_d", double.class),
+                            valueHolder, PrimitiveType.Double, boxed, AccessorType.SGET);
+                assertTrue(ValueHolder.s_d == d);
+            }
+
+            long [] longs = { 0x0123456789abcdefl, 0xfedcba9876543210l };
+            for (long j : longs) {
+                Long boxed = new Long(j);
+                tryAccessor(lookup.findSetter(ValueHolder.class, "m_j", long.class),
+                            valueHolder, PrimitiveType.Long, boxed, AccessorType.IPUT);
+                tryAccessor(lookup.findGetter(ValueHolder.class, "m_j", long.class),
+                            valueHolder, PrimitiveType.Long, boxed, AccessorType.IGET);
+                assertTrue(valueHolder.m_j == j);
+                tryAccessor(lookup.findStaticSetter(ValueHolder.class, "s_j", long.class),
+                            valueHolder, PrimitiveType.Long, boxed, AccessorType.SPUT);
+                tryAccessor(lookup.findStaticGetter(ValueHolder.class, "s_j", long.class),
+                            valueHolder, PrimitiveType.Long, boxed, AccessorType.SGET);
+                assertTrue(ValueHolder.s_j == j);
+            }
+
+            String [] strings = { "octopus", "crab" };
+            for (String s : strings) {
+                tryAccessor(lookup.findSetter(ValueHolder.class, "m_l", String.class),
+                            valueHolder, PrimitiveType.String, s, AccessorType.IPUT);
+                tryAccessor(lookup.findGetter(ValueHolder.class, "m_l", String.class),
+                            valueHolder, PrimitiveType.String, s, AccessorType.IGET);
+                assertTrue(s.equals(valueHolder.m_l));
+                tryAccessor(lookup.findStaticSetter(ValueHolder.class, "s_l", String.class),
+                            valueHolder, PrimitiveType.String, s, AccessorType.SPUT);
+                tryAccessor(lookup.findStaticGetter(ValueHolder.class, "s_l", String.class),
+                            valueHolder, PrimitiveType.String, s, AccessorType.SGET);
+                assertTrue(s.equals(ValueHolder.s_l));
+            }
+
+            System.out.println("Passed MethodHandle.invokeExact() tests for accessors.");
+        }
+    }
+
+    public static class FindAccessorTester extends Tester {
+        public static void main() throws Throwable {
+            // NB: having a static field test here is essential for this
+            // test. MethodHandles need to ensure the class (ValueHolder)
+            // is initialized, and this happens in the invoke-polymorphic
+            // dispatch.
+            MethodHandles.Lookup lookup = MethodHandles.lookup();
+            try {
+                MethodHandle mh = lookup.findStaticGetter(ValueHolder.class, "s_fi", int.class);
+                int initialValue = (int)mh.invokeExact();
+                System.out.println(initialValue);
+            } catch (NoSuchFieldException e) { unreachable(); }
+            try {
+                MethodHandle mh = lookup.findStaticSetter(ValueHolder.class, "s_i", int.class);
+                mh.invokeExact(0);
+            } catch (NoSuchFieldException e) { unreachable(); }
+            try {
+                lookup.findStaticGetter(ValueHolder.class, "s_fi", byte.class);
+                unreachable();
+            } catch (NoSuchFieldException e) {}
+            try {
+                lookup.findGetter(ValueHolder.class, "s_fi", byte.class);
+                unreachable();
+            } catch (NoSuchFieldException e) {}
+            try {
+                lookup.findStaticSetter(ValueHolder.class, "s_fi", int.class);
+                unreachable();
+            } catch (IllegalAccessException e) {}
+
+            lookup.findGetter(ValueHolder.class, "m_fi", int.class);
+            try {
+                lookup.findGetter(ValueHolder.class, "m_fi", byte.class);
+                unreachable();
+            } catch (NoSuchFieldException e) {}
+            try {
+                lookup.findStaticGetter(ValueHolder.class, "m_fi", byte.class);
+                unreachable();
+            } catch (NoSuchFieldException e) {}
+            try {
+                lookup.findSetter(ValueHolder.class, "m_fi", int.class);
+                unreachable();
+            } catch (IllegalAccessException e) {}
+
+            System.out.println("Passed MethodHandles.Lookup tests for accessors.");
+        }
+    }
+
+    public static class InvokeTester extends Tester {
+        private static void testStaticGetter() throws Throwable {
+            MethodHandles.Lookup lookup = MethodHandles.lookup();
+            MethodHandle h0 = lookup.findStaticGetter(ValueHolder.class, "s_fi", int.class);
+            h0.invoke();
+            Number t = (Number)h0.invoke();
+            int u = (int)h0.invoke();
+            Integer v = (Integer)h0.invoke();
+            long w = (long)h0.invoke();
+            try {
+                byte x = (byte)h0.invoke();
+                unreachable();
+            } catch (WrongMethodTypeException e) {}
+            try {
+                String y = (String)h0.invoke();
+                unreachable();
+            } catch (WrongMethodTypeException e) {}
+            try {
+                Long z = (Long)h0.invoke();
+                unreachable();
+            } catch (WrongMethodTypeException e) {}
+        }
+
+        private static void testMemberGetter() throws Throwable {
+            ValueHolder valueHolder = new ValueHolder();
+            MethodHandles.Lookup lookup = MethodHandles.lookup();
+            MethodHandle h0 = lookup.findGetter(ValueHolder.class, "m_fi", int.class);
+            h0.invoke(valueHolder);
+            Number t = (Number)h0.invoke(valueHolder);
+            int u = (int)h0.invoke(valueHolder);
+            Integer v = (Integer)h0.invoke(valueHolder);
+            long w = (long)h0.invoke(valueHolder);
+            try {
+                byte x = (byte)h0.invoke(valueHolder);
+                unreachable();
+            } catch (WrongMethodTypeException e) {}
+            try {
+                String y = (String)h0.invoke(valueHolder);
+                unreachable();
+            } catch (WrongMethodTypeException e) {}
+            try {
+                Long z = (Long)h0.invoke(valueHolder);
+                unreachable();
+            } catch (WrongMethodTypeException e) {}
+        }
+
+        /*package*/ static Number getDoubleAsNumber() {
+            return new Double(1.4e77);
+        }
+        /*package*/ static Number getFloatAsNumber() {
+            return new Float(7.77);
+        }
+        /*package*/ static Object getFloatAsObject() {
+            return new Float(-7.77);
+        }
+
+        private static void testMemberSetter() throws Throwable {
+            ValueHolder valueHolder = new ValueHolder();
+            MethodHandles.Lookup lookup = MethodHandles.lookup();
+            MethodHandle h0 = lookup.findSetter(ValueHolder.class, "m_f", float.class);
+            h0.invoke(valueHolder, 0.22f);
+            h0.invoke(valueHolder, new Float(1.11f));
+            Number floatNumber = getFloatAsNumber();
+            h0.invoke(valueHolder, floatNumber);
+            assertTrue(valueHolder.m_f == floatNumber.floatValue());
+            Object objNumber = getFloatAsObject();
+            h0.invoke(valueHolder, objNumber);
+            assertTrue(valueHolder.m_f == ((Float) objNumber).floatValue());
+            try {
+              h0.invoke(valueHolder, (Float)null);
+              unreachable();
+            } catch (NullPointerException e) {}
+
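+            // Unlike invokeExact(), invoke() applies asType() conversions, so
+            // widening the primitive argument to float is accepted here.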
+            h0.invoke(valueHolder, (byte)1);
+            h0.invoke(valueHolder, (short)2);
+            h0.invoke(valueHolder, 3);
+            h0.invoke(valueHolder, 4l);
+
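+            // asType() also converts the setter's void result to the requested
+            // type, producing null for references and zero for primitives.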
+            assertTrue(null == (Object) h0.invoke(valueHolder, 33));
+            assertTrue(0.0f == (float) h0.invoke(valueHolder, 33));
+            assertTrue(0l == (long) h0.invoke(valueHolder, 33));
+
+            try {
+                h0.invoke(valueHolder, 0.33);
+                unreachable();
+            } catch (WrongMethodTypeException e) {}
+            try {
+                Number doubleNumber = getDoubleAsNumber();
+                h0.invoke(valueHolder, doubleNumber);
+                unreachable();
+            } catch (ClassCastException e) {}
+            try {
+                Number doubleNumber = null;
+                h0.invoke(valueHolder, doubleNumber);
+                unreachable();
+            } catch (NullPointerException e) {}
+            try {
+                // Mismatched return type - float != void
+                float tmp = (float)h0.invoke(valueHolder, 0.45f);
+                assertTrue(tmp == 0.0);
+            } catch (Exception e) { unreachable(); }
+            try {
+                h0.invoke(valueHolder, "bam");
+                unreachable();
+            } catch (WrongMethodTypeException e) {}
+            try {
+                String s = null;
+                h0.invoke(valueHolder, s);
+                unreachable();
+            } catch (WrongMethodTypeException e) {}
+        }
+
+        private static void testStaticSetter() throws Throwable {
+            MethodHandles.Lookup lookup = MethodHandles.lookup();
+            MethodHandle h0 = lookup.findStaticSetter(ValueHolder.class, "s_f", float.class);
+            h0.invoke(0.22f);
+            h0.invoke(new Float(1.11f));
+            Number floatNumber = new Float(0.88f);
+            h0.invoke(floatNumber);
+            assertTrue(ValueHolder.s_f == floatNumber.floatValue());
+
+            try {
+              h0.invoke((Float)null);
+              unreachable();
+            } catch (NullPointerException e) {}
+
+            h0.invoke((byte)1);
+            h0.invoke((short)2);
+            h0.invoke(3);
+            h0.invoke(4l);
+
+            assertTrue(null == (Object) h0.invoke(33));
+            assertTrue(0.0f == (float) h0.invoke(33));
+            assertTrue(0l == (long) h0.invoke(33));
+
+            try {
+                h0.invoke(0.33);
+                unreachable();
+            } catch (WrongMethodTypeException e) {}
+            try {
+                Number doubleNumber = getDoubleAsNumber();
+                h0.invoke(doubleNumber);
+                unreachable();
+            } catch (ClassCastException e) {}
+            try {
+                Number doubleNumber = new Double(1.01);
+                doubleNumber = (doubleNumber.doubleValue() != 0.1) ? null : doubleNumber;
+                h0.invoke(doubleNumber);
+                unreachable();
+            } catch (NullPointerException e) {}
+            try {
+                // Mismatched return type - float != void
+                float tmp = (float)h0.invoke(0.45f);
+                assertTrue(tmp == 0.0);
+            } catch (Exception e) { unreachable(); }
+            try {
+                h0.invoke("bam");
+                unreachable();
+            } catch (WrongMethodTypeException e) {}
+            try {
+                String s = null;
+                h0.invoke(s);
+                unreachable();
+            } catch (WrongMethodTypeException e) {}
+        }
+
+        public static void main() throws Throwable {
+            testStaticGetter();
+            testMemberGetter();
+            testStaticSetter();
+            testMemberSetter();
+            System.out.println("Passed MethodHandle.invoke() tests for accessors.");
+        }
+    }
+
+    public static void main(String[] args) throws Throwable {
+        // FindAccessorTester should be the first test class run in this
+        // file to ensure the class initialization test is exercised.
+        FindAccessorTester.main();
+        InvokeExactTester.main();
+        InvokeTester.main();
+    }
+}
diff --git a/test/Android.arm_vixl.mk b/test/Android.arm_vixl.mk
index 0bbcb64..21b31b4 100644
--- a/test/Android.arm_vixl.mk
+++ b/test/Android.arm_vixl.mk
@@ -16,382 +16,36 @@
 
 # Known broken tests for the ARM VIXL backend.
 TEST_ART_BROKEN_OPTIMIZING_ARM_VIXL_RUN_TESTS := \
-  002-sleep \
   003-omnibus-opcodes \
-  004-InterfaceTest \
-  004-JniTest \
-  004-NativeAllocations \
-  004-ReferenceMap \
-  004-SignalTest \
-  004-StackWalk \
-  004-ThreadStress \
-  004-UnsafeTest \
-  004-checker-UnsafeTest18 \
-  005-annotations \
-  006-args \
-  008-exceptions \
-  009-instanceof \
-  011-array-copy \
-  012-math \
-  015-switch \
-  017-float \
-  018-stack-overflow \
-  019-wrong-array-type \
   020-string \
   021-string2 \
-  022-interface \
-  023-many-interfaces \
-  024-illegal-access \
-  025-access-controller \
-  027-arithmetic \
-  028-array-write \
-  031-class-attributes \
-  032-concrete-sub \
-  035-enum \
-  036-finalizer \
-  037-inherit \
-  041-narrowing \
   042-new-instance \
-  043-privates \
   044-proxy \
-  045-reflect-array \
-  046-reflect \
-  047-returns \
-  048-reflect-v8 \
-  049-show-object \
-  050-sync-test \
-  051-thread \
-  052-verifier-fun \
-  053-wait-some \
-  054-uncaught \
-  058-enum-order \
-  059-finalizer-throw \
-  061-out-of-memory \
-  062-character-encodings \
-  063-process-manager \
-  064-field-access \
-  065-mismatched-implements \
-  066-mismatched-super \
-  067-preemptive-unpark \
-  068-classloader \
-  069-field-type \
-  070-nio-buffer \
-  071-dexfile \
-  072-precise-gc \
-  074-gc-thrash \
-  075-verification-error \
-  076-boolean-put \
-  079-phantom \
   080-oom-throw \
-  080-oom-throw-with-finalizer \
-  081-hot-exceptions \
   082-inline-execute \
-  083-compiler-regressions \
-  086-null-super \
-  087-gc-after-link \
-  088-monitor-verification \
-  090-loop-formation \
-  091-override-package-private-method \
-  093-serialization \
-  094-pattern \
   096-array-copy-concurrent-gc \
-  098-ddmc \
   099-vmdebug \
   100-reflect2 \
-  101-fibonacci \
-  102-concurrent-gc \
   103-string-append \
-  104-growth-limit \
-  106-exceptions2 \
-  107-int-math2 \
-  108-check-cast \
-  109-suspend-check \
-  112-double-math \
-  113-multidex \
   114-ParallelGC \
-  117-nopatchoat \
-  119-noimage-patchoat \
-  120-hashcode \
-  121-modifiers \
   122-npe \
-  123-compiler-regressions-mt \
-  123-inline-execute2 \
-  127-checker-secondarydex \
   129-ThreadGetId \
-  131-structural-change \
-  132-daemon-locks-shutdown \
-  133-static-invoke-super \
-  134-reg-promotion \
-  135-MirandaDispatch \
-  136-daemon-jni-shutdown \
   137-cfi \
-  138-duplicate-classes-check2 \
-  139-register-natives \
-  140-field-packing \
-  141-class-unload \
-  142-classloader2 \
   144-static-field-sigquit \
-  145-alloc-tracking-stress \
-  146-bad-interface \
-  150-loadlibrary \
-  201-built-in-except-detail-messages \
-  302-float-conversion \
-  304-method-tracing \
-  403-optimizing-long \
-  404-optimizing-allocator \
-  405-optimizing-long-allocator \
-  406-fields \
-  407-arrays \
-  410-floats \
-  411-optimizing-arith-mul \
   412-new-array \
-  413-regalloc-regression \
-  414-optimizing-arith-sub \
-  414-static-fields \
-  415-optimizing-arith-neg \
-  416-optimizing-arith-not \
-  417-optimizing-arith-div \
-  419-long-parameter \
-  421-exceptions \
-  421-large-frame \
-  422-instanceof \
-  422-type-conversion \
-  423-invoke-interface \
-  424-checkcast \
-  425-invoke-super \
-  426-monitor \
-  427-bitwise \
-  427-bounds \
-  428-optimizing-arith-rem \
-  429-ssa-builder \
-  430-live-register-slow-path \
-  431-optimizing-arith-shifts \
-  431-type-propagation \
-  432-optimizing-cmp \
-  434-invoke-direct \
-  436-rem-float \
-  436-shift-constant \
-  437-inline \
-  438-volatile \
   439-npe \
-  439-swap-double \
-  440-stmp \
-  441-checker-inliner \
-  442-checker-constant-folding \
-  444-checker-nce \
-  445-checker-licm \
-  446-checker-inliner2 \
-  447-checker-inliner3 \
-  448-multiple-returns \
-  449-checker-bce \
   450-checker-types \
-  451-regression-add-float \
-  451-spill-splot \
-  452-multiple-returns2 \
-  453-not-byte \
-  454-get-vreg \
-  456-baseline-array-set \
-  457-regs \
-  458-checker-instruct-simplification \
-  458-long-to-fpu \
-  459-dead-phi \
-  460-multiple-returns3 \
-  461-get-reference-vreg \
-  462-checker-inlining-dex-files \
-  463-checker-boolean-simplifier \
-  464-checker-inline-sharpen-calls \
-  466-get-live-vreg \
-  467-regalloc-pair \
-  468-checker-bool-simplif-regression \
-  469-condition-materialization \
-  470-huge-method \
-  471-deopt-environment \
-  472-type-propagation \
-  473-checker-inliner-constants \
-  474-checker-boolean-input \
-  474-fp-sub-neg \
-  475-regression-inliner-ids \
-  476-checker-ctor-memory-barrier \
-  477-checker-bound-type \
-  477-long-2-float-convers-precision \
-  478-checker-clinit-check-pruning \
-  478-checker-inliner-nested-loop \
-  480-checker-dead-blocks \
-  482-checker-loop-back-edge-use \
-  483-dce-block \
-  484-checker-register-hints \
-  485-checker-dce-loop-update \
-  485-checker-dce-switch \
-  486-checker-must-do-null-check \
-  487-checker-inline-calls \
   488-checker-inline-recursive-calls \
-  490-checker-inline \
-  491-current-method \
-  492-checker-inline-invoke-interface \
-  493-checker-inline-invoke-interface \
-  494-checker-instanceof-tests \
-  495-checker-checkcast-tests \
-  496-checker-inlining-class-loader \
-  497-inlining-and-class-loader \
-  498-type-propagation \
-  499-bce-phi-array-length \
-  500-instanceof \
-  501-null-constant-dce \
-  501-regression-packed-switch \
-  503-dead-instructions \
-  504-regression-baseline-entry \
-  508-checker-disassembly \
-  510-checker-try-catch \
-  513-array-deopt \
-  514-shifts \
   515-dce-dominator \
-  517-checker-builder-fallthrough \
-  518-null-array-get \
-  519-bound-load-class \
   520-equivalent-phi \
-  521-checker-array-set-null \
-  521-regression-integer-field-set \
-  522-checker-regression-monitor-exit \
-  523-checker-can-throw-regression \
   525-checker-arrays-fields1 \
   525-checker-arrays-fields2 \
-  526-checker-caller-callee-regs \
-  526-long-regalloc \
   527-checker-array-access-split \
-  528-long-hint \
-  529-checker-unresolved \
-  529-long-split \
-  530-checker-loops1 \
-  530-checker-loops2 \
-  530-checker-loops3 \
-  530-checker-lse \
-  530-checker-regression-reftyp-final \
-  530-instanceof-checkcast \
-  532-checker-nonnull-arrayset \
-  534-checker-bce-deoptimization \
-  535-deopt-and-inlining \
-  535-regression-const-val \
-  536-checker-intrinsic-optimization \
-  536-checker-needs-access-check \
-  537-checker-arraycopy \
-  537-checker-inline-and-unverified \
-  537-checker-jump-over-jump \
   538-checker-embed-constants \
-  540-checker-rtp-bug \
-  541-regression-inlined-deopt \
-  542-bitfield-rotates \
-  542-unresolved-access-check \
-  543-checker-dce-trycatch \
-  543-env-long-ref \
-  545-tracing-and-jit \
-  546-regression-simplify-catch \
-  548-checker-inlining-and-dce \
-  550-checker-multiply-accumulate \
-  550-checker-regression-wide-store \
-  551-checker-clinit \
-  551-checker-shifter-operand \
-  551-implicit-null-checks \
-  551-invoke-super \
-  552-checker-primitive-typeprop \
   552-checker-sharpening \
-  552-invoke-non-existent-super \
-  553-invoke-super \
-  554-checker-rtp-checkcast \
-  555-UnsafeGetLong-regression \
-  556-invoke-super \
-  557-checker-instruct-simplifier-ror \
-  558-switch \
-  559-bce-ssa \
-  559-checker-irreducible-loop \
-  559-checker-rtp-ifnotnull \
-  560-packed-switch \
-  561-divrem \
-  561-shared-slowpaths \
-  562-bce-preheader \
-  562-no-intermediate \
-  563-checker-fakestring \
-  564-checker-bitcount \
-  564-checker-inline-loop \
-  564-checker-irreducible-loop \
-  564-checker-negbitwise \
-  565-checker-condition-liveness \
-  565-checker-doublenegbitwise \
-  565-checker-irreducible-loop \
-  565-checker-rotate \
-  566-checker-codegen-select \
-  566-checker-signum \
-  566-polymorphic-inlining \
-  567-checker-compare \
-  568-checker-onebit \
+  562-checker-no-intermediate \
   570-checker-osr \
-  570-checker-select \
-  571-irreducible-loop \
-  572-checker-array-get-regression \
-  573-checker-checkcast-regression \
-  574-irreducible-and-constant-area \
-  575-checker-isnan \
-  575-checker-string-init-alias \
-  577-checker-fp2int \
-  578-bce-visit \
-  580-checker-round \
-  580-checker-string-fact-intrinsics \
-  581-rtp \
-  582-checker-bce-length \
-  583-checker-zero \
-  584-checker-div-bool \
-  586-checker-null-array-get \
-  587-inline-class-error \
-  588-checker-irreducib-lifetime-hole \
-  589-super-imt \
-  590-checker-arr-set-null-regression \
-  591-new-instance-string \
-  592-checker-regression-bool-input \
-  593-checker-boolean-2-integral-conv \
-  593-checker-long-2-float-regression \
-  593-checker-shift-and-simplifier \
-  594-checker-array-alias \
-  594-invoke-super \
-  594-load-string-regression \
-  595-error-class \
-  596-checker-dead-phi \
-  597-deopt-new-string \
-  599-checker-irreducible-loop \
-  600-verifier-fails \
-  601-method-access \
   602-deoptimizeable \
-  603-checker-instanceof \
-  604-hot-static-interface \
-  605-new-string-from-bytes \
-  608-checker-unresolved-lse \
-  609-checker-inline-interface \
-  609-checker-x86-bounds-check \
-  610-arraycopy \
-  611-checker-simplify-if \
-  612-jit-dex-cache \
-  613-inlining-dex-cache \
-  614-checker-dump-constant-location \
-  615-checker-arm64-store-zero \
-  617-clinit-oome \
-  618-checker-induction \
   700-LoadArgRegs \
-  701-easy-div-rem \
-  702-LargeBranchOffset \
-  703-floating-point-div \
-  704-multiply-accumulate \
-  705-register-conflict \
   800-smali \
-  802-deoptimization \
-  960-default-smali \
-  961-default-iface-resolution-gen \
-  963-default-range-smali \
-  965-default-verify \
-  966-default-conflict \
-  967-default-ame \
-  968-default-partial-compile-gen \
-  969-iface-super \
-  971-iface-super \
-  972-default-imt-collision \
-  972-iface-super-multidex \
-  973-default-multidex \
-  974-verify-interface-super \
-  975-iface-private
+
diff --git a/test/Android.bp b/test/Android.bp
index be1864c..fe20f29 100644
--- a/test/Android.bp
+++ b/test/Android.bp
@@ -253,6 +253,10 @@
         "907-get-loaded-classes/get_loaded_classes.cc",
         "908-gc-start-finish/gc_callbacks.cc",
         "909-attach-agent/attach.cc",
+        "910-methods/methods.cc",
+        "911-get-stack-trace/stack_trace.cc",
+        "912-classes/classes.cc",
+        "913-heaps/heaps.cc",
     ],
     shared_libs: [
         "libbase",
@@ -262,10 +266,7 @@
 art_cc_test_library {
     name: "libtiagent",
     defaults: ["libtiagent-defaults"],
-    shared_libs: [
-        "libart",
-        "libopenjdkjvmti",
-    ],
+    shared_libs: ["libart"],
 }
 
 art_cc_test_library {
@@ -274,10 +275,7 @@
         "libtiagent-defaults",
         "art_debug_defaults",
     ],
-    shared_libs: [
-        "libartd",
-        "libopenjdkjvmtid",
-    ],
+    shared_libs: ["libartd"],
 }
 
 cc_defaults {
diff --git a/test/Android.run-test.mk b/test/Android.run-test.mk
index 1e3a997..e92ba1a 100644
--- a/test/Android.run-test.mk
+++ b/test/Android.run-test.mk
@@ -276,6 +276,10 @@
   907-get-loaded-classes \
   908-gc-start-finish \
   909-attach-agent \
+  910-methods \
+  911-get-stack-trace \
+  912-classes \
+  913-heaps \
 
 ifneq (,$(filter target,$(TARGET_TYPES)))
   ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,target,$(RUN_TYPES),$(PREBUILD_TYPES), \
@@ -370,11 +374,15 @@
 # Tests that are broken with GC stress.
 # * 137-cfi needs to unwind a second forked process. We're using a primitive sleep to wait till we
 #   hope the second process got into the expected state. The slowness of gcstress makes this bad.
+# * 908-gc-start-finish expects GCs only to be run at clear points. The reduced heap size makes
+#   this non-deterministic. Same for 913.
 # * 961-default-iface-resolution-gen and 964-default-iface-init-gen are very long tests that often
 #   will take more than the timeout to run when gcstress is enabled. This is because gcstress
 #   slows down allocations significantly which these tests do a lot.
 TEST_ART_BROKEN_GCSTRESS_RUN_TESTS := \
   137-cfi \
+  908-gc-start-finish \
+  913-heaps \
   961-default-iface-resolution-gen \
   964-default-iface-init-gen
 
@@ -461,8 +469,10 @@
 # 802 and 570-checker-osr:
 # This test dynamically enables tracing to force a deoptimization. This makes the test meaningless
 # when already tracing, and writes an error message that we do not want to check for.
+# 130 occasional timeout b/32383962.
 TEST_ART_BROKEN_TRACING_RUN_TESTS := \
   087-gc-after-link \
+  130-hprof \
   137-cfi \
   141-class-unload \
   570-checker-osr \
@@ -493,8 +503,10 @@
 # also uses Generic JNI instead of the JNI compiler.
 # Test 906 iterates the heap filtering with different options. No instances should be created
 # between those runs to be able to have precise checks.
+# Test 902 hits races with the JIT compiler. b/32821077
 TEST_ART_BROKEN_JIT_RUN_TESTS := \
   137-cfi \
+  902-hello-transformation \
   904-object-allocation \
   906-iterate-heap \
 
@@ -598,11 +610,8 @@
 # Tests that should fail in the read barrier configuration with the Optimizing compiler (AOT).
 # 484: Baker's fast path based read barrier compiler instrumentation generates code containing
 #      more parallel moves on x86, thus some Checker assertions may fail.
-# 527: On ARM64 and ARM, the read barrier instrumentation does not support the HIntermediateAddress
-#      instruction yet (b/26601270).
 TEST_ART_BROKEN_OPTIMIZING_READ_BARRIER_RUN_TESTS := \
-  484-checker-register-hints \
-  527-checker-array-access-split
+  484-checker-register-hints
 
 # Tests that should fail in the read barrier configuration with JIT (Optimizing compiler).
 TEST_ART_BROKEN_JIT_READ_BARRIER_RUN_TESTS :=
@@ -795,7 +804,9 @@
   $(OUT_DIR)/$(ART_TEST_LIST_host_$(ART_HOST_ARCH)_libnativebridgetest) \
   $(ART_HOST_OUT_SHARED_LIBRARIES)/libjavacore$(ART_HOST_SHLIB_EXTENSION) \
   $(ART_HOST_OUT_SHARED_LIBRARIES)/libopenjdk$(ART_HOST_SHLIB_EXTENSION) \
-  $(ART_HOST_OUT_SHARED_LIBRARIES)/libopenjdkd$(ART_HOST_SHLIB_EXTENSION)
+  $(ART_HOST_OUT_SHARED_LIBRARIES)/libopenjdkd$(ART_HOST_SHLIB_EXTENSION) \
+  $(ART_HOST_OUT_SHARED_LIBRARIES)/libopenjdkjvmti$(ART_HOST_SHLIB_EXTENSION) \
+  $(ART_HOST_OUT_SHARED_LIBRARIES)/libopenjdkjvmtid$(ART_HOST_SHLIB_EXTENSION) \
 
 ifneq ($(HOST_PREFER_32_BIT),true)
 ART_TEST_HOST_RUN_TEST_DEPENDENCIES += \
@@ -808,7 +819,10 @@
   $(OUT_DIR)/$(ART_TEST_LIST_host_$(2ND_ART_HOST_ARCH)_libnativebridgetest) \
   $(2ND_ART_HOST_OUT_SHARED_LIBRARIES)/libjavacore$(ART_HOST_SHLIB_EXTENSION) \
   $(2ND_ART_HOST_OUT_SHARED_LIBRARIES)/libopenjdk$(ART_HOST_SHLIB_EXTENSION) \
-  $(2ND_ART_HOST_OUT_SHARED_LIBRARIES)/libopenjdkd$(ART_HOST_SHLIB_EXTENSION)
+  $(2ND_ART_HOST_OUT_SHARED_LIBRARIES)/libopenjdkd$(ART_HOST_SHLIB_EXTENSION) \
+  $(2ND_ART_HOST_OUT_SHARED_LIBRARIES)/libopenjdkjvmti$(ART_HOST_SHLIB_EXTENSION) \
+  $(2ND_ART_HOST_OUT_SHARED_LIBRARIES)/libopenjdkjvmtid$(ART_HOST_SHLIB_EXTENSION) \
+
 endif
 
 # Create a rule to build and run a tests following the form:
diff --git a/test/562-no-intermediate/src/Main.java b/test/DexToDexDecompiler/Main.java
similarity index 67%
copy from test/562-no-intermediate/src/Main.java
copy to test/DexToDexDecompiler/Main.java
index 3b74d6f..8f5075a 100644
--- a/test/562-no-intermediate/src/Main.java
+++ b/test/DexToDexDecompiler/Main.java
@@ -15,13 +15,20 @@
  */
 
 public class Main {
-
-  /// CHECK-START-ARM64: int Main.main(String[]) register_allocator (after)
-  /// CHECK-NOT: IntermediateAddress
-  public static void main(String[] args) {
-    array[index] += Math.cos(42);
+  Main() {
+    // Will be quickened with RETURN_VOID_NO_BARRIER.
   }
 
-  static int index = 0;
-  static double[] array = new double[2];
+  public static void main() {
+    Main m = new Main();
+    Object o = m;
+    // The call and field accesses will be quickened.
+    m.foo(m.a);
+
+    // The checkcast will be quickened.
+    m.foo(((Main)o).a);
+  }
+
+  int a;
+  void foo(int a) {}
 }
diff --git a/test/MyClassNatives/MyClassNatives.java b/test/MyClassNatives/MyClassNatives.java
index 3cb1f23..c601e3e 100644
--- a/test/MyClassNatives/MyClassNatives.java
+++ b/test/MyClassNatives/MyClassNatives.java
@@ -139,8 +139,8 @@
         float f9, int i10, float f10);
 
     // Normal native
-    native static void stackArgsSignExtendedMips64(int i1, int i2, int i3, int i4, int i5, int i6,
-        int i7, int i8);
+    native static long getStackArgSignExtendedMips64(int i1, int i2, int i3, int i4, int i5, int i6,
+        int stack_arg);
 
     // Normal native
     static native double logD(double d);
@@ -273,8 +273,8 @@
         float f9, int i10, float f10);
 
     @FastNative
-    native static void stackArgsSignExtendedMips64_Fast(int i1, int i2, int i3, int i4, int i5, int i6,
-        int i7, int i8);
+    native static long getStackArgSignExtendedMips64_Fast(int i1, int i2, int i3, int i4, int i5, int i6,
+        int stack_arg);
 
     @FastNative
     static native double logD_Fast(double d);
@@ -316,10 +316,6 @@
         float f9, int i10, float f10);
 
     @CriticalNative
-    native static void stackArgsSignExtendedMips64_Critical(int i1, int i2, int i3, int i4, int i5, int i6,
-        int i7, int i8);
-
-    @CriticalNative
     static native double logD_Critical(double d);
     @CriticalNative
     static native float logF_Critical(float f);
diff --git a/test/VerifierDeps/MyClassWithNoSuper.smali b/test/VerifierDeps/MyClassWithNoSuper.smali
new file mode 100644
index 0000000..d8509bc
--- /dev/null
+++ b/test/VerifierDeps/MyClassWithNoSuper.smali
@@ -0,0 +1,16 @@
+# Copyright (C) 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+.class public LMyClassWithNoSuper;
+.super LNoSuper;
diff --git a/test/VerifierDeps/MyClassWithNoSuperButFailures.smali b/test/VerifierDeps/MyClassWithNoSuperButFailures.smali
new file mode 100644
index 0000000..1dbe9d1
--- /dev/null
+++ b/test/VerifierDeps/MyClassWithNoSuperButFailures.smali
@@ -0,0 +1,21 @@
+# Copyright (C) 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+.class public LMyClassWithNoSuperButFailures;
+.super LNoSuper;
+
+.method public final foo()I
+  .registers 1
+  return-void
+.end method
diff --git a/test/VerifierDeps/MyVerificationFailure.smali b/test/VerifierDeps/MyVerificationFailure.smali
new file mode 100644
index 0000000..187b1ad
--- /dev/null
+++ b/test/VerifierDeps/MyVerificationFailure.smali
@@ -0,0 +1,21 @@
+# Copyright (C) 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+.class public LMyVerificationFailure;
+.super Ljava/lang/Object;
+
+.method public final foo()I
+  .registers 1
+  return-void
+.end method
diff --git a/test/common/runtime_state.cc b/test/common/runtime_state.cc
index 4248148..a2eb370 100644
--- a/test/common/runtime_state.cc
+++ b/test/common/runtime_state.cc
@@ -128,9 +128,10 @@
     return;
   }
 
+  Thread* self = Thread::Current();
   ArtMethod* method = nullptr;
   {
-    ScopedObjectAccess soa(Thread::Current());
+    ScopedObjectAccess soa(self);
 
     ScopedUtfChars chars(env, method_name);
     CHECK(chars.c_str() != nullptr);
@@ -147,11 +148,11 @@
     } else {
       // Sleep to yield to the compiler thread.
       usleep(1000);
-      ScopedObjectAccess soa(Thread::Current());
+      ScopedObjectAccess soa(self);
       // Make sure there is a profiling info, required by the compiler.
-      ProfilingInfo::Create(soa.Self(), method, /* retry_allocation */ true);
+      ProfilingInfo::Create(self, method, /* retry_allocation */ true);
       // Will either ensure it's compiled or do the compilation itself.
-      jit->CompileMethod(method, soa.Self(), /* osr */ false);
+      jit->CompileMethod(method, self, /* osr */ false);
     }
   }
 }
diff --git a/test/etc/run-test-jar b/test/etc/run-test-jar
index 3535f32..c525b2b 100755
--- a/test/etc/run-test-jar
+++ b/test/etc/run-test-jar
@@ -148,7 +148,7 @@
         SECONDARY_DEX=":$DEX_LOCATION/$TEST_NAME-ex.jar"
         # Enable cfg-append to make sure we get the dump for both dex files.
         # (otherwise the runtime compilation of the secondary dex will overwrite
-        # the dump of the first one)
+        # the dump of the first one).
         FLAGS="${FLAGS} -Xcompiler-option --dump-cfg-append"
         COMPILE_FLAGS="${COMPILE_FLAGS} --dump-cfg-append"
         shift
@@ -397,9 +397,30 @@
 fi
 
 if [ "$HOST" = "n" ]; then
-  ISA=$(adb shell ls -F /data/dalvik-cache | grep -Ewo "${ARCHITECTURES_PATTERN}")
+  # Need to be root to query /data/dalvik-cache
+  adb root > /dev/null
+  adb wait-for-device
+  ISA=
+  ISA_adb_invocation=
+  ISA_outcome=
+  # We iterate a few times to work around an adb issue. b/32655576
+  for i in {1..10}; do
+    ISA_adb_invocation=$(adb shell ls -F /data/dalvik-cache)
+    ISA_outcome=$?
+    ISA=$(echo $ISA_adb_invocation | grep -Ewo "${ARCHITECTURES_PATTERN}")
+    if [ x"$ISA" != "x" ]; then
+      break;
+    fi
+  done
   if [ x"$ISA" = "x" ]; then
     echo "Unable to determine architecture"
+    # Print a few things to help diagnose the problem.
+    echo "adb invocation output: $ISA_adb_invocation"
+    echo "adb invocation outcome: $ISA_outcome"
+    echo $(adb shell ls -F /data/dalvik-cache)
+    echo $(adb shell ls /data/dalvik-cache)
+    echo ${ARCHITECTURES_PATTERN}
+    echo $(adb shell ls -F /data/dalvik-cache | grep -Ewo "${ARCHITECTURES_PATTERN}")
     exit 1
   fi
 fi
diff --git a/test/run-test b/test/run-test
index 7a4afaf..37eefb3 100755
--- a/test/run-test
+++ b/test/run-test
@@ -758,8 +758,8 @@
 if [ "$run_checker" = "yes" -a "$target_mode" = "yes" ]; then
   # We will need to `adb pull` the .cfg output from the target onto the host to
   # run checker on it. This file can be big.
-  build_file_size_limit=24576
-  run_file_size_limit=24576
+  build_file_size_limit=32768
+  run_file_size_limit=32768
 fi
 if [ ${USE_JACK} = "false" ]; then
   # Set ulimit if we build with dx only, Jack can generate big temp files.
diff --git a/test/ti-agent/common_helper.h b/test/ti-agent/common_helper.h
new file mode 100644
index 0000000..84997f3
--- /dev/null
+++ b/test/ti-agent/common_helper.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_TEST_TI_AGENT_COMMON_HELPER_H_
+#define ART_TEST_TI_AGENT_COMMON_HELPER_H_
+
+#include "jni.h"
+#include "ScopedLocalRef.h"
+
+namespace art {
+
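+// Builds a jobjectArray with 'length' elements of the given component type,
+// filling slot i with the object produced by src(i). Returns nullptr if any
+// JNI call fails.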
+template <typename T>
+static jobjectArray CreateObjectArray(JNIEnv* env,
+                                      jint length,
+                                      const char* component_type_descriptor,
+                                      T src) {
+  if (length < 0) {
+    return nullptr;
+  }
+
+  ScopedLocalRef<jclass> obj_class(env, env->FindClass(component_type_descriptor));
+  if (obj_class.get() == nullptr) {
+    return nullptr;
+  }
+
+  ScopedLocalRef<jobjectArray> ret(env, env->NewObjectArray(length, obj_class.get(), nullptr));
+  if (ret.get() == nullptr) {
+    return nullptr;
+  }
+
+  for (jint i = 0; i < length; ++i) {
+    jobject element = src(i);
+    env->SetObjectArrayElement(ret.get(), static_cast<jint>(i), element);
+    env->DeleteLocalRef(element);
+    if (env->ExceptionCheck()) {
+      return nullptr;
+    }
+  }
+
+  return ret.release();
+}
+
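+// Requests every capability the jvmtiEnv can potentially grant; return codes
+// are ignored as this is only used by test agents.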
+static void SetAllCapabilities(jvmtiEnv* env) {
+  jvmtiCapabilities caps;
+  env->GetPotentialCapabilities(&caps);
+  env->AddCapabilities(&caps);
+}
+
+}  // namespace art
+
+#endif  // ART_TEST_TI_AGENT_COMMON_HELPER_H_
diff --git a/test/ti-agent/common_load.cc b/test/ti-agent/common_load.cc
index 90d0a66..a959482 100644
--- a/test/ti-agent/common_load.cc
+++ b/test/ti-agent/common_load.cc
@@ -33,6 +33,10 @@
 #include "907-get-loaded-classes/get_loaded_classes.h"
 #include "908-gc-start-finish/gc_callbacks.h"
 #include "909-attach-agent/attach.h"
+#include "910-methods/methods.h"
+#include "911-get-stack-trace/stack_trace.h"
+#include "912-classes/classes.h"
+#include "913-heaps/heaps.h"
 
 namespace art {
 
@@ -58,6 +62,10 @@
   { "907-get-loaded-classes", Test907GetLoadedClasses::OnLoad, nullptr },
   { "908-gc-start-finish", Test908GcStartFinish::OnLoad, nullptr },
   { "909-attach-agent", nullptr, Test909AttachAgent::OnAttach },
+  { "910-methods", Test910Methods::OnLoad, nullptr },
+  { "911-get-stack-trace", Test911GetStackTrace::OnLoad, nullptr },
+  { "912-classes", Test912Classes::OnLoad, nullptr },
+  { "913-heaps", Test913Heaps::OnLoad, nullptr },
 };
 
 static AgentLib* FindAgent(char* name) {
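The four entries added above follow the existing registration pattern: each test contributes a name plus an OnLoad and/or OnAttach hook, and the loader picks the right entry by name. The stand-alone sketch below only illustrates that name-keyed lookup; the struct layout and callback signature here are simplified assumptions, not the definitions from common_load.cc:

    #include <cstring>

    // Simplified illustration types; the real AgentLib struct and callback
    // signatures in common_load.cc differ.
    using AgentCallback = int (*)(void* vm, char* options, void* reserved);

    struct AgentLibEntry {
      const char* name;
      AgentCallback load;
      AgentCallback attach;
    };

    static AgentLibEntry g_agents[] = {
      { "910-methods", nullptr, nullptr },
      { "913-heaps", nullptr, nullptr },
    };

    // Linear scan by test name, mirroring what a FindAgent helper would do.
    static AgentLibEntry* FindAgentEntry(const char* name) {
      for (AgentLibEntry& entry : g_agents) {
        if (std::strcmp(entry.name, name) == 0) {
          return &entry;
        }
      }
      return nullptr;
    }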
diff --git a/tools/buildbot-build.sh b/tools/buildbot-build.sh
index 12e0338..2d26b48 100755
--- a/tools/buildbot-build.sh
+++ b/tools/buildbot-build.sh
@@ -19,7 +19,17 @@
   exit 1
 fi
 
-out_dir=${OUT_DIR-out}
+# Logic for setting out_dir from build/make/core/envsetup.mk:
+if [[ -z $OUT_DIR ]]; then
+  if [[ -z $OUT_DIR_COMMON_BASE ]]; then
+    out_dir=out
+  else
+    out_dir=${OUT_DIR_COMMON_BASE}/${PWD##*/}
+  fi
+else
+  out_dir=${OUT_DIR}
+fi
+
 java_libraries_dir=${out_dir}/target/common/obj/JAVA_LIBRARIES
 common_targets="vogar core-tests apache-harmony-jdwp-tests-hostdex jsr166-tests mockito-target ${out_dir}/host/linux-x86/bin/jack"
 mode="target"
diff --git a/tools/cpp-define-generator/constant_class.def b/tools/cpp-define-generator/constant_class.def
index 58372f9..f46cd33 100644
--- a/tools/cpp-define-generator/constant_class.def
+++ b/tools/cpp-define-generator/constant_class.def
@@ -25,6 +25,7 @@
 
 DEFINE_FLAG_OFFSET(MIRROR_CLASS, STATUS_INITIALIZED,       art::mirror::Class::kStatusInitialized)
 DEFINE_FLAG_OFFSET(ACCESS_FLAGS, CLASS_IS_FINALIZABLE,     art::kAccClassIsFinalizable)
+DEFINE_FLAG_OFFSET(ACCESS_FLAGS, CLASS_IS_INTERFACE,       art::kAccInterface)
 // TODO: We should really have a BitPosition which also checks it's a power of 2.
 DEFINE_FLAG_OFFSET(ACCESS_FLAGS, CLASS_IS_FINALIZABLE_BIT, art::MostSignificantBit(art::kAccClassIsFinalizable))
 
diff --git a/tools/cpp-define-generator/constant_lockword.def b/tools/cpp-define-generator/constant_lockword.def
index 67ed5b5..08d5885 100644
--- a/tools/cpp-define-generator/constant_lockword.def
+++ b/tools/cpp-define-generator/constant_lockword.def
@@ -30,6 +30,10 @@
 DEFINE_LOCK_WORD_EXPR(READ_BARRIER_STATE_MASK_TOGGLED, uint32_t, kReadBarrierStateMaskShiftedToggled)
 DEFINE_LOCK_WORD_EXPR(THIN_LOCK_COUNT_ONE,       int32_t,  kThinLockCountOne)
 
+DEFINE_LOCK_WORD_EXPR(STATE_FORWARDING_ADDRESS, uint32_t, kStateForwardingAddress)
+DEFINE_LOCK_WORD_EXPR(STATE_FORWARDING_ADDRESS_OVERFLOW, uint32_t, kStateForwardingAddressOverflow)
+DEFINE_LOCK_WORD_EXPR(STATE_FORWARDING_ADDRESS_SHIFT, uint32_t, kForwardingAddressShift)
+
 DEFINE_LOCK_WORD_EXPR(GC_STATE_MASK_SHIFTED,   uint32_t,  kGCStateMaskShifted)
 DEFINE_LOCK_WORD_EXPR(GC_STATE_MASK_SHIFTED_TOGGLED, uint32_t, kGCStateMaskShiftedToggled)
 DEFINE_LOCK_WORD_EXPR(GC_STATE_SHIFT,   int32_t,  kGCStateShift)
diff --git a/tools/cpp-define-generator/constant_thread.def b/tools/cpp-define-generator/constant_thread.def
index af5ca21..1364b55 100644
--- a/tools/cpp-define-generator/constant_thread.def
+++ b/tools/cpp-define-generator/constant_thread.def
@@ -25,5 +25,7 @@
 
 DEFINE_THREAD_CONSTANT(SUSPEND_REQUEST,    int32_t, art::kSuspendRequest)
 DEFINE_THREAD_CONSTANT(CHECKPOINT_REQUEST, int32_t, art::kCheckpointRequest)
+DEFINE_THREAD_CONSTANT(EMPTY_CHECKPOINT_REQUEST, int32_t, art::kEmptyCheckpointRequest)
+DEFINE_THREAD_CONSTANT(SUSPEND_OR_CHECKPOINT_REQUEST,  int32_t, art::kSuspendRequest | art::kCheckpointRequest | art::kEmptyCheckpointRequest)
 
 #undef DEFINE_THREAD_CONSTANT
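SUSPEND_OR_CHECKPOINT_REQUEST is the OR of the three request flags, so code using the generated constants can detect any pending request with a single mask test instead of three separate compares. A minimal sketch of that idea; the bit values below are assumptions for illustration, not the actual art::ThreadFlags constants:

    #include <cstdint>
    #include <cstdio>

    // Assumed distinct single-bit flag values (illustrative only).
    constexpr int32_t kSuspendRequest         = 1 << 0;
    constexpr int32_t kCheckpointRequest      = 1 << 1;
    constexpr int32_t kEmptyCheckpointRequest = 1 << 2;
    constexpr int32_t kSuspendOrCheckpointRequest =
        kSuspendRequest | kCheckpointRequest | kEmptyCheckpointRequest;

    int main() {
      int32_t state_and_flags = kEmptyCheckpointRequest;  // pretend this was read from the thread
      // One AND against the combined mask covers all three request kinds,
      // which is what the fast path wants.
      if ((state_and_flags & kSuspendOrCheckpointRequest) != 0) {
        std::puts("take the slow path: a suspend or checkpoint was requested");
      }
      return 0;
    }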
diff --git a/tools/cpp-define-generator/generate-asm-support b/tools/cpp-define-generator/generate-asm-support
index f95648b..fcdf72f 100755
--- a/tools/cpp-define-generator/generate-asm-support
+++ b/tools/cpp-define-generator/generate-asm-support
@@ -5,4 +5,4 @@
 
 [[ -z ${ANDROID_BUILD_TOP+x} ]] && (echo "Run source build/envsetup.sh first" >&2 && exit 1)
 
-cpp-define-generator-datad > ${ANDROID_BUILD_TOP}/art/runtime/generated/asm_support_gen.h
+cpp-define-generator-data > ${ANDROID_BUILD_TOP}/art/runtime/generated/asm_support_gen.h
diff --git a/tools/cpp-define-generator/main.cc b/tools/cpp-define-generator/main.cc
index a1b463a..fc99f8a 100644
--- a/tools/cpp-define-generator/main.cc
+++ b/tools/cpp-define-generator/main.cc
@@ -59,12 +59,12 @@
 }
 
 template <typename T>
-void cpp_define(std::string name, T value) {
+void cpp_define(const std::string& name, T value) {
   std::cout << "#define " << name << " " << pretty_format(value) << std::endl;
 }
 
 template <typename T>
-void emit_check_eq(T value, std::string expr) {
+void emit_check_eq(T value, const std::string& expr) {
   std::cout << "DEFINE_CHECK_EQ(" << value << ", (" << expr << "))" << std::endl;
 }