blob: a8af92cbe6e09eaaec2bb556b067e689f300891c [file] [log] [blame]
buzbeeee17e0a2013-07-31 10:47:37 -07001/*
2 * Copyright (C) 2013 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
Vladimir Markobe0e5462014-02-26 11:24:15 +000017#include <algorithm>
Ian Rogers700a4022014-05-19 16:49:03 -070018#include <memory>
19
buzbeeee17e0a2013-07-31 10:47:37 -070020#include "compiler_internals.h"
21#include "dataflow_iterator-inl.h"
Vladimir Markobe0e5462014-02-26 11:24:15 +000022#include "dex_instruction.h"
23#include "dex_instruction-inl.h"
Vladimir Markof096aad2014-01-23 15:51:58 +000024#include "dex/verified_method.h"
Vladimir Marko5816ed42013-11-27 17:04:20 +000025#include "dex/quick/dex_file_method_inliner.h"
26#include "dex/quick/dex_file_to_method_inliner_map.h"
Brian Carlstrom6449c622014-02-10 23:48:36 -080027#include "driver/compiler_options.h"
Vladimir Marko69f08ba2014-04-11 12:28:11 +010028#include "utils/scoped_arena_containers.h"
buzbeeee17e0a2013-07-31 10:47:37 -070029
30namespace art {
31
32 // Instruction characteristics used to statically identify computation-intensive methods.
// Static per-opcode attribute table used to cheaply classify Dalvik (and extended MIR)
// instructions when deciding whether a method is computation-intensive enough to compile.
// Indexed by opcode; one entry per opcode up to kMirOpLast. Each entry is a bitmask of
// AN_* flags (AN_MOVE, AN_BRANCH, AN_MATH, AN_FP, AN_ARRAYOP, AN_HEAVYWEIGHT, ...).
const uint32_t MIRGraph::analysis_attributes_[kMirOpLast] = {
  // 00 NOP
  AN_NONE,

  // 01 MOVE vA, vB
  AN_MOVE,

  // 02 MOVE_FROM16 vAA, vBBBB
  AN_MOVE,

  // 03 MOVE_16 vAAAA, vBBBB
  AN_MOVE,

  // 04 MOVE_WIDE vA, vB
  AN_MOVE,

  // 05 MOVE_WIDE_FROM16 vAA, vBBBB
  AN_MOVE,

  // 06 MOVE_WIDE_16 vAAAA, vBBBB
  AN_MOVE,

  // 07 MOVE_OBJECT vA, vB
  AN_MOVE,

  // 08 MOVE_OBJECT_FROM16 vAA, vBBBB
  AN_MOVE,

  // 09 MOVE_OBJECT_16 vAAAA, vBBBB
  AN_MOVE,

  // 0A MOVE_RESULT vAA
  AN_MOVE,

  // 0B MOVE_RESULT_WIDE vAA
  AN_MOVE,

  // 0C MOVE_RESULT_OBJECT vAA
  AN_MOVE,

  // 0D MOVE_EXCEPTION vAA
  AN_MOVE,

  // 0E RETURN_VOID
  AN_BRANCH,

  // 0F RETURN vAA
  AN_BRANCH,

  // 10 RETURN_WIDE vAA
  AN_BRANCH,

  // 11 RETURN_OBJECT vAA
  AN_BRANCH,

  // 12 CONST_4 vA, #+B
  AN_SIMPLECONST,

  // 13 CONST_16 vAA, #+BBBB
  AN_SIMPLECONST,

  // 14 CONST vAA, #+BBBBBBBB
  AN_SIMPLECONST,

  // 15 CONST_HIGH16 VAA, #+BBBB0000
  AN_SIMPLECONST,

  // 16 CONST_WIDE_16 vAA, #+BBBB
  AN_SIMPLECONST,

  // 17 CONST_WIDE_32 vAA, #+BBBBBBBB
  AN_SIMPLECONST,

  // 18 CONST_WIDE vAA, #+BBBBBBBBBBBBBBBB
  AN_SIMPLECONST,

  // 19 CONST_WIDE_HIGH16 vAA, #+BBBB000000000000
  AN_SIMPLECONST,

  // 1A CONST_STRING vAA, string@BBBB
  AN_NONE,

  // 1B CONST_STRING_JUMBO vAA, string@BBBBBBBB
  AN_NONE,

  // 1C CONST_CLASS vAA, type@BBBB
  AN_NONE,

  // 1D MONITOR_ENTER vAA
  AN_NONE,

  // 1E MONITOR_EXIT vAA
  AN_NONE,

  // 1F CHK_CAST vAA, type@BBBB
  AN_NONE,

  // 20 INSTANCE_OF vA, vB, type@CCCC
  AN_NONE,

  // 21 ARRAY_LENGTH vA, vB
  AN_ARRAYOP,

  // 22 NEW_INSTANCE vAA, type@BBBB
  AN_HEAVYWEIGHT,

  // 23 NEW_ARRAY vA, vB, type@CCCC
  AN_HEAVYWEIGHT,

  // 24 FILLED_NEW_ARRAY {vD, vE, vF, vG, vA}
  AN_HEAVYWEIGHT,

  // 25 FILLED_NEW_ARRAY_RANGE {vCCCC .. vNNNN}, type@BBBB
  AN_HEAVYWEIGHT,

  // 26 FILL_ARRAY_DATA vAA, +BBBBBBBB
  AN_NONE,

  // 27 THROW vAA
  AN_HEAVYWEIGHT | AN_BRANCH,

  // 28 GOTO
  AN_BRANCH,

  // 29 GOTO_16
  AN_BRANCH,

  // 2A GOTO_32
  AN_BRANCH,

  // 2B PACKED_SWITCH vAA, +BBBBBBBB
  AN_SWITCH,

  // 2C SPARSE_SWITCH vAA, +BBBBBBBB
  AN_SWITCH,

  // 2D CMPL_FLOAT vAA, vBB, vCC
  AN_MATH | AN_FP | AN_SINGLE,

  // 2E CMPG_FLOAT vAA, vBB, vCC
  AN_MATH | AN_FP | AN_SINGLE,

  // 2F CMPL_DOUBLE vAA, vBB, vCC
  AN_MATH | AN_FP | AN_DOUBLE,

  // 30 CMPG_DOUBLE vAA, vBB, vCC
  AN_MATH | AN_FP | AN_DOUBLE,

  // 31 CMP_LONG vAA, vBB, vCC
  AN_MATH | AN_LONG,

  // 32 IF_EQ vA, vB, +CCCC
  AN_MATH | AN_BRANCH | AN_INT,

  // 33 IF_NE vA, vB, +CCCC
  AN_MATH | AN_BRANCH | AN_INT,

  // 34 IF_LT vA, vB, +CCCC
  AN_MATH | AN_BRANCH | AN_INT,

  // 35 IF_GE vA, vB, +CCCC
  AN_MATH | AN_BRANCH | AN_INT,

  // 36 IF_GT vA, vB, +CCCC
  AN_MATH | AN_BRANCH | AN_INT,

  // 37 IF_LE vA, vB, +CCCC
  AN_MATH | AN_BRANCH | AN_INT,

  // 38 IF_EQZ vAA, +BBBB
  AN_MATH | AN_BRANCH | AN_INT,

  // 39 IF_NEZ vAA, +BBBB
  AN_MATH | AN_BRANCH | AN_INT,

  // 3A IF_LTZ vAA, +BBBB
  AN_MATH | AN_BRANCH | AN_INT,

  // 3B IF_GEZ vAA, +BBBB
  AN_MATH | AN_BRANCH | AN_INT,

  // 3C IF_GTZ vAA, +BBBB
  AN_MATH | AN_BRANCH | AN_INT,

  // 3D IF_LEZ vAA, +BBBB
  AN_MATH | AN_BRANCH | AN_INT,

  // 3E UNUSED_3E
  AN_NONE,

  // 3F UNUSED_3F
  AN_NONE,

  // 40 UNUSED_40
  AN_NONE,

  // 41 UNUSED_41
  AN_NONE,

  // 42 UNUSED_42
  AN_NONE,

  // 43 UNUSED_43
  AN_NONE,

  // 44 AGET vAA, vBB, vCC
  AN_ARRAYOP,

  // 45 AGET_WIDE vAA, vBB, vCC
  AN_ARRAYOP,

  // 46 AGET_OBJECT vAA, vBB, vCC
  AN_ARRAYOP,

  // 47 AGET_BOOLEAN vAA, vBB, vCC
  AN_ARRAYOP,

  // 48 AGET_BYTE vAA, vBB, vCC
  AN_ARRAYOP,

  // 49 AGET_CHAR vAA, vBB, vCC
  AN_ARRAYOP,

  // 4A AGET_SHORT vAA, vBB, vCC
  AN_ARRAYOP,

  // 4B APUT vAA, vBB, vCC
  AN_ARRAYOP,

  // 4C APUT_WIDE vAA, vBB, vCC
  AN_ARRAYOP,

  // 4D APUT_OBJECT vAA, vBB, vCC
  AN_ARRAYOP,

  // 4E APUT_BOOLEAN vAA, vBB, vCC
  AN_ARRAYOP,

  // 4F APUT_BYTE vAA, vBB, vCC
  AN_ARRAYOP,

  // 50 APUT_CHAR vAA, vBB, vCC
  AN_ARRAYOP,

  // 51 APUT_SHORT vAA, vBB, vCC
  AN_ARRAYOP,

  // 52 IGET vA, vB, field@CCCC
  AN_NONE,

  // 53 IGET_WIDE vA, vB, field@CCCC
  AN_NONE,

  // 54 IGET_OBJECT vA, vB, field@CCCC
  AN_NONE,

  // 55 IGET_BOOLEAN vA, vB, field@CCCC
  AN_NONE,

  // 56 IGET_BYTE vA, vB, field@CCCC
  AN_NONE,

  // 57 IGET_CHAR vA, vB, field@CCCC
  AN_NONE,

  // 58 IGET_SHORT vA, vB, field@CCCC
  AN_NONE,

  // 59 IPUT vA, vB, field@CCCC
  AN_NONE,

  // 5A IPUT_WIDE vA, vB, field@CCCC
  AN_NONE,

  // 5B IPUT_OBJECT vA, vB, field@CCCC
  AN_NONE,

  // 5C IPUT_BOOLEAN vA, vB, field@CCCC
  AN_NONE,

  // 5D IPUT_BYTE vA, vB, field@CCCC
  AN_NONE,

  // 5E IPUT_CHAR vA, vB, field@CCCC
  AN_NONE,

  // 5F IPUT_SHORT vA, vB, field@CCCC
  AN_NONE,

  // 60 SGET vAA, field@BBBB
  AN_NONE,

  // 61 SGET_WIDE vAA, field@BBBB
  AN_NONE,

  // 62 SGET_OBJECT vAA, field@BBBB
  AN_NONE,

  // 63 SGET_BOOLEAN vAA, field@BBBB
  AN_NONE,

  // 64 SGET_BYTE vAA, field@BBBB
  AN_NONE,

  // 65 SGET_CHAR vAA, field@BBBB
  AN_NONE,

  // 66 SGET_SHORT vAA, field@BBBB
  AN_NONE,

  // 67 SPUT vAA, field@BBBB
  AN_NONE,

  // 68 SPUT_WIDE vAA, field@BBBB
  AN_NONE,

  // 69 SPUT_OBJECT vAA, field@BBBB
  AN_NONE,

  // 6A SPUT_BOOLEAN vAA, field@BBBB
  AN_NONE,

  // 6B SPUT_BYTE vAA, field@BBBB
  AN_NONE,

  // 6C SPUT_CHAR vAA, field@BBBB
  AN_NONE,

  // 6D SPUT_SHORT vAA, field@BBBB
  AN_NONE,

  // 6E INVOKE_VIRTUAL {vD, vE, vF, vG, vA}
  AN_INVOKE | AN_HEAVYWEIGHT,

  // 6F INVOKE_SUPER {vD, vE, vF, vG, vA}
  AN_INVOKE | AN_HEAVYWEIGHT,

  // 70 INVOKE_DIRECT {vD, vE, vF, vG, vA}
  AN_INVOKE | AN_HEAVYWEIGHT,

  // 71 INVOKE_STATIC {vD, vE, vF, vG, vA}
  AN_INVOKE | AN_HEAVYWEIGHT,

  // 72 INVOKE_INTERFACE {vD, vE, vF, vG, vA}
  AN_INVOKE | AN_HEAVYWEIGHT,

  // 73 UNUSED_73
  AN_NONE,

  // 74 INVOKE_VIRTUAL_RANGE {vCCCC .. vNNNN}
  AN_INVOKE | AN_HEAVYWEIGHT,

  // 75 INVOKE_SUPER_RANGE {vCCCC .. vNNNN}
  AN_INVOKE | AN_HEAVYWEIGHT,

  // 76 INVOKE_DIRECT_RANGE {vCCCC .. vNNNN}
  AN_INVOKE | AN_HEAVYWEIGHT,

  // 77 INVOKE_STATIC_RANGE {vCCCC .. vNNNN}
  AN_INVOKE | AN_HEAVYWEIGHT,

  // 78 INVOKE_INTERFACE_RANGE {vCCCC .. vNNNN}
  AN_INVOKE | AN_HEAVYWEIGHT,

  // 79 UNUSED_79
  AN_NONE,

  // 7A UNUSED_7A
  AN_NONE,

  // 7B NEG_INT vA, vB
  AN_MATH | AN_INT,

  // 7C NOT_INT vA, vB
  AN_MATH | AN_INT,

  // 7D NEG_LONG vA, vB
  AN_MATH | AN_LONG,

  // 7E NOT_LONG vA, vB
  AN_MATH | AN_LONG,

  // 7F NEG_FLOAT vA, vB
  AN_MATH | AN_FP | AN_SINGLE,

  // 80 NEG_DOUBLE vA, vB
  AN_MATH | AN_FP | AN_DOUBLE,

  // 81 INT_TO_LONG vA, vB
  AN_MATH | AN_INT | AN_LONG,

  // 82 INT_TO_FLOAT vA, vB
  AN_MATH | AN_FP | AN_INT | AN_SINGLE,

  // 83 INT_TO_DOUBLE vA, vB
  AN_MATH | AN_FP | AN_INT | AN_DOUBLE,

  // 84 LONG_TO_INT vA, vB
  AN_MATH | AN_INT | AN_LONG,

  // 85 LONG_TO_FLOAT vA, vB
  AN_MATH | AN_FP | AN_LONG | AN_SINGLE,

  // 86 LONG_TO_DOUBLE vA, vB
  AN_MATH | AN_FP | AN_LONG | AN_DOUBLE,

  // 87 FLOAT_TO_INT vA, vB
  AN_MATH | AN_FP | AN_INT | AN_SINGLE,

  // 88 FLOAT_TO_LONG vA, vB
  AN_MATH | AN_FP | AN_LONG | AN_SINGLE,

  // 89 FLOAT_TO_DOUBLE vA, vB
  AN_MATH | AN_FP | AN_SINGLE | AN_DOUBLE,

  // 8A DOUBLE_TO_INT vA, vB
  AN_MATH | AN_FP | AN_INT | AN_DOUBLE,

  // 8B DOUBLE_TO_LONG vA, vB
  AN_MATH | AN_FP | AN_LONG | AN_DOUBLE,

  // 8C DOUBLE_TO_FLOAT vA, vB
  AN_MATH | AN_FP | AN_SINGLE | AN_DOUBLE,

  // 8D INT_TO_BYTE vA, vB
  AN_MATH | AN_INT,

  // 8E INT_TO_CHAR vA, vB
  AN_MATH | AN_INT,

  // 8F INT_TO_SHORT vA, vB
  AN_MATH | AN_INT,

  // 90 ADD_INT vAA, vBB, vCC
  AN_MATH | AN_INT,

  // 91 SUB_INT vAA, vBB, vCC
  AN_MATH | AN_INT,

  // 92 MUL_INT vAA, vBB, vCC
  AN_MATH | AN_INT,

  // 93 DIV_INT vAA, vBB, vCC
  AN_MATH | AN_INT,

  // 94 REM_INT vAA, vBB, vCC
  AN_MATH | AN_INT,

  // 95 AND_INT vAA, vBB, vCC
  AN_MATH | AN_INT,

  // 96 OR_INT vAA, vBB, vCC
  AN_MATH | AN_INT,

  // 97 XOR_INT vAA, vBB, vCC
  AN_MATH | AN_INT,

  // 98 SHL_INT vAA, vBB, vCC
  AN_MATH | AN_INT,

  // 99 SHR_INT vAA, vBB, vCC
  AN_MATH | AN_INT,

  // 9A USHR_INT vAA, vBB, vCC
  AN_MATH | AN_INT,

  // 9B ADD_LONG vAA, vBB, vCC
  AN_MATH | AN_LONG,

  // 9C SUB_LONG vAA, vBB, vCC
  AN_MATH | AN_LONG,

  // 9D MUL_LONG vAA, vBB, vCC
  AN_MATH | AN_LONG,

  // 9E DIV_LONG vAA, vBB, vCC
  AN_MATH | AN_LONG,

  // 9F REM_LONG vAA, vBB, vCC
  AN_MATH | AN_LONG,

  // A0 AND_LONG vAA, vBB, vCC
  AN_MATH | AN_LONG,

  // A1 OR_LONG vAA, vBB, vCC
  AN_MATH | AN_LONG,

  // A2 XOR_LONG vAA, vBB, vCC
  AN_MATH | AN_LONG,

  // A3 SHL_LONG vAA, vBB, vCC
  AN_MATH | AN_LONG,

  // A4 SHR_LONG vAA, vBB, vCC
  AN_MATH | AN_LONG,

  // A5 USHR_LONG vAA, vBB, vCC
  AN_MATH | AN_LONG,

  // A6 ADD_FLOAT vAA, vBB, vCC
  AN_MATH | AN_FP | AN_SINGLE,

  // A7 SUB_FLOAT vAA, vBB, vCC
  AN_MATH | AN_FP | AN_SINGLE,

  // A8 MUL_FLOAT vAA, vBB, vCC
  AN_MATH | AN_FP | AN_SINGLE,

  // A9 DIV_FLOAT vAA, vBB, vCC
  AN_MATH | AN_FP | AN_SINGLE,

  // AA REM_FLOAT vAA, vBB, vCC
  AN_MATH | AN_FP | AN_SINGLE,

  // AB ADD_DOUBLE vAA, vBB, vCC
  AN_MATH | AN_FP | AN_DOUBLE,

  // AC SUB_DOUBLE vAA, vBB, vCC
  AN_MATH | AN_FP | AN_DOUBLE,

  // AD MUL_DOUBLE vAA, vBB, vCC
  AN_MATH | AN_FP | AN_DOUBLE,

  // AE DIV_DOUBLE vAA, vBB, vCC
  AN_MATH | AN_FP | AN_DOUBLE,

  // AF REM_DOUBLE vAA, vBB, vCC
  AN_MATH | AN_FP | AN_DOUBLE,

  // B0 ADD_INT_2ADDR vA, vB
  AN_MATH | AN_INT,

  // B1 SUB_INT_2ADDR vA, vB
  AN_MATH | AN_INT,

  // B2 MUL_INT_2ADDR vA, vB
  AN_MATH | AN_INT,

  // B3 DIV_INT_2ADDR vA, vB
  AN_MATH | AN_INT,

  // B4 REM_INT_2ADDR vA, vB
  AN_MATH | AN_INT,

  // B5 AND_INT_2ADDR vA, vB
  AN_MATH | AN_INT,

  // B6 OR_INT_2ADDR vA, vB
  AN_MATH | AN_INT,

  // B7 XOR_INT_2ADDR vA, vB
  AN_MATH | AN_INT,

  // B8 SHL_INT_2ADDR vA, vB
  AN_MATH | AN_INT,

  // B9 SHR_INT_2ADDR vA, vB
  AN_MATH | AN_INT,

  // BA USHR_INT_2ADDR vA, vB
  AN_MATH | AN_INT,

  // BB ADD_LONG_2ADDR vA, vB
  AN_MATH | AN_LONG,

  // BC SUB_LONG_2ADDR vA, vB
  AN_MATH | AN_LONG,

  // BD MUL_LONG_2ADDR vA, vB
  AN_MATH | AN_LONG,

  // BE DIV_LONG_2ADDR vA, vB
  AN_MATH | AN_LONG,

  // BF REM_LONG_2ADDR vA, vB
  AN_MATH | AN_LONG,

  // C0 AND_LONG_2ADDR vA, vB
  AN_MATH | AN_LONG,

  // C1 OR_LONG_2ADDR vA, vB
  AN_MATH | AN_LONG,

  // C2 XOR_LONG_2ADDR vA, vB
  AN_MATH | AN_LONG,

  // C3 SHL_LONG_2ADDR vA, vB
  AN_MATH | AN_LONG,

  // C4 SHR_LONG_2ADDR vA, vB
  AN_MATH | AN_LONG,

  // C5 USHR_LONG_2ADDR vA, vB
  AN_MATH | AN_LONG,

  // C6 ADD_FLOAT_2ADDR vA, vB
  AN_MATH | AN_FP | AN_SINGLE,

  // C7 SUB_FLOAT_2ADDR vA, vB
  AN_MATH | AN_FP | AN_SINGLE,

  // C8 MUL_FLOAT_2ADDR vA, vB
  AN_MATH | AN_FP | AN_SINGLE,

  // C9 DIV_FLOAT_2ADDR vA, vB
  AN_MATH | AN_FP | AN_SINGLE,

  // CA REM_FLOAT_2ADDR vA, vB
  AN_MATH | AN_FP | AN_SINGLE,

  // CB ADD_DOUBLE_2ADDR vA, vB
  AN_MATH | AN_FP | AN_DOUBLE,

  // CC SUB_DOUBLE_2ADDR vA, vB
  AN_MATH | AN_FP | AN_DOUBLE,

  // CD MUL_DOUBLE_2ADDR vA, vB
  AN_MATH | AN_FP | AN_DOUBLE,

  // CE DIV_DOUBLE_2ADDR vA, vB
  AN_MATH | AN_FP | AN_DOUBLE,

  // CF REM_DOUBLE_2ADDR vA, vB
  AN_MATH | AN_FP | AN_DOUBLE,

  // D0 ADD_INT_LIT16 vA, vB, #+CCCC
  AN_MATH | AN_INT,

  // D1 RSUB_INT vA, vB, #+CCCC
  AN_MATH | AN_INT,

  // D2 MUL_INT_LIT16 vA, vB, #+CCCC
  AN_MATH | AN_INT,

  // D3 DIV_INT_LIT16 vA, vB, #+CCCC
  AN_MATH | AN_INT,

  // D4 REM_INT_LIT16 vA, vB, #+CCCC
  AN_MATH | AN_INT,

  // D5 AND_INT_LIT16 vA, vB, #+CCCC
  AN_MATH | AN_INT,

  // D6 OR_INT_LIT16 vA, vB, #+CCCC
  AN_MATH | AN_INT,

  // D7 XOR_INT_LIT16 vA, vB, #+CCCC
  AN_MATH | AN_INT,

  // D8 ADD_INT_LIT8 vAA, vBB, #+CC
  AN_MATH | AN_INT,

  // D9 RSUB_INT_LIT8 vAA, vBB, #+CC
  AN_MATH | AN_INT,

  // DA MUL_INT_LIT8 vAA, vBB, #+CC
  AN_MATH | AN_INT,

  // DB DIV_INT_LIT8 vAA, vBB, #+CC
  AN_MATH | AN_INT,

  // DC REM_INT_LIT8 vAA, vBB, #+CC
  AN_MATH | AN_INT,

  // DD AND_INT_LIT8 vAA, vBB, #+CC
  AN_MATH | AN_INT,

  // DE OR_INT_LIT8 vAA, vBB, #+CC
  AN_MATH | AN_INT,

  // DF XOR_INT_LIT8 vAA, vBB, #+CC
  AN_MATH | AN_INT,

  // E0 SHL_INT_LIT8 vAA, vBB, #+CC
  AN_MATH | AN_INT,

  // E1 SHR_INT_LIT8 vAA, vBB, #+CC
  AN_MATH | AN_INT,

  // E2 USHR_INT_LIT8 vAA, vBB, #+CC
  AN_MATH | AN_INT,

  // E3 IGET_VOLATILE
  AN_NONE,

  // E4 IPUT_VOLATILE
  AN_NONE,

  // E5 SGET_VOLATILE
  AN_NONE,

  // E6 SPUT_VOLATILE
  AN_NONE,

  // E7 IGET_OBJECT_VOLATILE
  AN_NONE,

  // E8 IGET_WIDE_VOLATILE
  AN_NONE,

  // E9 IPUT_WIDE_VOLATILE
  AN_NONE,

  // EA SGET_WIDE_VOLATILE
  AN_NONE,

  // EB SPUT_WIDE_VOLATILE
  AN_NONE,

  // EC BREAKPOINT
  AN_NONE,

  // ED THROW_VERIFICATION_ERROR
  AN_HEAVYWEIGHT | AN_BRANCH,

  // EE EXECUTE_INLINE
  AN_NONE,

  // EF EXECUTE_INLINE_RANGE
  AN_NONE,

  // F0 INVOKE_OBJECT_INIT_RANGE
  AN_INVOKE | AN_HEAVYWEIGHT,

  // F1 RETURN_VOID_BARRIER
  AN_BRANCH,

  // F2 IGET_QUICK
  AN_NONE,

  // F3 IGET_WIDE_QUICK
  AN_NONE,

  // F4 IGET_OBJECT_QUICK
  AN_NONE,

  // F5 IPUT_QUICK
  AN_NONE,

  // F6 IPUT_WIDE_QUICK
  AN_NONE,

  // F7 IPUT_OBJECT_QUICK
  AN_NONE,

  // F8 INVOKE_VIRTUAL_QUICK
  AN_INVOKE | AN_HEAVYWEIGHT,

  // F9 INVOKE_VIRTUAL_QUICK_RANGE
  AN_INVOKE | AN_HEAVYWEIGHT,

  // FA INVOKE_SUPER_QUICK
  AN_INVOKE | AN_HEAVYWEIGHT,

  // FB INVOKE_SUPER_QUICK_RANGE
  AN_INVOKE | AN_HEAVYWEIGHT,

  // FC IPUT_OBJECT_VOLATILE
  AN_NONE,

  // FD SGET_OBJECT_VOLATILE
  AN_NONE,

  // FE SPUT_OBJECT_VOLATILE
  AN_NONE,

  // FF UNUSED_FF
  AN_NONE,

  // Beginning of extended MIR opcodes
  // 100 MIR_PHI
  AN_NONE,

  // 101 MIR_COPY
  AN_NONE,

  // 102 MIR_FUSED_CMPL_FLOAT
  AN_NONE,

  // 103 MIR_FUSED_CMPG_FLOAT
  AN_NONE,

  // 104 MIR_FUSED_CMPL_DOUBLE
  AN_NONE,

  // 105 MIR_FUSED_CMPG_DOUBLE
  AN_NONE,

  // 106 MIR_FUSED_CMP_LONG
  AN_NONE,

  // 107 MIR_NOP
  AN_NONE,

  // 108 MIR_NULL_CHECK
  AN_NONE,

  // 109 MIR_RANGE_CHECK
  AN_NONE,

  // 110 MIR_DIV_ZERO_CHECK
  AN_NONE,

  // 111 MIR_CHECK
  AN_NONE,

  // 112 MIR_CHECKPART2
  AN_NONE,

  // 113 MIR_SELECT
  AN_NONE,

  // 114 MirOpConstVector
  AN_NONE,

  // 115 MirOpMoveVector
  AN_NONE,

  // 116 MirOpPackedMultiply
  AN_NONE,

  // 117 MirOpPackedAddition
  AN_NONE,

  // 118 MirOpPackedSubtract
  AN_NONE,

  // 119 MirOpPackedShiftLeft
  AN_NONE,

  // 120 MirOpPackedSignedShiftRight
  AN_NONE,

  // 121 MirOpPackedUnsignedShiftRight
  AN_NONE,

  // 122 MirOpPackedAnd
  AN_NONE,

  // 123 MirOpPackedOr
  AN_NONE,

  // 124 MirOpPackedXor
  AN_NONE,

  // 125 MirOpPackedAddReduce
  AN_NONE,

  // 126 MirOpPackedReduce
  AN_NONE,

  // 127 MirOpPackedSet
  AN_NONE,

  // 128 MirOpReserveVectorRegisters
  AN_NONE,

  // 129 MirOpReturnVectorRegisters
  AN_NONE,

  // 130 MirOpMemBarrier
  AN_NONE,
};
896
// Aggregate instruction counts and derived ratios for one method, accumulated by
// AnalyzeBlock() and consumed by ComputeSkipCompilation(). Counts are scaled by a
// loop factor for blocks detected as simple loop bodies, so they are weighted
// estimates rather than raw instruction counts.
struct MethodStats {
  int dex_instructions;        // Weighted total of analyzed dex instructions.
  int math_ops;                // Weighted count of AN_MATH instructions.
  int fp_ops;                  // Weighted count of AN_FP instructions.
  int array_ops;               // Weighted count of AN_ARRAYOP instructions.
  int branch_ops;              // Weighted count of AN_BRANCH instructions.
  int heavyweight_ops;         // Weighted count of AN_HEAVYWEIGHT instructions.
  bool has_computational_loop; // True if a loop body of purely computational ops with math was seen.
  bool has_switch;             // True if a PACKED_SWITCH/SPARSE_SWITCH was seen.
  float math_ratio;            // math_ops / dex_instructions (filled by ComputeSkipCompilation).
  float fp_ratio;              // fp_ops / dex_instructions.
  float array_ratio;           // array_ops / dex_instructions.
  float branch_ratio;          // branch_ops / dex_instructions.
  float heavyweight_ratio;     // heavyweight_ops / dex_instructions.
};
912
// Accumulate instruction statistics for the extended basic block starting at |bb|
// into |stats|. Marks each scanned block visited so callers iterating all nodes do
// not double-count blocks already consumed as part of an earlier fall-through chain.
void MIRGraph::AnalyzeBlock(BasicBlock* bb, MethodStats* stats) {
  // Skip blocks already counted and anything that isn't real dex bytecode.
  if (bb->visited || (bb->block_type != kDalvikByteCode)) {
    return;
  }
  bool computational_block = true;
  bool has_math = false;
  /*
   * For the purposes of this scan, we want to treat the set of basic blocks broken
   * by an exception edge as a single basic block.  We'll scan forward along the fallthrough
   * edges until we reach an explicit branch or return.
   */
  BasicBlock* ending_bb = bb;
  if (ending_bb->last_mir_insn != NULL) {
    uint32_t ending_flags = analysis_attributes_[ending_bb->last_mir_insn->dalvikInsn.opcode];
    // Walk the fall-through chain until the last instruction is a branch/return.
    // NOTE(review): assumes every block on the chain has a non-null last_mir_insn
    // and a valid fall_through id — confirm against block construction invariants.
    while ((ending_flags & AN_BRANCH) == 0) {
      ending_bb = GetBasicBlock(ending_bb->fall_through);
      ending_flags = analysis_attributes_[ending_bb->last_mir_insn->dalvikInsn.opcode];
    }
  }
  /*
   * Ideally, we'd weight the operations by loop nesting level, but to do so we'd
   * first need to do some expensive loop detection - and the point of this is to make
   * an informed guess before investing in computation.  However, we can cheaply detect
   * many simple loop forms without having to do full dataflow analysis.
   */
  int loop_scale_factor = 1;
  // Simple for and while loops: chain ends in an unconditional branch whose target
  // (or the target's fall-through) loops back to the head block.
  if ((ending_bb->taken != NullBasicBlockId) && (ending_bb->fall_through == NullBasicBlockId)) {
    if ((GetBasicBlock(ending_bb->taken)->taken == bb->id) ||
        (GetBasicBlock(ending_bb->taken)->fall_through == bb->id)) {
      loop_scale_factor = 25;
    }
  }
  // Simple do-while loop: the ending branch jumps straight back to the head block.
  if ((ending_bb->taken != NullBasicBlockId) && (ending_bb->taken == bb->id)) {
    loop_scale_factor = 25;
  }

  // Scan every MIR in the extended block, weighting each by the loop scale factor.
  BasicBlock* tbb = bb;
  bool done = false;
  while (!done) {
    tbb->visited = true;
    for (MIR* mir = tbb->first_mir_insn; mir != NULL; mir = mir->next) {
      if (MIR::DecodedInstruction::IsPseudoMirOp(mir->dalvikInsn.opcode)) {
        // Skip any MIR pseudo-op.
        continue;
      }
      uint32_t flags = analysis_attributes_[mir->dalvikInsn.opcode];
      stats->dex_instructions += loop_scale_factor;
      if ((flags & AN_BRANCH) == 0) {
        // A single non-computational op disqualifies the block as a pure compute loop.
        computational_block &= ((flags & AN_COMPUTATIONAL) != 0);
      } else {
        stats->branch_ops += loop_scale_factor;
      }
      if ((flags & AN_MATH) != 0) {
        stats->math_ops += loop_scale_factor;
        has_math = true;
      }
      if ((flags & AN_FP) != 0) {
        stats->fp_ops += loop_scale_factor;
      }
      if ((flags & AN_ARRAYOP) != 0) {
        stats->array_ops += loop_scale_factor;
      }
      if ((flags & AN_HEAVYWEIGHT) != 0) {
        stats->heavyweight_ops += loop_scale_factor;
      }
      if ((flags & AN_SWITCH) != 0) {
        stats->has_switch = true;
      }
    }
    if (tbb == ending_bb) {
      done = true;
    } else {
      tbb = GetBasicBlock(tbb->fall_through);
    }
  }
  // A looping block made purely of computational ops that include math marks the
  // method as containing a computational hot loop.
  if (has_math && computational_block && (loop_scale_factor > 1)) {
    stats->has_computational_loop = true;
  }
}
994
Andreas Gampe060e6fe2014-06-19 11:34:06 -0700995bool MIRGraph::ComputeSkipCompilation(MethodStats* stats, bool skip_default,
996 std::string* skip_message) {
buzbeeee17e0a2013-07-31 10:47:37 -0700997 float count = stats->dex_instructions;
998 stats->math_ratio = stats->math_ops / count;
999 stats->fp_ratio = stats->fp_ops / count;
1000 stats->branch_ratio = stats->branch_ops / count;
1001 stats->array_ratio = stats->array_ops / count;
1002 stats->heavyweight_ratio = stats->heavyweight_ops / count;
1003
1004 if (cu_->enable_debug & (1 << kDebugShowFilterStats)) {
1005 LOG(INFO) << "STATS " << stats->dex_instructions << ", math:"
1006 << stats->math_ratio << ", fp:"
1007 << stats->fp_ratio << ", br:"
1008 << stats->branch_ratio << ", hw:"
buzbeefe9ca402013-08-21 09:48:11 -07001009 << stats->heavyweight_ratio << ", arr:"
buzbeeee17e0a2013-07-31 10:47:37 -07001010 << stats->array_ratio << ", hot:"
1011 << stats->has_computational_loop << ", "
1012 << PrettyMethod(cu_->method_idx, *cu_->dex_file);
1013 }
1014
1015 // Computation intensive?
1016 if (stats->has_computational_loop && (stats->heavyweight_ratio < 0.04)) {
1017 return false;
1018 }
1019
1020 // Complex, logic-intensive?
Brian Carlstrom6449c622014-02-10 23:48:36 -08001021 if (cu_->compiler_driver->GetCompilerOptions().IsSmallMethod(GetNumDalvikInsns()) &&
buzbeeee17e0a2013-07-31 10:47:37 -07001022 stats->branch_ratio > 0.3) {
1023 return false;
1024 }
1025
1026 // Significant floating point?
1027 if (stats->fp_ratio > 0.05) {
1028 return false;
1029 }
1030
1031 // Significant generic math?
1032 if (stats->math_ratio > 0.3) {
1033 return false;
1034 }
1035
1036 // If array-intensive, compiling is probably worthwhile.
1037 if (stats->array_ratio > 0.1) {
1038 return false;
1039 }
1040
buzbeefe9ca402013-08-21 09:48:11 -07001041 // Switch operations benefit greatly from compilation, so go ahead and spend the cycles.
1042 if (stats->has_switch) {
1043 return false;
1044 }
1045
1046 // If significant in size and high proportion of expensive operations, skip.
Brian Carlstrom6449c622014-02-10 23:48:36 -08001047 if (cu_->compiler_driver->GetCompilerOptions().IsSmallMethod(GetNumDalvikInsns()) &&
buzbeefe9ca402013-08-21 09:48:11 -07001048 (stats->heavyweight_ratio > 0.3)) {
Andreas Gampe060e6fe2014-06-19 11:34:06 -07001049 *skip_message = "Is a small method with heavyweight ratio " +
1050 std::to_string(stats->heavyweight_ratio);
buzbeeee17e0a2013-07-31 10:47:37 -07001051 return true;
1052 }
1053
1054 return skip_default;
1055}
1056
/*
 * Top-level compile/skip decision for the current method.
 * Will eventually want this to be a bit more sophisticated and happen at verification time.
 *
 * Returns true to skip compilation (setting |skip_message| with the reason),
 * false to compile. Applies a sequence of filters in priority order before
 * falling back to the statistical analysis in ComputeSkipCompilation().
 */
bool MIRGraph::SkipCompilation(std::string* skip_message) {
  const CompilerOptions& compiler_options = cu_->compiler_driver->GetCompilerOptions();
  CompilerOptions::CompilerFilter compiler_filter = compiler_options.GetCompilerFilter();
  // kEverything: always compile, no filtering.
  if (compiler_filter == CompilerOptions::kEverything) {
    return false;
  }

  // Contains a pattern we don't want to compile?
  if (PuntToInterpreter()) {
    *skip_message = "Punt to interpreter set";
    return true;
  }

  if (!compiler_options.IsCompilationEnabled()) {
    *skip_message = "Compilation disabled";
    return true;
  }

  // Set up compilation cutoffs based on current filter mode.
  size_t small_cutoff = 0;
  size_t default_cutoff = 0;
  switch (compiler_filter) {
    case CompilerOptions::kBalanced:
      small_cutoff = compiler_options.GetSmallMethodThreshold();
      default_cutoff = compiler_options.GetLargeMethodThreshold();
      break;
    case CompilerOptions::kSpace:
      small_cutoff = compiler_options.GetTinyMethodThreshold();
      default_cutoff = compiler_options.GetSmallMethodThreshold();
      break;
    case CompilerOptions::kSpeed:
      small_cutoff = compiler_options.GetHugeMethodThreshold();
      default_cutoff = compiler_options.GetHugeMethodThreshold();
      break;
    default:
      LOG(FATAL) << "Unexpected compiler_filter_: " << compiler_filter;
  }

  // If size < cutoff, assume we'll compile - but allow removal.
  bool skip_compilation = (GetNumDalvikInsns() >= default_cutoff);
  if (skip_compilation) {
    *skip_message = "#Insns >= default_cutoff: " + std::to_string(GetNumDalvikInsns());
  }

  /*
   * Filter 1: Huge methods are likely to be machine generated, but some aren't.
   * If huge, assume we won't compile, but allow further analysis to turn it back on.
   */
  if (compiler_options.IsHugeMethod(GetNumDalvikInsns())) {
    skip_compilation = true;
    *skip_message = "Huge method: " + std::to_string(GetNumDalvikInsns());
    // If we've got a huge number of basic blocks, don't bother with further analysis.
    if (static_cast<size_t>(num_blocks_) > (compiler_options.GetHugeMethodThreshold() / 2)) {
      return true;
    }
  } else if (compiler_options.IsLargeMethod(GetNumDalvikInsns()) &&
    /* If it's large and contains no branches, it's likely to be machine generated initialization */
      (GetBranchCount() == 0)) {
    *skip_message = "Large method with no branches";
    return true;
  } else if (compiler_filter == CompilerOptions::kSpeed) {
    // If not huge, compile.
    return false;
  }

  // Filter 2: Skip class initializers.
  if (((cu_->access_flags & kAccConstructor) != 0) && ((cu_->access_flags & kAccStatic) != 0)) {
    *skip_message = "Class initializer";
    return true;
  }

  // Filter 3: if this method is a special pattern, go ahead and emit the canned pattern.
  if (cu_->compiler_driver->GetMethodInlinerMap() != nullptr &&
      cu_->compiler_driver->GetMethodInlinerMap()->GetMethodInliner(cu_->dex_file)
          ->IsSpecial(cu_->method_idx)) {
    return false;
  }

  // Filter 4: if small, just compile.
  if (GetNumDalvikInsns() < small_cutoff) {
    return false;
  }

  // Analyze graph for:
  //  o floating point computation
  //  o basic blocks contained in loop with heavy arithmetic.
  //  o proportion of conditional branches.

  MethodStats stats;
  memset(&stats, 0, sizeof(stats));

  ClearAllVisitedFlags();
  AllNodesIterator iter(this);
  for (BasicBlock* bb = iter.Next(); bb != NULL; bb = iter.Next()) {
    AnalyzeBlock(bb, &stats);
  }

  return ComputeSkipCompilation(&stats, skip_compilation, skip_message);
}
1159
Vladimir Markobe0e5462014-02-26 11:24:15 +00001160void MIRGraph::DoCacheFieldLoweringInfo() {
Vladimir Markoa24122d2014-03-07 10:18:14 +00001161 // All IGET/IPUT/SGET/SPUT instructions take 2 code units and there must also be a RETURN.
1162 const uint32_t max_refs = (current_code_item_->insns_size_in_code_units_ - 1u) / 2u;
1163 ScopedArenaAllocator allocator(&cu_->arena_stack);
1164 uint16_t* field_idxs =
1165 reinterpret_cast<uint16_t*>(allocator.Alloc(max_refs * sizeof(uint16_t), kArenaAllocMisc));
Vladimir Markobe0e5462014-02-26 11:24:15 +00001166
1167 // Find IGET/IPUT/SGET/SPUT insns, store IGET/IPUT fields at the beginning, SGET/SPUT at the end.
1168 size_t ifield_pos = 0u;
Vladimir Markoa24122d2014-03-07 10:18:14 +00001169 size_t sfield_pos = max_refs;
Vladimir Markobe0e5462014-02-26 11:24:15 +00001170 AllNodesIterator iter(this);
1171 for (BasicBlock* bb = iter.Next(); bb != nullptr; bb = iter.Next()) {
1172 if (bb->block_type != kDalvikByteCode) {
1173 continue;
1174 }
1175 for (MIR* mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
1176 if (mir->dalvikInsn.opcode >= Instruction::IGET &&
1177 mir->dalvikInsn.opcode <= Instruction::SPUT_SHORT) {
Vladimir Markobe0e5462014-02-26 11:24:15 +00001178 const Instruction* insn = Instruction::At(current_code_item_->insns_ + mir->offset);
Vladimir Markobe0e5462014-02-26 11:24:15 +00001179 // Get field index and try to find it among existing indexes. If found, it's usually among
1180 // the last few added, so we'll start the search from ifield_pos/sfield_pos. Though this
1181 // is a linear search, it actually performs much better than map based approach.
1182 if (mir->dalvikInsn.opcode <= Instruction::IPUT_SHORT) {
Vladimir Markoa24122d2014-03-07 10:18:14 +00001183 uint16_t field_idx = insn->VRegC_22c();
Vladimir Markobe0e5462014-02-26 11:24:15 +00001184 size_t i = ifield_pos;
1185 while (i != 0u && field_idxs[i - 1] != field_idx) {
1186 --i;
1187 }
1188 if (i != 0u) {
1189 mir->meta.ifield_lowering_info = i - 1;
1190 } else {
1191 mir->meta.ifield_lowering_info = ifield_pos;
Vladimir Markoa24122d2014-03-07 10:18:14 +00001192 field_idxs[ifield_pos++] = field_idx;
Vladimir Markobe0e5462014-02-26 11:24:15 +00001193 }
1194 } else {
Vladimir Markoa24122d2014-03-07 10:18:14 +00001195 uint16_t field_idx = insn->VRegB_21c();
Vladimir Markobe0e5462014-02-26 11:24:15 +00001196 size_t i = sfield_pos;
Vladimir Markoa24122d2014-03-07 10:18:14 +00001197 while (i != max_refs && field_idxs[i] != field_idx) {
Vladimir Markobe0e5462014-02-26 11:24:15 +00001198 ++i;
1199 }
Vladimir Markoa24122d2014-03-07 10:18:14 +00001200 if (i != max_refs) {
1201 mir->meta.sfield_lowering_info = max_refs - i - 1u;
Vladimir Markobe0e5462014-02-26 11:24:15 +00001202 } else {
Vladimir Markoa24122d2014-03-07 10:18:14 +00001203 mir->meta.sfield_lowering_info = max_refs - sfield_pos;
Vladimir Markobe0e5462014-02-26 11:24:15 +00001204 field_idxs[--sfield_pos] = field_idx;
1205 }
1206 }
1207 DCHECK_LE(ifield_pos, sfield_pos);
1208 }
1209 }
1210 }
1211
1212 if (ifield_pos != 0u) {
1213 // Resolve instance field infos.
1214 DCHECK_EQ(ifield_lowering_infos_.Size(), 0u);
1215 ifield_lowering_infos_.Resize(ifield_pos);
1216 for (size_t pos = 0u; pos != ifield_pos; ++pos) {
1217 ifield_lowering_infos_.Insert(MirIFieldLoweringInfo(field_idxs[pos]));
1218 }
1219 MirIFieldLoweringInfo::Resolve(cu_->compiler_driver, GetCurrentDexCompilationUnit(),
1220 ifield_lowering_infos_.GetRawStorage(), ifield_pos);
1221 }
1222
Vladimir Markoa24122d2014-03-07 10:18:14 +00001223 if (sfield_pos != max_refs) {
Vladimir Markobe0e5462014-02-26 11:24:15 +00001224 // Resolve static field infos.
1225 DCHECK_EQ(sfield_lowering_infos_.Size(), 0u);
Vladimir Markoa24122d2014-03-07 10:18:14 +00001226 sfield_lowering_infos_.Resize(max_refs - sfield_pos);
1227 for (size_t pos = max_refs; pos != sfield_pos;) {
Vladimir Markobe0e5462014-02-26 11:24:15 +00001228 --pos;
1229 sfield_lowering_infos_.Insert(MirSFieldLoweringInfo(field_idxs[pos]));
1230 }
1231 MirSFieldLoweringInfo::Resolve(cu_->compiler_driver, GetCurrentDexCompilationUnit(),
Vladimir Markoa24122d2014-03-07 10:18:14 +00001232 sfield_lowering_infos_.GetRawStorage(), max_refs - sfield_pos);
Vladimir Markobe0e5462014-02-26 11:24:15 +00001233 }
1234}
1235
Vladimir Markof096aad2014-01-23 15:51:58 +00001236void MIRGraph::DoCacheMethodLoweringInfo() {
1237 static constexpr uint16_t invoke_types[] = { kVirtual, kSuper, kDirect, kStatic, kInterface };
1238
1239 // Embed the map value in the entry to avoid extra padding in 64-bit builds.
1240 struct MapEntry {
1241 // Map key: target_method_idx, invoke_type, devirt_target. Ordered to avoid padding.
1242 const MethodReference* devirt_target;
1243 uint16_t target_method_idx;
1244 uint16_t invoke_type;
1245 // Map value.
1246 uint32_t lowering_info_index;
1247 };
1248
1249 // Sort INVOKEs by method index, then by opcode, then by devirtualization target.
1250 struct MapEntryComparator {
1251 bool operator()(const MapEntry& lhs, const MapEntry& rhs) const {
1252 if (lhs.target_method_idx != rhs.target_method_idx) {
1253 return lhs.target_method_idx < rhs.target_method_idx;
1254 }
1255 if (lhs.invoke_type != rhs.invoke_type) {
1256 return lhs.invoke_type < rhs.invoke_type;
1257 }
1258 if (lhs.devirt_target != rhs.devirt_target) {
1259 if (lhs.devirt_target == nullptr) {
1260 return true;
1261 }
1262 if (rhs.devirt_target == nullptr) {
1263 return false;
1264 }
1265 return devirt_cmp(*lhs.devirt_target, *rhs.devirt_target);
1266 }
1267 return false;
1268 }
1269 MethodReferenceComparator devirt_cmp;
1270 };
1271
Vladimir Markof096aad2014-01-23 15:51:58 +00001272 ScopedArenaAllocator allocator(&cu_->arena_stack);
1273
1274 // All INVOKE instructions take 3 code units and there must also be a RETURN.
1275 uint32_t max_refs = (current_code_item_->insns_size_in_code_units_ - 1u) / 3u;
1276
Vladimir Marko69f08ba2014-04-11 12:28:11 +01001277 // Map invoke key (see MapEntry) to lowering info index and vice versa.
Vladimir Markof096aad2014-01-23 15:51:58 +00001278 // The invoke_map and sequential entries are essentially equivalent to Boost.MultiIndex's
1279 // multi_index_container with one ordered index and one sequential index.
Vladimir Marko69f08ba2014-04-11 12:28:11 +01001280 ScopedArenaSet<MapEntry, MapEntryComparator> invoke_map(MapEntryComparator(),
1281 allocator.Adapter());
Vladimir Markof096aad2014-01-23 15:51:58 +00001282 const MapEntry** sequential_entries = reinterpret_cast<const MapEntry**>(
1283 allocator.Alloc(max_refs * sizeof(sequential_entries[0]), kArenaAllocMisc));
1284
1285 // Find INVOKE insns and their devirtualization targets.
1286 AllNodesIterator iter(this);
1287 for (BasicBlock* bb = iter.Next(); bb != nullptr; bb = iter.Next()) {
1288 if (bb->block_type != kDalvikByteCode) {
1289 continue;
1290 }
1291 for (MIR* mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
1292 if (mir->dalvikInsn.opcode >= Instruction::INVOKE_VIRTUAL &&
1293 mir->dalvikInsn.opcode <= Instruction::INVOKE_INTERFACE_RANGE &&
1294 mir->dalvikInsn.opcode != Instruction::RETURN_VOID_BARRIER) {
1295 // Decode target method index and invoke type.
1296 const Instruction* insn = Instruction::At(current_code_item_->insns_ + mir->offset);
1297 uint16_t target_method_idx;
1298 uint16_t invoke_type_idx;
1299 if (mir->dalvikInsn.opcode <= Instruction::INVOKE_INTERFACE) {
1300 target_method_idx = insn->VRegB_35c();
1301 invoke_type_idx = mir->dalvikInsn.opcode - Instruction::INVOKE_VIRTUAL;
1302 } else {
1303 target_method_idx = insn->VRegB_3rc();
1304 invoke_type_idx = mir->dalvikInsn.opcode - Instruction::INVOKE_VIRTUAL_RANGE;
1305 }
1306
1307 // Find devirtualization target.
1308 // TODO: The devirt map is ordered by the dex pc here. Is there a way to get INVOKEs
1309 // ordered by dex pc as well? That would allow us to keep an iterator to devirt targets
1310 // and increment it as needed instead of making O(log n) lookups.
1311 const VerifiedMethod* verified_method = GetCurrentDexCompilationUnit()->GetVerifiedMethod();
1312 const MethodReference* devirt_target = verified_method->GetDevirtTarget(mir->offset);
1313
1314 // Try to insert a new entry. If the insertion fails, we will have found an old one.
1315 MapEntry entry = {
1316 devirt_target,
1317 target_method_idx,
1318 invoke_types[invoke_type_idx],
1319 static_cast<uint32_t>(invoke_map.size())
1320 };
1321 auto it = invoke_map.insert(entry).first; // Iterator to either the old or the new entry.
1322 mir->meta.method_lowering_info = it->lowering_info_index;
1323 // If we didn't actually insert, this will just overwrite an existing value with the same.
1324 sequential_entries[it->lowering_info_index] = &*it;
1325 }
1326 }
1327 }
1328
1329 if (invoke_map.empty()) {
1330 return;
1331 }
1332
1333 // Prepare unique method infos, set method info indexes for their MIRs.
1334 DCHECK_EQ(method_lowering_infos_.Size(), 0u);
1335 const size_t count = invoke_map.size();
1336 method_lowering_infos_.Resize(count);
1337 for (size_t pos = 0u; pos != count; ++pos) {
1338 const MapEntry* entry = sequential_entries[pos];
1339 MirMethodLoweringInfo method_info(entry->target_method_idx,
1340 static_cast<InvokeType>(entry->invoke_type));
1341 if (entry->devirt_target != nullptr) {
1342 method_info.SetDevirtualizationTarget(*entry->devirt_target);
1343 }
1344 method_lowering_infos_.Insert(method_info);
1345 }
1346 MirMethodLoweringInfo::Resolve(cu_->compiler_driver, GetCurrentDexCompilationUnit(),
1347 method_lowering_infos_.GetRawStorage(), count);
1348}
1349
Andreas Gampe060e6fe2014-06-19 11:34:06 -07001350bool MIRGraph::SkipCompilationByName(const std::string& methodname) {
Dave Allison39c3bfb2014-01-28 18:33:52 -08001351 return cu_->compiler_driver->SkipCompilation(methodname);
1352}
1353
buzbeeee17e0a2013-07-31 10:47:37 -07001354} // namespace art