/*
 * Copyright (C) 2016 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <string>

#include "prepare_for_register_allocation.h"
#include "scheduler.h"

#ifdef ART_ENABLE_CODEGEN_arm64
#include "scheduler_arm64.h"
#endif

#ifdef ART_ENABLE_CODEGEN_arm
#include "scheduler_arm.h"
#endif

namespace art {

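// Register a dependency edge in the scheduling graph: `node` gets `dependency`
// as a data or non-data ("other") predecessor, unless that edge already exists.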
void SchedulingGraph::AddDependency(SchedulingNode* node,
                                    SchedulingNode* dependency,
                                    bool is_data_dependency) {
  if (node == nullptr || dependency == nullptr) {
    // A `nullptr` node indicates an instruction out of scheduling range (e.g. in
    // another block), so we do not need to add a dependency edge to the graph.
    return;
  }

  if (is_data_dependency) {
    if (!HasImmediateDataDependency(node, dependency)) {
      node->AddDataPredecessor(dependency);
    }
  } else if (!HasImmediateOtherDependency(node, dependency)) {
    node->AddOtherPredecessor(dependency);
  }
}

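// Check whether the combined side effects of two instructions forbid
// reordering them: any of the read-after-write, write-after-read or
// write-after-write hazards below forces an ordering dependency.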
static bool MayHaveReorderingDependency(SideEffects node, SideEffects other) {
  // Read after write.
  if (node.MayDependOn(other)) {
    return true;
  }

  // Write after read.
  if (other.MayDependOn(node)) {
    return true;
  }

  // Memory write after write.
  if (node.DoesAnyWrite() && other.DoesAnyWrite()) {
    return true;
  }

  return false;
}

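// Return the heap location index of an array access, as recorded by the
// HeapLocationCollector during load-store analysis.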
size_t SchedulingGraph::ArrayAccessHeapLocation(HInstruction* array, HInstruction* index) const {
  DCHECK(heap_location_collector_ != nullptr);
  size_t heap_loc = heap_location_collector_->GetArrayAccessHeapLocation(array, index);
  // This array access should have been analyzed and added to the
  // HeapLocationCollector beforehand.
  DCHECK(heap_loc != HeapLocationCollector::kHeapLocationNotFound);
  return heap_loc;
}

bool SchedulingGraph::ArrayAccessMayAlias(const HInstruction* node,
                                          const HInstruction* other) const {
  DCHECK(heap_location_collector_ != nullptr);
  size_t node_heap_loc = ArrayAccessHeapLocation(node->InputAt(0), node->InputAt(1));
  size_t other_heap_loc = ArrayAccessHeapLocation(other->InputAt(0), other->InputAt(1));

  // For example: arr[0] and arr[0]
  if (node_heap_loc == other_heap_loc) {
    return true;
  }

  // For example: arr[0] and arr[i]
  if (heap_location_collector_->MayAlias(node_heap_loc, other_heap_loc)) {
    return true;
  }

  return false;
}

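// Helpers classifying the kind of memory access performed by an instruction.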
static bool IsArrayAccess(const HInstruction* instruction) {
  return instruction->IsArrayGet() || instruction->IsArraySet();
}

static bool IsInstanceFieldAccess(const HInstruction* instruction) {
  return instruction->IsInstanceFieldGet() ||
         instruction->IsInstanceFieldSet() ||
         instruction->IsUnresolvedInstanceFieldGet() ||
         instruction->IsUnresolvedInstanceFieldSet();
}

static bool IsStaticFieldAccess(const HInstruction* instruction) {
  return instruction->IsStaticFieldGet() ||
         instruction->IsStaticFieldSet() ||
         instruction->IsUnresolvedStaticFieldGet() ||
         instruction->IsUnresolvedStaticFieldSet();
}

static bool IsResolvedFieldAccess(const HInstruction* instruction) {
  return instruction->IsInstanceFieldGet() ||
         instruction->IsInstanceFieldSet() ||
         instruction->IsStaticFieldGet() ||
         instruction->IsStaticFieldSet();
}

static bool IsUnresolvedFieldAccess(const HInstruction* instruction) {
  return instruction->IsUnresolvedInstanceFieldGet() ||
         instruction->IsUnresolvedInstanceFieldSet() ||
         instruction->IsUnresolvedStaticFieldGet() ||
         instruction->IsUnresolvedStaticFieldSet();
}

static bool IsFieldAccess(const HInstruction* instruction) {
  return IsResolvedFieldAccess(instruction) || IsUnresolvedFieldAccess(instruction);
}

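// Return the FieldInfo of a resolved field access. Unresolved accesses carry
// no FieldInfo, so reaching the fallthrough case is a logic error.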
static const FieldInfo* GetFieldInfo(const HInstruction* instruction) {
  if (instruction->IsInstanceFieldGet()) {
    return &instruction->AsInstanceFieldGet()->GetFieldInfo();
  } else if (instruction->IsInstanceFieldSet()) {
    return &instruction->AsInstanceFieldSet()->GetFieldInfo();
  } else if (instruction->IsStaticFieldGet()) {
    return &instruction->AsStaticFieldGet()->GetFieldInfo();
  } else if (instruction->IsStaticFieldSet()) {
    return &instruction->AsStaticFieldSet()->GetFieldInfo();
  } else {
    LOG(FATAL) << "Unexpected field access type";
    UNREACHABLE();
  }
}

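// Return the heap location index of a resolved field access, looked up by the
// original reference, the field offset and the declaring class.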
size_t SchedulingGraph::FieldAccessHeapLocation(HInstruction* obj, const FieldInfo* field) const {
  DCHECK(obj != nullptr);
  DCHECK(field != nullptr);
  DCHECK(heap_location_collector_ != nullptr);

  size_t heap_loc = heap_location_collector_->FindHeapLocationIndex(
     heap_location_collector_->FindReferenceInfoOf(
         heap_location_collector_->HuntForOriginalReference(obj)),
     field->GetFieldOffset().SizeValue(),
     nullptr,
     field->GetDeclaringClassDefIndex());
  // This field access should have been analyzed and added to the
  // HeapLocationCollector beforehand.
  DCHECK(heap_loc != HeapLocationCollector::kHeapLocationNotFound);

  return heap_loc;
}

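// Determine whether two field accesses may touch the same heap location.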
bool SchedulingGraph::FieldAccessMayAlias(const HInstruction* node,
                                          const HInstruction* other) const {
  DCHECK(heap_location_collector_ != nullptr);

  // Static and instance field accesses should not alias.
  if ((IsInstanceFieldAccess(node) && IsStaticFieldAccess(other)) ||
      (IsStaticFieldAccess(node) && IsInstanceFieldAccess(other))) {
    return false;
  }

  // If either of the field accesses is unresolved,
  if (IsUnresolvedFieldAccess(node) || IsUnresolvedFieldAccess(other)) {
    // conservatively assume the two accesses may alias.
    return true;
  }

  // Both field accesses are resolved.
  const FieldInfo* node_field = GetFieldInfo(node);
  const FieldInfo* other_field = GetFieldInfo(other);

  size_t node_loc = FieldAccessHeapLocation(node->InputAt(0), node_field);
  size_t other_loc = FieldAccessHeapLocation(other->InputAt(0), other_field);

  if (node_loc == other_loc) {
    return true;
  }

  if (!heap_location_collector_->MayAlias(node_loc, other_loc)) {
    return false;
  }

  return true;
}

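// Check whether the memory effects of `node` and `other` require preserving
// their relative order, using heap location information for disambiguation
// when it is available.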
bool SchedulingGraph::HasMemoryDependency(const HInstruction* node,
                                          const HInstruction* other) const {
  if (!MayHaveReorderingDependency(node->GetSideEffects(), other->GetSideEffects())) {
    return false;
  }

  if (heap_location_collector_ == nullptr ||
      heap_location_collector_->GetNumberOfHeapLocations() == 0) {
    // Without HeapLocation information from load-store analysis,
    // we cannot do any further disambiguation analysis on these two instructions.
    // Simply assume that the two instructions have a memory dependency.
    return true;
  }

  if (IsArrayAccess(node) && IsArrayAccess(other)) {
    return ArrayAccessMayAlias(node, other);
  }
  if (IsFieldAccess(node) && IsFieldAccess(other)) {
    return FieldAccessMayAlias(node, other);
  }

  // TODO(xueliang): LSA to support alias analysis among HVecLoad, HVecStore and ArrayAccess
  if (node->IsVecMemoryOperation() && other->IsVecMemoryOperation()) {
    return true;
  }
  if (node->IsVecMemoryOperation() && IsArrayAccess(other)) {
    return true;
  }
  if (IsArrayAccess(node) && other->IsVecMemoryOperation()) {
    return true;
  }

  // Heap accesses of different kinds should not alias.
  if (IsArrayAccess(node) && IsFieldAccess(other)) {
    return false;
  }
  if (IsFieldAccess(node) && IsArrayAccess(other)) {
    return false;
  }
  if (node->IsVecMemoryOperation() && IsFieldAccess(other)) {
    return false;
  }
  if (IsFieldAccess(node) && other->IsVecMemoryOperation()) {
    return false;
  }

  // We conservatively treat all other cases as having a dependency,
  // for example, Invoke and ArrayGet.
  return true;
}

bool SchedulingGraph::HasExceptionDependency(const HInstruction* node,
                                             const HInstruction* other) const {
  if (other->CanThrow() && node->GetSideEffects().DoesAnyWrite()) {
    return true;
  }
  if (other->GetSideEffects().DoesAnyWrite() && node->CanThrow()) {
    return true;
  }
  if (other->CanThrow() && node->CanThrow()) {
    return true;
  }

  // The checks above should cover all cases where we cannot reorder two
  // instructions that may throw exceptions.
  return false;
}

// Check whether `node` depends on `other`, taking into account `SideEffect`
// information and `CanThrow` information.
bool SchedulingGraph::HasSideEffectDependency(const HInstruction* node,
                                              const HInstruction* other) const {
  if (HasMemoryDependency(node, other)) {
    return true;
  }

  // Even if the memory dependency check above has passed, it is still necessary
  // to check dependencies between instructions that can throw and instructions
  // that write to memory.
  if (HasExceptionDependency(node, other)) {
    return true;
  }

  return false;
}

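// Add all dependencies of `instruction` to the graph: data (define-use)
// dependencies, scheduling barrier dependencies, side-effect dependencies and
// environment dependencies.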
void SchedulingGraph::AddDependencies(HInstruction* instruction, bool is_scheduling_barrier) {
  SchedulingNode* instruction_node = GetNode(instruction);

  // Define-use dependencies.
  for (const HUseListNode<HInstruction*>& use : instruction->GetUses()) {
    AddDataDependency(GetNode(use.GetUser()), instruction_node);
  }

  // Scheduling barrier dependencies.
  DCHECK(!is_scheduling_barrier || contains_scheduling_barrier_);
  if (contains_scheduling_barrier_) {
    // A barrier depends on instructions after it, and instructions before the
    // barrier depend on it.
    for (HInstruction* other = instruction->GetNext(); other != nullptr; other = other->GetNext()) {
      SchedulingNode* other_node = GetNode(other);
      CHECK(other_node != nullptr)
          << other->DebugName()
          << " is in block " << other->GetBlock()->GetBlockId()
          << ", and expected in block " << instruction->GetBlock()->GetBlockId();
      bool other_is_barrier = other_node->IsSchedulingBarrier();
      if (is_scheduling_barrier || other_is_barrier) {
        AddOtherDependency(other_node, instruction_node);
      }
      if (other_is_barrier) {
        // This other scheduling barrier guarantees ordering of instructions after
        // it, so avoid creating additional useless dependencies in the graph.
        // For example if we have
        //     instr_1
        //     barrier_2
        //     instr_3
        //     barrier_4
        //     instr_5
        // we only create the following non-data dependencies
        //     1 -> 2
        //     2 -> 3
        //     2 -> 4
        //     3 -> 4
        //     4 -> 5
        // and do not create
        //     1 -> 4
        //     2 -> 5
        // Note that in this example we could also avoid creating the dependency
        // `2 -> 4`.  But if we remove `instr_3` that dependency is required to
        // order the barriers. So we generate it to avoid a special case.
        break;
      }
    }
  }

  // Side effect dependencies.
  if (!instruction->GetSideEffects().DoesNothing() || instruction->CanThrow()) {
    for (HInstruction* other = instruction->GetNext(); other != nullptr; other = other->GetNext()) {
      SchedulingNode* other_node = GetNode(other);
      if (other_node->IsSchedulingBarrier()) {
        // We have reached a scheduling barrier so we can stop further
        // processing.
        DCHECK(HasImmediateOtherDependency(other_node, instruction_node));
        break;
      }
      if (HasSideEffectDependency(other, instruction)) {
        AddOtherDependency(other_node, instruction_node);
      }
    }
  }

  // Environment dependencies.
  // We do not need to process those if the instruction is a scheduling barrier,
  // since the barrier already has non-data dependencies on all following
  // instructions.
  if (!is_scheduling_barrier) {
    for (const HUseListNode<HEnvironment*>& use : instruction->GetEnvUses()) {
      // Note that here we could stop processing if the environment holder is
      // across a scheduling barrier. But checking this would likely require
      // more work than simply iterating through environment uses.
      AddOtherDependency(GetNode(use.GetUser()->GetHolder()), instruction_node);
    }
  }
}

bool SchedulingGraph::HasImmediateDataDependency(const SchedulingNode* node,
                                                 const SchedulingNode* other) const {
  return ContainsElement(node->GetDataPredecessors(), other);
}

bool SchedulingGraph::HasImmediateDataDependency(const HInstruction* instruction,
                                                 const HInstruction* other_instruction) const {
  const SchedulingNode* node = GetNode(instruction);
  const SchedulingNode* other = GetNode(other_instruction);
  if (node == nullptr || other == nullptr) {
    // Both instructions must be in the current basic block, i.e. the SchedulingGraph can see
    // their corresponding SchedulingNodes in the graph, and tell whether there is a dependency.
    // Otherwise there is no dependency from the SchedulingGraph's perspective, for example when
    // instruction and other_instruction are in different basic blocks.
    return false;
  }
  return HasImmediateDataDependency(node, other);
}

bool SchedulingGraph::HasImmediateOtherDependency(const SchedulingNode* node,
                                                  const SchedulingNode* other) const {
  return ContainsElement(node->GetOtherPredecessors(), other);
}

bool SchedulingGraph::HasImmediateOtherDependency(const HInstruction* instruction,
                                                  const HInstruction* other_instruction) const {
  const SchedulingNode* node = GetNode(instruction);
  const SchedulingNode* other = GetNode(other_instruction);
  if (node == nullptr || other == nullptr) {
    // Both instructions must be in the current basic block, i.e. the SchedulingGraph can see
    // their corresponding SchedulingNodes in the graph, and tell whether there is a dependency.
    // Otherwise there is no dependency from the SchedulingGraph's perspective, for example when
    // instruction and other_instruction are in different basic blocks.
    return false;
  }
  return HasImmediateOtherDependency(node, other);
}

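// Build a short typed identifier for an instruction, e.g. "i20" for the
// int-typed instruction with id 20, mirroring the `HGraphVisualizer` naming.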
static const std::string InstructionTypeId(const HInstruction* instruction) {
  std::string id;
  Primitive::Type type = instruction->GetType();
  if (type == Primitive::kPrimNot) {
    id.append("l");
  } else {
    id.append(Primitive::Descriptor(instruction->GetType()));
  }
  // Use lower-case to be closer to the `HGraphVisualizer` output.
  id[0] = std::tolower(id[0]);
  id.append(std::to_string(instruction->GetId()));
  return id;
}

// Ideally we would reuse the graph visualizer code, but it is not available
// from here and it is not worth moving all that code only for our use.
static void DumpAsDotNode(std::ostream& output, const SchedulingNode* node) {
  const HInstruction* instruction = node->GetInstruction();
  // Use the instruction typed id as the node identifier.
  std::string instruction_id = InstructionTypeId(instruction);
  output << instruction_id << "[shape=record, label=\""
      << instruction_id << ' ' << instruction->DebugName() << " [";
  // List the instruction's inputs in its description. When visualizing the
  // graph this helps differentiating data inputs from other dependencies.
  const char* separator = "";
  for (const HInstruction* input : instruction->GetInputs()) {
    output << separator << InstructionTypeId(input);
    separator = ",";
  }
  output << "]";
  // Other properties of the node.
  output << "\\ninternal_latency: " << node->GetInternalLatency();
  output << "\\ncritical_path: " << node->GetCriticalPath();
  if (node->IsSchedulingBarrier()) {
    output << "\\n(barrier)";
  }
  output << "\"];\n";
  // We want program order to go from top to bottom in the graph output, so we
  // reverse the edges and specify `dir=back`.
  for (const SchedulingNode* predecessor : node->GetDataPredecessors()) {
    const HInstruction* predecessor_instruction = predecessor->GetInstruction();
    output << InstructionTypeId(predecessor_instruction) << ":s -> " << instruction_id << ":n "
        << "[label=\"" << predecessor->GetLatency() << "\",dir=back]\n";
  }
  for (const SchedulingNode* predecessor : node->GetOtherPredecessors()) {
    const HInstruction* predecessor_instruction = predecessor->GetInstruction();
    output << InstructionTypeId(predecessor_instruction) << ":s -> " << instruction_id << ":n "
        << "[dir=back,color=blue]\n";
  }
}

void SchedulingGraph::DumpAsDotGraph(const std::string& description,
                                     const ArenaVector<SchedulingNode*>& initial_candidates) {
  // TODO(xueliang): ideally we should move scheduling information into HInstruction, after that
  // we should move this dotty graph dump feature to visualizer, and have a compiler option for it.
  std::ofstream output("scheduling_graphs.dot", std::ofstream::out | std::ofstream::app);
  // Description of this graph, as a comment.
  output << "// " << description << "\n";
  // Start the dot graph. Use an increasing index for easier differentiation.
  output << "digraph G {\n";
  for (const auto& entry : nodes_map_) {
    SchedulingNode* node = entry.second;
    DumpAsDotNode(output, node);
  }
  // Create a fake 'end_of_scheduling' node to help visualization of critical_paths.
  for (SchedulingNode* node : initial_candidates) {
    const HInstruction* instruction = node->GetInstruction();
    output << InstructionTypeId(instruction) << ":s -> end_of_scheduling:n "
      << "[label=\"" << node->GetLatency() << "\",dir=back]\n";
  }
  // End of the dot graph.
  output << "}\n";
  output.close();
}

SchedulingNode* CriticalPathSchedulingNodeSelector::SelectMaterializedCondition(
    ArenaVector<SchedulingNode*>* nodes, const SchedulingGraph& graph) const {
  // Schedule condition inputs that can be materialized immediately before their use.
  // In the following example, after we've scheduled HSelect, we want LessThan to be scheduled
  // immediately, because it is a materialized condition, and will be emitted right before HSelect
  // in the codegen phase.
  //
  // i20 HLessThan [...]                  HLessThan    HAdd      HAdd
  // i21 HAdd [...]                ===>      |          |         |
  // i22 HAdd [...]                          +----------+---------+
  // i23 HSelect [i21, i22, i20]                     HSelect

  if (prev_select_ == nullptr) {
    return nullptr;
  }

  const HInstruction* instruction = prev_select_->GetInstruction();
  const HCondition* condition = nullptr;
  DCHECK(instruction != nullptr);

  if (instruction->IsIf()) {
    condition = instruction->AsIf()->InputAt(0)->AsCondition();
  } else if (instruction->IsSelect()) {
    condition = instruction->AsSelect()->GetCondition()->AsCondition();
  }

  SchedulingNode* condition_node = (condition != nullptr) ? graph.GetNode(condition) : nullptr;

  if ((condition_node != nullptr) &&
      condition->HasOnlyOneNonEnvironmentUse() &&
      ContainsElement(*nodes, condition_node)) {
    DCHECK(!condition_node->HasUnscheduledSuccessors());
    // Remove the condition from the list of candidates and schedule it.
    RemoveElement(*nodes, condition_node);
    return condition_node;
  }

  return nullptr;
}

SchedulingNode* CriticalPathSchedulingNodeSelector::PopHighestPriorityNode(
    ArenaVector<SchedulingNode*>* nodes, const SchedulingGraph& graph) {
  DCHECK(!nodes->empty());
  SchedulingNode* select_node = nullptr;

  // Optimize for the materialized-condition, emit-before-use scenario.
  select_node = SelectMaterializedCondition(nodes, graph);

  if (select_node == nullptr) {
    // Get the highest priority node based on critical path information.
    select_node = (*nodes)[0];
    size_t select = 0;
    for (size_t i = 1, e = nodes->size(); i < e; i++) {
      SchedulingNode* check = (*nodes)[i];
      SchedulingNode* candidate = (*nodes)[select];
      select_node = GetHigherPrioritySchedulingNode(candidate, check);
      if (select_node == check) {
        select = i;
      }
    }
    DeleteNodeAtIndex(nodes, select);
  }

  prev_select_ = select_node;
  return select_node;
}

SchedulingNode* CriticalPathSchedulingNodeSelector::GetHigherPrioritySchedulingNode(
    SchedulingNode* candidate, SchedulingNode* check) const {
  uint32_t candidate_path = candidate->GetCriticalPath();
  uint32_t check_path = check->GetCriticalPath();
  // First look at the critical_path.
  if (check_path != candidate_path) {
    return check_path < candidate_path ? check : candidate;
  }
  // If both critical paths are equal, schedule instructions with a higher latency
  // first in program order.
  return check->GetLatency() < candidate->GetLatency() ? check : candidate;
}

void HScheduler::Schedule(HGraph* graph) {
  for (HBasicBlock* block : graph->GetReversePostOrder()) {
    if (IsSchedulable(block)) {
      Schedule(block);
    }
  }
}

void HScheduler::Schedule(HBasicBlock* block) {
  ArenaVector<SchedulingNode*> scheduling_nodes(arena_->Adapter(kArenaAllocScheduler));

  // Build the scheduling graph.
  scheduling_graph_.Clear();

  // Only perform LSA/HeapLocation analysis on the basic block that is about to
  // be scheduled.
  HeapLocationCollector heap_location_collector(block->GetGraph());
  heap_location_collector.VisitBasicBlock(block);
  heap_location_collector.BuildAliasingMatrix();
  scheduling_graph_.SetHeapLocationCollector(heap_location_collector);

  for (HBackwardInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) {
    HInstruction* instruction = it.Current();
    CHECK_EQ(instruction->GetBlock(), block)
        << instruction->DebugName()
        << " is in block " << instruction->GetBlock()->GetBlockId()
        << ", and expected in block " << block->GetBlockId();
    SchedulingNode* node = scheduling_graph_.AddNode(instruction, IsSchedulingBarrier(instruction));
    CalculateLatency(node);
    scheduling_nodes.push_back(node);
  }

  if (scheduling_graph_.Size() <= 1) {
    scheduling_graph_.Clear();
    return;
  }

  cursor_ = block->GetLastInstruction();

  // Find the initial candidates for scheduling.
  candidates_.clear();
  for (SchedulingNode* node : scheduling_nodes) {
    if (!node->HasUnscheduledSuccessors()) {
      node->MaybeUpdateCriticalPath(node->GetLatency());
      candidates_.push_back(node);
    }
  }

  ArenaVector<SchedulingNode*> initial_candidates(arena_->Adapter(kArenaAllocScheduler));
  if (kDumpDotSchedulingGraphs) {
    // Remember the list of initial candidates for debug output purposes.
    initial_candidates.assign(candidates_.begin(), candidates_.end());
  }

  // Schedule all nodes.
  while (!candidates_.empty()) {
    Schedule(selector_->PopHighestPriorityNode(&candidates_, scheduling_graph_));
  }

  if (kDumpDotSchedulingGraphs) {
    // Dump the graph in `dot` format.
    HGraph* graph = block->GetGraph();
    std::stringstream description;
    description << graph->GetDexFile().PrettyMethod(graph->GetMethodIdx())
        << " B" << block->GetBlockId();
    scheduling_graph_.DumpAsDotGraph(description.str(), initial_candidates);
  }
}

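// Commit a node: update the critical paths of its data predecessors and
// promote any predecessor whose successors are now all scheduled to the
// candidate list.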
void HScheduler::Schedule(SchedulingNode* scheduling_node) {
  // Check whether any of the node's predecessors will be valid candidates after
  // this node is scheduled.
  uint32_t path_to_node = scheduling_node->GetCriticalPath();
  for (SchedulingNode* predecessor : scheduling_node->GetDataPredecessors()) {
    predecessor->MaybeUpdateCriticalPath(
        path_to_node + predecessor->GetInternalLatency() + predecessor->GetLatency());
    predecessor->DecrementNumberOfUnscheduledSuccessors();
    if (!predecessor->HasUnscheduledSuccessors()) {
      candidates_.push_back(predecessor);
    }
  }
  for (SchedulingNode* predecessor : scheduling_node->GetOtherPredecessors()) {
    // Do not update the critical path.
    // The 'other' (so 'non-data') dependencies (usually) do not represent a
    // 'material' dependency of nodes on others. They exist for program
    // correctness. So we do not use them to compute the critical path.
    predecessor->DecrementNumberOfUnscheduledSuccessors();
    if (!predecessor->HasUnscheduledSuccessors()) {
      candidates_.push_back(predecessor);
    }
  }

  Schedule(scheduling_node->GetInstruction());
}

// Move an instruction after the cursor instruction within a basic block.
static void MoveAfterInBlock(HInstruction* instruction, HInstruction* cursor) {
  DCHECK_EQ(instruction->GetBlock(), cursor->GetBlock());
  DCHECK_NE(cursor, cursor->GetBlock()->GetLastInstruction());
  DCHECK(!instruction->IsControlFlow());
  DCHECK(!cursor->IsControlFlow());
  instruction->MoveBefore(cursor->GetNext(), /* do_checks */ false);
}

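// Emit `instruction` at the current cursor position. The block is scheduled
// backwards, so if the instruction already sits at the cursor we only move the
// cursor up; otherwise the instruction is moved to just after the cursor.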
void HScheduler::Schedule(HInstruction* instruction) {
  if (instruction == cursor_) {
    cursor_ = cursor_->GetPrevious();
  } else {
    MoveAfterInBlock(instruction, cursor_);
  }
}

bool HScheduler::IsSchedulable(const HInstruction* instruction) const {
  // We want to avoid exhaustively listing all instructions, so we first check
  // for instruction categories that we know are safe.
  if (instruction->IsControlFlow() ||
      instruction->IsConstant()) {
    return true;
  }
  // Currently all unary and binary operations are safe to schedule, so avoid
  // checking for each of them individually.
  // Since nothing prevents a new scheduling-unsafe HInstruction from subclassing
  // HUnaryOperation (or HBinaryOperation), check in debug mode that we have
  // the exhaustive lists here.
  if (instruction->IsUnaryOperation()) {
    DCHECK(instruction->IsBooleanNot() ||
           instruction->IsNot() ||
           instruction->IsNeg()) << "unexpected instruction " << instruction->DebugName();
    return true;
  }
  if (instruction->IsBinaryOperation()) {
    DCHECK(instruction->IsAdd() ||
           instruction->IsAnd() ||
           instruction->IsCompare() ||
           instruction->IsCondition() ||
           instruction->IsDiv() ||
           instruction->IsMul() ||
           instruction->IsOr() ||
           instruction->IsRem() ||
           instruction->IsRor() ||
           instruction->IsShl() ||
           instruction->IsShr() ||
           instruction->IsSub() ||
           instruction->IsUShr() ||
           instruction->IsXor()) << "unexpected instruction " << instruction->DebugName();
    return true;
  }
  // The scheduler should not see any of these.
  DCHECK(!instruction->IsParallelMove()) << "unexpected instruction " << instruction->DebugName();
  // List of instructions explicitly excluded:
  //    HClearException
  //    HClinitCheck
  //    HDeoptimize
  //    HLoadClass
  //    HLoadException
  //    HMemoryBarrier
  //    HMonitorOperation
  //    HNativeDebugInfo
  //    HThrow
  //    HTryBoundary
  // TODO: Some of the instructions above may be safe to schedule (maybe as
  // scheduling barriers).
  return instruction->IsArrayGet() ||
      instruction->IsArraySet() ||
      instruction->IsArrayLength() ||
      instruction->IsBoundType() ||
      instruction->IsBoundsCheck() ||
      instruction->IsCheckCast() ||
      instruction->IsClassTableGet() ||
      instruction->IsCurrentMethod() ||
      instruction->IsDivZeroCheck() ||
      instruction->IsInstanceFieldGet() ||
      instruction->IsInstanceFieldSet() ||
      instruction->IsInstanceOf() ||
      instruction->IsInvokeInterface() ||
      instruction->IsInvokeStaticOrDirect() ||
      instruction->IsInvokeUnresolved() ||
      instruction->IsInvokeVirtual() ||
      instruction->IsLoadString() ||
      instruction->IsNewArray() ||
      instruction->IsNewInstance() ||
      instruction->IsNullCheck() ||
      instruction->IsPackedSwitch() ||
      instruction->IsParameterValue() ||
      instruction->IsPhi() ||
      instruction->IsReturn() ||
      instruction->IsReturnVoid() ||
      instruction->IsSelect() ||
      instruction->IsStaticFieldGet() ||
      instruction->IsStaticFieldSet() ||
      instruction->IsSuspendCheck() ||
      instruction->IsTypeConversion() ||
      instruction->IsUnresolvedInstanceFieldGet() ||
      instruction->IsUnresolvedInstanceFieldSet() ||
      instruction->IsUnresolvedStaticFieldGet() ||
      instruction->IsUnresolvedStaticFieldSet();
}

bool HScheduler::IsSchedulable(const HBasicBlock* block) const {
  // We may only be interested in loop blocks.
  if (only_optimize_loop_blocks_ && !block->IsInLoop()) {
    return false;
  }
  if (block->GetTryCatchInformation() != nullptr) {
    // Do not schedule blocks that are part of try-catch, because the scheduler
    // cannot tell whether the catch block makes assumptions about the
    // instruction order in the try block. In the following example, if we
    // enabled the scheduler for the try block, MultiplyAccumulate could be
    // scheduled before DivZeroCheck, which could result in an incorrect value
    // in the catch block.
    //   try {
    //     a = a/b;    // DivZeroCheck
    //                 // Div
    //     c = c*d+e;  // MultiplyAccumulate
    //   } catch {System.out.print(c); }
    return false;
  }
  // Check whether all instructions in this block are schedulable.
  for (HInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) {
    if (!IsSchedulable(it.Current())) {
      return false;
    }
  }
  return true;
}

bool HScheduler::IsSchedulingBarrier(const HInstruction* instr) const {
  return instr->IsControlFlow() ||
      // Don't break calling convention.
      instr->IsParameterValue() ||
      // Code generation of goto relies on SuspendCheck's position.
      instr->IsSuspendCheck();
}

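// Entry point of the scheduling pass: pick a node selector (critical-path
// based by default, random when requested) and dispatch to the scheduler for
// the target instruction set.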
void HInstructionScheduling::Run(bool only_optimize_loop_blocks,
                                 bool schedule_randomly) {
#if defined(ART_ENABLE_CODEGEN_arm64) || defined(ART_ENABLE_CODEGEN_arm)
  // Phase-local allocator that allocates scheduler internal data structures like
  // scheduling nodes, the internal nodes map, dependencies, etc.
  ArenaAllocator arena_allocator(graph_->GetArena()->GetArenaPool());
  CriticalPathSchedulingNodeSelector critical_path_selector;
  RandomSchedulingNodeSelector random_selector;
  SchedulingNodeSelector* selector = schedule_randomly
      ? static_cast<SchedulingNodeSelector*>(&random_selector)
      : static_cast<SchedulingNodeSelector*>(&critical_path_selector);
#else
  // Avoid a compilation error when compiling for an unsupported instruction set.
  UNUSED(only_optimize_loop_blocks);
  UNUSED(schedule_randomly);
  UNUSED(codegen_);
#endif

  switch (instruction_set_) {
#ifdef ART_ENABLE_CODEGEN_arm64
    case kArm64: {
      arm64::HSchedulerARM64 scheduler(&arena_allocator, selector);
      scheduler.SetOnlyOptimizeLoopBlocks(only_optimize_loop_blocks);
      scheduler.Schedule(graph_);
      break;
    }
#endif
#if defined(ART_ENABLE_CODEGEN_arm)
    case kThumb2:
    case kArm: {
      arm::SchedulingLatencyVisitorARM arm_latency_visitor(codegen_);
      arm::HSchedulerARM scheduler(&arena_allocator, selector, &arm_latency_visitor);
      scheduler.SetOnlyOptimizeLoopBlocks(only_optimize_loop_blocks);
      scheduler.Schedule(graph_);
      break;
    }
#endif
    default:
      break;
  }
}

}  // namespace art