//===-- AArch64Subtarget.cpp - AArch64 Subtarget Information ----*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the AArch64 specific subclass of TargetSubtarget.
//
//===----------------------------------------------------------------------===//

#include "AArch64Subtarget.h"

#include "AArch64.h"
#include "AArch64InstrInfo.h"
#include "AArch64PBQPRegAlloc.h"
#include "AArch64TargetMachine.h"

#include "AArch64CallLowering.h"
#include "AArch64LegalizerInfo.h"
#include "AArch64RegisterBankInfo.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelect.h"
#include "llvm/CodeGen/MachineScheduler.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/Support/TargetParser.h"

using namespace llvm;

#define DEBUG_TYPE "aarch64-subtarget"

#define GET_SUBTARGETINFO_CTOR
#define GET_SUBTARGETINFO_TARGET_DESC
#include "AArch64GenSubtargetInfo.inc"

static cl::opt<bool>
EnableEarlyIfConvert("aarch64-early-ifcvt", cl::desc("Enable the early if "
                     "converter pass"), cl::init(true), cl::Hidden);

// If the OS supports TBI, use this flag to enable it.
static cl::opt<bool>
UseAddressTopByteIgnored("aarch64-use-tbi", cl::desc("Assume that top byte of "
                         "an address is ignored"), cl::init(false), cl::Hidden);

static cl::opt<bool>
    UseNonLazyBind("aarch64-enable-nonlazybind",
                   cl::desc("Call nonlazybind functions via direct GOT load"),
                   cl::init(false), cl::Hidden);
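
// Illustrative note: these are ordinary cl::opt flags, so with a typical llc
// build they can be toggled from the command line, e.g. (hypothetical
// invocation):
//   llc -mtriple=aarch64-linux-gnu -aarch64-use-tbi \
//       -aarch64-enable-nonlazybind foo.ll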

AArch64Subtarget &
AArch64Subtarget::initializeSubtargetDependencies(StringRef FS,
                                                  StringRef CPUString) {
  // Determine default and user-specified characteristics
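  // Illustrative values only (not fixed by this code): CPUString might be
  // "cortex-a57" and FS a feature string such as "+neon,+crc", as produced by
  // the front end or by -mattr.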

  if (CPUString.empty())
    CPUString = "generic";

  ParseSubtargetFeatures(CPUString, FS);
  initializeProperties();

  return *this;
}

void AArch64Subtarget::initializeProperties() {
  // Initialize CPU specific properties. We should add a tablegen feature for
  // this in the future so we can specify it together with the subtarget
  // features.
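  // Note: the alignment properties below appear to be log2 values at this
  // point, so e.g. PrefFunctionAlignment = 4 requests 16-byte function
  // alignment.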
69 switch (ARMProcFamily) {
70 case Cyclone:
71 CacheLineSize = 64;
72 PrefetchDistance = 280;
73 MinPrefetchStride = 2048;
74 MaxPrefetchIterationsAhead = 3;
75 break;
76 case CortexA57:
77 MaxInterleaveFactor = 4;
Florian Hahnd4550ba2017-07-07 10:43:01 +000078 PrefFunctionAlignment = 4;
Matthias Braun651cff42016-06-02 18:03:53 +000079 break;
Evandro Menezesa3a0a602016-06-10 16:00:18 +000080 case ExynosM1:
Abderrazek Zaafrani9daf8112016-10-21 16:28:27 +000081 MaxInterleaveFactor = 4;
Evandro Menezes7696dc02016-10-25 20:05:42 +000082 MaxJumpTableSize = 8;
Evandro Menezesa3a0a602016-06-10 16:00:18 +000083 PrefFunctionAlignment = 4;
84 PrefLoopAlignment = 3;
85 break;
Evandro Menezes9f9daa12018-01-30 15:40:16 +000086 case ExynosM3:
87 MaxInterleaveFactor = 4;
88 MaxJumpTableSize = 20;
89 PrefFunctionAlignment = 5;
90 PrefLoopAlignment = 4;
91 break;
Chad Rosierecc77272016-11-22 14:25:02 +000092 case Falkor:
93 MaxInterleaveFactor = 4;
Adam Nemete29686e2017-05-15 21:15:01 +000094 // FIXME: remove this to enable 64-bit SLP if performance looks good.
95 MinVectorRegisterBitWidth = 128;
Haicheng Wuef790ff2017-06-12 16:34:19 +000096 CacheLineSize = 128;
97 PrefetchDistance = 820;
98 MinPrefetchStride = 2048;
99 MaxPrefetchIterationsAhead = 8;
Chad Rosierecc77272016-11-22 14:25:02 +0000100 break;
Chad Rosier71070852017-09-25 14:05:00 +0000101 case Saphira:
102 MaxInterleaveFactor = 4;
103 // FIXME: remove this to enable 64-bit SLP if performance looks good.
104 MinVectorRegisterBitWidth = 128;
105 break;
Matthias Braun651cff42016-06-02 18:03:53 +0000106 case Kryo:
107 MaxInterleaveFactor = 4;
108 VectorInsertExtractBaseCost = 2;
Haicheng Wua783bac2016-06-21 22:47:56 +0000109 CacheLineSize = 128;
110 PrefetchDistance = 740;
111 MinPrefetchStride = 1024;
112 MaxPrefetchIterationsAhead = 11;
Adam Nemete29686e2017-05-15 21:15:01 +0000113 // FIXME: remove this to enable 64-bit SLP if performance looks good.
114 MinVectorRegisterBitWidth = 128;
Matthias Braun651cff42016-06-02 18:03:53 +0000115 break;
Joel Jones28520882017-03-07 19:42:40 +0000116 case ThunderX2T99:
117 CacheLineSize = 64;
118 PrefFunctionAlignment = 3;
119 PrefLoopAlignment = 2;
Pankaj Godef4b25542016-06-30 06:42:31 +0000120 MaxInterleaveFactor = 4;
Joel Jones28520882017-03-07 19:42:40 +0000121 PrefetchDistance = 128;
122 MinPrefetchStride = 1024;
123 MaxPrefetchIterationsAhead = 4;
Adam Nemete29686e2017-05-15 21:15:01 +0000124 // FIXME: remove this to enable 64-bit SLP if performance looks good.
125 MinVectorRegisterBitWidth = 128;
Pankaj Godef4b25542016-06-30 06:42:31 +0000126 break;
Joel Jonesab0f3b42017-02-17 18:34:24 +0000127 case ThunderX:
128 case ThunderXT88:
129 case ThunderXT81:
130 case ThunderXT83:
131 CacheLineSize = 128;
Joel Jones28520882017-03-07 19:42:40 +0000132 PrefFunctionAlignment = 3;
133 PrefLoopAlignment = 2;
Adam Nemete29686e2017-05-15 21:15:01 +0000134 // FIXME: remove this to enable 64-bit SLP if performance looks good.
135 MinVectorRegisterBitWidth = 128;
Joel Jonesab0f3b42017-02-17 18:34:24 +0000136 break;
Matthias Braun651cff42016-06-02 18:03:53 +0000137 case CortexA35: break;
Florian Hahn2f86e3d2017-07-29 20:04:54 +0000138 case CortexA53:
139 PrefFunctionAlignment = 3;
140 break;
Sam Parkerb252ffd2017-08-21 08:43:06 +0000141 case CortexA55: break;
Florian Hahne3666ec2017-07-07 10:15:49 +0000142 case CortexA72:
Florian Hahn35300942017-07-18 09:31:18 +0000143 case CortexA73:
Sam Parkerb252ffd2017-08-21 08:43:06 +0000144 case CortexA75:
Florian Hahn35300942017-07-18 09:31:18 +0000145 PrefFunctionAlignment = 4;
146 break;
Evandro Menezesa3a0a602016-06-10 16:00:18 +0000147 case Others: break;
Matthias Braun651cff42016-06-02 18:03:53 +0000148 }
149}

AArch64Subtarget::AArch64Subtarget(const Triple &TT, const std::string &CPU,
                                   const std::string &FS,
                                   const TargetMachine &TM, bool LittleEndian)
    : AArch64GenSubtargetInfo(TT, CPU, FS),
      ReserveX18(AArch64::isX18ReservedByDefault(TT)), IsLittle(LittleEndian),
      TargetTriple(TT), FrameLowering(),
      InstrInfo(initializeSubtargetDependencies(FS, CPU)), TSInfo(),
      TLInfo(TM, *this) {
  CallLoweringInfo.reset(new AArch64CallLowering(*getTargetLowering()));
  Legalizer.reset(new AArch64LegalizerInfo(*this));

  auto *RBI = new AArch64RegisterBankInfo(*getRegisterInfo());

  // FIXME: At this point, we can't rely on Subtarget having RBI.
  // It's awkward to mix passing RBI and the Subtarget; should we pass
  // TII/TRI as well?
  InstSelector.reset(createAArch64InstructionSelector(
      *static_cast<const AArch64TargetMachine *>(&TM), *this, *RBI));

  RegBankInfo.reset(RBI);
}

const CallLowering *AArch64Subtarget::getCallLowering() const {
  return CallLoweringInfo.get();
}

const InstructionSelector *AArch64Subtarget::getInstructionSelector() const {
  return InstSelector.get();
}

const LegalizerInfo *AArch64Subtarget::getLegalizerInfo() const {
  return Legalizer.get();
}

const RegisterBankInfo *AArch64Subtarget::getRegBankInfo() const {
  return RegBankInfo.get();
}

/// Find the target operand flags that describe how a global value should be
/// referenced for the current subtarget.
unsigned char
AArch64Subtarget::ClassifyGlobalReference(const GlobalValue *GV,
                                          const TargetMachine &TM) const {
  // MachO large model always goes via a GOT, simply to get a single 8-byte
  // absolute relocation on all global addresses.
  if (TM.getCodeModel() == CodeModel::Large && isTargetMachO())
    return AArch64II::MO_GOT;

  unsigned Flags = GV->hasDLLImportStorageClass() ? AArch64II::MO_DLLIMPORT
                                                  : AArch64II::MO_NO_FLAG;

  if (!TM.shouldAssumeDSOLocal(*GV->getParent(), GV))
    return AArch64II::MO_GOT | Flags;

  // The small code model's direct accesses use ADRP, which cannot
  // necessarily produce the value 0 (if the code is above 4GB).
  if (useSmallAddressing() && GV->hasExternalWeakLinkage())
    return AArch64II::MO_GOT | Flags;

  return Flags;
}
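
// Illustrative sketch of what the flags selected above typically mean for
// codegen: a GOT-based reference is materialized roughly as
//   adrp x0, :got:sym
//   ldr  x0, [x0, :got_lo12:sym]
// while a direct small-code-model reference becomes
//   adrp x0, sym
//   add  x0, x0, :lo12:sym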

unsigned char AArch64Subtarget::classifyGlobalFunctionReference(
    const GlobalValue *GV, const TargetMachine &TM) const {
  // MachO large model always goes via a GOT, because we don't have the
  // relocations available to do anything else.
  if (TM.getCodeModel() == CodeModel::Large && isTargetMachO() &&
      !GV->hasInternalLinkage())
    return AArch64II::MO_GOT;

  // NonLazyBind goes via GOT unless we know it's available locally.
  auto *F = dyn_cast<Function>(GV);
  if (UseNonLazyBind && F && F->hasFnAttribute(Attribute::NonLazyBind) &&
      !TM.shouldAssumeDSOLocal(*GV->getParent(), GV))
    return AArch64II::MO_GOT;

  return AArch64II::MO_NO_FLAG;
}
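
// Illustrative sketch: with -aarch64-enable-nonlazybind, a call to a
// preemptible nonlazybind function is emitted roughly as
//   adrp x8, :got:callee
//   ldr  x8, [x8, :got_lo12:callee]
//   blr  x8
// rather than a plain "bl callee"; the exact scratch register is chosen by
// register allocation.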

void AArch64Subtarget::overrideSchedPolicy(MachineSchedPolicy &Policy,
                                           unsigned NumRegionInstrs) const {
  // LNT run (at least on Cyclone) showed reasonably significant gains for
  // bi-directional scheduling. 253.perlbmk.
  Policy.OnlyTopDown = false;
  Policy.OnlyBottomUp = false;
  // Enabling or disabling the latency heuristic is a close call: it seems to
  // help nearly no benchmark on out-of-order architectures, while on the
  // other hand it regresses register pressure on a few benchmarks.
  Policy.DisableLatencyHeuristic = DisableLatencySchedHeuristic;
}

bool AArch64Subtarget::enableEarlyIfConversion() const {
  return EnableEarlyIfConvert;
}

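// A brief note on TBI ("top byte ignore"): it is the ARMv8 feature under which
// the hardware ignores bits [63:56] of an address on loads and stores, so the
// top byte can carry a tag. The check below is deliberately conservative: the
// feature is only reported when the user opts in and the target OS is known
// to preserve it.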
bool AArch64Subtarget::supportsAddressTopByteIgnored() const {
  if (!UseAddressTopByteIgnored)
    return false;

  if (TargetTriple.isiOS()) {
    unsigned Major, Minor, Micro;
    TargetTriple.getiOSVersion(Major, Minor, Micro);
    return Major >= 8;
  }

  return false;
}

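// Roughly speaking, the A57ChainingConstraint returned below adds extra PBQP
// register-allocation constraints that balance FP multiply(-accumulate)
// chains across even and odd registers, which suits the Cortex-A57 FP
// pipelines; it is only requested when the balance-fp-ops subtarget feature
// is set.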
std::unique_ptr<PBQPRAConstraint>
AArch64Subtarget::getCustomPBQPConstraints() const {
  return balanceFPOps() ? llvm::make_unique<A57ChainingConstraint>() : nullptr;
}

void AArch64Subtarget::mirFileLoaded(MachineFunction &MF) const {
  // We usually compute max call frame size after ISel. Do the computation now
  // if the .mir file didn't specify it. Note that this will probably give you
  // bogus values after PEI has eliminated the callframe setup/destroy pseudo
  // instructions; specify it explicitly if you need it to be correct.
  MachineFrameInfo &MFI = MF.getFrameInfo();
  if (!MFI.isMaxCallFrameSizeComputed())
    MFI.computeMaxCallFrameSize(MF);
}