blob: 511aa55919f0e82fe7742e68d43bcfac6a7f069b [file] [log] [blame]
Clement Courbet44b4c542018-06-19 11:28:59 +00001//===-- Target.cpp ----------------------------------------------*- C++ -*-===//
2//
3// The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9#include "../Target.h"
10
Clement Courbet4860b982018-06-26 08:49:30 +000011#include "../Latency.h"
12#include "../Uops.h"
Clement Courbet717c9762018-06-28 07:41:16 +000013#include "MCTargetDesc/X86BaseInfo.h"
Clement Courbeta51efc22018-06-25 13:12:02 +000014#include "MCTargetDesc/X86MCTargetDesc.h"
Clement Courbet6fd00e32018-06-20 11:54:35 +000015#include "X86.h"
Clement Courbeta51efc22018-06-25 13:12:02 +000016#include "X86RegisterInfo.h"
Clement Courbete7851692018-07-03 06:17:05 +000017#include "X86Subtarget.h"
Clement Courbeta51efc22018-06-25 13:12:02 +000018#include "llvm/MC/MCInstBuilder.h"
Clement Courbet6fd00e32018-06-20 11:54:35 +000019
Clement Courbet44b4c542018-06-19 11:28:59 +000020namespace exegesis {
21
22namespace {
23
Clement Courbet717c9762018-06-28 07:41:16 +000024// Common code for X86 Uops and Latency runners.
25template <typename Impl> class X86BenchmarkRunner : public Impl {
26 using Impl::Impl;
Clement Courbet4860b982018-06-26 08:49:30 +000027
Guillaume Chatelete60866a2018-08-03 09:29:38 +000028 llvm::Expected<CodeTemplate>
29 generateCodeTemplate(unsigned Opcode) const override {
Clement Courbet717c9762018-06-28 07:41:16 +000030 // Test whether we can generate a snippet for this instruction.
31 const auto &InstrInfo = this->State.getInstrInfo();
32 const auto OpcodeName = InstrInfo.getName(Opcode);
33 if (OpcodeName.startswith("POPF") || OpcodeName.startswith("PUSHF") ||
34 OpcodeName.startswith("ADJCALLSTACK")) {
35 return llvm::make_error<BenchmarkFailure>(
36 "Unsupported opcode: Push/Pop/AdjCallStack");
Clement Courbet4860b982018-06-26 08:49:30 +000037 }
Clement Courbet717c9762018-06-28 07:41:16 +000038
39 // Handle X87.
40 const auto &InstrDesc = InstrInfo.get(Opcode);
41 const unsigned FPInstClass = InstrDesc.TSFlags & llvm::X86II::FPTypeMask;
42 const Instruction Instr(InstrDesc, this->RATC);
43 switch (FPInstClass) {
44 case llvm::X86II::NotFP:
45 break;
46 case llvm::X86II::ZeroArgFP:
Clement Courbetf9a0bb32018-07-05 13:54:51 +000047 return llvm::make_error<BenchmarkFailure>("Unsupported x87 ZeroArgFP");
Clement Courbet717c9762018-06-28 07:41:16 +000048 case llvm::X86II::OneArgFP:
Clement Courbetf9a0bb32018-07-05 13:54:51 +000049 return llvm::make_error<BenchmarkFailure>("Unsupported x87 OneArgFP");
Clement Courbet717c9762018-06-28 07:41:16 +000050 case llvm::X86II::OneArgFPRW:
51 case llvm::X86II::TwoArgFP: {
52 // These are instructions like
53 // - `ST(0) = fsqrt(ST(0))` (OneArgFPRW)
54 // - `ST(0) = ST(0) + ST(i)` (TwoArgFP)
55 // They are intrinsically serial and do not modify the state of the stack.
56 // We generate the same code for latency and uops.
Guillaume Chatelete60866a2018-08-03 09:29:38 +000057 return this->generateSelfAliasingCodeTemplate(Instr);
Clement Courbet717c9762018-06-28 07:41:16 +000058 }
59 case llvm::X86II::CompareFP:
60 return Impl::handleCompareFP(Instr);
61 case llvm::X86II::CondMovFP:
62 return Impl::handleCondMovFP(Instr);
63 case llvm::X86II::SpecialFP:
Clement Courbetf9a0bb32018-07-05 13:54:51 +000064 return llvm::make_error<BenchmarkFailure>("Unsupported x87 SpecialFP");
Clement Courbet717c9762018-06-28 07:41:16 +000065 default:
66 llvm_unreachable("Unknown FP Type!");
67 }
68
69 // Fallback to generic implementation.
Guillaume Chatelete60866a2018-08-03 09:29:38 +000070 return Impl::Base::generateCodeTemplate(Opcode);
Clement Courbet4860b982018-06-26 08:49:30 +000071 }
72};
73
Clement Courbet717c9762018-06-28 07:41:16 +000074class X86LatencyImpl : public LatencyBenchmarkRunner {
75protected:
76 using Base = LatencyBenchmarkRunner;
77 using Base::Base;
Guillaume Chatelete60866a2018-08-03 09:29:38 +000078 llvm::Expected<CodeTemplate> handleCompareFP(const Instruction &Instr) const {
Clement Courbet717c9762018-06-28 07:41:16 +000079 return llvm::make_error<BenchmarkFailure>("Unsupported x87 CompareFP");
80 }
Guillaume Chatelete60866a2018-08-03 09:29:38 +000081 llvm::Expected<CodeTemplate> handleCondMovFP(const Instruction &Instr) const {
Clement Courbet717c9762018-06-28 07:41:16 +000082 return llvm::make_error<BenchmarkFailure>("Unsupported x87 CondMovFP");
83 }
Clement Courbet717c9762018-06-28 07:41:16 +000084};
85
86class X86UopsImpl : public UopsBenchmarkRunner {
87protected:
88 using Base = UopsBenchmarkRunner;
89 using Base::Base;
Clement Courbetf9a0bb32018-07-05 13:54:51 +000090 // We can compute uops for any FP instruction that does not grow or shrink the
91 // stack (either do not touch the stack or push as much as they pop).
Guillaume Chatelete60866a2018-08-03 09:29:38 +000092 llvm::Expected<CodeTemplate> handleCompareFP(const Instruction &Instr) const {
93 return generateUnconstrainedCodeTemplate(
Clement Courbetf9a0bb32018-07-05 13:54:51 +000094 Instr, "instruction does not grow/shrink the FP stack");
Clement Courbet717c9762018-06-28 07:41:16 +000095 }
Guillaume Chatelete60866a2018-08-03 09:29:38 +000096 llvm::Expected<CodeTemplate> handleCondMovFP(const Instruction &Instr) const {
97 return generateUnconstrainedCodeTemplate(
Clement Courbetf9a0bb32018-07-05 13:54:51 +000098 Instr, "instruction does not grow/shrink the FP stack");
Clement Courbet4860b982018-06-26 08:49:30 +000099 }
100};
101
// ExegesisTarget hooks for X86 (both 32- and 64-bit triples).
class ExegesisX86Target : public ExegesisTarget {
  // Adds codegen passes that must run before benchmarking X86 snippets.
  void addTargetSpecificPasses(llvm::PassManagerBase &PM) const override {
    // Lowers FP pseudo-instructions, e.g. ABS_Fp32 -> ABS_F.
    PM.add(llvm::createX86FloatingPointStackifierPass());
  }

  // Returns the GPR holding the scratch-memory base address, or 0 when
  // scratch memory is unsupported for this triple (currently all 32-bit
  // targets). NOTE(review): RCX/RDI match the first integer argument of the
  // Win64 / SysV calling conventions — presumably the scratch pointer is
  // passed as the snippet's first argument; confirm against the caller.
  unsigned getScratchMemoryRegister(const llvm::Triple &TT) const override {
    if (!TT.isArch64Bit()) {
      // FIXME: This would require popping from the stack, so we would have to
      // add some additional setup code.
      return 0;
    }
    return TT.isOSWindows() ? llvm::X86::RCX : llvm::X86::RDI;
  }

  // Widest memory access the generated snippets may perform, in bytes
  // (matches the 64-byte ZMM loads used in setRegToConstant below).
  unsigned getMaxMemoryAccessSize() const override { return 64; }

  // Points every explicit memory reference of IB's instruction at
  // [Reg + Offset] by filling the canonical 5-operand x86 memory group:
  // BaseReg, ScaleAmt, IndexReg, Disp, Segment.
  void fillMemoryOperands(InstructionBuilder &IB, unsigned Reg,
                          unsigned Offset) const override {
    // FIXME: For instructions that read AND write to memory, we use the same
    // value for input and output.
    for (size_t I = 0, E = IB.Instr.Operands.size(); I < E; ++I) {
      const Operand *Op = &IB.Instr.Operands[I];
      if (Op->IsExplicit && Op->IsMem) {
        // Case 1: 5-op memory.
        assert((I + 5 <= E) && "x86 memory references are always 5 ops");
        IB.getValueFor(*Op) = llvm::MCOperand::createReg(Reg); // BaseReg
        Op = &IB.Instr.Operands[++I];
        assert(Op->IsMem);
        assert(Op->IsExplicit);
        IB.getValueFor(*Op) = llvm::MCOperand::createImm(1); // ScaleAmt
        Op = &IB.Instr.Operands[++I];
        assert(Op->IsMem);
        assert(Op->IsExplicit);
        IB.getValueFor(*Op) = llvm::MCOperand::createReg(0); // IndexReg
        Op = &IB.Instr.Operands[++I];
        assert(Op->IsMem);
        assert(Op->IsExplicit);
        IB.getValueFor(*Op) = llvm::MCOperand::createImm(Offset); // Disp
        Op = &IB.Instr.Operands[++I];
        assert(Op->IsMem);
        assert(Op->IsExplicit);
        IB.getValueFor(*Op) = llvm::MCOperand::createReg(0); // Segment
        // Case2: segment:index addressing. We assume that ES is 0.
      }
    }
  }

  // Returns an instruction sequence that sets `Reg` to a constant value, or
  // an empty vector when `Reg` belongs to a register class we do not handle.
  std::vector<llvm::MCInst> setRegToConstant(const llvm::MCSubtargetInfo &STI,
                                             unsigned Reg) const override {
    // GPR: a single move-immediate suffices.
    if (llvm::X86::GR8RegClass.contains(Reg))
      return {llvm::MCInstBuilder(llvm::X86::MOV8ri).addReg(Reg).addImm(1)};
    if (llvm::X86::GR16RegClass.contains(Reg))
      return {llvm::MCInstBuilder(llvm::X86::MOV16ri).addReg(Reg).addImm(1)};
    if (llvm::X86::GR32RegClass.contains(Reg))
      return {llvm::MCInstBuilder(llvm::X86::MOV32ri).addReg(Reg).addImm(1)};
    if (llvm::X86::GR64RegClass.contains(Reg))
      return {llvm::MCInstBuilder(llvm::X86::MOV64ri32).addReg(Reg).addImm(1)};
    // MMX.
    if (llvm::X86::VR64RegClass.contains(Reg))
      return setVectorRegToConstant(Reg, 8, llvm::X86::MMX_MOVQ64rm);
    // {X,Y,Z}MM: pick the widest load encoding the subtarget supports.
    if (llvm::X86::VR128XRegClass.contains(Reg)) {
      if (STI.getFeatureBits()[llvm::X86::FeatureAVX512])
        return setVectorRegToConstant(Reg, 16, llvm::X86::VMOVDQU32Z128rm);
      if (STI.getFeatureBits()[llvm::X86::FeatureAVX])
        return setVectorRegToConstant(Reg, 16, llvm::X86::VMOVDQUrm);
      return setVectorRegToConstant(Reg, 16, llvm::X86::MOVDQUrm);
    }
    if (llvm::X86::VR256XRegClass.contains(Reg)) {
      if (STI.getFeatureBits()[llvm::X86::FeatureAVX512])
        return setVectorRegToConstant(Reg, 32, llvm::X86::VMOVDQU32Z256rm);
      return setVectorRegToConstant(Reg, 32, llvm::X86::VMOVDQUYrm);
    }
    if (llvm::X86::VR512RegClass.contains(Reg))
      return setVectorRegToConstant(Reg, 64, llvm::X86::VMOVDQU32Zrm);
    // X87: load a double from scratch stack memory.
    if (llvm::X86::RFP32RegClass.contains(Reg) ||
        llvm::X86::RFP64RegClass.contains(Reg) ||
        llvm::X86::RFP80RegClass.contains(Reg))
      return setVectorRegToConstant(Reg, 8, llvm::X86::LD_Fp64m);
    if (Reg == llvm::X86::EFLAGS) {
      // Set all flags to 0 but the bits that are "reserved and set to 1".
      constexpr const uint32_t kImmValue = 0x00007002u;
      std::vector<llvm::MCInst> Result;
      Result.push_back(allocateStackSpace(8));
      Result.push_back(fillStackSpace(llvm::X86::MOV64mi32, 0, kImmValue));
      Result.push_back(llvm::MCInstBuilder(llvm::X86::POPF64)); // Also pops.
      return Result;
    }
    return {};
  }

  // Factory for the latency runner, wrapped with the X86-specific filtering.
  std::unique_ptr<BenchmarkRunner>
  createLatencyBenchmarkRunner(const LLVMState &State) const override {
    return llvm::make_unique<X86BenchmarkRunner<X86LatencyImpl>>(State);
  }

  // Factory for the uops runner, wrapped with the X86-specific filtering.
  std::unique_ptr<BenchmarkRunner>
  createUopsBenchmarkRunner(const LLVMState &State) const override {
    return llvm::make_unique<X86BenchmarkRunner<X86UopsImpl>>(State);
  }

  // This target handles both 32- and 64-bit x86.
  bool matchesArch(llvm::Triple::ArchType Arch) const override {
    return Arch == llvm::Triple::x86_64 || Arch == llvm::Triple::x86;
  }

private:
  // setRegToConstant() specialized for a vector register of size
  // `RegSizeBytes`. `RMOpcode` is the opcode used to do a memory -> vector
  // register load.
  static std::vector<llvm::MCInst>
  setVectorRegToConstant(const unsigned Reg, const unsigned RegSizeBytes,
                         const unsigned RMOpcode) {
    // There is no instruction to directly set XMM, go through memory.
    // Since vector values can be interpreted as integers of various sizes (8
    // to 64 bits) as well as floats and double, so we chose an immediate
    // value that has set bits for all byte values and is a normal float/
    // double. 0x40404040 is ~32.5 when interpreted as a double and ~3.0f when
    // interpreted as a float.
    constexpr const uint32_t kImmValue = 0x40404040u;
    std::vector<llvm::MCInst> Result;
    Result.push_back(allocateStackSpace(RegSizeBytes));
    // Fill the scratch area 4 bytes at a time with 32-bit immediate moves.
    constexpr const unsigned kMov32NumBytes = 4;
    for (unsigned Disp = 0; Disp < RegSizeBytes; Disp += kMov32NumBytes) {
      Result.push_back(fillStackSpace(llvm::X86::MOV32mi, Disp, kImmValue));
    }
    Result.push_back(loadToReg(Reg, RMOpcode));
    Result.push_back(releaseStackSpace(RegSizeBytes));
    return Result;
  }

  // Allocates scratch memory on the stack (RSP -= Bytes).
  static llvm::MCInst allocateStackSpace(unsigned Bytes) {
    return llvm::MCInstBuilder(llvm::X86::SUB64ri8)
        .addReg(llvm::X86::RSP)
        .addReg(llvm::X86::RSP)
        .addImm(Bytes);
  }

  // Fills scratch memory at offset `OffsetBytes` with value `Imm`.
  static llvm::MCInst fillStackSpace(unsigned MovOpcode, unsigned OffsetBytes,
                                     uint64_t Imm) {
    return llvm::MCInstBuilder(MovOpcode)
        // Address = RSP + OffsetBytes.
        .addReg(llvm::X86::RSP) // BaseReg
        .addImm(1)              // ScaleAmt
        .addReg(0)              // IndexReg
        .addImm(OffsetBytes)    // Disp
        .addReg(0)              // Segment
        // Immediate.
        .addImm(Imm);
  }

  // Loads scratch memory into register `Reg` using opcode `RMOpcode`.
  static llvm::MCInst loadToReg(unsigned Reg, unsigned RMOpcode) {
    return llvm::MCInstBuilder(RMOpcode)
        .addReg(Reg)
        // Address = RSP.
        .addReg(llvm::X86::RSP) // BaseReg
        .addImm(1)              // ScaleAmt
        .addReg(0)              // IndexReg
        .addImm(0)              // Disp
        .addReg(0);             // Segment
  }

  // Releases scratch memory (RSP += Bytes).
  static llvm::MCInst releaseStackSpace(unsigned Bytes) {
    return llvm::MCInstBuilder(llvm::X86::ADD64ri8)
        .addReg(llvm::X86::RSP)
        .addReg(llvm::X86::RSP)
        .addImm(Bytes);
  }
};
277
278} // namespace
279
Clement Courbetcff2caa2018-06-25 11:22:23 +0000280static ExegesisTarget *getTheExegesisX86Target() {
Clement Courbet44b4c542018-06-19 11:28:59 +0000281 static ExegesisX86Target Target;
282 return &Target;
283}
284
285void InitializeX86ExegesisTarget() {
286 ExegesisTarget::registerTarget(getTheExegesisX86Target());
287}
288
Clement Courbetcff2caa2018-06-25 11:22:23 +0000289} // namespace exegesis