[RISCV] Tablegen-driven Instruction Compression.
Summary:
This patch implements a tablegen-driven Instruction Compression
mechanism for generating RISCV compressed instructions
(C Extension) from the expanded instruction form.
This tablegen backend processes CompressPat declarations in a
.td file and generates all the compile-time and run-time checks
required to validate the declarations, validate the input
operands, and generate correct instructions.
The checks include validating register operands, immediate
operands, fixed register operands and fixed immediate operands.
Example:
class CompressPat<dag input, dag output> {
  dag Input = input;
  dag Output = output;
  list<Predicate> Predicates = [];
}
let Predicates = [HasStdExtC] in {
def : CompressPat<(ADD GPRNoX0:$rs1, GPRNoX0:$rs1, GPRNoX0:$rs2),
                  (C_ADD GPRNoX0:$rs1, GPRNoX0:$rs2)>;
}
The result is an auto-generated header file
'RISCVGenCompressInstEmitter.inc', which exports two functions
for compressing/uncompressing MCInst instructions, plus
some helper functions:
bool compressInst(MCInst& OutInst, const MCInst &MI,
                  const MCSubtargetInfo &STI,
                  MCContext &Context);
bool uncompressInst(MCInst& OutInst, const MCInst &MI,
                    const MCRegisterInfo &MRI,
                    const MCSubtargetInfo &STI);
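For illustration, the generated compressInst body for the C_ADD
pattern above is roughly of the following shape (a sketch, not the
verbatim generated code; the FeatureStdExtC and GPRNoX0RegClassID
symbols are assumptions based on the existing RISCV backend):

bool compressInst(MCInst &OutInst, const MCInst &MI,
                  const MCSubtargetInfo &STI, MCContext &Context) {
  switch (MI.getOpcode()) {
  default:
    return false;
  case RISCV::ADD:
    // (ADD rs1, rs1, rs2) -> (C_ADD rs1, rs2), guarded by the HasStdExtC
    // predicate and the GPRNoX0 register-class constraints of the pattern.
    if (STI.getFeatureBits()[RISCV::FeatureStdExtC] &&
        MI.getOperand(0).isReg() && MI.getOperand(1).isReg() &&
        MI.getOperand(2).isReg() &&
        MI.getOperand(0).getReg() == MI.getOperand(1).getReg() &&
        RISCVMCRegisterClasses[RISCV::GPRNoX0RegClassID].contains(
            MI.getOperand(0).getReg()) &&
        RISCVMCRegisterClasses[RISCV::GPRNoX0RegClassID].contains(
            MI.getOperand(2).getReg())) {
      OutInst.setOpcode(RISCV::C_ADD);
      OutInst.addOperand(MI.getOperand(0));
      OutInst.addOperand(MI.getOperand(2));
      return true;
    }
    return false;
  }
}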
Clients that include this auto-generated header file and
invoke these functions can compress an instruction before emitting
it in the target-specific ASM or ELF streamer, or can uncompress
an instruction before printing it when the expanded instruction
form is favored over the compressed alias.
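For example, a client can compress just before emission with a few
lines like these (a minimal sketch, assuming Inst, getSTI() and the
Out streamer are in scope, as in the AsmParser change below):

MCInst CInst;
bool Res = compressInst(CInst, Inst, getSTI(), Out.getContext());
Out.EmitInstruction(Res ? CInst : Inst, getSTI());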
The following clients were added to implement compression/uncompression
for RISCV:
1) RISCVAsmParser::MatchAndEmitInstruction:
Inserted a call to compressInst() to compress instructions
parsed by llvm-mc coming from an ASM input.
2) RISCVAsmPrinter::EmitInstruction:
Inserted a call to compressInst() to compress instructions that
were lowered from Machine Instructions (MachineInstr).
3) RISCVInstPrinter::printInst:
Inserted a call to uncompressInst() to print the expanded
version of the instruction instead of the compressed one (e.g.,
add s0, s0, a5 instead of c.add s0, a5) when -riscv-no-aliases
is not passed.
This patch squashes D45119, D42780 and D41932. It was reviewed in smaller patches by
asb, efriedma, apazos and mgrang.
Reviewers: asb, efriedma, apazos, llvm-commits, sabuasal
Reviewed By: sabuasal
Subscribers: mgorny, eraman, asb, rbar, johnrusso, simoncook, jordy.potman.lists, apazos, niosHD, kito-cheng, shiva0217, zzheng
Differential Revision: https://reviews.llvm.org/D45385
llvm-svn: 329455
diff --git a/llvm/lib/Target/RISCV/AsmParser/RISCVAsmParser.cpp b/llvm/lib/Target/RISCV/AsmParser/RISCVAsmParser.cpp
index 035c1ef..59654f9 100644
--- a/llvm/lib/Target/RISCV/AsmParser/RISCVAsmParser.cpp
+++ b/llvm/lib/Target/RISCV/AsmParser/RISCVAsmParser.cpp
@@ -26,6 +26,10 @@
using namespace llvm;
+// Include the auto-generated portion of the compress emitter.
+#define GEN_COMPRESS_INSTR
+#include "RISCVGenCompressInstEmitter.inc"
+
namespace {
struct RISCVOperand;
@@ -595,10 +599,14 @@
switch (MatchInstructionImpl(Operands, Inst, ErrorInfo, MatchingInlineAsm)) {
default:
break;
- case Match_Success:
+ case Match_Success: {
+ MCInst CInst;
+ bool Res = compressInst(CInst, Inst, getSTI(), Out.getContext());
+ CInst.setLoc(IDLoc);
Inst.setLoc(IDLoc);
- Out.EmitInstruction(Inst, getSTI());
+ Out.EmitInstruction((Res ? CInst : Inst), getSTI());
return false;
+ }
case Match_MissingFeature:
return Error(IDLoc, "instruction use requires an option to be enabled");
case Match_MnemonicFail:
diff --git a/llvm/lib/Target/RISCV/CMakeLists.txt b/llvm/lib/Target/RISCV/CMakeLists.txt
index fa9adeb..cb18467 100644
--- a/llvm/lib/Target/RISCV/CMakeLists.txt
+++ b/llvm/lib/Target/RISCV/CMakeLists.txt
@@ -2,6 +2,7 @@
tablegen(LLVM RISCVGenAsmMatcher.inc -gen-asm-matcher)
tablegen(LLVM RISCVGenAsmWriter.inc -gen-asm-writer)
+tablegen(LLVM RISCVGenCompressInstEmitter.inc -gen-compress-inst-emitter)
tablegen(LLVM RISCVGenDAGISel.inc -gen-dag-isel)
tablegen(LLVM RISCVGenDisassemblerTables.inc -gen-disassembler)
tablegen(LLVM RISCVGenInstrInfo.inc -gen-instr-info)
diff --git a/llvm/lib/Target/RISCV/InstPrinter/RISCVInstPrinter.cpp b/llvm/lib/Target/RISCV/InstPrinter/RISCVInstPrinter.cpp
index f1fa2ec..300e6fd 100644
--- a/llvm/lib/Target/RISCV/InstPrinter/RISCVInstPrinter.cpp
+++ b/llvm/lib/Target/RISCV/InstPrinter/RISCVInstPrinter.cpp
@@ -13,6 +13,7 @@
#include "RISCVInstPrinter.h"
#include "MCTargetDesc/RISCVBaseInfo.h"
+#include "MCTargetDesc/RISCVMCExpr.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInst.h"
@@ -30,6 +31,10 @@
#define PRINT_ALIAS_INSTR
#include "RISCVGenAsmWriter.inc"
+// Include the auto-generated portion of the compress emitter.
+#define GEN_UNCOMPRESS_INSTR
+#include "RISCVGenCompressInstEmitter.inc"
+
static cl::opt<bool>
NoAliases("riscv-no-aliases",
cl::desc("Disable the emission of assembler pseudo instructions"),
@@ -38,8 +43,15 @@
void RISCVInstPrinter::printInst(const MCInst *MI, raw_ostream &O,
StringRef Annot, const MCSubtargetInfo &STI) {
- if (NoAliases || !printAliasInstr(MI, STI, O))
- printInstruction(MI, STI, O);
+ bool Res = false;
+ const MCInst *NewMI = MI;
+ MCInst UncompressedMI;
+ if (!NoAliases)
+ Res = uncompressInst(UncompressedMI, *MI, MRI, STI);
+ if (Res)
+ NewMI = const_cast<MCInst*>(&UncompressedMI);
+ if (NoAliases || !printAliasInstr(NewMI, STI, O))
+ printInstruction(NewMI, STI, O);
printAnnotation(O, Annot);
}
diff --git a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVMCExpr.cpp b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVMCExpr.cpp
index 68cdb37..4d1573a 100644
--- a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVMCExpr.cpp
+++ b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVMCExpr.cpp
@@ -12,6 +12,7 @@
//
//===----------------------------------------------------------------------===//
+#include "RISCV.h"
#include "RISCVMCExpr.h"
#include "llvm/MC/MCAssembler.h"
#include "llvm/MC/MCContext.h"
diff --git a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVMCExpr.h b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVMCExpr.h
index c49593f..e428b0d 100644
--- a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVMCExpr.h
+++ b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVMCExpr.h
@@ -20,6 +20,7 @@
namespace llvm {
class StringRef;
+class MCOperand;
class RISCVMCExpr : public MCTargetExpr {
public:
enum VariantKind {
diff --git a/llvm/lib/Target/RISCV/RISCVAsmPrinter.cpp b/llvm/lib/Target/RISCV/RISCVAsmPrinter.cpp
index bbaa8ec..bdf8e5d 100644
--- a/llvm/lib/Target/RISCV/RISCVAsmPrinter.cpp
+++ b/llvm/lib/Target/RISCV/RISCVAsmPrinter.cpp
@@ -14,6 +14,7 @@
#include "RISCV.h"
#include "InstPrinter/RISCVInstPrinter.h"
+#include "MCTargetDesc/RISCVMCExpr.h"
#include "RISCVTargetMachine.h"
#include "llvm/CodeGen/AsmPrinter.h"
#include "llvm/CodeGen/MachineConstantPool.h"
@@ -48,6 +49,7 @@
unsigned AsmVariant, const char *ExtraCode,
raw_ostream &OS) override;
+ void EmitToStreamer(MCStreamer &S, const MCInst &Inst);
bool emitPseudoExpansionLowering(MCStreamer &OutStreamer,
const MachineInstr *MI);
@@ -58,6 +60,15 @@
};
}
+#define GEN_COMPRESS_INSTR
+#include "RISCVGenCompressInstEmitter.inc"
+void RISCVAsmPrinter::EmitToStreamer(MCStreamer &S, const MCInst &Inst) {
+ MCInst CInst;
+ bool Res = compressInst(CInst, Inst, *TM.getMCSubtargetInfo(),
+ OutStreamer->getContext());
+ AsmPrinter::EmitToStreamer(*OutStreamer, Res ? CInst : Inst);
+}
+
// Simple pseudo-instructions have their lowering (with expansion to real
// instructions) auto-generated.
#include "RISCVGenMCPseudoLowering.inc"
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.td b/llvm/lib/Target/RISCV/RISCVInstrInfo.td
index 8b5b89b..1c429da 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfo.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.td
@@ -83,6 +83,14 @@
let ParserMatchClass = UImmLog2XLenAsmOperand;
// TODO: should ensure invalid shamt is rejected when decoding.
let DecoderMethod = "decodeUImmOperand<6>";
+ let MCOperandPredicate = [{
+ int64_t Imm;
+ if (!MCOp.evaluateAsConstantImm(Imm))
+ return false;
+ if (STI.getTargetTriple().isArch64Bit())
+ return isUInt<6>(Imm);
+ return isUInt<5>(Imm);
+ }];
}
def uimm5 : Operand<XLenVT>, ImmLeaf<XLenVT, [{return isUInt<5>(Imm);}]> {
@@ -94,6 +102,12 @@
let ParserMatchClass = SImmAsmOperand<12>;
let EncoderMethod = "getImmOpValue";
let DecoderMethod = "decodeSImmOperand<12>";
+ let MCOperandPredicate = [{
+ int64_t Imm;
+ if (MCOp.evaluateAsConstantImm(Imm))
+ return isInt<12>(Imm);
+ return MCOp.isBareSymbolRef();
+ }];
}
def uimm12 : Operand<XLenVT> {
@@ -106,12 +120,24 @@
let ParserMatchClass = SImmAsmOperand<13, "Lsb0">;
let EncoderMethod = "getImmOpValueAsr1";
let DecoderMethod = "decodeSImmOperandAndLsl1<13>";
+ let MCOperandPredicate = [{
+ int64_t Imm;
+ if (MCOp.evaluateAsConstantImm(Imm))
+ return isShiftedInt<12, 1>(Imm);
+ return MCOp.isBareSymbolRef();
+ }];
}
def uimm20 : Operand<XLenVT> {
let ParserMatchClass = UImmAsmOperand<20>;
let EncoderMethod = "getImmOpValue";
let DecoderMethod = "decodeUImmOperand<20>";
+ let MCOperandPredicate = [{
+ int64_t Imm;
+ if (MCOp.evaluateAsConstantImm(Imm))
+ return isUInt<20>(Imm);
+ return MCOp.isBareSymbolRef();
+ }];
}
// A 21-bit signed immediate where the least significant bit is zero.
@@ -119,6 +145,12 @@
let ParserMatchClass = SImmAsmOperand<21, "Lsb0">;
let EncoderMethod = "getImmOpValueAsr1";
let DecoderMethod = "decodeSImmOperandAndLsl1<21>";
+ let MCOperandPredicate = [{
+ int64_t Imm;
+ if (MCOp.evaluateAsConstantImm(Imm))
+ return isShiftedInt<20, 1>(Imm);
+ return MCOp.isBareSymbolRef();
+ }];
}
// A parameterized register class alternative to i32imm/i64imm from Target.td.
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoC.td b/llvm/lib/Target/RISCV/RISCVInstrInfoC.td
index b356687..c1cbfdc 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoC.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoC.td
@@ -27,12 +27,26 @@
let ParserMatchClass = UImmLog2XLenNonZeroAsmOperand;
// TODO: should ensure invalid shamt is rejected when decoding.
let DecoderMethod = "decodeUImmOperand<6>";
+ let MCOperandPredicate = [{
+ int64_t Imm;
+ if (!MCOp.evaluateAsConstantImm(Imm))
+ return false;
+ if (STI.getTargetTriple().isArch64Bit())
+ return isUInt<6>(Imm) && (Imm != 0);
+ return isUInt<5>(Imm) && (Imm != 0);
+ }];
}
def simm6 : Operand<XLenVT>, ImmLeaf<XLenVT, [{return isInt<6>(Imm);}]> {
let ParserMatchClass = SImmAsmOperand<6>;
let EncoderMethod = "getImmOpValue";
let DecoderMethod = "decodeSImmOperand<6>";
+ let MCOperandPredicate = [{
+ int64_t Imm;
+ if (MCOp.evaluateAsConstantImm(Imm))
+ return isInt<6>(Imm);
+ return MCOp.isBareSymbolRef();
+ }];
}
def simm6nonzero : Operand<XLenVT>,
@@ -40,6 +54,12 @@
let ParserMatchClass = SImmAsmOperand<6, "NonZero">;
let EncoderMethod = "getImmOpValue";
let DecoderMethod = "decodeSImmOperand<6>";
+ let MCOperandPredicate = [{
+ int64_t Imm;
+ if (MCOp.evaluateAsConstantImm(Imm))
+ return (Imm != 0) && isInt<6>(Imm);
+ return MCOp.isBareSymbolRef();
+ }];
}
def CLUIImmAsmOperand : AsmOperandClass {
@@ -61,6 +81,13 @@
let ParserMatchClass = CLUIImmAsmOperand;
let EncoderMethod = "getImmOpValue";
let DecoderMethod = "decodeCLUIImmOperand";
+ let MCOperandPredicate = [{
+ int64_t Imm;
+ if (MCOp.evaluateAsConstantImm(Imm))
+ return (Imm != 0) && (isUInt<5>(Imm) ||
+ (Imm >= 0xfffe0 && Imm <= 0xfffff));
+ return MCOp.isBareSymbolRef();
+ }];
}
// A 7-bit unsigned immediate where the least significant two bits are zero.
@@ -69,6 +96,12 @@
let ParserMatchClass = UImmAsmOperand<7, "Lsb00">;
let EncoderMethod = "getImmOpValue";
let DecoderMethod = "decodeUImmOperand<7>";
+ let MCOperandPredicate = [{
+ int64_t Imm;
+ if (!MCOp.evaluateAsConstantImm(Imm))
+ return false;
+ return isShiftedUInt<5, 2>(Imm);
+ }];
}
// A 8-bit unsigned immediate where the least significant two bits are zero.
@@ -77,6 +110,12 @@
let ParserMatchClass = UImmAsmOperand<8, "Lsb00">;
let EncoderMethod = "getImmOpValue";
let DecoderMethod = "decodeUImmOperand<8>";
+ let MCOperandPredicate = [{
+ int64_t Imm;
+ if (!MCOp.evaluateAsConstantImm(Imm))
+ return false;
+ return isShiftedUInt<6, 2>(Imm);
+ }];
}
// A 8-bit unsigned immediate where the least significant three bits are zero.
@@ -85,6 +124,12 @@
let ParserMatchClass = UImmAsmOperand<8, "Lsb000">;
let EncoderMethod = "getImmOpValue";
let DecoderMethod = "decodeUImmOperand<8>";
+ let MCOperandPredicate = [{
+ int64_t Imm;
+ if (!MCOp.evaluateAsConstantImm(Imm))
+ return false;
+ return isShiftedUInt<5, 3>(Imm);
+ }];
}
// A 9-bit signed immediate where the least significant bit is zero.
@@ -92,6 +137,13 @@
let ParserMatchClass = SImmAsmOperand<9, "Lsb0">;
let EncoderMethod = "getImmOpValueAsr1";
let DecoderMethod = "decodeSImmOperandAndLsl1<9>";
+ let MCOperandPredicate = [{
+ int64_t Imm;
+ if (MCOp.evaluateAsConstantImm(Imm))
+ return isShiftedInt<8, 1>(Imm);
+ return MCOp.isBareSymbolRef();
+
+ }];
}
// A 9-bit unsigned immediate where the least significant three bits are zero.
@@ -100,6 +152,12 @@
let ParserMatchClass = UImmAsmOperand<9, "Lsb000">;
let EncoderMethod = "getImmOpValue";
let DecoderMethod = "decodeUImmOperand<9>";
+ let MCOperandPredicate = [{
+ int64_t Imm;
+ if (!MCOp.evaluateAsConstantImm(Imm))
+ return false;
+ return isShiftedUInt<6, 3>(Imm);
+ }];
}
// A 10-bit unsigned immediate where the least significant two bits are zero
@@ -110,6 +168,12 @@
let ParserMatchClass = UImmAsmOperand<10, "Lsb00NonZero">;
let EncoderMethod = "getImmOpValue";
let DecoderMethod = "decodeUImmOperand<10>";
+ let MCOperandPredicate = [{
+ int64_t Imm;
+ if (!MCOp.evaluateAsConstantImm(Imm))
+ return false;
+ return isShiftedUInt<8, 2>(Imm) && (Imm != 0);
+ }];
}
// A 10-bit signed immediate where the least significant four bits are zero.
@@ -119,13 +183,25 @@
let ParserMatchClass = SImmAsmOperand<10, "Lsb0000NonZero">;
let EncoderMethod = "getImmOpValue";
let DecoderMethod = "decodeSImmOperand<10>";
+ let MCOperandPredicate = [{
+ int64_t Imm;
+ if (!MCOp.evaluateAsConstantImm(Imm))
+ return false;
+ return isShiftedInt<6, 4>(Imm);
+ }];
}
// A 12-bit signed immediate where the least significant bit is zero.
-def simm12_lsb0 : Operand<OtherVT> {
+def simm12_lsb0 : Operand<XLenVT> {
let ParserMatchClass = SImmAsmOperand<12, "Lsb0">;
let EncoderMethod = "getImmOpValueAsr1";
let DecoderMethod = "decodeSImmOperandAndLsl1<12>";
+ let MCOperandPredicate = [{
+ int64_t Imm;
+ if (MCOp.evaluateAsConstantImm(Imm))
+ return isShiftedInt<11, 1>(Imm);
+ return MCOp.isBareSymbolRef();
+ }];
}
//===----------------------------------------------------------------------===//
@@ -442,3 +518,188 @@
}
} // Predicates = [HasStdExtC]
+
+//===----------------------------------------------------------------------===//
+// Compress Instruction tablegen backend.
+//===----------------------------------------------------------------------===//
+
+class CompressPat<dag input, dag output> {
+ dag Input = input;
+ dag Output = output;
+ list<Predicate> Predicates = [];
+}
+
+// Patterns are defined in the same order the compressed instructions appear
+// on page 82 of the ISA manual.
+
+// Quadrant 0
+let Predicates = [HasStdExtC] in {
+def : CompressPat<(ADDI GPRC:$rd, SP:$rs1, uimm10_lsb00nonzero:$imm),
+ (C_ADDI4SPN GPRC:$rd, SP:$rs1, uimm10_lsb00nonzero:$imm)>;
+} // Predicates = [HasStdExtC]
+
+let Predicates = [HasStdExtC, HasStdExtD] in {
+def : CompressPat<(FLD FPR64C:$rd, GPRC:$rs1, uimm8_lsb000:$imm),
+ (C_FLD FPR64C:$rd, GPRC:$rs1, uimm8_lsb000:$imm)>;
+} // Predicates = [HasStdExtC, HasStdExtD]
+
+let Predicates = [HasStdExtC] in {
+def : CompressPat<(LW GPRC:$rd, GPRC:$rs1, uimm7_lsb00:$imm),
+ (C_LW GPRC:$rd, GPRC:$rs1, uimm7_lsb00:$imm)>;
+} // Predicates = [HasStdExtC]
+
+let Predicates = [HasStdExtC, HasStdExtF, IsRV32] in {
+def : CompressPat<(FLW FPR32C:$rd, GPRC:$rs1, uimm7_lsb00:$imm),
+ (C_FLW FPR32C:$rd, GPRC:$rs1, uimm7_lsb00:$imm)>;
+} // Predicates = [HasStdExtC, HasStdExtF, IsRV32]
+
+let Predicates = [HasStdExtC, IsRV64] in {
+def : CompressPat<(LD GPRC:$rd, GPRC:$rs1, uimm8_lsb000:$imm),
+ (C_LD GPRC:$rd, GPRC:$rs1, uimm8_lsb000:$imm)>;
+} // Predicates = [HasStdExtC, IsRV64]
+
+let Predicates = [HasStdExtC, HasStdExtD] in {
+def : CompressPat<(FSD FPR64C:$rs2, GPRC:$rs1, uimm8_lsb000:$imm),
+ (C_FSD FPR64C:$rs2, GPRC:$rs1, uimm8_lsb000:$imm)>;
+} // Predicates = [HasStdExtC, HasStdExtD]
+
+let Predicates = [HasStdExtC] in {
+def : CompressPat<(SW GPRC:$rs2, GPRC:$rs1, uimm7_lsb00:$imm),
+ (C_SW GPRC:$rs2, GPRC:$rs1, uimm7_lsb00:$imm)>;
+} // Predicates = [HasStdExtC]
+
+let Predicates = [HasStdExtC, HasStdExtF, IsRV32] in {
+def : CompressPat<(FSW FPR32C:$rs2, GPRC:$rs1, uimm7_lsb00:$imm),
+ (C_FSW FPR32C:$rs2, GPRC:$rs1, uimm7_lsb00:$imm)>;
+} // Predicates = [HasStdExtC, HasStdExtF, IsRV32]
+
+let Predicates = [HasStdExtC, IsRV64] in {
+def : CompressPat<(SD GPRC:$rs2, GPRC:$rs1, uimm8_lsb000:$imm),
+ (C_SD GPRC:$rs2, GPRC:$rs1, uimm8_lsb000:$imm)>;
+} // Predicates = [HasStdExtC, IsRV64]
+
+// Quadrant 1
+let Predicates = [HasStdExtC] in {
+def : CompressPat<(ADDI X0, X0, 0), (C_NOP)>;
+def : CompressPat<(ADDI GPRNoX0:$rs1, GPRNoX0:$rs1, simm6nonzero:$imm),
+ (C_ADDI GPRNoX0:$rs1, simm6nonzero:$imm)>;
+} // Predicates = [HasStdExtC]
+
+let Predicates = [HasStdExtC, IsRV32] in {
+def : CompressPat<(JAL X1, simm12_lsb0:$offset),
+ (C_JAL simm12_lsb0:$offset)>;
+} // Predicates = [HasStdExtC, IsRV32]
+
+let Predicates = [HasStdExtC, IsRV64] in {
+def : CompressPat<(ADDIW GPRNoX0:$rs1, GPRNoX0:$rs1, simm6:$imm),
+ (C_ADDIW GPRNoX0:$rs1, simm6:$imm)>;
+} // Predicates = [HasStdExtC, IsRV64]
+
+let Predicates = [HasStdExtC] in {
+def : CompressPat<(ADDI GPRNoX0:$rd, X0, simm6:$imm),
+ (C_LI GPRNoX0:$rd, simm6:$imm)>;
+def : CompressPat<(ADDI X2, X2, simm10_lsb0000nonzero:$imm),
+ (C_ADDI16SP X2, simm10_lsb0000nonzero:$imm)>;
+def : CompressPat<(LUI GPRNoX0X2:$rd, c_lui_imm:$imm),
+ (C_LUI GPRNoX0X2:$rd, c_lui_imm:$imm)>;
+def : CompressPat<(SRLI GPRC:$rs1, GPRC:$rs1, uimmlog2xlennonzero:$imm),
+ (C_SRLI GPRC:$rs1, uimmlog2xlennonzero:$imm)>;
+def : CompressPat<(SRAI GPRC:$rs1, GPRC:$rs1, uimmlog2xlennonzero:$imm),
+ (C_SRAI GPRC:$rs1, uimmlog2xlennonzero:$imm)>;
+def : CompressPat<(ANDI GPRC:$rs1, GPRC:$rs1, simm6:$imm),
+ (C_ANDI GPRC:$rs1, simm6:$imm)>;
+def : CompressPat<(SUB GPRC:$rs1, GPRC:$rs1, GPRC:$rs2),
+ (C_SUB GPRC:$rs1, GPRC:$rs2)>;
+def : CompressPat<(XOR GPRC:$rs1, GPRC:$rs1, GPRC:$rs2),
+ (C_XOR GPRC:$rs1, GPRC:$rs2)>;
+def : CompressPat<(XOR GPRC:$rs1, GPRC:$rs2, GPRC:$rs1),
+ (C_XOR GPRC:$rs1, GPRC:$rs2)>;
+def : CompressPat<(OR GPRC:$rs1, GPRC:$rs1, GPRC:$rs2),
+ (C_OR GPRC:$rs1, GPRC:$rs2)>;
+def : CompressPat<(OR GPRC:$rs1, GPRC:$rs2, GPRC:$rs1),
+ (C_OR GPRC:$rs1, GPRC:$rs2)>;
+def : CompressPat<(AND GPRC:$rs1, GPRC:$rs1, GPRC:$rs2),
+ (C_AND GPRC:$rs1, GPRC:$rs2)>;
+def : CompressPat<(AND GPRC:$rs1, GPRC:$rs2, GPRC:$rs1),
+ (C_AND GPRC:$rs1, GPRC:$rs2)>;
+} // Predicates = [HasStdExtC]
+
+let Predicates = [HasStdExtC, IsRV64] in {
+def : CompressPat<(SUBW GPRC:$rs1, GPRC:$rs1, GPRC:$rs2),
+ (C_SUBW GPRC:$rs1, GPRC:$rs2)>;
+def : CompressPat<(ADDW GPRC:$rs1, GPRC:$rs1, GPRC:$rs2),
+ (C_ADDW GPRC:$rs1, GPRC:$rs2)>;
+def : CompressPat<(ADDW GPRC:$rs1, GPRC:$rs2, GPRC:$rs1),
+ (C_ADDW GPRC:$rs1, GPRC:$rs2)>;
+} // Predicates = [HasStdExtC, IsRV64]
+
+let Predicates = [HasStdExtC] in {
+def : CompressPat<(JAL X0, simm12_lsb0:$offset),
+ (C_J simm12_lsb0:$offset)>;
+def : CompressPat<(BEQ GPRC:$rs1, X0, simm9_lsb0:$imm),
+ (C_BEQZ GPRC:$rs1, simm9_lsb0:$imm)>;
+def : CompressPat<(BNE GPRC:$rs1, X0, simm9_lsb0:$imm),
+ (C_BNEZ GPRC:$rs1, simm9_lsb0:$imm)>;
+} // Predicates = [HasStdExtC]
+
+// Quadrant 2
+let Predicates = [HasStdExtC] in {
+def : CompressPat<(SLLI GPRNoX0:$rs1, GPRNoX0:$rs1, uimmlog2xlennonzero:$imm),
+ (C_SLLI GPRNoX0:$rs1, uimmlog2xlennonzero:$imm)>;
+} // Predicates = [HasStdExtC]
+
+let Predicates = [HasStdExtC, HasStdExtD] in {
+def : CompressPat<(FLD FPR64:$rd, SP:$rs1, uimm9_lsb000:$imm),
+ (C_FLDSP FPR64:$rd, SP:$rs1, uimm9_lsb000:$imm)>;
+} // Predicates = [HasStdExtC, HasStdExtD]
+
+let Predicates = [HasStdExtC] in {
+def : CompressPat<(LW GPRNoX0:$rd, SP:$rs1, uimm8_lsb00:$imm),
+ (C_LWSP GPRNoX0:$rd, SP:$rs1, uimm8_lsb00:$imm)>;
+} // Predicates = [HasStdExtC]
+
+let Predicates = [HasStdExtC, HasStdExtF, IsRV32] in {
+def : CompressPat<(FLW FPR32:$rd, SP:$rs1, uimm8_lsb00:$imm),
+ (C_FLWSP FPR32:$rd, SP:$rs1, uimm8_lsb00:$imm)>;
+} // Predicates = [HasStdExtC, HasStdExtF, IsRV32]
+
+let Predicates = [HasStdExtC, IsRV64] in {
+def : CompressPat<(LD GPRNoX0:$rd, SP:$rs1, uimm9_lsb000:$imm),
+ (C_LDSP GPRNoX0:$rd, SP:$rs1, uimm9_lsb000:$imm)>;
+} // Predicates = [HasStdExtC, IsRV64]
+
+let Predicates = [HasStdExtC] in {
+def : CompressPat<(JALR X0, GPRNoX0:$rs1, 0),
+ (C_JR GPRNoX0:$rs1)>;
+def : CompressPat<(ADD GPRNoX0:$rs1, X0, GPRNoX0:$rs2),
+ (C_MV GPRNoX0:$rs1, GPRNoX0:$rs2)>;
+def : CompressPat<(ADD GPRNoX0:$rs1, GPRNoX0:$rs2, X0),
+ (C_MV GPRNoX0:$rs1, GPRNoX0:$rs2)>;
+def : CompressPat<(EBREAK), (C_EBREAK)>;
+def : CompressPat<(JALR X1, GPRNoX0:$rs1, 0),
+ (C_JALR GPRNoX0:$rs1)>;
+def : CompressPat<(ADD GPRNoX0:$rs1, GPRNoX0:$rs1, GPRNoX0:$rs2),
+ (C_ADD GPRNoX0:$rs1, GPRNoX0:$rs2)>;
+def : CompressPat<(ADD GPRNoX0:$rs1, GPRNoX0:$rs2, GPRNoX0:$rs1),
+ (C_ADD GPRNoX0:$rs1, GPRNoX0:$rs2)>;
+} // Predicates = [HasStdExtC]
+
+let Predicates = [HasStdExtC, HasStdExtD] in {
+def : CompressPat<(FSD FPR64:$rs2, SP:$rs1, uimm9_lsb000:$imm),
+ (C_FSDSP FPR64:$rs2, SP:$rs1, uimm9_lsb000:$imm)>;
+} // Predicates = [HasStdExtC, HasStdExtD]
+
+let Predicates = [HasStdExtC] in {
+def : CompressPat<(SW GPR:$rs2, SP:$rs1, uimm8_lsb00:$imm),
+ (C_SWSP GPR:$rs2, SP:$rs1, uimm8_lsb00:$imm)>;
+} // Predicates = [HasStdExtC]
+
+let Predicates = [HasStdExtC, HasStdExtF, IsRV32] in {
+def : CompressPat<(FSW FPR32:$rs2, SP:$rs1, uimm8_lsb00:$imm),
+ (C_FSWSP FPR32:$rs2, SP:$rs1, uimm8_lsb00:$imm)>;
+} // Predicates = [HasStdExtC, HasStdExtF, IsRV32]
+
+let Predicates = [HasStdExtC, IsRV64] in {
+def : CompressPat<(SD GPR:$rs2, SP:$rs1, uimm9_lsb000:$imm),
+ (C_SDSP GPR:$rs2, SP:$rs1, uimm9_lsb000:$imm)>;
+} // Predicates = [HasStdExtC, IsRV64]