//===- AMDGPUMCInstLower.cpp - Lower AMDGPU MachineInstr to an MCInst -----===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// \brief Code to lower AMDGPU MachineInstrs to their corresponding MCInst.
//
//===----------------------------------------------------------------------===//
//

#include "AMDGPUMCInstLower.h"
#include "AMDGPUAsmPrinter.h"
#include "AMDGPUSubtarget.h"
#include "AMDGPUTargetMachine.h"
#include "InstPrinter/AMDGPUInstPrinter.h"
#include "SIInstrInfo.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/MC/MCCodeEmitter.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCObjectStreamer.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Format.h"
#include <algorithm>

using namespace llvm;

#include "AMDGPUGenMCPseudoLowering.inc"

AMDGPUMCInstLower::AMDGPUMCInstLower(MCContext &ctx, const AMDGPUSubtarget &st,
                                     const AsmPrinter &ap):
  Ctx(ctx), ST(st), AP(ap) { }

static MCSymbolRefExpr::VariantKind getVariantKind(unsigned MOFlags) {
  switch (MOFlags) {
  default:
    return MCSymbolRefExpr::VK_None;
  case SIInstrInfo::MO_GOTPCREL:
    return MCSymbolRefExpr::VK_GOTPCREL;
  case SIInstrInfo::MO_GOTPCREL32_LO:
    return MCSymbolRefExpr::VK_AMDGPU_GOTPCREL32_LO;
  case SIInstrInfo::MO_GOTPCREL32_HI:
    return MCSymbolRefExpr::VK_AMDGPU_GOTPCREL32_HI;
  case SIInstrInfo::MO_REL32_LO:
    return MCSymbolRefExpr::VK_AMDGPU_REL32_LO;
  case SIInstrInfo::MO_REL32_HI:
    return MCSymbolRefExpr::VK_AMDGPU_REL32_HI;
  }
}

const MCExpr *AMDGPUMCInstLower::getLongBranchBlockExpr(
  const MachineBasicBlock &SrcBB,
  const MachineOperand &MO) const {
  const MCExpr *DestBBSym
    = MCSymbolRefExpr::create(MO.getMBB()->getSymbol(), Ctx);
  const MCExpr *SrcBBSym = MCSymbolRefExpr::create(SrcBB.getSymbol(), Ctx);

  assert(SrcBB.front().getOpcode() == AMDGPU::S_GETPC_B64 &&
         ST.getInstrInfo()->get(AMDGPU::S_GETPC_B64).Size == 4);

  // s_getpc_b64 returns the address of the next instruction.
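  // Because s_getpc_b64 is itself 4 bytes long (checked by the assert above),
  // that address is SrcBB's label plus 4, so bias the source symbol by 4
  // before taking the difference. A forward long branch therefore lowers to
  // the expression (DestBB - (SrcBB + 4)), and a backward one to its
  // negation.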
  const MCConstantExpr *Four = MCConstantExpr::create(4, Ctx);
  SrcBBSym = MCBinaryExpr::createAdd(SrcBBSym, Four, Ctx);

  if (MO.getTargetFlags() == AMDGPU::TF_LONG_BRANCH_FORWARD)
    return MCBinaryExpr::createSub(DestBBSym, SrcBBSym, Ctx);

  assert(MO.getTargetFlags() == AMDGPU::TF_LONG_BRANCH_BACKWARD);
  return MCBinaryExpr::createSub(SrcBBSym, DestBBSym, Ctx);
}

bool AMDGPUMCInstLower::lowerOperand(const MachineOperand &MO,
                                     MCOperand &MCOp) const {
  switch (MO.getType()) {
  default:
    llvm_unreachable("unknown operand type");
  case MachineOperand::MO_Immediate:
    MCOp = MCOperand::createImm(MO.getImm());
    return true;
  case MachineOperand::MO_Register:
    MCOp = MCOperand::createReg(AMDGPU::getMCReg(MO.getReg(), ST));
    return true;
  case MachineOperand::MO_MachineBasicBlock: {
    if (MO.getTargetFlags() != 0) {
      MCOp = MCOperand::createExpr(
        getLongBranchBlockExpr(*MO.getParent()->getParent(), MO));
    } else {
      MCOp = MCOperand::createExpr(
        MCSymbolRefExpr::create(MO.getMBB()->getSymbol(), Ctx));
    }

    return true;
  }
  case MachineOperand::MO_GlobalAddress: {
    const GlobalValue *GV = MO.getGlobal();
    SmallString<128> SymbolName;
    AP.getNameWithPrefix(SymbolName, GV);
    MCSymbol *Sym = Ctx.getOrCreateSymbol(SymbolName);
    const MCExpr *SymExpr =
      MCSymbolRefExpr::create(Sym, getVariantKind(MO.getTargetFlags()), Ctx);
    const MCExpr *Expr = MCBinaryExpr::createAdd(
        SymExpr, MCConstantExpr::create(MO.getOffset(), Ctx), Ctx);
    MCOp = MCOperand::createExpr(Expr);
    return true;
  }
  case MachineOperand::MO_ExternalSymbol: {
    MCSymbol *Sym = Ctx.getOrCreateSymbol(StringRef(MO.getSymbolName()));
    Sym->setExternal(true);
    const MCSymbolRefExpr *Expr = MCSymbolRefExpr::create(Sym, Ctx);
    MCOp = MCOperand::createExpr(Expr);
    return true;
  }
  case MachineOperand::MO_RegisterMask:
    // Regmasks are like implicit defs.
    return false;
  }
}

void AMDGPUMCInstLower::lower(const MachineInstr *MI, MCInst &OutMI) const {
  unsigned Opcode = MI->getOpcode();

  // FIXME: Should be able to handle this with emitPseudoExpansionLowering. We
  // need to select it to the subtarget specific version, and there's no way to
  // do that with a single pseudo source operation.
  if (Opcode == AMDGPU::S_SETPC_B64_return)
    Opcode = AMDGPU::S_SETPC_B64;

  int MCOpcode = ST.getInstrInfo()->pseudoToMCOpcode(Opcode);
  if (MCOpcode == -1) {
    LLVMContext &C = MI->getParent()->getParent()->getFunction()->getContext();
    C.emitError("AMDGPUMCInstLower::lower - Pseudo instruction doesn't have "
                "a target-specific version: " + Twine(MI->getOpcode()));
  }

  OutMI.setOpcode(MCOpcode);

  for (const MachineOperand &MO : MI->explicit_operands()) {
    MCOperand MCOp;
    lowerOperand(MO, MCOp);
    OutMI.addOperand(MCOp);
  }
}

bool AMDGPUAsmPrinter::lowerOperand(const MachineOperand &MO,
                                    MCOperand &MCOp) const {
  const AMDGPUSubtarget &STI = MF->getSubtarget<AMDGPUSubtarget>();
  AMDGPUMCInstLower MCInstLowering(OutContext, STI, *this);
  return MCInstLowering.lowerOperand(MO, MCOp);
}

const MCExpr *AMDGPUAsmPrinter::lowerConstant(const Constant *CV) {
  // TargetMachine does not support llvm-style cast, so use a C++-style
  // static_cast. This is safe since TM is always of type AMDGPUTargetMachine
  // or a class derived from it.
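  // (llvm::cast<AMDGPUTargetMachine> would require TargetMachine to provide
  // the classof() hooks used by isa<>, which it does not.)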
  auto *AT = static_cast<AMDGPUTargetMachine*>(&TM);
  auto *CE = dyn_cast<ConstantExpr>(CV);

  // Lower null pointers in the private and local address spaces: clang
  // generates an addrspacecast for null pointers in these address spaces,
  // and the cast must be lowered to the target's null pointer value.
  if (CE && CE->getOpcode() == Instruction::AddrSpaceCast) {
    auto Op = CE->getOperand(0);
    auto SrcAddr = Op->getType()->getPointerAddressSpace();
    if (Op->isNullValue() && AT->getNullPointerValue(SrcAddr) == 0) {
      auto DstAddr = CE->getType()->getPointerAddressSpace();
      return MCConstantExpr::create(AT->getNullPointerValue(DstAddr),
                                    OutContext);
    }
  }
  return AsmPrinter::lowerConstant(CV);
}

void AMDGPUAsmPrinter::EmitInstruction(const MachineInstr *MI) {
  if (emitPseudoExpansionLowering(*OutStreamer, MI))
    return;

  const AMDGPUSubtarget &STI = MF->getSubtarget<AMDGPUSubtarget>();
  AMDGPUMCInstLower MCInstLowering(OutContext, STI, *this);

  StringRef Err;
  if (!STI.getInstrInfo()->verifyInstruction(*MI, Err)) {
    LLVMContext &C = MI->getParent()->getParent()->getFunction()->getContext();
    C.emitError("Illegal instruction detected: " + Err);
    MI->print(errs());
  }

  if (MI->isBundle()) {
    const MachineBasicBlock *MBB = MI->getParent();
    MachineBasicBlock::const_instr_iterator I = ++MI->getIterator();
    while (I != MBB->instr_end() && I->isInsideBundle()) {
      EmitInstruction(&*I);
      ++I;
    }
  } else {
    // We don't want SI_MASK_BRANCH/SI_RETURN_TO_EPILOG encoded. They are
    // placeholder terminator instructions and should only be printed as
    // comments.
    if (MI->getOpcode() == AMDGPU::SI_MASK_BRANCH) {
      if (isVerbose()) {
        SmallVector<char, 16> BBStr;
        raw_svector_ostream Str(BBStr);

        const MachineBasicBlock *MBB = MI->getOperand(0).getMBB();
        const MCSymbolRefExpr *Expr
          = MCSymbolRefExpr::create(MBB->getSymbol(), OutContext);
        Expr->print(Str, MAI);
        OutStreamer->emitRawComment(" mask branch " + BBStr);
      }

      return;
    }

    if (MI->getOpcode() == AMDGPU::SI_RETURN_TO_EPILOG) {
      if (isVerbose())
        OutStreamer->emitRawComment(" return to shader part epilog");
      return;
    }

    if (MI->getOpcode() == AMDGPU::WAVE_BARRIER) {
      if (isVerbose())
        OutStreamer->emitRawComment(" wave barrier");
      return;
    }

    if (MI->getOpcode() == AMDGPU::SI_MASKED_UNREACHABLE) {
      if (isVerbose())
        OutStreamer->emitRawComment(" divergent unreachable");
      return;
    }

    MCInst TmpInst;
    MCInstLowering.lower(MI, TmpInst);
    EmitToStreamer(*OutStreamer, TmpInst);

    if (STI.dumpCode()) {
      // Disassemble instruction/operands to text.
      DisasmLines.resize(DisasmLines.size() + 1);
      std::string &DisasmLine = DisasmLines.back();
      raw_string_ostream DisasmStream(DisasmLine);

      AMDGPUInstPrinter InstPrinter(*TM.getMCAsmInfo(),
                                    *STI.getInstrInfo(),
                                    *STI.getRegisterInfo());
      InstPrinter.printInst(&TmpInst, DisasmStream, StringRef(), STI);

      // Disassemble instruction/operands to hex representation.
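      // The encoded bytes are printed as space-separated 32-bit words, eight
      // hex digits each, so e.g. a 64-bit encoding appears as two words.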
      SmallVector<MCFixup, 4> Fixups;
      SmallVector<char, 16> CodeBytes;
      raw_svector_ostream CodeStream(CodeBytes);

      auto &ObjStreamer = static_cast<MCObjectStreamer&>(*OutStreamer);
      MCCodeEmitter &InstEmitter = ObjStreamer.getAssembler().getEmitter();
      InstEmitter.encodeInstruction(TmpInst, CodeStream, Fixups,
                                    MF->getSubtarget<MCSubtargetInfo>());
      HexLines.resize(HexLines.size() + 1);
      std::string &HexLine = HexLines.back();
      raw_string_ostream HexStream(HexLine);

      for (size_t i = 0; i < CodeBytes.size(); i += 4) {
        // AMDGPU encodings are a multiple of 4 bytes; reinterpreting the raw
        // bytes as a host integer assumes a little-endian host.
        unsigned int CodeDWord = *(unsigned int *)&CodeBytes[i];
        HexStream << format("%s%08X", (i > 0 ? " " : ""), CodeDWord);
      }

      DisasmStream.flush();
      DisasmLineMaxLen = std::max(DisasmLineMaxLen, DisasmLine.size());
    }
  }
}