//===-- SIFoldOperands.cpp - Fold operands ---------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
/// \file
//===----------------------------------------------------------------------===//
//

#include "AMDGPU.h"
#include "AMDGPUSubtarget.h"
#include "SIInstrInfo.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"

#define DEBUG_TYPE "si-fold-operands"
using namespace llvm;

namespace {

class SIFoldOperands : public MachineFunctionPass {
public:
  static char ID;

public:
  SIFoldOperands() : MachineFunctionPass(ID) {
    initializeSIFoldOperandsPass(*PassRegistry::getPassRegistry());
  }

  bool runOnMachineFunction(MachineFunction &MF) override;

  const char *getPassName() const override {
    return "SI Fold Operands";
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    MachineFunctionPass::getAnalysisUsage(AU);
  }
};

struct FoldCandidate {
  MachineInstr *UseMI;
  unsigned UseOpNo;
  MachineOperand *OpToFold;
  uint64_t ImmToFold;

  FoldCandidate(MachineInstr *MI, unsigned OpNo, MachineOperand *FoldOp) :
    UseMI(MI), UseOpNo(OpNo) {

    if (FoldOp->isImm()) {
      OpToFold = nullptr;
      ImmToFold = FoldOp->getImm();
    } else {
      assert(FoldOp->isReg());
      OpToFold = FoldOp;
    }
  }

  bool isImm() const {
    return !OpToFold;
  }
};

} // End anonymous namespace.

INITIALIZE_PASS(SIFoldOperands, DEBUG_TYPE,
                "SI Fold Operands", false, false)

char SIFoldOperands::ID = 0;

char &llvm::SIFoldOperandsID = SIFoldOperands::ID;

FunctionPass *llvm::createSIFoldOperandsPass() {
  return new SIFoldOperands();
}

static bool isSafeToFold(unsigned Opcode) {
  switch (Opcode) {
  case AMDGPU::V_MOV_B32_e32:
  case AMDGPU::V_MOV_B32_e64:
  case AMDGPU::V_MOV_B64_PSEUDO:
  case AMDGPU::S_MOV_B32:
  case AMDGPU::S_MOV_B64:
  case AMDGPU::COPY:
    return true;
  default:
    return false;
  }
}

static bool updateOperand(FoldCandidate &Fold,
                          const TargetRegisterInfo &TRI) {
  MachineInstr *MI = Fold.UseMI;
  MachineOperand &Old = MI->getOperand(Fold.UseOpNo);
  assert(Old.isReg());

  if (Fold.isImm()) {
    Old.ChangeToImmediate(Fold.ImmToFold);
    return true;
  }

  MachineOperand *New = Fold.OpToFold;
  if (TargetRegisterInfo::isVirtualRegister(Old.getReg()) &&
      TargetRegisterInfo::isVirtualRegister(New->getReg())) {
    Old.substVirtReg(New->getReg(), New->getSubReg(), TRI);
    return true;
  }

  // FIXME: Handle physical registers.
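  // Any fold involving a physical register on either side falls through to
  // the failure return below, so the candidate is simply dropped.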

  return false;
}

static bool isUseMIInFoldList(const std::vector<FoldCandidate> &FoldList,
                              const MachineInstr *MI) {
  for (auto Candidate : FoldList) {
    if (Candidate.UseMI == MI)
      return true;
  }
  return false;
}

static bool tryAddToFoldList(std::vector<FoldCandidate> &FoldList,
                             MachineInstr *MI, unsigned OpNo,
                             MachineOperand *OpToFold,
                             const SIInstrInfo *TII) {
  if (!TII->isOperandLegal(MI, OpNo, OpToFold)) {

    // Special case for v_mac_f32_e64 if we are trying to fold into src2.
    unsigned Opc = MI->getOpcode();
    if (Opc == AMDGPU::V_MAC_F32_e64 &&
        (int)OpNo == AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2)) {
      // Check if changing this to a v_mad_f32 instruction will allow us to
      // fold the operand.
      MI->setDesc(TII->get(AMDGPU::V_MAD_F32));
      bool FoldAsMAD = tryAddToFoldList(FoldList, MI, OpNo, OpToFold, TII);
      if (FoldAsMAD) {
        MI->untieRegOperand(OpNo);
        return true;
      }
      MI->setDesc(TII->get(Opc));
    }

    // If we are already folding into another operand of MI, then
    // we can't commute the instruction, otherwise we risk making the
    // other fold illegal.
    if (isUseMIInFoldList(FoldList, MI))
      return false;

    // Operand is not legal, so try to commute the instruction to
    // see if this makes it possible to fold.
    unsigned CommuteIdx0 = TargetInstrInfo::CommuteAnyOperandIndex;
    unsigned CommuteIdx1 = TargetInstrInfo::CommuteAnyOperandIndex;
    bool CanCommute = TII->findCommutedOpIndices(MI, CommuteIdx0, CommuteIdx1);

    if (CanCommute) {
      if (CommuteIdx0 == OpNo)
        OpNo = CommuteIdx1;
      else if (CommuteIdx1 == OpNo)
        OpNo = CommuteIdx0;
    }

    // One of the operands might be an Imm operand, and OpNo may refer to it
    // after the call to commuteInstruction() below. Such situations are
    // avoided here explicitly, as OpNo must be a register operand to be a
    // candidate for memory folding.
    if (CanCommute && (!MI->getOperand(CommuteIdx0).isReg() ||
                       !MI->getOperand(CommuteIdx1).isReg()))
      return false;

    if (!CanCommute ||
        !TII->commuteInstruction(MI, false, CommuteIdx0, CommuteIdx1))
      return false;

    if (!TII->isOperandLegal(MI, OpNo, OpToFold))
      return false;
  }

  FoldList.push_back(FoldCandidate(MI, OpNo, OpToFold));
  return true;
}

static void foldOperand(MachineOperand &OpToFold, MachineInstr *UseMI,
                        unsigned UseOpIdx,
                        std::vector<FoldCandidate> &FoldList,
                        SmallVectorImpl<MachineInstr *> &CopiesToReplace,
                        const SIInstrInfo *TII, const SIRegisterInfo &TRI,
                        MachineRegisterInfo &MRI) {
  const MachineOperand &UseOp = UseMI->getOperand(UseOpIdx);

  // FIXME: Fold operands with subregs.
  if (UseOp.isReg() && ((UseOp.getSubReg() && OpToFold.isReg()) ||
      UseOp.isImplicit())) {
    return;
  }

  bool FoldingImm = OpToFold.isImm();
  APInt Imm;

  if (FoldingImm) {
    unsigned UseReg = UseOp.getReg();
    const TargetRegisterClass *UseRC
      = TargetRegisterInfo::isVirtualRegister(UseReg) ?
        MRI.getRegClass(UseReg) :
        TRI.getPhysRegClass(UseReg);

    Imm = APInt(64, OpToFold.getImm());

    const MCInstrDesc &FoldDesc = TII->get(OpToFold.getParent()->getOpcode());
    const TargetRegisterClass *FoldRC =
      TRI.getRegClass(FoldDesc.OpInfo[0].RegClass);

    // Split 64-bit constants into 32-bits for folding.
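    // A use through sub0 receives the low 32 bits of the constant and a use
    // through sub1 the high 32 bits; no other subregisters are expected here.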
    if (FoldRC->getSize() == 8 && UseOp.getSubReg()) {
      if (UseRC->getSize() != 8)
        return;

      if (UseOp.getSubReg() == AMDGPU::sub0) {
        Imm = Imm.getLoBits(32);
      } else {
        assert(UseOp.getSubReg() == AMDGPU::sub1);
        Imm = Imm.getHiBits(32);
      }
    }

    // In order to fold immediates into copies, we need to change the
    // copy to a MOV.
    if (UseMI->getOpcode() == AMDGPU::COPY) {
      unsigned DestReg = UseMI->getOperand(0).getReg();
      const TargetRegisterClass *DestRC
        = TargetRegisterInfo::isVirtualRegister(DestReg) ?
          MRI.getRegClass(DestReg) :
          TRI.getPhysRegClass(DestReg);

      unsigned MovOp = TII->getMovOpcode(DestRC);
      if (MovOp == AMDGPU::COPY)
        return;

      UseMI->setDesc(TII->get(MovOp));
      CopiesToReplace.push_back(UseMI);
    }
  }

  // Special case for REG_SEQUENCE: We can't fold literals into
  // REG_SEQUENCE instructions, so we have to fold them into the
  // uses of REG_SEQUENCE.
  if (UseMI->getOpcode() == AMDGPU::REG_SEQUENCE) {
    unsigned RegSeqDstReg = UseMI->getOperand(0).getReg();
    unsigned RegSeqDstSubReg = UseMI->getOperand(UseOpIdx + 1).getImm();

    for (MachineRegisterInfo::use_iterator
           RSUse = MRI.use_begin(RegSeqDstReg),
           RSE = MRI.use_end(); RSUse != RSE; ++RSUse) {

      MachineInstr *RSUseMI = RSUse->getParent();
      if (RSUse->getSubReg() != RegSeqDstSubReg)
        continue;

      foldOperand(OpToFold, RSUseMI, RSUse.getOperandNo(), FoldList,
                  CopiesToReplace, TII, TRI, MRI);
    }
    return;
  }

  const MCInstrDesc &UseDesc = UseMI->getDesc();

  // Don't fold into target independent nodes. Target independent opcodes
  // don't have defined register classes.
  if (UseDesc.isVariadic() ||
      UseDesc.OpInfo[UseOpIdx].RegClass == -1)
    return;

  if (FoldingImm) {
    MachineOperand ImmOp = MachineOperand::CreateImm(Imm.getSExtValue());
    tryAddToFoldList(FoldList, UseMI, UseOpIdx, &ImmOp, TII);
    return;
  }

  tryAddToFoldList(FoldList, UseMI, UseOpIdx, &OpToFold, TII);

  // FIXME: We could try to change the instruction from 64-bit to 32-bit
  // to enable more folding opportunities. The shrink operands pass
  // already does this.
  return;
}

bool SIFoldOperands::runOnMachineFunction(MachineFunction &MF) {
  if (skipFunction(*MF.getFunction()))
    return false;

  MachineRegisterInfo &MRI = MF.getRegInfo();
  const SIInstrInfo *TII =
    static_cast<const SIInstrInfo *>(MF.getSubtarget().getInstrInfo());
  const SIRegisterInfo &TRI = TII->getRegisterInfo();

  for (MachineFunction::iterator BI = MF.begin(), BE = MF.end();
       BI != BE; ++BI) {

    MachineBasicBlock &MBB = *BI;
    MachineBasicBlock::iterator I, Next;
    for (I = MBB.begin(); I != MBB.end(); I = Next) {
      Next = std::next(I);
      MachineInstr &MI = *I;

      if (!isSafeToFold(MI.getOpcode()))
        continue;

      unsigned OpSize = TII->getOpSize(MI, 1);
      MachineOperand &OpToFold = MI.getOperand(1);
      bool FoldingImm = OpToFold.isImm();

      // FIXME: We could also be folding things like FrameIndexes and
      // TargetIndexes.
      if (!FoldingImm && !OpToFold.isReg())
        continue;

      // Folding immediates with more than one use will increase program size.
      // FIXME: This will also reduce register usage, which may be better
      // in some cases. A better heuristic is needed.
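      // For now, a literal (non-inline) immediate is only folded when the
      // defining instruction has exactly one use.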
      if (FoldingImm && !TII->isInlineConstant(OpToFold, OpSize) &&
          !MRI.hasOneUse(MI.getOperand(0).getReg()))
        continue;

      if (OpToFold.isReg() &&
          !TargetRegisterInfo::isVirtualRegister(OpToFold.getReg()))
        continue;

      // Prevent folding operands backwards in the function. For example,
      // the COPY opcode must not be replaced by 1 in this example:
      //
      //    %vreg3<def> = COPY %VGPR0; VGPR_32:%vreg3
      //    ...
      //    %VGPR0<def> = V_MOV_B32_e32 1, %EXEC<imp-use>
      MachineOperand &Dst = MI.getOperand(0);
      if (Dst.isReg() &&
          !TargetRegisterInfo::isVirtualRegister(Dst.getReg()))
        continue;

      // We need to mutate the operands of new mov instructions to add implicit
      // uses of EXEC, but adding them invalidates the use_iterator, so defer
      // this.
      SmallVector<MachineInstr *, 4> CopiesToReplace;

      std::vector<FoldCandidate> FoldList;
      for (MachineRegisterInfo::use_iterator
             Use = MRI.use_begin(MI.getOperand(0).getReg()), E = MRI.use_end();
           Use != E; ++Use) {

        MachineInstr *UseMI = Use->getParent();

        foldOperand(OpToFold, UseMI, Use.getOperandNo(), FoldList,
                    CopiesToReplace, TII, TRI, MRI);
      }

      // Make sure we add EXEC uses to any new v_mov instructions created.
      for (MachineInstr *Copy : CopiesToReplace)
        Copy->addImplicitDefUseOperands(MF);

      for (FoldCandidate &Fold : FoldList) {
        if (updateOperand(Fold, TRI)) {
          // Clear kill flags.
          if (!Fold.isImm()) {
            assert(Fold.OpToFold && Fold.OpToFold->isReg());
            // FIXME: Probably shouldn't bother trying to fold if not an
            // SGPR. PeepholeOptimizer can eliminate redundant VGPR->VGPR
            // copies.
            MRI.clearKillFlags(Fold.OpToFold->getReg());
          }
          DEBUG(dbgs() << "Folded source from " << MI << " into OpNo " <<
                Fold.UseOpNo << " of " << *Fold.UseMI << '\n');
        }
      }
    }
  }
  return false;
}