//===-- SIFoldOperands.cpp - Fold operands ------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
/// \file
//===----------------------------------------------------------------------===//
//

#include "AMDGPU.h"
#include "AMDGPUSubtarget.h"
#include "SIInstrInfo.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"

#define DEBUG_TYPE "si-fold-operands"
using namespace llvm;

namespace {

class SIFoldOperands : public MachineFunctionPass {
public:
  static char ID;

  SIFoldOperands() : MachineFunctionPass(ID) {
    initializeSIFoldOperandsPass(*PassRegistry::getPassRegistry());
  }

  bool runOnMachineFunction(MachineFunction &MF) override;

  const char *getPassName() const override {
    return "SI Fold Operands";
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    MachineFunctionPass::getAnalysisUsage(AU);
  }
};

struct FoldCandidate {
  MachineInstr *UseMI;
  unsigned UseOpNo;
  MachineOperand *OpToFold;
  uint64_t ImmToFold;

  FoldCandidate(MachineInstr *MI, unsigned OpNo, MachineOperand *FoldOp) :
    UseMI(MI), UseOpNo(OpNo) {
    if (FoldOp->isImm()) {
      OpToFold = nullptr;
      ImmToFold = FoldOp->getImm();
    } else {
      assert(FoldOp->isReg());
      OpToFold = FoldOp;
    }
  }

  bool isImm() const {
    return !OpToFold;
  }
};

} // End anonymous namespace.

INITIALIZE_PASS(SIFoldOperands, DEBUG_TYPE,
                "SI Fold Operands", false, false)

char SIFoldOperands::ID = 0;

char &llvm::SIFoldOperandsID = SIFoldOperands::ID;

FunctionPass *llvm::createSIFoldOperandsPass() {
  return new SIFoldOperands();
}

static bool isSafeToFold(unsigned Opcode) {
  switch (Opcode) {
  case AMDGPU::V_MOV_B32_e32:
  case AMDGPU::V_MOV_B32_e64:
  case AMDGPU::V_MOV_B64_PSEUDO:
  case AMDGPU::S_MOV_B32:
  case AMDGPU::S_MOV_B64:
  case AMDGPU::COPY:
    return true;
  default:
    return false;
  }
}

static bool updateOperand(FoldCandidate &Fold,
                          const TargetRegisterInfo &TRI) {
  MachineInstr *MI = Fold.UseMI;
  MachineOperand &Old = MI->getOperand(Fold.UseOpNo);
  assert(Old.isReg());

  if (Fold.isImm()) {
    Old.ChangeToImmediate(Fold.ImmToFold);
    return true;
  }

  MachineOperand *New = Fold.OpToFold;
  if (TargetRegisterInfo::isVirtualRegister(Old.getReg()) &&
      TargetRegisterInfo::isVirtualRegister(New->getReg())) {
    Old.substVirtReg(New->getReg(), New->getSubReg(), TRI);
    return true;
  }

  // FIXME: Handle physical registers.
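  // Note: reaching this point means at least one of the operands is a
  // physical register; substVirtReg above only handles the virtual-register
  // case, so we conservatively report that no fold was performed.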
  return false;
}

static bool isUseMIInFoldList(const std::vector<FoldCandidate> &FoldList,
                              const MachineInstr *MI) {
  for (auto Candidate : FoldList) {
    if (Candidate.UseMI == MI)
      return true;
  }
  return false;
}

static bool tryAddToFoldList(std::vector<FoldCandidate> &FoldList,
                             MachineInstr *MI, unsigned OpNo,
                             MachineOperand *OpToFold,
                             const SIInstrInfo *TII) {
  if (!TII->isOperandLegal(*MI, OpNo, OpToFold)) {

    // Special case for v_mac_f32_e64 if we are trying to fold into src2.
    unsigned Opc = MI->getOpcode();
    if (Opc == AMDGPU::V_MAC_F32_e64 &&
        (int)OpNo == AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2)) {
      // Check if changing this to a v_mad_f32 instruction will allow us to
      // fold the operand.
      MI->setDesc(TII->get(AMDGPU::V_MAD_F32));
      bool FoldAsMAD = tryAddToFoldList(FoldList, MI, OpNo, OpToFold, TII);
      if (FoldAsMAD) {
        MI->untieRegOperand(OpNo);
        return true;
      }
      MI->setDesc(TII->get(Opc));
    }

    // If we are already folding into another operand of MI, then
    // we can't commute the instruction, otherwise we risk making the
    // other fold illegal.
    if (isUseMIInFoldList(FoldList, MI))
      return false;

    // Operand is not legal, so try to commute the instruction to
    // see if this makes it possible to fold.
    unsigned CommuteIdx0 = TargetInstrInfo::CommuteAnyOperandIndex;
    unsigned CommuteIdx1 = TargetInstrInfo::CommuteAnyOperandIndex;
    bool CanCommute = TII->findCommutedOpIndices(*MI, CommuteIdx0, CommuteIdx1);

    if (CanCommute) {
      if (CommuteIdx0 == OpNo)
        OpNo = CommuteIdx1;
      else if (CommuteIdx1 == OpNo)
        OpNo = CommuteIdx0;
    }

    // One of the operands might be an immediate operand, and OpNo may refer
    // to it after the call of commuteInstruction() below. Such situations are
    // avoided here explicitly as OpNo must be a register operand to be a
    // candidate for memory folding.
    if (CanCommute && (!MI->getOperand(CommuteIdx0).isReg() ||
                       !MI->getOperand(CommuteIdx1).isReg()))
      return false;

    if (!CanCommute ||
        !TII->commuteInstruction(*MI, false, CommuteIdx0, CommuteIdx1))
      return false;

    if (!TII->isOperandLegal(*MI, OpNo, OpToFold))
      return false;
  }

  FoldList.push_back(FoldCandidate(MI, OpNo, OpToFold));
  return true;
}

static void foldOperand(MachineOperand &OpToFold, MachineInstr *UseMI,
                        unsigned UseOpIdx,
                        std::vector<FoldCandidate> &FoldList,
                        SmallVectorImpl<MachineInstr *> &CopiesToReplace,
                        const SIInstrInfo *TII, const SIRegisterInfo &TRI,
                        MachineRegisterInfo &MRI) {
  const MachineOperand &UseOp = UseMI->getOperand(UseOpIdx);

  // FIXME: Fold operands with subregs.
  if (UseOp.isReg() && OpToFold.isReg()) {
    if (UseOp.isImplicit() || UseOp.getSubReg() != AMDGPU::NoSubRegister)
      return;

    // Don't fold subregister extracts into tied operands; only fold a full
    // copy, since a subregister use tied to a full register def doesn't
    // really make sense. e.g. don't fold:
    //
    // %vreg1 = COPY %vreg0:sub1
    // %vreg2<tied3> = V_MAC_F32 %vreg3, %vreg4, %vreg1<tied0>
    //
    // into
    // %vreg2<tied3> = V_MAC_F32 %vreg3, %vreg4, %vreg0:sub1<tied0>
    if (UseOp.isTied() && OpToFold.getSubReg() != AMDGPU::NoSubRegister)
      return;
  }

  bool FoldingImm = OpToFold.isImm();
  APInt Imm;

  if (FoldingImm) {
    unsigned UseReg = UseOp.getReg();
    const TargetRegisterClass *UseRC
      = TargetRegisterInfo::isVirtualRegister(UseReg) ?
        MRI.getRegClass(UseReg) :
        TRI.getPhysRegClass(UseReg);

    Imm = APInt(64, OpToFold.getImm());

    const MCInstrDesc &FoldDesc = TII->get(OpToFold.getParent()->getOpcode());
    const TargetRegisterClass *FoldRC =
      TRI.getRegClass(FoldDesc.OpInfo[0].RegClass);

    // Split 64-bit constants into 32-bits for folding.
    if (FoldRC->getSize() == 8 && UseOp.getSubReg()) {
      if (UseRC->getSize() != 8)
        return;

      if (UseOp.getSubReg() == AMDGPU::sub0) {
        Imm = Imm.getLoBits(32);
      } else {
        assert(UseOp.getSubReg() == AMDGPU::sub1);
        Imm = Imm.getHiBits(32);
      }
    }

    // In order to fold immediates into copies, we need to change the
    // copy to a MOV.
    if (UseMI->getOpcode() == AMDGPU::COPY) {
      unsigned DestReg = UseMI->getOperand(0).getReg();
      const TargetRegisterClass *DestRC
        = TargetRegisterInfo::isVirtualRegister(DestReg) ?
          MRI.getRegClass(DestReg) :
          TRI.getPhysRegClass(DestReg);

      unsigned MovOp = TII->getMovOpcode(DestRC);
      if (MovOp == AMDGPU::COPY)
        return;

      UseMI->setDesc(TII->get(MovOp));
      CopiesToReplace.push_back(UseMI);
    }
  }

  // Special case for REG_SEQUENCE: We can't fold literals into
  // REG_SEQUENCE instructions, so we have to fold them into the
  // uses of REG_SEQUENCE.
  if (UseMI->getOpcode() == AMDGPU::REG_SEQUENCE) {
    unsigned RegSeqDstReg = UseMI->getOperand(0).getReg();
    unsigned RegSeqDstSubReg = UseMI->getOperand(UseOpIdx + 1).getImm();

    for (MachineRegisterInfo::use_iterator
           RSUse = MRI.use_begin(RegSeqDstReg),
           RSE = MRI.use_end(); RSUse != RSE; ++RSUse) {

      MachineInstr *RSUseMI = RSUse->getParent();
      if (RSUse->getSubReg() != RegSeqDstSubReg)
        continue;

      foldOperand(OpToFold, RSUseMI, RSUse.getOperandNo(), FoldList,
                  CopiesToReplace, TII, TRI, MRI);
    }
    return;
  }

  const MCInstrDesc &UseDesc = UseMI->getDesc();

  // Don't fold into target independent nodes. Target independent opcodes
  // don't have defined register classes.
  if (UseDesc.isVariadic() ||
      UseDesc.OpInfo[UseOpIdx].RegClass == -1)
    return;

  if (FoldingImm) {
    MachineOperand ImmOp = MachineOperand::CreateImm(Imm.getSExtValue());
    tryAddToFoldList(FoldList, UseMI, UseOpIdx, &ImmOp, TII);
    return;
  }

  tryAddToFoldList(FoldList, UseMI, UseOpIdx, &OpToFold, TII);

  // FIXME: We could try to change the instruction from 64-bit to 32-bit
  // to enable more folding opportunities. The shrink operands pass
  // already does this.
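  // Here we only record the fold candidate; the 64-bit to 32-bit rewrite
  // itself is left to the separate shrink pass mentioned above.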
  return;
}

bool SIFoldOperands::runOnMachineFunction(MachineFunction &MF) {
  if (skipFunction(*MF.getFunction()))
    return false;

  const SISubtarget &ST = MF.getSubtarget<SISubtarget>();

  MachineRegisterInfo &MRI = MF.getRegInfo();
  const SIInstrInfo *TII = ST.getInstrInfo();
  const SIRegisterInfo &TRI = TII->getRegisterInfo();

  for (MachineFunction::iterator BI = MF.begin(), BE = MF.end();
       BI != BE; ++BI) {

    MachineBasicBlock &MBB = *BI;
    MachineBasicBlock::iterator I, Next;
    for (I = MBB.begin(); I != MBB.end(); I = Next) {
      Next = std::next(I);
      MachineInstr &MI = *I;

      if (!isSafeToFold(MI.getOpcode()))
        continue;

      unsigned OpSize = TII->getOpSize(MI, 1);
      MachineOperand &OpToFold = MI.getOperand(1);
      bool FoldingImm = OpToFold.isImm();

      // FIXME: We could also be folding things like FrameIndexes and
      // TargetIndexes.
      if (!FoldingImm && !OpToFold.isReg())
        continue;

      // Folding immediates with more than one use will increase program size.
      // FIXME: This will also reduce register usage, which may be better
      // in some cases. A better heuristic is needed.
      if (FoldingImm && !TII->isInlineConstant(OpToFold, OpSize) &&
          !MRI.hasOneUse(MI.getOperand(0).getReg()))
        continue;

      if (OpToFold.isReg() &&
          !TargetRegisterInfo::isVirtualRegister(OpToFold.getReg()))
        continue;

      // Prevent folding operands backwards in the function. For example,
      // the COPY opcode must not be replaced by 1 in this example:
      //
      // %vreg3<def> = COPY %VGPR0; VGPR_32:%vreg3
      // ...
      // %VGPR0<def> = V_MOV_B32_e32 1, %EXEC<imp-use>
      MachineOperand &Dst = MI.getOperand(0);
      if (Dst.isReg() &&
          !TargetRegisterInfo::isVirtualRegister(Dst.getReg()))
        continue;

      // We need to mutate the operands of new mov instructions to add implicit
      // uses of EXEC, but adding them invalidates the use_iterator, so defer
      // this.
      SmallVector<MachineInstr *, 4> CopiesToReplace;

      std::vector<FoldCandidate> FoldList;
      for (MachineRegisterInfo::use_iterator
             Use = MRI.use_begin(MI.getOperand(0).getReg()), E = MRI.use_end();
           Use != E; ++Use) {

        MachineInstr *UseMI = Use->getParent();

        foldOperand(OpToFold, UseMI, Use.getOperandNo(), FoldList,
                    CopiesToReplace, TII, TRI, MRI);
      }

      // Make sure we add EXEC uses to any new v_mov instructions created.
      for (MachineInstr *Copy : CopiesToReplace)
        Copy->addImplicitDefUseOperands(MF);

      for (FoldCandidate &Fold : FoldList) {
        if (updateOperand(Fold, TRI)) {
          // Clear kill flags.
          if (!Fold.isImm()) {
            assert(Fold.OpToFold && Fold.OpToFold->isReg());
            // FIXME: Probably shouldn't bother trying to fold if not an
            // SGPR. PeepholeOptimizer can eliminate redundant VGPR->VGPR
            // copies.
            MRI.clearKillFlags(Fold.OpToFold->getReg());
          }
          DEBUG(dbgs() << "Folded source from " << MI << " into OpNo " <<
                Fold.UseOpNo << " of " << *Fold.UseMI << '\n');
        }
      }
    }
  }
  return false;
}