//===-- lib/CodeGen/MachineInstr.cpp --------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Methods common to all machine instructions.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/ADT/FoldingSet.h"
#include "llvm/ADT/Hashing.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetSubtargetInfo.h"
using namespace llvm;

//===----------------------------------------------------------------------===//
// MachineOperand Implementation
//===----------------------------------------------------------------------===//

void MachineOperand::setReg(unsigned Reg) {
  if (getReg() == Reg) return; // No change.

  // Otherwise, we have to change the register.  If this operand is embedded
  // into a machine function, we need to update the old and new register's
  // use/def lists.
  if (MachineInstr *MI = getParent())
    if (MachineBasicBlock *MBB = MI->getParent())
      if (MachineFunction *MF = MBB->getParent()) {
        MachineRegisterInfo &MRI = MF->getRegInfo();
        MRI.removeRegOperandFromUseList(this);
        SmallContents.RegNo = Reg;
        MRI.addRegOperandToUseList(this);
        return;
      }

  // Otherwise, just change the register, no problem. :)
  SmallContents.RegNo = Reg;
}

void MachineOperand::substVirtReg(unsigned Reg, unsigned SubIdx,
                                  const TargetRegisterInfo &TRI) {
  assert(TargetRegisterInfo::isVirtualRegister(Reg));
  if (SubIdx && getSubReg())
    SubIdx = TRI.composeSubRegIndices(SubIdx, getSubReg());
  setReg(Reg);
  if (SubIdx)
    setSubReg(SubIdx);
}

void MachineOperand::substPhysReg(unsigned Reg, const TargetRegisterInfo &TRI) {
  assert(TargetRegisterInfo::isPhysicalRegister(Reg));
  if (getSubReg()) {
    Reg = TRI.getSubReg(Reg, getSubReg());
    // Note that getSubReg() may return 0 if the sub-register doesn't exist.
    // That won't happen in legal code.
    setSubReg(0);
  }
  setReg(Reg);
}

/// Change a def to a use, or a use to a def.
void MachineOperand::setIsDef(bool Val) {
  assert(isReg() && "Wrong MachineOperand accessor");
  assert((!Val || !isDebug()) && "Marking a debug operation as def");
  if (IsDef == Val)
    return;
  // MRI may keep uses and defs in different list positions.
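  // Remove the operand from its current list and re-add it so it is linked
  // into the position appropriate for its new role.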
  if (MachineInstr *MI = getParent())
    if (MachineBasicBlock *MBB = MI->getParent())
      if (MachineFunction *MF = MBB->getParent()) {
        MachineRegisterInfo &MRI = MF->getRegInfo();
        MRI.removeRegOperandFromUseList(this);
        IsDef = Val;
        MRI.addRegOperandToUseList(this);
        return;
      }
  IsDef = Val;
}

// If this operand is currently a register operand, and if this is in a
// function, deregister the operand from the register's use/def list.
void MachineOperand::removeRegFromUses() {
  if (!isReg() || !isOnRegUseList())
    return;

  if (MachineInstr *MI = getParent()) {
    if (MachineBasicBlock *MBB = MI->getParent()) {
      if (MachineFunction *MF = MBB->getParent())
        MF->getRegInfo().removeRegOperandFromUseList(this);
    }
  }
}

/// ChangeToImmediate - Replace this operand with a new immediate operand of
/// the specified value.  If an operand is known to be an immediate already,
/// the setImm method should be used.
void MachineOperand::ChangeToImmediate(int64_t ImmVal) {
  assert((!isReg() || !isTied()) && "Cannot change a tied operand into an imm");

  removeRegFromUses();

  OpKind = MO_Immediate;
  Contents.ImmVal = ImmVal;
}

void MachineOperand::ChangeToFPImmediate(const ConstantFP *FPImm) {
  assert((!isReg() || !isTied()) && "Cannot change a tied operand into an imm");

  removeRegFromUses();

  OpKind = MO_FPImmediate;
  Contents.CFP = FPImm;
}

/// ChangeToRegister - Replace this operand with a new register operand of
/// the specified value.  If an operand is known to be a register already,
/// the setReg method should be used.
void MachineOperand::ChangeToRegister(unsigned Reg, bool isDef, bool isImp,
                                      bool isKill, bool isDead, bool isUndef,
                                      bool isDebug) {
  MachineRegisterInfo *RegInfo = nullptr;
  if (MachineInstr *MI = getParent())
    if (MachineBasicBlock *MBB = MI->getParent())
      if (MachineFunction *MF = MBB->getParent())
        RegInfo = &MF->getRegInfo();
  // If this operand is already a register operand, remove it from the
  // register's use/def lists.
  bool WasReg = isReg();
  if (RegInfo && WasReg)
    RegInfo->removeRegOperandFromUseList(this);

  // Change this to a register and set the reg#.
  OpKind = MO_Register;
  SmallContents.RegNo = Reg;
  SubReg_TargetFlags = 0;
  IsDef = isDef;
  IsImp = isImp;
  IsKill = isKill;
  IsDead = isDead;
  IsUndef = isUndef;
  IsInternalRead = false;
  IsEarlyClobber = false;
  IsDebug = isDebug;
  // Ensure isOnRegUseList() returns false.
  Contents.Reg.Prev = nullptr;
  // Preserve the tie when the operand was already a register.
  if (!WasReg)
    TiedTo = 0;

  // If this operand is embedded in a function, add the operand to the
  // register's use/def list.
  if (RegInfo)
    RegInfo->addRegOperandToUseList(this);
}

/// isIdenticalTo - Return true if this operand is identical to the specified
/// operand. Note that this should stay in sync with the hash_value overload
/// below.
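/// Operands of different kinds, or with different target flags, are never
/// considered identical.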
bool MachineOperand::isIdenticalTo(const MachineOperand &Other) const {
  if (getType() != Other.getType() ||
      getTargetFlags() != Other.getTargetFlags())
    return false;

  switch (getType()) {
  case MachineOperand::MO_Register:
    return getReg() == Other.getReg() && isDef() == Other.isDef() &&
           getSubReg() == Other.getSubReg();
  case MachineOperand::MO_Immediate:
    return getImm() == Other.getImm();
  case MachineOperand::MO_CImmediate:
    return getCImm() == Other.getCImm();
  case MachineOperand::MO_FPImmediate:
    return getFPImm() == Other.getFPImm();
  case MachineOperand::MO_MachineBasicBlock:
    return getMBB() == Other.getMBB();
  case MachineOperand::MO_FrameIndex:
    return getIndex() == Other.getIndex();
  case MachineOperand::MO_ConstantPoolIndex:
  case MachineOperand::MO_TargetIndex:
    return getIndex() == Other.getIndex() && getOffset() == Other.getOffset();
  case MachineOperand::MO_JumpTableIndex:
    return getIndex() == Other.getIndex();
  case MachineOperand::MO_GlobalAddress:
    return getGlobal() == Other.getGlobal() && getOffset() == Other.getOffset();
  case MachineOperand::MO_ExternalSymbol:
    return !strcmp(getSymbolName(), Other.getSymbolName()) &&
           getOffset() == Other.getOffset();
  case MachineOperand::MO_BlockAddress:
    return getBlockAddress() == Other.getBlockAddress() &&
           getOffset() == Other.getOffset();
  case MachineOperand::MO_RegisterMask:
  case MachineOperand::MO_RegisterLiveOut:
    return getRegMask() == Other.getRegMask();
  case MachineOperand::MO_MCSymbol:
    return getMCSymbol() == Other.getMCSymbol();
  case MachineOperand::MO_CFIIndex:
    return getCFIIndex() == Other.getCFIIndex();
  case MachineOperand::MO_Metadata:
    return getMetadata() == Other.getMetadata();
  }
  llvm_unreachable("Invalid machine operand type");
}

// Note: this must stay exactly in sync with isIdenticalTo above.
hash_code llvm::hash_value(const MachineOperand &MO) {
  switch (MO.getType()) {
  case MachineOperand::MO_Register:
    // Register operands don't have target flags.
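    // Hash the register number, sub-register index, and def flag directly.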
    return hash_combine(MO.getType(), MO.getReg(), MO.getSubReg(), MO.isDef());
  case MachineOperand::MO_Immediate:
    return hash_combine(MO.getType(), MO.getTargetFlags(), MO.getImm());
  case MachineOperand::MO_CImmediate:
    return hash_combine(MO.getType(), MO.getTargetFlags(), MO.getCImm());
  case MachineOperand::MO_FPImmediate:
    return hash_combine(MO.getType(), MO.getTargetFlags(), MO.getFPImm());
  case MachineOperand::MO_MachineBasicBlock:
    return hash_combine(MO.getType(), MO.getTargetFlags(), MO.getMBB());
  case MachineOperand::MO_FrameIndex:
    return hash_combine(MO.getType(), MO.getTargetFlags(), MO.getIndex());
  case MachineOperand::MO_ConstantPoolIndex:
  case MachineOperand::MO_TargetIndex:
    return hash_combine(MO.getType(), MO.getTargetFlags(), MO.getIndex(),
                        MO.getOffset());
  case MachineOperand::MO_JumpTableIndex:
    return hash_combine(MO.getType(), MO.getTargetFlags(), MO.getIndex());
  case MachineOperand::MO_ExternalSymbol:
    return hash_combine(MO.getType(), MO.getTargetFlags(), MO.getOffset(),
                        MO.getSymbolName());
  case MachineOperand::MO_GlobalAddress:
    return hash_combine(MO.getType(), MO.getTargetFlags(), MO.getGlobal(),
                        MO.getOffset());
  case MachineOperand::MO_BlockAddress:
    return hash_combine(MO.getType(), MO.getTargetFlags(),
                        MO.getBlockAddress(), MO.getOffset());
  case MachineOperand::MO_RegisterMask:
  case MachineOperand::MO_RegisterLiveOut:
    return hash_combine(MO.getType(), MO.getTargetFlags(), MO.getRegMask());
  case MachineOperand::MO_Metadata:
    return hash_combine(MO.getType(), MO.getTargetFlags(), MO.getMetadata());
  case MachineOperand::MO_MCSymbol:
    return hash_combine(MO.getType(), MO.getTargetFlags(), MO.getMCSymbol());
  case MachineOperand::MO_CFIIndex:
    return hash_combine(MO.getType(), MO.getTargetFlags(), MO.getCFIIndex());
  }
  llvm_unreachable("Invalid machine operand type");
}

/// print - Print the specified machine operand.
///
void MachineOperand::print(raw_ostream &OS, const TargetMachine *TM) const {
  // If the instruction is embedded into a basic block, we can find the
  // target info for the instruction.
  if (!TM)
    if (const MachineInstr *MI = getParent())
      if (const MachineBasicBlock *MBB = MI->getParent())
        if (const MachineFunction *MF = MBB->getParent())
          TM = &MF->getTarget();
  const TargetRegisterInfo *TRI =
      TM ? TM->getSubtargetImpl()->getRegisterInfo() : nullptr;

  switch (getType()) {
  case MachineOperand::MO_Register:
    OS << PrintReg(getReg(), TRI, getSubReg());

    if (isDef() || isKill() || isDead() || isImplicit() || isUndef() ||
        isInternalRead() || isEarlyClobber() || isTied()) {
      OS << '<';
      bool NeedComma = false;
      if (isDef()) {
        if (NeedComma) OS << ',';
        if (isEarlyClobber())
          OS << "earlyclobber,";
        if (isImplicit())
          OS << "imp-";
        OS << "def";
        NeedComma = true;
        // <def,read-undef> only makes sense when getSubReg() is set.
        // Don't clutter the output otherwise.
        if (isUndef() && getSubReg())
          OS << ",read-undef";
      } else if (isImplicit()) {
        OS << "imp-use";
        NeedComma = true;
      }

      if (isKill()) {
        if (NeedComma) OS << ',';
        OS << "kill";
        NeedComma = true;
      }
      if (isDead()) {
        if (NeedComma) OS << ',';
        OS << "dead";
        NeedComma = true;
      }
      if (isUndef() && isUse()) {
        if (NeedComma) OS << ',';
        OS << "undef";
        NeedComma = true;
      }
      if (isInternalRead()) {
        if (NeedComma) OS << ',';
        OS << "internal";
        NeedComma = true;
      }
      if (isTied()) {
        if (NeedComma) OS << ',';
        OS << "tied";
        if (TiedTo != 15)
          OS << unsigned(TiedTo - 1);
      }
      OS << '>';
    }
    break;
  case MachineOperand::MO_Immediate:
    OS << getImm();
    break;
  case MachineOperand::MO_CImmediate:
    getCImm()->getValue().print(OS, false);
    break;
  case MachineOperand::MO_FPImmediate:
    if (getFPImm()->getType()->isFloatTy())
      OS << getFPImm()->getValueAPF().convertToFloat();
    else
      OS << getFPImm()->getValueAPF().convertToDouble();
    break;
  case MachineOperand::MO_MachineBasicBlock:
    OS << "<BB#" << getMBB()->getNumber() << ">";
    break;
  case MachineOperand::MO_FrameIndex:
    OS << "<fi#" << getIndex() << '>';
    break;
  case MachineOperand::MO_ConstantPoolIndex:
    OS << "<cp#" << getIndex();
    if (getOffset()) OS << "+" << getOffset();
    OS << '>';
    break;
  case MachineOperand::MO_TargetIndex:
    OS << "<ti#" << getIndex();
    if (getOffset()) OS << "+" << getOffset();
    OS << '>';
    break;
  case MachineOperand::MO_JumpTableIndex:
    OS << "<jt#" << getIndex() << '>';
    break;
  case MachineOperand::MO_GlobalAddress:
    OS << "<ga:";
    getGlobal()->printAsOperand(OS, /*PrintType=*/false);
    if (getOffset()) OS << "+" << getOffset();
    OS << '>';
    break;
  case MachineOperand::MO_ExternalSymbol:
    OS << "<es:" << getSymbolName();
    if (getOffset()) OS << "+" << getOffset();
    OS << '>';
    break;
  case MachineOperand::MO_BlockAddress:
    OS << '<';
    getBlockAddress()->printAsOperand(OS, /*PrintType=*/false);
    if (getOffset()) OS << "+" << getOffset();
    OS << '>';
    break;
  case MachineOperand::MO_RegisterMask:
    OS << "<regmask>";
    break;
  case MachineOperand::MO_RegisterLiveOut:
    OS << "<regliveout>";
    break;
  case MachineOperand::MO_Metadata:
    OS << '<';
    getMetadata()->printAsOperand(OS);
    OS << '>';
    break;
  case MachineOperand::MO_MCSymbol:
    OS << "<MCSym=" << *getMCSymbol() << '>';
    break;
  case MachineOperand::MO_CFIIndex:
    OS << "<call frame instruction>";
    break;
  }

  if (unsigned TF = getTargetFlags())
    OS << "[TF=" << TF << ']';
}

//===----------------------------------------------------------------------===//
// MachineMemOperand Implementation
//===----------------------------------------------------------------------===//

/// getAddrSpace - Return the LLVM IR address space number that this pointer
/// points into.
unsigned MachinePointerInfo::getAddrSpace() const {
  if (V.isNull() || V.is<const PseudoSourceValue*>()) return 0;
  return cast<PointerType>(V.get<const Value*>()->getType())->getAddressSpace();
}

/// getConstantPool - Return a MachinePointerInfo record that refers to the
/// constant pool.
MachinePointerInfo MachinePointerInfo::getConstantPool() {
  return MachinePointerInfo(PseudoSourceValue::getConstantPool());
}

/// getFixedStack - Return a MachinePointerInfo record that refers to the
/// specified FrameIndex.
MachinePointerInfo MachinePointerInfo::getFixedStack(int FI, int64_t offset) {
  return MachinePointerInfo(PseudoSourceValue::getFixedStack(FI), offset);
}

MachinePointerInfo MachinePointerInfo::getJumpTable() {
  return MachinePointerInfo(PseudoSourceValue::getJumpTable());
}

MachinePointerInfo MachinePointerInfo::getGOT() {
  return MachinePointerInfo(PseudoSourceValue::getGOT());
}

MachinePointerInfo MachinePointerInfo::getStack(int64_t Offset) {
  return MachinePointerInfo(PseudoSourceValue::getStack(), Offset);
}

MachineMemOperand::MachineMemOperand(MachinePointerInfo ptrinfo, unsigned f,
                                     uint64_t s, unsigned int a,
                                     const AAMDNodes &AAInfo,
                                     const MDNode *Ranges)
  : PtrInfo(ptrinfo), Size(s),
    Flags((f & ((1 << MOMaxBits) - 1)) | ((Log2_32(a) + 1) << MOMaxBits)),
    AAInfo(AAInfo), Ranges(Ranges) {
  assert((PtrInfo.V.isNull() || PtrInfo.V.is<const PseudoSourceValue*>() ||
          isa<PointerType>(PtrInfo.V.get<const Value*>()->getType())) &&
         "invalid pointer value");
  assert(getBaseAlignment() == a && "Alignment is not a power of 2!");
  assert((isLoad() || isStore()) && "Not a load/store!");
}

/// Profile - Gather unique data for the object.
///
void MachineMemOperand::Profile(FoldingSetNodeID &ID) const {
  ID.AddInteger(getOffset());
  ID.AddInteger(Size);
  ID.AddPointer(getOpaqueValue());
  ID.AddInteger(Flags);
}

void MachineMemOperand::refineAlignment(const MachineMemOperand *MMO) {
  // The Value and Offset may differ due to CSE. But the flags and size
  // should be the same.
  assert(MMO->getFlags() == getFlags() && "Flags mismatch!");
  assert(MMO->getSize() == getSize() && "Size mismatch!");

  if (MMO->getBaseAlignment() >= getBaseAlignment()) {
    // Update the alignment value.
    Flags = (Flags & ((1 << MOMaxBits) - 1)) |
            ((Log2_32(MMO->getBaseAlignment()) + 1) << MOMaxBits);
    // Also update the base and offset, because the new alignment may
    // not be applicable with the old ones.
    PtrInfo = MMO->PtrInfo;
  }
}

/// getAlignment - Return the minimum known alignment in bytes of the
/// actual memory reference.
uint64_t MachineMemOperand::getAlignment() const {
  return MinAlign(getBaseAlignment(), getOffset());
}

raw_ostream &llvm::operator<<(raw_ostream &OS, const MachineMemOperand &MMO) {
  assert((MMO.isLoad() || MMO.isStore()) &&
         "SV has to be a load, store or both.");

  if (MMO.isVolatile())
    OS << "Volatile ";

  if (MMO.isLoad())
    OS << "LD";
  if (MMO.isStore())
    OS << "ST";
  OS << MMO.getSize();

  // Print the address information.
  OS << "[";
  if (const Value *V = MMO.getValue())
    V->printAsOperand(OS, /*PrintType=*/false);
  else if (const PseudoSourceValue *PSV = MMO.getPseudoValue())
    PSV->printCustom(OS);
  else
    OS << "<unknown>";

  unsigned AS = MMO.getAddrSpace();
  if (AS != 0)
    OS << "(addrspace=" << AS << ')';

  // If the alignment of the memory reference itself differs from the alignment
  // of the base pointer, print the base alignment explicitly, next to the base
  // pointer.
  if (MMO.getBaseAlignment() != MMO.getAlignment())
    OS << "(align=" << MMO.getBaseAlignment() << ")";

  if (MMO.getOffset() != 0)
    OS << "+" << MMO.getOffset();
  OS << "]";

  // Print the alignment of the reference.
  if (MMO.getBaseAlignment() != MMO.getAlignment() ||
      MMO.getBaseAlignment() != MMO.getSize())
    OS << "(align=" << MMO.getAlignment() << ")";

  // Print TBAA info.
  if (const MDNode *TBAAInfo = MMO.getAAInfo().TBAA) {
    OS << "(tbaa=";
    if (TBAAInfo->getNumOperands() > 0)
      TBAAInfo->getOperand(0)->printAsOperand(OS);
    else
      OS << "<unknown>";
    OS << ")";
  }

  // Print AA scope info.
  if (const MDNode *ScopeInfo = MMO.getAAInfo().Scope) {
    OS << "(alias.scope=";
    if (ScopeInfo->getNumOperands() > 0)
      for (unsigned i = 0, ie = ScopeInfo->getNumOperands(); i != ie; ++i) {
        ScopeInfo->getOperand(i)->printAsOperand(OS);
        if (i != ie-1)
          OS << ",";
      }
    else
      OS << "<unknown>";
    OS << ")";
  }

  // Print AA noalias scope info.
  if (const MDNode *NoAliasInfo = MMO.getAAInfo().NoAlias) {
    OS << "(noalias=";
    if (NoAliasInfo->getNumOperands() > 0)
      for (unsigned i = 0, ie = NoAliasInfo->getNumOperands(); i != ie; ++i) {
        NoAliasInfo->getOperand(i)->printAsOperand(OS);
        if (i != ie-1)
          OS << ",";
      }
    else
      OS << "<unknown>";
    OS << ")";
  }

  // Print nontemporal info.
  if (MMO.isNonTemporal())
    OS << "(nontemporal)";

  return OS;
}

//===----------------------------------------------------------------------===//
// MachineInstr Implementation
//===----------------------------------------------------------------------===//

void MachineInstr::addImplicitDefUseOperands(MachineFunction &MF) {
  if (MCID->ImplicitDefs)
    for (const uint16_t *ImpDefs = MCID->getImplicitDefs(); *ImpDefs; ++ImpDefs)
      addOperand(MF, MachineOperand::CreateReg(*ImpDefs, true, true));
  if (MCID->ImplicitUses)
    for (const uint16_t *ImpUses = MCID->getImplicitUses(); *ImpUses; ++ImpUses)
      addOperand(MF, MachineOperand::CreateReg(*ImpUses, false, true));
}

/// MachineInstr ctor - This constructor creates a MachineInstr and adds the
/// implicit operands.  It reserves space for the number of operands specified
/// by the MCInstrDesc.
MachineInstr::MachineInstr(MachineFunction &MF, const MCInstrDesc &tid,
                           const DebugLoc dl, bool NoImp)
  : MCID(&tid), Parent(nullptr), Operands(nullptr), NumOperands(0),
    Flags(0), AsmPrinterFlags(0),
    NumMemRefs(0), MemRefs(nullptr), debugLoc(dl) {
  assert(debugLoc.hasTrivialDestructor() && "Expected trivial destructor");

  // Reserve space for the expected number of operands.
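  // (explicit operands plus the implicit defs and uses declared by the
  // MCInstrDesc).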
  if (unsigned NumOps = MCID->getNumOperands() +
      MCID->getNumImplicitDefs() + MCID->getNumImplicitUses()) {
    CapOperands = OperandCapacity::get(NumOps);
    Operands = MF.allocateOperandArray(CapOperands);
  }

  if (!NoImp)
    addImplicitDefUseOperands(MF);
}

/// MachineInstr ctor - Copies MachineInstr arg exactly.
///
MachineInstr::MachineInstr(MachineFunction &MF, const MachineInstr &MI)
  : MCID(&MI.getDesc()), Parent(nullptr), Operands(nullptr), NumOperands(0),
    Flags(0), AsmPrinterFlags(0),
    NumMemRefs(MI.NumMemRefs), MemRefs(MI.MemRefs),
    debugLoc(MI.getDebugLoc()) {
  assert(debugLoc.hasTrivialDestructor() && "Expected trivial destructor");

  CapOperands = OperandCapacity::get(MI.getNumOperands());
  Operands = MF.allocateOperandArray(CapOperands);

  // Copy operands.
  for (unsigned i = 0; i != MI.getNumOperands(); ++i)
    addOperand(MF, MI.getOperand(i));

  // Copy all the sensible flags.
  setFlags(MI.Flags);
}

/// getRegInfo - If this instruction is embedded into a MachineFunction,
/// return the MachineRegisterInfo object for the current function, otherwise
/// return null.
MachineRegisterInfo *MachineInstr::getRegInfo() {
  if (MachineBasicBlock *MBB = getParent())
    return &MBB->getParent()->getRegInfo();
  return nullptr;
}

/// RemoveRegOperandsFromUseLists - Unlink all of the register operands in
/// this instruction from their respective use lists.  This requires that the
/// operands already be on their use lists.
void MachineInstr::RemoveRegOperandsFromUseLists(MachineRegisterInfo &MRI) {
  for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
    if (Operands[i].isReg())
      MRI.removeRegOperandFromUseList(&Operands[i]);
}

/// AddRegOperandsToUseLists - Add all of the register operands in
/// this instruction to their respective use lists.  This requires that the
/// operands not be on their use lists yet.
void MachineInstr::AddRegOperandsToUseLists(MachineRegisterInfo &MRI) {
  for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
    if (Operands[i].isReg())
      MRI.addRegOperandToUseList(&Operands[i]);
}

void MachineInstr::addOperand(const MachineOperand &Op) {
  MachineBasicBlock *MBB = getParent();
  assert(MBB && "Use MachineInstrBuilder to add operands to dangling instrs");
  MachineFunction *MF = MBB->getParent();
  assert(MF && "Use MachineInstrBuilder to add operands to dangling instrs");
  addOperand(*MF, Op);
}

/// Move NumOps MachineOperands from Src to Dst, with support for overlapping
/// ranges. If MRI is non-null also update use-def chains.
static void moveOperands(MachineOperand *Dst, MachineOperand *Src,
                         unsigned NumOps, MachineRegisterInfo *MRI) {
  if (MRI)
    return MRI->moveOperands(Dst, Src, NumOps);

  // Here it would be convenient to call memmove, but that isn't allowed
  // because MachineOperand has a constructor and so isn't a POD type.
  if (Dst < Src)
    for (unsigned i = 0; i != NumOps; ++i)
      new (Dst + i) MachineOperand(Src[i]);
  else
    for (unsigned i = NumOps; i ; --i)
      new (Dst + i - 1) MachineOperand(Src[i - 1]);
}

/// addOperand - Add the specified operand to the instruction.  If it is an
/// implicit operand, it is added to the end of the operand list.  If it is
/// an explicit operand it is added at the end of the explicit operand list
/// (before the first implicit operand).
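/// The operand array is reallocated from the MachineFunction's allocator when
/// its capacity is exhausted.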
void MachineInstr::addOperand(MachineFunction &MF, const MachineOperand &Op) {
  assert(MCID && "Cannot add operands before providing an instr descriptor");

  // Check if we're adding one of our existing operands.
  if (&Op >= Operands && &Op < Operands + NumOperands) {
    // This is unusual: MI->addOperand(MI->getOperand(i)).
    // If adding Op requires reallocating or moving existing operands around,
    // the Op reference could go stale. Support it by copying Op.
    MachineOperand CopyOp(Op);
    return addOperand(MF, CopyOp);
  }

  // Find the insert location for the new operand. Implicit registers go at
  // the end, everything else goes before the implicit regs.
  //
  // FIXME: Allow mixed explicit and implicit operands on inline asm.
  // InstrEmitter::EmitSpecialNode() is marking inline asm clobbers as
  // implicit-defs, but they must not be moved around. See the FIXME in
  // InstrEmitter.cpp.
  unsigned OpNo = getNumOperands();
  bool isImpReg = Op.isReg() && Op.isImplicit();
  if (!isImpReg && !isInlineAsm()) {
    while (OpNo && Operands[OpNo-1].isReg() && Operands[OpNo-1].isImplicit()) {
      --OpNo;
      assert(!Operands[OpNo].isTied() && "Cannot move tied operands");
    }
  }

#ifndef NDEBUG
  bool isMetaDataOp = Op.getType() == MachineOperand::MO_Metadata;
  // OpNo now points to the desired insertion point.  Unless this is a variadic
  // instruction, only implicit regs are allowed beyond MCID->getNumOperands().
  // RegMask operands go between the explicit and implicit operands.
  assert((isImpReg || Op.isRegMask() || MCID->isVariadic() ||
          OpNo < MCID->getNumOperands() || isMetaDataOp) &&
         "Trying to add an operand to a machine instr that is already done!");
#endif

  MachineRegisterInfo *MRI = getRegInfo();

  // Determine if the Operands array needs to be reallocated.
  // Save the old capacity and operand array.
  OperandCapacity OldCap = CapOperands;
  MachineOperand *OldOperands = Operands;
  if (!OldOperands || OldCap.getSize() == getNumOperands()) {
    CapOperands = OldOperands ? OldCap.getNext() : OldCap.get(1);
    Operands = MF.allocateOperandArray(CapOperands);
    // Move the operands before the insertion point.
    if (OpNo)
      moveOperands(Operands, OldOperands, OpNo, MRI);
  }

  // Move the operands following the insertion point.
  if (OpNo != NumOperands)
    moveOperands(Operands + OpNo + 1, OldOperands + OpNo, NumOperands - OpNo,
                 MRI);
  ++NumOperands;

  // Deallocate the old operand array.
  if (OldOperands != Operands && OldOperands)
    MF.deallocateOperandArray(OldCap, OldOperands);

  // Copy Op into place. It still needs to be inserted into the MRI use lists.
  MachineOperand *NewMO = new (Operands + OpNo) MachineOperand(Op);
  NewMO->ParentMI = this;

  // When adding a register operand, tell MRI about it.
  if (NewMO->isReg()) {
    // Ensure isOnRegUseList() returns false, regardless of Op's status.
    NewMO->Contents.Reg.Prev = nullptr;
    // Ignore existing ties. This is not a property that can be copied.
    NewMO->TiedTo = 0;
    // Add the new operand to MRI, but only for instructions in an MBB.
    if (MRI)
      MRI->addRegOperandToUseList(NewMO);
    // The MCID operand information isn't accurate until we start adding
    // explicit operands. The implicit operands are added first, then the
    // explicits are inserted before them.
    if (!isImpReg) {
      // Tie uses to defs as indicated in MCInstrDesc.
      if (NewMO->isUse()) {
        int DefIdx = MCID->getOperandConstraint(OpNo, MCOI::TIED_TO);
        if (DefIdx != -1)
          tieOperands(DefIdx, OpNo);
      }
      // If the register operand is flagged as early, mark the operand as such.
      if (MCID->getOperandConstraint(OpNo, MCOI::EARLY_CLOBBER) != -1)
        NewMO->setIsEarlyClobber(true);
    }
  }
}

/// RemoveOperand - Erase an operand from an instruction, leaving it with one
/// fewer operand than it started with.
///
void MachineInstr::RemoveOperand(unsigned OpNo) {
  assert(OpNo < getNumOperands() && "Invalid operand number");
  untieRegOperand(OpNo);

#ifndef NDEBUG
  // Moving tied operands would break the ties.
  for (unsigned i = OpNo + 1, e = getNumOperands(); i != e; ++i)
    if (Operands[i].isReg())
      assert(!Operands[i].isTied() && "Cannot move tied operands");
#endif

  MachineRegisterInfo *MRI = getRegInfo();
  if (MRI && Operands[OpNo].isReg())
    MRI->removeRegOperandFromUseList(Operands + OpNo);

  // Don't call the MachineOperand destructor. A lot of this code depends on
  // MachineOperand having a trivial destructor anyway, and adding a call here
  // wouldn't make it 'destructor-correct'.

  if (unsigned N = NumOperands - 1 - OpNo)
    moveOperands(Operands + OpNo, Operands + OpNo + 1, N, MRI);
  --NumOperands;
}

/// addMemOperand - Add a MachineMemOperand to the machine instruction.
/// This function should be used only occasionally. The setMemRefs function
/// is the primary method for setting up a MachineInstr's MemRefs list.
void MachineInstr::addMemOperand(MachineFunction &MF,
                                 MachineMemOperand *MO) {
  mmo_iterator OldMemRefs = MemRefs;
  unsigned OldNumMemRefs = NumMemRefs;

  unsigned NewNum = NumMemRefs + 1;
  mmo_iterator NewMemRefs = MF.allocateMemRefsArray(NewNum);

  std::copy(OldMemRefs, OldMemRefs + OldNumMemRefs, NewMemRefs);
  NewMemRefs[NewNum - 1] = MO;
  setMemRefs(NewMemRefs, NewMemRefs + NewNum);
}

bool MachineInstr::hasPropertyInBundle(unsigned Mask, QueryType Type) const {
  assert(!isBundledWithPred() && "Must be called on bundle header");
  for (MachineBasicBlock::const_instr_iterator MII = this;; ++MII) {
    if (MII->getDesc().getFlags() & Mask) {
      if (Type == AnyInBundle)
        return true;
    } else {
      if (Type == AllInBundle && !MII->isBundle())
        return false;
    }
    // This was the last instruction in the bundle.
    if (!MII->isBundledWithSucc())
      return Type == AllInBundle;
  }
}

bool MachineInstr::isIdenticalTo(const MachineInstr *Other,
                                 MICheckType Check) const {
  // If opcodes or number of operands are not the same then the two
  // instructions are obviously not identical.
  if (Other->getOpcode() != getOpcode() ||
      Other->getNumOperands() != getNumOperands())
    return false;

  if (isBundle()) {
    // Both instructions are bundles, compare MIs inside the bundle.
    MachineBasicBlock::const_instr_iterator I1 = *this;
    MachineBasicBlock::const_instr_iterator E1 = getParent()->instr_end();
    MachineBasicBlock::const_instr_iterator I2 = *Other;
    MachineBasicBlock::const_instr_iterator E2 = Other->getParent()->instr_end();
    while (++I1 != E1 && I1->isInsideBundle()) {
      ++I2;
      if (I2 == E2 || !I2->isInsideBundle() || !I1->isIdenticalTo(I2, Check))
        return false;
    }
  }

  // Check operands to make sure they match.
  for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = getOperand(i);
    const MachineOperand &OMO = Other->getOperand(i);
    if (!MO.isReg()) {
      if (!MO.isIdenticalTo(OMO))
        return false;
      continue;
    }

    // Clients may or may not want to ignore defs when testing for equality.
    // For example, the machine CSE pass only cares about finding common
    // subexpressions, so it's safe to ignore virtual register defs.
    if (MO.isDef()) {
      if (Check == IgnoreDefs)
        continue;
      else if (Check == IgnoreVRegDefs) {
        if (TargetRegisterInfo::isPhysicalRegister(MO.getReg()) ||
            TargetRegisterInfo::isPhysicalRegister(OMO.getReg()))
          if (MO.getReg() != OMO.getReg())
            return false;
      } else {
        if (!MO.isIdenticalTo(OMO))
          return false;
        if (Check == CheckKillDead && MO.isDead() != OMO.isDead())
          return false;
      }
    } else {
      if (!MO.isIdenticalTo(OMO))
        return false;
      if (Check == CheckKillDead && MO.isKill() != OMO.isKill())
        return false;
    }
  }
  // If DebugLoc does not match then two dbg.values are not identical.
  if (isDebugValue())
    if (!getDebugLoc().isUnknown() && !Other->getDebugLoc().isUnknown()
        && getDebugLoc() != Other->getDebugLoc())
      return false;
  return true;
}

MachineInstr *MachineInstr::removeFromParent() {
  assert(getParent() && "Not embedded in a basic block!");
  return getParent()->remove(this);
}

MachineInstr *MachineInstr::removeFromBundle() {
  assert(getParent() && "Not embedded in a basic block!");
  return getParent()->remove_instr(this);
}

void MachineInstr::eraseFromParent() {
  assert(getParent() && "Not embedded in a basic block!");
  getParent()->erase(this);
}

void MachineInstr::eraseFromParentAndMarkDBGValuesForRemoval() {
  assert(getParent() && "Not embedded in a basic block!");
  MachineBasicBlock *MBB = getParent();
  MachineFunction *MF = MBB->getParent();
  assert(MF && "Not embedded in a function!");

  MachineInstr *MI = (MachineInstr *)this;
  MachineRegisterInfo &MRI = MF->getRegInfo();

  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg() || !MO.isDef())
      continue;
    unsigned Reg = MO.getReg();
    if (!TargetRegisterInfo::isVirtualRegister(Reg))
      continue;
    MRI.markUsesInDebugValueAsUndef(Reg);
  }
  MI->eraseFromParent();
}

void MachineInstr::eraseFromBundle() {
  assert(getParent() && "Not embedded in a basic block!");
  getParent()->erase_instr(this);
}

/// getNumExplicitOperands - Returns the number of non-implicit operands.
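/// For variadic instructions this also counts explicit operands added beyond
/// the fixed count in the MCInstrDesc.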
///
unsigned MachineInstr::getNumExplicitOperands() const {
  unsigned NumOperands = MCID->getNumOperands();
  if (!MCID->isVariadic())
    return NumOperands;

  for (unsigned i = NumOperands, e = getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = getOperand(i);
    if (!MO.isReg() || !MO.isImplicit())
      NumOperands++;
  }
  return NumOperands;
}

void MachineInstr::bundleWithPred() {
  assert(!isBundledWithPred() && "MI is already bundled with its predecessor");
  setFlag(BundledPred);
  MachineBasicBlock::instr_iterator Pred = this;
  --Pred;
  assert(!Pred->isBundledWithSucc() && "Inconsistent bundle flags");
  Pred->setFlag(BundledSucc);
}

void MachineInstr::bundleWithSucc() {
  assert(!isBundledWithSucc() && "MI is already bundled with its successor");
  setFlag(BundledSucc);
  MachineBasicBlock::instr_iterator Succ = this;
  ++Succ;
  assert(!Succ->isBundledWithPred() && "Inconsistent bundle flags");
  Succ->setFlag(BundledPred);
}

void MachineInstr::unbundleFromPred() {
  assert(isBundledWithPred() && "MI isn't bundled with its predecessor");
  clearFlag(BundledPred);
  MachineBasicBlock::instr_iterator Pred = this;
  --Pred;
  assert(Pred->isBundledWithSucc() && "Inconsistent bundle flags");
  Pred->clearFlag(BundledSucc);
}

void MachineInstr::unbundleFromSucc() {
  assert(isBundledWithSucc() && "MI isn't bundled with its successor");
  clearFlag(BundledSucc);
  MachineBasicBlock::instr_iterator Succ = this;
  ++Succ;
  assert(Succ->isBundledWithPred() && "Inconsistent bundle flags");
  Succ->clearFlag(BundledPred);
}

bool MachineInstr::isStackAligningInlineAsm() const {
  if (isInlineAsm()) {
    unsigned ExtraInfo = getOperand(InlineAsm::MIOp_ExtraInfo).getImm();
    if (ExtraInfo & InlineAsm::Extra_IsAlignStack)
      return true;
  }
  return false;
}

InlineAsm::AsmDialect MachineInstr::getInlineAsmDialect() const {
  assert(isInlineAsm() && "getInlineAsmDialect() only works for inline asms!");
  unsigned ExtraInfo = getOperand(InlineAsm::MIOp_ExtraInfo).getImm();
  return InlineAsm::AsmDialect((ExtraInfo & InlineAsm::Extra_AsmDialect) != 0);
}

int MachineInstr::findInlineAsmFlagIdx(unsigned OpIdx,
                                       unsigned *GroupNo) const {
  assert(isInlineAsm() && "Expected an inline asm instruction");
  assert(OpIdx < getNumOperands() && "OpIdx out of range");

  // Ignore queries about the initial operands.
  if (OpIdx < InlineAsm::MIOp_FirstOperand)
    return -1;

  unsigned Group = 0;
  unsigned NumOps;
  for (unsigned i = InlineAsm::MIOp_FirstOperand, e = getNumOperands(); i < e;
       i += NumOps) {
    const MachineOperand &FlagMO = getOperand(i);
    // If we reach the implicit register operands, stop looking.
    if (!FlagMO.isImm())
      return -1;
    NumOps = 1 + InlineAsm::getNumOperandRegisters(FlagMO.getImm());
    if (i + NumOps > OpIdx) {
      if (GroupNo)
        *GroupNo = Group;
      return i;
    }
    ++Group;
  }
  return -1;
}

const TargetRegisterClass*
MachineInstr::getRegClassConstraint(unsigned OpIdx,
                                    const TargetInstrInfo *TII,
                                    const TargetRegisterInfo *TRI) const {
  assert(getParent() && "Can't have an MBB reference here!");
  assert(getParent()->getParent() && "Can't have an MF reference here!");
  const MachineFunction &MF = *getParent()->getParent();

  // Most opcodes have fixed constraints in their MCInstrDesc.
  if (!isInlineAsm())
    return TII->getRegClass(getDesc(), OpIdx, TRI, MF);

  if (!getOperand(OpIdx).isReg())
    return nullptr;

  // For tied uses on inline asm, get the constraint from the def.
  unsigned DefIdx;
  if (getOperand(OpIdx).isUse() && isRegTiedToDefOperand(OpIdx, &DefIdx))
    OpIdx = DefIdx;

  // Inline asm stores register class constraints in the flag word.
  int FlagIdx = findInlineAsmFlagIdx(OpIdx);
  if (FlagIdx < 0)
    return nullptr;

  unsigned Flag = getOperand(FlagIdx).getImm();
  unsigned RCID;
  if (InlineAsm::hasRegClassConstraint(Flag, RCID))
    return TRI->getRegClass(RCID);

  // Assume that all registers in a memory operand are pointers.
  if (InlineAsm::getKind(Flag) == InlineAsm::Kind_Mem)
    return TRI->getPointerRegClass(MF);

  return nullptr;
}

const TargetRegisterClass *MachineInstr::getRegClassConstraintEffectForVReg(
    unsigned Reg, const TargetRegisterClass *CurRC, const TargetInstrInfo *TII,
    const TargetRegisterInfo *TRI, bool ExploreBundle) const {
  // Check every operand inside the bundle if we have been asked to.
  if (ExploreBundle)
    for (ConstMIBundleOperands OpndIt(this); OpndIt.isValid() && CurRC;
         ++OpndIt)
      CurRC = OpndIt->getParent()->getRegClassConstraintEffectForVRegImpl(
          OpndIt.getOperandNo(), Reg, CurRC, TII, TRI);
  else
    // Otherwise, just check the current operands.
    for (ConstMIOperands OpndIt(this); OpndIt.isValid() && CurRC; ++OpndIt)
      CurRC = getRegClassConstraintEffectForVRegImpl(OpndIt.getOperandNo(), Reg,
                                                     CurRC, TII, TRI);
  return CurRC;
}

const TargetRegisterClass *MachineInstr::getRegClassConstraintEffectForVRegImpl(
    unsigned OpIdx, unsigned Reg, const TargetRegisterClass *CurRC,
    const TargetInstrInfo *TII, const TargetRegisterInfo *TRI) const {
  assert(CurRC && "Invalid initial register class");
  // Check if Reg is constrained by some of its use/def from MI.
  const MachineOperand &MO = getOperand(OpIdx);
  if (!MO.isReg() || MO.getReg() != Reg)
    return CurRC;
  // If yes, accumulate the constraints through the operand.
  return getRegClassConstraintEffect(OpIdx, CurRC, TII, TRI);
}

const TargetRegisterClass *MachineInstr::getRegClassConstraintEffect(
    unsigned OpIdx, const TargetRegisterClass *CurRC,
    const TargetInstrInfo *TII, const TargetRegisterInfo *TRI) const {
  const TargetRegisterClass *OpRC = getRegClassConstraint(OpIdx, TII, TRI);
  const MachineOperand &MO = getOperand(OpIdx);
  assert(MO.isReg() &&
         "Cannot get register constraints for non-register operand");
  assert(CurRC && "Invalid initial register class");
  if (unsigned SubIdx = MO.getSubReg()) {
    if (OpRC)
      CurRC = TRI->getMatchingSuperRegClass(CurRC, OpRC, SubIdx);
    else
      CurRC = TRI->getSubClassWithSubReg(CurRC, SubIdx);
  } else if (OpRC)
    CurRC = TRI->getCommonSubClass(CurRC, OpRC);
  return CurRC;
}

/// Return the number of instructions inside the MI bundle, not counting the
/// header instruction.
unsigned MachineInstr::getBundleSize() const {
  MachineBasicBlock::const_instr_iterator I = this;
  unsigned Size = 0;
  while (I->isBundledWithSucc())
    ++Size, ++I;
  return Size;
}

/// findRegisterUseOperandIdx() - Returns the operand index that is a use of
/// the specified register, or -1 if it is not found. It further tightens
/// the search criteria to a use that kills the register if isKill is true.
int MachineInstr::findRegisterUseOperandIdx(unsigned Reg, bool isKill,
                                            const TargetRegisterInfo *TRI) const {
  for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = getOperand(i);
    if (!MO.isReg() || !MO.isUse())
      continue;
    unsigned MOReg = MO.getReg();
    if (!MOReg)
      continue;
    if (MOReg == Reg ||
        (TRI &&
         TargetRegisterInfo::isPhysicalRegister(MOReg) &&
         TargetRegisterInfo::isPhysicalRegister(Reg) &&
         TRI->isSubRegister(MOReg, Reg)))
      if (!isKill || MO.isKill())
        return i;
  }
  return -1;
}

/// readsWritesVirtualRegister - Return a pair of bools (reads, writes)
/// indicating if this instruction reads or writes Reg. This also considers
/// partial defines.
std::pair<bool,bool>
MachineInstr::readsWritesVirtualRegister(unsigned Reg,
                                         SmallVectorImpl<unsigned> *Ops) const {
  bool PartDef = false; // Partial redefine.
  bool FullDef = false; // Full define.
  bool Use = false;

  for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = getOperand(i);
    if (!MO.isReg() || MO.getReg() != Reg)
      continue;
    if (Ops)
      Ops->push_back(i);
    if (MO.isUse())
      Use |= !MO.isUndef();
    else if (MO.getSubReg() && !MO.isUndef())
      // A partial <def,undef> doesn't count as reading the register.
      PartDef = true;
    else
      FullDef = true;
  }
  // A partial redefine uses Reg unless there is also a full define.
  return std::make_pair(Use || (PartDef && !FullDef), PartDef || FullDef);
}

/// findRegisterDefOperandIdx() - Returns the operand index that is a def of
/// the specified register or -1 if it is not found. If isDead is true, defs
/// that are not dead are skipped. If TargetRegisterInfo is non-null, then it
/// also checks if there is a def of a super-register.
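/// When Overlap is set, regmask operands that clobber Reg are reported as
/// well.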
int
MachineInstr::findRegisterDefOperandIdx(unsigned Reg, bool isDead, bool Overlap,
                                        const TargetRegisterInfo *TRI) const {
  bool isPhys = TargetRegisterInfo::isPhysicalRegister(Reg);
  for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = getOperand(i);
    // Accept regmask operands when Overlap is set.
    // Ignore them when looking for a specific def operand (Overlap == false).
    if (isPhys && Overlap && MO.isRegMask() && MO.clobbersPhysReg(Reg))
      return i;
    if (!MO.isReg() || !MO.isDef())
      continue;
    unsigned MOReg = MO.getReg();
    bool Found = (MOReg == Reg);
    if (!Found && TRI && isPhys &&
        TargetRegisterInfo::isPhysicalRegister(MOReg)) {
      if (Overlap)
        Found = TRI->regsOverlap(MOReg, Reg);
      else
        Found = TRI->isSubRegister(MOReg, Reg);
    }
    if (Found && (!isDead || MO.isDead()))
      return i;
  }
  return -1;
}

/// findFirstPredOperandIdx() - Find the index of the first operand in the
/// operand list that is used to represent the predicate. It returns -1 if
/// none is found.
int MachineInstr::findFirstPredOperandIdx() const {
  // Don't call MCID.findFirstPredOperandIdx() because this variant
  // is sometimes called on an instruction that's not yet complete, and
  // so the number of operands is less than the MCID indicates. In
  // particular, the PTX target does this.
  const MCInstrDesc &MCID = getDesc();
  if (MCID.isPredicable()) {
    for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
      if (MCID.OpInfo[i].isPredicate())
        return i;
  }

  return -1;
}

// MachineOperand::TiedTo is 4 bits wide.
const unsigned TiedMax = 15;

/// tieOperands - Mark operands at DefIdx and UseIdx as tied to each other.
///
/// Use and def operands can be tied together, indicated by a non-zero TiedTo
/// field. TiedTo can have these values:
///
/// 0:              Operand is not tied to anything.
/// 1 to TiedMax-1: Tied to getOperand(TiedTo-1).
/// TiedMax:        Tied to an operand >= TiedMax-1.
///
/// The tied def must be one of the first TiedMax operands on a normal
/// instruction. INLINEASM instructions allow more tied defs.
///
void MachineInstr::tieOperands(unsigned DefIdx, unsigned UseIdx) {
  MachineOperand &DefMO = getOperand(DefIdx);
  MachineOperand &UseMO = getOperand(UseIdx);
  assert(DefMO.isDef() && "DefIdx must be a def operand");
  assert(UseMO.isUse() && "UseIdx must be a use operand");
  assert(!DefMO.isTied() && "Def is already tied to another use");
  assert(!UseMO.isTied() && "Use is already tied to another def");

  if (DefIdx < TiedMax)
    UseMO.TiedTo = DefIdx + 1;
  else {
    // Inline asm can use the group descriptors to find tied operands, but on
    // normal instructions, the tied def must be within the first TiedMax
    // operands.
    assert(isInlineAsm() && "DefIdx out of range");
    UseMO.TiedTo = TiedMax;
  }

  // UseIdx can be out of range, we'll search for it in findTiedOperandIdx().
  DefMO.TiedTo = std::min(UseIdx + 1, TiedMax);
}

/// Given the index of a tied register operand, find the operand it is tied to.
/// Defs are tied to uses and vice versa. Returns the index of the tied operand
/// which must exist.
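/// For inline asm, the operand group descriptor flags are decoded to locate
/// the tied operand.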
unsigned MachineInstr::findTiedOperandIdx(unsigned OpIdx) const {
  const MachineOperand &MO = getOperand(OpIdx);
  assert(MO.isTied() && "Operand isn't tied");

  // Normally TiedTo is in range.
  if (MO.TiedTo < TiedMax)
    return MO.TiedTo - 1;

  // Uses on normal instructions can be out of range.
  if (!isInlineAsm()) {
    // Normal tied defs must be in the 0..TiedMax-1 range.
    if (MO.isUse())
      return TiedMax - 1;
    // MO is a def. Search for the tied use.
    for (unsigned i = TiedMax - 1, e = getNumOperands(); i != e; ++i) {
      const MachineOperand &UseMO = getOperand(i);
      if (UseMO.isReg() && UseMO.isUse() && UseMO.TiedTo == OpIdx + 1)
        return i;
    }
    llvm_unreachable("Can't find tied use");
  }

  // Now deal with inline asm by parsing the operand group descriptor flags.
  // Find the beginning of each operand group.
  SmallVector<unsigned, 8> GroupIdx;
  unsigned OpIdxGroup = ~0u;
  unsigned NumOps;
  for (unsigned i = InlineAsm::MIOp_FirstOperand, e = getNumOperands(); i < e;
       i += NumOps) {
    const MachineOperand &FlagMO = getOperand(i);
    assert(FlagMO.isImm() && "Invalid tied operand on inline asm");
    unsigned CurGroup = GroupIdx.size();
    GroupIdx.push_back(i);
    NumOps = 1 + InlineAsm::getNumOperandRegisters(FlagMO.getImm());
    // OpIdx belongs to this operand group.
    if (OpIdx > i && OpIdx < i + NumOps)
      OpIdxGroup = CurGroup;
    unsigned TiedGroup;
    if (!InlineAsm::isUseOperandTiedToDef(FlagMO.getImm(), TiedGroup))
      continue;
    // Operands in this group are tied to operands in TiedGroup which must be
    // earlier. Find the number of operands between the two groups.
    unsigned Delta = i - GroupIdx[TiedGroup];

    // OpIdx is a use tied to TiedGroup.
    if (OpIdxGroup == CurGroup)
      return OpIdx - Delta;

    // OpIdx is a def tied to this use group.
    if (OpIdxGroup == TiedGroup)
      return OpIdx + Delta;
  }
  llvm_unreachable("Invalid tied operand on inline asm");
}

/// clearKillInfo - Clears kill flags on all operands.
///
void MachineInstr::clearKillInfo() {
  for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
    MachineOperand &MO = getOperand(i);
    if (MO.isReg() && MO.isUse())
      MO.setIsKill(false);
  }
}

void MachineInstr::substituteRegister(unsigned FromReg,
                                      unsigned ToReg,
                                      unsigned SubIdx,
                                      const TargetRegisterInfo &RegInfo) {
  if (TargetRegisterInfo::isPhysicalRegister(ToReg)) {
    if (SubIdx)
      ToReg = RegInfo.getSubReg(ToReg, SubIdx);
    for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
      MachineOperand &MO = getOperand(i);
      if (!MO.isReg() || MO.getReg() != FromReg)
        continue;
      MO.substPhysReg(ToReg, RegInfo);
    }
  } else {
    for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
      MachineOperand &MO = getOperand(i);
      if (!MO.isReg() || MO.getReg() != FromReg)
        continue;
      MO.substVirtReg(ToReg, SubIdx, RegInfo);
    }
  }
}

/// isSafeToMove - Return true if it is safe to move this instruction. If
/// SawStore is set to true, it means that there is a store (or call) between
/// the instruction's location and its intended destination.
bool MachineInstr::isSafeToMove(const TargetInstrInfo *TII,
                                AliasAnalysis *AA,
                                bool &SawStore) const {
  // Ignore stuff that we obviously can't move.
  //
  // Treat volatile loads as stores. This is not strictly necessary for
  // volatiles, but it is required for atomic loads. It is not allowed to move
  // a load across an atomic load with Ordering > Monotonic.
  if (mayStore() || isCall() ||
      (mayLoad() && hasOrderedMemoryRef())) {
    SawStore = true;
    return false;
  }

  if (isPosition() || isDebugValue() || isTerminator() ||
      hasUnmodeledSideEffects())
    return false;

  // See if this instruction does a load. If so, we have to guarantee that the
  // loaded value doesn't change between the load and its intended destination.
  // The check for isInvariantLoad gives the target the chance to classify the
  // load as always returning a constant, e.g. a constant pool load.
  if (mayLoad() && !isInvariantLoad(AA))
    // Otherwise, this is a real load. If there is a store between the load and
    // end of block, we can't move it.
    return !SawStore;

  return true;
}

/// hasOrderedMemoryRef - Return true if this instruction may have an ordered
/// or volatile memory reference, or if the information describing the memory
/// reference is not available. Return false if it is known to have no ordered
/// memory references.
bool MachineInstr::hasOrderedMemoryRef() const {
  // An instruction known never to access memory won't have a volatile access.
  if (!mayStore() &&
      !mayLoad() &&
      !isCall() &&
      !hasUnmodeledSideEffects())
    return false;

  // Otherwise, if the instruction has no memory reference information,
  // conservatively assume it wasn't preserved.
  if (memoperands_empty())
    return true;

  // Check the memory reference information for ordered references.
  for (mmo_iterator I = memoperands_begin(), E = memoperands_end(); I != E; ++I)
    if (!(*I)->isUnordered())
      return true;

  return false;
}

/// isInvariantLoad - Return true if this instruction is loading from a
/// location whose value is invariant across the function.  For example,
/// loading a value from the constant pool or from the argument area
/// of a function if it does not change.  This should only return true if
/// *all* loads the instruction does are invariant (if it does multiple loads).
bool MachineInstr::isInvariantLoad(AliasAnalysis *AA) const {
  // If the instruction doesn't load at all, it isn't an invariant load.
  if (!mayLoad())
    return false;

  // If the instruction has lost its memoperands, conservatively assume that
  // it may not be an invariant load.
  if (memoperands_empty())
    return false;

  const MachineFrameInfo *MFI = getParent()->getParent()->getFrameInfo();

  for (mmo_iterator I = memoperands_begin(),
       E = memoperands_end(); I != E; ++I) {
    if ((*I)->isVolatile()) return false;
    if ((*I)->isStore()) return false;
    if ((*I)->isInvariant()) return true;

    // A load from a constant PseudoSourceValue is invariant.
    if (const PseudoSourceValue *PSV = (*I)->getPseudoValue())
      if (PSV->isConstant(MFI))
        continue;

    if (const Value *V = (*I)->getValue()) {
      // If we have an AliasAnalysis, ask it whether the memory is constant.
      if (AA && AA->pointsToConstantMemory(
                    AliasAnalysis::Location(V, (*I)->getSize(),
                                            (*I)->getAAInfo())))
        continue;
    }

    // Otherwise assume conservatively.
    return false;
  }

  // Everything checks out.
  return true;
}

/// isConstantValuePHI - If the specified instruction is a PHI that always
/// merges together the same virtual register, return the register, otherwise
/// return 0.
unsigned MachineInstr::isConstantValuePHI() const {
  if (!isPHI())
    return 0;
  assert(getNumOperands() >= 3 &&
         "It's illegal to have a PHI without source operands");

  unsigned Reg = getOperand(1).getReg();
  for (unsigned i = 3, e = getNumOperands(); i < e; i += 2)
    if (getOperand(i).getReg() != Reg)
      return 0;
  return Reg;
}

bool MachineInstr::hasUnmodeledSideEffects() const {
  if (hasProperty(MCID::UnmodeledSideEffects))
    return true;
  if (isInlineAsm()) {
    unsigned ExtraInfo = getOperand(InlineAsm::MIOp_ExtraInfo).getImm();
    if (ExtraInfo & InlineAsm::Extra_HasSideEffects)
      return true;
  }

  return false;
}

/// allDefsAreDead - Return true if all the defs of this instruction are dead.
///
bool MachineInstr::allDefsAreDead() const {
  for (unsigned i = 0, e = getNumOperands(); i < e; ++i) {
    const MachineOperand &MO = getOperand(i);
    if (!MO.isReg() || MO.isUse())
      continue;
    if (!MO.isDead())
      return false;
  }
  return true;
}

/// copyImplicitOps - Copy implicit register operands from the specified
/// instruction to this instruction.
void MachineInstr::copyImplicitOps(MachineFunction &MF,
                                   const MachineInstr *MI) {
  for (unsigned i = MI->getDesc().getNumOperands(), e = MI->getNumOperands();
       i != e; ++i) {
    const MachineOperand &MO = MI->getOperand(i);
    if ((MO.isReg() && MO.isImplicit()) || MO.isRegMask())
      addOperand(MF, MO);
  }
}

void MachineInstr::dump() const {
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  dbgs() << "  " << *this;
#endif
}

static void printDebugLoc(DebugLoc DL, const MachineFunction *MF,
                          raw_ostream &CommentOS) {
  const LLVMContext &Ctx = MF->getFunction()->getContext();
  DL.print(Ctx, CommentOS);
}

void MachineInstr::print(raw_ostream &OS, const TargetMachine *TM,
                         bool SkipOpers) const {
  // We can be a bit tidier if we know the TargetMachine and/or MachineFunction.
  const MachineFunction *MF = nullptr;
  const MachineRegisterInfo *MRI = nullptr;
  if (const MachineBasicBlock *MBB = getParent()) {
    MF = MBB->getParent();
    if (!TM && MF)
      TM = &MF->getTarget();
    if (MF)
      MRI = &MF->getRegInfo();
  }

  // Save a list of virtual registers.
  SmallVector<unsigned, 8> VirtRegs;

  // Print explicitly defined operands on the left of an assignment syntax.
  unsigned StartOp = 0, e = getNumOperands();
  for (; StartOp < e && getOperand(StartOp).isReg() &&
         getOperand(StartOp).isDef() &&
         !getOperand(StartOp).isImplicit();
       ++StartOp) {
    if (StartOp != 0) OS << ", ";
    getOperand(StartOp).print(OS, TM);
    unsigned Reg = getOperand(StartOp).getReg();
    if (TargetRegisterInfo::isVirtualRegister(Reg))
      VirtRegs.push_back(Reg);
  }

  if (StartOp != 0)
    OS << " = ";

  // Print the opcode name.
  if (TM && TM->getSubtargetImpl()->getInstrInfo())
    OS << TM->getSubtargetImpl()->getInstrInfo()->getName(getOpcode());
  else
    OS << "UNKNOWN";

  if (SkipOpers)
    return;

  // Print the rest of the operands.
  bool OmittedAnyCallClobbers = false;
  bool FirstOp = true;
  unsigned AsmDescOp = ~0u;
  unsigned AsmOpCount = 0;

  if (isInlineAsm() && e >= InlineAsm::MIOp_FirstOperand) {
    // Print asm string.
    OS << " ";
    getOperand(InlineAsm::MIOp_AsmString).print(OS, TM);

    // Print HasSideEffects, MayLoad, MayStore, IsAlignStack
    unsigned ExtraInfo = getOperand(InlineAsm::MIOp_ExtraInfo).getImm();
    if (ExtraInfo & InlineAsm::Extra_HasSideEffects)
      OS << " [sideeffect]";
    if (ExtraInfo & InlineAsm::Extra_MayLoad)
      OS << " [mayload]";
    if (ExtraInfo & InlineAsm::Extra_MayStore)
      OS << " [maystore]";
    if (ExtraInfo & InlineAsm::Extra_IsAlignStack)
      OS << " [alignstack]";
    if (getInlineAsmDialect() == InlineAsm::AD_ATT)
      OS << " [attdialect]";
    if (getInlineAsmDialect() == InlineAsm::AD_Intel)
      OS << " [inteldialect]";

    StartOp = AsmDescOp = InlineAsm::MIOp_FirstOperand;
    FirstOp = false;
  }

  for (unsigned i = StartOp, e = getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = getOperand(i);

    if (MO.isReg() && TargetRegisterInfo::isVirtualRegister(MO.getReg()))
      VirtRegs.push_back(MO.getReg());

    // Omit call-clobbered registers which aren't used anywhere. This makes
    // call instructions much less noisy on targets where calls clobber lots
    // of registers. Don't rely on MO.isDead() because we may be called before
    // LiveVariables is run, or we may be looking at a non-allocatable reg.
    if (MRI && isCall() &&
        MO.isReg() && MO.isImplicit() && MO.isDef()) {
      unsigned Reg = MO.getReg();
      if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
        if (MRI->use_empty(Reg)) {
          bool HasAliasLive = false;
          for (MCRegAliasIterator AI(
                   Reg, TM->getSubtargetImpl()->getRegisterInfo(), true);
               AI.isValid(); ++AI) {
            unsigned AliasReg = *AI;
            if (!MRI->use_empty(AliasReg)) {
              HasAliasLive = true;
              break;
            }
          }
          if (!HasAliasLive) {
            OmittedAnyCallClobbers = true;
            continue;
          }
        }
      }
    }

    if (FirstOp) FirstOp = false; else OS << ",";
    OS << " ";
    if (i < getDesc().NumOperands) {
      const MCOperandInfo &MCOI = getDesc().OpInfo[i];
      if (MCOI.isPredicate())
        OS << "pred:";
      if (MCOI.isOptionalDef())
        OS << "opt:";
    }
    if (isDebugValue() && MO.isMetadata()) {
      // Pretty print DBG_VALUE instructions.
      const MDNode *MD = MO.getMetadata();
      DIDescriptor DI(MD);
      DIVariable DIV(MD);

      if (DI.isVariable() && !DIV.getName().empty())
        OS << "!\"" << DIV.getName() << '\"';
      else
        MO.print(OS, TM);
    } else if (TM && (isInsertSubreg() || isRegSequence()) && MO.isImm()) {
      OS << TM->getSubtargetImpl()->getRegisterInfo()->getSubRegIndexName(
          MO.getImm());
    } else if (i == AsmDescOp && MO.isImm()) {
      // Pretty print the inline asm operand descriptor.
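      // Each descriptor immediate encodes an operand kind plus optional
      // register-class and tied-to information; decode and print those
      // pieces symbolically.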
      OS << '$' << AsmOpCount++;
      unsigned Flag = MO.getImm();
      switch (InlineAsm::getKind(Flag)) {
      case InlineAsm::Kind_RegUse:             OS << ":[reguse"; break;
      case InlineAsm::Kind_RegDef:             OS << ":[regdef"; break;
      case InlineAsm::Kind_RegDefEarlyClobber: OS << ":[regdef-ec"; break;
      case InlineAsm::Kind_Clobber:            OS << ":[clobber"; break;
      case InlineAsm::Kind_Imm:                OS << ":[imm"; break;
      case InlineAsm::Kind_Mem:                OS << ":[mem"; break;
      default: OS << ":[??" << InlineAsm::getKind(Flag); break;
      }

      unsigned RCID = 0;
      if (InlineAsm::hasRegClassConstraint(Flag, RCID)) {
        if (TM) {
          const TargetRegisterInfo *TRI =
              TM->getSubtargetImpl()->getRegisterInfo();
          OS << ':'
             << TRI->getRegClassName(TRI->getRegClass(RCID));
        } else
          OS << ":RC" << RCID;
      }

      unsigned TiedTo = 0;
      if (InlineAsm::isUseOperandTiedToDef(Flag, TiedTo))
        OS << " tiedto:$" << TiedTo;

      OS << ']';

      // Compute the index of the next operand descriptor.
      AsmDescOp += 1 + InlineAsm::getNumOperandRegisters(Flag);
    } else
      MO.print(OS, TM);
  }

  // Briefly indicate whether any call clobbers were omitted.
  if (OmittedAnyCallClobbers) {
    if (!FirstOp) OS << ",";
    OS << " ...";
  }

  bool HaveSemi = false;
  const unsigned PrintableFlags = FrameSetup;
  if (Flags & PrintableFlags) {
    if (!HaveSemi) OS << ";"; HaveSemi = true;
    OS << " flags: ";

    if (Flags & FrameSetup)
      OS << "FrameSetup";
  }

  if (!memoperands_empty()) {
    if (!HaveSemi) OS << ";"; HaveSemi = true;

    OS << " mem:";
    for (mmo_iterator i = memoperands_begin(), e = memoperands_end();
         i != e; ++i) {
      OS << **i;
      if (std::next(i) != e)
        OS << " ";
    }
  }

  // Print the regclass of any virtual registers encountered.
  if (MRI && !VirtRegs.empty()) {
    if (!HaveSemi) OS << ";"; HaveSemi = true;
    for (unsigned i = 0; i != VirtRegs.size(); ++i) {
      const TargetRegisterClass *RC = MRI->getRegClass(VirtRegs[i]);
      OS << " " << MRI->getTargetRegisterInfo()->getRegClassName(RC)
         << ':' << PrintReg(VirtRegs[i]);
      for (unsigned j = i+1; j != VirtRegs.size();) {
        if (MRI->getRegClass(VirtRegs[j]) != RC) {
          ++j;
          continue;
        }
        if (VirtRegs[i] != VirtRegs[j])
          OS << "," << PrintReg(VirtRegs[j]);
        VirtRegs.erase(VirtRegs.begin()+j);
      }
    }
  }

  // Print debug location information.
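  // For DBG_VALUE instructions the variable metadata (the last operand)
  // supplies the line number; other instructions print their DebugLoc if one
  // is attached.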
  if (isDebugValue() && getOperand(e - 1).isMetadata()) {
    if (!HaveSemi) OS << ";";
    DIVariable DV(getOperand(e - 1).getMetadata());
    OS << " line no:" << DV.getLineNumber();
    if (MDNode *InlinedAt = DV.getInlinedAt()) {
      DebugLoc InlinedAtDL = DebugLoc::getFromDILocation(InlinedAt);
      if (!InlinedAtDL.isUnknown() && MF) {
        OS << " inlined @[ ";
        printDebugLoc(InlinedAtDL, MF, OS);
        OS << " ]";
      }
    }
    if (isIndirectDebugValue())
      OS << " indirect";
  } else if (!debugLoc.isUnknown() && MF) {
    if (!HaveSemi) OS << ";";
    OS << " dbg:";
    printDebugLoc(debugLoc, MF, OS);
  }

  OS << '\n';
}

bool MachineInstr::addRegisterKilled(unsigned IncomingReg,
                                     const TargetRegisterInfo *RegInfo,
                                     bool AddIfNotFound) {
  bool isPhysReg = TargetRegisterInfo::isPhysicalRegister(IncomingReg);
  bool hasAliases = isPhysReg &&
    MCRegAliasIterator(IncomingReg, RegInfo, false).isValid();
  bool Found = false;
  SmallVector<unsigned,4> DeadOps;
  for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
    MachineOperand &MO = getOperand(i);
    if (!MO.isReg() || !MO.isUse() || MO.isUndef())
      continue;
    unsigned Reg = MO.getReg();
    if (!Reg)
      continue;

    if (Reg == IncomingReg) {
      if (!Found) {
        if (MO.isKill())
          // The register is already marked kill.
          return true;
        if (isPhysReg && isRegTiedToDefOperand(i))
          // Two-address uses of physregs must not be marked kill.
          return true;
        MO.setIsKill();
        Found = true;
      }
    } else if (hasAliases && MO.isKill() &&
               TargetRegisterInfo::isPhysicalRegister(Reg)) {
      // A super-register kill already exists.
      if (RegInfo->isSuperRegister(IncomingReg, Reg))
        return true;
      if (RegInfo->isSubRegister(IncomingReg, Reg))
        DeadOps.push_back(i);
    }
  }

  // Trim unneeded kill operands.
  while (!DeadOps.empty()) {
    unsigned OpIdx = DeadOps.back();
    if (getOperand(OpIdx).isImplicit())
      RemoveOperand(OpIdx);
    else
      getOperand(OpIdx).setIsKill(false);
    DeadOps.pop_back();
  }

  // If not found, this means an alias of one of the operands is killed. Add a
  // new implicit operand if required.
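  // The operand created below is an implicit, killed use of IncomingReg.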
  if (!Found && AddIfNotFound) {
    addOperand(MachineOperand::CreateReg(IncomingReg,
                                         false /*IsDef*/,
                                         true  /*IsImp*/,
                                         true  /*IsKill*/));
    return true;
  }
  return Found;
}

void MachineInstr::clearRegisterKills(unsigned Reg,
                                      const TargetRegisterInfo *RegInfo) {
  if (!TargetRegisterInfo::isPhysicalRegister(Reg))
    RegInfo = nullptr;
  for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
    MachineOperand &MO = getOperand(i);
    if (!MO.isReg() || !MO.isUse() || !MO.isKill())
      continue;
    unsigned OpReg = MO.getReg();
    if (OpReg == Reg || (RegInfo && RegInfo->isSuperRegister(Reg, OpReg)))
      MO.setIsKill(false);
  }
}

bool MachineInstr::addRegisterDead(unsigned Reg,
                                   const TargetRegisterInfo *RegInfo,
                                   bool AddIfNotFound) {
  bool isPhysReg = TargetRegisterInfo::isPhysicalRegister(Reg);
  bool hasAliases = isPhysReg &&
    MCRegAliasIterator(Reg, RegInfo, false).isValid();
  bool Found = false;
  SmallVector<unsigned,4> DeadOps;
  for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
    MachineOperand &MO = getOperand(i);
    if (!MO.isReg() || !MO.isDef())
      continue;
    unsigned MOReg = MO.getReg();
    if (!MOReg)
      continue;

    if (MOReg == Reg) {
      MO.setIsDead();
      Found = true;
    } else if (hasAliases && MO.isDead() &&
               TargetRegisterInfo::isPhysicalRegister(MOReg)) {
      // There exists a super-register that's marked dead.
      if (RegInfo->isSuperRegister(Reg, MOReg))
        return true;
      if (RegInfo->isSubRegister(Reg, MOReg))
        DeadOps.push_back(i);
    }
  }

  // Trim unneeded dead operands.
  while (!DeadOps.empty()) {
    unsigned OpIdx = DeadOps.back();
    if (getOperand(OpIdx).isImplicit())
      RemoveOperand(OpIdx);
    else
      getOperand(OpIdx).setIsDead(false);
    DeadOps.pop_back();
  }

  // If not found, this means an alias of one of the operands is dead. Add a
  // new implicit operand if required.
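  // The operand created below is an implicit, dead def of Reg.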
  if (Found || !AddIfNotFound)
    return Found;

  addOperand(MachineOperand::CreateReg(Reg,
                                       true  /*IsDef*/,
                                       true  /*IsImp*/,
                                       false /*IsKill*/,
                                       true  /*IsDead*/));
  return true;
}

void MachineInstr::addRegisterDefined(unsigned Reg,
                                      const TargetRegisterInfo *RegInfo) {
  if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
    MachineOperand *MO = findRegisterDefOperand(Reg, false, RegInfo);
    if (MO)
      return;
  } else {
    for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
      const MachineOperand &MO = getOperand(i);
      if (MO.isReg() && MO.getReg() == Reg && MO.isDef() &&
          MO.getSubReg() == 0)
        return;
    }
  }
  addOperand(MachineOperand::CreateReg(Reg,
                                       true  /*IsDef*/,
                                       true  /*IsImp*/));
}

void MachineInstr::setPhysRegsDeadExcept(ArrayRef<unsigned> UsedRegs,
                                         const TargetRegisterInfo &TRI) {
  bool HasRegMask = false;
  for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
    MachineOperand &MO = getOperand(i);
    if (MO.isRegMask()) {
      HasRegMask = true;
      continue;
    }
    if (!MO.isReg() || !MO.isDef()) continue;
    unsigned Reg = MO.getReg();
    if (!TargetRegisterInfo::isPhysicalRegister(Reg)) continue;
    bool Dead = true;
    for (ArrayRef<unsigned>::iterator I = UsedRegs.begin(), E = UsedRegs.end();
         I != E; ++I)
      if (TRI.regsOverlap(*I, Reg)) {
        Dead = false;
        break;
      }
    // If there are no uses, including partial uses, the def is dead.
    if (Dead) MO.setIsDead();
  }

  // This is a call with a register mask operand.
  // Mask clobbers are always dead, so add defs for the non-dead defines.
  if (HasRegMask)
    for (ArrayRef<unsigned>::iterator I = UsedRegs.begin(), E = UsedRegs.end();
         I != E; ++I)
      addRegisterDefined(*I, &TRI);
}

unsigned
MachineInstrExpressionTrait::getHashValue(const MachineInstr* const &MI) {
  // Build up a buffer of hash code components.
  SmallVector<size_t, 8> HashComponents;
  HashComponents.reserve(MI->getNumOperands() + 1);
  HashComponents.push_back(MI->getOpcode());
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = MI->getOperand(i);
    if (MO.isReg() && MO.isDef() &&
        TargetRegisterInfo::isVirtualRegister(MO.getReg()))
      continue;  // Skip virtual register defs.

    HashComponents.push_back(hash_value(MO));
  }
  return hash_combine_range(HashComponents.begin(), HashComponents.end());
}

void MachineInstr::emitError(StringRef Msg) const {
  // Find the source location cookie.
  unsigned LocCookie = 0;
  const MDNode *LocMD = nullptr;
  for (unsigned i = getNumOperands(); i != 0; --i) {
    if (getOperand(i-1).isMetadata() &&
        (LocMD = getOperand(i-1).getMetadata()) &&
        LocMD->getNumOperands() != 0) {
      if (const ConstantInt *CI =
              mdconst::dyn_extract<ConstantInt>(LocMD->getOperand(0))) {
        LocCookie = CI->getZExtValue();
        break;
      }
    }
  }

  if (const MachineBasicBlock *MBB = getParent())
    if (const MachineFunction *MF = MBB->getParent())
      return MF->getMMI().getModule()->getContext().emitError(LocCookie, Msg);
  report_fatal_error(Msg);
}
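
// Illustrative sketch of how emitError is typically reached: inline asm
// instructions carry a metadata operand whose leading ConstantInt cookie
// (found by the loop above) lets LLVMContext::emitError point back at the
// original source location. A hypothetical target pass might report a bad
// constraint like this (operandsSatisfyConstraints is a made-up helper):
//
//   if (MI->isInlineAsm() && !operandsSatisfyConstraints(*MI))
//     MI->emitError("invalid operand for inline asm constraint");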