//===- LiveInterval.cpp - Live Interval Representation -------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the LiveRange and LiveInterval classes. Given some
// numbering of each of the machine instructions, an interval [i, j) is said
// to be a live range for register v if there is no instruction with number
// j' >= j such that v is live at j' and there is no instruction with number
// i' < i such that v is live at i'. In this implementation ranges can have
// holes, i.e. a range might look like [1,20), [50,65), [1000,1001). Each
// individual segment is represented as an instance of LiveRange::Segment,
// and the whole range is represented as an instance of LiveRange.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/LiveInterval.h"
#include "LiveRangeUtils.h"
#include "RegisterCoalescer.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/CodeGen/LiveIntervals.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SlotIndexes.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/MC/LaneBitmask.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <iterator>
#include <utility>

using namespace llvm;

namespace {

//===----------------------------------------------------------------------===//
// Implementation of various methods necessary for calculation of live ranges.
// The implementation of the methods abstracts from the concrete type of the
// segment collection.
//
// Implementation of the class follows the Template design pattern. The base
// class contains generic algorithms that call collection-specific methods,
// which are provided in concrete subclasses. In order to avoid virtual calls
// these methods are provided by means of C++ template instantiation.
// The base class calls the methods of the subclass through method impl(),
// which casts 'this' pointer to the type of the subclass.
//
//===----------------------------------------------------------------------===//

template <typename ImplT, typename IteratorT, typename CollectionT>
class CalcLiveRangeUtilBase {
protected:
  LiveRange *LR;

protected:
  CalcLiveRangeUtilBase(LiveRange *LR) : LR(LR) {}

public:
  using Segment = LiveRange::Segment;
  using iterator = IteratorT;

  /// A counterpart of LiveRange::createDeadDef: Make sure the range has a
  /// value defined at @p Def.
  /// If @p ForVNI is null, and there is no value defined at @p Def, a new
  /// value will be allocated using @p VNInfoAllocator.
  /// If @p ForVNI is null, the return value is the value defined at @p Def,
  /// either a pre-existing one, or the one newly created.
  /// If @p ForVNI is not null, then @p Def should be the location where
  /// @p ForVNI is defined. If the range does not have a value defined at
  /// @p Def, the value @p ForVNI will be used instead of allocating a new
  /// one. If the range already has a value defined at @p Def, it must be
  /// same as @p ForVNI. In either case, @p ForVNI will be the return value.
  VNInfo *createDeadDef(SlotIndex Def, VNInfo::Allocator *VNInfoAllocator,
                        VNInfo *ForVNI) {
    assert(!Def.isDead() && "Cannot define a value at the dead slot");
    assert((!ForVNI || ForVNI->def == Def) &&
           "If ForVNI is specified, it must match Def");
    iterator I = impl().find(Def);
    if (I == segments().end()) {
      VNInfo *VNI = ForVNI ? ForVNI : LR->getNextValue(Def, *VNInfoAllocator);
      impl().insertAtEnd(Segment(Def, Def.getDeadSlot(), VNI));
      return VNI;
    }

    Segment *S = segmentAt(I);
    if (SlotIndex::isSameInstr(Def, S->start)) {
      assert((!ForVNI || ForVNI == S->valno) && "Value number mismatch");
      assert(S->valno->def == S->start && "Inconsistent existing value def");

      // It is possible to have both normal and early-clobber defs of the same
      // register on an instruction. It doesn't make a lot of sense, but it is
      // possible to specify in inline assembly.
      //
      // Just convert everything to early-clobber.
      Def = std::min(Def, S->start);
      if (Def != S->start)
        S->start = S->valno->def = Def;
      return S->valno;
    }
    assert(SlotIndex::isEarlierInstr(Def, S->start) && "Already live at def");
    VNInfo *VNI = ForVNI ? ForVNI : LR->getNextValue(Def, *VNInfoAllocator);
    segments().insert(I, Segment(Def, Def.getDeadSlot(), VNI));
    return VNI;
  }

  VNInfo *extendInBlock(SlotIndex StartIdx, SlotIndex Use) {
    if (segments().empty())
      return nullptr;
    iterator I =
        impl().findInsertPos(Segment(Use.getPrevSlot(), Use, nullptr));
    if (I == segments().begin())
      return nullptr;
    --I;
    if (I->end <= StartIdx)
      return nullptr;
    if (I->end < Use)
      extendSegmentEndTo(I, Use);
    return I->valno;
  }

  std::pair<VNInfo*,bool> extendInBlock(ArrayRef<SlotIndex> Undefs,
                                        SlotIndex StartIdx, SlotIndex Use) {
    if (segments().empty())
      return std::make_pair(nullptr, false);
    SlotIndex BeforeUse = Use.getPrevSlot();
    iterator I = impl().findInsertPos(Segment(BeforeUse, Use, nullptr));
    if (I == segments().begin())
      return std::make_pair(nullptr, LR->isUndefIn(Undefs, StartIdx, BeforeUse));
    --I;
    if (I->end <= StartIdx)
      return std::make_pair(nullptr, LR->isUndefIn(Undefs, StartIdx, BeforeUse));
    if (I->end < Use) {
      if (LR->isUndefIn(Undefs, I->end, BeforeUse))
        return std::make_pair(nullptr, true);
      extendSegmentEndTo(I, Use);
    }
    return std::make_pair(I->valno, false);
  }

  /// This method is used when we want to extend the segment specified
  /// by I to end at the specified endpoint. To do this, we should
  /// merge and eliminate all segments that this will overlap
  /// with. The iterator is not invalidated.
  void extendSegmentEndTo(iterator I, SlotIndex NewEnd) {
    assert(I != segments().end() && "Not a valid segment!");
    Segment *S = segmentAt(I);
    VNInfo *ValNo = I->valno;

    // Search for the first segment that we can't merge with.
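    // The scan below walks forward over the segments that the extended
    // segment will absorb; the assert checks that they all carry I's value
    // number, since merging segments with different values is not allowed.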
    iterator MergeTo = std::next(I);
    for (; MergeTo != segments().end() && NewEnd >= MergeTo->end; ++MergeTo)
      assert(MergeTo->valno == ValNo && "Cannot merge with differing values!");

    // If NewEnd was in the middle of a segment, make sure to get its endpoint.
    S->end = std::max(NewEnd, std::prev(MergeTo)->end);

    // If the newly formed segment now touches the segment after it and if they
    // have the same value number, merge the two segments into one segment.
    if (MergeTo != segments().end() && MergeTo->start <= I->end &&
        MergeTo->valno == ValNo) {
      S->end = MergeTo->end;
      ++MergeTo;
    }

    // Erase any dead segments.
    segments().erase(std::next(I), MergeTo);
  }

  /// This method is used when we want to extend the segment specified
  /// by I to start at the specified endpoint. To do this, we should
  /// merge and eliminate all segments that this will overlap with.
  iterator extendSegmentStartTo(iterator I, SlotIndex NewStart) {
    assert(I != segments().end() && "Not a valid segment!");
    Segment *S = segmentAt(I);
    VNInfo *ValNo = I->valno;

    // Search for the first segment that we can't merge with.
    iterator MergeTo = I;
    do {
      if (MergeTo == segments().begin()) {
        S->start = NewStart;
        segments().erase(MergeTo, I);
        return I;
      }
      assert(MergeTo->valno == ValNo && "Cannot merge with differing values!");
      --MergeTo;
    } while (NewStart <= MergeTo->start);

    // If we start in the middle of another segment, just delete a range and
    // extend that segment.
    if (MergeTo->end >= NewStart && MergeTo->valno == ValNo) {
      segmentAt(MergeTo)->end = S->end;
    } else {
      // Otherwise, extend the segment right after.
      ++MergeTo;
      Segment *MergeToSeg = segmentAt(MergeTo);
      MergeToSeg->start = NewStart;
      MergeToSeg->end = S->end;
    }

    segments().erase(std::next(MergeTo), std::next(I));
    return MergeTo;
  }

  iterator addSegment(Segment S) {
    SlotIndex Start = S.start, End = S.end;
    iterator I = impl().findInsertPos(S);

    // If the inserted segment starts in the middle or right at the end of
    // another segment, just extend that segment to contain the segment of S.
    if (I != segments().begin()) {
      iterator B = std::prev(I);
      if (S.valno == B->valno) {
        if (B->start <= Start && B->end >= Start) {
          extendSegmentEndTo(B, End);
          return B;
        }
      } else {
        // Check to make sure that we are not overlapping two live segments
        // with different valno's.
        assert(B->end <= Start &&
               "Cannot overlap two segments with differing ValID's"
               " (did you def the same reg twice in a MachineInstr?)");
      }
    }

    // Otherwise, if this segment ends in the middle of, or right next
    // to, another segment, merge it into that segment.
    if (I != segments().end()) {
      if (S.valno == I->valno) {
        if (I->start <= End) {
          I = extendSegmentStartTo(I, Start);

          // If S is a complete superset of a segment, we may need to grow its
          // endpoint as well.
          if (End > I->end)
            extendSegmentEndTo(I, End);
          return I;
        }
      } else {
        // Check to make sure that we are not overlapping two live segments
        // with different valno's.
        assert(I->start >= End &&
               "Cannot overlap two segments with differing ValID's");
      }
    }

    // Otherwise, this is just a new segment that doesn't interact with
    // anything.
    // Insert it.
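    // I was computed by findInsertPos(), so inserting in front of it keeps
    // the collection sorted by segment start.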
    return segments().insert(I, S);
  }

private:
  ImplT &impl() { return *static_cast<ImplT *>(this); }

  CollectionT &segments() { return impl().segmentsColl(); }

  Segment *segmentAt(iterator I) { return const_cast<Segment *>(&(*I)); }
};

//===----------------------------------------------------------------------===//
// Instantiation of the methods for calculation of live ranges
// based on a segment vector.
//===----------------------------------------------------------------------===//

class CalcLiveRangeUtilVector;
using CalcLiveRangeUtilVectorBase =
    CalcLiveRangeUtilBase<CalcLiveRangeUtilVector, LiveRange::iterator,
                          LiveRange::Segments>;

class CalcLiveRangeUtilVector : public CalcLiveRangeUtilVectorBase {
public:
  CalcLiveRangeUtilVector(LiveRange *LR) : CalcLiveRangeUtilVectorBase(LR) {}

private:
  friend CalcLiveRangeUtilVectorBase;

  LiveRange::Segments &segmentsColl() { return LR->segments; }

  void insertAtEnd(const Segment &S) { LR->segments.push_back(S); }

  iterator find(SlotIndex Pos) { return LR->find(Pos); }

  iterator findInsertPos(Segment S) {
    return std::upper_bound(LR->begin(), LR->end(), S.start);
  }
};

//===----------------------------------------------------------------------===//
// Instantiation of the methods for calculation of live ranges
// based on a segment set.
//===----------------------------------------------------------------------===//

class CalcLiveRangeUtilSet;
using CalcLiveRangeUtilSetBase =
    CalcLiveRangeUtilBase<CalcLiveRangeUtilSet, LiveRange::SegmentSet::iterator,
                          LiveRange::SegmentSet>;

class CalcLiveRangeUtilSet : public CalcLiveRangeUtilSetBase {
public:
  CalcLiveRangeUtilSet(LiveRange *LR) : CalcLiveRangeUtilSetBase(LR) {}

private:
  friend CalcLiveRangeUtilSetBase;

  LiveRange::SegmentSet &segmentsColl() { return *LR->segmentSet; }

  void insertAtEnd(const Segment &S) {
    LR->segmentSet->insert(LR->segmentSet->end(), S);
  }

  iterator find(SlotIndex Pos) {
    iterator I =
        LR->segmentSet->upper_bound(Segment(Pos, Pos.getNextSlot(), nullptr));
    if (I == LR->segmentSet->begin())
      return I;
    iterator PrevI = std::prev(I);
    if (Pos < (*PrevI).end)
      return PrevI;
    return I;
  }

  iterator findInsertPos(Segment S) {
    iterator I = LR->segmentSet->upper_bound(S);
    if (I != LR->segmentSet->end() && !(S.start < *I))
      ++I;
    return I;
  }
};

} // end anonymous namespace

//===----------------------------------------------------------------------===//
// LiveRange methods
//===----------------------------------------------------------------------===//

LiveRange::iterator LiveRange::find(SlotIndex Pos) {
  // This algorithm is basically std::upper_bound.
  // Unfortunately, std::upper_bound cannot be used with the mixed
  // SlotIndex/Segment comparison needed here on every standard library, so
  // the binary search is written out by hand.
  if (empty() || Pos >= endIndex())
    return end();
  iterator I = begin();
  size_t Len = size();
  do {
    size_t Mid = Len >> 1;
    if (Pos < I[Mid].end) {
      Len = Mid;
    } else {
      I += Mid + 1;
      Len -= Mid + 1;
    }
  } while (Len);
  return I;
}

VNInfo *LiveRange::createDeadDef(SlotIndex Def, VNInfo::Allocator &VNIAlloc) {
  // Use the segment set, if it is available.
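  // The segment set only exists while the live range is initially being
  // computed; flushSegmentSet() later moves its contents into the segment
  // vector and resets segmentSet to null.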
  if (segmentSet != nullptr)
    return CalcLiveRangeUtilSet(this).createDeadDef(Def, &VNIAlloc, nullptr);
  // Otherwise use the segment vector.
  return CalcLiveRangeUtilVector(this).createDeadDef(Def, &VNIAlloc, nullptr);
}

VNInfo *LiveRange::createDeadDef(VNInfo *VNI) {
  // Use the segment set, if it is available.
  if (segmentSet != nullptr)
    return CalcLiveRangeUtilSet(this).createDeadDef(VNI->def, nullptr, VNI);
  // Otherwise use the segment vector.
  return CalcLiveRangeUtilVector(this).createDeadDef(VNI->def, nullptr, VNI);
}

// overlaps - Return true if the intersection of the two live ranges is
// not empty.
//
// An example for overlaps():
//
// 0: A = ...
// 4: B = ...
// 8: C = A + B ;; last use of A
//
// The live ranges should look like:
//
// A = [3, 11)
// B = [7, x)
// C = [11, y)
//
// A->overlaps(C) should return false since we want to be able to join
// A and C.
//
bool LiveRange::overlapsFrom(const LiveRange& other,
                             const_iterator StartPos) const {
  assert(!empty() && "empty range");
  const_iterator i = begin();
  const_iterator ie = end();
  const_iterator j = StartPos;
  const_iterator je = other.end();

  assert((StartPos->start <= i->start || StartPos == other.begin()) &&
         StartPos != other.end() && "Bogus start position hint!");

  if (i->start < j->start) {
    i = std::upper_bound(i, ie, j->start);
    if (i != begin()) --i;
  } else if (j->start < i->start) {
    ++StartPos;
    if (StartPos != other.end() && StartPos->start <= i->start) {
      assert(StartPos < other.end() && i < end());
      j = std::upper_bound(j, je, i->start);
      if (j != other.begin()) --j;
    }
  } else {
    return true;
  }

  if (j == je) return false;

  while (i != ie) {
    if (i->start > j->start) {
      std::swap(i, j);
      std::swap(ie, je);
    }

    if (i->end > j->start)
      return true;
    ++i;
  }

  return false;
}

bool LiveRange::overlaps(const LiveRange &Other, const CoalescerPair &CP,
                         const SlotIndexes &Indexes) const {
  assert(!empty() && "empty range");
  if (Other.empty())
    return false;

  // Use binary searches to find initial positions.
  const_iterator I = find(Other.beginIndex());
  const_iterator IE = end();
  if (I == IE)
    return false;
  const_iterator J = Other.find(I->start);
  const_iterator JE = Other.end();
  if (J == JE)
    return false;

  while (true) {
    // J has just been advanced to satisfy:
    assert(J->end >= I->start);
    // Check for an overlap.
    if (J->start < I->end) {
      // I and J are overlapping. Find the later start.
      SlotIndex Def = std::max(I->start, J->start);
      // Allow the overlap if Def is a coalescable copy.
      if (Def.isBlock() ||
          !CP.isCoalescable(Indexes.getInstructionFromIndex(Def)))
        return true;
    }
    // Advance the iterator that ends first to check for more overlaps.
    if (J->end > I->end) {
      std::swap(I, J);
      std::swap(IE, JE);
    }
    // Advance J until J->end >= I->start.
    do
      if (++J == JE)
        return false;
    while (J->end < I->start);
  }
}

/// overlaps - Return true if the live range overlaps an interval specified
/// by [Start, End).
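/// The binary search below finds the first segment starting at or after End;
/// the range overlaps [Start, End) iff the segment before that one extends
/// past Start.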
bool LiveRange::overlaps(SlotIndex Start, SlotIndex End) const {
  assert(Start < End && "Invalid range");
  const_iterator I = std::lower_bound(begin(), end(), End);
  return I != begin() && (--I)->end > Start;
}

bool LiveRange::covers(const LiveRange &Other) const {
  if (empty())
    return Other.empty();

  const_iterator I = begin();
  for (const Segment &O : Other.segments) {
    I = advanceTo(I, O.start);
    if (I == end() || I->start > O.start)
      return false;

    // Check adjacent live segments and see if we can get behind O.end.
    while (I->end < O.end) {
      const_iterator Last = I;
      // Get next segment and abort if it was not adjacent.
      ++I;
      if (I == end() || Last->end != I->start)
        return false;
    }
  }
  return true;
}

/// ValNo is dead, remove it. If it is the largest value number, just nuke it
/// (and any other deleted values neighboring it), otherwise mark it as ~1U so
/// it can be nuked later.
void LiveRange::markValNoForDeletion(VNInfo *ValNo) {
  if (ValNo->id == getNumValNums()-1) {
    do {
      valnos.pop_back();
    } while (!valnos.empty() && valnos.back()->isUnused());
  } else {
    ValNo->markUnused();
  }
}

/// RenumberValues - Renumber all values in order of appearance and delete the
/// remaining unused values.
void LiveRange::RenumberValues() {
  SmallPtrSet<VNInfo*, 8> Seen;
  valnos.clear();
  for (const Segment &S : segments) {
    VNInfo *VNI = S.valno;
    if (!Seen.insert(VNI).second)
      continue;
    assert(!VNI->isUnused() && "Unused valno used by live segment");
    VNI->id = (unsigned)valnos.size();
    valnos.push_back(VNI);
  }
}

void LiveRange::addSegmentToSet(Segment S) {
  CalcLiveRangeUtilSet(this).addSegment(S);
}

LiveRange::iterator LiveRange::addSegment(Segment S) {
  // Use the segment set, if it is available.
  if (segmentSet != nullptr) {
    addSegmentToSet(S);
    return end();
  }
  // Otherwise use the segment vector.
  return CalcLiveRangeUtilVector(this).addSegment(S);
}

void LiveRange::append(const Segment S) {
  // Check that the segment belongs to the back of the list.
  assert(segments.empty() || segments.back().end <= S.start);
  segments.push_back(S);
}

std::pair<VNInfo*,bool> LiveRange::extendInBlock(ArrayRef<SlotIndex> Undefs,
                                                 SlotIndex StartIdx,
                                                 SlotIndex Kill) {
  // Use the segment set, if it is available.
  if (segmentSet != nullptr)
    return CalcLiveRangeUtilSet(this).extendInBlock(Undefs, StartIdx, Kill);
  // Otherwise use the segment vector.
  return CalcLiveRangeUtilVector(this).extendInBlock(Undefs, StartIdx, Kill);
}

VNInfo *LiveRange::extendInBlock(SlotIndex StartIdx, SlotIndex Kill) {
  // Use the segment set, if it is available.
  if (segmentSet != nullptr)
    return CalcLiveRangeUtilSet(this).extendInBlock(StartIdx, Kill);
  // Otherwise use the segment vector.
  return CalcLiveRangeUtilVector(this).extendInBlock(StartIdx, Kill);
}

/// Remove the specified span from this range. Note that the span must be
/// entirely contained within a single Segment.
void LiveRange::removeSegment(SlotIndex Start, SlotIndex End,
                              bool RemoveDeadValNo) {
  // Find the Segment containing this span.
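  // find(Start) returns the first segment that ends after Start; the asserts
  // below verify that this segment really contains all of [Start, End).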
  iterator I = find(Start);
  assert(I != end() && "Segment is not in range!");
  assert(I->containsInterval(Start, End)
         && "Segment is not entirely in range!");

  // If the span we are removing is at the start of the Segment, adjust it.
  VNInfo *ValNo = I->valno;
  if (I->start == Start) {
    if (I->end == End) {
      if (RemoveDeadValNo) {
        // Check if val# is dead.
        bool isDead = true;
        for (const_iterator II = begin(), EE = end(); II != EE; ++II)
          if (II != I && II->valno == ValNo) {
            isDead = false;
            break;
          }
        if (isDead) {
          // Now that ValNo is dead, remove it.
          markValNoForDeletion(ValNo);
        }
      }

      segments.erase(I);  // Removed the whole Segment.
    } else
      I->start = End;
    return;
  }

  // Otherwise if the span we are removing is at the end of the Segment,
  // adjust the other way.
  if (I->end == End) {
    I->end = Start;
    return;
  }

  // Otherwise, we are splitting the Segment into two pieces.
  SlotIndex OldEnd = I->end;
  I->end = Start;  // Trim the old segment.

  // Insert the new one.
  segments.insert(std::next(I), Segment(End, OldEnd, ValNo));
}

/// removeValNo - Remove all the segments defined by the specified value#.
/// Also remove the value# from value# list.
void LiveRange::removeValNo(VNInfo *ValNo) {
  if (empty()) return;
  segments.erase(remove_if(*this, [ValNo](const Segment &S) {
    return S.valno == ValNo;
  }), end());
  // Now that ValNo is dead, remove it.
  markValNoForDeletion(ValNo);
}

void LiveRange::join(LiveRange &Other,
                     const int *LHSValNoAssignments,
                     const int *RHSValNoAssignments,
                     SmallVectorImpl<VNInfo *> &NewVNInfo) {
  verify();

  // Determine if any of our values are mapped. This is uncommon, so we want
  // to avoid the range scan if not.
  bool MustMapCurValNos = false;
  unsigned NumVals = getNumValNums();
  unsigned NumNewVals = NewVNInfo.size();
  for (unsigned i = 0; i != NumVals; ++i) {
    unsigned LHSValID = LHSValNoAssignments[i];
    if (i != LHSValID ||
        (NewVNInfo[LHSValID] && NewVNInfo[LHSValID] != getValNumInfo(i))) {
      MustMapCurValNos = true;
      break;
    }
  }

  // If we have to apply a mapping to our base range assignment, rewrite it now.
  if (MustMapCurValNos && !empty()) {
    // Map the first live range.
    iterator OutIt = begin();
    OutIt->valno = NewVNInfo[LHSValNoAssignments[OutIt->valno->id]];
    for (iterator I = std::next(OutIt), E = end(); I != E; ++I) {
      VNInfo* nextValNo = NewVNInfo[LHSValNoAssignments[I->valno->id]];
      assert(nextValNo && "Huh?");

      // If this live range has the same value # as its immediate predecessor,
      // and if they are neighbors, remove one Segment. This happens when we
      // have [0,4:0)[4,7:1) and map 0/1 onto the same value #.
      if (OutIt->valno == nextValNo && OutIt->end == I->start) {
        OutIt->end = I->end;
      } else {
        // Didn't merge. Move OutIt to the next segment.
        ++OutIt;
        OutIt->valno = nextValNo;
        if (OutIt != I) {
          OutIt->start = I->start;
          OutIt->end = I->end;
        }
      }
    }
    // If we merged some segments, chop off the end.
    ++OutIt;
    segments.erase(OutIt, end());
  }

  // Rewrite Other values before changing the VNInfo ids.
  // This can leave Other in an invalid state because we're not coalescing
  // touching segments that now have identical values.
  // That's OK since Other is not supposed to be valid after calling join().
  for (Segment &S : Other.segments)
    S.valno = NewVNInfo[RHSValNoAssignments[S.valno->id]];

  // Update val# info. Renumber them and make sure they all belong to this
  // LiveRange now. Also remove dead val#'s.
  unsigned NumValNos = 0;
  for (unsigned i = 0; i < NumNewVals; ++i) {
    VNInfo *VNI = NewVNInfo[i];
    if (VNI) {
      if (NumValNos >= NumVals)
        valnos.push_back(VNI);
      else
        valnos[NumValNos] = VNI;
      VNI->id = NumValNos++;  // Renumber val#.
    }
  }
  if (NumNewVals < NumVals)
    valnos.resize(NumNewVals);  // shrinkify

  // Okay, now insert the RHS live segments into the LHS.
  LiveRangeUpdater Updater(this);
  for (Segment &S : Other.segments)
    Updater.add(S);
}

/// Merge all of the segments in RHS into this live range as the specified
/// value number. The segments in RHS are allowed to overlap with segments in
/// the current range, but only if the overlapping segments have the
/// specified value number.
void LiveRange::MergeSegmentsInAsValue(const LiveRange &RHS,
                                       VNInfo *LHSValNo) {
  LiveRangeUpdater Updater(this);
  for (const Segment &S : RHS.segments)
    Updater.add(S.start, S.end, LHSValNo);
}

/// MergeValueInAsValue - Merge all of the live segments of a specific val#
/// in RHS into this live range as the specified value number.
/// The segments in RHS are allowed to overlap with segments in the current
/// range; the value numbers of the overlapped segments are replaced with the
/// specified value number.
void LiveRange::MergeValueInAsValue(const LiveRange &RHS,
                                    const VNInfo *RHSValNo,
                                    VNInfo *LHSValNo) {
  LiveRangeUpdater Updater(this);
  for (const Segment &S : RHS.segments)
    if (S.valno == RHSValNo)
      Updater.add(S.start, S.end, LHSValNo);
}

/// MergeValueNumberInto - This method is called when two value numbers
/// are found to be equivalent. This eliminates V1, replacing all
/// segments with the V1 value number with the V2 value number. This can
/// cause merging of V1/V2 value numbers and compaction of the value space.
VNInfo *LiveRange::MergeValueNumberInto(VNInfo *V1, VNInfo *V2) {
  assert(V1 != V2 && "Identical value#'s are always equivalent!");

  // This code actually merges the (numerically) larger value number into the
  // smaller value number, which is likely to allow us to compactify the value
  // space. The only thing we have to be careful of is to preserve the
  // instruction that defines the result value.

  // Make sure V2 is smaller than V1.
  if (V1->id < V2->id) {
    V1->copyFrom(*V2);
    std::swap(V1, V2);
  }

  // Merge V1 segments into V2.
  for (iterator I = begin(); I != end(); ) {
    iterator S = I++;
    if (S->valno != V1) continue;  // Not a V1 Segment.

    // Okay, we found a V1 live range. If it had a previous, touching, V2 live
    // range, extend it.
    if (S != begin()) {
      iterator Prev = S-1;
      if (Prev->valno == V2 && Prev->end == S->start) {
        Prev->end = S->end;

        // Erase this live-range.
        segments.erase(S);
        I = Prev+1;
        S = Prev;
      }
    }

    // Okay, now we have a V1 or V2 live range that is maximally merged forward.
    // Ensure that it is a V2 live-range.
    S->valno = V2;

    // If we can merge it into later V2 segments, do so now. We ignore any
    // following V1 segments, as they will be merged in subsequent iterations
    // of the loop.
    if (I != end()) {
      if (I->start == S->end && I->valno == V2) {
        S->end = I->end;
        segments.erase(I);
        I = S+1;
      }
    }
  }

  // Now that V1 is dead, remove it.
  markValNoForDeletion(V1);

  return V2;
}

void LiveRange::flushSegmentSet() {
  assert(segmentSet != nullptr && "segment set must have been created");
  assert(
      segments.empty() &&
      "segment set can be used only initially before switching to the array");
  segments.append(segmentSet->begin(), segmentSet->end());
  segmentSet = nullptr;
  verify();
}

bool LiveRange::isLiveAtIndexes(ArrayRef<SlotIndex> Slots) const {
  ArrayRef<SlotIndex>::iterator SlotI = Slots.begin();
  ArrayRef<SlotIndex>::iterator SlotE = Slots.end();

  // If there are no regmask slots, we have nothing to search.
  if (SlotI == SlotE)
    return false;

  // Start our search at the first segment that ends after the first slot.
  const_iterator SegmentI = find(*SlotI);
  const_iterator SegmentE = end();

  // If there are no segments that end after the first slot, we're done.
  if (SegmentI == SegmentE)
    return false;

  // Look for each slot in the live range.
  for ( ; SlotI != SlotE; ++SlotI) {
    // Go to the next segment that ends after the current slot.
    // The slot may be within a hole in the range.
    SegmentI = advanceTo(SegmentI, *SlotI);
    if (SegmentI == SegmentE)
      return false;

    // If this segment contains the slot, we're done.
    if (SegmentI->contains(*SlotI))
      return true;
    // Otherwise, look for the next slot.
  }

  // We didn't find a segment containing any of the slots.
  return false;
}

void LiveInterval::freeSubRange(SubRange *S) {
  S->~SubRange();
  // Memory was allocated with BumpPtr allocator and is not freed here.
}

void LiveInterval::removeEmptySubRanges() {
  SubRange **NextPtr = &SubRanges;
  SubRange *I = *NextPtr;
  while (I != nullptr) {
    if (!I->empty()) {
      NextPtr = &I->Next;
      I = *NextPtr;
      continue;
    }
    // Skip empty subranges until we find the first nonempty one.
    do {
      SubRange *Next = I->Next;
      freeSubRange(I);
      I = Next;
    } while (I != nullptr && I->empty());
    *NextPtr = I;
  }
}

void LiveInterval::clearSubRanges() {
  for (SubRange *I = SubRanges, *Next; I != nullptr; I = Next) {
    Next = I->Next;
    freeSubRange(I);
  }
  SubRanges = nullptr;
}

void LiveInterval::refineSubRanges(BumpPtrAllocator &Allocator,
    LaneBitmask LaneMask, std::function<void(LiveInterval::SubRange&)> Apply) {
  LaneBitmask ToApply = LaneMask;
  for (SubRange &SR : subranges()) {
    LaneBitmask SRMask = SR.LaneMask;
    LaneBitmask Matching = SRMask & LaneMask;
    if (Matching.none())
      continue;

    SubRange *MatchingRange;
    if (SRMask == Matching) {
      // The subrange fits (it does not cover bits outside \p LaneMask).
      MatchingRange = &SR;
    } else {
      // We have to split the subrange into a matching and non-matching part.
      // Reduce the lane mask of the existing subrange to the non-matching part.
      SR.LaneMask = SRMask & ~Matching;
      // Create a new subrange for the matching part.
      MatchingRange = createSubRangeFrom(Allocator, Matching, SR);
    }
    Apply(*MatchingRange);
    ToApply &= ~Matching;
  }
  // Create a new subrange if there are uncovered bits left.
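  // ToApply now holds exactly those lanes of LaneMask that were not covered
  // by any existing subrange.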
  if (ToApply.any()) {
    SubRange *NewRange = createSubRange(Allocator, ToApply);
    Apply(*NewRange);
  }
}

unsigned LiveInterval::getSize() const {
  unsigned Sum = 0;
  for (const Segment &S : segments)
    Sum += S.start.distance(S.end);
  return Sum;
}

void LiveInterval::computeSubRangeUndefs(SmallVectorImpl<SlotIndex> &Undefs,
                                         LaneBitmask LaneMask,
                                         const MachineRegisterInfo &MRI,
                                         const SlotIndexes &Indexes) const {
  assert(TargetRegisterInfo::isVirtualRegister(reg));
  LaneBitmask VRegMask = MRI.getMaxLaneMaskForVReg(reg);
  assert((VRegMask & LaneMask).any());
  const TargetRegisterInfo &TRI = *MRI.getTargetRegisterInfo();
  for (const MachineOperand &MO : MRI.def_operands(reg)) {
    if (!MO.isUndef())
      continue;
    unsigned SubReg = MO.getSubReg();
    assert(SubReg != 0 && "Undef should only be set on subreg defs");
    LaneBitmask DefMask = TRI.getSubRegIndexLaneMask(SubReg);
    LaneBitmask UndefMask = VRegMask & ~DefMask;
    if ((UndefMask & LaneMask).any()) {
      const MachineInstr &MI = *MO.getParent();
      bool EarlyClobber = MO.isEarlyClobber();
      SlotIndex Pos = Indexes.getInstructionIndex(MI).getRegSlot(EarlyClobber);
      Undefs.push_back(Pos);
    }
  }
}

raw_ostream& llvm::operator<<(raw_ostream& OS, const LiveRange::Segment &S) {
  return OS << '[' << S.start << ',' << S.end << ':' << S.valno->id << ')';
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
LLVM_DUMP_METHOD void LiveRange::Segment::dump() const {
  dbgs() << *this << '\n';
}
#endif

void LiveRange::print(raw_ostream &OS) const {
  if (empty())
    OS << "EMPTY";
  else {
    for (const Segment &S : segments) {
      OS << S;
      assert(S.valno == getValNumInfo(S.valno->id) && "Bad VNInfo");
    }
  }

  // Print value number info.
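  // Values print as "<number>@<def-slot>"; unused values print an 'x' in
  // place of the def slot, and PHI definitions get a "-phi" suffix.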
  if (getNumValNums()) {
    OS << "  ";
    unsigned vnum = 0;
    for (const_vni_iterator i = vni_begin(), e = vni_end(); i != e;
         ++i, ++vnum) {
      const VNInfo *vni = *i;
      if (vnum) OS << ' ';
      OS << vnum << '@';
      if (vni->isUnused()) {
        OS << 'x';
      } else {
        OS << vni->def;
        if (vni->isPHIDef())
          OS << "-phi";
      }
    }
  }
}

void LiveInterval::SubRange::print(raw_ostream &OS) const {
  OS << " L" << PrintLaneMask(LaneMask) << ' '
     << static_cast<const LiveRange&>(*this);
}

void LiveInterval::print(raw_ostream &OS) const {
  OS << printReg(reg) << ' ';
  super::print(OS);
  // Print subranges.
  for (const SubRange &SR : subranges())
    OS << SR;
  OS << " weight:" << weight;
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
LLVM_DUMP_METHOD void LiveRange::dump() const {
  dbgs() << *this << '\n';
}

LLVM_DUMP_METHOD void LiveInterval::SubRange::dump() const {
  dbgs() << *this << '\n';
}

LLVM_DUMP_METHOD void LiveInterval::dump() const {
  dbgs() << *this << '\n';
}
#endif

#ifndef NDEBUG
void LiveRange::verify() const {
  for (const_iterator I = begin(), E = end(); I != E; ++I) {
    assert(I->start.isValid());
    assert(I->end.isValid());
    assert(I->start < I->end);
    assert(I->valno != nullptr);
    assert(I->valno->id < valnos.size());
    assert(I->valno == valnos[I->valno->id]);
    if (std::next(I) != E) {
      assert(I->end <= std::next(I)->start);
      if (I->end == std::next(I)->start)
        assert(I->valno != std::next(I)->valno);
    }
  }
}

void LiveInterval::verify(const MachineRegisterInfo *MRI) const {
  super::verify();

  // Make sure SubRanges are fine and LaneMasks are disjoint.
  LaneBitmask Mask;
  LaneBitmask MaxMask = MRI != nullptr ? MRI->getMaxLaneMaskForVReg(reg)
                                       : LaneBitmask::getAll();
  for (const SubRange &SR : subranges()) {
    // The subrange lane mask should be disjoint from all previous subrange
    // masks.
    assert((Mask & SR.LaneMask).none());
    Mask |= SR.LaneMask;

    // The subrange mask should be contained in the maximum lane mask for the
    // vreg.
    assert((Mask & ~MaxMask).none());
    // Empty subranges must be removed.
    assert(!SR.empty());

    SR.verify();
    // The main live range should cover the subrange.
    assert(covers(SR));
  }
}
#endif

//===----------------------------------------------------------------------===//
// LiveRangeUpdater class
//===----------------------------------------------------------------------===//
//
// The LiveRangeUpdater class always maintains these invariants:
//
// - When LastStart is invalid, Spills is empty and the iterators are invalid.
//   This is the initial state, and the state created by flush().
//   In this state, isDirty() returns false.
//
// Otherwise, segments are kept in three separate areas:
//
// 1. [begin; WriteI) at the front of LR.
// 2. [ReadI; end) at the back of LR.
// 3. Spills.
//
// - LR.begin() <= WriteI <= ReadI <= LR.end().
// - Segments in all three areas are fully ordered and coalesced.
// - Segments in area 1 precede and can't coalesce with segments in area 2.
// - Segments in Spills precede and can't coalesce with segments in area 2.
// - No coalescing is possible between segments in Spills and segments in area
//   1, and there are no overlapping segments.
//
// The segments in Spills are not ordered with respect to the segments in area
// 1. They need to be merged.
//
// When they exist, Spills.back().start <= LastStart,
// and WriteI[-1].start <= LastStart.

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void LiveRangeUpdater::print(raw_ostream &OS) const {
  if (!isDirty()) {
    if (LR)
      OS << "Clean updater: " << *LR << '\n';
    else
      OS << "Null updater.\n";
    return;
  }
  assert(LR && "Can't have null LR in dirty updater.");
  OS << " updater with gap = " << (ReadI - WriteI)
     << ", last start = " << LastStart
     << ":\n  Area 1:";
  for (const auto &S : make_range(LR->begin(), WriteI))
    OS << ' ' << S;
  OS << "\n  Spills:";
  for (unsigned I = 0, E = Spills.size(); I != E; ++I)
    OS << ' ' << Spills[I];
  OS << "\n  Area 2:";
  for (const auto &S : make_range(ReadI, LR->end()))
    OS << ' ' << S;
  OS << '\n';
}

LLVM_DUMP_METHOD void LiveRangeUpdater::dump() const {
  print(errs());
}
#endif

// Determine if A and B should be coalesced.
static inline bool coalescable(const LiveRange::Segment &A,
                               const LiveRange::Segment &B) {
  assert(A.start <= B.start && "Unordered live segments.");
  if (A.end == B.start)
    return A.valno == B.valno;
  if (A.end < B.start)
    return false;
  assert(A.valno == B.valno && "Cannot overlap different values");
  return true;
}

void LiveRangeUpdater::add(LiveRange::Segment Seg) {
  assert(LR && "Cannot add to a null destination");

  // Fall back to the regular add method if the live range
  // is using the segment set instead of the segment vector.
  if (LR->segmentSet != nullptr) {
    LR->addSegmentToSet(Seg);
    return;
  }

  // Flush the state if Start moves backwards.
  if (!LastStart.isValid() || LastStart > Seg.start) {
    if (isDirty())
      flush();
    // This brings us to an uninitialized state. Reinitialize.
    assert(Spills.empty() && "Leftover spilled segments");
    WriteI = ReadI = LR->begin();
  }

  // Remember start for next time.
  LastStart = Seg.start;

  // Advance ReadI until it ends after Seg.start.
  LiveRange::iterator E = LR->end();
  if (ReadI != E && ReadI->end <= Seg.start) {
    // First try to close the gap between WriteI and ReadI with spills.
    if (ReadI != WriteI)
      mergeSpills();
    // Then advance ReadI.
    if (ReadI == WriteI)
      ReadI = WriteI = LR->find(Seg.start);
    else
      while (ReadI != E && ReadI->end <= Seg.start)
        *WriteI++ = *ReadI++;
  }

  assert(ReadI == E || ReadI->end > Seg.start);

  // Check if the ReadI segment begins early.
  if (ReadI != E && ReadI->start <= Seg.start) {
    assert(ReadI->valno == Seg.valno && "Cannot overlap different values");
    // Bail if Seg is completely contained in ReadI.
    if (ReadI->end >= Seg.end)
      return;
    // Coalesce into Seg.
    Seg.start = ReadI->start;
    ++ReadI;
  }

  // Coalesce as much as possible from ReadI into Seg.
  while (ReadI != E && coalescable(Seg, *ReadI)) {
    Seg.end = std::max(Seg.end, ReadI->end);
    ++ReadI;
  }

  // Try coalescing Spills.back() into Seg.
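  // The class invariant guarantees Spills.back().start <= LastStart, and
  // LastStart was just set to Seg.start above, so coalescable() below sees
  // its arguments in the required order (A.start <= B.start).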
  if (!Spills.empty() && coalescable(Spills.back(), Seg)) {
    Seg.start = Spills.back().start;
    Seg.end = std::max(Spills.back().end, Seg.end);
    Spills.pop_back();
  }

  // Try coalescing Seg into WriteI[-1].
  if (WriteI != LR->begin() && coalescable(WriteI[-1], Seg)) {
    WriteI[-1].end = std::max(WriteI[-1].end, Seg.end);
    return;
  }

  // Seg doesn't coalesce with anything, and needs to be inserted somewhere.
  if (WriteI != ReadI) {
    *WriteI++ = Seg;
    return;
  }

  // Finally, append to LR or Spills.
  if (WriteI == E) {
    LR->segments.push_back(Seg);
    WriteI = ReadI = LR->end();
  } else
    Spills.push_back(Seg);
}

// Merge as many spilled segments as possible into the gap between WriteI
// and ReadI. Advance WriteI to reflect the inserted segments.
void LiveRangeUpdater::mergeSpills() {
  // Perform a backwards merge of Spills and [SpillI;WriteI).
  size_t GapSize = ReadI - WriteI;
  size_t NumMoved = std::min(Spills.size(), GapSize);
  LiveRange::iterator Src = WriteI;
  LiveRange::iterator Dst = Src + NumMoved;
  LiveRange::iterator SpillSrc = Spills.end();
  LiveRange::iterator B = LR->begin();

  // This is the new WriteI position after merging spills.
  WriteI = Dst;

  // Now merge Src and Spills backwards.
  while (Src != Dst) {
    if (Src != B && Src[-1].start > SpillSrc[-1].start)
      *--Dst = *--Src;
    else
      *--Dst = *--SpillSrc;
  }
  assert(NumMoved == size_t(Spills.end() - SpillSrc));
  Spills.erase(SpillSrc, Spills.end());
}

void LiveRangeUpdater::flush() {
  if (!isDirty())
    return;
  // Clear the dirty state.
  LastStart = SlotIndex();

  assert(LR && "Cannot add to a null destination");

  // Nothing to merge?
  if (Spills.empty()) {
    LR->segments.erase(WriteI, ReadI);
    LR->verify();
    return;
  }

  // Resize the WriteI - ReadI gap to match Spills.
  size_t GapSize = ReadI - WriteI;
  if (GapSize < Spills.size()) {
    // The gap is too small. Make some room.
    size_t WritePos = WriteI - LR->begin();
    LR->segments.insert(ReadI, Spills.size() - GapSize, LiveRange::Segment());
    // This also invalidated ReadI, but it is recomputed below.
    WriteI = LR->begin() + WritePos;
  } else {
    // Shrink the gap if necessary.
    LR->segments.erase(WriteI + Spills.size(), ReadI);
  }
  ReadI = WriteI + Spills.size();
  mergeSpills();
  LR->verify();
}

unsigned ConnectedVNInfoEqClasses::Classify(const LiveRange &LR) {
  // Create initial equivalence classes.
  EqClass.clear();
  EqClass.grow(LR.getNumValNums());

  const VNInfo *used = nullptr, *unused = nullptr;

  // Determine connections.
  for (const VNInfo *VNI : LR.valnos) {
    // Group all unused values into one class.
    if (VNI->isUnused()) {
      if (unused)
        EqClass.join(unused->id, VNI->id);
      unused = VNI;
      continue;
    }
    used = VNI;
    if (VNI->isPHIDef()) {
      const MachineBasicBlock *MBB = LIS.getMBBFromIndex(VNI->def);
      assert(MBB && "Phi-def has no defining MBB");
      // Connect to values live out of predecessors.
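      // A PHI-defined value belongs to the same class as every value that is
      // live out of a predecessor, i.e. the value live just before the
      // predecessor's end index.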
      for (MachineBasicBlock::const_pred_iterator PI = MBB->pred_begin(),
           PE = MBB->pred_end(); PI != PE; ++PI)
        if (const VNInfo *PVNI = LR.getVNInfoBefore(LIS.getMBBEndIdx(*PI)))
          EqClass.join(VNI->id, PVNI->id);
    } else {
      // Normal value defined by an instruction. Check for two-addr redef.
      // FIXME: This could be coincidental. Should we really check for a tied
      // operand constraint?
      // Note that VNI->def may be a use slot for an early clobber def.
      if (const VNInfo *UVNI = LR.getVNInfoBefore(VNI->def))
        EqClass.join(VNI->id, UVNI->id);
    }
  }

  // Lump all the unused values in with the last used value.
  if (used && unused)
    EqClass.join(used->id, unused->id);

  EqClass.compress();
  return EqClass.getNumClasses();
}

void ConnectedVNInfoEqClasses::Distribute(LiveInterval &LI, LiveInterval *LIV[],
                                          MachineRegisterInfo &MRI) {
  // Rewrite instructions.
  for (MachineRegisterInfo::reg_iterator RI = MRI.reg_begin(LI.reg),
       RE = MRI.reg_end(); RI != RE;) {
    MachineOperand &MO = *RI;
    MachineInstr *MI = RI->getParent();
    ++RI;
    // DBG_VALUE instructions don't have slot indexes, so get the index of the
    // instruction before them.
    // Normally, DBG_VALUE instructions are removed before this function is
    // called, but it is not a requirement.
    SlotIndex Idx;
    if (MI->isDebugValue())
      Idx = LIS.getSlotIndexes()->getIndexBefore(*MI);
    else
      Idx = LIS.getInstructionIndex(*MI);
    LiveQueryResult LRQ = LI.Query(Idx);
    const VNInfo *VNI = MO.readsReg() ? LRQ.valueIn() : LRQ.valueDefined();
    // In the case of an <undef> use that isn't tied to any def, VNI will be
    // NULL. If the use is tied to a def, VNI will be the defined value.
    if (!VNI)
      continue;
    if (unsigned EqClass = getEqClass(VNI))
      MO.setReg(LIV[EqClass-1]->reg);
  }

  // Distribute subregister live ranges.
  if (LI.hasSubRanges()) {
    unsigned NumComponents = EqClass.getNumClasses();
    SmallVector<unsigned, 8> VNIMapping;
    SmallVector<LiveInterval::SubRange*, 8> SubRanges;
    BumpPtrAllocator &Allocator = LIS.getVNInfoAllocator();
    for (LiveInterval::SubRange &SR : LI.subranges()) {
      // Create new subranges in the split intervals and construct a mapping
      // for the VNInfos in the subrange.
      unsigned NumValNos = SR.valnos.size();
      VNIMapping.clear();
      VNIMapping.reserve(NumValNos);
      SubRanges.clear();
      SubRanges.resize(NumComponents-1, nullptr);
      for (unsigned I = 0; I < NumValNos; ++I) {
        const VNInfo &VNI = *SR.valnos[I];
        unsigned ComponentNum;
        if (VNI.isUnused()) {
          ComponentNum = 0;
        } else {
          const VNInfo *MainRangeVNI = LI.getVNInfoAt(VNI.def);
          assert(MainRangeVNI != nullptr
                 && "SubRange def must have corresponding main range def");
          ComponentNum = getEqClass(MainRangeVNI);
          if (ComponentNum > 0 && SubRanges[ComponentNum-1] == nullptr) {
            SubRanges[ComponentNum-1]
              = LIV[ComponentNum-1]->createSubRange(Allocator, SR.LaneMask);
          }
        }
        VNIMapping.push_back(ComponentNum);
      }
      DistributeRange(SR, SubRanges.data(), VNIMapping);
    }
    LI.removeEmptySubRanges();
  }

  // Distribute the main live range.
  DistributeRange(LI, LIV, EqClass);
}