//===- SROA.cpp - Scalar Replacement Of Aggregates ------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This transformation implements the well known scalar replacement of
/// aggregates transformation. It tries to identify promotable elements of an
/// aggregate alloca, and promote them to registers. It will also try to
/// convert uses of an element (or set of elements) of an alloca into a vector
/// or bitfield-style integer scalar if appropriate.
///
/// It works to do this with minimal slicing of the alloca so that regions
/// which are merely transferred in and out of external memory remain unchanged
/// and are not decomposed to scalar code.
///
/// Because this also performs alloca promotion, it can be thought of as also
/// serving the purpose of SSA formation. The algorithm iterates on the
/// function until all opportunities for promotion have been realized.
///
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Scalar/SROA.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/PointerIntPair.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/ADT/iterator.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/PtrUseVisitor.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/ConstantFolder.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DIBuilder.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstVisitor.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/PromoteMemToReg.h"
#include <algorithm>
#include <cassert>
#include <chrono>
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <iterator>
#include <string>
#include <tuple>
#include <utility>
#include <vector>

#ifndef NDEBUG
// We only use this for a debug check.
#include <random>
#endif

using namespace llvm;
using namespace llvm::sroa;

#define DEBUG_TYPE "sroa"

STATISTIC(NumAllocasAnalyzed, "Number of allocas analyzed for replacement");
STATISTIC(NumAllocaPartitions, "Number of alloca partitions formed");
STATISTIC(MaxPartitionsPerAlloca, "Maximum number of partitions per alloca");
STATISTIC(NumAllocaPartitionUses, "Number of alloca partition uses rewritten");
STATISTIC(MaxUsesPerAllocaPartition, "Maximum number of uses of a partition");
STATISTIC(NumNewAllocas, "Number of new, smaller allocas introduced");
STATISTIC(NumPromoted, "Number of allocas promoted to SSA values");
STATISTIC(NumLoadsSpeculated, "Number of loads speculated to allow promotion");
STATISTIC(NumDeleted, "Number of instructions deleted");
STATISTIC(NumVectorized, "Number of vectorized aggregates");

/// Hidden option to enable randomly shuffling the slices to help uncover
/// instability in their order.
static cl::opt<bool> SROARandomShuffleSlices("sroa-random-shuffle-slices",
                                             cl::init(false), cl::Hidden);

/// Hidden option to experiment with completely strict handling of inbounds
/// GEPs.
static cl::opt<bool> SROAStrictInbounds("sroa-strict-inbounds", cl::init(false),
                                        cl::Hidden);

namespace {

/// A custom IRBuilder inserter which prefixes all names, but only in
/// Assert builds.
class IRBuilderPrefixedInserter : public IRBuilderDefaultInserter {
  std::string Prefix;

  const Twine getNameWithPrefix(const Twine &Name) const {
    return Name.isTriviallyEmpty() ? Name : Prefix + Name;
  }

public:
  void SetNamePrefix(const Twine &P) { Prefix = P.str(); }

protected:
  void InsertHelper(Instruction *I, const Twine &Name, BasicBlock *BB,
                    BasicBlock::iterator InsertPt) const {
    IRBuilderDefaultInserter::InsertHelper(I, getNameWithPrefix(Name), BB,
                                           InsertPt);
  }
};

/// Provide a type for IRBuilder that drops names in release builds.
using IRBuilderTy = IRBuilder<ConstantFolder, IRBuilderPrefixedInserter>;

/// A used slice of an alloca.
///
/// This structure represents a slice of an alloca used by some instruction. It
/// stores both the begin and end offsets of this use, a pointer to the use
/// itself, and a flag indicating whether we can classify the use as splittable
/// or not when forming partitions of the alloca.
class Slice {
  /// The beginning offset of the range.
  uint64_t BeginOffset = 0;

  /// The ending offset, not included in the range.
  uint64_t EndOffset = 0;

  /// Storage for both the use of this slice and whether it can be
  /// split.
  PointerIntPair<Use *, 1, bool> UseAndIsSplittable;

public:
  Slice() = default;

  Slice(uint64_t BeginOffset, uint64_t EndOffset, Use *U, bool IsSplittable)
      : BeginOffset(BeginOffset), EndOffset(EndOffset),
        UseAndIsSplittable(U, IsSplittable) {}

  uint64_t beginOffset() const { return BeginOffset; }
  uint64_t endOffset() const { return EndOffset; }

  bool isSplittable() const { return UseAndIsSplittable.getInt(); }
  void makeUnsplittable() { UseAndIsSplittable.setInt(false); }

  Use *getUse() const { return UseAndIsSplittable.getPointer(); }

  bool isDead() const { return getUse() == nullptr; }
  void kill() { UseAndIsSplittable.setPointer(nullptr); }

  /// Support for ordering ranges.
  ///
  /// This provides an ordering over ranges such that start offsets are
  /// always increasing, and within equal start offsets, the end offsets are
  /// decreasing. Thus the spanning range comes first in a cluster with the
  /// same start position.
  bool operator<(const Slice &RHS) const {
    if (beginOffset() < RHS.beginOffset())
      return true;
    if (beginOffset() > RHS.beginOffset())
      return false;
    if (isSplittable() != RHS.isSplittable())
      return !isSplittable();
    if (endOffset() > RHS.endOffset())
      return true;
    return false;
  }

  /// Support comparison with a single offset to allow binary searches.
  friend LLVM_ATTRIBUTE_UNUSED bool operator<(const Slice &LHS,
                                              uint64_t RHSOffset) {
    return LHS.beginOffset() < RHSOffset;
  }
  friend LLVM_ATTRIBUTE_UNUSED bool operator<(uint64_t LHSOffset,
                                              const Slice &RHS) {
    return LHSOffset < RHS.beginOffset();
  }

  bool operator==(const Slice &RHS) const {
    return isSplittable() == RHS.isSplittable() &&
           beginOffset() == RHS.beginOffset() && endOffset() == RHS.endOffset();
  }
  bool operator!=(const Slice &RHS) const { return !operator==(RHS); }
};
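
// Illustrative only (not taken from a test): with this ordering, slices of a
// 16-byte alloca such as [0,16) unsplittable, [0,8) splittable, [4,12)
// splittable and [8,16) unsplittable sort exactly in that sequence -- begin
// offsets ascend, an unsplittable slice precedes a splittable one sharing its
// begin offset, and within a cluster the spanning (largest) range comes first.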

} // end anonymous namespace

/// Representation of the alloca slices.
///
/// This class represents the slices of an alloca which are formed by its
/// various uses. If a pointer escapes, we can't fully build a representation
/// for the slices used and we reflect that in this structure. The uses are
/// stored, sorted by increasing beginning offset and with unsplittable slices
/// starting at a particular offset before splittable slices.
class llvm::sroa::AllocaSlices {
public:
  /// Construct the slices of a particular alloca.
  AllocaSlices(const DataLayout &DL, AllocaInst &AI);

  /// Test whether a pointer to the allocation escapes our analysis.
  ///
  /// If this is true, the slices are never fully built and should be
  /// ignored.
  bool isEscaped() const { return PointerEscapingInstr; }

  /// Support for iterating over the slices.
  /// @{
  using iterator = SmallVectorImpl<Slice>::iterator;
  using range = iterator_range<iterator>;

  iterator begin() { return Slices.begin(); }
  iterator end() { return Slices.end(); }

  using const_iterator = SmallVectorImpl<Slice>::const_iterator;
  using const_range = iterator_range<const_iterator>;

  const_iterator begin() const { return Slices.begin(); }
  const_iterator end() const { return Slices.end(); }
  /// @}

  /// Erase a range of slices.
  void erase(iterator Start, iterator Stop) { Slices.erase(Start, Stop); }

  /// Insert new slices for this alloca.
  ///
  /// This moves the slices into the alloca's slices collection, and re-sorts
  /// everything so that the usual ordering properties of the alloca's slices
  /// hold.
  void insert(ArrayRef<Slice> NewSlices) {
    int OldSize = Slices.size();
    Slices.append(NewSlices.begin(), NewSlices.end());
    auto SliceI = Slices.begin() + OldSize;
    llvm::sort(SliceI, Slices.end());
    std::inplace_merge(Slices.begin(), SliceI, Slices.end());
  }

  // Forward declare the iterator and range accessor for walking the
  // partitions.
  class partition_iterator;
  iterator_range<partition_iterator> partitions();

  /// Access the dead users for this alloca.
  ArrayRef<Instruction *> getDeadUsers() const { return DeadUsers; }

  /// Access the dead operands referring to this alloca.
  ///
  /// These are operands which cannot actually be used to refer to the
  /// alloca as they are outside its range and the user doesn't correct for
  /// that. These mostly consist of PHI node inputs and the like which we just
  /// need to replace with undef.
  ArrayRef<Use *> getDeadOperands() const { return DeadOperands; }

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  void print(raw_ostream &OS, const_iterator I, StringRef Indent = "  ") const;
  void printSlice(raw_ostream &OS, const_iterator I,
                  StringRef Indent = "  ") const;
  void printUse(raw_ostream &OS, const_iterator I,
                StringRef Indent = "  ") const;
  void print(raw_ostream &OS) const;
  void dump(const_iterator I) const;
  void dump() const;
#endif

private:
  template <typename DerivedT, typename RetT = void> class BuilderBase;
  class SliceBuilder;

  friend class AllocaSlices::SliceBuilder;

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  /// Handle to alloca instruction to simplify method interfaces.
  AllocaInst &AI;
#endif

  /// The instruction responsible for this alloca not having a known set
  /// of slices.
  ///
  /// When an instruction (potentially) escapes the pointer to the alloca, we
  /// store a pointer to that here and abort trying to form slices of the
  /// alloca. This will be null if the alloca slices are analyzed successfully.
  Instruction *PointerEscapingInstr;

  /// The slices of the alloca.
  ///
  /// We store a vector of the slices formed by uses of the alloca here. This
  /// vector is sorted by increasing begin offset, and then the unsplittable
  /// slices before the splittable ones. See the Slice inner class for more
  /// details.
  SmallVector<Slice, 8> Slices;

  /// Instructions which will become dead if we rewrite the alloca.
  ///
  /// Note that these are not separated by slice. This is because we expect an
  /// alloca to be completely rewritten or not rewritten at all. If rewritten,
  /// all these instructions can simply be removed and replaced with undef as
  /// they come from outside of the allocated space.
  SmallVector<Instruction *, 8> DeadUsers;

  /// Operands which will become dead if we rewrite the alloca.
  ///
  /// These are operands that in their particular use can be replaced with
  /// undef when we rewrite the alloca. These show up in out-of-bounds inputs
  /// to PHI nodes and the like.
  /// They aren't entirely dead (there might be
  /// a GEP back into the bounds using it elsewhere), nor is the PHI, but we
  /// want to swap this particular input for undef to simplify the use lists of
  /// the alloca.
  SmallVector<Use *, 8> DeadOperands;
};

/// A partition of the slices.
///
/// An ephemeral representation for a range of slices which can be viewed as
/// a partition of the alloca. This range represents a span of the alloca's
/// memory which cannot be split, and provides access to all of the slices
/// overlapping some part of the partition.
///
/// Objects of this type are produced by traversing the alloca's slices, but
/// are only ephemeral and not persistent.
class llvm::sroa::Partition {
private:
  friend class AllocaSlices;
  friend class AllocaSlices::partition_iterator;

  using iterator = AllocaSlices::iterator;

  /// The beginning and ending offsets of the alloca for this
  /// partition.
  uint64_t BeginOffset = 0, EndOffset = 0;

  /// The start and end iterators of this partition.
  iterator SI, SJ;

  /// A collection of split slice tails overlapping the partition.
  SmallVector<Slice *, 4> SplitTails;

  /// Raw constructor builds an empty partition starting and ending at
  /// the given iterator.
  Partition(iterator SI) : SI(SI), SJ(SI) {}

public:
  /// The start offset of this partition.
  ///
  /// All of the contained slices start at or after this offset.
  uint64_t beginOffset() const { return BeginOffset; }

  /// The end offset of this partition.
  ///
  /// All of the contained slices end at or before this offset.
  uint64_t endOffset() const { return EndOffset; }

  /// The size of the partition.
  ///
  /// Note that this can never be zero.
  uint64_t size() const {
    assert(BeginOffset < EndOffset && "Partitions must span some bytes!");
    return EndOffset - BeginOffset;
  }

  /// Test whether this partition contains no slices, and merely spans
  /// a region occupied by split slices.
  bool empty() const { return SI == SJ; }

  /// \name Iterate slices that start within the partition.
  /// These may be splittable or unsplittable. They have a begin offset >= the
  /// partition begin offset.
  /// @{
  // FIXME: We should probably define a "concat_iterator" helper and use that
  // to stitch together pointee_iterators over the split tails and the
  // contiguous iterators of the partition. That would give a much nicer
  // interface here. We could then additionally expose filtered iterators for
  // split, unsplit, and unsplittable slices based on the usage patterns.
  iterator begin() const { return SI; }
  iterator end() const { return SJ; }
  /// @}

  /// Get the sequence of split slice tails.
  ///
  /// These tails are of slices which start before this partition but are
  /// split and overlap into the partition. We accumulate these while forming
  /// partitions.
  ArrayRef<Slice *> splitSliceTails() const { return SplitTails; }
};

/// An iterator over partitions of the alloca's slices.
///
/// This iterator implements the core algorithm for partitioning the alloca's
/// slices. It is a forward iterator as we don't support backtracking for
/// efficiency reasons, and re-use a single storage area to maintain the
/// current set of split slices.
///
/// It is templated on the slice iterator type to use so that it can operate
/// with either const or non-const slice iterators.
class AllocaSlices::partition_iterator
    : public iterator_facade_base<partition_iterator, std::forward_iterator_tag,
                                  Partition> {
  friend class AllocaSlices;

  /// Most of the state for walking the partitions is held in a class
  /// with a nice interface for examining them.
  Partition P;

  /// We need to keep the end of the slices to know when to stop.
  AllocaSlices::iterator SE;

  /// We also need to keep track of the maximum split end offset seen.
  /// FIXME: Do we really?
  uint64_t MaxSplitSliceEndOffset = 0;

  /// Sets the partition to be empty at given iterator, and sets the
  /// end iterator.
  partition_iterator(AllocaSlices::iterator SI, AllocaSlices::iterator SE)
      : P(SI), SE(SE) {
    // If not already at the end, advance our state to form the initial
    // partition.
    if (SI != SE)
      advance();
  }

  /// Advance the iterator to the next partition.
  ///
  /// Requires that the iterator not be at the end of the slices.
  void advance() {
    assert((P.SI != SE || !P.SplitTails.empty()) &&
           "Cannot advance past the end of the slices!");

    // Clear out any split uses which have ended.
    if (!P.SplitTails.empty()) {
      if (P.EndOffset >= MaxSplitSliceEndOffset) {
        // If we've finished all splits, this is easy.
        P.SplitTails.clear();
        MaxSplitSliceEndOffset = 0;
      } else {
        // Remove the uses which have ended in the prior partition. This
        // cannot change the max split slice end because we just checked that
        // the prior partition ended prior to that max.
        P.SplitTails.erase(llvm::remove_if(P.SplitTails,
                                           [&](Slice *S) {
                                             return S->endOffset() <=
                                                    P.EndOffset;
                                           }),
                           P.SplitTails.end());
        assert(llvm::any_of(P.SplitTails,
                            [&](Slice *S) {
                              return S->endOffset() == MaxSplitSliceEndOffset;
                            }) &&
               "Could not find the current max split slice offset!");
        assert(llvm::all_of(P.SplitTails,
                            [&](Slice *S) {
                              return S->endOffset() <= MaxSplitSliceEndOffset;
                            }) &&
               "Max split slice end offset is not actually the max!");
      }
    }

    // If P.SI is already at the end, then we've cleared the split tail and
    // now have an end iterator.
    if (P.SI == SE) {
      assert(P.SplitTails.empty() && "Failed to clear the split slices!");
      return;
    }

    // If we had a non-empty partition previously, set up the state for
    // subsequent partitions.
    if (P.SI != P.SJ) {
      // Accumulate all the splittable slices which started in the old
      // partition into the split list.
      for (Slice &S : P)
        if (S.isSplittable() && S.endOffset() > P.EndOffset) {
          P.SplitTails.push_back(&S);
          MaxSplitSliceEndOffset =
              std::max(S.endOffset(), MaxSplitSliceEndOffset);
        }

      // Start from the end of the previous partition.
      P.SI = P.SJ;

      // If P.SI is now at the end, we at most have a tail of split slices.
      if (P.SI == SE) {
        P.BeginOffset = P.EndOffset;
        P.EndOffset = MaxSplitSliceEndOffset;
        return;
      }

      // If we have split slices and the next slice is after a gap and is
      // not splittable, immediately form an empty partition for the split
      // slices up until the next slice begins.
      if (!P.SplitTails.empty() && P.SI->beginOffset() != P.EndOffset &&
          !P.SI->isSplittable()) {
        P.BeginOffset = P.EndOffset;
        P.EndOffset = P.SI->beginOffset();
        return;
      }
    }

    // OK, we need to consume new slices. Set the end offset based on the
    // current slice, and step SJ past it. The beginning offset of the
    // partition is the beginning offset of the next slice unless we have
    // pre-existing split slices that are continuing, in which case we begin
    // at the prior end offset.
    P.BeginOffset = P.SplitTails.empty() ? P.SI->beginOffset() : P.EndOffset;
    P.EndOffset = P.SI->endOffset();
    ++P.SJ;

    // There are two strategies to form a partition based on whether the
    // partition starts with an unsplittable slice or a splittable slice.
    if (!P.SI->isSplittable()) {
      // When we're forming an unsplittable region, it must always start at
      // the first slice and will extend through its end.
      assert(P.BeginOffset == P.SI->beginOffset());

      // Form a partition including all of the overlapping slices with this
      // unsplittable slice.
      while (P.SJ != SE && P.SJ->beginOffset() < P.EndOffset) {
        if (!P.SJ->isSplittable())
          P.EndOffset = std::max(P.EndOffset, P.SJ->endOffset());
        ++P.SJ;
      }

      // We have a partition across a set of overlapping unsplittable
      // slices.
      return;
    }

    // If we're starting with a splittable slice, then we need to form
    // a synthetic partition spanning it and any other overlapping splittable
    // slices.
    assert(P.SI->isSplittable() && "Forming a splittable partition!");

    // Collect all of the overlapping splittable slices.
    while (P.SJ != SE && P.SJ->beginOffset() < P.EndOffset &&
           P.SJ->isSplittable()) {
      P.EndOffset = std::max(P.EndOffset, P.SJ->endOffset());
      ++P.SJ;
    }

    // Back up P.EndOffset if we ended the span early when encountering an
    // unsplittable slice. This synthesizes the early end offset of
    // a partition spanning only splittable slices.
    if (P.SJ != SE && P.SJ->beginOffset() < P.EndOffset) {
      assert(!P.SJ->isSplittable());
      P.EndOffset = P.SJ->beginOffset();
    }
  }

public:
  bool operator==(const partition_iterator &RHS) const {
    assert(SE == RHS.SE &&
           "End iterators don't match between compared partition iterators!");

    // The observed position of a partition is marked by the P.SI iterator and
    // the emptiness of the split slices. The latter is only relevant when
    // P.SI == SE, as the end iterator will additionally have an empty split
    // slices list, but the prior may have the same P.SI and a tail of split
    // slices.
    if (P.SI == RHS.P.SI && P.SplitTails.empty() == RHS.P.SplitTails.empty()) {
      assert(P.SJ == RHS.P.SJ &&
             "Same set of slices formed two different sized partitions!");
      assert(P.SplitTails.size() == RHS.P.SplitTails.size() &&
             "Same slice position with differently sized non-empty split "
             "slice tails!");
      return true;
    }
    return false;
  }

  partition_iterator &operator++() {
    advance();
    return *this;
  }

  Partition &operator*() { return P; }
};

/// A forward range over the partitions of the alloca's slices.
///
/// This accesses an iterator range over the partitions of the alloca's
/// slices. It computes these partitions on the fly based on the overlapping
/// offsets of the slices and the ability to split them. It will visit "empty"
/// partitions to cover regions of the alloca only accessed via split
/// slices.
iterator_range<AllocaSlices::partition_iterator> AllocaSlices::partitions() {
  return make_range(partition_iterator(begin(), end()),
                    partition_iterator(end(), end()));
}
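
// A worked illustration (hypothetical slices, not taken from a test): for a
// 16-byte alloca with a splittable slice [0,16) (say a memset), an
// unsplittable slice [0,4) and an unsplittable slice [8,12), this range
// visits the partitions [0,4), [4,8), [8,12) and [12,16); the [4,8) and
// [12,16) partitions are "empty" and only carry the split tail of the [0,16)
// slice. A typical caller simply walks the range:
//
//   for (Partition &P : AS.partitions()) {
//     // [P.beginOffset(), P.endOffset()) plus P.splitSliceTails() describe
//     // one contiguous span of the alloca.
//   }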

static Value *foldSelectInst(SelectInst &SI) {
  // If the condition being selected on is a constant or the same value is
  // being selected between, fold the select. Yes this does (rarely) happen
  // early on.
  if (ConstantInt *CI = dyn_cast<ConstantInt>(SI.getCondition()))
    return SI.getOperand(1 + CI->isZero());
  if (SI.getOperand(1) == SI.getOperand(2))
    return SI.getOperand(1);

  return nullptr;
}

/// A helper that folds a PHI node or a select.
static Value *foldPHINodeOrSelectInst(Instruction &I) {
  if (PHINode *PN = dyn_cast<PHINode>(&I)) {
    // If PN merges together the same value, return that value.
    return PN->hasConstantValue();
  }
  return foldSelectInst(cast<SelectInst>(I));
}

/// Builder for the alloca slices.
///
/// This class builds a set of alloca slices by recursively visiting the uses
/// of an alloca and making a slice for each load and store at each offset.
class AllocaSlices::SliceBuilder : public PtrUseVisitor<SliceBuilder> {
  friend class PtrUseVisitor<SliceBuilder>;
  friend class InstVisitor<SliceBuilder>;

  using Base = PtrUseVisitor<SliceBuilder>;

  const uint64_t AllocSize;
  AllocaSlices &AS;

  SmallDenseMap<Instruction *, unsigned> MemTransferSliceMap;
  SmallDenseMap<Instruction *, uint64_t> PHIOrSelectSizes;

  /// Set to de-duplicate dead instructions found in the use walk.
  SmallPtrSet<Instruction *, 4> VisitedDeadInsts;

public:
  SliceBuilder(const DataLayout &DL, AllocaInst &AI, AllocaSlices &AS)
      : PtrUseVisitor<SliceBuilder>(DL),
        AllocSize(DL.getTypeAllocSize(AI.getAllocatedType())), AS(AS) {}

private:
  void markAsDead(Instruction &I) {
    if (VisitedDeadInsts.insert(&I).second)
      AS.DeadUsers.push_back(&I);
  }

  void insertUse(Instruction &I, const APInt &Offset, uint64_t Size,
                 bool IsSplittable = false) {
    // Completely skip uses which have a zero size or start either before or
    // past the end of the allocation.
    if (Size == 0 || Offset.uge(AllocSize)) {
      LLVM_DEBUG(dbgs() << "WARNING: Ignoring " << Size << " byte use @"
                        << Offset
                        << " which has zero size or starts outside of the "
                        << AllocSize << " byte alloca:\n"
                        << "    alloca: " << AS.AI << "\n"
                        << "       use: " << I << "\n");
      return markAsDead(I);
    }

    uint64_t BeginOffset = Offset.getZExtValue();
    uint64_t EndOffset = BeginOffset + Size;

    // Clamp the end offset to the end of the allocation. Note that this is
    // formulated to handle even the case where "BeginOffset + Size" overflows.
    // This may appear superficially to be something we could ignore entirely,
    // but that is not so! There may be widened loads or PHI-node uses where
    // some instructions are dead but not others. We can't completely ignore
    // them, and so have to record at least the information here.
    assert(AllocSize >= BeginOffset); // Established above.
    if (Size > AllocSize - BeginOffset) {
      LLVM_DEBUG(dbgs() << "WARNING: Clamping a " << Size << " byte use @"
                        << Offset << " to remain within the " << AllocSize
                        << " byte alloca:\n"
                        << "    alloca: " << AS.AI << "\n"
                        << "       use: " << I << "\n");
      EndOffset = AllocSize;
    }

    AS.Slices.push_back(Slice(BeginOffset, EndOffset, U, IsSplittable));
  }

  void visitBitCastInst(BitCastInst &BC) {
    if (BC.use_empty())
      return markAsDead(BC);

    return Base::visitBitCastInst(BC);
  }

  void visitAddrSpaceCastInst(AddrSpaceCastInst &ASC) {
    if (ASC.use_empty())
      return markAsDead(ASC);

    return Base::visitAddrSpaceCastInst(ASC);
  }

  void visitGetElementPtrInst(GetElementPtrInst &GEPI) {
    if (GEPI.use_empty())
      return markAsDead(GEPI);

    if (SROAStrictInbounds && GEPI.isInBounds()) {
      // FIXME: This is a manually un-factored variant of the basic code inside
      // of GEPs with checking of the inbounds invariant specified in the
      // langref in a very strict sense. If we ever want to enable
      // SROAStrictInbounds, this code should be factored cleanly into
      // PtrUseVisitor, but it is easier to experiment with SROAStrictInbounds
      // by writing out the code here where we have the underlying allocation
      // size readily available.
      APInt GEPOffset = Offset;
      const DataLayout &DL = GEPI.getModule()->getDataLayout();
      for (gep_type_iterator GTI = gep_type_begin(GEPI),
                             GTE = gep_type_end(GEPI);
           GTI != GTE; ++GTI) {
        ConstantInt *OpC = dyn_cast<ConstantInt>(GTI.getOperand());
        if (!OpC)
          break;

        // Handle a struct index, which adds its field offset to the pointer.
        if (StructType *STy = GTI.getStructTypeOrNull()) {
          unsigned ElementIdx = OpC->getZExtValue();
          const StructLayout *SL = DL.getStructLayout(STy);
          GEPOffset +=
              APInt(Offset.getBitWidth(), SL->getElementOffset(ElementIdx));
        } else {
          // For array or vector indices, scale the index by the size of the
          // type.
          APInt Index = OpC->getValue().sextOrTrunc(Offset.getBitWidth());
          GEPOffset += Index * APInt(Offset.getBitWidth(),
                                     DL.getTypeAllocSize(GTI.getIndexedType()));
        }

        // If this index has computed an intermediate pointer which is not
        // inbounds, then the result of the GEP is a poison value and we can
        // delete it and all uses.
        if (GEPOffset.ugt(AllocSize))
          return markAsDead(GEPI);
      }
    }

    return Base::visitGetElementPtrInst(GEPI);
  }

  void handleLoadOrStore(Type *Ty, Instruction &I, const APInt &Offset,
                         uint64_t Size, bool IsVolatile) {
    // We allow splitting of non-volatile loads and stores where the type is an
    // integer type. These may be used to implement 'memcpy' or other "transfer
    // of bits" patterns.
    bool IsSplittable = Ty->isIntegerTy() && !IsVolatile;

    insertUse(I, Offset, Size, IsSplittable);
  }

  void visitLoadInst(LoadInst &LI) {
    assert((!LI.isSimple() || LI.getType()->isSingleValueType()) &&
           "All simple FCA loads should have been pre-split");

    if (!IsOffsetKnown)
      return PI.setAborted(&LI);

    if (LI.isVolatile() &&
        LI.getPointerAddressSpace() != DL.getAllocaAddrSpace())
      return PI.setAborted(&LI);

    uint64_t Size = DL.getTypeStoreSize(LI.getType());
    return handleLoadOrStore(LI.getType(), LI, Offset, Size, LI.isVolatile());
  }

  void visitStoreInst(StoreInst &SI) {
    Value *ValOp = SI.getValueOperand();
    if (ValOp == *U)
      return PI.setEscapedAndAborted(&SI);
    if (!IsOffsetKnown)
      return PI.setAborted(&SI);

    if (SI.isVolatile() &&
        SI.getPointerAddressSpace() != DL.getAllocaAddrSpace())
      return PI.setAborted(&SI);

    uint64_t Size = DL.getTypeStoreSize(ValOp->getType());

    // If this memory access can be shown to *statically* extend outside the
    // bounds of the allocation, its behavior is undefined, so simply
    // ignore it. Note that this is more strict than the generic clamping
    // behavior of insertUse. We also try to handle cases which might run the
    // risk of overflow.
    // FIXME: We should instead consider the pointer to have escaped if this
    // function is being instrumented for addressing bugs or race conditions.
    if (Size > AllocSize || Offset.ugt(AllocSize - Size)) {
      LLVM_DEBUG(dbgs() << "WARNING: Ignoring " << Size << " byte store @"
                        << Offset << " which extends past the end of the "
                        << AllocSize << " byte alloca:\n"
                        << "    alloca: " << AS.AI << "\n"
                        << "       use: " << SI << "\n");
      return markAsDead(SI);
    }

    assert((!SI.isSimple() || ValOp->getType()->isSingleValueType()) &&
           "All simple FCA stores should have been pre-split");
    handleLoadOrStore(ValOp->getType(), SI, Offset, Size, SI.isVolatile());
  }

  void visitMemSetInst(MemSetInst &II) {
    assert(II.getRawDest() == *U && "Pointer use is not the destination?");
    ConstantInt *Length = dyn_cast<ConstantInt>(II.getLength());
    if ((Length && Length->getValue() == 0) ||
        (IsOffsetKnown && Offset.uge(AllocSize)))
      // Zero-length mem transfer intrinsics can be ignored entirely.
      return markAsDead(II);

    if (!IsOffsetKnown)
      return PI.setAborted(&II);

    // Don't replace this with a store with a different address space. TODO:
    // Use a store with the casted new alloca?
    if (II.isVolatile() && II.getDestAddressSpace() != DL.getAllocaAddrSpace())
      return PI.setAborted(&II);

    insertUse(II, Offset, Length ? Length->getLimitedValue()
                                 : AllocSize - Offset.getLimitedValue(),
              (bool)Length);
  }

  void visitMemTransferInst(MemTransferInst &II) {
    ConstantInt *Length = dyn_cast<ConstantInt>(II.getLength());
    if (Length && Length->getValue() == 0)
      // Zero-length mem transfer intrinsics can be ignored entirely.
      return markAsDead(II);

    // Because we can visit these intrinsics twice, also check to see if the
    // first time marked this instruction as dead. If so, skip it.
    if (VisitedDeadInsts.count(&II))
      return;

    if (!IsOffsetKnown)
      return PI.setAborted(&II);

    // Don't replace this with a load/store with a different address space.
    // TODO: Use a store with the casted new alloca?
    if (II.isVolatile() &&
        (II.getDestAddressSpace() != DL.getAllocaAddrSpace() ||
         II.getSourceAddressSpace() != DL.getAllocaAddrSpace()))
      return PI.setAborted(&II);

    // This side of the transfer is completely out-of-bounds, and so we can
    // nuke the entire transfer. However, we also need to nuke the other side
    // if already added to our partitions.
    // FIXME: Yet another place we really should bypass this when
    // instrumenting for ASan.
    if (Offset.uge(AllocSize)) {
      SmallDenseMap<Instruction *, unsigned>::iterator MTPI =
          MemTransferSliceMap.find(&II);
      if (MTPI != MemTransferSliceMap.end())
        AS.Slices[MTPI->second].kill();
      return markAsDead(II);
    }

    uint64_t RawOffset = Offset.getLimitedValue();
    uint64_t Size = Length ? Length->getLimitedValue() : AllocSize - RawOffset;

    // Check for the special case where the same exact value is used for both
    // source and dest.
    if (*U == II.getRawDest() && *U == II.getRawSource()) {
      // For non-volatile transfers this is a no-op.
      if (!II.isVolatile())
        return markAsDead(II);

      return insertUse(II, Offset, Size, /*IsSplittable=*/false);
    }

    // If we have seen both source and destination for a mem transfer, then
    // they both point to the same alloca.
    bool Inserted;
    SmallDenseMap<Instruction *, unsigned>::iterator MTPI;
    std::tie(MTPI, Inserted) =
        MemTransferSliceMap.insert(std::make_pair(&II, AS.Slices.size()));
    unsigned PrevIdx = MTPI->second;
    if (!Inserted) {
      Slice &PrevP = AS.Slices[PrevIdx];

      // Check if the begin offsets match and this is a non-volatile transfer.
      // In that case, we can completely elide the transfer.
      if (!II.isVolatile() && PrevP.beginOffset() == RawOffset) {
        PrevP.kill();
        return markAsDead(II);
      }

      // Otherwise we have an offset transfer within the same alloca. We can't
      // split those.
      PrevP.makeUnsplittable();
    }

    // Insert the use now that we've fixed up the splittable nature.
    insertUse(II, Offset, Size, /*IsSplittable=*/Inserted && Length);

    // Check that we ended up with a valid index in the map.
    assert(AS.Slices[PrevIdx].getUse()->getUser() == &II &&
           "Map index doesn't point back to a slice with this user.");
  }

  // Disable SRoA for any intrinsics except for lifetime invariants.
  // FIXME: What about debug intrinsics? This matches old behavior, but
  // doesn't make sense.
  void visitIntrinsicInst(IntrinsicInst &II) {
    if (!IsOffsetKnown)
      return PI.setAborted(&II);

    if (II.isLifetimeStartOrEnd()) {
      ConstantInt *Length = cast<ConstantInt>(II.getArgOperand(0));
      uint64_t Size = std::min(AllocSize - Offset.getLimitedValue(),
                               Length->getLimitedValue());
      insertUse(II, Offset, Size, true);
      return;
    }

    Base::visitIntrinsicInst(II);
  }

  Instruction *hasUnsafePHIOrSelectUse(Instruction *Root, uint64_t &Size) {
    // We consider any PHI or select that results in a direct load or store of
    // the same offset to be a viable use for slicing purposes. These uses
    // are considered unsplittable and the size is the maximum loaded or stored
    // size.
    SmallPtrSet<Instruction *, 4> Visited;
    SmallVector<std::pair<Instruction *, Instruction *>, 4> Uses;
    Visited.insert(Root);
    Uses.push_back(std::make_pair(cast<Instruction>(*U), Root));
    const DataLayout &DL = Root->getModule()->getDataLayout();
    // If there are no loads or stores, the access is dead. We mark that as
    // a size zero access.
    Size = 0;
    do {
      Instruction *I, *UsedI;
      std::tie(UsedI, I) = Uses.pop_back_val();

      if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
        Size = std::max(Size,
                        DL.getTypeStoreSize(LI->getType()).getFixedSize());
        continue;
      }
      if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
        Value *Op = SI->getOperand(0);
        if (Op == UsedI)
          return SI;
        Size = std::max(Size,
                        DL.getTypeStoreSize(Op->getType()).getFixedSize());
        continue;
      }

      if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(I)) {
        if (!GEP->hasAllZeroIndices())
          return GEP;
      } else if (!isa<BitCastInst>(I) && !isa<PHINode>(I) &&
                 !isa<SelectInst>(I) && !isa<AddrSpaceCastInst>(I)) {
        return I;
      }

      for (User *U : I->users())
        if (Visited.insert(cast<Instruction>(U)).second)
          Uses.push_back(std::make_pair(I, cast<Instruction>(U)));
    } while (!Uses.empty());

    return nullptr;
  }

  void visitPHINodeOrSelectInst(Instruction &I) {
    assert(isa<PHINode>(I) || isa<SelectInst>(I));
    if (I.use_empty())
      return markAsDead(I);

    // TODO: We could use SimplifyInstruction here to fold PHINodes and
    // SelectInsts. However, doing so requires changing the current
    // dead-operand-tracking mechanism. For instance, suppose neither loading
    // from %U nor %other traps. Then "load (select undef, %U, %other)" does not
    // trap either. However, if we simply replace %U with undef using the
    // current dead-operand-tracking mechanism, "load (select undef, undef,
    // %other)" may trap because the select may return the first operand
    // "undef".
    if (Value *Result = foldPHINodeOrSelectInst(I)) {
      if (Result == *U)
        // If the result of the constant fold will be the pointer, recurse
        // through the PHI/select as if we had RAUW'ed it.
        enqueueUsers(I);
      else
        // Otherwise the operand to the PHI/select is dead, and we can replace
        // it with undef.
        AS.DeadOperands.push_back(U);

      return;
    }

    if (!IsOffsetKnown)
      return PI.setAborted(&I);

    // See if we already have computed info on this node.
    uint64_t &Size = PHIOrSelectSizes[&I];
    if (!Size) {
      // This is a new PHI/Select, check for an unsafe use of it.
      if (Instruction *UnsafeI = hasUnsafePHIOrSelectUse(&I, Size))
        return PI.setAborted(UnsafeI);
    }

    // For PHI and select operands outside the alloca, we can't nuke the entire
    // phi or select -- the other side might still be relevant, so we special
    // case them here and use a separate structure to track the operands
    // themselves which should be replaced with undef.
    // FIXME: This should instead be escaped in the event we're instrumenting
    // for address sanitization.
    if (Offset.uge(AllocSize)) {
      AS.DeadOperands.push_back(U);
      return;
    }

    insertUse(I, Offset, Size);
  }

  void visitPHINode(PHINode &PN) { visitPHINodeOrSelectInst(PN); }

  void visitSelectInst(SelectInst &SI) { visitPHINodeOrSelectInst(SI); }

  /// Disable SROA entirely if there are unhandled users of the alloca.
  void visitInstruction(Instruction &I) { PI.setAborted(&I); }
};
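
// A rough, illustrative picture of what the builder above produces
// (hypothetical alloca, not taken from a test): for an 8-byte alloca with an
// i32 store at offset 0, an i32 load at offset 4, and a constant-length
// memset covering all 8 bytes, the recorded slices would be the splittable
// slices [0,4), [4,8) and [0,8). A volatile access or a non-integer-typed
// load/store at the same offsets would instead be recorded as unsplittable.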

AllocaSlices::AllocaSlices(const DataLayout &DL, AllocaInst &AI)
    :
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
      AI(AI),
#endif
      PointerEscapingInstr(nullptr) {
  SliceBuilder PB(DL, AI, *this);
  SliceBuilder::PtrInfo PtrI = PB.visitPtr(AI);
  if (PtrI.isEscaped() || PtrI.isAborted()) {
    // FIXME: We should sink the escape vs. abort info into the caller nicely,
    // possibly by just storing the PtrInfo in the AllocaSlices.
    PointerEscapingInstr = PtrI.getEscapingInst() ? PtrI.getEscapingInst()
                                                  : PtrI.getAbortingInst();
    assert(PointerEscapingInstr && "Did not track a bad instruction");
    return;
  }

  Slices.erase(
      llvm::remove_if(Slices, [](const Slice &S) { return S.isDead(); }),
      Slices.end());

#ifndef NDEBUG
  if (SROARandomShuffleSlices) {
    std::mt19937 MT(static_cast<unsigned>(
        std::chrono::system_clock::now().time_since_epoch().count()));
    std::shuffle(Slices.begin(), Slices.end(), MT);
  }
#endif

  // Sort the uses. This arranges for the offsets to be in ascending order,
  // and the sizes to be in descending order.
  llvm::sort(Slices);
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)

void AllocaSlices::print(raw_ostream &OS, const_iterator I,
                         StringRef Indent) const {
  printSlice(OS, I, Indent);
  OS << "\n";
  printUse(OS, I, Indent);
}

void AllocaSlices::printSlice(raw_ostream &OS, const_iterator I,
                              StringRef Indent) const {
  OS << Indent << "[" << I->beginOffset() << "," << I->endOffset() << ")"
     << " slice #" << (I - begin())
     << (I->isSplittable() ? " (splittable)" : "");
}

void AllocaSlices::printUse(raw_ostream &OS, const_iterator I,
                            StringRef Indent) const {
  OS << Indent << "  used by: " << *I->getUse()->getUser() << "\n";
}

void AllocaSlices::print(raw_ostream &OS) const {
  if (PointerEscapingInstr) {
    OS << "Can't analyze slices for alloca: " << AI << "\n"
       << "  A pointer to this alloca escaped by:\n"
       << "  " << *PointerEscapingInstr << "\n";
    return;
  }

  OS << "Slices of alloca: " << AI << "\n";
  for (const_iterator I = begin(), E = end(); I != E; ++I)
    print(OS, I);
}

LLVM_DUMP_METHOD void AllocaSlices::dump(const_iterator I) const {
  print(dbgs(), I);
}
LLVM_DUMP_METHOD void AllocaSlices::dump() const { print(dbgs()); }

#endif // !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
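
// An illustration of what the common-type search below yields (hypothetical
// partition, not taken from a test): if every slice of an 8-byte partition is
// loaded or stored as i64, that i64 is the common type; if one user accesses
// it as i64 and another as double, no common type exists and the widest
// byte-width integer type seen (here i64) is returned as the fallback.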

/// Walk the range of a partitioning looking for a common type to cover this
/// sequence of slices.
static Type *findCommonType(AllocaSlices::const_iterator B,
                            AllocaSlices::const_iterator E,
                            uint64_t EndOffset) {
  Type *Ty = nullptr;
  bool TyIsCommon = true;
  IntegerType *ITy = nullptr;

  // Note that we need to look at *every* alloca slice's Use to ensure we
  // always get consistent results regardless of the order of slices.
  for (AllocaSlices::const_iterator I = B; I != E; ++I) {
    Use *U = I->getUse();
    if (isa<IntrinsicInst>(*U->getUser()))
      continue;
    if (I->beginOffset() != B->beginOffset() || I->endOffset() != EndOffset)
      continue;

    Type *UserTy = nullptr;
    if (LoadInst *LI = dyn_cast<LoadInst>(U->getUser())) {
      UserTy = LI->getType();
    } else if (StoreInst *SI = dyn_cast<StoreInst>(U->getUser())) {
      UserTy = SI->getValueOperand()->getType();
    }

    if (IntegerType *UserITy = dyn_cast_or_null<IntegerType>(UserTy)) {
      // If the type is larger than the partition, skip it. We only encounter
      // this for split integer operations where we want to use the type of the
      // entity causing the split. Also skip if the type is not a byte width
      // multiple.
      if (UserITy->getBitWidth() % 8 != 0 ||
          UserITy->getBitWidth() / 8 > (EndOffset - B->beginOffset()))
        continue;

      // Track the largest bitwidth integer type used in this way in case there
      // is no common type.
      if (!ITy || ITy->getBitWidth() < UserITy->getBitWidth())
        ITy = UserITy;
    }

    // To avoid depending on the order of slices, Ty and TyIsCommon must not
    // depend on types skipped above.
    if (!UserTy || (Ty && Ty != UserTy))
      TyIsCommon = false; // Give up on anything but an iN type.
    else
      Ty = UserTy;
  }

  return TyIsCommon ? Ty : ITy;
}

/// PHI instructions that use an alloca and are subsequently loaded can be
/// rewritten to load both input pointers in the pred blocks and then PHI the
/// results, allowing the load of the alloca to be promoted.
/// From this:
///   %P2 = phi [i32* %Alloca, i32* %Other]
///   %V = load i32* %P2
/// to:
///   %V1 = load i32* %Alloca      -> will be mem2reg'd
///   ...
///   %V2 = load i32* %Other
///   ...
///   %V = phi [i32 %V1, i32 %V2]
///
/// We can do this to a select if its only uses are loads and if the operands
/// to the select can be loaded unconditionally.
///
/// FIXME: This should be hoisted into a generic utility, likely in
/// Transforms/Util/Local.h
static bool isSafePHIToSpeculate(PHINode &PN) {
  const DataLayout &DL = PN.getModule()->getDataLayout();

  // For now, we can only do this promotion if the load is in the same block
  // as the PHI, and if there are no stores between the phi and load.
  // TODO: Allow recursive phi users.
  // TODO: Allow stores.
  BasicBlock *BB = PN.getParent();
  MaybeAlign MaxAlign;
  uint64_t APWidth = DL.getIndexTypeSizeInBits(PN.getType());
  APInt MaxSize(APWidth, 0);
  bool HaveLoad = false;
  for (User *U : PN.users()) {
    LoadInst *LI = dyn_cast<LoadInst>(U);
    if (!LI || !LI->isSimple())
      return false;

    // For now we only allow loads in the same block as the PHI. This is
    // a common case that happens when instcombine merges two loads through
    // a PHI.
    if (LI->getParent() != BB)
      return false;

    // Ensure that there are no instructions between the PHI and the load that
    // could store.
    for (BasicBlock::iterator BBI(PN); &*BBI != LI; ++BBI)
      if (BBI->mayWriteToMemory())
        return false;

    uint64_t Size = DL.getTypeStoreSize(LI->getType());
    MaxAlign = std::max(MaxAlign, MaybeAlign(LI->getAlignment()));
    MaxSize = MaxSize.ult(Size) ? APInt(APWidth, Size) : MaxSize;
    HaveLoad = true;
  }

  if (!HaveLoad)
    return false;

  // We can only transform this if it is safe to push the loads into the
  // predecessor blocks. The only thing to watch out for is that we can't put
  // a possibly trapping load in the predecessor if it is a critical edge.
  for (unsigned Idx = 0, Num = PN.getNumIncomingValues(); Idx != Num; ++Idx) {
    Instruction *TI = PN.getIncomingBlock(Idx)->getTerminator();
    Value *InVal = PN.getIncomingValue(Idx);

    // If the value is produced by the terminator of the predecessor (an
    // invoke) or it has side-effects, there is no valid place to put a load
    // in the predecessor.
    if (TI == InVal || TI->mayHaveSideEffects())
      return false;

    // If the predecessor has a single successor, then the edge isn't
    // critical.
    if (TI->getNumSuccessors() == 1)
      continue;

    // If this pointer is always safe to load, or if we can prove that there
    // is already a load in the block, then we can move the load to the pred
    // block.
    if (isSafeToLoadUnconditionally(InVal, MaxAlign, MaxSize, DL, TI))
      continue;

    return false;
  }

  return true;
}

static void speculatePHINodeLoads(PHINode &PN) {
  LLVM_DEBUG(dbgs() << "    original: " << PN << "\n");

  LoadInst *SomeLoad = cast<LoadInst>(PN.user_back());
  Type *LoadTy = SomeLoad->getType();
  IRBuilderTy PHIBuilder(&PN);
  PHINode *NewPN = PHIBuilder.CreatePHI(LoadTy, PN.getNumIncomingValues(),
                                        PN.getName() + ".sroa.speculated");

  // Get the AA tags and alignment to use from one of the loads. It does not
  // matter which one we get and if any differ.
  AAMDNodes AATags;
  SomeLoad->getAAMetadata(AATags);
  const MaybeAlign Align = MaybeAlign(SomeLoad->getAlignment());

  // Rewrite all loads of the PN to use the new PHI.
  while (!PN.use_empty()) {
    LoadInst *LI = cast<LoadInst>(PN.user_back());
    LI->replaceAllUsesWith(NewPN);
    LI->eraseFromParent();
  }

  // Inject loads into all of the pred blocks.
  DenseMap<BasicBlock*, Value*> InjectedLoads;
  for (unsigned Idx = 0, Num = PN.getNumIncomingValues(); Idx != Num; ++Idx) {
    BasicBlock *Pred = PN.getIncomingBlock(Idx);
    Value *InVal = PN.getIncomingValue(Idx);

    // A PHI node is allowed to have multiple (duplicated) entries for the same
    // basic block, as long as the value is the same. So if we already injected
    // a load in the predecessor, then we should reuse the same load for all
    // duplicated entries.
    if (Value* V = InjectedLoads.lookup(Pred)) {
      NewPN->addIncoming(V, Pred);
      continue;
    }

    Instruction *TI = Pred->getTerminator();
    IRBuilderTy PredBuilder(TI);

    LoadInst *Load = PredBuilder.CreateLoad(
        LoadTy, InVal,
        (PN.getName() + ".sroa.speculate.load." + Pred->getName()));
    ++NumLoadsSpeculated;
    Load->setAlignment(Align);
    if (AATags)
      Load->setAAMetadata(AATags);
    NewPN->addIncoming(Load, Pred);
    InjectedLoads[Pred] = Load;
  }

  LLVM_DEBUG(dbgs() << "          speculated to: " << *NewPN << "\n");
  PN.eraseFromParent();
}

/// Select instructions that use an alloca and are subsequently loaded can be
/// rewritten to load both input pointers and then select between the results,
/// allowing the load of the alloca to be promoted.
/// From this:
///   %P2 = select i1 %cond, i32* %Alloca, i32* %Other
///   %V = load i32* %P2
/// to:
///   %V1 = load i32* %Alloca      -> will be mem2reg'd
///   %V2 = load i32* %Other
///   %V = select i1 %cond, i32 %V1, i32 %V2
///
/// We can do this to a select if its only uses are loads and if the operands
/// to the select can be loaded unconditionally.
static bool isSafeSelectToSpeculate(SelectInst &SI) {
  Value *TValue = SI.getTrueValue();
  Value *FValue = SI.getFalseValue();
  const DataLayout &DL = SI.getModule()->getDataLayout();

  for (User *U : SI.users()) {
    LoadInst *LI = dyn_cast<LoadInst>(U);
    if (!LI || !LI->isSimple())
      return false;

    // Both operands to the select need to be dereferenceable, either
    // absolutely (e.g. allocas) or at this point because we can see other
    // accesses to it.
    if (!isSafeToLoadUnconditionally(TValue, LI->getType(),
                                     MaybeAlign(LI->getAlignment()), DL, LI))
      return false;
    if (!isSafeToLoadUnconditionally(FValue, LI->getType(),
                                     MaybeAlign(LI->getAlignment()), DL, LI))
      return false;
  }

  return true;
}

static void speculateSelectInstLoads(SelectInst &SI) {
  LLVM_DEBUG(dbgs() << "    original: " << SI << "\n");

  IRBuilderTy IRB(&SI);
  Value *TV = SI.getTrueValue();
  Value *FV = SI.getFalseValue();
  // Replace the loads of the select with a select of two loads.
  while (!SI.use_empty()) {
    LoadInst *LI = cast<LoadInst>(SI.user_back());
    assert(LI->isSimple() && "We only speculate simple loads");

    IRB.SetInsertPoint(LI);
    LoadInst *TL = IRB.CreateLoad(LI->getType(), TV,
                                  LI->getName() + ".sroa.speculate.load.true");
    LoadInst *FL = IRB.CreateLoad(LI->getType(), FV,
                                  LI->getName() + ".sroa.speculate.load.false");
    NumLoadsSpeculated += 2;

    // Transfer alignment and AA info if present.
    TL->setAlignment(MaybeAlign(LI->getAlignment()));
    FL->setAlignment(MaybeAlign(LI->getAlignment()));

    AAMDNodes Tags;
    LI->getAAMetadata(Tags);
    if (Tags) {
      TL->setAAMetadata(Tags);
      FL->setAAMetadata(Tags);
    }

    Value *V = IRB.CreateSelect(SI.getCondition(), TL, FL,
                                LI->getName() + ".sroa.speculated");

    LLVM_DEBUG(dbgs() << "          speculated to: " << *V << "\n");
    LI->replaceAllUsesWith(V);
    LI->eraseFromParent();
  }
  SI.eraseFromParent();
}

/// Build a GEP out of a base pointer and indices.
///
/// This will return the BasePtr if that is valid, or build a new GEP
/// instruction using the IRBuilder if GEP-ing is needed.
static Value *buildGEP(IRBuilderTy &IRB, Value *BasePtr,
                       SmallVectorImpl<Value *> &Indices, Twine NamePrefix) {
  if (Indices.empty())
    return BasePtr;

  // A single zero index is a no-op, so check for this and avoid building a GEP
  // in that case.
  if (Indices.size() == 1 && cast<ConstantInt>(Indices.back())->isZero())
    return BasePtr;

  return IRB.CreateInBoundsGEP(BasePtr->getType()->getPointerElementType(),
                               BasePtr, Indices, NamePrefix + "sroa_idx");
}
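
// For intuition about the zero-index descent performed by
// getNaturalGEPWithType below (an illustrative case, not taken from a test):
// descending from a pointer to { [2 x float] } toward a target type of float
// appends the zero indices 0, 0, yielding a GEP that points at the first
// float while leaving the byte offset unchanged.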

/// Get a natural GEP off of the BasePtr walking through Ty toward
/// TargetTy without changing the offset of the pointer.
///
/// This routine assumes we've already established a properly offset GEP with
/// Indices, and arrived at the Ty type. The goal is to continue to GEP with
/// zero-indices down through type layers until we find one the same as
/// TargetTy. If we can't find one with the same type, we at least try to use
/// one with the same size. If none of that works, we just produce the GEP as
/// indicated by Indices to have the correct offset.
static Value *getNaturalGEPWithType(IRBuilderTy &IRB, const DataLayout &DL,
                                    Value *BasePtr, Type *Ty, Type *TargetTy,
                                    SmallVectorImpl<Value *> &Indices,
                                    Twine NamePrefix) {
  if (Ty == TargetTy)
    return buildGEP(IRB, BasePtr, Indices, NamePrefix);

  // Offset size to use for the indices.
  unsigned OffsetSize = DL.getIndexTypeSizeInBits(BasePtr->getType());

  // See if we can descend into a struct and locate a field with the correct
  // type.
  unsigned NumLayers = 0;
  Type *ElementTy = Ty;
  do {
    if (ElementTy->isPointerTy())
      break;

    if (ArrayType *ArrayTy = dyn_cast<ArrayType>(ElementTy)) {
      ElementTy = ArrayTy->getElementType();
      Indices.push_back(IRB.getIntN(OffsetSize, 0));
    } else if (VectorType *VectorTy = dyn_cast<VectorType>(ElementTy)) {
      ElementTy = VectorTy->getElementType();
      Indices.push_back(IRB.getInt32(0));
    } else if (StructType *STy = dyn_cast<StructType>(ElementTy)) {
      if (STy->element_begin() == STy->element_end())
        break; // Nothing left to descend into.
      ElementTy = *STy->element_begin();
      Indices.push_back(IRB.getInt32(0));
    } else {
      break;
    }
    ++NumLayers;
  } while (ElementTy != TargetTy);
  if (ElementTy != TargetTy)
    Indices.erase(Indices.end() - NumLayers, Indices.end());

  return buildGEP(IRB, BasePtr, Indices, NamePrefix);
}

/// Recursively compute indices for a natural GEP.
///
/// This is the recursive step for getNaturalGEPWithOffset that walks down the
/// element types adding appropriate indices for the GEP.
static Value *getNaturalGEPRecursively(IRBuilderTy &IRB, const DataLayout &DL,
                                       Value *Ptr, Type *Ty, APInt &Offset,
                                       Type *TargetTy,
                                       SmallVectorImpl<Value *> &Indices,
                                       Twine NamePrefix) {
  if (Offset == 0)
    return getNaturalGEPWithType(IRB, DL, Ptr, Ty, TargetTy, Indices,
                                 NamePrefix);

  // We can't recurse through pointer types.
  if (Ty->isPointerTy())
    return nullptr;

  // We try to analyze GEPs over vectors here, but note that these GEPs are
  // extremely poorly defined currently. The long-term goal is to remove GEPing
  // over a vector from the IR completely.
  if (VectorType *VecTy = dyn_cast<VectorType>(Ty)) {
    unsigned ElementSizeInBits = DL.getTypeSizeInBits(VecTy->getScalarType());
    if (ElementSizeInBits % 8 != 0) {
      // GEPs over non-multiple of 8 size vector elements are invalid.
      return nullptr;
    }
    APInt ElementSize(Offset.getBitWidth(), ElementSizeInBits / 8);
    APInt NumSkippedElements = Offset.sdiv(ElementSize);
    if (NumSkippedElements.ugt(VecTy->getNumElements()))
      return nullptr;
    Offset -= NumSkippedElements * ElementSize;
    Indices.push_back(IRB.getInt(NumSkippedElements));
    return getNaturalGEPRecursively(IRB, DL, Ptr, VecTy->getElementType(),
                                    Offset, TargetTy, Indices, NamePrefix);
  }

  if (ArrayType *ArrTy = dyn_cast<ArrayType>(Ty)) {
    Type *ElementTy = ArrTy->getElementType();
    APInt ElementSize(Offset.getBitWidth(), DL.getTypeAllocSize(ElementTy));
    APInt NumSkippedElements = Offset.sdiv(ElementSize);
    if (NumSkippedElements.ugt(ArrTy->getNumElements()))
      return nullptr;

    Offset -= NumSkippedElements * ElementSize;
    Indices.push_back(IRB.getInt(NumSkippedElements));
    return getNaturalGEPRecursively(IRB, DL, Ptr, ElementTy, Offset, TargetTy,
                                    Indices, NamePrefix);
  }

  StructType *STy = dyn_cast<StructType>(Ty);
  if (!STy)
    return nullptr;

  const StructLayout *SL = DL.getStructLayout(STy);
  uint64_t StructOffset = Offset.getZExtValue();
  if (StructOffset >= SL->getSizeInBytes())
    return nullptr;
  unsigned Index = SL->getElementContainingOffset(StructOffset);
  Offset -= APInt(Offset.getBitWidth(), SL->getElementOffset(Index));
  Type *ElementTy = STy->getElementType(Index);
  if (Offset.uge(DL.getTypeAllocSize(ElementTy)))
    return nullptr; // The offset points into alignment padding.

  Indices.push_back(IRB.getInt32(Index));
  return getNaturalGEPRecursively(IRB, DL, Ptr, ElementTy, Offset, TargetTy,
                                  Indices, NamePrefix);
}

/// Get a natural GEP from a base pointer to a particular offset and
/// resulting in a particular type.
///
/// The goal is to produce a "natural" looking GEP that works with the existing
/// composite types to arrive at the appropriate offset and element type for
/// a pointer. TargetTy is the element type the returned GEP should point-to if
/// possible. We recurse by decreasing Offset, adding the appropriate index to
/// Indices, and setting Ty to the result subtype.
///
/// If no natural GEP can be constructed, this function returns null.
static Value *getNaturalGEPWithOffset(IRBuilderTy &IRB, const DataLayout &DL,
                                      Value *Ptr, APInt Offset, Type *TargetTy,
                                      SmallVectorImpl<Value *> &Indices,
                                      Twine NamePrefix) {
  PointerType *Ty = cast<PointerType>(Ptr->getType());

  // Don't consider any GEPs through an i8* as natural unless the TargetTy is
  // an i8.
  if (Ty == IRB.getInt8PtrTy(Ty->getAddressSpace()) && TargetTy->isIntegerTy(8))
    return nullptr;

  Type *ElementTy = Ty->getElementType();
  if (!ElementTy->isSized())
    return nullptr; // We can't GEP through an unsized element.
  APInt ElementSize(Offset.getBitWidth(), DL.getTypeAllocSize(ElementTy));
  if (ElementSize == 0)
    return nullptr; // Zero-length arrays can't help us build a natural GEP.
  APInt NumSkippedElements = Offset.sdiv(ElementSize);

  Offset -= NumSkippedElements * ElementSize;
  Indices.push_back(IRB.getInt(NumSkippedElements));
  return getNaturalGEPRecursively(IRB, DL, Ptr, ElementTy, Offset, TargetTy,
                                  Indices, NamePrefix);
}
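
// A worked, hypothetical example: for a Ptr of type {i32, [4 x i8]}*, an
// Offset of 5, and a TargetTy of i8, the functions above produce the index
// list [0, 1, 1] (skip zero whole structs, pick the array field at byte 4,
// then element 1 within it), i.e. an inbounds GEP yielding an i8* at byte
// offset 5.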
1566 ///
1567 /// This tries very hard to compute a "natural" GEP which arrives at the offset
1568 /// and produces the pointer type desired. Where it cannot, it will try to use
1569 /// the natural GEP to arrive at the offset and bitcast to the type. Where that
1570 /// fails, it will try to use an existing i8* and GEP to the byte offset and
1571 /// bitcast to the type.
1572 ///
1573 /// The strategy for finding the more natural GEPs is to peel off layers of the
1574 /// pointer, walking back through bit casts and GEPs, searching for a base
1575 /// pointer from which we can compute a natural GEP with the desired
1576 /// properties. The algorithm tries to fold as many constant indices into
1577 /// a single GEP as possible, thus making each GEP more independent of the
1578 /// surrounding code.
1579 static Value *getAdjustedPtr(IRBuilderTy &IRB, const DataLayout &DL, Value *Ptr,
1580                              APInt Offset, Type *PointerTy, Twine NamePrefix) {
1581   // Even though we don't look through PHI nodes, we could be called on an
1582   // instruction in an unreachable block, which may be on a cycle.
1583   SmallPtrSet<Value *, 4> Visited;
1584   Visited.insert(Ptr);
1585   SmallVector<Value *, 4> Indices;
1586
1587   // We may end up computing an offset pointer that has the wrong type. If we
1588   // never are able to compute one directly that has the correct type, we'll
1589   // fall back to it, so keep it and the base it was computed from around here.
1590   Value *OffsetPtr = nullptr;
1591   Value *OffsetBasePtr;
1592
1593   // Remember any i8 pointer we come across to re-use if we need to do a raw
1594   // byte offset.
1595   Value *Int8Ptr = nullptr;
1596   APInt Int8PtrOffset(Offset.getBitWidth(), 0);
1597
1598   PointerType *TargetPtrTy = cast<PointerType>(PointerTy);
1599   Type *TargetTy = TargetPtrTy->getElementType();
1600
1601   // Because of `addrspacecast`, `Ptr` (the storage pointer) may have a
1602   // different address space from the expected `PointerTy` (the pointer to be
1603   // used). Adjust the pointer type based on the original storage pointer.
1604   auto AS = cast<PointerType>(Ptr->getType())->getAddressSpace();
1605   PointerTy = TargetTy->getPointerTo(AS);
1606
1607   do {
1608     // First fold any existing GEPs into the offset.
1609     while (GEPOperator *GEP = dyn_cast<GEPOperator>(Ptr)) {
1610       APInt GEPOffset(Offset.getBitWidth(), 0);
1611       if (!GEP->accumulateConstantOffset(DL, GEPOffset))
1612         break;
1613       Offset += GEPOffset;
1614       Ptr = GEP->getPointerOperand();
1615       if (!Visited.insert(Ptr).second)
1616         break;
1617     }
1618
1619     // See if we can perform a natural GEP here.
1620     Indices.clear();
1621     if (Value *P = getNaturalGEPWithOffset(IRB, DL, Ptr, Offset, TargetTy,
1622                                            Indices, NamePrefix)) {
1623       // If we have a new natural pointer at the offset, clear out any old
1624       // offset pointer we computed. Unless it is the base pointer or
1625       // a non-instruction, we built a GEP we don't need. Zap it.
1626       if (OffsetPtr && OffsetPtr != OffsetBasePtr)
1627         if (Instruction *I = dyn_cast<Instruction>(OffsetPtr)) {
1628           assert(I->use_empty() && "Built a GEP with uses somehow!");
1629           I->eraseFromParent();
1630         }
1631       OffsetPtr = P;
1632       OffsetBasePtr = Ptr;
1633       // If we also found a pointer of the right type, we're done.
1634       if (P->getType() == PointerTy)
1635         break;
1636     }
1637
1638     // Stash this pointer if we've found an i8*.
1639     if (Ptr->getType()->isIntegerTy(8)) {
1640       Int8Ptr = Ptr;
1641       Int8PtrOffset = Offset;
1642     }
1643
1644     // Peel off a layer of the pointer and update the offset appropriately.
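    // (Rough illustration of the peeling: given
    //      %p = bitcast [4 x i32]* %a to i8*
    //  and a byte offset of 8, the bitcast is stripped back to %a, where a
    //  natural GEP such as
    //      getelementptr [4 x i32], [4 x i32]* %a, i64 0, i64 2
    //  can typically be formed by getNaturalGEPWithOffset above.)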
1645 if (Operator::getOpcode(Ptr) == Instruction::BitCast) { 1646 Ptr = cast<Operator>(Ptr)->getOperand(0); 1647 } else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(Ptr)) { 1648 if (GA->isInterposable()) 1649 break; 1650 Ptr = GA->getAliasee(); 1651 } else { 1652 break; 1653 } 1654 assert(Ptr->getType()->isPointerTy() && "Unexpected operand type!"); 1655 } while (Visited.insert(Ptr).second); 1656 1657 if (!OffsetPtr) { 1658 if (!Int8Ptr) { 1659 Int8Ptr = IRB.CreateBitCast( 1660 Ptr, IRB.getInt8PtrTy(PointerTy->getPointerAddressSpace()), 1661 NamePrefix + "sroa_raw_cast"); 1662 Int8PtrOffset = Offset; 1663 } 1664 1665 OffsetPtr = Int8PtrOffset == 0 1666 ? Int8Ptr 1667 : IRB.CreateInBoundsGEP(IRB.getInt8Ty(), Int8Ptr, 1668 IRB.getInt(Int8PtrOffset), 1669 NamePrefix + "sroa_raw_idx"); 1670 } 1671 Ptr = OffsetPtr; 1672 1673 // On the off chance we were targeting i8*, guard the bitcast here. 1674 if (cast<PointerType>(Ptr->getType()) != TargetPtrTy) { 1675 Ptr = IRB.CreatePointerBitCastOrAddrSpaceCast(Ptr, 1676 TargetPtrTy, 1677 NamePrefix + "sroa_cast"); 1678 } 1679 1680 return Ptr; 1681 } 1682 1683 /// Compute the adjusted alignment for a load or store from an offset. 1684 static Align getAdjustedAlignment(Instruction *I, uint64_t Offset, 1685 const DataLayout &DL) { 1686 MaybeAlign Alignment; 1687 Type *Ty; 1688 if (auto *LI = dyn_cast<LoadInst>(I)) { 1689 Alignment = MaybeAlign(LI->getAlignment()); 1690 Ty = LI->getType(); 1691 } else if (auto *SI = dyn_cast<StoreInst>(I)) { 1692 Alignment = MaybeAlign(SI->getAlignment()); 1693 Ty = SI->getValueOperand()->getType(); 1694 } else { 1695 llvm_unreachable("Only loads and stores are allowed!"); 1696 } 1697 return commonAlignment(DL.getValueOrABITypeAlignment(Alignment, Ty), Offset); 1698 } 1699 1700 /// Test whether we can convert a value from the old to the new type. 1701 /// 1702 /// This predicate should be used to guard calls to convertValue in order to 1703 /// ensure that we only try to convert viable values. The strategy is that we 1704 /// will peel off single element struct and array wrappings to get to an 1705 /// underlying value, and convert that value. 1706 static bool canConvertValue(const DataLayout &DL, Type *OldTy, Type *NewTy) { 1707 if (OldTy == NewTy) 1708 return true; 1709 1710 // For integer types, we can't handle any bit-width differences. This would 1711 // break both vector conversions with extension and introduce endianness 1712 // issues when in conjunction with loads and stores. 1713 if (isa<IntegerType>(OldTy) && isa<IntegerType>(NewTy)) { 1714 assert(cast<IntegerType>(OldTy)->getBitWidth() != 1715 cast<IntegerType>(NewTy)->getBitWidth() && 1716 "We can't have the same bitwidth for different int types"); 1717 return false; 1718 } 1719 1720 if (DL.getTypeSizeInBits(NewTy) != DL.getTypeSizeInBits(OldTy)) 1721 return false; 1722 if (!NewTy->isSingleValueType() || !OldTy->isSingleValueType()) 1723 return false; 1724 1725 // We can convert pointers to integers and vice-versa. Same for vectors 1726 // of pointers and integers. 1727 OldTy = OldTy->getScalarType(); 1728 NewTy = NewTy->getScalarType(); 1729 if (NewTy->isPointerTy() || OldTy->isPointerTy()) { 1730 if (NewTy->isPointerTy() && OldTy->isPointerTy()) { 1731 return cast<PointerType>(NewTy)->getPointerAddressSpace() == 1732 cast<PointerType>(OldTy)->getPointerAddressSpace(); 1733 } 1734 1735 // We can convert integers to integral pointers, but not to non-integral 1736 // pointers. 
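  // (For illustration: with "ni:1" in the datalayout string, converting an
  //  i64 to an i8 addrspace(1)* is rejected here, while converting it to a
  //  pointer in an integral address space is allowed.)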
1737   if (OldTy->isIntegerTy())
1738     return !DL.isNonIntegralPointerType(NewTy);
1739
1740   // We can convert integral pointers to integers, but non-integral pointers
1741   // need to remain pointers.
1742   if (!DL.isNonIntegralPointerType(OldTy))
1743     return NewTy->isIntegerTy();
1744
1745   return false;
1746   }
1747
1748   return true;
1749 }
1750
1751 /// Generic routine to convert an SSA value to a value of a different
1752 /// type.
1753 ///
1754 /// This will try various different casting techniques, such as bitcasts,
1755 /// inttoptr, and ptrtoint casts. Use the \c canConvertValue predicate to test
1756 /// two types for viability with this routine.
1757 static Value *convertValue(const DataLayout &DL, IRBuilderTy &IRB, Value *V,
1758                            Type *NewTy) {
1759   Type *OldTy = V->getType();
1760   assert(canConvertValue(DL, OldTy, NewTy) && "Value not convertible to type");
1761
1762   if (OldTy == NewTy)
1763     return V;
1764
1765   assert(!(isa<IntegerType>(OldTy) && isa<IntegerType>(NewTy)) &&
1766          "Integer types must be the exact same to convert.");
1767
1768   // See if we need inttoptr for this type pair. A cast involving both scalars
1769   // and vectors requires an additional bitcast.
1770   if (OldTy->isIntOrIntVectorTy() && NewTy->isPtrOrPtrVectorTy()) {
1771     // Expand <2 x i32> to i8* --> <2 x i32> to i64 to i8*
1772     if (OldTy->isVectorTy() && !NewTy->isVectorTy())
1773       return IRB.CreateIntToPtr(IRB.CreateBitCast(V, DL.getIntPtrType(NewTy)),
1774                                 NewTy);
1775
1776     // Expand i128 to <2 x i8*> --> i128 to <2 x i64> to <2 x i8*>
1777     if (!OldTy->isVectorTy() && NewTy->isVectorTy())
1778       return IRB.CreateIntToPtr(IRB.CreateBitCast(V, DL.getIntPtrType(NewTy)),
1779                                 NewTy);
1780
1781     return IRB.CreateIntToPtr(V, NewTy);
1782   }
1783
1784   // See if we need ptrtoint for this type pair. A cast involving both scalars
1785   // and vectors requires an additional bitcast.
1786   if (OldTy->isPtrOrPtrVectorTy() && NewTy->isIntOrIntVectorTy()) {
1787     // Expand <2 x i8*> to i128 --> <2 x i8*> to <2 x i64> to i128
1788     if (OldTy->isVectorTy() && !NewTy->isVectorTy())
1789       return IRB.CreateBitCast(IRB.CreatePtrToInt(V, DL.getIntPtrType(OldTy)),
1790                                NewTy);
1791
1792     // Expand i8* to <2 x i32> --> i8* to i64 to <2 x i32>
1793     if (!OldTy->isVectorTy() && NewTy->isVectorTy())
1794       return IRB.CreateBitCast(IRB.CreatePtrToInt(V, DL.getIntPtrType(OldTy)),
1795                                NewTy);
1796
1797     return IRB.CreatePtrToInt(V, NewTy);
1798   }
1799
1800   return IRB.CreateBitCast(V, NewTy);
1801 }
1802
1803 /// Test whether the given slice use can be promoted to a vector.
1804 ///
1805 /// This function is called to test each entry in a partition which is slated
1806 /// for a single slice.
1807 static bool isVectorPromotionViableForSlice(Partition &P, const Slice &S,
1808                                             VectorType *Ty,
1809                                             uint64_t ElementSize,
1810                                             const DataLayout &DL) {
1811   // First validate the slice offsets.
1812   uint64_t BeginOffset =
1813       std::max(S.beginOffset(), P.beginOffset()) - P.beginOffset();
1814   uint64_t BeginIndex = BeginOffset / ElementSize;
1815   if (BeginIndex * ElementSize != BeginOffset ||
1816       BeginIndex >= Ty->getNumElements())
1817     return false;
1818   uint64_t EndOffset =
1819       std::min(S.endOffset(), P.endOffset()) - P.beginOffset();
1820   uint64_t EndIndex = EndOffset / ElementSize;
1821   if (EndIndex * ElementSize != EndOffset || EndIndex > Ty->getNumElements())
1822     return false;
1823
1824   assert(EndIndex > BeginIndex && "Empty vector!");
1825   uint64_t NumElements = EndIndex - BeginIndex;
1826   Type *SliceTy = (NumElements == 1)
1827                       ?
Ty->getElementType() 1828 : VectorType::get(Ty->getElementType(), NumElements); 1829 1830 Type *SplitIntTy = 1831 Type::getIntNTy(Ty->getContext(), NumElements * ElementSize * 8); 1832 1833 Use *U = S.getUse(); 1834 1835 if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(U->getUser())) { 1836 if (MI->isVolatile()) 1837 return false; 1838 if (!S.isSplittable()) 1839 return false; // Skip any unsplittable intrinsics. 1840 } else if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(U->getUser())) { 1841 if (!II->isLifetimeStartOrEnd()) 1842 return false; 1843 } else if (U->get()->getType()->getPointerElementType()->isStructTy()) { 1844 // Disable vector promotion when there are loads or stores of an FCA. 1845 return false; 1846 } else if (LoadInst *LI = dyn_cast<LoadInst>(U->getUser())) { 1847 if (LI->isVolatile()) 1848 return false; 1849 Type *LTy = LI->getType(); 1850 if (P.beginOffset() > S.beginOffset() || P.endOffset() < S.endOffset()) { 1851 assert(LTy->isIntegerTy()); 1852 LTy = SplitIntTy; 1853 } 1854 if (!canConvertValue(DL, SliceTy, LTy)) 1855 return false; 1856 } else if (StoreInst *SI = dyn_cast<StoreInst>(U->getUser())) { 1857 if (SI->isVolatile()) 1858 return false; 1859 Type *STy = SI->getValueOperand()->getType(); 1860 if (P.beginOffset() > S.beginOffset() || P.endOffset() < S.endOffset()) { 1861 assert(STy->isIntegerTy()); 1862 STy = SplitIntTy; 1863 } 1864 if (!canConvertValue(DL, STy, SliceTy)) 1865 return false; 1866 } else { 1867 return false; 1868 } 1869 1870 return true; 1871 } 1872 1873 /// Test whether the given alloca partitioning and range of slices can be 1874 /// promoted to a vector. 1875 /// 1876 /// This is a quick test to check whether we can rewrite a particular alloca 1877 /// partition (and its newly formed alloca) into a vector alloca with only 1878 /// whole-vector loads and stores such that it could be promoted to a vector 1879 /// SSA value. We only can ensure this for a limited set of operations, and we 1880 /// don't want to do the rewrites unless we are confident that the result will 1881 /// be promotable, so we have an early test here. 1882 static VectorType *isVectorPromotionViable(Partition &P, const DataLayout &DL) { 1883 // Collect the candidate types for vector-based promotion. Also track whether 1884 // we have different element types. 1885 SmallVector<VectorType *, 4> CandidateTys; 1886 Type *CommonEltTy = nullptr; 1887 bool HaveCommonEltTy = true; 1888 auto CheckCandidateType = [&](Type *Ty) { 1889 if (auto *VTy = dyn_cast<VectorType>(Ty)) { 1890 // Return if bitcast to vectors is different for total size in bits. 1891 if (!CandidateTys.empty()) { 1892 VectorType *V = CandidateTys[0]; 1893 if (DL.getTypeSizeInBits(VTy) != DL.getTypeSizeInBits(V)) { 1894 CandidateTys.clear(); 1895 return; 1896 } 1897 } 1898 CandidateTys.push_back(VTy); 1899 if (!CommonEltTy) 1900 CommonEltTy = VTy->getElementType(); 1901 else if (CommonEltTy != VTy->getElementType()) 1902 HaveCommonEltTy = false; 1903 } 1904 }; 1905 // Consider any loads or stores that are the exact size of the slice. 1906 for (const Slice &S : P) 1907 if (S.beginOffset() == P.beginOffset() && 1908 S.endOffset() == P.endOffset()) { 1909 if (auto *LI = dyn_cast<LoadInst>(S.getUse()->getUser())) 1910 CheckCandidateType(LI->getType()); 1911 else if (auto *SI = dyn_cast<StoreInst>(S.getUse()->getUser())) 1912 CheckCandidateType(SI->getValueOperand()->getType()); 1913 } 1914 1915 // If we didn't find a vector type, nothing to do here. 
1916 if (CandidateTys.empty()) 1917 return nullptr; 1918 1919 // Remove non-integer vector types if we had multiple common element types. 1920 // FIXME: It'd be nice to replace them with integer vector types, but we can't 1921 // do that until all the backends are known to produce good code for all 1922 // integer vector types. 1923 if (!HaveCommonEltTy) { 1924 CandidateTys.erase( 1925 llvm::remove_if(CandidateTys, 1926 [](VectorType *VTy) { 1927 return !VTy->getElementType()->isIntegerTy(); 1928 }), 1929 CandidateTys.end()); 1930 1931 // If there were no integer vector types, give up. 1932 if (CandidateTys.empty()) 1933 return nullptr; 1934 1935 // Rank the remaining candidate vector types. This is easy because we know 1936 // they're all integer vectors. We sort by ascending number of elements. 1937 auto RankVectorTypes = [&DL](VectorType *RHSTy, VectorType *LHSTy) { 1938 (void)DL; 1939 assert(DL.getTypeSizeInBits(RHSTy) == DL.getTypeSizeInBits(LHSTy) && 1940 "Cannot have vector types of different sizes!"); 1941 assert(RHSTy->getElementType()->isIntegerTy() && 1942 "All non-integer types eliminated!"); 1943 assert(LHSTy->getElementType()->isIntegerTy() && 1944 "All non-integer types eliminated!"); 1945 return RHSTy->getNumElements() < LHSTy->getNumElements(); 1946 }; 1947 llvm::sort(CandidateTys, RankVectorTypes); 1948 CandidateTys.erase( 1949 std::unique(CandidateTys.begin(), CandidateTys.end(), RankVectorTypes), 1950 CandidateTys.end()); 1951 } else { 1952 // The only way to have the same element type in every vector type is to 1953 // have the same vector type. Check that and remove all but one. 1954 #ifndef NDEBUG 1955 for (VectorType *VTy : CandidateTys) { 1956 assert(VTy->getElementType() == CommonEltTy && 1957 "Unaccounted for element type!"); 1958 assert(VTy == CandidateTys[0] && 1959 "Different vector types with the same element type!"); 1960 } 1961 #endif 1962 CandidateTys.resize(1); 1963 } 1964 1965 // Try each vector type, and return the one which works. 1966 auto CheckVectorTypeForPromotion = [&](VectorType *VTy) { 1967 uint64_t ElementSize = DL.getTypeSizeInBits(VTy->getElementType()); 1968 1969 // While the definition of LLVM vectors is bitpacked, we don't support sizes 1970 // that aren't byte sized. 1971 if (ElementSize % 8) 1972 return false; 1973 assert((DL.getTypeSizeInBits(VTy) % 8) == 0 && 1974 "vector size not a multiple of element size?"); 1975 ElementSize /= 8; 1976 1977 for (const Slice &S : P) 1978 if (!isVectorPromotionViableForSlice(P, S, VTy, ElementSize, DL)) 1979 return false; 1980 1981 for (const Slice *S : P.splitSliceTails()) 1982 if (!isVectorPromotionViableForSlice(P, *S, VTy, ElementSize, DL)) 1983 return false; 1984 1985 return true; 1986 }; 1987 for (VectorType *VTy : CandidateTys) 1988 if (CheckVectorTypeForPromotion(VTy)) 1989 return VTy; 1990 1991 return nullptr; 1992 } 1993 1994 /// Test whether a slice of an alloca is valid for integer widening. 1995 /// 1996 /// This implements the necessary checking for the \c isIntegerWideningViable 1997 /// test below on a single slice of the alloca. 
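/// For a rough illustration (subject to the checks below): two i16 loads
/// covering bytes [0,2) and [2,4) of a 4-byte alloca can typically be
/// rewritten as extractions from a single widened i32 load of the alloca.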
1998 static bool isIntegerWideningViableForSlice(const Slice &S, 1999 uint64_t AllocBeginOffset, 2000 Type *AllocaTy, 2001 const DataLayout &DL, 2002 bool &WholeAllocaOp) { 2003 uint64_t Size = DL.getTypeStoreSize(AllocaTy); 2004 2005 uint64_t RelBegin = S.beginOffset() - AllocBeginOffset; 2006 uint64_t RelEnd = S.endOffset() - AllocBeginOffset; 2007 2008 // We can't reasonably handle cases where the load or store extends past 2009 // the end of the alloca's type and into its padding. 2010 if (RelEnd > Size) 2011 return false; 2012 2013 Use *U = S.getUse(); 2014 2015 if (LoadInst *LI = dyn_cast<LoadInst>(U->getUser())) { 2016 if (LI->isVolatile()) 2017 return false; 2018 // We can't handle loads that extend past the allocated memory. 2019 if (DL.getTypeStoreSize(LI->getType()) > Size) 2020 return false; 2021 // So far, AllocaSliceRewriter does not support widening split slice tails 2022 // in rewriteIntegerLoad. 2023 if (S.beginOffset() < AllocBeginOffset) 2024 return false; 2025 // Note that we don't count vector loads or stores as whole-alloca 2026 // operations which enable integer widening because we would prefer to use 2027 // vector widening instead. 2028 if (!isa<VectorType>(LI->getType()) && RelBegin == 0 && RelEnd == Size) 2029 WholeAllocaOp = true; 2030 if (IntegerType *ITy = dyn_cast<IntegerType>(LI->getType())) { 2031 if (ITy->getBitWidth() < DL.getTypeStoreSizeInBits(ITy)) 2032 return false; 2033 } else if (RelBegin != 0 || RelEnd != Size || 2034 !canConvertValue(DL, AllocaTy, LI->getType())) { 2035 // Non-integer loads need to be convertible from the alloca type so that 2036 // they are promotable. 2037 return false; 2038 } 2039 } else if (StoreInst *SI = dyn_cast<StoreInst>(U->getUser())) { 2040 Type *ValueTy = SI->getValueOperand()->getType(); 2041 if (SI->isVolatile()) 2042 return false; 2043 // We can't handle stores that extend past the allocated memory. 2044 if (DL.getTypeStoreSize(ValueTy) > Size) 2045 return false; 2046 // So far, AllocaSliceRewriter does not support widening split slice tails 2047 // in rewriteIntegerStore. 2048 if (S.beginOffset() < AllocBeginOffset) 2049 return false; 2050 // Note that we don't count vector loads or stores as whole-alloca 2051 // operations which enable integer widening because we would prefer to use 2052 // vector widening instead. 2053 if (!isa<VectorType>(ValueTy) && RelBegin == 0 && RelEnd == Size) 2054 WholeAllocaOp = true; 2055 if (IntegerType *ITy = dyn_cast<IntegerType>(ValueTy)) { 2056 if (ITy->getBitWidth() < DL.getTypeStoreSizeInBits(ITy)) 2057 return false; 2058 } else if (RelBegin != 0 || RelEnd != Size || 2059 !canConvertValue(DL, ValueTy, AllocaTy)) { 2060 // Non-integer stores need to be convertible to the alloca type so that 2061 // they are promotable. 2062 return false; 2063 } 2064 } else if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(U->getUser())) { 2065 if (MI->isVolatile() || !isa<Constant>(MI->getLength())) 2066 return false; 2067 if (!S.isSplittable()) 2068 return false; // Skip any unsplittable intrinsics. 2069 } else if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(U->getUser())) { 2070 if (!II->isLifetimeStartOrEnd()) 2071 return false; 2072 } else { 2073 return false; 2074 } 2075 2076 return true; 2077 } 2078 2079 /// Test whether the given alloca partition's integer operations can be 2080 /// widened to promotable ones. 
2081 /// 2082 /// This is a quick test to check whether we can rewrite the integer loads and 2083 /// stores to a particular alloca into wider loads and stores and be able to 2084 /// promote the resulting alloca. 2085 static bool isIntegerWideningViable(Partition &P, Type *AllocaTy, 2086 const DataLayout &DL) { 2087 uint64_t SizeInBits = DL.getTypeSizeInBits(AllocaTy); 2088 // Don't create integer types larger than the maximum bitwidth. 2089 if (SizeInBits > IntegerType::MAX_INT_BITS) 2090 return false; 2091 2092 // Don't try to handle allocas with bit-padding. 2093 if (SizeInBits != DL.getTypeStoreSizeInBits(AllocaTy)) 2094 return false; 2095 2096 // We need to ensure that an integer type with the appropriate bitwidth can 2097 // be converted to the alloca type, whatever that is. We don't want to force 2098 // the alloca itself to have an integer type if there is a more suitable one. 2099 Type *IntTy = Type::getIntNTy(AllocaTy->getContext(), SizeInBits); 2100 if (!canConvertValue(DL, AllocaTy, IntTy) || 2101 !canConvertValue(DL, IntTy, AllocaTy)) 2102 return false; 2103 2104 // While examining uses, we ensure that the alloca has a covering load or 2105 // store. We don't want to widen the integer operations only to fail to 2106 // promote due to some other unsplittable entry (which we may make splittable 2107 // later). However, if there are only splittable uses, go ahead and assume 2108 // that we cover the alloca. 2109 // FIXME: We shouldn't consider split slices that happen to start in the 2110 // partition here... 2111 bool WholeAllocaOp = 2112 P.begin() != P.end() ? false : DL.isLegalInteger(SizeInBits); 2113 2114 for (const Slice &S : P) 2115 if (!isIntegerWideningViableForSlice(S, P.beginOffset(), AllocaTy, DL, 2116 WholeAllocaOp)) 2117 return false; 2118 2119 for (const Slice *S : P.splitSliceTails()) 2120 if (!isIntegerWideningViableForSlice(*S, P.beginOffset(), AllocaTy, DL, 2121 WholeAllocaOp)) 2122 return false; 2123 2124 return WholeAllocaOp; 2125 } 2126 2127 static Value *extractInteger(const DataLayout &DL, IRBuilderTy &IRB, Value *V, 2128 IntegerType *Ty, uint64_t Offset, 2129 const Twine &Name) { 2130 LLVM_DEBUG(dbgs() << " start: " << *V << "\n"); 2131 IntegerType *IntTy = cast<IntegerType>(V->getType()); 2132 assert(DL.getTypeStoreSize(Ty) + Offset <= DL.getTypeStoreSize(IntTy) && 2133 "Element extends past full value"); 2134 uint64_t ShAmt = 8 * Offset; 2135 if (DL.isBigEndian()) 2136 ShAmt = 8 * (DL.getTypeStoreSize(IntTy) - DL.getTypeStoreSize(Ty) - Offset); 2137 if (ShAmt) { 2138 V = IRB.CreateLShr(V, ShAmt, Name + ".shift"); 2139 LLVM_DEBUG(dbgs() << " shifted: " << *V << "\n"); 2140 } 2141 assert(Ty->getBitWidth() <= IntTy->getBitWidth() && 2142 "Cannot extract to a larger integer!"); 2143 if (Ty != IntTy) { 2144 V = IRB.CreateTrunc(V, Ty, Name + ".trunc"); 2145 LLVM_DEBUG(dbgs() << " trunced: " << *V << "\n"); 2146 } 2147 return V; 2148 } 2149 2150 static Value *insertInteger(const DataLayout &DL, IRBuilderTy &IRB, Value *Old, 2151 Value *V, uint64_t Offset, const Twine &Name) { 2152 IntegerType *IntTy = cast<IntegerType>(Old->getType()); 2153 IntegerType *Ty = cast<IntegerType>(V->getType()); 2154 assert(Ty->getBitWidth() <= IntTy->getBitWidth() && 2155 "Cannot insert a larger integer!"); 2156 LLVM_DEBUG(dbgs() << " start: " << *V << "\n"); 2157 if (Ty != IntTy) { 2158 V = IRB.CreateZExt(V, IntTy, Name + ".ext"); 2159 LLVM_DEBUG(dbgs() << " extended: " << *V << "\n"); 2160 } 2161 assert(DL.getTypeStoreSize(Ty) + Offset <= DL.getTypeStoreSize(IntTy) && 2162 
"Element store outside of alloca store"); 2163 uint64_t ShAmt = 8 * Offset; 2164 if (DL.isBigEndian()) 2165 ShAmt = 8 * (DL.getTypeStoreSize(IntTy) - DL.getTypeStoreSize(Ty) - Offset); 2166 if (ShAmt) { 2167 V = IRB.CreateShl(V, ShAmt, Name + ".shift"); 2168 LLVM_DEBUG(dbgs() << " shifted: " << *V << "\n"); 2169 } 2170 2171 if (ShAmt || Ty->getBitWidth() < IntTy->getBitWidth()) { 2172 APInt Mask = ~Ty->getMask().zext(IntTy->getBitWidth()).shl(ShAmt); 2173 Old = IRB.CreateAnd(Old, Mask, Name + ".mask"); 2174 LLVM_DEBUG(dbgs() << " masked: " << *Old << "\n"); 2175 V = IRB.CreateOr(Old, V, Name + ".insert"); 2176 LLVM_DEBUG(dbgs() << " inserted: " << *V << "\n"); 2177 } 2178 return V; 2179 } 2180 2181 static Value *extractVector(IRBuilderTy &IRB, Value *V, unsigned BeginIndex, 2182 unsigned EndIndex, const Twine &Name) { 2183 VectorType *VecTy = cast<VectorType>(V->getType()); 2184 unsigned NumElements = EndIndex - BeginIndex; 2185 assert(NumElements <= VecTy->getNumElements() && "Too many elements!"); 2186 2187 if (NumElements == VecTy->getNumElements()) 2188 return V; 2189 2190 if (NumElements == 1) { 2191 V = IRB.CreateExtractElement(V, IRB.getInt32(BeginIndex), 2192 Name + ".extract"); 2193 LLVM_DEBUG(dbgs() << " extract: " << *V << "\n"); 2194 return V; 2195 } 2196 2197 SmallVector<Constant *, 8> Mask; 2198 Mask.reserve(NumElements); 2199 for (unsigned i = BeginIndex; i != EndIndex; ++i) 2200 Mask.push_back(IRB.getInt32(i)); 2201 V = IRB.CreateShuffleVector(V, UndefValue::get(V->getType()), 2202 ConstantVector::get(Mask), Name + ".extract"); 2203 LLVM_DEBUG(dbgs() << " shuffle: " << *V << "\n"); 2204 return V; 2205 } 2206 2207 static Value *insertVector(IRBuilderTy &IRB, Value *Old, Value *V, 2208 unsigned BeginIndex, const Twine &Name) { 2209 VectorType *VecTy = cast<VectorType>(Old->getType()); 2210 assert(VecTy && "Can only insert a vector into a vector"); 2211 2212 VectorType *Ty = dyn_cast<VectorType>(V->getType()); 2213 if (!Ty) { 2214 // Single element to insert. 2215 V = IRB.CreateInsertElement(Old, V, IRB.getInt32(BeginIndex), 2216 Name + ".insert"); 2217 LLVM_DEBUG(dbgs() << " insert: " << *V << "\n"); 2218 return V; 2219 } 2220 2221 assert(Ty->getNumElements() <= VecTy->getNumElements() && 2222 "Too many elements!"); 2223 if (Ty->getNumElements() == VecTy->getNumElements()) { 2224 assert(V->getType() == VecTy && "Vector type mismatch"); 2225 return V; 2226 } 2227 unsigned EndIndex = BeginIndex + Ty->getNumElements(); 2228 2229 // When inserting a smaller vector into the larger to store, we first 2230 // use a shuffle vector to widen it with undef elements, and then 2231 // a second shuffle vector to select between the loaded vector and the 2232 // incoming vector. 
2233   SmallVector<Constant *, 8> Mask;
2234   Mask.reserve(VecTy->getNumElements());
2235   for (unsigned i = 0; i != VecTy->getNumElements(); ++i)
2236     if (i >= BeginIndex && i < EndIndex)
2237       Mask.push_back(IRB.getInt32(i - BeginIndex));
2238     else
2239       Mask.push_back(UndefValue::get(IRB.getInt32Ty()));
2240   V = IRB.CreateShuffleVector(V, UndefValue::get(V->getType()),
2241                               ConstantVector::get(Mask), Name + ".expand");
2242   LLVM_DEBUG(dbgs() << " shuffle: " << *V << "\n");
2243
2244   Mask.clear();
2245   for (unsigned i = 0; i != VecTy->getNumElements(); ++i)
2246     Mask.push_back(IRB.getInt1(i >= BeginIndex && i < EndIndex));
2247
2248   V = IRB.CreateSelect(ConstantVector::get(Mask), V, Old, Name + "blend");
2249
2250   LLVM_DEBUG(dbgs() << " blend: " << *V << "\n");
2251   return V;
2252 }
2253
2254 /// Visitor to rewrite instructions using a particular slice of an alloca
2255 /// to use a new alloca.
2256 ///
2257 /// Also implements the rewriting to vector-based accesses when the partition
2258 /// passes the isVectorPromotionViable predicate. Most of the rewriting logic
2259 /// lives here.
2260 class llvm::sroa::AllocaSliceRewriter
2261     : public InstVisitor<AllocaSliceRewriter, bool> {
2262   // Befriend the base class so it can delegate to private visit methods.
2263   friend class InstVisitor<AllocaSliceRewriter, bool>;
2264
2265   using Base = InstVisitor<AllocaSliceRewriter, bool>;
2266
2267   const DataLayout &DL;
2268   AllocaSlices &AS;
2269   SROA &Pass;
2270   AllocaInst &OldAI, &NewAI;
2271   const uint64_t NewAllocaBeginOffset, NewAllocaEndOffset;
2272   Type *NewAllocaTy;
2273
2274   // This is a convenience and flag variable that will be null unless the new
2275   // alloca's integer operations should be widened to this integer type due to
2276   // passing isIntegerWideningViable above. If it is non-null, the desired
2277   // integer type will be stored here for easy access during rewriting.
2278   IntegerType *IntTy;
2279
2280   // If we are rewriting an alloca partition which can be written as pure
2281   // vector operations, we stash extra information here. When VecTy is
2282   // non-null, we have some strict guarantees about the rewritten alloca:
2283   //   - The new alloca is exactly the size of the vector type here.
2284   //   - The accesses all either map to the entire vector or to a single
2285   //     element.
2286   //   - The set of accessing instructions is only one of those handled above
2287   //     in isVectorPromotionViable. Generally these are the same access kinds
2288   //     which are promotable via mem2reg.
2289   VectorType *VecTy;
2290   Type *ElementTy;
2291   uint64_t ElementSize;
2292
2293   // The original offset of the slice currently being rewritten relative to
2294   // the original alloca.
2295   uint64_t BeginOffset = 0;
2296   uint64_t EndOffset = 0;
2297
2298   // The new offsets of the slice currently being rewritten relative to the
2299   // original alloca.
2300   uint64_t NewBeginOffset = 0, NewEndOffset = 0;
2301
2302   uint64_t SliceSize = 0;
2303   bool IsSplittable = false;
2304   bool IsSplit = false;
2305   Use *OldUse = nullptr;
2306   Instruction *OldPtr = nullptr;
2307
2308   // Track post-rewrite users which are PHI nodes and Selects.
2309   SmallSetVector<PHINode *, 8> &PHIUsers;
2310   SmallSetVector<SelectInst *, 8> &SelectUsers;
2311
2312   // Utility IR builder, whose name prefix is set up for each visited use, and
2313   // the insertion point is set to point to the user.
2314 IRBuilderTy IRB; 2315 2316 public: 2317 AllocaSliceRewriter(const DataLayout &DL, AllocaSlices &AS, SROA &Pass, 2318 AllocaInst &OldAI, AllocaInst &NewAI, 2319 uint64_t NewAllocaBeginOffset, 2320 uint64_t NewAllocaEndOffset, bool IsIntegerPromotable, 2321 VectorType *PromotableVecTy, 2322 SmallSetVector<PHINode *, 8> &PHIUsers, 2323 SmallSetVector<SelectInst *, 8> &SelectUsers) 2324 : DL(DL), AS(AS), Pass(Pass), OldAI(OldAI), NewAI(NewAI), 2325 NewAllocaBeginOffset(NewAllocaBeginOffset), 2326 NewAllocaEndOffset(NewAllocaEndOffset), 2327 NewAllocaTy(NewAI.getAllocatedType()), 2328 IntTy(IsIntegerPromotable 2329 ? Type::getIntNTy( 2330 NewAI.getContext(), 2331 DL.getTypeSizeInBits(NewAI.getAllocatedType())) 2332 : nullptr), 2333 VecTy(PromotableVecTy), 2334 ElementTy(VecTy ? VecTy->getElementType() : nullptr), 2335 ElementSize(VecTy ? DL.getTypeSizeInBits(ElementTy) / 8 : 0), 2336 PHIUsers(PHIUsers), SelectUsers(SelectUsers), 2337 IRB(NewAI.getContext(), ConstantFolder()) { 2338 if (VecTy) { 2339 assert((DL.getTypeSizeInBits(ElementTy) % 8) == 0 && 2340 "Only multiple-of-8 sized vector elements are viable"); 2341 ++NumVectorized; 2342 } 2343 assert((!IntTy && !VecTy) || (IntTy && !VecTy) || (!IntTy && VecTy)); 2344 } 2345 2346 bool visit(AllocaSlices::const_iterator I) { 2347 bool CanSROA = true; 2348 BeginOffset = I->beginOffset(); 2349 EndOffset = I->endOffset(); 2350 IsSplittable = I->isSplittable(); 2351 IsSplit = 2352 BeginOffset < NewAllocaBeginOffset || EndOffset > NewAllocaEndOffset; 2353 LLVM_DEBUG(dbgs() << " rewriting " << (IsSplit ? "split " : "")); 2354 LLVM_DEBUG(AS.printSlice(dbgs(), I, "")); 2355 LLVM_DEBUG(dbgs() << "\n"); 2356 2357 // Compute the intersecting offset range. 2358 assert(BeginOffset < NewAllocaEndOffset); 2359 assert(EndOffset > NewAllocaBeginOffset); 2360 NewBeginOffset = std::max(BeginOffset, NewAllocaBeginOffset); 2361 NewEndOffset = std::min(EndOffset, NewAllocaEndOffset); 2362 2363 SliceSize = NewEndOffset - NewBeginOffset; 2364 2365 OldUse = I->getUse(); 2366 OldPtr = cast<Instruction>(OldUse->get()); 2367 2368 Instruction *OldUserI = cast<Instruction>(OldUse->getUser()); 2369 IRB.SetInsertPoint(OldUserI); 2370 IRB.SetCurrentDebugLocation(OldUserI->getDebugLoc()); 2371 IRB.SetNamePrefix(Twine(NewAI.getName()) + "." + Twine(BeginOffset) + "."); 2372 2373 CanSROA &= visit(cast<Instruction>(OldUse->getUser())); 2374 if (VecTy || IntTy) 2375 assert(CanSROA); 2376 return CanSROA; 2377 } 2378 2379 private: 2380 // Make sure the other visit overloads are visible. 2381 using Base::visit; 2382 2383 // Every instruction which can end up as a user must have a rewrite rule. 2384 bool visitInstruction(Instruction &I) { 2385 LLVM_DEBUG(dbgs() << " !!!! Cannot rewrite: " << I << "\n"); 2386 llvm_unreachable("No rewrite rule for this instruction!"); 2387 } 2388 2389 Value *getNewAllocaSlicePtr(IRBuilderTy &IRB, Type *PointerTy) { 2390 // Note that the offset computation can use BeginOffset or NewBeginOffset 2391 // interchangeably for unsplit slices. 2392 assert(IsSplit || BeginOffset == NewBeginOffset); 2393 uint64_t Offset = NewBeginOffset - NewAllocaBeginOffset; 2394 2395 #ifndef NDEBUG 2396 StringRef OldName = OldPtr->getName(); 2397 // Skip through the last '.sroa.' component of the name. 2398 size_t LastSROAPrefix = OldName.rfind(".sroa."); 2399 if (LastSROAPrefix != StringRef::npos) { 2400 OldName = OldName.substr(LastSROAPrefix + strlen(".sroa.")); 2401 // Look for an SROA slice index. 
2402 size_t IndexEnd = OldName.find_first_not_of("0123456789"); 2403 if (IndexEnd != StringRef::npos && OldName[IndexEnd] == '.') { 2404 // Strip the index and look for the offset. 2405 OldName = OldName.substr(IndexEnd + 1); 2406 size_t OffsetEnd = OldName.find_first_not_of("0123456789"); 2407 if (OffsetEnd != StringRef::npos && OldName[OffsetEnd] == '.') 2408 // Strip the offset. 2409 OldName = OldName.substr(OffsetEnd + 1); 2410 } 2411 } 2412 // Strip any SROA suffixes as well. 2413 OldName = OldName.substr(0, OldName.find(".sroa_")); 2414 #endif 2415 2416 return getAdjustedPtr(IRB, DL, &NewAI, 2417 APInt(DL.getIndexTypeSizeInBits(PointerTy), Offset), 2418 PointerTy, 2419 #ifndef NDEBUG 2420 Twine(OldName) + "." 2421 #else 2422 Twine() 2423 #endif 2424 ); 2425 } 2426 2427 /// Compute suitable alignment to access this slice of the *new* 2428 /// alloca. 2429 /// 2430 /// You can optionally pass a type to this routine and if that type's ABI 2431 /// alignment is itself suitable, this will return zero. 2432 MaybeAlign getSliceAlign(Type *Ty = nullptr) { 2433 const MaybeAlign NewAIAlign = DL.getValueOrABITypeAlignment( 2434 MaybeAlign(NewAI.getAlignment()), NewAI.getAllocatedType()); 2435 const MaybeAlign Align = 2436 commonAlignment(NewAIAlign, NewBeginOffset - NewAllocaBeginOffset); 2437 return (Ty && Align && Align->value() == DL.getABITypeAlignment(Ty)) 2438 ? None 2439 : Align; 2440 } 2441 2442 unsigned getIndex(uint64_t Offset) { 2443 assert(VecTy && "Can only call getIndex when rewriting a vector"); 2444 uint64_t RelOffset = Offset - NewAllocaBeginOffset; 2445 assert(RelOffset / ElementSize < UINT32_MAX && "Index out of bounds"); 2446 uint32_t Index = RelOffset / ElementSize; 2447 assert(Index * ElementSize == RelOffset); 2448 return Index; 2449 } 2450 2451 void deleteIfTriviallyDead(Value *V) { 2452 Instruction *I = cast<Instruction>(V); 2453 if (isInstructionTriviallyDead(I)) 2454 Pass.DeadInsts.insert(I); 2455 } 2456 2457 Value *rewriteVectorizedLoadInst() { 2458 unsigned BeginIndex = getIndex(NewBeginOffset); 2459 unsigned EndIndex = getIndex(NewEndOffset); 2460 assert(EndIndex > BeginIndex && "Empty vector!"); 2461 2462 Value *V = IRB.CreateAlignedLoad(NewAI.getAllocatedType(), &NewAI, 2463 NewAI.getAlignment(), "load"); 2464 return extractVector(IRB, V, BeginIndex, EndIndex, "vec"); 2465 } 2466 2467 Value *rewriteIntegerLoad(LoadInst &LI) { 2468 assert(IntTy && "We cannot insert an integer to the alloca"); 2469 assert(!LI.isVolatile()); 2470 Value *V = IRB.CreateAlignedLoad(NewAI.getAllocatedType(), &NewAI, 2471 NewAI.getAlignment(), "load"); 2472 V = convertValue(DL, IRB, V, IntTy); 2473 assert(NewBeginOffset >= NewAllocaBeginOffset && "Out of bounds offset"); 2474 uint64_t Offset = NewBeginOffset - NewAllocaBeginOffset; 2475 if (Offset > 0 || NewEndOffset < NewAllocaEndOffset) { 2476 IntegerType *ExtractTy = Type::getIntNTy(LI.getContext(), SliceSize * 8); 2477 V = extractInteger(DL, IRB, V, ExtractTy, Offset, "extract"); 2478 } 2479 // It is possible that the extracted type is not the load type. This 2480 // happens if there is a load past the end of the alloca, and as 2481 // a consequence the slice is narrower but still a candidate for integer 2482 // lowering. To handle this case, we just zero extend the extracted 2483 // integer. 
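    // (Illustrative: an i32 load whose slice covers only the first two bytes
    //  of the alloca extracts an i16 here and is zero extended back to i32
    //  below.)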
2484     assert(cast<IntegerType>(LI.getType())->getBitWidth() >= SliceSize * 8 &&
2485            "Can only handle an extract for an overly wide load");
2486     if (cast<IntegerType>(LI.getType())->getBitWidth() > SliceSize * 8)
2487       V = IRB.CreateZExt(V, LI.getType());
2488     return V;
2489   }
2490
2491   bool visitLoadInst(LoadInst &LI) {
2492     LLVM_DEBUG(dbgs() << " original: " << LI << "\n");
2493     Value *OldOp = LI.getOperand(0);
2494     assert(OldOp == OldPtr);
2495
2496     AAMDNodes AATags;
2497     LI.getAAMetadata(AATags);
2498
2499     unsigned AS = LI.getPointerAddressSpace();
2500
2501     Type *TargetTy = IsSplit ? Type::getIntNTy(LI.getContext(), SliceSize * 8)
2502                              : LI.getType();
2503     const bool IsLoadPastEnd = DL.getTypeStoreSize(TargetTy) > SliceSize;
2504     bool IsPtrAdjusted = false;
2505     Value *V;
2506     if (VecTy) {
2507       V = rewriteVectorizedLoadInst();
2508     } else if (IntTy && LI.getType()->isIntegerTy()) {
2509       V = rewriteIntegerLoad(LI);
2510     } else if (NewBeginOffset == NewAllocaBeginOffset &&
2511                NewEndOffset == NewAllocaEndOffset &&
2512                (canConvertValue(DL, NewAllocaTy, TargetTy) ||
2513                 (IsLoadPastEnd && NewAllocaTy->isIntegerTy() &&
2514                  TargetTy->isIntegerTy()))) {
2515       LoadInst *NewLI = IRB.CreateAlignedLoad(NewAI.getAllocatedType(), &NewAI,
2516                                               NewAI.getAlignment(),
2517                                               LI.isVolatile(), LI.getName());
2518       if (AATags)
2519         NewLI->setAAMetadata(AATags);
2520       if (LI.isVolatile())
2521         NewLI->setAtomic(LI.getOrdering(), LI.getSyncScopeID());
2522
2523       // Any !nonnull metadata or !range metadata on the old load is also valid
2524       // on the new load. This is true in some cases even when the loads
2525       // are different types, for example by mapping !nonnull metadata to
2526       // !range metadata by modeling the null pointer constant converted to the
2527       // integer type.
2528       // FIXME: Add support for range metadata here. Currently the utilities
2529       // for this don't propagate range metadata in trivial cases from one
2530       // integer load to another, don't handle non-addrspace-0 null pointers
2531       // correctly, and don't have any support for mapping ranges as the
2532       // integer type becomes wider or narrower.
2533       if (MDNode *N = LI.getMetadata(LLVMContext::MD_nonnull))
2534         copyNonnullMetadata(LI, N, *NewLI);
2535
2536       // Try to preserve nonnull metadata
2537       V = NewLI;
2538
2539       // If this is an integer load past the end of the slice (which means the
2540       // bytes outside the slice are undef or this load is dead) just forcibly
2541       // fix the integer size with correct handling of endianness.
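      // (Illustrative: an i64 load of an i32 alloca becomes a load of the i32,
      //  a zext to i64, and on big-endian targets a shl by 32 so the defined
      //  bytes end up where the original load would have read them.)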
2542 if (auto *AITy = dyn_cast<IntegerType>(NewAllocaTy)) 2543 if (auto *TITy = dyn_cast<IntegerType>(TargetTy)) 2544 if (AITy->getBitWidth() < TITy->getBitWidth()) { 2545 V = IRB.CreateZExt(V, TITy, "load.ext"); 2546 if (DL.isBigEndian()) 2547 V = IRB.CreateShl(V, TITy->getBitWidth() - AITy->getBitWidth(), 2548 "endian_shift"); 2549 } 2550 } else { 2551 Type *LTy = TargetTy->getPointerTo(AS); 2552 LoadInst *NewLI = IRB.CreateAlignedLoad( 2553 TargetTy, getNewAllocaSlicePtr(IRB, LTy), getSliceAlign(TargetTy), 2554 LI.isVolatile(), LI.getName()); 2555 if (AATags) 2556 NewLI->setAAMetadata(AATags); 2557 if (LI.isVolatile()) 2558 NewLI->setAtomic(LI.getOrdering(), LI.getSyncScopeID()); 2559 2560 V = NewLI; 2561 IsPtrAdjusted = true; 2562 } 2563 V = convertValue(DL, IRB, V, TargetTy); 2564 2565 if (IsSplit) { 2566 assert(!LI.isVolatile()); 2567 assert(LI.getType()->isIntegerTy() && 2568 "Only integer type loads and stores are split"); 2569 assert(SliceSize < DL.getTypeStoreSize(LI.getType()) && 2570 "Split load isn't smaller than original load"); 2571 assert(DL.typeSizeEqualsStoreSize(LI.getType()) && 2572 "Non-byte-multiple bit width"); 2573 // Move the insertion point just past the load so that we can refer to it. 2574 IRB.SetInsertPoint(&*std::next(BasicBlock::iterator(&LI))); 2575 // Create a placeholder value with the same type as LI to use as the 2576 // basis for the new value. This allows us to replace the uses of LI with 2577 // the computed value, and then replace the placeholder with LI, leaving 2578 // LI only used for this computation. 2579 Value *Placeholder = new LoadInst( 2580 LI.getType(), UndefValue::get(LI.getType()->getPointerTo(AS))); 2581 V = insertInteger(DL, IRB, Placeholder, V, NewBeginOffset - BeginOffset, 2582 "insert"); 2583 LI.replaceAllUsesWith(V); 2584 Placeholder->replaceAllUsesWith(&LI); 2585 Placeholder->deleteValue(); 2586 } else { 2587 LI.replaceAllUsesWith(V); 2588 } 2589 2590 Pass.DeadInsts.insert(&LI); 2591 deleteIfTriviallyDead(OldOp); 2592 LLVM_DEBUG(dbgs() << " to: " << *V << "\n"); 2593 return !LI.isVolatile() && !IsPtrAdjusted; 2594 } 2595 2596 bool rewriteVectorizedStoreInst(Value *V, StoreInst &SI, Value *OldOp, 2597 AAMDNodes AATags) { 2598 if (V->getType() != VecTy) { 2599 unsigned BeginIndex = getIndex(NewBeginOffset); 2600 unsigned EndIndex = getIndex(NewEndOffset); 2601 assert(EndIndex > BeginIndex && "Empty vector!"); 2602 unsigned NumElements = EndIndex - BeginIndex; 2603 assert(NumElements <= VecTy->getNumElements() && "Too many elements!"); 2604 Type *SliceTy = (NumElements == 1) 2605 ? ElementTy 2606 : VectorType::get(ElementTy, NumElements); 2607 if (V->getType() != SliceTy) 2608 V = convertValue(DL, IRB, V, SliceTy); 2609 2610 // Mix in the existing elements. 
2611 Value *Old = IRB.CreateAlignedLoad(NewAI.getAllocatedType(), &NewAI, 2612 NewAI.getAlignment(), "load"); 2613 V = insertVector(IRB, Old, V, BeginIndex, "vec"); 2614 } 2615 StoreInst *Store = IRB.CreateAlignedStore(V, &NewAI, NewAI.getAlignment()); 2616 if (AATags) 2617 Store->setAAMetadata(AATags); 2618 Pass.DeadInsts.insert(&SI); 2619 2620 LLVM_DEBUG(dbgs() << " to: " << *Store << "\n"); 2621 return true; 2622 } 2623 2624 bool rewriteIntegerStore(Value *V, StoreInst &SI, AAMDNodes AATags) { 2625 assert(IntTy && "We cannot extract an integer from the alloca"); 2626 assert(!SI.isVolatile()); 2627 if (DL.getTypeSizeInBits(V->getType()) != IntTy->getBitWidth()) { 2628 Value *Old = IRB.CreateAlignedLoad(NewAI.getAllocatedType(), &NewAI, 2629 NewAI.getAlignment(), "oldload"); 2630 Old = convertValue(DL, IRB, Old, IntTy); 2631 assert(BeginOffset >= NewAllocaBeginOffset && "Out of bounds offset"); 2632 uint64_t Offset = BeginOffset - NewAllocaBeginOffset; 2633 V = insertInteger(DL, IRB, Old, SI.getValueOperand(), Offset, "insert"); 2634 } 2635 V = convertValue(DL, IRB, V, NewAllocaTy); 2636 StoreInst *Store = IRB.CreateAlignedStore(V, &NewAI, NewAI.getAlignment()); 2637 Store->copyMetadata(SI, {LLVMContext::MD_mem_parallel_loop_access, 2638 LLVMContext::MD_access_group}); 2639 if (AATags) 2640 Store->setAAMetadata(AATags); 2641 Pass.DeadInsts.insert(&SI); 2642 LLVM_DEBUG(dbgs() << " to: " << *Store << "\n"); 2643 return true; 2644 } 2645 2646 bool visitStoreInst(StoreInst &SI) { 2647 LLVM_DEBUG(dbgs() << " original: " << SI << "\n"); 2648 Value *OldOp = SI.getOperand(1); 2649 assert(OldOp == OldPtr); 2650 2651 AAMDNodes AATags; 2652 SI.getAAMetadata(AATags); 2653 2654 Value *V = SI.getValueOperand(); 2655 2656 // Strip all inbounds GEPs and pointer casts to try to dig out any root 2657 // alloca that should be re-examined after promoting this alloca. 2658 if (V->getType()->isPointerTy()) 2659 if (AllocaInst *AI = dyn_cast<AllocaInst>(V->stripInBoundsOffsets())) 2660 Pass.PostPromotionWorklist.insert(AI); 2661 2662 if (SliceSize < DL.getTypeStoreSize(V->getType())) { 2663 assert(!SI.isVolatile()); 2664 assert(V->getType()->isIntegerTy() && 2665 "Only integer type loads and stores are split"); 2666 assert(DL.typeSizeEqualsStoreSize(V->getType()) && 2667 "Non-byte-multiple bit width"); 2668 IntegerType *NarrowTy = Type::getIntNTy(SI.getContext(), SliceSize * 8); 2669 V = extractInteger(DL, IRB, V, NarrowTy, NewBeginOffset - BeginOffset, 2670 "extract"); 2671 } 2672 2673 if (VecTy) 2674 return rewriteVectorizedStoreInst(V, SI, OldOp, AATags); 2675 if (IntTy && V->getType()->isIntegerTy()) 2676 return rewriteIntegerStore(V, SI, AATags); 2677 2678 const bool IsStorePastEnd = DL.getTypeStoreSize(V->getType()) > SliceSize; 2679 StoreInst *NewSI; 2680 if (NewBeginOffset == NewAllocaBeginOffset && 2681 NewEndOffset == NewAllocaEndOffset && 2682 (canConvertValue(DL, V->getType(), NewAllocaTy) || 2683 (IsStorePastEnd && NewAllocaTy->isIntegerTy() && 2684 V->getType()->isIntegerTy()))) { 2685 // If this is an integer store past the end of slice (and thus the bytes 2686 // past that point are irrelevant or this is unreachable), truncate the 2687 // value prior to storing. 
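    // (Illustrative: storing an i64 to an i32 alloca keeps only the low 32
    //  bits after the trunc on little-endian targets; big-endian targets first
    //  shift right by 32 so the bytes that actually land in the alloca are
    //  preserved.)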
2688 if (auto *VITy = dyn_cast<IntegerType>(V->getType())) 2689 if (auto *AITy = dyn_cast<IntegerType>(NewAllocaTy)) 2690 if (VITy->getBitWidth() > AITy->getBitWidth()) { 2691 if (DL.isBigEndian()) 2692 V = IRB.CreateLShr(V, VITy->getBitWidth() - AITy->getBitWidth(), 2693 "endian_shift"); 2694 V = IRB.CreateTrunc(V, AITy, "load.trunc"); 2695 } 2696 2697 V = convertValue(DL, IRB, V, NewAllocaTy); 2698 NewSI = IRB.CreateAlignedStore(V, &NewAI, NewAI.getAlignment(), 2699 SI.isVolatile()); 2700 } else { 2701 unsigned AS = SI.getPointerAddressSpace(); 2702 Value *NewPtr = getNewAllocaSlicePtr(IRB, V->getType()->getPointerTo(AS)); 2703 NewSI = IRB.CreateAlignedStore(V, NewPtr, getSliceAlign(V->getType()), 2704 SI.isVolatile()); 2705 } 2706 NewSI->copyMetadata(SI, {LLVMContext::MD_mem_parallel_loop_access, 2707 LLVMContext::MD_access_group}); 2708 if (AATags) 2709 NewSI->setAAMetadata(AATags); 2710 if (SI.isVolatile()) 2711 NewSI->setAtomic(SI.getOrdering(), SI.getSyncScopeID()); 2712 Pass.DeadInsts.insert(&SI); 2713 deleteIfTriviallyDead(OldOp); 2714 2715 LLVM_DEBUG(dbgs() << " to: " << *NewSI << "\n"); 2716 return NewSI->getPointerOperand() == &NewAI && !SI.isVolatile(); 2717 } 2718 2719 /// Compute an integer value from splatting an i8 across the given 2720 /// number of bytes. 2721 /// 2722 /// Note that this routine assumes an i8 is a byte. If that isn't true, don't 2723 /// call this routine. 2724 /// FIXME: Heed the advice above. 2725 /// 2726 /// \param V The i8 value to splat. 2727 /// \param Size The number of bytes in the output (assuming i8 is one byte) 2728 Value *getIntegerSplat(Value *V, unsigned Size) { 2729 assert(Size > 0 && "Expected a positive number of bytes."); 2730 IntegerType *VTy = cast<IntegerType>(V->getType()); 2731 assert(VTy->getBitWidth() == 8 && "Expected an i8 value for the byte"); 2732 if (Size == 1) 2733 return V; 2734 2735 Type *SplatIntTy = Type::getIntNTy(VTy->getContext(), Size * 8); 2736 V = IRB.CreateMul( 2737 IRB.CreateZExt(V, SplatIntTy, "zext"), 2738 ConstantExpr::getUDiv( 2739 Constant::getAllOnesValue(SplatIntTy), 2740 ConstantExpr::getZExt(Constant::getAllOnesValue(V->getType()), 2741 SplatIntTy)), 2742 "isplat"); 2743 return V; 2744 } 2745 2746 /// Compute a vector splat for a given element value. 2747 Value *getVectorSplat(Value *V, unsigned NumElements) { 2748 V = IRB.CreateVectorSplat(NumElements, V, "vsplat"); 2749 LLVM_DEBUG(dbgs() << " splat: " << *V << "\n"); 2750 return V; 2751 } 2752 2753 bool visitMemSetInst(MemSetInst &II) { 2754 LLVM_DEBUG(dbgs() << " original: " << II << "\n"); 2755 assert(II.getRawDest() == OldPtr); 2756 2757 AAMDNodes AATags; 2758 II.getAAMetadata(AATags); 2759 2760 // If the memset has a variable size, it cannot be split, just adjust the 2761 // pointer to the new alloca. 2762 if (!isa<Constant>(II.getLength())) { 2763 assert(!IsSplit); 2764 assert(NewBeginOffset == BeginOffset); 2765 II.setDest(getNewAllocaSlicePtr(IRB, OldPtr->getType())); 2766 II.setDestAlignment(getSliceAlign()); 2767 2768 deleteIfTriviallyDead(OldPtr); 2769 return false; 2770 } 2771 2772 // Record this instruction for deletion. 
2773     Pass.DeadInsts.insert(&II);
2774
2775     Type *AllocaTy = NewAI.getAllocatedType();
2776     Type *ScalarTy = AllocaTy->getScalarType();
2777
2778     const bool CanContinue = [&]() {
2779       if (VecTy || IntTy)
2780         return true;
2781       if (BeginOffset > NewAllocaBeginOffset ||
2782           EndOffset < NewAllocaEndOffset)
2783         return false;
2784       auto *C = cast<ConstantInt>(II.getLength());
2785       if (C->getBitWidth() > 64)
2786         return false;
2787       const auto Len = C->getZExtValue();
2788       auto *Int8Ty = IntegerType::getInt8Ty(NewAI.getContext());
2789       auto *SrcTy = VectorType::get(Int8Ty, Len);
2790       return canConvertValue(DL, SrcTy, AllocaTy) &&
2791              DL.isLegalInteger(DL.getTypeSizeInBits(ScalarTy));
2792     }();
2793
2794     // If this doesn't map cleanly onto the alloca type, and that type isn't
2795     // a single value type, just emit a memset.
2796     if (!CanContinue) {
2797       Type *SizeTy = II.getLength()->getType();
2798       Constant *Size = ConstantInt::get(SizeTy, NewEndOffset - NewBeginOffset);
2799       CallInst *New = IRB.CreateMemSet(
2800           getNewAllocaSlicePtr(IRB, OldPtr->getType()), II.getValue(), Size,
2801           MaybeAlign(getSliceAlign()), II.isVolatile());
2802       if (AATags)
2803         New->setAAMetadata(AATags);
2804       LLVM_DEBUG(dbgs() << " to: " << *New << "\n");
2805       return false;
2806     }
2807
2808     // If we can represent this as a simple value, we have to build the actual
2809     // value to store, which requires expanding the byte present in memset to
2810     // a sensible representation for the alloca type. This is essentially
2811     // splatting the byte to a sufficiently wide integer, splatting it across
2812     // any desired vector width, and bitcasting to the final type.
2813     Value *V;
2814
2815     if (VecTy) {
2816       // If this is a memset of a vectorized alloca, insert it.
2817       assert(ElementTy == ScalarTy);
2818
2819       unsigned BeginIndex = getIndex(NewBeginOffset);
2820       unsigned EndIndex = getIndex(NewEndOffset);
2821       assert(EndIndex > BeginIndex && "Empty vector!");
2822       unsigned NumElements = EndIndex - BeginIndex;
2823       assert(NumElements <= VecTy->getNumElements() && "Too many elements!");
2824
2825       Value *Splat =
2826           getIntegerSplat(II.getValue(), DL.getTypeSizeInBits(ElementTy) / 8);
2827       Splat = convertValue(DL, IRB, Splat, ElementTy);
2828       if (NumElements > 1)
2829         Splat = getVectorSplat(Splat, NumElements);
2830
2831       Value *Old = IRB.CreateAlignedLoad(NewAI.getAllocatedType(), &NewAI,
2832                                          NewAI.getAlignment(), "oldload");
2833       V = insertVector(IRB, Old, Splat, BeginIndex, "vec");
2834     } else if (IntTy) {
2835       // If this is a memset on an alloca where we can widen stores, insert the
2836       // set integer.
2837       assert(!II.isVolatile());
2838
2839       uint64_t Size = NewEndOffset - NewBeginOffset;
2840       V = getIntegerSplat(II.getValue(), Size);
2841
2842       if (IntTy && (BeginOffset != NewAllocaBeginOffset ||
2843                     EndOffset != NewAllocaEndOffset)) {
2844         Value *Old = IRB.CreateAlignedLoad(NewAI.getAllocatedType(), &NewAI,
2845                                            NewAI.getAlignment(), "oldload");
2846         Old = convertValue(DL, IRB, Old, IntTy);
2847         uint64_t Offset = NewBeginOffset - NewAllocaBeginOffset;
2848         V = insertInteger(DL, IRB, Old, V, Offset, "insert");
2849       } else {
2850         assert(V->getType() == IntTy &&
2851                "Wrong type for an alloca wide integer!");
2852       }
2853       V = convertValue(DL, IRB, V, AllocaTy);
2854     } else {
2855       // Established these invariants above.
2856       assert(NewBeginOffset == NewAllocaBeginOffset);
2857       assert(NewEndOffset == NewAllocaEndOffset);
2858
2859       V = getIntegerSplat(II.getValue(), DL.getTypeSizeInBits(ScalarTy) / 8);
2860       if (VectorType *AllocaVecTy = dyn_cast<VectorType>(AllocaTy))
2861         V = getVectorSplat(V, AllocaVecTy->getNumElements());
2862
2863       V = convertValue(DL, IRB, V, AllocaTy);
2864     }
2865
2866     StoreInst *New = IRB.CreateAlignedStore(V, &NewAI, NewAI.getAlignment(),
2867                                             II.isVolatile());
2868     if (AATags)
2869       New->setAAMetadata(AATags);
2870     LLVM_DEBUG(dbgs() << " to: " << *New << "\n");
2871     return !II.isVolatile();
2872   }
2873
2874   bool visitMemTransferInst(MemTransferInst &II) {
2875     // Rewriting of memory transfer instructions can be a bit tricky. We break
2876     // them into two categories: split intrinsics and unsplit intrinsics.
2877
2878     LLVM_DEBUG(dbgs() << " original: " << II << "\n");
2879
2880     AAMDNodes AATags;
2881     II.getAAMetadata(AATags);
2882
2883     bool IsDest = &II.getRawDestUse() == OldUse;
2884     assert((IsDest && II.getRawDest() == OldPtr) ||
2885            (!IsDest && II.getRawSource() == OldPtr));
2886
2887     MaybeAlign SliceAlign = getSliceAlign();
2888
2889     // For unsplit intrinsics, we simply modify the source and destination
2890     // pointers in place. This isn't just an optimization, it is a matter of
2891     // correctness. With unsplit intrinsics we may be dealing with transfers
2892     // within a single alloca before SROA ran, or with transfers that have
2893     // a variable length. We may also be dealing with memmove instead of
2894     // memcpy, and so simply updating the pointers is necessary for us to
2895     // update both source and dest of a single call.
2896     if (!IsSplittable) {
2897       Value *AdjustedPtr = getNewAllocaSlicePtr(IRB, OldPtr->getType());
2898       if (IsDest) {
2899         II.setDest(AdjustedPtr);
2900         II.setDestAlignment(SliceAlign);
2901       }
2902       else {
2903         II.setSource(AdjustedPtr);
2904         II.setSourceAlignment(SliceAlign);
2905       }
2906
2907       LLVM_DEBUG(dbgs() << " to: " << II << "\n");
2908       deleteIfTriviallyDead(OldPtr);
2909       return false;
2910     }
2911     // For split transfer intrinsics we have an incredibly useful assurance:
2912     // the source and destination do not reside within the same alloca, and at
2913     // least one of them does not escape. This means that we can replace
2914     // memmove with memcpy, and we don't need to worry about all manner of
2915     // downsides to splitting and transforming the operations.
2916
2917     // If this doesn't map cleanly onto the alloca type, and that type isn't
2918     // a single value type, just emit a memcpy.
2919     bool EmitMemCpy =
2920         !VecTy && !IntTy &&
2921         (BeginOffset > NewAllocaBeginOffset || EndOffset < NewAllocaEndOffset ||
2922          SliceSize != DL.getTypeStoreSize(NewAI.getAllocatedType()) ||
2923          !NewAI.getAllocatedType()->isSingleValueType());
2924
2925     // If we're just going to emit a memcpy, the alloca hasn't changed, and the
2926     // size hasn't been shrunk based on analysis of the viable range, this is
2927     // a no-op.
2928     if (EmitMemCpy && &OldAI == &NewAI) {
2929       // Ensure the start lines up.
2930       assert(NewBeginOffset == BeginOffset);
2931
2932       // Rewrite the size as needed.
2933       if (NewEndOffset != EndOffset)
2934         II.setLength(ConstantInt::get(II.getLength()->getType(),
2935                                       NewEndOffset - NewBeginOffset));
2936       return false;
2937     }
2938     // Record this instruction for deletion.
2939 Pass.DeadInsts.insert(&II); 2940 2941 // Strip all inbounds GEPs and pointer casts to try to dig out any root 2942 // alloca that should be re-examined after rewriting this instruction. 2943 Value *OtherPtr = IsDest ? II.getRawSource() : II.getRawDest(); 2944 if (AllocaInst *AI = 2945 dyn_cast<AllocaInst>(OtherPtr->stripInBoundsOffsets())) { 2946 assert(AI != &OldAI && AI != &NewAI && 2947 "Splittable transfers cannot reach the same alloca on both ends."); 2948 Pass.Worklist.insert(AI); 2949 } 2950 2951 Type *OtherPtrTy = OtherPtr->getType(); 2952 unsigned OtherAS = OtherPtrTy->getPointerAddressSpace(); 2953 2954 // Compute the relative offset for the other pointer within the transfer. 2955 unsigned OffsetWidth = DL.getIndexSizeInBits(OtherAS); 2956 APInt OtherOffset(OffsetWidth, NewBeginOffset - BeginOffset); 2957 Align OtherAlign = 2958 assumeAligned(IsDest ? II.getSourceAlignment() : II.getDestAlignment()); 2959 OtherAlign = 2960 commonAlignment(OtherAlign, OtherOffset.zextOrTrunc(64).getZExtValue()); 2961 2962 if (EmitMemCpy) { 2963 // Compute the other pointer, folding as much as possible to produce 2964 // a single, simple GEP in most cases. 2965 OtherPtr = getAdjustedPtr(IRB, DL, OtherPtr, OtherOffset, OtherPtrTy, 2966 OtherPtr->getName() + "."); 2967 2968 Value *OurPtr = getNewAllocaSlicePtr(IRB, OldPtr->getType()); 2969 Type *SizeTy = II.getLength()->getType(); 2970 Constant *Size = ConstantInt::get(SizeTy, NewEndOffset - NewBeginOffset); 2971 2972 Value *DestPtr, *SrcPtr; 2973 MaybeAlign DestAlign, SrcAlign; 2974 // Note: IsDest is true iff we're copying into the new alloca slice 2975 if (IsDest) { 2976 DestPtr = OurPtr; 2977 DestAlign = SliceAlign; 2978 SrcPtr = OtherPtr; 2979 SrcAlign = OtherAlign; 2980 } else { 2981 DestPtr = OtherPtr; 2982 DestAlign = OtherAlign; 2983 SrcPtr = OurPtr; 2984 SrcAlign = SliceAlign; 2985 } 2986 CallInst *New = IRB.CreateMemCpy(DestPtr, DestAlign, SrcPtr, SrcAlign, 2987 Size, II.isVolatile()); 2988 if (AATags) 2989 New->setAAMetadata(AATags); 2990 LLVM_DEBUG(dbgs() << " to: " << *New << "\n"); 2991 return false; 2992 } 2993 2994 bool IsWholeAlloca = NewBeginOffset == NewAllocaBeginOffset && 2995 NewEndOffset == NewAllocaEndOffset; 2996 uint64_t Size = NewEndOffset - NewBeginOffset; 2997 unsigned BeginIndex = VecTy ? getIndex(NewBeginOffset) : 0; 2998 unsigned EndIndex = VecTy ? getIndex(NewEndOffset) : 0; 2999 unsigned NumElements = EndIndex - BeginIndex; 3000 IntegerType *SubIntTy = 3001 IntTy ? Type::getIntNTy(IntTy->getContext(), Size * 8) : nullptr; 3002 3003 // Reset the other pointer type to match the register type we're going to 3004 // use, but using the address space of the original other pointer. 
3005 Type *OtherTy; 3006 if (VecTy && !IsWholeAlloca) { 3007 if (NumElements == 1) 3008 OtherTy = VecTy->getElementType(); 3009 else 3010 OtherTy = VectorType::get(VecTy->getElementType(), NumElements); 3011 } else if (IntTy && !IsWholeAlloca) { 3012 OtherTy = SubIntTy; 3013 } else { 3014 OtherTy = NewAllocaTy; 3015 } 3016 OtherPtrTy = OtherTy->getPointerTo(OtherAS); 3017 3018 Value *SrcPtr = getAdjustedPtr(IRB, DL, OtherPtr, OtherOffset, OtherPtrTy, 3019 OtherPtr->getName() + "."); 3020 MaybeAlign SrcAlign = OtherAlign; 3021 Value *DstPtr = &NewAI; 3022 MaybeAlign DstAlign = SliceAlign; 3023 if (!IsDest) { 3024 std::swap(SrcPtr, DstPtr); 3025 std::swap(SrcAlign, DstAlign); 3026 } 3027 3028 Value *Src; 3029 if (VecTy && !IsWholeAlloca && !IsDest) { 3030 Src = IRB.CreateAlignedLoad(NewAI.getAllocatedType(), &NewAI, 3031 NewAI.getAlignment(), "load"); 3032 Src = extractVector(IRB, Src, BeginIndex, EndIndex, "vec"); 3033 } else if (IntTy && !IsWholeAlloca && !IsDest) { 3034 Src = IRB.CreateAlignedLoad(NewAI.getAllocatedType(), &NewAI, 3035 NewAI.getAlignment(), "load"); 3036 Src = convertValue(DL, IRB, Src, IntTy); 3037 uint64_t Offset = NewBeginOffset - NewAllocaBeginOffset; 3038 Src = extractInteger(DL, IRB, Src, SubIntTy, Offset, "extract"); 3039 } else { 3040 LoadInst *Load = IRB.CreateAlignedLoad(OtherTy, SrcPtr, SrcAlign, 3041 II.isVolatile(), "copyload"); 3042 if (AATags) 3043 Load->setAAMetadata(AATags); 3044 Src = Load; 3045 } 3046 3047 if (VecTy && !IsWholeAlloca && IsDest) { 3048 Value *Old = IRB.CreateAlignedLoad(NewAI.getAllocatedType(), &NewAI, 3049 NewAI.getAlignment(), "oldload"); 3050 Src = insertVector(IRB, Old, Src, BeginIndex, "vec"); 3051 } else if (IntTy && !IsWholeAlloca && IsDest) { 3052 Value *Old = IRB.CreateAlignedLoad(NewAI.getAllocatedType(), &NewAI, 3053 NewAI.getAlignment(), "oldload"); 3054 Old = convertValue(DL, IRB, Old, IntTy); 3055 uint64_t Offset = NewBeginOffset - NewAllocaBeginOffset; 3056 Src = insertInteger(DL, IRB, Old, Src, Offset, "insert"); 3057 Src = convertValue(DL, IRB, Src, NewAllocaTy); 3058 } 3059 3060 StoreInst *Store = cast<StoreInst>( 3061 IRB.CreateAlignedStore(Src, DstPtr, DstAlign, II.isVolatile())); 3062 if (AATags) 3063 Store->setAAMetadata(AATags); 3064 LLVM_DEBUG(dbgs() << " to: " << *Store << "\n"); 3065 return !II.isVolatile(); 3066 } 3067 3068 bool visitIntrinsicInst(IntrinsicInst &II) { 3069 assert(II.isLifetimeStartOrEnd()); 3070 LLVM_DEBUG(dbgs() << " original: " << II << "\n"); 3071 assert(II.getArgOperand(1) == OldPtr); 3072 3073 // Record this instruction for deletion. 3074 Pass.DeadInsts.insert(&II); 3075 3076 // Lifetime intrinsics are only promotable if they cover the whole alloca. 3077 // Therefore, we drop lifetime intrinsics which don't cover the whole 3078 // alloca. 3079 // (In theory, intrinsics which partially cover an alloca could be 3080 // promoted, but PromoteMemToReg doesn't handle that case.) 3081 // FIXME: Check whether the alloca is promotable before dropping the 3082 // lifetime intrinsics? 3083 if (NewBeginOffset != NewAllocaBeginOffset || 3084 NewEndOffset != NewAllocaEndOffset) 3085 return true; 3086 3087 ConstantInt *Size = 3088 ConstantInt::get(cast<IntegerType>(II.getArgOperand(0)->getType()), 3089 NewEndOffset - NewBeginOffset); 3090 // Lifetime intrinsics always expect an i8* so directly get such a pointer 3091 // for the new alloca slice. 
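    // For example (illustrative only), a marker covering a new alloca of type
    // i64 is rebuilt roughly as:
    //   %ptr = bitcast i64* %new.alloca to i8*
    //   call void @llvm.lifetime.start.p0i8(i64 8, i8* %ptr)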
3092 Type *PointerTy = IRB.getInt8PtrTy(OldPtr->getType()->getPointerAddressSpace()); 3093 Value *Ptr = getNewAllocaSlicePtr(IRB, PointerTy); 3094 Value *New; 3095 if (II.getIntrinsicID() == Intrinsic::lifetime_start) 3096 New = IRB.CreateLifetimeStart(Ptr, Size); 3097 else 3098 New = IRB.CreateLifetimeEnd(Ptr, Size); 3099 3100 (void)New; 3101 LLVM_DEBUG(dbgs() << " to: " << *New << "\n"); 3102 3103 return true; 3104 } 3105 3106 void fixLoadStoreAlign(Instruction &Root) { 3107 // This algorithm implements the same visitor loop as 3108 // hasUnsafePHIOrSelectUse, and fixes the alignment of each load 3109 // or store found. 3110 SmallPtrSet<Instruction *, 4> Visited; 3111 SmallVector<Instruction *, 4> Uses; 3112 Visited.insert(&Root); 3113 Uses.push_back(&Root); 3114 do { 3115 Instruction *I = Uses.pop_back_val(); 3116 3117 if (LoadInst *LI = dyn_cast<LoadInst>(I)) { 3118 MaybeAlign LoadAlign = DL.getValueOrABITypeAlignment( 3119 MaybeAlign(LI->getAlignment()), LI->getType()); 3120 LI->setAlignment(std::min(LoadAlign, getSliceAlign())); 3121 continue; 3122 } 3123 if (StoreInst *SI = dyn_cast<StoreInst>(I)) { 3124 Value *Op = SI->getOperand(0); 3125 MaybeAlign StoreAlign = DL.getValueOrABITypeAlignment( 3126 MaybeAlign(SI->getAlignment()), Op->getType()); 3127 SI->setAlignment(std::min(StoreAlign, getSliceAlign())); 3128 continue; 3129 } 3130 3131 assert(isa<BitCastInst>(I) || isa<AddrSpaceCastInst>(I) || 3132 isa<PHINode>(I) || isa<SelectInst>(I) || 3133 isa<GetElementPtrInst>(I)); 3134 for (User *U : I->users()) 3135 if (Visited.insert(cast<Instruction>(U)).second) 3136 Uses.push_back(cast<Instruction>(U)); 3137 } while (!Uses.empty()); 3138 } 3139 3140 bool visitPHINode(PHINode &PN) { 3141 LLVM_DEBUG(dbgs() << " original: " << PN << "\n"); 3142 assert(BeginOffset >= NewAllocaBeginOffset && "PHIs are unsplittable"); 3143 assert(EndOffset <= NewAllocaEndOffset && "PHIs are unsplittable"); 3144 3145 // We would like to compute a new pointer in only one place, but have it be 3146 // as local as possible to the PHI. To do that, we re-use the location of 3147 // the old pointer, which necessarily must be in the right position to 3148 // dominate the PHI. 3149 IRBuilderTy PtrBuilder(IRB); 3150 if (isa<PHINode>(OldPtr)) 3151 PtrBuilder.SetInsertPoint(&*OldPtr->getParent()->getFirstInsertionPt()); 3152 else 3153 PtrBuilder.SetInsertPoint(OldPtr); 3154 PtrBuilder.SetCurrentDebugLocation(OldPtr->getDebugLoc()); 3155 3156 Value *NewPtr = getNewAllocaSlicePtr(PtrBuilder, OldPtr->getType()); 3157 // Replace the operands which were using the old pointer. 3158 std::replace(PN.op_begin(), PN.op_end(), cast<Value>(OldPtr), NewPtr); 3159 3160 LLVM_DEBUG(dbgs() << " to: " << PN << "\n"); 3161 deleteIfTriviallyDead(OldPtr); 3162 3163 // Fix the alignment of any loads or stores using this PHI node. 3164 fixLoadStoreAlign(PN); 3165 3166 // PHIs can't be promoted on their own, but often can be speculated. We 3167 // check the speculation outside of the rewriter so that we see the 3168 // fully-rewritten alloca. 
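    // For example (illustrative only), a rewritten PHI such as
    //   %phi = phi i32* [ %new.alloca, %then ], [ %other.ptr, %else ]
    //   %val = load i32, i32* %phi
    // can later be speculated by loading in each predecessor and forming a PHI
    // of the loaded i32 values instead, which unblocks promotion.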
    PHIUsers.insert(&PN);
    return true;
  }

  bool visitSelectInst(SelectInst &SI) {
    LLVM_DEBUG(dbgs() << " original: " << SI << "\n");
    assert((SI.getTrueValue() == OldPtr || SI.getFalseValue() == OldPtr) &&
           "Pointer isn't an operand!");
    assert(BeginOffset >= NewAllocaBeginOffset && "Selects are unsplittable");
    assert(EndOffset <= NewAllocaEndOffset && "Selects are unsplittable");

    Value *NewPtr = getNewAllocaSlicePtr(IRB, OldPtr->getType());
    // Replace the operands which were using the old pointer.
    if (SI.getOperand(1) == OldPtr)
      SI.setOperand(1, NewPtr);
    if (SI.getOperand(2) == OldPtr)
      SI.setOperand(2, NewPtr);

    LLVM_DEBUG(dbgs() << " to: " << SI << "\n");
    deleteIfTriviallyDead(OldPtr);

    // Fix the alignment of any loads or stores using this select.
    fixLoadStoreAlign(SI);

    // Selects can't be promoted on their own, but often can be speculated. We
    // check the speculation outside of the rewriter so that we see the
    // fully-rewritten alloca.
    SelectUsers.insert(&SI);
    return true;
  }
};

namespace {

/// Visitor to rewrite aggregate loads and stores as scalar.
///
/// This pass aggressively rewrites all aggregate loads and stores on
/// a particular pointer (or any pointer derived from it which we can identify)
/// with scalar loads and stores.
class AggLoadStoreRewriter : public InstVisitor<AggLoadStoreRewriter, bool> {
  // Befriend the base class so it can delegate to private visit methods.
  friend class InstVisitor<AggLoadStoreRewriter, bool>;

  /// Queue of pointer uses to analyze and potentially rewrite.
  SmallVector<Use *, 8> Queue;

  /// Set to prevent us from cycling with phi nodes and loops.
  SmallPtrSet<User *, 8> Visited;

  /// The current pointer use being rewritten. This is used to dig up the used
  /// value (as opposed to the user).
  Use *U = nullptr;

  /// Used to calculate offsets, and hence alignment, of subobjects.
  const DataLayout &DL;

public:
  AggLoadStoreRewriter(const DataLayout &DL) : DL(DL) {}

  /// Rewrite loads and stores through a pointer and all pointers derived from
  /// it.
  bool rewrite(Instruction &I) {
    LLVM_DEBUG(dbgs() << " Rewriting FCA loads and stores...\n");
    enqueueUsers(I);
    bool Changed = false;
    while (!Queue.empty()) {
      U = Queue.pop_back_val();
      Changed |= visit(cast<Instruction>(U->getUser()));
    }
    return Changed;
  }

private:
  /// Enqueue all the users of the given instruction for further processing.
  /// This uses a set to de-duplicate users.
  void enqueueUsers(Instruction &I) {
    for (Use &U : I.uses())
      if (Visited.insert(U.getUser()).second)
        Queue.push_back(&U);
  }

  // Conservative default is to not rewrite anything.
  bool visitInstruction(Instruction &I) { return false; }

  /// Generic recursive split emission class.
  template <typename Derived> class OpSplitter {
  protected:
    /// The builder used to form new instructions.
    IRBuilderTy IRB;

    /// The indices to be used with insertvalue or extractvalue to select the
    /// appropriate value within the aggregate.
    SmallVector<unsigned, 4> Indices;

    /// The indices to a GEP instruction which will move Ptr to the correct slot
    /// within the aggregate.
3265 SmallVector<Value *, 4> GEPIndices; 3266 3267 /// The base pointer of the original op, used as a base for GEPing the 3268 /// split operations. 3269 Value *Ptr; 3270 3271 /// The base pointee type being GEPed into. 3272 Type *BaseTy; 3273 3274 /// Known alignment of the base pointer. 3275 Align BaseAlign; 3276 3277 /// To calculate offset of each component so we can correctly deduce 3278 /// alignments. 3279 const DataLayout &DL; 3280 3281 /// Initialize the splitter with an insertion point, Ptr and start with a 3282 /// single zero GEP index. 3283 OpSplitter(Instruction *InsertionPoint, Value *Ptr, Type *BaseTy, 3284 Align BaseAlign, const DataLayout &DL) 3285 : IRB(InsertionPoint), GEPIndices(1, IRB.getInt32(0)), Ptr(Ptr), 3286 BaseTy(BaseTy), BaseAlign(BaseAlign), DL(DL) {} 3287 3288 public: 3289 /// Generic recursive split emission routine. 3290 /// 3291 /// This method recursively splits an aggregate op (load or store) into 3292 /// scalar or vector ops. It splits recursively until it hits a single value 3293 /// and emits that single value operation via the template argument. 3294 /// 3295 /// The logic of this routine relies on GEPs and insertvalue and 3296 /// extractvalue all operating with the same fundamental index list, merely 3297 /// formatted differently (GEPs need actual values). 3298 /// 3299 /// \param Ty The type being split recursively into smaller ops. 3300 /// \param Agg The aggregate value being built up or stored, depending on 3301 /// whether this is splitting a load or a store respectively. 3302 void emitSplitOps(Type *Ty, Value *&Agg, const Twine &Name) { 3303 if (Ty->isSingleValueType()) { 3304 unsigned Offset = DL.getIndexedOffsetInType(BaseTy, GEPIndices); 3305 return static_cast<Derived *>(this)->emitFunc( 3306 Ty, Agg, commonAlignment(BaseAlign, Offset), Name); 3307 } 3308 3309 if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) { 3310 unsigned OldSize = Indices.size(); 3311 (void)OldSize; 3312 for (unsigned Idx = 0, Size = ATy->getNumElements(); Idx != Size; 3313 ++Idx) { 3314 assert(Indices.size() == OldSize && "Did not return to the old size"); 3315 Indices.push_back(Idx); 3316 GEPIndices.push_back(IRB.getInt32(Idx)); 3317 emitSplitOps(ATy->getElementType(), Agg, Name + "." + Twine(Idx)); 3318 GEPIndices.pop_back(); 3319 Indices.pop_back(); 3320 } 3321 return; 3322 } 3323 3324 if (StructType *STy = dyn_cast<StructType>(Ty)) { 3325 unsigned OldSize = Indices.size(); 3326 (void)OldSize; 3327 for (unsigned Idx = 0, Size = STy->getNumElements(); Idx != Size; 3328 ++Idx) { 3329 assert(Indices.size() == OldSize && "Did not return to the old size"); 3330 Indices.push_back(Idx); 3331 GEPIndices.push_back(IRB.getInt32(Idx)); 3332 emitSplitOps(STy->getElementType(Idx), Agg, Name + "." + Twine(Idx)); 3333 GEPIndices.pop_back(); 3334 Indices.pop_back(); 3335 } 3336 return; 3337 } 3338 3339 llvm_unreachable("Only arrays and structs are aggregate loadable types"); 3340 } 3341 }; 3342 3343 struct LoadOpSplitter : public OpSplitter<LoadOpSplitter> { 3344 AAMDNodes AATags; 3345 3346 LoadOpSplitter(Instruction *InsertionPoint, Value *Ptr, Type *BaseTy, 3347 AAMDNodes AATags, Align BaseAlign, const DataLayout &DL) 3348 : OpSplitter<LoadOpSplitter>(InsertionPoint, Ptr, BaseTy, BaseAlign, 3349 DL), 3350 AATags(AATags) {} 3351 3352 /// Emit a leaf load of a single value. This is called at the leaves of the 3353 /// recursive emission to actually load values. 
3354 void emitFunc(Type *Ty, Value *&Agg, Align Alignment, const Twine &Name) { 3355 assert(Ty->isSingleValueType()); 3356 // Load the single value and insert it using the indices. 3357 Value *GEP = 3358 IRB.CreateInBoundsGEP(BaseTy, Ptr, GEPIndices, Name + ".gep"); 3359 LoadInst *Load = 3360 IRB.CreateAlignedLoad(Ty, GEP, Alignment.value(), Name + ".load"); 3361 if (AATags) 3362 Load->setAAMetadata(AATags); 3363 Agg = IRB.CreateInsertValue(Agg, Load, Indices, Name + ".insert"); 3364 LLVM_DEBUG(dbgs() << " to: " << *Load << "\n"); 3365 } 3366 }; 3367 3368 bool visitLoadInst(LoadInst &LI) { 3369 assert(LI.getPointerOperand() == *U); 3370 if (!LI.isSimple() || LI.getType()->isSingleValueType()) 3371 return false; 3372 3373 // We have an aggregate being loaded, split it apart. 3374 LLVM_DEBUG(dbgs() << " original: " << LI << "\n"); 3375 AAMDNodes AATags; 3376 LI.getAAMetadata(AATags); 3377 LoadOpSplitter Splitter(&LI, *U, LI.getType(), AATags, 3378 getAdjustedAlignment(&LI, 0, DL), DL); 3379 Value *V = UndefValue::get(LI.getType()); 3380 Splitter.emitSplitOps(LI.getType(), V, LI.getName() + ".fca"); 3381 LI.replaceAllUsesWith(V); 3382 LI.eraseFromParent(); 3383 return true; 3384 } 3385 3386 struct StoreOpSplitter : public OpSplitter<StoreOpSplitter> { 3387 StoreOpSplitter(Instruction *InsertionPoint, Value *Ptr, Type *BaseTy, 3388 AAMDNodes AATags, Align BaseAlign, const DataLayout &DL) 3389 : OpSplitter<StoreOpSplitter>(InsertionPoint, Ptr, BaseTy, BaseAlign, 3390 DL), 3391 AATags(AATags) {} 3392 AAMDNodes AATags; 3393 /// Emit a leaf store of a single value. This is called at the leaves of the 3394 /// recursive emission to actually produce stores. 3395 void emitFunc(Type *Ty, Value *&Agg, Align Alignment, const Twine &Name) { 3396 assert(Ty->isSingleValueType()); 3397 // Extract the single value and store it using the indices. 3398 // 3399 // The gep and extractvalue values are factored out of the CreateStore 3400 // call to make the output independent of the argument evaluation order. 3401 Value *ExtractValue = 3402 IRB.CreateExtractValue(Agg, Indices, Name + ".extract"); 3403 Value *InBoundsGEP = 3404 IRB.CreateInBoundsGEP(BaseTy, Ptr, GEPIndices, Name + ".gep"); 3405 StoreInst *Store = 3406 IRB.CreateAlignedStore(ExtractValue, InBoundsGEP, Alignment.value()); 3407 if (AATags) 3408 Store->setAAMetadata(AATags); 3409 LLVM_DEBUG(dbgs() << " to: " << *Store << "\n"); 3410 } 3411 }; 3412 3413 bool visitStoreInst(StoreInst &SI) { 3414 if (!SI.isSimple() || SI.getPointerOperand() != *U) 3415 return false; 3416 Value *V = SI.getValueOperand(); 3417 if (V->getType()->isSingleValueType()) 3418 return false; 3419 3420 // We have an aggregate being stored, split it apart. 
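    // For example (illustrative only), a store of a { i32, float } value %v is
    // emitted as one scalar store per leaf element, roughly:
    //   %v.fca.0.extract = extractvalue { i32, float } %v, 0
    //   %v.fca.0.gep = getelementptr inbounds { i32, float }, { i32, float }* %ptr, i32 0, i32 0
    //   store i32 %v.fca.0.extract, i32* %v.fca.0.gep
    // and likewise for the float element at index 1.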
3421 LLVM_DEBUG(dbgs() << " original: " << SI << "\n"); 3422 AAMDNodes AATags; 3423 SI.getAAMetadata(AATags); 3424 StoreOpSplitter Splitter(&SI, *U, V->getType(), AATags, 3425 getAdjustedAlignment(&SI, 0, DL), DL); 3426 Splitter.emitSplitOps(V->getType(), V, V->getName() + ".fca"); 3427 SI.eraseFromParent(); 3428 return true; 3429 } 3430 3431 bool visitBitCastInst(BitCastInst &BC) { 3432 enqueueUsers(BC); 3433 return false; 3434 } 3435 3436 bool visitAddrSpaceCastInst(AddrSpaceCastInst &ASC) { 3437 enqueueUsers(ASC); 3438 return false; 3439 } 3440 3441 bool visitGetElementPtrInst(GetElementPtrInst &GEPI) { 3442 enqueueUsers(GEPI); 3443 return false; 3444 } 3445 3446 bool visitPHINode(PHINode &PN) { 3447 enqueueUsers(PN); 3448 return false; 3449 } 3450 3451 bool visitSelectInst(SelectInst &SI) { 3452 enqueueUsers(SI); 3453 return false; 3454 } 3455 }; 3456 3457 } // end anonymous namespace 3458 3459 /// Strip aggregate type wrapping. 3460 /// 3461 /// This removes no-op aggregate types wrapping an underlying type. It will 3462 /// strip as many layers of types as it can without changing either the type 3463 /// size or the allocated size. 3464 static Type *stripAggregateTypeWrapping(const DataLayout &DL, Type *Ty) { 3465 if (Ty->isSingleValueType()) 3466 return Ty; 3467 3468 uint64_t AllocSize = DL.getTypeAllocSize(Ty); 3469 uint64_t TypeSize = DL.getTypeSizeInBits(Ty); 3470 3471 Type *InnerTy; 3472 if (ArrayType *ArrTy = dyn_cast<ArrayType>(Ty)) { 3473 InnerTy = ArrTy->getElementType(); 3474 } else if (StructType *STy = dyn_cast<StructType>(Ty)) { 3475 const StructLayout *SL = DL.getStructLayout(STy); 3476 unsigned Index = SL->getElementContainingOffset(0); 3477 InnerTy = STy->getElementType(Index); 3478 } else { 3479 return Ty; 3480 } 3481 3482 if (AllocSize > DL.getTypeAllocSize(InnerTy) || 3483 TypeSize > DL.getTypeSizeInBits(InnerTy)) 3484 return Ty; 3485 3486 return stripAggregateTypeWrapping(DL, InnerTy); 3487 } 3488 3489 /// Try to find a partition of the aggregate type passed in for a given 3490 /// offset and size. 3491 /// 3492 /// This recurses through the aggregate type and tries to compute a subtype 3493 /// based on the offset and size. When the offset and size span a sub-section 3494 /// of an array, it will even compute a new array type for that sub-section, 3495 /// and the same for structs. 3496 /// 3497 /// Note that this routine is very strict and tries to find a partition of the 3498 /// type which produces the *exact* right offset and size. It is not forgiving 3499 /// when the size or offset cause either end of type-based partition to be off. 3500 /// Also, this is a best-effort routine. It is reasonable to give up and not 3501 /// return a type if necessary. 3502 static Type *getTypePartition(const DataLayout &DL, Type *Ty, uint64_t Offset, 3503 uint64_t Size) { 3504 if (Offset == 0 && DL.getTypeAllocSize(Ty) == Size) 3505 return stripAggregateTypeWrapping(DL, Ty); 3506 if (Offset > DL.getTypeAllocSize(Ty) || 3507 (DL.getTypeAllocSize(Ty) - Offset) < Size) 3508 return nullptr; 3509 3510 if (SequentialType *SeqTy = dyn_cast<SequentialType>(Ty)) { 3511 Type *ElementTy = SeqTy->getElementType(); 3512 uint64_t ElementSize = DL.getTypeAllocSize(ElementTy); 3513 uint64_t NumSkippedElements = Offset / ElementSize; 3514 if (NumSkippedElements >= SeqTy->getNumElements()) 3515 return nullptr; 3516 Offset -= NumSkippedElements * ElementSize; 3517 3518 // First check if we need to recurse. 
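    // For example (illustrative only): for Ty = [4 x i32], Offset = 4 and
    // Size = 8 we skip one element, fall through, and build the sub-array type
    // [2 x i32] below; for Offset = 4 and Size = 2 we recurse into the i32
    // element, which has no matching sub-partition, and return nullptr.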
3519 if (Offset > 0 || Size < ElementSize) { 3520 // Bail if the partition ends in a different array element. 3521 if ((Offset + Size) > ElementSize) 3522 return nullptr; 3523 // Recurse through the element type trying to peel off offset bytes. 3524 return getTypePartition(DL, ElementTy, Offset, Size); 3525 } 3526 assert(Offset == 0); 3527 3528 if (Size == ElementSize) 3529 return stripAggregateTypeWrapping(DL, ElementTy); 3530 assert(Size > ElementSize); 3531 uint64_t NumElements = Size / ElementSize; 3532 if (NumElements * ElementSize != Size) 3533 return nullptr; 3534 return ArrayType::get(ElementTy, NumElements); 3535 } 3536 3537 StructType *STy = dyn_cast<StructType>(Ty); 3538 if (!STy) 3539 return nullptr; 3540 3541 const StructLayout *SL = DL.getStructLayout(STy); 3542 if (Offset >= SL->getSizeInBytes()) 3543 return nullptr; 3544 uint64_t EndOffset = Offset + Size; 3545 if (EndOffset > SL->getSizeInBytes()) 3546 return nullptr; 3547 3548 unsigned Index = SL->getElementContainingOffset(Offset); 3549 Offset -= SL->getElementOffset(Index); 3550 3551 Type *ElementTy = STy->getElementType(Index); 3552 uint64_t ElementSize = DL.getTypeAllocSize(ElementTy); 3553 if (Offset >= ElementSize) 3554 return nullptr; // The offset points into alignment padding. 3555 3556 // See if any partition must be contained by the element. 3557 if (Offset > 0 || Size < ElementSize) { 3558 if ((Offset + Size) > ElementSize) 3559 return nullptr; 3560 return getTypePartition(DL, ElementTy, Offset, Size); 3561 } 3562 assert(Offset == 0); 3563 3564 if (Size == ElementSize) 3565 return stripAggregateTypeWrapping(DL, ElementTy); 3566 3567 StructType::element_iterator EI = STy->element_begin() + Index, 3568 EE = STy->element_end(); 3569 if (EndOffset < SL->getSizeInBytes()) { 3570 unsigned EndIndex = SL->getElementContainingOffset(EndOffset); 3571 if (Index == EndIndex) 3572 return nullptr; // Within a single element and its padding. 3573 3574 // Don't try to form "natural" types if the elements don't line up with the 3575 // expected size. 3576 // FIXME: We could potentially recurse down through the last element in the 3577 // sub-struct to find a natural end point. 3578 if (SL->getElementOffset(EndIndex) != EndOffset) 3579 return nullptr; 3580 3581 assert(Index < EndIndex); 3582 EE = STy->element_begin() + EndIndex; 3583 } 3584 3585 // Try to build up a sub-structure. 3586 StructType *SubTy = 3587 StructType::get(STy->getContext(), makeArrayRef(EI, EE), STy->isPacked()); 3588 const StructLayout *SubSL = DL.getStructLayout(SubTy); 3589 if (Size != SubSL->getSizeInBytes()) 3590 return nullptr; // The sub-struct doesn't have quite the size needed. 3591 3592 return SubTy; 3593 } 3594 3595 /// Pre-split loads and stores to simplify rewriting. 3596 /// 3597 /// We want to break up the splittable load+store pairs as much as 3598 /// possible. This is important to do as a preprocessing step, as once we 3599 /// start rewriting the accesses to partitions of the alloca we lose the 3600 /// necessary information to correctly split apart paired loads and stores 3601 /// which both point into this alloca. 
/// The case to consider is something like the following:
///
///   %a = alloca [12 x i8]
///   %gep1 = getelementptr [12 x i8]* %a, i32 0, i32 0
///   %gep2 = getelementptr [12 x i8]* %a, i32 0, i32 4
///   %gep3 = getelementptr [12 x i8]* %a, i32 0, i32 8
///   %iptr1 = bitcast i8* %gep1 to i64*
///   %iptr2 = bitcast i8* %gep2 to i64*
///   %fptr1 = bitcast i8* %gep1 to float*
///   %fptr2 = bitcast i8* %gep2 to float*
///   %fptr3 = bitcast i8* %gep3 to float*
///   store float 0.0, float* %fptr1
///   store float 1.0, float* %fptr2
///   %v = load i64* %iptr1
///   store i64 %v, i64* %iptr2
///   %f1 = load float* %fptr2
///   %f2 = load float* %fptr3
///
/// Here we want to form 3 partitions of the alloca, each 4 bytes large, and
/// promote everything so we recover the 2 SSA values that should have been
/// there all along.
///
/// \returns true if any changes are made.
bool SROA::presplitLoadsAndStores(AllocaInst &AI, AllocaSlices &AS) {
  LLVM_DEBUG(dbgs() << "Pre-splitting loads and stores\n");

  // Track the loads and stores which are candidates for pre-splitting here, in
  // the order they first appear during the partition scan. These give stable
  // iteration order and a basis for tracking which loads and stores we
  // actually split.
  SmallVector<LoadInst *, 4> Loads;
  SmallVector<StoreInst *, 4> Stores;

  // We need to accumulate the splits required of each load or store where we
  // can find them via a direct lookup. This is important to cross-check loads
  // and stores against each other. We also track the slice so that we can kill
  // all the slices that end up split.
  struct SplitOffsets {
    Slice *S;
    std::vector<uint64_t> Splits;
  };
  SmallDenseMap<Instruction *, SplitOffsets, 8> SplitOffsetsMap;

  // Track loads out of this alloca which cannot, for any reason, be pre-split.
  // This is important as we also cannot pre-split stores of those loads!
  // FIXME: This is all pretty gross. It means that we can be more aggressive
  // in pre-splitting when the load feeding the store happens to come from
  // a separate alloca. Put another way, the effectiveness of SROA would be
  // decreased by a frontend which just concatenated all of its local allocas
  // into one big flat alloca. But defeating such patterns is exactly the job
  // SROA is tasked with! Sadly, to not have this discrepancy we would have to
  // change store pre-splitting to actually force pre-splitting of the load
  // that feeds it *and all stores*. That makes pre-splitting much harder, but
  // maybe it would make it more principled?
  SmallPtrSet<LoadInst *, 8> UnsplittableLoads;

  LLVM_DEBUG(dbgs() << " Searching for candidate loads and stores\n");
  for (auto &P : AS.partitions()) {
    for (Slice &S : P) {
      Instruction *I = cast<Instruction>(S.getUse()->getUser());
      if (!S.isSplittable() || S.endOffset() <= P.endOffset()) {
        // If this is a load we have to track that it can't participate in any
        // pre-splitting. If this is a store of a load we have to track that
        // that load also can't participate in any pre-splitting.
3666 if (auto *LI = dyn_cast<LoadInst>(I)) 3667 UnsplittableLoads.insert(LI); 3668 else if (auto *SI = dyn_cast<StoreInst>(I)) 3669 if (auto *LI = dyn_cast<LoadInst>(SI->getValueOperand())) 3670 UnsplittableLoads.insert(LI); 3671 continue; 3672 } 3673 assert(P.endOffset() > S.beginOffset() && 3674 "Empty or backwards partition!"); 3675 3676 // Determine if this is a pre-splittable slice. 3677 if (auto *LI = dyn_cast<LoadInst>(I)) { 3678 assert(!LI->isVolatile() && "Cannot split volatile loads!"); 3679 3680 // The load must be used exclusively to store into other pointers for 3681 // us to be able to arbitrarily pre-split it. The stores must also be 3682 // simple to avoid changing semantics. 3683 auto IsLoadSimplyStored = [](LoadInst *LI) { 3684 for (User *LU : LI->users()) { 3685 auto *SI = dyn_cast<StoreInst>(LU); 3686 if (!SI || !SI->isSimple()) 3687 return false; 3688 } 3689 return true; 3690 }; 3691 if (!IsLoadSimplyStored(LI)) { 3692 UnsplittableLoads.insert(LI); 3693 continue; 3694 } 3695 3696 Loads.push_back(LI); 3697 } else if (auto *SI = dyn_cast<StoreInst>(I)) { 3698 if (S.getUse() != &SI->getOperandUse(SI->getPointerOperandIndex())) 3699 // Skip stores *of* pointers. FIXME: This shouldn't even be possible! 3700 continue; 3701 auto *StoredLoad = dyn_cast<LoadInst>(SI->getValueOperand()); 3702 if (!StoredLoad || !StoredLoad->isSimple()) 3703 continue; 3704 assert(!SI->isVolatile() && "Cannot split volatile stores!"); 3705 3706 Stores.push_back(SI); 3707 } else { 3708 // Other uses cannot be pre-split. 3709 continue; 3710 } 3711 3712 // Record the initial split. 3713 LLVM_DEBUG(dbgs() << " Candidate: " << *I << "\n"); 3714 auto &Offsets = SplitOffsetsMap[I]; 3715 assert(Offsets.Splits.empty() && 3716 "Should not have splits the first time we see an instruction!"); 3717 Offsets.S = &S; 3718 Offsets.Splits.push_back(P.endOffset() - S.beginOffset()); 3719 } 3720 3721 // Now scan the already split slices, and add a split for any of them which 3722 // we're going to pre-split. 3723 for (Slice *S : P.splitSliceTails()) { 3724 auto SplitOffsetsMapI = 3725 SplitOffsetsMap.find(cast<Instruction>(S->getUse()->getUser())); 3726 if (SplitOffsetsMapI == SplitOffsetsMap.end()) 3727 continue; 3728 auto &Offsets = SplitOffsetsMapI->second; 3729 3730 assert(Offsets.S == S && "Found a mismatched slice!"); 3731 assert(!Offsets.Splits.empty() && 3732 "Cannot have an empty set of splits on the second partition!"); 3733 assert(Offsets.Splits.back() == 3734 P.beginOffset() - Offsets.S->beginOffset() && 3735 "Previous split does not end where this one begins!"); 3736 3737 // Record each split. The last partition's end isn't needed as the size 3738 // of the slice dictates that. 3739 if (S->endOffset() > P.endOffset()) 3740 Offsets.Splits.push_back(P.endOffset() - Offsets.S->beginOffset()); 3741 } 3742 } 3743 3744 // We may have split loads where some of their stores are split stores. For 3745 // such loads and stores, we can only pre-split them if their splits exactly 3746 // match relative to their starting offset. We have to verify this prior to 3747 // any rewriting. 3748 Stores.erase( 3749 llvm::remove_if(Stores, 3750 [&UnsplittableLoads, &SplitOffsetsMap](StoreInst *SI) { 3751 // Lookup the load we are storing in our map of split 3752 // offsets. 3753 auto *LI = cast<LoadInst>(SI->getValueOperand()); 3754 // If it was completely unsplittable, then we're done, 3755 // and this store can't be pre-split. 
                        if (UnsplittableLoads.count(LI))
                          return true;

                        auto LoadOffsetsI = SplitOffsetsMap.find(LI);
                        if (LoadOffsetsI == SplitOffsetsMap.end())
                          return false; // Unrelated loads are definitely safe.
                        auto &LoadOffsets = LoadOffsetsI->second;

                        // Now lookup the store's offsets.
                        auto &StoreOffsets = SplitOffsetsMap[SI];

                        // If the relative offsets of each split in the load and
                        // store match exactly, then we can split them and we
                        // don't need to remove them here.
                        if (LoadOffsets.Splits == StoreOffsets.Splits)
                          return false;

                        LLVM_DEBUG(
                            dbgs()
                            << " Mismatched splits for load and store:\n"
                            << " " << *LI << "\n"
                            << " " << *SI << "\n");

                        // We've found a store and load that we need to split
                        // with mismatched relative splits. Just give up on them
                        // and remove both instructions from our list of
                        // candidates.
                        UnsplittableLoads.insert(LI);
                        return true;
                      }),
      Stores.end());
  // Now we have to go *back* through all the stores, because a later store may
  // have caused an earlier store's load to become unsplittable and if it is
  // unsplittable for the later store, then we can't rely on it being split in
  // the earlier store either.
  Stores.erase(llvm::remove_if(Stores,
                               [&UnsplittableLoads](StoreInst *SI) {
                                 auto *LI =
                                     cast<LoadInst>(SI->getValueOperand());
                                 return UnsplittableLoads.count(LI);
                               }),
               Stores.end());
  // Once we've established all the loads that can't be split for some reason,
  // filter out any that made it into our list.
  Loads.erase(llvm::remove_if(Loads,
                              [&UnsplittableLoads](LoadInst *LI) {
                                return UnsplittableLoads.count(LI);
                              }),
              Loads.end());

  // If no loads or stores are left, there is no pre-splitting to be done for
  // this alloca.
  if (Loads.empty() && Stores.empty())
    return false;

  // From here on, we can't fail and will be building new accesses, so rig up
  // an IR builder.
  IRBuilderTy IRB(&AI);

  // Collect the new slices which we will merge into the alloca slices.
  SmallVector<Slice, 4> NewSlices;

  // Track any allocas we end up splitting loads and stores for so we can
  // iterate on them.
  SmallPtrSet<AllocaInst *, 4> ResplitPromotableAllocas;

  // At this point, we have collected all of the loads and stores we can
  // pre-split, and the specific splits needed for them. We actually do the
  // splitting in a specific order in order to handle when one of the loads is
  // the value operand to one of the stores.
  //
  // First, we rewrite all of the split loads, and just accumulate each split
  // load in a parallel structure. We also build the slices for them and append
  // them to the alloca slices.
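  // For example (illustrative only), an i64 load split at offset 4 becomes two
  // i32 loads of the low and high halves; each partial load gets its own new,
  // unsplittable slice appended below, and the original load and its slice are
  // killed once all of its stores have been handled.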
3830 SmallDenseMap<LoadInst *, std::vector<LoadInst *>, 1> SplitLoadsMap; 3831 std::vector<LoadInst *> SplitLoads; 3832 const DataLayout &DL = AI.getModule()->getDataLayout(); 3833 for (LoadInst *LI : Loads) { 3834 SplitLoads.clear(); 3835 3836 IntegerType *Ty = cast<IntegerType>(LI->getType()); 3837 uint64_t LoadSize = Ty->getBitWidth() / 8; 3838 assert(LoadSize > 0 && "Cannot have a zero-sized integer load!"); 3839 3840 auto &Offsets = SplitOffsetsMap[LI]; 3841 assert(LoadSize == Offsets.S->endOffset() - Offsets.S->beginOffset() && 3842 "Slice size should always match load size exactly!"); 3843 uint64_t BaseOffset = Offsets.S->beginOffset(); 3844 assert(BaseOffset + LoadSize > BaseOffset && 3845 "Cannot represent alloca access size using 64-bit integers!"); 3846 3847 Instruction *BasePtr = cast<Instruction>(LI->getPointerOperand()); 3848 IRB.SetInsertPoint(LI); 3849 3850 LLVM_DEBUG(dbgs() << " Splitting load: " << *LI << "\n"); 3851 3852 uint64_t PartOffset = 0, PartSize = Offsets.Splits.front(); 3853 int Idx = 0, Size = Offsets.Splits.size(); 3854 for (;;) { 3855 auto *PartTy = Type::getIntNTy(Ty->getContext(), PartSize * 8); 3856 auto AS = LI->getPointerAddressSpace(); 3857 auto *PartPtrTy = PartTy->getPointerTo(AS); 3858 LoadInst *PLoad = IRB.CreateAlignedLoad( 3859 PartTy, 3860 getAdjustedPtr(IRB, DL, BasePtr, 3861 APInt(DL.getIndexSizeInBits(AS), PartOffset), 3862 PartPtrTy, BasePtr->getName() + "."), 3863 getAdjustedAlignment(LI, PartOffset, DL).value(), 3864 /*IsVolatile*/ false, LI->getName()); 3865 PLoad->copyMetadata(*LI, {LLVMContext::MD_mem_parallel_loop_access, 3866 LLVMContext::MD_access_group}); 3867 3868 // Append this load onto the list of split loads so we can find it later 3869 // to rewrite the stores. 3870 SplitLoads.push_back(PLoad); 3871 3872 // Now build a new slice for the alloca. 3873 NewSlices.push_back( 3874 Slice(BaseOffset + PartOffset, BaseOffset + PartOffset + PartSize, 3875 &PLoad->getOperandUse(PLoad->getPointerOperandIndex()), 3876 /*IsSplittable*/ false)); 3877 LLVM_DEBUG(dbgs() << " new slice [" << NewSlices.back().beginOffset() 3878 << ", " << NewSlices.back().endOffset() 3879 << "): " << *PLoad << "\n"); 3880 3881 // See if we've handled all the splits. 3882 if (Idx >= Size) 3883 break; 3884 3885 // Setup the next partition. 3886 PartOffset = Offsets.Splits[Idx]; 3887 ++Idx; 3888 PartSize = (Idx < Size ? Offsets.Splits[Idx] : LoadSize) - PartOffset; 3889 } 3890 3891 // Now that we have the split loads, do the slow walk over all uses of the 3892 // load and rewrite them as split stores, or save the split loads to use 3893 // below if the store is going to be split there anyways. 3894 bool DeferredStores = false; 3895 for (User *LU : LI->users()) { 3896 StoreInst *SI = cast<StoreInst>(LU); 3897 if (!Stores.empty() && SplitOffsetsMap.count(SI)) { 3898 DeferredStores = true; 3899 LLVM_DEBUG(dbgs() << " Deferred splitting of store: " << *SI 3900 << "\n"); 3901 continue; 3902 } 3903 3904 Value *StoreBasePtr = SI->getPointerOperand(); 3905 IRB.SetInsertPoint(SI); 3906 3907 LLVM_DEBUG(dbgs() << " Splitting store of load: " << *SI << "\n"); 3908 3909 for (int Idx = 0, Size = SplitLoads.size(); Idx < Size; ++Idx) { 3910 LoadInst *PLoad = SplitLoads[Idx]; 3911 uint64_t PartOffset = Idx == 0 ? 
0 : Offsets.Splits[Idx - 1]; 3912 auto *PartPtrTy = 3913 PLoad->getType()->getPointerTo(SI->getPointerAddressSpace()); 3914 3915 auto AS = SI->getPointerAddressSpace(); 3916 StoreInst *PStore = IRB.CreateAlignedStore( 3917 PLoad, 3918 getAdjustedPtr(IRB, DL, StoreBasePtr, 3919 APInt(DL.getIndexSizeInBits(AS), PartOffset), 3920 PartPtrTy, StoreBasePtr->getName() + "."), 3921 getAdjustedAlignment(SI, PartOffset, DL).value(), 3922 /*IsVolatile*/ false); 3923 PStore->copyMetadata(*LI, {LLVMContext::MD_mem_parallel_loop_access, 3924 LLVMContext::MD_access_group}); 3925 LLVM_DEBUG(dbgs() << " +" << PartOffset << ":" << *PStore << "\n"); 3926 } 3927 3928 // We want to immediately iterate on any allocas impacted by splitting 3929 // this store, and we have to track any promotable alloca (indicated by 3930 // a direct store) as needing to be resplit because it is no longer 3931 // promotable. 3932 if (AllocaInst *OtherAI = dyn_cast<AllocaInst>(StoreBasePtr)) { 3933 ResplitPromotableAllocas.insert(OtherAI); 3934 Worklist.insert(OtherAI); 3935 } else if (AllocaInst *OtherAI = dyn_cast<AllocaInst>( 3936 StoreBasePtr->stripInBoundsOffsets())) { 3937 Worklist.insert(OtherAI); 3938 } 3939 3940 // Mark the original store as dead. 3941 DeadInsts.insert(SI); 3942 } 3943 3944 // Save the split loads if there are deferred stores among the users. 3945 if (DeferredStores) 3946 SplitLoadsMap.insert(std::make_pair(LI, std::move(SplitLoads))); 3947 3948 // Mark the original load as dead and kill the original slice. 3949 DeadInsts.insert(LI); 3950 Offsets.S->kill(); 3951 } 3952 3953 // Second, we rewrite all of the split stores. At this point, we know that 3954 // all loads from this alloca have been split already. For stores of such 3955 // loads, we can simply look up the pre-existing split loads. For stores of 3956 // other loads, we split those loads first and then write split stores of 3957 // them. 3958 for (StoreInst *SI : Stores) { 3959 auto *LI = cast<LoadInst>(SI->getValueOperand()); 3960 IntegerType *Ty = cast<IntegerType>(LI->getType()); 3961 uint64_t StoreSize = Ty->getBitWidth() / 8; 3962 assert(StoreSize > 0 && "Cannot have a zero-sized integer store!"); 3963 3964 auto &Offsets = SplitOffsetsMap[SI]; 3965 assert(StoreSize == Offsets.S->endOffset() - Offsets.S->beginOffset() && 3966 "Slice size should always match load size exactly!"); 3967 uint64_t BaseOffset = Offsets.S->beginOffset(); 3968 assert(BaseOffset + StoreSize > BaseOffset && 3969 "Cannot represent alloca access size using 64-bit integers!"); 3970 3971 Value *LoadBasePtr = LI->getPointerOperand(); 3972 Instruction *StoreBasePtr = cast<Instruction>(SI->getPointerOperand()); 3973 3974 LLVM_DEBUG(dbgs() << " Splitting store: " << *SI << "\n"); 3975 3976 // Check whether we have an already split load. 
3977 auto SplitLoadsMapI = SplitLoadsMap.find(LI); 3978 std::vector<LoadInst *> *SplitLoads = nullptr; 3979 if (SplitLoadsMapI != SplitLoadsMap.end()) { 3980 SplitLoads = &SplitLoadsMapI->second; 3981 assert(SplitLoads->size() == Offsets.Splits.size() + 1 && 3982 "Too few split loads for the number of splits in the store!"); 3983 } else { 3984 LLVM_DEBUG(dbgs() << " of load: " << *LI << "\n"); 3985 } 3986 3987 uint64_t PartOffset = 0, PartSize = Offsets.Splits.front(); 3988 int Idx = 0, Size = Offsets.Splits.size(); 3989 for (;;) { 3990 auto *PartTy = Type::getIntNTy(Ty->getContext(), PartSize * 8); 3991 auto *LoadPartPtrTy = PartTy->getPointerTo(LI->getPointerAddressSpace()); 3992 auto *StorePartPtrTy = PartTy->getPointerTo(SI->getPointerAddressSpace()); 3993 3994 // Either lookup a split load or create one. 3995 LoadInst *PLoad; 3996 if (SplitLoads) { 3997 PLoad = (*SplitLoads)[Idx]; 3998 } else { 3999 IRB.SetInsertPoint(LI); 4000 auto AS = LI->getPointerAddressSpace(); 4001 PLoad = IRB.CreateAlignedLoad( 4002 PartTy, 4003 getAdjustedPtr(IRB, DL, LoadBasePtr, 4004 APInt(DL.getIndexSizeInBits(AS), PartOffset), 4005 LoadPartPtrTy, LoadBasePtr->getName() + "."), 4006 getAdjustedAlignment(LI, PartOffset, DL).value(), 4007 /*IsVolatile*/ false, LI->getName()); 4008 } 4009 4010 // And store this partition. 4011 IRB.SetInsertPoint(SI); 4012 auto AS = SI->getPointerAddressSpace(); 4013 StoreInst *PStore = IRB.CreateAlignedStore( 4014 PLoad, 4015 getAdjustedPtr(IRB, DL, StoreBasePtr, 4016 APInt(DL.getIndexSizeInBits(AS), PartOffset), 4017 StorePartPtrTy, StoreBasePtr->getName() + "."), 4018 getAdjustedAlignment(SI, PartOffset, DL).value(), 4019 /*IsVolatile*/ false); 4020 4021 // Now build a new slice for the alloca. 4022 NewSlices.push_back( 4023 Slice(BaseOffset + PartOffset, BaseOffset + PartOffset + PartSize, 4024 &PStore->getOperandUse(PStore->getPointerOperandIndex()), 4025 /*IsSplittable*/ false)); 4026 LLVM_DEBUG(dbgs() << " new slice [" << NewSlices.back().beginOffset() 4027 << ", " << NewSlices.back().endOffset() 4028 << "): " << *PStore << "\n"); 4029 if (!SplitLoads) { 4030 LLVM_DEBUG(dbgs() << " of split load: " << *PLoad << "\n"); 4031 } 4032 4033 // See if we've finished all the splits. 4034 if (Idx >= Size) 4035 break; 4036 4037 // Setup the next partition. 4038 PartOffset = Offsets.Splits[Idx]; 4039 ++Idx; 4040 PartSize = (Idx < Size ? Offsets.Splits[Idx] : StoreSize) - PartOffset; 4041 } 4042 4043 // We want to immediately iterate on any allocas impacted by splitting 4044 // this load, which is only relevant if it isn't a load of this alloca and 4045 // thus we didn't already split the loads above. We also have to keep track 4046 // of any promotable allocas we split loads on as they can no longer be 4047 // promoted. 4048 if (!SplitLoads) { 4049 if (AllocaInst *OtherAI = dyn_cast<AllocaInst>(LoadBasePtr)) { 4050 assert(OtherAI != &AI && "We can't re-split our own alloca!"); 4051 ResplitPromotableAllocas.insert(OtherAI); 4052 Worklist.insert(OtherAI); 4053 } else if (AllocaInst *OtherAI = dyn_cast<AllocaInst>( 4054 LoadBasePtr->stripInBoundsOffsets())) { 4055 assert(OtherAI != &AI && "We can't re-split our own alloca!"); 4056 Worklist.insert(OtherAI); 4057 } 4058 } 4059 4060 // Mark the original store as dead now that we've split it up and kill its 4061 // slice. Note that we leave the original load in place unless this store 4062 // was its only use. It may in turn be split up if it is an alloca load 4063 // for some other alloca, but it may be a normal load. 
    // This may introduce
    // redundant loads, but where those can be merged the rest of the optimizer
    // should handle the merging, and this uncovers SSA splits, which is more
    // important. In practice, the original loads will almost always be fully
    // split and removed eventually, and the splits will be merged by any
    // trivial CSE, including instcombine.
    if (LI->hasOneUse()) {
      assert(*LI->user_begin() == SI && "Single use isn't this store!");
      DeadInsts.insert(LI);
    }
    DeadInsts.insert(SI);
    Offsets.S->kill();
  }

  // Remove the killed slices that have been pre-split.
  AS.erase(llvm::remove_if(AS, [](const Slice &S) { return S.isDead(); }),
           AS.end());

  // Insert our new slices. This will sort and merge them into the sorted
  // sequence.
  AS.insert(NewSlices);

  LLVM_DEBUG(dbgs() << " Pre-split slices:\n");
#ifndef NDEBUG
  for (auto I = AS.begin(), E = AS.end(); I != E; ++I)
    LLVM_DEBUG(AS.print(dbgs(), I, " "));
#endif

  // Finally, don't try to promote any allocas that now require re-splitting.
  // They have already been added to the worklist above.
  PromotableAllocas.erase(
      llvm::remove_if(
          PromotableAllocas,
          [&](AllocaInst *AI) { return ResplitPromotableAllocas.count(AI); }),
      PromotableAllocas.end());

  return true;
}

/// Rewrite an alloca partition's users.
///
/// This routine drives both of the rewriting goals of the SROA pass. It tries
/// to rewrite uses of an alloca partition to be conducive to SSA value
/// promotion. If the partition needs a new, more refined alloca, this will
/// build that new alloca, preserving as much type information as possible, and
/// rewrite the uses of the old alloca to point at the new one and have the
/// appropriate new offsets. It also evaluates how successful the rewrite was
/// at enabling promotion and, if it was successful, queues the alloca to be
/// promoted.
AllocaInst *SROA::rewritePartition(AllocaInst &AI, AllocaSlices &AS,
                                   Partition &P) {
  // Try to compute a friendly type for this partition of the alloca. This
  // won't always succeed, in which case we fall back to a legal integer type
  // or an i8 array of an appropriate size.
  Type *SliceTy = nullptr;
  const DataLayout &DL = AI.getModule()->getDataLayout();
  if (Type *CommonUseTy = findCommonType(P.begin(), P.end(), P.endOffset()))
    if (DL.getTypeAllocSize(CommonUseTy) >= P.size())
      SliceTy = CommonUseTy;
  if (!SliceTy)
    if (Type *TypePartitionTy = getTypePartition(DL, AI.getAllocatedType(),
                                                 P.beginOffset(), P.size()))
      SliceTy = TypePartitionTy;
  if ((!SliceTy || (SliceTy->isArrayTy() &&
                    SliceTy->getArrayElementType()->isIntegerTy())) &&
      DL.isLegalInteger(P.size() * 8))
    SliceTy = Type::getIntNTy(*C, P.size() * 8);
  if (!SliceTy)
    SliceTy = ArrayType::get(Type::getInt8Ty(*C), P.size());
  assert(DL.getTypeAllocSize(SliceTy) >= P.size());

  bool IsIntegerPromotable = isIntegerWideningViable(P, SliceTy, DL);

  VectorType *VecTy =
      IsIntegerPromotable ? nullptr : isVectorPromotionViable(P, DL);
  if (VecTy)
    SliceTy = VecTy;

  // Check for the case where we're going to rewrite to a new alloca of the
  // exact same type as the original, and with the same access offsets.
  // In that case, re-use the existing alloca, but still run through the
  // rewriter to perform phi and select speculation.
  // P.beginOffset() can be non-zero even with the same type in a case with
  // out-of-bounds access (e.g. @PR35657 function in SROA/basictest.ll).
  AllocaInst *NewAI;
  if (SliceTy == AI.getAllocatedType() && P.beginOffset() == 0) {
    NewAI = &AI;
    // FIXME: We should be able to bail at this point with "nothing changed".
    // FIXME: We might want to defer PHI speculation until after here.
    // FIXME: return nullptr;
  } else {
    // If alignment is unspecified we fall back on the one required by the ABI
    // for this type. We also make sure the alignment is compatible with
    // P.beginOffset().
    const Align Alignment = commonAlignment(
        DL.getValueOrABITypeAlignment(MaybeAlign(AI.getAlignment()),
                                      AI.getAllocatedType()),
        P.beginOffset());
    // If we will get at least this much alignment from the type alone, leave
    // the alloca's alignment unconstrained.
    const bool IsUnconstrained = Alignment <= DL.getABITypeAlignment(SliceTy);
    NewAI = new AllocaInst(
        SliceTy, AI.getType()->getAddressSpace(), nullptr,
        IsUnconstrained ? MaybeAlign() : Alignment,
        AI.getName() + ".sroa." + Twine(P.begin() - AS.begin()), &AI);
    // Copy the old AI debug location over to the new one.
    NewAI->setDebugLoc(AI.getDebugLoc());
    ++NumNewAllocas;
  }

  LLVM_DEBUG(dbgs() << "Rewriting alloca partition "
                    << "[" << P.beginOffset() << "," << P.endOffset()
                    << ") to: " << *NewAI << "\n");

  // Track the high watermark on the worklist as it is only relevant for
  // promoted allocas. We will reset it to this point if the alloca is not in
  // fact scheduled for promotion.
  unsigned PPWOldSize = PostPromotionWorklist.size();
  unsigned NumUses = 0;
  SmallSetVector<PHINode *, 8> PHIUsers;
  SmallSetVector<SelectInst *, 8> SelectUsers;

  AllocaSliceRewriter Rewriter(DL, AS, *this, AI, *NewAI, P.beginOffset(),
                               P.endOffset(), IsIntegerPromotable, VecTy,
                               PHIUsers, SelectUsers);
  bool Promotable = true;
  for (Slice *S : P.splitSliceTails()) {
    Promotable &= Rewriter.visit(S);
    ++NumUses;
  }
  for (Slice &S : P) {
    Promotable &= Rewriter.visit(&S);
    ++NumUses;
  }

  NumAllocaPartitionUses += NumUses;
  MaxUsesPerAllocaPartition.updateMax(NumUses);

  // Now that we've processed all the slices in the new partition, check if any
  // PHIs or Selects would block promotion.
  for (PHINode *PHI : PHIUsers)
    if (!isSafePHIToSpeculate(*PHI)) {
      Promotable = false;
      PHIUsers.clear();
      SelectUsers.clear();
      break;
    }

  for (SelectInst *Sel : SelectUsers)
    if (!isSafeSelectToSpeculate(*Sel)) {
      Promotable = false;
      PHIUsers.clear();
      SelectUsers.clear();
      break;
    }

  if (Promotable) {
    if (PHIUsers.empty() && SelectUsers.empty()) {
      // Promote the alloca.
      PromotableAllocas.push_back(NewAI);
    } else {
      // If we have either PHIs or Selects to speculate, add them to those
      // worklists and re-queue the new alloca so that we promote it on the
      // next iteration.
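      // For example (illustrative only), a select between two pointers into
      // the rewritten alloca that feeds a load is later turned by
      // speculateSelectInstLoads into two loads and a select of the loaded
      // values, after which the alloca itself can be promoted.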
      for (PHINode *PHIUser : PHIUsers)
        SpeculatablePHIs.insert(PHIUser);
      for (SelectInst *SelectUser : SelectUsers)
        SpeculatableSelects.insert(SelectUser);
      Worklist.insert(NewAI);
    }
  } else {
    // Drop any post-promotion work items if promotion didn't happen.
    while (PostPromotionWorklist.size() > PPWOldSize)
      PostPromotionWorklist.pop_back();

    // We couldn't promote and we didn't create a new partition, nothing
    // happened.
    if (NewAI == &AI)
      return nullptr;

    // If we can't promote the alloca, iterate on it to check for new
    // refinements exposed by splitting the current alloca. Don't iterate on an
    // alloca which didn't actually change and didn't get promoted.
    Worklist.insert(NewAI);
  }

  return NewAI;
}

/// Walks the slices of an alloca and forms partitions based on them,
/// rewriting each of their uses.
bool SROA::splitAlloca(AllocaInst &AI, AllocaSlices &AS) {
  if (AS.begin() == AS.end())
    return false;

  unsigned NumPartitions = 0;
  bool Changed = false;
  const DataLayout &DL = AI.getModule()->getDataLayout();

  // First try to pre-split loads and stores.
  Changed |= presplitLoadsAndStores(AI, AS);

  // Now that we have identified any pre-splitting opportunities,
  // mark loads and stores unsplittable except for the following case.
  // We leave a slice splittable if all other slices are disjoint or fully
  // included in the slice, such as whole-alloca loads and stores.
  // If we fail to split these during pre-splitting, we want to force them
  // to be rewritten into a partition.
  bool IsSorted = true;

  uint64_t AllocaSize = DL.getTypeAllocSize(AI.getAllocatedType());
  const uint64_t MaxBitVectorSize = 1024;
  if (AllocaSize <= MaxBitVectorSize) {
    // If a byte boundary is included in any load or store, a slice starting or
    // ending at the boundary is not splittable.
    SmallBitVector SplittableOffset(AllocaSize + 1, true);
    for (Slice &S : AS)
      for (unsigned O = S.beginOffset() + 1;
           O < S.endOffset() && O < AllocaSize; O++)
        SplittableOffset.reset(O);

    for (Slice &S : AS) {
      if (!S.isSplittable())
        continue;

      if ((S.beginOffset() > AllocaSize || SplittableOffset[S.beginOffset()]) &&
          (S.endOffset() > AllocaSize || SplittableOffset[S.endOffset()]))
        continue;

      if (isa<LoadInst>(S.getUse()->getUser()) ||
          isa<StoreInst>(S.getUse()->getUser())) {
        S.makeUnsplittable();
        IsSorted = false;
      }
    }
  } else {
    // We only allow whole-alloca splittable loads and stores
    // for a large alloca to avoid creating too large a BitVector.
    for (Slice &S : AS) {
      if (!S.isSplittable())
        continue;

      if (S.beginOffset() == 0 && S.endOffset() >= AllocaSize)
        continue;

      if (isa<LoadInst>(S.getUse()->getUser()) ||
          isa<StoreInst>(S.getUse()->getUser())) {
        S.makeUnsplittable();
        IsSorted = false;
      }
    }
  }

  if (!IsSorted)
    llvm::sort(AS);

  /// Describes the allocas introduced by rewritePartition in order to migrate
  /// the debug info.
  struct Fragment {
    AllocaInst *Alloca;
    uint64_t Offset;
    uint64_t Size;
    Fragment(AllocaInst *AI, uint64_t O, uint64_t S)
        : Alloca(AI), Offset(O), Size(S) {}
  };
  SmallVector<Fragment, 4> Fragments;

  // Rewrite each partition.
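  // For example (illustrative only), a 16-byte alloca split into two 8-byte
  // partitions yields two new allocas; each records a Fragment below so that
  // the original dbg.declare can be recreated with DW_OP_LLVM_fragment
  // expressions covering bits [0, 64) and [64, 128) of the variable.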
4332 for (auto &P : AS.partitions()) { 4333 if (AllocaInst *NewAI = rewritePartition(AI, AS, P)) { 4334 Changed = true; 4335 if (NewAI != &AI) { 4336 uint64_t SizeOfByte = 8; 4337 uint64_t AllocaSize = DL.getTypeSizeInBits(NewAI->getAllocatedType()); 4338 // Don't include any padding. 4339 uint64_t Size = std::min(AllocaSize, P.size() * SizeOfByte); 4340 Fragments.push_back(Fragment(NewAI, P.beginOffset() * SizeOfByte, Size)); 4341 } 4342 } 4343 ++NumPartitions; 4344 } 4345 4346 NumAllocaPartitions += NumPartitions; 4347 MaxPartitionsPerAlloca.updateMax(NumPartitions); 4348 4349 // Migrate debug information from the old alloca to the new alloca(s) 4350 // and the individual partitions. 4351 TinyPtrVector<DbgVariableIntrinsic *> DbgDeclares = FindDbgAddrUses(&AI); 4352 if (!DbgDeclares.empty()) { 4353 auto *Var = DbgDeclares.front()->getVariable(); 4354 auto *Expr = DbgDeclares.front()->getExpression(); 4355 auto VarSize = Var->getSizeInBits(); 4356 DIBuilder DIB(*AI.getModule(), /*AllowUnresolved*/ false); 4357 uint64_t AllocaSize = DL.getTypeSizeInBits(AI.getAllocatedType()); 4358 for (auto Fragment : Fragments) { 4359 // Create a fragment expression describing the new partition or reuse AI's 4360 // expression if there is only one partition. 4361 auto *FragmentExpr = Expr; 4362 if (Fragment.Size < AllocaSize || Expr->isFragment()) { 4363 // If this alloca is already a scalar replacement of a larger aggregate, 4364 // Fragment.Offset describes the offset inside the scalar. 4365 auto ExprFragment = Expr->getFragmentInfo(); 4366 uint64_t Offset = ExprFragment ? ExprFragment->OffsetInBits : 0; 4367 uint64_t Start = Offset + Fragment.Offset; 4368 uint64_t Size = Fragment.Size; 4369 if (ExprFragment) { 4370 uint64_t AbsEnd = 4371 ExprFragment->OffsetInBits + ExprFragment->SizeInBits; 4372 if (Start >= AbsEnd) 4373 // No need to describe a SROAed padding. 4374 continue; 4375 Size = std::min(Size, AbsEnd - Start); 4376 } 4377 // The new, smaller fragment is stenciled out from the old fragment. 4378 if (auto OrigFragment = FragmentExpr->getFragmentInfo()) { 4379 assert(Start >= OrigFragment->OffsetInBits && 4380 "new fragment is outside of original fragment"); 4381 Start -= OrigFragment->OffsetInBits; 4382 } 4383 4384 // The alloca may be larger than the variable. 4385 if (VarSize) { 4386 if (Size > *VarSize) 4387 Size = *VarSize; 4388 if (Size == 0 || Start + Size > *VarSize) 4389 continue; 4390 } 4391 4392 // Avoid creating a fragment expression that covers the entire variable. 4393 if (!VarSize || *VarSize != Size) { 4394 if (auto E = 4395 DIExpression::createFragmentExpression(Expr, Start, Size)) 4396 FragmentExpr = *E; 4397 else 4398 continue; 4399 } 4400 } 4401 4402 // Remove any existing intrinsics describing the same alloca. 4403 for (DbgVariableIntrinsic *OldDII : FindDbgAddrUses(Fragment.Alloca)) 4404 OldDII->eraseFromParent(); 4405 4406 DIB.insertDeclare(Fragment.Alloca, Var, FragmentExpr, 4407 DbgDeclares.front()->getDebugLoc(), &AI); 4408 } 4409 } 4410 return Changed; 4411 } 4412 4413 /// Clobber a use with undef, deleting the used value if it becomes dead. 4414 void SROA::clobberUse(Use &U) { 4415 Value *OldV = U; 4416 // Replace the use with an undef value. 4417 U = UndefValue::get(OldV->getType()); 4418 4419 // Check for this making an instruction dead. We have to garbage collect 4420 // all the dead instructions to ensure the uses of any alloca end up being 4421 // minimal. 
4422 if (Instruction *OldI = dyn_cast<Instruction>(OldV)) 4423 if (isInstructionTriviallyDead(OldI)) { 4424 DeadInsts.insert(OldI); 4425 } 4426 } 4427 4428 /// Analyze an alloca for SROA. 4429 /// 4430 /// This analyzes the alloca to ensure we can reason about it, builds 4431 /// the slices of the alloca, and then hands it off to be split and 4432 /// rewritten as needed. 4433 bool SROA::runOnAlloca(AllocaInst &AI) { 4434 LLVM_DEBUG(dbgs() << "SROA alloca: " << AI << "\n"); 4435 ++NumAllocasAnalyzed; 4436 4437 // Special case dead allocas, as they're trivial. 4438 if (AI.use_empty()) { 4439 AI.eraseFromParent(); 4440 return true; 4441 } 4442 const DataLayout &DL = AI.getModule()->getDataLayout(); 4443 4444 // Skip alloca forms that this analysis can't handle. 4445 if (AI.isArrayAllocation() || !AI.getAllocatedType()->isSized() || 4446 DL.getTypeAllocSize(AI.getAllocatedType()) == 0) 4447 return false; 4448 4449 bool Changed = false; 4450 4451 // First, split any FCA loads and stores touching this alloca to promote 4452 // better splitting and promotion opportunities. 4453 AggLoadStoreRewriter AggRewriter(DL); 4454 Changed |= AggRewriter.rewrite(AI); 4455 4456 // Build the slices using a recursive instruction-visiting builder. 4457 AllocaSlices AS(DL, AI); 4458 LLVM_DEBUG(AS.print(dbgs())); 4459 if (AS.isEscaped()) 4460 return Changed; 4461 4462 // Delete all the dead users of this alloca before splitting and rewriting it. 4463 for (Instruction *DeadUser : AS.getDeadUsers()) { 4464 // Free up everything used by this instruction. 4465 for (Use &DeadOp : DeadUser->operands()) 4466 clobberUse(DeadOp); 4467 4468 // Now replace the uses of this instruction. 4469 DeadUser->replaceAllUsesWith(UndefValue::get(DeadUser->getType())); 4470 4471 // And mark it for deletion. 4472 DeadInsts.insert(DeadUser); 4473 Changed = true; 4474 } 4475 for (Use *DeadOp : AS.getDeadOperands()) { 4476 clobberUse(*DeadOp); 4477 Changed = true; 4478 } 4479 4480 // No slices to split. Leave the dead alloca for a later pass to clean up. 4481 if (AS.begin() == AS.end()) 4482 return Changed; 4483 4484 Changed |= splitAlloca(AI, AS); 4485 4486 LLVM_DEBUG(dbgs() << " Speculating PHIs\n"); 4487 while (!SpeculatablePHIs.empty()) 4488 speculatePHINodeLoads(*SpeculatablePHIs.pop_back_val()); 4489 4490 LLVM_DEBUG(dbgs() << " Speculating Selects\n"); 4491 while (!SpeculatableSelects.empty()) 4492 speculateSelectInstLoads(*SpeculatableSelects.pop_back_val()); 4493 4494 return Changed; 4495 } 4496 4497 /// Delete the dead instructions accumulated in this run. 4498 /// 4499 /// Recursively deletes the dead instructions we've accumulated. This is done 4500 /// at the very end to maximize locality of the recursive delete and to 4501 /// minimize the problems of invalidated instruction pointers as such pointers 4502 /// are used heavily in the intermediate stages of the algorithm. 4503 /// 4504 /// We also record the alloca instructions deleted here so that they aren't 4505 /// subsequently handed to mem2reg to promote. 4506 bool SROA::deleteDeadInstructions( 4507 SmallPtrSetImpl<AllocaInst *> &DeletedAllocas) { 4508 bool Changed = false; 4509 while (!DeadInsts.empty()) { 4510 Instruction *I = DeadInsts.pop_back_val(); 4511 LLVM_DEBUG(dbgs() << "Deleting dead instruction: " << *I << "\n"); 4512 4513 // If the instruction is an alloca, find the possible dbg.declare connected 4514 // to it, and remove it too. We must do this before calling RAUW or we will 4515 // not be able to find it. 

/// Delete the dead instructions accumulated in this run.
///
/// Recursively deletes the dead instructions we've accumulated. This is done
/// at the very end to maximize locality of the recursive delete and to
/// minimize the problems of invalidated instruction pointers as such pointers
/// are used heavily in the intermediate stages of the algorithm.
///
/// We also record the alloca instructions deleted here so that they aren't
/// subsequently handed to mem2reg to promote.
bool SROA::deleteDeadInstructions(
    SmallPtrSetImpl<AllocaInst *> &DeletedAllocas) {
  bool Changed = false;
  while (!DeadInsts.empty()) {
    Instruction *I = DeadInsts.pop_back_val();
    LLVM_DEBUG(dbgs() << "Deleting dead instruction: " << *I << "\n");

    // If the instruction is an alloca, find the possible dbg.declare connected
    // to it, and remove it too. We must do this before calling RAUW or we will
    // not be able to find it.
    if (AllocaInst *AI = dyn_cast<AllocaInst>(I)) {
      DeletedAllocas.insert(AI);
      for (DbgVariableIntrinsic *OldDII : FindDbgAddrUses(AI))
        OldDII->eraseFromParent();
    }

    I->replaceAllUsesWith(UndefValue::get(I->getType()));

    for (Use &Operand : I->operands())
      if (Instruction *U = dyn_cast<Instruction>(Operand)) {
        // Zero out the operand and see if it becomes trivially dead.
        Operand = nullptr;
        if (isInstructionTriviallyDead(U))
          DeadInsts.insert(U);
      }

    ++NumDeleted;
    I->eraseFromParent();
    Changed = true;
  }
  return Changed;
}

/// Promote the allocas, using the best available technique.
///
/// This attempts to promote whatever allocas have been identified as viable in
/// the PromotableAllocas list. If that list is empty, there is nothing to do.
/// This function returns whether any promotion occurred.
bool SROA::promoteAllocas(Function &F) {
  if (PromotableAllocas.empty())
    return false;

  NumPromoted += PromotableAllocas.size();

  LLVM_DEBUG(dbgs() << "Promoting allocas with mem2reg...\n");
  PromoteMemToReg(PromotableAllocas, *DT, AC);
  PromotableAllocas.clear();
  return true;
}
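
// Rough sketch of what PromoteMemToReg achieves for a promotable alloca
// (hypothetical IR, shown only for orientation):
//
//   before:                          after:
//     %x = alloca i32                  ; alloca is erased
//     store i32 %a, i32* %x            ; store is erased
//     %v = load i32, i32* %x           ; uses of %v become uses of %a
//
// Loads and stores are rewritten into SSA values, with PHI nodes inserted
// where control flow merges, and the alloca itself is deleted.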

PreservedAnalyses SROA::runImpl(Function &F, DominatorTree &RunDT,
                                AssumptionCache &RunAC) {
  LLVM_DEBUG(dbgs() << "SROA function: " << F.getName() << "\n");
  C = &F.getContext();
  DT = &RunDT;
  AC = &RunAC;

  BasicBlock &EntryBB = F.getEntryBlock();
  for (BasicBlock::iterator I = EntryBB.begin(), E = std::prev(EntryBB.end());
       I != E; ++I) {
    if (AllocaInst *AI = dyn_cast<AllocaInst>(I))
      Worklist.insert(AI);
  }

  bool Changed = false;
  // A set of deleted alloca instruction pointers which should be removed from
  // the list of promotable allocas.
  SmallPtrSet<AllocaInst *, 4> DeletedAllocas;

  do {
    while (!Worklist.empty()) {
      Changed |= runOnAlloca(*Worklist.pop_back_val());
      Changed |= deleteDeadInstructions(DeletedAllocas);

      // Remove the deleted allocas from various lists so that we don't try to
      // continue processing them.
      if (!DeletedAllocas.empty()) {
        auto IsInSet = [&](AllocaInst *AI) { return DeletedAllocas.count(AI); };
        Worklist.remove_if(IsInSet);
        PostPromotionWorklist.remove_if(IsInSet);
        PromotableAllocas.erase(llvm::remove_if(PromotableAllocas, IsInSet),
                                PromotableAllocas.end());
        DeletedAllocas.clear();
      }
    }

    Changed |= promoteAllocas(F);

    Worklist = PostPromotionWorklist;
    PostPromotionWorklist.clear();
  } while (!Worklist.empty());

  if (!Changed)
    return PreservedAnalyses::all();

  PreservedAnalyses PA;
  PA.preserveSet<CFGAnalyses>();
  PA.preserve<GlobalsAA>();
  return PA;
}

PreservedAnalyses SROA::run(Function &F, FunctionAnalysisManager &AM) {
  return runImpl(F, AM.getResult<DominatorTreeAnalysis>(F),
                 AM.getResult<AssumptionAnalysis>(F));
}

/// A legacy pass for the legacy pass manager that wraps the \c SROA pass.
///
/// This is in the llvm namespace purely to allow it to be a friend of the \c
/// SROA pass.
class llvm::sroa::SROALegacyPass : public FunctionPass {
  /// The SROA implementation.
  SROA Impl;

public:
  static char ID;

  SROALegacyPass() : FunctionPass(ID) {
    initializeSROALegacyPassPass(*PassRegistry::getPassRegistry());
  }

  bool runOnFunction(Function &F) override {
    if (skipFunction(F))
      return false;

    auto PA = Impl.runImpl(
        F, getAnalysis<DominatorTreeWrapperPass>().getDomTree(),
        getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F));
    return !PA.areAllPreserved();
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<AssumptionCacheTracker>();
    AU.addRequired<DominatorTreeWrapperPass>();
    AU.addPreserved<GlobalsAAWrapperPass>();
    AU.setPreservesCFG();
  }

  StringRef getPassName() const override { return "SROA"; }
};

char SROALegacyPass::ID = 0;

FunctionPass *llvm::createSROAPass() { return new SROALegacyPass(); }

INITIALIZE_PASS_BEGIN(SROALegacyPass, "sroa",
                      "Scalar Replacement Of Aggregates", false, false)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_END(SROALegacyPass, "sroa", "Scalar Replacement Of Aggregates",
                    false, false)
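
// Sketch of how the pass can be exercised standalone (the "sroa" spelling
// matches the INITIALIZE_PASS registration above; exact opt invocations may
// vary between releases):
//
//   opt -sroa -S input.ll -o output.ll          (legacy pass manager)
//   opt -passes=sroa -S input.ll -o output.ll   (new pass manager)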