//===- MemCpyOptimizer.cpp - Optimize use of memcpy and friends -----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass performs various transformations related to eliminating memcpy
// calls, or transforming sets of stores into memsets.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Scalar/MemCpyOptimizer.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/MemoryDependenceAnalysis.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils/Local.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <utility>

using namespace llvm;

#define DEBUG_TYPE "memcpyopt"

STATISTIC(NumMemCpyInstr, "Number of memcpy instructions deleted");
STATISTIC(NumMemSetInfer, "Number of memsets inferred");
STATISTIC(NumMoveToCpy, "Number of memmoves converted to memcpy");
STATISTIC(NumCpyToSet, "Number of memcpys converted to memset");
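
/// Accumulate the constant byte offset implied by the GEP indices starting at
/// operand Idx, or report via VariableIdxFound that a non-constant index was
/// seen. For example, for "getelementptr [4 x i32], [4 x i32]* %P, i64 0,
/// i64 2" scanned from Idx == 1, the result is 0 * 16 + 2 * 4 == 8 bytes.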
static int64_t GetOffsetFromIndex(const GEPOperator *GEP, unsigned Idx,
                                  bool &VariableIdxFound,
                                  const DataLayout &DL) {
  // Skip over the first indices.
  gep_type_iterator GTI = gep_type_begin(GEP);
  for (unsigned i = 1; i != Idx; ++i, ++GTI)
    /*skip along*/;

  // Compute the offset implied by the rest of the indices.
  int64_t Offset = 0;
  for (unsigned i = Idx, e = GEP->getNumOperands(); i != e; ++i, ++GTI) {
    ConstantInt *OpC = dyn_cast<ConstantInt>(GEP->getOperand(i));
    if (!OpC)
      return VariableIdxFound = true;
    if (OpC->isZero()) continue; // No offset.

    // Handle struct indices, which add their field offset to the pointer.
    if (StructType *STy = GTI.getStructTypeOrNull()) {
      Offset += DL.getStructLayout(STy)->getElementOffset(OpC->getZExtValue());
      continue;
    }

    // Otherwise, we have a sequential type like an array or vector. Multiply
    // the index by the ElementSize.
    uint64_t Size = DL.getTypeAllocSize(GTI.getIndexedType());
    Offset += Size * OpC->getSExtValue();
  }

  return Offset;
}

/// Return true if Ptr2 is provably equal to Ptr1 plus a constant offset, and
/// return that constant offset. For example, Ptr1 might be &A[42] and Ptr2
/// might be &A[40]; in that case the offset is -8 (assuming 4-byte array
/// elements).
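/// Both pointers may also be GEPs off a common base with shared leading
/// (possibly variable) indices; e.g. for &A[i][4] and &A[i][1] with i32
/// elements, the offset is -12.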
static bool IsPointerOffset(Value *Ptr1, Value *Ptr2, int64_t &Offset,
                            const DataLayout &DL) {
  Ptr1 = Ptr1->stripPointerCasts();
  Ptr2 = Ptr2->stripPointerCasts();

  // Handle the trivial case first.
  if (Ptr1 == Ptr2) {
    Offset = 0;
    return true;
  }

  GEPOperator *GEP1 = dyn_cast<GEPOperator>(Ptr1);
  GEPOperator *GEP2 = dyn_cast<GEPOperator>(Ptr2);

  bool VariableIdxFound = false;

  // If one pointer is a GEP and the other isn't, then see if the GEP is a
  // constant offset from the base, as in "P" and "gep P, 1".
  if (GEP1 && !GEP2 && GEP1->getOperand(0)->stripPointerCasts() == Ptr2) {
    Offset = -GetOffsetFromIndex(GEP1, 1, VariableIdxFound, DL);
    return !VariableIdxFound;
  }

  if (GEP2 && !GEP1 && GEP2->getOperand(0)->stripPointerCasts() == Ptr1) {
    Offset = GetOffsetFromIndex(GEP2, 1, VariableIdxFound, DL);
    return !VariableIdxFound;
  }

  // Right now we handle the case when Ptr1/Ptr2 are both GEPs with an
  // identical base. After that base, they may have some number of common (and
  // potentially variable) indices. After that they have some constant offset,
  // which determines their offset from each other. We handle no other cases.
  if (!GEP1 || !GEP2 || GEP1->getOperand(0) != GEP2->getOperand(0))
    return false;

  // Skip any common indices and track the GEP types.
  unsigned Idx = 1;
  for (; Idx != GEP1->getNumOperands() && Idx != GEP2->getNumOperands(); ++Idx)
    if (GEP1->getOperand(Idx) != GEP2->getOperand(Idx))
      break;

  int64_t Offset1 = GetOffsetFromIndex(GEP1, Idx, VariableIdxFound, DL);
  int64_t Offset2 = GetOffsetFromIndex(GEP2, Idx, VariableIdxFound, DL);
  if (VariableIdxFound) return false;

  Offset = Offset2 - Offset1;
  return true;
}

namespace {

/// Represents a range of memset'd bytes with the ByteVal value.
/// This allows us to analyze stores like:
///   store 0 -> P+1
///   store 0 -> P+0
///   store 0 -> P+3
///   store 0 -> P+2
/// which sometimes happens with stores to arrays of structs etc. When we see
/// the first store, we make a range [1, 2). The second store extends the range
/// to [0, 2). The third makes a new range [2, 3). The fourth store joins the
/// two ranges into [0, 3), which is memset'able.
struct MemsetRange {
  // Start/End - A half-open range describing the span this range covers.
  // The range is closed at the start and open at the end: [Start, End).
  int64_t Start, End;

  /// StartPtr - The getelementptr instruction that points to the start of the
  /// range.
  Value *StartPtr;

  /// Alignment - The known alignment of the first store.
  unsigned Alignment;

  /// TheStores - The actual stores that make up this range.
  SmallVector<Instruction *, 16> TheStores;

  bool isProfitableToUseMemset(const DataLayout &DL) const;
};

} // end anonymous namespace

bool MemsetRange::isProfitableToUseMemset(const DataLayout &DL) const {
  // If we found 4 or more stores to merge, or 16 or more bytes, use memset.
  if (TheStores.size() >= 4 || End - Start >= 16) return true;

  // If there is nothing to merge, don't do anything.
  if (TheStores.size() < 2) return false;

  // If any of the stores are a memset, then it is always good to extend the
  // memset.
  for (Instruction *SI : TheStores)
    if (!isa<StoreInst>(SI))
      return true;

  // Assume that the code generator is capable of merging pairs of stores
  // together if it wants to.
  if (TheStores.size() == 2) return false;

  // If we have fewer than 8 stores, it can still be worthwhile to do this.
  // For example, merging 4 i8 stores into an i32 store is useful almost always.
  // However, merging 2 32-bit stores isn't useful on a 32-bit architecture (the
  // memset will be split into 2 32-bit stores anyway) and doing so can
  // pessimize the llvm optimizer.
  //
  // Since we don't have perfect knowledge here, make some assumptions: assume
  // the maximum GPR width is the same size as the largest legal integer
  // size. If so, check to see whether we will end up actually reducing the
  // number of stores used.
  unsigned Bytes = unsigned(End - Start);
  unsigned MaxIntSize = DL.getLargestLegalIntTypeSizeInBits() / 8;
  if (MaxIntSize == 0)
    MaxIntSize = 1;
  unsigned NumPointerStores = Bytes / MaxIntSize;

  // Assume the remaining bytes, if any, are done a byte at a time.
  unsigned NumByteStores = Bytes % MaxIntSize;

  // If we will reduce the # stores (according to this heuristic), do the
  // transformation. This encourages merging 4 x i8 -> i32 and 2 x i16 -> i32
  // etc.
  return TheStores.size() > NumPointerStores + NumByteStores;
}
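
// For example, on a target whose largest legal integer type is i64, an i32
// store followed by two adjacent i16 stores covers 8 contiguous bytes:
// NumPointerStores == 1, NumByteStores == 0, and 3 > 1, so the range is
// lowered to a memset. Three adjacent i8 stores (3 bytes) give 0 + 3, and
// 3 > 3 fails, so they are left alone.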

namespace {

class MemsetRanges {
  using range_iterator = SmallVectorImpl<MemsetRange>::iterator;

  /// A sorted list of the memset ranges.
  SmallVector<MemsetRange, 8> Ranges;

  const DataLayout &DL;

public:
  MemsetRanges(const DataLayout &DL) : DL(DL) {}

  using const_iterator = SmallVectorImpl<MemsetRange>::const_iterator;

  const_iterator begin() const { return Ranges.begin(); }
  const_iterator end() const { return Ranges.end(); }
  bool empty() const { return Ranges.empty(); }

  void addInst(int64_t OffsetFromFirst, Instruction *Inst) {
    if (StoreInst *SI = dyn_cast<StoreInst>(Inst))
      addStore(OffsetFromFirst, SI);
    else
      addMemSet(OffsetFromFirst, cast<MemSetInst>(Inst));
  }

  void addStore(int64_t OffsetFromFirst, StoreInst *SI) {
    int64_t StoreSize = DL.getTypeStoreSize(SI->getOperand(0)->getType());

    addRange(OffsetFromFirst, StoreSize,
             SI->getPointerOperand(), SI->getAlignment(), SI);
  }

  void addMemSet(int64_t OffsetFromFirst, MemSetInst *MSI) {
    int64_t Size = cast<ConstantInt>(MSI->getLength())->getZExtValue();
    addRange(OffsetFromFirst, Size, MSI->getDest(), MSI->getDestAlignment(),
             MSI);
  }

  void addRange(int64_t Start, int64_t Size, Value *Ptr,
                unsigned Alignment, Instruction *Inst);
};

} // end anonymous namespace

/// Add a new store to the MemsetRanges data structure. This adds a
/// new range for the specified store at the specified offset, merging into
/// existing ranges as appropriate.
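/// For example, given existing ranges [0, 8) and [12, 16), addRange(6, 4, ...)
/// first grows [0, 8) to [0, 10); a later addRange(10, 2, ...) then bridges
/// the gap and merges everything into the single range [0, 16).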
void MemsetRanges::addRange(int64_t Start, int64_t Size, Value *Ptr,
                            unsigned Alignment, Instruction *Inst) {
  int64_t End = Start + Size;

  range_iterator I = partition_point(
      Ranges, [=](const MemsetRange &O) { return O.End < Start; });

  // We now know that I == E, in which case we didn't find anything to merge
  // with, or that Start <= I->End. If End < I->Start or I == E, then we need
  // to insert a new range. Handle this now.
  if (I == Ranges.end() || End < I->Start) {
    MemsetRange &R = *Ranges.insert(I, MemsetRange());
    R.Start = Start;
    R.End = End;
    R.StartPtr = Ptr;
    R.Alignment = Alignment;
    R.TheStores.push_back(Inst);
    return;
  }

  // This store overlaps with I, add it.
  I->TheStores.push_back(Inst);

  // At this point, we may have an interval that completely contains our store.
  // If so, just add it to the interval and return.
  if (I->Start <= Start && I->End >= End)
    return;

  // Now we know that Start <= I->End and End >= I->Start so the range overlaps
  // but is not entirely contained within the range.

  // See if the store extends the start of the range. In this case, it couldn't
  // possibly cause it to join the prior range, because otherwise we would have
  // stopped on *it*.
  if (Start < I->Start) {
    I->Start = Start;
    I->StartPtr = Ptr;
    I->Alignment = Alignment;
  }

  // Now we know that Start <= I->End and Start >= I->Start (so the startpoint
  // is in or right at the end of I) and that End >= I->Start. Extend I out to
  // End.
  if (End > I->End) {
    I->End = End;
    range_iterator NextI = I;
    while (++NextI != Ranges.end() && End >= NextI->Start) {
      // Merge the range in.
      I->TheStores.append(NextI->TheStores.begin(), NextI->TheStores.end());
      if (NextI->End > I->End)
        I->End = NextI->End;
      Ranges.erase(NextI);
      NextI = I;
    }
  }
}

//===----------------------------------------------------------------------===//
//                         MemCpyOptLegacyPass Pass
//===----------------------------------------------------------------------===//

namespace {

class MemCpyOptLegacyPass : public FunctionPass {
  MemCpyOptPass Impl;

public:
  static char ID; // Pass identification, replacement for typeid

  MemCpyOptLegacyPass() : FunctionPass(ID) {
    initializeMemCpyOptLegacyPassPass(*PassRegistry::getPassRegistry());
  }

  bool runOnFunction(Function &F) override;

private:
  // This transformation requires dominator info.
  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    AU.addRequired<AssumptionCacheTracker>();
    AU.addRequired<DominatorTreeWrapperPass>();
    AU.addRequired<MemoryDependenceWrapperPass>();
    AU.addRequired<AAResultsWrapperPass>();
    AU.addRequired<TargetLibraryInfoWrapperPass>();
    AU.addPreserved<GlobalsAAWrapperPass>();
    AU.addPreserved<MemoryDependenceWrapperPass>();
  }
};

} // end anonymous namespace

char MemCpyOptLegacyPass::ID = 0;

/// The public interface to this file.
FunctionPass *llvm::createMemCpyOptPass() { return new MemCpyOptLegacyPass(); }

INITIALIZE_PASS_BEGIN(MemCpyOptLegacyPass, "memcpyopt", "MemCpy Optimization",
                      false, false)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(MemoryDependenceWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass)
INITIALIZE_PASS_END(MemCpyOptLegacyPass, "memcpyopt", "MemCpy Optimization",
                    false, false)

/// When scanning forward over instructions, we look for some other patterns to
/// fold away. In particular, this looks for stores to neighboring locations of
/// memory. If it sees enough consecutive ones, it attempts to merge them
/// together into a memcpy/memset.
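/// For example, four adjacent "store i8 0" instructions become a single
/// memset(p, 0, 4) once the range passes the profitability heuristic above.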
Instruction *MemCpyOptPass::tryMergingIntoMemset(Instruction *StartInst,
                                                 Value *StartPtr,
                                                 Value *ByteVal) {
  const DataLayout &DL = StartInst->getModule()->getDataLayout();

  // Okay, so we now have a single store of a splattable value. Scan to find
  // all subsequent stores of the same value at offsets from the same pointer.
  // Join these together into ranges, so we can decide whether contiguous
  // blocks are stored.
  MemsetRanges Ranges(DL);

  BasicBlock::iterator BI(StartInst);
  for (++BI; !BI->isTerminator(); ++BI) {
    if (!isa<StoreInst>(BI) && !isa<MemSetInst>(BI)) {
      // If the instruction is readnone, ignore it, otherwise bail out. We
      // don't even allow readonly here because we don't want something like:
      // A[1] = 2; strlen(A); A[2] = 2; -> memcpy(A, ...); strlen(A).
      if (BI->mayWriteToMemory() || BI->mayReadFromMemory())
        break;
      continue;
    }

    if (StoreInst *NextStore = dyn_cast<StoreInst>(BI)) {
      // If this is a store, see if we can merge it in.
      if (!NextStore->isSimple()) break;

      // Check to see if this stored value is of the same byte-splattable value.
      Value *StoredByte = isBytewiseValue(NextStore->getOperand(0), DL);
      if (isa<UndefValue>(ByteVal) && StoredByte)
        ByteVal = StoredByte;
      if (ByteVal != StoredByte)
        break;

      // Check to see if this store is to a constant offset from the start ptr.
      int64_t Offset;
      if (!IsPointerOffset(StartPtr, NextStore->getPointerOperand(), Offset,
                           DL))
        break;

      Ranges.addStore(Offset, NextStore);
    } else {
      MemSetInst *MSI = cast<MemSetInst>(BI);

      if (MSI->isVolatile() || ByteVal != MSI->getValue() ||
          !isa<ConstantInt>(MSI->getLength()))
        break;

      // Check to see if this store is to a constant offset from the start ptr.
      int64_t Offset;
      if (!IsPointerOffset(StartPtr, MSI->getDest(), Offset, DL))
        break;

      Ranges.addMemSet(Offset, MSI);
    }
  }

  // If we have no ranges, then we just had a single store with nothing that
  // could be merged in. This is a very common case of course.
  if (Ranges.empty())
    return nullptr;

  // If we had at least one store that could be merged in, add the starting
  // store as well. We try to avoid this unless there is at least something
  // interesting as a small compile-time optimization.
  Ranges.addInst(0, StartInst);

  // If we create any memsets, we put it right before the first instruction
  // that isn't part of the memset block. This ensures that the memset is
  // dominated by any addressing instruction needed by the start of the block.
  IRBuilder<> Builder(&*BI);

  // Now that we have full information about ranges, loop over the ranges and
  // emit memsets for anything big enough to be worthwhile.
  Instruction *AMemSet = nullptr;
  for (const MemsetRange &Range : Ranges) {
    if (Range.TheStores.size() == 1) continue;

    // If it is profitable to lower this range to memset, do so now.
    if (!Range.isProfitableToUseMemset(DL))
      continue;

    // Otherwise, we do want to transform this! Create a new memset.
    // Get the starting pointer of the block.
    StartPtr = Range.StartPtr;

    // Determine alignment.
    unsigned Alignment = Range.Alignment;
    if (Alignment == 0) {
      Type *EltType =
          cast<PointerType>(StartPtr->getType())->getElementType();
      Alignment = DL.getABITypeAlignment(EltType);
    }

    AMemSet = Builder.CreateMemSet(StartPtr, ByteVal, Range.End - Range.Start,
                                   Alignment);

    LLVM_DEBUG(dbgs() << "Replace stores:\n";
               for (Instruction *SI : Range.TheStores)
                 dbgs() << *SI << '\n';
               dbgs() << "With: " << *AMemSet << '\n');

    if (!Range.TheStores.empty())
      AMemSet->setDebugLoc(Range.TheStores[0]->getDebugLoc());

    // Zap all the stores.
    for (Instruction *SI : Range.TheStores) {
      MD->removeInstruction(SI);
      SI->eraseFromParent();
    }
    ++NumMemSetInfer;
  }

  return AMemSet;
}

static unsigned findStoreAlignment(const DataLayout &DL, const StoreInst *SI) {
  unsigned StoreAlign = SI->getAlignment();
  if (!StoreAlign)
    StoreAlign = DL.getABITypeAlignment(SI->getOperand(0)->getType());
  return StoreAlign;
}

static unsigned findLoadAlignment(const DataLayout &DL, const LoadInst *LI) {
  unsigned LoadAlign = LI->getAlignment();
  if (!LoadAlign)
    LoadAlign = DL.getABITypeAlignment(LI->getType());
  return LoadAlign;
}

static unsigned findCommonAlignment(const DataLayout &DL, const StoreInst *SI,
                                    const LoadInst *LI) {
  unsigned StoreAlign = findStoreAlignment(DL, SI);
  unsigned LoadAlign = findLoadAlignment(DL, LI);
  return MinAlign(StoreAlign, LoadAlign);
}

// This method tries to lift a store instruction before position P.
// It will lift the store and its arguments, plus anything else that may alias
// with them.
// The method returns true if it was successful.
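// For example, if the GEP computing SI's pointer operand sits between P and
// SI, it is lifted as well, so the store stays dominated by its address
// computation.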
static bool moveUp(AliasAnalysis &AA, StoreInst *SI, Instruction *P,
                   const LoadInst *LI) {
  // If the store aliases this position, bail out early.
  MemoryLocation StoreLoc = MemoryLocation::get(SI);
  if (isModOrRefSet(AA.getModRefInfo(P, StoreLoc)))
    return false;

  // Keep track of the arguments of all instructions we plan to lift
  // so we can make sure to lift them as well if appropriate.
  DenseSet<Instruction *> Args;
  if (auto *Ptr = dyn_cast<Instruction>(SI->getPointerOperand()))
    if (Ptr->getParent() == SI->getParent())
      Args.insert(Ptr);

  // Instructions to lift before P.
  SmallVector<Instruction *, 8> ToLift;

  // Memory locations of lifted instructions.
  SmallVector<MemoryLocation, 8> MemLocs{StoreLoc};

  // Lifted calls.
  SmallVector<const CallBase *, 8> Calls;

  const MemoryLocation LoadLoc = MemoryLocation::get(LI);

  for (auto I = --SI->getIterator(), E = P->getIterator(); I != E; --I) {
    auto *C = &*I;

    bool MayAlias = isModOrRefSet(AA.getModRefInfo(C, None));

    bool NeedLift = false;
    if (Args.erase(C))
      NeedLift = true;
    else if (MayAlias) {
      NeedLift = llvm::any_of(MemLocs, [C, &AA](const MemoryLocation &ML) {
        return isModOrRefSet(AA.getModRefInfo(C, ML));
      });

      if (!NeedLift)
        NeedLift = llvm::any_of(Calls, [C, &AA](const CallBase *Call) {
          return isModOrRefSet(AA.getModRefInfo(C, Call));
        });
    }

    if (!NeedLift)
      continue;

    if (MayAlias) {
      // Since LI is implicitly moved downwards past the lifted instructions,
      // none of them may modify its source.
      if (isModSet(AA.getModRefInfo(C, LoadLoc)))
        return false;
      else if (const auto *Call = dyn_cast<CallBase>(C)) {
        // If we can't lift this before P, it's game over.
        if (isModOrRefSet(AA.getModRefInfo(P, Call)))
          return false;

        Calls.push_back(Call);
      } else if (isa<LoadInst>(C) || isa<StoreInst>(C) || isa<VAArgInst>(C)) {
        // If we can't lift this before P, it's game over.
        auto ML = MemoryLocation::get(C);
        if (isModOrRefSet(AA.getModRefInfo(P, ML)))
          return false;

        MemLocs.push_back(ML);
      } else
        // We don't know how to lift this instruction.
        return false;
    }

    ToLift.push_back(C);
    for (unsigned k = 0, e = C->getNumOperands(); k != e; ++k)
      if (auto *A = dyn_cast<Instruction>(C->getOperand(k)))
        if (A->getParent() == SI->getParent())
          Args.insert(A);
  }

  // We made it; lift everything we collected, in order, before P.
  for (auto *I : llvm::reverse(ToLift)) {
    LLVM_DEBUG(dbgs() << "Lifting " << *I << " before " << *P << "\n");
    I->moveBefore(P);
  }

  return true;
}
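
/// Try to rewrite a store (and, when profitable, the load feeding it) into a
/// memcpy, memmove, or memset. For example, a load/store pair of an aggregate
/// type %T, "%v = load %T, %T* %src" followed by "store %T %v, %T* %dst",
/// becomes "memcpy(%dst, %src, sizeof(%T))" when the two locations don't
/// overlap.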
bool MemCpyOptPass::processStore(StoreInst *SI, BasicBlock::iterator &BBI) {
  if (!SI->isSimple()) return false;

  // Avoid merging nontemporal stores since the resulting
  // memcpy/memset would not be able to preserve the nontemporal hint.
  // In theory we could teach how to propagate the !nontemporal metadata to
  // memset calls. However, that change would force the backend to
  // conservatively expand !nontemporal memset calls back to sequences of
  // store instructions (effectively undoing the merging).
  if (SI->getMetadata(LLVMContext::MD_nontemporal))
    return false;

  const DataLayout &DL = SI->getModule()->getDataLayout();

  // Load to store forwarding can be interpreted as memcpy.
  if (LoadInst *LI = dyn_cast<LoadInst>(SI->getOperand(0))) {
    if (LI->isSimple() && LI->hasOneUse() &&
        LI->getParent() == SI->getParent()) {

      auto *T = LI->getType();
      if (T->isAggregateType()) {
        AliasAnalysis &AA = LookupAliasAnalysis();
        MemoryLocation LoadLoc = MemoryLocation::get(LI);

        // We use alias analysis to check if an instruction may store to
        // the memory we load from in between the load and the store. If
        // such an instruction is found, we try to promote there instead
        // of at the store position.
        Instruction *P = SI;
        for (auto &I : make_range(++LI->getIterator(), SI->getIterator())) {
          if (isModSet(AA.getModRefInfo(&I, LoadLoc))) {
            P = &I;
            break;
          }
        }

        // We found an instruction that may write to the loaded memory.
        // We can try to promote at this position instead of the store
        // position if nothing aliases the store memory after this and the
        // store destination is not in the range.
        if (P && P != SI) {
          if (!moveUp(AA, SI, P, LI))
            P = nullptr;
        }

        // If a valid insertion position is found, then we can promote
        // the load/store pair to a memcpy.
        if (P) {
          // If we load from memory that may alias the memory we store to,
          // memmove must be used to preserve semantics. If not, memcpy can
          // be used.
          bool UseMemMove = false;
          if (!AA.isNoAlias(MemoryLocation::get(SI), LoadLoc))
            UseMemMove = true;

          uint64_t Size = DL.getTypeStoreSize(T);

          IRBuilder<> Builder(P);
          Instruction *M;
          if (UseMemMove)
            M = Builder.CreateMemMove(
                SI->getPointerOperand(), findStoreAlignment(DL, SI),
                LI->getPointerOperand(), findLoadAlignment(DL, LI), Size);
          else
            M = Builder.CreateMemCpy(
                SI->getPointerOperand(), findStoreAlignment(DL, SI),
                LI->getPointerOperand(), findLoadAlignment(DL, LI), Size);

          LLVM_DEBUG(dbgs() << "Promoting " << *LI << " to " << *SI << " => "
                            << *M << "\n");

          MD->removeInstruction(SI);
          SI->eraseFromParent();
          MD->removeInstruction(LI);
          LI->eraseFromParent();
          ++NumMemCpyInstr;

          // Make sure we do not invalidate the iterator.
          BBI = M->getIterator();
          return true;
        }
      }

      // Detect cases where we're performing call slot forwarding, but
      // happen to be using a load-store pair to implement it, rather than
      // a memcpy.
      MemDepResult ldep = MD->getDependency(LI);
      CallInst *C = nullptr;
      if (ldep.isClobber() && !isa<MemCpyInst>(ldep.getInst()))
        C = dyn_cast<CallInst>(ldep.getInst());

      if (C) {
        // Check that nothing touches the dest of the "copy" between
        // the call and the store.
        Value *CpyDest = SI->getPointerOperand()->stripPointerCasts();
        bool CpyDestIsLocal = isa<AllocaInst>(CpyDest);
        AliasAnalysis &AA = LookupAliasAnalysis();
        MemoryLocation StoreLoc = MemoryLocation::get(SI);
        for (BasicBlock::iterator I = --SI->getIterator(),
                                  E = C->getIterator();
             I != E; --I) {
          if (isModOrRefSet(AA.getModRefInfo(&*I, StoreLoc))) {
            C = nullptr;
            break;
          }
          // The store to dest may never happen if an exception can be thrown
          // between the load and the store.
          if (I->mayThrow() && !CpyDestIsLocal) {
            C = nullptr;
            break;
          }
        }
      }

      if (C) {
        bool changed = performCallSlotOptzn(
            LI, SI->getPointerOperand()->stripPointerCasts(),
            LI->getPointerOperand()->stripPointerCasts(),
            DL.getTypeStoreSize(SI->getOperand(0)->getType()),
            findCommonAlignment(DL, SI, LI), C);
        if (changed) {
          MD->removeInstruction(SI);
          SI->eraseFromParent();
          MD->removeInstruction(LI);
          LI->eraseFromParent();
          ++NumMemCpyInstr;
          return true;
        }
      }
    }
  }

  // There are two cases that are interesting for this code to handle: memcpy
  // and memset. Right now we only handle memset.

  // Ensure that the value being stored is something that can be memset'd a
  // byte at a time, like "0" or "-1" of any width, as well as things like
  // 0xA0A0A0A0 and 0.0.
  auto *V = SI->getOperand(0);
  if (Value *ByteVal = isBytewiseValue(V, DL)) {
    if (Instruction *I = tryMergingIntoMemset(SI, SI->getPointerOperand(),
                                              ByteVal)) {
      BBI = I->getIterator(); // Don't invalidate iterator.
      return true;
    }

    // If we have an aggregate, we try to promote it to memset regardless
    // of the opportunity for merging, as it can expose optimization
    // opportunities in subsequent passes.
    auto *T = V->getType();
    if (T->isAggregateType()) {
      uint64_t Size = DL.getTypeStoreSize(T);
      unsigned Align = SI->getAlignment();
      if (!Align)
        Align = DL.getABITypeAlignment(T);
      IRBuilder<> Builder(SI);
      auto *M =
          Builder.CreateMemSet(SI->getPointerOperand(), ByteVal, Size, Align);

      LLVM_DEBUG(dbgs() << "Promoting " << *SI << " to " << *M << "\n");

      MD->removeInstruction(SI);
      SI->eraseFromParent();
      NumMemSetInfer++;

      // Make sure we do not invalidate the iterator.
      BBI = M->getIterator();
      return true;
    }
  }

  return false;
}
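
/// For example, "memset(p, 0, 8)" followed by "store i64 0" at p + 8 is
/// widened into a single "memset(p, 0, 16)" by tryMergingIntoMemset.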
bool MemCpyOptPass::processMemSet(MemSetInst *MSI, BasicBlock::iterator &BBI) {
  // See if there is another memset or store neighboring this memset which
  // allows us to widen out the memset to do a single larger store.
  if (isa<ConstantInt>(MSI->getLength()) && !MSI->isVolatile())
    if (Instruction *I = tryMergingIntoMemset(MSI, MSI->getDest(),
                                              MSI->getValue())) {
      BBI = I->getIterator(); // Don't invalidate iterator.
      return true;
    }
  return false;
}

/// Takes a memcpy and a call that it depends on,
/// and checks for the possibility of a call slot optimization by having
/// the call write its result directly into the destination of the memcpy.
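///
/// For example (with a hypothetical callee @produce):
/// \code
///   %tmp = alloca %T
///   call void @produce(%T* %tmp)
///   memcpy(%dst, %tmp, sizeof(%T))
/// \endcode
/// becomes "call void @produce(%T* %dst)" once all of the safety checks below
/// pass, and the now-redundant memcpy can then be removed.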
bool MemCpyOptPass::performCallSlotOptzn(Instruction *cpy, Value *cpyDest,
                                         Value *cpySrc, uint64_t cpyLen,
                                         unsigned cpyAlign, CallInst *C) {
  // The general transformation to keep in mind is
  //
  //   call @func(..., src, ...)
  //   memcpy(dest, src, ...)
  //
  // ->
  //
  //   memcpy(dest, src, ...)
  //   call @func(..., dest, ...)
  //
  // Since moving the memcpy is technically awkward, we additionally check that
  // src only holds uninitialized values at the moment of the call, meaning
  // that the memcpy can be discarded rather than moved.

  // Lifetime marks shouldn't be operated on.
  if (Function *F = C->getCalledFunction())
    if (F->isIntrinsic() && F->getIntrinsicID() == Intrinsic::lifetime_start)
      return false;

  // Deliberately get the source and destination with bitcasts stripped away,
  // because we'll need to do type comparisons based on the underlying type.
  CallSite CS(C);

  // Require that src be an alloca. This simplifies the reasoning considerably.
  AllocaInst *srcAlloca = dyn_cast<AllocaInst>(cpySrc);
  if (!srcAlloca)
    return false;

  ConstantInt *srcArraySize = dyn_cast<ConstantInt>(srcAlloca->getArraySize());
  if (!srcArraySize)
    return false;

  const DataLayout &DL = cpy->getModule()->getDataLayout();
  uint64_t srcSize = DL.getTypeAllocSize(srcAlloca->getAllocatedType()) *
                     srcArraySize->getZExtValue();

  if (cpyLen < srcSize)
    return false;

  // Check that accessing the first srcSize bytes of dest will not cause a
  // trap. Otherwise the transform is invalid since it might cause a trap
  // to occur earlier than it otherwise would.
  if (AllocaInst *A = dyn_cast<AllocaInst>(cpyDest)) {
    // The destination is an alloca. Check that it is larger than srcSize.
    ConstantInt *destArraySize = dyn_cast<ConstantInt>(A->getArraySize());
    if (!destArraySize)
      return false;

    uint64_t destSize = DL.getTypeAllocSize(A->getAllocatedType()) *
                        destArraySize->getZExtValue();

    if (destSize < srcSize)
      return false;
  } else if (Argument *A = dyn_cast<Argument>(cpyDest)) {
    // The store to dest may never happen if the call can throw.
    if (C->mayThrow())
      return false;

    if (A->getDereferenceableBytes() < srcSize) {
      // If the destination is an sret parameter then only accesses that are
      // outside of the returned struct type can trap.
      if (!A->hasStructRetAttr())
        return false;

      Type *StructTy = cast<PointerType>(A->getType())->getElementType();
      if (!StructTy->isSized()) {
        // The call may never return and hence the copy-instruction may never
        // be executed, and therefore it's not safe to say "the destination
        // has at least <cpyLen> bytes, as implied by the copy-instruction".
        return false;
      }

      uint64_t destSize = DL.getTypeAllocSize(StructTy);
      if (destSize < srcSize)
        return false;
    }
  } else {
    return false;
  }

  // Check that dest points to memory that is at least as aligned as src.
  unsigned srcAlign = srcAlloca->getAlignment();
  if (!srcAlign)
    srcAlign = DL.getABITypeAlignment(srcAlloca->getAllocatedType());
  bool isDestSufficientlyAligned = srcAlign <= cpyAlign;
  // If dest is not aligned enough and we can't increase its alignment then
  // bail out.
  if (!isDestSufficientlyAligned && !isa<AllocaInst>(cpyDest))
    return false;

  // Check that src is not accessed except via the call and the memcpy. This
  // guarantees that it holds only undefined values when passed in (so the
  // final memcpy can be dropped), that it is not read or written between the
  // call and the memcpy, and that writing beyond the end of it is undefined.
  SmallVector<User *, 8> srcUseList(srcAlloca->user_begin(),
                                    srcAlloca->user_end());
  while (!srcUseList.empty()) {
    User *U = srcUseList.pop_back_val();

    if (isa<BitCastInst>(U) || isa<AddrSpaceCastInst>(U)) {
      for (User *UU : U->users())
        srcUseList.push_back(UU);
      continue;
    }
    if (GetElementPtrInst *G = dyn_cast<GetElementPtrInst>(U)) {
      if (!G->hasAllZeroIndices())
        return false;

      for (User *UU : U->users())
        srcUseList.push_back(UU);
      continue;
    }
    if (const IntrinsicInst *IT = dyn_cast<IntrinsicInst>(U))
      if (IT->isLifetimeStartOrEnd())
        continue;

    if (U != C && U != cpy)
      return false;
  }

  // Check that src isn't captured by the called function since the
  // transformation can cause aliasing issues in that case.
  for (unsigned i = 0, e = CS.arg_size(); i != e; ++i)
    if (CS.getArgument(i) == cpySrc && !CS.doesNotCapture(i))
      return false;

  // Since we're changing the parameter to the callsite, we need to make sure
  // that what would be the new parameter dominates the callsite.
  DominatorTree &DT = LookupDomTree();
  if (Instruction *cpyDestInst = dyn_cast<Instruction>(cpyDest))
    if (!DT.dominates(cpyDestInst, C))
      return false;

  // In addition to knowing that the call does not access src in some
  // unexpected manner, for example via a global, which we deduce from
  // the use analysis, we also need to know that it does not sneakily
  // access dest. We rely on AA to figure this out for us.
  AliasAnalysis &AA = LookupAliasAnalysis();
  ModRefInfo MR = AA.getModRefInfo(C, cpyDest, LocationSize::precise(srcSize));
  // If necessary, perform additional analysis.
  if (isModOrRefSet(MR))
    MR = AA.callCapturesBefore(C, cpyDest, LocationSize::precise(srcSize), &DT);
  if (isModOrRefSet(MR))
    return false;

  // We can't create address space casts here because we don't know if they're
  // safe for the target.
  if (cpySrc->getType()->getPointerAddressSpace() !=
      cpyDest->getType()->getPointerAddressSpace())
    return false;
  for (unsigned i = 0; i < CS.arg_size(); ++i)
    if (CS.getArgument(i)->stripPointerCasts() == cpySrc &&
        cpySrc->getType()->getPointerAddressSpace() !=
            CS.getArgument(i)->getType()->getPointerAddressSpace())
      return false;

  // All the checks have passed, so do the transformation.
  bool changedArgument = false;
  for (unsigned i = 0; i < CS.arg_size(); ++i)
    if (CS.getArgument(i)->stripPointerCasts() == cpySrc) {
      Value *Dest = cpySrc->getType() == cpyDest->getType()
                        ? cpyDest
                        : CastInst::CreatePointerCast(
                              cpyDest, cpySrc->getType(), cpyDest->getName(),
                              C);
      changedArgument = true;
      if (CS.getArgument(i)->getType() == Dest->getType())
        CS.setArgument(i, Dest);
      else
        CS.setArgument(i, CastInst::CreatePointerCast(
                              Dest, CS.getArgument(i)->getType(),
                              Dest->getName(), C));
    }

  if (!changedArgument)
    return false;

  // If the destination wasn't sufficiently aligned then increase its
  // alignment.
  if (!isDestSufficientlyAligned) {
    assert(isa<AllocaInst>(cpyDest) && "Can only increase alloca alignment!");
    cast<AllocaInst>(cpyDest)->setAlignment(srcAlign);
  }

  // Drop any cached information about the call, because we may have changed
  // its dependence information by changing its parameter.
  MD->removeInstruction(C);

  // Update AA metadata.
  // FIXME: MD_tbaa_struct and MD_mem_parallel_loop_access should also be
  // handled here, but combineMetadata doesn't support them yet.
  unsigned KnownIDs[] = {LLVMContext::MD_tbaa, LLVMContext::MD_alias_scope,
                         LLVMContext::MD_noalias,
                         LLVMContext::MD_invariant_group,
                         LLVMContext::MD_access_group};
  combineMetadata(C, cpy, KnownIDs, true);

  // Remove the memcpy.
  MD->removeInstruction(cpy);
  ++NumMemCpyInstr;

  return true;
}

/// We've found that the (upward scanning) memory dependence of memcpy 'M' is
/// the memcpy 'MDep'. Try to simplify M to copy from MDep's input if we can.
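///
/// That is, transform:
/// \code
///   memcpy(b <- a)   // MDep
///   memcpy(c <- b)   // M
/// \endcode
/// into:
/// \code
///   memcpy(b <- a)
///   memcpy(c <- a)
/// \endcode
/// which lets later passes delete the first memcpy if 'b' becomes dead.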
bool MemCpyOptPass::processMemCpyMemCpyDependence(MemCpyInst *M,
                                                  MemCpyInst *MDep) {
  // We can only transform memcpys where the dest of one is the source of the
  // other.
  if (M->getSource() != MDep->getDest() || MDep->isVolatile())
    return false;

  // If the dep instruction is reading from our current input, then it is a
  // noop transfer and substituting the input won't change this instruction.
  // Just ignore the input and let someone else zap MDep. This handles cases
  // like:
  //   memcpy(a <- a)
  //   memcpy(b <- a)
  if (M->getSource() == MDep->getSource())
    return false;

  // Second, the lengths of the memcpys must be the same, or the preceding one
  // must be larger than the following one.
  ConstantInt *MDepLen = dyn_cast<ConstantInt>(MDep->getLength());
  ConstantInt *MLen = dyn_cast<ConstantInt>(M->getLength());
  if (!MDepLen || !MLen || MDepLen->getZExtValue() < MLen->getZExtValue())
    return false;

  AliasAnalysis &AA = LookupAliasAnalysis();

  // Verify that the copied-from memory doesn't change in between the two
  // transfers. For example, in:
  //   memcpy(a <- b)
  //   *b = 42;
  //   memcpy(c <- a)
  // it would be invalid to transform the second memcpy into memcpy(c <- b).
  //
  // TODO: If the code between M and MDep is transparent to the destination
  // "c", then we could still perform the xform by moving M up to the first
  // memcpy.
  //
  // NOTE: This is conservative, it will stop on any read from the source loc,
  // not just the defining memcpy.
  MemDepResult SourceDep =
      MD->getPointerDependencyFrom(MemoryLocation::getForSource(MDep), false,
                                   M->getIterator(), M->getParent());
  if (!SourceDep.isClobber() || SourceDep.getInst() != MDep)
    return false;

  // If the dest of the second might alias the source of the first, then the
  // source and dest might overlap. We still want to eliminate the intermediate
  // value, but we have to generate a memmove instead of memcpy.
  bool UseMemMove = false;
  if (!AA.isNoAlias(MemoryLocation::getForDest(M),
                    MemoryLocation::getForSource(MDep)))
    UseMemMove = true;

  // If all checks passed, then we can transform M.
  LLVM_DEBUG(dbgs() << "MemCpyOptPass: Forwarding memcpy->memcpy src:\n"
                    << *MDep << '\n' << *M << '\n');

  // TODO: Is this worth it if we're creating a less aligned memcpy? For
  // example we could be moving from movaps -> movq on x86.
  IRBuilder<> Builder(M);
  if (UseMemMove)
    Builder.CreateMemMove(M->getRawDest(), M->getDestAlignment(),
                          MDep->getRawSource(), MDep->getSourceAlignment(),
                          M->getLength(), M->isVolatile());
  else
    Builder.CreateMemCpy(M->getRawDest(), M->getDestAlignment(),
                         MDep->getRawSource(), MDep->getSourceAlignment(),
                         M->getLength(), M->isVolatile());

  // Remove the instruction we're replacing.
  MD->removeInstruction(M);
  M->eraseFromParent();
  ++NumMemCpyInstr;
  return true;
}

/// We've found that the (upward scanning) memory dependence of \p MemCpy is
/// \p MemSet. Try to simplify \p MemSet to only set the trailing bytes that
/// weren't copied over by \p MemCpy.
///
/// In other words, transform:
/// \code
///   memset(dst, c, dst_size);
///   memcpy(dst, src, src_size);
/// \endcode
/// into:
/// \code
///   memcpy(dst, src, src_size);
///   memset(dst + src_size, c, dst_size <= src_size ? 0 : dst_size - src_size);
/// \endcode
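///
/// For constant sizes, e.g. dst_size == 100 and src_size == 60, the tail
/// memset covers exactly the remaining 40 bytes at dst + 60.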
bool MemCpyOptPass::processMemSetMemCpyDependence(MemCpyInst *MemCpy,
                                                  MemSetInst *MemSet) {
  // We can only transform memset/memcpy with the same destination.
  if (MemSet->getDest() != MemCpy->getDest())
    return false;

  // Check that there are no other dependencies on the memset destination.
  MemDepResult DstDepInfo =
      MD->getPointerDependencyFrom(MemoryLocation::getForDest(MemSet), false,
                                   MemCpy->getIterator(), MemCpy->getParent());
  if (DstDepInfo.getInst() != MemSet)
    return false;

  // Use the same i8* dest as the memcpy, killing the memset dest if different.
  Value *Dest = MemCpy->getRawDest();
  Value *DestSize = MemSet->getLength();
  Value *SrcSize = MemCpy->getLength();

  // By default, create an unaligned memset.
  unsigned Align = 1;
  // If Dest is aligned, and SrcSize is constant, use the minimum alignment
  // of the sum.
  const unsigned DestAlign =
      std::max(MemSet->getDestAlignment(), MemCpy->getDestAlignment());
  if (DestAlign > 1)
    if (ConstantInt *SrcSizeC = dyn_cast<ConstantInt>(SrcSize))
      Align = MinAlign(SrcSizeC->getZExtValue(), DestAlign);

  IRBuilder<> Builder(MemCpy);

  // If the sizes have different types, zext the smaller one.
  if (DestSize->getType() != SrcSize->getType()) {
    if (DestSize->getType()->getIntegerBitWidth() >
        SrcSize->getType()->getIntegerBitWidth())
      SrcSize = Builder.CreateZExt(SrcSize, DestSize->getType());
    else
      DestSize = Builder.CreateZExt(DestSize, SrcSize->getType());
  }

  Value *Ule = Builder.CreateICmpULE(DestSize, SrcSize);
  Value *SizeDiff = Builder.CreateSub(DestSize, SrcSize);
  Value *MemsetLen = Builder.CreateSelect(
      Ule, ConstantInt::getNullValue(DestSize->getType()), SizeDiff);
  Builder.CreateMemSet(
      Builder.CreateGEP(Dest->getType()->getPointerElementType(), Dest,
                        SrcSize),
      MemSet->getOperand(1), MemsetLen, Align);

  MD->removeInstruction(MemSet);
  MemSet->eraseFromParent();
  return true;
}

/// Determine whether the instruction has undefined content for the given Size,
/// either because it was freshly alloca'd or started its lifetime.
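///
/// For example, both "%a = alloca [64 x i8]" and an
/// "llvm.lifetime.start(i64 64, i8* %a)" covering at least Size bytes imply
/// that the first Size bytes of %a are undef.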
static bool hasUndefContents(Instruction *I, ConstantInt *Size) {
  if (isa<AllocaInst>(I))
    return true;

  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I))
    if (II->getIntrinsicID() == Intrinsic::lifetime_start)
      if (ConstantInt *LTSize = dyn_cast<ConstantInt>(II->getArgOperand(0)))
        if (LTSize->getZExtValue() >= Size->getZExtValue())
          return true;

  return false;
}

/// Transform memcpy to memset when its source was just memset.
/// In other words, turn:
/// \code
///   memset(dst1, c, dst1_size);
///   memcpy(dst2, dst1, dst2_size);
/// \endcode
/// into:
/// \code
///   memset(dst1, c, dst1_size);
///   memset(dst2, c, dst2_size);
/// \endcode
/// when dst2_size <= dst1_size.
///
/// The \p MemCpy must have a Constant length.
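///
/// For example, "memset(a, 42, 16); memcpy(b, a, 8)" becomes
/// "memset(a, 42, 16); memset(b, 42, 8)"; the first memset is left for other
/// passes to remove if it becomes dead.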
bool MemCpyOptPass::performMemCpyToMemSetOptzn(MemCpyInst *MemCpy,
                                               MemSetInst *MemSet) {
  AliasAnalysis &AA = LookupAliasAnalysis();

  // Make sure that we have memcpy(..., memset(...), ...); that is, that we
  // are memsetting and then memcpying from the same address. Otherwise it is
  // hard to reason about.
  if (!AA.isMustAlias(MemSet->getRawDest(), MemCpy->getRawSource()))
    return false;

  // A known memset size is required.
  ConstantInt *MemSetSize = dyn_cast<ConstantInt>(MemSet->getLength());
  if (!MemSetSize)
    return false;

  // Make sure the memcpy doesn't read any more than what the memset wrote.
  // Don't worry about sizes larger than i64.
  ConstantInt *CopySize = cast<ConstantInt>(MemCpy->getLength());
  if (CopySize->getZExtValue() > MemSetSize->getZExtValue()) {
    // If the memcpy is larger than the memset, but the memory was undef prior
    // to the memset, we can just ignore the tail. Technically we're only
    // interested in the bytes from MemSetSize..CopySize here, but as we can't
    // easily represent this location, we use the full 0..CopySize range.
    MemoryLocation MemCpyLoc = MemoryLocation::getForSource(MemCpy);
    MemDepResult DepInfo = MD->getPointerDependencyFrom(
        MemCpyLoc, true, MemSet->getIterator(), MemSet->getParent());
    if (DepInfo.isDef() && hasUndefContents(DepInfo.getInst(), CopySize))
      CopySize = MemSetSize;
    else
      return false;
  }

  IRBuilder<> Builder(MemCpy);
  Builder.CreateMemSet(MemCpy->getRawDest(), MemSet->getOperand(1),
                       CopySize, MemCpy->getDestAlignment());
  return true;
}

/// Perform simplification of memcpys. If we have memcpy A
/// which copies X to Y, and memcpy B which copies Y to Z, then we can rewrite
/// B to be a memcpy from X to Z (or potentially a memmove, depending on
/// circumstances). This allows later passes to remove the first memcpy
/// altogether.
bool MemCpyOptPass::processMemCpy(MemCpyInst *M) {
  // We can only optimize non-volatile memcpys.
  if (M->isVolatile()) return false;

  // If the source and destination of the memcpy are the same, then zap it.
  if (M->getSource() == M->getDest()) {
    MD->removeInstruction(M);
    M->eraseFromParent();
    return false;
  }

  // If copying from a constant, try to turn the memcpy into a memset.
  if (GlobalVariable *GV = dyn_cast<GlobalVariable>(M->getSource()))
    if (GV->isConstant() && GV->hasDefinitiveInitializer())
      if (Value *ByteVal = isBytewiseValue(GV->getInitializer(),
                                           M->getModule()->getDataLayout())) {
        IRBuilder<> Builder(M);
        Builder.CreateMemSet(M->getRawDest(), ByteVal, M->getLength(),
                             M->getDestAlignment(), false);
        MD->removeInstruction(M);
        M->eraseFromParent();
        ++NumCpyToSet;
        return true;
      }

  MemDepResult DepInfo = MD->getDependency(M);

  // Try to turn a partially redundant memset + memcpy into
  // memcpy + smaller memset. We don't need the memcpy size for this.
  if (DepInfo.isClobber())
    if (MemSetInst *MDep = dyn_cast<MemSetInst>(DepInfo.getInst()))
      if (processMemSetMemCpyDependence(M, MDep))
        return true;

  // The optimizations after this point require the memcpy size.
  ConstantInt *CopySize = dyn_cast<ConstantInt>(M->getLength());
  if (!CopySize) return false;

  // There are four possible optimizations we can do for memcpy:
  //   a) memcpy-memcpy xform which exposes redundancy for DSE.
  //   b) call-memcpy xform for return slot optimization.
  //   c) memcpy from freshly alloca'd space or space that has just started
  //      its lifetime copies undefined data, and we can therefore eliminate
  //      the memcpy in favor of the data that was already at the destination.
  //   d) memcpy from a just-memset'd source can be turned into memset.
  if (DepInfo.isClobber()) {
    if (CallInst *C = dyn_cast<CallInst>(DepInfo.getInst())) {
      // FIXME: Can we pass in either of dest/src alignment here instead
      // of conservatively taking the minimum?
      unsigned Align = MinAlign(M->getDestAlignment(), M->getSourceAlignment());
      if (performCallSlotOptzn(M, M->getDest(), M->getSource(),
                               CopySize->getZExtValue(), Align, C)) {
        MD->removeInstruction(M);
        M->eraseFromParent();
        return true;
      }
    }
  }

  MemoryLocation SrcLoc = MemoryLocation::getForSource(M);
  MemDepResult SrcDepInfo = MD->getPointerDependencyFrom(
      SrcLoc, true, M->getIterator(), M->getParent());

  if (SrcDepInfo.isClobber()) {
    if (MemCpyInst *MDep = dyn_cast<MemCpyInst>(SrcDepInfo.getInst()))
      return processMemCpyMemCpyDependence(M, MDep);
  } else if (SrcDepInfo.isDef()) {
    if (hasUndefContents(SrcDepInfo.getInst(), CopySize)) {
      MD->removeInstruction(M);
      M->eraseFromParent();
      ++NumMemCpyInstr;
      return true;
    }
  }

  if (SrcDepInfo.isClobber())
    if (MemSetInst *MDep = dyn_cast<MemSetInst>(SrcDepInfo.getInst()))
      if (performMemCpyToMemSetOptzn(M, MDep)) {
        MD->removeInstruction(M);
        M->eraseFromParent();
        ++NumCpyToSet;
        return true;
      }

  return false;
}

/// Transforms memmove calls to memcpy calls when the src/dst are guaranteed
/// not to alias.
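///
/// This is sound because memcpy only requires that the two ranges do not
/// overlap, which is exactly what no-alias establishes, e.g. for moves
/// between two distinct allocas.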
bool MemCpyOptPass::processMemMove(MemMoveInst *M) {
  AliasAnalysis &AA = LookupAliasAnalysis();

  if (!TLI->has(LibFunc_memmove))
    return false;

  // See if the pointers alias.
  if (!AA.isNoAlias(MemoryLocation::getForDest(M),
                    MemoryLocation::getForSource(M)))
    return false;

  LLVM_DEBUG(dbgs() << "MemCpyOptPass: Optimizing memmove -> memcpy: " << *M
                    << "\n");

  // If not, then we know we can transform this.
  Type *ArgTys[3] = { M->getRawDest()->getType(),
                      M->getRawSource()->getType(),
                      M->getLength()->getType() };
  M->setCalledFunction(Intrinsic::getDeclaration(M->getModule(),
                                                 Intrinsic::memcpy, ArgTys));

  // MemDep may have overly conservative information about this instruction;
  // just conservatively flush it from the cache.
  MD->removeInstruction(M);

  ++NumMoveToCpy;
  return true;
}

/// This is called on every byval argument in call sites.
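///
/// If the byval argument is fed by a memcpy, we may be able to pass the
/// memcpy's source directly, e.g. (with a hypothetical callee @f):
/// \code
///   memcpy(%tmp <- %src)
///   call void @f(%T* byval align 4 %tmp)
/// \endcode
/// becomes "call void @f(%T* byval align 4 %src)" when nothing clobbers %src
/// in between, leaving the memcpy dead for later passes to remove.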
bool MemCpyOptPass::processByValArgument(CallSite CS, unsigned ArgNo) {
  const DataLayout &DL = CS.getCaller()->getParent()->getDataLayout();
  // Find out what feeds this byval argument.
  Value *ByValArg = CS.getArgument(ArgNo);
  Type *ByValTy = cast<PointerType>(ByValArg->getType())->getElementType();
  uint64_t ByValSize = DL.getTypeAllocSize(ByValTy);
  MemDepResult DepInfo = MD->getPointerDependencyFrom(
      MemoryLocation(ByValArg, LocationSize::precise(ByValSize)), true,
      CS.getInstruction()->getIterator(), CS.getInstruction()->getParent());
  if (!DepInfo.isClobber())
    return false;

  // If the byval argument isn't fed by a memcpy, ignore it. If it is fed by
  // a memcpy, see if we can byval from the source of the memcpy instead of the
  // result.
  MemCpyInst *MDep = dyn_cast<MemCpyInst>(DepInfo.getInst());
  if (!MDep || MDep->isVolatile() ||
      ByValArg->stripPointerCasts() != MDep->getDest())
    return false;

  // The length of the memcpy must be larger or equal to the size of the byval.
  ConstantInt *C1 = dyn_cast<ConstantInt>(MDep->getLength());
  if (!C1 || C1->getValue().getZExtValue() < ByValSize)
    return false;

  // Get the alignment of the byval. If the call doesn't specify the alignment,
  // then it is some target specific value that we can't know.
  unsigned ByValAlign = CS.getParamAlignment(ArgNo);
  if (ByValAlign == 0) return false;

  // If it is greater than the memcpy, then we check to see if we can force the
  // source of the memcpy to the alignment we need. If we fail, we bail out.
  AssumptionCache &AC = LookupAssumptionCache();
  DominatorTree &DT = LookupDomTree();
  if (MDep->getSourceAlignment() < ByValAlign &&
      getOrEnforceKnownAlignment(MDep->getSource(), ByValAlign, DL,
                                 CS.getInstruction(), &AC, &DT) < ByValAlign)
    return false;

  // The address space of the memcpy source must match the byval argument.
  if (MDep->getSource()->getType()->getPointerAddressSpace() !=
      ByValArg->getType()->getPointerAddressSpace())
    return false;

  // Verify that the copied-from memory doesn't change in between the memcpy
  // and the byval call.
  //   memcpy(a <- b)
  //   *b = 42;
  //   foo(*a)
  // It would be invalid to transform the second memcpy into foo(*b).
  //
  // NOTE: This is conservative, it will stop on any read from the source loc,
  // not just the defining memcpy.
  MemDepResult SourceDep = MD->getPointerDependencyFrom(
      MemoryLocation::getForSource(MDep), false,
      CS.getInstruction()->getIterator(), MDep->getParent());
  if (!SourceDep.isClobber() || SourceDep.getInst() != MDep)
    return false;

  Value *TmpCast = MDep->getSource();
  if (MDep->getSource()->getType() != ByValArg->getType())
    TmpCast = new BitCastInst(MDep->getSource(), ByValArg->getType(),
                              "tmpcast", CS.getInstruction());

  LLVM_DEBUG(dbgs() << "MemCpyOptPass: Forwarding memcpy to byval:\n"
                    << "  " << *MDep << "\n"
                    << "  " << *CS.getInstruction() << "\n");

  // Otherwise we're good! Update the byval argument.
  CS.setArgument(ArgNo, TmpCast);
  ++NumMemCpyInstr;
  return true;
}

/// Executes one iteration of MemCpyOptPass.
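///
/// Callers re-run this until it reports no change, since one rewrite (e.g. a
/// store promoted to a memset) can expose another (e.g. widening into a
/// neighboring memset).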
bool MemCpyOptPass::iterateOnFunction(Function &F) {
  bool MadeChange = false;

  DominatorTree &DT = LookupDomTree();

  // Walk all instructions in the function.
  for (BasicBlock &BB : F) {
    // Skip unreachable blocks. For example, processStore assumes that an
    // instruction in a BB can't be dominated by a later instruction in the
    // same BB (which is a scenario that can happen for an unreachable BB that
    // has itself as a predecessor).
    if (!DT.isReachableFromEntry(&BB))
      continue;

    for (BasicBlock::iterator BI = BB.begin(), BE = BB.end(); BI != BE;) {
      // Avoid invalidating the iterator.
      Instruction *I = &*BI++;

      bool RepeatInstruction = false;

      if (StoreInst *SI = dyn_cast<StoreInst>(I))
        MadeChange |= processStore(SI, BI);
      else if (MemSetInst *M = dyn_cast<MemSetInst>(I))
        RepeatInstruction = processMemSet(M, BI);
      else if (MemCpyInst *M = dyn_cast<MemCpyInst>(I))
        RepeatInstruction = processMemCpy(M);
      else if (MemMoveInst *M = dyn_cast<MemMoveInst>(I))
        RepeatInstruction = processMemMove(M);
      else if (auto CS = CallSite(I)) {
        for (unsigned i = 0, e = CS.arg_size(); i != e; ++i)
          if (CS.isByValArgument(i))
            MadeChange |= processByValArgument(CS, i);
      }

      // Reprocess the instruction if desired.
      if (RepeatInstruction) {
        if (BI != BB.begin())
          --BI;
        MadeChange = true;
      }
    }
  }

  return MadeChange;
}

PreservedAnalyses MemCpyOptPass::run(Function &F, FunctionAnalysisManager &AM) {
  auto &MD = AM.getResult<MemoryDependenceAnalysis>(F);
  auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);

  auto LookupAliasAnalysis = [&]() -> AliasAnalysis & {
    return AM.getResult<AAManager>(F);
  };
  auto LookupAssumptionCache = [&]() -> AssumptionCache & {
    return AM.getResult<AssumptionAnalysis>(F);
  };
  auto LookupDomTree = [&]() -> DominatorTree & {
    return AM.getResult<DominatorTreeAnalysis>(F);
  };

  bool MadeChange = runImpl(F, &MD, &TLI, LookupAliasAnalysis,
                            LookupAssumptionCache, LookupDomTree);
  if (!MadeChange)
    return PreservedAnalyses::all();

  PreservedAnalyses PA;
  PA.preserveSet<CFGAnalyses>();
  PA.preserve<GlobalsAA>();
  PA.preserve<MemoryDependenceAnalysis>();
  return PA;
}

bool MemCpyOptPass::runImpl(
    Function &F, MemoryDependenceResults *MD_, TargetLibraryInfo *TLI_,
    std::function<AliasAnalysis &()> LookupAliasAnalysis_,
    std::function<AssumptionCache &()> LookupAssumptionCache_,
    std::function<DominatorTree &()> LookupDomTree_) {
  bool MadeChange = false;
  MD = MD_;
  TLI = TLI_;
  LookupAliasAnalysis = std::move(LookupAliasAnalysis_);
  LookupAssumptionCache = std::move(LookupAssumptionCache_);
  LookupDomTree = std::move(LookupDomTree_);

  // If we don't have at least memset and memcpy, there is little point in
  // doing anything here. These are required by a freestanding implementation,
  // so if even they are disabled, there is no point in trying hard.
  if (!TLI->has(LibFunc_memset) || !TLI->has(LibFunc_memcpy))
    return false;

  while (true) {
    if (!iterateOnFunction(F))
      break;
    MadeChange = true;
  }

  MD = nullptr;
  return MadeChange;
}

/// This is the main transformation entry point for a function.
bool MemCpyOptLegacyPass::runOnFunction(Function &F) {
  if (skipFunction(F))
    return false;

  auto *MD = &getAnalysis<MemoryDependenceWrapperPass>().getMemDep();
  auto *TLI = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();

  auto LookupAliasAnalysis = [this]() -> AliasAnalysis & {
    return getAnalysis<AAResultsWrapperPass>().getAAResults();
  };
  auto LookupAssumptionCache = [this, &F]() -> AssumptionCache & {
    return getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
  };
  auto LookupDomTree = [this]() -> DominatorTree & {
    return getAnalysis<DominatorTreeWrapperPass>().getDomTree();
  };

  return Impl.runImpl(F, MD, TLI, LookupAliasAnalysis, LookupAssumptionCache,
                      LookupDomTree);
}