//===- Reassociate.cpp - Reassociate binary expressions -------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass reassociates commutative expressions in an order that is designed
// to promote better constant propagation, GCSE, LICM, PRE, etc.
//
// For example: 4 + (x + 5) -> x + (4 + 5)
//
// In the implementation of this algorithm, constants are assigned rank = 0,
// function arguments are rank = 1, and other values are assigned ranks
// corresponding to the reverse post order traversal of the current function
// (starting at 2), which effectively gives values in deep loops higher rank
// than values not in loops.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Scalar/Reassociate.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/BasicAliasAnalysis.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils/Local.h"
#include <algorithm>
#include <cassert>
#include <utility>

using namespace llvm;
using namespace reassociate;
using namespace PatternMatch;

#define DEBUG_TYPE "reassociate"

STATISTIC(NumChanged, "Number of insts reassociated");
STATISTIC(NumAnnihil, "Number of expr tree annihilated");
STATISTIC(NumFactor, "Number of multiplies factored");

static cl::opt<bool>
    UseCSELocalOpt(DEBUG_TYPE "-use-cse-local",
                   cl::desc("Only reorder expressions within a basic block "
                            "when exposing CSE opportunities"),
                   cl::init(true), cl::Hidden);

#ifndef NDEBUG
/// Print out the expression identified in the Ops list.
static void PrintOps(Instruction *I, const SmallVectorImpl<ValueEntry> &Ops) {
  Module *M = I->getModule();
  dbgs() << Instruction::getOpcodeName(I->getOpcode()) << " "
         << *Ops[0].Op->getType() << '\t';
  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
    dbgs() << "[ ";
    Ops[i].Op->printAsOperand(dbgs(), false, M);
    dbgs() << ", #" << Ops[i].Rank << "] ";
  }
}
#endif

/// Utility class representing a non-constant Xor-operand.
/// We classify non-constant Xor-Operands into two categories:
///  C1) The operand is in the form "X & C", where C is a constant and C != ~0
///  C2)
///    C2.1) The operand is in the form of "X | C", where C is a non-zero
///          constant.
///    C2.2) Any operand E that doesn't fall into C1 or C2.1 is viewed as
///          "E | 0".
class llvm::reassociate::XorOpnd {
public:
  XorOpnd(Value *V);

  bool isInvalid() const { return SymbolicPart == nullptr; }
  bool isOrExpr() const { return isOr; }
  Value *getValue() const { return OrigVal; }
  Value *getSymbolicPart() const { return SymbolicPart; }
  unsigned getSymbolicRank() const { return SymbolicRank; }
  const APInt &getConstPart() const { return ConstPart; }

  void Invalidate() { SymbolicPart = OrigVal = nullptr; }
  void setSymbolicRank(unsigned R) { SymbolicRank = R; }

private:
  Value *OrigVal;
  Value *SymbolicPart;
  APInt ConstPart;
  unsigned SymbolicRank;
  bool isOr;
};

XorOpnd::XorOpnd(Value *V) {
  assert(!isa<ConstantInt>(V) && "No ConstantInt");
  OrigVal = V;
  Instruction *I = dyn_cast<Instruction>(V);
  SymbolicRank = 0;

  if (I && (I->getOpcode() == Instruction::Or ||
            I->getOpcode() == Instruction::And)) {
    Value *V0 = I->getOperand(0);
    Value *V1 = I->getOperand(1);
    const APInt *C;
    if (match(V0, m_APInt(C)))
      std::swap(V0, V1);

    if (match(V1, m_APInt(C))) {
      ConstPart = *C;
      SymbolicPart = V0;
      isOr = (I->getOpcode() == Instruction::Or);
      return;
    }
  }

  // View the operand as "V | 0".
  SymbolicPart = V;
  ConstPart = APInt::getZero(V->getType()->getScalarSizeInBits());
  isOr = true;
}

/// Return true if I is an instruction with the FastMathFlags that are needed
/// for general reassociation set. This is not the same as testing
/// Instruction::isAssociative() because it includes operations like fsub.
/// (This routine is only intended to be called for floating-point operations.)
static bool hasFPAssociativeFlags(Instruction *I) {
  assert(I && isa<FPMathOperator>(I) && "Should only check FP ops");
  return I->hasAllowReassoc() && I->hasNoSignedZeros();
}

/// Return true if V is an instruction of the specified opcode and if it
/// only has one use.
static BinaryOperator *isReassociableOp(Value *V, unsigned Opcode) {
  auto *BO = dyn_cast<BinaryOperator>(V);
  if (BO && BO->hasOneUse() && BO->getOpcode() == Opcode)
    if (!isa<FPMathOperator>(BO) || hasFPAssociativeFlags(BO))
      return BO;
  return nullptr;
}

static BinaryOperator *isReassociableOp(Value *V, unsigned Opcode1,
                                        unsigned Opcode2) {
  auto *BO = dyn_cast<BinaryOperator>(V);
  if (BO && BO->hasOneUse() &&
      (BO->getOpcode() == Opcode1 || BO->getOpcode() == Opcode2))
    if (!isa<FPMathOperator>(BO) || hasFPAssociativeFlags(BO))
      return BO;
  return nullptr;
}

void ReassociatePass::BuildRankMap(Function &F,
                                   ReversePostOrderTraversal<Function*> &RPOT) {
  unsigned Rank = 2;

  // Assign distinct ranks to function arguments.
  for (auto &Arg : F.args()) {
    ValueRankMap[&Arg] = ++Rank;
    LLVM_DEBUG(dbgs() << "Calculated Rank[" << Arg.getName() << "] = " << Rank
                      << "\n");
  }

  // Traverse basic blocks in ReversePostOrder.
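  // Each block's base rank is shifted left by 16 so that the per-instruction
  // ranks handed out below (BBRank + 1, BBRank + 2, ...) cannot collide with
  // the base rank of a later block, as long as a block contains fewer than
  // 2^16 such instructions. For example, with no function arguments the first
  // block visited gets base rank 3 << 16 and its unmovable instructions get
  // (3 << 16) + 1, (3 << 16) + 2, and so on.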
  for (BasicBlock *BB : RPOT) {
    unsigned BBRank = RankMap[BB] = ++Rank << 16;

    // Walk the basic block, adding precomputed ranks for any instructions that
    // we cannot move. This ensures that the ranks for these instructions are
    // all different in the block.
    for (Instruction &I : *BB)
      if (mayHaveNonDefUseDependency(I))
        ValueRankMap[&I] = ++BBRank;
  }
}

unsigned ReassociatePass::getRank(Value *V) {
  Instruction *I = dyn_cast<Instruction>(V);
  if (!I) {
    if (isa<Argument>(V)) return ValueRankMap[V]; // Function argument.
    return 0; // Otherwise it's a global or constant, rank 0.
  }

  if (unsigned Rank = ValueRankMap[I])
    return Rank; // Rank already known?

  // If this is an expression, return the 1+MAX(rank(LHS), rank(RHS)) so that
  // we can reassociate expressions for code motion! Since we do not recurse
  // for PHI nodes, we cannot have infinite recursion here, because there
  // cannot be loops in the value graph that do not go through PHI nodes.
  unsigned Rank = 0, MaxRank = RankMap[I->getParent()];
  for (unsigned i = 0, e = I->getNumOperands(); i != e && Rank != MaxRank; ++i)
    Rank = std::max(Rank, getRank(I->getOperand(i)));

  // If this is a 'not' or 'neg' instruction, do not count it for rank. This
  // assures us that X and ~X will have the same rank.
  if (!match(I, m_Not(m_Value())) && !match(I, m_Neg(m_Value())) &&
      !match(I, m_FNeg(m_Value())))
    ++Rank;

  LLVM_DEBUG(dbgs() << "Calculated Rank[" << V->getName() << "] = " << Rank
                    << "\n");

  return ValueRankMap[I] = Rank;
}

// Canonicalize constants to RHS. Otherwise, sort the operands by rank.
void ReassociatePass::canonicalizeOperands(Instruction *I) {
  assert(isa<BinaryOperator>(I) && "Expected binary operator.");
  assert(I->isCommutative() && "Expected commutative operator.");

  Value *LHS = I->getOperand(0);
  Value *RHS = I->getOperand(1);
  if (LHS == RHS || isa<Constant>(RHS))
    return;
  if (isa<Constant>(LHS) || getRank(RHS) < getRank(LHS))
    cast<BinaryOperator>(I)->swapOperands();
}

static BinaryOperator *CreateAdd(Value *S1, Value *S2, const Twine &Name,
                                 BasicBlock::iterator InsertBefore,
                                 Value *FlagsOp) {
  if (S1->getType()->isIntOrIntVectorTy())
    return BinaryOperator::CreateAdd(S1, S2, Name, InsertBefore);
  else {
    BinaryOperator *Res =
        BinaryOperator::CreateFAdd(S1, S2, Name, InsertBefore);
    Res->setFastMathFlags(cast<FPMathOperator>(FlagsOp)->getFastMathFlags());
    return Res;
  }
}

static BinaryOperator *CreateMul(Value *S1, Value *S2, const Twine &Name,
                                 BasicBlock::iterator InsertBefore,
                                 Value *FlagsOp) {
  if (S1->getType()->isIntOrIntVectorTy())
    return BinaryOperator::CreateMul(S1, S2, Name, InsertBefore);
  else {
    BinaryOperator *Res =
        BinaryOperator::CreateFMul(S1, S2, Name, InsertBefore);
    Res->setFastMathFlags(cast<FPMathOperator>(FlagsOp)->getFastMathFlags());
    return Res;
  }
}

static Instruction *CreateNeg(Value *S1, const Twine &Name,
                              BasicBlock::iterator InsertBefore,
                              Value *FlagsOp) {
  if (S1->getType()->isIntOrIntVectorTy())
    return BinaryOperator::CreateNeg(S1, Name, InsertBefore);

  if (auto *FMFSource = dyn_cast<Instruction>(FlagsOp))
    return UnaryOperator::CreateFNegFMF(S1, FMFSource, Name, InsertBefore);

  return UnaryOperator::CreateFNeg(S1, Name, InsertBefore);
}

/// Replace 0-X with X*-1.
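/// For example, a binary negation such as "%n = sub i32 0, %x" becomes
/// "%n = mul i32 %x, -1", and a unary fneg is rewritten to an fmul by -1.0
/// (see the FIXME below regarding the safety of the unary FNeg case).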
static BinaryOperator *LowerNegateToMultiply(Instruction *Neg) {
  assert((isa<UnaryOperator>(Neg) || isa<BinaryOperator>(Neg)) &&
         "Expected a Negate!");
  // FIXME: It's not safe to lower a unary FNeg into a FMul by -1.0.
  unsigned OpNo = isa<BinaryOperator>(Neg) ? 1 : 0;
  Type *Ty = Neg->getType();
  Constant *NegOne = Ty->isIntOrIntVectorTy() ?
      ConstantInt::getAllOnesValue(Ty) : ConstantFP::get(Ty, -1.0);

  BinaryOperator *Res =
      CreateMul(Neg->getOperand(OpNo), NegOne, "", Neg->getIterator(), Neg);
  Neg->setOperand(OpNo, Constant::getNullValue(Ty)); // Drop use of op.
  Res->takeName(Neg);
  Neg->replaceAllUsesWith(Res);
  Res->setDebugLoc(Neg->getDebugLoc());
  return Res;
}

/// Returns k such that lambda(2^Bitwidth) = 2^k, where lambda is the Carmichael
/// function. This means that x^(2^k) === 1 mod 2^Bitwidth for
/// every odd x, i.e. x^(2^k) = 1 for every odd x in Bitwidth-bit arithmetic.
/// Note that 0 <= k < Bitwidth, and if Bitwidth > 3 then x^(2^k) = 0 for every
/// even x in Bitwidth-bit arithmetic.
static unsigned CarmichaelShift(unsigned Bitwidth) {
  if (Bitwidth < 3)
    return Bitwidth - 1;
  return Bitwidth - 2;
}

/// Add the extra weight 'RHS' to the existing weight 'LHS',
/// reducing the combined weight using any special properties of the operation.
/// The existing weight LHS represents the computation X op X op ... op X where
/// X occurs LHS times. The combined weight represents X op X op ... op X with
/// X occurring LHS + RHS times. If op is "Xor" for example then the combined
/// operation is equivalent to X if LHS + RHS is odd, or 0 if LHS + RHS is even;
/// the routine returns 1 in LHS in the first case, and 0 in LHS in the second.
static void IncorporateWeight(APInt &LHS, const APInt &RHS, unsigned Opcode) {
  // If we were working with infinite precision arithmetic then the combined
  // weight would be LHS + RHS. But we are using finite precision arithmetic,
  // and the APInt sum LHS + RHS may not be correct if it wraps (it is correct
  // for nilpotent operations and addition, but not for idempotent operations
  // and multiplication), so it is important to correctly reduce the combined
  // weight back into range if wrapping would be wrong.

  // If RHS is zero then the weight didn't change.
  if (RHS.isMinValue())
    return;
  // If LHS is zero then the combined weight is RHS.
  if (LHS.isMinValue()) {
    LHS = RHS;
    return;
  }
  // From this point on we know that neither LHS nor RHS is zero.

  if (Instruction::isIdempotent(Opcode)) {
    // Idempotent means X op X === X, so any non-zero weight is equivalent to a
    // weight of 1. Keeping weights at zero or one also means that wrapping is
    // not a problem.
    assert(LHS == 1 && RHS == 1 && "Weights not reduced!");
    return; // Return a weight of 1.
  }
  if (Instruction::isNilpotent(Opcode)) {
    // Nilpotent means X op X === 0, so reduce weights modulo 2.
    assert(LHS == 1 && RHS == 1 && "Weights not reduced!");
    LHS = 0; // 1 + 1 === 0 modulo 2.
    return;
  }
  if (Opcode == Instruction::Add || Opcode == Instruction::FAdd) {
    // TODO: Reduce the weight by exploiting nsw/nuw?
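    // Plain wrapping addition is exact here: 2^Bitwidth occurrences of X sum
    // to (2^Bitwidth) * X, which is 0 in Bitwidth-bit arithmetic, so a weight
    // that wraps around to zero genuinely means the value cancels out.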
    LHS += RHS;
    return;
  }

  assert((Opcode == Instruction::Mul || Opcode == Instruction::FMul) &&
         "Unknown associative operation!");
  unsigned Bitwidth = LHS.getBitWidth();
  // If CM is the Carmichael number then a weight W satisfying W >= CM+Bitwidth
  // can be replaced with W-CM. That's because x^W=x^(W-CM) for every Bitwidth
  // bit number x, since either x is odd in which case x^CM = 1, or x is even in
  // which case both x^W and x^(W - CM) are zero. By subtracting off multiples
  // of CM like this weights can always be reduced to the range [0, CM+Bitwidth)
  // which by a happy accident means that they can always be represented using
  // Bitwidth bits.
  // TODO: Reduce the weight by exploiting nsw/nuw? (Could do much better than
  // the Carmichael number).
  if (Bitwidth > 3) {
    /// CM - The value of Carmichael's lambda function.
    APInt CM = APInt::getOneBitSet(Bitwidth, CarmichaelShift(Bitwidth));
    // Any weight W >= Threshold can be replaced with W - CM.
    APInt Threshold = CM + Bitwidth;
    assert(LHS.ult(Threshold) && RHS.ult(Threshold) && "Weights not reduced!");
    // For Bitwidth 4 or more the following sum does not overflow.
    LHS += RHS;
    while (LHS.uge(Threshold))
      LHS -= CM;
  } else {
    // To avoid problems with overflow do everything the same as above but using
    // a larger type.
    unsigned CM = 1U << CarmichaelShift(Bitwidth);
    unsigned Threshold = CM + Bitwidth;
    assert(LHS.getZExtValue() < Threshold && RHS.getZExtValue() < Threshold &&
           "Weights not reduced!");
    unsigned Total = LHS.getZExtValue() + RHS.getZExtValue();
    while (Total >= Threshold)
      Total -= CM;
    LHS = Total;
  }
}

using RepeatedValue = std::pair<Value*, APInt>;

/// Given an associative binary expression, return the leaf
/// nodes in Ops along with their weights (how many times the leaf occurs). The
/// original expression is the same as
///   (Ops[0].first op Ops[0].first op ... Ops[0].first)  <- Ops[0].second times
/// op
///   (Ops[1].first op Ops[1].first op ... Ops[1].first)  <- Ops[1].second times
/// op
/// ...
/// op
///   (Ops[N].first op Ops[N].first op ... Ops[N].first)  <- Ops[N].second times
///
/// Note that the values Ops[0].first, ..., Ops[N].first are all distinct.
///
/// This routine may modify the function, in which case it returns 'true'. The
/// changes it makes may well be destructive, changing the value computed by 'I'
/// to something completely different. Thus if the routine returns 'true' then
/// you MUST either replace I with a new expression computed from the Ops array,
/// or use RewriteExprTree to put the values back in.
///
/// A leaf node is either not a binary operation of the same kind as the root
/// node 'I' (i.e. is not a binary operator at all, or is, but with a different
/// opcode), or is the same kind of binary operator but has a use which either
/// does not belong to the expression, or does belong to the expression but is
/// a leaf node. Every leaf node has at least one use that is a non-leaf node
/// of the expression, while for non-leaf nodes (except for the root 'I') every
/// use is a non-leaf node of the expression.
///
/// For example:
///                  expression graph        node names
///
///                     +        |        I
///                    / \       |
///                   +   +      |      A,  B
///                  / \ / \     |
///                 *   +   *    |    C,  D,  E
///                / \ / \ / \   |
///                   +   *      |      F,  G
///
/// The leaf nodes are C, E, F and G. The Ops array will contain (maybe not in
/// that order) (C, 1), (E, 1), (F, 2), (G, 2).
///
/// The expression is maximal: if some instruction is a binary operator of the
/// same kind as 'I', and all of its uses are non-leaf nodes of the expression,
/// then the instruction also belongs to the expression, is not a leaf node of
/// it, and its operands also belong to the expression (but may be leaf nodes).
///
/// NOTE: This routine will set operands of non-leaf non-root nodes to undef in
/// order to ensure that every non-root node in the expression has *exactly one*
/// use by a non-leaf node of the expression. This destruction means that the
/// caller MUST either replace 'I' with a new expression or use something like
/// RewriteExprTree to put the values back in if the routine indicates that it
/// made a change by returning 'true'.
///
/// In the above example either the right operand of A or the left operand of B
/// will be replaced by undef. If it is B's operand then this gives:
///
///                     +        |        I
///                    / \       |
///                   +   +      |      A,  B - operand of B replaced with undef
///                  / \   \     |
///                 *   +   *    |    C,  D,  E
///                / \ / \ / \   |
///                   +   *      |      F,  G
///
/// Note that such undef operands can only be reached by passing through 'I'.
/// For example, if you visit operands recursively starting from a leaf node
/// then you will never see such an undef operand unless you get back to 'I',
/// which requires passing through a phi node.
///
/// Note that this routine may also mutate binary operators of the wrong type
/// that have all uses inside the expression (i.e. only used by non-leaf nodes
/// of the expression) if it can turn them into binary operators of the right
/// type and thus make the expression bigger.
static bool LinearizeExprTree(Instruction *I,
                              SmallVectorImpl<RepeatedValue> &Ops,
                              ReassociatePass::OrderedSet &ToRedo,
                              reassociate::OverflowTracking &Flags) {
  assert((isa<UnaryOperator>(I) || isa<BinaryOperator>(I)) &&
         "Expected a UnaryOperator or BinaryOperator!");
  LLVM_DEBUG(dbgs() << "LINEARIZE: " << *I << '\n');
  unsigned Bitwidth = I->getType()->getScalarType()->getPrimitiveSizeInBits();
  unsigned Opcode = I->getOpcode();
  assert(I->isAssociative() && I->isCommutative() &&
         "Expected an associative and commutative operation!");

  // Visit all operands of the expression, keeping track of their weight (the
  // number of paths from the expression root to the operand, or if you like
  // the number of times that operand occurs in the linearized expression).
  // For example, if I = X + A, where X = A + B, then I, X and B have weight 1
  // while A has weight two.

  // Worklist of non-leaf nodes (their operands are in the expression too) along
  // with their weights, representing a certain number of paths to the operator.
  // If an operator occurs in the worklist multiple times then we found multiple
  // ways to get to it.
  SmallVector<std::pair<Instruction*, APInt>, 8> Worklist; // (Op, Weight)
  Worklist.push_back(std::make_pair(I, APInt(Bitwidth, 1)));
  bool Changed = false;

  // Leaves of the expression are values that either aren't the right kind of
  // operation (eg: a constant, or a multiply in an add tree), or are, but have
  // some uses that are not inside the expression. For example, in I = X + X,
  // X = A + B, the value X has two uses (by I) that are in the expression. If
  // X has any other uses, for example in a return instruction, then we consider
  // X to be a leaf, and won't analyze it further. When we first visit a value,
  // if it has more than one use then at first we conservatively consider it to
  // be a leaf. Later, as the expression is explored, we may discover some more
  // uses of the value from inside the expression. If all uses turn out to be
  // from within the expression (and the value is a binary operator of the right
  // kind) then the value is no longer considered to be a leaf, and its operands
  // are explored.

  // Leaves - Keeps track of the set of putative leaves as well as the number of
  // paths to each leaf seen so far.
  using LeafMap = DenseMap<Value *, APInt>;
  LeafMap Leaves; // Leaf -> Total weight so far.
  SmallVector<Value *, 8> LeafOrder; // Ensure deterministic leaf output order.
  const DataLayout DL = I->getModule()->getDataLayout();

#ifndef NDEBUG
  SmallPtrSet<Value *, 8> Visited; // For checking the iteration scheme.
#endif
  while (!Worklist.empty()) {
    std::pair<Instruction*, APInt> P = Worklist.pop_back_val();
    I = P.first; // We examine the operands of this binary operator.

    if (isa<OverflowingBinaryOperator>(I)) {
      Flags.HasNUW &= I->hasNoUnsignedWrap();
      Flags.HasNSW &= I->hasNoSignedWrap();
    }

    for (unsigned OpIdx = 0; OpIdx < I->getNumOperands(); ++OpIdx) { // Visit operands.
      Value *Op = I->getOperand(OpIdx);
      APInt Weight = P.second; // Number of paths to this operand.
      LLVM_DEBUG(dbgs() << "OPERAND: " << *Op << " (" << Weight << ")\n");
      assert(!Op->use_empty() && "No uses, so how did we get to it?!");

      // If this is a binary operation of the right kind with only one use then
      // add its operands to the expression.
      if (BinaryOperator *BO = isReassociableOp(Op, Opcode)) {
        assert(Visited.insert(Op).second && "Not first visit!");
        LLVM_DEBUG(dbgs() << "DIRECT ADD: " << *Op << " (" << Weight << ")\n");
        Worklist.push_back(std::make_pair(BO, Weight));
        continue;
      }

      // Appears to be a leaf. Is the operand already in the set of leaves?
      LeafMap::iterator It = Leaves.find(Op);
      if (It == Leaves.end()) {
        // Not in the leaf map. Must be the first time we saw this operand.
        assert(Visited.insert(Op).second && "Not first visit!");
        if (!Op->hasOneUse()) {
          // This value has uses not accounted for by the expression, so it is
          // not safe to modify. Mark it as being a leaf.
          LLVM_DEBUG(dbgs()
                     << "ADD USES LEAF: " << *Op << " (" << Weight << ")\n");
          LeafOrder.push_back(Op);
          Leaves[Op] = Weight;
          continue;
        }
        // No uses outside the expression, try morphing it.
      } else {
        // Already in the leaf map.
        assert(It != Leaves.end() && Visited.count(Op) &&
               "In leaf map but not visited!");

        // Update the number of paths to the leaf.
        IncorporateWeight(It->second, Weight, Opcode);

#if 0 // TODO: Re-enable once PR13021 is fixed.
        // The leaf already has one use from inside the expression. As we want
        // exactly one such use, drop this new use of the leaf.
        assert(!Op->hasOneUse() && "Only one use, but we got here twice!");
        I->setOperand(OpIdx, UndefValue::get(I->getType()));
        Changed = true;

        // If the leaf is a binary operation of the right kind and we now see
        // that its multiple original uses were in fact all by nodes belonging
        // to the expression, then no longer consider it to be a leaf and add
        // its operands to the expression.
        if (BinaryOperator *BO = isReassociableOp(Op, Opcode)) {
          LLVM_DEBUG(dbgs() << "UNLEAF: " << *Op << " (" << It->second << ")\n");
          Worklist.push_back(std::make_pair(BO, It->second));
          Leaves.erase(It);
          continue;
        }
#endif

        // If we still have uses that are not accounted for by the expression
        // then it is not safe to modify the value.
        if (!Op->hasOneUse())
          continue;

        // No uses outside the expression, try morphing it.
        Weight = It->second;
        Leaves.erase(It); // Since the value may be morphed below.
      }

      // At this point we have a value which, first of all, is not a binary
      // expression of the right kind, and secondly, is only used inside the
      // expression. This means that it can safely be modified. See if we
      // can usefully morph it into an expression of the right kind.
      assert((!isa<Instruction>(Op) ||
              cast<Instruction>(Op)->getOpcode() != Opcode
              || (isa<FPMathOperator>(Op) &&
                  !hasFPAssociativeFlags(cast<Instruction>(Op)))) &&
             "Should have been handled above!");
      assert(Op->hasOneUse() && "Has uses outside the expression tree!");

      // If this is a multiply expression, turn any internal negations into
      // multiplies by -1 so they can be reassociated. Add any users of the
      // newly created multiplication by -1 to the redo list, so any
      // reassociation opportunities that are exposed will be reassociated
      // further.
      Instruction *Neg;
      if (((Opcode == Instruction::Mul && match(Op, m_Neg(m_Value()))) ||
           (Opcode == Instruction::FMul && match(Op, m_FNeg(m_Value())))) &&
          match(Op, m_Instruction(Neg))) {
        LLVM_DEBUG(dbgs()
                   << "MORPH LEAF: " << *Op << " (" << Weight << ") TO ");
        Instruction *Mul = LowerNegateToMultiply(Neg);
        LLVM_DEBUG(dbgs() << *Mul << '\n');
        Worklist.push_back(std::make_pair(Mul, Weight));
        for (User *U : Mul->users()) {
          if (BinaryOperator *UserBO = dyn_cast<BinaryOperator>(U))
            ToRedo.insert(UserBO);
        }
        ToRedo.insert(Neg);
        Changed = true;
        continue;
      }

      // Failed to morph into an expression of the right type. This really is
      // a leaf.
      LLVM_DEBUG(dbgs() << "ADD LEAF: " << *Op << " (" << Weight << ")\n");
      assert(!isReassociableOp(Op, Opcode) && "Value was morphed?");
      LeafOrder.push_back(Op);
      Leaves[Op] = Weight;
    }
  }

  // The leaves, repeated according to their weights, represent the linearized
  // form of the expression.
  for (Value *V : LeafOrder) {
    LeafMap::iterator It = Leaves.find(V);
    if (It == Leaves.end())
      // Node initially thought to be a leaf wasn't.
      continue;
    assert(!isReassociableOp(V, Opcode) && "Shouldn't be a leaf!");
    APInt Weight = It->second;
    if (Weight.isMinValue())
      // Leaf already output or weight reduction eliminated it.
      continue;
    // Ensure the leaf is only output once.
    It->second = 0;
    Ops.push_back(std::make_pair(V, Weight));
    if (Opcode == Instruction::Add && Flags.AllKnownNonNegative && Flags.HasNSW)
      Flags.AllKnownNonNegative &= isKnownNonNegative(V, SimplifyQuery(DL));
  }

  // For nilpotent operations or addition there may be no operands, for example
  // because the expression was "X xor X" or consisted of 2^Bitwidth additions:
  // in both cases the weight reduces to 0 causing the value to be skipped.
  if (Ops.empty()) {
    Constant *Identity = ConstantExpr::getBinOpIdentity(Opcode, I->getType());
    assert(Identity && "Associative operation without identity!");
    Ops.emplace_back(Identity, APInt(Bitwidth, 1));
  }

  return Changed;
}

/// Now that the operands for this expression tree are
/// linearized and optimized, emit them in-order.
void ReassociatePass::RewriteExprTree(BinaryOperator *I,
                                      SmallVectorImpl<ValueEntry> &Ops,
                                      OverflowTracking Flags) {
  assert(Ops.size() > 1 && "Single values should be used directly!");

  // Since our optimizations should never increase the number of operations, the
  // new expression can usually be written reusing the existing binary operators
  // from the original expression tree, without creating any new instructions,
  // though the rewritten expression may have a completely different topology.
  // We take care to not change anything if the new expression will be the same
  // as the original. If more than trivial changes (like commuting operands)
  // were made then we are obliged to clear out any optional subclass data like
  // nsw flags.

  /// NodesToRewrite - Nodes from the original expression available for writing
  /// the new expression into.
  SmallVector<BinaryOperator*, 8> NodesToRewrite;
  unsigned Opcode = I->getOpcode();
  BinaryOperator *Op = I;

  /// NotRewritable - The operands being written will be the leaves of the new
  /// expression and must not be used as inner nodes (via NodesToRewrite) by
  /// mistake. Inner nodes are always reassociable, and usually leaves are not
  /// (if they were they would have been incorporated into the expression and so
  /// would not be leaves), so most of the time there is no danger of this. But
  /// in rare cases a leaf may become reassociable if an optimization kills uses
  /// of it, or it may momentarily become reassociable during rewriting (below)
  /// due to it being removed as an operand of one of its uses. Ensure that
  /// misuse of leaf nodes as inner nodes cannot occur by remembering all of the
  /// future leaves and refusing to reuse any of them as inner nodes.
  SmallPtrSet<Value*, 8> NotRewritable;
  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
    NotRewritable.insert(Ops[i].Op);

  // ExpressionChangedStart - Non-null if the rewritten expression differs from
  // the original in some non-trivial way, requiring the clearing of optional
  // flags. Flags are cleared from the operator in ExpressionChangedStart up to
  // ExpressionChangedEnd inclusive.
  BinaryOperator *ExpressionChangedStart = nullptr,
                 *ExpressionChangedEnd = nullptr;
  for (unsigned i = 0; ; ++i) {
    // The last operation (which comes earliest in the IR) is special as both
    // operands will come from Ops, rather than just one with the other being
    // a subexpression.
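    // For example, with Ops = [a, b, c] the root keeps a as its right operand,
    // its left operand becomes a sub-operation, and the final iteration fills
    // that sub-operation with b and c, producing (b op c) op a.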
    if (i+2 == Ops.size()) {
      Value *NewLHS = Ops[i].Op;
      Value *NewRHS = Ops[i+1].Op;
      Value *OldLHS = Op->getOperand(0);
      Value *OldRHS = Op->getOperand(1);

      if (NewLHS == OldLHS && NewRHS == OldRHS)
        // Nothing changed, leave it alone.
        break;

      if (NewLHS == OldRHS && NewRHS == OldLHS) {
        // The order of the operands was reversed. Swap them.
        LLVM_DEBUG(dbgs() << "RA: " << *Op << '\n');
        Op->swapOperands();
        LLVM_DEBUG(dbgs() << "TO: " << *Op << '\n');
        MadeChange = true;
        ++NumChanged;
        break;
      }

      // The new operation differs non-trivially from the original. Overwrite
      // the old operands with the new ones.
      LLVM_DEBUG(dbgs() << "RA: " << *Op << '\n');
      if (NewLHS != OldLHS) {
        BinaryOperator *BO = isReassociableOp(OldLHS, Opcode);
        if (BO && !NotRewritable.count(BO))
          NodesToRewrite.push_back(BO);
        Op->setOperand(0, NewLHS);
      }
      if (NewRHS != OldRHS) {
        BinaryOperator *BO = isReassociableOp(OldRHS, Opcode);
        if (BO && !NotRewritable.count(BO))
          NodesToRewrite.push_back(BO);
        Op->setOperand(1, NewRHS);
      }
      LLVM_DEBUG(dbgs() << "TO: " << *Op << '\n');

      ExpressionChangedStart = Op;
      if (!ExpressionChangedEnd)
        ExpressionChangedEnd = Op;
      MadeChange = true;
      ++NumChanged;

      break;
    }

    // Not the last operation. The left-hand side will be a sub-expression
    // while the right-hand side will be the current element of Ops.
    Value *NewRHS = Ops[i].Op;
    if (NewRHS != Op->getOperand(1)) {
      LLVM_DEBUG(dbgs() << "RA: " << *Op << '\n');
      if (NewRHS == Op->getOperand(0)) {
        // The new right-hand side was already present as the left operand. If
        // we are lucky then swapping the operands will sort out both of them.
        Op->swapOperands();
      } else {
        // Overwrite with the new right-hand side.
        BinaryOperator *BO = isReassociableOp(Op->getOperand(1), Opcode);
        if (BO && !NotRewritable.count(BO))
          NodesToRewrite.push_back(BO);
        Op->setOperand(1, NewRHS);
        ExpressionChangedStart = Op;
        if (!ExpressionChangedEnd)
          ExpressionChangedEnd = Op;
      }
      LLVM_DEBUG(dbgs() << "TO: " << *Op << '\n');
      MadeChange = true;
      ++NumChanged;
    }

    // Now deal with the left-hand side. If this is already an operation node
    // from the original expression then just rewrite the rest of the expression
    // into it.
    BinaryOperator *BO = isReassociableOp(Op->getOperand(0), Opcode);
    if (BO && !NotRewritable.count(BO)) {
      Op = BO;
      continue;
    }

    // Otherwise, grab a spare node from the original expression and use that as
    // the left-hand side. If there are no nodes left then the optimizers made
    // an expression with more nodes than the original! This usually means that
    // they did something stupid but it might mean that the problem was just too
    // hard (finding the minimal number of multiplications needed to realize a
    // multiplication expression is NP-complete). Whatever the reason, smart or
    // stupid, create a new node if there are none left.
    BinaryOperator *NewOp;
    if (NodesToRewrite.empty()) {
      Constant *Undef = UndefValue::get(I->getType());
      NewOp = BinaryOperator::Create(Instruction::BinaryOps(Opcode), Undef,
                                     Undef, "", I->getIterator());
      if (isa<FPMathOperator>(NewOp))
        NewOp->setFastMathFlags(I->getFastMathFlags());
    } else {
      NewOp = NodesToRewrite.pop_back_val();
    }

    LLVM_DEBUG(dbgs() << "RA: " << *Op << '\n');
    Op->setOperand(0, NewOp);
    LLVM_DEBUG(dbgs() << "TO: " << *Op << '\n');
    ExpressionChangedStart = Op;
    if (!ExpressionChangedEnd)
      ExpressionChangedEnd = Op;
    MadeChange = true;
    ++NumChanged;
    Op = NewOp;
  }

  // If the expression changed non-trivially then clear out all subclass data
  // starting from the operator specified in ExpressionChangedStart, and
  // compactify the operators to just before the expression root to guarantee
  // that the expression tree is dominated by all of Ops.
  if (ExpressionChangedStart) {
    bool ClearFlags = true;
    do {
      // Preserve flags.
      if (ClearFlags) {
        if (isa<FPMathOperator>(I)) {
          FastMathFlags Flags = I->getFastMathFlags();
          ExpressionChangedStart->clearSubclassOptionalData();
          ExpressionChangedStart->setFastMathFlags(Flags);
        } else {
          ExpressionChangedStart->clearSubclassOptionalData();
          // Note that it doesn't hold for mul if one of the operands is zero.
          // TODO: We can preserve NUW flag if we prove that all mul operands
          // are non-zero.
          if (ExpressionChangedStart->getOpcode() == Instruction::Add) {
            if (Flags.HasNUW)
              ExpressionChangedStart->setHasNoUnsignedWrap();
            if (Flags.HasNSW && (Flags.AllKnownNonNegative || Flags.HasNUW))
              ExpressionChangedStart->setHasNoSignedWrap();
          }
        }
      }

      if (ExpressionChangedStart == ExpressionChangedEnd)
        ClearFlags = false;
      if (ExpressionChangedStart == I)
        break;

      // Discard any debug info related to the expressions that has changed (we
      // can leave debug info related to the root and any operation that didn't
      // change, since the result of the expression tree should be the same
      // even after reassociation).
      if (ClearFlags)
        replaceDbgUsesWithUndef(ExpressionChangedStart);

      ExpressionChangedStart->moveBefore(I);
      ExpressionChangedStart =
          cast<BinaryOperator>(*ExpressionChangedStart->user_begin());
    } while (true);
  }

  // Throw away any left over nodes from the original expression.
  for (unsigned i = 0, e = NodesToRewrite.size(); i != e; ++i)
    RedoInsts.insert(NodesToRewrite[i]);
}

/// Insert instructions before the instruction pointed to by BI,
/// that computes the negative version of the value specified. The negative
/// version of the value is returned, and BI is left pointing at the instruction
/// that should be processed next by the reassociation pass.
/// Also add intermediate instructions to the redo list that are modified while
/// pushing the negates through adds. These will be revisited to see if
/// additional opportunities have been exposed.
static Value *NegateValue(Value *V, Instruction *BI,
                          ReassociatePass::OrderedSet &ToRedo) {
  if (auto *C = dyn_cast<Constant>(V)) {
    const DataLayout &DL = BI->getModule()->getDataLayout();
    Constant *Res = C->getType()->isFPOrFPVectorTy()
                        ? ConstantFoldUnaryOpOperand(Instruction::FNeg, C, DL)
                        : ConstantExpr::getNeg(C);
    if (Res)
      return Res;
  }

  // We are trying to expose opportunity for reassociation. One of the things
  // that we want to do to achieve this is to push a negation as deep into an
  // expression chain as possible, to expose the add instructions. In practice,
  // this means that we turn this:
  //   X = -(A+12+C+D) into X = -A + -12 + -C + -D = -12 + -A + -C + -D
  // so that later, an expression such as Y = 12+X could get reassociated with
  // the -12 to eliminate the constants. We assume that instcombine will clean
  // up the mess later if we introduce tons of unnecessary negation
  // instructions.
  //
  if (BinaryOperator *I =
          isReassociableOp(V, Instruction::Add, Instruction::FAdd)) {
    // Push the negates through the add.
    I->setOperand(0, NegateValue(I->getOperand(0), BI, ToRedo));
    I->setOperand(1, NegateValue(I->getOperand(1), BI, ToRedo));
    if (I->getOpcode() == Instruction::Add) {
      I->setHasNoUnsignedWrap(false);
      I->setHasNoSignedWrap(false);
    }

    // We must move the add instruction here, because the neg instructions do
    // not dominate the old add instruction in general. By moving it, we are
    // assured that the neg instructions we just inserted dominate the
    // instruction we are about to insert after them.
    //
    I->moveBefore(BI);
    I->setName(I->getName()+".neg");

    // Add the intermediate negates to the redo list as processing them later
    // could expose more reassociating opportunities.
    ToRedo.insert(I);
    return I;
  }

  // Okay, we need to materialize a negated version of V with an instruction.
  // Scan the use lists of V to see if we have one already.
  for (User *U : V->users()) {
    if (!match(U, m_Neg(m_Value())) && !match(U, m_FNeg(m_Value())))
      continue;

    // We found one! Now we have to make sure that the definition dominates
    // this use. We do this by moving it to the entry block (if it is a
    // non-instruction value) or right after the definition. These negates will
    // be zapped by reassociate later, so we don't need much finesse here.
    Instruction *TheNeg = dyn_cast<Instruction>(U);

    // We can't safely propagate a vector zero constant with poison/undef lanes.
    Constant *C;
    if (match(TheNeg, m_BinOp(m_Constant(C), m_Value())) &&
        C->containsUndefOrPoisonElement())
      continue;

    // Verify that the negate is in this function, V might be a constant expr.
    if (!TheNeg ||
        TheNeg->getParent()->getParent() != BI->getParent()->getParent())
      continue;

    BasicBlock::iterator InsertPt;
    if (Instruction *InstInput = dyn_cast<Instruction>(V)) {
      auto InsertPtOpt = InstInput->getInsertionPointAfterDef();
      if (!InsertPtOpt)
        continue;
      InsertPt = *InsertPtOpt;
    } else {
      InsertPt = TheNeg->getFunction()
                     ->getEntryBlock()
                     .getFirstNonPHIOrDbg()
                     ->getIterator();
    }

    TheNeg->moveBefore(*InsertPt->getParent(), InsertPt);
    if (TheNeg->getOpcode() == Instruction::Sub) {
      TheNeg->setHasNoUnsignedWrap(false);
      TheNeg->setHasNoSignedWrap(false);
    } else {
      TheNeg->andIRFlags(BI);
    }
    ToRedo.insert(TheNeg);
    return TheNeg;
  }

  // Insert a 'neg' instruction that subtracts the value from zero to get the
  // negation.
  Instruction *NewNeg =
      CreateNeg(V, V->getName() + ".neg", BI->getIterator(), BI);
  ToRedo.insert(NewNeg);
  return NewNeg;
}

// See if this `or` looks like a load widening reduction, i.e. that it
// consists of `or`/`shl`/`zext`/`load` nodes only. Note that we don't
// ensure that the pattern is *really* a load widening reduction,
// we do not ensure that it can really be replaced with a widened load,
// only that it mostly looks like one.
static bool isLoadCombineCandidate(Instruction *Or) {
  SmallVector<Instruction *, 8> Worklist;
  SmallSet<Instruction *, 8> Visited;

  auto Enqueue = [&](Value *V) {
    auto *I = dyn_cast<Instruction>(V);
    // Each node of an `or` reduction must be an instruction.
    if (!I)
      return false; // Node is certainly not part of an `or` load reduction.
    // Only process instructions we have never processed before.
    if (Visited.insert(I).second)
      Worklist.emplace_back(I);
    return true; // Will need to look at parent nodes.
  };

  if (!Enqueue(Or))
    return false; // Not an `or` reduction pattern.

  while (!Worklist.empty()) {
    auto *I = Worklist.pop_back_val();

    // Okay, which instruction is this node?
    switch (I->getOpcode()) {
    case Instruction::Or:
      // Got an `or` node. That's fine, just recurse into its operands.
      for (Value *Op : I->operands())
        if (!Enqueue(Op))
          return false; // Not an `or` reduction pattern.
      continue;

    case Instruction::Shl:
    case Instruction::ZExt:
      // `shl`/`zext` nodes are fine, just recurse into their base operand.
      if (!Enqueue(I->getOperand(0)))
        return false; // Not an `or` reduction pattern.
      continue;

    case Instruction::Load:
      // Perfect, `load` node means we've reached an edge of the graph.
      continue;

    default: // Unknown node.
      return false; // Not an `or` reduction pattern.
    }
  }

  return true;
}

/// Return true if it may be profitable to convert this (X|Y) into (X+Y).
static bool shouldConvertOrWithNoCommonBitsToAdd(Instruction *Or) {
  // Don't bother to convert this up unless either the LHS is an associable add
  // or subtract or mul or if this is only used by one of the above.
  // This is only a compile-time improvement, it is not needed for correctness!
  auto isInteresting = [](Value *V) {
    for (auto Op : {Instruction::Add, Instruction::Sub, Instruction::Mul,
                    Instruction::Shl})
      if (isReassociableOp(V, Op))
        return true;
    return false;
  };

  if (any_of(Or->operands(), isInteresting))
    return true;

  Value *VB = Or->user_back();
  if (Or->hasOneUse() && isInteresting(VB))
    return true;

  return false;
}

/// If we have (X|Y), and iff X and Y have no common bits set,
/// transform this into (X+Y) to allow arithmetic reassociation.
static BinaryOperator *convertOrWithNoCommonBitsToAdd(Instruction *Or) {
  // Convert an or into an add.
  BinaryOperator *New = CreateAdd(Or->getOperand(0), Or->getOperand(1), "",
                                  Or->getIterator(), Or);
  New->setHasNoSignedWrap();
  New->setHasNoUnsignedWrap();
  New->takeName(Or);

  // Everyone now refers to the add instruction.
  Or->replaceAllUsesWith(New);
  New->setDebugLoc(Or->getDebugLoc());

  LLVM_DEBUG(dbgs() << "Converted or into an add: " << *New << '\n');
  return New;
}

/// Return true if we should break up this subtract of X-Y into (X + -Y).
static bool ShouldBreakUpSubtract(Instruction *Sub) {
  // If this is a negation, we can't split it up!
  if (match(Sub, m_Neg(m_Value())) || match(Sub, m_FNeg(m_Value())))
    return false;

  // Don't breakup X - undef.
  if (isa<UndefValue>(Sub->getOperand(1)))
    return false;

  // Don't bother to break this up unless either the LHS is an associable add or
  // subtract or if this is only used by one.
  Value *V0 = Sub->getOperand(0);
  if (isReassociableOp(V0, Instruction::Add, Instruction::FAdd) ||
      isReassociableOp(V0, Instruction::Sub, Instruction::FSub))
    return true;
  Value *V1 = Sub->getOperand(1);
  if (isReassociableOp(V1, Instruction::Add, Instruction::FAdd) ||
      isReassociableOp(V1, Instruction::Sub, Instruction::FSub))
    return true;
  Value *VB = Sub->user_back();
  if (Sub->hasOneUse() &&
      (isReassociableOp(VB, Instruction::Add, Instruction::FAdd) ||
       isReassociableOp(VB, Instruction::Sub, Instruction::FSub)))
    return true;

  return false;
}

/// If we have (X-Y), and if either X is an add, or if this is only used by an
/// add, transform this into (X+(0-Y)) to promote better reassociation.
static BinaryOperator *BreakUpSubtract(Instruction *Sub,
                                       ReassociatePass::OrderedSet &ToRedo) {
  // Convert a subtract into an add and a neg instruction. This allows sub
  // instructions to be commuted with other add instructions.
  //
  // Calculate the negative value of Operand 1 of the sub instruction,
  // and set it as the RHS of the add instruction we just made.
  Value *NegVal = NegateValue(Sub->getOperand(1), Sub, ToRedo);
  BinaryOperator *New =
      CreateAdd(Sub->getOperand(0), NegVal, "", Sub->getIterator(), Sub);
  Sub->setOperand(0, Constant::getNullValue(Sub->getType())); // Drop use of op.
  Sub->setOperand(1, Constant::getNullValue(Sub->getType())); // Drop use of op.
  New->takeName(Sub);

  // Everyone now refers to the add instruction.
  Sub->replaceAllUsesWith(New);
  New->setDebugLoc(Sub->getDebugLoc());

  LLVM_DEBUG(dbgs() << "Negated: " << *New << '\n');
  return New;
}

/// If this is a shift of a reassociable multiply or is used by one, change
/// this into a multiply by a constant to assist with further reassociation.
static BinaryOperator *ConvertShiftToMul(Instruction *Shl) {
  Constant *MulCst = ConstantInt::get(Shl->getType(), 1);
  auto *SA = cast<ConstantInt>(Shl->getOperand(1));
  MulCst = ConstantExpr::getShl(MulCst, SA);

  BinaryOperator *Mul = BinaryOperator::CreateMul(Shl->getOperand(0), MulCst,
                                                  "", Shl->getIterator());
  Shl->setOperand(0, PoisonValue::get(Shl->getType())); // Drop use of op.
  Mul->takeName(Shl);

  // Everyone now refers to the mul instruction.
  Shl->replaceAllUsesWith(Mul);
  Mul->setDebugLoc(Shl->getDebugLoc());

  // We can safely preserve the nuw flag in all cases. It's also safe to turn a
  // nuw nsw shl into a nuw nsw mul. However, nsw in isolation requires special
  // handling. It can be preserved as long as we're not left shifting by
  // bitwidth - 1.
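  // For example, on i8 a shift amount of 7 turns the multiplier into -128:
  // "shl nsw i8 -1, 7" is a well-defined -128 (the shifted-out bits all match
  // the result's sign bit), but the equivalent "mul i8 -1, -128" is +128,
  // which wraps in signed arithmetic, so nsw cannot be carried over in that
  // case unless nuw also holds.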
  bool NSW = cast<BinaryOperator>(Shl)->hasNoSignedWrap();
  bool NUW = cast<BinaryOperator>(Shl)->hasNoUnsignedWrap();
  unsigned BitWidth = Shl->getType()->getIntegerBitWidth();
  if (NSW && (NUW || SA->getValue().ult(BitWidth - 1)))
    Mul->setHasNoSignedWrap(true);
  Mul->setHasNoUnsignedWrap(NUW);
  return Mul;
}

/// Scan backwards and forwards among values with the same rank as element i
/// to see if X exists. If X does not exist, return i. This is useful when
/// scanning for 'x' when we see '-x' because they both get the same rank.
static unsigned FindInOperandList(const SmallVectorImpl<ValueEntry> &Ops,
                                  unsigned i, Value *X) {
  unsigned XRank = Ops[i].Rank;
  unsigned e = Ops.size();
  for (unsigned j = i+1; j != e && Ops[j].Rank == XRank; ++j) {
    if (Ops[j].Op == X)
      return j;
    if (Instruction *I1 = dyn_cast<Instruction>(Ops[j].Op))
      if (Instruction *I2 = dyn_cast<Instruction>(X))
        if (I1->isIdenticalTo(I2))
          return j;
  }
  // Scan backwards.
  for (unsigned j = i-1; j != ~0U && Ops[j].Rank == XRank; --j) {
    if (Ops[j].Op == X)
      return j;
    if (Instruction *I1 = dyn_cast<Instruction>(Ops[j].Op))
      if (Instruction *I2 = dyn_cast<Instruction>(X))
        if (I1->isIdenticalTo(I2))
          return j;
  }
  return i;
}

/// Emit a tree of add instructions, summing Ops together
/// and returning the result. Insert the tree before I.
static Value *EmitAddTreeOfValues(BasicBlock::iterator It,
                                  SmallVectorImpl<WeakTrackingVH> &Ops) {
  if (Ops.size() == 1) return Ops.back();

  Value *V1 = Ops.pop_back_val();
  Value *V2 = EmitAddTreeOfValues(It, Ops);
  return CreateAdd(V2, V1, "reass.add", It, &*It);
}

/// If V is an expression tree that is a multiplication sequence,
/// and if this sequence contains a multiply by Factor,
/// remove Factor from the tree and return the new tree.
Value *ReassociatePass::RemoveFactorFromExpression(Value *V, Value *Factor) {
  BinaryOperator *BO = isReassociableOp(V, Instruction::Mul, Instruction::FMul);
  if (!BO)
    return nullptr;

  SmallVector<RepeatedValue, 8> Tree;
  OverflowTracking Flags;
  MadeChange |= LinearizeExprTree(BO, Tree, RedoInsts, Flags);
  SmallVector<ValueEntry, 8> Factors;
  Factors.reserve(Tree.size());
  for (unsigned i = 0, e = Tree.size(); i != e; ++i) {
    RepeatedValue E = Tree[i];
    Factors.append(E.second.getZExtValue(),
                   ValueEntry(getRank(E.first), E.first));
  }

  bool FoundFactor = false;
  bool NeedsNegate = false;
  for (unsigned i = 0, e = Factors.size(); i != e; ++i) {
    if (Factors[i].Op == Factor) {
      FoundFactor = true;
      Factors.erase(Factors.begin()+i);
      break;
    }

    // If this is a negative version of this factor, remove it.
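    // E.g. when asked to remove the factor 3 from an expression containing the
    // constant factor -3, we strip the -3 and remember to negate the remaining
    // product afterwards, since X * -3 == -(X * 3).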
    if (ConstantInt *FC1 = dyn_cast<ConstantInt>(Factor)) {
      if (ConstantInt *FC2 = dyn_cast<ConstantInt>(Factors[i].Op))
        if (FC1->getValue() == -FC2->getValue()) {
          FoundFactor = NeedsNegate = true;
          Factors.erase(Factors.begin()+i);
          break;
        }
    } else if (ConstantFP *FC1 = dyn_cast<ConstantFP>(Factor)) {
      if (ConstantFP *FC2 = dyn_cast<ConstantFP>(Factors[i].Op)) {
        const APFloat &F1 = FC1->getValueAPF();
        APFloat F2(FC2->getValueAPF());
        F2.changeSign();
        if (F1 == F2) {
          FoundFactor = NeedsNegate = true;
          Factors.erase(Factors.begin() + i);
          break;
        }
      }
    }
  }

  if (!FoundFactor) {
    // Make sure to restore the operands to the expression tree.
    RewriteExprTree(BO, Factors, Flags);
    return nullptr;
  }

  BasicBlock::iterator InsertPt = ++BO->getIterator();

  // If this was just a single multiply, remove the multiply and return the only
  // remaining operand.
  if (Factors.size() == 1) {
    RedoInsts.insert(BO);
    V = Factors[0].Op;
  } else {
    RewriteExprTree(BO, Factors, Flags);
    V = BO;
  }

  if (NeedsNegate)
    V = CreateNeg(V, "neg", InsertPt, BO);

  return V;
}

/// If V is a single-use multiply, recursively add its operands as factors,
/// otherwise add V to the list of factors.
///
/// Ops is the top-level list of add operands we're trying to factor.
static void FindSingleUseMultiplyFactors(Value *V,
                                         SmallVectorImpl<Value*> &Factors) {
  BinaryOperator *BO = isReassociableOp(V, Instruction::Mul, Instruction::FMul);
  if (!BO) {
    Factors.push_back(V);
    return;
  }

  // Otherwise, add the LHS and RHS to the list of factors.
  FindSingleUseMultiplyFactors(BO->getOperand(1), Factors);
  FindSingleUseMultiplyFactors(BO->getOperand(0), Factors);
}

/// Optimize a series of operands to an 'and', 'or', or 'xor' instruction.
/// This optimizes based on identities. If it can be reduced to a single Value,
/// it is returned, otherwise the Ops list is mutated as necessary.
static Value *OptimizeAndOrXor(unsigned Opcode,
                               SmallVectorImpl<ValueEntry> &Ops) {
  // Scan the operand lists looking for X and ~X pairs, along with X,X pairs.
  // If we find any, we can simplify the expression. X&~X == 0, X|~X == -1.
  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
    // First, check for X and ~X in the operand list.
    assert(i < Ops.size());
    Value *X;
    if (match(Ops[i].Op, m_Not(m_Value(X)))) { // Cannot occur for ^.
      unsigned FoundX = FindInOperandList(Ops, i, X);
      if (FoundX != i) {
        if (Opcode == Instruction::And) // ...&X&~X = 0
          return Constant::getNullValue(X->getType());

        if (Opcode == Instruction::Or) // ...|X|~X = -1
          return Constant::getAllOnesValue(X->getType());
      }
    }

    // Next, check for duplicate pairs of values, which we assume are next to
    // each other, due to our sorting criteria.
    assert(i < Ops.size());
    if (i+1 != Ops.size() && Ops[i+1].Op == Ops[i].Op) {
      if (Opcode == Instruction::And || Opcode == Instruction::Or) {
        // Drop duplicate values for And and Or.
        Ops.erase(Ops.begin()+i);
        --i; --e;
        ++NumAnnihil;
        continue;
      }

      // Drop pairs of values for Xor.
      assert(Opcode == Instruction::Xor);
      if (e == 2)
        return Constant::getNullValue(Ops[0].Op->getType());

      // Y ^ X^X -> Y
      Ops.erase(Ops.begin()+i, Ops.begin()+i+2);
      i -= 1; e -= 2;
      ++NumAnnihil;
    }
  }
  return nullptr;
}

/// Helper function of CombineXorOpnd(). It creates a bitwise-and
/// instruction with the given two operands, and returns the resulting
/// instruction. There are two special cases: 1) if the constant operand is 0,
/// it will return NULL. 2) if the constant is ~0, the symbolic operand will
/// be returned.
static Value *createAndInstr(BasicBlock::iterator InsertBefore, Value *Opnd,
                             const APInt &ConstOpnd) {
  if (ConstOpnd.isZero())
    return nullptr;

  if (ConstOpnd.isAllOnes())
    return Opnd;

  Instruction *I = BinaryOperator::CreateAnd(
      Opnd, ConstantInt::get(Opnd->getType(), ConstOpnd), "and.ra",
      InsertBefore);
  I->setDebugLoc(InsertBefore->getDebugLoc());
  return I;
}

// Helper function of OptimizeXor(). It tries to simplify "Opnd1 ^ ConstOpnd"
// into "R ^ C", where C would be 0, and R is a symbolic value.
//
// If it was successful, true is returned, and the "R" and "C" are returned
// via "Res" and "ConstOpnd", respectively; otherwise, false is returned,
// and both "Res" and "ConstOpnd" remain unchanged.
bool ReassociatePass::CombineXorOpnd(BasicBlock::iterator It, XorOpnd *Opnd1,
                                     APInt &ConstOpnd, Value *&Res) {
  // Xor-Rule 1: (x | c1) ^ c2 = (x | c1) ^ (c1 ^ c1) ^ c2
  //                           = ((x | c1) ^ c1) ^ (c1 ^ c2)
  //                           = (x & ~c1) ^ (c1 ^ c2)
  // It is useful only when c1 == c2.
  if (!Opnd1->isOrExpr() || Opnd1->getConstPart().isZero())
    return false;

  if (!Opnd1->getValue()->hasOneUse())
    return false;

  const APInt &C1 = Opnd1->getConstPart();
  if (C1 != ConstOpnd)
    return false;

  Value *X = Opnd1->getSymbolicPart();
  Res = createAndInstr(It, X, ~C1);
  // ConstOpnd was C2, now C1 ^ C2.
  ConstOpnd ^= C1;

  if (Instruction *T = dyn_cast<Instruction>(Opnd1->getValue()))
    RedoInsts.insert(T);
  return true;
}

// Helper function of OptimizeXor(). It tries to simplify
// "Opnd1 ^ Opnd2 ^ ConstOpnd" into "R ^ C", where C would be 0, and R is a
// symbolic value.
//
// If it was successful, true is returned, and the "R" and "C" are returned
// via "Res" and "ConstOpnd", respectively (If the entire expression is
// evaluated to a constant, the Res is set to NULL); otherwise, false is
// returned, and both "Res" and "ConstOpnd" remain unchanged.
bool ReassociatePass::CombineXorOpnd(BasicBlock::iterator It, XorOpnd *Opnd1,
                                     XorOpnd *Opnd2, APInt &ConstOpnd,
                                     Value *&Res) {
  Value *X = Opnd1->getSymbolicPart();
  if (X != Opnd2->getSymbolicPart())
    return false;

  // This many instructions become dead. (At least "Opnd1 ^ Opnd2" will die.)
1402   int DeadInstNum = 1;
1403   if (Opnd1->getValue()->hasOneUse())
1404     DeadInstNum++;
1405   if (Opnd2->getValue()->hasOneUse())
1406     DeadInstNum++;
1407 
1408   // Xor-Rule 2:
1409   //  (x | c1) ^ (x & c2)
1410   //    = (x|c1) ^ (x&c2) ^ (c1 ^ c1) = ((x|c1) ^ c1) ^ (x & c2) ^ c1
1411   //    = (x & ~c1) ^ (x & c2) ^ c1               // Xor-Rule 1
1412   //    = (x & c3) ^ c1, where c3 = ~c1 ^ c2      // Xor-Rule 4
1413   //
1414   if (Opnd1->isOrExpr() != Opnd2->isOrExpr()) {
1415     if (Opnd2->isOrExpr())
1416       std::swap(Opnd1, Opnd2);
1417 
1418     const APInt &C1 = Opnd1->getConstPart();
1419     const APInt &C2 = Opnd2->getConstPart();
1420     APInt C3((~C1) ^ C2);
1421 
1422     // Do not increase code size!
1423     if (!C3.isZero() && !C3.isAllOnes()) {
1424       int NewInstNum = ConstOpnd.getBoolValue() ? 1 : 2;
1425       if (NewInstNum > DeadInstNum)
1426         return false;
1427     }
1428 
1429     Res = createAndInstr(It, X, C3);
1430     ConstOpnd ^= C1;
1431   } else if (Opnd1->isOrExpr()) {
1432     // Xor-Rule 3: (x | c1) ^ (x | c2) = (x & c3) ^ c3 where c3 = c1 ^ c2
1433     //
1434     const APInt &C1 = Opnd1->getConstPart();
1435     const APInt &C2 = Opnd2->getConstPart();
1436     APInt C3 = C1 ^ C2;
1437 
1438     // Do not increase code size
1439     if (!C3.isZero() && !C3.isAllOnes()) {
1440       int NewInstNum = ConstOpnd.getBoolValue() ? 1 : 2;
1441       if (NewInstNum > DeadInstNum)
1442         return false;
1443     }
1444 
1445     Res = createAndInstr(It, X, C3);
1446     ConstOpnd ^= C3;
1447   } else {
1448     // Xor-Rule 4: (x & c1) ^ (x & c2) = (x & (c1^c2))
1449     //
1450     const APInt &C1 = Opnd1->getConstPart();
1451     const APInt &C2 = Opnd2->getConstPart();
1452     APInt C3 = C1 ^ C2;
1453     Res = createAndInstr(It, X, C3);
1454   }
1455 
1456   // Put the original operands in the Redo list; hope they will be deleted
1457   // as dead code.
1458   if (Instruction *T = dyn_cast<Instruction>(Opnd1->getValue()))
1459     RedoInsts.insert(T);
1460   if (Instruction *T = dyn_cast<Instruction>(Opnd2->getValue()))
1461     RedoInsts.insert(T);
1462 
1463   return true;
1464 }
1465 
1466 /// Optimize a series of operands to an 'xor' instruction. If it can be reduced
1467 /// to a single Value, it is returned, otherwise the Ops list is mutated as
1468 /// necessary.
1469 Value *ReassociatePass::OptimizeXor(Instruction *I,
1470                                     SmallVectorImpl<ValueEntry> &Ops) {
1471   if (Value *V = OptimizeAndOrXor(Instruction::Xor, Ops))
1472     return V;
1473 
1474   if (Ops.size() == 1)
1475     return nullptr;
1476 
1477   SmallVector<XorOpnd, 8> Opnds;
1478   SmallVector<XorOpnd*, 8> OpndPtrs;
1479   Type *Ty = Ops[0].Op->getType();
1480   APInt ConstOpnd(Ty->getScalarSizeInBits(), 0);
1481 
1482   // Step 1: Convert ValueEntry to XorOpnd
1483   for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
1484     Value *V = Ops[i].Op;
1485     const APInt *C;
1486     // TODO: Support non-splat vectors.
1487     if (match(V, m_APInt(C))) {
1488       ConstOpnd ^= *C;
1489     } else {
1490       XorOpnd O(V);
1491       O.setSymbolicRank(getRank(O.getSymbolicPart()));
1492       Opnds.push_back(O);
1493     }
1494   }
1495 
1496   // NOTE: From this point on, do *NOT* add/delete elements to/from "Opnds".
1497   // It would otherwise invalidate the "Opnds"'s iterator, and hence invalidate
1498   // the "OpndPtrs" as well. For a similar reason, do not fuse this loop
1499   // with the previous loop --- the iterator of the "Opnds" may be invalidated
1500   // when new elements are added to the vector.
1501   for (unsigned i = 0, e = Opnds.size(); i != e; ++i)
1502     OpndPtrs.push_back(&Opnds[i]);
1503 
1504   // Step 2: Sort the Xor-Operands in a way such that the operands containing
1505   // the same symbolic value cluster together. For instance, the input operand
1506   // sequence ("x | 123", "y & 456", "x & 789") will be sorted into:
1507   // ("x | 123", "x & 789", "y & 456").
1508   //
1509   // The purpose is twofold:
1510   // 1) Cluster together the operands sharing the same symbolic-value.
1511   // 2) Operands with a smaller symbolic-value rank are permuted earlier, which
1512   //    could potentially shorten the critical path and expose more
1513   //    loop-invariants. Note that values' ranks are basically defined in RPO
1514   //    order (FIXME). So, if Rank(X) < Rank(Y) < Rank(Z), it means X is defined
1515   //    earlier than Y which is defined earlier than Z. Permuting "x | 1",
1516   //    "y & 2", "z" in the order X-Y-Z is better than any other order.
1517   llvm::stable_sort(OpndPtrs, [](XorOpnd *LHS, XorOpnd *RHS) {
1518     return LHS->getSymbolicRank() < RHS->getSymbolicRank();
1519   });
1520 
1521   // Step 3: Combine adjacent operands
1522   XorOpnd *PrevOpnd = nullptr;
1523   bool Changed = false;
1524   for (unsigned i = 0, e = Opnds.size(); i < e; i++) {
1525     XorOpnd *CurrOpnd = OpndPtrs[i];
1526     // The combined value
1527     Value *CV;
1528 
1529     // Step 3.1: Try simplifying "CurrOpnd ^ ConstOpnd"
1530     if (!ConstOpnd.isZero() &&
1531         CombineXorOpnd(I->getIterator(), CurrOpnd, ConstOpnd, CV)) {
1532       Changed = true;
1533       if (CV)
1534         *CurrOpnd = XorOpnd(CV);
1535       else {
1536         CurrOpnd->Invalidate();
1537         continue;
1538       }
1539     }
1540 
1541     if (!PrevOpnd || CurrOpnd->getSymbolicPart() != PrevOpnd->getSymbolicPart()) {
1542       PrevOpnd = CurrOpnd;
1543       continue;
1544     }
1545 
1546     // Step 3.2: When previous and current operands share the same symbolic
1547     // value, try to simplify "PrevOpnd ^ CurrOpnd ^ ConstOpnd"
1548     if (CombineXorOpnd(I->getIterator(), CurrOpnd, PrevOpnd, ConstOpnd, CV)) {
1549       // Remove previous operand
1550       PrevOpnd->Invalidate();
1551       if (CV) {
1552         *CurrOpnd = XorOpnd(CV);
1553         PrevOpnd = CurrOpnd;
1554       } else {
1555         CurrOpnd->Invalidate();
1556         PrevOpnd = nullptr;
1557       }
1558       Changed = true;
1559     }
1560   }
1561 
1562   // Step 4: Reassemble the Ops
1563   if (Changed) {
1564     Ops.clear();
1565     for (const XorOpnd &O : Opnds) {
1566       if (O.isInvalid())
1567         continue;
1568       ValueEntry VE(getRank(O.getValue()), O.getValue());
1569       Ops.push_back(VE);
1570     }
1571     if (!ConstOpnd.isZero()) {
1572       Value *C = ConstantInt::get(Ty, ConstOpnd);
1573       ValueEntry VE(getRank(C), C);
1574       Ops.push_back(VE);
1575     }
1576     unsigned Sz = Ops.size();
1577     if (Sz == 1)
1578       return Ops.back().Op;
1579     if (Sz == 0) {
1580       assert(ConstOpnd.isZero());
1581       return ConstantInt::get(Ty, ConstOpnd);
1582     }
1583   }
1584 
1585   return nullptr;
1586 }
1587 
1588 /// Optimize a series of operands to an 'add' instruction. This
1589 /// optimizes based on identities. If it can be reduced to a single Value, it
1590 /// is returned, otherwise the Ops list is mutated as necessary.
1591 Value *ReassociatePass::OptimizeAdd(Instruction *I,
1592                                     SmallVectorImpl<ValueEntry> &Ops) {
1593   // Scan the operand lists looking for X and -X pairs. If we find any, we
1594   // can simplify expressions like X+-X == 0 and X+~X == -1. While we're at
1595   // it, scan for any duplicates. We want to canonicalize
1596   // Y+Y+Y+Z -> 3*Y+Z.
1597 
1598   for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
1599     Value *TheOp = Ops[i].Op;
1600     // Check to see if we've seen this operand before. If so, we factor all
1601     // instances of the operand together. Due to our sorting criteria, we know
1602     // that these need to be next to each other in the vector.
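    // For example, a sorted operand list (X, X, X, Y) is rewritten below into
    // (3*X, Y): the duplicates are erased and a single multiply is inserted
    // in their place.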
1603 if (i+1 != Ops.size() && Ops[i+1].Op == TheOp) { 1604 // Rescan the list, remove all instances of this operand from the expr. 1605 unsigned NumFound = 0; 1606 do { 1607 Ops.erase(Ops.begin()+i); 1608 ++NumFound; 1609 } while (i != Ops.size() && Ops[i].Op == TheOp); 1610 1611 LLVM_DEBUG(dbgs() << "\nFACTORING [" << NumFound << "]: " << *TheOp 1612 << '\n'); 1613 ++NumFactor; 1614 1615 // Insert a new multiply. 1616 Type *Ty = TheOp->getType(); 1617 Constant *C = Ty->isIntOrIntVectorTy() ? 1618 ConstantInt::get(Ty, NumFound) : ConstantFP::get(Ty, NumFound); 1619 Instruction *Mul = CreateMul(TheOp, C, "factor", I->getIterator(), I); 1620 1621 // Now that we have inserted a multiply, optimize it. This allows us to 1622 // handle cases that require multiple factoring steps, such as this: 1623 // (X*2) + (X*2) + (X*2) -> (X*2)*3 -> X*6 1624 RedoInsts.insert(Mul); 1625 1626 // If every add operand was a duplicate, return the multiply. 1627 if (Ops.empty()) 1628 return Mul; 1629 1630 // Otherwise, we had some input that didn't have the dupe, such as 1631 // "A + A + B" -> "A*2 + B". Add the new multiply to the list of 1632 // things being added by this operation. 1633 Ops.insert(Ops.begin(), ValueEntry(getRank(Mul), Mul)); 1634 1635 --i; 1636 e = Ops.size(); 1637 continue; 1638 } 1639 1640 // Check for X and -X or X and ~X in the operand list. 1641 Value *X; 1642 if (!match(TheOp, m_Neg(m_Value(X))) && !match(TheOp, m_Not(m_Value(X))) && 1643 !match(TheOp, m_FNeg(m_Value(X)))) 1644 continue; 1645 1646 unsigned FoundX = FindInOperandList(Ops, i, X); 1647 if (FoundX == i) 1648 continue; 1649 1650 // Remove X and -X from the operand list. 1651 if (Ops.size() == 2 && 1652 (match(TheOp, m_Neg(m_Value())) || match(TheOp, m_FNeg(m_Value())))) 1653 return Constant::getNullValue(X->getType()); 1654 1655 // Remove X and ~X from the operand list. 1656 if (Ops.size() == 2 && match(TheOp, m_Not(m_Value()))) 1657 return Constant::getAllOnesValue(X->getType()); 1658 1659 Ops.erase(Ops.begin()+i); 1660 if (i < FoundX) 1661 --FoundX; 1662 else 1663 --i; // Need to back up an extra one. 1664 Ops.erase(Ops.begin()+FoundX); 1665 ++NumAnnihil; 1666 --i; // Revisit element. 1667 e -= 2; // Removed two elements. 1668 1669 // if X and ~X we append -1 to the operand list. 1670 if (match(TheOp, m_Not(m_Value()))) { 1671 Value *V = Constant::getAllOnesValue(X->getType()); 1672 Ops.insert(Ops.end(), ValueEntry(getRank(V), V)); 1673 e += 1; 1674 } 1675 } 1676 1677 // Scan the operand list, checking to see if there are any common factors 1678 // between operands. Consider something like A*A+A*B*C+D. We would like to 1679 // reassociate this to A*(A+B*C)+D, which reduces the number of multiplies. 1680 // To efficiently find this, we count the number of times a factor occurs 1681 // for any ADD operands that are MULs. 1682 DenseMap<Value*, unsigned> FactorOccurrences; 1683 1684 // Keep track of each multiply we see, to avoid triggering on (X*4)+(X*4) 1685 // where they are actually the same multiply. 1686 unsigned MaxOcc = 0; 1687 Value *MaxOccVal = nullptr; 1688 for (unsigned i = 0, e = Ops.size(); i != e; ++i) { 1689 BinaryOperator *BOp = 1690 isReassociableOp(Ops[i].Op, Instruction::Mul, Instruction::FMul); 1691 if (!BOp) 1692 continue; 1693 1694 // Compute all of the factors of this added value. 1695 SmallVector<Value*, 8> Factors; 1696 FindSingleUseMultiplyFactors(BOp, Factors); 1697 assert(Factors.size() > 1 && "Bad linearize!"); 1698 1699 // Add one to FactorOccurrences for each unique factor in this op. 
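    // For example, with the addends A*A + A*B*C + D, this loop counts factor
    // A once for the first multiply and once for the second, so A ends up as
    // MaxOccVal with an occurrence count of 2 and is pulled out below.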
1700 SmallPtrSet<Value*, 8> Duplicates; 1701 for (Value *Factor : Factors) { 1702 if (!Duplicates.insert(Factor).second) 1703 continue; 1704 1705 unsigned Occ = ++FactorOccurrences[Factor]; 1706 if (Occ > MaxOcc) { 1707 MaxOcc = Occ; 1708 MaxOccVal = Factor; 1709 } 1710 1711 // If Factor is a negative constant, add the negated value as a factor 1712 // because we can percolate the negate out. Watch for minint, which 1713 // cannot be positivified. 1714 if (ConstantInt *CI = dyn_cast<ConstantInt>(Factor)) { 1715 if (CI->isNegative() && !CI->isMinValue(true)) { 1716 Factor = ConstantInt::get(CI->getContext(), -CI->getValue()); 1717 if (!Duplicates.insert(Factor).second) 1718 continue; 1719 unsigned Occ = ++FactorOccurrences[Factor]; 1720 if (Occ > MaxOcc) { 1721 MaxOcc = Occ; 1722 MaxOccVal = Factor; 1723 } 1724 } 1725 } else if (ConstantFP *CF = dyn_cast<ConstantFP>(Factor)) { 1726 if (CF->isNegative()) { 1727 APFloat F(CF->getValueAPF()); 1728 F.changeSign(); 1729 Factor = ConstantFP::get(CF->getContext(), F); 1730 if (!Duplicates.insert(Factor).second) 1731 continue; 1732 unsigned Occ = ++FactorOccurrences[Factor]; 1733 if (Occ > MaxOcc) { 1734 MaxOcc = Occ; 1735 MaxOccVal = Factor; 1736 } 1737 } 1738 } 1739 } 1740 } 1741 1742 // If any factor occurred more than one time, we can pull it out. 1743 if (MaxOcc > 1) { 1744 LLVM_DEBUG(dbgs() << "\nFACTORING [" << MaxOcc << "]: " << *MaxOccVal 1745 << '\n'); 1746 ++NumFactor; 1747 1748 // Create a new instruction that uses the MaxOccVal twice. If we don't do 1749 // this, we could otherwise run into situations where removing a factor 1750 // from an expression will drop a use of maxocc, and this can cause 1751 // RemoveFactorFromExpression on successive values to behave differently. 1752 Instruction *DummyInst = 1753 I->getType()->isIntOrIntVectorTy() 1754 ? BinaryOperator::CreateAdd(MaxOccVal, MaxOccVal) 1755 : BinaryOperator::CreateFAdd(MaxOccVal, MaxOccVal); 1756 1757 SmallVector<WeakTrackingVH, 4> NewMulOps; 1758 for (unsigned i = 0; i != Ops.size(); ++i) { 1759 // Only try to remove factors from expressions we're allowed to. 1760 BinaryOperator *BOp = 1761 isReassociableOp(Ops[i].Op, Instruction::Mul, Instruction::FMul); 1762 if (!BOp) 1763 continue; 1764 1765 if (Value *V = RemoveFactorFromExpression(Ops[i].Op, MaxOccVal)) { 1766 // The factorized operand may occur several times. Convert them all in 1767 // one fell swoop. 1768 for (unsigned j = Ops.size(); j != i;) { 1769 --j; 1770 if (Ops[j].Op == Ops[i].Op) { 1771 NewMulOps.push_back(V); 1772 Ops.erase(Ops.begin()+j); 1773 } 1774 } 1775 --i; 1776 } 1777 } 1778 1779 // No need for extra uses anymore. 1780 DummyInst->deleteValue(); 1781 1782 unsigned NumAddedValues = NewMulOps.size(); 1783 Value *V = EmitAddTreeOfValues(I->getIterator(), NewMulOps); 1784 1785 // Now that we have inserted the add tree, optimize it. This allows us to 1786 // handle cases that require multiple factoring steps, such as this: 1787 // A*A*B + A*A*C --> A*(A*B+A*C) --> A*(A*(B+C)) 1788 assert(NumAddedValues > 1 && "Each occurrence should contribute a value"); 1789 (void)NumAddedValues; 1790 if (Instruction *VI = dyn_cast<Instruction>(V)) 1791 RedoInsts.insert(VI); 1792 1793 // Create the multiply. 1794 Instruction *V2 = CreateMul(V, MaxOccVal, "reass.mul", I->getIterator(), I); 1795 1796 // Rerun associate on the multiply in case the inner expression turned into 1797 // a multiply. We want to make sure that we keep things in canonical form. 
1798     RedoInsts.insert(V2);
1799 
1800     // If every add operand included the factor (e.g. "A*B + A*C"), then the
1801     // entire result expression is just the multiply "A*(B+C)".
1802     if (Ops.empty())
1803       return V2;
1804 
1805     // Otherwise, we had some input that didn't have the factor, such as
1806     // "A*B + A*C + D" -> "A*(B+C) + D". Add the new multiply to the list of
1807     // things being added by this operation.
1808     Ops.insert(Ops.begin(), ValueEntry(getRank(V2), V2));
1809   }
1810 
1811   return nullptr;
1812 }
1813 
1814 /// Build up a vector of value/power pairs factoring a product.
1815 ///
1816 /// Given a series of multiplication operands, build a vector of factors and
1817 /// the powers each is raised to when forming the final product. Sort them in
1818 /// the order of descending power.
1819 ///
1820 ///      (x*x)          -> [(x, 2)]
1821 ///     ((x*x)*x)       -> [(x, 3)]
1822 ///   ((((x*y)*x)*y)*x) -> [(x, 3), (y, 2)]
1823 ///
1824 /// \returns Whether any factors have a power greater than one.
1825 static bool collectMultiplyFactors(SmallVectorImpl<ValueEntry> &Ops,
1826                                    SmallVectorImpl<Factor> &Factors) {
1827   // FIXME: Have Ops be (ValueEntry, Multiplicity) pairs, simplifying this.
1828   // Compute the sum of powers of simplifiable factors.
1829   unsigned FactorPowerSum = 0;
1830   for (unsigned Idx = 1, Size = Ops.size(); Idx < Size; ++Idx) {
1831     Value *Op = Ops[Idx-1].Op;
1832 
1833     // Count the number of occurrences of this value.
1834     unsigned Count = 1;
1835     for (; Idx < Size && Ops[Idx].Op == Op; ++Idx)
1836       ++Count;
1837     // Track for simplification all factors which occur 2 or more times.
1838     if (Count > 1)
1839       FactorPowerSum += Count;
1840   }
1841 
1842   // We can only simplify factors if the sum of the powers of our simplifiable
1843   // factors is 4 or higher. When that is the case, we will *always* have
1844   // a simplification. This is an important invariant to prevent cyclically
1845   // trying to simplify already minimal formations.
1846   if (FactorPowerSum < 4)
1847     return false;
1848 
1849   // Now gather the simplifiable factors, removing them from Ops.
1850   FactorPowerSum = 0;
1851   for (unsigned Idx = 1; Idx < Ops.size(); ++Idx) {
1852     Value *Op = Ops[Idx-1].Op;
1853 
1854     // Count the number of occurrences of this value.
1855     unsigned Count = 1;
1856     for (; Idx < Ops.size() && Ops[Idx].Op == Op; ++Idx)
1857       ++Count;
1858     if (Count == 1)
1859       continue;
1860     // Move an even number of occurrences to Factors.
1861     Count &= ~1U;
1862     Idx -= Count;
1863     FactorPowerSum += Count;
1864     Factors.push_back(Factor(Op, Count));
1865     Ops.erase(Ops.begin()+Idx, Ops.begin()+Idx+Count);
1866   }
1867 
1868   // None of the adjustments above should have reduced the sum of factor powers
1869   // below our minimum of '4'.
1870   assert(FactorPowerSum >= 4);
1871 
1872   llvm::stable_sort(Factors, [](const Factor &LHS, const Factor &RHS) {
1873     return LHS.Power > RHS.Power;
1874   });
1875   return true;
1876 }
1877 
1878 /// Build a tree of multiplies, computing the product of Ops.
1879 static Value *buildMultiplyTree(IRBuilderBase &Builder,
1880                                 SmallVectorImpl<Value*> &Ops) {
1881   if (Ops.size() == 1)
1882     return Ops.back();
1883 
1884   Value *LHS = Ops.pop_back_val();
1885   do {
1886     if (LHS->getType()->isIntOrIntVectorTy())
1887       LHS = Builder.CreateMul(LHS, Ops.pop_back_val());
1888     else
1889       LHS = Builder.CreateFMul(LHS, Ops.pop_back_val());
1890   } while (!Ops.empty());
1891 
1892   return LHS;
1893 }
1894 
1895 /// Build a minimal multiplication DAG for (a^x)*(b^y)*(c^z)*...
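/// (For example, the factor list [(x, 3), (y, 2)] can be realized as
/// x * ((x*y) * (x*y)), reusing the x*y sub-product so that only three
/// multiplies are needed instead of four.)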
1896 ///
1897 /// Given a vector of values raised to various powers, where no two values are
1898 /// equal and the powers are sorted in decreasing order, compute the minimal
1899 /// DAG of multiplies to compute the final product, and return that product
1900 /// value.
1901 Value *
1902 ReassociatePass::buildMinimalMultiplyDAG(IRBuilderBase &Builder,
1903                                          SmallVectorImpl<Factor> &Factors) {
1904   assert(Factors[0].Power);
1905   SmallVector<Value *, 4> OuterProduct;
1906   for (unsigned LastIdx = 0, Idx = 1, Size = Factors.size();
1907        Idx < Size && Factors[Idx].Power > 0; ++Idx) {
1908     if (Factors[Idx].Power != Factors[LastIdx].Power) {
1909       LastIdx = Idx;
1910       continue;
1911     }
1912 
1913     // We want to multiply across all the factors with the same power so that
1914     // we can raise them to that power as a single entity. Build a mini tree
1915     // for that.
1916     SmallVector<Value *, 4> InnerProduct;
1917     InnerProduct.push_back(Factors[LastIdx].Base);
1918     do {
1919       InnerProduct.push_back(Factors[Idx].Base);
1920       ++Idx;
1921     } while (Idx < Size && Factors[Idx].Power == Factors[LastIdx].Power);
1922 
1923     // Reset the base value of the first factor to the new expression tree.
1924     // We'll remove all the factors with the same power in a second pass.
1925     Value *M = Factors[LastIdx].Base = buildMultiplyTree(Builder, InnerProduct);
1926     if (Instruction *MI = dyn_cast<Instruction>(M))
1927       RedoInsts.insert(MI);
1928 
1929     LastIdx = Idx;
1930   }
1931   // Unique factors with equal powers -- we've folded them into the first one's
1932   // base.
1933   Factors.erase(std::unique(Factors.begin(), Factors.end(),
1934                             [](const Factor &LHS, const Factor &RHS) {
1935                               return LHS.Power == RHS.Power;
1936                             }),
1937                 Factors.end());
1938 
1939   // Iteratively collect the base of each factor with an odd power into the
1940   // outer product, and halve each power in preparation for squaring the
1941   // expression.
1942   for (Factor &F : Factors) {
1943     if (F.Power & 1)
1944       OuterProduct.push_back(F.Base);
1945     F.Power >>= 1;
1946   }
1947   if (Factors[0].Power) {
1948     Value *SquareRoot = buildMinimalMultiplyDAG(Builder, Factors);
1949     OuterProduct.push_back(SquareRoot);
1950     OuterProduct.push_back(SquareRoot);
1951   }
1952   if (OuterProduct.size() == 1)
1953     return OuterProduct.front();
1954 
1955   Value *V = buildMultiplyTree(Builder, OuterProduct);
1956   return V;
1957 }
1958 
1959 Value *ReassociatePass::OptimizeMul(BinaryOperator *I,
1960                                     SmallVectorImpl<ValueEntry> &Ops) {
1961   // We can only optimize the multiplies when there is a chain of more than
1962   // three, such that a balanced tree might require fewer total multiplies.
1963   if (Ops.size() < 4)
1964     return nullptr;
1965 
1966   // Try to turn linear trees of multiplies without other uses of the
1967   // intermediate stages into minimal multiply DAGs with perfect sub-expression
1968   // re-use.
1969   SmallVector<Factor, 4> Factors;
1970   if (!collectMultiplyFactors(Ops, Factors))
1971     return nullptr; // All distinct factors, so nothing left for us to do.
1972 
1973   IRBuilder<> Builder(I);
1974   // The reassociate transformation for FP operations is performed only
1975   // if unsafe algebra is permitted by FastMathFlags. Propagate those flags
1976   // to the newly generated operations.
1977 if (auto FPI = dyn_cast<FPMathOperator>(I)) 1978 Builder.setFastMathFlags(FPI->getFastMathFlags()); 1979 1980 Value *V = buildMinimalMultiplyDAG(Builder, Factors); 1981 if (Ops.empty()) 1982 return V; 1983 1984 ValueEntry NewEntry = ValueEntry(getRank(V), V); 1985 Ops.insert(llvm::lower_bound(Ops, NewEntry), NewEntry); 1986 return nullptr; 1987 } 1988 1989 Value *ReassociatePass::OptimizeExpression(BinaryOperator *I, 1990 SmallVectorImpl<ValueEntry> &Ops) { 1991 // Now that we have the linearized expression tree, try to optimize it. 1992 // Start by folding any constants that we found. 1993 const DataLayout &DL = I->getModule()->getDataLayout(); 1994 Constant *Cst = nullptr; 1995 unsigned Opcode = I->getOpcode(); 1996 while (!Ops.empty()) { 1997 if (auto *C = dyn_cast<Constant>(Ops.back().Op)) { 1998 if (!Cst) { 1999 Ops.pop_back(); 2000 Cst = C; 2001 continue; 2002 } 2003 if (Constant *Res = ConstantFoldBinaryOpOperands(Opcode, C, Cst, DL)) { 2004 Ops.pop_back(); 2005 Cst = Res; 2006 continue; 2007 } 2008 } 2009 break; 2010 } 2011 // If there was nothing but constants then we are done. 2012 if (Ops.empty()) 2013 return Cst; 2014 2015 // Put the combined constant back at the end of the operand list, except if 2016 // there is no point. For example, an add of 0 gets dropped here, while a 2017 // multiplication by zero turns the whole expression into zero. 2018 if (Cst && Cst != ConstantExpr::getBinOpIdentity(Opcode, I->getType())) { 2019 if (Cst == ConstantExpr::getBinOpAbsorber(Opcode, I->getType())) 2020 return Cst; 2021 Ops.push_back(ValueEntry(0, Cst)); 2022 } 2023 2024 if (Ops.size() == 1) return Ops[0].Op; 2025 2026 // Handle destructive annihilation due to identities between elements in the 2027 // argument list here. 2028 unsigned NumOps = Ops.size(); 2029 switch (Opcode) { 2030 default: break; 2031 case Instruction::And: 2032 case Instruction::Or: 2033 if (Value *Result = OptimizeAndOrXor(Opcode, Ops)) 2034 return Result; 2035 break; 2036 2037 case Instruction::Xor: 2038 if (Value *Result = OptimizeXor(I, Ops)) 2039 return Result; 2040 break; 2041 2042 case Instruction::Add: 2043 case Instruction::FAdd: 2044 if (Value *Result = OptimizeAdd(I, Ops)) 2045 return Result; 2046 break; 2047 2048 case Instruction::Mul: 2049 case Instruction::FMul: 2050 if (Value *Result = OptimizeMul(I, Ops)) 2051 return Result; 2052 break; 2053 } 2054 2055 if (Ops.size() != NumOps) 2056 return OptimizeExpression(I, Ops); 2057 return nullptr; 2058 } 2059 2060 // Remove dead instructions and if any operands are trivially dead add them to 2061 // Insts so they will be removed as well. 2062 void ReassociatePass::RecursivelyEraseDeadInsts(Instruction *I, 2063 OrderedSet &Insts) { 2064 assert(isInstructionTriviallyDead(I) && "Trivially dead instructions only!"); 2065 SmallVector<Value *, 4> Ops(I->operands()); 2066 ValueRankMap.erase(I); 2067 Insts.remove(I); 2068 RedoInsts.remove(I); 2069 llvm::salvageDebugInfo(*I); 2070 I->eraseFromParent(); 2071 for (auto *Op : Ops) 2072 if (Instruction *OpInst = dyn_cast<Instruction>(Op)) 2073 if (OpInst->use_empty()) 2074 Insts.insert(OpInst); 2075 } 2076 2077 /// Zap the given instruction, adding interesting operands to the work list. 2078 void ReassociatePass::EraseInst(Instruction *I) { 2079 assert(isInstructionTriviallyDead(I) && "Trivially dead instructions only!"); 2080 LLVM_DEBUG(dbgs() << "Erasing dead inst: "; I->dump()); 2081 2082 SmallVector<Value *, 8> Ops(I->operands()); 2083 // Erase the dead instruction. 
2084 ValueRankMap.erase(I); 2085 RedoInsts.remove(I); 2086 llvm::salvageDebugInfo(*I); 2087 I->eraseFromParent(); 2088 // Optimize its operands. 2089 SmallPtrSet<Instruction *, 8> Visited; // Detect self-referential nodes. 2090 for (unsigned i = 0, e = Ops.size(); i != e; ++i) 2091 if (Instruction *Op = dyn_cast<Instruction>(Ops[i])) { 2092 // If this is a node in an expression tree, climb to the expression root 2093 // and add that since that's where optimization actually happens. 2094 unsigned Opcode = Op->getOpcode(); 2095 while (Op->hasOneUse() && Op->user_back()->getOpcode() == Opcode && 2096 Visited.insert(Op).second) 2097 Op = Op->user_back(); 2098 2099 // The instruction we're going to push may be coming from a 2100 // dead block, and Reassociate skips the processing of unreachable 2101 // blocks because it's a waste of time and also because it can 2102 // lead to infinite loop due to LLVM's non-standard definition 2103 // of dominance. 2104 if (ValueRankMap.contains(Op)) 2105 RedoInsts.insert(Op); 2106 } 2107 2108 MadeChange = true; 2109 } 2110 2111 /// Recursively analyze an expression to build a list of instructions that have 2112 /// negative floating-point constant operands. The caller can then transform 2113 /// the list to create positive constants for better reassociation and CSE. 2114 static void getNegatibleInsts(Value *V, 2115 SmallVectorImpl<Instruction *> &Candidates) { 2116 // Handle only one-use instructions. Combining negations does not justify 2117 // replicating instructions. 2118 Instruction *I; 2119 if (!match(V, m_OneUse(m_Instruction(I)))) 2120 return; 2121 2122 // Handle expressions of multiplications and divisions. 2123 // TODO: This could look through floating-point casts. 2124 const APFloat *C; 2125 switch (I->getOpcode()) { 2126 case Instruction::FMul: 2127 // Not expecting non-canonical code here. Bail out and wait. 2128 if (match(I->getOperand(0), m_Constant())) 2129 break; 2130 2131 if (match(I->getOperand(1), m_APFloat(C)) && C->isNegative()) { 2132 Candidates.push_back(I); 2133 LLVM_DEBUG(dbgs() << "FMul with negative constant: " << *I << '\n'); 2134 } 2135 getNegatibleInsts(I->getOperand(0), Candidates); 2136 getNegatibleInsts(I->getOperand(1), Candidates); 2137 break; 2138 case Instruction::FDiv: 2139 // Not expecting non-canonical code here. Bail out and wait. 2140 if (match(I->getOperand(0), m_Constant()) && 2141 match(I->getOperand(1), m_Constant())) 2142 break; 2143 2144 if ((match(I->getOperand(0), m_APFloat(C)) && C->isNegative()) || 2145 (match(I->getOperand(1), m_APFloat(C)) && C->isNegative())) { 2146 Candidates.push_back(I); 2147 LLVM_DEBUG(dbgs() << "FDiv with negative constant: " << *I << '\n'); 2148 } 2149 getNegatibleInsts(I->getOperand(0), Candidates); 2150 getNegatibleInsts(I->getOperand(1), Candidates); 2151 break; 2152 default: 2153 break; 2154 } 2155 } 2156 2157 /// Given an fadd/fsub with an operand that is a one-use instruction 2158 /// (the fadd/fsub), try to change negative floating-point constants into 2159 /// positive constants to increase potential for reassociation and CSE. 2160 Instruction *ReassociatePass::canonicalizeNegFPConstantsForOp(Instruction *I, 2161 Instruction *Op, 2162 Value *OtherOp) { 2163 assert((I->getOpcode() == Instruction::FAdd || 2164 I->getOpcode() == Instruction::FSub) && "Expected fadd/fsub"); 2165 2166 // Collect instructions with negative FP constants from the subtree that ends 2167 // in Op. 
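  // For example, given I = X + (Y * -2.0), the fmul below is a candidate: its
  // constant gets flipped to 2.0, and the add can then be rewritten as
  // X - (Y * 2.0).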
2168 SmallVector<Instruction *, 4> Candidates; 2169 getNegatibleInsts(Op, Candidates); 2170 if (Candidates.empty()) 2171 return nullptr; 2172 2173 // Don't canonicalize x + (-Constant * y) -> x - (Constant * y), if the 2174 // resulting subtract will be broken up later. This can get us into an 2175 // infinite loop during reassociation. 2176 bool IsFSub = I->getOpcode() == Instruction::FSub; 2177 bool NeedsSubtract = !IsFSub && Candidates.size() % 2 == 1; 2178 if (NeedsSubtract && ShouldBreakUpSubtract(I)) 2179 return nullptr; 2180 2181 for (Instruction *Negatible : Candidates) { 2182 const APFloat *C; 2183 if (match(Negatible->getOperand(0), m_APFloat(C))) { 2184 assert(!match(Negatible->getOperand(1), m_Constant()) && 2185 "Expecting only 1 constant operand"); 2186 assert(C->isNegative() && "Expected negative FP constant"); 2187 Negatible->setOperand(0, ConstantFP::get(Negatible->getType(), abs(*C))); 2188 MadeChange = true; 2189 } 2190 if (match(Negatible->getOperand(1), m_APFloat(C))) { 2191 assert(!match(Negatible->getOperand(0), m_Constant()) && 2192 "Expecting only 1 constant operand"); 2193 assert(C->isNegative() && "Expected negative FP constant"); 2194 Negatible->setOperand(1, ConstantFP::get(Negatible->getType(), abs(*C))); 2195 MadeChange = true; 2196 } 2197 } 2198 assert(MadeChange == true && "Negative constant candidate was not changed"); 2199 2200 // Negations cancelled out. 2201 if (Candidates.size() % 2 == 0) 2202 return I; 2203 2204 // Negate the final operand in the expression by flipping the opcode of this 2205 // fadd/fsub. 2206 assert(Candidates.size() % 2 == 1 && "Expected odd number"); 2207 IRBuilder<> Builder(I); 2208 Value *NewInst = IsFSub ? Builder.CreateFAddFMF(OtherOp, Op, I) 2209 : Builder.CreateFSubFMF(OtherOp, Op, I); 2210 I->replaceAllUsesWith(NewInst); 2211 RedoInsts.insert(I); 2212 return dyn_cast<Instruction>(NewInst); 2213 } 2214 2215 /// Canonicalize expressions that contain a negative floating-point constant 2216 /// of the following form: 2217 /// OtherOp + (subtree) -> OtherOp {+/-} (canonical subtree) 2218 /// (subtree) + OtherOp -> OtherOp {+/-} (canonical subtree) 2219 /// OtherOp - (subtree) -> OtherOp {+/-} (canonical subtree) 2220 /// 2221 /// The fadd/fsub opcode may be switched to allow folding a negation into the 2222 /// input instruction. 2223 Instruction *ReassociatePass::canonicalizeNegFPConstants(Instruction *I) { 2224 LLVM_DEBUG(dbgs() << "Combine negations for: " << *I << '\n'); 2225 Value *X; 2226 Instruction *Op; 2227 if (match(I, m_FAdd(m_Value(X), m_OneUse(m_Instruction(Op))))) 2228 if (Instruction *R = canonicalizeNegFPConstantsForOp(I, Op, X)) 2229 I = R; 2230 if (match(I, m_FAdd(m_OneUse(m_Instruction(Op)), m_Value(X)))) 2231 if (Instruction *R = canonicalizeNegFPConstantsForOp(I, Op, X)) 2232 I = R; 2233 if (match(I, m_FSub(m_Value(X), m_OneUse(m_Instruction(Op))))) 2234 if (Instruction *R = canonicalizeNegFPConstantsForOp(I, Op, X)) 2235 I = R; 2236 return I; 2237 } 2238 2239 /// Inspect and optimize the given instruction. Note that erasing 2240 /// instructions is not allowed. 2241 void ReassociatePass::OptimizeInst(Instruction *I) { 2242 // Only consider operations that we understand. 2243 if (!isa<UnaryOperator>(I) && !isa<BinaryOperator>(I)) 2244 return; 2245 2246 if (I->getOpcode() == Instruction::Shl && isa<ConstantInt>(I->getOperand(1))) 2247 // If an operand of this shift is a reassociable multiply, or if the shift 2248 // is used by a reassociable multiply or add, turn into a multiply. 
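    // For example, (X * Y) << 2 becomes (X * Y) * 4, so the power-of-two
    // constant can participate in the surrounding multiply tree.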
2249 if (isReassociableOp(I->getOperand(0), Instruction::Mul) || 2250 (I->hasOneUse() && 2251 (isReassociableOp(I->user_back(), Instruction::Mul) || 2252 isReassociableOp(I->user_back(), Instruction::Add)))) { 2253 Instruction *NI = ConvertShiftToMul(I); 2254 RedoInsts.insert(I); 2255 MadeChange = true; 2256 I = NI; 2257 } 2258 2259 // Commute binary operators, to canonicalize the order of their operands. 2260 // This can potentially expose more CSE opportunities, and makes writing other 2261 // transformations simpler. 2262 if (I->isCommutative()) 2263 canonicalizeOperands(I); 2264 2265 // Canonicalize negative constants out of expressions. 2266 if (Instruction *Res = canonicalizeNegFPConstants(I)) 2267 I = Res; 2268 2269 // Don't optimize floating-point instructions unless they have the 2270 // appropriate FastMathFlags for reassociation enabled. 2271 if (isa<FPMathOperator>(I) && !hasFPAssociativeFlags(I)) 2272 return; 2273 2274 // Do not reassociate boolean (i1) expressions. We want to preserve the 2275 // original order of evaluation for short-circuited comparisons that 2276 // SimplifyCFG has folded to AND/OR expressions. If the expression 2277 // is not further optimized, it is likely to be transformed back to a 2278 // short-circuited form for code gen, and the source order may have been 2279 // optimized for the most likely conditions. 2280 if (I->getType()->isIntegerTy(1)) 2281 return; 2282 2283 // If this is a bitwise or instruction of operands 2284 // with no common bits set, convert it to X+Y. 2285 if (I->getOpcode() == Instruction::Or && 2286 shouldConvertOrWithNoCommonBitsToAdd(I) && !isLoadCombineCandidate(I) && 2287 (cast<PossiblyDisjointInst>(I)->isDisjoint() || 2288 haveNoCommonBitsSet(I->getOperand(0), I->getOperand(1), 2289 SimplifyQuery(I->getModule()->getDataLayout(), 2290 /*DT=*/nullptr, /*AC=*/nullptr, I)))) { 2291 Instruction *NI = convertOrWithNoCommonBitsToAdd(I); 2292 RedoInsts.insert(I); 2293 MadeChange = true; 2294 I = NI; 2295 } 2296 2297 // If this is a subtract instruction which is not already in negate form, 2298 // see if we can convert it to X+-Y. 2299 if (I->getOpcode() == Instruction::Sub) { 2300 if (ShouldBreakUpSubtract(I)) { 2301 Instruction *NI = BreakUpSubtract(I, RedoInsts); 2302 RedoInsts.insert(I); 2303 MadeChange = true; 2304 I = NI; 2305 } else if (match(I, m_Neg(m_Value()))) { 2306 // Otherwise, this is a negation. See if the operand is a multiply tree 2307 // and if this is not an inner node of a multiply tree. 2308 if (isReassociableOp(I->getOperand(1), Instruction::Mul) && 2309 (!I->hasOneUse() || 2310 !isReassociableOp(I->user_back(), Instruction::Mul))) { 2311 Instruction *NI = LowerNegateToMultiply(I); 2312 // If the negate was simplified, revisit the users to see if we can 2313 // reassociate further. 2314 for (User *U : NI->users()) { 2315 if (BinaryOperator *Tmp = dyn_cast<BinaryOperator>(U)) 2316 RedoInsts.insert(Tmp); 2317 } 2318 RedoInsts.insert(I); 2319 MadeChange = true; 2320 I = NI; 2321 } 2322 } 2323 } else if (I->getOpcode() == Instruction::FNeg || 2324 I->getOpcode() == Instruction::FSub) { 2325 if (ShouldBreakUpSubtract(I)) { 2326 Instruction *NI = BreakUpSubtract(I, RedoInsts); 2327 RedoInsts.insert(I); 2328 MadeChange = true; 2329 I = NI; 2330 } else if (match(I, m_FNeg(m_Value()))) { 2331 // Otherwise, this is a negation. See if the operand is a multiply tree 2332 // and if this is not an inner node of a multiply tree. 2333 Value *Op = isa<BinaryOperator>(I) ? 
I->getOperand(1) : 2334 I->getOperand(0); 2335 if (isReassociableOp(Op, Instruction::FMul) && 2336 (!I->hasOneUse() || 2337 !isReassociableOp(I->user_back(), Instruction::FMul))) { 2338 // If the negate was simplified, revisit the users to see if we can 2339 // reassociate further. 2340 Instruction *NI = LowerNegateToMultiply(I); 2341 for (User *U : NI->users()) { 2342 if (BinaryOperator *Tmp = dyn_cast<BinaryOperator>(U)) 2343 RedoInsts.insert(Tmp); 2344 } 2345 RedoInsts.insert(I); 2346 MadeChange = true; 2347 I = NI; 2348 } 2349 } 2350 } 2351 2352 // If this instruction is an associative binary operator, process it. 2353 if (!I->isAssociative()) return; 2354 BinaryOperator *BO = cast<BinaryOperator>(I); 2355 2356 // If this is an interior node of a reassociable tree, ignore it until we 2357 // get to the root of the tree, to avoid N^2 analysis. 2358 unsigned Opcode = BO->getOpcode(); 2359 if (BO->hasOneUse() && BO->user_back()->getOpcode() == Opcode) { 2360 // During the initial run we will get to the root of the tree. 2361 // But if we get here while we are redoing instructions, there is no 2362 // guarantee that the root will be visited. So Redo later 2363 if (BO->user_back() != BO && 2364 BO->getParent() == BO->user_back()->getParent()) 2365 RedoInsts.insert(BO->user_back()); 2366 return; 2367 } 2368 2369 // If this is an add tree that is used by a sub instruction, ignore it 2370 // until we process the subtract. 2371 if (BO->hasOneUse() && BO->getOpcode() == Instruction::Add && 2372 cast<Instruction>(BO->user_back())->getOpcode() == Instruction::Sub) 2373 return; 2374 if (BO->hasOneUse() && BO->getOpcode() == Instruction::FAdd && 2375 cast<Instruction>(BO->user_back())->getOpcode() == Instruction::FSub) 2376 return; 2377 2378 ReassociateExpression(BO); 2379 } 2380 2381 void ReassociatePass::ReassociateExpression(BinaryOperator *I) { 2382 // First, walk the expression tree, linearizing the tree, collecting the 2383 // operand information. 2384 SmallVector<RepeatedValue, 8> Tree; 2385 OverflowTracking Flags; 2386 MadeChange |= LinearizeExprTree(I, Tree, RedoInsts, Flags); 2387 SmallVector<ValueEntry, 8> Ops; 2388 Ops.reserve(Tree.size()); 2389 for (const RepeatedValue &E : Tree) 2390 Ops.append(E.second.getZExtValue(), ValueEntry(getRank(E.first), E.first)); 2391 2392 LLVM_DEBUG(dbgs() << "RAIn:\t"; PrintOps(I, Ops); dbgs() << '\n'); 2393 2394 // Now that we have linearized the tree to a list and have gathered all of 2395 // the operands and their ranks, sort the operands by their rank. Use a 2396 // stable_sort so that values with equal ranks will have their relative 2397 // positions maintained (and so the compiler is deterministic). Note that 2398 // this sorts so that the highest ranking values end up at the beginning of 2399 // the vector. 2400 llvm::stable_sort(Ops); 2401 2402 // Now that we have the expression tree in a convenient 2403 // sorted form, optimize it globally if possible. 2404 if (Value *V = OptimizeExpression(I, Ops)) { 2405 if (V == I) 2406 // Self-referential expression in unreachable code. 2407 return; 2408 // This expression tree simplified to something that isn't a tree, 2409 // eliminate it. 
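    // For example, A ^ B ^ A ^ B collapses to the constant 0, and every use
    // of the original xor tree is replaced with it below.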
2410 LLVM_DEBUG(dbgs() << "Reassoc to scalar: " << *V << '\n'); 2411 I->replaceAllUsesWith(V); 2412 if (Instruction *VI = dyn_cast<Instruction>(V)) 2413 if (I->getDebugLoc()) 2414 VI->setDebugLoc(I->getDebugLoc()); 2415 RedoInsts.insert(I); 2416 ++NumAnnihil; 2417 return; 2418 } 2419 2420 // We want to sink immediates as deeply as possible except in the case where 2421 // this is a multiply tree used only by an add, and the immediate is a -1. 2422 // In this case we reassociate to put the negation on the outside so that we 2423 // can fold the negation into the add: (-X)*Y + Z -> Z-X*Y 2424 if (I->hasOneUse()) { 2425 if (I->getOpcode() == Instruction::Mul && 2426 cast<Instruction>(I->user_back())->getOpcode() == Instruction::Add && 2427 isa<ConstantInt>(Ops.back().Op) && 2428 cast<ConstantInt>(Ops.back().Op)->isMinusOne()) { 2429 ValueEntry Tmp = Ops.pop_back_val(); 2430 Ops.insert(Ops.begin(), Tmp); 2431 } else if (I->getOpcode() == Instruction::FMul && 2432 cast<Instruction>(I->user_back())->getOpcode() == 2433 Instruction::FAdd && 2434 isa<ConstantFP>(Ops.back().Op) && 2435 cast<ConstantFP>(Ops.back().Op)->isExactlyValue(-1.0)) { 2436 ValueEntry Tmp = Ops.pop_back_val(); 2437 Ops.insert(Ops.begin(), Tmp); 2438 } 2439 } 2440 2441 LLVM_DEBUG(dbgs() << "RAOut:\t"; PrintOps(I, Ops); dbgs() << '\n'); 2442 2443 if (Ops.size() == 1) { 2444 if (Ops[0].Op == I) 2445 // Self-referential expression in unreachable code. 2446 return; 2447 2448 // This expression tree simplified to something that isn't a tree, 2449 // eliminate it. 2450 I->replaceAllUsesWith(Ops[0].Op); 2451 if (Instruction *OI = dyn_cast<Instruction>(Ops[0].Op)) 2452 OI->setDebugLoc(I->getDebugLoc()); 2453 RedoInsts.insert(I); 2454 return; 2455 } 2456 2457 if (Ops.size() > 2 && Ops.size() <= GlobalReassociateLimit) { 2458 // Find the pair with the highest count in the pairmap and move it to the 2459 // back of the list so that it can later be CSE'd. 2460 // example: 2461 // a*b*c*d*e 2462 // if c*e is the most "popular" pair, we can express this as 2463 // (((c*e)*d)*b)*a 2464 unsigned Max = 1; 2465 unsigned BestRank = 0; 2466 std::pair<unsigned, unsigned> BestPair; 2467 unsigned Idx = I->getOpcode() - Instruction::BinaryOpsBegin; 2468 unsigned LimitIdx = 0; 2469 // With the CSE-driven heuristic, we are about to slap two values at the 2470 // beginning of the expression whereas they could live very late in the CFG. 2471 // When using the CSE-local heuristic we avoid creating dependences from 2472 // completely unrelated part of the CFG by limiting the expression 2473 // reordering on the values that live in the first seen basic block. 2474 // The main idea is that we want to avoid forming expressions that would 2475 // become loop dependent. 2476 if (UseCSELocalOpt) { 2477 const BasicBlock *FirstSeenBB = nullptr; 2478 int StartIdx = Ops.size() - 1; 2479 // Skip the first value of the expression since we need at least two 2480 // values to materialize an expression. I.e., even if this value is 2481 // anchored in a different basic block, the actual first sub expression 2482 // will be anchored on the second value. 2483 for (int i = StartIdx - 1; i != -1; --i) { 2484 const Value *Val = Ops[i].Op; 2485 const auto *CurrLeafInstr = dyn_cast<Instruction>(Val); 2486 const BasicBlock *SeenBB = nullptr; 2487 if (!CurrLeafInstr) { 2488 // The value is free of any CFG dependencies. 2489 // Do as if it lives in the entry block. 
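          // (Values with no defining instruction, such as constants and
          // function arguments, fall into this bucket.)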
2490           //
2491           // We do this to make sure all the values falling on this path are
2492           // seen through the same anchor point. The rationale is that these
2493           // values can be combined together to form a sub-expression free of
2494           // any CFG dependencies, so we want them to stay together.
2495           // We could be cleverer and postpone the anchor down to the first
2496           // anchored value, but that's likely complicated to get right.
2497           // E.g., we wouldn't want to do that if that means being stuck in a
2498           // loop.
2499           //
2500           // For instance, we wouldn't want to change:
2501           // res = arg1 op arg2 op arg3 op ... op loop_val1 op loop_val2 ...
2502           // into
2503           // res = loop_val1 op arg1 op arg2 op arg3 op ... op loop_val2 ...
2504           // Because all the sub expressions with arg2..N would be stuck between
2505           // two loop dependent values.
2506           SeenBB = &I->getParent()->getParent()->getEntryBlock();
2507         } else {
2508           SeenBB = CurrLeafInstr->getParent();
2509         }
2510 
2511         if (!FirstSeenBB) {
2512           FirstSeenBB = SeenBB;
2513           continue;
2514         }
2515         if (FirstSeenBB != SeenBB) {
2516           // The ith value is in a different basic block.
2517           // Rewind the index once to point to the last value in the same
2518           // basic block.
2519           LimitIdx = i + 1;
2520           LLVM_DEBUG(dbgs() << "CSE reordering: Consider values between ["
2521                             << LimitIdx << ", " << StartIdx << "]\n");
2522           break;
2523         }
2524       }
2525     }
2526     for (unsigned i = Ops.size() - 1; i > LimitIdx; --i) {
2527       // We must use int type to go below zero when LimitIdx is 0.
2528       for (int j = i - 1; j >= (int)LimitIdx; --j) {
2529         unsigned Score = 0;
2530         Value *Op0 = Ops[i].Op;
2531         Value *Op1 = Ops[j].Op;
2532         if (std::less<Value *>()(Op1, Op0))
2533           std::swap(Op0, Op1);
2534         auto it = PairMap[Idx].find({Op0, Op1});
2535         if (it != PairMap[Idx].end()) {
2536           // Functions like BreakUpSubtract() can erase the Values we're using
2537           // as keys and create new Values after we built the PairMap. There's a
2538           // small chance that the new nodes can have the same address as
2539           // something already in the table. We shouldn't accumulate the stored
2540           // score in that case as it refers to the wrong Value.
2541           if (it->second.isValid())
2542             Score += it->second.Score;
2543         }
2544 
2545         unsigned MaxRank = std::max(Ops[i].Rank, Ops[j].Rank);
2546 
2547         // By construction, the operands are sorted in reverse order of their
2548         // topological order.
2549         // So we tend to form (sub) expressions with values that are close to
2550         // each other.
2551         //
2552         // Now, to expose more CSE opportunities, we want to expose the pair
2553         // of operands that occurs most often (as statically computed in
2554         // BuildPairMap) as the first sub-expression.
2555         //
2556         // If two pairs occur equally often, we pick the one with the
2557         // lowest rank, meaning the one with both operands appearing first in
2558         // the topological order.
2559         if (Score > Max || (Score == Max && MaxRank < BestRank)) {
2560           BestPair = {j, i};
2561           Max = Score;
2562           BestRank = MaxRank;
2563         }
2564       }
2565     }
2566     if (Max > 1) {
2567       auto Op0 = Ops[BestPair.first];
2568       auto Op1 = Ops[BestPair.second];
2569       Ops.erase(&Ops[BestPair.second]);
2570       Ops.erase(&Ops[BestPair.first]);
2571       Ops.push_back(Op0);
2572       Ops.push_back(Op1);
2573     }
2574   }
2575   LLVM_DEBUG(dbgs() << "RAOut after CSE reorder:\t"; PrintOps(I, Ops);
2576              dbgs() << '\n');
2577   // Now that we ordered and optimized the expressions, splat them back into
2578   // the expression tree, removing any unneeded nodes.
2579   RewriteExprTree(I, Ops, Flags);
2580 }
2581 
2582 void
2583 ReassociatePass::BuildPairMap(ReversePostOrderTraversal<Function *> &RPOT) {
2584   // Make a "pairmap" of how often each operand pair occurs.
2585   for (BasicBlock *BI : RPOT) {
2586     for (Instruction &I : *BI) {
2587       if (!I.isAssociative() || !I.isBinaryOp())
2588         continue;
2589 
2590       // Ignore nodes that aren't at the root of trees.
2591       if (I.hasOneUse() && I.user_back()->getOpcode() == I.getOpcode())
2592         continue;
2593 
2594       // Collect all operands in a single reassociable expression.
2595       // Since Reassociate has already been run once, we can assume things
2596       // are already canonical according to Reassociation's regime.
2597       SmallVector<Value *, 8> Worklist = { I.getOperand(0), I.getOperand(1) };
2598       SmallVector<Value *, 8> Ops;
2599       while (!Worklist.empty() && Ops.size() <= GlobalReassociateLimit) {
2600         Value *Op = Worklist.pop_back_val();
2601         Instruction *OpI = dyn_cast<Instruction>(Op);
2602         if (!OpI || OpI->getOpcode() != I.getOpcode() || !OpI->hasOneUse()) {
2603           Ops.push_back(Op);
2604           continue;
2605         }
2606         // Be paranoid about self-referencing expressions in unreachable code.
2607         if (OpI->getOperand(0) != OpI)
2608           Worklist.push_back(OpI->getOperand(0));
2609         if (OpI->getOperand(1) != OpI)
2610           Worklist.push_back(OpI->getOperand(1));
2611       }
2612       // Skip extremely long expressions.
2613       if (Ops.size() > GlobalReassociateLimit)
2614         continue;
2615 
2616       // Add all pairwise combinations of operands to the pair map.
2617       unsigned BinaryIdx = I.getOpcode() - Instruction::BinaryOpsBegin;
2618       SmallSet<std::pair<Value *, Value*>, 32> Visited;
2619       for (unsigned i = 0; i < Ops.size() - 1; ++i) {
2620         for (unsigned j = i + 1; j < Ops.size(); ++j) {
2621           // Canonicalize operand orderings.
2622           Value *Op0 = Ops[i];
2623           Value *Op1 = Ops[j];
2624           if (std::less<Value *>()(Op1, Op0))
2625             std::swap(Op0, Op1);
2626           if (!Visited.insert({Op0, Op1}).second)
2627             continue;
2628           auto res = PairMap[BinaryIdx].insert({{Op0, Op1}, {Op0, Op1, 1}});
2629           if (!res.second) {
2630             // If either key value has been erased then we've got the same
2631             // address by coincidence. That can't happen here because nothing
2632             // is erasing values, but it can happen by the time we're querying
2633             // the map.
2634             assert(res.first->second.isValid() && "WeakVH invalidated");
2635             ++res.first->second.Score;
2636           }
2637         }
2638       }
2639     }
2640   }
2641 }
2642 
2643 PreservedAnalyses ReassociatePass::run(Function &F, FunctionAnalysisManager &) {
2644   // Get the function's basic blocks in Reverse Post Order. This order is used
2645   // by BuildRankMap to precalculate ranks correctly. It also excludes dead
2646   // basic blocks (it has been seen that the analysis in this pass could hang
2647   // when analyzing dead basic blocks).
2648   ReversePostOrderTraversal<Function *> RPOT(&F);
2649 
2650   // Calculate the rank map for F.
2651   BuildRankMap(F, RPOT);
2652 
2653   // Build the pair map before running reassociate.
2654   // Technically this would be more accurate if we did it after one round
2655   // of reassociation, but in practice it doesn't seem to help much on
2656   // real-world code, so don't waste the compile time running reassociate
2657   // twice.
2658   // If a user wants, they could explicitly run reassociate twice in their
2659   // pass pipeline for further potential gains.
2660   // It might also be possible to update the pair map at runtime, but the
2661   // overhead of that may be large if there are many reassociable chains.
2662 BuildPairMap(RPOT); 2663 2664 MadeChange = false; 2665 2666 // Traverse the same blocks that were analysed by BuildRankMap. 2667 for (BasicBlock *BI : RPOT) { 2668 assert(RankMap.count(&*BI) && "BB should be ranked."); 2669 // Optimize every instruction in the basic block. 2670 for (BasicBlock::iterator II = BI->begin(), IE = BI->end(); II != IE;) 2671 if (isInstructionTriviallyDead(&*II)) { 2672 EraseInst(&*II++); 2673 } else { 2674 OptimizeInst(&*II); 2675 assert(II->getParent() == &*BI && "Moved to a different block!"); 2676 ++II; 2677 } 2678 2679 // Make a copy of all the instructions to be redone so we can remove dead 2680 // instructions. 2681 OrderedSet ToRedo(RedoInsts); 2682 // Iterate over all instructions to be reevaluated and remove trivially dead 2683 // instructions. If any operand of the trivially dead instruction becomes 2684 // dead mark it for deletion as well. Continue this process until all 2685 // trivially dead instructions have been removed. 2686 while (!ToRedo.empty()) { 2687 Instruction *I = ToRedo.pop_back_val(); 2688 if (isInstructionTriviallyDead(I)) { 2689 RecursivelyEraseDeadInsts(I, ToRedo); 2690 MadeChange = true; 2691 } 2692 } 2693 2694 // Now that we have removed dead instructions, we can reoptimize the 2695 // remaining instructions. 2696 while (!RedoInsts.empty()) { 2697 Instruction *I = RedoInsts.front(); 2698 RedoInsts.erase(RedoInsts.begin()); 2699 if (isInstructionTriviallyDead(I)) 2700 EraseInst(I); 2701 else 2702 OptimizeInst(I); 2703 } 2704 } 2705 2706 // We are done with the rank map and pair map. 2707 RankMap.clear(); 2708 ValueRankMap.clear(); 2709 for (auto &Entry : PairMap) 2710 Entry.clear(); 2711 2712 if (MadeChange) { 2713 PreservedAnalyses PA; 2714 PA.preserveSet<CFGAnalyses>(); 2715 return PA; 2716 } 2717 2718 return PreservedAnalyses::all(); 2719 } 2720 2721 namespace { 2722 2723 class ReassociateLegacyPass : public FunctionPass { 2724 ReassociatePass Impl; 2725 2726 public: 2727 static char ID; // Pass identification, replacement for typeid 2728 2729 ReassociateLegacyPass() : FunctionPass(ID) { 2730 initializeReassociateLegacyPassPass(*PassRegistry::getPassRegistry()); 2731 } 2732 2733 bool runOnFunction(Function &F) override { 2734 if (skipFunction(F)) 2735 return false; 2736 2737 FunctionAnalysisManager DummyFAM; 2738 auto PA = Impl.run(F, DummyFAM); 2739 return !PA.areAllPreserved(); 2740 } 2741 2742 void getAnalysisUsage(AnalysisUsage &AU) const override { 2743 AU.setPreservesCFG(); 2744 AU.addPreserved<AAResultsWrapperPass>(); 2745 AU.addPreserved<BasicAAWrapperPass>(); 2746 AU.addPreserved<GlobalsAAWrapperPass>(); 2747 } 2748 }; 2749 2750 } // end anonymous namespace 2751 2752 char ReassociateLegacyPass::ID = 0; 2753 2754 INITIALIZE_PASS(ReassociateLegacyPass, "reassociate", 2755 "Reassociate expressions", false, false) 2756 2757 // Public interface to the Reassociate pass 2758 FunctionPass *llvm::createReassociatePass() { 2759 return new ReassociateLegacyPass(); 2760 } 2761