//===- CodeMetrics.cpp - Code cost measurements --------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements code cost measurement utilities.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/CodeMetrics.h"
#include "llvm/DataLayout.h"
#include "llvm/Function.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/Support/CallSite.h"

using namespace llvm;

/// callIsSmall - Return true if a call is likely to lower to a single target
/// instruction, or is otherwise deemed small.
/// TODO: Perhaps handle calls like memcpy, strcpy, etc.?
bool llvm::callIsSmall(ImmutableCallSite CS) {
  if (isa<IntrinsicInst>(CS.getInstruction()))
    return true;

  const Function *F = CS.getCalledFunction();
  if (!F) return false;

  if (F->hasLocalLinkage()) return false;

  if (!F->hasName()) return false;

  StringRef Name = F->getName();

  // These will all likely lower to a single selection DAG node.
  if (Name == "copysign" || Name == "copysignf" || Name == "copysignl" ||
      Name == "fabs" || Name == "fabsf" || Name == "fabsl" ||
      Name == "sin" || Name == "sinf" || Name == "sinl" ||
      Name == "cos" || Name == "cosf" || Name == "cosl" ||
      Name == "sqrt" || Name == "sqrtf" || Name == "sqrtl")
    return true;

  // These are all likely to be optimized into something smaller.
  if (Name == "pow" || Name == "powf" || Name == "powl" ||
      Name == "exp2" || Name == "exp2l" || Name == "exp2f" ||
      Name == "floor" || Name == "floorf" || Name == "ceil" ||
      Name == "round" || Name == "ffs" || Name == "ffsl" ||
      Name == "abs" || Name == "labs" || Name == "llabs")
    return true;

  return false;
}

bool llvm::isInstructionFree(const Instruction *I, const DataLayout *TD) {
  if (isa<PHINode>(I))
    return true;

  // If a GEP has all constant indices, it will probably be folded with
  // a load/store.
  if (const GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(I))
    return GEP->hasAllConstantIndices();

  if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
    switch (II->getIntrinsicID()) {
    default:
      return false;
    case Intrinsic::dbg_declare:
    case Intrinsic::dbg_value:
    case Intrinsic::invariant_start:
    case Intrinsic::invariant_end:
    case Intrinsic::lifetime_start:
    case Intrinsic::lifetime_end:
    case Intrinsic::objectsize:
    case Intrinsic::ptr_annotation:
    case Intrinsic::var_annotation:
      // These intrinsics don't count toward size.
      return true;
    }
  }

  if (const CastInst *CI = dyn_cast<CastInst>(I)) {
    // No-op casts, including ptr <-> int, don't count.
    if (CI->isLosslessCast())
      return true;

    Value *Op = CI->getOperand(0);
    // An inttoptr cast is free so long as the input is a legal integer type
    // that doesn't contain values outside the range of a pointer.
    if (isa<IntToPtrInst>(CI) && TD &&
        TD->isLegalInteger(Op->getType()->getScalarSizeInBits()) &&
        Op->getType()->getScalarSizeInBits() <= TD->getPointerSizeInBits(
          cast<IntToPtrInst>(CI)->getAddressSpace()))
      return true;

    // A ptrtoint cast is free so long as the result is a legal integer type
    // large enough to store the pointer.
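    // For example, on a target with 64-bit pointers where i64 is a legal
    // integer type, "ptrtoint i8* %p to i64" is free here, while the
    // truncating "ptrtoint i8* %p to i32" is not.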
    if (isa<PtrToIntInst>(CI) && TD &&
        TD->isLegalInteger(CI->getType()->getScalarSizeInBits()) &&
        CI->getType()->getScalarSizeInBits() >= TD->getPointerSizeInBits(
          cast<PtrToIntInst>(CI)->getPointerAddressSpace()))
      return true;

    // A trunc to a native type is free (assuming the target has compare and
    // shift-right instructions of the same width).
    if (TD && isa<TruncInst>(CI) &&
        TD->isLegalInteger(TD->getTypeSizeInBits(CI->getType())))
      return true;

    // The result of a cmp instruction is often extended (to be used by other
    // cmp instructions, logical operations, or return instructions). These
    // extensions are usually no-ops on most targets.
    if (isa<CmpInst>(CI->getOperand(0)))
      return true;
  }

  return false;
}

/// analyzeBasicBlock - Fill in the current structure with information gleaned
/// from the specified block.
void CodeMetrics::analyzeBasicBlock(const BasicBlock *BB,
                                    const DataLayout *TD) {
  ++NumBlocks;
  unsigned NumInstsBeforeThisBB = NumInsts;
  for (BasicBlock::const_iterator II = BB->begin(), E = BB->end();
       II != E; ++II) {
    if (isInstructionFree(II, TD))
      continue;

    // Special handling for calls.
    if (isa<CallInst>(II) || isa<InvokeInst>(II)) {
      ImmutableCallSite CS(cast<Instruction>(II));

      if (const Function *F = CS.getCalledFunction()) {
        // If a function is both internal and has a single use, then it is
        // extremely likely to get inlined in the future (it was probably
        // exposed by an interleaved devirtualization pass).
        if (!CS.isNoInline() && F->hasInternalLinkage() && F->hasOneUse())
          ++NumInlineCandidates;

        // If this call is to the function itself, then the function is
        // recursive. Inlining it into other functions is a bad idea, because
        // this is basically just a form of loop peeling, and our metrics
        // aren't useful for that case.
        if (F == BB->getParent())
          isRecursive = true;
      }

      if (!callIsSmall(CS)) {
        // Each argument to a call takes on average one instruction to set up.
        NumInsts += CS.arg_size();

        // We don't want inline asm to count as a call - that would prevent
        // loop unrolling. The argument setup cost is still real, though.
        if (!isa<InlineAsm>(CS.getCalledValue()))
          ++NumCalls;
      }
    }

    if (const AllocaInst *AI = dyn_cast<AllocaInst>(II)) {
      if (!AI->isStaticAlloca())
        this->usesDynamicAlloca = true;
    }

    if (isa<ExtractElementInst>(II) || II->getType()->isVectorTy())
      ++NumVectorInsts;

    ++NumInsts;
  }

  if (isa<ReturnInst>(BB->getTerminator()))
    ++NumRets;

  // We never want to inline functions that contain an indirectbr. This is
  // incorrect because all the blockaddresses (in static global initializers,
  // for example) would still refer to the original function, and the indirect
  // jump would jump from the inlined copy of the function into the original
  // function, which is undefined behavior.
  // FIXME: This logic isn't really right; we can safely inline functions
  // with indirectbr's as long as no other function or global references the
  // blockaddress of a block within the current function. And as a QOI issue,
  // if someone is using a blockaddress without an indirectbr, and that
  // reference somehow ends up in another function or global, we probably
  // don't want to inline this function.
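  // (A blockaddress is a constant of the form "blockaddress(@fn, %bb)", and
  // an indirectbr transfers control to a computed address that must be the
  // blockaddress of one of its listed successors.)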
  if (isa<IndirectBrInst>(BB->getTerminator()))
    containsIndirectBr = true;

  // Remember NumInsts for this BB.
  NumBBInsts[BB] = NumInsts - NumInstsBeforeThisBB;
}

void CodeMetrics::analyzeFunction(Function *F, const DataLayout *TD) {
  // If this function contains a call that "returns twice" (e.g., setjmp or
  // _setjmp) and it isn't marked with "returns twice" itself, never inline it.
  // This is a hack because we depend on the user marking their local variables
  // as volatile if they are live across a setjmp call, and they probably
  // won't do this in callers.
  exposesReturnsTwice = F->callsFunctionThatReturnsTwice() &&
    !F->getFnAttributes().hasAttribute(Attributes::ReturnsTwice);

  // Look at the size of the callee.
  for (Function::const_iterator BB = F->begin(), E = F->end(); BB != E; ++BB)
    analyzeBasicBlock(&*BB, TD);
}
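// A minimal usage sketch, for illustration only - the variable names and the
// threshold below are hypothetical, not part of the CodeMetrics API. A client
// pass (the loop unroller, for example) populates a CodeMetrics object and
// then consults the accumulated counts and flags:
//
//   CodeMetrics Metrics;
//   Metrics.analyzeFunction(F, TD);
//   if (!Metrics.exposesReturnsTwice && !Metrics.isRecursive &&
//       !Metrics.containsIndirectBr && Metrics.NumInsts < Threshold) {
//     // F looks small and safe enough to consider transforming.
//   }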