Lines Matching +full:32 +full:kb
56 GISelKnownBits &KB, GISelCSEInfo *CSEInfo,
112 // 33 bits are sign extended and with S_MUL_U64_U32_PSEUDO if the higher 32
132 GISelKnownBits &KB, GISelCSEInfo *CSEInfo,
135 : Combiner(MF, CInfo, TPC, &KB, CSEInfo), RuleConfig(RuleConfig), STI(STI),
137 Helper(Observer, B, /*IsPreLegalize*/ false, &KB, MDT, LI),
153 // common case, splitting this into a move and a 32-bit shift is faster and
155 return Helper.tryCombineShiftToUnmerge(MI, 32);
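
The hit on line 155 names the combine directly: tryCombineShiftToUnmerge with a threshold of 32. On subtargets where 64-bit shifts are slow, a shift by a constant of 32 or more can be rewritten roughly as an unmerge, a 32-bit shift, and a merge with zero. A minimal standalone sketch of the identity being exploited for a left shift; shl64_via_unmerge is a made-up name for illustration, not an LLVM helper:

// Standalone illustration (not LLVM code): a 64-bit left shift by an
// amount >= 32 only produces bits in the high half, so it can be done
// as a 32-bit shift of the low half followed by a move into the high half.
#include <cassert>
#include <cstdint>

static uint64_t shl64_via_unmerge(uint64_t X, unsigned Amt) {
  assert(Amt >= 32 && Amt < 64);
  uint32_t Lo = static_cast<uint32_t>(X); // low 32-bit half of the source
  uint32_t Hi = Lo << (Amt - 32);         // 32-bit shift by the reduced amount
  return static_cast<uint64_t>(Hi) << 32; // result = {Hi, 0}
}

int main() {
  const uint64_t X = 0x123456789ABCDEF0ULL;
  for (unsigned Amt = 32; Amt < 64; ++Amt)
    assert(shl64_via_unmerge(X, Amt) == (X << Amt));
  return 0;
}
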
216 if (Ty == LLT::scalar(32) || Ty == LLT::scalar(16)) {
219 assert(SrcSize == 16 || SrcSize == 32 || SrcSize == 64);
229 const LLT S32 = LLT::scalar(32);
338 return ShiftOffset < 32 && ShiftOffset >= 8 && (ShiftOffset % 8) == 0;
349 const LLT S32 = LLT::scalar(32);
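
The hits around lines 216-349 appear to come from the byte-to-float combines: an integer-to-float conversion whose source is known (via KB) to fit in a single byte can go through a "convert byte 0" operation, and a right shift by a whole number of bytes can then be folded into the byte index, which is what the ShiftOffset check on line 338 (8, 16 or 24 only) guards. A minimal standalone sketch of the two identities; cvt_f32_ubyte is a made-up stand-in for the hardware's per-byte convert, not the LLVM matcher:

// Standalone illustration (not LLVM code).
#include <cassert>
#include <cstdint>

// Stand-in for a per-byte convert: float value of byte ByteIdx of X.
static float cvt_f32_ubyte(uint32_t X, unsigned ByteIdx) {
  return static_cast<float>((X >> (8 * ByteIdx)) & 0xff);
}

int main() {
  // If the source is known to fit in 8 bits, a plain unsigned-to-float
  // conversion gives the same result as converting byte 0.
  for (uint32_t V = 0; V < 256; ++V)
    assert(static_cast<float>(V) == cvt_f32_ubyte(V, 0));

  // A shift by 8, 16 or 24 folds into the byte index:
  // converting byte 0 of (X >> 8*N) equals converting byte N of X.
  const uint32_t X = 0xdeadbeef;
  for (unsigned N = 1; N < 4; ++N)
    assert(cvt_f32_ubyte(X >> (8 * N), 0) == cvt_f32_ubyte(X, N));
  return 0;
}
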
424 if (KB->getKnownBits(Src1).countMinLeadingZeros() >= 32 &&
425 KB->getKnownBits(Src0).countMinLeadingZeros() >= 32) {
430 if (KB->computeNumSignBits(Src1) >= 33 &&
431 KB->computeNumSignBits(Src0) >= 33) {
495 GISelKnownBits *KB = &getAnalysis<GISelKnownBitsAnalysis>().get(MF);
503 AMDGPUPostLegalizerCombinerImpl Impl(MF, CInfo, TPC, *KB, /*CSEInfo*/ nullptr,