Lines matching defs:XLen
910 // If the upper XLen-16 bits are not used, try to convert this to a simm12
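The 910 match is a materialization heuristic: if a consumer only observes the low 16 bits, the selector is free to pick the upper bits, and sign-extending from bit 15 yields the candidate most likely to land in the signed 12-bit immediate range. A minimal standalone sketch of that check (the helper name and driver are invented here, not LLVM code):

    #include <cstdint>
    #include <cstdio>

    // Hypothetical sketch: if only the low 16 bits of Imm are observed,
    // the upper bits are free, and sign-extending from bit 15 produces
    // the candidate most likely to fit the simm12 range [-2048, 2047].
    static bool fitsSimm12IgnoringUpperBits(uint64_t Imm) {
      int64_t SExt = (int64_t)(int16_t)(Imm & 0xFFFF); // sign-extend bit 15
      return SExt >= -2048 && SExt <= 2047;
    }

    int main() {
      std::printf("%d\n", fitsSimm12IgnoringUpperBits(0xFFFFFFFFFFFFF800ULL)); // 1 (-2048)
      std::printf("%d\n", fitsSimm12IgnoringUpperBits(0x0000000000000800ULL)); // 0 (+2048)
    }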
1086 unsigned XLen = Subtarget->getXLen();
1087 unsigned LeadingZeros = XLen - llvm::bit_width(Mask);
1115 unsigned XLen = Subtarget->getXLen();
1116 unsigned LeadingZeros = XLen - llvm::bit_width(Mask);
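The idiom repeated at 1086-1087 and 1115-1116 counts leading zeros: for a nonzero value viewed in an XLen-bit register, XLen - bit_width(Mask) is exactly its number of leading zero bits. A self-contained sketch using the C++20 std::bit_width that llvm::bit_width mirrors:

    #include <bit>
    #include <cstdint>
    #include <cstdio>

    // For a nonzero Mask viewed as an XLen-bit value, XLen - bit_width(Mask)
    // is its number of leading zero bits.
    static unsigned leadingZeros(unsigned XLen, uint64_t Mask) {
      return XLen - std::bit_width(Mask);
    }

    int main() {
      std::printf("%u\n", leadingZeros(64, 0x00FF000000000000ULL)); // 8
      std::printf("%u\n", leadingZeros(32, 0x0000FFFF));            // 16
    }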
1131 // (srli (slli X, (XLen-C3)), (XLen-C3) + C)
1136 // less than XLen bits.
1180 // (srai (slli X, (XLen-16)), (XLen-16) + C)
1182 // (srai (slli X, (XLen-8)), (XLen-8) + C)
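The comments at 1131 and 1180/1182 describe the same bitfield-extraction rewrite: when the AND mask consists of C3 trailing ones, the mask plus shift collapses into a slli/srli pair (or slli/srai when the extracted field should be sign-extended). A hedged sketch verifying the equivalence with plain C++ shifts:

    #include <cassert>
    #include <cstdint>

    // With C2 a mask of C3 trailing ones, (srl (and X, C2), C) equals
    // (srli (slli X, XLen-C3), (XLen-C3)+C): the left shift discards the
    // bits the mask would clear; the right shift undoes it and applies C.
    int main() {
      const unsigned XLen = 64, C3 = 16, C = 4;
      uint64_t X  = 0x123456789ABCDEF0ULL;
      uint64_t C2 = (1ULL << C3) - 1;

      uint64_t viaMask  = (X & C2) >> C;
      uint64_t viaShift = (X << (XLen - C3)) >> ((XLen - C3) + C);
      assert(viaMask == viaShift);

      // The srai form at 1180 instead sign-extends the 16-bit field:
      int64_t signExt = (int64_t)(X << (XLen - 16)) >> ((XLen - 16) + C);
      (void)signExt;
      return 0;
    }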
1244 unsigned XLen = Subtarget->getXLen();
1245 assert((C2 > 0 && C2 < XLen) && "Unexpected shift amount!");
1259 C1 &= maskTrailingOnes<uint64_t>(XLen - C2);
1270 unsigned Leading = XLen - llvm::bit_width(C1);
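1259 trims C1 to the bits that survive the earlier shift by C2, so the Leading count at 1270 only sees live mask bits. A sketch of the maskTrailingOnes semantics (reimplemented locally; LLVM's template lives in MathExtras.h):

    #include <cstdint>
    #include <cstdio>

    // Local stand-in for llvm::maskTrailingOnes<uint64_t>: N low bits set.
    static uint64_t maskTrailingOnes(unsigned N) {
      return N >= 64 ? ~0ULL : (1ULL << N) - 1;
    }

    int main() {
      unsigned XLen = 64, C2 = 8;
      uint64_t C1 = 0xFF000000000000FFULL;
      C1 &= maskTrailingOnes(XLen - C2); // drop the top C2 bits of the mask
      std::printf("%#llx\n", (unsigned long long)C1); // 0xff
    }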
1316 Skip |= HasBitTest && Leading == XLen - 1;
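The guard at 1316 reads as: Leading == XLen - 1 forces bit_width(C1) == 1, i.e. C1 == 1, so the pattern is a single-bit test, which a dedicated bit-test instruction (HasBitTest presumably covers Zbs-style extracts) already handles better than a shift-pair rewrite. A one-assert sketch of that arithmetic:

    #include <bit>
    #include <cassert>
    #include <cstdint>

    // Leading == XLen - 1 can only hold when bit_width(C1) == 1, i.e.
    // C1 == 1: the AND is a single-bit test, better left to a bit-test
    // instruction than rewritten into shifts.
    int main() {
      const unsigned XLen = 64;
      uint64_t C1 = 1;
      assert(XLen - std::bit_width(C1) == XLen - 1);
    }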
1333 unsigned Leading = XLen - llvm::bit_width(C1);
1335 if (C2 + Leading < XLen &&
1336 C1 == (maskTrailingOnes<uint64_t>(XLen - (C2 + Leading)) << C2)) {
1338 if ((XLen - (C2 + Leading)) == 32 && Subtarget->hasStdExtZba()) {
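1335-1336 test that C1 is a shifted mask aligned with the shift amount: Leading zeros on top, solid ones in the middle, exactly C2 zeros below. When the run of ones is 32 bits wide, 1338 hands the pattern to Zba's word operations. A standalone re-derivation of the check (helper names invented):

    #include <bit>
    #include <cassert>
    #include <cstdint>

    // Invented helpers re-deriving the 1335-1336 check: C1 must be
    // Leading zeros, then solid ones, then exactly C2 trailing zeros.
    static uint64_t maskTrailingOnes(unsigned N) {
      return N >= 64 ? ~0ULL : (1ULL << N) - 1;
    }

    static bool isAlignedShiftedMask(uint64_t C1, unsigned C2, unsigned XLen) {
      unsigned Leading = XLen - std::bit_width(C1);
      return C2 + Leading < XLen &&
             C1 == (maskTrailingOnes(XLen - (C2 + Leading)) << C2);
    }

    int main() {
      assert(isAlignedShiftedMask(0x0000FFFFFFFF0000ULL, 16, 64));  // 32 ones: the 1338 Zba case
      assert(!isAlignedShiftedMask(0x0000FFFF00FF0000ULL, 16, 64)); // hole in the ones
    }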
1363 unsigned Leading = XLen - llvm::bit_width(C1);
1365 if (Leading == C2 && C2 + Trailing < XLen && OneUseOrZExtW &&
1401 unsigned Leading = XLen - llvm::bit_width(C1);
1458 // fits in XLen bits. We can shift X left by the number of leading zeros in
1459 // C2 and shift C1 left by XLen-lzcnt(C2). This will ensure the final
1460 // product has XLen trailing zeros, putting it in the output of MULHU. This
1499 // We need to shift the AND input and C1 left by a total of XLen bits.
1502 unsigned XLen = Subtarget->getXLen();
1503 unsigned LeadingZeros = XLen - llvm::bit_width(C2);
1508 unsigned ConstantShift = XLen - LeadingZeros;
1509 if (ConstantShift > (XLen - llvm::bit_width(C1)))
1514 if (XLen == 32)
1517 // Create (mulhu (slli X, lzcnt(C2)), C1 << (XLen - lzcnt(C2))).
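The comment block at 1458-1460 and the code at 1499-1517 implement a constant-free masking trick: shift X left by lzcnt(C2), which performs the AND with C2 for free (assuming C2 is a mask of trailing ones, which the surrounding code presumably verifies), and shift C1 left by XLen - lzcnt(C2); the two shifts total XLen, so the true product lands wholly in MULHU's output. A sketch with unsigned __int128 (a GCC/Clang extension) standing in for the widening multiply:

    #include <bit>
    #include <cassert>
    #include <cstdint>

    // unsigned __int128 stands in for the widening multiply whose high
    // half MULHU returns.
    static uint64_t mulhu(uint64_t A, uint64_t B) {
      return (uint64_t)(((unsigned __int128)A * B) >> 64);
    }

    int main() {
      uint64_t C2 = 0x00000000FFFFFFFFULL; // mask of trailing ones
      uint64_t C1 = 0x1234;
      uint64_t X  = 0xDEADBEEFCAFEBABEULL;

      unsigned lz = std::countl_zero(C2);  // 32 leading zeros
      // Shifting X left by lz both masks with C2 and contributes lz bits;
      // shifting C1 by 64 - lz contributes the rest, for 64 bits total,
      // so the true product sits entirely in the multiply's high half.
      uint64_t expected = (X & C2) * C1;   // must fit in 64 bits
      uint64_t viaMulhu = mulhu(X << lz, C1 << (64 - lz));
      assert(viaMulhu == expected);
    }

Note how the two shift amounts computed at 1503/1508 always sum to XLen, matching the 1499 comment, and how the 1509 guard rejects the transform when C1 would lose bits to its shift.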
2943 unsigned XLen = Subtarget->getXLen();
2947 Mask &= maskTrailingOnes<uint64_t>(XLen - C2);
2953 unsigned Leading = XLen - llvm::bit_width(Mask);
2990 unsigned XLen = Subtarget->getXLen();
2991 unsigned Leading = XLen - llvm::bit_width(Mask);
3176 // SLLI only uses the lower (XLen - ShAmt) bits.
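3176 is a demanded-bits fact: shifting left by ShAmt throws away the top ShAmt bits of the source, so only the low XLen - ShAmt bits matter to SLLI. A two-line demonstration:

    #include <cassert>
    #include <cstdint>

    // Two values agreeing on their low XLen - ShAmt bits shift to the same
    // result, so SLLI's operand may be produced with the top bits undefined.
    int main() {
      const unsigned ShAmt = 16;
      uint64_t A = 0x1111222233334444ULL;
      uint64_t B = A ^ 0xFFFF000000000000ULL; // differs only in the top ShAmt bits
      assert((A << ShAmt) == (B << ShAmt));
    }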