Lines Matching +full:sense +full:- +full:bitfield +full:- +full:width

1 //===- AMDGPURegisterBankInfo.cpp -------------------------------*- C++ -*-==//
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
7 //===----------------------------------------------------------------------===//
17 /// sort of pseudo-register bank needed to represent SGPRs used in a vector
31 /// is naturally a bitmask with one bit per lane, in a 32 or 64-bit
37 /// SCC, which is a 1-bit unaddressable register. This will need to be copied to
38 /// a 32-bit virtual register. Taken together, this means we need to adjust the
40 /// widened to 32-bits, and all VALU booleans need to be s1 values.
42 /// A noteworthy exception to the s1-means-vcc rule is for legalization artifact
44 /// bank. A non-boolean source (such as a truncate from a 1-bit load from
67 /// picking the optimal operand combination from a post-isel optimization pass.
69 //===----------------------------------------------------------------------===//
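To make the boolean conventions described in this header concrete, here is a minimal MachineIRBuilder sketch (illustrative only, not code from this file; the function and register names are hypothetical): a VALU comparison produces an s1 lane mask in the VCC bank, while scalar booleans are carried as 32-bit SGPR values because SCC cannot be addressed directly.

  // Sketch: the two boolean conventions, assuming a builder B and MRI.
  void booleanBanksSketch(llvm::MachineIRBuilder &B,
                          llvm::MachineRegisterInfo &MRI,
                          llvm::Register VgprA, llvm::Register VgprB) {
    using namespace llvm;
    const LLT S1 = LLT::scalar(1);
    const LLT S32 = LLT::scalar(32);

    // VALU boolean: an s1 lane mask (one bit per lane) in the VCC bank.
    auto LaneMask = B.buildICmp(CmpInst::ICMP_EQ, S1, VgprA, VgprB);
    MRI.setRegBank(LaneMask.getReg(0), AMDGPU::VCCRegBank);

    // SALU boolean: SCC is a 1-bit unaddressable register, so scalar
    // booleans are widened to 32-bit SGPR values instead of s1.
    auto SBool = B.buildConstant(S32, 1); // "true" as a 32-bit scalar
    MRI.setRegBank(SBool.getReg(0), AMDGPU::SGPRRegBank);
  }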
142 auto True = B.buildConstant(S32, Opc == AMDGPU::G_SEXT ? -1 : 1);
277 // 32-bit extract of a 64-bit value is just access of a subregister, so free.
281 // TODO: 32-bit insert to a 64-bit SGPR may incur a non-free copy due to SGPR
293 // VCC-like use.
294 if (TRI->isSGPRClass(&RC)) {
296 // should be inferable from the copied-to type. We don't have many boolean
304 return TRI->isAGPRClass(&RC) ? AMDGPU::AGPRRegBank : AMDGPU::VGPRRegBank;
449 const unsigned AS = MMO->getAddrSpace();
452 const unsigned MemSize = 8 * MMO->getSize().getValue();
454 // Require 4-byte alignment.
455 return (MMO->getAlign() >= Align(4) ||
457 ((MemSize == 16 && MMO->getAlign() >= Align(2)) ||
458 (MemSize == 8 && MMO->getAlign() >= Align(1))))) &&
460 !MMO->isAtomic() &&
461 // Don't use scalar loads for volatile accesses to non-constant address
463 (IsConst || !MMO->isVolatile()) &&
465 (IsConst || MMO->isInvariant() || (MMO->getFlags() & MONoClobber)) &&
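The matched lines above are fragments of a scalar-load legality predicate. A hedged reconstruction of its overall shape (the parameter list is an assumption; the real function also gates the sub-dword sizes on a subtarget feature and checks the address space, and MONoClobber is an AMDGPU-specific memory-operand flag):

  bool isScalarLoadLegalSketch(const llvm::MachineMemOperand *MMO, bool IsConst,
                               bool HasScalarSubwordLoads) {
    const unsigned MemSize = 8 * MMO->getSize().getValue();
    // Require 4-byte alignment, or relaxed alignment for sub-dword loads.
    return (MMO->getAlign() >= llvm::Align(4) ||
            (HasScalarSubwordLoads &&
             ((MemSize == 16 && MMO->getAlign() >= llvm::Align(2)) ||
              (MemSize == 8 && MMO->getAlign() >= llvm::Align(1))))) &&
           // SMEM has no atomic forms, and volatile is only tolerated for
           // constant address spaces.
           !MMO->isAtomic() && (IsConst || !MMO->isVolatile()) &&
           // The memory must be known not to change underneath the load.
           (IsConst || MMO->isInvariant() || (MMO->getFlags() & MONoClobber));
  }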
473 const MachineFunction &MF = *MI.getParent()->getParent();
510 // s_{and|or|xor}_b32 set scc when the result of the 32-bit op is not 0.
666 Register LoLHS = MRI->createGenericVirtualRegister(HalfTy);
667 Register HiLHS = MRI->createGenericVirtualRegister(HalfTy);
669 MRI->setRegBank(LoLHS, *Bank);
670 MRI->setRegBank(HiLHS, *Bank);
785 const TargetRegisterClass *WaveRC = TRI->getWaveMaskRegClass();
815 MachineBasicBlock *LoopBB = MF->CreateMachineBasicBlock();
816 MachineBasicBlock *BodyBB = MF->CreateMachineBasicBlock();
817 MachineBasicBlock *RemainderBB = MF->CreateMachineBasicBlock();
818 MachineBasicBlock *RestoreExecBB = MF->CreateMachineBasicBlock();
821 MF->insert(MBBI, LoopBB);
822 MF->insert(MBBI, BodyBB);
823 MF->insert(MBBI, RestoreExecBB);
824 MF->insert(MBBI, RemainderBB);
826 LoopBB->addSuccessor(BodyBB);
827 BodyBB->addSuccessor(RestoreExecBB);
828 BodyBB->addSuccessor(LoopBB);
831 RemainderBB->transferSuccessorsAndUpdatePHIs(&MBB);
832 RemainderBB->splice(RemainderBB->begin(), &MBB, Range.end(), MBB.end());
835 RestoreExecBB->addSuccessor(RemainderBB);
837 B.setInsertPt(*LoopBB, LoopBB->end());
852 BodyBB->splice(BodyBB->end(), &MBB, Range.begin(), MBB.end());
856 auto NewEnd = BodyBB->end();
875 Op.setReg(OldVal->second);
931 // Make sure we don't process this register again.
936 // The ballot becomes a no-op during instruction selection.
950 B.setInsertPt(*BodyBB, BodyBB->end());
958 // XXX - s_xor_b64 sets scc to 1 if the result is nonzero, so can we use
965 BuildMI(MBB, MBB.end(), DL, TII->get(MovExecOpc), SaveExecReg)
976 B.setInsertPt(*RemainderBB, RemainderBB->begin());
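Taken together, the splice and addSuccessor calls above give the waterfall loop this CFG (a sketch reconstructed from the edges shown, not verbatim from the file):

  //        MBB (code before the waterfall range)
  //         |
  //         v
  //      LoopBB <-----+   pick one lane's SGPR value, build the exec mask
  //         |         |   of all lanes that match it
  //         v         |
  //      BodyBB ------+   run the range with reduced exec; loop back while
  //         |             unprocessed lanes remain
  //         v
  //   RestoreExecBB       restore the exec mask saved in SaveExecReg
  //         |
  //         v
  //    RemainderBB        rest of the original block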
992 if (OpBank->getID() != AMDGPU::SGPRRegBankID)
1032 return {LLT::scalar(FirstSize), LLT::scalar(TotalSize - FirstSize)};
1039 unsigned RemainderElts = (TotalSize - FirstSize) / EltSize;
1073 const unsigned MemSize = 8 * MMO->getSize().getValue();
1083 ((MemSize == 8 && MMO->getAlign() >= Align(1)) ||
1084 (MemSize == 16 && MMO->getAlign() >= Align(2))) &&
1094 // This is an extending load from a sub-dword size. Widen the memory
1109 // 96-bit loads are only available for vector loads. We need to split this
1110 // into a 64-bit part and a 32-bit part (unless we can widen to a 128-bit load).
1111 if (MMO->getAlign() < Align(16)) {
1134 // 128-bit loads are supported for all instruction types.
1197 Register SPReg = Info->getStackPtrOffsetReg();
1255 if (TII->splitMUBUFOffset(*Imm, SOffset, ImmOffset, Alignment)) {
1260 B.getMRI()->setRegBank(VOffsetReg, AMDGPU::VGPRRegBank);
1261 B.getMRI()->setRegBank(SOffsetReg, AMDGPU::SGPRRegBank);
1274 TII->splitMUBUFOffset(Offset, SOffset, ImmOffset, Alignment)) {
1278 B.getMRI()->setRegBank(SOffsetReg, AMDGPU::SGPRRegBank);
1280 return 0; // XXX - Why is this 0?
1286 B.getMRI()->setRegBank(VOffsetReg, AMDGPU::VGPRRegBank);
1289 return 0; // XXX - Why is this 0?
1296 Register Src0 = getSrcRegIgnoringCopies(Add->getOperand(1).getReg(), *MRI);
1297 Register Src1 = getSrcRegIgnoringCopies(Add->getOperand(2).getReg(), *MRI);
1321 B.getMRI()->setRegBank(VOffsetReg, AMDGPU::VGPRRegBank);
1325 B.getMRI()->setRegBank(SOffsetReg, AMDGPU::SGPRRegBank);
1346 // FIXME: 96-bit case was widened during legalize. We need to narrow it back
1369 // TODO: 96-bit loads were widened to 128-bit results. Shrink the result if we
1386 B.getMRI()->setRegBank(VIndex, AMDGPU::VGPRRegBank);
1472 // There are no 64-bit vgpr bitfield extract instructions, so the operation
1482 // A 64-bit bitfield extract uses the 32-bit bitfield extract instructions
1483 // if the width is a constant.
1485 // Use the 32-bit bitfield extract instruction if the width is a constant.
1486 // Depending on the width, use either the low or the high 32 bits.
1488 auto WidthImm = ConstWidth->Value.getZExtValue();
1490 // Use bitfield extract on the lower 32-bit source, and then sign-extend
1491 // or clear the upper 32-bits.
1499 // Use bitfield extract on upper 32-bit source, and combine with lower
1500 // 32-bit source.
1501 auto UpperWidth = B.buildConstant(S32, WidthImm - 32);
1512 // Expand to Src >> Offset << (64 - Width) >> (64 - Width) using 64-bit
1524 // The scalar form packs the offset and width in a single operand.
1535 // Transformation function, pack the offset and width of a BFE into
1537 // source, bits [5:0] contain the offset and bits [22:16] the width.
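A minimal MachineIRBuilder sketch of that packing, assuming a builder B and s32 virtual registers OffsetReg and WidthReg (hypothetical names):

  const LLT S32 = LLT::scalar(32);
  auto OffsetMask = B.buildConstant(S32, 0x3f);            // offset field: bits [5:0]
  auto ClampOffset = B.buildAnd(S32, OffsetReg, OffsetMask);
  auto ShiftAmt = B.buildConstant(S32, 16);
  auto ShiftWidth = B.buildShl(S32, WidthReg, ShiftAmt);   // width field: bits [22:16]
  auto Merged = B.buildOr(S32, ClampOffset, ShiftWidth);   // one scalar-BFE operand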
1611 // Accumulate and produce the "carry-out" bit.
1613 // The "carry-out" is defined as bit 64 of the result when computed as a
1614 // big integer. For unsigned multiply-add, this matches the usual definition
1615 // of carry-out. For signed multiply-add, bit 64 is the sign bit of the
1617 // sign(Src0 * Src1) + sign(Src2) + carry-out from unsigned 64-bit add
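That rule is easy to sanity-check with host arithmetic. An illustrative, self-contained check (assumes a compiler with __int128; sign() is the top bit of the 64-bit value, and the additions are mod 2):

  #include <cassert>
  #include <cstdint>

  int main() {
    int32_t Src0 = -3, Src1 = 5;
    int64_t Src2 = -7;
    int64_t Mul = (int64_t)Src0 * Src1;              // exact 64-bit product
    __int128 Big = (__int128)Mul + Src2;             // result as a big integer
    unsigned Bit64 = (unsigned)(((unsigned __int128)Big >> 64) & 1);

    uint64_t Sum = (uint64_t)Mul + (uint64_t)Src2;
    unsigned Carry = Sum < (uint64_t)Mul;            // unsigned 64-bit carry-out
    unsigned SignMul = (uint64_t)Mul >> 63;          // sign(Src0 * Src1)
    unsigned SignSrc2 = (uint64_t)Src2 >> 63;        // sign(Src2)
    assert(Bit64 == ((SignMul + SignSrc2 + Carry) & 1));
  }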
1718 // Emit a legalized extension from <2 x s16> to 2 32-bit components, avoiding
1771 for (int I = 0, E = Unmerge->getNumOperands() - 1; I != E; ++I)
1818 ImmOffset -= Overflow;
1893 /// Implement extending a 32-bit value to a 64-bit value. \p Lo32Reg is the
1894 /// original 32-bit source value (to be inserted in the low part of the combined
1895 /// 64-bit result), and \p Hi32Reg is the high half of the combined 64-bit
1910 // Replicate sign bit from 32-bit extended part.
1912 B.getMRI()->setRegBank(ShiftAmt.getReg(0), RegBank);
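Only the shift-amount setup is matched above; judging from the surrounding comment, the signed path then replicates the sign bit with a single arithmetic shift (hedged sketch):

  B.buildAShr(Hi32Reg, Lo32Reg, ShiftAmt); // Hi = Lo >>s 31: all zeros or all ones

The unsigned path would instead materialize a zero constant into Hi32Reg.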
1957 Idx = B.buildCopy(S32, Idx)->getOperand(0).getReg();
1976 MRI.setRegBank(IC->getOperand(0).getReg(), AMDGPU::SGPRRegBank);
1978 MRI.setRegBank(Cmp->getOperand(0).getReg(), CCBank);
1985 MRI.setRegBank(S->getOperand(N).getReg(), DstBank);
1987 Res[L] = S->getOperand(0).getReg();
2058 Idx = B.buildCopy(S32, Idx)->getOperand(0).getReg();
2077 MRI.setRegBank(IC->getOperand(0).getReg(), AMDGPU::SGPRRegBank);
2079 MRI.setRegBank(Cmp->getOperand(0).getReg(), CCBank);
2098 MRI.setRegBank(Vec->getOperand(0).getReg(), DstBank);
2108 // Break s_mul_u64 into 32-bit vector operations.
2151 // --------------------
2154 // -----------------------------------------
2157 // We drop Op1H*Op0H because the result of the multiplication is a 64-bit
2159 // The low 32-bit value is Op1L*Op0L.
2160 // The high 32-bit value is Op1H*Op0L + Op1L*Op0H + carry (from
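An illustrative host-side model of this decomposition (not code from the file), checkable against a native 64-bit multiply:

  #include <cassert>
  #include <cstdint>

  uint64_t mulU64Via32(uint64_t Op0, uint64_t Op1) {
    uint32_t Op0L = (uint32_t)Op0, Op0H = (uint32_t)(Op0 >> 32);
    uint32_t Op1L = (uint32_t)Op1, Op1H = (uint32_t)(Op1 >> 32);
    uint64_t LoMul = (uint64_t)Op1L * Op0L;        // low 32 bits + carry source
    uint32_t Hi = (uint32_t)(LoMul >> 32)          // carry out of Op1L*Op0L
                  + Op1H * Op0L + Op1L * Op0H;     // cross terms; Op1H*Op0H dropped
    return ((uint64_t)Hi << 32) | (uint32_t)LoMul;
  }

  int main() {
    uint64_t A = 0xDEADBEEFCAFEF00Dull, B = 0x0123456789ABCDEFull;
    assert(mulU64Via32(A, B) == A * B);
  }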
2205 uint64_t ConstVal = MI.getOperand(1).getCImm()->getZExtValue();
2236 B.setInsertPt(*SrcMBB, SrcMBB->getFirstTerminator());
2283 // will end up using a copy to a 32-bit vreg.
2384 // 64-bit and is only available on the SALU, so split into 2 32-bit ops if
2395 MachineFunction *MF = MI.getParent()->getParent();
2453 MachineFunction *MF = MI.getParent()->getParent();
2477 // s_mul_u64. Hence, we have to break down s_mul_u64 into 32-bit vector
2484 // 16-bit operations are VALU only, but can be promoted to 32-bit SALU.
2485 // Packed 16-bit operations need to be scalarized and promoted.
2496 MachineFunction *MF = MBB->getParent();
2545 // where the 33 higher bits are sign-extended and
2547 // where the 32 higher bits are zero-extended. In case scalar registers are
2561 "that handles only 64-bit operands.");
2568 MI.setDesc(TII->get(AMDGPU::S_MUL_U64));
2638 B.buildSExtInReg(DstRegs[1], DstRegs[0], Amt - 32);
2684 // which return -1 when the input is zero:
2685 // (ctlz_zero_undef hi:lo) -> (umin (ffbh hi), (add (ffbh lo), 32))
2686 // (cttz_zero_undef hi:lo) -> (umin (add (ffbl hi), 32), (ffbl lo))
2687 // (ffbh hi:lo) -> (umin (ffbh hi), (uaddsat (ffbh lo), 32))
2688 // (ffbl hi:lo) -> (umin (uaddsat (ffbl hi), 32), (ffbl lo))
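These identities are easy to sanity-check on the host. An illustrative model of the ffbh case (assumes GCC/Clang __builtin_clz; uaddsat is emulated with a wrap check):

  #include <algorithm>
  #include <cassert>
  #include <cstdint>

  uint32_t ffbh32(uint32_t X) { return X ? __builtin_clz(X) : ~0u; }  // -1 on zero
  uint32_t uaddsat32(uint32_t A, uint32_t B) { return A + B < A ? ~0u : A + B; }

  uint32_t ffbh64(uint64_t V) {
    // (ffbh hi:lo) -> (umin (ffbh hi), (uaddsat (ffbh lo), 32))
    return std::min(ffbh32((uint32_t)(V >> 32)),
                    uaddsat32(ffbh32((uint32_t)V), 32));
  }

  int main() {
    assert(ffbh64(0) == ~0u);          // both halves zero -> -1
    assert(ffbh64(1) == 63);           // bit found in the low half
    assert(ffbh64(1ull << 40) == 23);  // bit found in the high half
  }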
2732 // Extend to 32-bit, and then extend the low half.
2760 // 64-bit select is SGPR only
2762 SrcBank->getID() == AMDGPU::SGPRRegBankID;
2766 auto True = B.buildConstant(SelType, Signed ? -1 : 1);
2826 // Move the base register. We'll re-insert the add later.
2852 // Re-insert the constant offset add inside the waterfall loop.
2868 // Split the vector index into 32-bit pieces. Prepare to move all of the
2874 // Compute 32-bit element indices, (2 * OrigIdx, 2 * OrigIdx + 1).
2901 MachineBasicBlock *LoopBB = Extract1->getParent();
2907 Extract0->getOperand(0).setReg(TmpReg0);
2908 Extract1->getOperand(0).setReg(TmpReg1);
2910 B.setInsertPt(*LoopBB, ++Extract1->getIterator());
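The index math described by that comment is a two-instruction sketch with MachineIRBuilder (assuming a builder B and an s32 BaseIdxReg; names hypothetical):

  const LLT S32 = LLT::scalar(32);
  auto One = B.buildConstant(S32, 1);
  auto IdxLo = B.buildShl(S32, BaseIdxReg, One);  // 2 * OrigIdx
  auto IdxHi = B.buildAdd(S32, IdxLo, One);       // 2 * OrigIdx + 1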
2957 // Move the base register. We'll re-insert the add later.
2965 // Re-insert the constant offset add inside the waterfall loop.
2981 // Split the vector index into 32-bit pieces. Prepare to move all of the
2987 // Compute 32-bit element indices, (2 * OrigIdx, 2 * OrigIdx + 1).
3032 // Re-insert the constant offset add inside the waterfall loop.
3109 // Make sure the index is an SGPR. It doesn't make sense to run this in a
3134 constrainOpWithReadfirstlane(B, MI, MI.getNumOperands() - 1); // Index
3147 // Doing a waterfall loop over these wouldn't make any sense.
3180 assert(RSrcIntrin && RSrcIntrin->IsImage);
3181 // Non-images can have complications from operands that allow both SGPR
3184 applyMappingImage(B, MI, OpdMapper, RSrcIntrin->RsrcArg);
3188 unsigned N = MI.getNumExplicitOperands() - 2;
3262 constrainOpWithReadfirstlane(B, MI, MI.getNumOperands() - 1); // Index
3292 // Non-images can have complications from operands that allow both SGPR
3295 if (RSrcIntrin->IsImage) {
3296 applyMappingImage(B, MI, OpdMapper, RSrcIntrin->RsrcArg);
3320 // Move all non-copies before the copies, so that a complete range can be
3329 MBB->getParent()->getInfo<SIMachineFunctionInfo>();
3330 while (Start->getOpcode() != FrameSetupOpcode) {
3331 --Start;
3333 if (Start->getOpcode() == AMDGPU::COPY) {
3334 auto &Dst = Start->getOperand(0);
3342 auto &Src = Start->getOperand(1);
3345 IsCopy = Info->getScratchRSrcReg() == Reg;
3361 MBB->splice(LastCopy, MBB, NonCopy->getIterator());
3370 while (End->getOpcode() != FrameDestroyOpcode) {
3373 if (End->getOpcode() == AMDGPU::COPY) {
3374 auto &Src = End->getOperand(1);
3393 MBB->splice(LastCopy, MBB, NonCopy->getIterator());
3453 // vgpr, sgpr -> vgpr
3454 // vgpr, agpr -> vgpr
3455 // agpr, agpr -> agpr
3456 // agpr, sgpr -> vgpr
3478 // vcc, vcc -> vcc
3479 // vcc, sgpr -> vcc
3480 // vcc, vgpr -> vcc
3484 // vcc, vgpr -> vgpr
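A direct encoding of the first table (helper name hypothetical; the vcc rows additionally depend on whether the use can accept a lane-mask value, which is why vcc/vgpr appears above with both results):

  // Encodes only the rows shown above (at least one vector-bank operand).
  static unsigned copyBankUnionSketch(unsigned RB0, unsigned RB1) {
    if (RB0 == AMDGPU::AGPRRegBankID && RB1 == AMDGPU::AGPRRegBankID)
      return AMDGPU::AGPRRegBankID;  // agpr, agpr -> agpr
    return AMDGPU::VGPRRegBankID;    // every other listed combination -> vgpr
  }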
3497 RegBank = regBankUnion(RegBank, Bank->getID());
3507 const MachineFunction &MF = *MI.getParent()->getParent();
3514 if (Bank->getID() != AMDGPU::SGPRRegBankID)
3523 const MachineFunction &MF = *MI.getParent()->getParent();
3541 const MachineFunction &MF = *MI.getParent()->getParent();
3565 const MachineFunction &MF = *MI.getParent()->getParent();
3638 return AMDGPU::getValueMapping(PtrBank->getID(), Size);
3644 const MachineFunction &MF = *MI.getParent()->getParent();
3693 return Bank ? Bank->getID() : Default;
3734 const MachineFunction &MF = *MI.getParent()->getParent();
3772 // It doesn't make sense to use vcc or scc banks here, so just ignore
3787 // The default handling is broken and doesn't handle illegal SGPR->VGPR copies
3793 Register DstReg = PHI->getReg(0);
3797 ResultBank = DstBank->getID();
3799 for (unsigned I = 0; I < PHI->getNumIncomingValues(); ++I) {
3800 Register Reg = PHI->getIncomingValue(I);
3804 if (!Bank || Bank->getID() == AMDGPU::VGPRRegBankID) {
3810 unsigned OpBank = Bank->getID();
3848 TargetBankID = DstBank->getID();
4013 // - Default SOP
4014 // - Default VOP
4015 // - Scalar multiply: src0 and src1 are SGPRs, the rest is VOP.
4024 if (Bank->getID() != AMDGPU::SGPRRegBankID) {
4037 // If the multiply-add is full-rate in VALU, use that even if the
4193 switch (SrcBank->getID()) {
4202 // Scalar extend can use 64-bit BFE, but VGPRs require extending to
4203 // 32-bits, and then to 64.
4205 OpdsMapping[1] = AMDGPU::getValueMappingSGPR64Only(SrcBank->getID(),
4261 // TODO: Use 32-bit for scalar output size.
4262 // SCC results will need to be copied to a 32-bit SGPR virtual register.
4440 unsigned RSrcBank = OpdsMapping[1]->BreakDown[0].RegBank->getID();
4441 unsigned OffsetBank = OpdsMapping[2]->BreakDown[0].RegBank->getID();
4735 Info->mayNeedAGPRs()
4741 Info->mayNeedAGPRs()
4774 const int M0Idx = MI.getNumOperands() - 1;
4852 // Non-images can have complications from operands that allow both SGPR
4855 assert(RSrcIntrin->IsImage);
4856 return getImageMapping(MRI, MI, RSrcIntrin->RsrcArg);
4859 unsigned N = MI.getNumExplicitOperands() - 2;
5082 const int M0Idx = MI.getNumOperands() - 1;
5175 // TODO: Should report 32-bit for scalar condition type.