//===-- HexagonISelLowering.h - Hexagon DAG Lowering Interface --*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that Hexagon uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIB_TARGET_HEXAGON_HEXAGONISELLOWERING_H
#define LLVM_LIB_TARGET_HEXAGON_HEXAGONISELLOWERING_H

#include "Hexagon.h"
#include "MCTargetDesc/HexagonMCTargetDesc.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/Support/MachineValueType.h"
#include <cstdint>
#include <utility>

namespace llvm {

namespace HexagonISD {

enum NodeType : unsigned {
  OP_BEGIN = ISD::BUILTIN_OP_END,

  CONST32 = OP_BEGIN,
  CONST32_GP,  // For marking data present in GP.
  ADDC,        // Add with carry: (X, Y, Cin) -> (X+Y, Cout).
  SUBC,        // Sub with carry: (X, Y, Cin) -> (X+~Y+Cin, Cout).
  ALLOCA,

  AT_GOT,      // Index in GOT.
  AT_PCREL,    // Offset relative to PC.

  CALL,        // Function call.
  CALLnr,      // Function call that does not return.
  CALLR,

  RET_FLAG,    // Return with a flag operand.
  BARRIER,     // Memory barrier.
  JT,          // Jump table.
  CP,          // Constant pool.

  COMBINE,
  VASL,
  VASR,
  VLSR,

  TSTBIT,
  INSERT,
  EXTRACTU,
  VEXTRACTW,
  VINSERTW0,
  VROR,
  TC_RETURN,
  EH_RETURN,
  DCFETCH,
  READCYCLE,
  PTRUE,
  PFALSE,
  D2P,         // Convert 8-byte value to 8-bit predicate register. [*]
  P2D,         // Convert 8-bit predicate register to 8-byte value. [*]
  V2Q,         // Convert HVX vector to a vector predicate reg. [*]
  Q2V,         // Convert vector predicate to an HVX vector. [*]
               // [*] The equivalence is defined as "Q <=> (V != 0)",
               //     where the != operation compares bytes.
               // Note: V != 0 is implemented as V >u 0.
  QCAT,
  QTRUE,
  QFALSE,
  TYPECAST,    // No-op that's used to convert between different legal
               // types in a register.
  VALIGN,      // Align two vectors (in Op0, Op1) to one that would have
               // been loaded from address in Op2.
  VALIGNADDR,  // Align vector address: Op0 & -Op1, except when it is
               // an address in a vector load, then it's a no-op.
  VPACKL,      // Pack low parts of the input vector to the front of the
               // output. For example v64i16 VPACKL(v32i32) will pick
               // the low halfwords and pack them into the first 32
               // halfwords of the output. The rest of the output is
               // unspecified.
  VUNPACK,     // Unpacking into low elements with sign extension.
  VUNPACKU,    // Unpacking into low elements with zero extension.
  ISEL,        // Marker for nodes that were created during ISel, and
               // which need explicit selection (would have been left
               // unselected otherwise).
  OP_END
};

} // end namespace HexagonISD
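
// Informative example of the byte-wise predicate equivalence noted for
// D2P/P2D/V2Q/Q2V above (an illustration added here, not normative, and
// assuming little-endian byte-lane numbering): with one predicate bit per
// byte lane, D2P applied to the 64-bit value 0x00FF000012000000 (nonzero
// bytes in lanes 3 and 6) produces the 8-bit predicate 0b01001000, since
// only those two lanes satisfy V[i] >u 0.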

class HexagonSubtarget;

class HexagonTargetLowering : public TargetLowering {
  int VarArgsFrameOffset;   // Frame offset to start of varargs area.
  const HexagonTargetMachine &HTM;
  const HexagonSubtarget &Subtarget;

  bool CanReturnSmallStruct(const Function* CalleeFn, unsigned& RetSize)
      const;

public:
  explicit HexagonTargetLowering(const TargetMachine &TM,
                                 const HexagonSubtarget &ST);

  bool isHVXVectorType(MVT Ty) const;

  /// IsEligibleForTailCallOptimization - Check whether the call is eligible
  /// for tail call optimization. Targets which want to do tail call
  /// optimization should implement this function.
  bool IsEligibleForTailCallOptimization(SDValue Callee,
      CallingConv::ID CalleeCC, bool isVarArg, bool isCalleeStructRet,
      bool isCallerStructRet, const SmallVectorImpl<ISD::OutputArg> &Outs,
      const SmallVectorImpl<SDValue> &OutVals,
      const SmallVectorImpl<ISD::InputArg> &Ins, SelectionDAG& DAG) const;

  bool getTgtMemIntrinsic(IntrinsicInfo &Info, const CallInst &I,
                          MachineFunction &MF,
                          unsigned Intrinsic) const override;

  bool isTruncateFree(Type *Ty1, Type *Ty2) const override;
  bool isTruncateFree(EVT VT1, EVT VT2) const override;

  bool isCheapToSpeculateCttz() const override { return true; }
  bool isCheapToSpeculateCtlz() const override { return true; }
  bool isCtlzFast() const override { return true; }

  bool hasBitTest(SDValue X, SDValue Y) const override;

  bool allowTruncateForTailCall(Type *Ty1, Type *Ty2) const override;

  /// Return true if an FMA operation is faster than a pair of mul and add
  /// instructions. fmuladd intrinsics will be expanded to FMAs when this
  /// method returns true (and FMAs are legal), otherwise fmuladd is
  /// expanded to mul + add.
  bool isFMAFasterThanFMulAndFAdd(const MachineFunction &,
                                  EVT) const override;
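
  // Informative illustration of the hook above (added here, not part of
  // the original header): when it returns true and FMA is legal for the
  // type, IR such as
  //   %r = call float @llvm.fmuladd.f32(float %a, float %b, float %c)
  // is lowered to a single fused multiply-add node; otherwise it is
  // expanded into separate fmul and fadd operations.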

  // Should we expand the build vector with shuffles?
  bool shouldExpandBuildVectorWithShuffles(EVT VT,
      unsigned DefinedValues) const override;

  bool isShuffleMaskLegal(ArrayRef<int> Mask, EVT VT) const override;
  TargetLoweringBase::LegalizeTypeAction getPreferredVectorAction(MVT VT)
      const override;

  SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override;
  void LowerOperationWrapper(SDNode *N, SmallVectorImpl<SDValue> &Results,
                             SelectionDAG &DAG) const override;
  void ReplaceNodeResults(SDNode *N, SmallVectorImpl<SDValue> &Results,
                          SelectionDAG &DAG) const override;

  const char *getTargetNodeName(unsigned Opcode) const override;

  SDValue LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerEXTRACT_SUBVECTOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerINSERT_SUBVECTOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVECTOR_SHIFT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerROTL(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerBITCAST(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerANY_EXTEND(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSIGN_EXTEND(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerZERO_EXTEND(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerLoad(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerStore(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerUnalignedLoad(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerUAddSubO(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerAddSubCarry(SDValue Op, SelectionDAG &DAG) const;

  SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerINLINEASM(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerPREFETCH(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerREADCYCLECOUNTER(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerEH_LABEL(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerEH_RETURN(SDValue Op, SelectionDAG &DAG) const;
  SDValue
  LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
                       const SmallVectorImpl<ISD::InputArg> &Ins,
                       const SDLoc &dl, SelectionDAG &DAG,
                       SmallVectorImpl<SDValue> &InVals) const override;
  SDValue LowerGLOBALADDRESS(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerToTLSGeneralDynamicModel(GlobalAddressSDNode *GA,
                                        SelectionDAG &DAG) const;
  SDValue LowerToTLSInitialExecModel(GlobalAddressSDNode *GA,
                                     SelectionDAG &DAG) const;
  SDValue LowerToTLSLocalExecModel(GlobalAddressSDNode *GA,
                                   SelectionDAG &DAG) const;
  SDValue GetDynamicTLSAddr(SelectionDAG &DAG, SDValue Chain,
                            GlobalAddressSDNode *GA, SDValue InFlag, EVT PtrVT,
                            unsigned ReturnReg, unsigned char OperandFlags) const;
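
  // Informative note on the three TLS lowerings above (general ELF TLS
  // background, not Hexagon-specific documentation): the general-dynamic
  // model resolves a variable's address through a runtime helper call,
  // the initial-exec model loads a thread-pointer-relative offset from the
  // GOT, and the local-exec model folds that offset in as a link-time
  // constant. See the LowerToTLS* implementations for the exact sequences.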
  SDValue LowerGLOBAL_OFFSET_TABLE(SDValue Op, SelectionDAG &DAG) const;

  SDValue LowerCall(TargetLowering::CallLoweringInfo &CLI,
                    SmallVectorImpl<SDValue> &InVals) const override;
  SDValue LowerCallResult(SDValue Chain, SDValue InFlag,
                          CallingConv::ID CallConv, bool isVarArg,
                          const SmallVectorImpl<ISD::InputArg> &Ins,
                          const SDLoc &dl, SelectionDAG &DAG,
                          SmallVectorImpl<SDValue> &InVals,
                          const SmallVectorImpl<SDValue> &OutVals,
                          SDValue Callee) const;

  SDValue LowerSETCC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVSELECT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerATOMIC_FENCE(SDValue Op, SelectionDAG& DAG) const;
  SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const;

  bool CanLowerReturn(CallingConv::ID CallConv,
                      MachineFunction &MF, bool isVarArg,
                      const SmallVectorImpl<ISD::OutputArg> &Outs,
                      LLVMContext &Context) const override;

  SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
                      const SmallVectorImpl<ISD::OutputArg> &Outs,
                      const SmallVectorImpl<SDValue> &OutVals,
                      const SDLoc &dl, SelectionDAG &DAG) const override;

  SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override;

  bool mayBeEmittedAsTailCall(const CallInst *CI) const override;

  Register getRegisterByName(const char* RegName, LLT VT,
                             const MachineFunction &MF) const override;

  /// If a physical register, this returns the register that receives the
  /// exception address on entry to an EH pad.
  Register
  getExceptionPointerRegister(const Constant *PersonalityFn) const override {
    return Hexagon::R0;
  }

  /// If a physical register, this returns the register that receives the
  /// exception typeid on entry to a landing pad.
  Register
  getExceptionSelectorRegister(const Constant *PersonalityFn) const override {
    return Hexagon::R1;
  }

  SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVACOPY(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerJumpTable(SDValue Op, SelectionDAG &DAG) const;

  EVT getSetCCResultType(const DataLayout &, LLVMContext &C,
                         EVT VT) const override {
    if (!VT.isVector())
      return MVT::i1;
    else
      return EVT::getVectorVT(C, MVT::i1, VT.getVectorNumElements());
  }

  bool getPostIndexedAddressParts(SDNode *N, SDNode *Op,
                                  SDValue &Base, SDValue &Offset,
                                  ISD::MemIndexedMode &AM,
                                  SelectionDAG &DAG) const override;

  ConstraintType getConstraintType(StringRef Constraint) const override;

  std::pair<unsigned, const TargetRegisterClass *>
  getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
                               StringRef Constraint, MVT VT) const override;

  // Intrinsics
  SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerINTRINSIC_VOID(SDValue Op, SelectionDAG &DAG) const;
  /// isLegalAddressingMode - Return true if the addressing mode represented
  /// by AM is legal for this target, for a load/store of the specified type.
  /// The type may be VoidTy, in which case only return true if the addressing
  /// mode is legal for a load/store of any legal type.
  /// TODO: Handle pre/postinc as well.
  bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM,
                             Type *Ty, unsigned AS,
                             Instruction *I = nullptr) const override;
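
  // Informative: TargetLowering::AddrMode describes addresses of the
  // general form BaseGV + BaseOffs + BaseReg + Scale*ScaleReg; the hook
  // above answers whether a given combination is encodable by a Hexagon
  // load/store addressing mode.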

  /// Return true if folding a constant offset with the given GlobalAddress
  /// is legal. It is frequently not legal in PIC relocation models.
  bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const override;

  bool isFPImmLegal(const APFloat &Imm, EVT VT,
                    bool ForCodeSize) const override;

  /// isLegalICmpImmediate - Return true if the specified immediate is legal
  /// icmp immediate, that is the target has icmp instructions which can
  /// compare a register against the immediate without having to materialize
  /// the immediate into a register.
  bool isLegalICmpImmediate(int64_t Imm) const override;

  EVT getOptimalMemOpType(const MemOp &Op,
                          const AttributeList &FuncAttributes) const override;

  bool allowsMemoryAccess(LLVMContext &Context, const DataLayout &DL, EVT VT,
                          unsigned AddrSpace, Align Alignment,
                          MachineMemOperand::Flags Flags,
                          bool *Fast) const override;

  bool allowsMisalignedMemoryAccesses(EVT VT, unsigned AddrSpace,
                                      Align Alignment,
                                      MachineMemOperand::Flags Flags,
                                      bool *Fast) const override;

  /// Returns relocation base for the given PIC jumptable.
  SDValue getPICJumpTableRelocBase(SDValue Table, SelectionDAG &DAG)
      const override;

  bool shouldReduceLoadWidth(SDNode *Load, ISD::LoadExtType ExtTy,
                             EVT NewVT) const override;

  // Handling of atomic RMW instructions.
  Value *emitLoadLinked(IRBuilder<> &Builder, Value *Addr,
                        AtomicOrdering Ord) const override;
  Value *emitStoreConditional(IRBuilder<> &Builder, Value *Val,
                              Value *Addr, AtomicOrdering Ord) const override;
  AtomicExpansionKind shouldExpandAtomicLoadInIR(LoadInst *LI) const override;
  bool shouldExpandAtomicStoreInIR(StoreInst *SI) const override;
  AtomicExpansionKind
  shouldExpandAtomicCmpXchgInIR(AtomicCmpXchgInst *AI) const override;

  AtomicExpansionKind
  shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const override {
    return AtomicExpansionKind::LLSC;
  }

private:
  void initializeHVXLowering();
  unsigned getPreferredHvxVectorAction(MVT VecTy) const;

  void validateConstPtrAlignment(SDValue Ptr, const SDLoc &dl,
                                 unsigned NeedAlign) const;

  std::pair<SDValue,int> getBaseAndOffset(SDValue Addr) const;

  bool getBuildVectorConstInts(ArrayRef<SDValue> Values, MVT VecTy,
                               SelectionDAG &DAG,
                               MutableArrayRef<ConstantInt*> Consts) const;
  SDValue buildVector32(ArrayRef<SDValue> Elem, const SDLoc &dl, MVT VecTy,
                        SelectionDAG &DAG) const;
  SDValue buildVector64(ArrayRef<SDValue> Elem, const SDLoc &dl, MVT VecTy,
                        SelectionDAG &DAG) const;
  SDValue extractVector(SDValue VecV, SDValue IdxV, const SDLoc &dl,
                        MVT ValTy, MVT ResTy, SelectionDAG &DAG) const;
  SDValue insertVector(SDValue VecV, SDValue ValV, SDValue IdxV,
                       const SDLoc &dl, MVT ValTy, SelectionDAG &DAG) const;
  SDValue expandPredicate(SDValue Vec32, const SDLoc &dl,
                          SelectionDAG &DAG) const;
  SDValue contractPredicate(SDValue Vec64, const SDLoc &dl,
                            SelectionDAG &DAG) const;
  SDValue getVectorShiftByInt(SDValue Op, SelectionDAG &DAG) const;
  SDValue appendUndef(SDValue Val, MVT ResTy, SelectionDAG &DAG) const;

  bool isUndef(SDValue Op) const {
    if (Op.isMachineOpcode())
      return Op.getMachineOpcode() == TargetOpcode::IMPLICIT_DEF;
    return Op.getOpcode() == ISD::UNDEF;
  }
  SDValue getInstr(unsigned MachineOpc, const SDLoc &dl, MVT Ty,
                   ArrayRef<SDValue> Ops, SelectionDAG &DAG) const {
    SDNode *N = DAG.getMachineNode(MachineOpc, dl, Ty, Ops);
    return SDValue(N, 0);
  }
  SDValue getZero(const SDLoc &dl, MVT Ty, SelectionDAG &DAG) const;

  using VectorPair = std::pair<SDValue, SDValue>;
  using TypePair = std::pair<MVT, MVT>;

  SDValue getInt(unsigned IntId, MVT ResTy, ArrayRef<SDValue> Ops,
                 const SDLoc &dl, SelectionDAG &DAG) const;

  MVT ty(SDValue Op) const {
    return Op.getValueType().getSimpleVT();
  }
  TypePair ty(const VectorPair &Ops) const {
    return { Ops.first.getValueType().getSimpleVT(),
             Ops.second.getValueType().getSimpleVT() };
  }
  MVT tyScalar(MVT Ty) const {
    if (!Ty.isVector())
      return Ty;
    return MVT::getIntegerVT(Ty.getSizeInBits());
  }
  MVT tyVector(MVT Ty, MVT ElemTy) const {
    if (Ty.isVector() && Ty.getVectorElementType() == ElemTy)
      return Ty;
    unsigned TyWidth = Ty.getSizeInBits();
    unsigned ElemWidth = ElemTy.getSizeInBits();
    assert((TyWidth % ElemWidth) == 0);
    return MVT::getVectorVT(ElemTy, TyWidth/ElemWidth);
  }

  MVT typeJoin(const TypePair &Tys) const;
  TypePair typeSplit(MVT Ty) const;
  MVT typeExtElem(MVT VecTy, unsigned Factor) const;
  MVT typeTruncElem(MVT VecTy, unsigned Factor) const;

  SDValue opJoin(const VectorPair &Ops, const SDLoc &dl,
                 SelectionDAG &DAG) const;
  VectorPair opSplit(SDValue Vec, const SDLoc &dl, SelectionDAG &DAG) const;
  SDValue opCastElem(SDValue Vec, MVT ElemTy, SelectionDAG &DAG) const;

  bool allowsHvxMemoryAccess(MVT VecTy, MachineMemOperand::Flags Flags,
                             bool *Fast) const;
  bool allowsHvxMisalignedMemoryAccesses(MVT VecTy,
                                         MachineMemOperand::Flags Flags,
                                         bool *Fast) const;

  bool isHvxSingleTy(MVT Ty) const;
  bool isHvxPairTy(MVT Ty) const;
  bool isHvxBoolTy(MVT Ty) const;
  SDValue convertToByteIndex(SDValue ElemIdx, MVT ElemTy,
                             SelectionDAG &DAG) const;
  SDValue getIndexInWord32(SDValue Idx, MVT ElemTy, SelectionDAG &DAG) const;
  SDValue getByteShuffle(const SDLoc &dl, SDValue Op0, SDValue Op1,
                         ArrayRef<int> Mask, SelectionDAG &DAG) const;

  SDValue buildHvxVectorReg(ArrayRef<SDValue> Values, const SDLoc &dl,
                            MVT VecTy, SelectionDAG &DAG) const;
  SDValue buildHvxVectorPred(ArrayRef<SDValue> Values, const SDLoc &dl,
                             MVT VecTy, SelectionDAG &DAG) const;
  SDValue createHvxPrefixPred(SDValue PredV, const SDLoc &dl,
                              unsigned BitBytes, bool ZeroFill,
                              SelectionDAG &DAG) const;
  SDValue extractHvxElementReg(SDValue VecV, SDValue IdxV, const SDLoc &dl,
                               MVT ResTy, SelectionDAG &DAG) const;
  SDValue extractHvxElementPred(SDValue VecV, SDValue IdxV, const SDLoc &dl,
                                MVT ResTy, SelectionDAG &DAG) const;
  SDValue insertHvxElementReg(SDValue VecV, SDValue IdxV, SDValue ValV,
                              const SDLoc &dl, SelectionDAG &DAG) const;
  SDValue insertHvxElementPred(SDValue VecV, SDValue IdxV, SDValue ValV,
                               const SDLoc &dl, SelectionDAG &DAG) const;
  SDValue extractHvxSubvectorReg(SDValue VecV, SDValue IdxV, const SDLoc &dl,
                                 MVT ResTy, SelectionDAG &DAG) const;
  SDValue extractHvxSubvectorPred(SDValue VecV, SDValue IdxV, const SDLoc &dl,
                                  MVT ResTy, SelectionDAG &DAG) const;
  SDValue insertHvxSubvectorReg(SDValue VecV, SDValue SubV, SDValue IdxV,
                                const SDLoc &dl, SelectionDAG &DAG) const;
  SDValue insertHvxSubvectorPred(SDValue VecV, SDValue SubV, SDValue IdxV,
                                 const SDLoc &dl, SelectionDAG &DAG) const;
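
  // Informative: the *Reg/*Pred helper pairs above take separate lowering
  // paths because HVX predicate (vector-bool) values live in dedicated
  // predicate registers with a different layout from ordinary HVX vector
  // registers.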
  SDValue extendHvxVectorPred(SDValue VecV, const SDLoc &dl, MVT ResTy,
                              bool ZeroExt, SelectionDAG &DAG) const;
  SDValue compressHvxPred(SDValue VecQ, const SDLoc &dl, MVT ResTy,
                          SelectionDAG &DAG) const;

  SDValue LowerHvxBuildVector(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerHvxConcatVectors(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerHvxExtractElement(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerHvxInsertElement(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerHvxExtractSubvector(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerHvxInsertSubvector(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerHvxBitcast(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerHvxAnyExt(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerHvxSignExt(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerHvxZeroExt(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerHvxCttz(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerHvxMul(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerHvxMulh(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerHvxSetCC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerHvxExtend(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerHvxSelect(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerHvxShift(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerHvxIntrinsic(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerHvxMaskedOp(SDValue Op, SelectionDAG &DAG) const;

  SDValue SplitHvxPairOp(SDValue Op, SelectionDAG &DAG) const;
  SDValue SplitHvxMemOp(SDValue Op, SelectionDAG &DAG) const;
  SDValue WidenHvxLoad(SDValue Op, SelectionDAG &DAG) const;
  SDValue WidenHvxStore(SDValue Op, SelectionDAG &DAG) const;
  SDValue WidenHvxSetCC(SDValue Op, SelectionDAG &DAG) const;
  SDValue WidenHvxExtend(SDValue Op, SelectionDAG &DAG) const;
  SDValue WidenHvxTruncate(SDValue Op, SelectionDAG &DAG) const;

  std::pair<const TargetRegisterClass*, uint8_t>
  findRepresentativeClass(const TargetRegisterInfo *TRI, MVT VT)
      const override;

  bool shouldWidenToHvx(MVT Ty, SelectionDAG &DAG) const;
  bool isHvxOperation(SDNode *N, SelectionDAG &DAG) const;
  SDValue LowerHvxOperation(SDValue Op, SelectionDAG &DAG) const;
  void LowerHvxOperationWrapper(SDNode *N, SmallVectorImpl<SDValue> &Results,
                                SelectionDAG &DAG) const;
  void ReplaceHvxNodeResults(SDNode *N, SmallVectorImpl<SDValue> &Results,
                             SelectionDAG &DAG) const;
  SDValue PerformHvxDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const;
};

} // end namespace llvm

#endif // LLVM_LIB_TARGET_HEXAGON_HEXAGONISELLOWERING_H