//===-- ARMISelDAGToDAG.cpp - A dag to dag inst selector for ARM ----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines an instruction selector for the ARM target.
//
//===----------------------------------------------------------------------===//

#include "ARM.h"
#include "ARMBaseInstrInfo.h"
#include "ARMTargetMachine.h"
#include "MCTargetDesc/ARMAddressingModes.h"
#include "Utils/ARMBaseInfo.h"
#include "llvm/ADT/APSInt.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsARM.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Target/TargetOptions.h"
#include <optional>

using namespace llvm;

#define DEBUG_TYPE "arm-isel"
#define PASS_NAME "ARM Instruction Selection"

static cl::opt<bool>
DisableShifterOp("disable-shifter-op", cl::Hidden,
                 cl::desc("Disable isel of shifter-op"),
                 cl::init(false));

//===--------------------------------------------------------------------===//
/// ARMDAGToDAGISel - ARM specific code to select ARM machine
/// instructions for SelectionDAG operations.
///
namespace {

class ARMDAGToDAGISel : public SelectionDAGISel {
  /// Subtarget - Keep a pointer to the ARMSubtarget around so that we can
  /// make the right decision when generating code for different targets.
  const ARMSubtarget *Subtarget;

public:
  static char ID;

  ARMDAGToDAGISel() = delete;

  explicit ARMDAGToDAGISel(ARMBaseTargetMachine &tm, CodeGenOpt::Level OptLevel)
      : SelectionDAGISel(ID, tm, OptLevel) {}

  bool runOnMachineFunction(MachineFunction &MF) override {
    // Reset the subtarget each time through.
    Subtarget = &MF.getSubtarget<ARMSubtarget>();
    SelectionDAGISel::runOnMachineFunction(MF);
    return true;
  }

  void PreprocessISelDAG() override;

  /// getI32Imm - Return a target constant of type i32 with the specified
  /// value.
  inline SDValue getI32Imm(unsigned Imm, const SDLoc &dl) {
    return CurDAG->getTargetConstant(Imm, dl, MVT::i32);
  }

  void Select(SDNode *N) override;

  /// Return true as some complex patterns, like those that call
  /// canExtractShiftFromMul, can modify the DAG in place.
  bool ComplexPatternFuncMutatesDAG() const override { return true; }

  bool hasNoVMLxHazardUse(SDNode *N) const;
  bool isShifterOpProfitable(const SDValue &Shift,
                             ARM_AM::ShiftOpc ShOpcVal, unsigned ShAmt);
  bool SelectRegShifterOperand(SDValue N, SDValue &A,
                               SDValue &B, SDValue &C,
                               bool CheckProfitability = true);
  bool SelectImmShifterOperand(SDValue N, SDValue &A,
                               SDValue &B, bool CheckProfitability = true);
  bool SelectShiftRegShifterOperand(SDValue N, SDValue &A, SDValue &B,
                                    SDValue &C) {
    // Don't apply the profitability check
    return SelectRegShifterOperand(N, A, B, C, false);
  }
  bool SelectShiftImmShifterOperand(SDValue N, SDValue &A, SDValue &B) {
    // Don't apply the profitability check
    return SelectImmShifterOperand(N, A, B, false);
  }
  bool SelectShiftImmShifterOperandOneUse(SDValue N, SDValue &A, SDValue &B) {
    if (!N.hasOneUse())
      return false;
    return SelectImmShifterOperand(N, A, B, false);
  }

  bool SelectAddLikeOr(SDNode *Parent, SDValue N, SDValue &Out);

  bool SelectAddrModeImm12(SDValue N, SDValue &Base, SDValue &OffImm);
  bool SelectLdStSOReg(SDValue N, SDValue &Base, SDValue &Offset, SDValue &Opc);

  bool SelectCMOVPred(SDValue N, SDValue &Pred, SDValue &Reg) {
    const ConstantSDNode *CN = cast<ConstantSDNode>(N);
    Pred = CurDAG->getTargetConstant(CN->getZExtValue(), SDLoc(N), MVT::i32);
    Reg = CurDAG->getRegister(ARM::CPSR, MVT::i32);
    return true;
  }

  bool SelectAddrMode2OffsetReg(SDNode *Op, SDValue N,
                                SDValue &Offset, SDValue &Opc);
  bool SelectAddrMode2OffsetImm(SDNode *Op, SDValue N,
                                SDValue &Offset, SDValue &Opc);
  bool SelectAddrMode2OffsetImmPre(SDNode *Op, SDValue N,
                                   SDValue &Offset, SDValue &Opc);
  bool SelectAddrOffsetNone(SDValue N, SDValue &Base);
  bool SelectAddrMode3(SDValue N, SDValue &Base,
                       SDValue &Offset, SDValue &Opc);
  bool SelectAddrMode3Offset(SDNode *Op, SDValue N,
                             SDValue &Offset, SDValue &Opc);
  bool IsAddressingMode5(SDValue N, SDValue &Base, SDValue &Offset, bool FP16);
  bool SelectAddrMode5(SDValue N, SDValue &Base, SDValue &Offset);
  bool SelectAddrMode5FP16(SDValue N, SDValue &Base, SDValue &Offset);
  bool SelectAddrMode6(SDNode *Parent, SDValue N, SDValue &Addr,
                       SDValue &Align);
  bool SelectAddrMode6Offset(SDNode *Op, SDValue N, SDValue &Offset);

  bool SelectAddrModePC(SDValue N, SDValue &Offset, SDValue &Label);

  // Thumb Addressing Modes:
  bool SelectThumbAddrModeRR(SDValue N, SDValue &Base, SDValue &Offset);
  bool SelectThumbAddrModeRRSext(SDValue N, SDValue &Base, SDValue &Offset);
  bool SelectThumbAddrModeImm5S(SDValue N, unsigned Scale, SDValue &Base,
                                SDValue &OffImm);
  bool SelectThumbAddrModeImm5S1(SDValue N, SDValue &Base,
                                 SDValue &OffImm);
  bool SelectThumbAddrModeImm5S2(SDValue N, SDValue &Base,
                                 SDValue &OffImm);
  bool SelectThumbAddrModeImm5S4(SDValue N, SDValue &Base,
                                 SDValue &OffImm);
  bool SelectThumbAddrModeSP(SDValue N, SDValue &Base, SDValue &OffImm);
  template <unsigned Shift>
  bool SelectTAddrModeImm7(SDValue N, SDValue &Base, SDValue &OffImm);

  // Thumb 2 Addressing Modes:
  bool SelectT2AddrModeImm12(SDValue N, SDValue &Base, SDValue &OffImm);
  template <unsigned Shift>
  bool SelectT2AddrModeImm8(SDValue N, SDValue &Base, SDValue &OffImm);
  bool SelectT2AddrModeImm8(SDValue N, SDValue &Base,
                            SDValue &OffImm);
  bool SelectT2AddrModeImm8Offset(SDNode *Op, SDValue N,
                                  SDValue &OffImm);
  template <unsigned Shift>
  bool SelectT2AddrModeImm7Offset(SDNode *Op, SDValue N, SDValue &OffImm);
  bool SelectT2AddrModeImm7Offset(SDNode *Op, SDValue N, SDValue &OffImm,
                                  unsigned Shift);
  template <unsigned Shift>
  bool SelectT2AddrModeImm7(SDValue N, SDValue &Base, SDValue &OffImm);
  bool SelectT2AddrModeSoReg(SDValue N, SDValue &Base,
                             SDValue &OffReg, SDValue &ShImm);
  bool SelectT2AddrModeExclusive(SDValue N, SDValue &Base, SDValue &OffImm);

  template <int Min, int Max>
  bool SelectImmediateInRange(SDValue N, SDValue &OffImm);

  inline bool is_so_imm(unsigned Imm) const {
    return ARM_AM::getSOImmVal(Imm) != -1;
  }

  inline bool is_so_imm_not(unsigned Imm) const {
    return ARM_AM::getSOImmVal(~Imm) != -1;
  }

  inline bool is_t2_so_imm(unsigned Imm) const {
    return ARM_AM::getT2SOImmVal(Imm) != -1;
  }

  inline bool is_t2_so_imm_not(unsigned Imm) const {
    return ARM_AM::getT2SOImmVal(~Imm) != -1;
  }

  // Include the pieces autogenerated from the target description.
#include "ARMGenDAGISel.inc"

private:
  void transferMemOperands(SDNode *Src, SDNode *Dst);

  /// Indexed (pre/post inc/dec) load matching code for ARM.
  bool tryARMIndexedLoad(SDNode *N);
  bool tryT1IndexedLoad(SDNode *N);
  bool tryT2IndexedLoad(SDNode *N);
  bool tryMVEIndexedLoad(SDNode *N);
  bool tryFMULFixed(SDNode *N, SDLoc dl);
  bool tryFP_TO_INT(SDNode *N, SDLoc dl);
  bool transformFixedFloatingPointConversion(SDNode *N, SDNode *FMul,
                                             bool IsUnsigned,
                                             bool FixedToFloat);

  /// SelectVLD - Select NEON load intrinsics. NumVecs should be
  /// 1, 2, 3 or 4. The opcode arrays specify the instructions used for
  /// loads of D registers and even subregs and odd subregs of Q registers.
  /// For NumVecs <= 2, QOpcodes1 is not used.
  void SelectVLD(SDNode *N, bool isUpdating, unsigned NumVecs,
                 const uint16_t *DOpcodes, const uint16_t *QOpcodes0,
                 const uint16_t *QOpcodes1);

  /// SelectVST - Select NEON store intrinsics. NumVecs should
  /// be 1, 2, 3 or 4. The opcode arrays specify the instructions used for
  /// stores of D registers and even subregs and odd subregs of Q registers.
  /// For NumVecs <= 2, QOpcodes1 is not used.
  void SelectVST(SDNode *N, bool isUpdating, unsigned NumVecs,
                 const uint16_t *DOpcodes, const uint16_t *QOpcodes0,
                 const uint16_t *QOpcodes1);

  /// SelectVLDSTLane - Select NEON load/store lane intrinsics. NumVecs should
  /// be 2, 3 or 4. The opcode arrays specify the instructions used for
  /// load/store of D registers and Q registers.
  void SelectVLDSTLane(SDNode *N, bool IsLoad, bool isUpdating,
                       unsigned NumVecs, const uint16_t *DOpcodes,
                       const uint16_t *QOpcodes);

  /// Helper functions for setting up clusters of MVE predication operands.
  template <typename SDValueVector>
  void AddMVEPredicateToOps(SDValueVector &Ops, SDLoc Loc,
                            SDValue PredicateMask);
  template <typename SDValueVector>
  void AddMVEPredicateToOps(SDValueVector &Ops, SDLoc Loc,
                            SDValue PredicateMask, SDValue Inactive);

  template <typename SDValueVector>
  void AddEmptyMVEPredicateToOps(SDValueVector &Ops, SDLoc Loc);
  template <typename SDValueVector>
  void AddEmptyMVEPredicateToOps(SDValueVector &Ops, SDLoc Loc, EVT InactiveTy);

  /// SelectMVE_WB - Select MVE writeback load/store intrinsics.
  void SelectMVE_WB(SDNode *N, const uint16_t *Opcodes, bool Predicated);

  /// SelectMVE_LongShift - Select MVE 64-bit scalar shift intrinsics.
  void SelectMVE_LongShift(SDNode *N, uint16_t Opcode, bool Immediate,
                           bool HasSaturationOperand);

  /// SelectMVE_VADCSBC - Select MVE vector add/sub-with-carry intrinsics.
  void SelectMVE_VADCSBC(SDNode *N, uint16_t OpcodeWithCarry,
                         uint16_t OpcodeWithNoCarry, bool Add, bool Predicated);

  /// SelectMVE_VSHLC - Select MVE intrinsics for a shift that carries between
  /// vector lanes.
  void SelectMVE_VSHLC(SDNode *N, bool Predicated);

  /// Select long MVE vector reductions with two vector operands.
  /// Stride is the number of vector element widths the instruction can operate
  /// on:
  ///   2 for long non-rounding variants, vml{a,s}ldav[a][x]: [i16, i32]
  ///   1 for long rounding variants: vrml{a,s}ldavh[a][x]: [i32]
  /// Stride is used when addressing the OpcodesS array, which contains
  /// multiple opcodes for each element width.
  /// TySize is the index into the list of element types listed above.
  void SelectBaseMVE_VMLLDAV(SDNode *N, bool Predicated,
                             const uint16_t *OpcodesS, const uint16_t *OpcodesU,
                             size_t Stride, size_t TySize);

  /// Select a 64-bit MVE vector reduction with two vector operands
  /// arm_mve_vmlldava_[predicated]
  void SelectMVE_VMLLDAV(SDNode *N, bool Predicated, const uint16_t *OpcodesS,
                         const uint16_t *OpcodesU);
  /// Select a 72-bit MVE vector rounding reduction with two vector operands
  /// int_arm_mve_vrmlldavha[_predicated]
  void SelectMVE_VRMLLDAVH(SDNode *N, bool Predicated, const uint16_t *OpcodesS,
                           const uint16_t *OpcodesU);

  /// SelectMVE_VLD - Select MVE interleaving load intrinsics. NumVecs
  /// should be 2 or 4. The opcode array specifies the instructions
  /// used for 8, 16 and 32-bit lane sizes respectively, and each
  /// pointer points to a set of NumVecs sub-opcodes used for the
  /// different stages (e.g. VLD20 versus VLD21) of each load family.
  void SelectMVE_VLD(SDNode *N, unsigned NumVecs,
                     const uint16_t *const *Opcodes, bool HasWriteback);

  /// SelectMVE_VxDUP - Select MVE incrementing-dup instructions. Opcodes is an
  /// array of 3 elements for the 8, 16 and 32-bit lane sizes.
  void SelectMVE_VxDUP(SDNode *N, const uint16_t *Opcodes,
                       bool Wrapping, bool Predicated);

  /// SelectCDE_CXxD - Select a CDE dual-GPR instruction (one of CX1D,
  /// CX1DA, CX2D, CX2DA, CX3D, CX3DA).
  /// \arg \c NumExtraOps number of extra operands besides the coprocessor,
  ///                     the accumulator and the immediate operand, i.e. 0
  ///                     for CX1*, 1 for CX2*, 2 for CX3*
  /// \arg \c HasAccum whether the instruction has an accumulator operand
  void SelectCDE_CXxD(SDNode *N, uint16_t Opcode, size_t NumExtraOps,
                      bool HasAccum);

  /// SelectVLDDup - Select NEON load-duplicate intrinsics. NumVecs
  /// should be 1, 2, 3 or 4. The opcode array specifies the instructions used
  /// for loading D registers.
  void SelectVLDDup(SDNode *N, bool IsIntrinsic, bool isUpdating,
                    unsigned NumVecs, const uint16_t *DOpcodes,
                    const uint16_t *QOpcodes0 = nullptr,
                    const uint16_t *QOpcodes1 = nullptr);

  /// Try to select SBFX/UBFX instructions for ARM.
  bool tryV6T2BitfieldExtractOp(SDNode *N, bool isSigned);

  bool tryInsertVectorElt(SDNode *N);

  // Select special operations if node forms integer ABS pattern
  bool tryABSOp(SDNode *N);

  bool tryReadRegister(SDNode *N);
  bool tryWriteRegister(SDNode *N);

  bool tryInlineAsm(SDNode *N);

  void SelectCMPZ(SDNode *N, bool &SwitchEQNEToPLMI);

  void SelectCMP_SWAP(SDNode *N);

  /// SelectInlineAsmMemoryOperand - Implement addressing mode selection for
  /// inline asm expressions.
  bool SelectInlineAsmMemoryOperand(const SDValue &Op, unsigned ConstraintID,
                                    std::vector<SDValue> &OutOps) override;

  // Form pairs of consecutive R, S, D, or Q registers.
  SDNode *createGPRPairNode(EVT VT, SDValue V0, SDValue V1);
  SDNode *createSRegPairNode(EVT VT, SDValue V0, SDValue V1);
  SDNode *createDRegPairNode(EVT VT, SDValue V0, SDValue V1);
  SDNode *createQRegPairNode(EVT VT, SDValue V0, SDValue V1);

  // Form sequences of 4 consecutive S, D, or Q registers.
  SDNode *createQuadSRegsNode(EVT VT, SDValue V0, SDValue V1,
                              SDValue V2, SDValue V3);
  SDNode *createQuadDRegsNode(EVT VT, SDValue V0, SDValue V1,
                              SDValue V2, SDValue V3);
  SDNode *createQuadQRegsNode(EVT VT, SDValue V0, SDValue V1,
                              SDValue V2, SDValue V3);

  // Get the alignment operand for a NEON VLD or VST instruction.
  SDValue GetVLDSTAlign(SDValue Align, const SDLoc &dl, unsigned NumVecs,
                        bool is64BitVector);

  /// Checks if N is a multiplication by a constant where we can extract out a
  /// power of two from the constant so that it can be used in a shift, but
  /// only if it simplifies the materialization of the constant. Returns true
  /// if it is, and assigns to PowerOfTwo the power of two that should be
  /// extracted out and to NewMulConst the new constant to be multiplied by.
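  /// For example, a multiply by 1020 can be rewritten as a multiply by 255
  /// followed by a left shift by 2; e.g. on Thumb1 the 8-bit constant 255 can
  /// be materialized with a single MOV, while 1020 needs an extra shift.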
  bool canExtractShiftFromMul(const SDValue &N, unsigned MaxShift,
                              unsigned &PowerOfTwo, SDValue &NewMulConst) const;

  /// Replace N with M in CurDAG, in a way that also ensures that M gets
  /// selected when N would have been selected.
  void replaceDAGValue(const SDValue &N, SDValue M);
};
} // end anonymous namespace

char ARMDAGToDAGISel::ID = 0;

INITIALIZE_PASS(ARMDAGToDAGISel, DEBUG_TYPE, PASS_NAME, false, false)

/// isInt32Immediate - This method tests to see if the node is a 32-bit
/// constant operand. If so, Imm will receive the 32-bit value.
static bool isInt32Immediate(SDNode *N, unsigned &Imm) {
  if (N->getOpcode() == ISD::Constant && N->getValueType(0) == MVT::i32) {
    Imm = cast<ConstantSDNode>(N)->getZExtValue();
    return true;
  }
  return false;
}

// isInt32Immediate - This method tests to see if the value is a 32-bit
// constant operand. If so, Imm will receive the 32-bit value.
static bool isInt32Immediate(SDValue N, unsigned &Imm) {
  return isInt32Immediate(N.getNode(), Imm);
}

// isOpcWithIntImmediate - This method tests to see if the node is a specific
// opcode and that it has an immediate integer right operand.
// If so, Imm will receive the 32-bit value.
static bool isOpcWithIntImmediate(SDNode *N, unsigned Opc, unsigned &Imm) {
  return N->getOpcode() == Opc &&
         isInt32Immediate(N->getOperand(1).getNode(), Imm);
}

/// Check whether a particular node is a constant value representable as
/// (N * Scale) where N is in the range [\p RangeMin, \p RangeMax).
///
/// \param ScaledConstant [out] - On success, the pre-scaled constant value.
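/// For example, with Scale == 4, RangeMin == 0 and RangeMax == 256, the
/// constant 1020 succeeds and sets ScaledConstant to 255.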
static bool isScaledConstantInRange(SDValue Node, int Scale,
                                    int RangeMin, int RangeMax,
                                    int &ScaledConstant) {
  assert(Scale > 0 && "Invalid scale!");

  // Check that this is a constant.
  const ConstantSDNode *C = dyn_cast<ConstantSDNode>(Node);
  if (!C)
    return false;

  ScaledConstant = (int) C->getZExtValue();
  if ((ScaledConstant % Scale) != 0)
    return false;

  ScaledConstant /= Scale;
  return ScaledConstant >= RangeMin && ScaledConstant < RangeMax;
}

void ARMDAGToDAGISel::PreprocessISelDAG() {
  if (!Subtarget->hasV6T2Ops())
    return;

  bool isThumb2 = Subtarget->isThumb();
  // We use make_early_inc_range to avoid invalidation issues.
  for (SDNode &N : llvm::make_early_inc_range(CurDAG->allnodes())) {
    if (N.getOpcode() != ISD::ADD)
      continue;

    // Look for (add X1, (and (srl X2, c1), c2)) where c2 is a constant with
    // leading zeros, followed by consecutive set bits, followed by 1 or 2
    // trailing zeros, e.g. 1020.
    // Transform the expression to
    // (add X1, (shl (and (srl X2, c1), (c2 >> tz)), tz)) where tz is the
    // number of trailing zeros of c2. The left shift would be folded as a
    // shifter operand of 'add', and the 'and' and 'srl' would become a
    // bit-field extraction node (UBFX).
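    //
    // For example, (add X1, (and (srl X2, 14), 1020)) becomes
    // (add X1, (shl (and (srl X2, 16), 255), 2)); the inner 'srl'/'and' pair
    // is then selected as a single UBFX.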

    SDValue N0 = N.getOperand(0);
    SDValue N1 = N.getOperand(1);
    unsigned And_imm = 0;
    if (!isOpcWithIntImmediate(N1.getNode(), ISD::AND, And_imm)) {
      if (isOpcWithIntImmediate(N0.getNode(), ISD::AND, And_imm))
        std::swap(N0, N1);
    }
    if (!And_imm)
      continue;

    // Check if the AND mask is an immediate of the form: 000.....1111111100
    unsigned TZ = countTrailingZeros(And_imm);
    if (TZ != 1 && TZ != 2)
      // Be conservative here. Shifter operands aren't always free. e.g. on
      // Swift, a left-shift amount of 1 or 2 is free, but other amounts are
      // not. Compare:
      //   ubfx   r3, r1, #16, #8
      //   ldr.w  r3, [r0, r3, lsl #2]
      // vs.
      //   mov.w  r9, #1020
      //   and.w  r2, r9, r1, lsr #14
      //   ldr    r2, [r0, r2]
      continue;
    And_imm >>= TZ;
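    // With the trailing zeros stripped, the mask must be all ones:
    // x & (x + 1) == 0 exactly when x has the form 0...01...1.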
    if (And_imm & (And_imm + 1))
      continue;

    // Look for (and (srl X, c1), c2).
    SDValue Srl = N1.getOperand(0);
    unsigned Srl_imm = 0;
    if (!isOpcWithIntImmediate(Srl.getNode(), ISD::SRL, Srl_imm) ||
        (Srl_imm <= 2))
      continue;

    // Make sure first operand is not a shifter operand which would prevent
    // folding of the left shift.
    SDValue CPTmp0;
    SDValue CPTmp1;
    SDValue CPTmp2;
    if (isThumb2) {
      if (SelectImmShifterOperand(N0, CPTmp0, CPTmp1))
        continue;
    } else {
      if (SelectImmShifterOperand(N0, CPTmp0, CPTmp1) ||
          SelectRegShifterOperand(N0, CPTmp0, CPTmp1, CPTmp2))
        continue;
    }

    // Now make the transformation.
    Srl = CurDAG->getNode(ISD::SRL, SDLoc(Srl), MVT::i32,
                          Srl.getOperand(0),
                          CurDAG->getConstant(Srl_imm + TZ, SDLoc(Srl),
                                              MVT::i32));
    N1 = CurDAG->getNode(ISD::AND, SDLoc(N1), MVT::i32,
                         Srl,
                         CurDAG->getConstant(And_imm, SDLoc(Srl), MVT::i32));
    N1 = CurDAG->getNode(ISD::SHL, SDLoc(N1), MVT::i32,
                         N1, CurDAG->getConstant(TZ, SDLoc(Srl), MVT::i32));
    CurDAG->UpdateNodeOperands(&N, N0, N1);
  }
}

/// hasNoVMLxHazardUse - Return true if it's desirable to select a FP MLA / MLS
/// node. VFP / NEON fp VMLA / VMLS instructions have special RAW hazards (at
/// least on current ARM implementations) which should be avoided.
bool ARMDAGToDAGISel::hasNoVMLxHazardUse(SDNode *N) const {
  if (OptLevel == CodeGenOpt::None)
    return true;

  if (!Subtarget->hasVMLxHazards())
    return true;

  if (!N->hasOneUse())
    return false;

  SDNode *Use = *N->use_begin();
  if (Use->getOpcode() == ISD::CopyToReg)
    return true;
  if (Use->isMachineOpcode()) {
    const ARMBaseInstrInfo *TII = static_cast<const ARMBaseInstrInfo *>(
        CurDAG->getSubtarget().getInstrInfo());

    const MCInstrDesc &MCID = TII->get(Use->getMachineOpcode());
    if (MCID.mayStore())
      return true;
    unsigned Opcode = MCID.getOpcode();
    if (Opcode == ARM::VMOVRS || Opcode == ARM::VMOVRRD)
      return true;
    // vmlx feeding into another vmlx. We actually want to unfold
    // the use later in the MLxExpansion pass. e.g.
    //   vmla
    //   vmla (stall 8 cycles)
    //
    //   vmul (5 cycles)
    //   vadd (5 cycles)
    //   vmla
    // This adds up to about 18 - 19 cycles.
    //
    //   vmla
    //   vmul (stall 4 cycles)
    //   vadd
    // This adds up to about 14 cycles.
    return TII->isFpMLxInstruction(Opcode);
  }

  return false;
}

bool ARMDAGToDAGISel::isShifterOpProfitable(const SDValue &Shift,
                                            ARM_AM::ShiftOpc ShOpcVal,
                                            unsigned ShAmt) {
  if (!Subtarget->isLikeA9() && !Subtarget->isSwift())
    return true;
  if (Shift.hasOneUse())
    return true;
  // R << 2 is free.
  return ShOpcVal == ARM_AM::lsl &&
         (ShAmt == 2 || (Subtarget->isSwift() && ShAmt == 1));
}

bool ARMDAGToDAGISel::canExtractShiftFromMul(const SDValue &N,
                                             unsigned MaxShift,
                                             unsigned &PowerOfTwo,
                                             SDValue &NewMulConst) const {
  assert(N.getOpcode() == ISD::MUL);
  assert(MaxShift > 0);

  // If the multiply is used in more than one place then changing the constant
  // will make other uses incorrect, so don't.
  if (!N.hasOneUse()) return false;
  // Check if the multiply is by a constant
  ConstantSDNode *MulConst = dyn_cast<ConstantSDNode>(N.getOperand(1));
  if (!MulConst) return false;
  // If the constant is used in more than one place then modifying it will mean
  // we need to materialize two constants instead of one, which is a bad idea.
  if (!MulConst->hasOneUse()) return false;
  unsigned MulConstVal = MulConst->getZExtValue();
  if (MulConstVal == 0) return false;

  // Find the largest power of 2 that MulConstVal is a multiple of
  PowerOfTwo = MaxShift;
  while ((MulConstVal % (1 << PowerOfTwo)) != 0) {
    --PowerOfTwo;
    if (PowerOfTwo == 0) return false;
  }

  // Only optimise if the new cost is better
  unsigned NewMulConstVal = MulConstVal / (1 << PowerOfTwo);
  NewMulConst = CurDAG->getConstant(NewMulConstVal, SDLoc(N), MVT::i32);
  unsigned OldCost = ConstantMaterializationCost(MulConstVal, Subtarget);
  unsigned NewCost = ConstantMaterializationCost(NewMulConstVal, Subtarget);
  return NewCost < OldCost;
}

void ARMDAGToDAGISel::replaceDAGValue(const SDValue &N, SDValue M) {
  CurDAG->RepositionNode(N.getNode()->getIterator(), M.getNode());
  ReplaceUses(N, M);
}

bool ARMDAGToDAGISel::SelectImmShifterOperand(SDValue N,
                                              SDValue &BaseReg,
                                              SDValue &Opc,
                                              bool CheckProfitability) {
  if (DisableShifterOp)
    return false;

  // If N is a multiply-by-constant and it's profitable to extract a shift and
  // use it in a shifted operand do so.
  if (N.getOpcode() == ISD::MUL) {
    unsigned PowerOfTwo = 0;
    SDValue NewMulConst;
    if (canExtractShiftFromMul(N, 31, PowerOfTwo, NewMulConst)) {
      HandleSDNode Handle(N);
      SDLoc Loc(N);
      replaceDAGValue(N.getOperand(1), NewMulConst);
      BaseReg = Handle.getValue();
      Opc = CurDAG->getTargetConstant(
          ARM_AM::getSORegOpc(ARM_AM::lsl, PowerOfTwo), Loc, MVT::i32);
      return true;
    }
  }

  ARM_AM::ShiftOpc ShOpcVal = ARM_AM::getShiftOpcForNode(N.getOpcode());

  // Don't match base register only case. That is matched to a separate
  // lower complexity pattern with explicit register operand.
  if (ShOpcVal == ARM_AM::no_shift) return false;

  BaseReg = N.getOperand(0);
  unsigned ShImmVal = 0;
  ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1));
  if (!RHS) return false;
  ShImmVal = RHS->getZExtValue() & 31;
  Opc = CurDAG->getTargetConstant(ARM_AM::getSORegOpc(ShOpcVal, ShImmVal),
                                  SDLoc(N), MVT::i32);
  return true;
}

bool ARMDAGToDAGISel::SelectRegShifterOperand(SDValue N,
                                              SDValue &BaseReg,
                                              SDValue &ShReg,
                                              SDValue &Opc,
                                              bool CheckProfitability) {
  if (DisableShifterOp)
    return false;

  ARM_AM::ShiftOpc ShOpcVal = ARM_AM::getShiftOpcForNode(N.getOpcode());

  // Don't match base register only case. That is matched to a separate
  // lower complexity pattern with explicit register operand.
  if (ShOpcVal == ARM_AM::no_shift) return false;

  BaseReg = N.getOperand(0);
  unsigned ShImmVal = 0;
  ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1));
  if (RHS) return false;

  ShReg = N.getOperand(1);
  if (CheckProfitability && !isShifterOpProfitable(N, ShOpcVal, ShImmVal))
    return false;
  Opc = CurDAG->getTargetConstant(ARM_AM::getSORegOpc(ShOpcVal, ShImmVal),
                                  SDLoc(N), MVT::i32);
  return true;
}

// Determine whether an ISD::OR's operands are suitable to turn the operation
// into an addition, which often has more compact encodings.
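// When the two operands have no set bits in common, N | M == N + M, so the
// OR can safely be selected as an add-like operation.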
bool ARMDAGToDAGISel::SelectAddLikeOr(SDNode *Parent, SDValue N, SDValue &Out) {
  assert(Parent->getOpcode() == ISD::OR && "unexpected parent");
  Out = N;
  return CurDAG->haveNoCommonBitsSet(N, Parent->getOperand(1));
}

bool ARMDAGToDAGISel::SelectAddrModeImm12(SDValue N,
                                          SDValue &Base,
                                          SDValue &OffImm) {
  // Match simple R + imm12 operands.

  // Base only.
  if (N.getOpcode() != ISD::ADD && N.getOpcode() != ISD::SUB &&
      !CurDAG->isBaseWithConstantOffset(N)) {
    if (N.getOpcode() == ISD::FrameIndex) {
      // Match frame index.
      int FI = cast<FrameIndexSDNode>(N)->getIndex();
      Base = CurDAG->getTargetFrameIndex(
          FI, TLI->getPointerTy(CurDAG->getDataLayout()));
      OffImm = CurDAG->getTargetConstant(0, SDLoc(N), MVT::i32);
      return true;
    }

    if (N.getOpcode() == ARMISD::Wrapper &&
        N.getOperand(0).getOpcode() != ISD::TargetGlobalAddress &&
        N.getOperand(0).getOpcode() != ISD::TargetExternalSymbol &&
        N.getOperand(0).getOpcode() != ISD::TargetGlobalTLSAddress) {
      Base = N.getOperand(0);
    } else
      Base = N;
    OffImm = CurDAG->getTargetConstant(0, SDLoc(N), MVT::i32);
    return true;
  }

  if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
    int RHSC = (int)RHS->getSExtValue();
    if (N.getOpcode() == ISD::SUB)
      RHSC = -RHSC;

    if (RHSC > -0x1000 && RHSC < 0x1000) { // 12 bits
      Base = N.getOperand(0);
      if (Base.getOpcode() == ISD::FrameIndex) {
        int FI = cast<FrameIndexSDNode>(Base)->getIndex();
        Base = CurDAG->getTargetFrameIndex(
            FI, TLI->getPointerTy(CurDAG->getDataLayout()));
      }
      OffImm = CurDAG->getTargetConstant(RHSC, SDLoc(N), MVT::i32);
      return true;
    }
  }

  // Base only.
  Base = N;
  OffImm = CurDAG->getTargetConstant(0, SDLoc(N), MVT::i32);
  return true;
}

bool ARMDAGToDAGISel::SelectLdStSOReg(SDValue N, SDValue &Base, SDValue &Offset,
                                      SDValue &Opc) {
  if (N.getOpcode() == ISD::MUL &&
      ((!Subtarget->isLikeA9() && !Subtarget->isSwift()) || N.hasOneUse())) {
    if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
      // X * [3,5,9] -> X + X * [2,4,8] etc.
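      // e.g. X * 9 is matched as base = X, offset = X with an LSL #3 shifter
      // operand, i.e. X + (X << 3).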
      int RHSC = (int)RHS->getZExtValue();
      if (RHSC & 1) {
        RHSC = RHSC & ~1;
        ARM_AM::AddrOpc AddSub = ARM_AM::add;
        if (RHSC < 0) {
          AddSub = ARM_AM::sub;
          RHSC = -RHSC;
        }
        if (isPowerOf2_32(RHSC)) {
          unsigned ShAmt = Log2_32(RHSC);
          Base = Offset = N.getOperand(0);
          Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(AddSub, ShAmt,
                                                            ARM_AM::lsl),
                                          SDLoc(N), MVT::i32);
          return true;
        }
      }
    }
  }

  if (N.getOpcode() != ISD::ADD && N.getOpcode() != ISD::SUB &&
      // ISD::OR that is equivalent to an ISD::ADD.
      !CurDAG->isBaseWithConstantOffset(N))
    return false;

  // Leave simple R +/- imm12 operands for LDRi12
  if (N.getOpcode() == ISD::ADD || N.getOpcode() == ISD::OR) {
    int RHSC;
    if (isScaledConstantInRange(N.getOperand(1), /*Scale=*/1,
                                -0x1000+1, 0x1000, RHSC)) // 12 bits.
      return false;
  }

  // Otherwise this is R +/- [possibly shifted] R.
  ARM_AM::AddrOpc AddSub = N.getOpcode() == ISD::SUB ? ARM_AM::sub
                                                     : ARM_AM::add;
  ARM_AM::ShiftOpc ShOpcVal =
    ARM_AM::getShiftOpcForNode(N.getOperand(1).getOpcode());
  unsigned ShAmt = 0;

  Base   = N.getOperand(0);
  Offset = N.getOperand(1);

  if (ShOpcVal != ARM_AM::no_shift) {
    // Check to see if the RHS of the shift is a constant, if not, we can't
    // fold it.
    if (ConstantSDNode *Sh =
           dyn_cast<ConstantSDNode>(N.getOperand(1).getOperand(1))) {
      ShAmt = Sh->getZExtValue();
      if (isShifterOpProfitable(Offset, ShOpcVal, ShAmt))
        Offset = N.getOperand(1).getOperand(0);
      else {
        ShAmt = 0;
        ShOpcVal = ARM_AM::no_shift;
      }
    } else {
      ShOpcVal = ARM_AM::no_shift;
    }
  }

  // Try matching (R shl C) + (R).
  if (N.getOpcode() != ISD::SUB && ShOpcVal == ARM_AM::no_shift &&
      !(Subtarget->isLikeA9() || Subtarget->isSwift() ||
        N.getOperand(0).hasOneUse())) {
    ShOpcVal = ARM_AM::getShiftOpcForNode(N.getOperand(0).getOpcode());
    if (ShOpcVal != ARM_AM::no_shift) {
      // Check to see if the RHS of the shift is a constant, if not, we can't
      // fold it.
      if (ConstantSDNode *Sh =
             dyn_cast<ConstantSDNode>(N.getOperand(0).getOperand(1))) {
        ShAmt = Sh->getZExtValue();
        if (isShifterOpProfitable(N.getOperand(0), ShOpcVal, ShAmt)) {
          Offset = N.getOperand(0).getOperand(0);
          Base = N.getOperand(1);
        } else {
          ShAmt = 0;
          ShOpcVal = ARM_AM::no_shift;
        }
      } else {
        ShOpcVal = ARM_AM::no_shift;
      }
    }
  }

  // If Offset is a multiply-by-constant and it's profitable to extract a shift
  // and use it in a shifted operand do so.
  if (Offset.getOpcode() == ISD::MUL && N.hasOneUse()) {
    unsigned PowerOfTwo = 0;
    SDValue NewMulConst;
    if (canExtractShiftFromMul(Offset, 31, PowerOfTwo, NewMulConst)) {
      HandleSDNode Handle(Offset);
      replaceDAGValue(Offset.getOperand(1), NewMulConst);
      Offset = Handle.getValue();
      ShAmt = PowerOfTwo;
      ShOpcVal = ARM_AM::lsl;
    }
  }

  Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(AddSub, ShAmt, ShOpcVal),
                                  SDLoc(N), MVT::i32);
  return true;
}

bool ARMDAGToDAGISel::SelectAddrMode2OffsetReg(SDNode *Op, SDValue N,
                                               SDValue &Offset, SDValue &Opc) {
  unsigned Opcode = Op->getOpcode();
  ISD::MemIndexedMode AM = (Opcode == ISD::LOAD)
    ? cast<LoadSDNode>(Op)->getAddressingMode()
    : cast<StoreSDNode>(Op)->getAddressingMode();
  ARM_AM::AddrOpc AddSub = (AM == ISD::PRE_INC || AM == ISD::POST_INC)
    ? ARM_AM::add : ARM_AM::sub;
  int Val;
  if (isScaledConstantInRange(N, /*Scale=*/1, 0, 0x1000, Val))
    return false;

  Offset = N;
  ARM_AM::ShiftOpc ShOpcVal = ARM_AM::getShiftOpcForNode(N.getOpcode());
  unsigned ShAmt = 0;
  if (ShOpcVal != ARM_AM::no_shift) {
    // Check to see if the RHS of the shift is a constant, if not, we can't
    // fold it.
    if (ConstantSDNode *Sh = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
      ShAmt = Sh->getZExtValue();
      if (isShifterOpProfitable(N, ShOpcVal, ShAmt))
        Offset = N.getOperand(0);
      else {
        ShAmt = 0;
        ShOpcVal = ARM_AM::no_shift;
      }
    } else {
      ShOpcVal = ARM_AM::no_shift;
    }
  }

  Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(AddSub, ShAmt, ShOpcVal),
                                  SDLoc(N), MVT::i32);
  return true;
}

bool ARMDAGToDAGISel::SelectAddrMode2OffsetImmPre(SDNode *Op, SDValue N,
                                                  SDValue &Offset,
                                                  SDValue &Opc) {
  unsigned Opcode = Op->getOpcode();
  ISD::MemIndexedMode AM = (Opcode == ISD::LOAD)
    ? cast<LoadSDNode>(Op)->getAddressingMode()
    : cast<StoreSDNode>(Op)->getAddressingMode();
  ARM_AM::AddrOpc AddSub = (AM == ISD::PRE_INC || AM == ISD::POST_INC)
    ? ARM_AM::add : ARM_AM::sub;
  int Val;
  if (isScaledConstantInRange(N, /*Scale=*/1, 0, 0x1000, Val)) { // 12 bits.
    if (AddSub == ARM_AM::sub) Val *= -1;
    Offset = CurDAG->getRegister(0, MVT::i32);
    Opc = CurDAG->getTargetConstant(Val, SDLoc(Op), MVT::i32);
    return true;
  }

  return false;
}

bool ARMDAGToDAGISel::SelectAddrMode2OffsetImm(SDNode *Op, SDValue N,
                                               SDValue &Offset, SDValue &Opc) {
  unsigned Opcode = Op->getOpcode();
  ISD::MemIndexedMode AM = (Opcode == ISD::LOAD)
    ? cast<LoadSDNode>(Op)->getAddressingMode()
    : cast<StoreSDNode>(Op)->getAddressingMode();
  ARM_AM::AddrOpc AddSub = (AM == ISD::PRE_INC || AM == ISD::POST_INC)
    ? ARM_AM::add : ARM_AM::sub;
  int Val;
  if (isScaledConstantInRange(N, /*Scale=*/1, 0, 0x1000, Val)) { // 12 bits.
    Offset = CurDAG->getRegister(0, MVT::i32);
    Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(AddSub, Val,
                                                      ARM_AM::no_shift),
                                    SDLoc(Op), MVT::i32);
    return true;
  }

  return false;
}

bool ARMDAGToDAGISel::SelectAddrOffsetNone(SDValue N, SDValue &Base) {
  Base = N;
  return true;
}

bool ARMDAGToDAGISel::SelectAddrMode3(SDValue N,
                                      SDValue &Base, SDValue &Offset,
                                      SDValue &Opc) {
  if (N.getOpcode() == ISD::SUB) {
    // X - C is canonicalized to X + -C; no need to handle it here.
    Base = N.getOperand(0);
    Offset = N.getOperand(1);
    Opc = CurDAG->getTargetConstant(ARM_AM::getAM3Opc(ARM_AM::sub, 0), SDLoc(N),
                                    MVT::i32);
    return true;
  }

  if (!CurDAG->isBaseWithConstantOffset(N)) {
    Base = N;
    if (N.getOpcode() == ISD::FrameIndex) {
      int FI = cast<FrameIndexSDNode>(N)->getIndex();
      Base = CurDAG->getTargetFrameIndex(
          FI, TLI->getPointerTy(CurDAG->getDataLayout()));
    }
    Offset = CurDAG->getRegister(0, MVT::i32);
    Opc = CurDAG->getTargetConstant(ARM_AM::getAM3Opc(ARM_AM::add, 0), SDLoc(N),
                                    MVT::i32);
    return true;
  }

  // If the RHS is +/- imm8, fold into addr mode.
  int RHSC;
  if (isScaledConstantInRange(N.getOperand(1), /*Scale=*/1,
                              -256 + 1, 256, RHSC)) { // 8 bits.
    Base = N.getOperand(0);
    if (Base.getOpcode() == ISD::FrameIndex) {
      int FI = cast<FrameIndexSDNode>(Base)->getIndex();
      Base = CurDAG->getTargetFrameIndex(
          FI, TLI->getPointerTy(CurDAG->getDataLayout()));
    }
    Offset = CurDAG->getRegister(0, MVT::i32);

    ARM_AM::AddrOpc AddSub = ARM_AM::add;
    if (RHSC < 0) {
      AddSub = ARM_AM::sub;
      RHSC = -RHSC;
    }
    Opc = CurDAG->getTargetConstant(ARM_AM::getAM3Opc(AddSub, RHSC), SDLoc(N),
                                    MVT::i32);
    return true;
  }

  Base = N.getOperand(0);
  Offset = N.getOperand(1);
  Opc = CurDAG->getTargetConstant(ARM_AM::getAM3Opc(ARM_AM::add, 0), SDLoc(N),
                                  MVT::i32);
  return true;
}

bool ARMDAGToDAGISel::SelectAddrMode3Offset(SDNode *Op, SDValue N,
                                            SDValue &Offset, SDValue &Opc) {
  unsigned Opcode = Op->getOpcode();
  ISD::MemIndexedMode AM = (Opcode == ISD::LOAD)
    ? cast<LoadSDNode>(Op)->getAddressingMode()
    : cast<StoreSDNode>(Op)->getAddressingMode();
  ARM_AM::AddrOpc AddSub = (AM == ISD::PRE_INC || AM == ISD::POST_INC)
    ? ARM_AM::add : ARM_AM::sub;
  int Val;
  if (isScaledConstantInRange(N, /*Scale=*/1, 0, 256, Val)) { // 8 bits.
    Offset = CurDAG->getRegister(0, MVT::i32);
    Opc = CurDAG->getTargetConstant(ARM_AM::getAM3Opc(AddSub, Val), SDLoc(Op),
                                    MVT::i32);
    return true;
  }

  Offset = N;
  Opc = CurDAG->getTargetConstant(ARM_AM::getAM3Opc(AddSub, 0), SDLoc(Op),
                                  MVT::i32);
  return true;
}

bool ARMDAGToDAGISel::IsAddressingMode5(SDValue N, SDValue &Base,
                                        SDValue &Offset, bool FP16) {
  if (!CurDAG->isBaseWithConstantOffset(N)) {
    Base = N;
    if (N.getOpcode() == ISD::FrameIndex) {
      int FI = cast<FrameIndexSDNode>(N)->getIndex();
      Base = CurDAG->getTargetFrameIndex(
          FI, TLI->getPointerTy(CurDAG->getDataLayout()));
    } else if (N.getOpcode() == ARMISD::Wrapper &&
               N.getOperand(0).getOpcode() != ISD::TargetGlobalAddress &&
               N.getOperand(0).getOpcode() != ISD::TargetExternalSymbol &&
               N.getOperand(0).getOpcode() != ISD::TargetGlobalTLSAddress) {
      Base = N.getOperand(0);
    }
    Offset = CurDAG->getTargetConstant(ARM_AM::getAM5Opc(ARM_AM::add, 0),
                                       SDLoc(N), MVT::i32);
    return true;
  }

  // If the RHS is +/- imm8, fold into addr mode.
  int RHSC;
  const int Scale = FP16 ? 2 : 4;
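  // The offset is an 8-bit immediate scaled by 4 (or by 2 for the FP16
  // variant), giving a reachable byte range of +/-1020 (or +/-510).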

  if (isScaledConstantInRange(N.getOperand(1), Scale, -255, 256, RHSC)) {
    Base = N.getOperand(0);
    if (Base.getOpcode() == ISD::FrameIndex) {
      int FI = cast<FrameIndexSDNode>(Base)->getIndex();
      Base = CurDAG->getTargetFrameIndex(
          FI, TLI->getPointerTy(CurDAG->getDataLayout()));
    }

    ARM_AM::AddrOpc AddSub = ARM_AM::add;
    if (RHSC < 0) {
      AddSub = ARM_AM::sub;
      RHSC = -RHSC;
    }

    if (FP16)
      Offset = CurDAG->getTargetConstant(ARM_AM::getAM5FP16Opc(AddSub, RHSC),
                                         SDLoc(N), MVT::i32);
    else
      Offset = CurDAG->getTargetConstant(ARM_AM::getAM5Opc(AddSub, RHSC),
                                         SDLoc(N), MVT::i32);

    return true;
  }

  Base = N;

  if (FP16)
    Offset = CurDAG->getTargetConstant(ARM_AM::getAM5FP16Opc(ARM_AM::add, 0),
                                       SDLoc(N), MVT::i32);
  else
    Offset = CurDAG->getTargetConstant(ARM_AM::getAM5Opc(ARM_AM::add, 0),
                                       SDLoc(N), MVT::i32);

  return true;
}

bool ARMDAGToDAGISel::SelectAddrMode5(SDValue N,
                                      SDValue &Base, SDValue &Offset) {
  return IsAddressingMode5(N, Base, Offset, /*FP16=*/ false);
}

bool ARMDAGToDAGISel::SelectAddrMode5FP16(SDValue N,
                                          SDValue &Base, SDValue &Offset) {
  return IsAddressingMode5(N, Base, Offset, /*FP16=*/ true);
}

bool ARMDAGToDAGISel::SelectAddrMode6(SDNode *Parent, SDValue N, SDValue &Addr,
                                      SDValue &Align) {
  Addr = N;

  unsigned Alignment = 0;

  MemSDNode *MemN = cast<MemSDNode>(Parent);

  if (isa<LSBaseSDNode>(MemN) ||
      ((MemN->getOpcode() == ARMISD::VST1_UPD ||
        MemN->getOpcode() == ARMISD::VLD1_UPD) &&
       MemN->getConstantOperandVal(MemN->getNumOperands() - 1) == 1)) {
    // This case occurs only for VLD1-lane/dup and VST1-lane instructions.
    // The maximum alignment is equal to the memory size being referenced.
    llvm::Align MMOAlign = MemN->getAlign();
    unsigned MemSize = MemN->getMemoryVT().getSizeInBits() / 8;
    if (MMOAlign.value() >= MemSize && MemSize > 1)
      Alignment = MemSize;
  } else {
    // All other uses of addrmode6 are for intrinsics. For now just record
    // the raw alignment value; it will be refined later based on the legal
    // alignment operands for the intrinsic.
    Alignment = MemN->getAlign().value();
  }

  Align = CurDAG->getTargetConstant(Alignment, SDLoc(N), MVT::i32);
  return true;
}

bool ARMDAGToDAGISel::SelectAddrMode6Offset(SDNode *Op, SDValue N,
                                            SDValue &Offset) {
  LSBaseSDNode *LdSt = cast<LSBaseSDNode>(Op);
  ISD::MemIndexedMode AM = LdSt->getAddressingMode();
  if (AM != ISD::POST_INC)
    return false;
  Offset = N;
  if (ConstantSDNode *NC = dyn_cast<ConstantSDNode>(N)) {
    if (NC->getZExtValue() * 8 == LdSt->getMemoryVT().getSizeInBits())
      Offset = CurDAG->getRegister(0, MVT::i32);
  }
  return true;
}

bool ARMDAGToDAGISel::SelectAddrModePC(SDValue N,
                                       SDValue &Offset, SDValue &Label) {
  if (N.getOpcode() == ARMISD::PIC_ADD && N.hasOneUse()) {
    Offset = N.getOperand(0);
    SDValue N1 = N.getOperand(1);
    Label = CurDAG->getTargetConstant(cast<ConstantSDNode>(N1)->getZExtValue(),
                                      SDLoc(N), MVT::i32);
    return true;
  }

  return false;
}

//===----------------------------------------------------------------------===//
//                         Thumb Addressing Modes
//===----------------------------------------------------------------------===//

static bool shouldUseZeroOffsetLdSt(SDValue N) {
  // Negative numbers are difficult to materialise in thumb1. If we are
  // selecting the add of a negative, instead try to select ri with a zero
  // offset, so create the add node directly, which will become a sub.
  if (N.getOpcode() != ISD::ADD)
    return false;

  // Look for an imm which is not legal for ld/st, but is legal for sub.
  if (auto C = dyn_cast<ConstantSDNode>(N.getOperand(1)))
    return C->getSExtValue() < 0 && C->getSExtValue() >= -255;

  return false;
}

bool ARMDAGToDAGISel::SelectThumbAddrModeRRSext(SDValue N, SDValue &Base,
                                                SDValue &Offset) {
  if (N.getOpcode() != ISD::ADD && !CurDAG->isBaseWithConstantOffset(N)) {
    ConstantSDNode *NC = dyn_cast<ConstantSDNode>(N);
    if (!NC || !NC->isZero())
      return false;

    Base = Offset = N;
    return true;
  }

  Base = N.getOperand(0);
  Offset = N.getOperand(1);
  return true;
}

bool ARMDAGToDAGISel::SelectThumbAddrModeRR(SDValue N, SDValue &Base,
                                            SDValue &Offset) {
  if (shouldUseZeroOffsetLdSt(N))
    return false; // Select ri instead
  return SelectThumbAddrModeRRSext(N, Base, Offset);
}

bool
ARMDAGToDAGISel::SelectThumbAddrModeImm5S(SDValue N, unsigned Scale,
                                          SDValue &Base, SDValue &OffImm) {
  if (shouldUseZeroOffsetLdSt(N)) {
    Base = N;
    OffImm = CurDAG->getTargetConstant(0, SDLoc(N), MVT::i32);
    return true;
  }

  if (!CurDAG->isBaseWithConstantOffset(N)) {
    if (N.getOpcode() == ISD::ADD) {
      return false; // We want to select register offset instead
    } else if (N.getOpcode() == ARMISD::Wrapper &&
               N.getOperand(0).getOpcode() != ISD::TargetGlobalAddress &&
               N.getOperand(0).getOpcode() != ISD::TargetExternalSymbol &&
               N.getOperand(0).getOpcode() != ISD::TargetConstantPool &&
               N.getOperand(0).getOpcode() != ISD::TargetGlobalTLSAddress) {
      Base = N.getOperand(0);
    } else {
      Base = N;
    }

    OffImm = CurDAG->getTargetConstant(0, SDLoc(N), MVT::i32);
    return true;
  }

  // If the RHS is + imm5 * scale, fold into addr mode.
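  // The imm5 field covers 0-31 units of Scale, so e.g. word accesses
  // (Scale == 4) can reach byte offsets 0 to 124.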
  int RHSC;
  if (isScaledConstantInRange(N.getOperand(1), Scale, 0, 32, RHSC)) {
    Base = N.getOperand(0);
    OffImm = CurDAG->getTargetConstant(RHSC, SDLoc(N), MVT::i32);
    return true;
  }

  // Offset is too large, so use register offset instead.
  return false;
}

bool
ARMDAGToDAGISel::SelectThumbAddrModeImm5S4(SDValue N, SDValue &Base,
                                           SDValue &OffImm) {
  return SelectThumbAddrModeImm5S(N, 4, Base, OffImm);
}

bool
ARMDAGToDAGISel::SelectThumbAddrModeImm5S2(SDValue N, SDValue &Base,
                                           SDValue &OffImm) {
  return SelectThumbAddrModeImm5S(N, 2, Base, OffImm);
}

bool
ARMDAGToDAGISel::SelectThumbAddrModeImm5S1(SDValue N, SDValue &Base,
                                           SDValue &OffImm) {
  return SelectThumbAddrModeImm5S(N, 1, Base, OffImm);
}

bool ARMDAGToDAGISel::SelectThumbAddrModeSP(SDValue N,
                                            SDValue &Base, SDValue &OffImm) {
  if (N.getOpcode() == ISD::FrameIndex) {
    int FI = cast<FrameIndexSDNode>(N)->getIndex();
    // Only multiples of 4 are allowed for the offset, so the frame object
    // alignment must be at least 4.
    MachineFrameInfo &MFI = MF->getFrameInfo();
    if (MFI.getObjectAlign(FI) < Align(4))
      MFI.setObjectAlignment(FI, Align(4));
    Base = CurDAG->getTargetFrameIndex(
        FI, TLI->getPointerTy(CurDAG->getDataLayout()));
    OffImm = CurDAG->getTargetConstant(0, SDLoc(N), MVT::i32);
    return true;
  }

  if (!CurDAG->isBaseWithConstantOffset(N))
    return false;

  if (N.getOperand(0).getOpcode() == ISD::FrameIndex) {
    // If the RHS is + imm8 * scale, fold into addr mode.
    int RHSC;
    if (isScaledConstantInRange(N.getOperand(1), /*Scale=*/4, 0, 256, RHSC)) {
      Base = N.getOperand(0);
      int FI = cast<FrameIndexSDNode>(Base)->getIndex();
      // Make sure the offset is inside the object, or we might fail to
      // allocate an emergency spill slot. (An out-of-range access is UB, but
      // it could show up anyway.)
      MachineFrameInfo &MFI = MF->getFrameInfo();
      if (RHSC * 4 < MFI.getObjectSize(FI)) {
        // For LHS+RHS to result in an offset that's a multiple of 4 the object
        // indexed by the LHS must be 4-byte aligned.
        if (!MFI.isFixedObjectIndex(FI) && MFI.getObjectAlign(FI) < Align(4))
          MFI.setObjectAlignment(FI, Align(4));
        if (MFI.getObjectAlign(FI) >= Align(4)) {
          Base = CurDAG->getTargetFrameIndex(
              FI, TLI->getPointerTy(CurDAG->getDataLayout()));
          OffImm = CurDAG->getTargetConstant(RHSC, SDLoc(N), MVT::i32);
          return true;
        }
      }
    }
  }

  return false;
}

template <unsigned Shift>
bool ARMDAGToDAGISel::SelectTAddrModeImm7(SDValue N, SDValue &Base,
                                          SDValue &OffImm) {
  if (N.getOpcode() == ISD::SUB || CurDAG->isBaseWithConstantOffset(N)) {
    int RHSC;
    if (isScaledConstantInRange(N.getOperand(1), 1 << Shift, -0x7f, 0x80,
                                RHSC)) {
      Base = N.getOperand(0);
      if (N.getOpcode() == ISD::SUB)
        RHSC = -RHSC;
      OffImm =
          CurDAG->getTargetConstant(RHSC * (1 << Shift), SDLoc(N), MVT::i32);
      return true;
    }
  }

  // Base only.
  Base = N;
  OffImm = CurDAG->getTargetConstant(0, SDLoc(N), MVT::i32);
  return true;
}

//===----------------------------------------------------------------------===//
//                        Thumb 2 Addressing Modes
//===----------------------------------------------------------------------===//

bool ARMDAGToDAGISel::SelectT2AddrModeImm12(SDValue N,
                                            SDValue &Base, SDValue &OffImm) {
  // Match simple R + imm12 operands.

  // Base only.
  if (N.getOpcode() != ISD::ADD && N.getOpcode() != ISD::SUB &&
      !CurDAG->isBaseWithConstantOffset(N)) {
    if (N.getOpcode() == ISD::FrameIndex) {
      // Match frame index.
      int FI = cast<FrameIndexSDNode>(N)->getIndex();
      Base = CurDAG->getTargetFrameIndex(
          FI, TLI->getPointerTy(CurDAG->getDataLayout()));
      OffImm = CurDAG->getTargetConstant(0, SDLoc(N), MVT::i32);
      return true;
    }

    if (N.getOpcode() == ARMISD::Wrapper &&
        N.getOperand(0).getOpcode() != ISD::TargetGlobalAddress &&
        N.getOperand(0).getOpcode() != ISD::TargetExternalSymbol &&
        N.getOperand(0).getOpcode() != ISD::TargetGlobalTLSAddress) {
      Base = N.getOperand(0);
      if (Base.getOpcode() == ISD::TargetConstantPool)
        return false; // We want to select t2LDRpci instead.
    } else
      Base = N;
    OffImm = CurDAG->getTargetConstant(0, SDLoc(N), MVT::i32);
    return true;
  }

  if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
    if (SelectT2AddrModeImm8(N, Base, OffImm))
      // Let t2LDRi8 handle (R - imm8).
      return false;

    int RHSC = (int)RHS->getZExtValue();
    if (N.getOpcode() == ISD::SUB)
      RHSC = -RHSC;

    if (RHSC >= 0 && RHSC < 0x1000) { // 12 bits (unsigned)
      Base = N.getOperand(0);
      if (Base.getOpcode() == ISD::FrameIndex) {
        int FI = cast<FrameIndexSDNode>(Base)->getIndex();
        Base = CurDAG->getTargetFrameIndex(
            FI, TLI->getPointerTy(CurDAG->getDataLayout()));
      }
      OffImm = CurDAG->getTargetConstant(RHSC, SDLoc(N), MVT::i32);
      return true;
    }
  }

  // Base only.
  Base = N;
  OffImm = CurDAG->getTargetConstant(0, SDLoc(N), MVT::i32);
  return true;
}

template <unsigned Shift>
bool ARMDAGToDAGISel::SelectT2AddrModeImm8(SDValue N, SDValue &Base,
                                           SDValue &OffImm) {
  if (N.getOpcode() == ISD::SUB || CurDAG->isBaseWithConstantOffset(N)) {
    int RHSC;
    if (isScaledConstantInRange(N.getOperand(1), 1 << Shift, -255, 256,
                                RHSC)) {
      Base = N.getOperand(0);
      if (Base.getOpcode() == ISD::FrameIndex) {
        int FI = cast<FrameIndexSDNode>(Base)->getIndex();
        Base = CurDAG->getTargetFrameIndex(
            FI, TLI->getPointerTy(CurDAG->getDataLayout()));
      }

      if (N.getOpcode() == ISD::SUB)
        RHSC = -RHSC;
      OffImm =
          CurDAG->getTargetConstant(RHSC * (1 << Shift), SDLoc(N), MVT::i32);
      return true;
    }
  }

  // Base only.
  Base = N;
  OffImm = CurDAG->getTargetConstant(0, SDLoc(N), MVT::i32);
  return true;
}

bool ARMDAGToDAGISel::SelectT2AddrModeImm8(SDValue N,
                                           SDValue &Base, SDValue &OffImm) {
  // Match simple R - imm8 operands.
  if (N.getOpcode() != ISD::ADD && N.getOpcode() != ISD::SUB &&
      !CurDAG->isBaseWithConstantOffset(N))
    return false;

  if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
    int RHSC = (int)RHS->getSExtValue();
    if (N.getOpcode() == ISD::SUB)
      RHSC = -RHSC;

    if ((RHSC >= -255) && (RHSC < 0)) { // 8 bits (always negative)
      Base = N.getOperand(0);
      if (Base.getOpcode() == ISD::FrameIndex) {
        int FI = cast<FrameIndexSDNode>(Base)->getIndex();
        Base = CurDAG->getTargetFrameIndex(
            FI, TLI->getPointerTy(CurDAG->getDataLayout()));
      }
      OffImm = CurDAG->getTargetConstant(RHSC, SDLoc(N), MVT::i32);
      return true;
    }
  }

  return false;
}

bool ARMDAGToDAGISel::SelectT2AddrModeImm8Offset(SDNode *Op, SDValue N,
                                                 SDValue &OffImm) {
  unsigned Opcode = Op->getOpcode();
  ISD::MemIndexedMode AM = (Opcode == ISD::LOAD)
    ? cast<LoadSDNode>(Op)->getAddressingMode()
    : cast<StoreSDNode>(Op)->getAddressingMode();
  int RHSC;
  if (isScaledConstantInRange(N, /*Scale=*/1, 0, 0x100, RHSC)) { // 8 bits.
    OffImm = ((AM == ISD::PRE_INC) || (AM == ISD::POST_INC))
                 ? CurDAG->getTargetConstant(RHSC, SDLoc(N), MVT::i32)
                 : CurDAG->getTargetConstant(-RHSC, SDLoc(N), MVT::i32);
    return true;
  }

  return false;
}

template <unsigned Shift>
bool ARMDAGToDAGISel::SelectT2AddrModeImm7(SDValue N, SDValue &Base,
                                           SDValue &OffImm) {
  if (N.getOpcode() == ISD::SUB || CurDAG->isBaseWithConstantOffset(N)) {
    int RHSC;
    if (isScaledConstantInRange(N.getOperand(1), 1 << Shift, -0x7f, 0x80,
                                RHSC)) {
      Base = N.getOperand(0);
      if (Base.getOpcode() == ISD::FrameIndex) {
        int FI = cast<FrameIndexSDNode>(Base)->getIndex();
        Base = CurDAG->getTargetFrameIndex(
            FI, TLI->getPointerTy(CurDAG->getDataLayout()));
      }

      if (N.getOpcode() == ISD::SUB)
        RHSC = -RHSC;
      OffImm =
          CurDAG->getTargetConstant(RHSC * (1 << Shift), SDLoc(N), MVT::i32);
      return true;
    }
  }

  // Base only.
  Base = N;
  OffImm = CurDAG->getTargetConstant(0, SDLoc(N), MVT::i32);
  return true;
}

template <unsigned Shift>
bool ARMDAGToDAGISel::SelectT2AddrModeImm7Offset(SDNode *Op, SDValue N,
                                                 SDValue &OffImm) {
  return SelectT2AddrModeImm7Offset(Op, N, OffImm, Shift);
}

bool ARMDAGToDAGISel::SelectT2AddrModeImm7Offset(SDNode *Op, SDValue N,
                                                 SDValue &OffImm,
                                                 unsigned Shift) {
  unsigned Opcode = Op->getOpcode();
  ISD::MemIndexedMode AM;
  switch (Opcode) {
  case ISD::LOAD:
    AM = cast<LoadSDNode>(Op)->getAddressingMode();
    break;
  case ISD::STORE:
    AM = cast<StoreSDNode>(Op)->getAddressingMode();
    break;
  case ISD::MLOAD:
    AM = cast<MaskedLoadSDNode>(Op)->getAddressingMode();
    break;
  case ISD::MSTORE:
    AM = cast<MaskedStoreSDNode>(Op)->getAddressingMode();
    break;
  default:
    llvm_unreachable("Unexpected Opcode for Imm7Offset");
  }

  int RHSC;
  // 7 bit constant, shifted by Shift.
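  // e.g. with Shift == 2 the legal byte offsets are 0, 4, ..., 508.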
1468 if (isScaledConstantInRange(N, 1 << Shift, 0, 0x80, RHSC)) {
1469 OffImm =
1470 ((AM == ISD::PRE_INC) || (AM == ISD::POST_INC))
1471 ? CurDAG->getTargetConstant(RHSC * (1 << Shift), SDLoc(N), MVT::i32)
1472 : CurDAG->getTargetConstant(-RHSC * (1 << Shift), SDLoc(N),
1473 MVT::i32);
1474 return true;
1475 }
1476 return false;
1477 }
1478
1479 template <int Min, int Max>
SelectImmediateInRange(SDValue N,SDValue & OffImm)1480 bool ARMDAGToDAGISel::SelectImmediateInRange(SDValue N, SDValue &OffImm) {
1481 int Val;
1482 if (isScaledConstantInRange(N, 1, Min, Max, Val)) {
1483 OffImm = CurDAG->getTargetConstant(Val, SDLoc(N), MVT::i32);
1484 return true;
1485 }
1486 return false;
1487 }
1488
SelectT2AddrModeSoReg(SDValue N,SDValue & Base,SDValue & OffReg,SDValue & ShImm)1489 bool ARMDAGToDAGISel::SelectT2AddrModeSoReg(SDValue N,
1490 SDValue &Base,
1491 SDValue &OffReg, SDValue &ShImm) {
1492 // (R - imm8) should be handled by t2LDRi8. The rest are handled by t2LDRi12.
1493 if (N.getOpcode() != ISD::ADD && !CurDAG->isBaseWithConstantOffset(N))
1494 return false;
1495
1496 // Leave (R + imm12) for t2LDRi12, (R - imm8) for t2LDRi8.
1497 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
1498 int RHSC = (int)RHS->getZExtValue();
1499 if (RHSC >= 0 && RHSC < 0x1000) // 12 bits (unsigned)
1500 return false;
1501 else if (RHSC < 0 && RHSC >= -255) // 8 bits
1502 return false;
1503 }
1504
1505 // Look for (R + R) or (R + (R << [1,2,3])).
1506 unsigned ShAmt = 0;
1507 Base = N.getOperand(0);
1508 OffReg = N.getOperand(1);
1509
1510 // Swap if it is ((R << c) + R).
1511 ARM_AM::ShiftOpc ShOpcVal = ARM_AM::getShiftOpcForNode(OffReg.getOpcode());
1512 if (ShOpcVal != ARM_AM::lsl) {
1513 ShOpcVal = ARM_AM::getShiftOpcForNode(Base.getOpcode());
1514 if (ShOpcVal == ARM_AM::lsl)
1515 std::swap(Base, OffReg);
1516 }
1517
1518 if (ShOpcVal == ARM_AM::lsl) {
1519 // Check whether the RHS of the shift is a constant; if not, we can't
1520 // fold it.
1521 if (ConstantSDNode *Sh = dyn_cast<ConstantSDNode>(OffReg.getOperand(1))) {
1522 ShAmt = Sh->getZExtValue();
1523 if (ShAmt < 4 && isShifterOpProfitable(OffReg, ShOpcVal, ShAmt))
1524 OffReg = OffReg.getOperand(0);
1525 else {
1526 ShAmt = 0;
1527 }
1528 }
1529 }
1530
1531 // If OffReg is a multiply-by-constant and it's profitable to extract a
1532 // shift and use it in a shifted operand, do so.
1533 if (OffReg.getOpcode() == ISD::MUL && N.hasOneUse()) {
1534 unsigned PowerOfTwo = 0;
1535 SDValue NewMulConst;
1536 if (canExtractShiftFromMul(OffReg, 3, PowerOfTwo, NewMulConst)) {
1537 HandleSDNode Handle(OffReg);
1538 replaceDAGValue(OffReg.getOperand(1), NewMulConst);
1539 OffReg = Handle.getValue();
1540 ShAmt = PowerOfTwo;
1541 }
1542 }
1543
1544 ShImm = CurDAG->getTargetConstant(ShAmt, SDLoc(N), MVT::i32);
1545
1546 return true;
1547 }
1548
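/// SelectT2AddrModeExclusive - Match the address of a ldrex/strex, whose
/// offset must be a multiple of 4 in the range [0, 1020]. Always
/// succeeds, degrading to a zero offset when the constant does not fit.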
1549 bool ARMDAGToDAGISel::SelectT2AddrModeExclusive(SDValue N, SDValue &Base,
1550 SDValue &OffImm) {
1551 // This *must* succeed since it's used for the irreplaceable ldrex and strex
1552 // instructions.
1553 Base = N;
1554 OffImm = CurDAG->getTargetConstant(0, SDLoc(N), MVT::i32);
1555
1556 if (N.getOpcode() != ISD::ADD || !CurDAG->isBaseWithConstantOffset(N))
1557 return true;
1558
1559 ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1));
1560 if (!RHS)
1561 return true;
1562
1563 uint32_t RHSC = (int)RHS->getZExtValue();
1564 if (RHSC > 1020 || RHSC % 4 != 0)
1565 return true;
1566
1567 Base = N.getOperand(0);
1568 if (Base.getOpcode() == ISD::FrameIndex) {
1569 int FI = cast<FrameIndexSDNode>(Base)->getIndex();
1570 Base = CurDAG->getTargetFrameIndex(
1571 FI, TLI->getPointerTy(CurDAG->getDataLayout()));
1572 }
1573
1574 OffImm = CurDAG->getTargetConstant(RHSC/4, SDLoc(N), MVT::i32);
1575 return true;
1576 }
1577
1578 //===--------------------------------------------------------------------===//
1579
1580 /// getAL - Returns an ARMCC::AL immediate node.
1581 static inline SDValue getAL(SelectionDAG *CurDAG, const SDLoc &dl) {
1582 return CurDAG->getTargetConstant((uint64_t)ARMCC::AL, dl, MVT::i32);
1583 }
1584
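/// Copy the memory operand of the original memory access onto the newly
/// selected machine node.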
1585 void ARMDAGToDAGISel::transferMemOperands(SDNode *N, SDNode *Result) {
1586 MachineMemOperand *MemOp = cast<MemSDNode>(N)->getMemOperand();
1587 CurDAG->setNodeMemRefs(cast<MachineSDNode>(Result), {MemOp});
1588 }
1589
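/// tryARMIndexedLoad - Try to select a pre/post-indexed ARM-mode load,
/// picking the immediate or register form of LDR/LDRB/LDRH/LDRSH/LDRSB
/// based on the loaded type and the offset operand.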
1590 bool ARMDAGToDAGISel::tryARMIndexedLoad(SDNode *N) {
1591 LoadSDNode *LD = cast<LoadSDNode>(N);
1592 ISD::MemIndexedMode AM = LD->getAddressingMode();
1593 if (AM == ISD::UNINDEXED)
1594 return false;
1595
1596 EVT LoadedVT = LD->getMemoryVT();
1597 SDValue Offset, AMOpc;
1598 bool isPre = (AM == ISD::PRE_INC) || (AM == ISD::PRE_DEC);
1599 unsigned Opcode = 0;
1600 bool Match = false;
1601 if (LoadedVT == MVT::i32 && isPre &&
1602 SelectAddrMode2OffsetImmPre(N, LD->getOffset(), Offset, AMOpc)) {
1603 Opcode = ARM::LDR_PRE_IMM;
1604 Match = true;
1605 } else if (LoadedVT == MVT::i32 && !isPre &&
1606 SelectAddrMode2OffsetImm(N, LD->getOffset(), Offset, AMOpc)) {
1607 Opcode = ARM::LDR_POST_IMM;
1608 Match = true;
1609 } else if (LoadedVT == MVT::i32 &&
1610 SelectAddrMode2OffsetReg(N, LD->getOffset(), Offset, AMOpc)) {
1611 Opcode = isPre ? ARM::LDR_PRE_REG : ARM::LDR_POST_REG;
1612 Match = true;
1613
1614 } else if (LoadedVT == MVT::i16 &&
1615 SelectAddrMode3Offset(N, LD->getOffset(), Offset, AMOpc)) {
1616 Match = true;
1617 Opcode = (LD->getExtensionType() == ISD::SEXTLOAD)
1618 ? (isPre ? ARM::LDRSH_PRE : ARM::LDRSH_POST)
1619 : (isPre ? ARM::LDRH_PRE : ARM::LDRH_POST);
1620 } else if (LoadedVT == MVT::i8 || LoadedVT == MVT::i1) {
1621 if (LD->getExtensionType() == ISD::SEXTLOAD) {
1622 if (SelectAddrMode3Offset(N, LD->getOffset(), Offset, AMOpc)) {
1623 Match = true;
1624 Opcode = isPre ? ARM::LDRSB_PRE : ARM::LDRSB_POST;
1625 }
1626 } else {
1627 if (isPre &&
1628 SelectAddrMode2OffsetImmPre(N, LD->getOffset(), Offset, AMOpc)) {
1629 Match = true;
1630 Opcode = ARM::LDRB_PRE_IMM;
1631 } else if (!isPre &&
1632 SelectAddrMode2OffsetImm(N, LD->getOffset(), Offset, AMOpc)) {
1633 Match = true;
1634 Opcode = ARM::LDRB_POST_IMM;
1635 } else if (SelectAddrMode2OffsetReg(N, LD->getOffset(), Offset, AMOpc)) {
1636 Match = true;
1637 Opcode = isPre ? ARM::LDRB_PRE_REG : ARM::LDRB_POST_REG;
1638 }
1639 }
1640 }
1641
1642 if (Match) {
1643 if (Opcode == ARM::LDR_PRE_IMM || Opcode == ARM::LDRB_PRE_IMM) {
1644 SDValue Chain = LD->getChain();
1645 SDValue Base = LD->getBasePtr();
1646 SDValue Ops[]= { Base, AMOpc, getAL(CurDAG, SDLoc(N)),
1647 CurDAG->getRegister(0, MVT::i32), Chain };
1648 SDNode *New = CurDAG->getMachineNode(Opcode, SDLoc(N), MVT::i32, MVT::i32,
1649 MVT::Other, Ops);
1650 transferMemOperands(N, New);
1651 ReplaceNode(N, New);
1652 return true;
1653 } else {
1654 SDValue Chain = LD->getChain();
1655 SDValue Base = LD->getBasePtr();
1656 SDValue Ops[]= { Base, Offset, AMOpc, getAL(CurDAG, SDLoc(N)),
1657 CurDAG->getRegister(0, MVT::i32), Chain };
1658 SDNode *New = CurDAG->getMachineNode(Opcode, SDLoc(N), MVT::i32, MVT::i32,
1659 MVT::Other, Ops);
1660 transferMemOperands(N, New);
1661 ReplaceNode(N, New);
1662 return true;
1663 }
1664 }
1665
1666 return false;
1667 }
1668
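/// tryT1IndexedLoad - Try to select a Thumb-1 post-indexed i32 load; only
/// a post-increment of exactly 4 can be matched.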
1669 bool ARMDAGToDAGISel::tryT1IndexedLoad(SDNode *N) {
1670 LoadSDNode *LD = cast<LoadSDNode>(N);
1671 EVT LoadedVT = LD->getMemoryVT();
1672 ISD::MemIndexedMode AM = LD->getAddressingMode();
1673 if (AM != ISD::POST_INC || LD->getExtensionType() != ISD::NON_EXTLOAD ||
1674 LoadedVT.getSimpleVT().SimpleTy != MVT::i32)
1675 return false;
1676
1677 auto *COffs = dyn_cast<ConstantSDNode>(LD->getOffset());
1678 if (!COffs || COffs->getZExtValue() != 4)
1679 return false;
1680
1681 // A T1 post-indexed load is just a single register LDM: LDM r0!, {r1}.
1682 // The encoding of LDM is not how the rest of ISel expects a post-inc load to
1683 // look however, so we use a pseudo here and switch it for a tLDMIA_UPD after
1684 // ISel.
1685 SDValue Chain = LD->getChain();
1686 SDValue Base = LD->getBasePtr();
1687 SDValue Ops[]= { Base, getAL(CurDAG, SDLoc(N)),
1688 CurDAG->getRegister(0, MVT::i32), Chain };
1689 SDNode *New = CurDAG->getMachineNode(ARM::tLDR_postidx, SDLoc(N), MVT::i32,
1690 MVT::i32, MVT::Other, Ops);
1691 transferMemOperands(N, New);
1692 ReplaceNode(N, New);
1693 return true;
1694 }
1695
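/// tryT2IndexedLoad - Try to select a pre/post-indexed Thumb-2 load using
/// the t2LDR*_PRE/t2LDR*_POST forms with an 8-bit offset.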
1696 bool ARMDAGToDAGISel::tryT2IndexedLoad(SDNode *N) {
1697 LoadSDNode *LD = cast<LoadSDNode>(N);
1698 ISD::MemIndexedMode AM = LD->getAddressingMode();
1699 if (AM == ISD::UNINDEXED)
1700 return false;
1701
1702 EVT LoadedVT = LD->getMemoryVT();
1703 bool isSExtLd = LD->getExtensionType() == ISD::SEXTLOAD;
1704 SDValue Offset;
1705 bool isPre = (AM == ISD::PRE_INC) || (AM == ISD::PRE_DEC);
1706 unsigned Opcode = 0;
1707 bool Match = false;
1708 if (SelectT2AddrModeImm8Offset(N, LD->getOffset(), Offset)) {
1709 switch (LoadedVT.getSimpleVT().SimpleTy) {
1710 case MVT::i32:
1711 Opcode = isPre ? ARM::t2LDR_PRE : ARM::t2LDR_POST;
1712 break;
1713 case MVT::i16:
1714 if (isSExtLd)
1715 Opcode = isPre ? ARM::t2LDRSH_PRE : ARM::t2LDRSH_POST;
1716 else
1717 Opcode = isPre ? ARM::t2LDRH_PRE : ARM::t2LDRH_POST;
1718 break;
1719 case MVT::i8:
1720 case MVT::i1:
1721 if (isSExtLd)
1722 Opcode = isPre ? ARM::t2LDRSB_PRE : ARM::t2LDRSB_POST;
1723 else
1724 Opcode = isPre ? ARM::t2LDRB_PRE : ARM::t2LDRB_POST;
1725 break;
1726 default:
1727 return false;
1728 }
1729 Match = true;
1730 }
1731
1732 if (Match) {
1733 SDValue Chain = LD->getChain();
1734 SDValue Base = LD->getBasePtr();
1735 SDValue Ops[]= { Base, Offset, getAL(CurDAG, SDLoc(N)),
1736 CurDAG->getRegister(0, MVT::i32), Chain };
1737 SDNode *New = CurDAG->getMachineNode(Opcode, SDLoc(N), MVT::i32, MVT::i32,
1738 MVT::Other, Ops);
1739 transferMemOperands(N, New);
1740 ReplaceNode(N, New);
1741 return true;
1742 }
1743
1744 return false;
1745 }
1746
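/// tryMVEIndexedLoad - Try to select a pre/post-indexed MVE vector load
/// or masked load as an MVE_VLDR*_pre/_post with a 7-bit scaled offset.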
1747 bool ARMDAGToDAGISel::tryMVEIndexedLoad(SDNode *N) {
1748 EVT LoadedVT;
1749 unsigned Opcode = 0;
1750 bool isSExtLd, isPre;
1751 Align Alignment;
1752 ARMVCC::VPTCodes Pred;
1753 SDValue PredReg;
1754 SDValue Chain, Base, Offset;
1755
1756 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
1757 ISD::MemIndexedMode AM = LD->getAddressingMode();
1758 if (AM == ISD::UNINDEXED)
1759 return false;
1760 LoadedVT = LD->getMemoryVT();
1761 if (!LoadedVT.isVector())
1762 return false;
1763
1764 Chain = LD->getChain();
1765 Base = LD->getBasePtr();
1766 Offset = LD->getOffset();
1767 Alignment = LD->getAlign();
1768 isSExtLd = LD->getExtensionType() == ISD::SEXTLOAD;
1769 isPre = (AM == ISD::PRE_INC) || (AM == ISD::PRE_DEC);
1770 Pred = ARMVCC::None;
1771 PredReg = CurDAG->getRegister(0, MVT::i32);
1772 } else if (MaskedLoadSDNode *LD = dyn_cast<MaskedLoadSDNode>(N)) {
1773 ISD::MemIndexedMode AM = LD->getAddressingMode();
1774 if (AM == ISD::UNINDEXED)
1775 return false;
1776 LoadedVT = LD->getMemoryVT();
1777 if (!LoadedVT.isVector())
1778 return false;
1779
1780 Chain = LD->getChain();
1781 Base = LD->getBasePtr();
1782 Offset = LD->getOffset();
1783 Alignment = LD->getAlign();
1784 isSExtLd = LD->getExtensionType() == ISD::SEXTLOAD;
1785 isPre = (AM == ISD::PRE_INC) || (AM == ISD::PRE_DEC);
1786 Pred = ARMVCC::Then;
1787 PredReg = LD->getMask();
1788 } else
1789 llvm_unreachable("Expected a Load or a Masked Load!");
1790
1791 // We allow LE non-masked loads to change the type (for example use a vldrb.8
1792 // as opposed to a vldrw.32). This can allow extra addressing modes or
1793 // alignments for what is otherwise an equivalent instruction.
1794 bool CanChangeType = Subtarget->isLittle() && !isa<MaskedLoadSDNode>(N);
1795
1796 SDValue NewOffset;
1797 if (Alignment >= Align(2) && LoadedVT == MVT::v4i16 &&
1798 SelectT2AddrModeImm7Offset(N, Offset, NewOffset, 1)) {
1799 if (isSExtLd)
1800 Opcode = isPre ? ARM::MVE_VLDRHS32_pre : ARM::MVE_VLDRHS32_post;
1801 else
1802 Opcode = isPre ? ARM::MVE_VLDRHU32_pre : ARM::MVE_VLDRHU32_post;
1803 } else if (LoadedVT == MVT::v8i8 &&
1804 SelectT2AddrModeImm7Offset(N, Offset, NewOffset, 0)) {
1805 if (isSExtLd)
1806 Opcode = isPre ? ARM::MVE_VLDRBS16_pre : ARM::MVE_VLDRBS16_post;
1807 else
1808 Opcode = isPre ? ARM::MVE_VLDRBU16_pre : ARM::MVE_VLDRBU16_post;
1809 } else if (LoadedVT == MVT::v4i8 &&
1810 SelectT2AddrModeImm7Offset(N, Offset, NewOffset, 0)) {
1811 if (isSExtLd)
1812 Opcode = isPre ? ARM::MVE_VLDRBS32_pre : ARM::MVE_VLDRBS32_post;
1813 else
1814 Opcode = isPre ? ARM::MVE_VLDRBU32_pre : ARM::MVE_VLDRBU32_post;
1815 } else if (Alignment >= Align(4) &&
1816 (CanChangeType || LoadedVT == MVT::v4i32 ||
1817 LoadedVT == MVT::v4f32) &&
1818 SelectT2AddrModeImm7Offset(N, Offset, NewOffset, 2))
1819 Opcode = isPre ? ARM::MVE_VLDRWU32_pre : ARM::MVE_VLDRWU32_post;
1820 else if (Alignment >= Align(2) &&
1821 (CanChangeType || LoadedVT == MVT::v8i16 ||
1822 LoadedVT == MVT::v8f16) &&
1823 SelectT2AddrModeImm7Offset(N, Offset, NewOffset, 1))
1824 Opcode = isPre ? ARM::MVE_VLDRHU16_pre : ARM::MVE_VLDRHU16_post;
1825 else if ((CanChangeType || LoadedVT == MVT::v16i8) &&
1826 SelectT2AddrModeImm7Offset(N, Offset, NewOffset, 0))
1827 Opcode = isPre ? ARM::MVE_VLDRBU8_pre : ARM::MVE_VLDRBU8_post;
1828 else
1829 return false;
1830
1831 SDValue Ops[] = {Base,
1832 NewOffset,
1833 CurDAG->getTargetConstant(Pred, SDLoc(N), MVT::i32),
1834 PredReg,
1835 CurDAG->getRegister(0, MVT::i32), // tp_reg
1836 Chain};
1837 SDNode *New = CurDAG->getMachineNode(Opcode, SDLoc(N), MVT::i32,
1838 N->getValueType(0), MVT::Other, Ops);
1839 transferMemOperands(N, New);
1840 ReplaceUses(SDValue(N, 0), SDValue(New, 1));
1841 ReplaceUses(SDValue(N, 1), SDValue(New, 0));
1842 ReplaceUses(SDValue(N, 2), SDValue(New, 2));
1843 CurDAG->RemoveDeadNode(N);
1844 return true;
1845 }
1846
1847 /// Form a GPRPair pseudo register from a pair of GPR regs.
1848 SDNode *ARMDAGToDAGISel::createGPRPairNode(EVT VT, SDValue V0, SDValue V1) {
1849 SDLoc dl(V0.getNode());
1850 SDValue RegClass =
1851 CurDAG->getTargetConstant(ARM::GPRPairRegClassID, dl, MVT::i32);
1852 SDValue SubReg0 = CurDAG->getTargetConstant(ARM::gsub_0, dl, MVT::i32);
1853 SDValue SubReg1 = CurDAG->getTargetConstant(ARM::gsub_1, dl, MVT::i32);
1854 const SDValue Ops[] = { RegClass, V0, SubReg0, V1, SubReg1 };
1855 return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, dl, VT, Ops);
1856 }
1857
1858 /// Form a D register from a pair of S registers.
1859 SDNode *ARMDAGToDAGISel::createSRegPairNode(EVT VT, SDValue V0, SDValue V1) {
1860 SDLoc dl(V0.getNode());
1861 SDValue RegClass =
1862 CurDAG->getTargetConstant(ARM::DPR_VFP2RegClassID, dl, MVT::i32);
1863 SDValue SubReg0 = CurDAG->getTargetConstant(ARM::ssub_0, dl, MVT::i32);
1864 SDValue SubReg1 = CurDAG->getTargetConstant(ARM::ssub_1, dl, MVT::i32);
1865 const SDValue Ops[] = { RegClass, V0, SubReg0, V1, SubReg1 };
1866 return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, dl, VT, Ops);
1867 }
1868
1869 /// Form a quad register from a pair of D registers.
1870 SDNode *ARMDAGToDAGISel::createDRegPairNode(EVT VT, SDValue V0, SDValue V1) {
1871 SDLoc dl(V0.getNode());
1872 SDValue RegClass = CurDAG->getTargetConstant(ARM::QPRRegClassID, dl,
1873 MVT::i32);
1874 SDValue SubReg0 = CurDAG->getTargetConstant(ARM::dsub_0, dl, MVT::i32);
1875 SDValue SubReg1 = CurDAG->getTargetConstant(ARM::dsub_1, dl, MVT::i32);
1876 const SDValue Ops[] = { RegClass, V0, SubReg0, V1, SubReg1 };
1877 return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, dl, VT, Ops);
1878 }
1879
1880 /// Form 4 consecutive D registers from a pair of Q registers.
1881 SDNode *ARMDAGToDAGISel::createQRegPairNode(EVT VT, SDValue V0, SDValue V1) {
1882 SDLoc dl(V0.getNode());
1883 SDValue RegClass = CurDAG->getTargetConstant(ARM::QQPRRegClassID, dl,
1884 MVT::i32);
1885 SDValue SubReg0 = CurDAG->getTargetConstant(ARM::qsub_0, dl, MVT::i32);
1886 SDValue SubReg1 = CurDAG->getTargetConstant(ARM::qsub_1, dl, MVT::i32);
1887 const SDValue Ops[] = { RegClass, V0, SubReg0, V1, SubReg1 };
1888 return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, dl, VT, Ops);
1889 }
1890
1891 /// Form 4 consecutive S registers.
1892 SDNode *ARMDAGToDAGISel::createQuadSRegsNode(EVT VT, SDValue V0, SDValue V1,
1893 SDValue V2, SDValue V3) {
1894 SDLoc dl(V0.getNode());
1895 SDValue RegClass =
1896 CurDAG->getTargetConstant(ARM::QPR_VFP2RegClassID, dl, MVT::i32);
1897 SDValue SubReg0 = CurDAG->getTargetConstant(ARM::ssub_0, dl, MVT::i32);
1898 SDValue SubReg1 = CurDAG->getTargetConstant(ARM::ssub_1, dl, MVT::i32);
1899 SDValue SubReg2 = CurDAG->getTargetConstant(ARM::ssub_2, dl, MVT::i32);
1900 SDValue SubReg3 = CurDAG->getTargetConstant(ARM::ssub_3, dl, MVT::i32);
1901 const SDValue Ops[] = { RegClass, V0, SubReg0, V1, SubReg1,
1902 V2, SubReg2, V3, SubReg3 };
1903 return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, dl, VT, Ops);
1904 }
1905
1906 /// Form 4 consecutive D registers.
1907 SDNode *ARMDAGToDAGISel::createQuadDRegsNode(EVT VT, SDValue V0, SDValue V1,
1908 SDValue V2, SDValue V3) {
1909 SDLoc dl(V0.getNode());
1910 SDValue RegClass = CurDAG->getTargetConstant(ARM::QQPRRegClassID, dl,
1911 MVT::i32);
1912 SDValue SubReg0 = CurDAG->getTargetConstant(ARM::dsub_0, dl, MVT::i32);
1913 SDValue SubReg1 = CurDAG->getTargetConstant(ARM::dsub_1, dl, MVT::i32);
1914 SDValue SubReg2 = CurDAG->getTargetConstant(ARM::dsub_2, dl, MVT::i32);
1915 SDValue SubReg3 = CurDAG->getTargetConstant(ARM::dsub_3, dl, MVT::i32);
1916 const SDValue Ops[] = { RegClass, V0, SubReg0, V1, SubReg1,
1917 V2, SubReg2, V3, SubReg3 };
1918 return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, dl, VT, Ops);
1919 }
1920
1921 /// Form 4 consecutive Q registers.
1922 SDNode *ARMDAGToDAGISel::createQuadQRegsNode(EVT VT, SDValue V0, SDValue V1,
1923 SDValue V2, SDValue V3) {
1924 SDLoc dl(V0.getNode());
1925 SDValue RegClass = CurDAG->getTargetConstant(ARM::QQQQPRRegClassID, dl,
1926 MVT::i32);
1927 SDValue SubReg0 = CurDAG->getTargetConstant(ARM::qsub_0, dl, MVT::i32);
1928 SDValue SubReg1 = CurDAG->getTargetConstant(ARM::qsub_1, dl, MVT::i32);
1929 SDValue SubReg2 = CurDAG->getTargetConstant(ARM::qsub_2, dl, MVT::i32);
1930 SDValue SubReg3 = CurDAG->getTargetConstant(ARM::qsub_3, dl, MVT::i32);
1931 const SDValue Ops[] = { RegClass, V0, SubReg0, V1, SubReg1,
1932 V2, SubReg2, V3, SubReg3 };
1933 return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, dl, VT, Ops);
1934 }
1935
1936 /// GetVLDSTAlign - Get the alignment (in bytes) for the alignment operand
1937 /// of a NEON VLD or VST instruction. The supported values depend on the
1938 /// number of registers being loaded.
1939 SDValue ARMDAGToDAGISel::GetVLDSTAlign(SDValue Align, const SDLoc &dl,
1940 unsigned NumVecs, bool is64BitVector) {
1941 unsigned NumRegs = NumVecs;
1942 if (!is64BitVector && NumVecs < 3)
1943 NumRegs *= 2;
1944
1945 unsigned Alignment = cast<ConstantSDNode>(Align)->getZExtValue();
1946 if (Alignment >= 32 && NumRegs == 4)
1947 Alignment = 32;
1948 else if (Alignment >= 16 && (NumRegs == 2 || NumRegs == 4))
1949 Alignment = 16;
1950 else if (Alignment >= 8)
1951 Alignment = 8;
1952 else
1953 Alignment = 0;
1954
1955 return CurDAG->getTargetConstant(Alignment, dl, MVT::i32);
1956 }
1957
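// Returns true if Opc is a fixed-stride writeback VLD instruction, i.e. a
// form whose address update is implied by the access size.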
1958 static bool isVLDfixed(unsigned Opc)
1959 {
1960 switch (Opc) {
1961 default: return false;
1962 case ARM::VLD1d8wb_fixed : return true;
1963 case ARM::VLD1d16wb_fixed : return true;
1964 case ARM::VLD1d64Qwb_fixed : return true;
1965 case ARM::VLD1d32wb_fixed : return true;
1966 case ARM::VLD1d64wb_fixed : return true;
1967 case ARM::VLD1d8TPseudoWB_fixed : return true;
1968 case ARM::VLD1d16TPseudoWB_fixed : return true;
1969 case ARM::VLD1d32TPseudoWB_fixed : return true;
1970 case ARM::VLD1d64TPseudoWB_fixed : return true;
1971 case ARM::VLD1d8QPseudoWB_fixed : return true;
1972 case ARM::VLD1d16QPseudoWB_fixed : return true;
1973 case ARM::VLD1d32QPseudoWB_fixed : return true;
1974 case ARM::VLD1d64QPseudoWB_fixed : return true;
1975 case ARM::VLD1q8wb_fixed : return true;
1976 case ARM::VLD1q16wb_fixed : return true;
1977 case ARM::VLD1q32wb_fixed : return true;
1978 case ARM::VLD1q64wb_fixed : return true;
1979 case ARM::VLD1DUPd8wb_fixed : return true;
1980 case ARM::VLD1DUPd16wb_fixed : return true;
1981 case ARM::VLD1DUPd32wb_fixed : return true;
1982 case ARM::VLD1DUPq8wb_fixed : return true;
1983 case ARM::VLD1DUPq16wb_fixed : return true;
1984 case ARM::VLD1DUPq32wb_fixed : return true;
1985 case ARM::VLD2d8wb_fixed : return true;
1986 case ARM::VLD2d16wb_fixed : return true;
1987 case ARM::VLD2d32wb_fixed : return true;
1988 case ARM::VLD2q8PseudoWB_fixed : return true;
1989 case ARM::VLD2q16PseudoWB_fixed : return true;
1990 case ARM::VLD2q32PseudoWB_fixed : return true;
1991 case ARM::VLD2DUPd8wb_fixed : return true;
1992 case ARM::VLD2DUPd16wb_fixed : return true;
1993 case ARM::VLD2DUPd32wb_fixed : return true;
1994 case ARM::VLD2DUPq8OddPseudoWB_fixed: return true;
1995 case ARM::VLD2DUPq16OddPseudoWB_fixed: return true;
1996 case ARM::VLD2DUPq32OddPseudoWB_fixed: return true;
1997 }
1998 }
1999
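// Returns true if Opc is a fixed-stride writeback VST instruction.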
2000 static bool isVSTfixed(unsigned Opc)
2001 {
2002 switch (Opc) {
2003 default: return false;
2004 case ARM::VST1d8wb_fixed : return true;
2005 case ARM::VST1d16wb_fixed : return true;
2006 case ARM::VST1d32wb_fixed : return true;
2007 case ARM::VST1d64wb_fixed : return true;
2008 case ARM::VST1q8wb_fixed : return true;
2009 case ARM::VST1q16wb_fixed : return true;
2010 case ARM::VST1q32wb_fixed : return true;
2011 case ARM::VST1q64wb_fixed : return true;
2012 case ARM::VST1d8TPseudoWB_fixed : return true;
2013 case ARM::VST1d16TPseudoWB_fixed : return true;
2014 case ARM::VST1d32TPseudoWB_fixed : return true;
2015 case ARM::VST1d64TPseudoWB_fixed : return true;
2016 case ARM::VST1d8QPseudoWB_fixed : return true;
2017 case ARM::VST1d16QPseudoWB_fixed : return true;
2018 case ARM::VST1d32QPseudoWB_fixed : return true;
2019 case ARM::VST1d64QPseudoWB_fixed : return true;
2020 case ARM::VST2d8wb_fixed : return true;
2021 case ARM::VST2d16wb_fixed : return true;
2022 case ARM::VST2d32wb_fixed : return true;
2023 case ARM::VST2q8PseudoWB_fixed : return true;
2024 case ARM::VST2q16PseudoWB_fixed : return true;
2025 case ARM::VST2q32PseudoWB_fixed : return true;
2026 }
2027 }
2028
2029 // Get the register stride update opcode of a VLD/VST instruction that
2030 // is otherwise equivalent to the given fixed stride updating instruction.
2031 static unsigned getVLDSTRegisterUpdateOpcode(unsigned Opc) {
2032 assert((isVLDfixed(Opc) || isVSTfixed(Opc))
2033 && "Incorrect fixed stride updating instruction.");
2034 switch (Opc) {
2035 default: break;
2036 case ARM::VLD1d8wb_fixed: return ARM::VLD1d8wb_register;
2037 case ARM::VLD1d16wb_fixed: return ARM::VLD1d16wb_register;
2038 case ARM::VLD1d32wb_fixed: return ARM::VLD1d32wb_register;
2039 case ARM::VLD1d64wb_fixed: return ARM::VLD1d64wb_register;
2040 case ARM::VLD1q8wb_fixed: return ARM::VLD1q8wb_register;
2041 case ARM::VLD1q16wb_fixed: return ARM::VLD1q16wb_register;
2042 case ARM::VLD1q32wb_fixed: return ARM::VLD1q32wb_register;
2043 case ARM::VLD1q64wb_fixed: return ARM::VLD1q64wb_register;
2044 case ARM::VLD1d64Twb_fixed: return ARM::VLD1d64Twb_register;
2045 case ARM::VLD1d64Qwb_fixed: return ARM::VLD1d64Qwb_register;
2046 case ARM::VLD1d8TPseudoWB_fixed: return ARM::VLD1d8TPseudoWB_register;
2047 case ARM::VLD1d16TPseudoWB_fixed: return ARM::VLD1d16TPseudoWB_register;
2048 case ARM::VLD1d32TPseudoWB_fixed: return ARM::VLD1d32TPseudoWB_register;
2049 case ARM::VLD1d64TPseudoWB_fixed: return ARM::VLD1d64TPseudoWB_register;
2050 case ARM::VLD1d8QPseudoWB_fixed: return ARM::VLD1d8QPseudoWB_register;
2051 case ARM::VLD1d16QPseudoWB_fixed: return ARM::VLD1d16QPseudoWB_register;
2052 case ARM::VLD1d32QPseudoWB_fixed: return ARM::VLD1d32QPseudoWB_register;
2053 case ARM::VLD1d64QPseudoWB_fixed: return ARM::VLD1d64QPseudoWB_register;
2054 case ARM::VLD1DUPd8wb_fixed : return ARM::VLD1DUPd8wb_register;
2055 case ARM::VLD1DUPd16wb_fixed : return ARM::VLD1DUPd16wb_register;
2056 case ARM::VLD1DUPd32wb_fixed : return ARM::VLD1DUPd32wb_register;
2057 case ARM::VLD1DUPq8wb_fixed : return ARM::VLD1DUPq8wb_register;
2058 case ARM::VLD1DUPq16wb_fixed : return ARM::VLD1DUPq16wb_register;
2059 case ARM::VLD1DUPq32wb_fixed : return ARM::VLD1DUPq32wb_register;
2060 case ARM::VLD2DUPq8OddPseudoWB_fixed: return ARM::VLD2DUPq8OddPseudoWB_register;
2061 case ARM::VLD2DUPq16OddPseudoWB_fixed: return ARM::VLD2DUPq16OddPseudoWB_register;
2062 case ARM::VLD2DUPq32OddPseudoWB_fixed: return ARM::VLD2DUPq32OddPseudoWB_register;
2063
2064 case ARM::VST1d8wb_fixed: return ARM::VST1d8wb_register;
2065 case ARM::VST1d16wb_fixed: return ARM::VST1d16wb_register;
2066 case ARM::VST1d32wb_fixed: return ARM::VST1d32wb_register;
2067 case ARM::VST1d64wb_fixed: return ARM::VST1d64wb_register;
2068 case ARM::VST1q8wb_fixed: return ARM::VST1q8wb_register;
2069 case ARM::VST1q16wb_fixed: return ARM::VST1q16wb_register;
2070 case ARM::VST1q32wb_fixed: return ARM::VST1q32wb_register;
2071 case ARM::VST1q64wb_fixed: return ARM::VST1q64wb_register;
2072 case ARM::VST1d8TPseudoWB_fixed: return ARM::VST1d8TPseudoWB_register;
2073 case ARM::VST1d16TPseudoWB_fixed: return ARM::VST1d16TPseudoWB_register;
2074 case ARM::VST1d32TPseudoWB_fixed: return ARM::VST1d32TPseudoWB_register;
2075 case ARM::VST1d64TPseudoWB_fixed: return ARM::VST1d64TPseudoWB_register;
2076 case ARM::VST1d8QPseudoWB_fixed: return ARM::VST1d8QPseudoWB_register;
2077 case ARM::VST1d16QPseudoWB_fixed: return ARM::VST1d16QPseudoWB_register;
2078 case ARM::VST1d32QPseudoWB_fixed: return ARM::VST1d32QPseudoWB_register;
2079 case ARM::VST1d64QPseudoWB_fixed: return ARM::VST1d64QPseudoWB_register;
2080
2081 case ARM::VLD2d8wb_fixed: return ARM::VLD2d8wb_register;
2082 case ARM::VLD2d16wb_fixed: return ARM::VLD2d16wb_register;
2083 case ARM::VLD2d32wb_fixed: return ARM::VLD2d32wb_register;
2084 case ARM::VLD2q8PseudoWB_fixed: return ARM::VLD2q8PseudoWB_register;
2085 case ARM::VLD2q16PseudoWB_fixed: return ARM::VLD2q16PseudoWB_register;
2086 case ARM::VLD2q32PseudoWB_fixed: return ARM::VLD2q32PseudoWB_register;
2087
2088 case ARM::VST2d8wb_fixed: return ARM::VST2d8wb_register;
2089 case ARM::VST2d16wb_fixed: return ARM::VST2d16wb_register;
2090 case ARM::VST2d32wb_fixed: return ARM::VST2d32wb_register;
2091 case ARM::VST2q8PseudoWB_fixed: return ARM::VST2q8PseudoWB_register;
2092 case ARM::VST2q16PseudoWB_fixed: return ARM::VST2q16PseudoWB_register;
2093 case ARM::VST2q32PseudoWB_fixed: return ARM::VST2q32PseudoWB_register;
2094
2095 case ARM::VLD2DUPd8wb_fixed: return ARM::VLD2DUPd8wb_register;
2096 case ARM::VLD2DUPd16wb_fixed: return ARM::VLD2DUPd16wb_register;
2097 case ARM::VLD2DUPd32wb_fixed: return ARM::VLD2DUPd32wb_register;
2098 }
2099 return Opc; // If not one we handle, return it unchanged.
2100 }
2101
2102 /// Returns true if the given increment is a Constant known to be equal to the
2103 /// access size performed by a NEON load/store. This means the "[rN]!" form can
2104 /// be used.
2105 static bool isPerfectIncrement(SDValue Inc, EVT VecTy, unsigned NumVecs) {
2106 auto C = dyn_cast<ConstantSDNode>(Inc);
2107 return C && C->getZExtValue() == VecTy.getSizeInBits() / 8 * NumVecs;
2108 }
2109
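/// SelectVLD - Select a NEON vld1/2/3/4. Double registers and vld1/vld2
/// quad registers are selected directly; vld3/vld4 of quad registers are
/// split into two instructions loading the even and odd D registers.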
2110 void ARMDAGToDAGISel::SelectVLD(SDNode *N, bool isUpdating, unsigned NumVecs,
2111 const uint16_t *DOpcodes,
2112 const uint16_t *QOpcodes0,
2113 const uint16_t *QOpcodes1) {
2114 assert(Subtarget->hasNEON());
2115 assert(NumVecs >= 1 && NumVecs <= 4 && "VLD NumVecs out-of-range");
2116 SDLoc dl(N);
2117
2118 SDValue MemAddr, Align;
2119 bool IsIntrinsic = !isUpdating; // By coincidence, all supported updating
2120 // nodes are not intrinsics.
2121 unsigned AddrOpIdx = IsIntrinsic ? 2 : 1;
2122 if (!SelectAddrMode6(N, N->getOperand(AddrOpIdx), MemAddr, Align))
2123 return;
2124
2125 SDValue Chain = N->getOperand(0);
2126 EVT VT = N->getValueType(0);
2127 bool is64BitVector = VT.is64BitVector();
2128 Align = GetVLDSTAlign(Align, dl, NumVecs, is64BitVector);
2129
2130 unsigned OpcodeIndex;
2131 switch (VT.getSimpleVT().SimpleTy) {
2132 default: llvm_unreachable("unhandled vld type");
2133 // Double-register operations:
2134 case MVT::v8i8: OpcodeIndex = 0; break;
2135 case MVT::v4f16:
2136 case MVT::v4bf16:
2137 case MVT::v4i16: OpcodeIndex = 1; break;
2138 case MVT::v2f32:
2139 case MVT::v2i32: OpcodeIndex = 2; break;
2140 case MVT::v1i64: OpcodeIndex = 3; break;
2141 // Quad-register operations:
2142 case MVT::v16i8: OpcodeIndex = 0; break;
2143 case MVT::v8f16:
2144 case MVT::v8bf16:
2145 case MVT::v8i16: OpcodeIndex = 1; break;
2146 case MVT::v4f32:
2147 case MVT::v4i32: OpcodeIndex = 2; break;
2148 case MVT::v2f64:
2149 case MVT::v2i64: OpcodeIndex = 3; break;
2150 }
2151
2152 EVT ResTy;
2153 if (NumVecs == 1)
2154 ResTy = VT;
2155 else {
2156 unsigned ResTyElts = (NumVecs == 3) ? 4 : NumVecs;
2157 if (!is64BitVector)
2158 ResTyElts *= 2;
2159 ResTy = EVT::getVectorVT(*CurDAG->getContext(), MVT::i64, ResTyElts);
2160 }
2161 std::vector<EVT> ResTys;
2162 ResTys.push_back(ResTy);
2163 if (isUpdating)
2164 ResTys.push_back(MVT::i32);
2165 ResTys.push_back(MVT::Other);
2166
2167 SDValue Pred = getAL(CurDAG, dl);
2168 SDValue Reg0 = CurDAG->getRegister(0, MVT::i32);
2169 SDNode *VLd;
2170 SmallVector<SDValue, 7> Ops;
2171
2172 // Double registers and VLD1/VLD2 quad registers are directly supported.
2173 if (is64BitVector || NumVecs <= 2) {
2174 unsigned Opc = (is64BitVector ? DOpcodes[OpcodeIndex] :
2175 QOpcodes0[OpcodeIndex]);
2176 Ops.push_back(MemAddr);
2177 Ops.push_back(Align);
2178 if (isUpdating) {
2179 SDValue Inc = N->getOperand(AddrOpIdx + 1);
2180 bool IsImmUpdate = isPerfectIncrement(Inc, VT, NumVecs);
2181 if (!IsImmUpdate) {
2182 // We use a VLD1 for v1i64 even if the pseudo says vld2/3/4, so
2183 // check for the opcode rather than the number of vector elements.
2184 if (isVLDfixed(Opc))
2185 Opc = getVLDSTRegisterUpdateOpcode(Opc);
2186 Ops.push_back(Inc);
2187 // VLD1/VLD2 fixed increment does not need Reg0 so only include it in
2188 // the operands if not such an opcode.
2189 } else if (!isVLDfixed(Opc))
2190 Ops.push_back(Reg0);
2191 }
2192 Ops.push_back(Pred);
2193 Ops.push_back(Reg0);
2194 Ops.push_back(Chain);
2195 VLd = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);
2196
2197 } else {
2198 // Otherwise, quad registers are loaded with two separate instructions,
2199 // where one loads the even registers and the other loads the odd registers.
2200 EVT AddrTy = MemAddr.getValueType();
2201
2202 // Load the even subregs. This is always an updating load, so that it
2203 // provides the address to the second load for the odd subregs.
2204 SDValue ImplDef =
2205 SDValue(CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF, dl, ResTy), 0);
2206 const SDValue OpsA[] = { MemAddr, Align, Reg0, ImplDef, Pred, Reg0, Chain };
2207 SDNode *VLdA = CurDAG->getMachineNode(QOpcodes0[OpcodeIndex], dl,
2208 ResTy, AddrTy, MVT::Other, OpsA);
2209 Chain = SDValue(VLdA, 2);
2210
2211 // Load the odd subregs.
2212 Ops.push_back(SDValue(VLdA, 1));
2213 Ops.push_back(Align);
2214 if (isUpdating) {
2215 SDValue Inc = N->getOperand(AddrOpIdx + 1);
2216 assert(isa<ConstantSDNode>(Inc.getNode()) &&
2217 "only constant post-increment update allowed for VLD3/4");
2218 (void)Inc;
2219 Ops.push_back(Reg0);
2220 }
2221 Ops.push_back(SDValue(VLdA, 0));
2222 Ops.push_back(Pred);
2223 Ops.push_back(Reg0);
2224 Ops.push_back(Chain);
2225 VLd = CurDAG->getMachineNode(QOpcodes1[OpcodeIndex], dl, ResTys, Ops);
2226 }
2227
2228 // Transfer memoperands.
2229 MachineMemOperand *MemOp = cast<MemIntrinsicSDNode>(N)->getMemOperand();
2230 CurDAG->setNodeMemRefs(cast<MachineSDNode>(VLd), {MemOp});
2231
2232 if (NumVecs == 1) {
2233 ReplaceNode(N, VLd);
2234 return;
2235 }
2236
2237 // Extract out the subregisters.
2238 SDValue SuperReg = SDValue(VLd, 0);
2239 static_assert(ARM::dsub_7 == ARM::dsub_0 + 7 &&
2240 ARM::qsub_3 == ARM::qsub_0 + 3,
2241 "Unexpected subreg numbering");
2242 unsigned Sub0 = (is64BitVector ? ARM::dsub_0 : ARM::qsub_0);
2243 for (unsigned Vec = 0; Vec < NumVecs; ++Vec)
2244 ReplaceUses(SDValue(N, Vec),
2245 CurDAG->getTargetExtractSubreg(Sub0 + Vec, dl, VT, SuperReg));
2246 ReplaceUses(SDValue(N, NumVecs), SDValue(VLd, 1));
2247 if (isUpdating)
2248 ReplaceUses(SDValue(N, NumVecs + 1), SDValue(VLd, 2));
2249 CurDAG->RemoveDeadNode(N);
2250 }
2251
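/// SelectVST - Select a NEON vst1/2/3/4, forming the source REG_SEQUENCE
/// first; vst3/vst4 of quad registers are split into two instructions
/// storing the even and odd D registers.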
2252 void ARMDAGToDAGISel::SelectVST(SDNode *N, bool isUpdating, unsigned NumVecs,
2253 const uint16_t *DOpcodes,
2254 const uint16_t *QOpcodes0,
2255 const uint16_t *QOpcodes1) {
2256 assert(Subtarget->hasNEON());
2257 assert(NumVecs >= 1 && NumVecs <= 4 && "VST NumVecs out-of-range");
2258 SDLoc dl(N);
2259
2260 SDValue MemAddr, Align;
2261 bool IsIntrinsic = !isUpdating; // By coincidence, all supported updating
2262 // nodes are not intrinsics.
2263 unsigned AddrOpIdx = IsIntrinsic ? 2 : 1;
2264 unsigned Vec0Idx = 3; // AddrOpIdx + (isUpdating ? 2 : 1)
2265 if (!SelectAddrMode6(N, N->getOperand(AddrOpIdx), MemAddr, Align))
2266 return;
2267
2268 MachineMemOperand *MemOp = cast<MemIntrinsicSDNode>(N)->getMemOperand();
2269
2270 SDValue Chain = N->getOperand(0);
2271 EVT VT = N->getOperand(Vec0Idx).getValueType();
2272 bool is64BitVector = VT.is64BitVector();
2273 Align = GetVLDSTAlign(Align, dl, NumVecs, is64BitVector);
2274
2275 unsigned OpcodeIndex;
2276 switch (VT.getSimpleVT().SimpleTy) {
2277 default: llvm_unreachable("unhandled vst type");
2278 // Double-register operations:
2279 case MVT::v8i8: OpcodeIndex = 0; break;
2280 case MVT::v4f16:
2281 case MVT::v4bf16:
2282 case MVT::v4i16: OpcodeIndex = 1; break;
2283 case MVT::v2f32:
2284 case MVT::v2i32: OpcodeIndex = 2; break;
2285 case MVT::v1i64: OpcodeIndex = 3; break;
2286 // Quad-register operations:
2287 case MVT::v16i8: OpcodeIndex = 0; break;
2288 case MVT::v8f16:
2289 case MVT::v8bf16:
2290 case MVT::v8i16: OpcodeIndex = 1; break;
2291 case MVT::v4f32:
2292 case MVT::v4i32: OpcodeIndex = 2; break;
2293 case MVT::v2f64:
2294 case MVT::v2i64: OpcodeIndex = 3; break;
2295 }
2296
2297 std::vector<EVT> ResTys;
2298 if (isUpdating)
2299 ResTys.push_back(MVT::i32);
2300 ResTys.push_back(MVT::Other);
2301
2302 SDValue Pred = getAL(CurDAG, dl);
2303 SDValue Reg0 = CurDAG->getRegister(0, MVT::i32);
2304 SmallVector<SDValue, 7> Ops;
2305
2306 // Double registers and VST1/VST2 quad registers are directly supported.
2307 if (is64BitVector || NumVecs <= 2) {
2308 SDValue SrcReg;
2309 if (NumVecs == 1) {
2310 SrcReg = N->getOperand(Vec0Idx);
2311 } else if (is64BitVector) {
2312 // Form a REG_SEQUENCE to force register allocation.
2313 SDValue V0 = N->getOperand(Vec0Idx + 0);
2314 SDValue V1 = N->getOperand(Vec0Idx + 1);
2315 if (NumVecs == 2)
2316 SrcReg = SDValue(createDRegPairNode(MVT::v2i64, V0, V1), 0);
2317 else {
2318 SDValue V2 = N->getOperand(Vec0Idx + 2);
2319 // If it's a vst3, form a quad D-register and leave the last part as
2320 // an undef.
2321 SDValue V3 = (NumVecs == 3)
2322 ? SDValue(CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF,dl,VT), 0)
2323 : N->getOperand(Vec0Idx + 3);
2324 SrcReg = SDValue(createQuadDRegsNode(MVT::v4i64, V0, V1, V2, V3), 0);
2325 }
2326 } else {
2327 // Form a QQ register.
2328 SDValue Q0 = N->getOperand(Vec0Idx);
2329 SDValue Q1 = N->getOperand(Vec0Idx + 1);
2330 SrcReg = SDValue(createQRegPairNode(MVT::v4i64, Q0, Q1), 0);
2331 }
2332
2333 unsigned Opc = (is64BitVector ? DOpcodes[OpcodeIndex] :
2334 QOpcodes0[OpcodeIndex]);
2335 Ops.push_back(MemAddr);
2336 Ops.push_back(Align);
2337 if (isUpdating) {
2338 SDValue Inc = N->getOperand(AddrOpIdx + 1);
2339 bool IsImmUpdate = isPerfectIncrement(Inc, VT, NumVecs);
2340 if (!IsImmUpdate) {
2341 // We use a VST1 for v1i64 even if the pseudo says VST2/3/4, so
2342 // check for the opcode rather than the number of vector elements.
2343 if (isVSTfixed(Opc))
2344 Opc = getVLDSTRegisterUpdateOpcode(Opc);
2345 Ops.push_back(Inc);
2346 }
2347 // VST1/VST2 fixed increment does not need Reg0 so only include it in
2348 // the operands if not such an opcode.
2349 else if (!isVSTfixed(Opc))
2350 Ops.push_back(Reg0);
2351 }
2352 Ops.push_back(SrcReg);
2353 Ops.push_back(Pred);
2354 Ops.push_back(Reg0);
2355 Ops.push_back(Chain);
2356 SDNode *VSt = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);
2357
2358 // Transfer memoperands.
2359 CurDAG->setNodeMemRefs(cast<MachineSDNode>(VSt), {MemOp});
2360
2361 ReplaceNode(N, VSt);
2362 return;
2363 }
2364
2365 // Otherwise, quad registers are stored with two separate instructions,
2366 // where one stores the even registers and the other stores the odd registers.
2367
2368 // Form the QQQQ REG_SEQUENCE.
2369 SDValue V0 = N->getOperand(Vec0Idx + 0);
2370 SDValue V1 = N->getOperand(Vec0Idx + 1);
2371 SDValue V2 = N->getOperand(Vec0Idx + 2);
2372 SDValue V3 = (NumVecs == 3)
2373 ? SDValue(CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF, dl, VT), 0)
2374 : N->getOperand(Vec0Idx + 3);
2375 SDValue RegSeq = SDValue(createQuadQRegsNode(MVT::v8i64, V0, V1, V2, V3), 0);
2376
2377 // Store the even D registers. This is always an updating store, so that it
2378 // provides the address to the second store for the odd subregs.
2379 const SDValue OpsA[] = { MemAddr, Align, Reg0, RegSeq, Pred, Reg0, Chain };
2380 SDNode *VStA = CurDAG->getMachineNode(QOpcodes0[OpcodeIndex], dl,
2381 MemAddr.getValueType(),
2382 MVT::Other, OpsA);
2383 CurDAG->setNodeMemRefs(cast<MachineSDNode>(VStA), {MemOp});
2384 Chain = SDValue(VStA, 1);
2385
2386 // Store the odd D registers.
2387 Ops.push_back(SDValue(VStA, 0));
2388 Ops.push_back(Align);
2389 if (isUpdating) {
2390 SDValue Inc = N->getOperand(AddrOpIdx + 1);
2391 assert(isa<ConstantSDNode>(Inc.getNode()) &&
2392 "only constant post-increment update allowed for VST3/4");
2393 (void)Inc;
2394 Ops.push_back(Reg0);
2395 }
2396 Ops.push_back(RegSeq);
2397 Ops.push_back(Pred);
2398 Ops.push_back(Reg0);
2399 Ops.push_back(Chain);
2400 SDNode *VStB = CurDAG->getMachineNode(QOpcodes1[OpcodeIndex], dl, ResTys,
2401 Ops);
2402 CurDAG->setNodeMemRefs(cast<MachineSDNode>(VStB), {MemOp});
2403 ReplaceNode(N, VStB);
2404 }
2405
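/// SelectVLDSTLane - Select a NEON load or store of a single lane,
/// vld2/3/4lane or vst2/3/4lane.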
2406 void ARMDAGToDAGISel::SelectVLDSTLane(SDNode *N, bool IsLoad, bool isUpdating,
2407 unsigned NumVecs,
2408 const uint16_t *DOpcodes,
2409 const uint16_t *QOpcodes) {
2410 assert(Subtarget->hasNEON());
2411 assert(NumVecs >=2 && NumVecs <= 4 && "VLDSTLane NumVecs out-of-range");
2412 SDLoc dl(N);
2413
2414 SDValue MemAddr, Align;
2415 bool IsIntrinsic = !isUpdating; // By coincidence, all supported updating
2416 // nodes are not intrinsics.
2417 unsigned AddrOpIdx = IsIntrinsic ? 2 : 1;
2418 unsigned Vec0Idx = 3; // AddrOpIdx + (isUpdating ? 2 : 1)
2419 if (!SelectAddrMode6(N, N->getOperand(AddrOpIdx), MemAddr, Align))
2420 return;
2421
2422 MachineMemOperand *MemOp = cast<MemIntrinsicSDNode>(N)->getMemOperand();
2423
2424 SDValue Chain = N->getOperand(0);
2425 unsigned Lane =
2426 cast<ConstantSDNode>(N->getOperand(Vec0Idx + NumVecs))->getZExtValue();
2427 EVT VT = N->getOperand(Vec0Idx).getValueType();
2428 bool is64BitVector = VT.is64BitVector();
2429
2430 unsigned Alignment = 0;
2431 if (NumVecs != 3) {
2432 Alignment = cast<ConstantSDNode>(Align)->getZExtValue();
2433 unsigned NumBytes = NumVecs * VT.getScalarSizeInBits() / 8;
2434 if (Alignment > NumBytes)
2435 Alignment = NumBytes;
2436 if (Alignment < 8 && Alignment < NumBytes)
2437 Alignment = 0;
2438 // Alignment must be a power of two; make sure of that.
2439 Alignment = (Alignment & -Alignment);
2440 if (Alignment == 1)
2441 Alignment = 0;
2442 }
2443 Align = CurDAG->getTargetConstant(Alignment, dl, MVT::i32);
2444
2445 unsigned OpcodeIndex;
2446 switch (VT.getSimpleVT().SimpleTy) {
2447 default: llvm_unreachable("unhandled vld/vst lane type");
2448 // Double-register operations:
2449 case MVT::v8i8: OpcodeIndex = 0; break;
2450 case MVT::v4f16:
2451 case MVT::v4bf16:
2452 case MVT::v4i16: OpcodeIndex = 1; break;
2453 case MVT::v2f32:
2454 case MVT::v2i32: OpcodeIndex = 2; break;
2455 // Quad-register operations:
2456 case MVT::v8f16:
2457 case MVT::v8bf16:
2458 case MVT::v8i16: OpcodeIndex = 0; break;
2459 case MVT::v4f32:
2460 case MVT::v4i32: OpcodeIndex = 1; break;
2461 }
2462
2463 std::vector<EVT> ResTys;
2464 if (IsLoad) {
2465 unsigned ResTyElts = (NumVecs == 3) ? 4 : NumVecs;
2466 if (!is64BitVector)
2467 ResTyElts *= 2;
2468 ResTys.push_back(EVT::getVectorVT(*CurDAG->getContext(),
2469 MVT::i64, ResTyElts));
2470 }
2471 if (isUpdating)
2472 ResTys.push_back(MVT::i32);
2473 ResTys.push_back(MVT::Other);
2474
2475 SDValue Pred = getAL(CurDAG, dl);
2476 SDValue Reg0 = CurDAG->getRegister(0, MVT::i32);
2477
2478 SmallVector<SDValue, 8> Ops;
2479 Ops.push_back(MemAddr);
2480 Ops.push_back(Align);
2481 if (isUpdating) {
2482 SDValue Inc = N->getOperand(AddrOpIdx + 1);
2483 bool IsImmUpdate =
2484 isPerfectIncrement(Inc, VT.getVectorElementType(), NumVecs);
2485 Ops.push_back(IsImmUpdate ? Reg0 : Inc);
2486 }
2487
2488 SDValue SuperReg;
2489 SDValue V0 = N->getOperand(Vec0Idx + 0);
2490 SDValue V1 = N->getOperand(Vec0Idx + 1);
2491 if (NumVecs == 2) {
2492 if (is64BitVector)
2493 SuperReg = SDValue(createDRegPairNode(MVT::v2i64, V0, V1), 0);
2494 else
2495 SuperReg = SDValue(createQRegPairNode(MVT::v4i64, V0, V1), 0);
2496 } else {
2497 SDValue V2 = N->getOperand(Vec0Idx + 2);
2498 SDValue V3 = (NumVecs == 3)
2499 ? SDValue(CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF, dl, VT), 0)
2500 : N->getOperand(Vec0Idx + 3);
2501 if (is64BitVector)
2502 SuperReg = SDValue(createQuadDRegsNode(MVT::v4i64, V0, V1, V2, V3), 0);
2503 else
2504 SuperReg = SDValue(createQuadQRegsNode(MVT::v8i64, V0, V1, V2, V3), 0);
2505 }
2506 Ops.push_back(SuperReg);
2507 Ops.push_back(getI32Imm(Lane, dl));
2508 Ops.push_back(Pred);
2509 Ops.push_back(Reg0);
2510 Ops.push_back(Chain);
2511
2512 unsigned Opc = (is64BitVector ? DOpcodes[OpcodeIndex] :
2513 QOpcodes[OpcodeIndex]);
2514 SDNode *VLdLn = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);
2515 CurDAG->setNodeMemRefs(cast<MachineSDNode>(VLdLn), {MemOp});
2516 if (!IsLoad) {
2517 ReplaceNode(N, VLdLn);
2518 return;
2519 }
2520
2521 // Extract the subregisters.
2522 SuperReg = SDValue(VLdLn, 0);
2523 static_assert(ARM::dsub_7 == ARM::dsub_0 + 7 &&
2524 ARM::qsub_3 == ARM::qsub_0 + 3,
2525 "Unexpected subreg numbering");
2526 unsigned Sub0 = is64BitVector ? ARM::dsub_0 : ARM::qsub_0;
2527 for (unsigned Vec = 0; Vec < NumVecs; ++Vec)
2528 ReplaceUses(SDValue(N, Vec),
2529 CurDAG->getTargetExtractSubreg(Sub0 + Vec, dl, VT, SuperReg));
2530 ReplaceUses(SDValue(N, NumVecs), SDValue(VLdLn, 1));
2531 if (isUpdating)
2532 ReplaceUses(SDValue(N, NumVecs + 1), SDValue(VLdLn, 2));
2533 CurDAG->RemoveDeadNode(N);
2534 }
2535
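/// Add the predication operands (ARMVCC::Then, the mask register and a
/// dummy tp_reg) for a predicated MVE instruction; the second overload
/// also appends the inactive-lanes value.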
2536 template <typename SDValueVector>
2537 void ARMDAGToDAGISel::AddMVEPredicateToOps(SDValueVector &Ops, SDLoc Loc,
2538 SDValue PredicateMask) {
2539 Ops.push_back(CurDAG->getTargetConstant(ARMVCC::Then, Loc, MVT::i32));
2540 Ops.push_back(PredicateMask);
2541 Ops.push_back(CurDAG->getRegister(0, MVT::i32)); // tp_reg
2542 }
2543
2544 template <typename SDValueVector>
2545 void ARMDAGToDAGISel::AddMVEPredicateToOps(SDValueVector &Ops, SDLoc Loc,
2546 SDValue PredicateMask,
2547 SDValue Inactive) {
2548 Ops.push_back(CurDAG->getTargetConstant(ARMVCC::Then, Loc, MVT::i32));
2549 Ops.push_back(PredicateMask);
2550 Ops.push_back(CurDAG->getRegister(0, MVT::i32)); // tp_reg
2551 Ops.push_back(Inactive);
2552 }
2553
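/// Add the operands marking an MVE instruction as unpredicated
/// (ARMVCC::None plus dummy mask and tp_reg registers); the second
/// overload also appends an IMPLICIT_DEF inactive-lanes value.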
2554 template <typename SDValueVector>
2555 void ARMDAGToDAGISel::AddEmptyMVEPredicateToOps(SDValueVector &Ops, SDLoc Loc) {
2556 Ops.push_back(CurDAG->getTargetConstant(ARMVCC::None, Loc, MVT::i32));
2557 Ops.push_back(CurDAG->getRegister(0, MVT::i32));
2558 Ops.push_back(CurDAG->getRegister(0, MVT::i32)); // tp_reg
2559 }
2560
2561 template <typename SDValueVector>
2562 void ARMDAGToDAGISel::AddEmptyMVEPredicateToOps(SDValueVector &Ops, SDLoc Loc,
2563 EVT InactiveTy) {
2564 Ops.push_back(CurDAG->getTargetConstant(ARMVCC::None, Loc, MVT::i32));
2565 Ops.push_back(CurDAG->getRegister(0, MVT::i32));
2566 Ops.push_back(CurDAG->getRegister(0, MVT::i32)); // tp_reg
2567 Ops.push_back(SDValue(
2568 CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF, Loc, InactiveTy), 0));
2569 }
2570
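/// SelectMVE_WB - Select an MVE load intrinsic that takes a vector of
/// base addresses and an immediate offset, writing the updated base
/// vector back as a second result.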
2571 void ARMDAGToDAGISel::SelectMVE_WB(SDNode *N, const uint16_t *Opcodes,
2572 bool Predicated) {
2573 SDLoc Loc(N);
2574 SmallVector<SDValue, 8> Ops;
2575
2576 uint16_t Opcode;
2577 switch (N->getValueType(1).getVectorElementType().getSizeInBits()) {
2578 case 32:
2579 Opcode = Opcodes[0];
2580 break;
2581 case 64:
2582 Opcode = Opcodes[1];
2583 break;
2584 default:
2585 llvm_unreachable("bad vector element size in SelectMVE_WB");
2586 }
2587
2588 Ops.push_back(N->getOperand(2)); // vector of base addresses
2589
2590 int32_t ImmValue = cast<ConstantSDNode>(N->getOperand(3))->getZExtValue();
2591 Ops.push_back(getI32Imm(ImmValue, Loc)); // immediate offset
2592
2593 if (Predicated)
2594 AddMVEPredicateToOps(Ops, Loc, N->getOperand(4));
2595 else
2596 AddEmptyMVEPredicateToOps(Ops, Loc);
2597
2598 Ops.push_back(N->getOperand(0)); // chain
2599
2600 SmallVector<EVT, 8> VTs;
2601 VTs.push_back(N->getValueType(1));
2602 VTs.push_back(N->getValueType(0));
2603 VTs.push_back(N->getValueType(2));
2604
2605 SDNode *New = CurDAG->getMachineNode(Opcode, SDLoc(N), VTs, Ops);
2606 ReplaceUses(SDValue(N, 0), SDValue(New, 1));
2607 ReplaceUses(SDValue(N, 1), SDValue(New, 0));
2608 ReplaceUses(SDValue(N, 2), SDValue(New, 2));
2609 transferMemOperands(N, New);
2610 CurDAG->RemoveDeadNode(N);
2611 }
2612
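/// SelectMVE_LongShift - Select an MVE scalar shift such as LSLL or ASRL
/// that operates on a 64-bit value split across two GPRs, with an
/// optional saturation-width operand.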
2613 void ARMDAGToDAGISel::SelectMVE_LongShift(SDNode *N, uint16_t Opcode,
2614 bool Immediate,
2615 bool HasSaturationOperand) {
2616 SDLoc Loc(N);
2617 SmallVector<SDValue, 8> Ops;
2618
2619 // Two 32-bit halves of the value to be shifted
2620 Ops.push_back(N->getOperand(1));
2621 Ops.push_back(N->getOperand(2));
2622
2623 // The shift count
2624 if (Immediate) {
2625 int32_t ImmValue = cast<ConstantSDNode>(N->getOperand(3))->getZExtValue();
2626 Ops.push_back(getI32Imm(ImmValue, Loc)); // immediate shift count
2627 } else {
2628 Ops.push_back(N->getOperand(3));
2629 }
2630
2631 // The immediate saturation operand, if any
2632 if (HasSaturationOperand) {
2633 int32_t SatOp = cast<ConstantSDNode>(N->getOperand(4))->getZExtValue();
2634 int SatBit = (SatOp == 64 ? 0 : 1);
2635 Ops.push_back(getI32Imm(SatBit, Loc));
2636 }
2637
2638 // MVE scalar shifts are IT-predicable, so include the standard
2639 // predicate arguments.
2640 Ops.push_back(getAL(CurDAG, Loc));
2641 Ops.push_back(CurDAG->getRegister(0, MVT::i32));
2642
2643 CurDAG->SelectNodeTo(N, Opcode, N->getVTList(), ArrayRef(Ops));
2644 }
2645
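/// SelectMVE_VADCSBC - Select VADC/VSBC add-with-carry or
/// subtract-with-carry, using the VADCI/VSBCI variant when the incoming
/// carry is a constant with the expected initial value.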
2646 void ARMDAGToDAGISel::SelectMVE_VADCSBC(SDNode *N, uint16_t OpcodeWithCarry,
2647 uint16_t OpcodeWithNoCarry,
2648 bool Add, bool Predicated) {
2649 SDLoc Loc(N);
2650 SmallVector<SDValue, 8> Ops;
2651 uint16_t Opcode;
2652
2653 unsigned FirstInputOp = Predicated ? 2 : 1;
2654
2655 // Two input vectors and the input carry flag
2656 Ops.push_back(N->getOperand(FirstInputOp));
2657 Ops.push_back(N->getOperand(FirstInputOp + 1));
2658 SDValue CarryIn = N->getOperand(FirstInputOp + 2);
2659 ConstantSDNode *CarryInConstant = dyn_cast<ConstantSDNode>(CarryIn);
2660 uint32_t CarryMask = 1 << 29;
2661 uint32_t CarryExpected = Add ? 0 : CarryMask;
2662 if (CarryInConstant &&
2663 (CarryInConstant->getZExtValue() & CarryMask) == CarryExpected) {
2664 Opcode = OpcodeWithNoCarry;
2665 } else {
2666 Ops.push_back(CarryIn);
2667 Opcode = OpcodeWithCarry;
2668 }
2669
2670 if (Predicated)
2671 AddMVEPredicateToOps(Ops, Loc,
2672 N->getOperand(FirstInputOp + 3), // predicate
2673 N->getOperand(FirstInputOp - 1)); // inactive
2674 else
2675 AddEmptyMVEPredicateToOps(Ops, Loc, N->getValueType(0));
2676
2677 CurDAG->SelectNodeTo(N, Opcode, N->getVTList(), ArrayRef(Ops));
2678 }
2679
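/// SelectMVE_VSHLC - Select MVE VSHLC, which shifts a whole vector left
/// by an immediate while shifting in bits from a GPR.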
2680 void ARMDAGToDAGISel::SelectMVE_VSHLC(SDNode *N, bool Predicated) {
2681 SDLoc Loc(N);
2682 SmallVector<SDValue, 8> Ops;
2683
2684 // One vector input, followed by a 32-bit word of bits to shift in
2685 // and then an immediate shift count
2686 Ops.push_back(N->getOperand(1));
2687 Ops.push_back(N->getOperand(2));
2688 int32_t ImmValue = cast<ConstantSDNode>(N->getOperand(3))->getZExtValue();
2689 Ops.push_back(getI32Imm(ImmValue, Loc)); // immediate shift count
2690
2691 if (Predicated)
2692 AddMVEPredicateToOps(Ops, Loc, N->getOperand(4));
2693 else
2694 AddEmptyMVEPredicateToOps(Ops, Loc);
2695
2696 CurDAG->SelectNodeTo(N, ARM::MVE_VSHLC, N->getVTList(), ArrayRef(Ops));
2697 }
2698
2699 static bool SDValueToConstBool(SDValue SDVal) {
2700 assert(isa<ConstantSDNode>(SDVal) && "expected a compile-time constant");
2701 ConstantSDNode *SDValConstant = dyn_cast<ConstantSDNode>(SDVal);
2702 uint64_t Value = SDValConstant->getZExtValue();
2703 assert((Value == 0 || Value == 1) && "expected value 0 or 1");
2704 return Value;
2705 }
2706
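/// SelectBaseMVE_VMLLDAV - Shared selection of the VMLALDAV/VMLSLDAV and
/// VRMLALDAVH/VRMLSLDAVH families; the opcode tables are indexed by
/// signedness, subtraction, exchange, accumulation and element size.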
2707 void ARMDAGToDAGISel::SelectBaseMVE_VMLLDAV(SDNode *N, bool Predicated,
2708 const uint16_t *OpcodesS,
2709 const uint16_t *OpcodesU,
2710 size_t Stride, size_t TySize) {
2711 assert(TySize < Stride && "Invalid TySize");
2712 bool IsUnsigned = SDValueToConstBool(N->getOperand(1));
2713 bool IsSub = SDValueToConstBool(N->getOperand(2));
2714 bool IsExchange = SDValueToConstBool(N->getOperand(3));
2715 if (IsUnsigned) {
2716 assert(!IsSub &&
2717 "Unsigned versions of vmlsldav[a]/vrmlsldavh[a] do not exist");
2718 assert(!IsExchange &&
2719 "Unsigned versions of vmlaldav[a]x/vrmlaldavh[a]x do not exist");
2720 }
2721
2722 auto OpIsZero = [N](size_t OpNo) {
2723 if (ConstantSDNode *OpConst = dyn_cast<ConstantSDNode>(N->getOperand(OpNo)))
2724 if (OpConst->getZExtValue() == 0)
2725 return true;
2726 return false;
2727 };
2728
2729 // If the input accumulator value is not zero, select an instruction with
2730 // accumulator, otherwise select an instruction without accumulator
2731 bool IsAccum = !(OpIsZero(4) && OpIsZero(5));
2732
2733 const uint16_t *Opcodes = IsUnsigned ? OpcodesU : OpcodesS;
2734 if (IsSub)
2735 Opcodes += 4 * Stride;
2736 if (IsExchange)
2737 Opcodes += 2 * Stride;
2738 if (IsAccum)
2739 Opcodes += Stride;
2740 uint16_t Opcode = Opcodes[TySize];
2741
2742 SDLoc Loc(N);
2743 SmallVector<SDValue, 8> Ops;
2744 // Push the accumulator operands, if they are used
2745 if (IsAccum) {
2746 Ops.push_back(N->getOperand(4));
2747 Ops.push_back(N->getOperand(5));
2748 }
2749 // Push the two vector operands
2750 Ops.push_back(N->getOperand(6));
2751 Ops.push_back(N->getOperand(7));
2752
2753 if (Predicated)
2754 AddMVEPredicateToOps(Ops, Loc, N->getOperand(8));
2755 else
2756 AddEmptyMVEPredicateToOps(Ops, Loc);
2757
2758 CurDAG->SelectNodeTo(N, Opcode, N->getVTList(), ArrayRef(Ops));
2759 }
2760
2761 void ARMDAGToDAGISel::SelectMVE_VMLLDAV(SDNode *N, bool Predicated,
2762 const uint16_t *OpcodesS,
2763 const uint16_t *OpcodesU) {
2764 EVT VecTy = N->getOperand(6).getValueType();
2765 size_t SizeIndex;
2766 switch (VecTy.getVectorElementType().getSizeInBits()) {
2767 case 16:
2768 SizeIndex = 0;
2769 break;
2770 case 32:
2771 SizeIndex = 1;
2772 break;
2773 default:
2774 llvm_unreachable("bad vector element size");
2775 }
2776
2777 SelectBaseMVE_VMLLDAV(N, Predicated, OpcodesS, OpcodesU, 2, SizeIndex);
2778 }
2779
2780 void ARMDAGToDAGISel::SelectMVE_VRMLLDAVH(SDNode *N, bool Predicated,
2781 const uint16_t *OpcodesS,
2782 const uint16_t *OpcodesU) {
2783 assert(
2784 N->getOperand(6).getValueType().getVectorElementType().getSizeInBits() ==
2785 32 &&
2786 "bad vector element size");
2787 SelectBaseMVE_VMLLDAV(N, Predicated, OpcodesS, OpcodesU, 1, 0);
2788 }
2789
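/// SelectMVE_VLD - Select an MVE VLD2/VLD4. One MVE_VLDn instruction is
/// emitted per stage, each feeding its result tuple to the next, with an
/// optional writeback on the last.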
2790 void ARMDAGToDAGISel::SelectMVE_VLD(SDNode *N, unsigned NumVecs,
2791 const uint16_t *const *Opcodes,
2792 bool HasWriteback) {
2793 EVT VT = N->getValueType(0);
2794 SDLoc Loc(N);
2795
2796 const uint16_t *OurOpcodes;
2797 switch (VT.getVectorElementType().getSizeInBits()) {
2798 case 8:
2799 OurOpcodes = Opcodes[0];
2800 break;
2801 case 16:
2802 OurOpcodes = Opcodes[1];
2803 break;
2804 case 32:
2805 OurOpcodes = Opcodes[2];
2806 break;
2807 default:
2808 llvm_unreachable("bad vector element size in SelectMVE_VLD");
2809 }
2810
2811 EVT DataTy = EVT::getVectorVT(*CurDAG->getContext(), MVT::i64, NumVecs * 2);
2812 SmallVector<EVT, 4> ResultTys = {DataTy, MVT::Other};
2813 unsigned PtrOperand = HasWriteback ? 1 : 2;
2814
2815 auto Data = SDValue(
2816 CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF, Loc, DataTy), 0);
2817 SDValue Chain = N->getOperand(0);
2818 // Add a MVE_VLDn instruction for each Vec, except the last
2819 for (unsigned Stage = 0; Stage < NumVecs - 1; ++Stage) {
2820 SDValue Ops[] = {Data, N->getOperand(PtrOperand), Chain};
2821 auto LoadInst =
2822 CurDAG->getMachineNode(OurOpcodes[Stage], Loc, ResultTys, Ops);
2823 Data = SDValue(LoadInst, 0);
2824 Chain = SDValue(LoadInst, 1);
2825 transferMemOperands(N, LoadInst);
2826 }
2827 // The last may need a writeback on it
2828 if (HasWriteback)
2829 ResultTys = {DataTy, MVT::i32, MVT::Other};
2830 SDValue Ops[] = {Data, N->getOperand(PtrOperand), Chain};
2831 auto LoadInst =
2832 CurDAG->getMachineNode(OurOpcodes[NumVecs - 1], Loc, ResultTys, Ops);
2833 transferMemOperands(N, LoadInst);
2834
2835 unsigned i;
2836 for (i = 0; i < NumVecs; i++)
2837 ReplaceUses(SDValue(N, i),
2838 CurDAG->getTargetExtractSubreg(ARM::qsub_0 + i, Loc, VT,
2839 SDValue(LoadInst, 0)));
2840 if (HasWriteback)
2841 ReplaceUses(SDValue(N, i++), SDValue(LoadInst, 1));
2842 ReplaceUses(SDValue(N, i), SDValue(LoadInst, HasWriteback ? 2 : 1));
2843 CurDAG->RemoveDeadNode(N);
2844 }
2845
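/// SelectMVE_VxDUP - Select VIDUP/VDDUP or the wrapping VIWDUP/VDWDUP,
/// which fill a vector from an incrementing or decrementing scalar base.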
2846 void ARMDAGToDAGISel::SelectMVE_VxDUP(SDNode *N, const uint16_t *Opcodes,
2847 bool Wrapping, bool Predicated) {
2848 EVT VT = N->getValueType(0);
2849 SDLoc Loc(N);
2850
2851 uint16_t Opcode;
2852 switch (VT.getScalarSizeInBits()) {
2853 case 8:
2854 Opcode = Opcodes[0];
2855 break;
2856 case 16:
2857 Opcode = Opcodes[1];
2858 break;
2859 case 32:
2860 Opcode = Opcodes[2];
2861 break;
2862 default:
2863 llvm_unreachable("bad vector element size in SelectMVE_VxDUP");
2864 }
2865
2866 SmallVector<SDValue, 8> Ops;
2867 unsigned OpIdx = 1;
2868
2869 SDValue Inactive;
2870 if (Predicated)
2871 Inactive = N->getOperand(OpIdx++);
2872
2873 Ops.push_back(N->getOperand(OpIdx++)); // base
2874 if (Wrapping)
2875 Ops.push_back(N->getOperand(OpIdx++)); // limit
2876
2877 SDValue ImmOp = N->getOperand(OpIdx++); // step
2878 int ImmValue = cast<ConstantSDNode>(ImmOp)->getZExtValue();
2879 Ops.push_back(getI32Imm(ImmValue, Loc));
2880
2881 if (Predicated)
2882 AddMVEPredicateToOps(Ops, Loc, N->getOperand(OpIdx), Inactive);
2883 else
2884 AddEmptyMVEPredicateToOps(Ops, Loc, N->getValueType(0));
2885
2886 CurDAG->SelectNodeTo(N, Opcode, N->getVTList(), ArrayRef(Ops));
2887 }
2888
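/// SelectCDE_CXxD - Select a dual-register CDE instruction, packing the
/// 64-bit accumulator (if any) into a GPRPair and splitting the
/// register-pair result back into the intrinsic's two i32 values.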
2889 void ARMDAGToDAGISel::SelectCDE_CXxD(SDNode *N, uint16_t Opcode,
2890 size_t NumExtraOps, bool HasAccum) {
2891 bool IsBigEndian = CurDAG->getDataLayout().isBigEndian();
2892 SDLoc Loc(N);
2893 SmallVector<SDValue, 8> Ops;
2894
2895 unsigned OpIdx = 1;
2896
2897 // Convert and append the immediate operand designating the coprocessor.
2898 SDValue ImmCoproc = N->getOperand(OpIdx++);
2899 uint32_t ImmCoprocVal = cast<ConstantSDNode>(ImmCoproc)->getZExtValue();
2900 Ops.push_back(getI32Imm(ImmCoprocVal, Loc));
2901
2902 // For accumulating variants copy the low and high order parts of the
2903 // accumulator into a register pair and add it to the operand vector.
2904 if (HasAccum) {
2905 SDValue AccLo = N->getOperand(OpIdx++);
2906 SDValue AccHi = N->getOperand(OpIdx++);
2907 if (IsBigEndian)
2908 std::swap(AccLo, AccHi);
2909 Ops.push_back(SDValue(createGPRPairNode(MVT::Untyped, AccLo, AccHi), 0));
2910 }
2911
2912 // Copy extra operands as-is.
2913 for (size_t I = 0; I < NumExtraOps; I++)
2914 Ops.push_back(N->getOperand(OpIdx++));
2915
2916 // Convert and append the immediate operand
2917 SDValue Imm = N->getOperand(OpIdx);
2918 uint32_t ImmVal = cast<ConstantSDNode>(Imm)->getZExtValue();
2919 Ops.push_back(getI32Imm(ImmVal, Loc));
2920
2921 // Accumulating variants are IT-predicable; add the predicate operands.
2922 if (HasAccum) {
2923 SDValue Pred = getAL(CurDAG, Loc);
2924 SDValue PredReg = CurDAG->getRegister(0, MVT::i32);
2925 Ops.push_back(Pred);
2926 Ops.push_back(PredReg);
2927 }
2928
2929 // Create the CDE instruction.
2930 SDNode *InstrNode = CurDAG->getMachineNode(Opcode, Loc, MVT::Untyped, Ops);
2931 SDValue ResultPair = SDValue(InstrNode, 0);
2932
2933 // The original intrinsic had two outputs, and the output of the dual-register
2934 // CDE instruction is a register pair. We need to extract the two subregisters
2935 // and replace all uses of the original outputs with the extracted
2936 // subregisters.
2937 uint16_t SubRegs[2] = {ARM::gsub_0, ARM::gsub_1};
2938 if (IsBigEndian)
2939 std::swap(SubRegs[0], SubRegs[1]);
2940
2941 for (size_t ResIdx = 0; ResIdx < 2; ResIdx++) {
2942 if (SDValue(N, ResIdx).use_empty())
2943 continue;
2944 SDValue SubReg = CurDAG->getTargetExtractSubreg(SubRegs[ResIdx], Loc,
2945 MVT::i32, ResultPair);
2946 ReplaceUses(SDValue(N, ResIdx), SubReg);
2947 }
2948
2949 CurDAG->RemoveDeadNode(N);
2950 }
2951
2952 void ARMDAGToDAGISel::SelectVLDDup(SDNode *N, bool IsIntrinsic,
2953 bool isUpdating, unsigned NumVecs,
2954 const uint16_t *DOpcodes,
2955 const uint16_t *QOpcodes0,
2956 const uint16_t *QOpcodes1) {
2957 assert(Subtarget->hasNEON());
2958 assert(NumVecs >= 1 && NumVecs <= 4 && "VLDDup NumVecs out-of-range");
2959 SDLoc dl(N);
2960
2961 SDValue MemAddr, Align;
2962 unsigned AddrOpIdx = IsIntrinsic ? 2 : 1;
2963 if (!SelectAddrMode6(N, N->getOperand(AddrOpIdx), MemAddr, Align))
2964 return;
2965
2966 SDValue Chain = N->getOperand(0);
2967 EVT VT = N->getValueType(0);
2968 bool is64BitVector = VT.is64BitVector();
2969
2970 unsigned Alignment = 0;
2971 if (NumVecs != 3) {
2972 Alignment = cast<ConstantSDNode>(Align)->getZExtValue();
2973 unsigned NumBytes = NumVecs * VT.getScalarSizeInBits() / 8;
2974 if (Alignment > NumBytes)
2975 Alignment = NumBytes;
2976 if (Alignment < 8 && Alignment < NumBytes)
2977 Alignment = 0;
2978 // Alignment must be a power of two; make sure of that.
2979 Alignment = (Alignment & -Alignment);
2980 if (Alignment == 1)
2981 Alignment = 0;
2982 }
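// Worked example: a VLD2 dup of v4i16 gives NumBytes = 2 * 16 / 8 = 4. An
// incoming alignment of 8 is clamped to 4, while an alignment of 2 (< 8 and
// < NumBytes) is dropped to 0, i.e. no alignment is encoded.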
2983 Align = CurDAG->getTargetConstant(Alignment, dl, MVT::i32);
2984
2985 unsigned OpcodeIndex;
2986 switch (VT.getSimpleVT().SimpleTy) {
2987 default: llvm_unreachable("unhandled vld-dup type");
2988 case MVT::v8i8:
2989 case MVT::v16i8: OpcodeIndex = 0; break;
2990 case MVT::v4i16:
2991 case MVT::v8i16:
2992 case MVT::v4f16:
2993 case MVT::v8f16:
2994 case MVT::v4bf16:
2995 case MVT::v8bf16:
2996 OpcodeIndex = 1; break;
2997 case MVT::v2f32:
2998 case MVT::v2i32:
2999 case MVT::v4f32:
3000 case MVT::v4i32: OpcodeIndex = 2; break;
3001 case MVT::v1f64:
3002 case MVT::v1i64: OpcodeIndex = 3; break;
3003 }
3004
3005 unsigned ResTyElts = (NumVecs == 3) ? 4 : NumVecs;
3006 if (!is64BitVector)
3007 ResTyElts *= 2;
3008 EVT ResTy = EVT::getVectorVT(*CurDAG->getContext(), MVT::i64, ResTyElts);
3009
3010 std::vector<EVT> ResTys;
3011 ResTys.push_back(ResTy);
3012 if (isUpdating)
3013 ResTys.push_back(MVT::i32);
3014 ResTys.push_back(MVT::Other);
3015
3016 SDValue Pred = getAL(CurDAG, dl);
3017 SDValue Reg0 = CurDAG->getRegister(0, MVT::i32);
3018
3019 SmallVector<SDValue, 6> Ops;
3020 Ops.push_back(MemAddr);
3021 Ops.push_back(Align);
3022 unsigned Opc = is64BitVector ? DOpcodes[OpcodeIndex]
3023 : (NumVecs == 1) ? QOpcodes0[OpcodeIndex]
3024 : QOpcodes1[OpcodeIndex];
3025 if (isUpdating) {
3026 SDValue Inc = N->getOperand(2);
3027 bool IsImmUpdate =
3028 isPerfectIncrement(Inc, VT.getVectorElementType(), NumVecs);
3029 if (IsImmUpdate) {
3030 if (!isVLDfixed(Opc))
3031 Ops.push_back(Reg0);
3032 } else {
3033 if (isVLDfixed(Opc))
3034 Opc = getVLDSTRegisterUpdateOpcode(Opc);
3035 Ops.push_back(Inc);
3036 }
3037 }
3038 if (is64BitVector || NumVecs == 1) {
3039 // Double registers and VLD1 quad registers are directly supported.
3040 } else if (NumVecs == 2) {
3041 const SDValue OpsA[] = {MemAddr, Align, Pred, Reg0, Chain};
3042 SDNode *VLdA = CurDAG->getMachineNode(QOpcodes0[OpcodeIndex], dl, ResTy,
3043 MVT::Other, OpsA);
3044 Chain = SDValue(VLdA, 1);
3045 } else {
3046 SDValue ImplDef = SDValue(
3047 CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF, dl, ResTy), 0);
3048 const SDValue OpsA[] = {MemAddr, Align, ImplDef, Pred, Reg0, Chain};
3049 SDNode *VLdA = CurDAG->getMachineNode(QOpcodes0[OpcodeIndex], dl, ResTy,
3050 MVT::Other, OpsA);
3051 Ops.push_back(SDValue(VLdA, 0));
3052 Chain = SDValue(VLdA, 1);
3053 }
3054
3055 Ops.push_back(Pred);
3056 Ops.push_back(Reg0);
3057 Ops.push_back(Chain);
3058
3059 SDNode *VLdDup = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);
3060
3061 // Transfer memoperands.
3062 MachineMemOperand *MemOp = cast<MemIntrinsicSDNode>(N)->getMemOperand();
3063 CurDAG->setNodeMemRefs(cast<MachineSDNode>(VLdDup), {MemOp});
3064
3065 // Extract the subregisters.
3066 if (NumVecs == 1) {
3067 ReplaceUses(SDValue(N, 0), SDValue(VLdDup, 0));
3068 } else {
3069 SDValue SuperReg = SDValue(VLdDup, 0);
3070 static_assert(ARM::dsub_7 == ARM::dsub_0 + 7, "Unexpected subreg numbering");
3071 unsigned SubIdx = is64BitVector ? ARM::dsub_0 : ARM::qsub_0;
3072 for (unsigned Vec = 0; Vec != NumVecs; ++Vec) {
3073 ReplaceUses(SDValue(N, Vec),
3074 CurDAG->getTargetExtractSubreg(SubIdx+Vec, dl, VT, SuperReg));
3075 }
3076 }
3077 ReplaceUses(SDValue(N, NumVecs), SDValue(VLdDup, 1));
3078 if (isUpdating)
3079 ReplaceUses(SDValue(N, NumVecs + 1), SDValue(VLdDup, 2));
3080 CurDAG->RemoveDeadNode(N);
3081 }
3082
3083 bool ARMDAGToDAGISel::tryInsertVectorElt(SDNode *N) {
3084 if (!Subtarget->hasMVEIntegerOps())
3085 return false;
3086
3087 SDLoc dl(N);
3088
3089 // We are trying to use VMOV/VMOVX/VINS to more efficiently lower inserts
3090 // and extracts of v8f16 and v8i16 vectors. Check that we have two adjacent
3091 // inserts of the correct type:
3092 SDValue Ins1 = SDValue(N, 0);
3093 SDValue Ins2 = N->getOperand(0);
3094 EVT VT = Ins1.getValueType();
3095 if (Ins2.getOpcode() != ISD::INSERT_VECTOR_ELT || !Ins2.hasOneUse() ||
3096 !isa<ConstantSDNode>(Ins1.getOperand(2)) ||
3097 !isa<ConstantSDNode>(Ins2.getOperand(2)) ||
3098 (VT != MVT::v8f16 && VT != MVT::v8i16) || (Ins2.getValueType() != VT))
3099 return false;
3100
3101 unsigned Lane1 = Ins1.getConstantOperandVal(2);
3102 unsigned Lane2 = Ins2.getConstantOperandVal(2);
3103 if (Lane2 % 2 != 0 || Lane1 != Lane2 + 1)
3104 return false;
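// Past this point the two inserted f16 lanes form one f32 lane: e.g.
// Lane2 == 2 and Lane1 == 3 together occupy the single f32 lane ssub_1
// (Lane2 / 2), so the pair can be moved as one f32 value.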
3105
3106 // If the inserted values can already use the T/B forms (for example
3107 // VCVTT/VCVTB), leave it to the existing tablegen patterns.
3108 SDValue Val1 = Ins1.getOperand(1);
3109 SDValue Val2 = Ins2.getOperand(1);
3110 if (Val1.getOpcode() == ISD::FP_ROUND || Val2.getOpcode() == ISD::FP_ROUND)
3111 return false;
3112
3113 // Check if the inserted values are both extracts.
3114 if ((Val1.getOpcode() == ISD::EXTRACT_VECTOR_ELT ||
3115 Val1.getOpcode() == ARMISD::VGETLANEu) &&
3116 (Val2.getOpcode() == ISD::EXTRACT_VECTOR_ELT ||
3117 Val2.getOpcode() == ARMISD::VGETLANEu) &&
3118 isa<ConstantSDNode>(Val1.getOperand(1)) &&
3119 isa<ConstantSDNode>(Val2.getOperand(1)) &&
3120 (Val1.getOperand(0).getValueType() == MVT::v8f16 ||
3121 Val1.getOperand(0).getValueType() == MVT::v8i16) &&
3122 (Val2.getOperand(0).getValueType() == MVT::v8f16 ||
3123 Val2.getOperand(0).getValueType() == MVT::v8i16)) {
3124 unsigned ExtractLane1 = Val1.getConstantOperandVal(1);
3125 unsigned ExtractLane2 = Val2.getConstantOperandVal(1);
3126
3127 // If the two extracted lanes are from the same place and adjacent, this
3128 // simplifies into an f32 lane move.
3129 if (Val1.getOperand(0) == Val2.getOperand(0) && ExtractLane2 % 2 == 0 &&
3130 ExtractLane1 == ExtractLane2 + 1) {
3131 SDValue NewExt = CurDAG->getTargetExtractSubreg(
3132 ARM::ssub_0 + ExtractLane2 / 2, dl, MVT::f32, Val1.getOperand(0));
3133 SDValue NewIns = CurDAG->getTargetInsertSubreg(
3134 ARM::ssub_0 + Lane2 / 2, dl, VT, Ins2.getOperand(0),
3135 NewExt);
3136 ReplaceUses(Ins1, NewIns);
3137 return true;
3138 }
3139
3140 // Otherwise this is a v8i16 pattern of an extract and an insert, with an
3141 // optional VMOVX for extracting odd lanes.
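// An even lane already sits in the bottom half of its containing f32; an odd
// lane sits in the top half, so it is first moved down with VMOVX (emitted
// here as ARM::VMOVH) before VINS places it into the destination's top half.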
3142 if (VT == MVT::v8i16 && Subtarget->hasFullFP16()) {
3143 SDValue Inp1 = CurDAG->getTargetExtractSubreg(
3144 ARM::ssub_0 + ExtractLane1 / 2, dl, MVT::f32, Val1.getOperand(0));
3145 SDValue Inp2 = CurDAG->getTargetExtractSubreg(
3146 ARM::ssub_0 + ExtractLane2 / 2, dl, MVT::f32, Val2.getOperand(0));
3147 if (ExtractLane1 % 2 != 0)
3148 Inp1 = SDValue(CurDAG->getMachineNode(ARM::VMOVH, dl, MVT::f32, Inp1), 0);
3149 if (ExtractLane2 % 2 != 0)
3150 Inp2 = SDValue(CurDAG->getMachineNode(ARM::VMOVH, dl, MVT::f32, Inp2), 0);
3151 SDNode *VINS = CurDAG->getMachineNode(ARM::VINSH, dl, MVT::f32, Inp2, Inp1);
3152 SDValue NewIns =
3153 CurDAG->getTargetInsertSubreg(ARM::ssub_0 + Lane2 / 2, dl, MVT::v4f32,
3154 Ins2.getOperand(0), SDValue(VINS, 0));
3155 ReplaceUses(Ins1, NewIns);
3156 return true;
3157 }
3158 }
3159
3160 // The inserted values are not extracted - if they are f16 then insert them
3161 // directly using a VINS.
3162 if (VT == MVT::v8f16 && Subtarget->hasFullFP16()) {
3163 SDNode *VINS = CurDAG->getMachineNode(ARM::VINSH, dl, MVT::f32, Val2, Val1);
3164 SDValue NewIns =
3165 CurDAG->getTargetInsertSubreg(ARM::ssub_0 + Lane2 / 2, dl, MVT::v4f32,
3166 Ins2.getOperand(0), SDValue(VINS, 0));
3167 ReplaceUses(Ins1, NewIns);
3168 return true;
3169 }
3170
3171 return false;
3172 }
3173
3174 bool ARMDAGToDAGISel::transformFixedFloatingPointConversion(SDNode *N,
3175 SDNode *FMul,
3176 bool IsUnsigned,
3177 bool FixedToFloat) {
3178 auto Type = N->getValueType(0);
3179 unsigned ScalarBits = Type.getScalarSizeInBits();
3180 if (ScalarBits > 32)
3181 return false;
3182
3183 SDNodeFlags FMulFlags = FMul->getFlags();
3184 // The fixed-point VCVT and VCVT+VMUL are not always equivalent if inf is
3185 // allowed in 16-bit unsigned floats.
3186 if (ScalarBits == 16 && !FMulFlags.hasNoInfs() && IsUnsigned)
3187 return false;
3188
3189 SDValue ImmNode = FMul->getOperand(1);
3190 SDValue VecVal = FMul->getOperand(0);
3191 if (VecVal->getOpcode() == ISD::UINT_TO_FP ||
3192 VecVal->getOpcode() == ISD::SINT_TO_FP)
3193 VecVal = VecVal->getOperand(0);
3194
3195 if (VecVal.getValueType().getScalarSizeInBits() != ScalarBits)
3196 return false;
3197
3198 if (ImmNode.getOpcode() == ISD::BITCAST) {
3199 if (ImmNode.getValueType().getScalarSizeInBits() != ScalarBits)
3200 return false;
3201 ImmNode = ImmNode.getOperand(0);
3202 }
3203
3204 if (ImmNode.getValueType().getScalarSizeInBits() != ScalarBits)
3205 return false;
3206
3207 APFloat ImmAPF(0.0f);
3208 switch (ImmNode.getOpcode()) {
3209 case ARMISD::VMOVIMM:
3210 case ARMISD::VDUP: {
3211 if (!isa<ConstantSDNode>(ImmNode.getOperand(0)))
3212 return false;
3213 unsigned Imm = ImmNode.getConstantOperandVal(0);
3214 if (ImmNode.getOpcode() == ARMISD::VMOVIMM)
3215 Imm = ARM_AM::decodeVMOVModImm(Imm, ScalarBits);
3216 ImmAPF =
3217 APFloat(ScalarBits == 32 ? APFloat::IEEEsingle() : APFloat::IEEEhalf(),
3218 APInt(ScalarBits, Imm));
3219 break;
3220 }
3221 case ARMISD::VMOVFPIMM: {
3222 ImmAPF = APFloat(ARM_AM::getFPImmFloat(ImmNode.getConstantOperandVal(0)));
3223 break;
3224 }
3225 default:
3226 return false;
3227 }
3228
3229 // Where n is the number of fractional bits, multiplying by 2^n will convert
3230 // from float to fixed and multiplying by 2^-n will convert from fixed to
3231 // float. Taking log2 of the factor (after taking the inverse in the case of
3232 // float to fixed) will give n.
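// Worked example (float to fixed): a multiplier of 16.0 == 2^4 means 4
// fractional bits. For fixed to float the multiplier would instead be 0.0625;
// its exact inverse, 16.0, converts to 16 and log2 gives the same FracBits = 4.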
3233 APFloat ToConvert = ImmAPF;
3234 if (FixedToFloat) {
3235 if (!ImmAPF.getExactInverse(&ToConvert))
3236 return false;
3237 }
3238 APSInt Converted(64, false);
3239 bool IsExact;
3240 ToConvert.convertToInteger(Converted, llvm::RoundingMode::NearestTiesToEven,
3241 &IsExact);
3242 if (!IsExact || !Converted.isPowerOf2())
3243 return false;
3244
3245 unsigned FracBits = Converted.logBase2();
3246 if (FracBits > ScalarBits)
3247 return false;
3248
3249 SmallVector<SDValue, 3> Ops{
3250 VecVal, CurDAG->getConstant(FracBits, SDLoc(N), MVT::i32)};
3251 AddEmptyMVEPredicateToOps(Ops, SDLoc(N), Type);
3252
3253 unsigned int Opcode;
3254 switch (ScalarBits) {
3255 case 16:
3256 if (FixedToFloat)
3257 Opcode = IsUnsigned ? ARM::MVE_VCVTf16u16_fix : ARM::MVE_VCVTf16s16_fix;
3258 else
3259 Opcode = IsUnsigned ? ARM::MVE_VCVTu16f16_fix : ARM::MVE_VCVTs16f16_fix;
3260 break;
3261 case 32:
3262 if (FixedToFloat)
3263 Opcode = IsUnsigned ? ARM::MVE_VCVTf32u32_fix : ARM::MVE_VCVTf32s32_fix;
3264 else
3265 Opcode = IsUnsigned ? ARM::MVE_VCVTu32f32_fix : ARM::MVE_VCVTs32f32_fix;
3266 break;
3267 default:
3268 llvm_unreachable("unexpected number of scalar bits");
3269 break;
3270 }
3271
3272 ReplaceNode(N, CurDAG->getMachineNode(Opcode, SDLoc(N), Type, Ops));
3273 return true;
3274 }
3275
3276 bool ARMDAGToDAGISel::tryFP_TO_INT(SDNode *N, SDLoc dl) {
3277 // Transform a floating-point to fixed-point conversion to a VCVT
3278 if (!Subtarget->hasMVEFloatOps())
3279 return false;
3280 EVT Type = N->getValueType(0);
3281 if (!Type.isVector())
3282 return false;
3283 unsigned int ScalarBits = Type.getScalarSizeInBits();
3284
3285 bool IsUnsigned = N->getOpcode() == ISD::FP_TO_UINT ||
3286 N->getOpcode() == ISD::FP_TO_UINT_SAT;
3287 SDNode *Node = N->getOperand(0).getNode();
3288
3289 // A floating-point to fixed-point conversion with one fractional bit gets
3290 // turned into an FP_TO_[U|S]INT(FADD (x, x)) rather than an FP_TO_[U|S]INT(FMUL (x, y)).
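// (x + x == 2.0 * x, i.e. a multiply by 2^1, hence exactly one fractional
// bit.)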
3291 if (Node->getOpcode() == ISD::FADD) {
3292 if (Node->getOperand(0) != Node->getOperand(1))
3293 return false;
3294 SDNodeFlags Flags = Node->getFlags();
3295 // The fixed-point VCVT and VCVT+VMUL are not always equivalent if inf is
3296 // allowed in 16-bit unsigned floats.
3297 if (ScalarBits == 16 && !Flags.hasNoInfs() && IsUnsigned)
3298 return false;
3299
3300 unsigned Opcode;
3301 switch (ScalarBits) {
3302 case 16:
3303 Opcode = IsUnsigned ? ARM::MVE_VCVTu16f16_fix : ARM::MVE_VCVTs16f16_fix;
3304 break;
3305 case 32:
3306 Opcode = IsUnsigned ? ARM::MVE_VCVTu32f32_fix : ARM::MVE_VCVTs32f32_fix;
3307 break;
default:
llvm_unreachable("unexpected number of scalar bits");
3308 }
3309 SmallVector<SDValue, 3> Ops{Node->getOperand(0),
3310 CurDAG->getConstant(1, dl, MVT::i32)};
3311 AddEmptyMVEPredicateToOps(Ops, dl, Type);
3312
3313 ReplaceNode(N, CurDAG->getMachineNode(Opcode, dl, Type, Ops));
3314 return true;
3315 }
3316
3317 if (Node->getOpcode() != ISD::FMUL)
3318 return false;
3319
3320 return transformFixedFloatingPointConversion(N, Node, IsUnsigned, false);
3321 }
3322
3323 bool ARMDAGToDAGISel::tryFMULFixed(SDNode *N, SDLoc dl) {
3324 // Transform a fixed-point to floating-point conversion to a VCVT
3325 if (!Subtarget->hasMVEFloatOps())
3326 return false;
3327 auto Type = N->getValueType(0);
3328 if (!Type.isVector())
3329 return false;
3330
3331 auto LHS = N->getOperand(0);
3332 if (LHS.getOpcode() != ISD::SINT_TO_FP && LHS.getOpcode() != ISD::UINT_TO_FP)
3333 return false;
3334
3335 return transformFixedFloatingPointConversion(
3336 N, N, LHS.getOpcode() == ISD::UINT_TO_FP, true);
3337 }
3338
3339 bool ARMDAGToDAGISel::tryV6T2BitfieldExtractOp(SDNode *N, bool isSigned) {
3340 if (!Subtarget->hasV6T2Ops())
3341 return false;
3342
3343 unsigned Opc = isSigned
3344 ? (Subtarget->isThumb() ? ARM::t2SBFX : ARM::SBFX)
3345 : (Subtarget->isThumb() ? ARM::t2UBFX : ARM::UBFX);
3346 SDLoc dl(N);
3347
3348 // For unsigned extracts, check for a shift right and mask
3349 unsigned And_imm = 0;
3350 if (N->getOpcode() == ISD::AND) {
3351 if (isOpcWithIntImmediate(N, ISD::AND, And_imm)) {
3352
3353 // The immediate is a mask of the low bits iff imm & (imm+1) == 0
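// E.g. 0xff: 0xff & 0x100 == 0, so it is a low-bit mask; 0xf0:
// 0xf0 & 0xf1 != 0, so it is rejected because the mask does not start at bit 0.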
3354 if (And_imm & (And_imm + 1))
3355 return false;
3356
3357 unsigned Srl_imm = 0;
3358 if (isOpcWithIntImmediate(N->getOperand(0).getNode(), ISD::SRL,
3359 Srl_imm)) {
3360 assert(Srl_imm > 0 && Srl_imm < 32 && "bad amount in shift node!");
3361
3362 // Mask off the unnecessary bits of the AND immediate; normally
3363 // DAGCombine will do this, but that might not happen if
3364 // targetShrinkDemandedConstant chooses a different immediate.
3365 And_imm &= -1U >> Srl_imm;
3366
3367 // Note: The width operand is encoded as width-1.
3368 unsigned Width = countTrailingOnes(And_imm) - 1;
3369 unsigned LSB = Srl_imm;
3370
3371 SDValue Reg0 = CurDAG->getRegister(0, MVT::i32);
3372
3373 if ((LSB + Width + 1) == N->getValueType(0).getSizeInBits()) {
3374 // It's cheaper to use a right shift to extract the top bits.
3375 if (Subtarget->isThumb()) {
3376 Opc = isSigned ? ARM::t2ASRri : ARM::t2LSRri;
3377 SDValue Ops[] = { N->getOperand(0).getOperand(0),
3378 CurDAG->getTargetConstant(LSB, dl, MVT::i32),
3379 getAL(CurDAG, dl), Reg0, Reg0 };
3380 CurDAG->SelectNodeTo(N, Opc, MVT::i32, Ops);
3381 return true;
3382 }
3383
3384 // ARM models shift instructions as MOVsi with shifter operand.
3385 ARM_AM::ShiftOpc ShOpcVal = ARM_AM::getShiftOpcForNode(ISD::SRL);
3386 SDValue ShOpc =
3387 CurDAG->getTargetConstant(ARM_AM::getSORegOpc(ShOpcVal, LSB), dl,
3388 MVT::i32);
3389 SDValue Ops[] = { N->getOperand(0).getOperand(0), ShOpc,
3390 getAL(CurDAG, dl), Reg0, Reg0 };
3391 CurDAG->SelectNodeTo(N, ARM::MOVsi, MVT::i32, Ops);
3392 return true;
3393 }
3394
3395 assert(LSB + Width + 1 <= 32 && "Shouldn't create an invalid ubfx");
3396 SDValue Ops[] = { N->getOperand(0).getOperand(0),
3397 CurDAG->getTargetConstant(LSB, dl, MVT::i32),
3398 CurDAG->getTargetConstant(Width, dl, MVT::i32),
3399 getAL(CurDAG, dl), Reg0 };
3400 CurDAG->SelectNodeTo(N, Opc, MVT::i32, Ops);
3401 return true;
3402 }
3403 }
3404 return false;
3405 }
3406
3407 // Otherwise, we're looking for a shift of a shift
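// E.g. (srl (shl x, #8), #24) keeps 8 bits starting at bit 16 of x, which
// becomes UBFX x, #16, #8 here: LSB = Srl_imm - Shl_imm = 16 and the encoded
// width is 32 - Srl_imm - 1 = 7, i.e. 8 bits.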
3408 unsigned Shl_imm = 0;
3409 if (isOpcWithIntImmediate(N->getOperand(0).getNode(), ISD::SHL, Shl_imm)) {
3410 assert(Shl_imm > 0 && Shl_imm < 32 && "bad amount in shift node!");
3411 unsigned Srl_imm = 0;
3412 if (isInt32Immediate(N->getOperand(1), Srl_imm)) {
3413 assert(Srl_imm > 0 && Srl_imm < 32 && "bad amount in shift node!");
3414 // Note: The width operand is encoded as width-1.
3415 unsigned Width = 32 - Srl_imm - 1;
3416 int LSB = Srl_imm - Shl_imm;
3417 if (LSB < 0)
3418 return false;
3419 SDValue Reg0 = CurDAG->getRegister(0, MVT::i32);
3420 assert(LSB + Width + 1 <= 32 && "Shouldn't create an invalid ubfx");
3421 SDValue Ops[] = { N->getOperand(0).getOperand(0),
3422 CurDAG->getTargetConstant(LSB, dl, MVT::i32),
3423 CurDAG->getTargetConstant(Width, dl, MVT::i32),
3424 getAL(CurDAG, dl), Reg0 };
3425 CurDAG->SelectNodeTo(N, Opc, MVT::i32, Ops);
3426 return true;
3427 }
3428 }
3429
3430 // Or we are looking for a shift of an AND with a mask operand.
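// E.g. (srl (and x, 0x0ff0), #4): the mask covers bits 11..4, so the LSB (4)
// matches the shift and the result is UBFX x, #4, #8 (MSB = 11, encoded
// Width = MSB - LSB = 7).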
3431 if (isOpcWithIntImmediate(N->getOperand(0).getNode(), ISD::AND, And_imm) &&
3432 isShiftedMask_32(And_imm)) {
3433 unsigned Srl_imm = 0;
3434 unsigned LSB = countTrailingZeros(And_imm);
3435 // The shift amount must equal the AND mask's least significant set bit.
3436 if (isInt32Immediate(N->getOperand(1), Srl_imm) && Srl_imm == LSB) {
3437 assert(Srl_imm > 0 && Srl_imm < 32 && "bad amount in shift node!");
3438 unsigned MSB = 31 - countLeadingZeros(And_imm);
3439 // Note: The width operand is encoded as width-1.
3440 unsigned Width = MSB - LSB;
3441 SDValue Reg0 = CurDAG->getRegister(0, MVT::i32);
3442 assert(Srl_imm + Width + 1 <= 32 && "Shouldn't create an invalid ubfx");
3443 SDValue Ops[] = { N->getOperand(0).getOperand(0),
3444 CurDAG->getTargetConstant(Srl_imm, dl, MVT::i32),
3445 CurDAG->getTargetConstant(Width, dl, MVT::i32),
3446 getAL(CurDAG, dl), Reg0 };
3447 CurDAG->SelectNodeTo(N, Opc, MVT::i32, Ops);
3448 return true;
3449 }
3450 }
3451
3452 if (N->getOpcode() == ISD::SIGN_EXTEND_INREG) {
3453 unsigned Width = cast<VTSDNode>(N->getOperand(1))->getVT().getSizeInBits();
3454 unsigned LSB = 0;
3455 if (!isOpcWithIntImmediate(N->getOperand(0).getNode(), ISD::SRL, LSB) &&
3456 !isOpcWithIntImmediate(N->getOperand(0).getNode(), ISD::SRA, LSB))
3457 return false;
3458
3459 if (LSB + Width > 32)
3460 return false;
3461
3462 SDValue Reg0 = CurDAG->getRegister(0, MVT::i32);
3463 assert(LSB + Width <= 32 && "Shouldn't create an invalid ubfx");
3464 SDValue Ops[] = { N->getOperand(0).getOperand(0),
3465 CurDAG->getTargetConstant(LSB, dl, MVT::i32),
3466 CurDAG->getTargetConstant(Width - 1, dl, MVT::i32),
3467 getAL(CurDAG, dl), Reg0 };
3468 CurDAG->SelectNodeTo(N, Opc, MVT::i32, Ops);
3469 return true;
3470 }
3471
3472 return false;
3473 }
3474
3475 /// Target-specific DAG combining for ISD::SUB.
3476 /// Target-independent combining lowers SELECT_CC nodes of the form
3477 /// select_cc setg[ge] X, 0, X, -X
3478 /// select_cc setgt X, -1, X, -X
3479 /// select_cc setl[te] X, 0, -X, X
3480 /// select_cc setlt X, 1, -X, X
3481 /// (which represent integer ABS) into:
3482 /// Y = sra (X, size(X)-1); sub (xor (X, Y), Y)
3483 /// ARM instruction selection detects the latter and matches it to
3484 /// the ARM::ABS or ARM::t2ABS machine node.
3485 bool ARMDAGToDAGISel::tryABSOp(SDNode *N) {
3486 SDValue SUBSrc0 = N->getOperand(0);
3487 SDValue SUBSrc1 = N->getOperand(1);
3488 EVT VT = N->getValueType(0);
3489
3490 if (Subtarget->isThumb1Only())
3491 return false;
3492
3493 if (SUBSrc0.getOpcode() != ISD::XOR || SUBSrc1.getOpcode() != ISD::SRA)
3494 return false;
3495
3496 SDValue XORSrc0 = SUBSrc0.getOperand(0);
3497 SDValue XORSrc1 = SUBSrc0.getOperand(1);
3498 SDValue SRASrc0 = SUBSrc1.getOperand(0);
3499 SDValue SRASrc1 = SUBSrc1.getOperand(1);
3500 ConstantSDNode *SRAConstant = dyn_cast<ConstantSDNode>(SRASrc1);
3501 EVT XType = SRASrc0.getValueType();
3502 unsigned Size = XType.getSizeInBits() - 1;
3503
3504 if (XORSrc1 == SUBSrc1 && XORSrc0 == SRASrc0 && XType.isInteger() &&
3505 SRAConstant != nullptr && Size == SRAConstant->getZExtValue()) {
3506 unsigned Opcode = Subtarget->isThumb2() ? ARM::t2ABS : ARM::ABS;
3507 CurDAG->SelectNodeTo(N, Opcode, VT, XORSrc0);
3508 return true;
3509 }
3510
3511 return false;
3512 }
3513
3514 /// We've got special pseudo-instructions for these
3515 void ARMDAGToDAGISel::SelectCMP_SWAP(SDNode *N) {
3516 unsigned Opcode;
3517 EVT MemTy = cast<MemSDNode>(N)->getMemoryVT();
3518 if (MemTy == MVT::i8)
3519 Opcode = Subtarget->isThumb() ? ARM::tCMP_SWAP_8 : ARM::CMP_SWAP_8;
3520 else if (MemTy == MVT::i16)
3521 Opcode = Subtarget->isThumb() ? ARM::tCMP_SWAP_16 : ARM::CMP_SWAP_16;
3522 else if (MemTy == MVT::i32)
3523 Opcode = Subtarget->isThumb() ? ARM::tCMP_SWAP_32 : ARM::CMP_SWAP_32;
3524 else
3525 llvm_unreachable("Unknown AtomicCmpSwap type");
3526
3527 SDValue Ops[] = {N->getOperand(1), N->getOperand(2), N->getOperand(3),
3528 N->getOperand(0)};
3529 SDNode *CmpSwap = CurDAG->getMachineNode(
3530 Opcode, SDLoc(N),
3531 CurDAG->getVTList(MVT::i32, MVT::i32, MVT::Other), Ops);
3532
3533 MachineMemOperand *MemOp = cast<MemSDNode>(N)->getMemOperand();
3534 CurDAG->setNodeMemRefs(cast<MachineSDNode>(CmpSwap), {MemOp});
3535
3536 ReplaceUses(SDValue(N, 0), SDValue(CmpSwap, 0));
3537 ReplaceUses(SDValue(N, 1), SDValue(CmpSwap, 2));
3538 CurDAG->RemoveDeadNode(N);
3539 }
3540
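// Returns the (most significant, least significant) bit indices of the
// contiguous run of set bits in A, or std::nullopt if the set bits are not
// contiguous. E.g. 0x0ff0 -> (11, 4), while 0x0f0f has two runs and fails.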
3541 static std::optional<std::pair<unsigned, unsigned>>
3542 getContiguousRangeOfSetBits(const APInt &A) {
3543 unsigned FirstOne = A.getBitWidth() - A.countLeadingZeros() - 1;
3544 unsigned LastOne = A.countTrailingZeros();
3545 if (A.countPopulation() != (FirstOne - LastOne + 1))
3546 return std::nullopt;
3547 return std::make_pair(FirstOne, LastOne);
3548 }
3549
3550 void ARMDAGToDAGISel::SelectCMPZ(SDNode *N, bool &SwitchEQNEToPLMI) {
3551 assert(N->getOpcode() == ARMISD::CMPZ);
3552 SwitchEQNEToPLMI = false;
3553
3554 if (!Subtarget->isThumb())
3555 // FIXME: Work out whether it is profitable to do this in A32 mode - LSL and
3556 // LSR don't exist as standalone instructions - they need the barrel shifter.
3557 return;
3558
3559 // select (cmpz (and X, C), #0) -> (LSLS X) or (LSRS X) or (LSRS (LSLS X))
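// Worked example: C = 0x3c (bits 5..2). On Thumb-1 this takes case 4 below,
// LSLS #26 followed by LSRS #28, which clears the top and bottom bits and
// sets the Z flag, avoiding an AND with a mask that needs materializing.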
3560 SDValue And = N->getOperand(0);
3561 if (!And->hasOneUse())
3562 return;
3563
3564 SDValue Zero = N->getOperand(1);
3565 if (!isa<ConstantSDNode>(Zero) || !cast<ConstantSDNode>(Zero)->isZero() ||
3566 And->getOpcode() != ISD::AND)
3567 return;
3568 SDValue X = And.getOperand(0);
3569 auto C = dyn_cast<ConstantSDNode>(And.getOperand(1));
3570
3571 if (!C)
3572 return;
3573 auto Range = getContiguousRangeOfSetBits(C->getAPIntValue());
3574 if (!Range)
3575 return;
3576
3577 // There are several ways to lower this:
3578 SDNode *NewN;
3579 SDLoc dl(N);
3580
3581 auto EmitShift = [&](unsigned Opc, SDValue Src, unsigned Imm) -> SDNode* {
3582 if (Subtarget->isThumb2()) {
3583 Opc = (Opc == ARM::tLSLri) ? ARM::t2LSLri : ARM::t2LSRri;
3584 SDValue Ops[] = { Src, CurDAG->getTargetConstant(Imm, dl, MVT::i32),
3585 getAL(CurDAG, dl), CurDAG->getRegister(0, MVT::i32),
3586 CurDAG->getRegister(0, MVT::i32) };
3587 return CurDAG->getMachineNode(Opc, dl, MVT::i32, Ops);
3588 } else {
3589 SDValue Ops[] = {CurDAG->getRegister(ARM::CPSR, MVT::i32), Src,
3590 CurDAG->getTargetConstant(Imm, dl, MVT::i32),
3591 getAL(CurDAG, dl), CurDAG->getRegister(0, MVT::i32)};
3592 return CurDAG->getMachineNode(Opc, dl, MVT::i32, Ops);
3593 }
3594 };
3595
3596 if (Range->second == 0) {
3597 // 1. Mask includes the LSB -> Simply shift the top N bits off
3598 NewN = EmitShift(ARM::tLSLri, X, 31 - Range->first);
3599 ReplaceNode(And.getNode(), NewN);
3600 } else if (Range->first == 31) {
3601 // 2. Mask includes the MSB -> Simply shift the bottom N bits off
3602 NewN = EmitShift(ARM::tLSRri, X, Range->second);
3603 ReplaceNode(And.getNode(), NewN);
3604 } else if (Range->first == Range->second) {
3605 // 3. Only one bit is set. We can shift this into the sign bit and use a
3606 // PL/MI comparison.
3607 NewN = EmitShift(ARM::tLSLri, X, 31 - Range->first);
3608 ReplaceNode(And.getNode(), NewN);
3609
3610 SwitchEQNEToPLMI = true;
3611 } else if (!Subtarget->hasV6T2Ops()) {
3612 // 4. Do a double shift to clear bottom and top bits, but only in
3613 // thumb-1 mode as in thumb-2 we can use UBFX.
3614 NewN = EmitShift(ARM::tLSLri, X, 31 - Range->first);
3615 NewN = EmitShift(ARM::tLSRri, SDValue(NewN, 0),
3616 Range->second + (31 - Range->first));
3617 ReplaceNode(And.getNode(), NewN);
3618 }
3619 }
3620
3621 static unsigned getVectorShuffleOpcode(EVT VT, unsigned Opc64[3],
3622 unsigned Opc128[3]) {
3623 assert((VT.is64BitVector() || VT.is128BitVector()) &&
3624 "Unexpected vector shuffle length");
3625 switch (VT.getScalarSizeInBits()) {
3626 default:
3627 llvm_unreachable("Unexpected vector shuffle element size");
3628 case 8:
3629 return VT.is64BitVector() ? Opc64[0] : Opc128[0];
3630 case 16:
3631 return VT.is64BitVector() ? Opc64[1] : Opc128[1];
3632 case 32:
3633 return VT.is64BitVector() ? Opc64[2] : Opc128[2];
3634 }
3635 }
3636
3637 void ARMDAGToDAGISel::Select(SDNode *N) {
3638 SDLoc dl(N);
3639
3640 if (N->isMachineOpcode()) {
3641 N->setNodeId(-1);
3642 return; // Already selected.
3643 }
3644
3645 switch (N->getOpcode()) {
3646 default: break;
3647 case ISD::STORE: {
3648 // For Thumb1, match an sp-relative store in C++. This is a little
3649 // unfortunate, but I don't think I can make the chain check work
3650 // otherwise. (The chain of the store has to be the same as the chain
3651 // of the CopyFromReg, or else we can't replace the CopyFromReg with
3652 // a direct reference to "SP".)
3653 //
3654 // This is only necessary on Thumb1 because Thumb1 sp-relative stores use
3655 // a different addressing mode from other four-byte stores.
3656 //
3657 // This pattern usually comes up with call arguments.
3658 StoreSDNode *ST = cast<StoreSDNode>(N);
3659 SDValue Ptr = ST->getBasePtr();
3660 if (Subtarget->isThumb1Only() && ST->isUnindexed()) {
3661 int RHSC = 0;
3662 if (Ptr.getOpcode() == ISD::ADD &&
3663 isScaledConstantInRange(Ptr.getOperand(1), /*Scale=*/4, 0, 256, RHSC))
3664 Ptr = Ptr.getOperand(0);
3665
3666 if (Ptr.getOpcode() == ISD::CopyFromReg &&
3667 cast<RegisterSDNode>(Ptr.getOperand(1))->getReg() == ARM::SP &&
3668 Ptr.getOperand(0) == ST->getChain()) {
3669 SDValue Ops[] = {ST->getValue(),
3670 CurDAG->getRegister(ARM::SP, MVT::i32),
3671 CurDAG->getTargetConstant(RHSC, dl, MVT::i32),
3672 getAL(CurDAG, dl),
3673 CurDAG->getRegister(0, MVT::i32),
3674 ST->getChain()};
3675 MachineSDNode *ResNode =
3676 CurDAG->getMachineNode(ARM::tSTRspi, dl, MVT::Other, Ops);
3677 MachineMemOperand *MemOp = ST->getMemOperand();
3678 CurDAG->setNodeMemRefs(cast<MachineSDNode>(ResNode), {MemOp});
3679 ReplaceNode(N, ResNode);
3680 return;
3681 }
3682 }
3683 break;
3684 }
3685 case ISD::WRITE_REGISTER:
3686 if (tryWriteRegister(N))
3687 return;
3688 break;
3689 case ISD::READ_REGISTER:
3690 if (tryReadRegister(N))
3691 return;
3692 break;
3693 case ISD::INLINEASM:
3694 case ISD::INLINEASM_BR:
3695 if (tryInlineAsm(N))
3696 return;
3697 break;
3698 case ISD::SUB:
3699 // Select special operations if SUB node forms integer ABS pattern
3700 if (tryABSOp(N))
3701 return;
3702 // Other cases are autogenerated.
3703 break;
3704 case ISD::Constant: {
3705 unsigned Val = cast<ConstantSDNode>(N)->getZExtValue();
3706 // If we can't materialize the constant, we need to use a literal pool.
3707 if (ConstantMaterializationCost(Val, Subtarget) > 2) {
3708 SDValue CPIdx = CurDAG->getTargetConstantPool(
3709 ConstantInt::get(Type::getInt32Ty(*CurDAG->getContext()), Val),
3710 TLI->getPointerTy(CurDAG->getDataLayout()));
3711
3712 SDNode *ResNode;
3713 if (Subtarget->isThumb()) {
3714 SDValue Ops[] = {
3715 CPIdx,
3716 getAL(CurDAG, dl),
3717 CurDAG->getRegister(0, MVT::i32),
3718 CurDAG->getEntryNode()
3719 };
3720 ResNode = CurDAG->getMachineNode(ARM::tLDRpci, dl, MVT::i32, MVT::Other,
3721 Ops);
3722 } else {
3723 SDValue Ops[] = {
3724 CPIdx,
3725 CurDAG->getTargetConstant(0, dl, MVT::i32),
3726 getAL(CurDAG, dl),
3727 CurDAG->getRegister(0, MVT::i32),
3728 CurDAG->getEntryNode()
3729 };
3730 ResNode = CurDAG->getMachineNode(ARM::LDRcp, dl, MVT::i32, MVT::Other,
3731 Ops);
3732 }
3733 // Annotate the Node with memory operand information so that MachineInstr
3734 // queries work properly. This e.g. gives the register allocation the
3735 // required information for rematerialization.
3736 MachineFunction& MF = CurDAG->getMachineFunction();
3737 MachineMemOperand *MemOp =
3738 MF.getMachineMemOperand(MachinePointerInfo::getConstantPool(MF),
3739 MachineMemOperand::MOLoad, 4, Align(4));
3740
3741 CurDAG->setNodeMemRefs(cast<MachineSDNode>(ResNode), {MemOp});
3742
3743 ReplaceNode(N, ResNode);
3744 return;
3745 }
3746
3747 // Other cases are autogenerated.
3748 break;
3749 }
3750 case ISD::FrameIndex: {
3751 // Selects to ADDri FI, 0 which in turn will become ADDri SP, imm.
3752 int FI = cast<FrameIndexSDNode>(N)->getIndex();
3753 SDValue TFI = CurDAG->getTargetFrameIndex(
3754 FI, TLI->getPointerTy(CurDAG->getDataLayout()));
3755 if (Subtarget->isThumb1Only()) {
3756 // Set the alignment of the frame object to 4, to avoid having to generate
3757 // more than one ADD
3758 MachineFrameInfo &MFI = MF->getFrameInfo();
3759 if (MFI.getObjectAlign(FI) < Align(4))
3760 MFI.setObjectAlignment(FI, Align(4));
3761 CurDAG->SelectNodeTo(N, ARM::tADDframe, MVT::i32, TFI,
3762 CurDAG->getTargetConstant(0, dl, MVT::i32));
3763 return;
3764 } else {
3765 unsigned Opc = ((Subtarget->isThumb() && Subtarget->hasThumb2()) ?
3766 ARM::t2ADDri : ARM::ADDri);
3767 SDValue Ops[] = { TFI, CurDAG->getTargetConstant(0, dl, MVT::i32),
3768 getAL(CurDAG, dl), CurDAG->getRegister(0, MVT::i32),
3769 CurDAG->getRegister(0, MVT::i32) };
3770 CurDAG->SelectNodeTo(N, Opc, MVT::i32, Ops);
3771 return;
3772 }
3773 }
3774 case ISD::INSERT_VECTOR_ELT: {
3775 if (tryInsertVectorElt(N))
3776 return;
3777 break;
3778 }
3779 case ISD::SRL:
3780 if (tryV6T2BitfieldExtractOp(N, false))
3781 return;
3782 break;
3783 case ISD::SIGN_EXTEND_INREG:
3784 case ISD::SRA:
3785 if (tryV6T2BitfieldExtractOp(N, true))
3786 return;
3787 break;
3788 case ISD::FP_TO_UINT:
3789 case ISD::FP_TO_SINT:
3790 case ISD::FP_TO_UINT_SAT:
3791 case ISD::FP_TO_SINT_SAT:
3792 if (tryFP_TO_INT(N, dl))
3793 return;
3794 break;
3795 case ISD::FMUL:
3796 if (tryFMULFixed(N, dl))
3797 return;
3798 break;
3799 case ISD::MUL:
3800 if (Subtarget->isThumb1Only())
3801 break;
3802 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(1))) {
3803 unsigned RHSV = C->getZExtValue();
3804 if (!RHSV) break;
3805 if (isPowerOf2_32(RHSV-1)) { // 2^n+1?
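// E.g. mul r0, r1, #9 becomes add r0, r1, r1, lsl #3 (9 * x == x + 8 * x).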
3806 unsigned ShImm = Log2_32(RHSV-1);
3807 if (ShImm >= 32)
3808 break;
3809 SDValue V = N->getOperand(0);
3810 ShImm = ARM_AM::getSORegOpc(ARM_AM::lsl, ShImm);
3811 SDValue ShImmOp = CurDAG->getTargetConstant(ShImm, dl, MVT::i32);
3812 SDValue Reg0 = CurDAG->getRegister(0, MVT::i32);
3813 if (Subtarget->isThumb()) {
3814 SDValue Ops[] = { V, V, ShImmOp, getAL(CurDAG, dl), Reg0, Reg0 };
3815 CurDAG->SelectNodeTo(N, ARM::t2ADDrs, MVT::i32, Ops);
3816 return;
3817 } else {
3818 SDValue Ops[] = { V, V, Reg0, ShImmOp, getAL(CurDAG, dl), Reg0,
3819 Reg0 };
3820 CurDAG->SelectNodeTo(N, ARM::ADDrsi, MVT::i32, Ops);
3821 return;
3822 }
3823 }
3824 if (isPowerOf2_32(RHSV+1)) { // 2^n-1?
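// E.g. mul r0, r1, #7 becomes rsb r0, r1, r1, lsl #3 (7 * x == 8 * x - x).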
3825 unsigned ShImm = Log2_32(RHSV+1);
3826 if (ShImm >= 32)
3827 break;
3828 SDValue V = N->getOperand(0);
3829 ShImm = ARM_AM::getSORegOpc(ARM_AM::lsl, ShImm);
3830 SDValue ShImmOp = CurDAG->getTargetConstant(ShImm, dl, MVT::i32);
3831 SDValue Reg0 = CurDAG->getRegister(0, MVT::i32);
3832 if (Subtarget->isThumb()) {
3833 SDValue Ops[] = { V, V, ShImmOp, getAL(CurDAG, dl), Reg0, Reg0 };
3834 CurDAG->SelectNodeTo(N, ARM::t2RSBrs, MVT::i32, Ops);
3835 return;
3836 } else {
3837 SDValue Ops[] = { V, V, Reg0, ShImmOp, getAL(CurDAG, dl), Reg0,
3838 Reg0 };
3839 CurDAG->SelectNodeTo(N, ARM::RSBrsi, MVT::i32, Ops);
3840 return;
3841 }
3842 }
3843 }
3844 break;
3845 case ISD::AND: {
3846 // Check for unsigned bitfield extract
3847 if (tryV6T2BitfieldExtractOp(N, false))
3848 return;
3849
3850 // If an immediate is used in an AND node, it is possible that the immediate
3851 // can be more optimally materialized when negated. If this is the case, we
3852 // can negate the immediate and use a BIC instead.
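// E.g. for (and r0, #0xffffff00) on Thumb-1, materializing 0xffffff00 takes
// MOVS+MVNS, while ~0xffffff00 == 0xff fits a single MOVS, so a MOVS of 0xff
// into a scratch register followed by BICS is cheaper overall.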
3853 auto *N1C = dyn_cast<ConstantSDNode>(N->getOperand(1));
3854 if (N1C && N1C->hasOneUse() && Subtarget->isThumb()) {
3855 uint32_t Imm = (uint32_t) N1C->getZExtValue();
3856
3857 // In Thumb2 mode, an AND can take a 12-bit immediate. If this
3858 // immediate can be negated and fit in the immediate operand of
3859 // a t2BIC, don't do any manual transform here as this can be
3860 // handled by the generic ISel machinery.
3861 bool PreferImmediateEncoding =
3862 Subtarget->hasThumb2() && (is_t2_so_imm(Imm) || is_t2_so_imm_not(Imm));
3863 if (!PreferImmediateEncoding &&
3864 ConstantMaterializationCost(Imm, Subtarget) >
3865 ConstantMaterializationCost(~Imm, Subtarget)) {
3866 // The current immediate costs more to materialize than a negated
3867 // immediate, so negate the immediate and use a BIC.
3868 SDValue NewImm =
3869 CurDAG->getConstant(~N1C->getZExtValue(), dl, MVT::i32);
3870 // If the new constant didn't exist before, reposition it in the topological
3871 // ordering so it is just before N. Otherwise, don't touch its location.
3872 if (NewImm->getNodeId() == -1)
3873 CurDAG->RepositionNode(N->getIterator(), NewImm.getNode());
3874
3875 if (!Subtarget->hasThumb2()) {
3876 SDValue Ops[] = {CurDAG->getRegister(ARM::CPSR, MVT::i32),
3877 N->getOperand(0), NewImm, getAL(CurDAG, dl),
3878 CurDAG->getRegister(0, MVT::i32)};
3879 ReplaceNode(N, CurDAG->getMachineNode(ARM::tBIC, dl, MVT::i32, Ops));
3880 return;
3881 } else {
3882 SDValue Ops[] = {N->getOperand(0), NewImm, getAL(CurDAG, dl),
3883 CurDAG->getRegister(0, MVT::i32),
3884 CurDAG->getRegister(0, MVT::i32)};
3885 ReplaceNode(N,
3886 CurDAG->getMachineNode(ARM::t2BICrr, dl, MVT::i32, Ops));
3887 return;
3888 }
3889 }
3890 }
3891
3892 // (and (or x, c2), c1): if the top 16 bits of c1 and c2 match, the lower 16
3893 // bits of c1 are 0xffff, and the lower 16 bits of c2 are 0, then the top
3894 // 16 bits of the result are entirely contributed by c2 and the lower 16 bits
3895 // entirely by x. That is equal to (or (and x, 0xffff), (and c1, 0xffff0000)).
3896 // Select it to: movt x, ((c2 & 0xffff0000) >> 16)
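// E.g. c1 = 0x1234ffff, c2 = 0x12340000: (x | c2) & c1 == (x & 0xffff) |
// 0x12340000, which is exactly MOVT x, #0x1234.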
3897 EVT VT = N->getValueType(0);
3898 if (VT != MVT::i32)
3899 break;
3900 unsigned Opc = (Subtarget->isThumb() && Subtarget->hasThumb2())
3901 ? ARM::t2MOVTi16
3902 : (Subtarget->hasV6T2Ops() ? ARM::MOVTi16 : 0);
3903 if (!Opc)
3904 break;
3905 SDValue N0 = N->getOperand(0), N1 = N->getOperand(1);
3906 N1C = dyn_cast<ConstantSDNode>(N1);
3907 if (!N1C)
3908 break;
3909 if (N0.getOpcode() == ISD::OR && N0.getNode()->hasOneUse()) {
3910 SDValue N2 = N0.getOperand(1);
3911 ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(N2);
3912 if (!N2C)
3913 break;
3914 unsigned N1CVal = N1C->getZExtValue();
3915 unsigned N2CVal = N2C->getZExtValue();
3916 if ((N1CVal & 0xffff0000U) == (N2CVal & 0xffff0000U) &&
3917 (N1CVal & 0xffffU) == 0xffffU &&
3918 (N2CVal & 0xffffU) == 0x0U) {
3919 SDValue Imm16 = CurDAG->getTargetConstant((N2CVal & 0xFFFF0000U) >> 16,
3920 dl, MVT::i32);
3921 SDValue Ops[] = { N0.getOperand(0), Imm16,
3922 getAL(CurDAG, dl), CurDAG->getRegister(0, MVT::i32) };
3923 ReplaceNode(N, CurDAG->getMachineNode(Opc, dl, VT, Ops));
3924 return;
3925 }
3926 }
3927
3928 break;
3929 }
3930 case ARMISD::UMAAL: {
3931 unsigned Opc = Subtarget->isThumb() ? ARM::t2UMAAL : ARM::UMAAL;
3932 SDValue Ops[] = { N->getOperand(0), N->getOperand(1),
3933 N->getOperand(2), N->getOperand(3),
3934 getAL(CurDAG, dl),
3935 CurDAG->getRegister(0, MVT::i32) };
3936 ReplaceNode(N, CurDAG->getMachineNode(Opc, dl, MVT::i32, MVT::i32, Ops));
3937 return;
3938 }
3939 case ARMISD::UMLAL: {
3940 if (Subtarget->isThumb()) {
3941 SDValue Ops[] = { N->getOperand(0), N->getOperand(1), N->getOperand(2),
3942 N->getOperand(3), getAL(CurDAG, dl),
3943 CurDAG->getRegister(0, MVT::i32)};
3944 ReplaceNode(
3945 N, CurDAG->getMachineNode(ARM::t2UMLAL, dl, MVT::i32, MVT::i32, Ops));
3946 return;
3947 } else {
3948 SDValue Ops[] = { N->getOperand(0), N->getOperand(1), N->getOperand(2),
3949 N->getOperand(3), getAL(CurDAG, dl),
3950 CurDAG->getRegister(0, MVT::i32),
3951 CurDAG->getRegister(0, MVT::i32) };
3952 ReplaceNode(N, CurDAG->getMachineNode(
3953 Subtarget->hasV6Ops() ? ARM::UMLAL : ARM::UMLALv5, dl,
3954 MVT::i32, MVT::i32, Ops));
3955 return;
3956 }
3957 }
3958 case ARMISD::SMLAL: {
3959 if (Subtarget->isThumb()) {
3960 SDValue Ops[] = { N->getOperand(0), N->getOperand(1), N->getOperand(2),
3961 N->getOperand(3), getAL(CurDAG, dl),
3962 CurDAG->getRegister(0, MVT::i32)};
3963 ReplaceNode(
3964 N, CurDAG->getMachineNode(ARM::t2SMLAL, dl, MVT::i32, MVT::i32, Ops));
3965 return;
3966 } else {
3967 SDValue Ops[] = { N->getOperand(0), N->getOperand(1), N->getOperand(2),
3968 N->getOperand(3), getAL(CurDAG, dl),
3969 CurDAG->getRegister(0, MVT::i32),
3970 CurDAG->getRegister(0, MVT::i32) };
3971 ReplaceNode(N, CurDAG->getMachineNode(
3972 Subtarget->hasV6Ops() ? ARM::SMLAL : ARM::SMLALv5, dl,
3973 MVT::i32, MVT::i32, Ops));
3974 return;
3975 }
3976 }
3977 case ARMISD::SUBE: {
3978 if (!Subtarget->hasV6Ops() || !Subtarget->hasDSP())
3979 break;
3980 // Look for a pattern to match SMMLS
3981 // (sube a, (smul_loHi a, b), (subc 0, (smul_LOhi(a, b))))
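// SMMLS computes (Ra << 32 - Rn * Rm)[63:32]: the SUBC of the low product
// half from zero produces exactly the borrow that the SUBE then folds into
// the subtraction of the high half, so the pair matches SMMLS's semantics.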
3982 if (N->getOperand(1).getOpcode() != ISD::SMUL_LOHI ||
3983 N->getOperand(2).getOpcode() != ARMISD::SUBC ||
3984 !SDValue(N, 1).use_empty())
3985 break;
3986
3987 if (Subtarget->isThumb())
3988 assert(Subtarget->hasThumb2() &&
3989 "This pattern should not be generated for Thumb");
3990
3991 SDValue SmulLoHi = N->getOperand(1);
3992 SDValue Subc = N->getOperand(2);
3993 auto *Zero = dyn_cast<ConstantSDNode>(Subc.getOperand(0));
3994
3995 if (!Zero || Zero->getZExtValue() != 0 ||
3996 Subc.getOperand(1) != SmulLoHi.getValue(0) ||
3997 N->getOperand(1) != SmulLoHi.getValue(1) ||
3998 N->getOperand(2) != Subc.getValue(1))
3999 break;
4000
4001 unsigned Opc = Subtarget->isThumb2() ? ARM::t2SMMLS : ARM::SMMLS;
4002 SDValue Ops[] = { SmulLoHi.getOperand(0), SmulLoHi.getOperand(1),
4003 N->getOperand(0), getAL(CurDAG, dl),
4004 CurDAG->getRegister(0, MVT::i32) };
4005 ReplaceNode(N, CurDAG->getMachineNode(Opc, dl, MVT::i32, Ops));
4006 return;
4007 }
4008 case ISD::LOAD: {
4009 if (Subtarget->hasMVEIntegerOps() && tryMVEIndexedLoad(N))
4010 return;
4011 if (Subtarget->isThumb() && Subtarget->hasThumb2()) {
4012 if (tryT2IndexedLoad(N))
4013 return;
4014 } else if (Subtarget->isThumb()) {
4015 if (tryT1IndexedLoad(N))
4016 return;
4017 } else if (tryARMIndexedLoad(N))
4018 return;
4019 // Other cases are autogenerated.
4020 break;
4021 }
4022 case ISD::MLOAD:
4023 if (Subtarget->hasMVEIntegerOps() && tryMVEIndexedLoad(N))
4024 return;
4025 // Other cases are autogenerated.
4026 break;
4027 case ARMISD::WLSSETUP: {
4028 SDNode *New = CurDAG->getMachineNode(ARM::t2WhileLoopSetup, dl, MVT::i32,
4029 N->getOperand(0));
4030 ReplaceUses(N, New);
4031 CurDAG->RemoveDeadNode(N);
4032 return;
4033 }
4034 case ARMISD::WLS: {
4035 SDNode *New = CurDAG->getMachineNode(ARM::t2WhileLoopStart, dl, MVT::Other,
4036 N->getOperand(1), N->getOperand(2),
4037 N->getOperand(0));
4038 ReplaceUses(N, New);
4039 CurDAG->RemoveDeadNode(N);
4040 return;
4041 }
4042 case ARMISD::LE: {
4043 SDValue Ops[] = { N->getOperand(1),
4044 N->getOperand(2),
4045 N->getOperand(0) };
4046 unsigned Opc = ARM::t2LoopEnd;
4047 SDNode *New = CurDAG->getMachineNode(Opc, dl, MVT::Other, Ops);
4048 ReplaceUses(N, New);
4049 CurDAG->RemoveDeadNode(N);
4050 return;
4051 }
4052 case ARMISD::LDRD: {
4053 if (Subtarget->isThumb2())
4054 break; // TableGen handles isel in this case.
4055 SDValue Base, RegOffset, ImmOffset;
4056 const SDValue &Chain = N->getOperand(0);
4057 const SDValue &Addr = N->getOperand(1);
4058 SelectAddrMode3(Addr, Base, RegOffset, ImmOffset);
4059 if (RegOffset != CurDAG->getRegister(0, MVT::i32)) {
4060 // The register-offset variant of LDRD mandates that the register
4061 // allocated to RegOffset is not reused in any of the remaining operands.
4062 // This restriction is currently not enforced. Therefore emitting this
4063 // variant is explicitly avoided.
4064 Base = Addr;
4065 RegOffset = CurDAG->getRegister(0, MVT::i32);
4066 }
4067 SDValue Ops[] = {Base, RegOffset, ImmOffset, Chain};
4068 SDNode *New = CurDAG->getMachineNode(ARM::LOADDUAL, dl,
4069 {MVT::Untyped, MVT::Other}, Ops);
4070 SDValue Lo = CurDAG->getTargetExtractSubreg(ARM::gsub_0, dl, MVT::i32,
4071 SDValue(New, 0));
4072 SDValue Hi = CurDAG->getTargetExtractSubreg(ARM::gsub_1, dl, MVT::i32,
4073 SDValue(New, 0));
4074 transferMemOperands(N, New);
4075 ReplaceUses(SDValue(N, 0), Lo);
4076 ReplaceUses(SDValue(N, 1), Hi);
4077 ReplaceUses(SDValue(N, 2), SDValue(New, 1));
4078 CurDAG->RemoveDeadNode(N);
4079 return;
4080 }
4081 case ARMISD::STRD: {
4082 if (Subtarget->isThumb2())
4083 break; // TableGen handles isel in this case.
4084 SDValue Base, RegOffset, ImmOffset;
4085 const SDValue &Chain = N->getOperand(0);
4086 const SDValue &Addr = N->getOperand(3);
4087 SelectAddrMode3(Addr, Base, RegOffset, ImmOffset);
4088 if (RegOffset != CurDAG->getRegister(0, MVT::i32)) {
4089 // The register-offset variant of STRD mandates that the register
4090 // allocated to RegOffset is not reused in any of the remaining operands.
4091 // This restriction is currently not enforced. Therefore emitting this
4092 // variant is explicitly avoided.
4093 Base = Addr;
4094 RegOffset = CurDAG->getRegister(0, MVT::i32);
4095 }
4096 SDNode *RegPair =
4097 createGPRPairNode(MVT::Untyped, N->getOperand(1), N->getOperand(2));
4098 SDValue Ops[] = {SDValue(RegPair, 0), Base, RegOffset, ImmOffset, Chain};
4099 SDNode *New = CurDAG->getMachineNode(ARM::STOREDUAL, dl, MVT::Other, Ops);
4100 transferMemOperands(N, New);
4101 ReplaceUses(SDValue(N, 0), SDValue(New, 0));
4102 CurDAG->RemoveDeadNode(N);
4103 return;
4104 }
4105 case ARMISD::LOOP_DEC: {
4106 SDValue Ops[] = { N->getOperand(1),
4107 N->getOperand(2),
4108 N->getOperand(0) };
4109 SDNode *Dec =
4110 CurDAG->getMachineNode(ARM::t2LoopDec, dl,
4111 CurDAG->getVTList(MVT::i32, MVT::Other), Ops);
4112 ReplaceUses(N, Dec);
4113 CurDAG->RemoveDeadNode(N);
4114 return;
4115 }
4116 case ARMISD::BRCOND: {
4117 // Pattern: (ARMbrcond:void (bb:Other):$dst, (imm:i32):$cc)
4118 // Emits: (Bcc:void (bb:Other):$dst, (imm:i32):$cc)
4119 // Pattern complexity = 6 cost = 1 size = 0
4120
4121 // Pattern: (ARMbrcond:void (bb:Other):$dst, (imm:i32):$cc)
4122 // Emits: (tBcc:void (bb:Other):$dst, (imm:i32):$cc)
4123 // Pattern complexity = 6 cost = 1 size = 0
4124
4125 // Pattern: (ARMbrcond:void (bb:Other):$dst, (imm:i32):$cc)
4126 // Emits: (t2Bcc:void (bb:Other):$dst, (imm:i32):$cc)
4127 // Pattern complexity = 6 cost = 1 size = 0
4128
4129 unsigned Opc = Subtarget->isThumb() ?
4130 ((Subtarget->hasThumb2()) ? ARM::t2Bcc : ARM::tBcc) : ARM::Bcc;
4131 SDValue Chain = N->getOperand(0);
4132 SDValue N1 = N->getOperand(1);
4133 SDValue N2 = N->getOperand(2);
4134 SDValue N3 = N->getOperand(3);
4135 SDValue InFlag = N->getOperand(4);
4136 assert(N1.getOpcode() == ISD::BasicBlock);
4137 assert(N2.getOpcode() == ISD::Constant);
4138 assert(N3.getOpcode() == ISD::Register);
4139
4140 unsigned CC = (unsigned) cast<ConstantSDNode>(N2)->getZExtValue();
4141
4142 if (InFlag.getOpcode() == ARMISD::CMPZ) {
4143 if (InFlag.getOperand(0).getOpcode() == ISD::INTRINSIC_W_CHAIN) {
4144 SDValue Int = InFlag.getOperand(0);
4145 uint64_t ID = cast<ConstantSDNode>(Int->getOperand(1))->getZExtValue();
4146
4147 // Handle low-overhead loops.
4148 if (ID == Intrinsic::loop_decrement_reg) {
4149 SDValue Elements = Int.getOperand(2);
4150 SDValue Size = CurDAG->getTargetConstant(
4151 cast<ConstantSDNode>(Int.getOperand(3))->getZExtValue(), dl,
4152 MVT::i32);
4153
4154 SDValue Args[] = { Elements, Size, Int.getOperand(0) };
4155 SDNode *LoopDec =
4156 CurDAG->getMachineNode(ARM::t2LoopDec, dl,
4157 CurDAG->getVTList(MVT::i32, MVT::Other),
4158 Args);
4159 ReplaceUses(Int.getNode(), LoopDec);
4160
4161 SDValue EndArgs[] = { SDValue(LoopDec, 0), N1, Chain };
4162 SDNode *LoopEnd =
4163 CurDAG->getMachineNode(ARM::t2LoopEnd, dl, MVT::Other, EndArgs);
4164
4165 ReplaceUses(N, LoopEnd);
4166 CurDAG->RemoveDeadNode(N);
4167 CurDAG->RemoveDeadNode(InFlag.getNode());
4168 CurDAG->RemoveDeadNode(Int.getNode());
4169 return;
4170 }
4171 }
4172
4173 bool SwitchEQNEToPLMI;
4174 SelectCMPZ(InFlag.getNode(), SwitchEQNEToPLMI);
4175 InFlag = N->getOperand(4);
4176
4177 if (SwitchEQNEToPLMI) {
4178 switch ((ARMCC::CondCodes)CC) {
4179 default: llvm_unreachable("CMPZ must be either NE or EQ!");
4180 case ARMCC::NE:
4181 CC = (unsigned)ARMCC::MI;
4182 break;
4183 case ARMCC::EQ:
4184 CC = (unsigned)ARMCC::PL;
4185 break;
4186 }
4187 }
4188 }
4189
4190 SDValue Tmp2 = CurDAG->getTargetConstant(CC, dl, MVT::i32);
4191 SDValue Ops[] = { N1, Tmp2, N3, Chain, InFlag };
4192 SDNode *ResNode = CurDAG->getMachineNode(Opc, dl, MVT::Other,
4193 MVT::Glue, Ops);
4194 Chain = SDValue(ResNode, 0);
4195 if (N->getNumValues() == 2) {
4196 InFlag = SDValue(ResNode, 1);
4197 ReplaceUses(SDValue(N, 1), InFlag);
4198 }
4199 ReplaceUses(SDValue(N, 0),
4200 SDValue(Chain.getNode(), Chain.getResNo()));
4201 CurDAG->RemoveDeadNode(N);
4202 return;
4203 }
4204
4205 case ARMISD::CMPZ: {
4206 // select (CMPZ X, #-C) -> (CMPZ (ADDS X, #C), #0)
4207 // This allows us to avoid materializing the expensive negative constant.
4208 // The CMPZ #0 is useless and will be peepholed away but we need to keep it
4209 // for its glue output.
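// E.g. (CMPZ r0, #-2) becomes (CMPZ (ADDS r0, #2), #0): on Thumb the negative
// immediate would need extra instructions to materialize, while the small
// positive addend fits directly in tADDi3/tADDi8 or t2ADDri.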
4210 SDValue X = N->getOperand(0);
4211 auto *C = dyn_cast<ConstantSDNode>(N->getOperand(1).getNode());
4212 if (C && C->getSExtValue() < 0 && Subtarget->isThumb()) {
4213 int64_t Addend = -C->getSExtValue();
4214
4215 SDNode *Add = nullptr;
4216 // ADDS can be better than CMN if the immediate fits in a
4217 // 16-bit ADDS, which means either [0,256) for tADDi8 or [0,8) for tADDi3.
4218 // Outside that range we can just use a CMN which is 32-bit but has a
4219 // 12-bit immediate range.
4220 if (Addend < 1<<8) {
4221 if (Subtarget->isThumb2()) {
4222 SDValue Ops[] = { X, CurDAG->getTargetConstant(Addend, dl, MVT::i32),
4223 getAL(CurDAG, dl), CurDAG->getRegister(0, MVT::i32),
4224 CurDAG->getRegister(0, MVT::i32) };
4225 Add = CurDAG->getMachineNode(ARM::t2ADDri, dl, MVT::i32, Ops);
4226 } else {
4227 unsigned Opc = (Addend < 1<<3) ? ARM::tADDi3 : ARM::tADDi8;
4228 SDValue Ops[] = {CurDAG->getRegister(ARM::CPSR, MVT::i32), X,
4229 CurDAG->getTargetConstant(Addend, dl, MVT::i32),
4230 getAL(CurDAG, dl), CurDAG->getRegister(0, MVT::i32)};
4231 Add = CurDAG->getMachineNode(Opc, dl, MVT::i32, Ops);
4232 }
4233 }
4234 if (Add) {
4235 SDValue Ops2[] = {SDValue(Add, 0), CurDAG->getConstant(0, dl, MVT::i32)};
4236 CurDAG->MorphNodeTo(N, ARMISD::CMPZ, CurDAG->getVTList(MVT::Glue), Ops2);
4237 }
4238 }
4239 // Other cases are autogenerated.
4240 break;
4241 }
4242
4243 case ARMISD::CMOV: {
4244 SDValue InFlag = N->getOperand(4);
4245
4246 if (InFlag.getOpcode() == ARMISD::CMPZ) {
4247 bool SwitchEQNEToPLMI;
4248 SelectCMPZ(InFlag.getNode(), SwitchEQNEToPLMI);
4249
4250 if (SwitchEQNEToPLMI) {
4251 SDValue ARMcc = N->getOperand(2);
4252 ARMCC::CondCodes CC =
4253 (ARMCC::CondCodes)cast<ConstantSDNode>(ARMcc)->getZExtValue();
4254
4255 switch (CC) {
4256 default: llvm_unreachable("CMPZ must be either NE or EQ!");
4257 case ARMCC::NE:
4258 CC = ARMCC::MI;
4259 break;
4260 case ARMCC::EQ:
4261 CC = ARMCC::PL;
4262 break;
4263 }
4264 SDValue NewARMcc = CurDAG->getConstant((unsigned)CC, dl, MVT::i32);
4265 SDValue Ops[] = {N->getOperand(0), N->getOperand(1), NewARMcc,
4266 N->getOperand(3), N->getOperand(4)};
4267 CurDAG->MorphNodeTo(N, ARMISD::CMOV, N->getVTList(), Ops);
4268 }
4269
4270 }
4271 // Other cases are autogenerated.
4272 break;
4273 }
4274 case ARMISD::VZIP: {
4275 EVT VT = N->getValueType(0);
4276 // vzip.32 Dd, Dm is a pseudo-instruction expanded to vtrn.32 Dd, Dm.
4277 unsigned Opc64[] = {ARM::VZIPd8, ARM::VZIPd16, ARM::VTRNd32};
4278 unsigned Opc128[] = {ARM::VZIPq8, ARM::VZIPq16, ARM::VZIPq32};
4279 unsigned Opc = getVectorShuffleOpcode(VT, Opc64, Opc128);
4280 SDValue Pred = getAL(CurDAG, dl);
4281 SDValue PredReg = CurDAG->getRegister(0, MVT::i32);
4282 SDValue Ops[] = {N->getOperand(0), N->getOperand(1), Pred, PredReg};
4283 ReplaceNode(N, CurDAG->getMachineNode(Opc, dl, VT, VT, Ops));
4284 return;
4285 }
4286 case ARMISD::VUZP: {
4287 EVT VT = N->getValueType(0);
4288 // vuzp.32 Dd, Dm is a pseudo-instruction expanded to vtrn.32 Dd, Dm.
4289 unsigned Opc64[] = {ARM::VUZPd8, ARM::VUZPd16, ARM::VTRNd32};
4290 unsigned Opc128[] = {ARM::VUZPq8, ARM::VUZPq16, ARM::VUZPq32};
4291 unsigned Opc = getVectorShuffleOpcode(VT, Opc64, Opc128);
4292 SDValue Pred = getAL(CurDAG, dl);
4293 SDValue PredReg = CurDAG->getRegister(0, MVT::i32);
4294 SDValue Ops[] = {N->getOperand(0), N->getOperand(1), Pred, PredReg};
4295 ReplaceNode(N, CurDAG->getMachineNode(Opc, dl, VT, VT, Ops));
4296 return;
4297 }
4298 case ARMISD::VTRN: {
4299 EVT VT = N->getValueType(0);
4300 unsigned Opc64[] = {ARM::VTRNd8, ARM::VTRNd16, ARM::VTRNd32};
4301 unsigned Opc128[] = {ARM::VTRNq8, ARM::VTRNq16, ARM::VTRNq32};
4302 unsigned Opc = getVectorShuffleOpcode(VT, Opc64, Opc128);
4303 SDValue Pred = getAL(CurDAG, dl);
4304 SDValue PredReg = CurDAG->getRegister(0, MVT::i32);
4305 SDValue Ops[] = {N->getOperand(0), N->getOperand(1), Pred, PredReg};
4306 ReplaceNode(N, CurDAG->getMachineNode(Opc, dl, VT, VT, Ops));
4307 return;
4308 }
4309 case ARMISD::BUILD_VECTOR: {
4310 EVT VecVT = N->getValueType(0);
4311 EVT EltVT = VecVT.getVectorElementType();
4312 unsigned NumElts = VecVT.getVectorNumElements();
4313 if (EltVT == MVT::f64) {
4314 assert(NumElts == 2 && "unexpected type for BUILD_VECTOR");
4315 ReplaceNode(
4316 N, createDRegPairNode(VecVT, N->getOperand(0), N->getOperand(1)));
4317 return;
4318 }
4319 assert(EltVT == MVT::f32 && "unexpected type for BUILD_VECTOR");
4320 if (NumElts == 2) {
4321 ReplaceNode(
4322 N, createSRegPairNode(VecVT, N->getOperand(0), N->getOperand(1)));
4323 return;
4324 }
4325 assert(NumElts == 4 && "unexpected type for BUILD_VECTOR");
4326 ReplaceNode(N,
4327 createQuadSRegsNode(VecVT, N->getOperand(0), N->getOperand(1),
4328 N->getOperand(2), N->getOperand(3)));
4329 return;
4330 }
4331
4332 case ARMISD::VLD1DUP: {
4333 static const uint16_t DOpcodes[] = { ARM::VLD1DUPd8, ARM::VLD1DUPd16,
4334 ARM::VLD1DUPd32 };
4335 static const uint16_t QOpcodes[] = { ARM::VLD1DUPq8, ARM::VLD1DUPq16,
4336 ARM::VLD1DUPq32 };
4337 SelectVLDDup(N, /* IsIntrinsic= */ false, false, 1, DOpcodes, QOpcodes);
4338 return;
4339 }
4340
4341 case ARMISD::VLD2DUP: {
4342 static const uint16_t Opcodes[] = { ARM::VLD2DUPd8, ARM::VLD2DUPd16,
4343 ARM::VLD2DUPd32 };
4344 SelectVLDDup(N, /* IsIntrinsic= */ false, false, 2, Opcodes);
4345 return;
4346 }
4347
4348 case ARMISD::VLD3DUP: {
4349 static const uint16_t Opcodes[] = { ARM::VLD3DUPd8Pseudo,
4350 ARM::VLD3DUPd16Pseudo,
4351 ARM::VLD3DUPd32Pseudo };
4352 SelectVLDDup(N, /* IsIntrinsic= */ false, false, 3, Opcodes);
4353 return;
4354 }
4355
4356 case ARMISD::VLD4DUP: {
4357 static const uint16_t Opcodes[] = { ARM::VLD4DUPd8Pseudo,
4358 ARM::VLD4DUPd16Pseudo,
4359 ARM::VLD4DUPd32Pseudo };
4360 SelectVLDDup(N, /* IsIntrinsic= */ false, false, 4, Opcodes);
4361 return;
4362 }
4363
4364 case ARMISD::VLD1DUP_UPD: {
4365 static const uint16_t DOpcodes[] = { ARM::VLD1DUPd8wb_fixed,
4366 ARM::VLD1DUPd16wb_fixed,
4367 ARM::VLD1DUPd32wb_fixed };
4368 static const uint16_t QOpcodes[] = { ARM::VLD1DUPq8wb_fixed,
4369 ARM::VLD1DUPq16wb_fixed,
4370 ARM::VLD1DUPq32wb_fixed };
4371 SelectVLDDup(N, /* IsIntrinsic= */ false, true, 1, DOpcodes, QOpcodes);
4372 return;
4373 }
4374
4375 case ARMISD::VLD2DUP_UPD: {
4376 static const uint16_t DOpcodes[] = { ARM::VLD2DUPd8wb_fixed,
4377 ARM::VLD2DUPd16wb_fixed,
4378 ARM::VLD2DUPd32wb_fixed,
4379 ARM::VLD1q64wb_fixed };
4380 static const uint16_t QOpcodes0[] = { ARM::VLD2DUPq8EvenPseudo,
4381 ARM::VLD2DUPq16EvenPseudo,
4382 ARM::VLD2DUPq32EvenPseudo };
4383 static const uint16_t QOpcodes1[] = { ARM::VLD2DUPq8OddPseudoWB_fixed,
4384 ARM::VLD2DUPq16OddPseudoWB_fixed,
4385 ARM::VLD2DUPq32OddPseudoWB_fixed };
4386 SelectVLDDup(N, /* IsIntrinsic= */ false, true, 2, DOpcodes, QOpcodes0, QOpcodes1);
4387 return;
4388 }
4389
4390 case ARMISD::VLD3DUP_UPD: {
4391 static const uint16_t DOpcodes[] = { ARM::VLD3DUPd8Pseudo_UPD,
4392 ARM::VLD3DUPd16Pseudo_UPD,
4393 ARM::VLD3DUPd32Pseudo_UPD,
4394 ARM::VLD1d64TPseudoWB_fixed };
4395 static const uint16_t QOpcodes0[] = { ARM::VLD3DUPq8EvenPseudo,
4396 ARM::VLD3DUPq16EvenPseudo,
4397 ARM::VLD3DUPq32EvenPseudo };
4398 static const uint16_t QOpcodes1[] = { ARM::VLD3DUPq8OddPseudo_UPD,
4399 ARM::VLD3DUPq16OddPseudo_UPD,
4400 ARM::VLD3DUPq32OddPseudo_UPD };
4401 SelectVLDDup(N, /* IsIntrinsic= */ false, true, 3, DOpcodes, QOpcodes0, QOpcodes1);
4402 return;
4403 }
4404
4405 case ARMISD::VLD4DUP_UPD: {
4406 static const uint16_t DOpcodes[] = { ARM::VLD4DUPd8Pseudo_UPD,
4407 ARM::VLD4DUPd16Pseudo_UPD,
4408 ARM::VLD4DUPd32Pseudo_UPD,
4409 ARM::VLD1d64QPseudoWB_fixed };
4410 static const uint16_t QOpcodes0[] = { ARM::VLD4DUPq8EvenPseudo,
4411 ARM::VLD4DUPq16EvenPseudo,
4412 ARM::VLD4DUPq32EvenPseudo };
4413 static const uint16_t QOpcodes1[] = { ARM::VLD4DUPq8OddPseudo_UPD,
4414 ARM::VLD4DUPq16OddPseudo_UPD,
4415 ARM::VLD4DUPq32OddPseudo_UPD };
4416 SelectVLDDup(N, /* IsIntrinsic= */ false, true, 4, DOpcodes, QOpcodes0, QOpcodes1);
4417 return;
4418 }
4419
4420 case ARMISD::VLD1_UPD: {
4421 static const uint16_t DOpcodes[] = { ARM::VLD1d8wb_fixed,
4422 ARM::VLD1d16wb_fixed,
4423 ARM::VLD1d32wb_fixed,
4424 ARM::VLD1d64wb_fixed };
4425 static const uint16_t QOpcodes[] = { ARM::VLD1q8wb_fixed,
4426 ARM::VLD1q16wb_fixed,
4427 ARM::VLD1q32wb_fixed,
4428 ARM::VLD1q64wb_fixed };
4429 SelectVLD(N, true, 1, DOpcodes, QOpcodes, nullptr);
4430 return;
4431 }
4432
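// With NEON, a VLD2 with writeback selects directly to a vld2 instruction;
// otherwise (MVE) it is selected as the two-part VLD20/VLD21 sequence,
// with the writeback performed by the final part.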
4433 case ARMISD::VLD2_UPD: {
4434 if (Subtarget->hasNEON()) {
4435 static const uint16_t DOpcodes[] = {
4436 ARM::VLD2d8wb_fixed, ARM::VLD2d16wb_fixed, ARM::VLD2d32wb_fixed,
4437 ARM::VLD1q64wb_fixed};
4438 static const uint16_t QOpcodes[] = {ARM::VLD2q8PseudoWB_fixed,
4439 ARM::VLD2q16PseudoWB_fixed,
4440 ARM::VLD2q32PseudoWB_fixed};
4441 SelectVLD(N, true, 2, DOpcodes, QOpcodes, nullptr);
4442 } else {
4443 static const uint16_t Opcodes8[] = {ARM::MVE_VLD20_8,
4444 ARM::MVE_VLD21_8_wb};
4445 static const uint16_t Opcodes16[] = {ARM::MVE_VLD20_16,
4446 ARM::MVE_VLD21_16_wb};
4447 static const uint16_t Opcodes32[] = {ARM::MVE_VLD20_32,
4448 ARM::MVE_VLD21_32_wb};
4449 static const uint16_t *const Opcodes[] = {Opcodes8, Opcodes16, Opcodes32};
4450 SelectMVE_VLD(N, 2, Opcodes, true);
4451 }
4452 return;
4453 }
4454
4455 case ARMISD::VLD3_UPD: {
4456 static const uint16_t DOpcodes[] = { ARM::VLD3d8Pseudo_UPD,
4457 ARM::VLD3d16Pseudo_UPD,
4458 ARM::VLD3d32Pseudo_UPD,
4459 ARM::VLD1d64TPseudoWB_fixed};
4460 static const uint16_t QOpcodes0[] = { ARM::VLD3q8Pseudo_UPD,
4461 ARM::VLD3q16Pseudo_UPD,
4462 ARM::VLD3q32Pseudo_UPD };
4463 static const uint16_t QOpcodes1[] = { ARM::VLD3q8oddPseudo_UPD,
4464 ARM::VLD3q16oddPseudo_UPD,
4465 ARM::VLD3q32oddPseudo_UPD };
4466 SelectVLD(N, true, 3, DOpcodes, QOpcodes0, QOpcodes1);
4467 return;
4468 }
4469
4470 case ARMISD::VLD4_UPD: {
4471 if (Subtarget->hasNEON()) {
4472 static const uint16_t DOpcodes[] = {
4473 ARM::VLD4d8Pseudo_UPD, ARM::VLD4d16Pseudo_UPD, ARM::VLD4d32Pseudo_UPD,
4474 ARM::VLD1d64QPseudoWB_fixed};
4475 static const uint16_t QOpcodes0[] = {ARM::VLD4q8Pseudo_UPD,
4476 ARM::VLD4q16Pseudo_UPD,
4477 ARM::VLD4q32Pseudo_UPD};
4478 static const uint16_t QOpcodes1[] = {ARM::VLD4q8oddPseudo_UPD,
4479 ARM::VLD4q16oddPseudo_UPD,
4480 ARM::VLD4q32oddPseudo_UPD};
4481 SelectVLD(N, true, 4, DOpcodes, QOpcodes0, QOpcodes1);
4482 } else {
4483 static const uint16_t Opcodes8[] = {ARM::MVE_VLD40_8, ARM::MVE_VLD41_8,
4484 ARM::MVE_VLD42_8,
4485 ARM::MVE_VLD43_8_wb};
4486 static const uint16_t Opcodes16[] = {ARM::MVE_VLD40_16, ARM::MVE_VLD41_16,
4487 ARM::MVE_VLD42_16,
4488 ARM::MVE_VLD43_16_wb};
4489 static const uint16_t Opcodes32[] = {ARM::MVE_VLD40_32, ARM::MVE_VLD41_32,
4490 ARM::MVE_VLD42_32,
4491 ARM::MVE_VLD43_32_wb};
4492 static const uint16_t *const Opcodes[] = {Opcodes8, Opcodes16, Opcodes32};
4493 SelectMVE_VLD(N, 4, Opcodes, true);
4494 }
4495 return;
4496 }
4497
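// The VLD1x2/x3/x4 writeback forms below are NEON-only; without NEON these
// nodes fall through (break) to the default selection.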
4498 case ARMISD::VLD1x2_UPD: {
4499 if (Subtarget->hasNEON()) {
4500 static const uint16_t DOpcodes[] = {
4501 ARM::VLD1q8wb_fixed, ARM::VLD1q16wb_fixed, ARM::VLD1q32wb_fixed,
4502 ARM::VLD1q64wb_fixed};
4503 static const uint16_t QOpcodes[] = {
4504 ARM::VLD1d8QPseudoWB_fixed, ARM::VLD1d16QPseudoWB_fixed,
4505 ARM::VLD1d32QPseudoWB_fixed, ARM::VLD1d64QPseudoWB_fixed};
4506 SelectVLD(N, true, 2, DOpcodes, QOpcodes, nullptr);
4507 return;
4508 }
4509 break;
4510 }
4511
4512 case ARMISD::VLD1x3_UPD: {
4513 if (Subtarget->hasNEON()) {
4514 static const uint16_t DOpcodes[] = {
4515 ARM::VLD1d8TPseudoWB_fixed, ARM::VLD1d16TPseudoWB_fixed,
4516 ARM::VLD1d32TPseudoWB_fixed, ARM::VLD1d64TPseudoWB_fixed};
4517 static const uint16_t QOpcodes0[] = {
4518 ARM::VLD1q8LowTPseudo_UPD, ARM::VLD1q16LowTPseudo_UPD,
4519 ARM::VLD1q32LowTPseudo_UPD, ARM::VLD1q64LowTPseudo_UPD};
4520 static const uint16_t QOpcodes1[] = {
4521 ARM::VLD1q8HighTPseudo_UPD, ARM::VLD1q16HighTPseudo_UPD,
4522 ARM::VLD1q32HighTPseudo_UPD, ARM::VLD1q64HighTPseudo_UPD};
4523 SelectVLD(N, true, 3, DOpcodes, QOpcodes0, QOpcodes1);
4524 return;
4525 }
4526 break;
4527 }
4528
4529 case ARMISD::VLD1x4_UPD: {
4530 if (Subtarget->hasNEON()) {
4531 static const uint16_t DOpcodes[] = {
4532 ARM::VLD1d8QPseudoWB_fixed, ARM::VLD1d16QPseudoWB_fixed,
4533 ARM::VLD1d32QPseudoWB_fixed, ARM::VLD1d64QPseudoWB_fixed};
4534 static const uint16_t QOpcodes0[] = {
4535 ARM::VLD1q8LowQPseudo_UPD, ARM::VLD1q16LowQPseudo_UPD,
4536 ARM::VLD1q32LowQPseudo_UPD, ARM::VLD1q64LowQPseudo_UPD};
4537 static const uint16_t QOpcodes1[] = {
4538 ARM::VLD1q8HighQPseudo_UPD, ARM::VLD1q16HighQPseudo_UPD,
4539 ARM::VLD1q32HighQPseudo_UPD, ARM::VLD1q64HighQPseudo_UPD};
4540 SelectVLD(N, true, 4, DOpcodes, QOpcodes0, QOpcodes1);
4541 return;
4542 }
4543 break;
4544 }
4545
4546 case ARMISD::VLD2LN_UPD: {
4547 static const uint16_t DOpcodes[] = { ARM::VLD2LNd8Pseudo_UPD,
4548 ARM::VLD2LNd16Pseudo_UPD,
4549 ARM::VLD2LNd32Pseudo_UPD };
4550 static const uint16_t QOpcodes[] = { ARM::VLD2LNq16Pseudo_UPD,
4551 ARM::VLD2LNq32Pseudo_UPD };
4552 SelectVLDSTLane(N, true, true, 2, DOpcodes, QOpcodes);
4553 return;
4554 }
4555
4556 case ARMISD::VLD3LN_UPD: {
4557 static const uint16_t DOpcodes[] = { ARM::VLD3LNd8Pseudo_UPD,
4558 ARM::VLD3LNd16Pseudo_UPD,
4559 ARM::VLD3LNd32Pseudo_UPD };
4560 static const uint16_t QOpcodes[] = { ARM::VLD3LNq16Pseudo_UPD,
4561 ARM::VLD3LNq32Pseudo_UPD };
4562 SelectVLDSTLane(N, true, true, 3, DOpcodes, QOpcodes);
4563 return;
4564 }
4565
4566 case ARMISD::VLD4LN_UPD: {
4567 static const uint16_t DOpcodes[] = { ARM::VLD4LNd8Pseudo_UPD,
4568 ARM::VLD4LNd16Pseudo_UPD,
4569 ARM::VLD4LNd32Pseudo_UPD };
4570 static const uint16_t QOpcodes[] = { ARM::VLD4LNq16Pseudo_UPD,
4571 ARM::VLD4LNq32Pseudo_UPD };
4572 SelectVLDSTLane(N, true, true, 4, DOpcodes, QOpcodes);
4573 return;
4574 }
4575
4576 case ARMISD::VST1_UPD: {
4577 static const uint16_t DOpcodes[] = { ARM::VST1d8wb_fixed,
4578 ARM::VST1d16wb_fixed,
4579 ARM::VST1d32wb_fixed,
4580 ARM::VST1d64wb_fixed };
4581 static const uint16_t QOpcodes[] = { ARM::VST1q8wb_fixed,
4582 ARM::VST1q16wb_fixed,
4583 ARM::VST1q32wb_fixed,
4584 ARM::VST1q64wb_fixed };
4585 SelectVST(N, true, 1, DOpcodes, QOpcodes, nullptr);
4586 return;
4587 }
4588
4589 case ARMISD::VST2_UPD: {
4590 if (Subtarget->hasNEON()) {
4591 static const uint16_t DOpcodes[] = {
4592 ARM::VST2d8wb_fixed, ARM::VST2d16wb_fixed, ARM::VST2d32wb_fixed,
4593 ARM::VST1q64wb_fixed};
4594 static const uint16_t QOpcodes[] = {ARM::VST2q8PseudoWB_fixed,
4595 ARM::VST2q16PseudoWB_fixed,
4596 ARM::VST2q32PseudoWB_fixed};
4597 SelectVST(N, true, 2, DOpcodes, QOpcodes, nullptr);
4598 return;
4599 }
4600 break;
4601 }
4602
4603 case ARMISD::VST3_UPD: {
4604 static const uint16_t DOpcodes[] = { ARM::VST3d8Pseudo_UPD,
4605 ARM::VST3d16Pseudo_UPD,
4606 ARM::VST3d32Pseudo_UPD,
4607 ARM::VST1d64TPseudoWB_fixed};
4608 static const uint16_t QOpcodes0[] = { ARM::VST3q8Pseudo_UPD,
4609 ARM::VST3q16Pseudo_UPD,
4610 ARM::VST3q32Pseudo_UPD };
4611 static const uint16_t QOpcodes1[] = { ARM::VST3q8oddPseudo_UPD,
4612 ARM::VST3q16oddPseudo_UPD,
4613 ARM::VST3q32oddPseudo_UPD };
4614 SelectVST(N, true, 3, DOpcodes, QOpcodes0, QOpcodes1);
4615 return;
4616 }
4617
4618 case ARMISD::VST4_UPD: {
4619 if (Subtarget->hasNEON()) {
4620 static const uint16_t DOpcodes[] = {
4621 ARM::VST4d8Pseudo_UPD, ARM::VST4d16Pseudo_UPD, ARM::VST4d32Pseudo_UPD,
4622 ARM::VST1d64QPseudoWB_fixed};
4623 static const uint16_t QOpcodes0[] = {ARM::VST4q8Pseudo_UPD,
4624 ARM::VST4q16Pseudo_UPD,
4625 ARM::VST4q32Pseudo_UPD};
4626 static const uint16_t QOpcodes1[] = {ARM::VST4q8oddPseudo_UPD,
4627 ARM::VST4q16oddPseudo_UPD,
4628 ARM::VST4q32oddPseudo_UPD};
4629 SelectVST(N, true, 4, DOpcodes, QOpcodes0, QOpcodes1);
4630 return;
4631 }
4632 break;
4633 }
4634
4635 case ARMISD::VST1x2_UPD: {
4636 if (Subtarget->hasNEON()) {
4637 static const uint16_t DOpcodes[] = { ARM::VST1q8wb_fixed,
4638 ARM::VST1q16wb_fixed,
4639 ARM::VST1q32wb_fixed,
4640 ARM::VST1q64wb_fixed};
4641 static const uint16_t QOpcodes[] = { ARM::VST1d8QPseudoWB_fixed,
4642 ARM::VST1d16QPseudoWB_fixed,
4643 ARM::VST1d32QPseudoWB_fixed,
4644 ARM::VST1d64QPseudoWB_fixed };
4645 SelectVST(N, true, 2, DOpcodes, QOpcodes, nullptr);
4646 return;
4647 }
4648 break;
4649 }
4650
4651 case ARMISD::VST1x3_UPD: {
4652 if (Subtarget->hasNEON()) {
4653 static const uint16_t DOpcodes[] = { ARM::VST1d8TPseudoWB_fixed,
4654 ARM::VST1d16TPseudoWB_fixed,
4655 ARM::VST1d32TPseudoWB_fixed,
4656 ARM::VST1d64TPseudoWB_fixed };
4657 static const uint16_t QOpcodes0[] = { ARM::VST1q8LowTPseudo_UPD,
4658 ARM::VST1q16LowTPseudo_UPD,
4659 ARM::VST1q32LowTPseudo_UPD,
4660 ARM::VST1q64LowTPseudo_UPD };
4661 static const uint16_t QOpcodes1[] = { ARM::VST1q8HighTPseudo_UPD,
4662 ARM::VST1q16HighTPseudo_UPD,
4663 ARM::VST1q32HighTPseudo_UPD,
4664 ARM::VST1q64HighTPseudo_UPD };
4665 SelectVST(N, true, 3, DOpcodes, QOpcodes0, QOpcodes1);
4666 return;
4667 }
4668 break;
4669 }
4670
4671 case ARMISD::VST1x4_UPD: {
4672 if (Subtarget->hasNEON()) {
4673 static const uint16_t DOpcodes[] = { ARM::VST1d8QPseudoWB_fixed,
4674 ARM::VST1d16QPseudoWB_fixed,
4675 ARM::VST1d32QPseudoWB_fixed,
4676 ARM::VST1d64QPseudoWB_fixed };
4677 static const uint16_t QOpcodes0[] = { ARM::VST1q8LowQPseudo_UPD,
4678 ARM::VST1q16LowQPseudo_UPD,
4679 ARM::VST1q32LowQPseudo_UPD,
4680 ARM::VST1q64LowQPseudo_UPD };
4681 static const uint16_t QOpcodes1[] = { ARM::VST1q8HighQPseudo_UPD,
4682 ARM::VST1q16HighQPseudo_UPD,
4683 ARM::VST1q32HighQPseudo_UPD,
4684 ARM::VST1q64HighQPseudo_UPD };
4685 SelectVST(N, true, 4, DOpcodes, QOpcodes0, QOpcodes1);
4686 return;
4687 }
4688 break;
4689 }
4690 case ARMISD::VST2LN_UPD: {
4691 static const uint16_t DOpcodes[] = { ARM::VST2LNd8Pseudo_UPD,
4692 ARM::VST2LNd16Pseudo_UPD,
4693 ARM::VST2LNd32Pseudo_UPD };
4694 static const uint16_t QOpcodes[] = { ARM::VST2LNq16Pseudo_UPD,
4695 ARM::VST2LNq32Pseudo_UPD };
4696 SelectVLDSTLane(N, false, true, 2, DOpcodes, QOpcodes);
4697 return;
4698 }
4699
4700 case ARMISD::VST3LN_UPD: {
4701 static const uint16_t DOpcodes[] = { ARM::VST3LNd8Pseudo_UPD,
4702 ARM::VST3LNd16Pseudo_UPD,
4703 ARM::VST3LNd32Pseudo_UPD };
4704 static const uint16_t QOpcodes[] = { ARM::VST3LNq16Pseudo_UPD,
4705 ARM::VST3LNq32Pseudo_UPD };
4706 SelectVLDSTLane(N, false, true, 3, DOpcodes, QOpcodes);
4707 return;
4708 }
4709
4710 case ARMISD::VST4LN_UPD: {
4711 static const uint16_t DOpcodes[] = { ARM::VST4LNd8Pseudo_UPD,
4712 ARM::VST4LNd16Pseudo_UPD,
4713 ARM::VST4LNd32Pseudo_UPD };
4714 static const uint16_t QOpcodes[] = { ARM::VST4LNq16Pseudo_UPD,
4715 ARM::VST4LNq32Pseudo_UPD };
4716 SelectVLDSTLane(N, false, true, 4, DOpcodes, QOpcodes);
4717 return;
4718 }
4719
4720 case ISD::INTRINSIC_VOID:
4721 case ISD::INTRINSIC_W_CHAIN: {
4722 unsigned IntNo = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue();
4723 switch (IntNo) {
4724 default:
4725 break;
4726
4727 case Intrinsic::arm_mrrc:
4728 case Intrinsic::arm_mrrc2: {
4729 SDLoc dl(N);
4730 SDValue Chain = N->getOperand(0);
4731 unsigned Opc;
4732
4733 if (Subtarget->isThumb())
4734 Opc = (IntNo == Intrinsic::arm_mrrc ? ARM::t2MRRC : ARM::t2MRRC2);
4735 else
4736 Opc = (IntNo == Intrinsic::arm_mrrc ? ARM::MRRC : ARM::MRRC2);
4737
4738 SmallVector<SDValue, 5> Ops;
4739 Ops.push_back(getI32Imm(cast<ConstantSDNode>(N->getOperand(2))->getZExtValue(), dl)); /* coproc */
4740 Ops.push_back(getI32Imm(cast<ConstantSDNode>(N->getOperand(3))->getZExtValue(), dl)); /* opc */
4741 Ops.push_back(getI32Imm(cast<ConstantSDNode>(N->getOperand(4))->getZExtValue(), dl)); /* CRm */
4742
4743 // The ARM-mode mrrc2 instruction doesn't allow predicates: the top 4 bits of the
4744 // encoded instruction are always '1111'. Assembly language permits specifying AL as
4745 // a predicate to mrrc2, but it makes no difference to the encoded instruction.
4746 if (Opc != ARM::MRRC2) {
4747 Ops.push_back(getAL(CurDAG, dl));
4748 Ops.push_back(CurDAG->getRegister(0, MVT::i32));
4749 }
4750
4751 Ops.push_back(Chain);
4752
4753 // Writes to two registers.
4754 const EVT RetType[] = {MVT::i32, MVT::i32, MVT::Other};
4755
4756 ReplaceNode(N, CurDAG->getMachineNode(Opc, dl, RetType, Ops));
4757 return;
4758 }
4759 case Intrinsic::arm_ldaexd:
4760 case Intrinsic::arm_ldrexd: {
4761 SDLoc dl(N);
4762 SDValue Chain = N->getOperand(0);
4763 SDValue MemAddr = N->getOperand(2);
4764 bool isThumb = Subtarget->isThumb() && Subtarget->hasV8MBaselineOps();
4765
4766 bool IsAcquire = IntNo == Intrinsic::arm_ldaexd;
4767 unsigned NewOpc = isThumb ? (IsAcquire ? ARM::t2LDAEXD : ARM::t2LDREXD)
4768 : (IsAcquire ? ARM::LDAEXD : ARM::LDREXD);
4769
4770 // arm_ldrexd returns an i64 value in {i32, i32}.
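// In Thumb mode t2LDREXD yields the two halves as separate i32 results;
// in ARM mode LDREXD yields a single GPRPair (MVT::Untyped) result that is
// split with EXTRACT_SUBREG below.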
4771 std::vector<EVT> ResTys;
4772 if (isThumb) {
4773 ResTys.push_back(MVT::i32);
4774 ResTys.push_back(MVT::i32);
4775 } else
4776 ResTys.push_back(MVT::Untyped);
4777 ResTys.push_back(MVT::Other);
4778
4779 // Place arguments in the right order.
4780 SDValue Ops[] = {MemAddr, getAL(CurDAG, dl),
4781 CurDAG->getRegister(0, MVT::i32), Chain};
4782 SDNode *Ld = CurDAG->getMachineNode(NewOpc, dl, ResTys, Ops);
4783 // Transfer memoperands.
4784 MachineMemOperand *MemOp = cast<MemIntrinsicSDNode>(N)->getMemOperand();
4785 CurDAG->setNodeMemRefs(cast<MachineSDNode>(Ld), {MemOp});
4786
4787 // Remap uses.
4788 SDValue OutChain = isThumb ? SDValue(Ld, 2) : SDValue(Ld, 1);
4789 if (!SDValue(N, 0).use_empty()) {
4790 SDValue Result;
4791 if (isThumb)
4792 Result = SDValue(Ld, 0);
4793 else {
4794 SDValue SubRegIdx =
4795 CurDAG->getTargetConstant(ARM::gsub_0, dl, MVT::i32);
4796 SDNode *ResNode = CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG,
4797 dl, MVT::i32, SDValue(Ld, 0), SubRegIdx);
4798 Result = SDValue(ResNode,0);
4799 }
4800 ReplaceUses(SDValue(N, 0), Result);
4801 }
4802 if (!SDValue(N, 1).use_empty()) {
4803 SDValue Result;
4804 if (isThumb)
4805 Result = SDValue(Ld, 1);
4806 else {
4807 SDValue SubRegIdx =
4808 CurDAG->getTargetConstant(ARM::gsub_1, dl, MVT::i32);
4809 SDNode *ResNode = CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG,
4810 dl, MVT::i32, SDValue(Ld, 0), SubRegIdx);
4811 Result = SDValue(ResNode,0);
4812 }
4813 ReplaceUses(SDValue(N, 1), Result);
4814 }
4815 ReplaceUses(SDValue(N, 2), OutChain);
4816 CurDAG->RemoveDeadNode(N);
4817 return;
4818 }
4819 case Intrinsic::arm_stlexd:
4820 case Intrinsic::arm_strexd: {
4821 SDLoc dl(N);
4822 SDValue Chain = N->getOperand(0);
4823 SDValue Val0 = N->getOperand(2);
4824 SDValue Val1 = N->getOperand(3);
4825 SDValue MemAddr = N->getOperand(4);
4826
4827 // A store-exclusive-double returns an i32 value which is the status of
4828 // the issued store.
4829 const EVT ResTys[] = {MVT::i32, MVT::Other};
4830
4831 bool isThumb = Subtarget->isThumb() && Subtarget->hasThumb2();
4832 // Place arguments in the right order.
4833 SmallVector<SDValue, 7> Ops;
4834 if (isThumb) {
4835 Ops.push_back(Val0);
4836 Ops.push_back(Val1);
4837 } else
4838 // arm_strexd uses GPRPair.
4839 Ops.push_back(SDValue(createGPRPairNode(MVT::Untyped, Val0, Val1), 0));
4840 Ops.push_back(MemAddr);
4841 Ops.push_back(getAL(CurDAG, dl));
4842 Ops.push_back(CurDAG->getRegister(0, MVT::i32));
4843 Ops.push_back(Chain);
4844
4845 bool IsRelease = IntNo == Intrinsic::arm_stlexd;
4846 unsigned NewOpc = isThumb ? (IsRelease ? ARM::t2STLEXD : ARM::t2STREXD)
4847 : (IsRelease ? ARM::STLEXD : ARM::STREXD);
4848
4849 SDNode *St = CurDAG->getMachineNode(NewOpc, dl, ResTys, Ops);
4850 // Transfer memoperands.
4851 MachineMemOperand *MemOp = cast<MemIntrinsicSDNode>(N)->getMemOperand();
4852 CurDAG->setNodeMemRefs(cast<MachineSDNode>(St), {MemOp});
4853
4854 ReplaceNode(N, St);
4855 return;
4856 }
4857
4858 case Intrinsic::arm_neon_vld1: {
4859 static const uint16_t DOpcodes[] = { ARM::VLD1d8, ARM::VLD1d16,
4860 ARM::VLD1d32, ARM::VLD1d64 };
4861 static const uint16_t QOpcodes[] = { ARM::VLD1q8, ARM::VLD1q16,
4862 ARM::VLD1q32, ARM::VLD1q64};
4863 SelectVLD(N, false, 1, DOpcodes, QOpcodes, nullptr);
4864 return;
4865 }
4866
4867 case Intrinsic::arm_neon_vld1x2: {
4868 static const uint16_t DOpcodes[] = { ARM::VLD1q8, ARM::VLD1q16,
4869 ARM::VLD1q32, ARM::VLD1q64 };
4870 static const uint16_t QOpcodes[] = { ARM::VLD1d8QPseudo,
4871 ARM::VLD1d16QPseudo,
4872 ARM::VLD1d32QPseudo,
4873 ARM::VLD1d64QPseudo };
4874 SelectVLD(N, false, 2, DOpcodes, QOpcodes, nullptr);
4875 return;
4876 }
4877
4878 case Intrinsic::arm_neon_vld1x3: {
4879 static const uint16_t DOpcodes[] = { ARM::VLD1d8TPseudo,
4880 ARM::VLD1d16TPseudo,
4881 ARM::VLD1d32TPseudo,
4882 ARM::VLD1d64TPseudo };
4883 static const uint16_t QOpcodes0[] = { ARM::VLD1q8LowTPseudo_UPD,
4884 ARM::VLD1q16LowTPseudo_UPD,
4885 ARM::VLD1q32LowTPseudo_UPD,
4886 ARM::VLD1q64LowTPseudo_UPD };
4887 static const uint16_t QOpcodes1[] = { ARM::VLD1q8HighTPseudo,
4888 ARM::VLD1q16HighTPseudo,
4889 ARM::VLD1q32HighTPseudo,
4890 ARM::VLD1q64HighTPseudo };
4891 SelectVLD(N, false, 3, DOpcodes, QOpcodes0, QOpcodes1);
4892 return;
4893 }
4894
4895 case Intrinsic::arm_neon_vld1x4: {
4896 static const uint16_t DOpcodes[] = { ARM::VLD1d8QPseudo,
4897 ARM::VLD1d16QPseudo,
4898 ARM::VLD1d32QPseudo,
4899 ARM::VLD1d64QPseudo };
4900 static const uint16_t QOpcodes0[] = { ARM::VLD1q8LowQPseudo_UPD,
4901 ARM::VLD1q16LowQPseudo_UPD,
4902 ARM::VLD1q32LowQPseudo_UPD,
4903 ARM::VLD1q64LowQPseudo_UPD };
4904 static const uint16_t QOpcodes1[] = { ARM::VLD1q8HighQPseudo,
4905 ARM::VLD1q16HighQPseudo,
4906 ARM::VLD1q32HighQPseudo,
4907 ARM::VLD1q64HighQPseudo };
4908 SelectVLD(N, false, 4, DOpcodes, QOpcodes0, QOpcodes1);
4909 return;
4910 }
4911
4912 case Intrinsic::arm_neon_vld2: {
4913 static const uint16_t DOpcodes[] = { ARM::VLD2d8, ARM::VLD2d16,
4914 ARM::VLD2d32, ARM::VLD1q64 };
4915 static const uint16_t QOpcodes[] = { ARM::VLD2q8Pseudo, ARM::VLD2q16Pseudo,
4916 ARM::VLD2q32Pseudo };
4917 SelectVLD(N, false, 2, DOpcodes, QOpcodes, nullptr);
4918 return;
4919 }
4920
4921 case Intrinsic::arm_neon_vld3: {
4922 static const uint16_t DOpcodes[] = { ARM::VLD3d8Pseudo,
4923 ARM::VLD3d16Pseudo,
4924 ARM::VLD3d32Pseudo,
4925 ARM::VLD1d64TPseudo };
4926 static const uint16_t QOpcodes0[] = { ARM::VLD3q8Pseudo_UPD,
4927 ARM::VLD3q16Pseudo_UPD,
4928 ARM::VLD3q32Pseudo_UPD };
4929 static const uint16_t QOpcodes1[] = { ARM::VLD3q8oddPseudo,
4930 ARM::VLD3q16oddPseudo,
4931 ARM::VLD3q32oddPseudo };
4932 SelectVLD(N, false, 3, DOpcodes, QOpcodes0, QOpcodes1);
4933 return;
4934 }
4935
4936 case Intrinsic::arm_neon_vld4: {
4937 static const uint16_t DOpcodes[] = { ARM::VLD4d8Pseudo,
4938 ARM::VLD4d16Pseudo,
4939 ARM::VLD4d32Pseudo,
4940 ARM::VLD1d64QPseudo };
4941 static const uint16_t QOpcodes0[] = { ARM::VLD4q8Pseudo_UPD,
4942 ARM::VLD4q16Pseudo_UPD,
4943 ARM::VLD4q32Pseudo_UPD };
4944 static const uint16_t QOpcodes1[] = { ARM::VLD4q8oddPseudo,
4945 ARM::VLD4q16oddPseudo,
4946 ARM::VLD4q32oddPseudo };
4947 SelectVLD(N, false, 4, DOpcodes, QOpcodes0, QOpcodes1);
4948 return;
4949 }
4950
4951 case Intrinsic::arm_neon_vld2dup: {
4952 static const uint16_t DOpcodes[] = { ARM::VLD2DUPd8, ARM::VLD2DUPd16,
4953 ARM::VLD2DUPd32, ARM::VLD1q64 };
4954 static const uint16_t QOpcodes0[] = { ARM::VLD2DUPq8EvenPseudo,
4955 ARM::VLD2DUPq16EvenPseudo,
4956 ARM::VLD2DUPq32EvenPseudo };
4957 static const uint16_t QOpcodes1[] = { ARM::VLD2DUPq8OddPseudo,
4958 ARM::VLD2DUPq16OddPseudo,
4959 ARM::VLD2DUPq32OddPseudo };
4960 SelectVLDDup(N, /* IsIntrinsic= */ true, false, 2,
4961 DOpcodes, QOpcodes0, QOpcodes1);
4962 return;
4963 }
4964
4965 case Intrinsic::arm_neon_vld3dup: {
4966 static const uint16_t DOpcodes[] = { ARM::VLD3DUPd8Pseudo,
4967 ARM::VLD3DUPd16Pseudo,
4968 ARM::VLD3DUPd32Pseudo,
4969 ARM::VLD1d64TPseudo };
4970 static const uint16_t QOpcodes0[] = { ARM::VLD3DUPq8EvenPseudo,
4971 ARM::VLD3DUPq16EvenPseudo,
4972 ARM::VLD3DUPq32EvenPseudo };
4973 static const uint16_t QOpcodes1[] = { ARM::VLD3DUPq8OddPseudo,
4974 ARM::VLD3DUPq16OddPseudo,
4975 ARM::VLD3DUPq32OddPseudo };
4976 SelectVLDDup(N, /* IsIntrinsic= */ true, false, 3,
4977 DOpcodes, QOpcodes0, QOpcodes1);
4978 return;
4979 }
4980
4981 case Intrinsic::arm_neon_vld4dup: {
4982 static const uint16_t DOpcodes[] = { ARM::VLD4DUPd8Pseudo,
4983 ARM::VLD4DUPd16Pseudo,
4984 ARM::VLD4DUPd32Pseudo,
4985 ARM::VLD1d64QPseudo };
4986 static const uint16_t QOpcodes0[] = { ARM::VLD4DUPq8EvenPseudo,
4987 ARM::VLD4DUPq16EvenPseudo,
4988 ARM::VLD4DUPq32EvenPseudo };
4989 static const uint16_t QOpcodes1[] = { ARM::VLD4DUPq8OddPseudo,
4990 ARM::VLD4DUPq16OddPseudo,
4991 ARM::VLD4DUPq32OddPseudo };
4992 SelectVLDDup(N, /* IsIntrinsic= */ true, false, 4,
4993 DOpcodes, QOpcodes0, QOpcodes1);
4994 return;
4995 }
4996
4997 case Intrinsic::arm_neon_vld2lane: {
4998 static const uint16_t DOpcodes[] = { ARM::VLD2LNd8Pseudo,
4999 ARM::VLD2LNd16Pseudo,
5000 ARM::VLD2LNd32Pseudo };
5001 static const uint16_t QOpcodes[] = { ARM::VLD2LNq16Pseudo,
5002 ARM::VLD2LNq32Pseudo };
5003 SelectVLDSTLane(N, true, false, 2, DOpcodes, QOpcodes);
5004 return;
5005 }
5006
5007 case Intrinsic::arm_neon_vld3lane: {
5008 static const uint16_t DOpcodes[] = { ARM::VLD3LNd8Pseudo,
5009 ARM::VLD3LNd16Pseudo,
5010 ARM::VLD3LNd32Pseudo };
5011 static const uint16_t QOpcodes[] = { ARM::VLD3LNq16Pseudo,
5012 ARM::VLD3LNq32Pseudo };
5013 SelectVLDSTLane(N, true, false, 3, DOpcodes, QOpcodes);
5014 return;
5015 }
5016
5017 case Intrinsic::arm_neon_vld4lane: {
5018 static const uint16_t DOpcodes[] = { ARM::VLD4LNd8Pseudo,
5019 ARM::VLD4LNd16Pseudo,
5020 ARM::VLD4LNd32Pseudo };
5021 static const uint16_t QOpcodes[] = { ARM::VLD4LNq16Pseudo,
5022 ARM::VLD4LNq32Pseudo };
5023 SelectVLDSTLane(N, true, false, 4, DOpcodes, QOpcodes);
5024 return;
5025 }
5026
5027 case Intrinsic::arm_neon_vst1: {
5028 static const uint16_t DOpcodes[] = { ARM::VST1d8, ARM::VST1d16,
5029 ARM::VST1d32, ARM::VST1d64 };
5030 static const uint16_t QOpcodes[] = { ARM::VST1q8, ARM::VST1q16,
5031 ARM::VST1q32, ARM::VST1q64 };
5032 SelectVST(N, false, 1, DOpcodes, QOpcodes, nullptr);
5033 return;
5034 }
5035
5036 case Intrinsic::arm_neon_vst1x2: {
5037 static const uint16_t DOpcodes[] = { ARM::VST1q8, ARM::VST1q16,
5038 ARM::VST1q32, ARM::VST1q64 };
5039 static const uint16_t QOpcodes[] = { ARM::VST1d8QPseudo,
5040 ARM::VST1d16QPseudo,
5041 ARM::VST1d32QPseudo,
5042 ARM::VST1d64QPseudo };
5043 SelectVST(N, false, 2, DOpcodes, QOpcodes, nullptr);
5044 return;
5045 }
5046
5047 case Intrinsic::arm_neon_vst1x3: {
5048 static const uint16_t DOpcodes[] = { ARM::VST1d8TPseudo,
5049 ARM::VST1d16TPseudo,
5050 ARM::VST1d32TPseudo,
5051 ARM::VST1d64TPseudo };
5052 static const uint16_t QOpcodes0[] = { ARM::VST1q8LowTPseudo_UPD,
5053 ARM::VST1q16LowTPseudo_UPD,
5054 ARM::VST1q32LowTPseudo_UPD,
5055 ARM::VST1q64LowTPseudo_UPD };
5056 static const uint16_t QOpcodes1[] = { ARM::VST1q8HighTPseudo,
5057 ARM::VST1q16HighTPseudo,
5058 ARM::VST1q32HighTPseudo,
5059 ARM::VST1q64HighTPseudo };
5060 SelectVST(N, false, 3, DOpcodes, QOpcodes0, QOpcodes1);
5061 return;
5062 }
5063
5064 case Intrinsic::arm_neon_vst1x4: {
5065 static const uint16_t DOpcodes[] = { ARM::VST1d8QPseudo,
5066 ARM::VST1d16QPseudo,
5067 ARM::VST1d32QPseudo,
5068 ARM::VST1d64QPseudo };
5069 static const uint16_t QOpcodes0[] = { ARM::VST1q8LowQPseudo_UPD,
5070 ARM::VST1q16LowQPseudo_UPD,
5071 ARM::VST1q32LowQPseudo_UPD,
5072 ARM::VST1q64LowQPseudo_UPD };
5073 static const uint16_t QOpcodes1[] = { ARM::VST1q8HighQPseudo,
5074 ARM::VST1q16HighQPseudo,
5075 ARM::VST1q32HighQPseudo,
5076 ARM::VST1q64HighQPseudo };
5077 SelectVST(N, false, 4, DOpcodes, QOpcodes0, QOpcodes1);
5078 return;
5079 }
5080
5081 case Intrinsic::arm_neon_vst2: {
5082 static const uint16_t DOpcodes[] = { ARM::VST2d8, ARM::VST2d16,
5083 ARM::VST2d32, ARM::VST1q64 };
5084 static const uint16_t QOpcodes[] = { ARM::VST2q8Pseudo, ARM::VST2q16Pseudo,
5085 ARM::VST2q32Pseudo };
5086 SelectVST(N, false, 2, DOpcodes, QOpcodes, nullptr);
5087 return;
5088 }
5089
5090 case Intrinsic::arm_neon_vst3: {
5091 static const uint16_t DOpcodes[] = { ARM::VST3d8Pseudo,
5092 ARM::VST3d16Pseudo,
5093 ARM::VST3d32Pseudo,
5094 ARM::VST1d64TPseudo };
5095 static const uint16_t QOpcodes0[] = { ARM::VST3q8Pseudo_UPD,
5096 ARM::VST3q16Pseudo_UPD,
5097 ARM::VST3q32Pseudo_UPD };
5098 static const uint16_t QOpcodes1[] = { ARM::VST3q8oddPseudo,
5099 ARM::VST3q16oddPseudo,
5100 ARM::VST3q32oddPseudo };
5101 SelectVST(N, false, 3, DOpcodes, QOpcodes0, QOpcodes1);
5102 return;
5103 }
5104
5105 case Intrinsic::arm_neon_vst4: {
5106 static const uint16_t DOpcodes[] = { ARM::VST4d8Pseudo,
5107 ARM::VST4d16Pseudo,
5108 ARM::VST4d32Pseudo,
5109 ARM::VST1d64QPseudo };
5110 static const uint16_t QOpcodes0[] = { ARM::VST4q8Pseudo_UPD,
5111 ARM::VST4q16Pseudo_UPD,
5112 ARM::VST4q32Pseudo_UPD };
5113 static const uint16_t QOpcodes1[] = { ARM::VST4q8oddPseudo,
5114 ARM::VST4q16oddPseudo,
5115 ARM::VST4q32oddPseudo };
5116 SelectVST(N, false, 4, DOpcodes, QOpcodes0, QOpcodes1);
5117 return;
5118 }
5119
5120 case Intrinsic::arm_neon_vst2lane: {
5121 static const uint16_t DOpcodes[] = { ARM::VST2LNd8Pseudo,
5122 ARM::VST2LNd16Pseudo,
5123 ARM::VST2LNd32Pseudo };
5124 static const uint16_t QOpcodes[] = { ARM::VST2LNq16Pseudo,
5125 ARM::VST2LNq32Pseudo };
5126 SelectVLDSTLane(N, false, false, 2, DOpcodes, QOpcodes);
5127 return;
5128 }
5129
5130 case Intrinsic::arm_neon_vst3lane: {
5131 static const uint16_t DOpcodes[] = { ARM::VST3LNd8Pseudo,
5132 ARM::VST3LNd16Pseudo,
5133 ARM::VST3LNd32Pseudo };
5134 static const uint16_t QOpcodes[] = { ARM::VST3LNq16Pseudo,
5135 ARM::VST3LNq32Pseudo };
5136 SelectVLDSTLane(N, false, false, 3, DOpcodes, QOpcodes);
5137 return;
5138 }
5139
5140 case Intrinsic::arm_neon_vst4lane: {
5141 static const uint16_t DOpcodes[] = { ARM::VST4LNd8Pseudo,
5142 ARM::VST4LNd16Pseudo,
5143 ARM::VST4LNd32Pseudo };
5144 static const uint16_t QOpcodes[] = { ARM::VST4LNq16Pseudo,
5145 ARM::VST4LNq32Pseudo };
5146 SelectVLDSTLane(N, false, false, 4, DOpcodes, QOpcodes);
5147 return;
5148 }
5149
5150 case Intrinsic::arm_mve_vldr_gather_base_wb:
5151 case Intrinsic::arm_mve_vldr_gather_base_wb_predicated: {
5152 static const uint16_t Opcodes[] = {ARM::MVE_VLDRWU32_qi_pre,
5153 ARM::MVE_VLDRDU64_qi_pre};
5154 SelectMVE_WB(N, Opcodes,
5155 IntNo == Intrinsic::arm_mve_vldr_gather_base_wb_predicated);
5156 return;
5157 }
5158
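// MVE vld2q deinterleaves into two Q registers via the two-stage
// VLD20/VLD21 sequence (and vld4q below uses the four-stage VLD40..VLD43).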
5159 case Intrinsic::arm_mve_vld2q: {
5160 static const uint16_t Opcodes8[] = {ARM::MVE_VLD20_8, ARM::MVE_VLD21_8};
5161 static const uint16_t Opcodes16[] = {ARM::MVE_VLD20_16,
5162 ARM::MVE_VLD21_16};
5163 static const uint16_t Opcodes32[] = {ARM::MVE_VLD20_32,
5164 ARM::MVE_VLD21_32};
5165 static const uint16_t *const Opcodes[] = {Opcodes8, Opcodes16, Opcodes32};
5166 SelectMVE_VLD(N, 2, Opcodes, false);
5167 return;
5168 }
5169
5170 case Intrinsic::arm_mve_vld4q: {
5171 static const uint16_t Opcodes8[] = {ARM::MVE_VLD40_8, ARM::MVE_VLD41_8,
5172 ARM::MVE_VLD42_8, ARM::MVE_VLD43_8};
5173 static const uint16_t Opcodes16[] = {ARM::MVE_VLD40_16, ARM::MVE_VLD41_16,
5174 ARM::MVE_VLD42_16,
5175 ARM::MVE_VLD43_16};
5176 static const uint16_t Opcodes32[] = {ARM::MVE_VLD40_32, ARM::MVE_VLD41_32,
5177 ARM::MVE_VLD42_32,
5178 ARM::MVE_VLD43_32};
5179 static const uint16_t *const Opcodes[] = {Opcodes8, Opcodes16, Opcodes32};
5180 SelectMVE_VLD(N, 4, Opcodes, false);
5181 return;
5182 }
5183 }
5184 break;
5185 }
5186
5187 case ISD::INTRINSIC_WO_CHAIN: {
5188 unsigned IntNo = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
5189 switch (IntNo) {
5190 default:
5191 break;
5192
5193 // Scalar f32 -> bf16
5194 case Intrinsic::arm_neon_vcvtbfp2bf: {
5195 SDLoc dl(N);
5196 const SDValue &Src = N->getOperand(1);
5197 llvm::EVT DestTy = N->getValueType(0);
5198 SDValue Pred = getAL(CurDAG, dl);
5199 SDValue Reg0 = CurDAG->getRegister(0, MVT::i32);
5200 SDValue Ops[] = { Src, Src, Pred, Reg0 };
5201 CurDAG->SelectNodeTo(N, ARM::BF16_VCVTB, DestTy, Ops);
5202 return;
5203 }
5204
5205 // Vector v4f32 -> v4bf16
5206 case Intrinsic::arm_neon_vcvtfp2bf: {
5207 SDLoc dl(N);
5208 const SDValue &Src = N->getOperand(1);
5209 SDValue Pred = getAL(CurDAG, dl);
5210 SDValue Reg0 = CurDAG->getRegister(0, MVT::i32);
5211 SDValue Ops[] = { Src, Pred, Reg0 };
5212 CurDAG->SelectNodeTo(N, ARM::BF16_VCVT, MVT::v4bf16, Ops);
5213 return;
5214 }
5215
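// The MVE long shifts below operate on a 64-bit value split across two
// GPRs. The two boolean arguments to SelectMVE_LongShift select an
// immediate shift amount and an extra saturation-width operand for the
// saturating register-shifted forms, respectively.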
5216 case Intrinsic::arm_mve_urshrl:
5217 SelectMVE_LongShift(N, ARM::MVE_URSHRL, true, false);
5218 return;
5219 case Intrinsic::arm_mve_uqshll:
5220 SelectMVE_LongShift(N, ARM::MVE_UQSHLL, true, false);
5221 return;
5222 case Intrinsic::arm_mve_srshrl:
5223 SelectMVE_LongShift(N, ARM::MVE_SRSHRL, true, false);
5224 return;
5225 case Intrinsic::arm_mve_sqshll:
5226 SelectMVE_LongShift(N, ARM::MVE_SQSHLL, true, false);
5227 return;
5228 case Intrinsic::arm_mve_uqrshll:
5229 SelectMVE_LongShift(N, ARM::MVE_UQRSHLL, false, true);
5230 return;
5231 case Intrinsic::arm_mve_sqrshrl:
5232 SelectMVE_LongShift(N, ARM::MVE_SQRSHRL, false, true);
5233 return;
5234
5235 case Intrinsic::arm_mve_vadc:
5236 case Intrinsic::arm_mve_vadc_predicated:
5237 SelectMVE_VADCSBC(N, ARM::MVE_VADC, ARM::MVE_VADCI, true,
5238 IntNo == Intrinsic::arm_mve_vadc_predicated);
5239 return;
5240 case Intrinsic::arm_mve_vsbc:
5241 case Intrinsic::arm_mve_vsbc_predicated:
5242 SelectMVE_VADCSBC(N, ARM::MVE_VSBC, ARM::MVE_VSBCI, true,
5243 IntNo == Intrinsic::arm_mve_vsbc_predicated);
5244 return;
5245 case Intrinsic::arm_mve_vshlc:
5246 case Intrinsic::arm_mve_vshlc_predicated:
5247 SelectMVE_VSHLC(N, IntNo == Intrinsic::arm_mve_vshlc_predicated);
5248 return;
5249
5250 case Intrinsic::arm_mve_vmlldava:
5251 case Intrinsic::arm_mve_vmlldava_predicated: {
5252 static const uint16_t OpcodesU[] = {
5253 ARM::MVE_VMLALDAVu16, ARM::MVE_VMLALDAVu32,
5254 ARM::MVE_VMLALDAVau16, ARM::MVE_VMLALDAVau32,
5255 };
5256 static const uint16_t OpcodesS[] = {
5257 ARM::MVE_VMLALDAVs16, ARM::MVE_VMLALDAVs32,
5258 ARM::MVE_VMLALDAVas16, ARM::MVE_VMLALDAVas32,
5259 ARM::MVE_VMLALDAVxs16, ARM::MVE_VMLALDAVxs32,
5260 ARM::MVE_VMLALDAVaxs16, ARM::MVE_VMLALDAVaxs32,
5261 ARM::MVE_VMLSLDAVs16, ARM::MVE_VMLSLDAVs32,
5262 ARM::MVE_VMLSLDAVas16, ARM::MVE_VMLSLDAVas32,
5263 ARM::MVE_VMLSLDAVxs16, ARM::MVE_VMLSLDAVxs32,
5264 ARM::MVE_VMLSLDAVaxs16, ARM::MVE_VMLSLDAVaxs32,
5265 };
5266 SelectMVE_VMLLDAV(N, IntNo == Intrinsic::arm_mve_vmlldava_predicated,
5267 OpcodesS, OpcodesU);
5268 return;
5269 }
5270
5271 case Intrinsic::arm_mve_vrmlldavha:
5272 case Intrinsic::arm_mve_vrmlldavha_predicated: {
5273 static const uint16_t OpcodesU[] = {
5274 ARM::MVE_VRMLALDAVHu32, ARM::MVE_VRMLALDAVHau32,
5275 };
5276 static const uint16_t OpcodesS[] = {
5277 ARM::MVE_VRMLALDAVHs32, ARM::MVE_VRMLALDAVHas32,
5278 ARM::MVE_VRMLALDAVHxs32, ARM::MVE_VRMLALDAVHaxs32,
5279 ARM::MVE_VRMLSLDAVHs32, ARM::MVE_VRMLSLDAVHas32,
5280 ARM::MVE_VRMLSLDAVHxs32, ARM::MVE_VRMLSLDAVHaxs32,
5281 };
5282 SelectMVE_VRMLLDAVH(N, IntNo == Intrinsic::arm_mve_vrmlldavha_predicated,
5283 OpcodesS, OpcodesU);
5284 return;
5285 }
5286
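// The VIDUP/VDDUP family produces a vector of incrementing/decrementing
// values plus the updated scalar start value; the wrapping VIWDUP/VDWDUP
// forms (Wrapping = true below) additionally take a wrap limit.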
5287 case Intrinsic::arm_mve_vidup:
5288 case Intrinsic::arm_mve_vidup_predicated: {
5289 static const uint16_t Opcodes[] = {
5290 ARM::MVE_VIDUPu8, ARM::MVE_VIDUPu16, ARM::MVE_VIDUPu32,
5291 };
5292 SelectMVE_VxDUP(N, Opcodes, false,
5293 IntNo == Intrinsic::arm_mve_vidup_predicated);
5294 return;
5295 }
5296
5297 case Intrinsic::arm_mve_vddup:
5298 case Intrinsic::arm_mve_vddup_predicated: {
5299 static const uint16_t Opcodes[] = {
5300 ARM::MVE_VDDUPu8, ARM::MVE_VDDUPu16, ARM::MVE_VDDUPu32,
5301 };
5302 SelectMVE_VxDUP(N, Opcodes, false,
5303 IntNo == Intrinsic::arm_mve_vddup_predicated);
5304 return;
5305 }
5306
5307 case Intrinsic::arm_mve_viwdup:
5308 case Intrinsic::arm_mve_viwdup_predicated: {
5309 static const uint16_t Opcodes[] = {
5310 ARM::MVE_VIWDUPu8, ARM::MVE_VIWDUPu16, ARM::MVE_VIWDUPu32,
5311 };
5312 SelectMVE_VxDUP(N, Opcodes, true,
5313 IntNo == Intrinsic::arm_mve_viwdup_predicated);
5314 return;
5315 }
5316
5317 case Intrinsic::arm_mve_vdwdup:
5318 case Intrinsic::arm_mve_vdwdup_predicated: {
5319 static const uint16_t Opcodes[] = {
5320 ARM::MVE_VDWDUPu8, ARM::MVE_VDWDUPu16, ARM::MVE_VDWDUPu32,
5321 };
5322 SelectMVE_VxDUP(N, Opcodes, true,
5323 IntNo == Intrinsic::arm_mve_vdwdup_predicated);
5324 return;
5325 }
5326
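// The dual-register CDE intrinsics return their result as a register pair;
// NumExtraOps counts the extra scalar inputs beyond the coprocessor number
// (none for cx1d, one for cx2d, two for cx3d).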
5327 case Intrinsic::arm_cde_cx1d:
5328 case Intrinsic::arm_cde_cx1da:
5329 case Intrinsic::arm_cde_cx2d:
5330 case Intrinsic::arm_cde_cx2da:
5331 case Intrinsic::arm_cde_cx3d:
5332 case Intrinsic::arm_cde_cx3da: {
5333 bool HasAccum = IntNo == Intrinsic::arm_cde_cx1da ||
5334 IntNo == Intrinsic::arm_cde_cx2da ||
5335 IntNo == Intrinsic::arm_cde_cx3da;
5336 size_t NumExtraOps;
5337 uint16_t Opcode;
5338 switch (IntNo) {
5339 case Intrinsic::arm_cde_cx1d:
5340 case Intrinsic::arm_cde_cx1da:
5341 NumExtraOps = 0;
5342 Opcode = HasAccum ? ARM::CDE_CX1DA : ARM::CDE_CX1D;
5343 break;
5344 case Intrinsic::arm_cde_cx2d:
5345 case Intrinsic::arm_cde_cx2da:
5346 NumExtraOps = 1;
5347 Opcode = HasAccum ? ARM::CDE_CX2DA : ARM::CDE_CX2D;
5348 break;
5349 case Intrinsic::arm_cde_cx3d:
5350 case Intrinsic::arm_cde_cx3da:
5351 NumExtraOps = 2;
5352 Opcode = HasAccum ? ARM::CDE_CX3DA : ARM::CDE_CX3D;
5353 break;
5354 default:
5355 llvm_unreachable("Unexpected opcode");
5356 }
5357 SelectCDE_CXxD(N, Opcode, NumExtraOps, HasAccum);
5358 return;
5359 }
5360 }
5361 break;
5362 }
5363
5364 case ISD::ATOMIC_CMP_SWAP:
5365 SelectCMP_SWAP(N);
5366 return;
5367 }
5368
5369 SelectCode(N);
5370 }
5371
5372 // Inspect a register string of the form
5373 // cp<coprocessor>:<opc1>:c<CRn>:c<CRm>:<opc2> (32-bit) or
5374 // cp<coprocessor>:<opc1>:c<CRm> (64-bit), extract the integer
5375 // operands from its fields, and append these operands to the
5376 // provided vector.
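// For example, "cp15:0:c13:c0:3" produces the five operands
// {15, 0, 13, 0, 3}.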
5377 static void getIntOperandsFromRegisterString(StringRef RegString,
5378 SelectionDAG *CurDAG,
5379 const SDLoc &DL,
5380 std::vector<SDValue> &Ops) {
5381 SmallVector<StringRef, 5> Fields;
5382 RegString.split(Fields, ':');
5383
5384 if (Fields.size() > 1) {
5385 bool AllIntFields = true;
5386
5387 for (StringRef Field : Fields) {
5388 // Need to trim out leading 'cp' characters and get the integer field.
5389 unsigned IntField;
5390 AllIntFields &= !Field.trim("CPcp").getAsInteger(10, IntField);
5391 Ops.push_back(CurDAG->getTargetConstant(IntField, DL, MVT::i32));
5392 }
5393
5394 assert(AllIntFields &&
5395 "Unexpected non-integer value in special register string.");
5396 (void)AllIntFields;
5397 }
5398 }
5399
5400 // Maps a Banked Register string to its mask value. The mask value returned is
5401 // for use in the MRSbanked / MSRbanked instruction nodes as the Banked Register
5402 // mask operand, which expresses which register is to be used, e.g. r8, and in
5403 // which mode it is to be used, e.g. usr. Returns -1 to signify that the string
5404 // was invalid.
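// For example, "r8_usr" selects r8 in usr mode.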
5405 static inline int getBankedRegisterMask(StringRef RegString) {
5406 auto TheReg = ARMBankedReg::lookupBankedRegByName(RegString.lower());
5407 if (!TheReg)
5408 return -1;
5409 return TheReg->Encoding;
5410 }
5411
5412 // The flags here are common to those allowed for apsr in the A class cores and
5413 // those allowed for the special registers in the M class cores. Returns a
5414 // value representing which flags were present, -1 if invalid.
5415 static inline int getMClassFlagsMask(StringRef Flags) {
5416 return StringSwitch<int>(Flags)
5417 .Case("", 0x2) // no flags means nzcvq for psr registers, and 0x2 is
5418 // correct when flags are not permitted
5419 .Case("g", 0x1)
5420 .Case("nzcvq", 0x2)
5421 .Case("nzcvqg", 0x3)
5422 .Default(-1);
5423 }
5424
5425 // Maps MClass special registers string to its value for use in the
5426 // t2MRS_M/t2MSR_M instruction nodes as the SYSm value operand.
5427 // Returns -1 to signify that the string was invalid.
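// For example, on M-class cores that provide it, "basepri" yields its
// SYSm encoding of 17.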
5428 static int getMClassRegisterMask(StringRef Reg, const ARMSubtarget *Subtarget) {
5429 auto TheReg = ARMSysReg::lookupMClassSysRegByName(Reg);
5430 const FeatureBitset &FeatureBits = Subtarget->getFeatureBits();
5431 if (!TheReg || !TheReg->hasRequiredFeatures(FeatureBits))
5432 return -1;
5433 return (int)(TheReg->Encoding & 0xFFF); // SYSm value
5434 }
5435
5436 static int getARClassRegisterMask(StringRef Reg, StringRef Flags) {
5437 // The mask operand encodes the special register in bit 4 (the R bit):
5438 // 1 if the register is spsr, 0 if it is one of cpsr/apsr. Bits 3-0
5439 // contain the fields to be accessed in the special register, set by
5440 // the flags provided with the register.
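// For example, "spsr" with flags "fc" gives 0x10 | 0x8 | 0x1 = 0x19.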
5441 int Mask = 0;
5442 if (Reg == "apsr") {
5443 // The flags permitted for apsr are the same flags that are allowed in
5444 // M class registers. We get the flag value and then shift the flags into
5445 // the correct place to combine with the mask.
5446 Mask = getMClassFlagsMask(Flags);
5447 if (Mask == -1)
5448 return -1;
5449 return Mask << 2;
5450 }
5451
5452 if (Reg != "cpsr" && Reg != "spsr") {
5453 return -1;
5454 }
5455
5456 // This is the same as if the flags were "fc"
5457 if (Flags.empty() || Flags == "all")
5458 return Mask | 0x9;
5459
5460 // Inspect the supplied flags string and set the bits in the mask for
5461 // the relevant and valid flags allowed for cpsr and spsr.
5462 for (char Flag : Flags) {
5463 int FlagVal;
5464 switch (Flag) {
5465 case 'c':
5466 FlagVal = 0x1;
5467 break;
5468 case 'x':
5469 FlagVal = 0x2;
5470 break;
5471 case 's':
5472 FlagVal = 0x4;
5473 break;
5474 case 'f':
5475 FlagVal = 0x8;
5476 break;
5477 default:
5478 FlagVal = 0;
5479 }
5480
5481 // This avoids allowing strings where the same flag bit appears twice.
5482 if (!FlagVal || (Mask & FlagVal))
5483 return -1;
5484 Mask |= FlagVal;
5485 }
5486
5487 // If the register is spsr then we need to set the R bit.
5488 if (Reg == "spsr")
5489 Mask |= 0x10;
5490
5491 return Mask;
5492 }
5493
5494 // Lower the read_register intrinsic to ARM specific DAG nodes
5495 // using the supplied metadata string to select the instruction node to use
5496 // and the registers/masks to construct as operands for the node.
5497 bool ARMDAGToDAGISel::tryReadRegister(SDNode *N) {
5498 const auto *MD = cast<MDNodeSDNode>(N->getOperand(1));
5499 const auto *RegString = cast<MDString>(MD->getMD()->getOperand(0));
5500 bool IsThumb2 = Subtarget->isThumb2();
5501 SDLoc DL(N);
5502
5503 std::vector<SDValue> Ops;
5504 getIntOperandsFromRegisterString(RegString->getString(), CurDAG, DL, Ops);
5505
5506 if (!Ops.empty()) {
5507 // If the special register string was constructed of fields (as defined
5508 // in the ACLE) then we need to lower to an MRC node (32-bit) or an
5509 // MRRC node (64-bit); we can make the distinction based on the number
5510 // of operands we have.
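// For example, reading "cp15:0:c13:c0:3" lowers to an MRC with those five
// immediate operands.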
5511 unsigned Opcode;
5512 SmallVector<EVT, 3> ResTypes;
5513 if (Ops.size() == 5) {
5514 Opcode = IsThumb2 ? ARM::t2MRC : ARM::MRC;
5515 ResTypes.append({ MVT::i32, MVT::Other });
5516 } else {
5517 assert(Ops.size() == 3 &&
5518 "Invalid number of fields in special register string.");
5519 Opcode = IsThumb2 ? ARM::t2MRRC : ARM::MRRC;
5520 ResTypes.append({ MVT::i32, MVT::i32, MVT::Other });
5521 }
5522
5523 Ops.push_back(getAL(CurDAG, DL));
5524 Ops.push_back(CurDAG->getRegister(0, MVT::i32));
5525 Ops.push_back(N->getOperand(0));
5526 ReplaceNode(N, CurDAG->getMachineNode(Opcode, DL, ResTypes, Ops));
5527 return true;
5528 }
5529
5530 std::string SpecialReg = RegString->getString().lower();
5531
5532 int BankedReg = getBankedRegisterMask(SpecialReg);
5533 if (BankedReg != -1) {
5534 Ops = { CurDAG->getTargetConstant(BankedReg, DL, MVT::i32),
5535 getAL(CurDAG, DL), CurDAG->getRegister(0, MVT::i32),
5536 N->getOperand(0) };
5537 ReplaceNode(
5538 N, CurDAG->getMachineNode(IsThumb2 ? ARM::t2MRSbanked : ARM::MRSbanked,
5539 DL, MVT::i32, MVT::Other, Ops));
5540 return true;
5541 }
5542
5543 // The VFP registers are read by creating SelectionDAG nodes with opcodes
5544 // corresponding to the register that is being read from. So we switch on the
5545 // string to find which opcode we need to use.
5546 unsigned Opcode = StringSwitch<unsigned>(SpecialReg)
5547 .Case("fpscr", ARM::VMRS)
5548 .Case("fpexc", ARM::VMRS_FPEXC)
5549 .Case("fpsid", ARM::VMRS_FPSID)
5550 .Case("mvfr0", ARM::VMRS_MVFR0)
5551 .Case("mvfr1", ARM::VMRS_MVFR1)
5552 .Case("mvfr2", ARM::VMRS_MVFR2)
5553 .Case("fpinst", ARM::VMRS_FPINST)
5554 .Case("fpinst2", ARM::VMRS_FPINST2)
5555 .Default(0);
5556
5557 // If an opcode was found then we can lower the read to a VFP instruction.
5558 if (Opcode) {
5559 if (!Subtarget->hasVFP2Base())
5560 return false;
5561 if (Opcode == ARM::VMRS_MVFR2 && !Subtarget->hasFPARMv8Base())
5562 return false;
5563
5564 Ops = { getAL(CurDAG, DL), CurDAG->getRegister(0, MVT::i32),
5565 N->getOperand(0) };
5566 ReplaceNode(N,
5567 CurDAG->getMachineNode(Opcode, DL, MVT::i32, MVT::Other, Ops));
5568 return true;
5569 }
5570
5571 // If the target is M-class then we need to validate that the register
5572 // string is an acceptable value, so we check that a mask can be
5573 // constructed from the string.
5574 if (Subtarget->isMClass()) {
5575 int SYSmValue = getMClassRegisterMask(SpecialReg, Subtarget);
5576 if (SYSmValue == -1)
5577 return false;
5578
5579 SDValue Ops[] = { CurDAG->getTargetConstant(SYSmValue, DL, MVT::i32),
5580 getAL(CurDAG, DL), CurDAG->getRegister(0, MVT::i32),
5581 N->getOperand(0) };
5582 ReplaceNode(
5583 N, CurDAG->getMachineNode(ARM::t2MRS_M, DL, MVT::i32, MVT::Other, Ops));
5584 return true;
5585 }
5586
5587 // Here we know the target is not M-class, so we check whether it is one
5588 // of the remaining possible values: apsr, cpsr or spsr.
5589 if (SpecialReg == "apsr" || SpecialReg == "cpsr") {
5590 Ops = { getAL(CurDAG, DL), CurDAG->getRegister(0, MVT::i32),
5591 N->getOperand(0) };
5592 ReplaceNode(N, CurDAG->getMachineNode(IsThumb2 ? ARM::t2MRS_AR : ARM::MRS,
5593 DL, MVT::i32, MVT::Other, Ops));
5594 return true;
5595 }
5596
5597 if (SpecialReg == "spsr") {
5598 Ops = { getAL(CurDAG, DL), CurDAG->getRegister(0, MVT::i32),
5599 N->getOperand(0) };
5600 ReplaceNode(
5601 N, CurDAG->getMachineNode(IsThumb2 ? ARM::t2MRSsys_AR : ARM::MRSsys, DL,
5602 MVT::i32, MVT::Other, Ops));
5603 return true;
5604 }
5605
5606 return false;
5607 }
5608
5609 // Lower the write_register intrinsic to ARM specific DAG nodes
5610 // using the supplied metadata string to select the instruction node to use
5611 // and the registers/masks to use in the nodes
5612 bool ARMDAGToDAGISel::tryWriteRegister(SDNode *N) {
5613 const auto *MD = cast<MDNodeSDNode>(N->getOperand(1));
5614 const auto *RegString = cast<MDString>(MD->getMD()->getOperand(0));
5615 bool IsThumb2 = Subtarget->isThumb2();
5616 SDLoc DL(N);
5617
5618 std::vector<SDValue> Ops;
5619 getIntOperandsFromRegisterString(RegString->getString(), CurDAG, DL, Ops);
5620
5621 if (!Ops.empty()) {
5622 // If the special register string was constructed of fields (as defined
5623 // in the ACLE) then we need to lower to an MCR node (32-bit) or an
5624 // MCRR node (64-bit); we can make the distinction based on the number
5625 // of operands we have.
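// For example, a 64-bit write to "cp15:0:c2" lowers to an MCRR whose two
// register operands carry the two halves of the written value.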
5626 unsigned Opcode;
5627 if (Ops.size() == 5) {
5628 Opcode = IsThumb2 ? ARM::t2MCR : ARM::MCR;
5629 Ops.insert(Ops.begin()+2, N->getOperand(2));
5630 } else {
5631 assert(Ops.size() == 3 &&
5632 "Invalid number of fields in special register string.");
5633 Opcode = IsThumb2 ? ARM::t2MCRR : ARM::MCRR;
5634 SDValue WriteValue[] = { N->getOperand(2), N->getOperand(3) };
5635 Ops.insert(Ops.begin()+2, WriteValue, WriteValue+2);
5636 }
5637
5638 Ops.push_back(getAL(CurDAG, DL));
5639 Ops.push_back(CurDAG->getRegister(0, MVT::i32));
5640 Ops.push_back(N->getOperand(0));
5641
5642 ReplaceNode(N, CurDAG->getMachineNode(Opcode, DL, MVT::Other, Ops));
5643 return true;
5644 }
5645
5646 std::string SpecialReg = RegString->getString().lower();
5647 int BankedReg = getBankedRegisterMask(SpecialReg);
5648 if (BankedReg != -1) {
5649 Ops = { CurDAG->getTargetConstant(BankedReg, DL, MVT::i32), N->getOperand(2),
5650 getAL(CurDAG, DL), CurDAG->getRegister(0, MVT::i32),
5651 N->getOperand(0) };
5652 ReplaceNode(
5653 N, CurDAG->getMachineNode(IsThumb2 ? ARM::t2MSRbanked : ARM::MSRbanked,
5654 DL, MVT::Other, Ops));
5655 return true;
5656 }
5657
5658 // The VFP registers are written to by creating SelectionDAG nodes with
5659 // opcodes corresponding to the register that is being written. So we switch
5660 // on the string to find which opcode we need to use.
5661 unsigned Opcode = StringSwitch<unsigned>(SpecialReg)
5662 .Case("fpscr", ARM::VMSR)
5663 .Case("fpexc", ARM::VMSR_FPEXC)
5664 .Case("fpsid", ARM::VMSR_FPSID)
5665 .Case("fpinst", ARM::VMSR_FPINST)
5666 .Case("fpinst2", ARM::VMSR_FPINST2)
5667 .Default(0);
5668
5669 if (Opcode) {
5670 if (!Subtarget->hasVFP2Base())
5671 return false;
5672 Ops = { N->getOperand(2), getAL(CurDAG, DL),
5673 CurDAG->getRegister(0, MVT::i32), N->getOperand(0) };
5674 ReplaceNode(N, CurDAG->getMachineNode(Opcode, DL, MVT::Other, Ops));
5675 return true;
5676 }
5677
5678 std::pair<StringRef, StringRef> Fields;
5679 Fields = StringRef(SpecialReg).rsplit('_');
5680 std::string Reg = Fields.first.str();
5681 StringRef Flags = Fields.second;
5682
5683 // If the target is M-class then we need to validate the special register
5684 // value and retrieve the mask for use in the instruction node.
5685 if (Subtarget->isMClass()) {
5686 int SYSmValue = getMClassRegisterMask(SpecialReg, Subtarget);
5687 if (SYSmValue == -1)
5688 return false;
5689
5690 SDValue Ops[] = { CurDAG->getTargetConstant(SYSmValue, DL, MVT::i32),
5691 N->getOperand(2), getAL(CurDAG, DL),
5692 CurDAG->getRegister(0, MVT::i32), N->getOperand(0) };
5693 ReplaceNode(N, CurDAG->getMachineNode(ARM::t2MSR_M, DL, MVT::Other, Ops));
5694 return true;
5695 }
5696
5697 // We then check to see if a valid mask can be constructed for one of the
5698 // register string values permitted for the A and R class cores. These values
5699 // are apsr, spsr and cpsr; these are also valid on older cores.
5700 int Mask = getARClassRegisterMask(Reg, Flags);
5701 if (Mask != -1) {
5702 Ops = { CurDAG->getTargetConstant(Mask, DL, MVT::i32), N->getOperand(2),
5703 getAL(CurDAG, DL), CurDAG->getRegister(0, MVT::i32),
5704 N->getOperand(0) };
5705 ReplaceNode(N, CurDAG->getMachineNode(IsThumb2 ? ARM::t2MSR_AR : ARM::MSR,
5706 DL, MVT::Other, Ops));
5707 return true;
5708 }
5709
5710 return false;
5711 }
5712
5713 bool ARMDAGToDAGISel::tryInlineAsm(SDNode *N) {
5714 std::vector<SDValue> AsmNodeOperands;
5715 unsigned Flag, Kind;
5716 bool Changed = false;
5717 unsigned NumOps = N->getNumOperands();
5718
5719 // Normally, i64 data is bound to two arbitrary GPRs for the "%r"
5720 // constraint. However, some instructions (e.g. ldrexd/strexd in ARM mode)
5721 // require (even, even+1) GPR pairs and use %n and %Hn to refer to the
5722 // individual regs respectively. Since there is no constraint to
5723 // explicitly specify a reg pair, we use the GPRPair reg class for "%r"
5724 // for 64-bit data. For Thumb, the 64-bit data may be referred to by the
5725 // H, Q, R modifiers, so we still pack them into a GPRPair.
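// For example, a 64-bit "=&r" output referenced as %0 and %H0 in the asm
// string is modeled as two i32 register operands here; the code below
// rewrites them into a single GPRPair virtual register.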
5726
5727 SDLoc dl(N);
5728 SDValue Glue = N->getGluedNode() ? N->getOperand(NumOps - 1) : SDValue();
5729
5730 SmallVector<bool, 8> OpChanged;
5731 // The glue node will be appended later.
5732 for (unsigned i = 0, e = N->getGluedNode() ? NumOps - 1 : NumOps; i < e; ++i) {
5733 SDValue op = N->getOperand(i);
5734 AsmNodeOperands.push_back(op);
5735
5736 if (i < InlineAsm::Op_FirstOperand)
5737 continue;
5738
5739 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(i))) {
5740 Flag = C->getZExtValue();
5741 Kind = InlineAsm::getKind(Flag);
5742 }
5743 else
5744 continue;
5745
5746 // Immediate operands to inline asm in the SelectionDAG are modeled with
5747 // two operands. The first is a constant of value InlineAsm::Kind_Imm, and
5748 // the second is a constant with the value of the immediate. If we get here
5749 // and we have a Kind_Imm, skip the next operand, and continue.
5750 if (Kind == InlineAsm::Kind_Imm) {
5751 SDValue op = N->getOperand(++i);
5752 AsmNodeOperands.push_back(op);
5753 continue;
5754 }
5755
5756 unsigned NumRegs = InlineAsm::getNumOperandRegisters(Flag);
5757 if (NumRegs)
5758 OpChanged.push_back(false);
5759
5760 unsigned DefIdx = 0;
5761 bool IsTiedToChangedOp = false;
5762 // If it's a use that is tied with a previous def, it has no
5763 // reg class constraint.
5764 if (Changed && InlineAsm::isUseOperandTiedToDef(Flag, DefIdx))
5765 IsTiedToChangedOp = OpChanged[DefIdx];
5766
5767 // Memory operands to inline asm in the SelectionDAG are modeled with two
5768 // operands: a constant of value InlineAsm::Kind_Mem followed by the input
5769 // operand. If we get here and we have a Kind_Mem, skip the next operand (so
5770 // it doesn't get misinterpreted), and continue. We do this here because
5771 // it's important to update the OpChanged array correctly before moving on.
5772 if (Kind == InlineAsm::Kind_Mem) {
5773 SDValue op = N->getOperand(++i);
5774 AsmNodeOperands.push_back(op);
5775 continue;
5776 }
5777
5778 if (Kind != InlineAsm::Kind_RegUse && Kind != InlineAsm::Kind_RegDef
5779 && Kind != InlineAsm::Kind_RegDefEarlyClobber)
5780 continue;
5781
5782 unsigned RC;
5783 bool HasRC = InlineAsm::hasRegClassConstraint(Flag, RC);
5784 if ((!IsTiedToChangedOp && (!HasRC || RC != ARM::GPRRegClassID))
5785 || NumRegs != 2)
5786 continue;
5787
5788 assert((i+2 < NumOps) && "Invalid number of operands in inline asm");
5789 SDValue V0 = N->getOperand(i+1);
5790 SDValue V1 = N->getOperand(i+2);
5791 Register Reg0 = cast<RegisterSDNode>(V0)->getReg();
5792 Register Reg1 = cast<RegisterSDNode>(V1)->getReg();
5793 SDValue PairedReg;
5794 MachineRegisterInfo &MRI = MF->getRegInfo();
5795
5796 if (Kind == InlineAsm::Kind_RegDef ||
5797 Kind == InlineAsm::Kind_RegDefEarlyClobber) {
5798 // Replace the two GPRs with 1 GPRPair and copy values from GPRPair to
5799 // the original GPRs.
5800
5801 Register GPVR = MRI.createVirtualRegister(&ARM::GPRPairRegClass);
5802 PairedReg = CurDAG->getRegister(GPVR, MVT::Untyped);
5803 SDValue Chain = SDValue(N,0);
5804
5805 SDNode *GU = N->getGluedUser();
5806 SDValue RegCopy = CurDAG->getCopyFromReg(Chain, dl, GPVR, MVT::Untyped,
5807 Chain.getValue(1));
5808
5809 // Extract values from a GPRPair reg and copy to the original GPR reg.
5810 SDValue Sub0 = CurDAG->getTargetExtractSubreg(ARM::gsub_0, dl, MVT::i32,
5811 RegCopy);
5812 SDValue Sub1 = CurDAG->getTargetExtractSubreg(ARM::gsub_1, dl, MVT::i32,
5813 RegCopy);
5814 SDValue T0 = CurDAG->getCopyToReg(Sub0, dl, Reg0, Sub0,
5815 RegCopy.getValue(1));
5816 SDValue T1 = CurDAG->getCopyToReg(Sub1, dl, Reg1, Sub1, T0.getValue(1));
5817
5818 // Update the original glue user.
5819 std::vector<SDValue> Ops(GU->op_begin(), GU->op_end()-1);
5820 Ops.push_back(T1.getValue(1));
5821 CurDAG->UpdateNodeOperands(GU, Ops);
5822 }
5823 else {
5824 // For Kind == InlineAsm::Kind_RegUse, we first copy two GPRs into a
5825 // GPRPair and then pass the GPRPair to the inline asm.
5826 SDValue Chain = AsmNodeOperands[InlineAsm::Op_InputChain];
5827
5828 // As REG_SEQ doesn't take RegisterSDNode, we copy them first.
5829 SDValue T0 = CurDAG->getCopyFromReg(Chain, dl, Reg0, MVT::i32,
5830 Chain.getValue(1));
5831 SDValue T1 = CurDAG->getCopyFromReg(Chain, dl, Reg1, MVT::i32,
5832 T0.getValue(1));
5833 SDValue Pair = SDValue(createGPRPairNode(MVT::Untyped, T0, T1), 0);
5834
5835 // Copy REG_SEQ into a GPRPair-typed VR and replace the original two
5836 // i32 VRs of inline asm with it.
5837 Register GPVR = MRI.createVirtualRegister(&ARM::GPRPairRegClass);
5838 PairedReg = CurDAG->getRegister(GPVR, MVT::Untyped);
5839 Chain = CurDAG->getCopyToReg(T1, dl, GPVR, Pair, T1.getValue(1));
5840
5841 AsmNodeOperands[InlineAsm::Op_InputChain] = Chain;
5842 Glue = Chain.getValue(1);
5843 }
5844
5845 Changed = true;
5846
5847 if (PairedReg.getNode()) {
5848 OpChanged[OpChanged.size() - 1] = true;
5849 Flag = InlineAsm::getFlagWord(Kind, 1 /* RegNum */);
5850 if (IsTiedToChangedOp)
5851 Flag = InlineAsm::getFlagWordForMatchingOp(Flag, DefIdx);
5852 else
5853 Flag = InlineAsm::getFlagWordForRegClass(Flag, ARM::GPRPairRegClassID);
5854 // Replace the current flag.
5855 AsmNodeOperands[AsmNodeOperands.size() - 1] = CurDAG->getTargetConstant(
5856 Flag, dl, MVT::i32);
5857 // Add the new register node and skip the original two GPRs.
5858 AsmNodeOperands.push_back(PairedReg);
5859 // Skip the next two GPRs.
5860 i += 2;
5861 }
5862 }
5863
5864 if (Glue.getNode())
5865 AsmNodeOperands.push_back(Glue);
5866 if (!Changed)
5867 return false;
5868
5869 SDValue New = CurDAG->getNode(N->getOpcode(), SDLoc(N),
5870 CurDAG->getVTList(MVT::Other, MVT::Glue), AsmNodeOperands);
5871 New->setNodeId(-1);
5872 ReplaceNode(N, New.getNode());
5873 return true;
5874 }
5875
5876
5877 bool ARMDAGToDAGISel::
5878 SelectInlineAsmMemoryOperand(const SDValue &Op, unsigned ConstraintID,
5879 std::vector<SDValue> &OutOps) {
5880 switch (ConstraintID) {
5881 default:
5882 llvm_unreachable("Unexpected asm memory constraint");
5883 case InlineAsm::Constraint_m:
5884 case InlineAsm::Constraint_o:
5885 case InlineAsm::Constraint_Q:
5886 case InlineAsm::Constraint_Um:
5887 case InlineAsm::Constraint_Un:
5888 case InlineAsm::Constraint_Uq:
5889 case InlineAsm::Constraint_Us:
5890 case InlineAsm::Constraint_Ut:
5891 case InlineAsm::Constraint_Uv:
5892 case InlineAsm::Constraint_Uy:
5893 // Require the address to be in a register. That is safe for all ARM
5894 // variants and it is hard to do anything much smarter without knowing
5895 // how the operand is used.
5896 OutOps.push_back(Op);
5897 return false;
5898 }
5899 return true;
5900 }
5901
5902 /// createARMISelDag - This pass converts a legalized DAG into a
5903 /// ARM-specific DAG, ready for instruction scheduling.
5904 ///
5905 FunctionPass *llvm::createARMISelDag(ARMBaseTargetMachine &TM,
5906 CodeGenOpt::Level OptLevel) {
5907 return new ARMDAGToDAGISel(TM, OptLevel);
5908 }
5909