xref: /llvm-project/llvm/lib/Target/AVR/AVRISelLowering.cpp (revision 754ed95b6672b9a678a994cc652862a91cdc4406)
1 //===-- AVRISelLowering.cpp - AVR DAG Lowering Implementation -------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file defines the interfaces that AVR uses to lower LLVM code into a
10 // selection DAG.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "AVRISelLowering.h"
15 
16 #include "llvm/ADT/ArrayRef.h"
17 #include "llvm/ADT/StringSwitch.h"
18 #include "llvm/CodeGen/CallingConvLower.h"
19 #include "llvm/CodeGen/MachineFrameInfo.h"
20 #include "llvm/CodeGen/MachineInstrBuilder.h"
21 #include "llvm/CodeGen/MachineRegisterInfo.h"
22 #include "llvm/CodeGen/SelectionDAG.h"
23 #include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
24 #include "llvm/IR/Function.h"
25 #include "llvm/Support/ErrorHandling.h"
26 
27 #include "AVR.h"
28 #include "AVRMachineFunctionInfo.h"
29 #include "AVRSubtarget.h"
30 #include "AVRTargetMachine.h"
31 #include "MCTargetDesc/AVRMCTargetDesc.h"
32 
33 namespace llvm {
34 
35 AVRTargetLowering::AVRTargetLowering(const AVRTargetMachine &TM,
36                                      const AVRSubtarget &STI)
37     : TargetLowering(TM), Subtarget(STI) {
38   // Set up the register classes.
39   addRegisterClass(MVT::i8, &AVR::GPR8RegClass);
40   addRegisterClass(MVT::i16, &AVR::DREGSRegClass);
41 
42   // Compute derived properties from the register classes.
43   computeRegisterProperties(Subtarget.getRegisterInfo());
44 
45   setBooleanContents(ZeroOrOneBooleanContent);
46   setBooleanVectorContents(ZeroOrOneBooleanContent);
47   setSchedulingPreference(Sched::RegPressure);
48   setStackPointerRegisterToSaveRestore(AVR::SP);
49   setSupportsUnalignedAtomics(true);
50 
51   setOperationAction(ISD::GlobalAddress, MVT::i16, Custom);
52   setOperationAction(ISD::BlockAddress, MVT::i16, Custom);
53 
54   setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
55   setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
56   setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i8, Expand);
57   setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i16, Expand);
58 
59   setOperationAction(ISD::INLINEASM, MVT::Other, Custom);
60 
61   for (MVT VT : MVT::integer_valuetypes()) {
62     for (auto N : {ISD::EXTLOAD, ISD::SEXTLOAD, ISD::ZEXTLOAD}) {
63       setLoadExtAction(N, VT, MVT::i1, Promote);
64       setLoadExtAction(N, VT, MVT::i8, Expand);
65     }
66   }
67 
68   setTruncStoreAction(MVT::i16, MVT::i8, Expand);
69 
70   for (MVT VT : MVT::integer_valuetypes()) {
71     setOperationAction(ISD::ADDC, VT, Legal);
72     setOperationAction(ISD::SUBC, VT, Legal);
73     setOperationAction(ISD::ADDE, VT, Legal);
74     setOperationAction(ISD::SUBE, VT, Legal);
75   }
76 
77   // sub (x, imm) gets canonicalized to add (x, -imm), so for illegal types
78   // revert into a sub since we don't have an add with immediate instruction.
79   setOperationAction(ISD::ADD, MVT::i32, Custom);
80   setOperationAction(ISD::ADD, MVT::i64, Custom);
81 
82   // our shift instructions are only able to shift 1 bit at a time, so handle
83   // this in a custom way.
84   setOperationAction(ISD::SRA, MVT::i8, Custom);
85   setOperationAction(ISD::SHL, MVT::i8, Custom);
86   setOperationAction(ISD::SRL, MVT::i8, Custom);
87   setOperationAction(ISD::SRA, MVT::i16, Custom);
88   setOperationAction(ISD::SHL, MVT::i16, Custom);
89   setOperationAction(ISD::SRL, MVT::i16, Custom);
90   setOperationAction(ISD::SRA, MVT::i32, Custom);
91   setOperationAction(ISD::SHL, MVT::i32, Custom);
92   setOperationAction(ISD::SRL, MVT::i32, Custom);
93   setOperationAction(ISD::SHL_PARTS, MVT::i16, Expand);
94   setOperationAction(ISD::SRA_PARTS, MVT::i16, Expand);
95   setOperationAction(ISD::SRL_PARTS, MVT::i16, Expand);
96 
97   setOperationAction(ISD::ROTL, MVT::i8, Custom);
98   setOperationAction(ISD::ROTL, MVT::i16, Expand);
99   setOperationAction(ISD::ROTR, MVT::i8, Custom);
100   setOperationAction(ISD::ROTR, MVT::i16, Expand);
101 
102   setOperationAction(ISD::BR_CC, MVT::i8, Custom);
103   setOperationAction(ISD::BR_CC, MVT::i16, Custom);
104   setOperationAction(ISD::BR_CC, MVT::i32, Custom);
105   setOperationAction(ISD::BR_CC, MVT::i64, Custom);
106   setOperationAction(ISD::BRCOND, MVT::Other, Expand);
107 
108   setOperationAction(ISD::SELECT_CC, MVT::i8, Custom);
109   setOperationAction(ISD::SELECT_CC, MVT::i16, Custom);
110   setOperationAction(ISD::SELECT_CC, MVT::i32, Expand);
111   setOperationAction(ISD::SELECT_CC, MVT::i64, Expand);
112   setOperationAction(ISD::SETCC, MVT::i8, Custom);
113   setOperationAction(ISD::SETCC, MVT::i16, Custom);
114   setOperationAction(ISD::SETCC, MVT::i32, Custom);
115   setOperationAction(ISD::SETCC, MVT::i64, Custom);
116   setOperationAction(ISD::SELECT, MVT::i8, Expand);
117   setOperationAction(ISD::SELECT, MVT::i16, Expand);
118 
119   setOperationAction(ISD::BSWAP, MVT::i16, Expand);
120 
121   // Add support for postincrement and predecrement load/stores.
122   setIndexedLoadAction(ISD::POST_INC, MVT::i8, Legal);
123   setIndexedLoadAction(ISD::POST_INC, MVT::i16, Legal);
124   setIndexedLoadAction(ISD::PRE_DEC, MVT::i8, Legal);
125   setIndexedLoadAction(ISD::PRE_DEC, MVT::i16, Legal);
126   setIndexedStoreAction(ISD::POST_INC, MVT::i8, Legal);
127   setIndexedStoreAction(ISD::POST_INC, MVT::i16, Legal);
128   setIndexedStoreAction(ISD::PRE_DEC, MVT::i8, Legal);
129   setIndexedStoreAction(ISD::PRE_DEC, MVT::i16, Legal);
130 
131   setOperationAction(ISD::BR_JT, MVT::Other, Expand);
132 
133   setOperationAction(ISD::VASTART, MVT::Other, Custom);
134   setOperationAction(ISD::VAEND, MVT::Other, Expand);
135   setOperationAction(ISD::VAARG, MVT::Other, Expand);
136   setOperationAction(ISD::VACOPY, MVT::Other, Expand);
137 
138   // Atomic operations which must be lowered to rtlib calls
139   for (MVT VT : MVT::integer_valuetypes()) {
140     setOperationAction(ISD::ATOMIC_SWAP, VT, Expand);
141     setOperationAction(ISD::ATOMIC_CMP_SWAP, VT, Expand);
142     setOperationAction(ISD::ATOMIC_LOAD_NAND, VT, Expand);
143     setOperationAction(ISD::ATOMIC_LOAD_MAX, VT, Expand);
144     setOperationAction(ISD::ATOMIC_LOAD_MIN, VT, Expand);
145     setOperationAction(ISD::ATOMIC_LOAD_UMAX, VT, Expand);
146     setOperationAction(ISD::ATOMIC_LOAD_UMIN, VT, Expand);
147   }
148 
149   // Division/remainder
150   setOperationAction(ISD::UDIV, MVT::i8, Expand);
151   setOperationAction(ISD::UDIV, MVT::i16, Expand);
152   setOperationAction(ISD::UREM, MVT::i8, Expand);
153   setOperationAction(ISD::UREM, MVT::i16, Expand);
154   setOperationAction(ISD::SDIV, MVT::i8, Expand);
155   setOperationAction(ISD::SDIV, MVT::i16, Expand);
156   setOperationAction(ISD::SREM, MVT::i8, Expand);
157   setOperationAction(ISD::SREM, MVT::i16, Expand);
158 
159   // Make division and modulus custom
160   setOperationAction(ISD::UDIVREM, MVT::i8, Custom);
161   setOperationAction(ISD::UDIVREM, MVT::i16, Custom);
162   setOperationAction(ISD::UDIVREM, MVT::i32, Custom);
163   setOperationAction(ISD::SDIVREM, MVT::i8, Custom);
164   setOperationAction(ISD::SDIVREM, MVT::i16, Custom);
165   setOperationAction(ISD::SDIVREM, MVT::i32, Custom);
166 
167   // Do not use MUL. The AVR instructions are closer to SMUL_LOHI &co.
168   setOperationAction(ISD::MUL, MVT::i8, Expand);
169   setOperationAction(ISD::MUL, MVT::i16, Expand);
170 
171   // Expand 16 bit multiplications.
172   setOperationAction(ISD::SMUL_LOHI, MVT::i16, Expand);
173   setOperationAction(ISD::UMUL_LOHI, MVT::i16, Expand);
174 
175   // Expand multiplications to libcalls when there is
176   // no hardware MUL.
177   if (!Subtarget.supportsMultiplication()) {
178     setOperationAction(ISD::SMUL_LOHI, MVT::i8, Expand);
179     setOperationAction(ISD::UMUL_LOHI, MVT::i8, Expand);
180   }
181 
182   for (MVT VT : MVT::integer_valuetypes()) {
183     setOperationAction(ISD::MULHS, VT, Expand);
184     setOperationAction(ISD::MULHU, VT, Expand);
185   }
186 
187   for (MVT VT : MVT::integer_valuetypes()) {
188     setOperationAction(ISD::CTPOP, VT, Expand);
189     setOperationAction(ISD::CTLZ, VT, Expand);
190     setOperationAction(ISD::CTTZ, VT, Expand);
191   }
192 
193   for (MVT VT : MVT::integer_valuetypes()) {
194     setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Expand);
195     // TODO: The generated code is pretty poor. Investigate using the
196     // same "shift and subtract with carry" trick that we do for
197     // extending 8-bit to 16-bit. This may require infrastructure
198     // improvements in how we treat 16-bit "registers" to be feasible.
199   }
200 
201   // Division and modulus rtlib functions
202   setLibcallName(RTLIB::SDIVREM_I8, "__divmodqi4");
203   setLibcallName(RTLIB::SDIVREM_I16, "__divmodhi4");
204   setLibcallName(RTLIB::SDIVREM_I32, "__divmodsi4");
205   setLibcallName(RTLIB::UDIVREM_I8, "__udivmodqi4");
206   setLibcallName(RTLIB::UDIVREM_I16, "__udivmodhi4");
207   setLibcallName(RTLIB::UDIVREM_I32, "__udivmodsi4");
208 
209   // Several of the runtime library functions use a special calling conv
210   setLibcallCallingConv(RTLIB::SDIVREM_I8, CallingConv::AVR_BUILTIN);
211   setLibcallCallingConv(RTLIB::SDIVREM_I16, CallingConv::AVR_BUILTIN);
212   setLibcallCallingConv(RTLIB::UDIVREM_I8, CallingConv::AVR_BUILTIN);
213   setLibcallCallingConv(RTLIB::UDIVREM_I16, CallingConv::AVR_BUILTIN);
214 
215   // Trigonometric rtlib functions
216   setLibcallName(RTLIB::SIN_F32, "sin");
217   setLibcallName(RTLIB::COS_F32, "cos");
218 
219   setMinFunctionAlignment(Align(2));
220   setMinimumJumpTableEntries(UINT_MAX);
221 }
222 
223 const char *AVRTargetLowering::getTargetNodeName(unsigned Opcode) const {
224 #define NODE(name)                                                             \
225   case AVRISD::name:                                                           \
226     return #name
227 
228   switch (Opcode) {
229   default:
230     return nullptr;
231     NODE(RET_GLUE);
232     NODE(RETI_GLUE);
233     NODE(CALL);
234     NODE(WRAPPER);
235     NODE(LSL);
236     NODE(LSLW);
237     NODE(LSR);
238     NODE(LSRW);
239     NODE(ROL);
240     NODE(ROR);
241     NODE(ASR);
242     NODE(ASRW);
243     NODE(LSLLOOP);
244     NODE(LSRLOOP);
245     NODE(ROLLOOP);
246     NODE(RORLOOP);
247     NODE(ASRLOOP);
248     NODE(BRCOND);
249     NODE(CMP);
250     NODE(CMPC);
251     NODE(TST);
252     NODE(SELECT_CC);
253 #undef NODE
254   }
255 }
256 
257 EVT AVRTargetLowering::getSetCCResultType(const DataLayout &DL, LLVMContext &,
258                                           EVT VT) const {
259   assert(!VT.isVector() && "No AVR SetCC type for vectors!");
260   return MVT::i8;
261 }
262 
263 SDValue AVRTargetLowering::LowerShifts(SDValue Op, SelectionDAG &DAG) const {
264   unsigned Opc8;
265   const SDNode *N = Op.getNode();
266   EVT VT = Op.getValueType();
267   SDLoc dl(N);
268   assert(llvm::has_single_bit<uint32_t>(VT.getSizeInBits()) &&
269          "Expected power-of-2 shift amount");
270 
271   if (VT.getSizeInBits() == 32) {
272     if (!isa<ConstantSDNode>(N->getOperand(1))) {
273       // 32-bit shifts are converted to a loop in IR.
274       // This should be unreachable.
275       report_fatal_error("Expected a constant shift amount!");
276     }
277     SDVTList ResTys = DAG.getVTList(MVT::i16, MVT::i16);
278     SDValue SrcLo =
279         DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i16, Op.getOperand(0),
280                     DAG.getConstant(0, dl, MVT::i16));
281     SDValue SrcHi =
282         DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i16, Op.getOperand(0),
283                     DAG.getConstant(1, dl, MVT::i16));
284     uint64_t ShiftAmount = N->getConstantOperandVal(1);
285     if (ShiftAmount == 16) {
286       // Special case these two operations because they appear to be used by the
287       // generic codegen parts to lower 32-bit numbers.
288       // TODO: perhaps we can lower shift amounts bigger than 16 to a 16-bit
289       // shift of a part of the 32-bit value?
290       switch (Op.getOpcode()) {
291       case ISD::SHL: {
292         SDValue Zero = DAG.getConstant(0, dl, MVT::i16);
293         return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i32, Zero, SrcLo);
294       }
295       case ISD::SRL: {
296         SDValue Zero = DAG.getConstant(0, dl, MVT::i16);
297         return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i32, SrcHi, Zero);
298       }
299       }
300     }
301     SDValue Cnt = DAG.getTargetConstant(ShiftAmount, dl, MVT::i8);
302     unsigned Opc;
303     switch (Op.getOpcode()) {
304     default:
305       llvm_unreachable("Invalid 32-bit shift opcode!");
306     case ISD::SHL:
307       Opc = AVRISD::LSLW;
308       break;
309     case ISD::SRL:
310       Opc = AVRISD::LSRW;
311       break;
312     case ISD::SRA:
313       Opc = AVRISD::ASRW;
314       break;
315     }
316     SDValue Result = DAG.getNode(Opc, dl, ResTys, SrcLo, SrcHi, Cnt);
317     return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i32, Result.getValue(0),
318                        Result.getValue(1));
319   }
320 
321   // Expand non-constant shifts to loops.
322   if (!isa<ConstantSDNode>(N->getOperand(1))) {
323     switch (Op.getOpcode()) {
324     default:
325       llvm_unreachable("Invalid shift opcode!");
326     case ISD::SHL:
327       return DAG.getNode(AVRISD::LSLLOOP, dl, VT, N->getOperand(0),
328                          N->getOperand(1));
329     case ISD::SRL:
330       return DAG.getNode(AVRISD::LSRLOOP, dl, VT, N->getOperand(0),
331                          N->getOperand(1));
332     case ISD::ROTL: {
333       SDValue Amt = N->getOperand(1);
334       EVT AmtVT = Amt.getValueType();
335       Amt = DAG.getNode(ISD::AND, dl, AmtVT, Amt,
336                         DAG.getConstant(VT.getSizeInBits() - 1, dl, AmtVT));
337       return DAG.getNode(AVRISD::ROLLOOP, dl, VT, N->getOperand(0), Amt);
338     }
339     case ISD::ROTR: {
340       SDValue Amt = N->getOperand(1);
341       EVT AmtVT = Amt.getValueType();
342       Amt = DAG.getNode(ISD::AND, dl, AmtVT, Amt,
343                         DAG.getConstant(VT.getSizeInBits() - 1, dl, AmtVT));
344       return DAG.getNode(AVRISD::RORLOOP, dl, VT, N->getOperand(0), Amt);
345     }
346     case ISD::SRA:
347       return DAG.getNode(AVRISD::ASRLOOP, dl, VT, N->getOperand(0),
348                          N->getOperand(1));
349     }
350   }
351 
352   uint64_t ShiftAmount = N->getConstantOperandVal(1);
353   SDValue Victim = N->getOperand(0);
354 
355   switch (Op.getOpcode()) {
356   case ISD::SRA:
357     Opc8 = AVRISD::ASR;
358     break;
359   case ISD::ROTL:
360     Opc8 = AVRISD::ROL;
361     ShiftAmount = ShiftAmount % VT.getSizeInBits();
362     break;
363   case ISD::ROTR:
364     Opc8 = AVRISD::ROR;
365     ShiftAmount = ShiftAmount % VT.getSizeInBits();
366     break;
367   case ISD::SRL:
368     Opc8 = AVRISD::LSR;
369     break;
370   case ISD::SHL:
371     Opc8 = AVRISD::LSL;
372     break;
373   default:
374     llvm_unreachable("Invalid shift opcode");
375   }
376 
377   // Optimize int8/int16 shifts.
378   if (VT.getSizeInBits() == 8) {
379     if (Op.getOpcode() == ISD::SHL && 4 <= ShiftAmount && ShiftAmount < 7) {
380       // Optimize LSL when 4 <= ShiftAmount <= 6.
381       Victim = DAG.getNode(AVRISD::SWAP, dl, VT, Victim);
382       Victim =
383           DAG.getNode(ISD::AND, dl, VT, Victim, DAG.getConstant(0xf0, dl, VT));
384       ShiftAmount -= 4;
385     } else if (Op.getOpcode() == ISD::SRL && 4 <= ShiftAmount &&
386                ShiftAmount < 7) {
387       // Optimize LSR when 4 <= ShiftAmount <= 6.
388       Victim = DAG.getNode(AVRISD::SWAP, dl, VT, Victim);
389       Victim =
390           DAG.getNode(ISD::AND, dl, VT, Victim, DAG.getConstant(0x0f, dl, VT));
391       ShiftAmount -= 4;
392     } else if (Op.getOpcode() == ISD::SHL && ShiftAmount == 7) {
393       // Optimize LSL when ShiftAmount == 7.
394       Victim = DAG.getNode(AVRISD::LSLBN, dl, VT, Victim,
395                            DAG.getConstant(7, dl, VT));
396       ShiftAmount = 0;
397     } else if (Op.getOpcode() == ISD::SRL && ShiftAmount == 7) {
398       // Optimize LSR when ShiftAmount == 7.
399       Victim = DAG.getNode(AVRISD::LSRBN, dl, VT, Victim,
400                            DAG.getConstant(7, dl, VT));
401       ShiftAmount = 0;
402     } else if (Op.getOpcode() == ISD::SRA && ShiftAmount == 6) {
403       // Optimize ASR when ShiftAmount == 6.
404       Victim = DAG.getNode(AVRISD::ASRBN, dl, VT, Victim,
405                            DAG.getConstant(6, dl, VT));
406       ShiftAmount = 0;
407     } else if (Op.getOpcode() == ISD::SRA && ShiftAmount == 7) {
408       // Optimize ASR when ShiftAmount == 7.
409       Victim = DAG.getNode(AVRISD::ASRBN, dl, VT, Victim,
410                            DAG.getConstant(7, dl, VT));
411       ShiftAmount = 0;
412     } else if (Op.getOpcode() == ISD::ROTL && ShiftAmount == 3) {
413       // Optimize left rotation 3 bits to swap then right rotation 1 bit.
414       Victim = DAG.getNode(AVRISD::SWAP, dl, VT, Victim);
415       Victim = DAG.getNode(AVRISD::ROR, dl, VT, Victim);
416       ShiftAmount = 0;
417     } else if (Op.getOpcode() == ISD::ROTR && ShiftAmount == 3) {
418       // Optimize right rotation 3 bits to swap then left rotation 1 bit.
419       Victim = DAG.getNode(AVRISD::SWAP, dl, VT, Victim);
420       Victim = DAG.getNode(AVRISD::ROL, dl, VT, Victim);
421       ShiftAmount = 0;
422     } else if (Op.getOpcode() == ISD::ROTL && ShiftAmount == 7) {
423       // Optimize left rotation 7 bits to right rotation 1 bit.
424       Victim = DAG.getNode(AVRISD::ROR, dl, VT, Victim);
425       ShiftAmount = 0;
426     } else if (Op.getOpcode() == ISD::ROTR && ShiftAmount == 7) {
427       // Optimize right rotation 7 bits to left rotation 1 bit.
428       Victim = DAG.getNode(AVRISD::ROL, dl, VT, Victim);
429       ShiftAmount = 0;
430     } else if ((Op.getOpcode() == ISD::ROTR || Op.getOpcode() == ISD::ROTL) &&
431                ShiftAmount >= 4) {
432       // Optimize left/right rotation with the SWAP instruction.
433       Victim = DAG.getNode(AVRISD::SWAP, dl, VT, Victim);
434       ShiftAmount -= 4;
435     }
436   } else if (VT.getSizeInBits() == 16) {
437     if (Op.getOpcode() == ISD::SRA)
438       // Special optimization for int16 arithmetic right shift.
439       switch (ShiftAmount) {
440       case 15:
441         Victim = DAG.getNode(AVRISD::ASRWN, dl, VT, Victim,
442                              DAG.getConstant(15, dl, VT));
443         ShiftAmount = 0;
444         break;
445       case 14:
446         Victim = DAG.getNode(AVRISD::ASRWN, dl, VT, Victim,
447                              DAG.getConstant(14, dl, VT));
448         ShiftAmount = 0;
449         break;
450       case 7:
451         Victim = DAG.getNode(AVRISD::ASRWN, dl, VT, Victim,
452                              DAG.getConstant(7, dl, VT));
453         ShiftAmount = 0;
454         break;
455       default:
456         break;
457       }
458     if (4 <= ShiftAmount && ShiftAmount < 8)
459       switch (Op.getOpcode()) {
460       case ISD::SHL:
461         Victim = DAG.getNode(AVRISD::LSLWN, dl, VT, Victim,
462                              DAG.getConstant(4, dl, VT));
463         ShiftAmount -= 4;
464         break;
465       case ISD::SRL:
466         Victim = DAG.getNode(AVRISD::LSRWN, dl, VT, Victim,
467                              DAG.getConstant(4, dl, VT));
468         ShiftAmount -= 4;
469         break;
470       default:
471         break;
472       }
473     else if (8 <= ShiftAmount && ShiftAmount < 12)
474       switch (Op.getOpcode()) {
475       case ISD::SHL:
476         Victim = DAG.getNode(AVRISD::LSLWN, dl, VT, Victim,
477                              DAG.getConstant(8, dl, VT));
478         ShiftAmount -= 8;
479         // Only operate on the higher byte for remaining shift bits.
480         Opc8 = AVRISD::LSLHI;
481         break;
482       case ISD::SRL:
483         Victim = DAG.getNode(AVRISD::LSRWN, dl, VT, Victim,
484                              DAG.getConstant(8, dl, VT));
485         ShiftAmount -= 8;
486         // Only operate on the lower byte for remaining shift bits.
487         Opc8 = AVRISD::LSRLO;
488         break;
489       case ISD::SRA:
490         Victim = DAG.getNode(AVRISD::ASRWN, dl, VT, Victim,
491                              DAG.getConstant(8, dl, VT));
492         ShiftAmount -= 8;
493         // Only operate on the lower byte for remaining shift bits.
494         Opc8 = AVRISD::ASRLO;
495         break;
496       default:
497         break;
498       }
499     else if (12 <= ShiftAmount)
500       switch (Op.getOpcode()) {
501       case ISD::SHL:
502         Victim = DAG.getNode(AVRISD::LSLWN, dl, VT, Victim,
503                              DAG.getConstant(12, dl, VT));
504         ShiftAmount -= 12;
505         // Only operate on the higher byte for remaining shift bits.
506         Opc8 = AVRISD::LSLHI;
507         break;
508       case ISD::SRL:
509         Victim = DAG.getNode(AVRISD::LSRWN, dl, VT, Victim,
510                              DAG.getConstant(12, dl, VT));
511         ShiftAmount -= 12;
512         // Only operate on the lower byte for remaining shift bits.
513         Opc8 = AVRISD::LSRLO;
514         break;
515       case ISD::SRA:
516         Victim = DAG.getNode(AVRISD::ASRWN, dl, VT, Victim,
517                              DAG.getConstant(8, dl, VT));
518         ShiftAmount -= 8;
519         // Only operate on the lower byte for remaining shift bits.
520         Opc8 = AVRISD::ASRLO;
521         break;
522       default:
523         break;
524       }
525   }
526 
527   while (ShiftAmount--) {
528     Victim = DAG.getNode(Opc8, dl, VT, Victim);
529   }
530 
531   return Victim;
532 }
533 
534 SDValue AVRTargetLowering::LowerDivRem(SDValue Op, SelectionDAG &DAG) const {
535   unsigned Opcode = Op->getOpcode();
536   assert((Opcode == ISD::SDIVREM || Opcode == ISD::UDIVREM) &&
537          "Invalid opcode for Div/Rem lowering");
538   bool IsSigned = (Opcode == ISD::SDIVREM);
539   EVT VT = Op->getValueType(0);
540   Type *Ty = VT.getTypeForEVT(*DAG.getContext());
541 
542   RTLIB::Libcall LC;
543   switch (VT.getSimpleVT().SimpleTy) {
544   default:
545     llvm_unreachable("Unexpected request for libcall!");
546   case MVT::i8:
547     LC = IsSigned ? RTLIB::SDIVREM_I8 : RTLIB::UDIVREM_I8;
548     break;
549   case MVT::i16:
550     LC = IsSigned ? RTLIB::SDIVREM_I16 : RTLIB::UDIVREM_I16;
551     break;
552   case MVT::i32:
553     LC = IsSigned ? RTLIB::SDIVREM_I32 : RTLIB::UDIVREM_I32;
554     break;
555   }
556 
557   SDValue InChain = DAG.getEntryNode();
558 
559   TargetLowering::ArgListTy Args;
560   TargetLowering::ArgListEntry Entry;
561   for (SDValue const &Value : Op->op_values()) {
562     Entry.Node = Value;
563     Entry.Ty = Value.getValueType().getTypeForEVT(*DAG.getContext());
564     Entry.IsSExt = IsSigned;
565     Entry.IsZExt = !IsSigned;
566     Args.push_back(Entry);
567   }
568 
569   SDValue Callee = DAG.getExternalSymbol(getLibcallName(LC),
570                                          getPointerTy(DAG.getDataLayout()));
571 
572   Type *RetTy = (Type *)StructType::get(Ty, Ty);
573 
574   SDLoc dl(Op);
575   TargetLowering::CallLoweringInfo CLI(DAG);
576   CLI.setDebugLoc(dl)
577       .setChain(InChain)
578       .setLibCallee(getLibcallCallingConv(LC), RetTy, Callee, std::move(Args))
579       .setInRegister()
580       .setSExtResult(IsSigned)
581       .setZExtResult(!IsSigned);
582 
583   std::pair<SDValue, SDValue> CallInfo = LowerCallTo(CLI);
584   return CallInfo.first;
585 }
586 
587 SDValue AVRTargetLowering::LowerGlobalAddress(SDValue Op,
588                                               SelectionDAG &DAG) const {
589   auto DL = DAG.getDataLayout();
590 
591   const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
592   int64_t Offset = cast<GlobalAddressSDNode>(Op)->getOffset();
593 
594   // Create the TargetGlobalAddress node, folding in the constant offset.
595   SDValue Result =
596       DAG.getTargetGlobalAddress(GV, SDLoc(Op), getPointerTy(DL), Offset);
597   return DAG.getNode(AVRISD::WRAPPER, SDLoc(Op), getPointerTy(DL), Result);
598 }
599 
600 SDValue AVRTargetLowering::LowerBlockAddress(SDValue Op,
601                                              SelectionDAG &DAG) const {
602   auto DL = DAG.getDataLayout();
603   const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress();
604 
605   SDValue Result = DAG.getTargetBlockAddress(BA, getPointerTy(DL));
606 
607   return DAG.getNode(AVRISD::WRAPPER, SDLoc(Op), getPointerTy(DL), Result);
608 }
609 
610 /// IntCCToAVRCC - Convert a DAG integer condition code to an AVR CC.
611 static AVRCC::CondCodes intCCToAVRCC(ISD::CondCode CC) {
612   switch (CC) {
613   default:
614     llvm_unreachable("Unknown condition code!");
615   case ISD::SETEQ:
616     return AVRCC::COND_EQ;
617   case ISD::SETNE:
618     return AVRCC::COND_NE;
619   case ISD::SETGE:
620     return AVRCC::COND_GE;
621   case ISD::SETLT:
622     return AVRCC::COND_LT;
623   case ISD::SETUGE:
624     return AVRCC::COND_SH;
625   case ISD::SETULT:
626     return AVRCC::COND_LO;
627   }
628 }
629 
630 /// Returns appropriate CP/CPI/CPC nodes code for the given 8/16-bit operands.
631 SDValue AVRTargetLowering::getAVRCmp(SDValue LHS, SDValue RHS,
632                                      SelectionDAG &DAG, SDLoc DL) const {
633   assert((LHS.getSimpleValueType() == RHS.getSimpleValueType()) &&
634          "LHS and RHS have different types");
635   assert(((LHS.getSimpleValueType() == MVT::i16) ||
636           (LHS.getSimpleValueType() == MVT::i8)) &&
637          "invalid comparison type");
638 
639   SDValue Cmp;
640 
641   if (LHS.getSimpleValueType() == MVT::i16 && isa<ConstantSDNode>(RHS)) {
642     uint64_t Imm = RHS->getAsZExtVal();
643     // Generate a CPI/CPC pair if RHS is a 16-bit constant. Use the zero
644     // register for the constant RHS if its lower or higher byte is zero.
645     SDValue LHSlo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i8, LHS,
646                                 DAG.getIntPtrConstant(0, DL));
647     SDValue LHShi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i8, LHS,
648                                 DAG.getIntPtrConstant(1, DL));
649     SDValue RHSlo = (Imm & 0xff) == 0
650                         ? DAG.getRegister(Subtarget.getZeroRegister(), MVT::i8)
651                         : DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i8, RHS,
652                                       DAG.getIntPtrConstant(0, DL));
653     SDValue RHShi = (Imm & 0xff00) == 0
654                         ? DAG.getRegister(Subtarget.getZeroRegister(), MVT::i8)
655                         : DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i8, RHS,
656                                       DAG.getIntPtrConstant(1, DL));
657     Cmp = DAG.getNode(AVRISD::CMP, DL, MVT::Glue, LHSlo, RHSlo);
658     Cmp = DAG.getNode(AVRISD::CMPC, DL, MVT::Glue, LHShi, RHShi, Cmp);
659   } else if (RHS.getSimpleValueType() == MVT::i16 && isa<ConstantSDNode>(LHS)) {
660     // Generate a CPI/CPC pair if LHS is a 16-bit constant. Use the zero
661     // register for the constant LHS if its lower or higher byte is zero.
662     uint64_t Imm = LHS->getAsZExtVal();
663     SDValue LHSlo = (Imm & 0xff) == 0
664                         ? DAG.getRegister(Subtarget.getZeroRegister(), MVT::i8)
665                         : DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i8, LHS,
666                                       DAG.getIntPtrConstant(0, DL));
667     SDValue LHShi = (Imm & 0xff00) == 0
668                         ? DAG.getRegister(Subtarget.getZeroRegister(), MVT::i8)
669                         : DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i8, LHS,
670                                       DAG.getIntPtrConstant(1, DL));
671     SDValue RHSlo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i8, RHS,
672                                 DAG.getIntPtrConstant(0, DL));
673     SDValue RHShi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i8, RHS,
674                                 DAG.getIntPtrConstant(1, DL));
675     Cmp = DAG.getNode(AVRISD::CMP, DL, MVT::Glue, LHSlo, RHSlo);
676     Cmp = DAG.getNode(AVRISD::CMPC, DL, MVT::Glue, LHShi, RHShi, Cmp);
677   } else {
678     // Generate ordinary 16-bit comparison.
679     Cmp = DAG.getNode(AVRISD::CMP, DL, MVT::Glue, LHS, RHS);
680   }
681 
682   return Cmp;
683 }
684 
685 /// Returns appropriate AVR CMP/CMPC nodes and corresponding condition code for
686 /// the given operands.
687 SDValue AVRTargetLowering::getAVRCmp(SDValue LHS, SDValue RHS, ISD::CondCode CC,
688                                      SDValue &AVRcc, SelectionDAG &DAG,
689                                      SDLoc DL) const {
690   SDValue Cmp;
691   EVT VT = LHS.getValueType();
692   bool UseTest = false;
693 
694   switch (CC) {
695   default:
696     break;
697   case ISD::SETLE: {
698     // Swap operands and reverse the branching condition.
699     std::swap(LHS, RHS);
700     CC = ISD::SETGE;
701     break;
702   }
703   case ISD::SETGT: {
704     if (const ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS)) {
705       switch (C->getSExtValue()) {
706       case -1: {
707         // When doing lhs > -1 use a tst instruction on the top part of lhs
708         // and use brpl instead of using a chain of cp/cpc.
709         UseTest = true;
710         AVRcc = DAG.getConstant(AVRCC::COND_PL, DL, MVT::i8);
711         break;
712       }
713       case 0: {
714         // Turn lhs > 0 into 0 < lhs since 0 can be materialized with
715         // __zero_reg__ in lhs.
716         RHS = LHS;
717         LHS = DAG.getConstant(0, DL, VT);
718         CC = ISD::SETLT;
719         break;
720       }
721       default: {
722         // Turn lhs < rhs with lhs constant into rhs >= lhs+1, this allows
723         // us to  fold the constant into the cmp instruction.
724         RHS = DAG.getConstant(C->getSExtValue() + 1, DL, VT);
725         CC = ISD::SETGE;
726         break;
727       }
728       }
729       break;
730     }
731     // Swap operands and reverse the branching condition.
732     std::swap(LHS, RHS);
733     CC = ISD::SETLT;
734     break;
735   }
736   case ISD::SETLT: {
737     if (const ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS)) {
738       switch (C->getSExtValue()) {
739       case 1: {
740         // Turn lhs < 1 into 0 >= lhs since 0 can be materialized with
741         // __zero_reg__ in lhs.
742         RHS = LHS;
743         LHS = DAG.getConstant(0, DL, VT);
744         CC = ISD::SETGE;
745         break;
746       }
747       case 0: {
748         // When doing lhs < 0 use a tst instruction on the top part of lhs
749         // and use brmi instead of using a chain of cp/cpc.
750         UseTest = true;
751         AVRcc = DAG.getConstant(AVRCC::COND_MI, DL, MVT::i8);
752         break;
753       }
754       }
755     }
756     break;
757   }
758   case ISD::SETULE: {
759     // Swap operands and reverse the branching condition.
760     std::swap(LHS, RHS);
761     CC = ISD::SETUGE;
762     break;
763   }
764   case ISD::SETUGT: {
765     // Turn lhs < rhs with lhs constant into rhs >= lhs+1, this allows us to
766     // fold the constant into the cmp instruction.
767     if (const ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS)) {
768       RHS = DAG.getConstant(C->getSExtValue() + 1, DL, VT);
769       CC = ISD::SETUGE;
770       break;
771     }
772     // Swap operands and reverse the branching condition.
773     std::swap(LHS, RHS);
774     CC = ISD::SETULT;
775     break;
776   }
777   }
778 
779   // Expand 32 and 64 bit comparisons with custom CMP and CMPC nodes instead of
780   // using the default and/or/xor expansion code which is much longer.
781   if (VT == MVT::i32) {
782     SDValue LHSlo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i16, LHS,
783                                 DAG.getIntPtrConstant(0, DL));
784     SDValue LHShi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i16, LHS,
785                                 DAG.getIntPtrConstant(1, DL));
786     SDValue RHSlo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i16, RHS,
787                                 DAG.getIntPtrConstant(0, DL));
788     SDValue RHShi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i16, RHS,
789                                 DAG.getIntPtrConstant(1, DL));
790 
791     if (UseTest) {
792       // When using tst we only care about the highest part.
793       SDValue Top = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i8, LHShi,
794                                 DAG.getIntPtrConstant(1, DL));
795       Cmp = DAG.getNode(AVRISD::TST, DL, MVT::Glue, Top);
796     } else {
797       Cmp = getAVRCmp(LHSlo, RHSlo, DAG, DL);
798       Cmp = DAG.getNode(AVRISD::CMPC, DL, MVT::Glue, LHShi, RHShi, Cmp);
799     }
800   } else if (VT == MVT::i64) {
801     SDValue LHS_0 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, LHS,
802                                 DAG.getIntPtrConstant(0, DL));
803     SDValue LHS_1 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, LHS,
804                                 DAG.getIntPtrConstant(1, DL));
805 
806     SDValue LHS0 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i16, LHS_0,
807                                DAG.getIntPtrConstant(0, DL));
808     SDValue LHS1 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i16, LHS_0,
809                                DAG.getIntPtrConstant(1, DL));
810     SDValue LHS2 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i16, LHS_1,
811                                DAG.getIntPtrConstant(0, DL));
812     SDValue LHS3 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i16, LHS_1,
813                                DAG.getIntPtrConstant(1, DL));
814 
815     SDValue RHS_0 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, RHS,
816                                 DAG.getIntPtrConstant(0, DL));
817     SDValue RHS_1 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, RHS,
818                                 DAG.getIntPtrConstant(1, DL));
819 
820     SDValue RHS0 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i16, RHS_0,
821                                DAG.getIntPtrConstant(0, DL));
822     SDValue RHS1 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i16, RHS_0,
823                                DAG.getIntPtrConstant(1, DL));
824     SDValue RHS2 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i16, RHS_1,
825                                DAG.getIntPtrConstant(0, DL));
826     SDValue RHS3 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i16, RHS_1,
827                                DAG.getIntPtrConstant(1, DL));
828 
829     if (UseTest) {
830       // When using tst we only care about the highest part.
831       SDValue Top = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i8, LHS3,
832                                 DAG.getIntPtrConstant(1, DL));
833       Cmp = DAG.getNode(AVRISD::TST, DL, MVT::Glue, Top);
834     } else {
835       Cmp = getAVRCmp(LHS0, RHS0, DAG, DL);
836       Cmp = DAG.getNode(AVRISD::CMPC, DL, MVT::Glue, LHS1, RHS1, Cmp);
837       Cmp = DAG.getNode(AVRISD::CMPC, DL, MVT::Glue, LHS2, RHS2, Cmp);
838       Cmp = DAG.getNode(AVRISD::CMPC, DL, MVT::Glue, LHS3, RHS3, Cmp);
839     }
840   } else if (VT == MVT::i8 || VT == MVT::i16) {
841     if (UseTest) {
842       // When using tst we only care about the highest part.
843       Cmp = DAG.getNode(AVRISD::TST, DL, MVT::Glue,
844                         (VT == MVT::i8)
845                             ? LHS
846                             : DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i8,
847                                           LHS, DAG.getIntPtrConstant(1, DL)));
848     } else {
849       Cmp = getAVRCmp(LHS, RHS, DAG, DL);
850     }
851   } else {
852     llvm_unreachable("Invalid comparison size");
853   }
854 
855   // When using a test instruction AVRcc is already set.
856   if (!UseTest) {
857     AVRcc = DAG.getConstant(intCCToAVRCC(CC), DL, MVT::i8);
858   }
859 
860   return Cmp;
861 }
862 
863 SDValue AVRTargetLowering::LowerBR_CC(SDValue Op, SelectionDAG &DAG) const {
864   SDValue Chain = Op.getOperand(0);
865   ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get();
866   SDValue LHS = Op.getOperand(2);
867   SDValue RHS = Op.getOperand(3);
868   SDValue Dest = Op.getOperand(4);
869   SDLoc dl(Op);
870 
871   SDValue TargetCC;
872   SDValue Cmp = getAVRCmp(LHS, RHS, CC, TargetCC, DAG, dl);
873 
874   return DAG.getNode(AVRISD::BRCOND, dl, MVT::Other, Chain, Dest, TargetCC,
875                      Cmp);
876 }
877 
878 SDValue AVRTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const {
879   SDValue LHS = Op.getOperand(0);
880   SDValue RHS = Op.getOperand(1);
881   SDValue TrueV = Op.getOperand(2);
882   SDValue FalseV = Op.getOperand(3);
883   ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();
884   SDLoc dl(Op);
885 
886   SDValue TargetCC;
887   SDValue Cmp = getAVRCmp(LHS, RHS, CC, TargetCC, DAG, dl);
888 
889   SDValue Ops[] = {TrueV, FalseV, TargetCC, Cmp};
890 
891   return DAG.getNode(AVRISD::SELECT_CC, dl, Op.getValueType(), Ops);
892 }
893 
894 SDValue AVRTargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {
895   SDValue LHS = Op.getOperand(0);
896   SDValue RHS = Op.getOperand(1);
897   ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
898   SDLoc DL(Op);
899 
900   SDValue TargetCC;
901   SDValue Cmp = getAVRCmp(LHS, RHS, CC, TargetCC, DAG, DL);
902 
903   SDValue TrueV = DAG.getConstant(1, DL, Op.getValueType());
904   SDValue FalseV = DAG.getConstant(0, DL, Op.getValueType());
905   SDValue Ops[] = {TrueV, FalseV, TargetCC, Cmp};
906 
907   return DAG.getNode(AVRISD::SELECT_CC, DL, Op.getValueType(), Ops);
908 }
909 
910 SDValue AVRTargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const {
911   const MachineFunction &MF = DAG.getMachineFunction();
912   const AVRMachineFunctionInfo *AFI = MF.getInfo<AVRMachineFunctionInfo>();
913   const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
914   auto DL = DAG.getDataLayout();
915   SDLoc dl(Op);
916 
917   // Vastart just stores the address of the VarArgsFrameIndex slot into the
918   // memory location argument.
919   SDValue FI = DAG.getFrameIndex(AFI->getVarArgsFrameIndex(), getPointerTy(DL));
920 
921   return DAG.getStore(Op.getOperand(0), dl, FI, Op.getOperand(1),
922                       MachinePointerInfo(SV));
923 }
924 
925 // Modify the existing ISD::INLINEASM node to add the implicit zero register.
926 SDValue AVRTargetLowering::LowerINLINEASM(SDValue Op, SelectionDAG &DAG) const {
927   SDValue ZeroReg = DAG.getRegister(Subtarget.getZeroRegister(), MVT::i8);
928   if (Op.getOperand(Op.getNumOperands() - 1) == ZeroReg ||
929       Op.getOperand(Op.getNumOperands() - 2) == ZeroReg) {
930     // Zero register has already been added. Don't add it again.
931     // If this isn't handled, we get called over and over again.
932     return Op;
933   }
934 
935   // Get a list of operands to the new INLINEASM node. This is mostly a copy,
936   // with some edits.
937   // Add the following operands at the end (but before the glue node, if it's
938   // there):
939   //  - The flags of the implicit zero register operand.
940   //  - The implicit zero register operand itself.
941   SDLoc dl(Op);
942   SmallVector<SDValue, 8> Ops;
943   SDNode *N = Op.getNode();
944   SDValue Glue;
945   for (unsigned I = 0; I < N->getNumOperands(); I++) {
946     SDValue Operand = N->getOperand(I);
947     if (Operand.getValueType() == MVT::Glue) {
948       // The glue operand always needs to be at the end, so we need to treat it
949       // specially.
950       Glue = Operand;
951     } else {
952       Ops.push_back(Operand);
953     }
954   }
955   InlineAsm::Flag Flags(InlineAsm::Kind::RegUse, 1);
956   Ops.push_back(DAG.getTargetConstant(Flags, dl, MVT::i32));
957   Ops.push_back(ZeroReg);
958   if (Glue) {
959     Ops.push_back(Glue);
960   }
961 
962   // Replace the current INLINEASM node with a new one that has the zero
963   // register as implicit parameter.
964   SDValue New = DAG.getNode(N->getOpcode(), dl, N->getVTList(), Ops);
965   DAG.ReplaceAllUsesOfValueWith(Op, New);
966   DAG.ReplaceAllUsesOfValueWith(Op.getValue(1), New.getValue(1));
967 
968   return New;
969 }
970 
971 SDValue AVRTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
972   switch (Op.getOpcode()) {
973   default:
974     llvm_unreachable("Don't know how to custom lower this!");
975   case ISD::SHL:
976   case ISD::SRA:
977   case ISD::SRL:
978   case ISD::ROTL:
979   case ISD::ROTR:
980     return LowerShifts(Op, DAG);
981   case ISD::GlobalAddress:
982     return LowerGlobalAddress(Op, DAG);
983   case ISD::BlockAddress:
984     return LowerBlockAddress(Op, DAG);
985   case ISD::BR_CC:
986     return LowerBR_CC(Op, DAG);
987   case ISD::SELECT_CC:
988     return LowerSELECT_CC(Op, DAG);
989   case ISD::SETCC:
990     return LowerSETCC(Op, DAG);
991   case ISD::VASTART:
992     return LowerVASTART(Op, DAG);
993   case ISD::SDIVREM:
994   case ISD::UDIVREM:
995     return LowerDivRem(Op, DAG);
996   case ISD::INLINEASM:
997     return LowerINLINEASM(Op, DAG);
998   }
999 
1000   return SDValue();
1001 }
1002 
1003 /// Replace a node with an illegal result type
1004 /// with a new node built out of custom code.
1005 void AVRTargetLowering::ReplaceNodeResults(SDNode *N,
1006                                            SmallVectorImpl<SDValue> &Results,
1007                                            SelectionDAG &DAG) const {
1008   SDLoc DL(N);
1009 
1010   switch (N->getOpcode()) {
1011   case ISD::ADD: {
1012     // Convert add (x, imm) into sub (x, -imm).
1013     if (const ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(1))) {
1014       SDValue Sub = DAG.getNode(
1015           ISD::SUB, DL, N->getValueType(0), N->getOperand(0),
1016           DAG.getConstant(-C->getAPIntValue(), DL, C->getValueType(0)));
1017       Results.push_back(Sub);
1018     }
1019     break;
1020   }
1021   default: {
1022     SDValue Res = LowerOperation(SDValue(N, 0), DAG);
1023 
1024     for (unsigned I = 0, E = Res->getNumValues(); I != E; ++I)
1025       Results.push_back(Res.getValue(I));
1026 
1027     break;
1028   }
1029   }
1030 }
1031 
1032 /// Return true if the addressing mode represented
1033 /// by AM is legal for this target, for a load/store of the specified type.
1034 bool AVRTargetLowering::isLegalAddressingMode(const DataLayout &DL,
1035                                               const AddrMode &AM, Type *Ty,
1036                                               unsigned AS,
1037                                               Instruction *I) const {
1038   int64_t Offs = AM.BaseOffs;
1039 
1040   // Allow absolute addresses.
1041   if (AM.BaseGV && !AM.HasBaseReg && AM.Scale == 0 && Offs == 0) {
1042     return true;
1043   }
1044 
1045   // Flash memory instructions only allow zero offsets.
1046   if (isa<PointerType>(Ty) && AS == AVR::ProgramMemory) {
1047     return false;
1048   }
1049 
1050   // Allow reg+<6bit> offset.
1051   if (Offs < 0)
1052     Offs = -Offs;
1053   if (AM.BaseGV == nullptr && AM.HasBaseReg && AM.Scale == 0 &&
1054       isUInt<6>(Offs)) {
1055     return true;
1056   }
1057 
1058   return false;
1059 }
1060 
1061 /// Returns true by value, base pointer and
1062 /// offset pointer and addressing mode by reference if the node's address
1063 /// can be legally represented as pre-indexed load / store address.
1064 bool AVRTargetLowering::getPreIndexedAddressParts(SDNode *N, SDValue &Base,
1065                                                   SDValue &Offset,
1066                                                   ISD::MemIndexedMode &AM,
1067                                                   SelectionDAG &DAG) const {
1068   EVT VT;
1069   const SDNode *Op;
1070   SDLoc DL(N);
1071 
1072   if (const LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
1073     VT = LD->getMemoryVT();
1074     Op = LD->getBasePtr().getNode();
1075     if (LD->getExtensionType() != ISD::NON_EXTLOAD)
1076       return false;
1077     if (AVR::isProgramMemoryAccess(LD)) {
1078       return false;
1079     }
1080   } else if (const StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
1081     VT = ST->getMemoryVT();
1082     Op = ST->getBasePtr().getNode();
1083     if (AVR::isProgramMemoryAccess(ST)) {
1084       return false;
1085     }
1086   } else {
1087     return false;
1088   }
1089 
1090   if (VT != MVT::i8 && VT != MVT::i16) {
1091     return false;
1092   }
1093 
1094   if (Op->getOpcode() != ISD::ADD && Op->getOpcode() != ISD::SUB) {
1095     return false;
1096   }
1097 
1098   if (const ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(Op->getOperand(1))) {
1099     int RHSC = RHS->getSExtValue();
1100     if (Op->getOpcode() == ISD::SUB)
1101       RHSC = -RHSC;
1102 
1103     if ((VT == MVT::i16 && RHSC != -2) || (VT == MVT::i8 && RHSC != -1)) {
1104       return false;
1105     }
1106 
1107     Base = Op->getOperand(0);
1108     Offset = DAG.getSignedConstant(RHSC, DL, MVT::i8);
1109     AM = ISD::PRE_DEC;
1110 
1111     return true;
1112   }
1113 
1114   return false;
1115 }
1116 
1117 /// Returns true by value, base pointer and
1118 /// offset pointer and addressing mode by reference if this node can be
1119 /// combined with a load / store to form a post-indexed load / store.
1120 bool AVRTargetLowering::getPostIndexedAddressParts(SDNode *N, SDNode *Op,
1121                                                    SDValue &Base,
1122                                                    SDValue &Offset,
1123                                                    ISD::MemIndexedMode &AM,
1124                                                    SelectionDAG &DAG) const {
1125   EVT VT;
1126   SDLoc DL(N);
1127 
1128   if (const LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
1129     VT = LD->getMemoryVT();
1130     if (LD->getExtensionType() != ISD::NON_EXTLOAD)
1131       return false;
1132   } else if (const StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
1133     VT = ST->getMemoryVT();
1134     // We can not store to program memory.
1135     if (AVR::isProgramMemoryAccess(ST))
1136       return false;
1137     // Since the high byte need to be stored first, we can not emit
1138     // i16 post increment store like:
1139     // st X+, r24
1140     // st X+, r25
1141     if (VT == MVT::i16 && !Subtarget.hasLowByteFirst())
1142       return false;
1143   } else {
1144     return false;
1145   }
1146 
1147   if (VT != MVT::i8 && VT != MVT::i16) {
1148     return false;
1149   }
1150 
1151   if (Op->getOpcode() != ISD::ADD && Op->getOpcode() != ISD::SUB) {
1152     return false;
1153   }
1154 
1155   if (const ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(Op->getOperand(1))) {
1156     int RHSC = RHS->getSExtValue();
1157     if (Op->getOpcode() == ISD::SUB)
1158       RHSC = -RHSC;
1159     if ((VT == MVT::i16 && RHSC != 2) || (VT == MVT::i8 && RHSC != 1)) {
1160       return false;
1161     }
1162 
1163     // FIXME: We temporarily disable post increment load from program memory,
1164     //        due to bug https://github.com/llvm/llvm-project/issues/59914.
1165     if (const LoadSDNode *LD = dyn_cast<LoadSDNode>(N))
1166       if (AVR::isProgramMemoryAccess(LD))
1167         return false;
1168 
1169     Base = Op->getOperand(0);
1170     Offset = DAG.getConstant(RHSC, DL, MVT::i8);
1171     AM = ISD::POST_INC;
1172 
1173     return true;
1174   }
1175 
1176   return false;
1177 }
1178 
1179 bool AVRTargetLowering::isOffsetFoldingLegal(
1180     const GlobalAddressSDNode *GA) const {
1181   return true;
1182 }
1183 
1184 //===----------------------------------------------------------------------===//
1185 //             Formal Arguments Calling Convention Implementation
1186 //===----------------------------------------------------------------------===//
1187 
1188 #include "AVRGenCallingConv.inc"
1189 
1190 /// Registers for calling conventions, ordered in reverse as required by ABI.
1191 /// Both arrays must be of the same length.
1192 static const MCPhysReg RegList8AVR[] = {
1193     AVR::R25, AVR::R24, AVR::R23, AVR::R22, AVR::R21, AVR::R20,
1194     AVR::R19, AVR::R18, AVR::R17, AVR::R16, AVR::R15, AVR::R14,
1195     AVR::R13, AVR::R12, AVR::R11, AVR::R10, AVR::R9,  AVR::R8};
1196 static const MCPhysReg RegList8Tiny[] = {AVR::R25, AVR::R24, AVR::R23,
1197                                          AVR::R22, AVR::R21, AVR::R20};
1198 static const MCPhysReg RegList16AVR[] = {
1199     AVR::R26R25, AVR::R25R24, AVR::R24R23, AVR::R23R22, AVR::R22R21,
1200     AVR::R21R20, AVR::R20R19, AVR::R19R18, AVR::R18R17, AVR::R17R16,
1201     AVR::R16R15, AVR::R15R14, AVR::R14R13, AVR::R13R12, AVR::R12R11,
1202     AVR::R11R10, AVR::R10R9,  AVR::R9R8};
1203 static const MCPhysReg RegList16Tiny[] = {AVR::R26R25, AVR::R25R24,
1204                                           AVR::R24R23, AVR::R23R22,
1205                                           AVR::R22R21, AVR::R21R20};
1206 
1207 static_assert(std::size(RegList8AVR) == std::size(RegList16AVR),
1208               "8-bit and 16-bit register arrays must be of equal length");
1209 static_assert(std::size(RegList8Tiny) == std::size(RegList16Tiny),
1210               "8-bit and 16-bit register arrays must be of equal length");
1211 
1212 /// Analyze incoming and outgoing function arguments. We need custom C++ code
1213 /// to handle special constraints in the ABI.
1214 /// In addition, all pieces of a certain argument have to be passed either
1215 /// using registers or the stack but never mixing both.
1216 template <typename ArgT>
1217 static void analyzeArguments(TargetLowering::CallLoweringInfo *CLI,
1218                              const Function *F, const DataLayout *TD,
1219                              const SmallVectorImpl<ArgT> &Args,
1220                              SmallVectorImpl<CCValAssign> &ArgLocs,
1221                              CCState &CCInfo, bool Tiny) {
1222   // Choose the proper register list for argument passing according to the ABI.
1223   ArrayRef<MCPhysReg> RegList8;
1224   ArrayRef<MCPhysReg> RegList16;
1225   if (Tiny) {
1226     RegList8 = ArrayRef(RegList8Tiny);
1227     RegList16 = ArrayRef(RegList16Tiny);
1228   } else {
1229     RegList8 = ArrayRef(RegList8AVR);
1230     RegList16 = ArrayRef(RegList16AVR);
1231   }
1232 
1233   unsigned NumArgs = Args.size();
1234   // This is the index of the last used register, in RegList*.
1235   // -1 means R26 (R26 is never actually used in CC).
1236   int RegLastIdx = -1;
1237   // Once a value is passed to the stack it will always be used
1238   bool UseStack = false;
1239   for (unsigned i = 0; i != NumArgs;) {
1240     MVT VT = Args[i].VT;
1241     // We have to count the number of bytes for each function argument, that is
1242     // those Args with the same OrigArgIndex. This is important in case the
1243     // function takes an aggregate type.
1244     // Current argument will be between [i..j).
1245     unsigned ArgIndex = Args[i].OrigArgIndex;
1246     unsigned TotalBytes = VT.getStoreSize();
1247     unsigned j = i + 1;
1248     for (; j != NumArgs; ++j) {
1249       if (Args[j].OrigArgIndex != ArgIndex)
1250         break;
1251       TotalBytes += Args[j].VT.getStoreSize();
1252     }
1253     // Round up to even number of bytes.
1254     TotalBytes = alignTo(TotalBytes, 2);
1255     // Skip zero sized arguments
1256     if (TotalBytes == 0)
1257       continue;
1258     // The index of the first register to be used
1259     unsigned RegIdx = RegLastIdx + TotalBytes;
1260     RegLastIdx = RegIdx;
1261     // If there are not enough registers, use the stack
1262     if (RegIdx >= RegList8.size()) {
1263       UseStack = true;
1264     }
1265     for (; i != j; ++i) {
1266       MVT VT = Args[i].VT;
1267 
1268       if (UseStack) {
1269         auto evt = EVT(VT).getTypeForEVT(CCInfo.getContext());
1270         unsigned Offset = CCInfo.AllocateStack(TD->getTypeAllocSize(evt),
1271                                                TD->getABITypeAlign(evt));
1272         CCInfo.addLoc(
1273             CCValAssign::getMem(i, VT, Offset, VT, CCValAssign::Full));
1274       } else {
1275         unsigned Reg;
1276         if (VT == MVT::i8) {
1277           Reg = CCInfo.AllocateReg(RegList8[RegIdx]);
1278         } else if (VT == MVT::i16) {
1279           Reg = CCInfo.AllocateReg(RegList16[RegIdx]);
1280         } else {
1281           llvm_unreachable(
1282               "calling convention can only manage i8 and i16 types");
1283         }
1284         assert(Reg && "register not available in calling convention");
1285         CCInfo.addLoc(CCValAssign::getReg(i, VT, Reg, VT, CCValAssign::Full));
1286         // Registers inside a particular argument are sorted in increasing order
1287         // (remember the array is reversed).
1288         RegIdx -= VT.getStoreSize();
1289       }
1290     }
1291   }
1292 }
1293 
1294 /// Count the total number of bytes needed to pass or return these arguments.
1295 template <typename ArgT>
1296 static unsigned
1297 getTotalArgumentsSizeInBytes(const SmallVectorImpl<ArgT> &Args) {
1298   unsigned TotalBytes = 0;
1299 
1300   for (const ArgT &Arg : Args) {
1301     TotalBytes += Arg.VT.getStoreSize();
1302   }
1303   return TotalBytes;
1304 }
1305 
1306 /// Analyze incoming and outgoing value of returning from a function.
1307 /// The algorithm is similar to analyzeArguments, but there can only be
1308 /// one value, possibly an aggregate, and it is limited to 8 bytes.
1309 template <typename ArgT>
1310 static void analyzeReturnValues(const SmallVectorImpl<ArgT> &Args,
1311                                 CCState &CCInfo, bool Tiny) {
1312   unsigned NumArgs = Args.size();
1313   unsigned TotalBytes = getTotalArgumentsSizeInBytes(Args);
1314   // CanLowerReturn() guarantees this assertion.
1315   if (Tiny)
1316     assert(TotalBytes <= 4 &&
1317            "return values greater than 4 bytes cannot be lowered on AVRTiny");
1318   else
1319     assert(TotalBytes <= 8 &&
1320            "return values greater than 8 bytes cannot be lowered on AVR");
1321 
1322   // Choose the proper register list for argument passing according to the ABI.
1323   ArrayRef<MCPhysReg> RegList8;
1324   ArrayRef<MCPhysReg> RegList16;
1325   if (Tiny) {
1326     RegList8 = ArrayRef(RegList8Tiny);
1327     RegList16 = ArrayRef(RegList16Tiny);
1328   } else {
1329     RegList8 = ArrayRef(RegList8AVR);
1330     RegList16 = ArrayRef(RegList16AVR);
1331   }
1332 
1333   // GCC-ABI says that the size is rounded up to the next even number,
1334   // but actually once it is more than 4 it will always round up to 8.
1335   if (TotalBytes > 4) {
1336     TotalBytes = 8;
1337   } else {
1338     TotalBytes = alignTo(TotalBytes, 2);
1339   }
1340 
1341   // The index of the first register to use.
1342   int RegIdx = TotalBytes - 1;
1343   for (unsigned i = 0; i != NumArgs; ++i) {
1344     MVT VT = Args[i].VT;
1345     unsigned Reg;
1346     if (VT == MVT::i8) {
1347       Reg = CCInfo.AllocateReg(RegList8[RegIdx]);
1348     } else if (VT == MVT::i16) {
1349       Reg = CCInfo.AllocateReg(RegList16[RegIdx]);
1350     } else {
1351       llvm_unreachable("calling convention can only manage i8 and i16 types");
1352     }
1353     assert(Reg && "register not available in calling convention");
1354     CCInfo.addLoc(CCValAssign::getReg(i, VT, Reg, VT, CCValAssign::Full));
1355     // Registers sort in increasing order
1356     RegIdx -= VT.getStoreSize();
1357   }
1358 }
1359 
1360 SDValue AVRTargetLowering::LowerFormalArguments(
1361     SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
1362     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
1363     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
1364   MachineFunction &MF = DAG.getMachineFunction();
1365   MachineFrameInfo &MFI = MF.getFrameInfo();
1366   auto DL = DAG.getDataLayout();
1367 
1368   // Assign locations to all of the incoming arguments.
1369   SmallVector<CCValAssign, 16> ArgLocs;
1370   CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
1371                  *DAG.getContext());
1372 
1373   // Variadic functions do not need all the analysis below.
1374   if (isVarArg) {
1375     CCInfo.AnalyzeFormalArguments(Ins, ArgCC_AVR_Vararg);
1376   } else {
1377     analyzeArguments(nullptr, &MF.getFunction(), &DL, Ins, ArgLocs, CCInfo,
1378                      Subtarget.hasTinyEncoding());
1379   }
1380 
1381   SDValue ArgValue;
1382   for (CCValAssign &VA : ArgLocs) {
1383 
1384     // Arguments stored on registers.
1385     if (VA.isRegLoc()) {
1386       EVT RegVT = VA.getLocVT();
1387       const TargetRegisterClass *RC;
1388       if (RegVT == MVT::i8) {
1389         RC = &AVR::GPR8RegClass;
1390       } else if (RegVT == MVT::i16) {
1391         RC = &AVR::DREGSRegClass;
1392       } else {
1393         llvm_unreachable("Unknown argument type!");
1394       }
1395 
1396       Register Reg = MF.addLiveIn(VA.getLocReg(), RC);
1397       ArgValue = DAG.getCopyFromReg(Chain, dl, Reg, RegVT);
1398 
1399       // :NOTE: Clang should not promote any i8 into i16 but for safety the
1400       // following code will handle zexts or sexts generated by other
1401       // front ends. Otherwise:
1402       // If this is an 8 bit value, it is really passed promoted
1403       // to 16 bits. Insert an assert[sz]ext to capture this, then
1404       // truncate to the right size.
1405       switch (VA.getLocInfo()) {
1406       default:
1407         llvm_unreachable("Unknown loc info!");
1408       case CCValAssign::Full:
1409         break;
1410       case CCValAssign::BCvt:
1411         ArgValue = DAG.getNode(ISD::BITCAST, dl, VA.getValVT(), ArgValue);
1412         break;
1413       case CCValAssign::SExt:
1414         ArgValue = DAG.getNode(ISD::AssertSext, dl, RegVT, ArgValue,
1415                                DAG.getValueType(VA.getValVT()));
1416         ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue);
1417         break;
1418       case CCValAssign::ZExt:
1419         ArgValue = DAG.getNode(ISD::AssertZext, dl, RegVT, ArgValue,
1420                                DAG.getValueType(VA.getValVT()));
1421         ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue);
1422         break;
1423       }
1424 
1425       InVals.push_back(ArgValue);
1426     } else {
1427       // Only arguments passed on the stack should make it here.
1428       assert(VA.isMemLoc());
1429 
1430       EVT LocVT = VA.getLocVT();
1431 
1432       // Create the frame index object for this incoming parameter.
1433       int FI = MFI.CreateFixedObject(LocVT.getSizeInBits() / 8,
1434                                      VA.getLocMemOffset(), true);
1435 
1436       // Create the SelectionDAG nodes corresponding to a load
1437       // from this parameter.
1438       SDValue FIN = DAG.getFrameIndex(FI, getPointerTy(DL));
1439       InVals.push_back(DAG.getLoad(LocVT, dl, Chain, FIN,
1440                                    MachinePointerInfo::getFixedStack(MF, FI)));
1441     }
1442   }
1443 
1444   // If the function takes variable number of arguments, make a frame index for
1445   // the start of the first vararg value... for expansion of llvm.va_start.
1446   if (isVarArg) {
1447     unsigned StackSize = CCInfo.getStackSize();
1448     AVRMachineFunctionInfo *AFI = MF.getInfo<AVRMachineFunctionInfo>();
1449 
1450     AFI->setVarArgsFrameIndex(MFI.CreateFixedObject(2, StackSize, true));
1451   }
1452 
1453   return Chain;
1454 }
1455 
1456 //===----------------------------------------------------------------------===//
1457 //                  Call Calling Convention Implementation
1458 //===----------------------------------------------------------------------===//
1459 
1460 SDValue AVRTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
1461                                      SmallVectorImpl<SDValue> &InVals) const {
1462   SelectionDAG &DAG = CLI.DAG;
1463   SDLoc &DL = CLI.DL;
1464   SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
1465   SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
1466   SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
1467   SDValue Chain = CLI.Chain;
1468   SDValue Callee = CLI.Callee;
1469   bool &isTailCall = CLI.IsTailCall;
1470   CallingConv::ID CallConv = CLI.CallConv;
1471   bool isVarArg = CLI.IsVarArg;
1472 
1473   MachineFunction &MF = DAG.getMachineFunction();
1474 
1475   // AVR does not yet support tail call optimization.
1476   isTailCall = false;
1477 
1478   // Analyze operands of the call, assigning locations to each operand.
1479   SmallVector<CCValAssign, 16> ArgLocs;
1480   CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
1481                  *DAG.getContext());
1482 
1483   // If the callee is a GlobalAddress/ExternalSymbol node (quite common, every
1484   // direct call is) turn it into a TargetGlobalAddress/TargetExternalSymbol
1485   // node so that legalize doesn't hack it.
1486   const Function *F = nullptr;
1487   if (const GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
1488     const GlobalValue *GV = G->getGlobal();
1489     if (isa<Function>(GV))
1490       F = cast<Function>(GV);
1491     Callee =
1492         DAG.getTargetGlobalAddress(GV, DL, getPointerTy(DAG.getDataLayout()));
1493   } else if (const ExternalSymbolSDNode *ES =
1494                  dyn_cast<ExternalSymbolSDNode>(Callee)) {
1495     Callee = DAG.getTargetExternalSymbol(ES->getSymbol(),
1496                                          getPointerTy(DAG.getDataLayout()));
1497   }
1498 
1499   // Variadic functions do not need all the analysis below.
1500   if (isVarArg) {
1501     CCInfo.AnalyzeCallOperands(Outs, ArgCC_AVR_Vararg);
1502   } else {
1503     analyzeArguments(&CLI, F, &DAG.getDataLayout(), Outs, ArgLocs, CCInfo,
1504                      Subtarget.hasTinyEncoding());
1505   }
1506 
1507   // Get a count of how many bytes are to be pushed on the stack.
1508   unsigned NumBytes = CCInfo.getStackSize();
1509 
1510   Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, DL);
1511 
1512   SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
1513 
1514   // First, walk the register assignments, inserting copies.
1515   unsigned AI, AE;
1516   bool HasStackArgs = false;
1517   for (AI = 0, AE = ArgLocs.size(); AI != AE; ++AI) {
1518     CCValAssign &VA = ArgLocs[AI];
1519     EVT RegVT = VA.getLocVT();
1520     SDValue Arg = OutVals[AI];
1521 
1522     // Promote the value if needed. With Clang this should not happen.
1523     switch (VA.getLocInfo()) {
1524     default:
1525       llvm_unreachable("Unknown loc info!");
1526     case CCValAssign::Full:
1527       break;
1528     case CCValAssign::SExt:
1529       Arg = DAG.getNode(ISD::SIGN_EXTEND, DL, RegVT, Arg);
1530       break;
1531     case CCValAssign::ZExt:
1532       Arg = DAG.getNode(ISD::ZERO_EXTEND, DL, RegVT, Arg);
1533       break;
1534     case CCValAssign::AExt:
1535       Arg = DAG.getNode(ISD::ANY_EXTEND, DL, RegVT, Arg);
1536       break;
1537     case CCValAssign::BCvt:
1538       Arg = DAG.getNode(ISD::BITCAST, DL, RegVT, Arg);
1539       break;
1540     }
1541 
1542     // Stop when we encounter a stack argument, we need to process them
1543     // in reverse order in the loop below.
1544     if (VA.isMemLoc()) {
1545       HasStackArgs = true;
1546       break;
1547     }
1548 
1549     // Arguments that can be passed on registers must be kept in the RegsToPass
1550     // vector.
1551     RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
1552   }
1553 
1554   // Second, stack arguments have to walked.
1555   // Previously this code created chained stores but those chained stores appear
1556   // to be unchained in the legalization phase. Therefore, do not attempt to
1557   // chain them here. In fact, chaining them here somehow causes the first and
1558   // second store to be reversed which is the exact opposite of the intended
1559   // effect.
1560   if (HasStackArgs) {
1561     SmallVector<SDValue, 8> MemOpChains;
1562     for (; AI != AE; AI++) {
1563       CCValAssign &VA = ArgLocs[AI];
1564       SDValue Arg = OutVals[AI];
1565 
1566       assert(VA.isMemLoc());
1567 
1568       // SP points to one stack slot further so add one to adjust it.
1569       SDValue PtrOff = DAG.getNode(
1570           ISD::ADD, DL, getPointerTy(DAG.getDataLayout()),
1571           DAG.getRegister(AVR::SP, getPointerTy(DAG.getDataLayout())),
1572           DAG.getIntPtrConstant(VA.getLocMemOffset() + 1, DL));
1573 
1574       MemOpChains.push_back(
1575           DAG.getStore(Chain, DL, Arg, PtrOff,
1576                        MachinePointerInfo::getStack(MF, VA.getLocMemOffset())));
1577     }
1578 
1579     if (!MemOpChains.empty())
1580       Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOpChains);
1581   }
1582 
1583   // Build a sequence of copy-to-reg nodes chained together with token chain and
1584   // flag operands which copy the outgoing args into registers.  The InGlue in
1585   // necessary since all emited instructions must be stuck together.
1586   SDValue InGlue;
1587   for (auto Reg : RegsToPass) {
1588     Chain = DAG.getCopyToReg(Chain, DL, Reg.first, Reg.second, InGlue);
1589     InGlue = Chain.getValue(1);
1590   }
1591 
1592   // Returns a chain & a flag for retval copy to use.
1593   SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
1594   SmallVector<SDValue, 8> Ops;
1595   Ops.push_back(Chain);
1596   Ops.push_back(Callee);
1597 
1598   // Add argument registers to the end of the list so that they are known live
1599   // into the call.
1600   for (auto Reg : RegsToPass) {
1601     Ops.push_back(DAG.getRegister(Reg.first, Reg.second.getValueType()));
1602   }
1603 
1604   // The zero register (usually R1) must be passed as an implicit register so
1605   // that this register is correctly zeroed in interrupts.
1606   Ops.push_back(DAG.getRegister(Subtarget.getZeroRegister(), MVT::i8));
1607 
1608   // Add a register mask operand representing the call-preserved registers.
1609   const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
1610   const uint32_t *Mask =
1611       TRI->getCallPreservedMask(DAG.getMachineFunction(), CallConv);
1612   assert(Mask && "Missing call preserved mask for calling convention");
1613   Ops.push_back(DAG.getRegisterMask(Mask));
1614 
1615   if (InGlue.getNode()) {
1616     Ops.push_back(InGlue);
1617   }
1618 
1619   Chain = DAG.getNode(AVRISD::CALL, DL, NodeTys, Ops);
1620   InGlue = Chain.getValue(1);
1621 
1622   // Create the CALLSEQ_END node.
1623   Chain = DAG.getCALLSEQ_END(Chain, NumBytes, 0, InGlue, DL);
1624 
1625   if (!Ins.empty()) {
1626     InGlue = Chain.getValue(1);
1627   }
1628 
1629   // Handle result values, copying them out of physregs into vregs that we
1630   // return.
1631   return LowerCallResult(Chain, InGlue, CallConv, isVarArg, Ins, DL, DAG,
1632                          InVals);
1633 }
1634 
1635 /// Lower the result values of a call into the
1636 /// appropriate copies out of appropriate physical registers.
1637 ///
1638 SDValue AVRTargetLowering::LowerCallResult(
1639     SDValue Chain, SDValue InGlue, CallingConv::ID CallConv, bool isVarArg,
1640     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
1641     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
1642 
1643   // Assign locations to each value returned by this call.
1644   SmallVector<CCValAssign, 16> RVLocs;
1645   CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
1646                  *DAG.getContext());
1647 
1648   // Handle runtime calling convs.
1649   if (CallConv == CallingConv::AVR_BUILTIN) {
1650     CCInfo.AnalyzeCallResult(Ins, RetCC_AVR_BUILTIN);
1651   } else {
1652     analyzeReturnValues(Ins, CCInfo, Subtarget.hasTinyEncoding());
1653   }
1654 
1655   // Copy all of the result registers out of their specified physreg.
1656   for (CCValAssign const &RVLoc : RVLocs) {
1657     Chain = DAG.getCopyFromReg(Chain, dl, RVLoc.getLocReg(), RVLoc.getValVT(),
1658                                InGlue)
1659                 .getValue(1);
1660     InGlue = Chain.getValue(2);
1661     InVals.push_back(Chain.getValue(0));
1662   }
1663 
1664   return Chain;
1665 }
1666 
1667 //===----------------------------------------------------------------------===//
1668 //               Return Value Calling Convention Implementation
1669 //===----------------------------------------------------------------------===//
1670 
1671 bool AVRTargetLowering::CanLowerReturn(
1672     CallingConv::ID CallConv, MachineFunction &MF, bool isVarArg,
1673     const SmallVectorImpl<ISD::OutputArg> &Outs, LLVMContext &Context,
1674     const Type *RetTy) const {
1675   if (CallConv == CallingConv::AVR_BUILTIN) {
1676     SmallVector<CCValAssign, 16> RVLocs;
1677     CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context);
1678     return CCInfo.CheckReturn(Outs, RetCC_AVR_BUILTIN);
1679   }
1680 
1681   unsigned TotalBytes = getTotalArgumentsSizeInBytes(Outs);
1682   return TotalBytes <= (unsigned)(Subtarget.hasTinyEncoding() ? 4 : 8);
1683 }
1684 
1685 SDValue
1686 AVRTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
1687                                bool isVarArg,
1688                                const SmallVectorImpl<ISD::OutputArg> &Outs,
1689                                const SmallVectorImpl<SDValue> &OutVals,
1690                                const SDLoc &dl, SelectionDAG &DAG) const {
1691   // CCValAssign - represent the assignment of the return value to locations.
1692   SmallVector<CCValAssign, 16> RVLocs;
1693 
1694   // CCState - Info about the registers and stack slot.
1695   CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
1696                  *DAG.getContext());
1697 
1698   MachineFunction &MF = DAG.getMachineFunction();
1699 
1700   // Analyze return values.
1701   if (CallConv == CallingConv::AVR_BUILTIN) {
1702     CCInfo.AnalyzeReturn(Outs, RetCC_AVR_BUILTIN);
1703   } else {
1704     analyzeReturnValues(Outs, CCInfo, Subtarget.hasTinyEncoding());
1705   }
1706 
1707   SDValue Glue;
1708   SmallVector<SDValue, 4> RetOps(1, Chain);
1709   // Copy the result values into the output registers.
1710   for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
1711     CCValAssign &VA = RVLocs[i];
1712     assert(VA.isRegLoc() && "Can only return in registers!");
1713 
1714     Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), OutVals[i], Glue);
1715 
1716     // Guarantee that all emitted copies are stuck together with flags.
1717     Glue = Chain.getValue(1);
1718     RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
1719   }
1720 
1721   // Don't emit the ret/reti instruction when the naked attribute is present in
1722   // the function being compiled.
1723   if (MF.getFunction().getAttributes().hasFnAttr(Attribute::Naked)) {
1724     return Chain;
1725   }
1726 
1727   const AVRMachineFunctionInfo *AFI = MF.getInfo<AVRMachineFunctionInfo>();
1728 
1729   if (!AFI->isInterruptOrSignalHandler()) {
1730     // The return instruction has an implicit zero register operand: it must
1731     // contain zero on return.
1732     // This is not needed in interrupts however, where the zero register is
1733     // handled specially (only pushed/popped when needed).
1734     RetOps.push_back(DAG.getRegister(Subtarget.getZeroRegister(), MVT::i8));
1735   }
1736 
1737   unsigned RetOpc =
1738       AFI->isInterruptOrSignalHandler() ? AVRISD::RETI_GLUE : AVRISD::RET_GLUE;
1739 
1740   RetOps[0] = Chain; // Update chain.
1741 
1742   if (Glue.getNode()) {
1743     RetOps.push_back(Glue);
1744   }
1745 
1746   return DAG.getNode(RetOpc, dl, MVT::Other, RetOps);
1747 }
1748 
1749 //===----------------------------------------------------------------------===//
1750 //  Custom Inserters
1751 //===----------------------------------------------------------------------===//
1752 
1753 MachineBasicBlock *AVRTargetLowering::insertShift(MachineInstr &MI,
1754                                                   MachineBasicBlock *BB,
1755                                                   bool Tiny) const {
1756   unsigned Opc;
1757   const TargetRegisterClass *RC;
1758   bool HasRepeatedOperand = false;
1759   MachineFunction *F = BB->getParent();
1760   MachineRegisterInfo &RI = F->getRegInfo();
1761   const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
1762   DebugLoc dl = MI.getDebugLoc();
1763 
1764   switch (MI.getOpcode()) {
1765   default:
1766     llvm_unreachable("Invalid shift opcode!");
1767   case AVR::Lsl8:
1768     Opc = AVR::ADDRdRr; // LSL is an alias of ADD Rd, Rd
1769     RC = &AVR::GPR8RegClass;
1770     HasRepeatedOperand = true;
1771     break;
1772   case AVR::Lsl16:
1773     Opc = AVR::LSLWRd;
1774     RC = &AVR::DREGSRegClass;
1775     break;
1776   case AVR::Asr8:
1777     Opc = AVR::ASRRd;
1778     RC = &AVR::GPR8RegClass;
1779     break;
1780   case AVR::Asr16:
1781     Opc = AVR::ASRWRd;
1782     RC = &AVR::DREGSRegClass;
1783     break;
1784   case AVR::Lsr8:
1785     Opc = AVR::LSRRd;
1786     RC = &AVR::GPR8RegClass;
1787     break;
1788   case AVR::Lsr16:
1789     Opc = AVR::LSRWRd;
1790     RC = &AVR::DREGSRegClass;
1791     break;
1792   case AVR::Rol8:
1793     Opc = Tiny ? AVR::ROLBRdR17 : AVR::ROLBRdR1;
1794     RC = &AVR::GPR8RegClass;
1795     break;
1796   case AVR::Rol16:
1797     Opc = AVR::ROLWRd;
1798     RC = &AVR::DREGSRegClass;
1799     break;
1800   case AVR::Ror8:
1801     Opc = AVR::RORBRd;
1802     RC = &AVR::GPR8RegClass;
1803     break;
1804   case AVR::Ror16:
1805     Opc = AVR::RORWRd;
1806     RC = &AVR::DREGSRegClass;
1807     break;
1808   }
1809 
1810   const BasicBlock *LLVM_BB = BB->getBasicBlock();
1811 
1812   MachineFunction::iterator I;
1813   for (I = BB->getIterator(); I != F->end() && &(*I) != BB; ++I)
1814     ;
1815   if (I != F->end())
1816     ++I;
1817 
1818   // Create loop block.
1819   MachineBasicBlock *LoopBB = F->CreateMachineBasicBlock(LLVM_BB);
1820   MachineBasicBlock *CheckBB = F->CreateMachineBasicBlock(LLVM_BB);
1821   MachineBasicBlock *RemBB = F->CreateMachineBasicBlock(LLVM_BB);
1822 
1823   F->insert(I, LoopBB);
1824   F->insert(I, CheckBB);
1825   F->insert(I, RemBB);
1826 
1827   // Update machine-CFG edges by transferring all successors of the current
1828   // block to the block containing instructions after shift.
1829   RemBB->splice(RemBB->begin(), BB, std::next(MachineBasicBlock::iterator(MI)),
1830                 BB->end());
1831   RemBB->transferSuccessorsAndUpdatePHIs(BB);
1832 
1833   // Add edges BB => LoopBB => CheckBB => RemBB, CheckBB => LoopBB.
1834   BB->addSuccessor(CheckBB);
1835   LoopBB->addSuccessor(CheckBB);
1836   CheckBB->addSuccessor(LoopBB);
1837   CheckBB->addSuccessor(RemBB);
1838 
1839   Register ShiftAmtReg = RI.createVirtualRegister(&AVR::GPR8RegClass);
1840   Register ShiftAmtReg2 = RI.createVirtualRegister(&AVR::GPR8RegClass);
1841   Register ShiftReg = RI.createVirtualRegister(RC);
1842   Register ShiftReg2 = RI.createVirtualRegister(RC);
1843   Register ShiftAmtSrcReg = MI.getOperand(2).getReg();
1844   Register SrcReg = MI.getOperand(1).getReg();
1845   Register DstReg = MI.getOperand(0).getReg();
1846 
1847   // BB:
1848   // rjmp CheckBB
1849   BuildMI(BB, dl, TII.get(AVR::RJMPk)).addMBB(CheckBB);
1850 
1851   // LoopBB:
1852   // ShiftReg2 = shift ShiftReg
1853   auto ShiftMI = BuildMI(LoopBB, dl, TII.get(Opc), ShiftReg2).addReg(ShiftReg);
1854   if (HasRepeatedOperand)
1855     ShiftMI.addReg(ShiftReg);
1856 
1857   // CheckBB:
1858   // ShiftReg = phi [%SrcReg, BB], [%ShiftReg2, LoopBB]
1859   // ShiftAmt = phi [%N,      BB], [%ShiftAmt2, LoopBB]
1860   // DestReg  = phi [%SrcReg, BB], [%ShiftReg,  LoopBB]
1861   // ShiftAmt2 = ShiftAmt - 1;
1862   // if (ShiftAmt2 >= 0) goto LoopBB;
1863   BuildMI(CheckBB, dl, TII.get(AVR::PHI), ShiftReg)
1864       .addReg(SrcReg)
1865       .addMBB(BB)
1866       .addReg(ShiftReg2)
1867       .addMBB(LoopBB);
1868   BuildMI(CheckBB, dl, TII.get(AVR::PHI), ShiftAmtReg)
1869       .addReg(ShiftAmtSrcReg)
1870       .addMBB(BB)
1871       .addReg(ShiftAmtReg2)
1872       .addMBB(LoopBB);
1873   BuildMI(CheckBB, dl, TII.get(AVR::PHI), DstReg)
1874       .addReg(SrcReg)
1875       .addMBB(BB)
1876       .addReg(ShiftReg2)
1877       .addMBB(LoopBB);
1878 
1879   BuildMI(CheckBB, dl, TII.get(AVR::DECRd), ShiftAmtReg2).addReg(ShiftAmtReg);
1880   BuildMI(CheckBB, dl, TII.get(AVR::BRPLk)).addMBB(LoopBB);
1881 
1882   MI.eraseFromParent(); // The pseudo instruction is gone now.
1883   return RemBB;
1884 }
1885 
1886 // Do a multibyte AVR shift. Insert shift instructions and put the output
1887 // registers in the Regs array.
1888 // Because AVR does not have a normal shift instruction (only a single bit shift
1889 // instruction), we have to emulate this behavior with other instructions.
1890 // It first tries large steps (moving registers around) and then smaller steps
1891 // like single bit shifts.
1892 // Large shifts actually reduce the number of shifted registers, so the below
1893 // algorithms have to work independently of the number of registers that are
1894 // shifted.
1895 // For more information and background, see this blogpost:
1896 // https://aykevl.nl/2021/02/avr-bitshift
1897 static void insertMultibyteShift(MachineInstr &MI, MachineBasicBlock *BB,
1898                                  MutableArrayRef<std::pair<Register, int>> Regs,
1899                                  ISD::NodeType Opc, int64_t ShiftAmt) {
1900   const TargetInstrInfo &TII = *BB->getParent()->getSubtarget().getInstrInfo();
1901   const AVRSubtarget &STI = BB->getParent()->getSubtarget<AVRSubtarget>();
1902   MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
1903   const DebugLoc &dl = MI.getDebugLoc();
1904 
1905   const bool ShiftLeft = Opc == ISD::SHL;
1906   const bool ArithmeticShift = Opc == ISD::SRA;
1907 
1908   // Zero a register, for use in later operations.
1909   Register ZeroReg = MRI.createVirtualRegister(&AVR::GPR8RegClass);
1910   BuildMI(*BB, MI, dl, TII.get(AVR::COPY), ZeroReg)
1911       .addReg(STI.getZeroRegister());
1912 
1913   // Do a shift modulo 6 or 7. This is a bit more complicated than most shifts
1914   // and is hard to compose with the rest, so these are special cased.
1915   // The basic idea is to shift one or two bits in the opposite direction and
1916   // then move registers around to get the correct end result.
1917   if (ShiftLeft && (ShiftAmt % 8) >= 6) {
1918     // Left shift modulo 6 or 7.
1919 
1920     // Create a slice of the registers we're going to modify, to ease working
1921     // with them.
1922     size_t ShiftRegsOffset = ShiftAmt / 8;
1923     size_t ShiftRegsSize = Regs.size() - ShiftRegsOffset;
1924     MutableArrayRef<std::pair<Register, int>> ShiftRegs =
1925         Regs.slice(ShiftRegsOffset, ShiftRegsSize);
1926 
1927     // Shift one to the right, keeping the least significant bit as the carry
1928     // bit.
1929     insertMultibyteShift(MI, BB, ShiftRegs, ISD::SRL, 1);
1930 
1931     // Rotate the least significant bit from the carry bit into a new register
1932     // (that starts out zero).
1933     Register LowByte = MRI.createVirtualRegister(&AVR::GPR8RegClass);
1934     BuildMI(*BB, MI, dl, TII.get(AVR::RORRd), LowByte).addReg(ZeroReg);
1935 
1936     // Shift one more to the right if this is a modulo-6 shift.
1937     if (ShiftAmt % 8 == 6) {
1938       insertMultibyteShift(MI, BB, ShiftRegs, ISD::SRL, 1);
1939       Register NewLowByte = MRI.createVirtualRegister(&AVR::GPR8RegClass);
1940       BuildMI(*BB, MI, dl, TII.get(AVR::RORRd), NewLowByte).addReg(LowByte);
1941       LowByte = NewLowByte;
1942     }
1943 
1944     // Move all registers to the left, zeroing the bottom registers as needed.
1945     for (size_t I = 0; I < Regs.size(); I++) {
1946       int ShiftRegsIdx = I + 1;
1947       if (ShiftRegsIdx < (int)ShiftRegs.size()) {
1948         Regs[I] = ShiftRegs[ShiftRegsIdx];
1949       } else if (ShiftRegsIdx == (int)ShiftRegs.size()) {
1950         Regs[I] = std::pair(LowByte, 0);
1951       } else {
1952         Regs[I] = std::pair(ZeroReg, 0);
1953       }
1954     }
1955 
1956     return;
1957   }
1958 
1959   // Right shift modulo 6 or 7.
1960   if (!ShiftLeft && (ShiftAmt % 8) >= 6) {
1961     // Create a view on the registers we're going to modify, to ease working
1962     // with them.
1963     size_t ShiftRegsSize = Regs.size() - (ShiftAmt / 8);
1964     MutableArrayRef<std::pair<Register, int>> ShiftRegs =
1965         Regs.slice(0, ShiftRegsSize);
1966 
1967     // Shift one to the left.
1968     insertMultibyteShift(MI, BB, ShiftRegs, ISD::SHL, 1);
1969 
1970     // Sign or zero extend the most significant register into a new register.
1971     // The HighByte is the byte that still has one (or two) bits from the
1972     // original value. The ExtByte is purely a zero/sign extend byte (all bits
1973     // are either 0 or 1).
1974     Register HighByte = MRI.createVirtualRegister(&AVR::GPR8RegClass);
1975     Register ExtByte = 0;
1976     if (ArithmeticShift) {
1977       // Sign-extend bit that was shifted out last.
1978       BuildMI(*BB, MI, dl, TII.get(AVR::SBCRdRr), HighByte)
1979           .addReg(HighByte, RegState::Undef)
1980           .addReg(HighByte, RegState::Undef);
1981       ExtByte = HighByte;
1982       // The highest bit of the original value is the same as the zero-extend
1983       // byte, so HighByte and ExtByte are the same.
1984     } else {
1985       // Use the zero register for zero extending.
1986       ExtByte = ZeroReg;
1987       // Rotate most significant bit into a new register (that starts out zero).
1988       BuildMI(*BB, MI, dl, TII.get(AVR::ADCRdRr), HighByte)
1989           .addReg(ExtByte)
1990           .addReg(ExtByte);
1991     }
1992 
1993     // Shift one more to the left for modulo 6 shifts.
1994     if (ShiftAmt % 8 == 6) {
1995       insertMultibyteShift(MI, BB, ShiftRegs, ISD::SHL, 1);
1996       // Shift the topmost bit into the HighByte.
1997       Register NewExt = MRI.createVirtualRegister(&AVR::GPR8RegClass);
1998       BuildMI(*BB, MI, dl, TII.get(AVR::ADCRdRr), NewExt)
1999           .addReg(HighByte)
2000           .addReg(HighByte);
2001       HighByte = NewExt;
2002     }
2003 
2004     // Move all to the right, while sign or zero extending.
2005     for (int I = Regs.size() - 1; I >= 0; I--) {
2006       int ShiftRegsIdx = I - (Regs.size() - ShiftRegs.size()) - 1;
2007       if (ShiftRegsIdx >= 0) {
2008         Regs[I] = ShiftRegs[ShiftRegsIdx];
2009       } else if (ShiftRegsIdx == -1) {
2010         Regs[I] = std::pair(HighByte, 0);
2011       } else {
2012         Regs[I] = std::pair(ExtByte, 0);
2013       }
2014     }
2015 
2016     return;
2017   }
2018 
2019   // For shift amounts of at least one register, simply rename the registers and
2020   // zero the bottom registers.
2021   while (ShiftLeft && ShiftAmt >= 8) {
2022     // Move all registers one to the left.
2023     for (size_t I = 0; I < Regs.size() - 1; I++) {
2024       Regs[I] = Regs[I + 1];
2025     }
2026 
2027     // Zero the least significant register.
2028     Regs[Regs.size() - 1] = std::pair(ZeroReg, 0);
2029 
2030     // Continue shifts with the leftover registers.
2031     Regs = Regs.drop_back(1);
2032 
2033     ShiftAmt -= 8;
2034   }
2035 
2036   // And again, the same for right shifts.
2037   Register ShrExtendReg = 0;
2038   if (!ShiftLeft && ShiftAmt >= 8) {
2039     if (ArithmeticShift) {
2040       // Sign extend the most significant register into ShrExtendReg.
2041       ShrExtendReg = MRI.createVirtualRegister(&AVR::GPR8RegClass);
2042       Register Tmp = MRI.createVirtualRegister(&AVR::GPR8RegClass);
2043       BuildMI(*BB, MI, dl, TII.get(AVR::ADDRdRr), Tmp)
2044           .addReg(Regs[0].first, 0, Regs[0].second)
2045           .addReg(Regs[0].first, 0, Regs[0].second);
2046       BuildMI(*BB, MI, dl, TII.get(AVR::SBCRdRr), ShrExtendReg)
2047           .addReg(Tmp)
2048           .addReg(Tmp);
2049     } else {
2050       ShrExtendReg = ZeroReg;
2051     }
2052     for (; ShiftAmt >= 8; ShiftAmt -= 8) {
2053       // Move all registers one to the right.
2054       for (size_t I = Regs.size() - 1; I != 0; I--) {
2055         Regs[I] = Regs[I - 1];
2056       }
2057 
2058       // Zero or sign extend the most significant register.
2059       Regs[0] = std::pair(ShrExtendReg, 0);
2060 
2061       // Continue shifts with the leftover registers.
2062       Regs = Regs.drop_front(1);
2063     }
2064   }
2065 
2066   // The bigger shifts are already handled above.
2067   assert((ShiftAmt < 8) && "Unexpect shift amount");
2068 
2069   // Shift by four bits, using a complicated swap/eor/andi/eor sequence.
2070   // It only works for logical shifts because the bits shifted in are all
2071   // zeroes.
2072   // To shift a single byte right, it produces code like this:
2073   //   swap r0
2074   //   andi r0, 0x0f
2075   // For a two-byte (16-bit) shift, it adds the following instructions to shift
2076   // the upper byte into the lower byte:
2077   //   swap r1
2078   //   eor r0, r1
2079   //   andi r1, 0x0f
2080   //   eor r0, r1
2081   // For bigger shifts, it repeats the above sequence. For example, for a 3-byte
2082   // (24-bit) shift it adds:
2083   //   swap r2
2084   //   eor r1, r2
2085   //   andi r2, 0x0f
2086   //   eor r1, r2
2087   if (!ArithmeticShift && ShiftAmt >= 4) {
2088     Register Prev = 0;
2089     for (size_t I = 0; I < Regs.size(); I++) {
2090       size_t Idx = ShiftLeft ? I : Regs.size() - I - 1;
2091       Register SwapReg = MRI.createVirtualRegister(&AVR::LD8RegClass);
2092       BuildMI(*BB, MI, dl, TII.get(AVR::SWAPRd), SwapReg)
2093           .addReg(Regs[Idx].first, 0, Regs[Idx].second);
2094       if (I != 0) {
2095         Register R = MRI.createVirtualRegister(&AVR::GPR8RegClass);
2096         BuildMI(*BB, MI, dl, TII.get(AVR::EORRdRr), R)
2097             .addReg(Prev)
2098             .addReg(SwapReg);
2099         Prev = R;
2100       }
2101       Register AndReg = MRI.createVirtualRegister(&AVR::LD8RegClass);
2102       BuildMI(*BB, MI, dl, TII.get(AVR::ANDIRdK), AndReg)
2103           .addReg(SwapReg)
2104           .addImm(ShiftLeft ? 0xf0 : 0x0f);
2105       if (I != 0) {
2106         Register R = MRI.createVirtualRegister(&AVR::GPR8RegClass);
2107         BuildMI(*BB, MI, dl, TII.get(AVR::EORRdRr), R)
2108             .addReg(Prev)
2109             .addReg(AndReg);
2110         size_t PrevIdx = ShiftLeft ? Idx - 1 : Idx + 1;
2111         Regs[PrevIdx] = std::pair(R, 0);
2112       }
2113       Prev = AndReg;
2114       Regs[Idx] = std::pair(AndReg, 0);
2115     }
2116     ShiftAmt -= 4;
2117   }
2118 
2119   // Shift by one. This is the fallback that always works, and the shift
2120   // operation that is used for 1, 2, and 3 bit shifts.
2121   while (ShiftLeft && ShiftAmt) {
2122     // Shift one to the left.
2123     for (ssize_t I = Regs.size() - 1; I >= 0; I--) {
2124       Register Out = MRI.createVirtualRegister(&AVR::GPR8RegClass);
2125       Register In = Regs[I].first;
2126       Register InSubreg = Regs[I].second;
2127       if (I == (ssize_t)Regs.size() - 1) { // first iteration
2128         BuildMI(*BB, MI, dl, TII.get(AVR::ADDRdRr), Out)
2129             .addReg(In, 0, InSubreg)
2130             .addReg(In, 0, InSubreg);
2131       } else {
2132         BuildMI(*BB, MI, dl, TII.get(AVR::ADCRdRr), Out)
2133             .addReg(In, 0, InSubreg)
2134             .addReg(In, 0, InSubreg);
2135       }
2136       Regs[I] = std::pair(Out, 0);
2137     }
2138     ShiftAmt--;
2139   }
2140   while (!ShiftLeft && ShiftAmt) {
2141     // Shift one to the right.
2142     for (size_t I = 0; I < Regs.size(); I++) {
2143       Register Out = MRI.createVirtualRegister(&AVR::GPR8RegClass);
2144       Register In = Regs[I].first;
2145       Register InSubreg = Regs[I].second;
2146       if (I == 0) {
2147         unsigned Opc = ArithmeticShift ? AVR::ASRRd : AVR::LSRRd;
2148         BuildMI(*BB, MI, dl, TII.get(Opc), Out).addReg(In, 0, InSubreg);
2149       } else {
2150         BuildMI(*BB, MI, dl, TII.get(AVR::RORRd), Out).addReg(In, 0, InSubreg);
2151       }
2152       Regs[I] = std::pair(Out, 0);
2153     }
2154     ShiftAmt--;
2155   }
2156 
2157   if (ShiftAmt != 0) {
2158     llvm_unreachable("don't know how to shift!"); // sanity check
2159   }
2160 }
2161 
2162 // Do a wide (32-bit) shift.
2163 MachineBasicBlock *
2164 AVRTargetLowering::insertWideShift(MachineInstr &MI,
2165                                    MachineBasicBlock *BB) const {
2166   const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
2167   const DebugLoc &dl = MI.getDebugLoc();
2168 
2169   // How much to shift to the right (meaning: a negative number indicates a left
2170   // shift).
2171   int64_t ShiftAmt = MI.getOperand(4).getImm();
2172   ISD::NodeType Opc;
2173   switch (MI.getOpcode()) {
2174   case AVR::Lsl32:
2175     Opc = ISD::SHL;
2176     break;
2177   case AVR::Lsr32:
2178     Opc = ISD::SRL;
2179     break;
2180   case AVR::Asr32:
2181     Opc = ISD::SRA;
2182     break;
2183   }
2184 
2185   // Read the input registers, with the most significant register at index 0.
2186   std::array<std::pair<Register, int>, 4> Registers = {
2187       std::pair(MI.getOperand(3).getReg(), AVR::sub_hi),
2188       std::pair(MI.getOperand(3).getReg(), AVR::sub_lo),
2189       std::pair(MI.getOperand(2).getReg(), AVR::sub_hi),
2190       std::pair(MI.getOperand(2).getReg(), AVR::sub_lo),
2191   };
2192 
2193   // Do the shift. The registers are modified in-place.
2194   insertMultibyteShift(MI, BB, Registers, Opc, ShiftAmt);
2195 
2196   // Combine the 8-bit registers into 16-bit register pairs.
2197   // This done either from LSB to MSB or from MSB to LSB, depending on the
2198   // shift. It's an optimization so that the register allocator will use the
2199   // fewest movs possible (which order we use isn't a correctness issue, just an
2200   // optimization issue).
2201   //   - lsl prefers starting from the most significant byte (2nd case).
2202   //   - lshr prefers starting from the least significant byte (1st case).
2203   //   - for ashr it depends on the number of shifted bytes.
2204   // Some shift operations still don't get the most optimal mov sequences even
2205   // with this distinction. TODO: figure out why and try to fix it (but we're
2206   // already equal to or faster than avr-gcc in all cases except ashr 8).
2207   if (Opc != ISD::SHL &&
2208       (Opc != ISD::SRA || (ShiftAmt < 16 || ShiftAmt >= 22))) {
2209     // Use the resulting registers starting with the least significant byte.
2210     BuildMI(*BB, MI, dl, TII.get(AVR::REG_SEQUENCE), MI.getOperand(0).getReg())
2211         .addReg(Registers[3].first, 0, Registers[3].second)
2212         .addImm(AVR::sub_lo)
2213         .addReg(Registers[2].first, 0, Registers[2].second)
2214         .addImm(AVR::sub_hi);
2215     BuildMI(*BB, MI, dl, TII.get(AVR::REG_SEQUENCE), MI.getOperand(1).getReg())
2216         .addReg(Registers[1].first, 0, Registers[1].second)
2217         .addImm(AVR::sub_lo)
2218         .addReg(Registers[0].first, 0, Registers[0].second)
2219         .addImm(AVR::sub_hi);
2220   } else {
2221     // Use the resulting registers starting with the most significant byte.
2222     BuildMI(*BB, MI, dl, TII.get(AVR::REG_SEQUENCE), MI.getOperand(1).getReg())
2223         .addReg(Registers[0].first, 0, Registers[0].second)
2224         .addImm(AVR::sub_hi)
2225         .addReg(Registers[1].first, 0, Registers[1].second)
2226         .addImm(AVR::sub_lo);
2227     BuildMI(*BB, MI, dl, TII.get(AVR::REG_SEQUENCE), MI.getOperand(0).getReg())
2228         .addReg(Registers[2].first, 0, Registers[2].second)
2229         .addImm(AVR::sub_hi)
2230         .addReg(Registers[3].first, 0, Registers[3].second)
2231         .addImm(AVR::sub_lo);
2232   }
2233 
2234   // Remove the pseudo instruction.
2235   MI.eraseFromParent();
2236   return BB;
2237 }
2238 
2239 static bool isCopyMulResult(MachineBasicBlock::iterator const &I) {
2240   if (I->getOpcode() == AVR::COPY) {
2241     Register SrcReg = I->getOperand(1).getReg();
2242     return (SrcReg == AVR::R0 || SrcReg == AVR::R1);
2243   }
2244 
2245   return false;
2246 }
2247 
2248 // The mul instructions wreak havock on our zero_reg R1. We need to clear it
2249 // after the result has been evacuated. This is probably not the best way to do
2250 // it, but it works for now.
2251 MachineBasicBlock *AVRTargetLowering::insertMul(MachineInstr &MI,
2252                                                 MachineBasicBlock *BB) const {
2253   const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
2254   MachineBasicBlock::iterator I(MI);
2255   ++I; // in any case insert *after* the mul instruction
2256   if (isCopyMulResult(I))
2257     ++I;
2258   if (isCopyMulResult(I))
2259     ++I;
2260   BuildMI(*BB, I, MI.getDebugLoc(), TII.get(AVR::EORRdRr), AVR::R1)
2261       .addReg(AVR::R1)
2262       .addReg(AVR::R1);
2263   return BB;
2264 }
2265 
2266 // Insert a read from the zero register.
2267 MachineBasicBlock *
2268 AVRTargetLowering::insertCopyZero(MachineInstr &MI,
2269                                   MachineBasicBlock *BB) const {
2270   const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
2271   MachineBasicBlock::iterator I(MI);
2272   BuildMI(*BB, I, MI.getDebugLoc(), TII.get(AVR::COPY))
2273       .add(MI.getOperand(0))
2274       .addReg(Subtarget.getZeroRegister());
2275   MI.eraseFromParent();
2276   return BB;
2277 }
2278 
2279 // Lower atomicrmw operation to disable interrupts, do operation, and restore
2280 // interrupts. This works because all AVR microcontrollers are single core.
2281 MachineBasicBlock *AVRTargetLowering::insertAtomicArithmeticOp(
2282     MachineInstr &MI, MachineBasicBlock *BB, unsigned Opcode, int Width) const {
2283   MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
2284   const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
2285   MachineBasicBlock::iterator I(MI);
2286   DebugLoc dl = MI.getDebugLoc();
2287 
2288   // Example instruction sequence, for an atomic 8-bit add:
2289   //   ldi r25, 5
2290   //   in r0, SREG
2291   //   cli
2292   //   ld r24, X
2293   //   add r25, r24
2294   //   st X, r25
2295   //   out SREG, r0
2296 
2297   const TargetRegisterClass *RC =
2298       (Width == 8) ? &AVR::GPR8RegClass : &AVR::DREGSRegClass;
2299   unsigned LoadOpcode = (Width == 8) ? AVR::LDRdPtr : AVR::LDWRdPtr;
2300   unsigned StoreOpcode = (Width == 8) ? AVR::STPtrRr : AVR::STWPtrRr;
2301 
2302   // Disable interrupts.
2303   BuildMI(*BB, I, dl, TII.get(AVR::INRdA), Subtarget.getTmpRegister())
2304       .addImm(Subtarget.getIORegSREG());
2305   BuildMI(*BB, I, dl, TII.get(AVR::BCLRs)).addImm(7);
2306 
2307   // Load the original value.
2308   BuildMI(*BB, I, dl, TII.get(LoadOpcode), MI.getOperand(0).getReg())
2309       .add(MI.getOperand(1));
2310 
2311   // Do the arithmetic operation.
2312   Register Result = MRI.createVirtualRegister(RC);
2313   BuildMI(*BB, I, dl, TII.get(Opcode), Result)
2314       .addReg(MI.getOperand(0).getReg())
2315       .add(MI.getOperand(2));
2316 
2317   // Store the result.
2318   BuildMI(*BB, I, dl, TII.get(StoreOpcode))
2319       .add(MI.getOperand(1))
2320       .addReg(Result);
2321 
2322   // Restore interrupts.
2323   BuildMI(*BB, I, dl, TII.get(AVR::OUTARr))
2324       .addImm(Subtarget.getIORegSREG())
2325       .addReg(Subtarget.getTmpRegister());
2326 
2327   // Remove the pseudo instruction.
2328   MI.eraseFromParent();
2329   return BB;
2330 }
2331 
2332 MachineBasicBlock *
2333 AVRTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
2334                                                MachineBasicBlock *MBB) const {
2335   int Opc = MI.getOpcode();
2336   const AVRSubtarget &STI = MBB->getParent()->getSubtarget<AVRSubtarget>();
2337 
2338   // Pseudo shift instructions with a non constant shift amount are expanded
2339   // into a loop.
2340   switch (Opc) {
2341   case AVR::Lsl8:
2342   case AVR::Lsl16:
2343   case AVR::Lsr8:
2344   case AVR::Lsr16:
2345   case AVR::Rol8:
2346   case AVR::Rol16:
2347   case AVR::Ror8:
2348   case AVR::Ror16:
2349   case AVR::Asr8:
2350   case AVR::Asr16:
2351     return insertShift(MI, MBB, STI.hasTinyEncoding());
2352   case AVR::Lsl32:
2353   case AVR::Lsr32:
2354   case AVR::Asr32:
2355     return insertWideShift(MI, MBB);
2356   case AVR::MULRdRr:
2357   case AVR::MULSRdRr:
2358     return insertMul(MI, MBB);
2359   case AVR::CopyZero:
2360     return insertCopyZero(MI, MBB);
2361   case AVR::AtomicLoadAdd8:
2362     return insertAtomicArithmeticOp(MI, MBB, AVR::ADDRdRr, 8);
2363   case AVR::AtomicLoadAdd16:
2364     return insertAtomicArithmeticOp(MI, MBB, AVR::ADDWRdRr, 16);
2365   case AVR::AtomicLoadSub8:
2366     return insertAtomicArithmeticOp(MI, MBB, AVR::SUBRdRr, 8);
2367   case AVR::AtomicLoadSub16:
2368     return insertAtomicArithmeticOp(MI, MBB, AVR::SUBWRdRr, 16);
2369   case AVR::AtomicLoadAnd8:
2370     return insertAtomicArithmeticOp(MI, MBB, AVR::ANDRdRr, 8);
2371   case AVR::AtomicLoadAnd16:
2372     return insertAtomicArithmeticOp(MI, MBB, AVR::ANDWRdRr, 16);
2373   case AVR::AtomicLoadOr8:
2374     return insertAtomicArithmeticOp(MI, MBB, AVR::ORRdRr, 8);
2375   case AVR::AtomicLoadOr16:
2376     return insertAtomicArithmeticOp(MI, MBB, AVR::ORWRdRr, 16);
2377   case AVR::AtomicLoadXor8:
2378     return insertAtomicArithmeticOp(MI, MBB, AVR::EORRdRr, 8);
2379   case AVR::AtomicLoadXor16:
2380     return insertAtomicArithmeticOp(MI, MBB, AVR::EORWRdRr, 16);
2381   }
2382 
2383   assert((Opc == AVR::Select16 || Opc == AVR::Select8) &&
2384          "Unexpected instr type to insert");
2385 
2386   const AVRInstrInfo &TII = (const AVRInstrInfo &)*MI.getParent()
2387                                 ->getParent()
2388                                 ->getSubtarget()
2389                                 .getInstrInfo();
2390   DebugLoc dl = MI.getDebugLoc();
2391 
2392   // To "insert" a SELECT instruction, we insert the diamond
2393   // control-flow pattern. The incoming instruction knows the
2394   // destination vreg to set, the condition code register to branch
2395   // on, the true/false values to select between, and a branch opcode
2396   // to use.
2397 
2398   MachineFunction *MF = MBB->getParent();
2399   const BasicBlock *LLVM_BB = MBB->getBasicBlock();
2400   MachineBasicBlock *FallThrough = MBB->getFallThrough();
2401 
2402   // If the current basic block falls through to another basic block,
2403   // we must insert an unconditional branch to the fallthrough destination
2404   // if we are to insert basic blocks at the prior fallthrough point.
2405   if (FallThrough != nullptr) {
2406     BuildMI(MBB, dl, TII.get(AVR::RJMPk)).addMBB(FallThrough);
2407   }
2408 
2409   MachineBasicBlock *trueMBB = MF->CreateMachineBasicBlock(LLVM_BB);
2410   MachineBasicBlock *falseMBB = MF->CreateMachineBasicBlock(LLVM_BB);
2411 
2412   MachineFunction::iterator I;
2413   for (I = MF->begin(); I != MF->end() && &(*I) != MBB; ++I)
2414     ;
2415   if (I != MF->end())
2416     ++I;
2417   MF->insert(I, trueMBB);
2418   MF->insert(I, falseMBB);
2419 
2420   // Set the call frame size on entry to the new basic blocks.
2421   unsigned CallFrameSize = TII.getCallFrameSizeAt(MI);
2422   trueMBB->setCallFrameSize(CallFrameSize);
2423   falseMBB->setCallFrameSize(CallFrameSize);
2424 
2425   // Transfer remaining instructions and all successors of the current
2426   // block to the block which will contain the Phi node for the
2427   // select.
2428   trueMBB->splice(trueMBB->begin(), MBB,
2429                   std::next(MachineBasicBlock::iterator(MI)), MBB->end());
2430   trueMBB->transferSuccessorsAndUpdatePHIs(MBB);
2431 
2432   AVRCC::CondCodes CC = (AVRCC::CondCodes)MI.getOperand(3).getImm();
2433   BuildMI(MBB, dl, TII.getBrCond(CC)).addMBB(trueMBB);
2434   BuildMI(MBB, dl, TII.get(AVR::RJMPk)).addMBB(falseMBB);
2435   MBB->addSuccessor(falseMBB);
2436   MBB->addSuccessor(trueMBB);
2437 
2438   // Unconditionally flow back to the true block
2439   BuildMI(falseMBB, dl, TII.get(AVR::RJMPk)).addMBB(trueMBB);
2440   falseMBB->addSuccessor(trueMBB);
2441 
2442   // Set up the Phi node to determine where we came from
2443   BuildMI(*trueMBB, trueMBB->begin(), dl, TII.get(AVR::PHI),
2444           MI.getOperand(0).getReg())
2445       .addReg(MI.getOperand(1).getReg())
2446       .addMBB(MBB)
2447       .addReg(MI.getOperand(2).getReg())
2448       .addMBB(falseMBB);
2449 
2450   MI.eraseFromParent(); // The pseudo instruction is gone now.
2451   return trueMBB;
2452 }
2453 
2454 //===----------------------------------------------------------------------===//
2455 //  Inline Asm Support
2456 //===----------------------------------------------------------------------===//
2457 
2458 AVRTargetLowering::ConstraintType
2459 AVRTargetLowering::getConstraintType(StringRef Constraint) const {
2460   if (Constraint.size() == 1) {
2461     // See http://www.nongnu.org/avr-libc/user-manual/inline_asm.html
2462     switch (Constraint[0]) {
2463     default:
2464       break;
2465     case 'a': // Simple upper registers
2466     case 'b': // Base pointer registers pairs
2467     case 'd': // Upper register
2468     case 'l': // Lower registers
2469     case 'e': // Pointer register pairs
2470     case 'q': // Stack pointer register
2471     case 'r': // Any register
2472     case 'w': // Special upper register pairs
2473       return C_RegisterClass;
2474     case 't': // Temporary register
2475     case 'x':
2476     case 'X': // Pointer register pair X
2477     case 'y':
2478     case 'Y': // Pointer register pair Y
2479     case 'z':
2480     case 'Z': // Pointer register pair Z
2481       return C_Register;
2482     case 'Q': // A memory address based on Y or Z pointer with displacement.
2483       return C_Memory;
2484     case 'G': // Floating point constant
2485     case 'I': // 6-bit positive integer constant
2486     case 'J': // 6-bit negative integer constant
2487     case 'K': // Integer constant (Range: 2)
2488     case 'L': // Integer constant (Range: 0)
2489     case 'M': // 8-bit integer constant
2490     case 'N': // Integer constant (Range: -1)
2491     case 'O': // Integer constant (Range: 8, 16, 24)
2492     case 'P': // Integer constant (Range: 1)
2493     case 'R': // Integer constant (Range: -6 to 5)x
2494       return C_Immediate;
2495     }
2496   }
2497 
2498   return TargetLowering::getConstraintType(Constraint);
2499 }
2500 
2501 InlineAsm::ConstraintCode
2502 AVRTargetLowering::getInlineAsmMemConstraint(StringRef ConstraintCode) const {
2503   // Not sure if this is actually the right thing to do, but we got to do
2504   // *something* [agnat]
2505   switch (ConstraintCode[0]) {
2506   case 'Q':
2507     return InlineAsm::ConstraintCode::Q;
2508   }
2509   return TargetLowering::getInlineAsmMemConstraint(ConstraintCode);
2510 }
2511 
2512 AVRTargetLowering::ConstraintWeight
2513 AVRTargetLowering::getSingleConstraintMatchWeight(
2514     AsmOperandInfo &info, const char *constraint) const {
2515   ConstraintWeight weight = CW_Invalid;
2516   Value *CallOperandVal = info.CallOperandVal;
2517 
2518   // If we don't have a value, we can't do a match,
2519   // but allow it at the lowest weight.
2520   // (this behaviour has been copied from the ARM backend)
2521   if (!CallOperandVal) {
2522     return CW_Default;
2523   }
2524 
2525   // Look at the constraint type.
2526   switch (*constraint) {
2527   default:
2528     weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
2529     break;
2530   case 'd':
2531   case 'r':
2532   case 'l':
2533     weight = CW_Register;
2534     break;
2535   case 'a':
2536   case 'b':
2537   case 'e':
2538   case 'q':
2539   case 't':
2540   case 'w':
2541   case 'x':
2542   case 'X':
2543   case 'y':
2544   case 'Y':
2545   case 'z':
2546   case 'Z':
2547     weight = CW_SpecificReg;
2548     break;
2549   case 'G':
2550     if (const ConstantFP *C = dyn_cast<ConstantFP>(CallOperandVal)) {
2551       if (C->isZero()) {
2552         weight = CW_Constant;
2553       }
2554     }
2555     break;
2556   case 'I':
2557     if (const ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
2558       if (isUInt<6>(C->getZExtValue())) {
2559         weight = CW_Constant;
2560       }
2561     }
2562     break;
2563   case 'J':
2564     if (const ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
2565       if ((C->getSExtValue() >= -63) && (C->getSExtValue() <= 0)) {
2566         weight = CW_Constant;
2567       }
2568     }
2569     break;
2570   case 'K':
2571     if (const ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
2572       if (C->getZExtValue() == 2) {
2573         weight = CW_Constant;
2574       }
2575     }
2576     break;
2577   case 'L':
2578     if (const ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
2579       if (C->getZExtValue() == 0) {
2580         weight = CW_Constant;
2581       }
2582     }
2583     break;
2584   case 'M':
2585     if (const ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
2586       if (isUInt<8>(C->getZExtValue())) {
2587         weight = CW_Constant;
2588       }
2589     }
2590     break;
2591   case 'N':
2592     if (const ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
2593       if (C->getSExtValue() == -1) {
2594         weight = CW_Constant;
2595       }
2596     }
2597     break;
2598   case 'O':
2599     if (const ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
2600       if ((C->getZExtValue() == 8) || (C->getZExtValue() == 16) ||
2601           (C->getZExtValue() == 24)) {
2602         weight = CW_Constant;
2603       }
2604     }
2605     break;
2606   case 'P':
2607     if (const ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
2608       if (C->getZExtValue() == 1) {
2609         weight = CW_Constant;
2610       }
2611     }
2612     break;
2613   case 'R':
2614     if (const ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
2615       if ((C->getSExtValue() >= -6) && (C->getSExtValue() <= 5)) {
2616         weight = CW_Constant;
2617       }
2618     }
2619     break;
2620   case 'Q':
2621     weight = CW_Memory;
2622     break;
2623   }
2624 
2625   return weight;
2626 }
2627 
2628 std::pair<unsigned, const TargetRegisterClass *>
2629 AVRTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
2630                                                 StringRef Constraint,
2631                                                 MVT VT) const {
2632   if (Constraint.size() == 1) {
2633     switch (Constraint[0]) {
2634     case 'a': // Simple upper registers r16..r23.
2635       if (VT == MVT::i8)
2636         return std::make_pair(0U, &AVR::LD8loRegClass);
2637       else if (VT == MVT::i16)
2638         return std::make_pair(0U, &AVR::DREGSLD8loRegClass);
2639       break;
2640     case 'b': // Base pointer registers: y, z.
2641       if (VT == MVT::i8 || VT == MVT::i16)
2642         return std::make_pair(0U, &AVR::PTRDISPREGSRegClass);
2643       break;
2644     case 'd': // Upper registers r16..r31.
2645       if (VT == MVT::i8)
2646         return std::make_pair(0U, &AVR::LD8RegClass);
2647       else if (VT == MVT::i16)
2648         return std::make_pair(0U, &AVR::DLDREGSRegClass);
2649       break;
2650     case 'l': // Lower registers r0..r15.
2651       if (VT == MVT::i8)
2652         return std::make_pair(0U, &AVR::GPR8loRegClass);
2653       else if (VT == MVT::i16)
2654         return std::make_pair(0U, &AVR::DREGSloRegClass);
2655       break;
2656     case 'e': // Pointer register pairs: x, y, z.
2657       if (VT == MVT::i8 || VT == MVT::i16)
2658         return std::make_pair(0U, &AVR::PTRREGSRegClass);
2659       break;
2660     case 'q': // Stack pointer register: SPH:SPL.
2661       return std::make_pair(0U, &AVR::GPRSPRegClass);
2662     case 'r': // Any register: r0..r31.
2663       if (VT == MVT::i8)
2664         return std::make_pair(0U, &AVR::GPR8RegClass);
2665       else if (VT == MVT::i16)
2666         return std::make_pair(0U, &AVR::DREGSRegClass);
2667       break;
2668     case 't': // Temporary register: r0.
2669       if (VT == MVT::i8)
2670         return std::make_pair(unsigned(Subtarget.getTmpRegister()),
2671                               &AVR::GPR8RegClass);
2672       break;
2673     case 'w': // Special upper register pairs: r24, r26, r28, r30.
2674       if (VT == MVT::i8 || VT == MVT::i16)
2675         return std::make_pair(0U, &AVR::IWREGSRegClass);
2676       break;
2677     case 'x': // Pointer register pair X: r27:r26.
2678     case 'X':
2679       if (VT == MVT::i8 || VT == MVT::i16)
2680         return std::make_pair(unsigned(AVR::R27R26), &AVR::PTRREGSRegClass);
2681       break;
2682     case 'y': // Pointer register pair Y: r29:r28.
2683     case 'Y':
2684       if (VT == MVT::i8 || VT == MVT::i16)
2685         return std::make_pair(unsigned(AVR::R29R28), &AVR::PTRREGSRegClass);
2686       break;
2687     case 'z': // Pointer register pair Z: r31:r30.
2688     case 'Z':
2689       if (VT == MVT::i8 || VT == MVT::i16)
2690         return std::make_pair(unsigned(AVR::R31R30), &AVR::PTRREGSRegClass);
2691       break;
2692     default:
2693       break;
2694     }
2695   }
2696 
2697   return TargetLowering::getRegForInlineAsmConstraint(
2698       Subtarget.getRegisterInfo(), Constraint, VT);
2699 }
2700 
2701 void AVRTargetLowering::LowerAsmOperandForConstraint(SDValue Op,
2702                                                      StringRef Constraint,
2703                                                      std::vector<SDValue> &Ops,
2704                                                      SelectionDAG &DAG) const {
2705   SDValue Result;
2706   SDLoc DL(Op);
2707   EVT Ty = Op.getValueType();
2708 
2709   // Currently only support length 1 constraints.
2710   if (Constraint.size() != 1) {
2711     return;
2712   }
2713 
2714   char ConstraintLetter = Constraint[0];
2715   switch (ConstraintLetter) {
2716   default:
2717     break;
2718   // Deal with integers first:
2719   case 'I':
2720   case 'J':
2721   case 'K':
2722   case 'L':
2723   case 'M':
2724   case 'N':
2725   case 'O':
2726   case 'P':
2727   case 'R': {
2728     const ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op);
2729     if (!C) {
2730       return;
2731     }
2732 
2733     int64_t CVal64 = C->getSExtValue();
2734     uint64_t CUVal64 = C->getZExtValue();
2735     switch (ConstraintLetter) {
2736     case 'I': // 0..63
2737       if (!isUInt<6>(CUVal64))
2738         return;
2739       Result = DAG.getTargetConstant(CUVal64, DL, Ty);
2740       break;
2741     case 'J': // -63..0
2742       if (CVal64 < -63 || CVal64 > 0)
2743         return;
2744       Result = DAG.getTargetConstant(CVal64, DL, Ty);
2745       break;
2746     case 'K': // 2
2747       if (CUVal64 != 2)
2748         return;
2749       Result = DAG.getTargetConstant(CUVal64, DL, Ty);
2750       break;
2751     case 'L': // 0
2752       if (CUVal64 != 0)
2753         return;
2754       Result = DAG.getTargetConstant(CUVal64, DL, Ty);
2755       break;
2756     case 'M': // 0..255
2757       if (!isUInt<8>(CUVal64))
2758         return;
2759       // i8 type may be printed as a negative number,
2760       // e.g. 254 would be printed as -2,
2761       // so we force it to i16 at least.
2762       if (Ty.getSimpleVT() == MVT::i8) {
2763         Ty = MVT::i16;
2764       }
2765       Result = DAG.getTargetConstant(CUVal64, DL, Ty);
2766       break;
2767     case 'N': // -1
2768       if (CVal64 != -1)
2769         return;
2770       Result = DAG.getTargetConstant(CVal64, DL, Ty);
2771       break;
2772     case 'O': // 8, 16, 24
2773       if (CUVal64 != 8 && CUVal64 != 16 && CUVal64 != 24)
2774         return;
2775       Result = DAG.getTargetConstant(CUVal64, DL, Ty);
2776       break;
2777     case 'P': // 1
2778       if (CUVal64 != 1)
2779         return;
2780       Result = DAG.getTargetConstant(CUVal64, DL, Ty);
2781       break;
2782     case 'R': // -6..5
2783       if (CVal64 < -6 || CVal64 > 5)
2784         return;
2785       Result = DAG.getTargetConstant(CVal64, DL, Ty);
2786       break;
2787     }
2788 
2789     break;
2790   }
2791   case 'G':
2792     const ConstantFPSDNode *FC = dyn_cast<ConstantFPSDNode>(Op);
2793     if (!FC || !FC->isZero())
2794       return;
2795     // Soften float to i8 0
2796     Result = DAG.getTargetConstant(0, DL, MVT::i8);
2797     break;
2798   }
2799 
2800   if (Result.getNode()) {
2801     Ops.push_back(Result);
2802     return;
2803   }
2804 
2805   return TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
2806 }
2807 
2808 Register AVRTargetLowering::getRegisterByName(const char *RegName, LLT VT,
2809                                               const MachineFunction &MF) const {
2810   Register Reg;
2811 
2812   if (VT == LLT::scalar(8)) {
2813     Reg = StringSwitch<unsigned>(RegName)
2814               .Case("r0", AVR::R0)
2815               .Case("r1", AVR::R1)
2816               .Default(0);
2817   } else {
2818     Reg = StringSwitch<unsigned>(RegName)
2819               .Case("r0", AVR::R1R0)
2820               .Case("sp", AVR::SP)
2821               .Default(0);
2822   }
2823 
2824   if (Reg)
2825     return Reg;
2826 
2827   report_fatal_error(
2828       Twine("Invalid register name \"" + StringRef(RegName) + "\"."));
2829 }
2830 
2831 } // end of namespace llvm
2832