//===-- llvm/CodeGen/GlobalISel/MachineIRBuilder.cpp - MIBuilder--*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements the MachineIRBuilder class.
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/CodeGen/GlobalISel/GISelChangeObserver.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/IR/DebugInfo.h"

using namespace llvm;

void MachineIRBuilder::setMF(MachineFunction &MF) {
  State.MF = &MF;
  State.MBB = nullptr;
  State.MRI = &MF.getRegInfo();
  State.TII = MF.getSubtarget().getInstrInfo();
  State.DL = DebugLoc();
  State.II = MachineBasicBlock::iterator();
  State.Observer = nullptr;
}

//------------------------------------------------------------------------------
// Build instruction variants.
//------------------------------------------------------------------------------

MachineInstrBuilder MachineIRBuilder::buildInstrNoInsert(unsigned Opcode) {
  MachineInstrBuilder MIB = BuildMI(getMF(), getDL(), getTII().get(Opcode));
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::insertInstr(MachineInstrBuilder MIB) {
  getMBB().insert(getInsertPt(), MIB);
  recordInsertion(MIB);
  return MIB;
}
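
// Example (illustrative sketch; MIRBuilder, DstReg and SrcReg are assumed
// names): buildInstrNoInsert and insertInstr split creation from insertion,
// so operands can be attached before the instruction is placed and the
// change observer notified:
//
//   MachineInstrBuilder MIB = MIRBuilder.buildInstrNoInsert(TargetOpcode::COPY);
//   MIB.addDef(DstReg).addUse(SrcReg); // attach operands first
//   MIRBuilder.insertInstr(MIB);       // then insert at the current point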

MachineInstrBuilder
MachineIRBuilder::buildDirectDbgValue(Register Reg, const MDNode *Variable,
                                      const MDNode *Expr) {
  assert(isa<DILocalVariable>(Variable) && "not a variable");
  assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
  assert(
      cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) &&
      "Expected inlined-at fields to agree");
  return insertInstr(BuildMI(getMF(), getDL(),
                             getTII().get(TargetOpcode::DBG_VALUE),
                             /*IsIndirect*/ false, Reg, Variable, Expr));
}

MachineInstrBuilder
MachineIRBuilder::buildIndirectDbgValue(Register Reg, const MDNode *Variable,
                                        const MDNode *Expr) {
  assert(isa<DILocalVariable>(Variable) && "not a variable");
  assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
  assert(
      cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) &&
      "Expected inlined-at fields to agree");
  return insertInstr(BuildMI(getMF(), getDL(),
                             getTII().get(TargetOpcode::DBG_VALUE),
                             /*IsIndirect*/ true, Reg, Variable, Expr));
}

MachineInstrBuilder MachineIRBuilder::buildFIDbgValue(int FI,
                                                      const MDNode *Variable,
                                                      const MDNode *Expr) {
  assert(isa<DILocalVariable>(Variable) && "not a variable");
  assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
  assert(
      cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) &&
      "Expected inlined-at fields to agree");
  return buildInstr(TargetOpcode::DBG_VALUE)
      .addFrameIndex(FI)
      .addImm(0)
      .addMetadata(Variable)
      .addMetadata(Expr);
}

MachineInstrBuilder MachineIRBuilder::buildConstDbgValue(const Constant &C,
                                                         const MDNode *Variable,
                                                         const MDNode *Expr) {
  assert(isa<DILocalVariable>(Variable) && "not a variable");
  assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
  assert(
      cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) &&
      "Expected inlined-at fields to agree");
  auto MIB = buildInstrNoInsert(TargetOpcode::DBG_VALUE);
  if (auto *CI = dyn_cast<ConstantInt>(&C)) {
    if (CI->getBitWidth() > 64)
      MIB.addCImm(CI);
    else
      MIB.addImm(CI->getZExtValue());
  } else if (auto *CFP = dyn_cast<ConstantFP>(&C)) {
    MIB.addFPImm(CFP);
  } else {
    // Insert $noreg if we didn't find a usable constant and had to drop it.
    MIB.addReg(Register());
  }

  MIB.addImm(0).addMetadata(Variable).addMetadata(Expr);
  return insertInstr(MIB);
}

MachineInstrBuilder MachineIRBuilder::buildDbgLabel(const MDNode *Label) {
  assert(isa<DILabel>(Label) && "not a label");
  assert(cast<DILabel>(Label)->isValidLocationForIntrinsic(State.DL) &&
         "Expected inlined-at fields to agree");
  auto MIB = buildInstr(TargetOpcode::DBG_LABEL);

  return MIB.addMetadata(Label);
}

MachineInstrBuilder MachineIRBuilder::buildDynStackAlloc(const DstOp &Res,
                                                         const SrcOp &Size,
                                                         Align Alignment) {
  assert(Res.getLLTTy(*getMRI()).isPointer() && "expected ptr dst type");
  auto MIB = buildInstr(TargetOpcode::G_DYN_STACKALLOC);
  Res.addDefToMIB(*getMRI(), MIB);
  Size.addSrcToMIB(MIB);
  MIB.addImm(Alignment.value());
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::buildFrameIndex(const DstOp &Res,
                                                      int Idx) {
  assert(Res.getLLTTy(*getMRI()).isPointer() && "invalid operand type");
  auto MIB = buildInstr(TargetOpcode::G_FRAME_INDEX);
  Res.addDefToMIB(*getMRI(), MIB);
  MIB.addFrameIndex(Idx);
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::buildGlobalValue(const DstOp &Res,
                                                       const GlobalValue *GV) {
  assert(Res.getLLTTy(*getMRI()).isPointer() && "invalid operand type");
  assert(Res.getLLTTy(*getMRI()).getAddressSpace() ==
             GV->getType()->getAddressSpace() &&
         "address space mismatch");

  auto MIB = buildInstr(TargetOpcode::G_GLOBAL_VALUE);
  Res.addDefToMIB(*getMRI(), MIB);
  MIB.addGlobalAddress(GV);
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::buildJumpTable(const LLT PtrTy,
                                                     unsigned JTI) {
  return buildInstr(TargetOpcode::G_JUMP_TABLE, {PtrTy}, {})
      .addJumpTableIndex(JTI);
}
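
// Example (illustrative sketch; PtrTy, JTI and IndexReg are assumed names):
// buildJumpTable pairs with buildBrJT when lowering a switch, where JTI
// comes from MachineJumpTableInfo and IndexReg holds the computed case index:
//
//   auto TablePtr = MIRBuilder.buildJumpTable(PtrTy, JTI);
//   MIRBuilder.buildBrJT(TablePtr.getReg(0), JTI, IndexReg);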

void MachineIRBuilder::validateUnaryOp(const LLT Res, const LLT Op0) {
  assert((Res.isScalar() || Res.isVector()) && "invalid operand type");
  assert((Res == Op0) && "type mismatch");
}

void MachineIRBuilder::validateBinaryOp(const LLT Res, const LLT Op0,
                                        const LLT Op1) {
  assert((Res.isScalar() || Res.isVector()) && "invalid operand type");
  assert((Res == Op0 && Res == Op1) && "type mismatch");
}

void MachineIRBuilder::validateShiftOp(const LLT Res, const LLT Op0,
                                       const LLT Op1) {
  assert((Res.isScalar() || Res.isVector()) && "invalid operand type");
  assert((Res == Op0) && "type mismatch");
}

MachineInstrBuilder MachineIRBuilder::buildPtrAdd(const DstOp &Res,
                                                  const SrcOp &Op0,
                                                  const SrcOp &Op1) {
  assert(Res.getLLTTy(*getMRI()).getScalarType().isPointer() &&
         Res.getLLTTy(*getMRI()) == Op0.getLLTTy(*getMRI()) && "type mismatch");
  assert(Op1.getLLTTy(*getMRI()).getScalarType().isScalar() &&
         "invalid offset type");

  return buildInstr(TargetOpcode::G_PTR_ADD, {Res}, {Op0, Op1});
}

Optional<MachineInstrBuilder>
MachineIRBuilder::materializePtrAdd(Register &Res, Register Op0,
                                    const LLT ValueTy, uint64_t Value) {
  assert(Res == 0 && "Res is a result argument");
  assert(ValueTy.isScalar() && "invalid offset type");

  if (Value == 0) {
    Res = Op0;
    return None;
  }

  Res = getMRI()->createGenericVirtualRegister(getMRI()->getType(Op0));
  auto Cst = buildConstant(ValueTy, Value);
  return buildPtrAdd(Res, Op0, Cst.getReg(0));
}
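
// Example (illustrative sketch; BasePtr, OffsetTy and Offset are assumed
// names): materializePtrAdd returns None and reuses the base register when
// the offset is zero, so callers must not assume a new instruction exists:
//
//   Register Sum; // result argument; must be passed in invalid
//   if (auto MIB = MIRBuilder.materializePtrAdd(Sum, BasePtr, OffsetTy, Offset))
//     (*MIB)->setFlag(MachineInstr::NoUWrap); // only if one was created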

MachineInstrBuilder MachineIRBuilder::buildMaskLowPtrBits(const DstOp &Res,
                                                          const SrcOp &Op0,
                                                          uint32_t NumBits) {
  LLT PtrTy = Res.getLLTTy(*getMRI());
  LLT MaskTy = LLT::scalar(PtrTy.getSizeInBits());
  Register MaskReg = getMRI()->createGenericVirtualRegister(MaskTy);
  buildConstant(MaskReg, maskTrailingZeros<uint64_t>(NumBits));
  return buildPtrMask(Res, Op0, MaskReg);
}

MachineInstrBuilder
MachineIRBuilder::buildPadVectorWithUndefElements(const DstOp &Res,
                                                  const SrcOp &Op0) {
  LLT ResTy = Res.getLLTTy(*getMRI());
  LLT Op0Ty = Op0.getLLTTy(*getMRI());

  assert((ResTy.isVector() && Op0Ty.isVector()) && "Non vector type");
  assert((ResTy.getElementType() == Op0Ty.getElementType()) &&
         "Different vector element types");
  assert((ResTy.getNumElements() > Op0Ty.getNumElements()) &&
         "Op0 has more elements");

  auto Unmerge = buildUnmerge(Op0Ty.getElementType(), Op0);
  SmallVector<Register, 8> Regs;
  for (auto Op : Unmerge.getInstr()->defs())
    Regs.push_back(Op.getReg());
  Register Undef = buildUndef(Op0Ty.getElementType()).getReg(0);
  unsigned NumberOfPadElts = ResTy.getNumElements() - Regs.size();
  for (unsigned i = 0; i < NumberOfPadElts; ++i)
    Regs.push_back(Undef);
  return buildMerge(Res, Regs);
}
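
// Example (illustrative sketch; V2Src is an assumed <2 x s32> value):
// padding it out to a <4 x s32> destination appends two undef elements:
//
//   LLT V4S32 = LLT::fixed_vector(4, 32);
//   auto Padded = MIRBuilder.buildPadVectorWithUndefElements(V4S32, V2Src);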

MachineInstrBuilder
MachineIRBuilder::buildDeleteTrailingVectorElements(const DstOp &Res,
                                                    const SrcOp &Op0) {
  LLT ResTy = Res.getLLTTy(*getMRI());
  LLT Op0Ty = Op0.getLLTTy(*getMRI());

  assert((ResTy.isVector() && Op0Ty.isVector()) && "Non vector type");
  assert((ResTy.getElementType() == Op0Ty.getElementType()) &&
         "Different vector element types");
  assert((ResTy.getNumElements() < Op0Ty.getNumElements()) &&
         "Op0 has fewer elements");

  SmallVector<Register, 8> Regs;
  auto Unmerge = buildUnmerge(Op0Ty.getElementType(), Op0);
  for (unsigned i = 0; i < ResTy.getNumElements(); ++i)
    Regs.push_back(Unmerge.getReg(i));
  return buildMerge(Res, Regs);
}

MachineInstrBuilder MachineIRBuilder::buildBr(MachineBasicBlock &Dest) {
  return buildInstr(TargetOpcode::G_BR).addMBB(&Dest);
}

MachineInstrBuilder MachineIRBuilder::buildBrIndirect(Register Tgt) {
  assert(getMRI()->getType(Tgt).isPointer() && "invalid branch destination");
  return buildInstr(TargetOpcode::G_BRINDIRECT).addUse(Tgt);
}

MachineInstrBuilder MachineIRBuilder::buildBrJT(Register TablePtr,
                                                unsigned JTI,
                                                Register IndexReg) {
  assert(getMRI()->getType(TablePtr).isPointer() &&
         "Table reg must be a pointer");
  return buildInstr(TargetOpcode::G_BRJT)
      .addUse(TablePtr)
      .addJumpTableIndex(JTI)
      .addUse(IndexReg);
}

MachineInstrBuilder MachineIRBuilder::buildCopy(const DstOp &Res,
                                                const SrcOp &Op) {
  return buildInstr(TargetOpcode::COPY, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildAssertSExt(const DstOp &Res,
                                                      const SrcOp &Op,
                                                      unsigned Size) {
  return buildInstr(TargetOpcode::G_ASSERT_SEXT, Res, Op).addImm(Size);
}

MachineInstrBuilder MachineIRBuilder::buildAssertZExt(const DstOp &Res,
                                                      const SrcOp &Op,
                                                      unsigned Size) {
  return buildInstr(TargetOpcode::G_ASSERT_ZEXT, Res, Op).addImm(Size);
}

MachineInstrBuilder MachineIRBuilder::buildConstant(const DstOp &Res,
                                                    const ConstantInt &Val) {
  LLT Ty = Res.getLLTTy(*getMRI());
  LLT EltTy = Ty.getScalarType();
  assert(EltTy.getScalarSizeInBits() == Val.getBitWidth() &&
         "creating constant with the wrong size");

  if (Ty.isVector()) {
    auto Const = buildInstr(TargetOpcode::G_CONSTANT)
                     .addDef(getMRI()->createGenericVirtualRegister(EltTy))
                     .addCImm(&Val);
    return buildSplatVector(Res, Const);
  }

  auto Const = buildInstr(TargetOpcode::G_CONSTANT);
  Const->setDebugLoc(DebugLoc());
  Res.addDefToMIB(*getMRI(), Const);
  Const.addCImm(&Val);
  return Const;
}

MachineInstrBuilder MachineIRBuilder::buildConstant(const DstOp &Res,
                                                    int64_t Val) {
  auto IntN = IntegerType::get(getMF().getFunction().getContext(),
                               Res.getLLTTy(*getMRI()).getScalarSizeInBits());
  ConstantInt *CI = ConstantInt::get(IntN, Val, true);
  return buildConstant(Res, *CI);
}
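
// Example (illustrative sketch): buildConstant splats automatically for
// vector destinations, emitting one scalar G_CONSTANT feeding a
// G_BUILD_VECTOR:
//
//   auto C42 = MIRBuilder.buildConstant(LLT::scalar(32), 42);
//   auto Splat = MIRBuilder.buildConstant(LLT::fixed_vector(4, 32), 7);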

MachineInstrBuilder MachineIRBuilder::buildFConstant(const DstOp &Res,
                                                     const ConstantFP &Val) {
  LLT Ty = Res.getLLTTy(*getMRI());
  LLT EltTy = Ty.getScalarType();

  assert(APFloat::getSizeInBits(Val.getValueAPF().getSemantics())
         == EltTy.getSizeInBits() &&
         "creating fconstant with the wrong size");

  assert(!Ty.isPointer() && "invalid operand type");

  if (Ty.isVector()) {
    auto Const = buildInstr(TargetOpcode::G_FCONSTANT)
                     .addDef(getMRI()->createGenericVirtualRegister(EltTy))
                     .addFPImm(&Val);

    return buildSplatVector(Res, Const);
  }

  auto Const = buildInstr(TargetOpcode::G_FCONSTANT);
  Const->setDebugLoc(DebugLoc());
  Res.addDefToMIB(*getMRI(), Const);
  Const.addFPImm(&Val);
  return Const;
}

MachineInstrBuilder MachineIRBuilder::buildConstant(const DstOp &Res,
                                                    const APInt &Val) {
  ConstantInt *CI = ConstantInt::get(getMF().getFunction().getContext(), Val);
  return buildConstant(Res, *CI);
}

MachineInstrBuilder MachineIRBuilder::buildFConstant(const DstOp &Res,
                                                     double Val) {
  LLT DstTy = Res.getLLTTy(*getMRI());
  auto &Ctx = getMF().getFunction().getContext();
  auto *CFP =
      ConstantFP::get(Ctx, getAPFloatFromSize(Val, DstTy.getScalarSizeInBits()));
  return buildFConstant(Res, *CFP);
}

MachineInstrBuilder MachineIRBuilder::buildFConstant(const DstOp &Res,
                                                     const APFloat &Val) {
  auto &Ctx = getMF().getFunction().getContext();
  auto *CFP = ConstantFP::get(Ctx, Val);
  return buildFConstant(Res, *CFP);
}

MachineInstrBuilder MachineIRBuilder::buildBrCond(const SrcOp &Tst,
                                                  MachineBasicBlock &Dest) {
  assert(Tst.getLLTTy(*getMRI()).isScalar() && "invalid operand type");

  auto MIB = buildInstr(TargetOpcode::G_BRCOND);
  Tst.addSrcToMIB(MIB);
  MIB.addMBB(&Dest);
  return MIB;
}

MachineInstrBuilder
MachineIRBuilder::buildLoad(const DstOp &Dst, const SrcOp &Addr,
                            MachinePointerInfo PtrInfo, Align Alignment,
                            MachineMemOperand::Flags MMOFlags,
                            const AAMDNodes &AAInfo) {
  MMOFlags |= MachineMemOperand::MOLoad;
  assert((MMOFlags & MachineMemOperand::MOStore) == 0);

  LLT Ty = Dst.getLLTTy(*getMRI());
  MachineMemOperand *MMO =
      getMF().getMachineMemOperand(PtrInfo, MMOFlags, Ty, Alignment, AAInfo);
  return buildLoad(Dst, Addr, *MMO);
}

MachineInstrBuilder MachineIRBuilder::buildLoadInstr(unsigned Opcode,
                                                     const DstOp &Res,
                                                     const SrcOp &Addr,
                                                     MachineMemOperand &MMO) {
  assert(Res.getLLTTy(*getMRI()).isValid() && "invalid operand type");
  assert(Addr.getLLTTy(*getMRI()).isPointer() && "invalid operand type");

  auto MIB = buildInstr(Opcode);
  Res.addDefToMIB(*getMRI(), MIB);
  Addr.addSrcToMIB(MIB);
  MIB.addMemOperand(&MMO);
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::buildLoadFromOffset(
    const DstOp &Dst, const SrcOp &BasePtr,
    MachineMemOperand &BaseMMO, int64_t Offset) {
  LLT LoadTy = Dst.getLLTTy(*getMRI());
  MachineMemOperand *OffsetMMO =
      getMF().getMachineMemOperand(&BaseMMO, Offset, LoadTy);

  if (Offset == 0) // This may be a size or type changing load.
    return buildLoad(Dst, BasePtr, *OffsetMMO);

  LLT PtrTy = BasePtr.getLLTTy(*getMRI());
  LLT OffsetTy = LLT::scalar(PtrTy.getSizeInBits());
  auto ConstOffset = buildConstant(OffsetTy, Offset);
  auto Ptr = buildPtrAdd(PtrTy, BasePtr, ConstOffset);
  return buildLoad(Dst, Ptr, *OffsetMMO);
}
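
// Example (illustrative sketch; BasePtr and BaseMMO are assumed names):
// loading an s32 field 4 bytes past an existing base memory operand:
//
//   auto Field = MIRBuilder.buildLoadFromOffset(LLT::scalar(32), BasePtr,
//                                               BaseMMO, /*Offset=*/4);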

MachineInstrBuilder MachineIRBuilder::buildStore(const SrcOp &Val,
                                                 const SrcOp &Addr,
                                                 MachineMemOperand &MMO) {
  assert(Val.getLLTTy(*getMRI()).isValid() && "invalid operand type");
  assert(Addr.getLLTTy(*getMRI()).isPointer() && "invalid operand type");

  auto MIB = buildInstr(TargetOpcode::G_STORE);
  Val.addSrcToMIB(MIB);
  Addr.addSrcToMIB(MIB);
  MIB.addMemOperand(&MMO);
  return MIB;
}

MachineInstrBuilder
MachineIRBuilder::buildStore(const SrcOp &Val, const SrcOp &Addr,
                             MachinePointerInfo PtrInfo, Align Alignment,
                             MachineMemOperand::Flags MMOFlags,
                             const AAMDNodes &AAInfo) {
  MMOFlags |= MachineMemOperand::MOStore;
  assert((MMOFlags & MachineMemOperand::MOLoad) == 0);

  LLT Ty = Val.getLLTTy(*getMRI());
  MachineMemOperand *MMO =
      getMF().getMachineMemOperand(PtrInfo, MMOFlags, Ty, Alignment, AAInfo);
  return buildStore(Val, Addr, *MMO);
}

MachineInstrBuilder MachineIRBuilder::buildAnyExt(const DstOp &Res,
                                                  const SrcOp &Op) {
  return buildInstr(TargetOpcode::G_ANYEXT, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildSExt(const DstOp &Res,
                                                const SrcOp &Op) {
  return buildInstr(TargetOpcode::G_SEXT, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildZExt(const DstOp &Res,
                                                const SrcOp &Op) {
  return buildInstr(TargetOpcode::G_ZEXT, Res, Op);
}

unsigned MachineIRBuilder::getBoolExtOp(bool IsVec, bool IsFP) const {
  const auto *TLI = getMF().getSubtarget().getTargetLowering();
  switch (TLI->getBooleanContents(IsVec, IsFP)) {
  case TargetLoweringBase::ZeroOrNegativeOneBooleanContent:
    return TargetOpcode::G_SEXT;
  case TargetLoweringBase::ZeroOrOneBooleanContent:
    return TargetOpcode::G_ZEXT;
  default:
    return TargetOpcode::G_ANYEXT;
  }
}
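
// Example (illustrative sketch): the right extension for a boolean depends
// on the target's boolean contents, which this hook encapsulates:
//
//   unsigned ExtOp = MIRBuilder.getBoolExtOp(/*IsVec=*/false, /*IsFP=*/false);
//   // ExtOp is one of G_SEXT, G_ZEXT or G_ANYEXT.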

MachineInstrBuilder MachineIRBuilder::buildBoolExt(const DstOp &Res,
                                                   const SrcOp &Op,
                                                   bool IsFP) {
  unsigned ExtOp =
      getBoolExtOp(getMRI()->getType(Op.getReg()).isVector(), IsFP);
  return buildInstr(ExtOp, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildExtOrTrunc(unsigned ExtOpc,
                                                      const DstOp &Res,
                                                      const SrcOp &Op) {
  assert((TargetOpcode::G_ANYEXT == ExtOpc || TargetOpcode::G_ZEXT == ExtOpc ||
          TargetOpcode::G_SEXT == ExtOpc) &&
         "Expecting Extending Opc");
  assert(Res.getLLTTy(*getMRI()).isScalar() ||
         Res.getLLTTy(*getMRI()).isVector());
  assert(Res.getLLTTy(*getMRI()).isScalar() ==
         Op.getLLTTy(*getMRI()).isScalar());

  unsigned Opcode = TargetOpcode::COPY;
  if (Res.getLLTTy(*getMRI()).getSizeInBits() >
      Op.getLLTTy(*getMRI()).getSizeInBits())
    Opcode = ExtOpc;
  else if (Res.getLLTTy(*getMRI()).getSizeInBits() <
           Op.getLLTTy(*getMRI()).getSizeInBits())
    Opcode = TargetOpcode::G_TRUNC;
  else
    assert(Res.getLLTTy(*getMRI()) == Op.getLLTTy(*getMRI()));

  return buildInstr(Opcode, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildSExtOrTrunc(const DstOp &Res,
                                                       const SrcOp &Op) {
  return buildExtOrTrunc(TargetOpcode::G_SEXT, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildZExtOrTrunc(const DstOp &Res,
                                                       const SrcOp &Op) {
  return buildExtOrTrunc(TargetOpcode::G_ZEXT, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildAnyExtOrTrunc(const DstOp &Res,
                                                         const SrcOp &Op) {
  return buildExtOrTrunc(TargetOpcode::G_ANYEXT, Res, Op);
}
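
// Example (illustrative sketch; Src32 is an assumed s32 value): the
// *OrTrunc helpers pick the extend, G_TRUNC or COPY themselves, so callers
// need not compare sizes:
//
//   auto A = MIRBuilder.buildZExtOrTrunc(LLT::scalar(64), Src32); // G_ZEXT
//   auto B = MIRBuilder.buildZExtOrTrunc(LLT::scalar(16), Src32); // G_TRUNC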

MachineInstrBuilder MachineIRBuilder::buildZExtInReg(const DstOp &Res,
                                                     const SrcOp &Op,
                                                     int64_t ImmOp) {
  LLT ResTy = Res.getLLTTy(*getMRI());
  auto Mask = buildConstant(
      ResTy, APInt::getLowBitsSet(ResTy.getScalarSizeInBits(), ImmOp));
  return buildAnd(Res, Op, Mask);
}
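
// Example (illustrative sketch; Src is an assumed s32 value): zero-extending
// the low 8 bits in-register is an AND with 0xff, which is exactly what
// buildZExtInReg emits:
//
//   auto Masked = MIRBuilder.buildZExtInReg(LLT::scalar(32), Src, 8);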

MachineInstrBuilder MachineIRBuilder::buildCast(const DstOp &Dst,
                                                const SrcOp &Src) {
  LLT SrcTy = Src.getLLTTy(*getMRI());
  LLT DstTy = Dst.getLLTTy(*getMRI());
  if (SrcTy == DstTy)
    return buildCopy(Dst, Src);

  unsigned Opcode;
  if (SrcTy.isPointer() && DstTy.isScalar())
    Opcode = TargetOpcode::G_PTRTOINT;
  else if (DstTy.isPointer() && SrcTy.isScalar())
    Opcode = TargetOpcode::G_INTTOPTR;
  else {
    assert(!SrcTy.isPointer() && !DstTy.isPointer() && "no G_ADDRCAST yet");
    Opcode = TargetOpcode::G_BITCAST;
  }

  return buildInstr(Opcode, Dst, Src);
}

MachineInstrBuilder MachineIRBuilder::buildExtract(const DstOp &Dst,
                                                   const SrcOp &Src,
                                                   uint64_t Index) {
  LLT SrcTy = Src.getLLTTy(*getMRI());
  LLT DstTy = Dst.getLLTTy(*getMRI());

#ifndef NDEBUG
  assert(SrcTy.isValid() && "invalid operand type");
  assert(DstTy.isValid() && "invalid operand type");
  assert(Index + DstTy.getSizeInBits() <= SrcTy.getSizeInBits() &&
         "extracting off end of register");
#endif

  if (DstTy.getSizeInBits() == SrcTy.getSizeInBits()) {
    assert(Index == 0 && "extraction past the end of a register");
    return buildCast(Dst, Src);
  }

  auto Extract = buildInstr(TargetOpcode::G_EXTRACT);
  Dst.addDefToMIB(*getMRI(), Extract);
  Src.addSrcToMIB(Extract);
  Extract.addImm(Index);
  return Extract;
}

void MachineIRBuilder::buildSequence(Register Res, ArrayRef<Register> Ops,
                                     ArrayRef<uint64_t> Indices) {
#ifndef NDEBUG
  assert(Ops.size() == Indices.size() && "incompatible args");
  assert(!Ops.empty() && "invalid trivial sequence");
  assert(llvm::is_sorted(Indices) &&
         "sequence offsets must be in ascending order");

  assert(getMRI()->getType(Res).isValid() && "invalid operand type");
  for (auto Op : Ops)
    assert(getMRI()->getType(Op).isValid() && "invalid operand type");
#endif

  LLT ResTy = getMRI()->getType(Res);
  LLT OpTy = getMRI()->getType(Ops[0]);
  unsigned OpSize = OpTy.getSizeInBits();
  bool MaybeMerge = true;
  for (unsigned i = 0; i < Ops.size(); ++i) {
    if (getMRI()->getType(Ops[i]) != OpTy || Indices[i] != i * OpSize) {
      MaybeMerge = false;
      break;
    }
  }

  if (MaybeMerge && Ops.size() * OpSize == ResTy.getSizeInBits()) {
    buildMerge(Res, Ops);
    return;
  }

  Register ResIn = getMRI()->createGenericVirtualRegister(ResTy);
  buildUndef(ResIn);

  for (unsigned i = 0; i < Ops.size(); ++i) {
    Register ResOut = i + 1 == Ops.size()
                          ? Res
                          : getMRI()->createGenericVirtualRegister(ResTy);
    buildInsert(ResOut, ResIn, Ops[i], Indices[i]);
    ResIn = ResOut;
  }
}
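
// Example (illustrative sketch; Res64, Lo32 and Hi32 are assumed registers):
// packing two s32 values at offsets 0 and 32 into an s64 takes the
// G_MERGE_VALUES fast path above:
//
//   Register Parts[] = {Lo32, Hi32};
//   uint64_t Offsets[] = {0, 32};
//   MIRBuilder.buildSequence(Res64, Parts, Offsets);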

MachineInstrBuilder MachineIRBuilder::buildUndef(const DstOp &Res) {
  return buildInstr(TargetOpcode::G_IMPLICIT_DEF, {Res}, {});
}

MachineInstrBuilder MachineIRBuilder::buildMerge(const DstOp &Res,
                                                 ArrayRef<Register> Ops) {
  // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>,
  // we need some temporary storage for the SrcOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<SrcOp, 8> TmpVec(Ops.begin(), Ops.end());
  assert(TmpVec.size() > 1);
  return buildInstr(TargetOpcode::G_MERGE_VALUES, Res, TmpVec);
}

MachineInstrBuilder
MachineIRBuilder::buildMerge(const DstOp &Res,
                             std::initializer_list<SrcOp> Ops) {
  assert(Ops.size() > 1);
  return buildInstr(TargetOpcode::G_MERGE_VALUES, Res, Ops);
}

MachineInstrBuilder MachineIRBuilder::buildUnmerge(ArrayRef<LLT> Res,
                                                   const SrcOp &Op) {
  // Unfortunately to convert from ArrayRef<LLT> to ArrayRef<DstOp>,
  // we need some temporary storage for the DstOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<DstOp, 8> TmpVec(Res.begin(), Res.end());
  assert(TmpVec.size() > 1);
  return buildInstr(TargetOpcode::G_UNMERGE_VALUES, TmpVec, Op);
}

MachineInstrBuilder MachineIRBuilder::buildUnmerge(LLT Res,
                                                   const SrcOp &Op) {
  unsigned NumReg =
      Op.getLLTTy(*getMRI()).getSizeInBits() / Res.getSizeInBits();
  SmallVector<DstOp, 8> TmpVec(NumReg, Res);
  return buildInstr(TargetOpcode::G_UNMERGE_VALUES, TmpVec, Op);
}
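
// Example (illustrative sketch; Val64 is an assumed s64 value): splitting it
// into two s32 halves:
//
//   auto Unmerge = MIRBuilder.buildUnmerge(LLT::scalar(32), Val64);
//   Register Lo = Unmerge.getReg(0);
//   Register Hi = Unmerge.getReg(1);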

MachineInstrBuilder MachineIRBuilder::buildUnmerge(ArrayRef<Register> Res,
                                                   const SrcOp &Op) {
  // Unfortunately to convert from ArrayRef<Register> to ArrayRef<DstOp>,
  // we need some temporary storage for the DstOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<DstOp, 8> TmpVec(Res.begin(), Res.end());
  assert(TmpVec.size() > 1);
  return buildInstr(TargetOpcode::G_UNMERGE_VALUES, TmpVec, Op);
}

MachineInstrBuilder MachineIRBuilder::buildBuildVector(const DstOp &Res,
                                                       ArrayRef<Register> Ops) {
  // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>,
  // we need some temporary storage for the SrcOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<SrcOp, 8> TmpVec(Ops.begin(), Ops.end());
  return buildInstr(TargetOpcode::G_BUILD_VECTOR, Res, TmpVec);
}

MachineInstrBuilder MachineIRBuilder::buildSplatVector(const DstOp &Res,
                                                       const SrcOp &Src) {
  SmallVector<SrcOp, 8> TmpVec(Res.getLLTTy(*getMRI()).getNumElements(), Src);
  return buildInstr(TargetOpcode::G_BUILD_VECTOR, Res, TmpVec);
}

MachineInstrBuilder
MachineIRBuilder::buildBuildVectorTrunc(const DstOp &Res,
                                        ArrayRef<Register> Ops) {
  // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>,
  // we need some temporary storage for the SrcOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<SrcOp, 8> TmpVec(Ops.begin(), Ops.end());
  return buildInstr(TargetOpcode::G_BUILD_VECTOR_TRUNC, Res, TmpVec);
}

MachineInstrBuilder MachineIRBuilder::buildShuffleSplat(const DstOp &Res,
                                                        const SrcOp &Src) {
  LLT DstTy = Res.getLLTTy(*getMRI());
  assert(Src.getLLTTy(*getMRI()) == DstTy.getElementType() &&
         "Expected Src to match Dst elt ty");
  auto UndefVec = buildUndef(DstTy);
  auto Zero = buildConstant(LLT::scalar(64), 0);
  auto InsElt = buildInsertVectorElement(DstTy, UndefVec, Src, Zero);
  SmallVector<int, 16> ZeroMask(DstTy.getNumElements());
  return buildShuffleVector(DstTy, InsElt, UndefVec, ZeroMask);
}
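
// Example (illustrative sketch; Elt is an assumed s32 value): splatting it
// across a four-element vector via the insert + zero-mask shuffle built
// above:
//
//   auto Splat = MIRBuilder.buildShuffleSplat(LLT::fixed_vector(4, 32), Elt);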

MachineInstrBuilder MachineIRBuilder::buildShuffleVector(const DstOp &Res,
                                                         const SrcOp &Src1,
                                                         const SrcOp &Src2,
                                                         ArrayRef<int> Mask) {
  LLT DstTy = Res.getLLTTy(*getMRI());
  LLT Src1Ty = Src1.getLLTTy(*getMRI());
  LLT Src2Ty = Src2.getLLTTy(*getMRI());
  assert((size_t)(Src1Ty.getNumElements() + Src2Ty.getNumElements()) >=
         Mask.size());
  assert(DstTy.getElementType() == Src1Ty.getElementType() &&
         DstTy.getElementType() == Src2Ty.getElementType());
  (void)DstTy;
  (void)Src1Ty;
  (void)Src2Ty;
  ArrayRef<int> MaskAlloc = getMF().allocateShuffleMask(Mask);
  return buildInstr(TargetOpcode::G_SHUFFLE_VECTOR, {Res}, {Src1, Src2})
      .addShuffleMask(MaskAlloc);
}

MachineInstrBuilder
MachineIRBuilder::buildConcatVectors(const DstOp &Res, ArrayRef<Register> Ops) {
  // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>,
  // we need some temporary storage for the SrcOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<SrcOp, 8> TmpVec(Ops.begin(), Ops.end());
  return buildInstr(TargetOpcode::G_CONCAT_VECTORS, Res, TmpVec);
}

MachineInstrBuilder MachineIRBuilder::buildInsert(const DstOp &Res,
                                                  const SrcOp &Src,
                                                  const SrcOp &Op,
                                                  unsigned Index) {
  assert(Index + Op.getLLTTy(*getMRI()).getSizeInBits() <=
             Res.getLLTTy(*getMRI()).getSizeInBits() &&
         "insertion past the end of a register");

  if (Res.getLLTTy(*getMRI()).getSizeInBits() ==
      Op.getLLTTy(*getMRI()).getSizeInBits()) {
    return buildCast(Res, Op);
  }

  return buildInstr(TargetOpcode::G_INSERT, Res, {Src, Op, uint64_t(Index)});
}

MachineInstrBuilder MachineIRBuilder::buildIntrinsic(Intrinsic::ID ID,
                                                     ArrayRef<Register> ResultRegs,
                                                     bool HasSideEffects) {
  auto MIB =
      buildInstr(HasSideEffects ? TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS
                                : TargetOpcode::G_INTRINSIC);
  for (unsigned ResultReg : ResultRegs)
    MIB.addDef(ResultReg);
  MIB.addIntrinsicID(ID);
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::buildIntrinsic(Intrinsic::ID ID,
                                                     ArrayRef<DstOp> Results,
                                                     bool HasSideEffects) {
  auto MIB =
      buildInstr(HasSideEffects ? TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS
                                : TargetOpcode::G_INTRINSIC);
  for (DstOp Result : Results)
    Result.addDefToMIB(*getMRI(), MIB);
  MIB.addIntrinsicID(ID);
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::buildTrunc(const DstOp &Res,
                                                 const SrcOp &Op) {
  return buildInstr(TargetOpcode::G_TRUNC, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildFPTrunc(const DstOp &Res,
                                                   const SrcOp &Op,
                                                   Optional<unsigned> Flags) {
  return buildInstr(TargetOpcode::G_FPTRUNC, Res, Op, Flags);
}

MachineInstrBuilder MachineIRBuilder::buildICmp(CmpInst::Predicate Pred,
                                                const DstOp &Res,
                                                const SrcOp &Op0,
                                                const SrcOp &Op1) {
  return buildInstr(TargetOpcode::G_ICMP, Res, {Pred, Op0, Op1});
}

MachineInstrBuilder MachineIRBuilder::buildFCmp(CmpInst::Predicate Pred,
                                                const DstOp &Res,
                                                const SrcOp &Op0,
                                                const SrcOp &Op1,
                                                Optional<unsigned> Flags) {
  return buildInstr(TargetOpcode::G_FCMP, Res, {Pred, Op0, Op1}, Flags);
}

MachineInstrBuilder MachineIRBuilder::buildSelect(const DstOp &Res,
                                                  const SrcOp &Tst,
                                                  const SrcOp &Op0,
                                                  const SrcOp &Op1,
                                                  Optional<unsigned> Flags) {
  return buildInstr(TargetOpcode::G_SELECT, {Res}, {Tst, Op0, Op1}, Flags);
}

MachineInstrBuilder
MachineIRBuilder::buildInsertVectorElement(const DstOp &Res, const SrcOp &Val,
                                           const SrcOp &Elt, const SrcOp &Idx) {
  return buildInstr(TargetOpcode::G_INSERT_VECTOR_ELT, Res, {Val, Elt, Idx});
}

MachineInstrBuilder
MachineIRBuilder::buildExtractVectorElement(const DstOp &Res, const SrcOp &Val,
                                            const SrcOp &Idx) {
  return buildInstr(TargetOpcode::G_EXTRACT_VECTOR_ELT, Res, {Val, Idx});
}

MachineInstrBuilder MachineIRBuilder::buildAtomicCmpXchgWithSuccess(
    Register OldValRes, Register SuccessRes, Register Addr, Register CmpVal,
    Register NewVal, MachineMemOperand &MMO) {
#ifndef NDEBUG
  LLT OldValResTy = getMRI()->getType(OldValRes);
  LLT SuccessResTy = getMRI()->getType(SuccessRes);
  LLT AddrTy = getMRI()->getType(Addr);
  LLT CmpValTy = getMRI()->getType(CmpVal);
  LLT NewValTy = getMRI()->getType(NewVal);
  assert(OldValResTy.isScalar() && "invalid operand type");
  assert(SuccessResTy.isScalar() && "invalid operand type");
  assert(AddrTy.isPointer() && "invalid operand type");
  assert(CmpValTy.isValid() && "invalid operand type");
  assert(NewValTy.isValid() && "invalid operand type");
  assert(OldValResTy == CmpValTy && "type mismatch");
  assert(OldValResTy == NewValTy && "type mismatch");
#endif

  return buildInstr(TargetOpcode::G_ATOMIC_CMPXCHG_WITH_SUCCESS)
      .addDef(OldValRes)
      .addDef(SuccessRes)
      .addUse(Addr)
      .addUse(CmpVal)
      .addUse(NewVal)
      .addMemOperand(&MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicCmpXchg(Register OldValRes, Register Addr,
                                     Register CmpVal, Register NewVal,
                                     MachineMemOperand &MMO) {
#ifndef NDEBUG
  LLT OldValResTy = getMRI()->getType(OldValRes);
  LLT AddrTy = getMRI()->getType(Addr);
  LLT CmpValTy = getMRI()->getType(CmpVal);
  LLT NewValTy = getMRI()->getType(NewVal);
  assert(OldValResTy.isScalar() && "invalid operand type");
  assert(AddrTy.isPointer() && "invalid operand type");
  assert(CmpValTy.isValid() && "invalid operand type");
  assert(NewValTy.isValid() && "invalid operand type");
  assert(OldValResTy == CmpValTy && "type mismatch");
  assert(OldValResTy == NewValTy && "type mismatch");
#endif

  return buildInstr(TargetOpcode::G_ATOMIC_CMPXCHG)
      .addDef(OldValRes)
      .addUse(Addr)
      .addUse(CmpVal)
      .addUse(NewVal)
      .addMemOperand(&MMO);
}

MachineInstrBuilder MachineIRBuilder::buildAtomicRMW(
    unsigned Opcode, const DstOp &OldValRes, const SrcOp &Addr,
    const SrcOp &Val, MachineMemOperand &MMO) {
#ifndef NDEBUG
  LLT OldValResTy = OldValRes.getLLTTy(*getMRI());
  LLT AddrTy = Addr.getLLTTy(*getMRI());
  LLT ValTy = Val.getLLTTy(*getMRI());
  assert(OldValResTy.isScalar() && "invalid operand type");
  assert(AddrTy.isPointer() && "invalid operand type");
  assert(ValTy.isValid() && "invalid operand type");
  assert(OldValResTy == ValTy && "type mismatch");
  assert(MMO.isAtomic() && "not atomic mem operand");
#endif

  auto MIB = buildInstr(Opcode);
  OldValRes.addDefToMIB(*getMRI(), MIB);
  Addr.addSrcToMIB(MIB);
  Val.addSrcToMIB(MIB);
  MIB.addMemOperand(&MMO);
  return MIB;
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWXchg(Register OldValRes, Register Addr,
                                     Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_XCHG, OldValRes, Addr, Val,
                        MMO);
}
MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWAdd(Register OldValRes, Register Addr,
                                    Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_ADD, OldValRes, Addr, Val,
                        MMO);
}
MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWSub(Register OldValRes, Register Addr,
                                    Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_SUB, OldValRes, Addr, Val,
                        MMO);
}
MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWAnd(Register OldValRes, Register Addr,
                                    Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_AND, OldValRes, Addr, Val,
                        MMO);
}
MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWNand(Register OldValRes, Register Addr,
                                     Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_NAND, OldValRes, Addr, Val,
                        MMO);
}
MachineInstrBuilder MachineIRBuilder::buildAtomicRMWOr(Register OldValRes,
                                                       Register Addr,
                                                       Register Val,
                                                       MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_OR, OldValRes, Addr, Val,
                        MMO);
}
MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWXor(Register OldValRes, Register Addr,
                                    Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_XOR, OldValRes, Addr, Val,
                        MMO);
}
MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWMax(Register OldValRes, Register Addr,
                                    Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_MAX, OldValRes, Addr, Val,
                        MMO);
}
MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWMin(Register OldValRes, Register Addr,
                                    Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_MIN, OldValRes, Addr, Val,
                        MMO);
}
MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWUmax(Register OldValRes, Register Addr,
                                     Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_UMAX, OldValRes, Addr, Val,
                        MMO);
}
MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWUmin(Register OldValRes, Register Addr,
                                     Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_UMIN, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWFAdd(const DstOp &OldValRes, const SrcOp &Addr,
                                     const SrcOp &Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_FADD, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWFSub(const DstOp &OldValRes, const SrcOp &Addr,
                                     const SrcOp &Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_FSUB, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildFence(unsigned Ordering, unsigned Scope) {
  return buildInstr(TargetOpcode::G_FENCE)
    .addImm(Ordering)
    .addImm(Scope);
}
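
// Example (illustrative sketch): ordering and scope are passed as raw
// immediates; a sequentially consistent, system-wide fence would be:
//
//   MIRBuilder.buildFence(
//       static_cast<unsigned>(AtomicOrdering::SequentiallyConsistent),
//       SyncScope::System);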

MachineInstrBuilder
MachineIRBuilder::buildBlockAddress(Register Res, const BlockAddress *BA) {
#ifndef NDEBUG
  assert(getMRI()->getType(Res).isPointer() && "invalid res type");
#endif

  return buildInstr(TargetOpcode::G_BLOCK_ADDR).addDef(Res).addBlockAddress(BA);
}

void MachineIRBuilder::validateTruncExt(const LLT DstTy, const LLT SrcTy,
                                        bool IsExtend) {
#ifndef NDEBUG
  if (DstTy.isVector()) {
    assert(SrcTy.isVector() && "mismatched cast between vector and non-vector");
    assert(SrcTy.getNumElements() == DstTy.getNumElements() &&
           "different number of elements in a trunc/ext");
  } else
    assert(DstTy.isScalar() && SrcTy.isScalar() && "invalid extend/trunc");

  if (IsExtend)
    assert(DstTy.getSizeInBits() > SrcTy.getSizeInBits() &&
           "invalid narrowing extend");
  else
    assert(DstTy.getSizeInBits() < SrcTy.getSizeInBits() &&
           "invalid widening trunc");
#endif
}

void MachineIRBuilder::validateSelectOp(const LLT ResTy, const LLT TstTy,
                                        const LLT Op0Ty, const LLT Op1Ty) {
#ifndef NDEBUG
  assert((ResTy.isScalar() || ResTy.isVector() || ResTy.isPointer()) &&
         "invalid operand type");
  assert((ResTy == Op0Ty && ResTy == Op1Ty) && "type mismatch");
  if (ResTy.isScalar() || ResTy.isPointer())
    assert(TstTy.isScalar() && "type mismatch");
  else
    assert((TstTy.isScalar() ||
            (TstTy.isVector() &&
             TstTy.getNumElements() == Op0Ty.getNumElements())) &&
           "type mismatch");
#endif
}

MachineInstrBuilder MachineIRBuilder::buildInstr(unsigned Opc,
                                                 ArrayRef<DstOp> DstOps,
                                                 ArrayRef<SrcOp> SrcOps,
                                                 Optional<unsigned> Flags) {
  switch (Opc) {
  default:
    break;
  case TargetOpcode::G_SELECT: {
    assert(DstOps.size() == 1 && "Invalid select");
    assert(SrcOps.size() == 3 && "Invalid select");
    validateSelectOp(
        DstOps[0].getLLTTy(*getMRI()), SrcOps[0].getLLTTy(*getMRI()),
        SrcOps[1].getLLTTy(*getMRI()), SrcOps[2].getLLTTy(*getMRI()));
    break;
  }
  case TargetOpcode::G_FNEG:
  case TargetOpcode::G_ABS:
    // All these are unary ops.
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(SrcOps.size() == 1 && "Invalid Srcs");
    validateUnaryOp(DstOps[0].getLLTTy(*getMRI()),
                    SrcOps[0].getLLTTy(*getMRI()));
    break;
  case TargetOpcode::G_ADD:
  case TargetOpcode::G_AND:
  case TargetOpcode::G_MUL:
  case TargetOpcode::G_OR:
  case TargetOpcode::G_SUB:
  case TargetOpcode::G_XOR:
  case TargetOpcode::G_UDIV:
  case TargetOpcode::G_SDIV:
  case TargetOpcode::G_UREM:
  case TargetOpcode::G_SREM:
  case TargetOpcode::G_SMIN:
  case TargetOpcode::G_SMAX:
  case TargetOpcode::G_UMIN:
  case TargetOpcode::G_UMAX:
  case TargetOpcode::G_UADDSAT:
  case TargetOpcode::G_SADDSAT:
  case TargetOpcode::G_USUBSAT:
  case TargetOpcode::G_SSUBSAT: {
    // All these are binary ops.
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(SrcOps.size() == 2 && "Invalid Srcs");
    validateBinaryOp(DstOps[0].getLLTTy(*getMRI()),
                     SrcOps[0].getLLTTy(*getMRI()),
                     SrcOps[1].getLLTTy(*getMRI()));
    break;
  }
  case TargetOpcode::G_SHL:
  case TargetOpcode::G_ASHR:
  case TargetOpcode::G_LSHR:
  case TargetOpcode::G_USHLSAT:
  case TargetOpcode::G_SSHLSAT: {
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(SrcOps.size() == 2 && "Invalid Srcs");
    validateShiftOp(DstOps[0].getLLTTy(*getMRI()),
                    SrcOps[0].getLLTTy(*getMRI()),
                    SrcOps[1].getLLTTy(*getMRI()));
    break;
  }
  case TargetOpcode::G_SEXT:
  case TargetOpcode::G_ZEXT:
  case TargetOpcode::G_ANYEXT:
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(SrcOps.size() == 1 && "Invalid Srcs");
    validateTruncExt(DstOps[0].getLLTTy(*getMRI()),
                     SrcOps[0].getLLTTy(*getMRI()), true);
    break;
  case TargetOpcode::G_TRUNC:
  case TargetOpcode::G_FPTRUNC: {
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(SrcOps.size() == 1 && "Invalid Srcs");
    validateTruncExt(DstOps[0].getLLTTy(*getMRI()),
                     SrcOps[0].getLLTTy(*getMRI()), false);
    break;
  }
  case TargetOpcode::G_BITCAST: {
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(SrcOps.size() == 1 && "Invalid Srcs");
    assert(DstOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
           SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() && "invalid bitcast");
    break;
  }
  case TargetOpcode::COPY:
    assert(DstOps.size() == 1 && "Invalid Dst");
    // If the caller wants to add a subreg source it has to be done separately
    // so we may not have any SrcOps at this point yet.
    break;
  case TargetOpcode::G_FCMP:
  case TargetOpcode::G_ICMP: {
    assert(DstOps.size() == 1 && "Invalid Dst Operands");
    assert(SrcOps.size() == 3 && "Invalid Src Operands");
    // For F/ICMP, the first src operand is the predicate, followed by
    // the two comparands.
    assert(SrcOps[0].getSrcOpKind() == SrcOp::SrcType::Ty_Predicate &&
           "Expecting predicate");
    assert([&]() -> bool {
      CmpInst::Predicate Pred = SrcOps[0].getPredicate();
      return Opc == TargetOpcode::G_ICMP ? CmpInst::isIntPredicate(Pred)
                                         : CmpInst::isFPPredicate(Pred);
    }() && "Invalid predicate");
    assert(SrcOps[1].getLLTTy(*getMRI()) == SrcOps[2].getLLTTy(*getMRI()) &&
           "Type mismatch");
    assert([&]() -> bool {
      LLT Op0Ty = SrcOps[1].getLLTTy(*getMRI());
      LLT DstTy = DstOps[0].getLLTTy(*getMRI());
      if (Op0Ty.isScalar() || Op0Ty.isPointer())
        return DstTy.isScalar();
      else
        return DstTy.isVector() &&
               DstTy.getNumElements() == Op0Ty.getNumElements();
    }() && "Type Mismatch");
    break;
  }
  case TargetOpcode::G_UNMERGE_VALUES: {
    assert(!DstOps.empty() && "Invalid trivial sequence");
    assert(SrcOps.size() == 1 && "Invalid src for Unmerge");
    assert(llvm::all_of(DstOps,
                        [&, this](const DstOp &Op) {
                          return Op.getLLTTy(*getMRI()) ==
                                 DstOps[0].getLLTTy(*getMRI());
                        }) &&
           "type mismatch in output list");
    assert((TypeSize::ScalarTy)DstOps.size() *
                   DstOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
               SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
           "input operands do not cover output register");
    break;
  }
  case TargetOpcode::G_MERGE_VALUES: {
    assert(!SrcOps.empty() && "invalid trivial sequence");
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(llvm::all_of(SrcOps,
                        [&, this](const SrcOp &Op) {
                          return Op.getLLTTy(*getMRI()) ==
                                 SrcOps[0].getLLTTy(*getMRI());
                        }) &&
           "type mismatch in input list");
    assert((TypeSize::ScalarTy)SrcOps.size() *
                   SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
               DstOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
           "input operands do not cover output register");
    if (SrcOps.size() == 1)
      return buildCast(DstOps[0], SrcOps[0]);
    if (DstOps[0].getLLTTy(*getMRI()).isVector()) {
      if (SrcOps[0].getLLTTy(*getMRI()).isVector())
        return buildInstr(TargetOpcode::G_CONCAT_VECTORS, DstOps, SrcOps);
      return buildInstr(TargetOpcode::G_BUILD_VECTOR, DstOps, SrcOps);
    }
    break;
  }
  case TargetOpcode::G_EXTRACT_VECTOR_ELT: {
    assert(DstOps.size() == 1 && "Invalid Dst size");
    assert(SrcOps.size() == 2 && "Invalid Src size");
    assert(SrcOps[0].getLLTTy(*getMRI()).isVector() && "Invalid operand type");
    assert((DstOps[0].getLLTTy(*getMRI()).isScalar() ||
            DstOps[0].getLLTTy(*getMRI()).isPointer()) &&
           "Invalid operand type");
    assert(SrcOps[1].getLLTTy(*getMRI()).isScalar() && "Invalid operand type");
    assert(SrcOps[0].getLLTTy(*getMRI()).getElementType() ==
               DstOps[0].getLLTTy(*getMRI()) &&
           "Type mismatch");
    break;
  }
  case TargetOpcode::G_INSERT_VECTOR_ELT: {
    assert(DstOps.size() == 1 && "Invalid dst size");
    assert(SrcOps.size() == 3 && "Invalid src size");
    assert(DstOps[0].getLLTTy(*getMRI()).isVector() &&
           SrcOps[0].getLLTTy(*getMRI()).isVector() && "Invalid operand type");
    assert(DstOps[0].getLLTTy(*getMRI()).getElementType() ==
               SrcOps[1].getLLTTy(*getMRI()) &&
           "Type mismatch");
    assert(SrcOps[2].getLLTTy(*getMRI()).isScalar() && "Invalid index");
    assert(DstOps[0].getLLTTy(*getMRI()).getNumElements() ==
               SrcOps[0].getLLTTy(*getMRI()).getNumElements() &&
           "Type mismatch");
    break;
  }
  case TargetOpcode::G_BUILD_VECTOR: {
    assert(SrcOps.size() >= 2 && "Must have at least 2 operands");
    assert(DstOps.size() == 1 && "Invalid DstOps");
    assert(DstOps[0].getLLTTy(*getMRI()).isVector() &&
           "Res type must be a vector");
    assert(llvm::all_of(SrcOps,
                        [&, this](const SrcOp &Op) {
                          return Op.getLLTTy(*getMRI()) ==
                                 SrcOps[0].getLLTTy(*getMRI());
                        }) &&
           "type mismatch in input list");
    assert((TypeSize::ScalarTy)SrcOps.size() *
                   SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
               DstOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
           "input scalars do not exactly cover the output vector register");
    break;
  }
  case TargetOpcode::G_BUILD_VECTOR_TRUNC: {
    assert(SrcOps.size() >= 2 && "Must have at least 2 operands");
    assert(DstOps.size() == 1 && "Invalid DstOps");
    assert(DstOps[0].getLLTTy(*getMRI()).isVector() &&
           "Res type must be a vector");
    assert(llvm::all_of(SrcOps,
                        [&, this](const SrcOp &Op) {
                          return Op.getLLTTy(*getMRI()) ==
                                 SrcOps[0].getLLTTy(*getMRI());
                        }) &&
           "type mismatch in input list");
    if (SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
        DstOps[0].getLLTTy(*getMRI()).getElementType().getSizeInBits())
      return buildInstr(TargetOpcode::G_BUILD_VECTOR, DstOps, SrcOps);
    break;
  }
  case TargetOpcode::G_CONCAT_VECTORS: {
    assert(DstOps.size() == 1 && "Invalid DstOps");
    assert(SrcOps.size() >= 2 && "Must have at least 2 operands");
    assert(llvm::all_of(SrcOps,
                        [&, this](const SrcOp &Op) {
                          return (Op.getLLTTy(*getMRI()).isVector() &&
                                  Op.getLLTTy(*getMRI()) ==
                                      SrcOps[0].getLLTTy(*getMRI()));
                        }) &&
           "type mismatch in input list");
    assert((TypeSize::ScalarTy)SrcOps.size() *
                   SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
               DstOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
           "input vectors do not exactly cover the output vector register");
    break;
  }
  case TargetOpcode::G_UADDE: {
    assert(DstOps.size() == 2 && "Invalid no of dst operands");
    assert(SrcOps.size() == 3 && "Invalid no of src operands");
    assert(DstOps[0].getLLTTy(*getMRI()).isScalar() && "Invalid operand");
    assert((DstOps[0].getLLTTy(*getMRI()) == SrcOps[0].getLLTTy(*getMRI())) &&
           (DstOps[0].getLLTTy(*getMRI()) == SrcOps[1].getLLTTy(*getMRI())) &&
           "Invalid operand");
    assert(DstOps[1].getLLTTy(*getMRI()).isScalar() && "Invalid operand");
    assert(DstOps[1].getLLTTy(*getMRI()) == SrcOps[2].getLLTTy(*getMRI()) &&
           "type mismatch");
    break;
  }
  }

  auto MIB = buildInstr(Opc);
  for (const DstOp &Op : DstOps)
    Op.addDefToMIB(*getMRI(), MIB);
  for (const SrcOp &Op : SrcOps)
    Op.addSrcToMIB(MIB);
  if (Flags)
    MIB->setFlags(*Flags);
  return MIB;
}
1289