//===-- llvm/CodeGen/GlobalISel/MachineIRBuilder.cpp - MIBuilder--*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements the MachineIRBuilder class.
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/IR/DebugInfoMetadata.h"

using namespace llvm;

void MachineIRBuilder::setMF(MachineFunction &MF) {
  State.MF = &MF;
  State.MBB = nullptr;
  State.MRI = &MF.getRegInfo();
  State.TII = MF.getSubtarget().getInstrInfo();
  State.DL = DebugLoc();
  State.PCSections = nullptr;
  State.MMRA = nullptr;
  State.II = MachineBasicBlock::iterator();
  State.Observer = nullptr;
}

//------------------------------------------------------------------------------
// Build instruction variants.
//------------------------------------------------------------------------------

MachineInstrBuilder MachineIRBuilder::buildInstrNoInsert(unsigned Opcode) {
  return BuildMI(getMF(), {getDL(), getPCSections(), getMMRAMetadata()},
                 getTII().get(Opcode));
}

MachineInstrBuilder MachineIRBuilder::insertInstr(MachineInstrBuilder MIB) {
  getMBB().insert(getInsertPt(), MIB);
  recordInsertion(MIB);
  return MIB;
}
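
// Editorial note: a minimal usage sketch, not part of the original source.
// buildInstrNoInsert/insertInstr split creation from placement, which is
// useful when operands must be attached before the change observer sees the
// instruction (buildConstDbgValue below uses exactly this pattern). Assuming
// a builder `B` with a valid insertion point and a vreg `SomeVReg`:
//
//   MachineInstrBuilder MIB = B.buildInstrNoInsert(TargetOpcode::G_IMPLICIT_DEF);
//   MIB.addDef(SomeVReg); // attach operands first
//   B.insertInstr(MIB);   // then insert and notify the observer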

MachineInstrBuilder
MachineIRBuilder::buildDirectDbgValue(Register Reg, const MDNode *Variable,
                                      const MDNode *Expr) {
  assert(isa<DILocalVariable>(Variable) && "not a variable");
  assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
  assert(
      cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) &&
      "Expected inlined-at fields to agree");
  return insertInstr(BuildMI(getMF(), getDL(),
                             getTII().get(TargetOpcode::DBG_VALUE),
                             /*IsIndirect*/ false, Reg, Variable, Expr));
}

MachineInstrBuilder
MachineIRBuilder::buildIndirectDbgValue(Register Reg, const MDNode *Variable,
                                        const MDNode *Expr) {
  assert(isa<DILocalVariable>(Variable) && "not a variable");
  assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
  assert(
      cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) &&
      "Expected inlined-at fields to agree");
  return insertInstr(BuildMI(getMF(), getDL(),
                             getTII().get(TargetOpcode::DBG_VALUE),
                             /*IsIndirect*/ true, Reg, Variable, Expr));
}

MachineInstrBuilder MachineIRBuilder::buildFIDbgValue(int FI,
                                                      const MDNode *Variable,
                                                      const MDNode *Expr) {
  assert(isa<DILocalVariable>(Variable) && "not a variable");
  assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
  assert(
      cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) &&
      "Expected inlined-at fields to agree");
  return insertInstr(buildInstrNoInsert(TargetOpcode::DBG_VALUE)
                         .addFrameIndex(FI)
                         .addImm(0)
                         .addMetadata(Variable)
                         .addMetadata(Expr));
}

MachineInstrBuilder MachineIRBuilder::buildConstDbgValue(const Constant &C,
                                                         const MDNode *Variable,
                                                         const MDNode *Expr) {
  assert(isa<DILocalVariable>(Variable) && "not a variable");
  assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
  assert(
      cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) &&
      "Expected inlined-at fields to agree");
  auto MIB = buildInstrNoInsert(TargetOpcode::DBG_VALUE);

  auto *NumericConstant = [&]() -> const Constant * {
    if (const auto *CE = dyn_cast<ConstantExpr>(&C))
      if (CE->getOpcode() == Instruction::IntToPtr)
        return CE->getOperand(0);
    return &C;
  }();

  if (auto *CI = dyn_cast<ConstantInt>(NumericConstant)) {
    if (CI->getBitWidth() > 64)
      MIB.addCImm(CI);
    else
      MIB.addImm(CI->getZExtValue());
  } else if (auto *CFP = dyn_cast<ConstantFP>(NumericConstant)) {
    MIB.addFPImm(CFP);
  } else if (isa<ConstantPointerNull>(NumericConstant)) {
    MIB.addImm(0);
  } else {
    // Insert $noreg if we didn't find a usable constant and had to drop it.
    MIB.addReg(Register());
  }

  MIB.addImm(0).addMetadata(Variable).addMetadata(Expr);
  return insertInstr(MIB);
}

MachineInstrBuilder MachineIRBuilder::buildDbgLabel(const MDNode *Label) {
  assert(isa<DILabel>(Label) && "not a label");
  assert(cast<DILabel>(Label)->isValidLocationForIntrinsic(State.DL) &&
         "Expected inlined-at fields to agree");
  auto MIB = buildInstr(TargetOpcode::DBG_LABEL);

  return MIB.addMetadata(Label);
}

MachineInstrBuilder MachineIRBuilder::buildDynStackAlloc(const DstOp &Res,
                                                         const SrcOp &Size,
                                                         Align Alignment) {
  assert(Res.getLLTTy(*getMRI()).isPointer() && "expected ptr dst type");
  auto MIB = buildInstr(TargetOpcode::G_DYN_STACKALLOC);
  Res.addDefToMIB(*getMRI(), MIB);
  Size.addSrcToMIB(MIB);
  MIB.addImm(Alignment.value());
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::buildFrameIndex(const DstOp &Res,
                                                      int Idx) {
  assert(Res.getLLTTy(*getMRI()).isPointer() && "invalid operand type");
  auto MIB = buildInstr(TargetOpcode::G_FRAME_INDEX);
  Res.addDefToMIB(*getMRI(), MIB);
  MIB.addFrameIndex(Idx);
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::buildGlobalValue(const DstOp &Res,
                                                       const GlobalValue *GV) {
  assert(Res.getLLTTy(*getMRI()).isPointer() && "invalid operand type");
  assert(Res.getLLTTy(*getMRI()).getAddressSpace() ==
             GV->getType()->getAddressSpace() &&
         "address space mismatch");

  auto MIB = buildInstr(TargetOpcode::G_GLOBAL_VALUE);
  Res.addDefToMIB(*getMRI(), MIB);
  MIB.addGlobalAddress(GV);
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::buildConstantPool(const DstOp &Res,
                                                        unsigned Idx) {
  assert(Res.getLLTTy(*getMRI()).isPointer() && "invalid operand type");
  auto MIB = buildInstr(TargetOpcode::G_CONSTANT_POOL);
  Res.addDefToMIB(*getMRI(), MIB);
  MIB.addConstantPoolIndex(Idx);
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::buildJumpTable(const LLT PtrTy,
                                                     unsigned JTI) {
  return buildInstr(TargetOpcode::G_JUMP_TABLE, {PtrTy}, {})
      .addJumpTableIndex(JTI);
}

void MachineIRBuilder::validateUnaryOp(const LLT Res, const LLT Op0) {
  assert((Res.isScalar() || Res.isVector()) && "invalid operand type");
  assert((Res == Op0) && "type mismatch");
}

void MachineIRBuilder::validateBinaryOp(const LLT Res, const LLT Op0,
                                        const LLT Op1) {
  assert((Res.isScalar() || Res.isVector()) && "invalid operand type");
  assert((Res == Op0 && Res == Op1) && "type mismatch");
}

void MachineIRBuilder::validateShiftOp(const LLT Res, const LLT Op0,
                                       const LLT Op1) {
  assert((Res.isScalar() || Res.isVector()) && "invalid operand type");
  assert((Res == Op0) && "type mismatch");
}

MachineInstrBuilder
MachineIRBuilder::buildPtrAdd(const DstOp &Res, const SrcOp &Op0,
                              const SrcOp &Op1, std::optional<unsigned> Flags) {
  assert(Res.getLLTTy(*getMRI()).isPointerOrPointerVector() &&
         Res.getLLTTy(*getMRI()) == Op0.getLLTTy(*getMRI()) && "type mismatch");
  assert(Op1.getLLTTy(*getMRI()).getScalarType().isScalar() &&
         "invalid offset type");

  return buildInstr(TargetOpcode::G_PTR_ADD, {Res}, {Op0, Op1}, Flags);
}
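
// Editorial note: an illustrative sketch, not in the original source. A
// typical way to drive buildPtrAdd, assuming a builder `B` and a 64-bit
// address-space-0 pointer in `BaseReg`:
//
//   LLT P0 = LLT::pointer(0, 64);
//   LLT S64 = LLT::scalar(64);
//   auto Off = B.buildConstant(S64, 16);
//   auto Ptr = B.buildPtrAdd(P0, BaseReg, Off); // G_PTR_ADD %base, %off(=16)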

std::optional<MachineInstrBuilder>
MachineIRBuilder::materializePtrAdd(Register &Res, Register Op0,
                                    const LLT ValueTy, uint64_t Value) {
  assert(Res == 0 && "Res is a result argument");
  assert(ValueTy.isScalar() && "invalid offset type");

  if (Value == 0) {
    Res = Op0;
    return std::nullopt;
  }

  Res = getMRI()->createGenericVirtualRegister(getMRI()->getType(Op0));
  auto Cst = buildConstant(ValueTy, Value);
  return buildPtrAdd(Res, Op0, Cst.getReg(0));
}
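
// Editorial note: a hedged usage sketch, not in the original source. The
// std::nullopt return means "no instruction was needed"; callers read the
// result through the out-parameter rather than the returned builder:
//
//   Register NewPtr; // must start out invalid
//   if (auto MIB = B.materializePtrAdd(NewPtr, OldPtr, LLT::scalar(64), Imm))
//     ; // an offset was materialized; *MIB is the G_PTR_ADD
//   // NewPtr is valid either way (it aliases OldPtr when Imm == 0).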

MachineInstrBuilder MachineIRBuilder::buildMaskLowPtrBits(const DstOp &Res,
                                                          const SrcOp &Op0,
                                                          uint32_t NumBits) {
  LLT PtrTy = Res.getLLTTy(*getMRI());
  LLT MaskTy = LLT::scalar(PtrTy.getSizeInBits());
  Register MaskReg = getMRI()->createGenericVirtualRegister(MaskTy);
  buildConstant(MaskReg, maskTrailingZeros<uint64_t>(NumBits));
  return buildPtrMask(Res, Op0, MaskReg);
}

MachineInstrBuilder
MachineIRBuilder::buildPadVectorWithUndefElements(const DstOp &Res,
                                                  const SrcOp &Op0) {
  LLT ResTy = Res.getLLTTy(*getMRI());
  LLT Op0Ty = Op0.getLLTTy(*getMRI());

  assert(ResTy.isVector() && "Res non vector type");

  SmallVector<Register, 8> Regs;
  if (Op0Ty.isVector()) {
    assert((ResTy.getElementType() == Op0Ty.getElementType()) &&
           "Different vector element types");
    assert((ResTy.getNumElements() > Op0Ty.getNumElements()) &&
           "Op0 has more elements");
    auto Unmerge = buildUnmerge(Op0Ty.getElementType(), Op0);

    for (auto Op : Unmerge.getInstr()->defs())
      Regs.push_back(Op.getReg());
  } else {
    assert((ResTy.getSizeInBits() > Op0Ty.getSizeInBits()) &&
           "Op0 has more size");
    Regs.push_back(Op0.getReg());
  }
  Register Undef =
      buildUndef(Op0Ty.isVector() ? Op0Ty.getElementType() : Op0Ty).getReg(0);
  unsigned NumberOfPadElts = ResTy.getNumElements() - Regs.size();
  for (unsigned i = 0; i < NumberOfPadElts; ++i)
    Regs.push_back(Undef);
  return buildMergeLikeInstr(Res, Regs);
}
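
// Editorial note: a small illustration, not in the original source. Padding
// <2 x s32> up to <4 x s32> unmerges the source, appends undef elements, and
// rebuilds the wider vector:
//
//   LLT V4S32 = LLT::fixed_vector(4, 32);
//   auto Padded = B.buildPadVectorWithUndefElements(V4S32, V2S32Reg);
//   // = G_BUILD_VECTOR %e0, %e1, %undef, %undef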

MachineInstrBuilder
MachineIRBuilder::buildDeleteTrailingVectorElements(const DstOp &Res,
                                                    const SrcOp &Op0) {
  LLT ResTy = Res.getLLTTy(*getMRI());
  LLT Op0Ty = Op0.getLLTTy(*getMRI());

  assert(Op0Ty.isVector() && "Non vector type");
  assert(((ResTy.isScalar() && (ResTy == Op0Ty.getElementType())) ||
          (ResTy.isVector() &&
           (ResTy.getElementType() == Op0Ty.getElementType()))) &&
         "Different vector element types");
  assert(
      (ResTy.isScalar() || (ResTy.getNumElements() < Op0Ty.getNumElements())) &&
      "Op0 has fewer elements");

  auto Unmerge = buildUnmerge(Op0Ty.getElementType(), Op0);
  if (ResTy.isScalar())
    return buildCopy(Res, Unmerge.getReg(0));
  SmallVector<Register, 8> Regs;
  for (unsigned i = 0; i < ResTy.getNumElements(); ++i)
    Regs.push_back(Unmerge.getReg(i));
  return buildMergeLikeInstr(Res, Regs);
}

MachineInstrBuilder MachineIRBuilder::buildBr(MachineBasicBlock &Dest) {
  return buildInstr(TargetOpcode::G_BR).addMBB(&Dest);
}

MachineInstrBuilder MachineIRBuilder::buildBrIndirect(Register Tgt) {
  assert(getMRI()->getType(Tgt).isPointer() && "invalid branch destination");
  return buildInstr(TargetOpcode::G_BRINDIRECT).addUse(Tgt);
}

MachineInstrBuilder MachineIRBuilder::buildBrJT(Register TablePtr,
                                                unsigned JTI,
                                                Register IndexReg) {
  assert(getMRI()->getType(TablePtr).isPointer() &&
         "Table reg must be a pointer");
  return buildInstr(TargetOpcode::G_BRJT)
      .addUse(TablePtr)
      .addJumpTableIndex(JTI)
      .addUse(IndexReg);
}

MachineInstrBuilder MachineIRBuilder::buildCopy(const DstOp &Res,
                                                const SrcOp &Op) {
  return buildInstr(TargetOpcode::COPY, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildConstant(const DstOp &Res,
                                                    const ConstantInt &Val) {
  LLT Ty = Res.getLLTTy(*getMRI());
  LLT EltTy = Ty.getScalarType();
  assert(EltTy.getScalarSizeInBits() == Val.getBitWidth() &&
         "creating constant with the wrong size");

  assert(!Ty.isScalableVector() &&
         "unexpected scalable vector in buildConstant");

  if (Ty.isFixedVector()) {
    auto Const = buildInstr(TargetOpcode::G_CONSTANT)
                     .addDef(getMRI()->createGenericVirtualRegister(EltTy))
                     .addCImm(&Val);
    return buildSplatBuildVector(Res, Const);
  }

  auto Const = buildInstr(TargetOpcode::G_CONSTANT);
  Const->setDebugLoc(DebugLoc());
  Res.addDefToMIB(*getMRI(), Const);
  Const.addCImm(&Val);
  return Const;
}

MachineInstrBuilder MachineIRBuilder::buildConstant(const DstOp &Res,
                                                    int64_t Val) {
  auto IntN = IntegerType::get(getMF().getFunction().getContext(),
                               Res.getLLTTy(*getMRI()).getScalarSizeInBits());
  ConstantInt *CI = ConstantInt::get(IntN, Val, true);
  return buildConstant(Res, *CI);
}
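
// Editorial note: an illustrative sketch, not in the original source.
// Because buildConstant splats fixed vectors, the same call works for scalar
// and vector destinations:
//
//   auto C1 = B.buildConstant(LLT::scalar(32), 42);          // G_CONSTANT
//   auto C2 = B.buildConstant(LLT::fixed_vector(4, 32), 42); // splat
//   // C2 = G_BUILD_VECTOR %c, %c, %c, %c where %c = G_CONSTANT i32 42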

MachineInstrBuilder MachineIRBuilder::buildFConstant(const DstOp &Res,
                                                     const ConstantFP &Val) {
  LLT Ty = Res.getLLTTy(*getMRI());
  LLT EltTy = Ty.getScalarType();

  assert(APFloat::getSizeInBits(Val.getValueAPF().getSemantics()) ==
             EltTy.getSizeInBits() &&
         "creating fconstant with the wrong size");

  assert(!Ty.isPointer() && "invalid operand type");

  assert(!Ty.isScalableVector() &&
         "unexpected scalable vector in buildFConstant");

  if (Ty.isFixedVector()) {
    auto Const = buildInstr(TargetOpcode::G_FCONSTANT)
                     .addDef(getMRI()->createGenericVirtualRegister(EltTy))
                     .addFPImm(&Val);

    return buildSplatBuildVector(Res, Const);
  }

  auto Const = buildInstr(TargetOpcode::G_FCONSTANT);
  Const->setDebugLoc(DebugLoc());
  Res.addDefToMIB(*getMRI(), Const);
  Const.addFPImm(&Val);
  return Const;
}

MachineInstrBuilder MachineIRBuilder::buildConstant(const DstOp &Res,
                                                    const APInt &Val) {
  ConstantInt *CI = ConstantInt::get(getMF().getFunction().getContext(), Val);
  return buildConstant(Res, *CI);
}

MachineInstrBuilder MachineIRBuilder::buildFConstant(const DstOp &Res,
                                                     double Val) {
  LLT DstTy = Res.getLLTTy(*getMRI());
  auto &Ctx = getMF().getFunction().getContext();
  auto *CFP =
      ConstantFP::get(Ctx, getAPFloatFromSize(Val, DstTy.getScalarSizeInBits()));
  return buildFConstant(Res, *CFP);
}

MachineInstrBuilder MachineIRBuilder::buildFConstant(const DstOp &Res,
                                                     const APFloat &Val) {
  auto &Ctx = getMF().getFunction().getContext();
  auto *CFP = ConstantFP::get(Ctx, Val);
  return buildFConstant(Res, *CFP);
}

MachineInstrBuilder
MachineIRBuilder::buildConstantPtrAuth(const DstOp &Res,
                                       const ConstantPtrAuth *CPA,
                                       Register Addr, Register AddrDisc) {
  auto MIB = buildInstr(TargetOpcode::G_PTRAUTH_GLOBAL_VALUE);
  Res.addDefToMIB(*getMRI(), MIB);
  MIB.addUse(Addr);
  MIB.addImm(CPA->getKey()->getZExtValue());
  MIB.addUse(AddrDisc);
  MIB.addImm(CPA->getDiscriminator()->getZExtValue());
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::buildBrCond(const SrcOp &Tst,
                                                  MachineBasicBlock &Dest) {
  assert(Tst.getLLTTy(*getMRI()).isScalar() && "invalid operand type");

  auto MIB = buildInstr(TargetOpcode::G_BRCOND);
  Tst.addSrcToMIB(MIB);
  MIB.addMBB(&Dest);
  return MIB;
}

MachineInstrBuilder
MachineIRBuilder::buildLoad(const DstOp &Dst, const SrcOp &Addr,
                            MachinePointerInfo PtrInfo, Align Alignment,
                            MachineMemOperand::Flags MMOFlags,
                            const AAMDNodes &AAInfo) {
  MMOFlags |= MachineMemOperand::MOLoad;
  assert((MMOFlags & MachineMemOperand::MOStore) == 0);

  LLT Ty = Dst.getLLTTy(*getMRI());
  MachineMemOperand *MMO =
      getMF().getMachineMemOperand(PtrInfo, MMOFlags, Ty, Alignment, AAInfo);
  return buildLoad(Dst, Addr, *MMO);
}

MachineInstrBuilder MachineIRBuilder::buildLoadInstr(unsigned Opcode,
                                                     const DstOp &Res,
                                                     const SrcOp &Addr,
                                                     MachineMemOperand &MMO) {
  assert(Res.getLLTTy(*getMRI()).isValid() && "invalid operand type");
  assert(Addr.getLLTTy(*getMRI()).isPointer() && "invalid operand type");

  auto MIB = buildInstr(Opcode);
  Res.addDefToMIB(*getMRI(), MIB);
  Addr.addSrcToMIB(MIB);
  MIB.addMemOperand(&MMO);
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::buildLoadFromOffset(
    const DstOp &Dst, const SrcOp &BasePtr, MachineMemOperand &BaseMMO,
    int64_t Offset) {
  LLT LoadTy = Dst.getLLTTy(*getMRI());
  MachineMemOperand *OffsetMMO =
      getMF().getMachineMemOperand(&BaseMMO, Offset, LoadTy);

  if (Offset == 0) // This may be a size or type changing load.
    return buildLoad(Dst, BasePtr, *OffsetMMO);

  LLT PtrTy = BasePtr.getLLTTy(*getMRI());
  LLT OffsetTy = LLT::scalar(PtrTy.getSizeInBits());
  auto ConstOffset = buildConstant(OffsetTy, Offset);
  auto Ptr = buildPtrAdd(PtrTy, BasePtr, ConstOffset);
  return buildLoad(Dst, Ptr, *OffsetMMO);
}
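
// Editorial note: a hedged usage sketch, not in the original source. Loading
// a second 32-bit field relative to an existing memory operand:
//
//   auto Field1 = B.buildLoadFromOffset(LLT::scalar(32), BasePtr, BaseMMO, 4);
//   // emits G_CONSTANT 4, G_PTR_ADD, and a G_LOAD whose MMO is derived
//   // from BaseMMO at offset 4.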

MachineInstrBuilder MachineIRBuilder::buildStore(const SrcOp &Val,
                                                 const SrcOp &Addr,
                                                 MachineMemOperand &MMO) {
  assert(Val.getLLTTy(*getMRI()).isValid() && "invalid operand type");
  assert(Addr.getLLTTy(*getMRI()).isPointer() && "invalid operand type");

  auto MIB = buildInstr(TargetOpcode::G_STORE);
  Val.addSrcToMIB(MIB);
  Addr.addSrcToMIB(MIB);
  MIB.addMemOperand(&MMO);
  return MIB;
}

MachineInstrBuilder
MachineIRBuilder::buildStore(const SrcOp &Val, const SrcOp &Addr,
                             MachinePointerInfo PtrInfo, Align Alignment,
                             MachineMemOperand::Flags MMOFlags,
                             const AAMDNodes &AAInfo) {
  MMOFlags |= MachineMemOperand::MOStore;
  assert((MMOFlags & MachineMemOperand::MOLoad) == 0);

  LLT Ty = Val.getLLTTy(*getMRI());
  MachineMemOperand *MMO =
      getMF().getMachineMemOperand(PtrInfo, MMOFlags, Ty, Alignment, AAInfo);
  return buildStore(Val, Addr, *MMO);
}

MachineInstrBuilder MachineIRBuilder::buildAnyExt(const DstOp &Res,
                                                  const SrcOp &Op) {
  return buildInstr(TargetOpcode::G_ANYEXT, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildSExt(const DstOp &Res,
                                                const SrcOp &Op) {
  return buildInstr(TargetOpcode::G_SEXT, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildZExt(const DstOp &Res,
                                                const SrcOp &Op,
                                                std::optional<unsigned> Flags) {
  return buildInstr(TargetOpcode::G_ZEXT, Res, Op, Flags);
}

unsigned MachineIRBuilder::getBoolExtOp(bool IsVec, bool IsFP) const {
  const auto *TLI = getMF().getSubtarget().getTargetLowering();
  switch (TLI->getBooleanContents(IsVec, IsFP)) {
  case TargetLoweringBase::ZeroOrNegativeOneBooleanContent:
    return TargetOpcode::G_SEXT;
  case TargetLoweringBase::ZeroOrOneBooleanContent:
    return TargetOpcode::G_ZEXT;
  default:
    return TargetOpcode::G_ANYEXT;
  }
}

MachineInstrBuilder MachineIRBuilder::buildBoolExt(const DstOp &Res,
                                                   const SrcOp &Op,
                                                   bool IsFP) {
  unsigned ExtOp =
      getBoolExtOp(getMRI()->getType(Op.getReg()).isVector(), IsFP);
  return buildInstr(ExtOp, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildBoolExtInReg(const DstOp &Res,
                                                        const SrcOp &Op,
                                                        bool IsVector,
                                                        bool IsFP) {
  const auto *TLI = getMF().getSubtarget().getTargetLowering();
  switch (TLI->getBooleanContents(IsVector, IsFP)) {
  case TargetLoweringBase::ZeroOrNegativeOneBooleanContent:
    return buildSExtInReg(Res, Op, 1);
  case TargetLoweringBase::ZeroOrOneBooleanContent:
    return buildZExtInReg(Res, Op, 1);
  case TargetLoweringBase::UndefinedBooleanContent:
    return buildCopy(Res, Op);
  }

  llvm_unreachable("unexpected BooleanContent");
}

MachineInstrBuilder MachineIRBuilder::buildExtOrTrunc(unsigned ExtOpc,
                                                      const DstOp &Res,
                                                      const SrcOp &Op) {
  assert((TargetOpcode::G_ANYEXT == ExtOpc || TargetOpcode::G_ZEXT == ExtOpc ||
          TargetOpcode::G_SEXT == ExtOpc) &&
         "Expecting Extending Opc");
  assert(Res.getLLTTy(*getMRI()).isScalar() ||
         Res.getLLTTy(*getMRI()).isVector());
  assert(Res.getLLTTy(*getMRI()).isScalar() ==
         Op.getLLTTy(*getMRI()).isScalar());

  unsigned Opcode = TargetOpcode::COPY;
  if (Res.getLLTTy(*getMRI()).getSizeInBits() >
      Op.getLLTTy(*getMRI()).getSizeInBits())
    Opcode = ExtOpc;
  else if (Res.getLLTTy(*getMRI()).getSizeInBits() <
           Op.getLLTTy(*getMRI()).getSizeInBits())
    Opcode = TargetOpcode::G_TRUNC;
  else
    assert(Res.getLLTTy(*getMRI()) == Op.getLLTTy(*getMRI()));

  return buildInstr(Opcode, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildSExtOrTrunc(const DstOp &Res,
                                                       const SrcOp &Op) {
  return buildExtOrTrunc(TargetOpcode::G_SEXT, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildZExtOrTrunc(const DstOp &Res,
                                                       const SrcOp &Op) {
  return buildExtOrTrunc(TargetOpcode::G_ZEXT, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildAnyExtOrTrunc(const DstOp &Res,
                                                         const SrcOp &Op) {
  return buildExtOrTrunc(TargetOpcode::G_ANYEXT, Res, Op);
}
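
// Editorial note: an illustrative sketch, not in the original source.
// buildExtOrTrunc picks the opcode from the relative sizes, so callers can
// normalize a value to a target width without branching themselves:
//
//   auto A = B.buildZExtOrTrunc(LLT::scalar(64), S32Reg); // G_ZEXT
//   auto C = B.buildZExtOrTrunc(LLT::scalar(16), S32Reg); // G_TRUNC
//   auto D = B.buildZExtOrTrunc(LLT::scalar(32), S32Reg); // COPY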

MachineInstrBuilder MachineIRBuilder::buildZExtInReg(const DstOp &Res,
                                                     const SrcOp &Op,
                                                     int64_t ImmOp) {
  LLT ResTy = Res.getLLTTy(*getMRI());
  auto Mask = buildConstant(
      ResTy, APInt::getLowBitsSet(ResTy.getScalarSizeInBits(), ImmOp));
  return buildAnd(Res, Op, Mask);
}

MachineInstrBuilder MachineIRBuilder::buildCast(const DstOp &Dst,
                                                const SrcOp &Src) {
  LLT SrcTy = Src.getLLTTy(*getMRI());
  LLT DstTy = Dst.getLLTTy(*getMRI());
  if (SrcTy == DstTy)
    return buildCopy(Dst, Src);

  unsigned Opcode;
  if (SrcTy.isPointerOrPointerVector())
    Opcode = TargetOpcode::G_PTRTOINT;
  else if (DstTy.isPointerOrPointerVector())
    Opcode = TargetOpcode::G_INTTOPTR;
  else {
    assert(!SrcTy.isPointerOrPointerVector() &&
           !DstTy.isPointerOrPointerVector() && "no G_ADDRCAST yet");
    Opcode = TargetOpcode::G_BITCAST;
  }

  return buildInstr(Opcode, Dst, Src);
}
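
// Editorial note: a small illustration, not in the original source, of the
// opcode buildCast selects for same-sized types:
//
//   B.buildCast(LLT::scalar(64), P0Reg);           // G_PTRTOINT
//   B.buildCast(LLT::pointer(0, 64), S64Reg);      // G_INTTOPTR
//   B.buildCast(LLT::fixed_vector(2, 32), S64Reg); // G_BITCAST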

MachineInstrBuilder MachineIRBuilder::buildExtract(const DstOp &Dst,
                                                   const SrcOp &Src,
                                                   uint64_t Index) {
  LLT SrcTy = Src.getLLTTy(*getMRI());
  LLT DstTy = Dst.getLLTTy(*getMRI());

#ifndef NDEBUG
  assert(SrcTy.isValid() && "invalid operand type");
  assert(DstTy.isValid() && "invalid operand type");
  assert(Index + DstTy.getSizeInBits() <= SrcTy.getSizeInBits() &&
         "extracting off end of register");
#endif

  if (DstTy.getSizeInBits() == SrcTy.getSizeInBits()) {
    assert(Index == 0 && "insertion past the end of a register");
    return buildCast(Dst, Src);
  }

  auto Extract = buildInstr(TargetOpcode::G_EXTRACT);
  Dst.addDefToMIB(*getMRI(), Extract);
  Src.addSrcToMIB(Extract);
  Extract.addImm(Index);
  return Extract;
}

MachineInstrBuilder MachineIRBuilder::buildUndef(const DstOp &Res) {
  return buildInstr(TargetOpcode::G_IMPLICIT_DEF, {Res}, {});
}

MachineInstrBuilder MachineIRBuilder::buildMergeValues(const DstOp &Res,
                                                       ArrayRef<Register> Ops) {
  // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>,
  // we need some temporary storage for the SrcOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<SrcOp, 8> TmpVec(Ops);
  assert(TmpVec.size() > 1);
  return buildInstr(TargetOpcode::G_MERGE_VALUES, Res, TmpVec);
}

MachineInstrBuilder
MachineIRBuilder::buildMergeLikeInstr(const DstOp &Res,
                                      ArrayRef<Register> Ops) {
  // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>,
  // we need some temporary storage for the SrcOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<SrcOp, 8> TmpVec(Ops);
  assert(TmpVec.size() > 1);
  return buildInstr(getOpcodeForMerge(Res, TmpVec), Res, TmpVec);
}

MachineInstrBuilder
MachineIRBuilder::buildMergeLikeInstr(const DstOp &Res,
                                      std::initializer_list<SrcOp> Ops) {
  assert(Ops.size() > 1);
  return buildInstr(getOpcodeForMerge(Res, Ops), Res, Ops);
}

unsigned MachineIRBuilder::getOpcodeForMerge(const DstOp &DstOp,
                                             ArrayRef<SrcOp> SrcOps) const {
  if (DstOp.getLLTTy(*getMRI()).isVector()) {
    if (SrcOps[0].getLLTTy(*getMRI()).isVector())
      return TargetOpcode::G_CONCAT_VECTORS;
    return TargetOpcode::G_BUILD_VECTOR;
  }

  return TargetOpcode::G_MERGE_VALUES;
}
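
// Editorial note: an illustrative summary, not in the original source, of
// the opcode buildMergeLikeInstr ends up emitting:
//
//   s64   <- {s32, s32}      => G_MERGE_VALUES
//   v4s32 <- {s32 x 4}       => G_BUILD_VECTOR
//   v4s32 <- {v2s32, v2s32}  => G_CONCAT_VECTORS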

MachineInstrBuilder MachineIRBuilder::buildUnmerge(ArrayRef<LLT> Res,
                                                   const SrcOp &Op) {
  // Unfortunately to convert from ArrayRef<LLT> to ArrayRef<DstOp>,
  // we need some temporary storage for the DstOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<DstOp, 8> TmpVec(Res);
  assert(TmpVec.size() > 1);
  return buildInstr(TargetOpcode::G_UNMERGE_VALUES, TmpVec, Op);
}

MachineInstrBuilder MachineIRBuilder::buildUnmerge(LLT Res,
                                                   const SrcOp &Op) {
  unsigned NumReg =
      Op.getLLTTy(*getMRI()).getSizeInBits() / Res.getSizeInBits();
  SmallVector<DstOp, 8> TmpVec(NumReg, Res);
  return buildInstr(TargetOpcode::G_UNMERGE_VALUES, TmpVec, Op);
}
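
// Editorial note: a hedged usage sketch, not in the original source.
// Splitting an s64 value into its two s32 halves:
//
//   auto Unmerge = B.buildUnmerge(LLT::scalar(32), S64Reg);
//   Register Lo = Unmerge.getReg(0);
//   Register Hi = Unmerge.getReg(1);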

MachineInstrBuilder
MachineIRBuilder::buildUnmerge(MachineRegisterInfo::VRegAttrs Attrs,
                               const SrcOp &Op) {
  LLT OpTy = Op.getLLTTy(*getMRI());
  unsigned NumRegs = OpTy.getSizeInBits() / Attrs.Ty.getSizeInBits();
  SmallVector<DstOp, 8> TmpVec(NumRegs, Attrs);
  return buildInstr(TargetOpcode::G_UNMERGE_VALUES, TmpVec, Op);
}

MachineInstrBuilder MachineIRBuilder::buildUnmerge(ArrayRef<Register> Res,
                                                   const SrcOp &Op) {
  // Unfortunately to convert from ArrayRef<Register> to ArrayRef<DstOp>,
  // we need some temporary storage for the DstOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<DstOp, 8> TmpVec(Res);
  assert(TmpVec.size() > 1);
  return buildInstr(TargetOpcode::G_UNMERGE_VALUES, TmpVec, Op);
}

MachineInstrBuilder MachineIRBuilder::buildBuildVector(const DstOp &Res,
                                                       ArrayRef<Register> Ops) {
  // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>,
  // we need some temporary storage for the SrcOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<SrcOp, 8> TmpVec(Ops);
  return buildInstr(TargetOpcode::G_BUILD_VECTOR, Res, TmpVec);
}

MachineInstrBuilder
MachineIRBuilder::buildBuildVectorConstant(const DstOp &Res,
                                           ArrayRef<APInt> Ops) {
  SmallVector<SrcOp> TmpVec;
  TmpVec.reserve(Ops.size());
  LLT EltTy = Res.getLLTTy(*getMRI()).getElementType();
  for (const auto &Op : Ops)
    TmpVec.push_back(buildConstant(EltTy, Op));
  return buildInstr(TargetOpcode::G_BUILD_VECTOR, Res, TmpVec);
}

MachineInstrBuilder MachineIRBuilder::buildSplatBuildVector(const DstOp &Res,
                                                            const SrcOp &Src) {
  SmallVector<SrcOp, 8> TmpVec(Res.getLLTTy(*getMRI()).getNumElements(), Src);
  return buildInstr(TargetOpcode::G_BUILD_VECTOR, Res, TmpVec);
}

MachineInstrBuilder
MachineIRBuilder::buildBuildVectorTrunc(const DstOp &Res,
                                        ArrayRef<Register> Ops) {
  // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>,
  // we need some temporary storage for the SrcOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<SrcOp, 8> TmpVec(Ops);
  if (TmpVec[0].getLLTTy(*getMRI()).getSizeInBits() ==
      Res.getLLTTy(*getMRI()).getElementType().getSizeInBits())
    return buildInstr(TargetOpcode::G_BUILD_VECTOR, Res, TmpVec);
  return buildInstr(TargetOpcode::G_BUILD_VECTOR_TRUNC, Res, TmpVec);
}

MachineInstrBuilder MachineIRBuilder::buildShuffleSplat(const DstOp &Res,
                                                        const SrcOp &Src) {
  LLT DstTy = Res.getLLTTy(*getMRI());
  assert(Src.getLLTTy(*getMRI()) == DstTy.getElementType() &&
         "Expected Src to match Dst elt ty");
  auto UndefVec = buildUndef(DstTy);
  auto Zero = buildConstant(LLT::scalar(64), 0);
  auto InsElt = buildInsertVectorElement(DstTy, UndefVec, Src, Zero);
  SmallVector<int, 16> ZeroMask(DstTy.getNumElements());
  return buildShuffleVector(DstTy, InsElt, UndefVec, ZeroMask);
}

MachineInstrBuilder MachineIRBuilder::buildSplatVector(const DstOp &Res,
                                                       const SrcOp &Src) {
  assert(Src.getLLTTy(*getMRI()) == Res.getLLTTy(*getMRI()).getElementType() &&
         "Expected Src to match Dst elt ty");
  return buildInstr(TargetOpcode::G_SPLAT_VECTOR, Res, Src);
}

MachineInstrBuilder MachineIRBuilder::buildShuffleVector(const DstOp &Res,
                                                         const SrcOp &Src1,
                                                         const SrcOp &Src2,
                                                         ArrayRef<int> Mask) {
  LLT DstTy = Res.getLLTTy(*getMRI());
  LLT Src1Ty = Src1.getLLTTy(*getMRI());
  LLT Src2Ty = Src2.getLLTTy(*getMRI());
  const LLT DstElemTy = DstTy.isVector() ? DstTy.getElementType() : DstTy;
  const LLT ElemTy1 = Src1Ty.isVector() ? Src1Ty.getElementType() : Src1Ty;
  const LLT ElemTy2 = Src2Ty.isVector() ? Src2Ty.getElementType() : Src2Ty;
  assert(DstElemTy == ElemTy1 && DstElemTy == ElemTy2);
  (void)DstElemTy;
  (void)ElemTy1;
  (void)ElemTy2;
  ArrayRef<int> MaskAlloc = getMF().allocateShuffleMask(Mask);
  return buildInstr(TargetOpcode::G_SHUFFLE_VECTOR, {Res}, {Src1, Src2})
      .addShuffleMask(MaskAlloc);
}

MachineInstrBuilder
MachineIRBuilder::buildConcatVectors(const DstOp &Res, ArrayRef<Register> Ops) {
  // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>,
  // we need some temporary storage for the SrcOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<SrcOp, 8> TmpVec(Ops);
  return buildInstr(TargetOpcode::G_CONCAT_VECTORS, Res, TmpVec);
}

MachineInstrBuilder MachineIRBuilder::buildInsert(const DstOp &Res,
                                                  const SrcOp &Src,
                                                  const SrcOp &Op,
                                                  unsigned Index) {
  assert(Index + Op.getLLTTy(*getMRI()).getSizeInBits() <=
             Res.getLLTTy(*getMRI()).getSizeInBits() &&
         "insertion past the end of a register");

  if (Res.getLLTTy(*getMRI()).getSizeInBits() ==
      Op.getLLTTy(*getMRI()).getSizeInBits()) {
    return buildCast(Res, Op);
  }

  return buildInstr(TargetOpcode::G_INSERT, Res, {Src, Op, uint64_t(Index)});
}

MachineInstrBuilder MachineIRBuilder::buildStepVector(const DstOp &Res,
                                                      unsigned Step) {
  unsigned Bitwidth = Res.getLLTTy(*getMRI()).getElementType().getSizeInBits();
  ConstantInt *CI = ConstantInt::get(getMF().getFunction().getContext(),
                                     APInt(Bitwidth, Step));
  auto StepVector = buildInstr(TargetOpcode::G_STEP_VECTOR);
  StepVector->setDebugLoc(DebugLoc());
  Res.addDefToMIB(*getMRI(), StepVector);
  StepVector.addCImm(CI);
  return StepVector;
}

MachineInstrBuilder MachineIRBuilder::buildVScale(const DstOp &Res,
                                                  unsigned MinElts) {

  auto IntN = IntegerType::get(getMF().getFunction().getContext(),
                               Res.getLLTTy(*getMRI()).getScalarSizeInBits());
  ConstantInt *CI = ConstantInt::get(IntN, MinElts);
  return buildVScale(Res, *CI);
}

MachineInstrBuilder MachineIRBuilder::buildVScale(const DstOp &Res,
                                                  const ConstantInt &MinElts) {
  auto VScale = buildInstr(TargetOpcode::G_VSCALE);
  VScale->setDebugLoc(DebugLoc());
  Res.addDefToMIB(*getMRI(), VScale);
  VScale.addCImm(&MinElts);
  return VScale;
}

MachineInstrBuilder MachineIRBuilder::buildVScale(const DstOp &Res,
                                                  const APInt &MinElts) {
  ConstantInt *CI =
      ConstantInt::get(getMF().getFunction().getContext(), MinElts);
  return buildVScale(Res, *CI);
}

static unsigned getIntrinsicOpcode(bool HasSideEffects, bool IsConvergent) {
  if (HasSideEffects && IsConvergent)
    return TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS;
  if (HasSideEffects)
    return TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS;
  if (IsConvergent)
    return TargetOpcode::G_INTRINSIC_CONVERGENT;
  return TargetOpcode::G_INTRINSIC;
}

MachineInstrBuilder
MachineIRBuilder::buildIntrinsic(Intrinsic::ID ID,
                                 ArrayRef<Register> ResultRegs,
                                 bool HasSideEffects, bool isConvergent) {
  auto MIB = buildInstr(getIntrinsicOpcode(HasSideEffects, isConvergent));
  for (unsigned ResultReg : ResultRegs)
    MIB.addDef(ResultReg);
  MIB.addIntrinsicID(ID);
  return MIB;
}

MachineInstrBuilder
MachineIRBuilder::buildIntrinsic(Intrinsic::ID ID,
                                 ArrayRef<Register> ResultRegs) {
  auto Attrs = Intrinsic::getAttributes(getContext(), ID);
  bool HasSideEffects = !Attrs.getMemoryEffects().doesNotAccessMemory();
  bool isConvergent = Attrs.hasFnAttr(Attribute::Convergent);
  return buildIntrinsic(ID, ResultRegs, HasSideEffects, isConvergent);
}

MachineInstrBuilder MachineIRBuilder::buildIntrinsic(Intrinsic::ID ID,
                                                     ArrayRef<DstOp> Results,
                                                     bool HasSideEffects,
                                                     bool isConvergent) {
  auto MIB = buildInstr(getIntrinsicOpcode(HasSideEffects, isConvergent));
  for (DstOp Result : Results)
    Result.addDefToMIB(*getMRI(), MIB);
  MIB.addIntrinsicID(ID);
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::buildIntrinsic(Intrinsic::ID ID,
                                                     ArrayRef<DstOp> Results) {
  auto Attrs = Intrinsic::getAttributes(getContext(), ID);
  bool HasSideEffects = !Attrs.getMemoryEffects().doesNotAccessMemory();
  bool isConvergent = Attrs.hasFnAttr(Attribute::Convergent);
  return buildIntrinsic(ID, Results, HasSideEffects, isConvergent);
}
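
// Editorial note: a hedged usage sketch, not in the original source. The
// result registers and intrinsic ID come first; source operands are appended
// through the returned builder:
//
//   auto MIB = B.buildIntrinsic(IntrinID, {DstReg}); // IntrinID: any Intrinsic::ID
//   MIB.addUse(SrcReg); // operands follow the intrinsic ID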

MachineInstrBuilder
MachineIRBuilder::buildTrunc(const DstOp &Res, const SrcOp &Op,
                             std::optional<unsigned> Flags) {
  return buildInstr(TargetOpcode::G_TRUNC, Res, Op, Flags);
}

MachineInstrBuilder
MachineIRBuilder::buildFPTrunc(const DstOp &Res, const SrcOp &Op,
                               std::optional<unsigned> Flags) {
  return buildInstr(TargetOpcode::G_FPTRUNC, Res, Op, Flags);
}

MachineInstrBuilder MachineIRBuilder::buildICmp(CmpInst::Predicate Pred,
                                                const DstOp &Res,
                                                const SrcOp &Op0,
                                                const SrcOp &Op1,
                                                std::optional<unsigned> Flags) {
  return buildInstr(TargetOpcode::G_ICMP, Res, {Pred, Op0, Op1}, Flags);
}

MachineInstrBuilder MachineIRBuilder::buildFCmp(CmpInst::Predicate Pred,
                                                const DstOp &Res,
                                                const SrcOp &Op0,
                                                const SrcOp &Op1,
                                                std::optional<unsigned> Flags) {

  return buildInstr(TargetOpcode::G_FCMP, Res, {Pred, Op0, Op1}, Flags);
}

MachineInstrBuilder MachineIRBuilder::buildSCmp(const DstOp &Res,
                                                const SrcOp &Op0,
                                                const SrcOp &Op1) {
  return buildInstr(TargetOpcode::G_SCMP, Res, {Op0, Op1});
}

MachineInstrBuilder MachineIRBuilder::buildUCmp(const DstOp &Res,
                                                const SrcOp &Op0,
                                                const SrcOp &Op1) {
  return buildInstr(TargetOpcode::G_UCMP, Res, {Op0, Op1});
}

MachineInstrBuilder
MachineIRBuilder::buildSelect(const DstOp &Res, const SrcOp &Tst,
                              const SrcOp &Op0, const SrcOp &Op1,
                              std::optional<unsigned> Flags) {

  return buildInstr(TargetOpcode::G_SELECT, {Res}, {Tst, Op0, Op1}, Flags);
}

MachineInstrBuilder MachineIRBuilder::buildInsertSubvector(const DstOp &Res,
                                                           const SrcOp &Src0,
                                                           const SrcOp &Src1,
                                                           unsigned Idx) {
  return buildInstr(TargetOpcode::G_INSERT_SUBVECTOR, Res,
                    {Src0, Src1, uint64_t(Idx)});
}

MachineInstrBuilder MachineIRBuilder::buildExtractSubvector(const DstOp &Res,
                                                            const SrcOp &Src,
                                                            unsigned Idx) {
  return buildInstr(TargetOpcode::G_EXTRACT_SUBVECTOR, Res,
                    {Src, uint64_t(Idx)});
}

MachineInstrBuilder
MachineIRBuilder::buildInsertVectorElement(const DstOp &Res, const SrcOp &Val,
                                           const SrcOp &Elt, const SrcOp &Idx) {
  return buildInstr(TargetOpcode::G_INSERT_VECTOR_ELT, Res, {Val, Elt, Idx});
}

MachineInstrBuilder
MachineIRBuilder::buildExtractVectorElement(const DstOp &Res, const SrcOp &Val,
                                            const SrcOp &Idx) {
  return buildInstr(TargetOpcode::G_EXTRACT_VECTOR_ELT, Res, {Val, Idx});
}

MachineInstrBuilder MachineIRBuilder::buildAtomicCmpXchgWithSuccess(
    const DstOp &OldValRes, const DstOp &SuccessRes, const SrcOp &Addr,
    const SrcOp &CmpVal, const SrcOp &NewVal, MachineMemOperand &MMO) {
#ifndef NDEBUG
  LLT OldValResTy = OldValRes.getLLTTy(*getMRI());
  LLT SuccessResTy = SuccessRes.getLLTTy(*getMRI());
  LLT AddrTy = Addr.getLLTTy(*getMRI());
  LLT CmpValTy = CmpVal.getLLTTy(*getMRI());
  LLT NewValTy = NewVal.getLLTTy(*getMRI());
  assert(OldValResTy.isScalar() && "invalid operand type");
  assert(SuccessResTy.isScalar() && "invalid operand type");
  assert(AddrTy.isPointer() && "invalid operand type");
  assert(CmpValTy.isValid() && "invalid operand type");
  assert(NewValTy.isValid() && "invalid operand type");
  assert(OldValResTy == CmpValTy && "type mismatch");
  assert(OldValResTy == NewValTy && "type mismatch");
#endif

  auto MIB = buildInstr(TargetOpcode::G_ATOMIC_CMPXCHG_WITH_SUCCESS);
  OldValRes.addDefToMIB(*getMRI(), MIB);
  SuccessRes.addDefToMIB(*getMRI(), MIB);
  Addr.addSrcToMIB(MIB);
  CmpVal.addSrcToMIB(MIB);
  NewVal.addSrcToMIB(MIB);
  MIB.addMemOperand(&MMO);
  return MIB;
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicCmpXchg(const DstOp &OldValRes, const SrcOp &Addr,
                                     const SrcOp &CmpVal, const SrcOp &NewVal,
                                     MachineMemOperand &MMO) {
#ifndef NDEBUG
  LLT OldValResTy = OldValRes.getLLTTy(*getMRI());
  LLT AddrTy = Addr.getLLTTy(*getMRI());
  LLT CmpValTy = CmpVal.getLLTTy(*getMRI());
  LLT NewValTy = NewVal.getLLTTy(*getMRI());
  assert(OldValResTy.isScalar() && "invalid operand type");
  assert(AddrTy.isPointer() && "invalid operand type");
  assert(CmpValTy.isValid() && "invalid operand type");
  assert(NewValTy.isValid() && "invalid operand type");
  assert(OldValResTy == CmpValTy && "type mismatch");
  assert(OldValResTy == NewValTy && "type mismatch");
#endif

  auto MIB = buildInstr(TargetOpcode::G_ATOMIC_CMPXCHG);
  OldValRes.addDefToMIB(*getMRI(), MIB);
  Addr.addSrcToMIB(MIB);
  CmpVal.addSrcToMIB(MIB);
  NewVal.addSrcToMIB(MIB);
  MIB.addMemOperand(&MMO);
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::buildAtomicRMW(
    unsigned Opcode, const DstOp &OldValRes, const SrcOp &Addr,
    const SrcOp &Val, MachineMemOperand &MMO) {

#ifndef NDEBUG
  LLT OldValResTy = OldValRes.getLLTTy(*getMRI());
  LLT AddrTy = Addr.getLLTTy(*getMRI());
  LLT ValTy = Val.getLLTTy(*getMRI());
  assert(AddrTy.isPointer() && "invalid operand type");
  assert(ValTy.isValid() && "invalid operand type");
  assert(OldValResTy == ValTy && "type mismatch");
  assert(MMO.isAtomic() && "not atomic mem operand");
#endif

  auto MIB = buildInstr(Opcode);
  OldValRes.addDefToMIB(*getMRI(), MIB);
  Addr.addSrcToMIB(MIB);
  Val.addSrcToMIB(MIB);
  MIB.addMemOperand(&MMO);
  return MIB;
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWXchg(Register OldValRes, Register Addr,
                                     Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_XCHG, OldValRes, Addr, Val,
                        MMO);
}
MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWAdd(Register OldValRes, Register Addr,
                                    Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_ADD, OldValRes, Addr, Val,
                        MMO);
}
MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWSub(Register OldValRes, Register Addr,
                                    Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_SUB, OldValRes, Addr, Val,
                        MMO);
}
MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWAnd(Register OldValRes, Register Addr,
                                    Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_AND, OldValRes, Addr, Val,
                        MMO);
}
MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWNand(Register OldValRes, Register Addr,
                                     Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_NAND, OldValRes, Addr, Val,
                        MMO);
}
MachineInstrBuilder MachineIRBuilder::buildAtomicRMWOr(Register OldValRes,
                                                       Register Addr,
                                                       Register Val,
                                                       MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_OR, OldValRes, Addr, Val,
                        MMO);
}
MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWXor(Register OldValRes, Register Addr,
                                    Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_XOR, OldValRes, Addr, Val,
                        MMO);
}
MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWMax(Register OldValRes, Register Addr,
                                    Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_MAX, OldValRes, Addr, Val,
                        MMO);
}
MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWMin(Register OldValRes, Register Addr,
                                    Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_MIN, OldValRes, Addr, Val,
                        MMO);
}
MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWUmax(Register OldValRes, Register Addr,
                                     Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_UMAX, OldValRes, Addr, Val,
                        MMO);
}
MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWUmin(Register OldValRes, Register Addr,
                                     Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_UMIN, OldValRes, Addr, Val,
                        MMO);
}
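
// Editorial note: a hedged usage sketch, not in the original source. The MMO
// must be atomic; one plausible way to build it (overload per the author's
// reading of MachineFunction::getMachineMemOperand) for a monotonic 32-bit
// read-modify-write:
//
//   MachineMemOperand *MMO = MF.getMachineMemOperand(
//       PtrInfo, MachineMemOperand::MOLoad | MachineMemOperand::MOStore,
//       LLT::scalar(32), Align(4), AAMDNodes(), nullptr, SyncScope::System,
//       AtomicOrdering::Monotonic);
//   B.buildAtomicRMWAdd(OldVal, AddrReg, ValReg, *MMO);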

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWFAdd(const DstOp &OldValRes, const SrcOp &Addr,
                                     const SrcOp &Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_FADD, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWFSub(const DstOp &OldValRes, const SrcOp &Addr,
                                     const SrcOp &Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_FSUB, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWFMax(const DstOp &OldValRes, const SrcOp &Addr,
                                     const SrcOp &Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_FMAX, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWFMin(const DstOp &OldValRes, const SrcOp &Addr,
                                     const SrcOp &Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_FMIN, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildFence(unsigned Ordering, unsigned Scope) {
  return buildInstr(TargetOpcode::G_FENCE)
      .addImm(Ordering)
      .addImm(Scope);
}
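
// Editorial note: an illustrative call, not in the original source. The
// raw-unsigned parameters are typically fed from the IR-level enums:
//
//   B.buildFence(static_cast<unsigned>(AtomicOrdering::SequentiallyConsistent),
//                static_cast<unsigned>(SyncScope::System));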

MachineInstrBuilder MachineIRBuilder::buildPrefetch(const SrcOp &Addr,
                                                    unsigned RW,
                                                    unsigned Locality,
                                                    unsigned CacheType,
                                                    MachineMemOperand &MMO) {
  auto MIB = buildInstr(TargetOpcode::G_PREFETCH);
  Addr.addSrcToMIB(MIB);
  MIB.addImm(RW).addImm(Locality).addImm(CacheType);
  MIB.addMemOperand(&MMO);
  return MIB;
}

MachineInstrBuilder
MachineIRBuilder::buildBlockAddress(Register Res, const BlockAddress *BA) {
#ifndef NDEBUG
  assert(getMRI()->getType(Res).isPointer() && "invalid res type");
#endif

  return buildInstr(TargetOpcode::G_BLOCK_ADDR).addDef(Res).addBlockAddress(BA);
}

void MachineIRBuilder::validateTruncExt(const LLT DstTy, const LLT SrcTy,
                                        bool IsExtend) {
#ifndef NDEBUG
  if (DstTy.isVector()) {
    assert(SrcTy.isVector() && "mismatched cast between vector and non-vector");
    assert(SrcTy.getElementCount() == DstTy.getElementCount() &&
           "different number of elements in a trunc/ext");
  } else
    assert(DstTy.isScalar() && SrcTy.isScalar() && "invalid extend/trunc");

  if (IsExtend)
    assert(TypeSize::isKnownGT(DstTy.getSizeInBits(), SrcTy.getSizeInBits()) &&
           "invalid narrowing extend");
  else
    assert(TypeSize::isKnownLT(DstTy.getSizeInBits(), SrcTy.getSizeInBits()) &&
           "invalid widening trunc");
#endif
}

void MachineIRBuilder::validateSelectOp(const LLT ResTy, const LLT TstTy,
                                        const LLT Op0Ty, const LLT Op1Ty) {
#ifndef NDEBUG
  assert((ResTy.isScalar() || ResTy.isVector() || ResTy.isPointer()) &&
         "invalid operand type");
  assert((ResTy == Op0Ty && ResTy == Op1Ty) && "type mismatch");
  if (ResTy.isScalar() || ResTy.isPointer())
    assert(TstTy.isScalar() && "type mismatch");
  else
    assert((TstTy.isScalar() ||
            (TstTy.isVector() &&
             TstTy.getElementCount() == Op0Ty.getElementCount())) &&
           "type mismatch");
#endif
}
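
// Editorial note: a hedged illustration, not in the original source. The
// generic buildInstr below is what the typed helpers funnel into; it can be
// called directly for ops without a dedicated helper, optionally with
// MachineInstr MIFlags:
//
//   B.buildInstr(TargetOpcode::G_FNEG, {S32Ty}, {SrcReg},
//                MachineInstr::FmNoNans); // optional flags operand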
1220 
1221 MachineInstrBuilder
1222 MachineIRBuilder::buildInstr(unsigned Opc, ArrayRef<DstOp> DstOps,
1223                              ArrayRef<SrcOp> SrcOps,
1224                              std::optional<unsigned> Flags) {
1225   switch (Opc) {
1226   default:
1227     break;
1228   case TargetOpcode::G_SELECT: {
1229     assert(DstOps.size() == 1 && "Invalid select");
1230     assert(SrcOps.size() == 3 && "Invalid select");
1231     validateSelectOp(
1232         DstOps[0].getLLTTy(*getMRI()), SrcOps[0].getLLTTy(*getMRI()),
1233         SrcOps[1].getLLTTy(*getMRI()), SrcOps[2].getLLTTy(*getMRI()));
1234     break;
1235   }
1236   case TargetOpcode::G_FNEG:
1237   case TargetOpcode::G_ABS:
1238     // All these are unary ops.
1239     assert(DstOps.size() == 1 && "Invalid Dst");
1240     assert(SrcOps.size() == 1 && "Invalid Srcs");
1241     validateUnaryOp(DstOps[0].getLLTTy(*getMRI()),
1242                     SrcOps[0].getLLTTy(*getMRI()));
1243     break;
1244   case TargetOpcode::G_ADD:
1245   case TargetOpcode::G_AND:
1246   case TargetOpcode::G_MUL:
1247   case TargetOpcode::G_OR:
1248   case TargetOpcode::G_SUB:
1249   case TargetOpcode::G_XOR:
1250   case TargetOpcode::G_UDIV:
1251   case TargetOpcode::G_SDIV:
1252   case TargetOpcode::G_UREM:
1253   case TargetOpcode::G_SREM:
1254   case TargetOpcode::G_SMIN:
1255   case TargetOpcode::G_SMAX:
1256   case TargetOpcode::G_UMIN:
1257   case TargetOpcode::G_UMAX:
1258   case TargetOpcode::G_UADDSAT:
1259   case TargetOpcode::G_SADDSAT:
1260   case TargetOpcode::G_USUBSAT:
1261   case TargetOpcode::G_SSUBSAT: {
1262     // All these are binary ops.
1263     assert(DstOps.size() == 1 && "Invalid Dst");
1264     assert(SrcOps.size() == 2 && "Invalid Srcs");
1265     validateBinaryOp(DstOps[0].getLLTTy(*getMRI()),
1266                      SrcOps[0].getLLTTy(*getMRI()),
1267                      SrcOps[1].getLLTTy(*getMRI()));
1268     break;
1269   }
1270   case TargetOpcode::G_SHL:
1271   case TargetOpcode::G_ASHR:
1272   case TargetOpcode::G_LSHR:
1273   case TargetOpcode::G_USHLSAT:
1274   case TargetOpcode::G_SSHLSAT: {
1275     assert(DstOps.size() == 1 && "Invalid Dst");
1276     assert(SrcOps.size() == 2 && "Invalid Srcs");
1277     validateShiftOp(DstOps[0].getLLTTy(*getMRI()),
1278                     SrcOps[0].getLLTTy(*getMRI()),
1279                     SrcOps[1].getLLTTy(*getMRI()));
1280     break;
1281   }
  case TargetOpcode::G_SEXT:
  case TargetOpcode::G_ZEXT:
  case TargetOpcode::G_ANYEXT:
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(SrcOps.size() == 1 && "Invalid Srcs");
    validateTruncExt(DstOps[0].getLLTTy(*getMRI()),
                     SrcOps[0].getLLTTy(*getMRI()), true);
    break;
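  // G_TRUNC/G_FPTRUNC take the IsExtend = false path, so the destination
  // must be known-narrower than the source. Sketch (WideReg is a
  // hypothetical register holding an s64 value):
  //   auto Trunc = MIB.buildInstr(TargetOpcode::G_TRUNC, {LLT::scalar(32)},
  //                               {WideReg});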
  case TargetOpcode::G_TRUNC:
  case TargetOpcode::G_FPTRUNC: {
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(SrcOps.size() == 1 && "Invalid Srcs");
    validateTruncExt(DstOps[0].getLLTTy(*getMRI()),
                     SrcOps[0].getLLTTy(*getMRI()), false);
    break;
  }
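  // G_BITCAST only requires the two bit widths to match; the element
  // structure may change freely. Sketch (VecReg is a hypothetical register
  // holding a <2 x s32> value):
  //   auto Cast = MIB.buildInstr(TargetOpcode::G_BITCAST, {LLT::scalar(64)},
  //                              {VecReg});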
  case TargetOpcode::G_BITCAST: {
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(SrcOps.size() == 1 && "Invalid Srcs");
    assert(DstOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
           SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() && "invalid bitcast");
    break;
  }
  case TargetOpcode::COPY:
    assert(DstOps.size() == 1 && "Invalid Dst");
    // If the caller wants to add a subreg source it has to be done
    // separately, so we may not have any SrcOps at this point yet.
    break;
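  // Compares carry their predicate as the first SrcOp, so a raw buildInstr
  // call looks like the sketch below (LHS and RHS are hypothetical s32
  // registers; the buildICmp/buildFCmp wrappers produce the same layout):
  //   auto Cmp = MIB.buildInstr(TargetOpcode::G_ICMP, {LLT::scalar(1)},
  //                             {CmpInst::ICMP_EQ, LHS, RHS});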
  case TargetOpcode::G_FCMP:
  case TargetOpcode::G_ICMP: {
    assert(DstOps.size() == 1 && "Invalid Dst Operands");
    assert(SrcOps.size() == 3 && "Invalid Src Operands");
    // For G_FCMP/G_ICMP, the first src operand is the predicate, followed by
    // the two comparands.
    assert(SrcOps[0].getSrcOpKind() == SrcOp::SrcType::Ty_Predicate &&
           "Expecting predicate");
    assert([&]() -> bool {
      CmpInst::Predicate Pred = SrcOps[0].getPredicate();
      return Opc == TargetOpcode::G_ICMP ? CmpInst::isIntPredicate(Pred)
                                         : CmpInst::isFPPredicate(Pred);
    }() && "Invalid predicate");
    assert(SrcOps[1].getLLTTy(*getMRI()) == SrcOps[2].getLLTTy(*getMRI()) &&
           "Type mismatch");
    assert([&]() -> bool {
      LLT Op0Ty = SrcOps[1].getLLTTy(*getMRI());
      LLT DstTy = DstOps[0].getLLTTy(*getMRI());
      if (Op0Ty.isScalar() || Op0Ty.isPointer())
        return DstTy.isScalar();
      return DstTy.isVector() &&
             DstTy.getElementCount() == Op0Ty.getElementCount();
    }() && "Type mismatch");
    break;
  }
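  // G_UNMERGE_VALUES splits one register into N like-typed pieces, so the
  // piece count times the piece size must equal the input size. Sketch
  // (WideReg is a hypothetical register holding an s64 value):
  //   LLT S32 = LLT::scalar(32);
  //   auto Parts = MIB.buildInstr(TargetOpcode::G_UNMERGE_VALUES,
  //                               {S32, S32}, {WideReg});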
  case TargetOpcode::G_UNMERGE_VALUES: {
    assert(!DstOps.empty() && "Invalid trivial sequence");
    assert(SrcOps.size() == 1 && "Invalid src for Unmerge");
    assert(llvm::all_of(DstOps,
                        [&, this](const DstOp &Op) {
                          return Op.getLLTTy(*getMRI()) ==
                                 DstOps[0].getLLTTy(*getMRI());
                        }) &&
           "type mismatch in output list");
    assert((TypeSize::ScalarTy)DstOps.size() *
                   DstOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
               SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
           "output operands do not exactly cover the input register");
    break;
  }
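  // G_MERGE_VALUES is the inverse: N like-typed scalar sources concatenate
  // into one wider scalar. Sketch (Lo and Hi are hypothetical registers
  // holding s32 values):
  //   auto Wide = MIB.buildInstr(TargetOpcode::G_MERGE_VALUES,
  //                              {LLT::scalar(64)}, {Lo, Hi});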
  case TargetOpcode::G_MERGE_VALUES: {
    assert(SrcOps.size() >= 2 && "invalid trivial sequence");
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(llvm::all_of(SrcOps,
                        [&, this](const SrcOp &Op) {
                          return Op.getLLTTy(*getMRI()) ==
                                 SrcOps[0].getLLTTy(*getMRI());
                        }) &&
           "type mismatch in input list");
    assert((TypeSize::ScalarTy)SrcOps.size() *
                   SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
               DstOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
           "input operands do not exactly cover the output register");
    assert(!DstOps[0].getLLTTy(*getMRI()).isVector() &&
           "vectors should be built with G_CONCAT_VECTORS or G_BUILD_VECTOR");
    break;
  }
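  // G_EXTRACT_VECTOR_ELT pulls one element out of a vector; the result type
  // must equal the element type. Sketch (Vec is a hypothetical <4 x s32>
  // register and Idx a scalar index register):
  //   auto Elt = MIB.buildInstr(TargetOpcode::G_EXTRACT_VECTOR_ELT,
  //                             {LLT::scalar(32)}, {Vec, Idx});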
  case TargetOpcode::G_EXTRACT_VECTOR_ELT: {
    assert(DstOps.size() == 1 && "Invalid Dst size");
    assert(SrcOps.size() == 2 && "Invalid Src size");
    assert(SrcOps[0].getLLTTy(*getMRI()).isVector() && "Invalid operand type");
    assert((DstOps[0].getLLTTy(*getMRI()).isScalar() ||
            DstOps[0].getLLTTy(*getMRI()).isPointer()) &&
           "Invalid operand type");
    assert(SrcOps[1].getLLTTy(*getMRI()).isScalar() && "Invalid operand type");
    assert(SrcOps[0].getLLTTy(*getMRI()).getElementType() ==
               DstOps[0].getLLTTy(*getMRI()) &&
           "Type mismatch");
    break;
  }
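  // G_INSERT_VECTOR_ELT writes one element into a vector and yields the
  // updated vector; source and result vectors must have the same element
  // count. Sketch (hypothetical Vec <4 x s32>, Elt s32, Idx scalar):
  //   auto NewVec = MIB.buildInstr(TargetOpcode::G_INSERT_VECTOR_ELT,
  //                                {LLT::fixed_vector(4, 32)},
  //                                {Vec, Elt, Idx});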
  case TargetOpcode::G_INSERT_VECTOR_ELT: {
    assert(DstOps.size() == 1 && "Invalid dst size");
    assert(SrcOps.size() == 3 && "Invalid src size");
    assert(DstOps[0].getLLTTy(*getMRI()).isVector() &&
           SrcOps[0].getLLTTy(*getMRI()).isVector() && "Invalid operand type");
    assert(DstOps[0].getLLTTy(*getMRI()).getElementType() ==
               SrcOps[1].getLLTTy(*getMRI()) &&
           "Type mismatch");
    assert(SrcOps[2].getLLTTy(*getMRI()).isScalar() && "Invalid index");
    assert(DstOps[0].getLLTTy(*getMRI()).getElementCount() ==
               SrcOps[0].getLLTTy(*getMRI()).getElementCount() &&
           "Type mismatch");
    break;
  }
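  // G_BUILD_VECTOR assembles a vector from scalar sources whose combined
  // size exactly covers the result. Sketch (A, B, C, D are hypothetical
  // registers holding s32 values):
  //   auto Vec = MIB.buildInstr(TargetOpcode::G_BUILD_VECTOR,
  //                             {LLT::fixed_vector(4, 32)}, {A, B, C, D});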
  case TargetOpcode::G_BUILD_VECTOR: {
    assert(SrcOps.size() >= 2 && "Must have at least 2 operands");
    assert(DstOps.size() == 1 && "Invalid DstOps");
    assert(DstOps[0].getLLTTy(*getMRI()).isVector() &&
           "Res type must be a vector");
    assert(llvm::all_of(SrcOps,
                        [&, this](const SrcOp &Op) {
                          return Op.getLLTTy(*getMRI()) ==
                                 SrcOps[0].getLLTTy(*getMRI());
                        }) &&
           "type mismatch in input list");
    assert((TypeSize::ScalarTy)SrcOps.size() *
                   SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
               DstOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
           "input scalars do not exactly cover the output vector register");
    break;
  }
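  // G_BUILD_VECTOR_TRUNC is the same idea, except each (wider) scalar source
  // is truncated into an element, so the size-cover check from
  // G_BUILD_VECTOR does not apply; only the sources' types must agree with
  // each other.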
  case TargetOpcode::G_BUILD_VECTOR_TRUNC: {
    assert(SrcOps.size() >= 2 && "Must have at least 2 operands");
    assert(DstOps.size() == 1 && "Invalid DstOps");
    assert(DstOps[0].getLLTTy(*getMRI()).isVector() &&
           "Res type must be a vector");
    assert(llvm::all_of(SrcOps,
                        [&, this](const SrcOp &Op) {
                          return Op.getLLTTy(*getMRI()) ==
                                 SrcOps[0].getLLTTy(*getMRI());
                        }) &&
           "type mismatch in input list");
    break;
  }
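  // G_CONCAT_VECTORS glues like-typed source vectors end to end. Sketch
  // (V0 and V1 are hypothetical registers, each holding a <2 x s32> value):
  //   auto Cat = MIB.buildInstr(TargetOpcode::G_CONCAT_VECTORS,
  //                             {LLT::fixed_vector(4, 32)}, {V0, V1});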
  case TargetOpcode::G_CONCAT_VECTORS: {
    assert(DstOps.size() == 1 && "Invalid DstOps");
    assert(SrcOps.size() >= 2 && "Must have at least 2 operands");
    assert(llvm::all_of(SrcOps,
                        [&, this](const SrcOp &Op) {
                          return (Op.getLLTTy(*getMRI()).isVector() &&
                                  Op.getLLTTy(*getMRI()) ==
                                      SrcOps[0].getLLTTy(*getMRI()));
                        }) &&
           "type mismatch in input list");
    assert((TypeSize::ScalarTy)SrcOps.size() *
                   SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
               DstOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
           "input vectors do not exactly cover the output vector register");
    break;
  }
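  // G_UADDE is a multi-def opcode: it produces a sum and a carry-out, and
  // consumes two addends plus a carry-in whose type must match the
  // carry-out. Sketch (A, B are hypothetical s32 registers, CarryIn an s1):
  //   auto Add = MIB.buildInstr(TargetOpcode::G_UADDE,
  //                             {LLT::scalar(32), LLT::scalar(1)},
  //                             {A, B, CarryIn});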
  case TargetOpcode::G_UADDE: {
    assert(DstOps.size() == 2 && "Invalid number of dst operands");
    assert(SrcOps.size() == 3 && "Invalid number of src operands");
    assert(DstOps[0].getLLTTy(*getMRI()).isScalar() && "Invalid operand");
    assert((DstOps[0].getLLTTy(*getMRI()) == SrcOps[0].getLLTTy(*getMRI())) &&
           (DstOps[0].getLLTTy(*getMRI()) == SrcOps[1].getLLTTy(*getMRI())) &&
           "Invalid operand");
    assert(DstOps[1].getLLTTy(*getMRI()).isScalar() && "Invalid operand");
    assert(DstOps[1].getLLTTy(*getMRI()) == SrcOps[2].getLLTTy(*getMRI()) &&
           "type mismatch");
    break;
  }
  }

  auto MIB = buildInstr(Opc);
  for (const DstOp &Op : DstOps)
    Op.addDefToMIB(*getMRI(), MIB);
  for (const SrcOp &Op : SrcOps)
    Op.addSrcToMIB(MIB);
  if (Flags)
    MIB->setFlags(*Flags);
  return MIB;
}
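// After validation, the tail above appends every DstOp as a def and every
// SrcOp as a use, in order, then applies any requested MachineInstr flags.
// A sketch of a flag-carrying call (A and B are hypothetical s32 registers):
//   auto Add = MIB.buildInstr(TargetOpcode::G_ADD, {LLT::scalar(32)}, {A, B},
//                             MachineInstr::NoSWrap);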