xref: /llvm-project/llvm/lib/Target/Hexagon/HexagonInstrInfo.cpp (revision a9d2834508e276d0a3cc09ac549132b56796e87f)
1 //===- HexagonInstrInfo.cpp - Hexagon Instruction Information -------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file contains the Hexagon implementation of the TargetInstrInfo class.
10 //
11 //===----------------------------------------------------------------------===//
12 
13 #include "HexagonInstrInfo.h"
14 #include "HexagonFrameLowering.h"
15 #include "HexagonHazardRecognizer.h"
16 #include "HexagonRegisterInfo.h"
17 #include "HexagonSubtarget.h"
18 #include "llvm/ADT/ArrayRef.h"
19 #include "llvm/ADT/SmallPtrSet.h"
20 #include "llvm/ADT/SmallVector.h"
21 #include "llvm/ADT/StringExtras.h"
22 #include "llvm/ADT/StringRef.h"
23 #include "llvm/CodeGen/DFAPacketizer.h"
24 #include "llvm/CodeGen/LiveIntervals.h"
25 #include "llvm/CodeGen/LivePhysRegs.h"
26 #include "llvm/CodeGen/MachineBasicBlock.h"
27 #include "llvm/CodeGen/MachineBranchProbabilityInfo.h"
28 #include "llvm/CodeGen/MachineFrameInfo.h"
29 #include "llvm/CodeGen/MachineFunction.h"
30 #include "llvm/CodeGen/MachineInstr.h"
31 #include "llvm/CodeGen/MachineInstrBuilder.h"
32 #include "llvm/CodeGen/MachineInstrBundle.h"
33 #include "llvm/CodeGen/MachineMemOperand.h"
34 #include "llvm/CodeGen/MachineOperand.h"
35 #include "llvm/CodeGen/MachineRegisterInfo.h"
36 #include "llvm/CodeGen/ScheduleDAG.h"
37 #include "llvm/CodeGen/TargetInstrInfo.h"
38 #include "llvm/CodeGen/TargetOpcodes.h"
39 #include "llvm/CodeGen/TargetRegisterInfo.h"
40 #include "llvm/CodeGen/TargetSubtargetInfo.h"
41 #include "llvm/CodeGenTypes/MachineValueType.h"
42 #include "llvm/IR/DebugLoc.h"
43 #include "llvm/IR/GlobalVariable.h"
44 #include "llvm/MC/MCAsmInfo.h"
45 #include "llvm/MC/MCInstBuilder.h"
46 #include "llvm/MC/MCInstrDesc.h"
47 #include "llvm/MC/MCInstrItineraries.h"
48 #include "llvm/Support/BranchProbability.h"
49 #include "llvm/Support/CommandLine.h"
50 #include "llvm/Support/Debug.h"
51 #include "llvm/Support/ErrorHandling.h"
52 #include "llvm/Support/MathExtras.h"
53 #include "llvm/Support/raw_ostream.h"
54 #include "llvm/Target/TargetMachine.h"
55 #include <cassert>
56 #include <cctype>
57 #include <cstdint>
58 #include <cstring>
59 #include <iterator>
60 #include <optional>
61 #include <string>
62 #include <utility>
63 
64 using namespace llvm;
65 
66 #define DEBUG_TYPE "hexagon-instrinfo"
67 
68 #define GET_INSTRINFO_CTOR_DTOR
69 #define GET_INSTRMAP_INFO
70 #include "HexagonDepTimingClasses.h"
71 #include "HexagonGenDFAPacketizer.inc"
72 #include "HexagonGenInstrInfo.inc"
73 
74 cl::opt<bool> ScheduleInlineAsm("hexagon-sched-inline-asm", cl::Hidden,
75   cl::init(false), cl::desc("Do not consider inline-asm a scheduling/"
76                             "packetization boundary."));
77 
78 static cl::opt<bool> EnableBranchPrediction("hexagon-enable-branch-prediction",
79   cl::Hidden, cl::init(true), cl::desc("Enable branch prediction"));
80 
81 static cl::opt<bool> DisableNVSchedule(
82     "disable-hexagon-nv-schedule", cl::Hidden,
83     cl::desc("Disable schedule adjustment for new value stores."));
84 
85 static cl::opt<bool> EnableTimingClassLatency(
86   "enable-timing-class-latency", cl::Hidden, cl::init(false),
87   cl::desc("Enable timing class latency"));
88 
89 static cl::opt<bool> EnableALUForwarding(
90   "enable-alu-forwarding", cl::Hidden, cl::init(true),
91   cl::desc("Enable vec alu forwarding"));
92 
93 static cl::opt<bool> EnableACCForwarding(
94   "enable-acc-forwarding", cl::Hidden, cl::init(true),
95   cl::desc("Enable vec acc forwarding"));
96 
97 static cl::opt<bool> BranchRelaxAsmLarge("branch-relax-asm-large",
98                                          cl::init(true), cl::Hidden,
99                                          cl::desc("branch relax asm"));
100 
101 static cl::opt<bool>
102     UseDFAHazardRec("dfa-hazard-rec", cl::init(true), cl::Hidden,
103                     cl::desc("Use the DFA based hazard recognizer."));
104 
105 /// Constants for Hexagon instructions.
106 const int Hexagon_MEMW_OFFSET_MAX = 4095;
107 const int Hexagon_MEMW_OFFSET_MIN = -4096;
108 const int Hexagon_MEMD_OFFSET_MAX = 8191;
109 const int Hexagon_MEMD_OFFSET_MIN = -8192;
110 const int Hexagon_MEMH_OFFSET_MAX = 2047;
111 const int Hexagon_MEMH_OFFSET_MIN = -2048;
112 const int Hexagon_MEMB_OFFSET_MAX = 1023;
113 const int Hexagon_MEMB_OFFSET_MIN = -1024;
114 const int Hexagon_ADDI_OFFSET_MAX = 32767;
115 const int Hexagon_ADDI_OFFSET_MIN = -32768;
116 
117 // Pin the vtable to this file.
118 void HexagonInstrInfo::anchor() {}
119 
120 HexagonInstrInfo::HexagonInstrInfo(HexagonSubtarget &ST)
121   : HexagonGenInstrInfo(Hexagon::ADJCALLSTACKDOWN, Hexagon::ADJCALLSTACKUP),
122     Subtarget(ST) {}
123 
124 namespace llvm {
125 namespace HexagonFUnits {
126   bool isSlot0Only(unsigned units);
127 }
128 }
129 
130 static bool isIntRegForSubInst(Register Reg) {
131   return (Reg >= Hexagon::R0 && Reg <= Hexagon::R7) ||
132          (Reg >= Hexagon::R16 && Reg <= Hexagon::R23);
133 }
134 
135 static bool isDblRegForSubInst(Register Reg, const HexagonRegisterInfo &HRI) {
136   return isIntRegForSubInst(HRI.getSubReg(Reg, Hexagon::isub_lo)) &&
137          isIntRegForSubInst(HRI.getSubReg(Reg, Hexagon::isub_hi));
138 }
139 
140 /// Calculate number of instructions excluding the debug instructions.
141 static unsigned nonDbgMICount(MachineBasicBlock::const_instr_iterator MIB,
142                               MachineBasicBlock::const_instr_iterator MIE) {
143   unsigned Count = 0;
144   for (; MIB != MIE; ++MIB) {
145     if (!MIB->isDebugInstr())
146       ++Count;
147   }
148   return Count;
149 }
150 
151 // Check if the A2_tfrsi instruction is cheap or not. If the operand has
152 // to be constant-extendend it is not cheap since it occupies two slots
153 // in a packet.
154 bool HexagonInstrInfo::isAsCheapAsAMove(const MachineInstr &MI) const {
155   // Enable the following steps only at Os/Oz
156   if (!(MI.getMF()->getFunction().hasOptSize()))
157     return MI.isAsCheapAsAMove();
158 
159   if (MI.getOpcode() == Hexagon::A2_tfrsi) {
160     auto Op = MI.getOperand(1);
161     // If the instruction has a global address as operand, it is not cheap
162     // since the operand will be constant extended.
163     if (Op.isGlobal())
164       return false;
165     // If the instruction has an operand of size > 16bits, its will be
166     // const-extended and hence, it is not cheap.
167     if (Op.isImm()) {
168       int64_t Imm = Op.getImm();
169       if (!isInt<16>(Imm))
170         return false;
171     }
172   }
173   return MI.isAsCheapAsAMove();
174 }
175 
176 // Do not sink floating point instructions that updates USR register.
177 // Example:
178 //    feclearexcept
179 //    F2_conv_w2sf
180 //    fetestexcept
181 // MachineSink sinks F2_conv_w2sf and we are not able to catch exceptions.
182 // TODO: On some of these floating point instructions, USR is marked as Use.
183 // In reality, these instructions also Def the USR. If USR is marked as Def,
184 // some of the assumptions in assembler packetization are broken.
185 bool HexagonInstrInfo::shouldSink(const MachineInstr &MI) const {
186   // Assumption: A floating point instruction that reads the USR will write
187   // the USR as well.
188   if (isFloat(MI) && MI.hasRegisterImplicitUseOperand(Hexagon::USR))
189     return false;
190   return true;
191 }
192 
193 /// Find the hardware loop instruction used to set-up the specified loop.
194 /// On Hexagon, we have two instructions used to set-up the hardware loop
195 /// (LOOP0, LOOP1) with corresponding endloop (ENDLOOP0, ENDLOOP1) instructions
196 /// to indicate the end of a loop.
197 MachineInstr *HexagonInstrInfo::findLoopInstr(MachineBasicBlock *BB,
198       unsigned EndLoopOp, MachineBasicBlock *TargetBB,
199       SmallPtrSet<MachineBasicBlock *, 8> &Visited) const {
200   unsigned LOOPi;
201   unsigned LOOPr;
202   if (EndLoopOp == Hexagon::ENDLOOP0) {
203     LOOPi = Hexagon::J2_loop0i;
204     LOOPr = Hexagon::J2_loop0r;
205   } else { // EndLoopOp == Hexagon::EndLOOP1
206     LOOPi = Hexagon::J2_loop1i;
207     LOOPr = Hexagon::J2_loop1r;
208   }
209 
210   // The loop set-up instruction will be in a predecessor block
211   for (MachineBasicBlock *PB : BB->predecessors()) {
212     // If this has been visited, already skip it.
213     if (!Visited.insert(PB).second)
214       continue;
215     if (PB == BB)
216       continue;
217     for (MachineInstr &I : llvm::reverse(PB->instrs())) {
218       unsigned Opc = I.getOpcode();
219       if (Opc == LOOPi || Opc == LOOPr)
220         return &I;
221       // We've reached a different loop, which means the loop01 has been
222       // removed.
223       if (Opc == EndLoopOp && I.getOperand(0).getMBB() != TargetBB)
224         return nullptr;
225     }
226     // Check the predecessors for the LOOP instruction.
227     if (MachineInstr *Loop = findLoopInstr(PB, EndLoopOp, TargetBB, Visited))
228       return Loop;
229   }
230   return nullptr;
231 }
232 
233 /// Gather register def/uses from MI.
234 /// This treats possible (predicated) defs as actually happening ones
235 /// (conservatively).
236 static inline void parseOperands(const MachineInstr &MI,
237       SmallVectorImpl<Register> &Defs, SmallVectorImpl<Register> &Uses) {
238   Defs.clear();
239   Uses.clear();
240 
241   for (const MachineOperand &MO : MI.operands()) {
242     if (!MO.isReg())
243       continue;
244 
245     Register Reg = MO.getReg();
246     if (!Reg)
247       continue;
248 
249     if (MO.isUse())
250       Uses.push_back(MO.getReg());
251 
252     if (MO.isDef())
253       Defs.push_back(MO.getReg());
254   }
255 }
256 
257 // Position dependent, so check twice for swap.
258 static bool isDuplexPairMatch(unsigned Ga, unsigned Gb) {
259   switch (Ga) {
260   case HexagonII::HSIG_None:
261   default:
262     return false;
263   case HexagonII::HSIG_L1:
264     return (Gb == HexagonII::HSIG_L1 || Gb == HexagonII::HSIG_A);
265   case HexagonII::HSIG_L2:
266     return (Gb == HexagonII::HSIG_L1 || Gb == HexagonII::HSIG_L2 ||
267             Gb == HexagonII::HSIG_A);
268   case HexagonII::HSIG_S1:
269     return (Gb == HexagonII::HSIG_L1 || Gb == HexagonII::HSIG_L2 ||
270             Gb == HexagonII::HSIG_S1 || Gb == HexagonII::HSIG_A);
271   case HexagonII::HSIG_S2:
272     return (Gb == HexagonII::HSIG_L1 || Gb == HexagonII::HSIG_L2 ||
273             Gb == HexagonII::HSIG_S1 || Gb == HexagonII::HSIG_S2 ||
274             Gb == HexagonII::HSIG_A);
275   case HexagonII::HSIG_A:
276     return (Gb == HexagonII::HSIG_A);
277   case HexagonII::HSIG_Compound:
278     return (Gb == HexagonII::HSIG_Compound);
279   }
280   return false;
281 }
282 
283 /// isLoadFromStackSlot - If the specified machine instruction is a direct
284 /// load from a stack slot, return the virtual or physical register number of
285 /// the destination along with the FrameIndex of the loaded stack slot.  If
286 /// not, return 0.  This predicate must return 0 if the instruction has
287 /// any side effects other than loading from the stack slot.
288 Register HexagonInstrInfo::isLoadFromStackSlot(const MachineInstr &MI,
289                                                int &FrameIndex) const {
290   switch (MI.getOpcode()) {
291     default:
292       break;
293     case Hexagon::L2_loadri_io:
294     case Hexagon::L2_loadrd_io:
295     case Hexagon::V6_vL32b_ai:
296     case Hexagon::V6_vL32b_nt_ai:
297     case Hexagon::V6_vL32Ub_ai:
298     case Hexagon::LDriw_pred:
299     case Hexagon::LDriw_ctr:
300     case Hexagon::PS_vloadrq_ai:
301     case Hexagon::PS_vloadrw_ai:
302     case Hexagon::PS_vloadrw_nt_ai: {
303       const MachineOperand OpFI = MI.getOperand(1);
304       if (!OpFI.isFI())
305         return 0;
306       const MachineOperand OpOff = MI.getOperand(2);
307       if (!OpOff.isImm() || OpOff.getImm() != 0)
308         return 0;
309       FrameIndex = OpFI.getIndex();
310       return MI.getOperand(0).getReg();
311     }
312 
313     case Hexagon::L2_ploadrit_io:
314     case Hexagon::L2_ploadrif_io:
315     case Hexagon::L2_ploadrdt_io:
316     case Hexagon::L2_ploadrdf_io: {
317       const MachineOperand OpFI = MI.getOperand(2);
318       if (!OpFI.isFI())
319         return 0;
320       const MachineOperand OpOff = MI.getOperand(3);
321       if (!OpOff.isImm() || OpOff.getImm() != 0)
322         return 0;
323       FrameIndex = OpFI.getIndex();
324       return MI.getOperand(0).getReg();
325     }
326   }
327 
328   return 0;
329 }
330 
331 /// isStoreToStackSlot - If the specified machine instruction is a direct
332 /// store to a stack slot, return the virtual or physical register number of
333 /// the source reg along with the FrameIndex of the loaded stack slot.  If
334 /// not, return 0.  This predicate must return 0 if the instruction has
335 /// any side effects other than storing to the stack slot.
336 Register HexagonInstrInfo::isStoreToStackSlot(const MachineInstr &MI,
337                                               int &FrameIndex) const {
338   switch (MI.getOpcode()) {
339     default:
340       break;
341     case Hexagon::S2_storerb_io:
342     case Hexagon::S2_storerh_io:
343     case Hexagon::S2_storeri_io:
344     case Hexagon::S2_storerd_io:
345     case Hexagon::V6_vS32b_ai:
346     case Hexagon::V6_vS32Ub_ai:
347     case Hexagon::STriw_pred:
348     case Hexagon::STriw_ctr:
349     case Hexagon::PS_vstorerq_ai:
350     case Hexagon::PS_vstorerw_ai: {
351       const MachineOperand &OpFI = MI.getOperand(0);
352       if (!OpFI.isFI())
353         return 0;
354       const MachineOperand &OpOff = MI.getOperand(1);
355       if (!OpOff.isImm() || OpOff.getImm() != 0)
356         return 0;
357       FrameIndex = OpFI.getIndex();
358       return MI.getOperand(2).getReg();
359     }
360 
361     case Hexagon::S2_pstorerbt_io:
362     case Hexagon::S2_pstorerbf_io:
363     case Hexagon::S2_pstorerht_io:
364     case Hexagon::S2_pstorerhf_io:
365     case Hexagon::S2_pstorerit_io:
366     case Hexagon::S2_pstorerif_io:
367     case Hexagon::S2_pstorerdt_io:
368     case Hexagon::S2_pstorerdf_io: {
369       const MachineOperand &OpFI = MI.getOperand(1);
370       if (!OpFI.isFI())
371         return 0;
372       const MachineOperand &OpOff = MI.getOperand(2);
373       if (!OpOff.isImm() || OpOff.getImm() != 0)
374         return 0;
375       FrameIndex = OpFI.getIndex();
376       return MI.getOperand(3).getReg();
377     }
378   }
379 
380   return 0;
381 }
382 
383 /// This function checks if the instruction or bundle of instructions
384 /// has load from stack slot and returns frameindex and machine memory
385 /// operand of that instruction if true.
386 bool HexagonInstrInfo::hasLoadFromStackSlot(
387     const MachineInstr &MI,
388     SmallVectorImpl<const MachineMemOperand *> &Accesses) const {
389   if (MI.isBundle()) {
390     const MachineBasicBlock *MBB = MI.getParent();
391     MachineBasicBlock::const_instr_iterator MII = MI.getIterator();
392     for (++MII; MII != MBB->instr_end() && MII->isInsideBundle(); ++MII)
393       if (TargetInstrInfo::hasLoadFromStackSlot(*MII, Accesses))
394         return true;
395     return false;
396   }
397 
398   return TargetInstrInfo::hasLoadFromStackSlot(MI, Accesses);
399 }
400 
401 /// This function checks if the instruction or bundle of instructions
402 /// has store to stack slot and returns frameindex and machine memory
403 /// operand of that instruction if true.
404 bool HexagonInstrInfo::hasStoreToStackSlot(
405     const MachineInstr &MI,
406     SmallVectorImpl<const MachineMemOperand *> &Accesses) const {
407   if (MI.isBundle()) {
408     const MachineBasicBlock *MBB = MI.getParent();
409     MachineBasicBlock::const_instr_iterator MII = MI.getIterator();
410     for (++MII; MII != MBB->instr_end() && MII->isInsideBundle(); ++MII)
411       if (TargetInstrInfo::hasStoreToStackSlot(*MII, Accesses))
412         return true;
413     return false;
414   }
415 
416   return TargetInstrInfo::hasStoreToStackSlot(MI, Accesses);
417 }
418 
419 /// This function can analyze one/two way branching only and should (mostly) be
420 /// called by target independent side.
421 /// First entry is always the opcode of the branching instruction, except when
422 /// the Cond vector is supposed to be empty, e.g., when analyzeBranch fails, a
423 /// BB with only unconditional jump. Subsequent entries depend upon the opcode,
424 /// e.g. Jump_c p will have
425 /// Cond[0] = Jump_c
426 /// Cond[1] = p
427 /// HW-loop ENDLOOP:
428 /// Cond[0] = ENDLOOP
429 /// Cond[1] = MBB
430 /// New value jump:
431 /// Cond[0] = Hexagon::CMPEQri_f_Jumpnv_t_V4 -- specific opcode
432 /// Cond[1] = R
433 /// Cond[2] = Imm
434 bool HexagonInstrInfo::analyzeBranch(MachineBasicBlock &MBB,
435                                      MachineBasicBlock *&TBB,
436                                      MachineBasicBlock *&FBB,
437                                      SmallVectorImpl<MachineOperand> &Cond,
438                                      bool AllowModify) const {
439   TBB = nullptr;
440   FBB = nullptr;
441   Cond.clear();
442 
443   // If the block has no terminators, it just falls into the block after it.
444   MachineBasicBlock::instr_iterator I = MBB.instr_end();
445   if (I == MBB.instr_begin())
446     return false;
447 
448   // A basic block may looks like this:
449   //
450   //  [   insn
451   //     EH_LABEL
452   //      insn
453   //      insn
454   //      insn
455   //     EH_LABEL
456   //      insn     ]
457   //
458   // It has two succs but does not have a terminator
459   // Don't know how to handle it.
460   do {
461     --I;
462     if (I->isEHLabel())
463       // Don't analyze EH branches.
464       return true;
465   } while (I != MBB.instr_begin());
466 
467   I = MBB.instr_end();
468   --I;
469 
470   while (I->isDebugInstr()) {
471     if (I == MBB.instr_begin())
472       return false;
473     --I;
474   }
475 
476   bool JumpToBlock = I->getOpcode() == Hexagon::J2_jump &&
477                      I->getOperand(0).isMBB();
478   // Delete the J2_jump if it's equivalent to a fall-through.
479   if (AllowModify && JumpToBlock &&
480       MBB.isLayoutSuccessor(I->getOperand(0).getMBB())) {
481     LLVM_DEBUG(dbgs() << "\nErasing the jump to successor block\n";);
482     I->eraseFromParent();
483     I = MBB.instr_end();
484     if (I == MBB.instr_begin())
485       return false;
486     --I;
487   }
488   if (!isUnpredicatedTerminator(*I))
489     return false;
490 
491   // Get the last instruction in the block.
492   MachineInstr *LastInst = &*I;
493   MachineInstr *SecondLastInst = nullptr;
494   // Find one more terminator if present.
495   while (true) {
496     if (&*I != LastInst && !I->isBundle() && isUnpredicatedTerminator(*I)) {
497       if (!SecondLastInst)
498         SecondLastInst = &*I;
499       else
500         // This is a third branch.
501         return true;
502     }
503     if (I == MBB.instr_begin())
504       break;
505     --I;
506   }
507 
508   int LastOpcode = LastInst->getOpcode();
509   int SecLastOpcode = SecondLastInst ? SecondLastInst->getOpcode() : 0;
510   // If the branch target is not a basic block, it could be a tail call.
511   // (It is, if the target is a function.)
512   if (LastOpcode == Hexagon::J2_jump && !LastInst->getOperand(0).isMBB())
513     return true;
514   if (SecLastOpcode == Hexagon::J2_jump &&
515       !SecondLastInst->getOperand(0).isMBB())
516     return true;
517 
518   bool LastOpcodeHasJMP_c = PredOpcodeHasJMP_c(LastOpcode);
519   bool LastOpcodeHasNVJump = isNewValueJump(*LastInst);
520 
521   if (LastOpcodeHasJMP_c && !LastInst->getOperand(1).isMBB())
522     return true;
523 
524   // If there is only one terminator instruction, process it.
525   if (LastInst && !SecondLastInst) {
526     if (LastOpcode == Hexagon::J2_jump) {
527       TBB = LastInst->getOperand(0).getMBB();
528       return false;
529     }
530     if (isEndLoopN(LastOpcode)) {
531       TBB = LastInst->getOperand(0).getMBB();
532       Cond.push_back(MachineOperand::CreateImm(LastInst->getOpcode()));
533       Cond.push_back(LastInst->getOperand(0));
534       return false;
535     }
536     if (LastOpcodeHasJMP_c) {
537       TBB = LastInst->getOperand(1).getMBB();
538       Cond.push_back(MachineOperand::CreateImm(LastInst->getOpcode()));
539       Cond.push_back(LastInst->getOperand(0));
540       return false;
541     }
542     // Only supporting rr/ri versions of new-value jumps.
543     if (LastOpcodeHasNVJump && (LastInst->getNumExplicitOperands() == 3)) {
544       TBB = LastInst->getOperand(2).getMBB();
545       Cond.push_back(MachineOperand::CreateImm(LastInst->getOpcode()));
546       Cond.push_back(LastInst->getOperand(0));
547       Cond.push_back(LastInst->getOperand(1));
548       return false;
549     }
550     LLVM_DEBUG(dbgs() << "\nCant analyze " << printMBBReference(MBB)
551                       << " with one jump\n";);
552     // Otherwise, don't know what this is.
553     return true;
554   }
555 
556   bool SecLastOpcodeHasJMP_c = PredOpcodeHasJMP_c(SecLastOpcode);
557   bool SecLastOpcodeHasNVJump = isNewValueJump(*SecondLastInst);
558   if (SecLastOpcodeHasJMP_c && (LastOpcode == Hexagon::J2_jump)) {
559     if (!SecondLastInst->getOperand(1).isMBB())
560       return true;
561     TBB =  SecondLastInst->getOperand(1).getMBB();
562     Cond.push_back(MachineOperand::CreateImm(SecondLastInst->getOpcode()));
563     Cond.push_back(SecondLastInst->getOperand(0));
564     FBB = LastInst->getOperand(0).getMBB();
565     return false;
566   }
567 
568   // Only supporting rr/ri versions of new-value jumps.
569   if (SecLastOpcodeHasNVJump &&
570       (SecondLastInst->getNumExplicitOperands() == 3) &&
571       (LastOpcode == Hexagon::J2_jump)) {
572     TBB = SecondLastInst->getOperand(2).getMBB();
573     Cond.push_back(MachineOperand::CreateImm(SecondLastInst->getOpcode()));
574     Cond.push_back(SecondLastInst->getOperand(0));
575     Cond.push_back(SecondLastInst->getOperand(1));
576     FBB = LastInst->getOperand(0).getMBB();
577     return false;
578   }
579 
580   // If the block ends with two Hexagon:JMPs, handle it.  The second one is not
581   // executed, so remove it.
582   if (SecLastOpcode == Hexagon::J2_jump && LastOpcode == Hexagon::J2_jump) {
583     TBB = SecondLastInst->getOperand(0).getMBB();
584     I = LastInst->getIterator();
585     if (AllowModify)
586       I->eraseFromParent();
587     return false;
588   }
589 
590   // If the block ends with an ENDLOOP, and J2_jump, handle it.
591   if (isEndLoopN(SecLastOpcode) && LastOpcode == Hexagon::J2_jump) {
592     TBB = SecondLastInst->getOperand(0).getMBB();
593     Cond.push_back(MachineOperand::CreateImm(SecondLastInst->getOpcode()));
594     Cond.push_back(SecondLastInst->getOperand(0));
595     FBB = LastInst->getOperand(0).getMBB();
596     return false;
597   }
598   LLVM_DEBUG(dbgs() << "\nCant analyze " << printMBBReference(MBB)
599                     << " with two jumps";);
600   // Otherwise, can't handle this.
601   return true;
602 }
603 
604 unsigned HexagonInstrInfo::removeBranch(MachineBasicBlock &MBB,
605                                         int *BytesRemoved) const {
606   assert(!BytesRemoved && "code size not handled");
607 
608   LLVM_DEBUG(dbgs() << "\nRemoving branches out of " << printMBBReference(MBB));
609   MachineBasicBlock::iterator I = MBB.end();
610   unsigned Count = 0;
611   while (I != MBB.begin()) {
612     --I;
613     if (I->isDebugInstr())
614       continue;
615     // Only removing branches from end of MBB.
616     if (!I->isBranch())
617       return Count;
618     if (Count && (I->getOpcode() == Hexagon::J2_jump))
619       llvm_unreachable("Malformed basic block: unconditional branch not last");
620     MBB.erase(&MBB.back());
621     I = MBB.end();
622     ++Count;
623   }
624   return Count;
625 }
626 
627 unsigned HexagonInstrInfo::insertBranch(MachineBasicBlock &MBB,
628                                         MachineBasicBlock *TBB,
629                                         MachineBasicBlock *FBB,
630                                         ArrayRef<MachineOperand> Cond,
631                                         const DebugLoc &DL,
632                                         int *BytesAdded) const {
633   unsigned BOpc   = Hexagon::J2_jump;
634   unsigned BccOpc = Hexagon::J2_jumpt;
635   assert(validateBranchCond(Cond) && "Invalid branching condition");
636   assert(TBB && "insertBranch must not be told to insert a fallthrough");
637   assert(!BytesAdded && "code size not handled");
638 
639   // Check if reverseBranchCondition has asked to reverse this branch
640   // If we want to reverse the branch an odd number of times, we want
641   // J2_jumpf.
642   if (!Cond.empty() && Cond[0].isImm())
643     BccOpc = Cond[0].getImm();
644 
645   if (!FBB) {
646     if (Cond.empty()) {
647       // Due to a bug in TailMerging/CFG Optimization, we need to add a
648       // special case handling of a predicated jump followed by an
649       // unconditional jump. If not, Tail Merging and CFG Optimization go
650       // into an infinite loop.
651       MachineBasicBlock *NewTBB, *NewFBB;
652       SmallVector<MachineOperand, 4> Cond;
653       auto Term = MBB.getFirstTerminator();
654       if (Term != MBB.end() && isPredicated(*Term) &&
655           !analyzeBranch(MBB, NewTBB, NewFBB, Cond, false) &&
656           MachineFunction::iterator(NewTBB) == ++MBB.getIterator()) {
657         reverseBranchCondition(Cond);
658         removeBranch(MBB);
659         return insertBranch(MBB, TBB, nullptr, Cond, DL);
660       }
661       BuildMI(&MBB, DL, get(BOpc)).addMBB(TBB);
662     } else if (isEndLoopN(Cond[0].getImm())) {
663       int EndLoopOp = Cond[0].getImm();
664       assert(Cond[1].isMBB());
665       // Since we're adding an ENDLOOP, there better be a LOOP instruction.
666       // Check for it, and change the BB target if needed.
667       SmallPtrSet<MachineBasicBlock *, 8> VisitedBBs;
668       MachineInstr *Loop = findLoopInstr(TBB, EndLoopOp, Cond[1].getMBB(),
669                                          VisitedBBs);
670       assert(Loop != nullptr && "Inserting an ENDLOOP without a LOOP");
671       Loop->getOperand(0).setMBB(TBB);
672       // Add the ENDLOOP after the finding the LOOP0.
673       BuildMI(&MBB, DL, get(EndLoopOp)).addMBB(TBB);
674     } else if (isNewValueJump(Cond[0].getImm())) {
675       assert((Cond.size() == 3) && "Only supporting rr/ri version of nvjump");
676       // New value jump
677       // (ins IntRegs:$src1, IntRegs:$src2, brtarget:$offset)
678       // (ins IntRegs:$src1, u5Imm:$src2, brtarget:$offset)
679       unsigned Flags1 = getUndefRegState(Cond[1].isUndef());
680       LLVM_DEBUG(dbgs() << "\nInserting NVJump for "
681                         << printMBBReference(MBB););
682       if (Cond[2].isReg()) {
683         unsigned Flags2 = getUndefRegState(Cond[2].isUndef());
684         BuildMI(&MBB, DL, get(BccOpc)).addReg(Cond[1].getReg(), Flags1).
685           addReg(Cond[2].getReg(), Flags2).addMBB(TBB);
686       } else if(Cond[2].isImm()) {
687         BuildMI(&MBB, DL, get(BccOpc)).addReg(Cond[1].getReg(), Flags1).
688           addImm(Cond[2].getImm()).addMBB(TBB);
689       } else
690         llvm_unreachable("Invalid condition for branching");
691     } else {
692       assert((Cond.size() == 2) && "Malformed cond vector");
693       const MachineOperand &RO = Cond[1];
694       unsigned Flags = getUndefRegState(RO.isUndef());
695       BuildMI(&MBB, DL, get(BccOpc)).addReg(RO.getReg(), Flags).addMBB(TBB);
696     }
697     return 1;
698   }
699   assert((!Cond.empty()) &&
700          "Cond. cannot be empty when multiple branchings are required");
701   assert((!isNewValueJump(Cond[0].getImm())) &&
702          "NV-jump cannot be inserted with another branch");
703   // Special case for hardware loops.  The condition is a basic block.
704   if (isEndLoopN(Cond[0].getImm())) {
705     int EndLoopOp = Cond[0].getImm();
706     assert(Cond[1].isMBB());
707     // Since we're adding an ENDLOOP, there better be a LOOP instruction.
708     // Check for it, and change the BB target if needed.
709     SmallPtrSet<MachineBasicBlock *, 8> VisitedBBs;
710     MachineInstr *Loop = findLoopInstr(TBB, EndLoopOp, Cond[1].getMBB(),
711                                        VisitedBBs);
712     assert(Loop != nullptr && "Inserting an ENDLOOP without a LOOP");
713     Loop->getOperand(0).setMBB(TBB);
714     // Add the ENDLOOP after the finding the LOOP0.
715     BuildMI(&MBB, DL, get(EndLoopOp)).addMBB(TBB);
716   } else {
717     const MachineOperand &RO = Cond[1];
718     unsigned Flags = getUndefRegState(RO.isUndef());
719     BuildMI(&MBB, DL, get(BccOpc)).addReg(RO.getReg(), Flags).addMBB(TBB);
720   }
721   BuildMI(&MBB, DL, get(BOpc)).addMBB(FBB);
722 
723   return 2;
724 }
725 
726 namespace {
727 class HexagonPipelinerLoopInfo : public TargetInstrInfo::PipelinerLoopInfo {
728   MachineInstr *Loop, *EndLoop;
729   MachineFunction *MF;
730   const HexagonInstrInfo *TII;
731   int64_t TripCount;
732   Register LoopCount;
733   DebugLoc DL;
734 
735 public:
736   HexagonPipelinerLoopInfo(MachineInstr *Loop, MachineInstr *EndLoop)
737       : Loop(Loop), EndLoop(EndLoop), MF(Loop->getParent()->getParent()),
738         TII(MF->getSubtarget<HexagonSubtarget>().getInstrInfo()),
739         DL(Loop->getDebugLoc()) {
740     // Inspect the Loop instruction up-front, as it may be deleted when we call
741     // createTripCountGreaterCondition.
742     TripCount = Loop->getOpcode() == Hexagon::J2_loop0r
743                     ? -1
744                     : Loop->getOperand(1).getImm();
745     if (TripCount == -1)
746       LoopCount = Loop->getOperand(1).getReg();
747   }
748 
749   bool shouldIgnoreForPipelining(const MachineInstr *MI) const override {
750     // Only ignore the terminator.
751     return MI == EndLoop;
752   }
753 
754   std::optional<bool> createTripCountGreaterCondition(
755       int TC, MachineBasicBlock &MBB,
756       SmallVectorImpl<MachineOperand> &Cond) override {
757     if (TripCount == -1) {
758       // Check if we're done with the loop.
759       Register Done = TII->createVR(MF, MVT::i1);
760       MachineInstr *NewCmp = BuildMI(&MBB, DL,
761                                      TII->get(Hexagon::C2_cmpgtui), Done)
762                                  .addReg(LoopCount)
763                                  .addImm(TC);
764       Cond.push_back(MachineOperand::CreateImm(Hexagon::J2_jumpf));
765       Cond.push_back(NewCmp->getOperand(0));
766       return {};
767     }
768 
769     return TripCount > TC;
770   }
771 
772   void setPreheader(MachineBasicBlock *NewPreheader) override {
773     NewPreheader->splice(NewPreheader->getFirstTerminator(), Loop->getParent(),
774                          Loop);
775   }
776 
777   void adjustTripCount(int TripCountAdjust) override {
778     // If the loop trip count is a compile-time value, then just change the
779     // value.
780     if (Loop->getOpcode() == Hexagon::J2_loop0i ||
781         Loop->getOpcode() == Hexagon::J2_loop1i) {
782       int64_t TripCount = Loop->getOperand(1).getImm() + TripCountAdjust;
783       assert(TripCount > 0 && "Can't create an empty or negative loop!");
784       Loop->getOperand(1).setImm(TripCount);
785       return;
786     }
787 
788     // The loop trip count is a run-time value. We generate code to subtract
789     // one from the trip count, and update the loop instruction.
790     Register LoopCount = Loop->getOperand(1).getReg();
791     Register NewLoopCount = TII->createVR(MF, MVT::i32);
792     BuildMI(*Loop->getParent(), Loop, Loop->getDebugLoc(),
793             TII->get(Hexagon::A2_addi), NewLoopCount)
794         .addReg(LoopCount)
795         .addImm(TripCountAdjust);
796     Loop->getOperand(1).setReg(NewLoopCount);
797   }
798 
799   void disposed(LiveIntervals *LIS) override {
800     if (LIS)
801       LIS->RemoveMachineInstrFromMaps(*Loop);
802     Loop->eraseFromParent();
803   }
804 };
805 } // namespace
806 
807 std::unique_ptr<TargetInstrInfo::PipelinerLoopInfo>
808 HexagonInstrInfo::analyzeLoopForPipelining(MachineBasicBlock *LoopBB) const {
809   // We really "analyze" only hardware loops right now.
810   MachineBasicBlock::iterator I = LoopBB->getFirstTerminator();
811 
812   if (I != LoopBB->end() && isEndLoopN(I->getOpcode())) {
813     SmallPtrSet<MachineBasicBlock *, 8> VisitedBBs;
814     MachineInstr *LoopInst = findLoopInstr(
815         LoopBB, I->getOpcode(), I->getOperand(0).getMBB(), VisitedBBs);
816     if (LoopInst)
817       return std::make_unique<HexagonPipelinerLoopInfo>(LoopInst, &*I);
818   }
819   return nullptr;
820 }
821 
822 bool HexagonInstrInfo::isProfitableToIfCvt(MachineBasicBlock &MBB,
823       unsigned NumCycles, unsigned ExtraPredCycles,
824       BranchProbability Probability) const {
825   return nonDbgBBSize(&MBB) <= 3;
826 }
827 
828 bool HexagonInstrInfo::isProfitableToIfCvt(MachineBasicBlock &TMBB,
829       unsigned NumTCycles, unsigned ExtraTCycles, MachineBasicBlock &FMBB,
830       unsigned NumFCycles, unsigned ExtraFCycles, BranchProbability Probability)
831       const {
832   return nonDbgBBSize(&TMBB) <= 3 && nonDbgBBSize(&FMBB) <= 3;
833 }
834 
835 bool HexagonInstrInfo::isProfitableToDupForIfCvt(MachineBasicBlock &MBB,
836       unsigned NumInstrs, BranchProbability Probability) const {
837   return NumInstrs <= 4;
838 }
839 
840 static void getLiveInRegsAt(LivePhysRegs &Regs, const MachineInstr &MI) {
841   SmallVector<std::pair<MCPhysReg, const MachineOperand*>,2> Clobbers;
842   const MachineBasicBlock &B = *MI.getParent();
843   Regs.addLiveIns(B);
844   auto E = MachineBasicBlock::const_iterator(MI.getIterator());
845   for (auto I = B.begin(); I != E; ++I) {
846     Clobbers.clear();
847     Regs.stepForward(*I, Clobbers);
848   }
849 }
850 
851 static void getLiveOutRegsAt(LivePhysRegs &Regs, const MachineInstr &MI) {
852   const MachineBasicBlock &B = *MI.getParent();
853   Regs.addLiveOuts(B);
854   auto E = ++MachineBasicBlock::const_iterator(MI.getIterator()).getReverse();
855   for (auto I = B.rbegin(); I != E; ++I)
856     Regs.stepBackward(*I);
857 }
858 
859 void HexagonInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
860                                    MachineBasicBlock::iterator I,
861                                    const DebugLoc &DL, MCRegister DestReg,
862                                    MCRegister SrcReg, bool KillSrc,
863                                    bool RenamableDest,
864                                    bool RenamableSrc) const {
865   const HexagonRegisterInfo &HRI = *Subtarget.getRegisterInfo();
866   unsigned KillFlag = getKillRegState(KillSrc);
867 
868   if (Hexagon::IntRegsRegClass.contains(SrcReg, DestReg)) {
869     BuildMI(MBB, I, DL, get(Hexagon::A2_tfr), DestReg)
870       .addReg(SrcReg, KillFlag);
871     return;
872   }
873   if (Hexagon::DoubleRegsRegClass.contains(SrcReg, DestReg)) {
874     BuildMI(MBB, I, DL, get(Hexagon::A2_tfrp), DestReg)
875       .addReg(SrcReg, KillFlag);
876     return;
877   }
878   if (Hexagon::PredRegsRegClass.contains(SrcReg, DestReg)) {
879     // Map Pd = Ps to Pd = or(Ps, Ps).
880     BuildMI(MBB, I, DL, get(Hexagon::C2_or), DestReg)
881       .addReg(SrcReg).addReg(SrcReg, KillFlag);
882     return;
883   }
884   if (Hexagon::CtrRegsRegClass.contains(DestReg) &&
885       Hexagon::IntRegsRegClass.contains(SrcReg)) {
886     BuildMI(MBB, I, DL, get(Hexagon::A2_tfrrcr), DestReg)
887       .addReg(SrcReg, KillFlag);
888     return;
889   }
890   if (Hexagon::IntRegsRegClass.contains(DestReg) &&
891       Hexagon::CtrRegsRegClass.contains(SrcReg)) {
892     BuildMI(MBB, I, DL, get(Hexagon::A2_tfrcrr), DestReg)
893       .addReg(SrcReg, KillFlag);
894     return;
895   }
896   if (Hexagon::ModRegsRegClass.contains(DestReg) &&
897       Hexagon::IntRegsRegClass.contains(SrcReg)) {
898     BuildMI(MBB, I, DL, get(Hexagon::A2_tfrrcr), DestReg)
899       .addReg(SrcReg, KillFlag);
900     return;
901   }
902   if (Hexagon::PredRegsRegClass.contains(SrcReg) &&
903       Hexagon::IntRegsRegClass.contains(DestReg)) {
904     BuildMI(MBB, I, DL, get(Hexagon::C2_tfrpr), DestReg)
905       .addReg(SrcReg, KillFlag);
906     return;
907   }
908   if (Hexagon::IntRegsRegClass.contains(SrcReg) &&
909       Hexagon::PredRegsRegClass.contains(DestReg)) {
910     BuildMI(MBB, I, DL, get(Hexagon::C2_tfrrp), DestReg)
911       .addReg(SrcReg, KillFlag);
912     return;
913   }
914   if (Hexagon::PredRegsRegClass.contains(SrcReg) &&
915       Hexagon::IntRegsRegClass.contains(DestReg)) {
916     BuildMI(MBB, I, DL, get(Hexagon::C2_tfrpr), DestReg)
917       .addReg(SrcReg, KillFlag);
918     return;
919   }
920   if (Hexagon::HvxVRRegClass.contains(SrcReg, DestReg)) {
921     BuildMI(MBB, I, DL, get(Hexagon::V6_vassign), DestReg).
922       addReg(SrcReg, KillFlag);
923     return;
924   }
925   if (Hexagon::HvxWRRegClass.contains(SrcReg, DestReg)) {
926     LivePhysRegs LiveAtMI(HRI);
927     getLiveInRegsAt(LiveAtMI, *I);
928     Register SrcLo = HRI.getSubReg(SrcReg, Hexagon::vsub_lo);
929     Register SrcHi = HRI.getSubReg(SrcReg, Hexagon::vsub_hi);
930     unsigned UndefLo = getUndefRegState(!LiveAtMI.contains(SrcLo));
931     unsigned UndefHi = getUndefRegState(!LiveAtMI.contains(SrcHi));
932     BuildMI(MBB, I, DL, get(Hexagon::V6_vcombine), DestReg)
933       .addReg(SrcHi, KillFlag | UndefHi)
934       .addReg(SrcLo, KillFlag | UndefLo);
935     return;
936   }
937   if (Hexagon::HvxQRRegClass.contains(SrcReg, DestReg)) {
938     BuildMI(MBB, I, DL, get(Hexagon::V6_pred_and), DestReg)
939       .addReg(SrcReg)
940       .addReg(SrcReg, KillFlag);
941     return;
942   }
943   if (Hexagon::HvxQRRegClass.contains(SrcReg) &&
944       Hexagon::HvxVRRegClass.contains(DestReg)) {
945     llvm_unreachable("Unimplemented pred to vec");
946     return;
947   }
948   if (Hexagon::HvxQRRegClass.contains(DestReg) &&
949       Hexagon::HvxVRRegClass.contains(SrcReg)) {
950     llvm_unreachable("Unimplemented vec to pred");
951     return;
952   }
953 
954 #ifndef NDEBUG
955   // Show the invalid registers to ease debugging.
956   dbgs() << "Invalid registers for copy in " << printMBBReference(MBB) << ": "
957          << printReg(DestReg, &HRI) << " = " << printReg(SrcReg, &HRI) << '\n';
958 #endif
959   llvm_unreachable("Unimplemented");
960 }
961 
962 void HexagonInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
963                                            MachineBasicBlock::iterator I,
964                                            Register SrcReg, bool isKill, int FI,
965                                            const TargetRegisterClass *RC,
966                                            const TargetRegisterInfo *TRI,
967                                            Register VReg,
968                                            MachineInstr::MIFlag Flags) const {
969   DebugLoc DL = MBB.findDebugLoc(I);
970   MachineFunction &MF = *MBB.getParent();
971   MachineFrameInfo &MFI = MF.getFrameInfo();
972   unsigned KillFlag = getKillRegState(isKill);
973 
974   MachineMemOperand *MMO = MF.getMachineMemOperand(
975       MachinePointerInfo::getFixedStack(MF, FI), MachineMemOperand::MOStore,
976       MFI.getObjectSize(FI), MFI.getObjectAlign(FI));
977 
978   if (Hexagon::IntRegsRegClass.hasSubClassEq(RC)) {
979     BuildMI(MBB, I, DL, get(Hexagon::S2_storeri_io))
980       .addFrameIndex(FI).addImm(0)
981       .addReg(SrcReg, KillFlag).addMemOperand(MMO);
982   } else if (Hexagon::DoubleRegsRegClass.hasSubClassEq(RC)) {
983     BuildMI(MBB, I, DL, get(Hexagon::S2_storerd_io))
984       .addFrameIndex(FI).addImm(0)
985       .addReg(SrcReg, KillFlag).addMemOperand(MMO);
986   } else if (Hexagon::PredRegsRegClass.hasSubClassEq(RC)) {
987     BuildMI(MBB, I, DL, get(Hexagon::STriw_pred))
988       .addFrameIndex(FI).addImm(0)
989       .addReg(SrcReg, KillFlag).addMemOperand(MMO);
990   } else if (Hexagon::ModRegsRegClass.hasSubClassEq(RC)) {
991     BuildMI(MBB, I, DL, get(Hexagon::STriw_ctr))
992       .addFrameIndex(FI).addImm(0)
993       .addReg(SrcReg, KillFlag).addMemOperand(MMO);
994   } else if (Hexagon::HvxQRRegClass.hasSubClassEq(RC)) {
995     BuildMI(MBB, I, DL, get(Hexagon::PS_vstorerq_ai))
996       .addFrameIndex(FI).addImm(0)
997       .addReg(SrcReg, KillFlag).addMemOperand(MMO);
998   } else if (Hexagon::HvxVRRegClass.hasSubClassEq(RC)) {
999     BuildMI(MBB, I, DL, get(Hexagon::PS_vstorerv_ai))
1000       .addFrameIndex(FI).addImm(0)
1001       .addReg(SrcReg, KillFlag).addMemOperand(MMO);
1002   } else if (Hexagon::HvxWRRegClass.hasSubClassEq(RC)) {
1003     BuildMI(MBB, I, DL, get(Hexagon::PS_vstorerw_ai))
1004       .addFrameIndex(FI).addImm(0)
1005       .addReg(SrcReg, KillFlag).addMemOperand(MMO);
1006   } else {
1007     llvm_unreachable("Unimplemented");
1008   }
1009 }
1010 
1011 void HexagonInstrInfo::loadRegFromStackSlot(
1012     MachineBasicBlock &MBB, MachineBasicBlock::iterator I, Register DestReg,
1013     int FI, const TargetRegisterClass *RC, const TargetRegisterInfo *TRI,
1014     Register VReg, MachineInstr::MIFlag Flags) const {
1015   DebugLoc DL = MBB.findDebugLoc(I);
1016   MachineFunction &MF = *MBB.getParent();
1017   MachineFrameInfo &MFI = MF.getFrameInfo();
1018 
1019   MachineMemOperand *MMO = MF.getMachineMemOperand(
1020       MachinePointerInfo::getFixedStack(MF, FI), MachineMemOperand::MOLoad,
1021       MFI.getObjectSize(FI), MFI.getObjectAlign(FI));
1022 
1023   if (Hexagon::IntRegsRegClass.hasSubClassEq(RC)) {
1024     BuildMI(MBB, I, DL, get(Hexagon::L2_loadri_io), DestReg)
1025       .addFrameIndex(FI).addImm(0).addMemOperand(MMO);
1026   } else if (Hexagon::DoubleRegsRegClass.hasSubClassEq(RC)) {
1027     BuildMI(MBB, I, DL, get(Hexagon::L2_loadrd_io), DestReg)
1028       .addFrameIndex(FI).addImm(0).addMemOperand(MMO);
1029   } else if (Hexagon::PredRegsRegClass.hasSubClassEq(RC)) {
1030     BuildMI(MBB, I, DL, get(Hexagon::LDriw_pred), DestReg)
1031       .addFrameIndex(FI).addImm(0).addMemOperand(MMO);
1032   } else if (Hexagon::ModRegsRegClass.hasSubClassEq(RC)) {
1033     BuildMI(MBB, I, DL, get(Hexagon::LDriw_ctr), DestReg)
1034       .addFrameIndex(FI).addImm(0).addMemOperand(MMO);
1035   } else if (Hexagon::HvxQRRegClass.hasSubClassEq(RC)) {
1036     BuildMI(MBB, I, DL, get(Hexagon::PS_vloadrq_ai), DestReg)
1037       .addFrameIndex(FI).addImm(0).addMemOperand(MMO);
1038   } else if (Hexagon::HvxVRRegClass.hasSubClassEq(RC)) {
1039     BuildMI(MBB, I, DL, get(Hexagon::PS_vloadrv_ai), DestReg)
1040       .addFrameIndex(FI).addImm(0).addMemOperand(MMO);
1041   } else if (Hexagon::HvxWRRegClass.hasSubClassEq(RC)) {
1042     BuildMI(MBB, I, DL, get(Hexagon::PS_vloadrw_ai), DestReg)
1043       .addFrameIndex(FI).addImm(0).addMemOperand(MMO);
1044   } else {
1045     llvm_unreachable("Can't store this register to stack slot");
1046   }
1047 }
1048 
1049 /// expandPostRAPseudo - This function is called for all pseudo instructions
1050 /// that remain after register allocation. Many pseudo instructions are
1051 /// created to help register allocation. This is the place to convert them
1052 /// into real instructions. The target can edit MI in place, or it can insert
1053 /// new instructions and erase MI. The function should return true if
1054 /// anything was changed.
1055 bool HexagonInstrInfo::expandPostRAPseudo(MachineInstr &MI) const {
1056   MachineBasicBlock &MBB = *MI.getParent();
1057   MachineFunction &MF = *MBB.getParent();
1058   MachineRegisterInfo &MRI = MF.getRegInfo();
1059   const HexagonRegisterInfo &HRI = *Subtarget.getRegisterInfo();
1060   LivePhysRegs LiveIn(HRI), LiveOut(HRI);
1061   DebugLoc DL = MI.getDebugLoc();
1062   unsigned Opc = MI.getOpcode();
1063 
1064   auto RealCirc = [&](unsigned Opc, bool HasImm, unsigned MxOp) {
1065     Register Mx = MI.getOperand(MxOp).getReg();
1066     Register CSx = (Mx == Hexagon::M0 ? Hexagon::CS0 : Hexagon::CS1);
1067     BuildMI(MBB, MI, DL, get(Hexagon::A2_tfrrcr), CSx)
1068         .add(MI.getOperand((HasImm ? 5 : 4)));
1069     auto MIB = BuildMI(MBB, MI, DL, get(Opc)).add(MI.getOperand(0))
1070         .add(MI.getOperand(1)).add(MI.getOperand(2)).add(MI.getOperand(3));
1071     if (HasImm)
1072       MIB.add(MI.getOperand(4));
1073     MIB.addReg(CSx, RegState::Implicit);
1074     MBB.erase(MI);
1075     return true;
1076   };
1077 
1078   auto UseAligned = [&](const MachineInstr &MI, Align NeedAlign) {
1079     if (MI.memoperands().empty())
1080       return false;
1081     return all_of(MI.memoperands(), [NeedAlign](const MachineMemOperand *MMO) {
1082       return MMO->getAlign() >= NeedAlign;
1083     });
1084   };
1085 
1086   switch (Opc) {
1087     case Hexagon::PS_call_instrprof_custom: {
1088       auto Op0 = MI.getOperand(0);
1089       assert(Op0.isGlobal() &&
1090              "First operand must be a global containing handler name.");
1091       const GlobalValue *NameVar = Op0.getGlobal();
1092       const GlobalVariable *GV = dyn_cast<GlobalVariable>(NameVar);
1093       auto *Arr = cast<ConstantDataArray>(GV->getInitializer());
1094       StringRef NameStr = Arr->isCString() ? Arr->getAsCString() : Arr->getAsString();
1095 
1096       MachineOperand &Op1 = MI.getOperand(1);
1097       // Set R0 with the imm value to be passed to the custom profiling handler.
1098       BuildMI(MBB, MI, DL, get(Hexagon::A2_tfrsi), Hexagon::R0)
1099         .addImm(Op1.getImm());
1100       // The call to the custom handler is being treated as a special one as the
1101       // callee is responsible for saving and restoring all the registers
1102       // (including caller saved registers) it needs to modify. This is
1103       // done to reduce the impact of instrumentation on the code being
1104       // instrumented/profiled.
1105       // NOTE: R14, R15 and R28 are reserved for PLT handling. These registers
1106       // are in the Def list of the Hexagon::PS_call_instrprof_custom and
1107       // therefore will be handled appropriately duing register allocation.
1108 
1109       // TODO: It may be a good idea to add a separate pseudo instruction for
1110       // static relocation which doesn't need to reserve r14, r15 and r28.
1111 
1112       auto MIB = BuildMI(MBB, MI, DL, get(Hexagon::J2_call))
1113                  .addUse(Hexagon::R0, RegState::Implicit|RegState::InternalRead)
1114                  .addDef(Hexagon::R29, RegState::ImplicitDefine)
1115                  .addDef(Hexagon::R30, RegState::ImplicitDefine)
1116                  .addDef(Hexagon::R14, RegState::ImplicitDefine)
1117                  .addDef(Hexagon::R15, RegState::ImplicitDefine)
1118                  .addDef(Hexagon::R28, RegState::ImplicitDefine);
1119       const char *cstr = MF.createExternalSymbolName(NameStr);
1120       MIB.addExternalSymbol(cstr);
1121       MBB.erase(MI);
1122       return true;
1123     }
1124     case TargetOpcode::COPY: {
1125       MachineOperand &MD = MI.getOperand(0);
1126       MachineOperand &MS = MI.getOperand(1);
1127       MachineBasicBlock::iterator MBBI = MI.getIterator();
1128       if (MD.getReg() != MS.getReg() && !MS.isUndef()) {
1129         copyPhysReg(MBB, MI, DL, MD.getReg(), MS.getReg(), MS.isKill());
1130         std::prev(MBBI)->copyImplicitOps(*MBB.getParent(), MI);
1131       }
1132       MBB.erase(MBBI);
1133       return true;
1134     }
1135     case Hexagon::PS_aligna:
1136       BuildMI(MBB, MI, DL, get(Hexagon::A2_andir), MI.getOperand(0).getReg())
1137           .addReg(HRI.getFrameRegister())
1138           .addImm(-MI.getOperand(1).getImm());
1139       MBB.erase(MI);
1140       return true;
1141     case Hexagon::V6_vassignp: {
1142       Register SrcReg = MI.getOperand(1).getReg();
1143       Register DstReg = MI.getOperand(0).getReg();
1144       Register SrcLo = HRI.getSubReg(SrcReg, Hexagon::vsub_lo);
1145       Register SrcHi = HRI.getSubReg(SrcReg, Hexagon::vsub_hi);
1146       getLiveInRegsAt(LiveIn, MI);
1147       unsigned UndefLo = getUndefRegState(!LiveIn.contains(SrcLo));
1148       unsigned UndefHi = getUndefRegState(!LiveIn.contains(SrcHi));
1149       unsigned Kill = getKillRegState(MI.getOperand(1).isKill());
1150       BuildMI(MBB, MI, DL, get(Hexagon::V6_vcombine), DstReg)
1151           .addReg(SrcHi, UndefHi)
1152           .addReg(SrcLo, Kill | UndefLo);
1153       MBB.erase(MI);
1154       return true;
1155     }
1156     case Hexagon::V6_lo: {
1157       Register SrcReg = MI.getOperand(1).getReg();
1158       Register DstReg = MI.getOperand(0).getReg();
1159       Register SrcSubLo = HRI.getSubReg(SrcReg, Hexagon::vsub_lo);
1160       copyPhysReg(MBB, MI, DL, DstReg, SrcSubLo, MI.getOperand(1).isKill());
1161       MBB.erase(MI);
1162       MRI.clearKillFlags(SrcSubLo);
1163       return true;
1164     }
1165     case Hexagon::V6_hi: {
1166       Register SrcReg = MI.getOperand(1).getReg();
1167       Register DstReg = MI.getOperand(0).getReg();
1168       Register SrcSubHi = HRI.getSubReg(SrcReg, Hexagon::vsub_hi);
1169       copyPhysReg(MBB, MI, DL, DstReg, SrcSubHi, MI.getOperand(1).isKill());
1170       MBB.erase(MI);
1171       MRI.clearKillFlags(SrcSubHi);
1172       return true;
1173     }
1174     case Hexagon::PS_vloadrv_ai: {
1175       Register DstReg = MI.getOperand(0).getReg();
1176       const MachineOperand &BaseOp = MI.getOperand(1);
1177       assert(BaseOp.getSubReg() == 0);
1178       int Offset = MI.getOperand(2).getImm();
1179       Align NeedAlign = HRI.getSpillAlign(Hexagon::HvxVRRegClass);
1180       unsigned NewOpc = UseAligned(MI, NeedAlign) ? Hexagon::V6_vL32b_ai
1181                                                   : Hexagon::V6_vL32Ub_ai;
1182       BuildMI(MBB, MI, DL, get(NewOpc), DstReg)
1183           .addReg(BaseOp.getReg(), getRegState(BaseOp))
1184           .addImm(Offset)
1185           .cloneMemRefs(MI);
1186       MBB.erase(MI);
1187       return true;
1188     }
1189     case Hexagon::PS_vloadrw_ai: {
1190       Register DstReg = MI.getOperand(0).getReg();
1191       const MachineOperand &BaseOp = MI.getOperand(1);
1192       assert(BaseOp.getSubReg() == 0);
1193       int Offset = MI.getOperand(2).getImm();
1194       unsigned VecOffset = HRI.getSpillSize(Hexagon::HvxVRRegClass);
1195       Align NeedAlign = HRI.getSpillAlign(Hexagon::HvxVRRegClass);
1196       unsigned NewOpc = UseAligned(MI, NeedAlign) ? Hexagon::V6_vL32b_ai
1197                                                   : Hexagon::V6_vL32Ub_ai;
1198       BuildMI(MBB, MI, DL, get(NewOpc),
1199               HRI.getSubReg(DstReg, Hexagon::vsub_lo))
1200           .addReg(BaseOp.getReg(), getRegState(BaseOp) & ~RegState::Kill)
1201           .addImm(Offset)
1202           .cloneMemRefs(MI);
1203       BuildMI(MBB, MI, DL, get(NewOpc),
1204               HRI.getSubReg(DstReg, Hexagon::vsub_hi))
1205           .addReg(BaseOp.getReg(), getRegState(BaseOp))
1206           .addImm(Offset + VecOffset)
1207           .cloneMemRefs(MI);
1208       MBB.erase(MI);
1209       return true;
1210     }
1211     case Hexagon::PS_vstorerv_ai: {
1212       const MachineOperand &SrcOp = MI.getOperand(2);
1213       assert(SrcOp.getSubReg() == 0);
1214       const MachineOperand &BaseOp = MI.getOperand(0);
1215       assert(BaseOp.getSubReg() == 0);
1216       int Offset = MI.getOperand(1).getImm();
1217       Align NeedAlign = HRI.getSpillAlign(Hexagon::HvxVRRegClass);
1218       unsigned NewOpc = UseAligned(MI, NeedAlign) ? Hexagon::V6_vS32b_ai
1219                                                   : Hexagon::V6_vS32Ub_ai;
1220       BuildMI(MBB, MI, DL, get(NewOpc))
1221           .addReg(BaseOp.getReg(), getRegState(BaseOp))
1222           .addImm(Offset)
1223           .addReg(SrcOp.getReg(), getRegState(SrcOp))
1224           .cloneMemRefs(MI);
1225       MBB.erase(MI);
1226       return true;
1227     }
1228     case Hexagon::PS_vstorerw_ai: {
1229       Register SrcReg = MI.getOperand(2).getReg();
1230       const MachineOperand &BaseOp = MI.getOperand(0);
1231       assert(BaseOp.getSubReg() == 0);
1232       int Offset = MI.getOperand(1).getImm();
1233       unsigned VecOffset = HRI.getSpillSize(Hexagon::HvxVRRegClass);
1234       Align NeedAlign = HRI.getSpillAlign(Hexagon::HvxVRRegClass);
1235       unsigned NewOpc = UseAligned(MI, NeedAlign) ? Hexagon::V6_vS32b_ai
1236                                                   : Hexagon::V6_vS32Ub_ai;
1237       BuildMI(MBB, MI, DL, get(NewOpc))
1238           .addReg(BaseOp.getReg(), getRegState(BaseOp) & ~RegState::Kill)
1239           .addImm(Offset)
1240           .addReg(HRI.getSubReg(SrcReg, Hexagon::vsub_lo))
1241           .cloneMemRefs(MI);
1242       BuildMI(MBB, MI, DL, get(NewOpc))
1243           .addReg(BaseOp.getReg(), getRegState(BaseOp))
1244           .addImm(Offset + VecOffset)
1245           .addReg(HRI.getSubReg(SrcReg, Hexagon::vsub_hi))
1246           .cloneMemRefs(MI);
1247       MBB.erase(MI);
1248       return true;
1249     }
1250     case Hexagon::PS_true: {
1251       Register Reg = MI.getOperand(0).getReg();
1252       BuildMI(MBB, MI, DL, get(Hexagon::C2_orn), Reg)
1253         .addReg(Reg, RegState::Undef)
1254         .addReg(Reg, RegState::Undef);
1255       MBB.erase(MI);
1256       return true;
1257     }
1258     case Hexagon::PS_false: {
1259       Register Reg = MI.getOperand(0).getReg();
1260       BuildMI(MBB, MI, DL, get(Hexagon::C2_andn), Reg)
1261         .addReg(Reg, RegState::Undef)
1262         .addReg(Reg, RegState::Undef);
1263       MBB.erase(MI);
1264       return true;
1265     }
1266     case Hexagon::PS_qtrue: {
1267       BuildMI(MBB, MI, DL, get(Hexagon::V6_veqw), MI.getOperand(0).getReg())
1268         .addReg(Hexagon::V0, RegState::Undef)
1269         .addReg(Hexagon::V0, RegState::Undef);
1270       MBB.erase(MI);
1271       return true;
1272     }
1273     case Hexagon::PS_qfalse: {
1274       BuildMI(MBB, MI, DL, get(Hexagon::V6_vgtw), MI.getOperand(0).getReg())
1275         .addReg(Hexagon::V0, RegState::Undef)
1276         .addReg(Hexagon::V0, RegState::Undef);
1277       MBB.erase(MI);
1278       return true;
1279     }
1280     case Hexagon::PS_vdd0: {
1281       Register Vd = MI.getOperand(0).getReg();
1282       BuildMI(MBB, MI, DL, get(Hexagon::V6_vsubw_dv), Vd)
1283         .addReg(Vd, RegState::Undef)
1284         .addReg(Vd, RegState::Undef);
1285       MBB.erase(MI);
1286       return true;
1287     }
1288     case Hexagon::PS_vmulw: {
1289       // Expand a 64-bit vector multiply into 2 32-bit scalar multiplies.
1290       Register DstReg = MI.getOperand(0).getReg();
1291       Register Src1Reg = MI.getOperand(1).getReg();
1292       Register Src2Reg = MI.getOperand(2).getReg();
1293       Register Src1SubHi = HRI.getSubReg(Src1Reg, Hexagon::isub_hi);
1294       Register Src1SubLo = HRI.getSubReg(Src1Reg, Hexagon::isub_lo);
1295       Register Src2SubHi = HRI.getSubReg(Src2Reg, Hexagon::isub_hi);
1296       Register Src2SubLo = HRI.getSubReg(Src2Reg, Hexagon::isub_lo);
1297       BuildMI(MBB, MI, MI.getDebugLoc(), get(Hexagon::M2_mpyi),
1298               HRI.getSubReg(DstReg, Hexagon::isub_hi))
1299           .addReg(Src1SubHi)
1300           .addReg(Src2SubHi);
1301       BuildMI(MBB, MI, MI.getDebugLoc(), get(Hexagon::M2_mpyi),
1302               HRI.getSubReg(DstReg, Hexagon::isub_lo))
1303           .addReg(Src1SubLo)
1304           .addReg(Src2SubLo);
1305       MBB.erase(MI);
1306       MRI.clearKillFlags(Src1SubHi);
1307       MRI.clearKillFlags(Src1SubLo);
1308       MRI.clearKillFlags(Src2SubHi);
1309       MRI.clearKillFlags(Src2SubLo);
1310       return true;
1311     }
1312     case Hexagon::PS_vmulw_acc: {
1313       // Expand 64-bit vector multiply with addition into 2 scalar multiplies.
1314       Register DstReg = MI.getOperand(0).getReg();
1315       Register Src1Reg = MI.getOperand(1).getReg();
1316       Register Src2Reg = MI.getOperand(2).getReg();
1317       Register Src3Reg = MI.getOperand(3).getReg();
1318       Register Src1SubHi = HRI.getSubReg(Src1Reg, Hexagon::isub_hi);
1319       Register Src1SubLo = HRI.getSubReg(Src1Reg, Hexagon::isub_lo);
1320       Register Src2SubHi = HRI.getSubReg(Src2Reg, Hexagon::isub_hi);
1321       Register Src2SubLo = HRI.getSubReg(Src2Reg, Hexagon::isub_lo);
1322       Register Src3SubHi = HRI.getSubReg(Src3Reg, Hexagon::isub_hi);
1323       Register Src3SubLo = HRI.getSubReg(Src3Reg, Hexagon::isub_lo);
1324       BuildMI(MBB, MI, MI.getDebugLoc(), get(Hexagon::M2_maci),
1325               HRI.getSubReg(DstReg, Hexagon::isub_hi))
1326           .addReg(Src1SubHi)
1327           .addReg(Src2SubHi)
1328           .addReg(Src3SubHi);
1329       BuildMI(MBB, MI, MI.getDebugLoc(), get(Hexagon::M2_maci),
1330               HRI.getSubReg(DstReg, Hexagon::isub_lo))
1331           .addReg(Src1SubLo)
1332           .addReg(Src2SubLo)
1333           .addReg(Src3SubLo);
1334       MBB.erase(MI);
1335       MRI.clearKillFlags(Src1SubHi);
1336       MRI.clearKillFlags(Src1SubLo);
1337       MRI.clearKillFlags(Src2SubHi);
1338       MRI.clearKillFlags(Src2SubLo);
1339       MRI.clearKillFlags(Src3SubHi);
1340       MRI.clearKillFlags(Src3SubLo);
1341       return true;
1342     }
1343     case Hexagon::PS_pselect: {
1344       const MachineOperand &Op0 = MI.getOperand(0);
1345       const MachineOperand &Op1 = MI.getOperand(1);
1346       const MachineOperand &Op2 = MI.getOperand(2);
1347       const MachineOperand &Op3 = MI.getOperand(3);
1348       Register Rd = Op0.getReg();
1349       Register Pu = Op1.getReg();
1350       Register Rs = Op2.getReg();
1351       Register Rt = Op3.getReg();
1352       DebugLoc DL = MI.getDebugLoc();
1353       unsigned K1 = getKillRegState(Op1.isKill());
1354       unsigned K2 = getKillRegState(Op2.isKill());
1355       unsigned K3 = getKillRegState(Op3.isKill());
1356       if (Rd != Rs)
1357         BuildMI(MBB, MI, DL, get(Hexagon::A2_tfrpt), Rd)
1358           .addReg(Pu, (Rd == Rt) ? K1 : 0)
1359           .addReg(Rs, K2);
1360       if (Rd != Rt)
1361         BuildMI(MBB, MI, DL, get(Hexagon::A2_tfrpf), Rd)
1362           .addReg(Pu, K1)
1363           .addReg(Rt, K3);
1364       MBB.erase(MI);
1365       return true;
1366     }
1367     case Hexagon::PS_vselect: {
1368       const MachineOperand &Op0 = MI.getOperand(0);
1369       const MachineOperand &Op1 = MI.getOperand(1);
1370       const MachineOperand &Op2 = MI.getOperand(2);
1371       const MachineOperand &Op3 = MI.getOperand(3);
1372       getLiveOutRegsAt(LiveOut, MI);
1373       bool IsDestLive = !LiveOut.available(MRI, Op0.getReg());
1374       Register PReg = Op1.getReg();
1375       assert(Op1.getSubReg() == 0);
1376       unsigned PState = getRegState(Op1);
1377 
1378       if (Op0.getReg() != Op2.getReg()) {
1379         unsigned S = Op0.getReg() != Op3.getReg() ? PState & ~RegState::Kill
1380                                                   : PState;
1381         auto T = BuildMI(MBB, MI, DL, get(Hexagon::V6_vcmov))
1382                      .add(Op0)
1383                      .addReg(PReg, S)
1384                      .add(Op2);
1385         if (IsDestLive)
1386           T.addReg(Op0.getReg(), RegState::Implicit);
1387         IsDestLive = true;
1388       }
1389       if (Op0.getReg() != Op3.getReg()) {
1390         auto T = BuildMI(MBB, MI, DL, get(Hexagon::V6_vncmov))
1391                      .add(Op0)
1392                      .addReg(PReg, PState)
1393                      .add(Op3);
1394         if (IsDestLive)
1395           T.addReg(Op0.getReg(), RegState::Implicit);
1396       }
1397       MBB.erase(MI);
1398       return true;
1399     }
1400     case Hexagon::PS_wselect: {
1401       MachineOperand &Op0 = MI.getOperand(0);
1402       MachineOperand &Op1 = MI.getOperand(1);
1403       MachineOperand &Op2 = MI.getOperand(2);
1404       MachineOperand &Op3 = MI.getOperand(3);
1405       getLiveOutRegsAt(LiveOut, MI);
1406       bool IsDestLive = !LiveOut.available(MRI, Op0.getReg());
1407       Register PReg = Op1.getReg();
1408       assert(Op1.getSubReg() == 0);
1409       unsigned PState = getRegState(Op1);
1410 
1411       if (Op0.getReg() != Op2.getReg()) {
1412         unsigned S = Op0.getReg() != Op3.getReg() ? PState & ~RegState::Kill
1413                                                   : PState;
1414         Register SrcLo = HRI.getSubReg(Op2.getReg(), Hexagon::vsub_lo);
1415         Register SrcHi = HRI.getSubReg(Op2.getReg(), Hexagon::vsub_hi);
1416         auto T = BuildMI(MBB, MI, DL, get(Hexagon::V6_vccombine))
1417                      .add(Op0)
1418                      .addReg(PReg, S)
1419                      .addReg(SrcHi)
1420                      .addReg(SrcLo);
1421         if (IsDestLive)
1422           T.addReg(Op0.getReg(), RegState::Implicit);
1423         IsDestLive = true;
1424       }
1425       if (Op0.getReg() != Op3.getReg()) {
1426         Register SrcLo = HRI.getSubReg(Op3.getReg(), Hexagon::vsub_lo);
1427         Register SrcHi = HRI.getSubReg(Op3.getReg(), Hexagon::vsub_hi);
1428         auto T = BuildMI(MBB, MI, DL, get(Hexagon::V6_vnccombine))
1429                      .add(Op0)
1430                      .addReg(PReg, PState)
1431                      .addReg(SrcHi)
1432                      .addReg(SrcLo);
1433         if (IsDestLive)
1434           T.addReg(Op0.getReg(), RegState::Implicit);
1435       }
1436       MBB.erase(MI);
1437       return true;
1438     }
1439 
1440     case Hexagon::PS_crash: {
1441       // Generate a misaligned load that is guaranteed to cause a crash.
1442       class CrashPseudoSourceValue : public PseudoSourceValue {
1443       public:
1444         CrashPseudoSourceValue(const TargetMachine &TM)
1445             : PseudoSourceValue(TargetCustom, TM) {}
1446 
1447         bool isConstant(const MachineFrameInfo *) const override {
1448           return false;
1449         }
1450         bool isAliased(const MachineFrameInfo *) const override {
1451           return false;
1452         }
1453         bool mayAlias(const MachineFrameInfo *) const override {
1454           return false;
1455         }
1456         void printCustom(raw_ostream &OS) const override {
1457           OS << "MisalignedCrash";
1458         }
1459       };
1460 
1461       static const CrashPseudoSourceValue CrashPSV(MF.getTarget());
1462       MachineMemOperand *MMO = MF.getMachineMemOperand(
1463           MachinePointerInfo(&CrashPSV),
1464           MachineMemOperand::MOLoad | MachineMemOperand::MOVolatile, 8,
1465           Align(1));
1466       BuildMI(MBB, MI, DL, get(Hexagon::PS_loadrdabs), Hexagon::D13)
1467         .addImm(0xBADC0FEE)  // Misaligned load.
1468         .addMemOperand(MMO);
1469       MBB.erase(MI);
1470       return true;
1471     }
1472 
1473     case Hexagon::PS_tailcall_i:
1474       MI.setDesc(get(Hexagon::J2_jump));
1475       return true;
1476     case Hexagon::PS_tailcall_r:
1477     case Hexagon::PS_jmpret:
1478       MI.setDesc(get(Hexagon::J2_jumpr));
1479       return true;
1480     case Hexagon::PS_jmprett:
1481       MI.setDesc(get(Hexagon::J2_jumprt));
1482       return true;
1483     case Hexagon::PS_jmpretf:
1484       MI.setDesc(get(Hexagon::J2_jumprf));
1485       return true;
1486     case Hexagon::PS_jmprettnewpt:
1487       MI.setDesc(get(Hexagon::J2_jumprtnewpt));
1488       return true;
1489     case Hexagon::PS_jmpretfnewpt:
1490       MI.setDesc(get(Hexagon::J2_jumprfnewpt));
1491       return true;
1492     case Hexagon::PS_jmprettnew:
1493       MI.setDesc(get(Hexagon::J2_jumprtnew));
1494       return true;
1495     case Hexagon::PS_jmpretfnew:
1496       MI.setDesc(get(Hexagon::J2_jumprfnew));
1497       return true;
1498 
1499     case Hexagon::PS_loadrub_pci:
1500       return RealCirc(Hexagon::L2_loadrub_pci, /*HasImm*/true,  /*MxOp*/4);
1501     case Hexagon::PS_loadrb_pci:
1502       return RealCirc(Hexagon::L2_loadrb_pci,  /*HasImm*/true,  /*MxOp*/4);
1503     case Hexagon::PS_loadruh_pci:
1504       return RealCirc(Hexagon::L2_loadruh_pci, /*HasImm*/true,  /*MxOp*/4);
1505     case Hexagon::PS_loadrh_pci:
1506       return RealCirc(Hexagon::L2_loadrh_pci,  /*HasImm*/true,  /*MxOp*/4);
1507     case Hexagon::PS_loadri_pci:
1508       return RealCirc(Hexagon::L2_loadri_pci,  /*HasImm*/true,  /*MxOp*/4);
1509     case Hexagon::PS_loadrd_pci:
1510       return RealCirc(Hexagon::L2_loadrd_pci,  /*HasImm*/true,  /*MxOp*/4);
1511     case Hexagon::PS_loadrub_pcr:
1512       return RealCirc(Hexagon::L2_loadrub_pcr, /*HasImm*/false, /*MxOp*/3);
1513     case Hexagon::PS_loadrb_pcr:
1514       return RealCirc(Hexagon::L2_loadrb_pcr,  /*HasImm*/false, /*MxOp*/3);
1515     case Hexagon::PS_loadruh_pcr:
1516       return RealCirc(Hexagon::L2_loadruh_pcr, /*HasImm*/false, /*MxOp*/3);
1517     case Hexagon::PS_loadrh_pcr:
1518       return RealCirc(Hexagon::L2_loadrh_pcr,  /*HasImm*/false, /*MxOp*/3);
1519     case Hexagon::PS_loadri_pcr:
1520       return RealCirc(Hexagon::L2_loadri_pcr,  /*HasImm*/false, /*MxOp*/3);
1521     case Hexagon::PS_loadrd_pcr:
1522       return RealCirc(Hexagon::L2_loadrd_pcr,  /*HasImm*/false, /*MxOp*/3);
1523     case Hexagon::PS_storerb_pci:
1524       return RealCirc(Hexagon::S2_storerb_pci, /*HasImm*/true,  /*MxOp*/3);
1525     case Hexagon::PS_storerh_pci:
1526       return RealCirc(Hexagon::S2_storerh_pci, /*HasImm*/true,  /*MxOp*/3);
1527     case Hexagon::PS_storerf_pci:
1528       return RealCirc(Hexagon::S2_storerf_pci, /*HasImm*/true,  /*MxOp*/3);
1529     case Hexagon::PS_storeri_pci:
1530       return RealCirc(Hexagon::S2_storeri_pci, /*HasImm*/true,  /*MxOp*/3);
1531     case Hexagon::PS_storerd_pci:
1532       return RealCirc(Hexagon::S2_storerd_pci, /*HasImm*/true,  /*MxOp*/3);
1533     case Hexagon::PS_storerb_pcr:
1534       return RealCirc(Hexagon::S2_storerb_pcr, /*HasImm*/false, /*MxOp*/2);
1535     case Hexagon::PS_storerh_pcr:
1536       return RealCirc(Hexagon::S2_storerh_pcr, /*HasImm*/false, /*MxOp*/2);
1537     case Hexagon::PS_storerf_pcr:
1538       return RealCirc(Hexagon::S2_storerf_pcr, /*HasImm*/false, /*MxOp*/2);
1539     case Hexagon::PS_storeri_pcr:
1540       return RealCirc(Hexagon::S2_storeri_pcr, /*HasImm*/false, /*MxOp*/2);
1541     case Hexagon::PS_storerd_pcr:
1542       return RealCirc(Hexagon::S2_storerd_pcr, /*HasImm*/false, /*MxOp*/2);
1543   }
1544 
1545   return false;
1546 }
1547 
1548 MachineBasicBlock::instr_iterator
1549 HexagonInstrInfo::expandVGatherPseudo(MachineInstr &MI) const {
1550   MachineBasicBlock &MBB = *MI.getParent();
1551   const DebugLoc &DL = MI.getDebugLoc();
1552   unsigned Opc = MI.getOpcode();
1553   MachineBasicBlock::iterator First;
1554 
1555   switch (Opc) {
1556     case Hexagon::V6_vgathermh_pseudo:
1557       First = BuildMI(MBB, MI, DL, get(Hexagon::V6_vgathermh))
1558                   .add(MI.getOperand(2))
1559                   .add(MI.getOperand(3))
1560                   .add(MI.getOperand(4));
1561       BuildMI(MBB, MI, DL, get(Hexagon::V6_vS32b_new_ai))
1562           .add(MI.getOperand(0))
1563           .addImm(MI.getOperand(1).getImm())
1564           .addReg(Hexagon::VTMP);
1565       MBB.erase(MI);
1566       return First.getInstrIterator();
1567 
1568     case Hexagon::V6_vgathermw_pseudo:
1569       First = BuildMI(MBB, MI, DL, get(Hexagon::V6_vgathermw))
1570                   .add(MI.getOperand(2))
1571                   .add(MI.getOperand(3))
1572                   .add(MI.getOperand(4));
1573       BuildMI(MBB, MI, DL, get(Hexagon::V6_vS32b_new_ai))
1574           .add(MI.getOperand(0))
1575           .addImm(MI.getOperand(1).getImm())
1576           .addReg(Hexagon::VTMP);
1577       MBB.erase(MI);
1578       return First.getInstrIterator();
1579 
1580     case Hexagon::V6_vgathermhw_pseudo:
1581       First = BuildMI(MBB, MI, DL, get(Hexagon::V6_vgathermhw))
1582                   .add(MI.getOperand(2))
1583                   .add(MI.getOperand(3))
1584                   .add(MI.getOperand(4));
1585       BuildMI(MBB, MI, DL, get(Hexagon::V6_vS32b_new_ai))
1586           .add(MI.getOperand(0))
1587           .addImm(MI.getOperand(1).getImm())
1588           .addReg(Hexagon::VTMP);
1589       MBB.erase(MI);
1590       return First.getInstrIterator();
1591 
1592     case Hexagon::V6_vgathermhq_pseudo:
1593       First = BuildMI(MBB, MI, DL, get(Hexagon::V6_vgathermhq))
1594                   .add(MI.getOperand(2))
1595                   .add(MI.getOperand(3))
1596                   .add(MI.getOperand(4))
1597                   .add(MI.getOperand(5));
1598       BuildMI(MBB, MI, DL, get(Hexagon::V6_vS32b_new_ai))
1599           .add(MI.getOperand(0))
1600           .addImm(MI.getOperand(1).getImm())
1601           .addReg(Hexagon::VTMP);
1602       MBB.erase(MI);
1603       return First.getInstrIterator();
1604 
1605     case Hexagon::V6_vgathermwq_pseudo:
1606       First = BuildMI(MBB, MI, DL, get(Hexagon::V6_vgathermwq))
1607                   .add(MI.getOperand(2))
1608                   .add(MI.getOperand(3))
1609                   .add(MI.getOperand(4))
1610                   .add(MI.getOperand(5));
1611       BuildMI(MBB, MI, DL, get(Hexagon::V6_vS32b_new_ai))
1612           .add(MI.getOperand(0))
1613           .addImm(MI.getOperand(1).getImm())
1614           .addReg(Hexagon::VTMP);
1615       MBB.erase(MI);
1616       return First.getInstrIterator();
1617 
1618     case Hexagon::V6_vgathermhwq_pseudo:
1619       First = BuildMI(MBB, MI, DL, get(Hexagon::V6_vgathermhwq))
1620                   .add(MI.getOperand(2))
1621                   .add(MI.getOperand(3))
1622                   .add(MI.getOperand(4))
1623                   .add(MI.getOperand(5));
1624       BuildMI(MBB, MI, DL, get(Hexagon::V6_vS32b_new_ai))
1625           .add(MI.getOperand(0))
1626           .addImm(MI.getOperand(1).getImm())
1627           .addReg(Hexagon::VTMP);
1628       MBB.erase(MI);
1629       return First.getInstrIterator();
1630   }
1631 
1632   return MI.getIterator();
1633 }
1634 
1635 // We indicate that we want to reverse the branch by
1636 // inserting the reversed branching opcode.
1637 bool HexagonInstrInfo::reverseBranchCondition(
1638       SmallVectorImpl<MachineOperand> &Cond) const {
1639   if (Cond.empty())
1640     return true;
1641   assert(Cond[0].isImm() && "First entry in the cond vector not imm-val");
1642   unsigned opcode = Cond[0].getImm();
1643   //unsigned temp;
1644   assert(get(opcode).isBranch() && "Should be a branching condition.");
1645   if (isEndLoopN(opcode))
1646     return true;
1647   unsigned NewOpcode = getInvertedPredicatedOpcode(opcode);
1648   Cond[0].setImm(NewOpcode);
1649   return false;
1650 }
1651 
1652 void HexagonInstrInfo::insertNoop(MachineBasicBlock &MBB,
1653       MachineBasicBlock::iterator MI) const {
1654   DebugLoc DL;
1655   BuildMI(MBB, MI, DL, get(Hexagon::A2_nop));
1656 }
1657 
1658 bool HexagonInstrInfo::isPostIncrement(const MachineInstr &MI) const {
1659   return getAddrMode(MI) == HexagonII::PostInc;
1660 }
1661 
1662 // Returns true if an instruction is predicated irrespective of the predicate
1663 // sense. For example, all of the following will return true.
1664 // if (p0) R1 = add(R2, R3)
1665 // if (!p0) R1 = add(R2, R3)
1666 // if (p0.new) R1 = add(R2, R3)
1667 // if (!p0.new) R1 = add(R2, R3)
1668 // Note: New-value stores are not included here as in the current
1669 // implementation, we don't need to check their predicate sense.
1670 bool HexagonInstrInfo::isPredicated(const MachineInstr &MI) const {
1671   const uint64_t F = MI.getDesc().TSFlags;
1672   return (F >> HexagonII::PredicatedPos) & HexagonII::PredicatedMask;
1673 }
1674 
1675 bool HexagonInstrInfo::PredicateInstruction(
1676     MachineInstr &MI, ArrayRef<MachineOperand> Cond) const {
1677   if (Cond.empty() || isNewValueJump(Cond[0].getImm()) ||
1678       isEndLoopN(Cond[0].getImm())) {
1679     LLVM_DEBUG(dbgs() << "\nCannot predicate:"; MI.dump(););
1680     return false;
1681   }
1682   int Opc = MI.getOpcode();
1683   assert (isPredicable(MI) && "Expected predicable instruction");
1684   bool invertJump = predOpcodeHasNot(Cond);
1685 
1686   // We have to predicate MI "in place", i.e. after this function returns,
1687   // MI will need to be transformed into a predicated form. To avoid com-
1688   // plicated manipulations with the operands (handling tied operands,
1689   // etc.), build a new temporary instruction, then overwrite MI with it.
1690 
1691   MachineBasicBlock &B = *MI.getParent();
1692   DebugLoc DL = MI.getDebugLoc();
1693   unsigned PredOpc = getCondOpcode(Opc, invertJump);
1694   MachineInstrBuilder T = BuildMI(B, MI, DL, get(PredOpc));
1695   unsigned NOp = 0, NumOps = MI.getNumOperands();
1696   while (NOp < NumOps) {
1697     MachineOperand &Op = MI.getOperand(NOp);
1698     if (!Op.isReg() || !Op.isDef() || Op.isImplicit())
1699       break;
1700     T.add(Op);
1701     NOp++;
1702   }
1703 
1704   Register PredReg;
1705   unsigned PredRegPos, PredRegFlags;
1706   bool GotPredReg = getPredReg(Cond, PredReg, PredRegPos, PredRegFlags);
1707   (void)GotPredReg;
1708   assert(GotPredReg);
1709   T.addReg(PredReg, PredRegFlags);
1710   while (NOp < NumOps)
1711     T.add(MI.getOperand(NOp++));
1712 
1713   MI.setDesc(get(PredOpc));
1714   while (unsigned n = MI.getNumOperands())
1715     MI.removeOperand(n-1);
1716   for (unsigned i = 0, n = T->getNumOperands(); i < n; ++i)
1717     MI.addOperand(T->getOperand(i));
1718 
1719   MachineBasicBlock::instr_iterator TI = T->getIterator();
1720   B.erase(TI);
1721 
1722   MachineRegisterInfo &MRI = B.getParent()->getRegInfo();
1723   MRI.clearKillFlags(PredReg);
1724   return true;
1725 }
1726 
1727 bool HexagonInstrInfo::SubsumesPredicate(ArrayRef<MachineOperand> Pred1,
1728       ArrayRef<MachineOperand> Pred2) const {
1729   // TODO: Fix this
1730   return false;
1731 }
1732 
1733 bool HexagonInstrInfo::ClobbersPredicate(MachineInstr &MI,
1734                                          std::vector<MachineOperand> &Pred,
1735                                          bool SkipDead) const {
1736   const HexagonRegisterInfo &HRI = *Subtarget.getRegisterInfo();
1737 
1738   for (const MachineOperand &MO : MI.operands()) {
1739     if (MO.isReg()) {
1740       if (!MO.isDef())
1741         continue;
1742       const TargetRegisterClass* RC = HRI.getMinimalPhysRegClass(MO.getReg());
1743       if (RC == &Hexagon::PredRegsRegClass) {
1744         Pred.push_back(MO);
1745         return true;
1746       }
1747       continue;
1748     } else if (MO.isRegMask()) {
1749       for (Register PR : Hexagon::PredRegsRegClass) {
1750         if (!MI.modifiesRegister(PR, &HRI))
1751           continue;
1752         Pred.push_back(MO);
1753         return true;
1754       }
1755     }
1756   }
1757   return false;
1758 }
1759 
1760 bool HexagonInstrInfo::isPredicable(const MachineInstr &MI) const {
1761   if (!MI.getDesc().isPredicable())
1762     return false;
1763 
1764   if (MI.isCall() || isTailCall(MI)) {
1765     if (!Subtarget.usePredicatedCalls())
1766       return false;
1767   }
1768 
1769   // HVX loads are not predicable on v60, but are on v62.
1770   if (!Subtarget.hasV62Ops()) {
1771     switch (MI.getOpcode()) {
1772       case Hexagon::V6_vL32b_ai:
1773       case Hexagon::V6_vL32b_pi:
1774       case Hexagon::V6_vL32b_ppu:
1775       case Hexagon::V6_vL32b_cur_ai:
1776       case Hexagon::V6_vL32b_cur_pi:
1777       case Hexagon::V6_vL32b_cur_ppu:
1778       case Hexagon::V6_vL32b_nt_ai:
1779       case Hexagon::V6_vL32b_nt_pi:
1780       case Hexagon::V6_vL32b_nt_ppu:
1781       case Hexagon::V6_vL32b_tmp_ai:
1782       case Hexagon::V6_vL32b_tmp_pi:
1783       case Hexagon::V6_vL32b_tmp_ppu:
1784       case Hexagon::V6_vL32b_nt_cur_ai:
1785       case Hexagon::V6_vL32b_nt_cur_pi:
1786       case Hexagon::V6_vL32b_nt_cur_ppu:
1787       case Hexagon::V6_vL32b_nt_tmp_ai:
1788       case Hexagon::V6_vL32b_nt_tmp_pi:
1789       case Hexagon::V6_vL32b_nt_tmp_ppu:
1790         return false;
1791     }
1792   }
1793   return true;
1794 }
1795 
1796 bool HexagonInstrInfo::isSchedulingBoundary(const MachineInstr &MI,
1797                                             const MachineBasicBlock *MBB,
1798                                             const MachineFunction &MF) const {
1799   // Debug info is never a scheduling boundary. It's necessary to be explicit
1800   // due to the special treatment of IT instructions below, otherwise a
1801   // dbg_value followed by an IT will result in the IT instruction being
1802   // considered a scheduling hazard, which is wrong. It should be the actual
1803   // instruction preceding the dbg_value instruction(s), just like it is
1804   // when debug info is not present.
1805   if (MI.isDebugInstr())
1806     return false;
1807 
1808   // Throwing call is a boundary.
1809   if (MI.isCall()) {
1810     // Don't mess around with no return calls.
1811     if (doesNotReturn(MI))
1812       return true;
1813     // If any of the block's successors is a landing pad, this could be a
1814     // throwing call.
1815     for (auto *I : MBB->successors())
1816       if (I->isEHPad())
1817         return true;
1818   }
1819 
1820   // Terminators and labels can't be scheduled around.
1821   if (MI.getDesc().isTerminator() || MI.isPosition())
1822     return true;
1823 
1824   // INLINEASM_BR can jump to another block
1825   if (MI.getOpcode() == TargetOpcode::INLINEASM_BR)
1826     return true;
1827 
1828   if (MI.isInlineAsm() && !ScheduleInlineAsm)
1829     return true;
1830 
1831   return false;
1832 }
1833 
1834 /// Measure the specified inline asm to determine an approximation of its
1835 /// length.
1836 /// Comments (which run till the next SeparatorString or newline) do not
1837 /// count as an instruction.
1838 /// Any other non-whitespace text is considered an instruction, with
1839 /// multiple instructions separated by SeparatorString or newlines.
1840 /// Variable-length instructions are not handled here; this function
1841 /// may be overloaded in the target code to do that.
1842 /// Hexagon counts the number of ##'s and adjust for that many
1843 /// constant exenders.
1844 unsigned HexagonInstrInfo::getInlineAsmLength(const char *Str,
1845                                               const MCAsmInfo &MAI,
1846                                               const TargetSubtargetInfo *STI) const {
1847   StringRef AStr(Str);
1848   // Count the number of instructions in the asm.
1849   bool atInsnStart = true;
1850   unsigned Length = 0;
1851   const unsigned MaxInstLength = MAI.getMaxInstLength(STI);
1852   for (; *Str; ++Str) {
1853     if (*Str == '\n' || strncmp(Str, MAI.getSeparatorString(),
1854                                 strlen(MAI.getSeparatorString())) == 0)
1855       atInsnStart = true;
1856     if (atInsnStart && !isSpace(static_cast<unsigned char>(*Str))) {
1857       Length += MaxInstLength;
1858       atInsnStart = false;
1859     }
1860     if (atInsnStart && strncmp(Str, MAI.getCommentString().data(),
1861                                MAI.getCommentString().size()) == 0)
1862       atInsnStart = false;
1863   }
1864 
1865   // Add to size number of constant extenders seen * 4.
1866   StringRef Occ("##");
1867   Length += AStr.count(Occ)*4;
1868   return Length;
1869 }
1870 
1871 ScheduleHazardRecognizer*
1872 HexagonInstrInfo::CreateTargetPostRAHazardRecognizer(
1873       const InstrItineraryData *II, const ScheduleDAG *DAG) const {
1874   if (UseDFAHazardRec)
1875     return new HexagonHazardRecognizer(II, this, Subtarget);
1876   return TargetInstrInfo::CreateTargetPostRAHazardRecognizer(II, DAG);
1877 }
1878 
1879 /// For a comparison instruction, return the source registers in
1880 /// \p SrcReg and \p SrcReg2 if having two register operands, and the value it
1881 /// compares against in CmpValue. Return true if the comparison instruction
1882 /// can be analyzed.
1883 bool HexagonInstrInfo::analyzeCompare(const MachineInstr &MI, Register &SrcReg,
1884                                       Register &SrcReg2, int64_t &Mask,
1885                                       int64_t &Value) const {
1886   unsigned Opc = MI.getOpcode();
1887 
1888   // Set mask and the first source register.
1889   switch (Opc) {
1890     case Hexagon::C2_cmpeq:
1891     case Hexagon::C2_cmpeqp:
1892     case Hexagon::C2_cmpgt:
1893     case Hexagon::C2_cmpgtp:
1894     case Hexagon::C2_cmpgtu:
1895     case Hexagon::C2_cmpgtup:
1896     case Hexagon::C4_cmpneq:
1897     case Hexagon::C4_cmplte:
1898     case Hexagon::C4_cmplteu:
1899     case Hexagon::C2_cmpeqi:
1900     case Hexagon::C2_cmpgti:
1901     case Hexagon::C2_cmpgtui:
1902     case Hexagon::C4_cmpneqi:
1903     case Hexagon::C4_cmplteui:
1904     case Hexagon::C4_cmpltei:
1905       SrcReg = MI.getOperand(1).getReg();
1906       Mask = ~0;
1907       break;
1908     case Hexagon::A4_cmpbeq:
1909     case Hexagon::A4_cmpbgt:
1910     case Hexagon::A4_cmpbgtu:
1911     case Hexagon::A4_cmpbeqi:
1912     case Hexagon::A4_cmpbgti:
1913     case Hexagon::A4_cmpbgtui:
1914       SrcReg = MI.getOperand(1).getReg();
1915       Mask = 0xFF;
1916       break;
1917     case Hexagon::A4_cmpheq:
1918     case Hexagon::A4_cmphgt:
1919     case Hexagon::A4_cmphgtu:
1920     case Hexagon::A4_cmpheqi:
1921     case Hexagon::A4_cmphgti:
1922     case Hexagon::A4_cmphgtui:
1923       SrcReg = MI.getOperand(1).getReg();
1924       Mask = 0xFFFF;
1925       break;
1926   }
1927 
1928   // Set the value/second source register.
1929   switch (Opc) {
1930     case Hexagon::C2_cmpeq:
1931     case Hexagon::C2_cmpeqp:
1932     case Hexagon::C2_cmpgt:
1933     case Hexagon::C2_cmpgtp:
1934     case Hexagon::C2_cmpgtu:
1935     case Hexagon::C2_cmpgtup:
1936     case Hexagon::A4_cmpbeq:
1937     case Hexagon::A4_cmpbgt:
1938     case Hexagon::A4_cmpbgtu:
1939     case Hexagon::A4_cmpheq:
1940     case Hexagon::A4_cmphgt:
1941     case Hexagon::A4_cmphgtu:
1942     case Hexagon::C4_cmpneq:
1943     case Hexagon::C4_cmplte:
1944     case Hexagon::C4_cmplteu:
1945       SrcReg2 = MI.getOperand(2).getReg();
1946       Value = 0;
1947       return true;
1948 
1949     case Hexagon::C2_cmpeqi:
1950     case Hexagon::C2_cmpgtui:
1951     case Hexagon::C2_cmpgti:
1952     case Hexagon::C4_cmpneqi:
1953     case Hexagon::C4_cmplteui:
1954     case Hexagon::C4_cmpltei:
1955     case Hexagon::A4_cmpbeqi:
1956     case Hexagon::A4_cmpbgti:
1957     case Hexagon::A4_cmpbgtui:
1958     case Hexagon::A4_cmpheqi:
1959     case Hexagon::A4_cmphgti:
1960     case Hexagon::A4_cmphgtui: {
1961       SrcReg2 = 0;
1962       const MachineOperand &Op2 = MI.getOperand(2);
1963       if (!Op2.isImm())
1964         return false;
1965       Value = MI.getOperand(2).getImm();
1966       return true;
1967     }
1968   }
1969 
1970   return false;
1971 }
1972 
1973 unsigned HexagonInstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
1974                                            const MachineInstr &MI,
1975                                            unsigned *PredCost) const {
1976   return getInstrTimingClassLatency(ItinData, MI);
1977 }
1978 
1979 DFAPacketizer *HexagonInstrInfo::CreateTargetScheduleState(
1980     const TargetSubtargetInfo &STI) const {
1981   const InstrItineraryData *II = STI.getInstrItineraryData();
1982   return static_cast<const HexagonSubtarget&>(STI).createDFAPacketizer(II);
1983 }
1984 
1985 // Inspired by this pair:
1986 //  %r13 = L2_loadri_io %r29, 136; mem:LD4[FixedStack0]
1987 //  S2_storeri_io %r29, 132, killed %r1; flags:  mem:ST4[FixedStack1]
1988 // Currently AA considers the addresses in these instructions to be aliasing.
1989 bool HexagonInstrInfo::areMemAccessesTriviallyDisjoint(
1990     const MachineInstr &MIa, const MachineInstr &MIb) const {
1991   if (MIa.hasUnmodeledSideEffects() || MIb.hasUnmodeledSideEffects() ||
1992       MIa.hasOrderedMemoryRef() || MIb.hasOrderedMemoryRef())
1993     return false;
1994 
1995   // Instructions that are pure loads, not loads and stores like memops are not
1996   // dependent.
1997   if (MIa.mayLoad() && !isMemOp(MIa) && MIb.mayLoad() && !isMemOp(MIb))
1998     return true;
1999 
2000   // Get the base register in MIa.
2001   unsigned BasePosA, OffsetPosA;
2002   if (!getBaseAndOffsetPosition(MIa, BasePosA, OffsetPosA))
2003     return false;
2004   const MachineOperand &BaseA = MIa.getOperand(BasePosA);
2005   Register BaseRegA = BaseA.getReg();
2006   unsigned BaseSubA = BaseA.getSubReg();
2007 
2008   // Get the base register in MIb.
2009   unsigned BasePosB, OffsetPosB;
2010   if (!getBaseAndOffsetPosition(MIb, BasePosB, OffsetPosB))
2011     return false;
2012   const MachineOperand &BaseB = MIb.getOperand(BasePosB);
2013   Register BaseRegB = BaseB.getReg();
2014   unsigned BaseSubB = BaseB.getSubReg();
2015 
2016   if (BaseRegA != BaseRegB || BaseSubA != BaseSubB)
2017     return false;
2018 
2019   // Get the access sizes.
2020   unsigned SizeA = getMemAccessSize(MIa);
2021   unsigned SizeB = getMemAccessSize(MIb);
2022 
2023   // Get the offsets. Handle immediates only for now.
2024   const MachineOperand &OffA = MIa.getOperand(OffsetPosA);
2025   const MachineOperand &OffB = MIb.getOperand(OffsetPosB);
2026   if (!MIa.getOperand(OffsetPosA).isImm() ||
2027       !MIb.getOperand(OffsetPosB).isImm())
2028     return false;
2029   int OffsetA = isPostIncrement(MIa) ? 0 : OffA.getImm();
2030   int OffsetB = isPostIncrement(MIb) ? 0 : OffB.getImm();
2031 
2032   // This is a mem access with the same base register and known offsets from it.
2033   // Reason about it.
2034   if (OffsetA > OffsetB) {
2035     uint64_t OffDiff = (uint64_t)((int64_t)OffsetA - (int64_t)OffsetB);
2036     return SizeB <= OffDiff;
2037   }
2038   if (OffsetA < OffsetB) {
2039     uint64_t OffDiff = (uint64_t)((int64_t)OffsetB - (int64_t)OffsetA);
2040     return SizeA <= OffDiff;
2041   }
2042 
2043   return false;
2044 }
2045 
2046 /// If the instruction is an increment of a constant value, return the amount.
2047 bool HexagonInstrInfo::getIncrementValue(const MachineInstr &MI,
2048       int &Value) const {
2049   if (isPostIncrement(MI)) {
2050     unsigned BasePos = 0, OffsetPos = 0;
2051     if (!getBaseAndOffsetPosition(MI, BasePos, OffsetPos))
2052       return false;
2053     const MachineOperand &OffsetOp = MI.getOperand(OffsetPos);
2054     if (OffsetOp.isImm()) {
2055       Value = OffsetOp.getImm();
2056       return true;
2057     }
2058   } else if (MI.getOpcode() == Hexagon::A2_addi) {
2059     const MachineOperand &AddOp = MI.getOperand(2);
2060     if (AddOp.isImm()) {
2061       Value = AddOp.getImm();
2062       return true;
2063     }
2064   }
2065 
2066   return false;
2067 }
2068 
2069 std::pair<unsigned, unsigned>
2070 HexagonInstrInfo::decomposeMachineOperandsTargetFlags(unsigned TF) const {
2071   return std::make_pair(TF & ~HexagonII::MO_Bitmasks,
2072                         TF & HexagonII::MO_Bitmasks);
2073 }
2074 
2075 ArrayRef<std::pair<unsigned, const char*>>
2076 HexagonInstrInfo::getSerializableDirectMachineOperandTargetFlags() const {
2077   using namespace HexagonII;
2078 
2079   static const std::pair<unsigned, const char*> Flags[] = {
2080     {MO_PCREL,  "hexagon-pcrel"},
2081     {MO_GOT,    "hexagon-got"},
2082     {MO_LO16,   "hexagon-lo16"},
2083     {MO_HI16,   "hexagon-hi16"},
2084     {MO_GPREL,  "hexagon-gprel"},
2085     {MO_GDGOT,  "hexagon-gdgot"},
2086     {MO_GDPLT,  "hexagon-gdplt"},
2087     {MO_IE,     "hexagon-ie"},
2088     {MO_IEGOT,  "hexagon-iegot"},
2089     {MO_TPREL,  "hexagon-tprel"}
2090   };
2091   return ArrayRef(Flags);
2092 }
2093 
2094 ArrayRef<std::pair<unsigned, const char*>>
2095 HexagonInstrInfo::getSerializableBitmaskMachineOperandTargetFlags() const {
2096   using namespace HexagonII;
2097 
2098   static const std::pair<unsigned, const char*> Flags[] = {
2099     {HMOTF_ConstExtended, "hexagon-ext"}
2100   };
2101   return ArrayRef(Flags);
2102 }
2103 
2104 Register HexagonInstrInfo::createVR(MachineFunction *MF, MVT VT) const {
2105   MachineRegisterInfo &MRI = MF->getRegInfo();
2106   const TargetRegisterClass *TRC;
2107   if (VT == MVT::i1) {
2108     TRC = &Hexagon::PredRegsRegClass;
2109   } else if (VT == MVT::i32 || VT == MVT::f32) {
2110     TRC = &Hexagon::IntRegsRegClass;
2111   } else if (VT == MVT::i64 || VT == MVT::f64) {
2112     TRC = &Hexagon::DoubleRegsRegClass;
2113   } else {
2114     llvm_unreachable("Cannot handle this register class");
2115   }
2116 
2117   Register NewReg = MRI.createVirtualRegister(TRC);
2118   return NewReg;
2119 }
2120 
2121 bool HexagonInstrInfo::isAbsoluteSet(const MachineInstr &MI) const {
2122   return (getAddrMode(MI) == HexagonII::AbsoluteSet);
2123 }
2124 
2125 bool HexagonInstrInfo::isAccumulator(const MachineInstr &MI) const {
2126   const uint64_t F = MI.getDesc().TSFlags;
2127   return((F >> HexagonII::AccumulatorPos) & HexagonII::AccumulatorMask);
2128 }
2129 
2130 bool HexagonInstrInfo::isBaseImmOffset(const MachineInstr &MI) const {
2131   return getAddrMode(MI) == HexagonII::BaseImmOffset;
2132 }
2133 
2134 bool HexagonInstrInfo::isComplex(const MachineInstr &MI) const {
2135   return !isTC1(MI) && !isTC2Early(MI) && !MI.getDesc().mayLoad() &&
2136          !MI.getDesc().mayStore() &&
2137          MI.getDesc().getOpcode() != Hexagon::S2_allocframe &&
2138          MI.getDesc().getOpcode() != Hexagon::L2_deallocframe &&
2139          !isMemOp(MI) && !MI.isBranch() && !MI.isReturn() && !MI.isCall();
2140 }
2141 
2142 // Return true if the instruction is a compound branch instruction.
2143 bool HexagonInstrInfo::isCompoundBranchInstr(const MachineInstr &MI) const {
2144   return getType(MI) == HexagonII::TypeCJ && MI.isBranch();
2145 }
2146 
2147 // TODO: In order to have isExtendable for fpimm/f32Ext, we need to handle
2148 // isFPImm and later getFPImm as well.
2149 bool HexagonInstrInfo::isConstExtended(const MachineInstr &MI) const {
2150   const uint64_t F = MI.getDesc().TSFlags;
2151   unsigned isExtended = (F >> HexagonII::ExtendedPos) & HexagonII::ExtendedMask;
2152   if (isExtended) // Instruction must be extended.
2153     return true;
2154 
2155   unsigned isExtendable =
2156     (F >> HexagonII::ExtendablePos) & HexagonII::ExtendableMask;
2157   if (!isExtendable)
2158     return false;
2159 
2160   if (MI.isCall())
2161     return false;
2162 
2163   short ExtOpNum = getCExtOpNum(MI);
2164   const MachineOperand &MO = MI.getOperand(ExtOpNum);
2165   // Use MO operand flags to determine if MO
2166   // has the HMOTF_ConstExtended flag set.
2167   if (MO.getTargetFlags() & HexagonII::HMOTF_ConstExtended)
2168     return true;
2169   // If this is a Machine BB address we are talking about, and it is
2170   // not marked as extended, say so.
2171   if (MO.isMBB())
2172     return false;
2173 
2174   // We could be using an instruction with an extendable immediate and shoehorn
2175   // a global address into it. If it is a global address it will be constant
2176   // extended. We do this for COMBINE.
2177   if (MO.isGlobal() || MO.isSymbol() || MO.isBlockAddress() ||
2178       MO.isJTI() || MO.isCPI() || MO.isFPImm())
2179     return true;
2180 
2181   // If the extendable operand is not 'Immediate' type, the instruction should
2182   // have 'isExtended' flag set.
2183   assert(MO.isImm() && "Extendable operand must be Immediate type");
2184 
2185   int64_t Value = MO.getImm();
2186   if ((F >> HexagonII::ExtentSignedPos) & HexagonII::ExtentSignedMask) {
2187     int32_t SValue = Value;
2188     int32_t MinValue = getMinValue(MI);
2189     int32_t MaxValue = getMaxValue(MI);
2190     return SValue < MinValue || SValue > MaxValue;
2191   }
2192   uint32_t UValue = Value;
2193   uint32_t MinValue = getMinValue(MI);
2194   uint32_t MaxValue = getMaxValue(MI);
2195   return UValue < MinValue || UValue > MaxValue;
2196 }
2197 
2198 bool HexagonInstrInfo::isDeallocRet(const MachineInstr &MI) const {
2199   switch (MI.getOpcode()) {
2200   case Hexagon::L4_return:
2201   case Hexagon::L4_return_t:
2202   case Hexagon::L4_return_f:
2203   case Hexagon::L4_return_tnew_pnt:
2204   case Hexagon::L4_return_fnew_pnt:
2205   case Hexagon::L4_return_tnew_pt:
2206   case Hexagon::L4_return_fnew_pt:
2207     return true;
2208   }
2209   return false;
2210 }
2211 
2212 // Return true when ConsMI uses a register defined by ProdMI.
2213 bool HexagonInstrInfo::isDependent(const MachineInstr &ProdMI,
2214       const MachineInstr &ConsMI) const {
2215   if (!ProdMI.getDesc().getNumDefs())
2216     return false;
2217   const HexagonRegisterInfo &HRI = *Subtarget.getRegisterInfo();
2218 
2219   SmallVector<Register, 4> DefsA;
2220   SmallVector<Register, 4> DefsB;
2221   SmallVector<Register, 8> UsesA;
2222   SmallVector<Register, 8> UsesB;
2223 
2224   parseOperands(ProdMI, DefsA, UsesA);
2225   parseOperands(ConsMI, DefsB, UsesB);
2226 
2227   for (auto &RegA : DefsA)
2228     for (auto &RegB : UsesB) {
2229       // True data dependency.
2230       if (RegA == RegB)
2231         return true;
2232 
2233       if (RegA.isPhysical() && llvm::is_contained(HRI.subregs(RegA), RegB))
2234         return true;
2235 
2236       if (RegB.isPhysical() && llvm::is_contained(HRI.subregs(RegB), RegA))
2237         return true;
2238     }
2239 
2240   return false;
2241 }
2242 
2243 // Returns true if the instruction is alread a .cur.
2244 bool HexagonInstrInfo::isDotCurInst(const MachineInstr &MI) const {
2245   switch (MI.getOpcode()) {
2246   case Hexagon::V6_vL32b_cur_pi:
2247   case Hexagon::V6_vL32b_cur_ai:
2248     return true;
2249   }
2250   return false;
2251 }
2252 
2253 // Returns true, if any one of the operands is a dot new
2254 // insn, whether it is predicated dot new or register dot new.
2255 bool HexagonInstrInfo::isDotNewInst(const MachineInstr &MI) const {
2256   if (isNewValueInst(MI) || (isPredicated(MI) && isPredicatedNew(MI)))
2257     return true;
2258 
2259   return false;
2260 }
2261 
2262 /// Symmetrical. See if these two instructions are fit for duplex pair.
2263 bool HexagonInstrInfo::isDuplexPair(const MachineInstr &MIa,
2264       const MachineInstr &MIb) const {
2265   HexagonII::SubInstructionGroup MIaG = getDuplexCandidateGroup(MIa);
2266   HexagonII::SubInstructionGroup MIbG = getDuplexCandidateGroup(MIb);
2267   return (isDuplexPairMatch(MIaG, MIbG) || isDuplexPairMatch(MIbG, MIaG));
2268 }
2269 
2270 bool HexagonInstrInfo::isEndLoopN(unsigned Opcode) const {
2271   return (Opcode == Hexagon::ENDLOOP0 ||
2272           Opcode == Hexagon::ENDLOOP1);
2273 }
2274 
2275 bool HexagonInstrInfo::isExpr(unsigned OpType) const {
2276   switch(OpType) {
2277   case MachineOperand::MO_MachineBasicBlock:
2278   case MachineOperand::MO_GlobalAddress:
2279   case MachineOperand::MO_ExternalSymbol:
2280   case MachineOperand::MO_JumpTableIndex:
2281   case MachineOperand::MO_ConstantPoolIndex:
2282   case MachineOperand::MO_BlockAddress:
2283     return true;
2284   default:
2285     return false;
2286   }
2287 }
2288 
2289 bool HexagonInstrInfo::isExtendable(const MachineInstr &MI) const {
2290   const MCInstrDesc &MID = MI.getDesc();
2291   const uint64_t F = MID.TSFlags;
2292   if ((F >> HexagonII::ExtendablePos) & HexagonII::ExtendableMask)
2293     return true;
2294 
2295   // TODO: This is largely obsolete now. Will need to be removed
2296   // in consecutive patches.
2297   switch (MI.getOpcode()) {
2298     // PS_fi and PS_fia remain special cases.
2299     case Hexagon::PS_fi:
2300     case Hexagon::PS_fia:
2301       return true;
2302     default:
2303       return false;
2304   }
2305   return  false;
2306 }
2307 
2308 // This returns true in two cases:
2309 // - The OP code itself indicates that this is an extended instruction.
2310 // - One of MOs has been marked with HMOTF_ConstExtended flag.
2311 bool HexagonInstrInfo::isExtended(const MachineInstr &MI) const {
2312   // First check if this is permanently extended op code.
2313   const uint64_t F = MI.getDesc().TSFlags;
2314   if ((F >> HexagonII::ExtendedPos) & HexagonII::ExtendedMask)
2315     return true;
2316   // Use MO operand flags to determine if one of MI's operands
2317   // has HMOTF_ConstExtended flag set.
2318   for (const MachineOperand &MO : MI.operands())
2319     if (MO.getTargetFlags() & HexagonII::HMOTF_ConstExtended)
2320       return true;
2321   return  false;
2322 }
2323 
2324 bool HexagonInstrInfo::isFloat(const MachineInstr &MI) const {
2325   unsigned Opcode = MI.getOpcode();
2326   const uint64_t F = get(Opcode).TSFlags;
2327   return (F >> HexagonII::FPPos) & HexagonII::FPMask;
2328 }
2329 
2330 // No V60 HVX VMEM with A_INDIRECT.
2331 bool HexagonInstrInfo::isHVXMemWithAIndirect(const MachineInstr &I,
2332       const MachineInstr &J) const {
2333   if (!isHVXVec(I))
2334     return false;
2335   if (!I.mayLoad() && !I.mayStore())
2336     return false;
2337   return J.isIndirectBranch() || isIndirectCall(J) || isIndirectL4Return(J);
2338 }
2339 
2340 bool HexagonInstrInfo::isIndirectCall(const MachineInstr &MI) const {
2341   switch (MI.getOpcode()) {
2342   case Hexagon::J2_callr:
2343   case Hexagon::J2_callrf:
2344   case Hexagon::J2_callrt:
2345   case Hexagon::PS_call_nr:
2346     return true;
2347   }
2348   return false;
2349 }
2350 
2351 bool HexagonInstrInfo::isIndirectL4Return(const MachineInstr &MI) const {
2352   switch (MI.getOpcode()) {
2353   case Hexagon::L4_return:
2354   case Hexagon::L4_return_t:
2355   case Hexagon::L4_return_f:
2356   case Hexagon::L4_return_fnew_pnt:
2357   case Hexagon::L4_return_fnew_pt:
2358   case Hexagon::L4_return_tnew_pnt:
2359   case Hexagon::L4_return_tnew_pt:
2360     return true;
2361   }
2362   return false;
2363 }
2364 
2365 bool HexagonInstrInfo::isJumpR(const MachineInstr &MI) const {
2366   switch (MI.getOpcode()) {
2367   case Hexagon::J2_jumpr:
2368   case Hexagon::J2_jumprt:
2369   case Hexagon::J2_jumprf:
2370   case Hexagon::J2_jumprtnewpt:
2371   case Hexagon::J2_jumprfnewpt:
2372   case Hexagon::J2_jumprtnew:
2373   case Hexagon::J2_jumprfnew:
2374     return true;
2375   }
2376   return false;
2377 }
2378 
2379 // Return true if a given MI can accommodate given offset.
2380 // Use abs estimate as oppose to the exact number.
2381 // TODO: This will need to be changed to use MC level
2382 // definition of instruction extendable field size.
2383 bool HexagonInstrInfo::isJumpWithinBranchRange(const MachineInstr &MI,
2384       unsigned offset) const {
2385   // This selection of jump instructions matches to that what
2386   // analyzeBranch can parse, plus NVJ.
2387   if (isNewValueJump(MI)) // r9:2
2388     return isInt<11>(offset);
2389 
2390   switch (MI.getOpcode()) {
2391   // Still missing Jump to address condition on register value.
2392   default:
2393     return false;
2394   case Hexagon::J2_jump: // bits<24> dst; // r22:2
2395   case Hexagon::J2_call:
2396   case Hexagon::PS_call_nr:
2397     return isInt<24>(offset);
2398   case Hexagon::J2_jumpt: //bits<17> dst; // r15:2
2399   case Hexagon::J2_jumpf:
2400   case Hexagon::J2_jumptnew:
2401   case Hexagon::J2_jumptnewpt:
2402   case Hexagon::J2_jumpfnew:
2403   case Hexagon::J2_jumpfnewpt:
2404   case Hexagon::J2_callt:
2405   case Hexagon::J2_callf:
2406     return isInt<17>(offset);
2407   case Hexagon::J2_loop0i:
2408   case Hexagon::J2_loop0iext:
2409   case Hexagon::J2_loop0r:
2410   case Hexagon::J2_loop0rext:
2411   case Hexagon::J2_loop1i:
2412   case Hexagon::J2_loop1iext:
2413   case Hexagon::J2_loop1r:
2414   case Hexagon::J2_loop1rext:
2415     return isInt<9>(offset);
2416   // TODO: Add all the compound branches here. Can we do this in Relation model?
2417   case Hexagon::J4_cmpeqi_tp0_jump_nt:
2418   case Hexagon::J4_cmpeqi_tp1_jump_nt:
2419   case Hexagon::J4_cmpeqn1_tp0_jump_nt:
2420   case Hexagon::J4_cmpeqn1_tp1_jump_nt:
2421     return isInt<11>(offset);
2422   }
2423 }
2424 
2425 bool HexagonInstrInfo::isLateSourceInstr(const MachineInstr &MI) const {
2426   // Instructions with iclass A_CVI_VX and attribute A_CVI_LATE uses a multiply
2427   // resource, but all operands can be received late like an ALU instruction.
2428   return getType(MI) == HexagonII::TypeCVI_VX_LATE;
2429 }
2430 
2431 bool HexagonInstrInfo::isLoopN(const MachineInstr &MI) const {
2432   unsigned Opcode = MI.getOpcode();
2433   return Opcode == Hexagon::J2_loop0i    ||
2434          Opcode == Hexagon::J2_loop0r    ||
2435          Opcode == Hexagon::J2_loop0iext ||
2436          Opcode == Hexagon::J2_loop0rext ||
2437          Opcode == Hexagon::J2_loop1i    ||
2438          Opcode == Hexagon::J2_loop1r    ||
2439          Opcode == Hexagon::J2_loop1iext ||
2440          Opcode == Hexagon::J2_loop1rext;
2441 }
2442 
2443 bool HexagonInstrInfo::isMemOp(const MachineInstr &MI) const {
2444   switch (MI.getOpcode()) {
2445     default: return false;
2446     case Hexagon::L4_iadd_memopw_io:
2447     case Hexagon::L4_isub_memopw_io:
2448     case Hexagon::L4_add_memopw_io:
2449     case Hexagon::L4_sub_memopw_io:
2450     case Hexagon::L4_and_memopw_io:
2451     case Hexagon::L4_or_memopw_io:
2452     case Hexagon::L4_iadd_memoph_io:
2453     case Hexagon::L4_isub_memoph_io:
2454     case Hexagon::L4_add_memoph_io:
2455     case Hexagon::L4_sub_memoph_io:
2456     case Hexagon::L4_and_memoph_io:
2457     case Hexagon::L4_or_memoph_io:
2458     case Hexagon::L4_iadd_memopb_io:
2459     case Hexagon::L4_isub_memopb_io:
2460     case Hexagon::L4_add_memopb_io:
2461     case Hexagon::L4_sub_memopb_io:
2462     case Hexagon::L4_and_memopb_io:
2463     case Hexagon::L4_or_memopb_io:
2464     case Hexagon::L4_ior_memopb_io:
2465     case Hexagon::L4_ior_memoph_io:
2466     case Hexagon::L4_ior_memopw_io:
2467     case Hexagon::L4_iand_memopb_io:
2468     case Hexagon::L4_iand_memoph_io:
2469     case Hexagon::L4_iand_memopw_io:
2470     return true;
2471   }
2472   return false;
2473 }
2474 
2475 bool HexagonInstrInfo::isNewValue(const MachineInstr &MI) const {
2476   const uint64_t F = MI.getDesc().TSFlags;
2477   return (F >> HexagonII::NewValuePos) & HexagonII::NewValueMask;
2478 }
2479 
2480 bool HexagonInstrInfo::isNewValue(unsigned Opcode) const {
2481   const uint64_t F = get(Opcode).TSFlags;
2482   return (F >> HexagonII::NewValuePos) & HexagonII::NewValueMask;
2483 }
2484 
2485 bool HexagonInstrInfo::isNewValueInst(const MachineInstr &MI) const {
2486   return isNewValueJump(MI) || isNewValueStore(MI);
2487 }
2488 
2489 bool HexagonInstrInfo::isNewValueJump(const MachineInstr &MI) const {
2490   return isNewValue(MI) && MI.isBranch();
2491 }
2492 
2493 bool HexagonInstrInfo::isNewValueJump(unsigned Opcode) const {
2494   return isNewValue(Opcode) && get(Opcode).isBranch() && isPredicated(Opcode);
2495 }
2496 
2497 bool HexagonInstrInfo::isNewValueStore(const MachineInstr &MI) const {
2498   const uint64_t F = MI.getDesc().TSFlags;
2499   return (F >> HexagonII::NVStorePos) & HexagonII::NVStoreMask;
2500 }
2501 
2502 bool HexagonInstrInfo::isNewValueStore(unsigned Opcode) const {
2503   const uint64_t F = get(Opcode).TSFlags;
2504   return (F >> HexagonII::NVStorePos) & HexagonII::NVStoreMask;
2505 }
2506 
2507 // Returns true if a particular operand is extendable for an instruction.
2508 bool HexagonInstrInfo::isOperandExtended(const MachineInstr &MI,
2509     unsigned OperandNum) const {
2510   const uint64_t F = MI.getDesc().TSFlags;
2511   return ((F >> HexagonII::ExtendableOpPos) & HexagonII::ExtendableOpMask)
2512           == OperandNum;
2513 }
2514 
2515 bool HexagonInstrInfo::isPredicatedNew(const MachineInstr &MI) const {
2516   const uint64_t F = MI.getDesc().TSFlags;
2517   assert(isPredicated(MI));
2518   return (F >> HexagonII::PredicatedNewPos) & HexagonII::PredicatedNewMask;
2519 }
2520 
2521 bool HexagonInstrInfo::isPredicatedNew(unsigned Opcode) const {
2522   const uint64_t F = get(Opcode).TSFlags;
2523   assert(isPredicated(Opcode));
2524   return (F >> HexagonII::PredicatedNewPos) & HexagonII::PredicatedNewMask;
2525 }
2526 
2527 bool HexagonInstrInfo::isPredicatedTrue(const MachineInstr &MI) const {
2528   const uint64_t F = MI.getDesc().TSFlags;
2529   return !((F >> HexagonII::PredicatedFalsePos) &
2530            HexagonII::PredicatedFalseMask);
2531 }
2532 
2533 bool HexagonInstrInfo::isPredicatedTrue(unsigned Opcode) const {
2534   const uint64_t F = get(Opcode).TSFlags;
2535   // Make sure that the instruction is predicated.
2536   assert((F>> HexagonII::PredicatedPos) & HexagonII::PredicatedMask);
2537   return !((F >> HexagonII::PredicatedFalsePos) &
2538            HexagonII::PredicatedFalseMask);
2539 }
2540 
2541 bool HexagonInstrInfo::isPredicated(unsigned Opcode) const {
2542   const uint64_t F = get(Opcode).TSFlags;
2543   return (F >> HexagonII::PredicatedPos) & HexagonII::PredicatedMask;
2544 }
2545 
2546 bool HexagonInstrInfo::isPredicateLate(unsigned Opcode) const {
2547   const uint64_t F = get(Opcode).TSFlags;
2548   return (F >> HexagonII::PredicateLatePos) & HexagonII::PredicateLateMask;
2549 }
2550 
2551 bool HexagonInstrInfo::isPredictedTaken(unsigned Opcode) const {
2552   const uint64_t F = get(Opcode).TSFlags;
2553   assert(get(Opcode).isBranch() &&
2554          (isPredicatedNew(Opcode) || isNewValue(Opcode)));
2555   return (F >> HexagonII::TakenPos) & HexagonII::TakenMask;
2556 }
2557 
2558 bool HexagonInstrInfo::isSaveCalleeSavedRegsCall(const MachineInstr &MI) const {
2559   return MI.getOpcode() == Hexagon::SAVE_REGISTERS_CALL_V4 ||
2560          MI.getOpcode() == Hexagon::SAVE_REGISTERS_CALL_V4_EXT ||
2561          MI.getOpcode() == Hexagon::SAVE_REGISTERS_CALL_V4_PIC ||
2562          MI.getOpcode() == Hexagon::SAVE_REGISTERS_CALL_V4_EXT_PIC;
2563 }
2564 
2565 bool HexagonInstrInfo::isSignExtendingLoad(const MachineInstr &MI) const {
2566   switch (MI.getOpcode()) {
2567   // Byte
2568   case Hexagon::L2_loadrb_io:
2569   case Hexagon::L4_loadrb_ur:
2570   case Hexagon::L4_loadrb_ap:
2571   case Hexagon::L2_loadrb_pr:
2572   case Hexagon::L2_loadrb_pbr:
2573   case Hexagon::L2_loadrb_pi:
2574   case Hexagon::L2_loadrb_pci:
2575   case Hexagon::L2_loadrb_pcr:
2576   case Hexagon::L2_loadbsw2_io:
2577   case Hexagon::L4_loadbsw2_ur:
2578   case Hexagon::L4_loadbsw2_ap:
2579   case Hexagon::L2_loadbsw2_pr:
2580   case Hexagon::L2_loadbsw2_pbr:
2581   case Hexagon::L2_loadbsw2_pi:
2582   case Hexagon::L2_loadbsw2_pci:
2583   case Hexagon::L2_loadbsw2_pcr:
2584   case Hexagon::L2_loadbsw4_io:
2585   case Hexagon::L4_loadbsw4_ur:
2586   case Hexagon::L4_loadbsw4_ap:
2587   case Hexagon::L2_loadbsw4_pr:
2588   case Hexagon::L2_loadbsw4_pbr:
2589   case Hexagon::L2_loadbsw4_pi:
2590   case Hexagon::L2_loadbsw4_pci:
2591   case Hexagon::L2_loadbsw4_pcr:
2592   case Hexagon::L4_loadrb_rr:
2593   case Hexagon::L2_ploadrbt_io:
2594   case Hexagon::L2_ploadrbt_pi:
2595   case Hexagon::L2_ploadrbf_io:
2596   case Hexagon::L2_ploadrbf_pi:
2597   case Hexagon::L2_ploadrbtnew_io:
2598   case Hexagon::L2_ploadrbfnew_io:
2599   case Hexagon::L4_ploadrbt_rr:
2600   case Hexagon::L4_ploadrbf_rr:
2601   case Hexagon::L4_ploadrbtnew_rr:
2602   case Hexagon::L4_ploadrbfnew_rr:
2603   case Hexagon::L2_ploadrbtnew_pi:
2604   case Hexagon::L2_ploadrbfnew_pi:
2605   case Hexagon::L4_ploadrbt_abs:
2606   case Hexagon::L4_ploadrbf_abs:
2607   case Hexagon::L4_ploadrbtnew_abs:
2608   case Hexagon::L4_ploadrbfnew_abs:
2609   case Hexagon::L2_loadrbgp:
2610   // Half
2611   case Hexagon::L2_loadrh_io:
2612   case Hexagon::L4_loadrh_ur:
2613   case Hexagon::L4_loadrh_ap:
2614   case Hexagon::L2_loadrh_pr:
2615   case Hexagon::L2_loadrh_pbr:
2616   case Hexagon::L2_loadrh_pi:
2617   case Hexagon::L2_loadrh_pci:
2618   case Hexagon::L2_loadrh_pcr:
2619   case Hexagon::L4_loadrh_rr:
2620   case Hexagon::L2_ploadrht_io:
2621   case Hexagon::L2_ploadrht_pi:
2622   case Hexagon::L2_ploadrhf_io:
2623   case Hexagon::L2_ploadrhf_pi:
2624   case Hexagon::L2_ploadrhtnew_io:
2625   case Hexagon::L2_ploadrhfnew_io:
2626   case Hexagon::L4_ploadrht_rr:
2627   case Hexagon::L4_ploadrhf_rr:
2628   case Hexagon::L4_ploadrhtnew_rr:
2629   case Hexagon::L4_ploadrhfnew_rr:
2630   case Hexagon::L2_ploadrhtnew_pi:
2631   case Hexagon::L2_ploadrhfnew_pi:
2632   case Hexagon::L4_ploadrht_abs:
2633   case Hexagon::L4_ploadrhf_abs:
2634   case Hexagon::L4_ploadrhtnew_abs:
2635   case Hexagon::L4_ploadrhfnew_abs:
2636   case Hexagon::L2_loadrhgp:
2637     return true;
2638   default:
2639     return false;
2640   }
2641 }
2642 
2643 bool HexagonInstrInfo::isSolo(const MachineInstr &MI) const {
2644   const uint64_t F = MI.getDesc().TSFlags;
2645   return (F >> HexagonII::SoloPos) & HexagonII::SoloMask;
2646 }
2647 
2648 bool HexagonInstrInfo::isSpillPredRegOp(const MachineInstr &MI) const {
2649   switch (MI.getOpcode()) {
2650   case Hexagon::STriw_pred:
2651   case Hexagon::LDriw_pred:
2652     return true;
2653   default:
2654     return false;
2655   }
2656 }
2657 
2658 bool HexagonInstrInfo::isTailCall(const MachineInstr &MI) const {
2659   if (!MI.isBranch())
2660     return false;
2661 
2662   for (auto &Op : MI.operands())
2663     if (Op.isGlobal() || Op.isSymbol())
2664       return true;
2665   return false;
2666 }
2667 
2668 // Returns true when SU has a timing class TC1.
2669 bool HexagonInstrInfo::isTC1(const MachineInstr &MI) const {
2670   unsigned SchedClass = MI.getDesc().getSchedClass();
2671   return is_TC1(SchedClass);
2672 }
2673 
2674 bool HexagonInstrInfo::isTC2(const MachineInstr &MI) const {
2675   unsigned SchedClass = MI.getDesc().getSchedClass();
2676   return is_TC2(SchedClass);
2677 }
2678 
2679 bool HexagonInstrInfo::isTC2Early(const MachineInstr &MI) const {
2680   unsigned SchedClass = MI.getDesc().getSchedClass();
2681   return is_TC2early(SchedClass);
2682 }
2683 
2684 bool HexagonInstrInfo::isTC4x(const MachineInstr &MI) const {
2685   unsigned SchedClass = MI.getDesc().getSchedClass();
2686   return is_TC4x(SchedClass);
2687 }
2688 
2689 // Schedule this ASAP.
2690 bool HexagonInstrInfo::isToBeScheduledASAP(const MachineInstr &MI1,
2691       const MachineInstr &MI2) const {
2692   if (mayBeCurLoad(MI1)) {
2693     // if (result of SU is used in Next) return true;
2694     Register DstReg = MI1.getOperand(0).getReg();
2695     int N = MI2.getNumOperands();
2696     for (int I = 0; I < N; I++)
2697       if (MI2.getOperand(I).isReg() && DstReg == MI2.getOperand(I).getReg())
2698         return true;
2699   }
2700   if (mayBeNewStore(MI2))
2701     if (MI2.getOpcode() == Hexagon::V6_vS32b_pi)
2702       if (MI1.getOperand(0).isReg() && MI2.getOperand(3).isReg() &&
2703           MI1.getOperand(0).getReg() == MI2.getOperand(3).getReg())
2704         return true;
2705   return false;
2706 }
2707 
2708 bool HexagonInstrInfo::isHVXVec(const MachineInstr &MI) const {
2709   const uint64_t V = getType(MI);
2710   return HexagonII::TypeCVI_FIRST <= V && V <= HexagonII::TypeCVI_LAST;
2711 }
2712 
2713 // Check if the Offset is a valid auto-inc imm by Load/Store Type.
2714 bool HexagonInstrInfo::isValidAutoIncImm(const EVT VT, int Offset) const {
2715   int Size = VT.getSizeInBits() / 8;
2716   if (Offset % Size != 0)
2717     return false;
2718   int Count = Offset / Size;
2719 
2720   switch (VT.getSimpleVT().SimpleTy) {
2721     // For scalars the auto-inc is s4
2722     case MVT::i8:
2723     case MVT::i16:
2724     case MVT::i32:
2725     case MVT::i64:
2726     case MVT::f32:
2727     case MVT::f64:
2728     case MVT::v2i16:
2729     case MVT::v2i32:
2730     case MVT::v4i8:
2731     case MVT::v4i16:
2732     case MVT::v8i8:
2733       return isInt<4>(Count);
2734     // For HVX vectors the auto-inc is s3
2735     case MVT::v64i8:
2736     case MVT::v32i16:
2737     case MVT::v16i32:
2738     case MVT::v8i64:
2739     case MVT::v128i8:
2740     case MVT::v64i16:
2741     case MVT::v32i32:
2742     case MVT::v16i64:
2743       return isInt<3>(Count);
2744     default:
2745       break;
2746   }
2747 
2748   llvm_unreachable("Not an valid type!");
2749 }
2750 
2751 bool HexagonInstrInfo::isValidOffset(unsigned Opcode, int Offset,
2752       const TargetRegisterInfo *TRI, bool Extend) const {
2753   // This function is to check whether the "Offset" is in the correct range of
2754   // the given "Opcode". If "Offset" is not in the correct range, "A2_addi" is
2755   // inserted to calculate the final address. Due to this reason, the function
2756   // assumes that the "Offset" has correct alignment.
2757   // We used to assert if the offset was not properly aligned, however,
2758   // there are cases where a misaligned pointer recast can cause this
2759   // problem, and we need to allow for it. The front end warns of such
2760   // misaligns with respect to load size.
2761   switch (Opcode) {
2762   case Hexagon::PS_vstorerq_ai:
2763   case Hexagon::PS_vstorerv_ai:
2764   case Hexagon::PS_vstorerw_ai:
2765   case Hexagon::PS_vstorerw_nt_ai:
2766   case Hexagon::PS_vloadrq_ai:
2767   case Hexagon::PS_vloadrv_ai:
2768   case Hexagon::PS_vloadrw_ai:
2769   case Hexagon::PS_vloadrw_nt_ai:
2770   case Hexagon::V6_vL32b_ai:
2771   case Hexagon::V6_vS32b_ai:
2772   case Hexagon::V6_vS32b_pred_ai:
2773   case Hexagon::V6_vS32b_npred_ai:
2774   case Hexagon::V6_vS32b_qpred_ai:
2775   case Hexagon::V6_vS32b_nqpred_ai:
2776   case Hexagon::V6_vS32b_new_ai:
2777   case Hexagon::V6_vS32b_new_pred_ai:
2778   case Hexagon::V6_vS32b_new_npred_ai:
2779   case Hexagon::V6_vS32b_nt_pred_ai:
2780   case Hexagon::V6_vS32b_nt_npred_ai:
2781   case Hexagon::V6_vS32b_nt_new_ai:
2782   case Hexagon::V6_vS32b_nt_new_pred_ai:
2783   case Hexagon::V6_vS32b_nt_new_npred_ai:
2784   case Hexagon::V6_vS32b_nt_qpred_ai:
2785   case Hexagon::V6_vS32b_nt_nqpred_ai:
2786   case Hexagon::V6_vL32b_nt_ai:
2787   case Hexagon::V6_vS32b_nt_ai:
2788   case Hexagon::V6_vL32Ub_ai:
2789   case Hexagon::V6_vS32Ub_ai:
2790   case Hexagon::V6_vL32b_cur_ai:
2791   case Hexagon::V6_vL32b_tmp_ai:
2792   case Hexagon::V6_vL32b_pred_ai:
2793   case Hexagon::V6_vL32b_npred_ai:
2794   case Hexagon::V6_vL32b_cur_pred_ai:
2795   case Hexagon::V6_vL32b_cur_npred_ai:
2796   case Hexagon::V6_vL32b_tmp_pred_ai:
2797   case Hexagon::V6_vL32b_tmp_npred_ai:
2798   case Hexagon::V6_vL32b_nt_cur_ai:
2799   case Hexagon::V6_vL32b_nt_tmp_ai:
2800   case Hexagon::V6_vL32b_nt_pred_ai:
2801   case Hexagon::V6_vL32b_nt_npred_ai:
2802   case Hexagon::V6_vL32b_nt_cur_pred_ai:
2803   case Hexagon::V6_vL32b_nt_cur_npred_ai:
2804   case Hexagon::V6_vL32b_nt_tmp_pred_ai:
2805   case Hexagon::V6_vL32b_nt_tmp_npred_ai:
2806   case Hexagon::V6_vgathermh_pseudo:
2807   case Hexagon::V6_vgathermw_pseudo:
2808   case Hexagon::V6_vgathermhw_pseudo:
2809   case Hexagon::V6_vgathermhq_pseudo:
2810   case Hexagon::V6_vgathermwq_pseudo:
2811   case Hexagon::V6_vgathermhwq_pseudo: {
2812     unsigned VectorSize = TRI->getSpillSize(Hexagon::HvxVRRegClass);
2813     assert(isPowerOf2_32(VectorSize));
2814     if (Offset & (VectorSize-1))
2815       return false;
2816     return isInt<4>(Offset >> Log2_32(VectorSize));
2817   }
2818 
2819   case Hexagon::J2_loop0i:
2820   case Hexagon::J2_loop1i:
2821     return isUInt<10>(Offset);
2822 
2823   case Hexagon::S4_storeirb_io:
2824   case Hexagon::S4_storeirbt_io:
2825   case Hexagon::S4_storeirbf_io:
2826     return isUInt<6>(Offset);
2827 
2828   case Hexagon::S4_storeirh_io:
2829   case Hexagon::S4_storeirht_io:
2830   case Hexagon::S4_storeirhf_io:
2831     return isShiftedUInt<6,1>(Offset);
2832 
2833   case Hexagon::S4_storeiri_io:
2834   case Hexagon::S4_storeirit_io:
2835   case Hexagon::S4_storeirif_io:
2836     return isShiftedUInt<6,2>(Offset);
2837   // Handle these two compare instructions that are not extendable.
2838   case Hexagon::A4_cmpbeqi:
2839     return isUInt<8>(Offset);
2840   case Hexagon::A4_cmpbgti:
2841     return isInt<8>(Offset);
2842   }
2843 
2844   if (Extend)
2845     return true;
2846 
2847   switch (Opcode) {
2848   case Hexagon::L2_loadri_io:
2849   case Hexagon::S2_storeri_io:
2850     return (Offset >= Hexagon_MEMW_OFFSET_MIN) &&
2851       (Offset <= Hexagon_MEMW_OFFSET_MAX);
2852 
2853   case Hexagon::L2_loadrd_io:
2854   case Hexagon::S2_storerd_io:
2855     return (Offset >= Hexagon_MEMD_OFFSET_MIN) &&
2856       (Offset <= Hexagon_MEMD_OFFSET_MAX);
2857 
2858   case Hexagon::L2_loadrh_io:
2859   case Hexagon::L2_loadruh_io:
2860   case Hexagon::S2_storerh_io:
2861   case Hexagon::S2_storerf_io:
2862     return (Offset >= Hexagon_MEMH_OFFSET_MIN) &&
2863       (Offset <= Hexagon_MEMH_OFFSET_MAX);
2864 
2865   case Hexagon::L2_loadrb_io:
2866   case Hexagon::L2_loadrub_io:
2867   case Hexagon::S2_storerb_io:
2868     return (Offset >= Hexagon_MEMB_OFFSET_MIN) &&
2869       (Offset <= Hexagon_MEMB_OFFSET_MAX);
2870 
2871   case Hexagon::A2_addi:
2872     return (Offset >= Hexagon_ADDI_OFFSET_MIN) &&
2873       (Offset <= Hexagon_ADDI_OFFSET_MAX);
2874 
2875   case Hexagon::L4_iadd_memopw_io:
2876   case Hexagon::L4_isub_memopw_io:
2877   case Hexagon::L4_add_memopw_io:
2878   case Hexagon::L4_sub_memopw_io:
2879   case Hexagon::L4_iand_memopw_io:
2880   case Hexagon::L4_ior_memopw_io:
2881   case Hexagon::L4_and_memopw_io:
2882   case Hexagon::L4_or_memopw_io:
2883     return (0 <= Offset && Offset <= 255);
2884 
2885   case Hexagon::L4_iadd_memoph_io:
2886   case Hexagon::L4_isub_memoph_io:
2887   case Hexagon::L4_add_memoph_io:
2888   case Hexagon::L4_sub_memoph_io:
2889   case Hexagon::L4_iand_memoph_io:
2890   case Hexagon::L4_ior_memoph_io:
2891   case Hexagon::L4_and_memoph_io:
2892   case Hexagon::L4_or_memoph_io:
2893     return (0 <= Offset && Offset <= 127);
2894 
2895   case Hexagon::L4_iadd_memopb_io:
2896   case Hexagon::L4_isub_memopb_io:
2897   case Hexagon::L4_add_memopb_io:
2898   case Hexagon::L4_sub_memopb_io:
2899   case Hexagon::L4_iand_memopb_io:
2900   case Hexagon::L4_ior_memopb_io:
2901   case Hexagon::L4_and_memopb_io:
2902   case Hexagon::L4_or_memopb_io:
2903     return (0 <= Offset && Offset <= 63);
2904 
2905   // LDriw_xxx and STriw_xxx are pseudo operations, so it has to take offset of
2906   // any size. Later pass knows how to handle it.
2907   case Hexagon::STriw_pred:
2908   case Hexagon::LDriw_pred:
2909   case Hexagon::STriw_ctr:
2910   case Hexagon::LDriw_ctr:
2911     return true;
2912 
2913   case Hexagon::PS_fi:
2914   case Hexagon::PS_fia:
2915   case Hexagon::INLINEASM:
2916     return true;
2917 
2918   case Hexagon::L2_ploadrbt_io:
2919   case Hexagon::L2_ploadrbf_io:
2920   case Hexagon::L2_ploadrubt_io:
2921   case Hexagon::L2_ploadrubf_io:
2922   case Hexagon::S2_pstorerbt_io:
2923   case Hexagon::S2_pstorerbf_io:
2924     return isUInt<6>(Offset);
2925 
2926   case Hexagon::L2_ploadrht_io:
2927   case Hexagon::L2_ploadrhf_io:
2928   case Hexagon::L2_ploadruht_io:
2929   case Hexagon::L2_ploadruhf_io:
2930   case Hexagon::S2_pstorerht_io:
2931   case Hexagon::S2_pstorerhf_io:
2932     return isShiftedUInt<6,1>(Offset);
2933 
2934   case Hexagon::L2_ploadrit_io:
2935   case Hexagon::L2_ploadrif_io:
2936   case Hexagon::S2_pstorerit_io:
2937   case Hexagon::S2_pstorerif_io:
2938     return isShiftedUInt<6,2>(Offset);
2939 
2940   case Hexagon::L2_ploadrdt_io:
2941   case Hexagon::L2_ploadrdf_io:
2942   case Hexagon::S2_pstorerdt_io:
2943   case Hexagon::S2_pstorerdf_io:
2944     return isShiftedUInt<6,3>(Offset);
2945 
2946   case Hexagon::L2_loadbsw2_io:
2947   case Hexagon::L2_loadbzw2_io:
2948     return isShiftedInt<11,1>(Offset);
2949 
2950   case Hexagon::L2_loadbsw4_io:
2951   case Hexagon::L2_loadbzw4_io:
2952     return isShiftedInt<11,2>(Offset);
2953   } // switch
2954 
2955   dbgs() << "Failed Opcode is : " << Opcode << " (" << getName(Opcode)
2956          << ")\n";
2957   llvm_unreachable("No offset range is defined for this opcode. "
2958                    "Please define it in the above switch statement!");
2959 }
2960 
2961 bool HexagonInstrInfo::isVecAcc(const MachineInstr &MI) const {
2962   return isHVXVec(MI) && isAccumulator(MI);
2963 }
2964 
2965 bool HexagonInstrInfo::isVecALU(const MachineInstr &MI) const {
2966   const uint64_t F = get(MI.getOpcode()).TSFlags;
2967   const uint64_t V = ((F >> HexagonII::TypePos) & HexagonII::TypeMask);
2968   return
2969     V == HexagonII::TypeCVI_VA         ||
2970     V == HexagonII::TypeCVI_VA_DV;
2971 }
2972 
2973 bool HexagonInstrInfo::isVecUsableNextPacket(const MachineInstr &ProdMI,
2974       const MachineInstr &ConsMI) const {
2975   if (EnableACCForwarding && isVecAcc(ProdMI) && isVecAcc(ConsMI))
2976     return true;
2977 
2978   if (EnableALUForwarding && (isVecALU(ConsMI) || isLateSourceInstr(ConsMI)))
2979     return true;
2980 
2981   if (mayBeNewStore(ConsMI))
2982     return true;
2983 
2984   return false;
2985 }
2986 
2987 bool HexagonInstrInfo::isZeroExtendingLoad(const MachineInstr &MI) const {
2988   switch (MI.getOpcode()) {
2989   // Byte
2990   case Hexagon::L2_loadrub_io:
2991   case Hexagon::L4_loadrub_ur:
2992   case Hexagon::L4_loadrub_ap:
2993   case Hexagon::L2_loadrub_pr:
2994   case Hexagon::L2_loadrub_pbr:
2995   case Hexagon::L2_loadrub_pi:
2996   case Hexagon::L2_loadrub_pci:
2997   case Hexagon::L2_loadrub_pcr:
2998   case Hexagon::L2_loadbzw2_io:
2999   case Hexagon::L4_loadbzw2_ur:
3000   case Hexagon::L4_loadbzw2_ap:
3001   case Hexagon::L2_loadbzw2_pr:
3002   case Hexagon::L2_loadbzw2_pbr:
3003   case Hexagon::L2_loadbzw2_pi:
3004   case Hexagon::L2_loadbzw2_pci:
3005   case Hexagon::L2_loadbzw2_pcr:
3006   case Hexagon::L2_loadbzw4_io:
3007   case Hexagon::L4_loadbzw4_ur:
3008   case Hexagon::L4_loadbzw4_ap:
3009   case Hexagon::L2_loadbzw4_pr:
3010   case Hexagon::L2_loadbzw4_pbr:
3011   case Hexagon::L2_loadbzw4_pi:
3012   case Hexagon::L2_loadbzw4_pci:
3013   case Hexagon::L2_loadbzw4_pcr:
3014   case Hexagon::L4_loadrub_rr:
3015   case Hexagon::L2_ploadrubt_io:
3016   case Hexagon::L2_ploadrubt_pi:
3017   case Hexagon::L2_ploadrubf_io:
3018   case Hexagon::L2_ploadrubf_pi:
3019   case Hexagon::L2_ploadrubtnew_io:
3020   case Hexagon::L2_ploadrubfnew_io:
3021   case Hexagon::L4_ploadrubt_rr:
3022   case Hexagon::L4_ploadrubf_rr:
3023   case Hexagon::L4_ploadrubtnew_rr:
3024   case Hexagon::L4_ploadrubfnew_rr:
3025   case Hexagon::L2_ploadrubtnew_pi:
3026   case Hexagon::L2_ploadrubfnew_pi:
3027   case Hexagon::L4_ploadrubt_abs:
3028   case Hexagon::L4_ploadrubf_abs:
3029   case Hexagon::L4_ploadrubtnew_abs:
3030   case Hexagon::L4_ploadrubfnew_abs:
3031   case Hexagon::L2_loadrubgp:
3032   // Half
3033   case Hexagon::L2_loadruh_io:
3034   case Hexagon::L4_loadruh_ur:
3035   case Hexagon::L4_loadruh_ap:
3036   case Hexagon::L2_loadruh_pr:
3037   case Hexagon::L2_loadruh_pbr:
3038   case Hexagon::L2_loadruh_pi:
3039   case Hexagon::L2_loadruh_pci:
3040   case Hexagon::L2_loadruh_pcr:
3041   case Hexagon::L4_loadruh_rr:
3042   case Hexagon::L2_ploadruht_io:
3043   case Hexagon::L2_ploadruht_pi:
3044   case Hexagon::L2_ploadruhf_io:
3045   case Hexagon::L2_ploadruhf_pi:
3046   case Hexagon::L2_ploadruhtnew_io:
3047   case Hexagon::L2_ploadruhfnew_io:
3048   case Hexagon::L4_ploadruht_rr:
3049   case Hexagon::L4_ploadruhf_rr:
3050   case Hexagon::L4_ploadruhtnew_rr:
3051   case Hexagon::L4_ploadruhfnew_rr:
3052   case Hexagon::L2_ploadruhtnew_pi:
3053   case Hexagon::L2_ploadruhfnew_pi:
3054   case Hexagon::L4_ploadruht_abs:
3055   case Hexagon::L4_ploadruhf_abs:
3056   case Hexagon::L4_ploadruhtnew_abs:
3057   case Hexagon::L4_ploadruhfnew_abs:
3058   case Hexagon::L2_loadruhgp:
3059     return true;
3060   default:
3061     return false;
3062   }
3063 }
3064 
3065 // Add latency to instruction.
3066 bool HexagonInstrInfo::addLatencyToSchedule(const MachineInstr &MI1,
3067       const MachineInstr &MI2) const {
3068   if (isHVXVec(MI1) && isHVXVec(MI2))
3069     if (!isVecUsableNextPacket(MI1, MI2))
3070       return true;
3071   return false;
3072 }
3073 
3074 /// Get the base register and byte offset of a load/store instr.
3075 bool HexagonInstrInfo::getMemOperandsWithOffsetWidth(
3076     const MachineInstr &LdSt, SmallVectorImpl<const MachineOperand *> &BaseOps,
3077     int64_t &Offset, bool &OffsetIsScalable, LocationSize &Width,
3078     const TargetRegisterInfo *TRI) const {
3079   OffsetIsScalable = false;
3080   const MachineOperand *BaseOp = getBaseAndOffset(LdSt, Offset, Width);
3081   if (!BaseOp || !BaseOp->isReg())
3082     return false;
3083   BaseOps.push_back(BaseOp);
3084   return true;
3085 }
3086 
3087 /// Can these instructions execute at the same time in a bundle.
3088 bool HexagonInstrInfo::canExecuteInBundle(const MachineInstr &First,
3089       const MachineInstr &Second) const {
3090   if (Second.mayStore() && First.getOpcode() == Hexagon::S2_allocframe) {
3091     const MachineOperand &Op = Second.getOperand(0);
3092     if (Op.isReg() && Op.isUse() && Op.getReg() == Hexagon::R29)
3093       return true;
3094   }
3095   if (DisableNVSchedule)
3096     return false;
3097   if (mayBeNewStore(Second)) {
3098     // Make sure the definition of the first instruction is the value being
3099     // stored.
3100     const MachineOperand &Stored =
3101       Second.getOperand(Second.getNumOperands() - 1);
3102     if (!Stored.isReg())
3103       return false;
3104     for (unsigned i = 0, e = First.getNumOperands(); i < e; ++i) {
3105       const MachineOperand &Op = First.getOperand(i);
3106       if (Op.isReg() && Op.isDef() && Op.getReg() == Stored.getReg())
3107         return true;
3108     }
3109   }
3110   return false;
3111 }
3112 
3113 bool HexagonInstrInfo::doesNotReturn(const MachineInstr &CallMI) const {
3114   unsigned Opc = CallMI.getOpcode();
3115   return Opc == Hexagon::PS_call_nr || Opc == Hexagon::PS_callr_nr;
3116 }
3117 
3118 bool HexagonInstrInfo::hasEHLabel(const MachineBasicBlock *B) const {
3119   for (auto &I : *B)
3120     if (I.isEHLabel())
3121       return true;
3122   return false;
3123 }
3124 
3125 // Returns true if an instruction can be converted into a non-extended
3126 // equivalent instruction.
3127 bool HexagonInstrInfo::hasNonExtEquivalent(const MachineInstr &MI) const {
3128   short NonExtOpcode;
3129   // Check if the instruction has a register form that uses register in place
3130   // of the extended operand, if so return that as the non-extended form.
3131   if (Hexagon::getRegForm(MI.getOpcode()) >= 0)
3132     return true;
3133 
3134   if (MI.getDesc().mayLoad() || MI.getDesc().mayStore()) {
3135     // Check addressing mode and retrieve non-ext equivalent instruction.
3136 
3137     switch (getAddrMode(MI)) {
3138     case HexagonII::Absolute:
3139       // Load/store with absolute addressing mode can be converted into
3140       // base+offset mode.
3141       NonExtOpcode = Hexagon::changeAddrMode_abs_io(MI.getOpcode());
3142       break;
3143     case HexagonII::BaseImmOffset:
3144       // Load/store with base+offset addressing mode can be converted into
3145       // base+register offset addressing mode. However left shift operand should
3146       // be set to 0.
3147       NonExtOpcode = Hexagon::changeAddrMode_io_rr(MI.getOpcode());
3148       break;
3149     case HexagonII::BaseLongOffset:
3150       NonExtOpcode = Hexagon::changeAddrMode_ur_rr(MI.getOpcode());
3151       break;
3152     default:
3153       return false;
3154     }
3155     if (NonExtOpcode < 0)
3156       return false;
3157     return true;
3158   }
3159   return false;
3160 }
3161 
3162 bool HexagonInstrInfo::hasPseudoInstrPair(const MachineInstr &MI) const {
3163   return Hexagon::getRealHWInstr(MI.getOpcode(),
3164                                  Hexagon::InstrType_Pseudo) >= 0;
3165 }
3166 
3167 bool HexagonInstrInfo::hasUncondBranch(const MachineBasicBlock *B)
3168       const {
3169   MachineBasicBlock::const_iterator I = B->getFirstTerminator(), E = B->end();
3170   while (I != E) {
3171     if (I->isBarrier())
3172       return true;
3173     ++I;
3174   }
3175   return false;
3176 }
3177 
3178 // Returns true, if a LD insn can be promoted to a cur load.
3179 bool HexagonInstrInfo::mayBeCurLoad(const MachineInstr &MI) const {
3180   const uint64_t F = MI.getDesc().TSFlags;
3181   return ((F >> HexagonII::mayCVLoadPos) & HexagonII::mayCVLoadMask) &&
3182          Subtarget.hasV60Ops();
3183 }
3184 
3185 // Returns true, if a ST insn can be promoted to a new-value store.
3186 bool HexagonInstrInfo::mayBeNewStore(const MachineInstr &MI) const {
3187   if (MI.mayStore() && !Subtarget.useNewValueStores())
3188     return false;
3189 
3190   const uint64_t F = MI.getDesc().TSFlags;
3191   return (F >> HexagonII::mayNVStorePos) & HexagonII::mayNVStoreMask;
3192 }
3193 
3194 bool HexagonInstrInfo::producesStall(const MachineInstr &ProdMI,
3195       const MachineInstr &ConsMI) const {
3196   // There is no stall when ProdMI is not a V60 vector.
3197   if (!isHVXVec(ProdMI))
3198     return false;
3199 
3200   // There is no stall when ProdMI and ConsMI are not dependent.
3201   if (!isDependent(ProdMI, ConsMI))
3202     return false;
3203 
3204   // When Forward Scheduling is enabled, there is no stall if ProdMI and ConsMI
3205   // are scheduled in consecutive packets.
3206   if (isVecUsableNextPacket(ProdMI, ConsMI))
3207     return false;
3208 
3209   return true;
3210 }
3211 
3212 bool HexagonInstrInfo::producesStall(const MachineInstr &MI,
3213       MachineBasicBlock::const_instr_iterator BII) const {
3214   // There is no stall when I is not a V60 vector.
3215   if (!isHVXVec(MI))
3216     return false;
3217 
3218   MachineBasicBlock::const_instr_iterator MII = BII;
3219   MachineBasicBlock::const_instr_iterator MIE = MII->getParent()->instr_end();
3220 
3221   if (!MII->isBundle())
3222     return producesStall(*MII, MI);
3223 
3224   for (++MII; MII != MIE && MII->isInsideBundle(); ++MII) {
3225     const MachineInstr &J = *MII;
3226     if (producesStall(J, MI))
3227       return true;
3228   }
3229   return false;
3230 }
3231 
3232 bool HexagonInstrInfo::predCanBeUsedAsDotNew(const MachineInstr &MI,
3233       Register PredReg) const {
3234   for (const MachineOperand &MO : MI.operands()) {
3235     // Predicate register must be explicitly defined.
3236     if (MO.isRegMask() && MO.clobbersPhysReg(PredReg))
3237       return false;
3238     if (MO.isReg() && MO.isDef() && MO.isImplicit() && (MO.getReg() == PredReg))
3239       return false;
3240   }
3241 
3242   // Instruction that produce late predicate cannot be used as sources of
3243   // dot-new.
3244   switch (MI.getOpcode()) {
3245     case Hexagon::A4_addp_c:
3246     case Hexagon::A4_subp_c:
3247     case Hexagon::A4_tlbmatch:
3248     case Hexagon::A5_ACS:
3249     case Hexagon::F2_sfinvsqrta:
3250     case Hexagon::F2_sfrecipa:
3251     case Hexagon::J2_endloop0:
3252     case Hexagon::J2_endloop01:
3253     case Hexagon::J2_ploop1si:
3254     case Hexagon::J2_ploop1sr:
3255     case Hexagon::J2_ploop2si:
3256     case Hexagon::J2_ploop2sr:
3257     case Hexagon::J2_ploop3si:
3258     case Hexagon::J2_ploop3sr:
3259     case Hexagon::S2_cabacdecbin:
3260     case Hexagon::S2_storew_locked:
3261     case Hexagon::S4_stored_locked:
3262       return false;
3263   }
3264   return true;
3265 }
3266 
3267 bool HexagonInstrInfo::PredOpcodeHasJMP_c(unsigned Opcode) const {
3268   return Opcode == Hexagon::J2_jumpt      ||
3269          Opcode == Hexagon::J2_jumptpt    ||
3270          Opcode == Hexagon::J2_jumpf      ||
3271          Opcode == Hexagon::J2_jumpfpt    ||
3272          Opcode == Hexagon::J2_jumptnew   ||
3273          Opcode == Hexagon::J2_jumpfnew   ||
3274          Opcode == Hexagon::J2_jumptnewpt ||
3275          Opcode == Hexagon::J2_jumpfnewpt;
3276 }
3277 
3278 bool HexagonInstrInfo::predOpcodeHasNot(ArrayRef<MachineOperand> Cond) const {
3279   if (Cond.empty() || !isPredicated(Cond[0].getImm()))
3280     return false;
3281   return !isPredicatedTrue(Cond[0].getImm());
3282 }
3283 
3284 unsigned HexagonInstrInfo::getAddrMode(const MachineInstr &MI) const {
3285   const uint64_t F = MI.getDesc().TSFlags;
3286   return (F >> HexagonII::AddrModePos) & HexagonII::AddrModeMask;
3287 }
3288 
3289 // Returns the base register in a memory access (load/store). The offset is
3290 // returned in Offset and the access size is returned in AccessSize.
3291 // If the base operand has a subregister or the offset field does not contain
3292 // an immediate value, return nullptr.
3293 MachineOperand *
3294 HexagonInstrInfo::getBaseAndOffset(const MachineInstr &MI, int64_t &Offset,
3295                                    LocationSize &AccessSize) const {
3296   // Return if it is not a base+offset type instruction or a MemOp.
3297   if (getAddrMode(MI) != HexagonII::BaseImmOffset &&
3298       getAddrMode(MI) != HexagonII::BaseLongOffset &&
3299       !isMemOp(MI) && !isPostIncrement(MI))
3300     return nullptr;
3301 
3302   AccessSize = getMemAccessSize(MI);
3303 
3304   unsigned BasePos = 0, OffsetPos = 0;
3305   if (!getBaseAndOffsetPosition(MI, BasePos, OffsetPos))
3306     return nullptr;
3307 
3308   // Post increment updates its EA after the mem access,
3309   // so we need to treat its offset as zero.
3310   if (isPostIncrement(MI)) {
3311     Offset = 0;
3312   } else {
3313     const MachineOperand &OffsetOp = MI.getOperand(OffsetPos);
3314     if (!OffsetOp.isImm())
3315       return nullptr;
3316     Offset = OffsetOp.getImm();
3317   }
3318 
3319   const MachineOperand &BaseOp = MI.getOperand(BasePos);
3320   if (BaseOp.getSubReg() != 0)
3321     return nullptr;
3322   return &const_cast<MachineOperand&>(BaseOp);
3323 }
3324 
3325 /// Return the position of the base and offset operands for this instruction.
3326 bool HexagonInstrInfo::getBaseAndOffsetPosition(const MachineInstr &MI,
3327       unsigned &BasePos, unsigned &OffsetPos) const {
3328   if (!isAddrModeWithOffset(MI) && !isPostIncrement(MI))
3329     return false;
3330 
3331   // Deal with memops first.
3332   if (isMemOp(MI)) {
3333     BasePos = 0;
3334     OffsetPos = 1;
3335   } else if (MI.mayStore()) {
3336     BasePos = 0;
3337     OffsetPos = 1;
3338   } else if (MI.mayLoad()) {
3339     BasePos = 1;
3340     OffsetPos = 2;
3341   } else
3342     return false;
3343 
3344   if (isPredicated(MI)) {
3345     BasePos++;
3346     OffsetPos++;
3347   }
3348   if (isPostIncrement(MI)) {
3349     BasePos++;
3350     OffsetPos++;
3351   }
3352 
3353   if (!MI.getOperand(BasePos).isReg() || !MI.getOperand(OffsetPos).isImm())
3354     return false;
3355 
3356   return true;
3357 }
3358 
3359 // Inserts branching instructions in reverse order of their occurrence.
3360 // e.g. jump_t t1 (i1)
3361 // jump t2        (i2)
3362 // Jumpers = {i2, i1}
3363 SmallVector<MachineInstr*, 2> HexagonInstrInfo::getBranchingInstrs(
3364       MachineBasicBlock& MBB) const {
3365   SmallVector<MachineInstr*, 2> Jumpers;
3366   // If the block has no terminators, it just falls into the block after it.
3367   MachineBasicBlock::instr_iterator I = MBB.instr_end();
3368   if (I == MBB.instr_begin())
3369     return Jumpers;
3370 
3371   // A basic block may looks like this:
3372   //
3373   //  [   insn
3374   //     EH_LABEL
3375   //      insn
3376   //      insn
3377   //      insn
3378   //     EH_LABEL
3379   //      insn     ]
3380   //
3381   // It has two succs but does not have a terminator
3382   // Don't know how to handle it.
3383   do {
3384     --I;
3385     if (I->isEHLabel())
3386       return Jumpers;
3387   } while (I != MBB.instr_begin());
3388 
3389   I = MBB.instr_end();
3390   --I;
3391 
3392   while (I->isDebugInstr()) {
3393     if (I == MBB.instr_begin())
3394       return Jumpers;
3395     --I;
3396   }
3397   if (!isUnpredicatedTerminator(*I))
3398     return Jumpers;
3399 
3400   // Get the last instruction in the block.
3401   MachineInstr *LastInst = &*I;
3402   Jumpers.push_back(LastInst);
3403   MachineInstr *SecondLastInst = nullptr;
3404   // Find one more terminator if present.
3405   do {
3406     if (&*I != LastInst && !I->isBundle() && isUnpredicatedTerminator(*I)) {
3407       if (!SecondLastInst) {
3408         SecondLastInst = &*I;
3409         Jumpers.push_back(SecondLastInst);
3410       } else // This is a third branch.
3411         return Jumpers;
3412     }
3413     if (I == MBB.instr_begin())
3414       break;
3415     --I;
3416   } while (true);
3417   return Jumpers;
3418 }
3419 
3420 // Returns Operand Index for the constant extended instruction.
3421 unsigned HexagonInstrInfo::getCExtOpNum(const MachineInstr &MI) const {
3422   const uint64_t F = MI.getDesc().TSFlags;
3423   return (F >> HexagonII::ExtendableOpPos) & HexagonII::ExtendableOpMask;
3424 }
3425 
3426 // See if instruction could potentially be a duplex candidate.
3427 // If so, return its group. Zero otherwise.
3428 HexagonII::CompoundGroup HexagonInstrInfo::getCompoundCandidateGroup(
3429       const MachineInstr &MI) const {
3430   Register DstReg, SrcReg, Src1Reg, Src2Reg;
3431 
3432   switch (MI.getOpcode()) {
3433   default:
3434     return HexagonII::HCG_None;
3435   //
3436   // Compound pairs.
3437   // "p0=cmp.eq(Rs16,Rt16); if (p0.new) jump:nt #r9:2"
3438   // "Rd16=#U6 ; jump #r9:2"
3439   // "Rd16=Rs16 ; jump #r9:2"
3440   //
3441   case Hexagon::C2_cmpeq:
3442   case Hexagon::C2_cmpgt:
3443   case Hexagon::C2_cmpgtu:
3444     DstReg = MI.getOperand(0).getReg();
3445     Src1Reg = MI.getOperand(1).getReg();
3446     Src2Reg = MI.getOperand(2).getReg();
3447     if (Hexagon::PredRegsRegClass.contains(DstReg) &&
3448         (Hexagon::P0 == DstReg || Hexagon::P1 == DstReg) &&
3449         isIntRegForSubInst(Src1Reg) && isIntRegForSubInst(Src2Reg))
3450       return HexagonII::HCG_A;
3451     break;
3452   case Hexagon::C2_cmpeqi:
3453   case Hexagon::C2_cmpgti:
3454   case Hexagon::C2_cmpgtui:
3455     // P0 = cmp.eq(Rs,#u2)
3456     DstReg = MI.getOperand(0).getReg();
3457     SrcReg = MI.getOperand(1).getReg();
3458     if (Hexagon::PredRegsRegClass.contains(DstReg) &&
3459         (Hexagon::P0 == DstReg || Hexagon::P1 == DstReg) &&
3460         isIntRegForSubInst(SrcReg) && MI.getOperand(2).isImm() &&
3461         ((isUInt<5>(MI.getOperand(2).getImm())) ||
3462          (MI.getOperand(2).getImm() == -1)))
3463       return HexagonII::HCG_A;
3464     break;
3465   case Hexagon::A2_tfr:
3466     // Rd = Rs
3467     DstReg = MI.getOperand(0).getReg();
3468     SrcReg = MI.getOperand(1).getReg();
3469     if (isIntRegForSubInst(DstReg) && isIntRegForSubInst(SrcReg))
3470       return HexagonII::HCG_A;
3471     break;
3472   case Hexagon::A2_tfrsi:
3473     // Rd = #u6
3474     // Do not test for #u6 size since the const is getting extended
3475     // regardless and compound could be formed.
3476     DstReg = MI.getOperand(0).getReg();
3477     if (isIntRegForSubInst(DstReg))
3478       return HexagonII::HCG_A;
3479     break;
3480   case Hexagon::S2_tstbit_i:
3481     DstReg = MI.getOperand(0).getReg();
3482     Src1Reg = MI.getOperand(1).getReg();
3483     if (Hexagon::PredRegsRegClass.contains(DstReg) &&
3484         (Hexagon::P0 == DstReg || Hexagon::P1 == DstReg) &&
3485         MI.getOperand(2).isImm() &&
3486         isIntRegForSubInst(Src1Reg) && (MI.getOperand(2).getImm() == 0))
3487       return HexagonII::HCG_A;
3488     break;
3489   // The fact that .new form is used pretty much guarantees
3490   // that predicate register will match. Nevertheless,
3491   // there could be some false positives without additional
3492   // checking.
3493   case Hexagon::J2_jumptnew:
3494   case Hexagon::J2_jumpfnew:
3495   case Hexagon::J2_jumptnewpt:
3496   case Hexagon::J2_jumpfnewpt:
3497     Src1Reg = MI.getOperand(0).getReg();
3498     if (Hexagon::PredRegsRegClass.contains(Src1Reg) &&
3499         (Hexagon::P0 == Src1Reg || Hexagon::P1 == Src1Reg))
3500       return HexagonII::HCG_B;
3501     break;
3502   // Transfer and jump:
3503   // Rd=#U6 ; jump #r9:2
3504   // Rd=Rs ; jump #r9:2
3505   // Do not test for jump range here.
3506   case Hexagon::J2_jump:
3507   case Hexagon::RESTORE_DEALLOC_RET_JMP_V4:
3508   case Hexagon::RESTORE_DEALLOC_RET_JMP_V4_PIC:
3509     return HexagonII::HCG_C;
3510   }
3511 
3512   return HexagonII::HCG_None;
3513 }
3514 
3515 // Returns -1 when there is no opcode found.
3516 unsigned HexagonInstrInfo::getCompoundOpcode(const MachineInstr &GA,
3517       const MachineInstr &GB) const {
3518   assert(getCompoundCandidateGroup(GA) == HexagonII::HCG_A);
3519   assert(getCompoundCandidateGroup(GB) == HexagonII::HCG_B);
3520   if ((GA.getOpcode() != Hexagon::C2_cmpeqi) ||
3521       (GB.getOpcode() != Hexagon::J2_jumptnew))
3522     return -1u;
3523   Register DestReg = GA.getOperand(0).getReg();
3524   if (!GB.readsRegister(DestReg, /*TRI=*/nullptr))
3525     return -1u;
3526   if (DestReg != Hexagon::P0 && DestReg != Hexagon::P1)
3527     return -1u;
3528   // The value compared against must be either u5 or -1.
3529   const MachineOperand &CmpOp = GA.getOperand(2);
3530   if (!CmpOp.isImm())
3531     return -1u;
3532   int V = CmpOp.getImm();
3533   if (V == -1)
3534     return DestReg == Hexagon::P0 ? Hexagon::J4_cmpeqn1_tp0_jump_nt
3535                                   : Hexagon::J4_cmpeqn1_tp1_jump_nt;
3536   if (!isUInt<5>(V))
3537     return -1u;
3538   return DestReg == Hexagon::P0 ? Hexagon::J4_cmpeqi_tp0_jump_nt
3539                                 : Hexagon::J4_cmpeqi_tp1_jump_nt;
3540 }
3541 
3542 // Returns -1 if there is no opcode found.
3543 int HexagonInstrInfo::getDuplexOpcode(const MachineInstr &MI,
3544                                       bool ForBigCore) const {
3545   // Static table to switch the opcodes across Tiny Core and Big Core.
3546   // dup_ opcodes are Big core opcodes.
3547   // NOTE: There are special instructions that need to handled later.
3548   // L4_return* instructions, they will only occupy SLOT0 (on big core too).
3549   // PS_jmpret - This pseudo translates to J2_jumpr which occupies only SLOT2.
3550   // The compiler need to base the root instruction to L6_return_map_to_raw
3551   // which can go any slot.
3552   static const std::map<unsigned, unsigned> DupMap = {
3553       {Hexagon::A2_add, Hexagon::dup_A2_add},
3554       {Hexagon::A2_addi, Hexagon::dup_A2_addi},
3555       {Hexagon::A2_andir, Hexagon::dup_A2_andir},
3556       {Hexagon::A2_combineii, Hexagon::dup_A2_combineii},
3557       {Hexagon::A2_sxtb, Hexagon::dup_A2_sxtb},
3558       {Hexagon::A2_sxth, Hexagon::dup_A2_sxth},
3559       {Hexagon::A2_tfr, Hexagon::dup_A2_tfr},
3560       {Hexagon::A2_tfrsi, Hexagon::dup_A2_tfrsi},
3561       {Hexagon::A2_zxtb, Hexagon::dup_A2_zxtb},
3562       {Hexagon::A2_zxth, Hexagon::dup_A2_zxth},
3563       {Hexagon::A4_combineii, Hexagon::dup_A4_combineii},
3564       {Hexagon::A4_combineir, Hexagon::dup_A4_combineir},
3565       {Hexagon::A4_combineri, Hexagon::dup_A4_combineri},
3566       {Hexagon::C2_cmoveif, Hexagon::dup_C2_cmoveif},
3567       {Hexagon::C2_cmoveit, Hexagon::dup_C2_cmoveit},
3568       {Hexagon::C2_cmovenewif, Hexagon::dup_C2_cmovenewif},
3569       {Hexagon::C2_cmovenewit, Hexagon::dup_C2_cmovenewit},
3570       {Hexagon::C2_cmpeqi, Hexagon::dup_C2_cmpeqi},
3571       {Hexagon::L2_deallocframe, Hexagon::dup_L2_deallocframe},
3572       {Hexagon::L2_loadrb_io, Hexagon::dup_L2_loadrb_io},
3573       {Hexagon::L2_loadrd_io, Hexagon::dup_L2_loadrd_io},
3574       {Hexagon::L2_loadrh_io, Hexagon::dup_L2_loadrh_io},
3575       {Hexagon::L2_loadri_io, Hexagon::dup_L2_loadri_io},
3576       {Hexagon::L2_loadrub_io, Hexagon::dup_L2_loadrub_io},
3577       {Hexagon::L2_loadruh_io, Hexagon::dup_L2_loadruh_io},
3578       {Hexagon::S2_allocframe, Hexagon::dup_S2_allocframe},
3579       {Hexagon::S2_storerb_io, Hexagon::dup_S2_storerb_io},
3580       {Hexagon::S2_storerd_io, Hexagon::dup_S2_storerd_io},
3581       {Hexagon::S2_storerh_io, Hexagon::dup_S2_storerh_io},
3582       {Hexagon::S2_storeri_io, Hexagon::dup_S2_storeri_io},
3583       {Hexagon::S4_storeirb_io, Hexagon::dup_S4_storeirb_io},
3584       {Hexagon::S4_storeiri_io, Hexagon::dup_S4_storeiri_io},
3585   };
3586   unsigned OpNum = MI.getOpcode();
3587   // Conversion to Big core.
3588   if (ForBigCore) {
3589     auto Iter = DupMap.find(OpNum);
3590     if (Iter != DupMap.end())
3591       return Iter->second;
3592   } else { // Conversion to Tiny core.
3593     for (const auto &Iter : DupMap)
3594       if (Iter.second == OpNum)
3595         return Iter.first;
3596   }
3597   return -1;
3598 }
3599 
3600 int HexagonInstrInfo::getCondOpcode(int Opc, bool invertPredicate) const {
3601   enum Hexagon::PredSense inPredSense;
3602   inPredSense = invertPredicate ? Hexagon::PredSense_false :
3603                                   Hexagon::PredSense_true;
3604   int CondOpcode = Hexagon::getPredOpcode(Opc, inPredSense);
3605   if (CondOpcode >= 0) // Valid Conditional opcode/instruction
3606     return CondOpcode;
3607 
3608   llvm_unreachable("Unexpected predicable instruction");
3609 }
3610 
3611 // Return the cur value instruction for a given store.
3612 int HexagonInstrInfo::getDotCurOp(const MachineInstr &MI) const {
3613   switch (MI.getOpcode()) {
3614   default: llvm_unreachable("Unknown .cur type");
3615   case Hexagon::V6_vL32b_pi:
3616     return Hexagon::V6_vL32b_cur_pi;
3617   case Hexagon::V6_vL32b_ai:
3618     return Hexagon::V6_vL32b_cur_ai;
3619   case Hexagon::V6_vL32b_nt_pi:
3620     return Hexagon::V6_vL32b_nt_cur_pi;
3621   case Hexagon::V6_vL32b_nt_ai:
3622     return Hexagon::V6_vL32b_nt_cur_ai;
3623   case Hexagon::V6_vL32b_ppu:
3624     return Hexagon::V6_vL32b_cur_ppu;
3625   case Hexagon::V6_vL32b_nt_ppu:
3626     return Hexagon::V6_vL32b_nt_cur_ppu;
3627   }
3628   return 0;
3629 }
3630 
3631 // Return the regular version of the .cur instruction.
3632 int HexagonInstrInfo::getNonDotCurOp(const MachineInstr &MI) const {
3633   switch (MI.getOpcode()) {
3634   default: llvm_unreachable("Unknown .cur type");
3635   case Hexagon::V6_vL32b_cur_pi:
3636     return Hexagon::V6_vL32b_pi;
3637   case Hexagon::V6_vL32b_cur_ai:
3638     return Hexagon::V6_vL32b_ai;
3639   case Hexagon::V6_vL32b_nt_cur_pi:
3640     return Hexagon::V6_vL32b_nt_pi;
3641   case Hexagon::V6_vL32b_nt_cur_ai:
3642     return Hexagon::V6_vL32b_nt_ai;
3643   case Hexagon::V6_vL32b_cur_ppu:
3644     return Hexagon::V6_vL32b_ppu;
3645   case Hexagon::V6_vL32b_nt_cur_ppu:
3646     return Hexagon::V6_vL32b_nt_ppu;
3647   }
3648   return 0;
3649 }
3650 
3651 // The diagram below shows the steps involved in the conversion of a predicated
3652 // store instruction to its .new predicated new-value form.
3653 //
3654 // Note: It doesn't include conditional new-value stores as they can't be
3655 // converted to .new predicate.
3656 //
3657 //               p.new NV store [ if(p0.new)memw(R0+#0)=R2.new ]
3658 //                ^           ^
3659 //               /             \ (not OK. it will cause new-value store to be
3660 //              /               X conditional on p0.new while R2 producer is
3661 //             /                 \ on p0)
3662 //            /                   \.
3663 //     p.new store                 p.old NV store
3664 // [if(p0.new)memw(R0+#0)=R2]    [if(p0)memw(R0+#0)=R2.new]
3665 //            ^                  ^
3666 //             \                /
3667 //              \              /
3668 //               \            /
3669 //                 p.old store
3670 //             [if (p0)memw(R0+#0)=R2]
3671 //
3672 // The following set of instructions further explains the scenario where
3673 // conditional new-value store becomes invalid when promoted to .new predicate
3674 // form.
3675 //
3676 // { 1) if (p0) r0 = add(r1, r2)
3677 //   2) p0 = cmp.eq(r3, #0) }
3678 //
3679 //   3) if (p0) memb(r1+#0) = r0  --> this instruction can't be grouped with
3680 // the first two instructions because in instr 1, r0 is conditional on old value
3681 // of p0 but its use in instr 3 is conditional on p0 modified by instr 2 which
3682 // is not valid for new-value stores.
3683 // Predicated new value stores (i.e. if (p0) memw(..)=r0.new) are excluded
3684 // from the "Conditional Store" list. Because a predicated new value store
3685 // would NOT be promoted to a double dot new store. See diagram below:
3686 // This function returns yes for those stores that are predicated but not
3687 // yet promoted to predicate dot new instructions.
3688 //
3689 //                          +---------------------+
3690 //                    /-----| if (p0) memw(..)=r0 |---------\~
3691 //                   ||     +---------------------+         ||
3692 //          promote  ||       /\       /\                   ||  promote
3693 //                   ||      /||\     /||\                  ||
3694 //                  \||/    demote     ||                  \||/
3695 //                   \/       ||       ||                   \/
3696 //       +-------------------------+   ||   +-------------------------+
3697 //       | if (p0.new) memw(..)=r0 |   ||   | if (p0) memw(..)=r0.new |
3698 //       +-------------------------+   ||   +-------------------------+
3699 //                        ||           ||         ||
3700 //                        ||         demote      \||/
3701 //                      promote        ||         \/ NOT possible
3702 //                        ||           ||         /\~
3703 //                       \||/          ||        /||\~
3704 //                        \/           ||         ||
3705 //                      +-----------------------------+
3706 //                      | if (p0.new) memw(..)=r0.new |
3707 //                      +-----------------------------+
3708 //                           Double Dot New Store
3709 //
3710 // Returns the most basic instruction for the .new predicated instructions and
3711 // new-value stores.
3712 // For example, all of the following instructions will be converted back to the
3713 // same instruction:
3714 // 1) if (p0.new) memw(R0+#0) = R1.new  --->
3715 // 2) if (p0) memw(R0+#0)= R1.new      -------> if (p0) memw(R0+#0) = R1
3716 // 3) if (p0.new) memw(R0+#0) = R1      --->
3717 //
3718 // To understand the translation of instruction 1 to its original form, consider
3719 // a packet with 3 instructions.
3720 // { p0 = cmp.eq(R0,R1)
3721 //   if (p0.new) R2 = add(R3, R4)
3722 //   R5 = add (R3, R1)
3723 // }
3724 // if (p0) memw(R5+#0) = R2 <--- trying to include it in the previous packet
3725 //
3726 // This instruction can be part of the previous packet only if both p0 and R2
3727 // are promoted to .new values. This promotion happens in steps, first
3728 // predicate register is promoted to .new and in the next iteration R2 is
3729 // promoted. Therefore, in case of dependence check failure (due to R5) during
3730 // next iteration, it should be converted back to its most basic form.
3731 
3732 // Return the new value instruction for a given store.
3733 int HexagonInstrInfo::getDotNewOp(const MachineInstr &MI) const {
3734   int NVOpcode = Hexagon::getNewValueOpcode(MI.getOpcode());
3735   if (NVOpcode >= 0) // Valid new-value store instruction.
3736     return NVOpcode;
3737 
3738   switch (MI.getOpcode()) {
3739   default:
3740     report_fatal_error(Twine("Unknown .new type: ") +
3741                        std::to_string(MI.getOpcode()));
3742   case Hexagon::S4_storerb_ur:
3743     return Hexagon::S4_storerbnew_ur;
3744 
3745   case Hexagon::S2_storerb_pci:
3746     return Hexagon::S2_storerb_pci;
3747 
3748   case Hexagon::S2_storeri_pci:
3749     return Hexagon::S2_storeri_pci;
3750 
3751   case Hexagon::S2_storerh_pci:
3752     return Hexagon::S2_storerh_pci;
3753 
3754   case Hexagon::S2_storerd_pci:
3755     return Hexagon::S2_storerd_pci;
3756 
3757   case Hexagon::S2_storerf_pci:
3758     return Hexagon::S2_storerf_pci;
3759 
3760   case Hexagon::V6_vS32b_ai:
3761     return Hexagon::V6_vS32b_new_ai;
3762 
3763   case Hexagon::V6_vS32b_pi:
3764     return Hexagon::V6_vS32b_new_pi;
3765   }
3766   return 0;
3767 }
3768 
3769 // Returns the opcode to use when converting MI, which is a conditional jump,
3770 // into a conditional instruction which uses the .new value of the predicate.
3771 // We also use branch probabilities to add a hint to the jump.
3772 // If MBPI is null, all edges will be treated as equally likely for the
3773 // purposes of establishing a predication hint.
3774 int HexagonInstrInfo::getDotNewPredJumpOp(const MachineInstr &MI,
3775       const MachineBranchProbabilityInfo *MBPI) const {
3776   // We assume that block can have at most two successors.
3777   const MachineBasicBlock *Src = MI.getParent();
3778   const MachineOperand &BrTarget = MI.getOperand(1);
3779   bool Taken = false;
3780   const BranchProbability OneHalf(1, 2);
3781 
3782   auto getEdgeProbability = [MBPI] (const MachineBasicBlock *Src,
3783                                     const MachineBasicBlock *Dst) {
3784     if (MBPI)
3785       return MBPI->getEdgeProbability(Src, Dst);
3786     return BranchProbability(1, Src->succ_size());
3787   };
3788 
3789   if (BrTarget.isMBB()) {
3790     const MachineBasicBlock *Dst = BrTarget.getMBB();
3791     Taken = getEdgeProbability(Src, Dst) >= OneHalf;
3792   } else {
3793     // The branch target is not a basic block (most likely a function).
3794     // Since BPI only gives probabilities for targets that are basic blocks,
3795     // try to identify another target of this branch (potentially a fall-
3796     // -through) and check the probability of that target.
3797     //
3798     // The only handled branch combinations are:
3799     // - one conditional branch,
3800     // - one conditional branch followed by one unconditional branch.
3801     // Otherwise, assume not-taken.
3802     assert(MI.isConditionalBranch());
3803     const MachineBasicBlock &B = *MI.getParent();
3804     bool SawCond = false, Bad = false;
3805     for (const MachineInstr &I : B) {
3806       if (!I.isBranch())
3807         continue;
3808       if (I.isConditionalBranch()) {
3809         SawCond = true;
3810         if (&I != &MI) {
3811           Bad = true;
3812           break;
3813         }
3814       }
3815       if (I.isUnconditionalBranch() && !SawCond) {
3816         Bad = true;
3817         break;
3818       }
3819     }
3820     if (!Bad) {
3821       MachineBasicBlock::const_instr_iterator It(MI);
3822       MachineBasicBlock::const_instr_iterator NextIt = std::next(It);
3823       if (NextIt == B.instr_end()) {
3824         // If this branch is the last, look for the fall-through block.
3825         for (const MachineBasicBlock *SB : B.successors()) {
3826           if (!B.isLayoutSuccessor(SB))
3827             continue;
3828           Taken = getEdgeProbability(Src, SB) < OneHalf;
3829           break;
3830         }
3831       } else {
3832         assert(NextIt->isUnconditionalBranch());
3833         // Find the first MBB operand and assume it's the target.
3834         const MachineBasicBlock *BT = nullptr;
3835         for (const MachineOperand &Op : NextIt->operands()) {
3836           if (!Op.isMBB())
3837             continue;
3838           BT = Op.getMBB();
3839           break;
3840         }
3841         Taken = BT && getEdgeProbability(Src, BT) < OneHalf;
3842       }
3843     } // if (!Bad)
3844   }
3845 
3846   // The Taken flag should be set to something reasonable by this point.
3847 
3848   switch (MI.getOpcode()) {
3849   case Hexagon::J2_jumpt:
3850     return Taken ? Hexagon::J2_jumptnewpt : Hexagon::J2_jumptnew;
3851   case Hexagon::J2_jumpf:
3852     return Taken ? Hexagon::J2_jumpfnewpt : Hexagon::J2_jumpfnew;
3853 
3854   default:
3855     llvm_unreachable("Unexpected jump instruction.");
3856   }
3857 }
3858 
3859 // Return .new predicate version for an instruction.
3860 int HexagonInstrInfo::getDotNewPredOp(const MachineInstr &MI,
3861       const MachineBranchProbabilityInfo *MBPI) const {
3862   switch (MI.getOpcode()) {
3863   // Condtional Jumps
3864   case Hexagon::J2_jumpt:
3865   case Hexagon::J2_jumpf:
3866     return getDotNewPredJumpOp(MI, MBPI);
3867   }
3868 
3869   int NewOpcode = Hexagon::getPredNewOpcode(MI.getOpcode());
3870   if (NewOpcode >= 0)
3871     return NewOpcode;
3872   return 0;
3873 }
3874 
3875 int HexagonInstrInfo::getDotOldOp(const MachineInstr &MI) const {
3876   int NewOp = MI.getOpcode();
3877   if (isPredicated(NewOp) && isPredicatedNew(NewOp)) { // Get predicate old form
3878     NewOp = Hexagon::getPredOldOpcode(NewOp);
3879     // All Hexagon architectures have prediction bits on dot-new branches,
3880     // but only Hexagon V60+ has prediction bits on dot-old ones. Make sure
3881     // to pick the right opcode when converting back to dot-old.
3882     if (!Subtarget.hasFeature(Hexagon::ArchV60)) {
3883       switch (NewOp) {
3884       case Hexagon::J2_jumptpt:
3885         NewOp = Hexagon::J2_jumpt;
3886         break;
3887       case Hexagon::J2_jumpfpt:
3888         NewOp = Hexagon::J2_jumpf;
3889         break;
3890       case Hexagon::J2_jumprtpt:
3891         NewOp = Hexagon::J2_jumprt;
3892         break;
3893       case Hexagon::J2_jumprfpt:
3894         NewOp = Hexagon::J2_jumprf;
3895         break;
3896       }
3897     }
3898     assert(NewOp >= 0 &&
3899            "Couldn't change predicate new instruction to its old form.");
3900   }
3901 
3902   if (isNewValueStore(NewOp)) { // Convert into non-new-value format
3903     NewOp = Hexagon::getNonNVStore(NewOp);
3904     assert(NewOp >= 0 && "Couldn't change new-value store to its old form.");
3905   }
3906 
3907   if (Subtarget.hasV60Ops())
3908     return NewOp;
3909 
3910   // Subtargets prior to V60 didn't support 'taken' forms of predicated jumps.
3911   switch (NewOp) {
3912   case Hexagon::J2_jumpfpt:
3913     return Hexagon::J2_jumpf;
3914   case Hexagon::J2_jumptpt:
3915     return Hexagon::J2_jumpt;
3916   case Hexagon::J2_jumprfpt:
3917     return Hexagon::J2_jumprf;
3918   case Hexagon::J2_jumprtpt:
3919     return Hexagon::J2_jumprt;
3920   }
3921   return NewOp;
3922 }
3923 
3924 // See if instruction could potentially be a duplex candidate.
3925 // If so, return its group. Zero otherwise.
3926 HexagonII::SubInstructionGroup HexagonInstrInfo::getDuplexCandidateGroup(
3927       const MachineInstr &MI) const {
3928   Register DstReg, SrcReg, Src1Reg, Src2Reg;
3929   const HexagonRegisterInfo &HRI = *Subtarget.getRegisterInfo();
3930 
3931   switch (MI.getOpcode()) {
3932   default:
3933     return HexagonII::HSIG_None;
3934   //
3935   // Group L1:
3936   //
3937   // Rd = memw(Rs+#u4:2)
3938   // Rd = memub(Rs+#u4:0)
3939   case Hexagon::L2_loadri_io:
3940   case Hexagon::dup_L2_loadri_io:
3941     DstReg = MI.getOperand(0).getReg();
3942     SrcReg = MI.getOperand(1).getReg();
3943     // Special case this one from Group L2.
3944     // Rd = memw(r29+#u5:2)
3945     if (isIntRegForSubInst(DstReg)) {
3946       if (Hexagon::IntRegsRegClass.contains(SrcReg) &&
3947           HRI.getStackRegister() == SrcReg &&
3948           MI.getOperand(2).isImm() &&
3949           isShiftedUInt<5,2>(MI.getOperand(2).getImm()))
3950         return HexagonII::HSIG_L2;
3951       // Rd = memw(Rs+#u4:2)
3952       if (isIntRegForSubInst(SrcReg) &&
3953           (MI.getOperand(2).isImm() &&
3954           isShiftedUInt<4,2>(MI.getOperand(2).getImm())))
3955         return HexagonII::HSIG_L1;
3956     }
3957     break;
3958   case Hexagon::L2_loadrub_io:
3959   case Hexagon::dup_L2_loadrub_io:
3960     // Rd = memub(Rs+#u4:0)
3961     DstReg = MI.getOperand(0).getReg();
3962     SrcReg = MI.getOperand(1).getReg();
3963     if (isIntRegForSubInst(DstReg) && isIntRegForSubInst(SrcReg) &&
3964         MI.getOperand(2).isImm() && isUInt<4>(MI.getOperand(2).getImm()))
3965       return HexagonII::HSIG_L1;
3966     break;
3967   //
3968   // Group L2:
3969   //
3970   // Rd = memh/memuh(Rs+#u3:1)
3971   // Rd = memb(Rs+#u3:0)
3972   // Rd = memw(r29+#u5:2) - Handled above.
3973   // Rdd = memd(r29+#u5:3)
3974   // deallocframe
3975   // [if ([!]p0[.new])] dealloc_return
3976   // [if ([!]p0[.new])] jumpr r31
3977   case Hexagon::L2_loadrh_io:
3978   case Hexagon::L2_loadruh_io:
3979   case Hexagon::dup_L2_loadrh_io:
3980   case Hexagon::dup_L2_loadruh_io:
3981     // Rd = memh/memuh(Rs+#u3:1)
3982     DstReg = MI.getOperand(0).getReg();
3983     SrcReg = MI.getOperand(1).getReg();
3984     if (isIntRegForSubInst(DstReg) && isIntRegForSubInst(SrcReg) &&
3985         MI.getOperand(2).isImm() &&
3986         isShiftedUInt<3,1>(MI.getOperand(2).getImm()))
3987       return HexagonII::HSIG_L2;
3988     break;
3989   case Hexagon::L2_loadrb_io:
3990   case Hexagon::dup_L2_loadrb_io:
3991     // Rd = memb(Rs+#u3:0)
3992     DstReg = MI.getOperand(0).getReg();
3993     SrcReg = MI.getOperand(1).getReg();
3994     if (isIntRegForSubInst(DstReg) && isIntRegForSubInst(SrcReg) &&
3995         MI.getOperand(2).isImm() &&
3996         isUInt<3>(MI.getOperand(2).getImm()))
3997       return HexagonII::HSIG_L2;
3998     break;
3999   case Hexagon::L2_loadrd_io:
4000   case Hexagon::dup_L2_loadrd_io:
4001     // Rdd = memd(r29+#u5:3)
4002     DstReg = MI.getOperand(0).getReg();
4003     SrcReg = MI.getOperand(1).getReg();
4004     if (isDblRegForSubInst(DstReg, HRI) &&
4005         Hexagon::IntRegsRegClass.contains(SrcReg) &&
4006         HRI.getStackRegister() == SrcReg &&
4007         MI.getOperand(2).isImm() &&
4008         isShiftedUInt<5,3>(MI.getOperand(2).getImm()))
4009       return HexagonII::HSIG_L2;
4010     break;
4011   // dealloc_return is not documented in Hexagon Manual, but marked
4012   // with A_SUBINSN attribute in iset_v4classic.py.
4013   case Hexagon::RESTORE_DEALLOC_RET_JMP_V4:
4014   case Hexagon::RESTORE_DEALLOC_RET_JMP_V4_PIC:
4015   case Hexagon::L4_return:
4016   case Hexagon::L2_deallocframe:
4017   case Hexagon::dup_L2_deallocframe:
4018     return HexagonII::HSIG_L2;
4019   case Hexagon::EH_RETURN_JMPR:
4020   case Hexagon::PS_jmpret:
4021   case Hexagon::SL2_jumpr31:
4022     // jumpr r31
4023     // Actual form JMPR implicit-def %pc, implicit %r31, implicit internal %r0
4024     DstReg = MI.getOperand(0).getReg();
4025     if (Hexagon::IntRegsRegClass.contains(DstReg) && (Hexagon::R31 == DstReg))
4026       return HexagonII::HSIG_L2;
4027     break;
4028   case Hexagon::PS_jmprett:
4029   case Hexagon::PS_jmpretf:
4030   case Hexagon::PS_jmprettnewpt:
4031   case Hexagon::PS_jmpretfnewpt:
4032   case Hexagon::PS_jmprettnew:
4033   case Hexagon::PS_jmpretfnew:
4034   case Hexagon::SL2_jumpr31_t:
4035   case Hexagon::SL2_jumpr31_f:
4036   case Hexagon::SL2_jumpr31_tnew:
4037   case Hexagon::SL2_jumpr31_fnew:
4038     DstReg = MI.getOperand(1).getReg();
4039     SrcReg = MI.getOperand(0).getReg();
4040     // [if ([!]p0[.new])] jumpr r31
4041     if ((Hexagon::PredRegsRegClass.contains(SrcReg) &&
4042         (Hexagon::P0 == SrcReg)) &&
4043         (Hexagon::IntRegsRegClass.contains(DstReg) && (Hexagon::R31 == DstReg)))
4044       return HexagonII::HSIG_L2;
4045     break;
4046   case Hexagon::L4_return_t:
4047   case Hexagon::L4_return_f:
4048   case Hexagon::L4_return_tnew_pnt:
4049   case Hexagon::L4_return_fnew_pnt:
4050   case Hexagon::L4_return_tnew_pt:
4051   case Hexagon::L4_return_fnew_pt:
4052     // [if ([!]p0[.new])] dealloc_return
4053     SrcReg = MI.getOperand(0).getReg();
4054     if (Hexagon::PredRegsRegClass.contains(SrcReg) && (Hexagon::P0 == SrcReg))
4055       return HexagonII::HSIG_L2;
4056     break;
4057   //
4058   // Group S1:
4059   //
4060   // memw(Rs+#u4:2) = Rt
4061   // memb(Rs+#u4:0) = Rt
4062   case Hexagon::S2_storeri_io:
4063   case Hexagon::dup_S2_storeri_io:
4064     // Special case this one from Group S2.
4065     // memw(r29+#u5:2) = Rt
4066     Src1Reg = MI.getOperand(0).getReg();
4067     Src2Reg = MI.getOperand(2).getReg();
4068     if (Hexagon::IntRegsRegClass.contains(Src1Reg) &&
4069         isIntRegForSubInst(Src2Reg) &&
4070         HRI.getStackRegister() == Src1Reg && MI.getOperand(1).isImm() &&
4071         isShiftedUInt<5,2>(MI.getOperand(1).getImm()))
4072       return HexagonII::HSIG_S2;
4073     // memw(Rs+#u4:2) = Rt
4074     if (isIntRegForSubInst(Src1Reg) && isIntRegForSubInst(Src2Reg) &&
4075         MI.getOperand(1).isImm() &&
4076         isShiftedUInt<4,2>(MI.getOperand(1).getImm()))
4077       return HexagonII::HSIG_S1;
4078     break;
4079   case Hexagon::S2_storerb_io:
4080   case Hexagon::dup_S2_storerb_io:
4081     // memb(Rs+#u4:0) = Rt
4082     Src1Reg = MI.getOperand(0).getReg();
4083     Src2Reg = MI.getOperand(2).getReg();
4084     if (isIntRegForSubInst(Src1Reg) && isIntRegForSubInst(Src2Reg) &&
4085         MI.getOperand(1).isImm() && isUInt<4>(MI.getOperand(1).getImm()))
4086       return HexagonII::HSIG_S1;
4087     break;
4088   //
4089   // Group S2:
4090   //
4091   // memh(Rs+#u3:1) = Rt
4092   // memw(r29+#u5:2) = Rt
4093   // memd(r29+#s6:3) = Rtt
4094   // memw(Rs+#u4:2) = #U1
4095   // memb(Rs+#u4) = #U1
4096   // allocframe(#u5:3)
4097   case Hexagon::S2_storerh_io:
4098   case Hexagon::dup_S2_storerh_io:
4099     // memh(Rs+#u3:1) = Rt
4100     Src1Reg = MI.getOperand(0).getReg();
4101     Src2Reg = MI.getOperand(2).getReg();
4102     if (isIntRegForSubInst(Src1Reg) && isIntRegForSubInst(Src2Reg) &&
4103         MI.getOperand(1).isImm() &&
4104         isShiftedUInt<3,1>(MI.getOperand(1).getImm()))
4105       return HexagonII::HSIG_S1;
4106     break;
4107   case Hexagon::S2_storerd_io:
4108   case Hexagon::dup_S2_storerd_io:
4109     // memd(r29+#s6:3) = Rtt
4110     Src1Reg = MI.getOperand(0).getReg();
4111     Src2Reg = MI.getOperand(2).getReg();
4112     if (isDblRegForSubInst(Src2Reg, HRI) &&
4113         Hexagon::IntRegsRegClass.contains(Src1Reg) &&
4114         HRI.getStackRegister() == Src1Reg && MI.getOperand(1).isImm() &&
4115         isShiftedInt<6,3>(MI.getOperand(1).getImm()))
4116       return HexagonII::HSIG_S2;
4117     break;
4118   case Hexagon::S4_storeiri_io:
4119   case Hexagon::dup_S4_storeiri_io:
4120     // memw(Rs+#u4:2) = #U1
4121     Src1Reg = MI.getOperand(0).getReg();
4122     if (isIntRegForSubInst(Src1Reg) && MI.getOperand(1).isImm() &&
4123         isShiftedUInt<4,2>(MI.getOperand(1).getImm()) &&
4124         MI.getOperand(2).isImm() && isUInt<1>(MI.getOperand(2).getImm()))
4125       return HexagonII::HSIG_S2;
4126     break;
4127   case Hexagon::S4_storeirb_io:
4128   case Hexagon::dup_S4_storeirb_io:
4129     // memb(Rs+#u4) = #U1
4130     Src1Reg = MI.getOperand(0).getReg();
4131     if (isIntRegForSubInst(Src1Reg) &&
4132         MI.getOperand(1).isImm() && isUInt<4>(MI.getOperand(1).getImm()) &&
4133         MI.getOperand(2).isImm() && isUInt<1>(MI.getOperand(2).getImm()))
4134       return HexagonII::HSIG_S2;
4135     break;
4136   case Hexagon::S2_allocframe:
4137   case Hexagon::dup_S2_allocframe:
4138     if (MI.getOperand(2).isImm() &&
4139         isShiftedUInt<5,3>(MI.getOperand(2).getImm()))
4140       return HexagonII::HSIG_S1;
4141     break;
4142   //
4143   // Group A:
4144   //
4145   // Rx = add(Rx,#s7)
4146   // Rd = Rs
4147   // Rd = #u6
4148   // Rd = #-1
4149   // if ([!]P0[.new]) Rd = #0
4150   // Rd = add(r29,#u6:2)
4151   // Rx = add(Rx,Rs)
4152   // P0 = cmp.eq(Rs,#u2)
4153   // Rdd = combine(#0,Rs)
4154   // Rdd = combine(Rs,#0)
4155   // Rdd = combine(#u2,#U2)
4156   // Rd = add(Rs,#1)
4157   // Rd = add(Rs,#-1)
4158   // Rd = sxth/sxtb/zxtb/zxth(Rs)
4159   // Rd = and(Rs,#1)
4160   case Hexagon::A2_addi:
4161   case Hexagon::dup_A2_addi:
4162     DstReg = MI.getOperand(0).getReg();
4163     SrcReg = MI.getOperand(1).getReg();
4164     if (isIntRegForSubInst(DstReg)) {
4165       // Rd = add(r29,#u6:2)
4166       if (Hexagon::IntRegsRegClass.contains(SrcReg) &&
4167         HRI.getStackRegister() == SrcReg && MI.getOperand(2).isImm() &&
4168         isShiftedUInt<6,2>(MI.getOperand(2).getImm()))
4169         return HexagonII::HSIG_A;
4170       // Rx = add(Rx,#s7)
4171       if ((DstReg == SrcReg) && MI.getOperand(2).isImm() &&
4172           isInt<7>(MI.getOperand(2).getImm()))
4173         return HexagonII::HSIG_A;
4174       // Rd = add(Rs,#1)
4175       // Rd = add(Rs,#-1)
4176       if (isIntRegForSubInst(SrcReg) && MI.getOperand(2).isImm() &&
4177           ((MI.getOperand(2).getImm() == 1) ||
4178           (MI.getOperand(2).getImm() == -1)))
4179         return HexagonII::HSIG_A;
4180     }
4181     break;
4182   case Hexagon::A2_add:
4183   case Hexagon::dup_A2_add:
4184     // Rx = add(Rx,Rs)
4185     DstReg = MI.getOperand(0).getReg();
4186     Src1Reg = MI.getOperand(1).getReg();
4187     Src2Reg = MI.getOperand(2).getReg();
4188     if (isIntRegForSubInst(DstReg) && (DstReg == Src1Reg) &&
4189         isIntRegForSubInst(Src2Reg))
4190       return HexagonII::HSIG_A;
4191     break;
4192   case Hexagon::A2_andir:
4193   case Hexagon::dup_A2_andir:
4194     // Same as zxtb.
4195     // Rd16=and(Rs16,#255)
4196     // Rd16=and(Rs16,#1)
4197     DstReg = MI.getOperand(0).getReg();
4198     SrcReg = MI.getOperand(1).getReg();
4199     if (isIntRegForSubInst(DstReg) && isIntRegForSubInst(SrcReg) &&
4200         MI.getOperand(2).isImm() &&
4201         ((MI.getOperand(2).getImm() == 1) ||
4202         (MI.getOperand(2).getImm() == 255)))
4203       return HexagonII::HSIG_A;
4204     break;
4205   case Hexagon::A2_tfr:
4206   case Hexagon::dup_A2_tfr:
4207     // Rd = Rs
4208     DstReg = MI.getOperand(0).getReg();
4209     SrcReg = MI.getOperand(1).getReg();
4210     if (isIntRegForSubInst(DstReg) && isIntRegForSubInst(SrcReg))
4211       return HexagonII::HSIG_A;
4212     break;
4213   case Hexagon::A2_tfrsi:
4214   case Hexagon::dup_A2_tfrsi:
4215     // Rd = #u6
4216     // Do not test for #u6 size since the const is getting extended
4217     // regardless and compound could be formed.
4218     // Rd = #-1
4219     DstReg = MI.getOperand(0).getReg();
4220     if (isIntRegForSubInst(DstReg))
4221       return HexagonII::HSIG_A;
4222     break;
4223   case Hexagon::C2_cmoveit:
4224   case Hexagon::C2_cmovenewit:
4225   case Hexagon::C2_cmoveif:
4226   case Hexagon::C2_cmovenewif:
4227   case Hexagon::dup_C2_cmoveit:
4228   case Hexagon::dup_C2_cmovenewit:
4229   case Hexagon::dup_C2_cmoveif:
4230   case Hexagon::dup_C2_cmovenewif:
4231     // if ([!]P0[.new]) Rd = #0
4232     // Actual form:
4233     // %r16 = C2_cmovenewit internal %p0, 0, implicit undef %r16;
4234     DstReg = MI.getOperand(0).getReg();
4235     SrcReg = MI.getOperand(1).getReg();
4236     if (isIntRegForSubInst(DstReg) &&
4237         Hexagon::PredRegsRegClass.contains(SrcReg) && Hexagon::P0 == SrcReg &&
4238         MI.getOperand(2).isImm() && MI.getOperand(2).getImm() == 0)
4239       return HexagonII::HSIG_A;
4240     break;
4241   case Hexagon::C2_cmpeqi:
4242   case Hexagon::dup_C2_cmpeqi:
4243     // P0 = cmp.eq(Rs,#u2)
4244     DstReg = MI.getOperand(0).getReg();
4245     SrcReg = MI.getOperand(1).getReg();
4246     if (Hexagon::PredRegsRegClass.contains(DstReg) &&
4247         Hexagon::P0 == DstReg && isIntRegForSubInst(SrcReg) &&
4248         MI.getOperand(2).isImm() && isUInt<2>(MI.getOperand(2).getImm()))
4249       return HexagonII::HSIG_A;
4250     break;
4251   case Hexagon::A2_combineii:
4252   case Hexagon::A4_combineii:
4253   case Hexagon::dup_A2_combineii:
4254   case Hexagon::dup_A4_combineii:
4255     // Rdd = combine(#u2,#U2)
4256     DstReg = MI.getOperand(0).getReg();
4257     if (isDblRegForSubInst(DstReg, HRI) &&
4258         ((MI.getOperand(1).isImm() && isUInt<2>(MI.getOperand(1).getImm())) ||
4259         (MI.getOperand(1).isGlobal() &&
4260         isUInt<2>(MI.getOperand(1).getOffset()))) &&
4261         ((MI.getOperand(2).isImm() && isUInt<2>(MI.getOperand(2).getImm())) ||
4262         (MI.getOperand(2).isGlobal() &&
4263         isUInt<2>(MI.getOperand(2).getOffset()))))
4264       return HexagonII::HSIG_A;
4265     break;
4266   case Hexagon::A4_combineri:
4267   case Hexagon::dup_A4_combineri:
4268     // Rdd = combine(Rs,#0)
4269     // Rdd = combine(Rs,#0)
4270     DstReg = MI.getOperand(0).getReg();
4271     SrcReg = MI.getOperand(1).getReg();
4272     if (isDblRegForSubInst(DstReg, HRI) && isIntRegForSubInst(SrcReg) &&
4273         ((MI.getOperand(2).isImm() && MI.getOperand(2).getImm() == 0) ||
4274         (MI.getOperand(2).isGlobal() && MI.getOperand(2).getOffset() == 0)))
4275       return HexagonII::HSIG_A;
4276     break;
4277   case Hexagon::A4_combineir:
4278   case Hexagon::dup_A4_combineir:
4279     // Rdd = combine(#0,Rs)
4280     DstReg = MI.getOperand(0).getReg();
4281     SrcReg = MI.getOperand(2).getReg();
4282     if (isDblRegForSubInst(DstReg, HRI) && isIntRegForSubInst(SrcReg) &&
4283         ((MI.getOperand(1).isImm() && MI.getOperand(1).getImm() == 0) ||
4284         (MI.getOperand(1).isGlobal() && MI.getOperand(1).getOffset() == 0)))
4285       return HexagonII::HSIG_A;
4286     break;
4287   case Hexagon::A2_sxtb:
4288   case Hexagon::A2_sxth:
4289   case Hexagon::A2_zxtb:
4290   case Hexagon::A2_zxth:
4291   case Hexagon::dup_A2_sxtb:
4292   case Hexagon::dup_A2_sxth:
4293   case Hexagon::dup_A2_zxtb:
4294   case Hexagon::dup_A2_zxth:
4295     // Rd = sxth/sxtb/zxtb/zxth(Rs)
4296     DstReg = MI.getOperand(0).getReg();
4297     SrcReg = MI.getOperand(1).getReg();
4298     if (isIntRegForSubInst(DstReg) && isIntRegForSubInst(SrcReg))
4299       return HexagonII::HSIG_A;
4300     break;
4301   }
4302 
4303   return HexagonII::HSIG_None;
4304 }
4305 
4306 short HexagonInstrInfo::getEquivalentHWInstr(const MachineInstr &MI) const {
4307   return Hexagon::getRealHWInstr(MI.getOpcode(), Hexagon::InstrType_Real);
4308 }
4309 
4310 unsigned HexagonInstrInfo::getInstrTimingClassLatency(
4311       const InstrItineraryData *ItinData, const MachineInstr &MI) const {
4312   // Default to one cycle for no itinerary. However, an "empty" itinerary may
4313   // still have a MinLatency property, which getStageLatency checks.
4314   if (!ItinData)
4315     return getInstrLatency(ItinData, MI);
4316 
4317   if (MI.isTransient())
4318     return 0;
4319   return ItinData->getStageLatency(MI.getDesc().getSchedClass());
4320 }
4321 
4322 /// getOperandLatency - Compute and return the use operand latency of a given
4323 /// pair of def and use.
4324 /// In most cases, the static scheduling itinerary was enough to determine the
4325 /// operand latency. But it may not be possible for instructions with variable
4326 /// number of defs / uses.
4327 ///
4328 /// This is a raw interface to the itinerary that may be directly overriden by
4329 /// a target. Use computeOperandLatency to get the best estimate of latency.
4330 std::optional<unsigned> HexagonInstrInfo::getOperandLatency(
4331     const InstrItineraryData *ItinData, const MachineInstr &DefMI,
4332     unsigned DefIdx, const MachineInstr &UseMI, unsigned UseIdx) const {
4333   const HexagonRegisterInfo &HRI = *Subtarget.getRegisterInfo();
4334 
4335   // Get DefIdx and UseIdx for super registers.
4336   const MachineOperand &DefMO = DefMI.getOperand(DefIdx);
4337 
4338   if (DefMO.isReg() && DefMO.getReg().isPhysical()) {
4339     if (DefMO.isImplicit()) {
4340       for (MCPhysReg SR : HRI.superregs(DefMO.getReg())) {
4341         int Idx = DefMI.findRegisterDefOperandIdx(SR, &HRI, false, false);
4342         if (Idx != -1) {
4343           DefIdx = Idx;
4344           break;
4345         }
4346       }
4347     }
4348 
4349     const MachineOperand &UseMO = UseMI.getOperand(UseIdx);
4350     if (UseMO.isImplicit()) {
4351       for (MCPhysReg SR : HRI.superregs(UseMO.getReg())) {
4352         int Idx = UseMI.findRegisterUseOperandIdx(SR, &HRI, false);
4353         if (Idx != -1) {
4354           UseIdx = Idx;
4355           break;
4356         }
4357       }
4358     }
4359   }
4360 
4361   std::optional<unsigned> Latency = TargetInstrInfo::getOperandLatency(
4362       ItinData, DefMI, DefIdx, UseMI, UseIdx);
4363   if (Latency == 0)
4364     // We should never have 0 cycle latency between two instructions unless
4365     // they can be packetized together. However, this decision can't be made
4366     // here.
4367     Latency = 1;
4368   return Latency;
4369 }
4370 
4371 // inverts the predication logic.
4372 // p -> NotP
4373 // NotP -> P
4374 bool HexagonInstrInfo::getInvertedPredSense(
4375       SmallVectorImpl<MachineOperand> &Cond) const {
4376   if (Cond.empty())
4377     return false;
4378   unsigned Opc = getInvertedPredicatedOpcode(Cond[0].getImm());
4379   Cond[0].setImm(Opc);
4380   return true;
4381 }
4382 
4383 unsigned HexagonInstrInfo::getInvertedPredicatedOpcode(const int Opc) const {
4384   int InvPredOpcode;
4385   InvPredOpcode = isPredicatedTrue(Opc) ? Hexagon::getFalsePredOpcode(Opc)
4386                                         : Hexagon::getTruePredOpcode(Opc);
4387   if (InvPredOpcode >= 0) // Valid instruction with the inverted predicate.
4388     return InvPredOpcode;
4389 
4390   llvm_unreachable("Unexpected predicated instruction");
4391 }
4392 
4393 // Returns the max value that doesn't need to be extended.
4394 int HexagonInstrInfo::getMaxValue(const MachineInstr &MI) const {
4395   const uint64_t F = MI.getDesc().TSFlags;
4396   unsigned isSigned = (F >> HexagonII::ExtentSignedPos)
4397                     & HexagonII::ExtentSignedMask;
4398   unsigned bits =  (F >> HexagonII::ExtentBitsPos)
4399                     & HexagonII::ExtentBitsMask;
4400 
4401   if (isSigned) // if value is signed
4402     return ~(-1U << (bits - 1));
4403   else
4404     return ~(-1U << bits);
4405 }
4406 
4407 
4408 bool HexagonInstrInfo::isAddrModeWithOffset(const MachineInstr &MI) const {
4409   switch (MI.getOpcode()) {
4410   case Hexagon::L2_loadrbgp:
4411   case Hexagon::L2_loadrdgp:
4412   case Hexagon::L2_loadrhgp:
4413   case Hexagon::L2_loadrigp:
4414   case Hexagon::L2_loadrubgp:
4415   case Hexagon::L2_loadruhgp:
4416   case Hexagon::S2_storerbgp:
4417   case Hexagon::S2_storerbnewgp:
4418   case Hexagon::S2_storerhgp:
4419   case Hexagon::S2_storerhnewgp:
4420   case Hexagon::S2_storerigp:
4421   case Hexagon::S2_storerinewgp:
4422   case Hexagon::S2_storerdgp:
4423   case Hexagon::S2_storerfgp:
4424     return true;
4425   }
4426   const uint64_t F = MI.getDesc().TSFlags;
4427   unsigned addrMode =
4428     ((F >> HexagonII::AddrModePos) & HexagonII::AddrModeMask);
4429   // Disallow any base+offset instruction. The assembler does not yet reorder
4430   // based up any zero offset instruction.
4431   return (addrMode == HexagonII::BaseRegOffset ||
4432           addrMode == HexagonII::BaseImmOffset ||
4433           addrMode == HexagonII::BaseLongOffset);
4434 }
4435 
4436 bool HexagonInstrInfo::isPureSlot0(const MachineInstr &MI) const {
4437   // Workaround for the Global Scheduler. Sometimes, it creates
4438   // A4_ext as a Pseudo instruction and calls this function to see if
4439   // it can be added to an existing bundle. Since the instruction doesn't
4440   // belong to any BB yet, we can't use getUnits API.
4441   if (MI.getOpcode() == Hexagon::A4_ext)
4442     return false;
4443 
4444   unsigned FuncUnits = getUnits(MI);
4445   return HexagonFUnits::isSlot0Only(FuncUnits);
4446 }
4447 
4448 bool HexagonInstrInfo::isRestrictNoSlot1Store(const MachineInstr &MI) const {
4449   const uint64_t F = MI.getDesc().TSFlags;
4450   return ((F >> HexagonII::RestrictNoSlot1StorePos) &
4451           HexagonII::RestrictNoSlot1StoreMask);
4452 }
4453 
4454 void HexagonInstrInfo::changeDuplexOpcode(MachineBasicBlock::instr_iterator MII,
4455                                           bool ToBigInstrs) const {
4456   int Opcode = -1;
4457   if (ToBigInstrs) { // To BigCore Instr.
4458     // Check if the instruction can form a Duplex.
4459     if (getDuplexCandidateGroup(*MII))
4460       // Get the opcode marked "dup_*" tag.
4461       Opcode = getDuplexOpcode(*MII, ToBigInstrs);
4462   } else // To TinyCore Instr.
4463     Opcode = getDuplexOpcode(*MII, ToBigInstrs);
4464 
4465   // Change the opcode of the instruction.
4466   if (Opcode >= 0)
4467     MII->setDesc(get(Opcode));
4468 }
4469 
4470 // This function is used to translate instructions to facilitate generating
4471 // Duplexes on TinyCore.
4472 void HexagonInstrInfo::translateInstrsForDup(MachineFunction &MF,
4473                                              bool ToBigInstrs) const {
4474   for (auto &MB : MF)
4475     for (MachineBasicBlock::instr_iterator Instr = MB.instr_begin(),
4476                                            End = MB.instr_end();
4477          Instr != End; ++Instr)
4478       changeDuplexOpcode(Instr, ToBigInstrs);
4479 }
4480 
4481 // This is a specialized form of above function.
4482 void HexagonInstrInfo::translateInstrsForDup(
4483     MachineBasicBlock::instr_iterator MII, bool ToBigInstrs) const {
4484   MachineBasicBlock *MBB = MII->getParent();
4485   while ((MII != MBB->instr_end()) && MII->isInsideBundle()) {
4486     changeDuplexOpcode(MII, ToBigInstrs);
4487     ++MII;
4488   }
4489 }
4490 
4491 unsigned HexagonInstrInfo::getMemAccessSize(const MachineInstr &MI) const {
4492   using namespace HexagonII;
4493 
4494   const uint64_t F = MI.getDesc().TSFlags;
4495   unsigned S = (F >> MemAccessSizePos) & MemAccesSizeMask;
4496   unsigned Size = getMemAccessSizeInBytes(MemAccessSize(S));
4497   if (Size != 0)
4498     return Size;
4499   // Y2_dcfetchbo is special
4500   if (MI.getOpcode() == Hexagon::Y2_dcfetchbo)
4501     return HexagonII::DoubleWordAccess;
4502 
4503   // Handle vector access sizes.
4504   const HexagonRegisterInfo &HRI = *Subtarget.getRegisterInfo();
4505   switch (S) {
4506     case HexagonII::HVXVectorAccess:
4507       return HRI.getSpillSize(Hexagon::HvxVRRegClass);
4508     default:
4509       llvm_unreachable("Unexpected instruction");
4510   }
4511 }
4512 
4513 // Returns the min value that doesn't need to be extended.
4514 int HexagonInstrInfo::getMinValue(const MachineInstr &MI) const {
4515   const uint64_t F = MI.getDesc().TSFlags;
4516   unsigned isSigned = (F >> HexagonII::ExtentSignedPos)
4517                     & HexagonII::ExtentSignedMask;
4518   unsigned bits =  (F >> HexagonII::ExtentBitsPos)
4519                     & HexagonII::ExtentBitsMask;
4520 
4521   if (isSigned) // if value is signed
4522     return -1U << (bits - 1);
4523   else
4524     return 0;
4525 }
4526 
4527 // Returns opcode of the non-extended equivalent instruction.
4528 short HexagonInstrInfo::getNonExtOpcode(const MachineInstr &MI) const {
4529   // Check if the instruction has a register form that uses register in place
4530   // of the extended operand, if so return that as the non-extended form.
4531   short NonExtOpcode = Hexagon::getRegForm(MI.getOpcode());
4532     if (NonExtOpcode >= 0)
4533       return NonExtOpcode;
4534 
4535   if (MI.getDesc().mayLoad() || MI.getDesc().mayStore()) {
4536     // Check addressing mode and retrieve non-ext equivalent instruction.
4537     switch (getAddrMode(MI)) {
4538     case HexagonII::Absolute:
4539       return Hexagon::changeAddrMode_abs_io(MI.getOpcode());
4540     case HexagonII::BaseImmOffset:
4541       return Hexagon::changeAddrMode_io_rr(MI.getOpcode());
4542     case HexagonII::BaseLongOffset:
4543       return Hexagon::changeAddrMode_ur_rr(MI.getOpcode());
4544 
4545     default:
4546       return -1;
4547     }
4548   }
4549   return -1;
4550 }
4551 
4552 bool HexagonInstrInfo::getPredReg(ArrayRef<MachineOperand> Cond,
4553       Register &PredReg, unsigned &PredRegPos, unsigned &PredRegFlags) const {
4554   if (Cond.empty())
4555     return false;
4556   assert(Cond.size() == 2);
4557   if (isNewValueJump(Cond[0].getImm()) || Cond[1].isMBB()) {
4558     LLVM_DEBUG(dbgs() << "No predregs for new-value jumps/endloop");
4559     return false;
4560   }
4561   PredReg = Cond[1].getReg();
4562   PredRegPos = 1;
4563   // See IfConversion.cpp why we add RegState::Implicit | RegState::Undef
4564   PredRegFlags = 0;
4565   if (Cond[1].isImplicit())
4566     PredRegFlags = RegState::Implicit;
4567   if (Cond[1].isUndef())
4568     PredRegFlags |= RegState::Undef;
4569   return true;
4570 }
4571 
4572 short HexagonInstrInfo::getPseudoInstrPair(const MachineInstr &MI) const {
4573   return Hexagon::getRealHWInstr(MI.getOpcode(), Hexagon::InstrType_Pseudo);
4574 }
4575 
4576 short HexagonInstrInfo::getRegForm(const MachineInstr &MI) const {
4577   return Hexagon::getRegForm(MI.getOpcode());
4578 }
4579 
4580 // Return the number of bytes required to encode the instruction.
4581 // Hexagon instructions are fixed length, 4 bytes, unless they
4582 // use a constant extender, which requires another 4 bytes.
4583 // For debug instructions and prolog labels, return 0.
4584 unsigned HexagonInstrInfo::getSize(const MachineInstr &MI) const {
4585   if (MI.isDebugInstr() || MI.isPosition())
4586     return 0;
4587 
4588   unsigned Size = MI.getDesc().getSize();
4589   if (!Size)
4590     // Assume the default insn size in case it cannot be determined
4591     // for whatever reason.
4592     Size = HEXAGON_INSTR_SIZE;
4593 
4594   if (isConstExtended(MI) || isExtended(MI))
4595     Size += HEXAGON_INSTR_SIZE;
4596 
4597   // Try and compute number of instructions in asm.
4598   if (BranchRelaxAsmLarge && MI.getOpcode() == Hexagon::INLINEASM) {
4599     const MachineBasicBlock &MBB = *MI.getParent();
4600     const MachineFunction *MF = MBB.getParent();
4601     const MCAsmInfo *MAI = MF->getTarget().getMCAsmInfo();
4602 
4603     // Count the number of register definitions to find the asm string.
4604     unsigned NumDefs = 0;
4605     for (; MI.getOperand(NumDefs).isReg() && MI.getOperand(NumDefs).isDef();
4606          ++NumDefs)
4607       assert(NumDefs != MI.getNumOperands()-2 && "No asm string?");
4608 
4609     assert(MI.getOperand(NumDefs).isSymbol() && "No asm string?");
4610     // Disassemble the AsmStr and approximate number of instructions.
4611     const char *AsmStr = MI.getOperand(NumDefs).getSymbolName();
4612     Size = getInlineAsmLength(AsmStr, *MAI);
4613   }
4614 
4615   return Size;
4616 }
4617 
4618 uint64_t HexagonInstrInfo::getType(const MachineInstr &MI) const {
4619   const uint64_t F = MI.getDesc().TSFlags;
4620   return (F >> HexagonII::TypePos) & HexagonII::TypeMask;
4621 }
4622 
4623 InstrStage::FuncUnits HexagonInstrInfo::getUnits(const MachineInstr &MI) const {
4624   const InstrItineraryData &II = *Subtarget.getInstrItineraryData();
4625   const InstrStage &IS = *II.beginStage(MI.getDesc().getSchedClass());
4626 
4627   return IS.getUnits();
4628 }
4629 
4630 // Calculate size of the basic block without debug instructions.
4631 unsigned HexagonInstrInfo::nonDbgBBSize(const MachineBasicBlock *BB) const {
4632   return nonDbgMICount(BB->instr_begin(), BB->instr_end());
4633 }
4634 
4635 unsigned HexagonInstrInfo::nonDbgBundleSize(
4636       MachineBasicBlock::const_iterator BundleHead) const {
4637   assert(BundleHead->isBundle() && "Not a bundle header");
4638   auto MII = BundleHead.getInstrIterator();
4639   // Skip the bundle header.
4640   return nonDbgMICount(++MII, getBundleEnd(BundleHead.getInstrIterator()));
4641 }
4642 
4643 /// immediateExtend - Changes the instruction in place to one using an immediate
4644 /// extender.
4645 void HexagonInstrInfo::immediateExtend(MachineInstr &MI) const {
4646   assert((isExtendable(MI)||isConstExtended(MI)) &&
4647                                "Instruction must be extendable");
4648   // Find which operand is extendable.
4649   short ExtOpNum = getCExtOpNum(MI);
4650   MachineOperand &MO = MI.getOperand(ExtOpNum);
4651   // This needs to be something we understand.
4652   assert((MO.isMBB() || MO.isImm()) &&
4653          "Branch with unknown extendable field type");
4654   // Mark given operand as extended.
4655   MO.addTargetFlag(HexagonII::HMOTF_ConstExtended);
4656 }
4657 
4658 bool HexagonInstrInfo::invertAndChangeJumpTarget(
4659       MachineInstr &MI, MachineBasicBlock *NewTarget) const {
4660   LLVM_DEBUG(dbgs() << "\n[invertAndChangeJumpTarget] to "
4661                     << printMBBReference(*NewTarget);
4662              MI.dump(););
4663   assert(MI.isBranch());
4664   unsigned NewOpcode = getInvertedPredicatedOpcode(MI.getOpcode());
4665   int TargetPos = MI.getNumOperands() - 1;
4666   // In general branch target is the last operand,
4667   // but some implicit defs added at the end might change it.
4668   while ((TargetPos > -1) && !MI.getOperand(TargetPos).isMBB())
4669     --TargetPos;
4670   assert((TargetPos >= 0) && MI.getOperand(TargetPos).isMBB());
4671   MI.getOperand(TargetPos).setMBB(NewTarget);
4672   if (EnableBranchPrediction && isPredicatedNew(MI)) {
4673     NewOpcode = reversePrediction(NewOpcode);
4674   }
4675   MI.setDesc(get(NewOpcode));
4676   return true;
4677 }
4678 
4679 void HexagonInstrInfo::genAllInsnTimingClasses(MachineFunction &MF) const {
4680   /* +++ The code below is used to generate complete set of Hexagon Insn +++ */
4681   MachineFunction::iterator A = MF.begin();
4682   MachineBasicBlock &B = *A;
4683   MachineBasicBlock::iterator I = B.begin();
4684   DebugLoc DL = I->getDebugLoc();
4685   MachineInstr *NewMI;
4686 
4687   for (unsigned insn = TargetOpcode::GENERIC_OP_END+1;
4688        insn < Hexagon::INSTRUCTION_LIST_END; ++insn) {
4689     NewMI = BuildMI(B, I, DL, get(insn));
4690     LLVM_DEBUG(dbgs() << "\n"
4691                       << getName(NewMI->getOpcode())
4692                       << "  Class: " << NewMI->getDesc().getSchedClass());
4693     NewMI->eraseFromParent();
4694   }
4695   /* --- The code above is used to generate complete set of Hexagon Insn --- */
4696 }
4697 
4698 // inverts the predication logic.
4699 // p -> NotP
4700 // NotP -> P
4701 bool HexagonInstrInfo::reversePredSense(MachineInstr &MI) const {
4702   LLVM_DEBUG(dbgs() << "\nTrying to reverse pred. sense of:"; MI.dump());
4703   MI.setDesc(get(getInvertedPredicatedOpcode(MI.getOpcode())));
4704   return true;
4705 }
4706 
4707 // Reverse the branch prediction.
4708 unsigned HexagonInstrInfo::reversePrediction(unsigned Opcode) const {
4709   int PredRevOpcode = -1;
4710   if (isPredictedTaken(Opcode))
4711     PredRevOpcode = Hexagon::notTakenBranchPrediction(Opcode);
4712   else
4713     PredRevOpcode = Hexagon::takenBranchPrediction(Opcode);
4714   assert(PredRevOpcode > 0);
4715   return PredRevOpcode;
4716 }
4717 
4718 // TODO: Add more rigorous validation.
4719 bool HexagonInstrInfo::validateBranchCond(const ArrayRef<MachineOperand> &Cond)
4720       const {
4721   return Cond.empty() || (Cond[0].isImm() && (Cond.size() != 1));
4722 }
4723 
4724 void HexagonInstrInfo::
4725 setBundleNoShuf(MachineBasicBlock::instr_iterator MIB) const {
4726   assert(MIB->isBundle());
4727   MachineOperand &Operand = MIB->getOperand(0);
4728   if (Operand.isImm())
4729     Operand.setImm(Operand.getImm() | memShufDisabledMask);
4730   else
4731     MIB->addOperand(MachineOperand::CreateImm(memShufDisabledMask));
4732 }
4733 
4734 bool HexagonInstrInfo::getBundleNoShuf(const MachineInstr &MIB) const {
4735   assert(MIB.isBundle());
4736   const MachineOperand &Operand = MIB.getOperand(0);
4737   return (Operand.isImm() && (Operand.getImm() & memShufDisabledMask) != 0);
4738 }
4739 
4740 // Addressing mode relations.
4741 short HexagonInstrInfo::changeAddrMode_abs_io(short Opc) const {
4742   return Opc >= 0 ? Hexagon::changeAddrMode_abs_io(Opc) : Opc;
4743 }
4744 
4745 short HexagonInstrInfo::changeAddrMode_io_abs(short Opc) const {
4746   return Opc >= 0 ? Hexagon::changeAddrMode_io_abs(Opc) : Opc;
4747 }
4748 
4749 short HexagonInstrInfo::changeAddrMode_io_pi(short Opc) const {
4750   return Opc >= 0 ? Hexagon::changeAddrMode_io_pi(Opc) : Opc;
4751 }
4752 
4753 short HexagonInstrInfo::changeAddrMode_io_rr(short Opc) const {
4754   return Opc >= 0 ? Hexagon::changeAddrMode_io_rr(Opc) : Opc;
4755 }
4756 
4757 short HexagonInstrInfo::changeAddrMode_pi_io(short Opc) const {
4758   return Opc >= 0 ? Hexagon::changeAddrMode_pi_io(Opc) : Opc;
4759 }
4760 
4761 short HexagonInstrInfo::changeAddrMode_rr_io(short Opc) const {
4762   return Opc >= 0 ? Hexagon::changeAddrMode_rr_io(Opc) : Opc;
4763 }
4764 
4765 short HexagonInstrInfo::changeAddrMode_rr_ur(short Opc) const {
4766   return Opc >= 0 ? Hexagon::changeAddrMode_rr_ur(Opc) : Opc;
4767 }
4768 
4769 short HexagonInstrInfo::changeAddrMode_ur_rr(short Opc) const {
4770   return Opc >= 0 ? Hexagon::changeAddrMode_ur_rr(Opc) : Opc;
4771 }
4772 
4773 MCInst HexagonInstrInfo::getNop() const {
4774   static const MCInst Nop = MCInstBuilder(Hexagon::A2_nop);
4775 
4776   return MCInstBuilder(Hexagon::BUNDLE)
4777     .addImm(0)
4778     .addInst(&Nop);
4779 }
4780