//===- HexagonBitSimplify.cpp ---------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "BitTracker.h"
#include "HexagonBitTracker.h"
#include "HexagonInstrInfo.h"
#include "HexagonRegisterInfo.h"
#include "HexagonSubtarget.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/GraphTraits.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/Pass.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <iterator>
#include <limits>
#include <utility>
#include <vector>

#define DEBUG_TYPE "hexbit"

using namespace llvm;

static cl::opt<bool> PreserveTiedOps("hexbit-keep-tied", cl::Hidden,
  cl::init(true), cl::desc("Preserve subregisters in tied operands"));
static cl::opt<bool> GenExtract("hexbit-extract", cl::Hidden,
  cl::init(true), cl::desc("Generate extract instructions"));
static cl::opt<bool> GenBitSplit("hexbit-bitsplit", cl::Hidden,
  cl::init(true), cl::desc("Generate bitsplit instructions"));

static cl::opt<unsigned> MaxExtract("hexbit-max-extract", cl::Hidden,
  cl::init(std::numeric_limits<unsigned>::max()));
static unsigned CountExtract = 0;
static cl::opt<unsigned> MaxBitSplit("hexbit-max-bitsplit", cl::Hidden,
  cl::init(std::numeric_limits<unsigned>::max()));
static unsigned CountBitSplit = 0;

namespace llvm {

  void initializeHexagonBitSimplifyPass(PassRegistry& Registry);
  FunctionPass *createHexagonBitSimplify();

} // end namespace llvm

namespace {

  // Set of virtual registers, based on BitVector.
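  // The set is indexed via Register::virtReg2Index, i.e. bit N corresponds
  // to the N-th virtual register. A result of 0 from find_first/find_next
  // means "no (more) registers", since 0 is never a virtual register number.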
  struct RegisterSet : private BitVector {
    RegisterSet() = default;
    explicit RegisterSet(unsigned s, bool t = false) : BitVector(s, t) {}
    RegisterSet(const RegisterSet &RS) = default;

    using BitVector::clear;
    using BitVector::count;

    unsigned find_first() const {
      int First = BitVector::find_first();
      if (First < 0)
        return 0;
      return x2v(First);
    }

    unsigned find_next(unsigned Prev) const {
      int Next = BitVector::find_next(v2x(Prev));
      if (Next < 0)
        return 0;
      return x2v(Next);
    }

    RegisterSet &insert(unsigned R) {
      unsigned Idx = v2x(R);
      ensure(Idx);
      return static_cast<RegisterSet&>(BitVector::set(Idx));
    }
    RegisterSet &remove(unsigned R) {
      unsigned Idx = v2x(R);
      if (Idx >= size())
        return *this;
      return static_cast<RegisterSet&>(BitVector::reset(Idx));
    }

    RegisterSet &insert(const RegisterSet &Rs) {
      return static_cast<RegisterSet&>(BitVector::operator|=(Rs));
    }
    RegisterSet &remove(const RegisterSet &Rs) {
      return static_cast<RegisterSet&>(BitVector::reset(Rs));
    }

    reference operator[](unsigned R) {
      unsigned Idx = v2x(R);
      ensure(Idx);
      return BitVector::operator[](Idx);
    }
    bool operator[](unsigned R) const {
      unsigned Idx = v2x(R);
      assert(Idx < size());
      return BitVector::operator[](Idx);
    }
    bool has(unsigned R) const {
      unsigned Idx = v2x(R);
      if (Idx >= size())
        return false;
      return BitVector::test(Idx);
    }

    bool empty() const {
      return !BitVector::any();
    }
    bool includes(const RegisterSet &Rs) const {
      // A.BitVector::test(B) <=> A-B != {}
      return !Rs.BitVector::test(*this);
    }
    bool intersects(const RegisterSet &Rs) const {
      return BitVector::anyCommon(Rs);
    }

  private:
    void ensure(unsigned Idx) {
      if (size() <= Idx)
        resize(std::max(Idx+1, 32U));
    }

    static inline unsigned v2x(unsigned v) {
      return Register::virtReg2Index(v);
    }

    static inline unsigned x2v(unsigned x) {
      return Register::index2VirtReg(x);
    }
  };

  struct PrintRegSet {
    PrintRegSet(const RegisterSet &S, const TargetRegisterInfo *RI)
      : RS(S), TRI(RI) {}

    friend raw_ostream &operator<< (raw_ostream &OS,
          const PrintRegSet &P);

  private:
    const RegisterSet &RS;
    const TargetRegisterInfo *TRI;
  };

  raw_ostream &operator<< (raw_ostream &OS, const PrintRegSet &P)
    LLVM_ATTRIBUTE_UNUSED;
  raw_ostream &operator<< (raw_ostream &OS, const PrintRegSet &P) {
    OS << '{';
    for (unsigned R = P.RS.find_first(); R; R = P.RS.find_next(R))
      OS << ' ' << printReg(R, P.TRI);
    OS << " }";
    return OS;
  }

  class Transformation;

  class HexagonBitSimplify : public MachineFunctionPass {
  public:
    static char ID;

    HexagonBitSimplify() : MachineFunctionPass(ID) {}

    StringRef getPassName() const override {
      return "Hexagon bit simplification";
    }

    void getAnalysisUsage(AnalysisUsage &AU) const override {
      AU.addRequired<MachineDominatorTree>();
      AU.addPreserved<MachineDominatorTree>();
      MachineFunctionPass::getAnalysisUsage(AU);
    }

    bool runOnMachineFunction(MachineFunction &MF) override;

    static void getInstrDefs(const MachineInstr &MI, RegisterSet &Defs);
    static void getInstrUses(const MachineInstr &MI, RegisterSet &Uses);
    static bool isEqual(const BitTracker::RegisterCell &RC1, uint16_t B1,
        const BitTracker::RegisterCell &RC2, uint16_t B2,
        uint16_t W);
    static bool isZero(const BitTracker::RegisterCell &RC, uint16_t B,
        uint16_t W);
    static bool getConst(const BitTracker::RegisterCell &RC, uint16_t B,
        uint16_t W, uint64_t &U);
    static bool replaceReg(unsigned OldR, unsigned NewR,
        MachineRegisterInfo &MRI);
    static bool getSubregMask(const BitTracker::RegisterRef &RR,
        unsigned &Begin, unsigned &Width, MachineRegisterInfo &MRI);
    static bool replaceRegWithSub(unsigned OldR, unsigned NewR,
        unsigned NewSR, MachineRegisterInfo &MRI);
    static bool replaceSubWithSub(unsigned OldR, unsigned OldSR,
        unsigned NewR, unsigned NewSR, MachineRegisterInfo &MRI);
    static bool parseRegSequence(const MachineInstr &I,
        BitTracker::RegisterRef &SL, BitTracker::RegisterRef &SH,
        const MachineRegisterInfo &MRI);

    static bool getUsedBitsInStore(unsigned Opc, BitVector &Bits,
        uint16_t Begin);
    static bool getUsedBits(unsigned Opc, unsigned OpN, BitVector &Bits,
        uint16_t Begin, const HexagonInstrInfo &HII);

    static const TargetRegisterClass *getFinalVRegClass(
        const BitTracker::RegisterRef &RR, MachineRegisterInfo &MRI);
    static bool isTransparentCopy(const BitTracker::RegisterRef &RD,
        const BitTracker::RegisterRef &RS, MachineRegisterInfo &MRI);

  private:
    MachineDominatorTree *MDT = nullptr;

    bool visitBlock(MachineBasicBlock &B, Transformation &T, RegisterSet &AVs);
    static bool hasTiedUse(unsigned Reg, MachineRegisterInfo &MRI,
        unsigned NewSub = Hexagon::NoSubRegister);
  };

  using HBS = HexagonBitSimplify;

  // The purpose of this class is to provide a common facility to traverse
  // the function top-down or bottom-up via the dominator tree, and keep
  // track of the available registers.
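  // A Transformation is invoked on each block by HexagonBitSimplify::
  // visitBlock: a top-down transformation sees a block before its dominator-
  // tree children, a bottom-up one after them. AVs holds the virtual
  // registers made available (defined) by the dominating blocks.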
  class Transformation {
  public:
    bool TopDown;

    Transformation(bool TD) : TopDown(TD) {}
    virtual ~Transformation() = default;

    virtual bool processBlock(MachineBasicBlock &B, const RegisterSet &AVs) = 0;
  };

} // end anonymous namespace

char HexagonBitSimplify::ID = 0;

INITIALIZE_PASS_BEGIN(HexagonBitSimplify, "hexagon-bit-simplify",
      "Hexagon bit simplification", false, false)
INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree)
INITIALIZE_PASS_END(HexagonBitSimplify, "hexagon-bit-simplify",
      "Hexagon bit simplification", false, false)

bool HexagonBitSimplify::visitBlock(MachineBasicBlock &B, Transformation &T,
      RegisterSet &AVs) {
  bool Changed = false;

  if (T.TopDown)
    Changed = T.processBlock(B, AVs);

  RegisterSet Defs;
  for (auto &I : B)
    getInstrDefs(I, Defs);
  RegisterSet NewAVs = AVs;
  NewAVs.insert(Defs);

  for (auto *DTN : children<MachineDomTreeNode*>(MDT->getNode(&B)))
    Changed |= visitBlock(*(DTN->getBlock()), T, NewAVs);

  if (!T.TopDown)
    Changed |= T.processBlock(B, AVs);

  return Changed;
}

//
// Utility functions:
//
void HexagonBitSimplify::getInstrDefs(const MachineInstr &MI,
      RegisterSet &Defs) {
  for (auto &Op : MI.operands()) {
    if (!Op.isReg() || !Op.isDef())
      continue;
    Register R = Op.getReg();
    if (!Register::isVirtualRegister(R))
      continue;
    Defs.insert(R);
  }
}

void HexagonBitSimplify::getInstrUses(const MachineInstr &MI,
      RegisterSet &Uses) {
  for (auto &Op : MI.operands()) {
    if (!Op.isReg() || !Op.isUse())
      continue;
    Register R = Op.getReg();
    if (!Register::isVirtualRegister(R))
      continue;
    Uses.insert(R);
  }
}

// Check if the W bits starting at B1 in RC1 and at B2 in RC2 are equal.
bool HexagonBitSimplify::isEqual(const BitTracker::RegisterCell &RC1,
      uint16_t B1, const BitTracker::RegisterCell &RC2, uint16_t B2,
      uint16_t W) {
  for (uint16_t i = 0; i < W; ++i) {
    // If RC1[i] is "bottom", it cannot be proven equal to RC2[i].
    if (RC1[B1+i].Type == BitTracker::BitValue::Ref && RC1[B1+i].RefI.Reg == 0)
      return false;
    // Same for RC2[i].
    if (RC2[B2+i].Type == BitTracker::BitValue::Ref && RC2[B2+i].RefI.Reg == 0)
      return false;
    if (RC1[B1+i] != RC2[B2+i])
      return false;
  }
  return true;
}

bool HexagonBitSimplify::isZero(const BitTracker::RegisterCell &RC,
      uint16_t B, uint16_t W) {
  assert(B < RC.width() && B+W <= RC.width());
  for (uint16_t i = B; i < B+W; ++i)
    if (!RC[i].is(0))
      return false;
  return true;
}

bool HexagonBitSimplify::getConst(const BitTracker::RegisterCell &RC,
      uint16_t B, uint16_t W, uint64_t &U) {
  assert(B < RC.width() && B+W <= RC.width());
  int64_t T = 0;
  for (uint16_t i = B+W; i > B; --i) {
    const BitTracker::BitValue &BV = RC[i-1];
    T <<= 1;
    if (BV.is(1))
      T |= 1;
    else if (!BV.is(0))
      return false;
  }
  U = T;
  return true;
}

bool HexagonBitSimplify::replaceReg(unsigned OldR, unsigned NewR,
      MachineRegisterInfo &MRI) {
  if (!Register::isVirtualRegister(OldR) || !Register::isVirtualRegister(NewR))
    return false;
  auto Begin = MRI.use_begin(OldR), End = MRI.use_end();
  decltype(End) NextI;
  for (auto I = Begin; I != End; I = NextI) {
    NextI = std::next(I);
    I->setReg(NewR);
  }
  return Begin != End;
}

bool HexagonBitSimplify::replaceRegWithSub(unsigned OldR, unsigned NewR,
      unsigned NewSR, MachineRegisterInfo &MRI) {
  if (!Register::isVirtualRegister(OldR) || !Register::isVirtualRegister(NewR))
    return false;
  if (hasTiedUse(OldR, MRI, NewSR))
    return false;
  auto Begin = MRI.use_begin(OldR), End = MRI.use_end();
  decltype(End) NextI;
  for (auto I = Begin; I != End; I = NextI) {
    NextI = std::next(I);
    I->setReg(NewR);
    I->setSubReg(NewSR);
  }
  return Begin != End;
}

bool HexagonBitSimplify::replaceSubWithSub(unsigned OldR, unsigned OldSR,
      unsigned NewR, unsigned NewSR, MachineRegisterInfo &MRI) {
  if (!Register::isVirtualRegister(OldR) || !Register::isVirtualRegister(NewR))
    return false;
  if (OldSR != NewSR && hasTiedUse(OldR, MRI, NewSR))
    return false;
  auto Begin = MRI.use_begin(OldR), End = MRI.use_end();
  decltype(End) NextI;
  for (auto I = Begin; I != End; I = NextI) {
    NextI = std::next(I);
    if (I->getSubReg() != OldSR)
      continue;
    I->setReg(NewR);
    I->setSubReg(NewSR);
  }
  return Begin != End;
}

// For a register ref (pair Reg:Sub), set Begin to the position of the LSB
// of Sub in Reg, and set Width to the size of Sub in bits. Return true if
// this succeeded, otherwise return false.
bool HexagonBitSimplify::getSubregMask(const BitTracker::RegisterRef &RR,
      unsigned &Begin, unsigned &Width, MachineRegisterInfo &MRI) {
  const TargetRegisterClass *RC = MRI.getRegClass(RR.Reg);
  if (RR.Sub == 0) {
    Begin = 0;
    Width = MRI.getTargetRegisterInfo()->getRegSizeInBits(*RC);
    return true;
  }

  Begin = 0;

  switch (RC->getID()) {
    case Hexagon::DoubleRegsRegClassID:
    case Hexagon::HvxWRRegClassID:
      Width = MRI.getTargetRegisterInfo()->getRegSizeInBits(*RC) / 2;
      if (RR.Sub == Hexagon::isub_hi || RR.Sub == Hexagon::vsub_hi)
        Begin = Width;
      break;
    default:
      return false;
  }
  return true;
}

// For a REG_SEQUENCE, set SL to the low subregister and SH to the high
// subregister.
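// For example (hypothetical registers), for
//   %2 = REG_SEQUENCE %0, isub_lo, %1, isub_hi
// SL is set to %0 and SH to %1, regardless of the order in which the two
// subregister operands appear.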
bool HexagonBitSimplify::parseRegSequence(const MachineInstr &I,
      BitTracker::RegisterRef &SL, BitTracker::RegisterRef &SH,
      const MachineRegisterInfo &MRI) {
  assert(I.getOpcode() == TargetOpcode::REG_SEQUENCE);
  unsigned Sub1 = I.getOperand(2).getImm(), Sub2 = I.getOperand(4).getImm();
  auto &DstRC = *MRI.getRegClass(I.getOperand(0).getReg());
  auto &HRI = static_cast<const HexagonRegisterInfo&>(
                  *MRI.getTargetRegisterInfo());
  unsigned SubLo = HRI.getHexagonSubRegIndex(DstRC, Hexagon::ps_sub_lo);
  unsigned SubHi = HRI.getHexagonSubRegIndex(DstRC, Hexagon::ps_sub_hi);
  assert((Sub1 == SubLo && Sub2 == SubHi) || (Sub1 == SubHi && Sub2 == SubLo));
  if (Sub1 == SubLo && Sub2 == SubHi) {
    SL = I.getOperand(1);
    SH = I.getOperand(3);
    return true;
  }
  if (Sub1 == SubHi && Sub2 == SubLo) {
    SH = I.getOperand(1);
    SL = I.getOperand(3);
    return true;
  }
  return false;
}

// All stores (except 64-bit stores) take a 32-bit register as the source
// of the value to be stored. If the instruction stores into a location
// that is shorter than 32 bits, some bits of the source register are not
// used. For each store instruction, calculate the set of used bits in
// the source register, and set appropriate bits in Bits. Return true if
// the bits are calculated, false otherwise.
bool HexagonBitSimplify::getUsedBitsInStore(unsigned Opc, BitVector &Bits,
      uint16_t Begin) {
  using namespace Hexagon;

  switch (Opc) {
    // Store byte
    case S2_storerb_io:  // memb(Rs32+#s11:0)=Rt32
    case S2_storerbnew_io:  // memb(Rs32+#s11:0)=Nt8.new
    case S2_pstorerbt_io:  // if (Pv4) memb(Rs32+#u6:0)=Rt32
    case S2_pstorerbf_io:  // if (!Pv4) memb(Rs32+#u6:0)=Rt32
    case S4_pstorerbtnew_io:  // if (Pv4.new) memb(Rs32+#u6:0)=Rt32
    case S4_pstorerbfnew_io:  // if (!Pv4.new) memb(Rs32+#u6:0)=Rt32
    case S2_pstorerbnewt_io:  // if (Pv4) memb(Rs32+#u6:0)=Nt8.new
    case S2_pstorerbnewf_io:  // if (!Pv4) memb(Rs32+#u6:0)=Nt8.new
    case S4_pstorerbnewtnew_io:  // if (Pv4.new) memb(Rs32+#u6:0)=Nt8.new
    case S4_pstorerbnewfnew_io:  // if (!Pv4.new) memb(Rs32+#u6:0)=Nt8.new
    case S2_storerb_pi:  // memb(Rx32++#s4:0)=Rt32
    case S2_storerbnew_pi:  // memb(Rx32++#s4:0)=Nt8.new
    case S2_pstorerbt_pi:  // if (Pv4) memb(Rx32++#s4:0)=Rt32
    case S2_pstorerbf_pi:  // if (!Pv4) memb(Rx32++#s4:0)=Rt32
    case S2_pstorerbtnew_pi:  // if (Pv4.new) memb(Rx32++#s4:0)=Rt32
    case S2_pstorerbfnew_pi:  // if (!Pv4.new) memb(Rx32++#s4:0)=Rt32
    case S2_pstorerbnewt_pi:  // if (Pv4) memb(Rx32++#s4:0)=Nt8.new
    case S2_pstorerbnewf_pi:  // if (!Pv4) memb(Rx32++#s4:0)=Nt8.new
    case S2_pstorerbnewtnew_pi:  // if (Pv4.new) memb(Rx32++#s4:0)=Nt8.new
    case S2_pstorerbnewfnew_pi:  // if (!Pv4.new) memb(Rx32++#s4:0)=Nt8.new
    case S4_storerb_ap:  // memb(Re32=#U6)=Rt32
    case S4_storerbnew_ap:  // memb(Re32=#U6)=Nt8.new
    case S2_storerb_pr:  // memb(Rx32++Mu2)=Rt32
    case S2_storerbnew_pr:  // memb(Rx32++Mu2)=Nt8.new
    case S4_storerb_ur:  // memb(Ru32<<#u2+#U6)=Rt32
    case S4_storerbnew_ur:  // memb(Ru32<<#u2+#U6)=Nt8.new
    case S2_storerb_pbr:  // memb(Rx32++Mu2:brev)=Rt32
    case S2_storerbnew_pbr:  // memb(Rx32++Mu2:brev)=Nt8.new
    case S2_storerb_pci:  // memb(Rx32++#s4:0:circ(Mu2))=Rt32
    case S2_storerbnew_pci:  // memb(Rx32++#s4:0:circ(Mu2))=Nt8.new
    case S2_storerb_pcr:  // memb(Rx32++I:circ(Mu2))=Rt32
    case S2_storerbnew_pcr:  // memb(Rx32++I:circ(Mu2))=Nt8.new
    case S4_storerb_rr:  // memb(Rs32+Ru32<<#u2)=Rt32
    case S4_storerbnew_rr:  // memb(Rs32+Ru32<<#u2)=Nt8.new
    case S4_pstorerbt_rr:  // if (Pv4) memb(Rs32+Ru32<<#u2)=Rt32
    case S4_pstorerbf_rr:  // if (!Pv4) memb(Rs32+Ru32<<#u2)=Rt32
    case S4_pstorerbtnew_rr:  // if (Pv4.new) memb(Rs32+Ru32<<#u2)=Rt32
    case S4_pstorerbfnew_rr:  // if (!Pv4.new) memb(Rs32+Ru32<<#u2)=Rt32
    case S4_pstorerbnewt_rr:  // if (Pv4) memb(Rs32+Ru32<<#u2)=Nt8.new
    case S4_pstorerbnewf_rr:  // if (!Pv4) memb(Rs32+Ru32<<#u2)=Nt8.new
    case S4_pstorerbnewtnew_rr:  // if (Pv4.new) memb(Rs32+Ru32<<#u2)=Nt8.new
    case S4_pstorerbnewfnew_rr:  // if (!Pv4.new) memb(Rs32+Ru32<<#u2)=Nt8.new
    case S2_storerbgp:  // memb(gp+#u16:0)=Rt32
    case S2_storerbnewgp:  // memb(gp+#u16:0)=Nt8.new
    case S4_pstorerbt_abs:  // if (Pv4) memb(#u6)=Rt32
    case S4_pstorerbf_abs:  // if (!Pv4) memb(#u6)=Rt32
    case S4_pstorerbtnew_abs:  // if (Pv4.new) memb(#u6)=Rt32
    case S4_pstorerbfnew_abs:  // if (!Pv4.new) memb(#u6)=Rt32
    case S4_pstorerbnewt_abs:  // if (Pv4) memb(#u6)=Nt8.new
    case S4_pstorerbnewf_abs:  // if (!Pv4) memb(#u6)=Nt8.new
    case S4_pstorerbnewtnew_abs:  // if (Pv4.new) memb(#u6)=Nt8.new
    case S4_pstorerbnewfnew_abs:  // if (!Pv4.new) memb(#u6)=Nt8.new
      Bits.set(Begin, Begin+8);
      return true;

    // Store low half
    case S2_storerh_io:  // memh(Rs32+#s11:1)=Rt32
    case S2_storerhnew_io:  // memh(Rs32+#s11:1)=Nt8.new
    case S2_pstorerht_io:  // if (Pv4) memh(Rs32+#u6:1)=Rt32
    case S2_pstorerhf_io:  // if (!Pv4) memh(Rs32+#u6:1)=Rt32
    case S4_pstorerhtnew_io:  // if (Pv4.new) memh(Rs32+#u6:1)=Rt32
    case S4_pstorerhfnew_io:  // if (!Pv4.new) memh(Rs32+#u6:1)=Rt32
    case S2_pstorerhnewt_io:  // if (Pv4) memh(Rs32+#u6:1)=Nt8.new
    case S2_pstorerhnewf_io:  // if (!Pv4) memh(Rs32+#u6:1)=Nt8.new
    case S4_pstorerhnewtnew_io:  // if (Pv4.new) memh(Rs32+#u6:1)=Nt8.new
    case S4_pstorerhnewfnew_io:  // if (!Pv4.new) memh(Rs32+#u6:1)=Nt8.new
    case S2_storerh_pi:  // memh(Rx32++#s4:1)=Rt32
    case S2_storerhnew_pi:  // memh(Rx32++#s4:1)=Nt8.new
    case S2_pstorerht_pi:  // if (Pv4) memh(Rx32++#s4:1)=Rt32
    case S2_pstorerhf_pi:  // if (!Pv4) memh(Rx32++#s4:1)=Rt32
    case S2_pstorerhtnew_pi:  // if (Pv4.new) memh(Rx32++#s4:1)=Rt32
    case S2_pstorerhfnew_pi:  // if (!Pv4.new) memh(Rx32++#s4:1)=Rt32
    case S2_pstorerhnewt_pi:  // if (Pv4) memh(Rx32++#s4:1)=Nt8.new
    case S2_pstorerhnewf_pi:  // if (!Pv4) memh(Rx32++#s4:1)=Nt8.new
    case S2_pstorerhnewtnew_pi:  // if (Pv4.new) memh(Rx32++#s4:1)=Nt8.new
    case S2_pstorerhnewfnew_pi:  // if (!Pv4.new) memh(Rx32++#s4:1)=Nt8.new
    case S4_storerh_ap:  // memh(Re32=#U6)=Rt32
    case S4_storerhnew_ap:  // memh(Re32=#U6)=Nt8.new
    case S2_storerh_pr:  // memh(Rx32++Mu2)=Rt32
    case S2_storerhnew_pr:  // memh(Rx32++Mu2)=Nt8.new
    case S4_storerh_ur:  // memh(Ru32<<#u2+#U6)=Rt32
    case S4_storerhnew_ur:  // memh(Ru32<<#u2+#U6)=Nt8.new
    case S2_storerh_pbr:  // memh(Rx32++Mu2:brev)=Rt32
    case S2_storerhnew_pbr:  // memh(Rx32++Mu2:brev)=Nt8.new
    case S2_storerh_pci:  // memh(Rx32++#s4:1:circ(Mu2))=Rt32
    case S2_storerhnew_pci:  // memh(Rx32++#s4:1:circ(Mu2))=Nt8.new
    case S2_storerh_pcr:  // memh(Rx32++I:circ(Mu2))=Rt32
    case S2_storerhnew_pcr:  // memh(Rx32++I:circ(Mu2))=Nt8.new
    case S4_storerh_rr:  // memh(Rs32+Ru32<<#u2)=Rt32
    case S4_pstorerht_rr:  // if (Pv4) memh(Rs32+Ru32<<#u2)=Rt32
    case S4_pstorerhf_rr:  // if (!Pv4) memh(Rs32+Ru32<<#u2)=Rt32
    case S4_pstorerhtnew_rr:  // if (Pv4.new) memh(Rs32+Ru32<<#u2)=Rt32
    case S4_pstorerhfnew_rr:  // if (!Pv4.new) memh(Rs32+Ru32<<#u2)=Rt32
    case S4_storerhnew_rr:  // memh(Rs32+Ru32<<#u2)=Nt8.new
    case S4_pstorerhnewt_rr:  // if (Pv4) memh(Rs32+Ru32<<#u2)=Nt8.new
    case S4_pstorerhnewf_rr:  // if (!Pv4) memh(Rs32+Ru32<<#u2)=Nt8.new
    case S4_pstorerhnewtnew_rr:  // if (Pv4.new) memh(Rs32+Ru32<<#u2)=Nt8.new
    case S4_pstorerhnewfnew_rr:  // if (!Pv4.new) memh(Rs32+Ru32<<#u2)=Nt8.new
    case S2_storerhgp:  // memh(gp+#u16:1)=Rt32
    case S2_storerhnewgp:  // memh(gp+#u16:1)=Nt8.new
    case S4_pstorerht_abs:  // if (Pv4) memh(#u6)=Rt32
    case S4_pstorerhf_abs:  // if (!Pv4) memh(#u6)=Rt32
    case S4_pstorerhtnew_abs:  // if (Pv4.new) memh(#u6)=Rt32
    case S4_pstorerhfnew_abs:  // if (!Pv4.new) memh(#u6)=Rt32
    case S4_pstorerhnewt_abs:  // if (Pv4) memh(#u6)=Nt8.new
    case S4_pstorerhnewf_abs:  // if (!Pv4) memh(#u6)=Nt8.new
    case S4_pstorerhnewtnew_abs:  // if (Pv4.new) memh(#u6)=Nt8.new
    case S4_pstorerhnewfnew_abs:  // if (!Pv4.new) memh(#u6)=Nt8.new
      Bits.set(Begin, Begin+16);
      return true;

    // Store high half
    case S2_storerf_io:  // memh(Rs32+#s11:1)=Rt.H32
    case S2_pstorerft_io:  // if (Pv4) memh(Rs32+#u6:1)=Rt.H32
    case S2_pstorerff_io:  // if (!Pv4) memh(Rs32+#u6:1)=Rt.H32
    case S4_pstorerftnew_io:  // if (Pv4.new) memh(Rs32+#u6:1)=Rt.H32
    case S4_pstorerffnew_io:  // if (!Pv4.new) memh(Rs32+#u6:1)=Rt.H32
    case S2_storerf_pi:  // memh(Rx32++#s4:1)=Rt.H32
    case S2_pstorerft_pi:  // if (Pv4) memh(Rx32++#s4:1)=Rt.H32
    case S2_pstorerff_pi:  // if (!Pv4) memh(Rx32++#s4:1)=Rt.H32
    case S2_pstorerftnew_pi:  // if (Pv4.new) memh(Rx32++#s4:1)=Rt.H32
    case S2_pstorerffnew_pi:  // if (!Pv4.new) memh(Rx32++#s4:1)=Rt.H32
    case S4_storerf_ap:  // memh(Re32=#U6)=Rt.H32
    case S2_storerf_pr:  // memh(Rx32++Mu2)=Rt.H32
    case S4_storerf_ur:  // memh(Ru32<<#u2+#U6)=Rt.H32
    case S2_storerf_pbr:  // memh(Rx32++Mu2:brev)=Rt.H32
    case S2_storerf_pci:  // memh(Rx32++#s4:1:circ(Mu2))=Rt.H32
    case S2_storerf_pcr:  // memh(Rx32++I:circ(Mu2))=Rt.H32
    case S4_storerf_rr:  // memh(Rs32+Ru32<<#u2)=Rt.H32
    case S4_pstorerft_rr:  // if (Pv4) memh(Rs32+Ru32<<#u2)=Rt.H32
    case S4_pstorerff_rr:  // if (!Pv4) memh(Rs32+Ru32<<#u2)=Rt.H32
    case S4_pstorerftnew_rr:  // if (Pv4.new) memh(Rs32+Ru32<<#u2)=Rt.H32
    case S4_pstorerffnew_rr:  // if (!Pv4.new) memh(Rs32+Ru32<<#u2)=Rt.H32
    case S2_storerfgp:  // memh(gp+#u16:1)=Rt.H32
    case S4_pstorerft_abs:  // if (Pv4) memh(#u6)=Rt.H32
    case S4_pstorerff_abs:  // if (!Pv4) memh(#u6)=Rt.H32
    case S4_pstorerftnew_abs:  // if (Pv4.new) memh(#u6)=Rt.H32
    case S4_pstorerffnew_abs:  // if (!Pv4.new) memh(#u6)=Rt.H32
      Bits.set(Begin+16, Begin+32);
      return true;
  }

  return false;
}

// For an instruction with opcode Opc, calculate the set of bits that it
// uses in a register in operand OpN. This only calculates the set of used
// bits for cases where it does not depend on any operands (as is the case
// in shifts, for example). For concrete instructions from a program, the
// operand may be a subregister of a larger register, while Bits would
// correspond to the larger register in its entirety. Because of that,
// the parameter Begin can be used to indicate which bit of Bits should be
// considered the LSB of the operand.
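// For example, A2_sxtb reads only the low byte of its source (OpN=1), so
// with Begin=32 (the source being the high word of a 64-bit register) the
// bits [32,40) of Bits would be set.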
bool HexagonBitSimplify::getUsedBits(unsigned Opc, unsigned OpN,
      BitVector &Bits, uint16_t Begin, const HexagonInstrInfo &HII) {
  using namespace Hexagon;

  const MCInstrDesc &D = HII.get(Opc);
  if (D.mayStore()) {
    if (OpN == D.getNumOperands()-1)
      return getUsedBitsInStore(Opc, Bits, Begin);
    return false;
  }

  switch (Opc) {
    // One register source. Used bits: R1[0-7].
    case A2_sxtb:
    case A2_zxtb:
    case A4_cmpbeqi:
    case A4_cmpbgti:
    case A4_cmpbgtui:
      if (OpN == 1) {
        Bits.set(Begin, Begin+8);
        return true;
      }
      break;

    // One register source. Used bits: R1[0-15].
    case A2_aslh:
    case A2_sxth:
    case A2_zxth:
    case A4_cmpheqi:
    case A4_cmphgti:
    case A4_cmphgtui:
      if (OpN == 1) {
        Bits.set(Begin, Begin+16);
        return true;
      }
      break;

    // One register source. Used bits: R1[16-31].
    case A2_asrh:
      if (OpN == 1) {
        Bits.set(Begin+16, Begin+32);
        return true;
      }
      break;

    // Two register sources. Used bits: R1[0-7], R2[0-7].
    case A4_cmpbeq:
    case A4_cmpbgt:
    case A4_cmpbgtu:
      if (OpN == 1) {
        Bits.set(Begin, Begin+8);
        return true;
      }
      break;

    // Two register sources. Used bits: R1[0-15], R2[0-15].
    case A4_cmpheq:
    case A4_cmphgt:
    case A4_cmphgtu:
    case A2_addh_h16_ll:
    case A2_addh_h16_sat_ll:
    case A2_addh_l16_ll:
    case A2_addh_l16_sat_ll:
    case A2_combine_ll:
    case A2_subh_h16_ll:
    case A2_subh_h16_sat_ll:
    case A2_subh_l16_ll:
    case A2_subh_l16_sat_ll:
    case M2_mpy_acc_ll_s0:
    case M2_mpy_acc_ll_s1:
    case M2_mpy_acc_sat_ll_s0:
    case M2_mpy_acc_sat_ll_s1:
    case M2_mpy_ll_s0:
    case M2_mpy_ll_s1:
    case M2_mpy_nac_ll_s0:
    case M2_mpy_nac_ll_s1:
    case M2_mpy_nac_sat_ll_s0:
    case M2_mpy_nac_sat_ll_s1:
    case M2_mpy_rnd_ll_s0:
    case M2_mpy_rnd_ll_s1:
    case M2_mpy_sat_ll_s0:
    case M2_mpy_sat_ll_s1:
    case M2_mpy_sat_rnd_ll_s0:
    case M2_mpy_sat_rnd_ll_s1:
    case M2_mpyd_acc_ll_s0:
    case M2_mpyd_acc_ll_s1:
    case M2_mpyd_ll_s0:
    case M2_mpyd_ll_s1:
    case M2_mpyd_nac_ll_s0:
    case M2_mpyd_nac_ll_s1:
    case M2_mpyd_rnd_ll_s0:
    case M2_mpyd_rnd_ll_s1:
    case M2_mpyu_acc_ll_s0:
    case M2_mpyu_acc_ll_s1:
    case M2_mpyu_ll_s0:
    case M2_mpyu_ll_s1:
    case M2_mpyu_nac_ll_s0:
    case M2_mpyu_nac_ll_s1:
    case M2_mpyud_acc_ll_s0:
    case M2_mpyud_acc_ll_s1:
    case M2_mpyud_ll_s0:
    case M2_mpyud_ll_s1:
    case M2_mpyud_nac_ll_s0:
    case M2_mpyud_nac_ll_s1:
      if (OpN == 1 || OpN == 2) {
        Bits.set(Begin, Begin+16);
        return true;
      }
      break;

    // Two register sources. Used bits: R1[0-15], R2[16-31].
    case A2_addh_h16_lh:
    case A2_addh_h16_sat_lh:
    case A2_combine_lh:
    case A2_subh_h16_lh:
    case A2_subh_h16_sat_lh:
    case M2_mpy_acc_lh_s0:
    case M2_mpy_acc_lh_s1:
    case M2_mpy_acc_sat_lh_s0:
    case M2_mpy_acc_sat_lh_s1:
    case M2_mpy_lh_s0:
    case M2_mpy_lh_s1:
    case M2_mpy_nac_lh_s0:
    case M2_mpy_nac_lh_s1:
    case M2_mpy_nac_sat_lh_s0:
    case M2_mpy_nac_sat_lh_s1:
    case M2_mpy_rnd_lh_s0:
    case M2_mpy_rnd_lh_s1:
    case M2_mpy_sat_lh_s0:
    case M2_mpy_sat_lh_s1:
    case M2_mpy_sat_rnd_lh_s0:
    case M2_mpy_sat_rnd_lh_s1:
    case M2_mpyd_acc_lh_s0:
    case M2_mpyd_acc_lh_s1:
    case M2_mpyd_lh_s0:
    case M2_mpyd_lh_s1:
    case M2_mpyd_nac_lh_s0:
    case M2_mpyd_nac_lh_s1:
    case M2_mpyd_rnd_lh_s0:
    case M2_mpyd_rnd_lh_s1:
    case M2_mpyu_acc_lh_s0:
    case M2_mpyu_acc_lh_s1:
    case M2_mpyu_lh_s0:
    case M2_mpyu_lh_s1:
    case M2_mpyu_nac_lh_s0:
    case M2_mpyu_nac_lh_s1:
    case M2_mpyud_acc_lh_s0:
    case M2_mpyud_acc_lh_s1:
    case M2_mpyud_lh_s0:
    case M2_mpyud_lh_s1:
    case M2_mpyud_nac_lh_s0:
    case M2_mpyud_nac_lh_s1:
    // These four are actually LH.
    case A2_addh_l16_hl:
    case A2_addh_l16_sat_hl:
    case A2_subh_l16_hl:
    case A2_subh_l16_sat_hl:
      if (OpN == 1) {
        Bits.set(Begin, Begin+16);
        return true;
      }
      if (OpN == 2) {
        Bits.set(Begin+16, Begin+32);
        return true;
      }
      break;

    // Two register sources, used bits: R1[16-31], R2[0-15].
    case A2_addh_h16_hl:
    case A2_addh_h16_sat_hl:
    case A2_combine_hl:
    case A2_subh_h16_hl:
    case A2_subh_h16_sat_hl:
    case M2_mpy_acc_hl_s0:
    case M2_mpy_acc_hl_s1:
    case M2_mpy_acc_sat_hl_s0:
    case M2_mpy_acc_sat_hl_s1:
    case M2_mpy_hl_s0:
    case M2_mpy_hl_s1:
    case M2_mpy_nac_hl_s0:
    case M2_mpy_nac_hl_s1:
    case M2_mpy_nac_sat_hl_s0:
    case M2_mpy_nac_sat_hl_s1:
    case M2_mpy_rnd_hl_s0:
    case M2_mpy_rnd_hl_s1:
    case M2_mpy_sat_hl_s0:
    case M2_mpy_sat_hl_s1:
    case M2_mpy_sat_rnd_hl_s0:
    case M2_mpy_sat_rnd_hl_s1:
    case M2_mpyd_acc_hl_s0:
    case M2_mpyd_acc_hl_s1:
    case M2_mpyd_hl_s0:
    case M2_mpyd_hl_s1:
    case M2_mpyd_nac_hl_s0:
    case M2_mpyd_nac_hl_s1:
    case M2_mpyd_rnd_hl_s0:
    case M2_mpyd_rnd_hl_s1:
    case M2_mpyu_acc_hl_s0:
    case M2_mpyu_acc_hl_s1:
    case M2_mpyu_hl_s0:
    case M2_mpyu_hl_s1:
    case M2_mpyu_nac_hl_s0:
    case M2_mpyu_nac_hl_s1:
    case M2_mpyud_acc_hl_s0:
    case M2_mpyud_acc_hl_s1:
    case M2_mpyud_hl_s0:
    case M2_mpyud_hl_s1:
    case M2_mpyud_nac_hl_s0:
    case M2_mpyud_nac_hl_s1:
      if (OpN == 1) {
        Bits.set(Begin+16, Begin+32);
        return true;
      }
      if (OpN == 2) {
        Bits.set(Begin, Begin+16);
        return true;
      }
      break;

    // Two register sources, used bits: R1[16-31], R2[16-31].
    case A2_addh_h16_hh:
    case A2_addh_h16_sat_hh:
    case A2_combine_hh:
    case A2_subh_h16_hh:
    case A2_subh_h16_sat_hh:
    case M2_mpy_acc_hh_s0:
    case M2_mpy_acc_hh_s1:
    case M2_mpy_acc_sat_hh_s0:
    case M2_mpy_acc_sat_hh_s1:
    case M2_mpy_hh_s0:
    case M2_mpy_hh_s1:
    case M2_mpy_nac_hh_s0:
    case M2_mpy_nac_hh_s1:
    case M2_mpy_nac_sat_hh_s0:
    case M2_mpy_nac_sat_hh_s1:
    case M2_mpy_rnd_hh_s0:
    case M2_mpy_rnd_hh_s1:
    case M2_mpy_sat_hh_s0:
    case M2_mpy_sat_hh_s1:
    case M2_mpy_sat_rnd_hh_s0:
    case M2_mpy_sat_rnd_hh_s1:
    case M2_mpyd_acc_hh_s0:
    case M2_mpyd_acc_hh_s1:
    case M2_mpyd_hh_s0:
    case M2_mpyd_hh_s1:
    case M2_mpyd_nac_hh_s0:
    case M2_mpyd_nac_hh_s1:
    case M2_mpyd_rnd_hh_s0:
    case M2_mpyd_rnd_hh_s1:
    case M2_mpyu_acc_hh_s0:
    case M2_mpyu_acc_hh_s1:
    case M2_mpyu_hh_s0:
    case M2_mpyu_hh_s1:
    case M2_mpyu_nac_hh_s0:
    case M2_mpyu_nac_hh_s1:
    case M2_mpyud_acc_hh_s0:
    case M2_mpyud_acc_hh_s1:
    case M2_mpyud_hh_s0:
    case M2_mpyud_hh_s1:
    case M2_mpyud_nac_hh_s0:
    case M2_mpyud_nac_hh_s1:
      if (OpN == 1 || OpN == 2) {
        Bits.set(Begin+16, Begin+32);
        return true;
      }
      break;
  }

  return false;
}

// Calculate the register class that matches Reg:Sub. For example, if
// %1 is a double register, then %1:isub_hi would match the "int"
// register class.
const TargetRegisterClass *HexagonBitSimplify::getFinalVRegClass(
      const BitTracker::RegisterRef &RR, MachineRegisterInfo &MRI) {
  if (!Register::isVirtualRegister(RR.Reg))
    return nullptr;
  auto *RC = MRI.getRegClass(RR.Reg);
  if (RR.Sub == 0)
    return RC;
  auto &HRI = static_cast<const HexagonRegisterInfo&>(
                  *MRI.getTargetRegisterInfo());

  auto VerifySR = [&HRI] (const TargetRegisterClass *RC, unsigned Sub) -> void {
    (void)HRI;
    assert(Sub == HRI.getHexagonSubRegIndex(*RC, Hexagon::ps_sub_lo) ||
           Sub == HRI.getHexagonSubRegIndex(*RC, Hexagon::ps_sub_hi));
  };

  switch (RC->getID()) {
    case Hexagon::DoubleRegsRegClassID:
      VerifySR(RC, RR.Sub);
      return &Hexagon::IntRegsRegClass;
    case Hexagon::HvxWRRegClassID:
      VerifySR(RC, RR.Sub);
      return &Hexagon::HvxVRRegClass;
  }
  return nullptr;
}

// Check if RD could be replaced with RS at any possible use of RD.
// For example a predicate register cannot be replaced with an integer
// register, but a 64-bit register with a subregister can be replaced
// with a 32-bit register.
bool HexagonBitSimplify::isTransparentCopy(const BitTracker::RegisterRef &RD,
      const BitTracker::RegisterRef &RS, MachineRegisterInfo &MRI) {
  if (!Register::isVirtualRegister(RD.Reg) ||
      !Register::isVirtualRegister(RS.Reg))
    return false;
  // Return false if one (or both) classes are nullptr.
  auto *DRC = getFinalVRegClass(RD, MRI);
  if (!DRC)
    return false;

  return DRC == getFinalVRegClass(RS, MRI);
}

bool HexagonBitSimplify::hasTiedUse(unsigned Reg, MachineRegisterInfo &MRI,
      unsigned NewSub) {
  if (!PreserveTiedOps)
    return false;
  return llvm::any_of(MRI.use_operands(Reg),
                      [NewSub] (const MachineOperand &Op) -> bool {
                        return Op.getSubReg() != NewSub && Op.isTied();
                      });
}

namespace {

  class DeadCodeElimination {
  public:
    DeadCodeElimination(MachineFunction &mf, MachineDominatorTree &mdt)
      : MF(mf), HII(*MF.getSubtarget<HexagonSubtarget>().getInstrInfo()),
        MDT(mdt), MRI(mf.getRegInfo()) {}

    bool run() {
      return runOnNode(MDT.getRootNode());
    }

  private:
    bool isDead(unsigned R) const;
    bool runOnNode(MachineDomTreeNode *N);

    MachineFunction &MF;
    const HexagonInstrInfo &HII;
    MachineDominatorTree &MDT;
    MachineRegisterInfo &MRI;
  };

} // end anonymous namespace

bool DeadCodeElimination::isDead(unsigned R) const {
  for (auto I = MRI.use_begin(R), E = MRI.use_end(); I != E; ++I) {
    MachineInstr *UseI = I->getParent();
    if (UseI->isDebugValue())
      continue;
    if (UseI->isPHI()) {
      assert(!UseI->getOperand(0).getSubReg());
      Register DR = UseI->getOperand(0).getReg();
      if (DR == R)
        continue;
    }
    return false;
  }
  return true;
}

bool DeadCodeElimination::runOnNode(MachineDomTreeNode *N) {
  bool Changed = false;

  for (auto *DTN : children<MachineDomTreeNode*>(N))
    Changed |= runOnNode(DTN);

  MachineBasicBlock *B = N->getBlock();
  std::vector<MachineInstr*> Instrs;
  for (auto I = B->rbegin(), E = B->rend(); I != E; ++I)
    Instrs.push_back(&*I);

  for (auto MI : Instrs) {
    unsigned Opc = MI->getOpcode();
    // Do not touch lifetime markers. This is why the target-independent DCE
    // cannot be used.
    if (Opc == TargetOpcode::LIFETIME_START ||
        Opc == TargetOpcode::LIFETIME_END)
      continue;
    bool Store = false;
    if (MI->isInlineAsm())
      continue;
    // Delete PHIs if possible.
    if (!MI->isPHI() && !MI->isSafeToMove(nullptr, Store))
      continue;

    bool AllDead = true;
    SmallVector<unsigned,2> Regs;
    for (auto &Op : MI->operands()) {
      if (!Op.isReg() || !Op.isDef())
        continue;
      Register R = Op.getReg();
      if (!Register::isVirtualRegister(R) || !isDead(R)) {
        AllDead = false;
        break;
      }
      Regs.push_back(R);
    }
    if (!AllDead)
      continue;

    B->erase(MI);
    for (unsigned i = 0, n = Regs.size(); i != n; ++i)
      MRI.markUsesInDebugValueAsUndef(Regs[i]);
    Changed = true;
  }

  return Changed;
}

namespace {

  // Eliminate redundant instructions
  //
  // This transformation will identify instructions where the output register
  // is the same as one of its input registers. This only works on instructions
  // that define a single register (unlike post-increment loads, for example).
  // The equality check is actually more detailed: the code calculates which
  // bits of the output are used, and only compares these bits with the input
  // registers.
  // If the output matches an input, the instruction is replaced with COPY.
  // The copies will be removed by another transformation.
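  // Example (hypothetical registers): in
  //   %1 = A2_zxth %2
  // where every use of %1 reads only its low 16 bits, those bits are equal
  // to %2[0-15], so the instruction can be rewritten as %1 = COPY %2.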
  class RedundantInstrElimination : public Transformation {
  public:
    RedundantInstrElimination(BitTracker &bt, const HexagonInstrInfo &hii,
          const HexagonRegisterInfo &hri, MachineRegisterInfo &mri)
      : Transformation(true), HII(hii), HRI(hri), MRI(mri), BT(bt) {}

    bool processBlock(MachineBasicBlock &B, const RegisterSet &AVs) override;

  private:
    bool isLossyShiftLeft(const MachineInstr &MI, unsigned OpN,
          unsigned &LostB, unsigned &LostE);
    bool isLossyShiftRight(const MachineInstr &MI, unsigned OpN,
          unsigned &LostB, unsigned &LostE);
    bool computeUsedBits(unsigned Reg, BitVector &Bits);
    bool computeUsedBits(const MachineInstr &MI, unsigned OpN, BitVector &Bits,
          uint16_t Begin);
    bool usedBitsEqual(BitTracker::RegisterRef RD, BitTracker::RegisterRef RS);

    const HexagonInstrInfo &HII;
    const HexagonRegisterInfo &HRI;
    MachineRegisterInfo &MRI;
    BitTracker &BT;
  };

} // end anonymous namespace

// Check if the instruction is a lossy shift left, where the input being
// shifted is the operand OpN of MI. If true, [LostB, LostE) is the range
// of bit indices that are lost.
bool RedundantInstrElimination::isLossyShiftLeft(const MachineInstr &MI,
      unsigned OpN, unsigned &LostB, unsigned &LostE) {
  using namespace Hexagon;

  unsigned Opc = MI.getOpcode();
  unsigned ImN, RegN, Width;
  switch (Opc) {
    case S2_asl_i_p:
      ImN = 2;
      RegN = 1;
      Width = 64;
      break;
    case S2_asl_i_p_acc:
    case S2_asl_i_p_and:
    case S2_asl_i_p_nac:
    case S2_asl_i_p_or:
    case S2_asl_i_p_xacc:
      ImN = 3;
      RegN = 2;
      Width = 64;
      break;
    case S2_asl_i_r:
      ImN = 2;
      RegN = 1;
      Width = 32;
      break;
    case S2_addasl_rrri:
    case S4_andi_asl_ri:
    case S4_ori_asl_ri:
    case S4_addi_asl_ri:
    case S4_subi_asl_ri:
    case S2_asl_i_r_acc:
    case S2_asl_i_r_and:
    case S2_asl_i_r_nac:
    case S2_asl_i_r_or:
    case S2_asl_i_r_sat:
    case S2_asl_i_r_xacc:
      ImN = 3;
      RegN = 2;
      Width = 32;
      break;
    default:
      return false;
  }

  if (RegN != OpN)
    return false;

  assert(MI.getOperand(ImN).isImm());
  unsigned S = MI.getOperand(ImN).getImm();
  if (S == 0)
    return false;
  LostB = Width-S;
  LostE = Width;
  return true;
}

// Check if the instruction is a lossy shift right, where the input being
// shifted is the operand OpN of MI. If true, [LostB, LostE) is the range
// of bit indices that are lost.
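// For example, S2_lsr_i_r %R, #3 discards bits [0,3) of the shifted input,
// so LostB = 0 and LostE = 3.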
bool RedundantInstrElimination::isLossyShiftRight(const MachineInstr &MI,
      unsigned OpN, unsigned &LostB, unsigned &LostE) {
  using namespace Hexagon;

  unsigned Opc = MI.getOpcode();
  unsigned ImN, RegN;
  switch (Opc) {
    case S2_asr_i_p:
    case S2_lsr_i_p:
      ImN = 2;
      RegN = 1;
      break;
    case S2_asr_i_p_acc:
    case S2_asr_i_p_and:
    case S2_asr_i_p_nac:
    case S2_asr_i_p_or:
    case S2_lsr_i_p_acc:
    case S2_lsr_i_p_and:
    case S2_lsr_i_p_nac:
    case S2_lsr_i_p_or:
    case S2_lsr_i_p_xacc:
      ImN = 3;
      RegN = 2;
      break;
    case S2_asr_i_r:
    case S2_lsr_i_r:
      ImN = 2;
      RegN = 1;
      break;
    case S4_andi_lsr_ri:
    case S4_ori_lsr_ri:
    case S4_addi_lsr_ri:
    case S4_subi_lsr_ri:
    case S2_asr_i_r_acc:
    case S2_asr_i_r_and:
    case S2_asr_i_r_nac:
    case S2_asr_i_r_or:
    case S2_lsr_i_r_acc:
    case S2_lsr_i_r_and:
    case S2_lsr_i_r_nac:
    case S2_lsr_i_r_or:
    case S2_lsr_i_r_xacc:
      ImN = 3;
      RegN = 2;
      break;

    default:
      return false;
  }

  if (RegN != OpN)
    return false;

  assert(MI.getOperand(ImN).isImm());
  unsigned S = MI.getOperand(ImN).getImm();
  LostB = 0;
  LostE = S;
  return true;
}

// Calculate the bit vector that corresponds to the used bits of register Reg.
// The vector Bits has the same size as the size of Reg in bits. If the
// calculation fails (i.e. the used bits are unknown), it returns false.
// Otherwise, it returns true and sets the corresponding bits in Bits.
bool RedundantInstrElimination::computeUsedBits(unsigned Reg, BitVector &Bits) {
  BitVector Used(Bits.size());
  RegisterSet Visited;
  std::vector<unsigned> Pending;
  Pending.push_back(Reg);

  for (unsigned i = 0; i < Pending.size(); ++i) {
    unsigned R = Pending[i];
    if (Visited.has(R))
      continue;
    Visited.insert(R);
    for (auto I = MRI.use_begin(R), E = MRI.use_end(); I != E; ++I) {
      BitTracker::RegisterRef UR = *I;
      unsigned B, W;
      if (!HBS::getSubregMask(UR, B, W, MRI))
        return false;
      MachineInstr &UseI = *I->getParent();
      if (UseI.isPHI() || UseI.isCopy()) {
        Register DefR = UseI.getOperand(0).getReg();
        if (!Register::isVirtualRegister(DefR))
          return false;
        Pending.push_back(DefR);
      } else {
        if (!computeUsedBits(UseI, I.getOperandNo(), Used, B))
          return false;
      }
    }
  }
  Bits |= Used;
  return true;
}

// Calculate the bits used by instruction MI in a register in operand OpN.
// Return true/false if the calculation succeeds/fails. If it succeeds, set
// used bits in Bits. This function does not reset any bits in Bits, so
// subsequent calls over different instructions will result in the union
// of the used bits in all these instructions.
// The register in question may be used with a sub-register, whereas Bits
// holds the bits for the entire register. To keep track of that, the
// argument Begin indicates where in Bits is the least significant bit
// of the register used in operand OpN. For example, in instruction:
//   %1 = S2_lsr_i_r %2:isub_hi, 10
// the operand 1 is a 32-bit register, which happens to be a subregister
// of the 64-bit register %2, and that subregister starts at position 32.
// In this case Begin=32, since Bits[32] would be the least significant bit
// of %2:isub_hi.
bool RedundantInstrElimination::computeUsedBits(const MachineInstr &MI,
      unsigned OpN, BitVector &Bits, uint16_t Begin) {
  unsigned Opc = MI.getOpcode();
  BitVector T(Bits.size());
  bool GotBits = HBS::getUsedBits(Opc, OpN, T, Begin, HII);
  // Even if we don't have bits yet, we could still provide some information
  // if the instruction is a lossy shift: the lost bits will be marked as
  // not used.
  unsigned LB, LE;
  if (isLossyShiftLeft(MI, OpN, LB, LE) || isLossyShiftRight(MI, OpN, LB, LE)) {
    assert(MI.getOperand(OpN).isReg());
    BitTracker::RegisterRef RR = MI.getOperand(OpN);
    const TargetRegisterClass *RC = HBS::getFinalVRegClass(RR, MRI);
    uint16_t Width = HRI.getRegSizeInBits(*RC);

    if (!GotBits)
      T.set(Begin, Begin+Width);
    assert(LB <= LE && LB < Width && LE <= Width);
    T.reset(Begin+LB, Begin+LE);
    GotBits = true;
  }
  if (GotBits)
    Bits |= T;
  return GotBits;
}

// Calculates the used bits in RD ("defined register"), and checks if these
// bits in RS ("used register") and RD are identical.
bool RedundantInstrElimination::usedBitsEqual(BitTracker::RegisterRef RD,
      BitTracker::RegisterRef RS) {
  const BitTracker::RegisterCell &DC = BT.lookup(RD.Reg);
  const BitTracker::RegisterCell &SC = BT.lookup(RS.Reg);

  unsigned DB, DW;
  if (!HBS::getSubregMask(RD, DB, DW, MRI))
    return false;
  unsigned SB, SW;
  if (!HBS::getSubregMask(RS, SB, SW, MRI))
    return false;
  if (SW != DW)
    return false;

  BitVector Used(DC.width());
  if (!computeUsedBits(RD.Reg, Used))
    return false;

  for (unsigned i = 0; i != DW; ++i)
    if (Used[i+DB] && DC[DB+i] != SC[SB+i])
      return false;
  return true;
}

bool RedundantInstrElimination::processBlock(MachineBasicBlock &B,
      const RegisterSet&) {
  if (!BT.reached(&B))
    return false;
  bool Changed = false;

  for (auto I = B.begin(), E = B.end(), NextI = I; I != E; ++I) {
    NextI = std::next(I);
    MachineInstr *MI = &*I;

    if (MI->getOpcode() == TargetOpcode::COPY)
      continue;
    if (MI->isPHI() || MI->hasUnmodeledSideEffects() || MI->isInlineAsm())
      continue;
    unsigned NumD = MI->getDesc().getNumDefs();
    if (NumD != 1)
      continue;

    BitTracker::RegisterRef RD = MI->getOperand(0);
    if (!BT.has(RD.Reg))
      continue;
    const BitTracker::RegisterCell &DC = BT.lookup(RD.Reg);
    auto At = MachineBasicBlock::iterator(MI);

    // Find a source operand that is equal to the result.
    for (auto &Op : MI->uses()) {
      if (!Op.isReg())
        continue;
      BitTracker::RegisterRef RS = Op;
      if (!BT.has(RS.Reg))
        continue;
      if (!HBS::isTransparentCopy(RD, RS, MRI))
        continue;

      unsigned BN, BW;
      if (!HBS::getSubregMask(RS, BN, BW, MRI))
        continue;

      const BitTracker::RegisterCell &SC = BT.lookup(RS.Reg);
      if (!usedBitsEqual(RD, RS) && !HBS::isEqual(DC, 0, SC, BN, BW))
        continue;

      // If found, replace the instruction with a COPY.
      const DebugLoc &DL = MI->getDebugLoc();
      const TargetRegisterClass *FRC = HBS::getFinalVRegClass(RD, MRI);
      Register NewR = MRI.createVirtualRegister(FRC);
      MachineInstr *CopyI =
          BuildMI(B, At, DL, HII.get(TargetOpcode::COPY), NewR)
            .addReg(RS.Reg, 0, RS.Sub);
      HBS::replaceSubWithSub(RD.Reg, RD.Sub, NewR, 0, MRI);
      // This pass can create copies between registers that don't have the
      // exact same values. Updating the tracker has to involve updating
      // all dependent cells. Example:
      //   %1  = inst %2     ; %1 != %2, but used bits are equal
      //
      //   %3  = copy %2     ; <- inserted
      //    ... = %3          ; <- replaced from %2
      // Indirectly, we can create a "copy" between %1 and %2 even
      // though their exact values do not match.
      BT.visit(*CopyI);
      Changed = true;
      break;
    }
  }

  return Changed;
}

namespace {

  // Recognize instructions that produce constant values known at compile-time.
  // Replace them with register definitions that load these constants directly.
  class ConstGeneration : public Transformation {
  public:
    ConstGeneration(BitTracker &bt, const HexagonInstrInfo &hii,
        MachineRegisterInfo &mri)
      : Transformation(true), HII(hii), MRI(mri), BT(bt) {}

    bool processBlock(MachineBasicBlock &B, const RegisterSet &AVs) override;
    static bool isTfrConst(const MachineInstr &MI);

  private:
    unsigned genTfrConst(const TargetRegisterClass *RC, int64_t C,
        MachineBasicBlock &B, MachineBasicBlock::iterator At, DebugLoc &DL);

    const HexagonInstrInfo &HII;
    MachineRegisterInfo &MRI;
    BitTracker &BT;
  };

} // end anonymous namespace

bool ConstGeneration::isTfrConst(const MachineInstr &MI) {
  unsigned Opc = MI.getOpcode();
  switch (Opc) {
    case Hexagon::A2_combineii:
    case Hexagon::A4_combineii:
    case Hexagon::A2_tfrsi:
    case Hexagon::A2_tfrpi:
    case Hexagon::PS_true:
    case Hexagon::PS_false:
    case Hexagon::CONST32:
    case Hexagon::CONST64:
      return true;
  }
  return false;
}

// Generate a transfer-immediate instruction that is appropriate for the
// register class and the actual value being transferred.
unsigned ConstGeneration::genTfrConst(const TargetRegisterClass *RC, int64_t C,
      MachineBasicBlock &B, MachineBasicBlock::iterator At, DebugLoc &DL) {
  Register Reg = MRI.createVirtualRegister(RC);
  if (RC == &Hexagon::IntRegsRegClass) {
    BuildMI(B, At, DL, HII.get(Hexagon::A2_tfrsi), Reg)
      .addImm(int32_t(C));
    return Reg;
  }

  if (RC == &Hexagon::DoubleRegsRegClass) {
    if (isInt<8>(C)) {
      BuildMI(B, At, DL, HII.get(Hexagon::A2_tfrpi), Reg)
        .addImm(C);
      return Reg;
    }

    unsigned Lo = Lo_32(C), Hi = Hi_32(C);
    if (isInt<8>(Lo) || isInt<8>(Hi)) {
      unsigned Opc = isInt<8>(Lo) ? Hexagon::A2_combineii
                                  : Hexagon::A4_combineii;
      BuildMI(B, At, DL, HII.get(Opc), Reg)
        .addImm(int32_t(Hi))
        .addImm(int32_t(Lo));
      return Reg;
    }

    BuildMI(B, At, DL, HII.get(Hexagon::CONST64), Reg)
      .addImm(C);
    return Reg;
  }

  if (RC == &Hexagon::PredRegsRegClass) {
    unsigned Opc;
    if (C == 0)
      Opc = Hexagon::PS_false;
    else if ((C & 0xFF) == 0xFF)
      Opc = Hexagon::PS_true;
    else
      return 0;
    BuildMI(B, At, DL, HII.get(Opc), Reg);
    return Reg;
  }

  return 0;
}

bool ConstGeneration::processBlock(MachineBasicBlock &B, const RegisterSet&) {
  if (!BT.reached(&B))
    return false;
  bool Changed = false;
  RegisterSet Defs;

  for (auto I = B.begin(), E = B.end(); I != E; ++I) {
    if (isTfrConst(*I))
      continue;
    Defs.clear();
    HBS::getInstrDefs(*I, Defs);
    if (Defs.count() != 1)
      continue;
    unsigned DR = Defs.find_first();
    if (!Register::isVirtualRegister(DR))
      continue;
    uint64_t U;
    const BitTracker::RegisterCell &DRC = BT.lookup(DR);
    if (HBS::getConst(DRC, 0, DRC.width(), U)) {
      int64_t C = U;
      DebugLoc DL = I->getDebugLoc();
      auto At = I->isPHI() ? B.getFirstNonPHI() : I;
      unsigned ImmReg = genTfrConst(MRI.getRegClass(DR), C, B, At, DL);
      if (ImmReg) {
        HBS::replaceReg(DR, ImmReg, MRI);
        BT.put(ImmReg, DRC);
        Changed = true;
      }
    }
  }
  return Changed;
}

namespace {

  // Identify pairs of available registers which hold identical values.
  // In such cases, only one of them needs to be calculated, the other one
  // will be defined as a copy of the first.
  class CopyGeneration : public Transformation {
  public:
    CopyGeneration(BitTracker &bt, const HexagonInstrInfo &hii,
        const HexagonRegisterInfo &hri, MachineRegisterInfo &mri)
      : Transformation(true), HII(hii), HRI(hri), MRI(mri), BT(bt) {}

    bool processBlock(MachineBasicBlock &B, const RegisterSet &AVs) override;

  private:
    bool findMatch(const BitTracker::RegisterRef &Inp,
        BitTracker::RegisterRef &Out, const RegisterSet &AVs);

    const HexagonInstrInfo &HII;
    const HexagonRegisterInfo &HRI;
    MachineRegisterInfo &MRI;
    BitTracker &BT;
    RegisterSet Forbidden;
  };

  // Eliminate register copies RD = RS, by replacing the uses of RD with
  // uses of RS.
  class CopyPropagation : public Transformation {
  public:
    CopyPropagation(const HexagonRegisterInfo &hri, MachineRegisterInfo &mri)
      : Transformation(false), HRI(hri), MRI(mri) {}

    bool processBlock(MachineBasicBlock &B, const RegisterSet &AVs) override;

    static bool isCopyReg(unsigned Opc, bool NoConv);

  private:
    bool propagateRegCopy(MachineInstr &MI);

    const HexagonRegisterInfo &HRI;
    MachineRegisterInfo &MRI;
  };

} // end anonymous namespace

/// Check if there is a register in AVs that is identical to Inp. If so,
/// set Out to the found register. The output may be a pair Reg:Sub.
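/// For a 32-bit input this also considers the halves of 64-bit registers in
/// AVs, so Out may refer to a subregister, e.g. %3:isub_lo or %3:isub_hi
/// (register number hypothetical).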
bool CopyGeneration::findMatch(const BitTracker::RegisterRef &Inp,
      BitTracker::RegisterRef &Out, const RegisterSet &AVs) {
  if (!BT.has(Inp.Reg))
    return false;
  const BitTracker::RegisterCell &InpRC = BT.lookup(Inp.Reg);
  auto *FRC = HBS::getFinalVRegClass(Inp, MRI);
  unsigned B, W;
  if (!HBS::getSubregMask(Inp, B, W, MRI))
    return false;

  for (unsigned R = AVs.find_first(); R; R = AVs.find_next(R)) {
    if (!BT.has(R) || Forbidden[R])
      continue;
    const BitTracker::RegisterCell &RC = BT.lookup(R);
    unsigned RW = RC.width();
    if (W == RW) {
      if (FRC != MRI.getRegClass(R))
        continue;
      if (!HBS::isTransparentCopy(R, Inp, MRI))
        continue;
      if (!HBS::isEqual(InpRC, B, RC, 0, W))
        continue;
      Out.Reg = R;
      Out.Sub = 0;
      return true;
    }
    // Check if there is a super-register, whose part (with a subregister)
    // is equal to the input.
    // Only do double registers for now.
    if (W*2 != RW)
      continue;
    if (MRI.getRegClass(R) != &Hexagon::DoubleRegsRegClass)
      continue;

    if (HBS::isEqual(InpRC, B, RC, 0, W))
      Out.Sub = Hexagon::isub_lo;
    else if (HBS::isEqual(InpRC, B, RC, W, W))
      Out.Sub = Hexagon::isub_hi;
    else
      continue;
    Out.Reg = R;
    if (HBS::isTransparentCopy(Out, Inp, MRI))
      return true;
  }
  return false;
}

bool CopyGeneration::processBlock(MachineBasicBlock &B,
      const RegisterSet &AVs) {
  if (!BT.reached(&B))
    return false;
  RegisterSet AVB(AVs);
  bool Changed = false;
  RegisterSet Defs;

  for (auto I = B.begin(), E = B.end(), NextI = I; I != E;
       ++I, AVB.insert(Defs)) {
    NextI = std::next(I);
    Defs.clear();
    HBS::getInstrDefs(*I, Defs);

    unsigned Opc = I->getOpcode();
    if (CopyPropagation::isCopyReg(Opc, false) ||
        ConstGeneration::isTfrConst(*I))
      continue;

    DebugLoc DL = I->getDebugLoc();
    auto At = I->isPHI() ? B.getFirstNonPHI() : I;

    for (unsigned R = Defs.find_first(); R; R = Defs.find_next(R)) {
      BitTracker::RegisterRef MR;
      auto *FRC = HBS::getFinalVRegClass(R, MRI);

      if (findMatch(R, MR, AVB)) {
        Register NewR = MRI.createVirtualRegister(FRC);
        BuildMI(B, At, DL, HII.get(TargetOpcode::COPY), NewR)
          .addReg(MR.Reg, 0, MR.Sub);
        BT.put(BitTracker::RegisterRef(NewR), BT.get(MR));
        HBS::replaceReg(R, NewR, MRI);
        Forbidden.insert(R);
        continue;
      }

      if (FRC == &Hexagon::DoubleRegsRegClass ||
          FRC == &Hexagon::HvxWRRegClass) {
        // Try to generate REG_SEQUENCE.
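        // If both halves of R are individually available in other registers,
        // rebuild R from them with a REG_SEQUENCE instead of recomputing it.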
        unsigned SubLo = HRI.getHexagonSubRegIndex(*FRC, Hexagon::ps_sub_lo);
        unsigned SubHi = HRI.getHexagonSubRegIndex(*FRC, Hexagon::ps_sub_hi);
        BitTracker::RegisterRef TL = { R, SubLo };
        BitTracker::RegisterRef TH = { R, SubHi };
        BitTracker::RegisterRef ML, MH;
        if (findMatch(TL, ML, AVB) && findMatch(TH, MH, AVB)) {
          auto *FRC = HBS::getFinalVRegClass(R, MRI);
          Register NewR = MRI.createVirtualRegister(FRC);
          BuildMI(B, At, DL, HII.get(TargetOpcode::REG_SEQUENCE), NewR)
            .addReg(ML.Reg, 0, ML.Sub)
            .addImm(SubLo)
            .addReg(MH.Reg, 0, MH.Sub)
            .addImm(SubHi);
          BT.put(BitTracker::RegisterRef(NewR), BT.get(R));
          HBS::replaceReg(R, NewR, MRI);
          Forbidden.insert(R);
        }
      }
    }
  }

  return Changed;
}

bool CopyPropagation::isCopyReg(unsigned Opc, bool NoConv) {
  switch (Opc) {
    case TargetOpcode::COPY:
    case TargetOpcode::REG_SEQUENCE:
    case Hexagon::A4_combineir:
    case Hexagon::A4_combineri:
      return true;
    case Hexagon::A2_tfr:
    case Hexagon::A2_tfrp:
    case Hexagon::A2_combinew:
    case Hexagon::V6_vcombine:
      return NoConv;
    default:
      break;
  }
  return false;
}

bool CopyPropagation::propagateRegCopy(MachineInstr &MI) {
  bool Changed = false;
  unsigned Opc = MI.getOpcode();
  BitTracker::RegisterRef RD = MI.getOperand(0);
  assert(MI.getOperand(0).getSubReg() == 0);

  switch (Opc) {
    case TargetOpcode::COPY:
    case Hexagon::A2_tfr:
    case Hexagon::A2_tfrp: {
      BitTracker::RegisterRef RS = MI.getOperand(1);
      if (!HBS::isTransparentCopy(RD, RS, MRI))
        break;
      if (RS.Sub != 0)
        Changed = HBS::replaceRegWithSub(RD.Reg, RS.Reg, RS.Sub, MRI);
      else
        Changed = HBS::replaceReg(RD.Reg, RS.Reg, MRI);
      break;
    }
    case TargetOpcode::REG_SEQUENCE: {
      BitTracker::RegisterRef SL, SH;
      if (HBS::parseRegSequence(MI, SL, SH, MRI)) {
        const TargetRegisterClass &RC = *MRI.getRegClass(RD.Reg);
        unsigned SubLo = HRI.getHexagonSubRegIndex(RC, Hexagon::ps_sub_lo);
        unsigned SubHi = HRI.getHexagonSubRegIndex(RC, Hexagon::ps_sub_hi);
        Changed = HBS::replaceSubWithSub(RD.Reg, SubLo, SL.Reg, SL.Sub, MRI);
        Changed |= HBS::replaceSubWithSub(RD.Reg, SubHi, SH.Reg, SH.Sub, MRI);
      }
      break;
    }
    case Hexagon::A2_combinew:
    case Hexagon::V6_vcombine: {
      const TargetRegisterClass &RC = *MRI.getRegClass(RD.Reg);
      unsigned SubLo = HRI.getHexagonSubRegIndex(RC, Hexagon::ps_sub_lo);
      unsigned SubHi = HRI.getHexagonSubRegIndex(RC, Hexagon::ps_sub_hi);
      BitTracker::RegisterRef RH = MI.getOperand(1), RL = MI.getOperand(2);
      Changed = HBS::replaceSubWithSub(RD.Reg, SubLo, RL.Reg, RL.Sub, MRI);
      Changed |= HBS::replaceSubWithSub(RD.Reg, SubHi, RH.Reg, RH.Sub, MRI);
      break;
    }
    case Hexagon::A4_combineir:
    case Hexagon::A4_combineri: {
      unsigned SrcX = (Opc == Hexagon::A4_combineir) ? 2 : 1;
      unsigned Sub = (Opc == Hexagon::A4_combineir) ? Hexagon::isub_lo
Hexagon::isub_lo 1707 : Hexagon::isub_hi; 1708 BitTracker::RegisterRef RS = MI.getOperand(SrcX); 1709 Changed = HBS::replaceSubWithSub(RD.Reg, Sub, RS.Reg, RS.Sub, MRI); 1710 break; 1711 } 1712 } 1713 return Changed; 1714 } 1715 1716 bool CopyPropagation::processBlock(MachineBasicBlock &B, const RegisterSet&) { 1717 std::vector<MachineInstr*> Instrs; 1718 for (auto I = B.rbegin(), E = B.rend(); I != E; ++I) 1719 Instrs.push_back(&*I); 1720 1721 bool Changed = false; 1722 for (auto I : Instrs) { 1723 unsigned Opc = I->getOpcode(); 1724 if (!CopyPropagation::isCopyReg(Opc, true)) 1725 continue; 1726 Changed |= propagateRegCopy(*I); 1727 } 1728 1729 return Changed; 1730 } 1731 1732 namespace { 1733 1734 // Recognize patterns that can be simplified and replace them with the 1735 // simpler forms. 1736 // This is by no means complete 1737 class BitSimplification : public Transformation { 1738 public: 1739 BitSimplification(BitTracker &bt, const MachineDominatorTree &mdt, 1740 const HexagonInstrInfo &hii, const HexagonRegisterInfo &hri, 1741 MachineRegisterInfo &mri, MachineFunction &mf) 1742 : Transformation(true), MDT(mdt), HII(hii), HRI(hri), MRI(mri), 1743 MF(mf), BT(bt) {} 1744 1745 bool processBlock(MachineBasicBlock &B, const RegisterSet &AVs) override; 1746 1747 private: 1748 struct RegHalf : public BitTracker::RegisterRef { 1749 bool Low; // Low/High halfword. 1750 }; 1751 1752 bool matchHalf(unsigned SelfR, const BitTracker::RegisterCell &RC, 1753 unsigned B, RegHalf &RH); 1754 bool validateReg(BitTracker::RegisterRef R, unsigned Opc, unsigned OpNum); 1755 1756 bool matchPackhl(unsigned SelfR, const BitTracker::RegisterCell &RC, 1757 BitTracker::RegisterRef &Rs, BitTracker::RegisterRef &Rt); 1758 unsigned getCombineOpcode(bool HLow, bool LLow); 1759 1760 bool genStoreUpperHalf(MachineInstr *MI); 1761 bool genStoreImmediate(MachineInstr *MI); 1762 bool genPackhl(MachineInstr *MI, BitTracker::RegisterRef RD, 1763 const BitTracker::RegisterCell &RC); 1764 bool genExtractHalf(MachineInstr *MI, BitTracker::RegisterRef RD, 1765 const BitTracker::RegisterCell &RC); 1766 bool genCombineHalf(MachineInstr *MI, BitTracker::RegisterRef RD, 1767 const BitTracker::RegisterCell &RC); 1768 bool genExtractLow(MachineInstr *MI, BitTracker::RegisterRef RD, 1769 const BitTracker::RegisterCell &RC); 1770 bool genBitSplit(MachineInstr *MI, BitTracker::RegisterRef RD, 1771 const BitTracker::RegisterCell &RC, const RegisterSet &AVs); 1772 bool simplifyTstbit(MachineInstr *MI, BitTracker::RegisterRef RD, 1773 const BitTracker::RegisterCell &RC); 1774 bool simplifyExtractLow(MachineInstr *MI, BitTracker::RegisterRef RD, 1775 const BitTracker::RegisterCell &RC, const RegisterSet &AVs); 1776 bool simplifyRCmp0(MachineInstr *MI, BitTracker::RegisterRef RD); 1777 1778 // Cache of created instructions to avoid creating duplicates. 1779 // XXX Currently only used by genBitSplit. 1780 std::vector<MachineInstr*> NewMIs; 1781 1782 const MachineDominatorTree &MDT; 1783 const HexagonInstrInfo &HII; 1784 const HexagonRegisterInfo &HRI; 1785 MachineRegisterInfo &MRI; 1786 MachineFunction &MF; 1787 BitTracker &BT; 1788 }; 1789 1790 } // end anonymous namespace 1791 1792 // Check if the bits [B..B+16) in register cell RC form a valid halfword, 1793 // i.e. [0..16), [16..32), etc. of some register. If so, return true and 1794 // set the information about the found register in RH. 
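// For example (illustrative), if RC[B..B+15] are references to bits 16..31 of
// a 32-bit virtual register %1, the result is RH = { Reg:%1, Sub:0, Low:false },
// i.e. the matched halfword is %1.h.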
1795 bool BitSimplification::matchHalf(unsigned SelfR, 1796 const BitTracker::RegisterCell &RC, unsigned B, RegHalf &RH) { 1797 // XXX This could be searching in the set of available registers, in case 1798 // the match is not exact. 1799 1800 // Match 16-bit chunks, where the RC[B..B+15] references exactly one 1801 // register and all the bits B..B+15 match between RC and the register. 1802 // This is meant to match "v1[0-15]", where v1 = { [0]:0 [1-15]:v1... }, 1803 // and RC = { [0]:0 [1-15]:v1[1-15]... }. 1804 bool Low = false; 1805 unsigned I = B; 1806 while (I < B+16 && RC[I].num()) 1807 I++; 1808 if (I == B+16) 1809 return false; 1810 1811 unsigned Reg = RC[I].RefI.Reg; 1812 unsigned P = RC[I].RefI.Pos; // The RefI.Pos will be advanced by I-B. 1813 if (P < I-B) 1814 return false; 1815 unsigned Pos = P - (I-B); 1816 1817 if (Reg == 0 || Reg == SelfR) // Don't match "self". 1818 return false; 1819 if (!Register::isVirtualRegister(Reg)) 1820 return false; 1821 if (!BT.has(Reg)) 1822 return false; 1823 1824 const BitTracker::RegisterCell &SC = BT.lookup(Reg); 1825 if (Pos+16 > SC.width()) 1826 return false; 1827 1828 for (unsigned i = 0; i < 16; ++i) { 1829 const BitTracker::BitValue &RV = RC[i+B]; 1830 if (RV.Type == BitTracker::BitValue::Ref) { 1831 if (RV.RefI.Reg != Reg) 1832 return false; 1833 if (RV.RefI.Pos != i+Pos) 1834 return false; 1835 continue; 1836 } 1837 if (RC[i+B] != SC[i+Pos]) 1838 return false; 1839 } 1840 1841 unsigned Sub = 0; 1842 switch (Pos) { 1843 case 0: 1844 Sub = Hexagon::isub_lo; 1845 Low = true; 1846 break; 1847 case 16: 1848 Sub = Hexagon::isub_lo; 1849 Low = false; 1850 break; 1851 case 32: 1852 Sub = Hexagon::isub_hi; 1853 Low = true; 1854 break; 1855 case 48: 1856 Sub = Hexagon::isub_hi; 1857 Low = false; 1858 break; 1859 default: 1860 return false; 1861 } 1862 1863 RH.Reg = Reg; 1864 RH.Sub = Sub; 1865 RH.Low = Low; 1866 // If the subregister is not valid with the register, set it to 0. 1867 if (!HBS::getFinalVRegClass(RH, MRI)) 1868 RH.Sub = 0; 1869 1870 return true; 1871 } 1872 1873 bool BitSimplification::validateReg(BitTracker::RegisterRef R, unsigned Opc, 1874 unsigned OpNum) { 1875 auto *OpRC = HII.getRegClass(HII.get(Opc), OpNum, &HRI, MF); 1876 auto *RRC = HBS::getFinalVRegClass(R, MRI); 1877 return OpRC->hasSubClassEq(RRC); 1878 } 1879 1880 // Check if RC matches the pattern of a S2_packhl. If so, return true and 1881 // set the inputs Rs and Rt. 1882 bool BitSimplification::matchPackhl(unsigned SelfR, 1883 const BitTracker::RegisterCell &RC, BitTracker::RegisterRef &Rs, 1884 BitTracker::RegisterRef &Rt) { 1885 RegHalf L1, H1, L2, H2; 1886 1887 if (!matchHalf(SelfR, RC, 0, L2) || !matchHalf(SelfR, RC, 16, L1)) 1888 return false; 1889 if (!matchHalf(SelfR, RC, 32, H2) || !matchHalf(SelfR, RC, 48, H1)) 1890 return false; 1891 1892 // Rs = H1.L1, Rt = H2.L2 1893 if (H1.Reg != L1.Reg || H1.Sub != L1.Sub || H1.Low || !L1.Low) 1894 return false; 1895 if (H2.Reg != L2.Reg || H2.Sub != L2.Sub || H2.Low || !L2.Low) 1896 return false; 1897 1898 Rs = H1; 1899 Rt = H2; 1900 return true; 1901 } 1902 1903 unsigned BitSimplification::getCombineOpcode(bool HLow, bool LLow) { 1904 return HLow ? LLow ? Hexagon::A2_combine_ll 1905 : Hexagon::A2_combine_lh 1906 : LLow ? Hexagon::A2_combine_hl 1907 : Hexagon::A2_combine_hh; 1908 } 1909 1910 // If MI stores the upper halfword of a register (potentially obtained via 1911 // shifts or extracts), replace it with a storerf instruction. This could 1912 // cause the "extraction" code to become dead. 
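// Illustrative transformation (virtual register numbers are invented):
//   %2 = S2_lsr_i_r %1, 16
//   S2_storerh_io %0, 0, %2
// =>
//   S2_storerf_io %0, 0, %1      (stores %1.h)
// The shift that produced %2 may then become dead.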
bool BitSimplification::genStoreUpperHalf(MachineInstr *MI) {
  unsigned Opc = MI->getOpcode();
  if (Opc != Hexagon::S2_storerh_io)
    return false;

  MachineOperand &ValOp = MI->getOperand(2);
  BitTracker::RegisterRef RS = ValOp;
  if (!BT.has(RS.Reg))
    return false;
  const BitTracker::RegisterCell &RC = BT.lookup(RS.Reg);
  RegHalf H;
  if (!matchHalf(0, RC, 0, H))
    return false;
  if (H.Low)
    return false;
  MI->setDesc(HII.get(Hexagon::S2_storerf_io));
  ValOp.setReg(H.Reg);
  ValOp.setSubReg(H.Sub);
  return true;
}

// If MI stores a value known at compile-time, and the value is within a range
// that avoids using constant-extenders, replace it with a store-immediate.
bool BitSimplification::genStoreImmediate(MachineInstr *MI) {
  unsigned Opc = MI->getOpcode();
  unsigned Align = 0;
  switch (Opc) {
    case Hexagon::S2_storeri_io:
      Align++;
      LLVM_FALLTHROUGH;
    case Hexagon::S2_storerh_io:
      Align++;
      LLVM_FALLTHROUGH;
    case Hexagon::S2_storerb_io:
      break;
    default:
      return false;
  }

  // Avoid stores to frame-indices (due to an unknown offset).
  if (!MI->getOperand(0).isReg())
    return false;
  MachineOperand &OffOp = MI->getOperand(1);
  if (!OffOp.isImm())
    return false;

  int64_t Off = OffOp.getImm();
  // Offset is u6:a. Sadly, there is no isShiftedUInt(n,x).
  if (!isUIntN(6+Align, Off) || (Off & ((1<<Align)-1)))
    return false;
  // Source register:
  BitTracker::RegisterRef RS = MI->getOperand(2);
  if (!BT.has(RS.Reg))
    return false;
  const BitTracker::RegisterCell &RC = BT.lookup(RS.Reg);
  uint64_t U;
  if (!HBS::getConst(RC, 0, RC.width(), U))
    return false;

  // Only consider 8-bit values to avoid constant-extenders.
  int V;
  switch (Opc) {
    case Hexagon::S2_storerb_io:
      V = int8_t(U);
      break;
    case Hexagon::S2_storerh_io:
      V = int16_t(U);
      break;
    case Hexagon::S2_storeri_io:
      V = int32_t(U);
      break;
    default:
      // Opc is already checked above to be one of the three store instructions.
      // This silences a -Wuninitialized false positive on GCC 5.4.
      llvm_unreachable("Unexpected store opcode");
  }
  if (!isInt<8>(V))
    return false;

  MI->RemoveOperand(2);
  switch (Opc) {
    case Hexagon::S2_storerb_io:
      MI->setDesc(HII.get(Hexagon::S4_storeirb_io));
      break;
    case Hexagon::S2_storerh_io:
      MI->setDesc(HII.get(Hexagon::S4_storeirh_io));
      break;
    case Hexagon::S2_storeri_io:
      MI->setDesc(HII.get(Hexagon::S4_storeiri_io));
      break;
  }
  MI->addOperand(MachineOperand::CreateImm(V));
  return true;
}

// If MI is equivalent to S2_packhl, generate the S2_packhl. MI could be the
// last instruction in a sequence that results in something equivalent to
// the pack-halfwords. The intent is to cause the entire sequence to become
// dead.
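// Illustrative example (virtual register numbers are invented): if the cell
// of %3 has the form
//   { [0-15]:%2[0-15] [16-31]:%1[0-15] [32-47]:%2[16-31] [48-63]:%1[16-31] }
// then the same value is produced by
//   %20 = S2_packhl %1, %2
// and uses of %3 can be redirected to %20.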
bool BitSimplification::genPackhl(MachineInstr *MI,
      BitTracker::RegisterRef RD, const BitTracker::RegisterCell &RC) {
  unsigned Opc = MI->getOpcode();
  if (Opc == Hexagon::S2_packhl)
    return false;
  BitTracker::RegisterRef Rs, Rt;
  if (!matchPackhl(RD.Reg, RC, Rs, Rt))
    return false;
  if (!validateReg(Rs, Hexagon::S2_packhl, 1) ||
      !validateReg(Rt, Hexagon::S2_packhl, 2))
    return false;

  MachineBasicBlock &B = *MI->getParent();
  Register NewR = MRI.createVirtualRegister(&Hexagon::DoubleRegsRegClass);
  DebugLoc DL = MI->getDebugLoc();
  auto At = MI->isPHI() ? B.getFirstNonPHI()
                        : MachineBasicBlock::iterator(MI);
  BuildMI(B, At, DL, HII.get(Hexagon::S2_packhl), NewR)
      .addReg(Rs.Reg, 0, Rs.Sub)
      .addReg(Rt.Reg, 0, Rt.Sub);
  HBS::replaceSubWithSub(RD.Reg, RD.Sub, NewR, 0, MRI);
  BT.put(BitTracker::RegisterRef(NewR), RC);
  return true;
}

// If MI produces a halfword of the input in the low half of the output,
// replace it with zero-extend or extractu.
bool BitSimplification::genExtractHalf(MachineInstr *MI,
      BitTracker::RegisterRef RD, const BitTracker::RegisterCell &RC) {
  RegHalf L;
  // Check for halfword in low 16 bits, zeros elsewhere.
  if (!matchHalf(RD.Reg, RC, 0, L) || !HBS::isZero(RC, 16, 16))
    return false;

  unsigned Opc = MI->getOpcode();
  MachineBasicBlock &B = *MI->getParent();
  DebugLoc DL = MI->getDebugLoc();

  // Prefer zxth, since zxth can go in any slot, while extractu only in
  // slots 2 and 3.
  unsigned NewR = 0;
  auto At = MI->isPHI() ? B.getFirstNonPHI()
                        : MachineBasicBlock::iterator(MI);
  if (L.Low && Opc != Hexagon::A2_zxth) {
    if (validateReg(L, Hexagon::A2_zxth, 1)) {
      NewR = MRI.createVirtualRegister(&Hexagon::IntRegsRegClass);
      BuildMI(B, At, DL, HII.get(Hexagon::A2_zxth), NewR)
          .addReg(L.Reg, 0, L.Sub);
    }
  } else if (!L.Low && Opc != Hexagon::S2_lsr_i_r) {
    if (validateReg(L, Hexagon::S2_lsr_i_r, 1)) {
      NewR = MRI.createVirtualRegister(&Hexagon::IntRegsRegClass);
      BuildMI(B, MI, DL, HII.get(Hexagon::S2_lsr_i_r), NewR)
          .addReg(L.Reg, 0, L.Sub)
          .addImm(16);
    }
  }
  if (NewR == 0)
    return false;
  HBS::replaceSubWithSub(RD.Reg, RD.Sub, NewR, 0, MRI);
  BT.put(BitTracker::RegisterRef(NewR), RC);
  return true;
}

// If MI is equivalent to a combine(.L/.H, .L/.H) replace it with the
// combine.
bool BitSimplification::genCombineHalf(MachineInstr *MI,
      BitTracker::RegisterRef RD, const BitTracker::RegisterCell &RC) {
  RegHalf L, H;
  // Check for combine h/l
  if (!matchHalf(RD.Reg, RC, 0, L) || !matchHalf(RD.Reg, RC, 16, H))
    return false;
  // Do nothing if this is just a reg copy.
  if (L.Reg == H.Reg && L.Sub == H.Sub && !H.Low && L.Low)
    return false;

  unsigned Opc = MI->getOpcode();
  unsigned COpc = getCombineOpcode(H.Low, L.Low);
  if (COpc == Opc)
    return false;
  if (!validateReg(H, COpc, 1) || !validateReg(L, COpc, 2))
    return false;

  MachineBasicBlock &B = *MI->getParent();
  DebugLoc DL = MI->getDebugLoc();
  Register NewR = MRI.createVirtualRegister(&Hexagon::IntRegsRegClass);
  auto At = MI->isPHI() ?
B.getFirstNonPHI() 2099 : MachineBasicBlock::iterator(MI); 2100 BuildMI(B, At, DL, HII.get(COpc), NewR) 2101 .addReg(H.Reg, 0, H.Sub) 2102 .addReg(L.Reg, 0, L.Sub); 2103 HBS::replaceSubWithSub(RD.Reg, RD.Sub, NewR, 0, MRI); 2104 BT.put(BitTracker::RegisterRef(NewR), RC); 2105 return true; 2106 } 2107 2108 // If MI resets high bits of a register and keeps the lower ones, replace it 2109 // with zero-extend byte/half, and-immediate, or extractu, as appropriate. 2110 bool BitSimplification::genExtractLow(MachineInstr *MI, 2111 BitTracker::RegisterRef RD, const BitTracker::RegisterCell &RC) { 2112 unsigned Opc = MI->getOpcode(); 2113 switch (Opc) { 2114 case Hexagon::A2_zxtb: 2115 case Hexagon::A2_zxth: 2116 case Hexagon::S2_extractu: 2117 return false; 2118 } 2119 if (Opc == Hexagon::A2_andir && MI->getOperand(2).isImm()) { 2120 int32_t Imm = MI->getOperand(2).getImm(); 2121 if (isInt<10>(Imm)) 2122 return false; 2123 } 2124 2125 if (MI->hasUnmodeledSideEffects() || MI->isInlineAsm()) 2126 return false; 2127 unsigned W = RC.width(); 2128 while (W > 0 && RC[W-1].is(0)) 2129 W--; 2130 if (W == 0 || W == RC.width()) 2131 return false; 2132 unsigned NewOpc = (W == 8) ? Hexagon::A2_zxtb 2133 : (W == 16) ? Hexagon::A2_zxth 2134 : (W < 10) ? Hexagon::A2_andir 2135 : Hexagon::S2_extractu; 2136 MachineBasicBlock &B = *MI->getParent(); 2137 DebugLoc DL = MI->getDebugLoc(); 2138 2139 for (auto &Op : MI->uses()) { 2140 if (!Op.isReg()) 2141 continue; 2142 BitTracker::RegisterRef RS = Op; 2143 if (!BT.has(RS.Reg)) 2144 continue; 2145 const BitTracker::RegisterCell &SC = BT.lookup(RS.Reg); 2146 unsigned BN, BW; 2147 if (!HBS::getSubregMask(RS, BN, BW, MRI)) 2148 continue; 2149 if (BW < W || !HBS::isEqual(RC, 0, SC, BN, W)) 2150 continue; 2151 if (!validateReg(RS, NewOpc, 1)) 2152 continue; 2153 2154 Register NewR = MRI.createVirtualRegister(&Hexagon::IntRegsRegClass); 2155 auto At = MI->isPHI() ? B.getFirstNonPHI() 2156 : MachineBasicBlock::iterator(MI); 2157 auto MIB = BuildMI(B, At, DL, HII.get(NewOpc), NewR) 2158 .addReg(RS.Reg, 0, RS.Sub); 2159 if (NewOpc == Hexagon::A2_andir) 2160 MIB.addImm((1 << W) - 1); 2161 else if (NewOpc == Hexagon::S2_extractu) 2162 MIB.addImm(W).addImm(0); 2163 HBS::replaceSubWithSub(RD.Reg, RD.Sub, NewR, 0, MRI); 2164 BT.put(BitTracker::RegisterRef(NewR), RC); 2165 return true; 2166 } 2167 return false; 2168 } 2169 2170 bool BitSimplification::genBitSplit(MachineInstr *MI, 2171 BitTracker::RegisterRef RD, const BitTracker::RegisterCell &RC, 2172 const RegisterSet &AVs) { 2173 if (!GenBitSplit) 2174 return false; 2175 if (MaxBitSplit.getNumOccurrences()) { 2176 if (CountBitSplit >= MaxBitSplit) 2177 return false; 2178 } 2179 2180 unsigned Opc = MI->getOpcode(); 2181 switch (Opc) { 2182 case Hexagon::A4_bitsplit: 2183 case Hexagon::A4_bitspliti: 2184 return false; 2185 } 2186 2187 unsigned W = RC.width(); 2188 if (W != 32) 2189 return false; 2190 2191 auto ctlz = [] (const BitTracker::RegisterCell &C) -> unsigned { 2192 unsigned Z = C.width(); 2193 while (Z > 0 && C[Z-1].is(0)) 2194 --Z; 2195 return C.width() - Z; 2196 }; 2197 2198 // Count the number of leading zeros in the target RC. 2199 unsigned Z = ctlz(RC); 2200 if (Z == 0 || Z == W) 2201 return false; 2202 2203 // A simplistic analysis: assume the source register (the one being split) 2204 // is fully unknown, and that all its bits are self-references. 
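  // Illustrative shape of the cells being matched (W == 32, Z leading zeros
  // in RC, register names invented):
  //   RC = { [0..W-Z-1]:Src[Pos..]  [W-Z..31]:0 }   <- cell of this def
  //   SC = { [0..Z-1]:Src[P..]      [Z..31]:0 }     <- cell of some S in AVs
  // where the two bit ranges of Src are adjacent, so that both values can be
  // produced by a single A4_bitspliti of Src.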
2205 const BitTracker::BitValue &B0 = RC[0]; 2206 if (B0.Type != BitTracker::BitValue::Ref) 2207 return false; 2208 2209 unsigned SrcR = B0.RefI.Reg; 2210 unsigned SrcSR = 0; 2211 unsigned Pos = B0.RefI.Pos; 2212 2213 // All the non-zero bits should be consecutive bits from the same register. 2214 for (unsigned i = 1; i < W-Z; ++i) { 2215 const BitTracker::BitValue &V = RC[i]; 2216 if (V.Type != BitTracker::BitValue::Ref) 2217 return false; 2218 if (V.RefI.Reg != SrcR || V.RefI.Pos != Pos+i) 2219 return false; 2220 } 2221 2222 // Now, find the other bitfield among AVs. 2223 for (unsigned S = AVs.find_first(); S; S = AVs.find_next(S)) { 2224 // The number of leading zeros here should be the number of trailing 2225 // non-zeros in RC. 2226 unsigned SRC = MRI.getRegClass(S)->getID(); 2227 if (SRC != Hexagon::IntRegsRegClassID && 2228 SRC != Hexagon::DoubleRegsRegClassID) 2229 continue; 2230 if (!BT.has(S)) 2231 continue; 2232 const BitTracker::RegisterCell &SC = BT.lookup(S); 2233 if (SC.width() != W || ctlz(SC) != W-Z) 2234 continue; 2235 // The Z lower bits should now match SrcR. 2236 const BitTracker::BitValue &S0 = SC[0]; 2237 if (S0.Type != BitTracker::BitValue::Ref || S0.RefI.Reg != SrcR) 2238 continue; 2239 unsigned P = S0.RefI.Pos; 2240 2241 if (Pos <= P && (Pos + W-Z) != P) 2242 continue; 2243 if (P < Pos && (P + Z) != Pos) 2244 continue; 2245 // The starting bitfield position must be at a subregister boundary. 2246 if (std::min(P, Pos) != 0 && std::min(P, Pos) != 32) 2247 continue; 2248 2249 unsigned I; 2250 for (I = 1; I < Z; ++I) { 2251 const BitTracker::BitValue &V = SC[I]; 2252 if (V.Type != BitTracker::BitValue::Ref) 2253 break; 2254 if (V.RefI.Reg != SrcR || V.RefI.Pos != P+I) 2255 break; 2256 } 2257 if (I != Z) 2258 continue; 2259 2260 // Generate bitsplit where S is defined. 2261 if (MaxBitSplit.getNumOccurrences()) 2262 CountBitSplit++; 2263 MachineInstr *DefS = MRI.getVRegDef(S); 2264 assert(DefS != nullptr); 2265 DebugLoc DL = DefS->getDebugLoc(); 2266 MachineBasicBlock &B = *DefS->getParent(); 2267 auto At = DefS->isPHI() ? B.getFirstNonPHI() 2268 : MachineBasicBlock::iterator(DefS); 2269 if (MRI.getRegClass(SrcR)->getID() == Hexagon::DoubleRegsRegClassID) 2270 SrcSR = (std::min(Pos, P) == 32) ? Hexagon::isub_hi : Hexagon::isub_lo; 2271 if (!validateReg({SrcR,SrcSR}, Hexagon::A4_bitspliti, 1)) 2272 continue; 2273 unsigned ImmOp = Pos <= P ? W-Z : Z; 2274 2275 // Find an existing bitsplit instruction if one already exists. 2276 unsigned NewR = 0; 2277 for (MachineInstr *In : NewMIs) { 2278 if (In->getOpcode() != Hexagon::A4_bitspliti) 2279 continue; 2280 MachineOperand &Op1 = In->getOperand(1); 2281 if (Op1.getReg() != SrcR || Op1.getSubReg() != SrcSR) 2282 continue; 2283 if (In->getOperand(2).getImm() != ImmOp) 2284 continue; 2285 // Check if the target register is available here. 2286 MachineOperand &Op0 = In->getOperand(0); 2287 MachineInstr *DefI = MRI.getVRegDef(Op0.getReg()); 2288 assert(DefI != nullptr); 2289 if (!MDT.dominates(DefI, &*At)) 2290 continue; 2291 2292 // Found one that can be reused. 
2293 assert(Op0.getSubReg() == 0); 2294 NewR = Op0.getReg(); 2295 break; 2296 } 2297 if (!NewR) { 2298 NewR = MRI.createVirtualRegister(&Hexagon::DoubleRegsRegClass); 2299 auto NewBS = BuildMI(B, At, DL, HII.get(Hexagon::A4_bitspliti), NewR) 2300 .addReg(SrcR, 0, SrcSR) 2301 .addImm(ImmOp); 2302 NewMIs.push_back(NewBS); 2303 } 2304 if (Pos <= P) { 2305 HBS::replaceRegWithSub(RD.Reg, NewR, Hexagon::isub_lo, MRI); 2306 HBS::replaceRegWithSub(S, NewR, Hexagon::isub_hi, MRI); 2307 } else { 2308 HBS::replaceRegWithSub(S, NewR, Hexagon::isub_lo, MRI); 2309 HBS::replaceRegWithSub(RD.Reg, NewR, Hexagon::isub_hi, MRI); 2310 } 2311 return true; 2312 } 2313 2314 return false; 2315 } 2316 2317 // Check for tstbit simplification opportunity, where the bit being checked 2318 // can be tracked back to another register. For example: 2319 // %2 = S2_lsr_i_r %1, 5 2320 // %3 = S2_tstbit_i %2, 0 2321 // => 2322 // %3 = S2_tstbit_i %1, 5 2323 bool BitSimplification::simplifyTstbit(MachineInstr *MI, 2324 BitTracker::RegisterRef RD, const BitTracker::RegisterCell &RC) { 2325 unsigned Opc = MI->getOpcode(); 2326 if (Opc != Hexagon::S2_tstbit_i) 2327 return false; 2328 2329 unsigned BN = MI->getOperand(2).getImm(); 2330 BitTracker::RegisterRef RS = MI->getOperand(1); 2331 unsigned F, W; 2332 DebugLoc DL = MI->getDebugLoc(); 2333 if (!BT.has(RS.Reg) || !HBS::getSubregMask(RS, F, W, MRI)) 2334 return false; 2335 MachineBasicBlock &B = *MI->getParent(); 2336 auto At = MI->isPHI() ? B.getFirstNonPHI() 2337 : MachineBasicBlock::iterator(MI); 2338 2339 const BitTracker::RegisterCell &SC = BT.lookup(RS.Reg); 2340 const BitTracker::BitValue &V = SC[F+BN]; 2341 if (V.Type == BitTracker::BitValue::Ref && V.RefI.Reg != RS.Reg) { 2342 const TargetRegisterClass *TC = MRI.getRegClass(V.RefI.Reg); 2343 // Need to map V.RefI.Reg to a 32-bit register, i.e. if it is 2344 // a double register, need to use a subregister and adjust bit 2345 // number. 2346 unsigned P = std::numeric_limits<unsigned>::max(); 2347 BitTracker::RegisterRef RR(V.RefI.Reg, 0); 2348 if (TC == &Hexagon::DoubleRegsRegClass) { 2349 P = V.RefI.Pos; 2350 RR.Sub = Hexagon::isub_lo; 2351 if (P >= 32) { 2352 P -= 32; 2353 RR.Sub = Hexagon::isub_hi; 2354 } 2355 } else if (TC == &Hexagon::IntRegsRegClass) { 2356 P = V.RefI.Pos; 2357 } 2358 if (P != std::numeric_limits<unsigned>::max()) { 2359 unsigned NewR = MRI.createVirtualRegister(&Hexagon::PredRegsRegClass); 2360 BuildMI(B, At, DL, HII.get(Hexagon::S2_tstbit_i), NewR) 2361 .addReg(RR.Reg, 0, RR.Sub) 2362 .addImm(P); 2363 HBS::replaceReg(RD.Reg, NewR, MRI); 2364 BT.put(NewR, RC); 2365 return true; 2366 } 2367 } else if (V.is(0) || V.is(1)) { 2368 Register NewR = MRI.createVirtualRegister(&Hexagon::PredRegsRegClass); 2369 unsigned NewOpc = V.is(0) ? Hexagon::PS_false : Hexagon::PS_true; 2370 BuildMI(B, At, DL, HII.get(NewOpc), NewR); 2371 HBS::replaceReg(RD.Reg, NewR, MRI); 2372 return true; 2373 } 2374 2375 return false; 2376 } 2377 2378 // Detect whether RD is a bitfield extract (sign- or zero-extended) of 2379 // some register from the AVs set. Create a new corresponding instruction 2380 // at the location of MI. The intent is to recognize situations where 2381 // a sequence of instructions performs an operation that is equivalent to 2382 // an extract operation, such as a shift left followed by a shift right. 
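// Illustrative example (virtual register numbers are invented):
//   %2 = S2_asl_i_r %1, 16
//   %3 = S2_asr_i_r %2, 16
// =>
//   %3 = A2_sxth %1
// assuming %1 is among the available registers (AVs) at this point.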
2383 bool BitSimplification::simplifyExtractLow(MachineInstr *MI, 2384 BitTracker::RegisterRef RD, const BitTracker::RegisterCell &RC, 2385 const RegisterSet &AVs) { 2386 if (!GenExtract) 2387 return false; 2388 if (MaxExtract.getNumOccurrences()) { 2389 if (CountExtract >= MaxExtract) 2390 return false; 2391 CountExtract++; 2392 } 2393 2394 unsigned W = RC.width(); 2395 unsigned RW = W; 2396 unsigned Len; 2397 bool Signed; 2398 2399 // The code is mostly class-independent, except for the part that generates 2400 // the extract instruction, and establishes the source register (in case it 2401 // needs to use a subregister). 2402 const TargetRegisterClass *FRC = HBS::getFinalVRegClass(RD, MRI); 2403 if (FRC != &Hexagon::IntRegsRegClass && FRC != &Hexagon::DoubleRegsRegClass) 2404 return false; 2405 assert(RD.Sub == 0); 2406 2407 // Observation: 2408 // If the cell has a form of 00..0xx..x with k zeros and n remaining 2409 // bits, this could be an extractu of the n bits, but it could also be 2410 // an extractu of a longer field which happens to have 0s in the top 2411 // bit positions. 2412 // The same logic applies to sign-extended fields. 2413 // 2414 // Do not check for the extended extracts, since it would expand the 2415 // search space quite a bit. The search may be expensive as it is. 2416 2417 const BitTracker::BitValue &TopV = RC[W-1]; 2418 2419 // Eliminate candidates that have self-referential bits, since they 2420 // cannot be extracts from other registers. Also, skip registers that 2421 // have compile-time constant values. 2422 bool IsConst = true; 2423 for (unsigned I = 0; I != W; ++I) { 2424 const BitTracker::BitValue &V = RC[I]; 2425 if (V.Type == BitTracker::BitValue::Ref && V.RefI.Reg == RD.Reg) 2426 return false; 2427 IsConst = IsConst && (V.is(0) || V.is(1)); 2428 } 2429 if (IsConst) 2430 return false; 2431 2432 if (TopV.is(0) || TopV.is(1)) { 2433 bool S = TopV.is(1); 2434 for (--W; W > 0 && RC[W-1].is(S); --W) 2435 ; 2436 Len = W; 2437 Signed = S; 2438 // The sign bit must be a part of the field being extended. 2439 if (Signed) 2440 ++Len; 2441 } else { 2442 // This could still be a sign-extended extract. 2443 assert(TopV.Type == BitTracker::BitValue::Ref); 2444 if (TopV.RefI.Reg == RD.Reg || TopV.RefI.Pos == W-1) 2445 return false; 2446 for (--W; W > 0 && RC[W-1] == TopV; --W) 2447 ; 2448 // The top bits of RC are copies of TopV. One occurrence of TopV will 2449 // be a part of the field. 2450 Len = W + 1; 2451 Signed = true; 2452 } 2453 2454 // This would be just a copy. It should be handled elsewhere. 2455 if (Len == RW) 2456 return false; 2457 2458 LLVM_DEBUG({ 2459 dbgs() << __func__ << " on reg: " << printReg(RD.Reg, &HRI, RD.Sub) 2460 << ", MI: " << *MI; 2461 dbgs() << "Cell: " << RC << '\n'; 2462 dbgs() << "Expected bitfield size: " << Len << " bits, " 2463 << (Signed ? "sign" : "zero") << "-extended\n"; 2464 }); 2465 2466 bool Changed = false; 2467 2468 for (unsigned R = AVs.find_first(); R != 0; R = AVs.find_next(R)) { 2469 if (!BT.has(R)) 2470 continue; 2471 const BitTracker::RegisterCell &SC = BT.lookup(R); 2472 unsigned SW = SC.width(); 2473 2474 // The source can be longer than the destination, as long as its size is 2475 // a multiple of the size of the destination. Also, we would need to be 2476 // able to refer to the subregister in the source that would be of the 2477 // same size as the destination, but only check the sizes here. 
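    // For example, a 32-bit destination may be extracted from either half of
    // a 64-bit source; the subregister (isub_lo/isub_hi) is chosen further
    // below based on which word the field falls into.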
2478 if (SW < RW || (SW % RW) != 0) 2479 continue; 2480 2481 // The field can start at any offset in SC as long as it contains Len 2482 // bits and does not cross subregister boundary (if the source register 2483 // is longer than the destination). 2484 unsigned Off = 0; 2485 while (Off <= SW-Len) { 2486 unsigned OE = (Off+Len)/RW; 2487 if (OE != Off/RW) { 2488 // The assumption here is that if the source (R) is longer than the 2489 // destination, then the destination is a sequence of words of 2490 // size RW, and each such word in R can be accessed via a subregister. 2491 // 2492 // If the beginning and the end of the field cross the subregister 2493 // boundary, advance to the next subregister. 2494 Off = OE*RW; 2495 continue; 2496 } 2497 if (HBS::isEqual(RC, 0, SC, Off, Len)) 2498 break; 2499 ++Off; 2500 } 2501 2502 if (Off > SW-Len) 2503 continue; 2504 2505 // Found match. 2506 unsigned ExtOpc = 0; 2507 if (Off == 0) { 2508 if (Len == 8) 2509 ExtOpc = Signed ? Hexagon::A2_sxtb : Hexagon::A2_zxtb; 2510 else if (Len == 16) 2511 ExtOpc = Signed ? Hexagon::A2_sxth : Hexagon::A2_zxth; 2512 else if (Len < 10 && !Signed) 2513 ExtOpc = Hexagon::A2_andir; 2514 } 2515 if (ExtOpc == 0) { 2516 ExtOpc = 2517 Signed ? (RW == 32 ? Hexagon::S4_extract : Hexagon::S4_extractp) 2518 : (RW == 32 ? Hexagon::S2_extractu : Hexagon::S2_extractup); 2519 } 2520 unsigned SR = 0; 2521 // This only recognizes isub_lo and isub_hi. 2522 if (RW != SW && RW*2 != SW) 2523 continue; 2524 if (RW != SW) 2525 SR = (Off/RW == 0) ? Hexagon::isub_lo : Hexagon::isub_hi; 2526 Off = Off % RW; 2527 2528 if (!validateReg({R,SR}, ExtOpc, 1)) 2529 continue; 2530 2531 // Don't generate the same instruction as the one being optimized. 2532 if (MI->getOpcode() == ExtOpc) { 2533 // All possible ExtOpc's have the source in operand(1). 2534 const MachineOperand &SrcOp = MI->getOperand(1); 2535 if (SrcOp.getReg() == R) 2536 continue; 2537 } 2538 2539 DebugLoc DL = MI->getDebugLoc(); 2540 MachineBasicBlock &B = *MI->getParent(); 2541 Register NewR = MRI.createVirtualRegister(FRC); 2542 auto At = MI->isPHI() ? B.getFirstNonPHI() 2543 : MachineBasicBlock::iterator(MI); 2544 auto MIB = BuildMI(B, At, DL, HII.get(ExtOpc), NewR) 2545 .addReg(R, 0, SR); 2546 switch (ExtOpc) { 2547 case Hexagon::A2_sxtb: 2548 case Hexagon::A2_zxtb: 2549 case Hexagon::A2_sxth: 2550 case Hexagon::A2_zxth: 2551 break; 2552 case Hexagon::A2_andir: 2553 MIB.addImm((1u << Len) - 1); 2554 break; 2555 case Hexagon::S4_extract: 2556 case Hexagon::S2_extractu: 2557 case Hexagon::S4_extractp: 2558 case Hexagon::S2_extractup: 2559 MIB.addImm(Len) 2560 .addImm(Off); 2561 break; 2562 default: 2563 llvm_unreachable("Unexpected opcode"); 2564 } 2565 2566 HBS::replaceReg(RD.Reg, NewR, MRI); 2567 BT.put(BitTracker::RegisterRef(NewR), RC); 2568 Changed = true; 2569 break; 2570 } 2571 2572 return Changed; 2573 } 2574 2575 bool BitSimplification::simplifyRCmp0(MachineInstr *MI, 2576 BitTracker::RegisterRef RD) { 2577 unsigned Opc = MI->getOpcode(); 2578 if (Opc != Hexagon::A4_rcmpeqi && Opc != Hexagon::A4_rcmpneqi) 2579 return false; 2580 MachineOperand &CmpOp = MI->getOperand(2); 2581 if (!CmpOp.isImm() || CmpOp.getImm() != 0) 2582 return false; 2583 2584 const TargetRegisterClass *FRC = HBS::getFinalVRegClass(RD, MRI); 2585 if (FRC != &Hexagon::IntRegsRegClass && FRC != &Hexagon::DoubleRegsRegClass) 2586 return false; 2587 assert(RD.Sub == 0); 2588 2589 MachineBasicBlock &B = *MI->getParent(); 2590 const DebugLoc &DL = MI->getDebugLoc(); 2591 auto At = MI->isPHI() ? 
B.getFirstNonPHI() 2592 : MachineBasicBlock::iterator(MI); 2593 bool KnownZ = true; 2594 bool KnownNZ = false; 2595 2596 BitTracker::RegisterRef SR = MI->getOperand(1); 2597 if (!BT.has(SR.Reg)) 2598 return false; 2599 const BitTracker::RegisterCell &SC = BT.lookup(SR.Reg); 2600 unsigned F, W; 2601 if (!HBS::getSubregMask(SR, F, W, MRI)) 2602 return false; 2603 2604 for (uint16_t I = F; I != F+W; ++I) { 2605 const BitTracker::BitValue &V = SC[I]; 2606 if (!V.is(0)) 2607 KnownZ = false; 2608 if (V.is(1)) 2609 KnownNZ = true; 2610 } 2611 2612 auto ReplaceWithConst = [&](int C) { 2613 Register NewR = MRI.createVirtualRegister(FRC); 2614 BuildMI(B, At, DL, HII.get(Hexagon::A2_tfrsi), NewR) 2615 .addImm(C); 2616 HBS::replaceReg(RD.Reg, NewR, MRI); 2617 BitTracker::RegisterCell NewRC(W); 2618 for (uint16_t I = 0; I != W; ++I) { 2619 NewRC[I] = BitTracker::BitValue(C & 1); 2620 C = unsigned(C) >> 1; 2621 } 2622 BT.put(BitTracker::RegisterRef(NewR), NewRC); 2623 return true; 2624 }; 2625 2626 auto IsNonZero = [] (const MachineOperand &Op) { 2627 if (Op.isGlobal() || Op.isBlockAddress()) 2628 return true; 2629 if (Op.isImm()) 2630 return Op.getImm() != 0; 2631 if (Op.isCImm()) 2632 return !Op.getCImm()->isZero(); 2633 if (Op.isFPImm()) 2634 return !Op.getFPImm()->isZero(); 2635 return false; 2636 }; 2637 2638 auto IsZero = [] (const MachineOperand &Op) { 2639 if (Op.isGlobal() || Op.isBlockAddress()) 2640 return false; 2641 if (Op.isImm()) 2642 return Op.getImm() == 0; 2643 if (Op.isCImm()) 2644 return Op.getCImm()->isZero(); 2645 if (Op.isFPImm()) 2646 return Op.getFPImm()->isZero(); 2647 return false; 2648 }; 2649 2650 // If the source register is known to be 0 or non-0, the comparison can 2651 // be folded to a load of a constant. 2652 if (KnownZ || KnownNZ) { 2653 assert(KnownZ != KnownNZ && "Register cannot be both 0 and non-0"); 2654 return ReplaceWithConst(KnownZ == (Opc == Hexagon::A4_rcmpeqi)); 2655 } 2656 2657 // Special case: if the compare comes from a C2_muxii, then we know the 2658 // two possible constants that can be the source value. 2659 MachineInstr *InpDef = MRI.getVRegDef(SR.Reg); 2660 if (!InpDef) 2661 return false; 2662 if (SR.Sub == 0 && InpDef->getOpcode() == Hexagon::C2_muxii) { 2663 MachineOperand &Src1 = InpDef->getOperand(2); 2664 MachineOperand &Src2 = InpDef->getOperand(3); 2665 // Check if both are non-zero. 2666 bool KnownNZ1 = IsNonZero(Src1), KnownNZ2 = IsNonZero(Src2); 2667 if (KnownNZ1 && KnownNZ2) 2668 return ReplaceWithConst(Opc == Hexagon::A4_rcmpneqi); 2669 // Check if both are zero. 2670 bool KnownZ1 = IsZero(Src1), KnownZ2 = IsZero(Src2); 2671 if (KnownZ1 && KnownZ2) 2672 return ReplaceWithConst(Opc == Hexagon::A4_rcmpeqi); 2673 2674 // If for both operands we know that they are either 0 or non-0, 2675 // replace the comparison with a C2_muxii, using the same predicate 2676 // register, but with operands substituted with 0/1 accordingly. 2677 if ((KnownZ1 || KnownNZ1) && (KnownZ2 || KnownNZ2)) { 2678 Register NewR = MRI.createVirtualRegister(FRC); 2679 BuildMI(B, At, DL, HII.get(Hexagon::C2_muxii), NewR) 2680 .addReg(InpDef->getOperand(1).getReg()) 2681 .addImm(KnownZ1 == (Opc == Hexagon::A4_rcmpeqi)) 2682 .addImm(KnownZ2 == (Opc == Hexagon::A4_rcmpeqi)); 2683 HBS::replaceReg(RD.Reg, NewR, MRI); 2684 // Create a new cell with only the least significant bit unknown. 
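      // The C2_muxii created above selects between the immediates 0 and 1,
      // so bits [1..W) of the result are known to be zero; only bit 0
      // depends on the predicate.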
2685 BitTracker::RegisterCell NewRC(W); 2686 NewRC[0] = BitTracker::BitValue::self(); 2687 NewRC.fill(1, W, BitTracker::BitValue::Zero); 2688 BT.put(BitTracker::RegisterRef(NewR), NewRC); 2689 return true; 2690 } 2691 } 2692 2693 return false; 2694 } 2695 2696 bool BitSimplification::processBlock(MachineBasicBlock &B, 2697 const RegisterSet &AVs) { 2698 if (!BT.reached(&B)) 2699 return false; 2700 bool Changed = false; 2701 RegisterSet AVB = AVs; 2702 RegisterSet Defs; 2703 2704 for (auto I = B.begin(), E = B.end(); I != E; ++I, AVB.insert(Defs)) { 2705 MachineInstr *MI = &*I; 2706 Defs.clear(); 2707 HBS::getInstrDefs(*MI, Defs); 2708 2709 unsigned Opc = MI->getOpcode(); 2710 if (Opc == TargetOpcode::COPY || Opc == TargetOpcode::REG_SEQUENCE) 2711 continue; 2712 2713 if (MI->mayStore()) { 2714 bool T = genStoreUpperHalf(MI); 2715 T = T || genStoreImmediate(MI); 2716 Changed |= T; 2717 continue; 2718 } 2719 2720 if (Defs.count() != 1) 2721 continue; 2722 const MachineOperand &Op0 = MI->getOperand(0); 2723 if (!Op0.isReg() || !Op0.isDef()) 2724 continue; 2725 BitTracker::RegisterRef RD = Op0; 2726 if (!BT.has(RD.Reg)) 2727 continue; 2728 const TargetRegisterClass *FRC = HBS::getFinalVRegClass(RD, MRI); 2729 const BitTracker::RegisterCell &RC = BT.lookup(RD.Reg); 2730 2731 if (FRC->getID() == Hexagon::DoubleRegsRegClassID) { 2732 bool T = genPackhl(MI, RD, RC); 2733 T = T || simplifyExtractLow(MI, RD, RC, AVB); 2734 Changed |= T; 2735 continue; 2736 } 2737 2738 if (FRC->getID() == Hexagon::IntRegsRegClassID) { 2739 bool T = genBitSplit(MI, RD, RC, AVB); 2740 T = T || simplifyExtractLow(MI, RD, RC, AVB); 2741 T = T || genExtractHalf(MI, RD, RC); 2742 T = T || genCombineHalf(MI, RD, RC); 2743 T = T || genExtractLow(MI, RD, RC); 2744 T = T || simplifyRCmp0(MI, RD); 2745 Changed |= T; 2746 continue; 2747 } 2748 2749 if (FRC->getID() == Hexagon::PredRegsRegClassID) { 2750 bool T = simplifyTstbit(MI, RD, RC); 2751 Changed |= T; 2752 continue; 2753 } 2754 } 2755 return Changed; 2756 } 2757 2758 bool HexagonBitSimplify::runOnMachineFunction(MachineFunction &MF) { 2759 if (skipFunction(MF.getFunction())) 2760 return false; 2761 2762 auto &HST = MF.getSubtarget<HexagonSubtarget>(); 2763 auto &HRI = *HST.getRegisterInfo(); 2764 auto &HII = *HST.getInstrInfo(); 2765 2766 MDT = &getAnalysis<MachineDominatorTree>(); 2767 MachineRegisterInfo &MRI = MF.getRegInfo(); 2768 bool Changed; 2769 2770 Changed = DeadCodeElimination(MF, *MDT).run(); 2771 2772 const HexagonEvaluator HE(HRI, MRI, HII, MF); 2773 BitTracker BT(HE, MF); 2774 LLVM_DEBUG(BT.trace(true)); 2775 BT.run(); 2776 2777 MachineBasicBlock &Entry = MF.front(); 2778 2779 RegisterSet AIG; // Available registers for IG. 2780 ConstGeneration ImmG(BT, HII, MRI); 2781 Changed |= visitBlock(Entry, ImmG, AIG); 2782 2783 RegisterSet ARE; // Available registers for RIE. 2784 RedundantInstrElimination RIE(BT, HII, HRI, MRI); 2785 bool Ried = visitBlock(Entry, RIE, ARE); 2786 if (Ried) { 2787 Changed = true; 2788 BT.run(); 2789 } 2790 2791 RegisterSet ACG; // Available registers for CG. 2792 CopyGeneration CopyG(BT, HII, HRI, MRI); 2793 Changed |= visitBlock(Entry, CopyG, ACG); 2794 2795 RegisterSet ACP; // Available registers for CP. 2796 CopyPropagation CopyP(HRI, MRI); 2797 Changed |= visitBlock(Entry, CopyP, ACP); 2798 2799 Changed = DeadCodeElimination(MF, *MDT).run() || Changed; 2800 2801 BT.run(); 2802 RegisterSet ABS; // Available registers for BS. 
  BitSimplification BitS(BT, *MDT, HII, HRI, MRI, MF);
  Changed |= visitBlock(Entry, BitS, ABS);

  Changed = DeadCodeElimination(MF, *MDT).run() || Changed;

  if (Changed) {
    for (auto &B : MF)
      for (auto &I : B)
        I.clearKillInfo();
    DeadCodeElimination(MF, *MDT).run();
  }
  return Changed;
}

// Recognize loops where the code at the end of the loop matches the code
// before the entry of the loop, and the matching code is such that it can
// be simplified. This pass relies on the bit simplification above and only
// prepares code in a way that can be handled by the bit simplification.
//
// This is the motivating testcase (and explanation):
//
//   {
//     loop0(.LBB0_2, r1)          // %for.body.preheader
//     r5:4 = memd(r0++#8)
//   }
//   {
//     r3 = lsr(r4, #16)
//     r7:6 = combine(r5, r5)
//   }
//   {
//     r3 = insert(r5, #16, #16)
//     r7:6 = vlsrw(r7:6, #16)
//   }
// .LBB0_2:
//   {
//     memh(r2+#4) = r5
//     memh(r2+#6) = r6            # R6 is really R5.H
//   }
//   {
//     r2 = add(r2, #8)
//     memh(r2+#0) = r4
//     memh(r2+#2) = r3            # R3 is really R4.H
//   }
//   {
//     r5:4 = memd(r0++#8)
//   }
//   {                             # "Shuffling" code that sets up R3 and R6
//     r3 = lsr(r4, #16)           # so that their halves can be stored in the
//     r7:6 = combine(r5, r5)      # next iteration. This could be folded into
//   }                             # the stores if the code was at the beginning
//   {                             # of the loop iteration. Since the same code
//     r3 = insert(r5, #16, #16)   # precedes the loop, it can actually be moved
//     r7:6 = vlsrw(r7:6, #16)     # there.
//   }:endloop0
//
//
// The outcome:
//
//   {
//     loop0(.LBB0_2, r1)
//     r5:4 = memd(r0++#8)
//   }
// .LBB0_2:
//   {
//     memh(r2+#4) = r5
//     memh(r2+#6) = r5.h
//   }
//   {
//     r2 = add(r2, #8)
//     memh(r2+#0) = r4
//     memh(r2+#2) = r4.h
//   }
//   {
//     r5:4 = memd(r0++#8)
//   }:endloop0

namespace llvm {

FunctionPass *createHexagonLoopRescheduling();
void initializeHexagonLoopReschedulingPass(PassRegistry&);

} // end namespace llvm

namespace {

class HexagonLoopRescheduling : public MachineFunctionPass {
public:
  static char ID;

  HexagonLoopRescheduling() : MachineFunctionPass(ID) {
    initializeHexagonLoopReschedulingPass(*PassRegistry::getPassRegistry());
  }

  bool runOnMachineFunction(MachineFunction &MF) override;

private:
  const HexagonInstrInfo *HII = nullptr;
  const HexagonRegisterInfo *HRI = nullptr;
  MachineRegisterInfo *MRI = nullptr;
  BitTracker *BTP = nullptr;

  struct LoopCand {
    LoopCand(MachineBasicBlock *lb, MachineBasicBlock *pb,
          MachineBasicBlock *eb) : LB(lb), PB(pb), EB(eb) {}

    MachineBasicBlock *LB, *PB, *EB;
  };
  using InstrList = std::vector<MachineInstr *>;
  struct InstrGroup {
    BitTracker::RegisterRef Inp, Out;
    InstrList Ins;
  };
  struct PhiInfo {
    PhiInfo(MachineInstr &P, MachineBasicBlock &B);

    unsigned DefR;
    BitTracker::RegisterRef LR, PR; // Loop Register, Preheader Register
    MachineBasicBlock *LB, *PB;     // Loop Block, Preheader Block
  };

  static unsigned getDefReg(const MachineInstr *MI);
  bool isConst(unsigned Reg) const;
  bool isBitShuffle(const MachineInstr *MI, unsigned DefR) const;
bool isStoreInput(const MachineInstr *MI, unsigned DefR) const; 2927 bool isShuffleOf(unsigned OutR, unsigned InpR) const; 2928 bool isSameShuffle(unsigned OutR1, unsigned InpR1, unsigned OutR2, 2929 unsigned &InpR2) const; 2930 void moveGroup(InstrGroup &G, MachineBasicBlock &LB, MachineBasicBlock &PB, 2931 MachineBasicBlock::iterator At, unsigned OldPhiR, unsigned NewPredR); 2932 bool processLoop(LoopCand &C); 2933 }; 2934 2935 } // end anonymous namespace 2936 2937 char HexagonLoopRescheduling::ID = 0; 2938 2939 INITIALIZE_PASS(HexagonLoopRescheduling, "hexagon-loop-resched", 2940 "Hexagon Loop Rescheduling", false, false) 2941 2942 HexagonLoopRescheduling::PhiInfo::PhiInfo(MachineInstr &P, 2943 MachineBasicBlock &B) { 2944 DefR = HexagonLoopRescheduling::getDefReg(&P); 2945 LB = &B; 2946 PB = nullptr; 2947 for (unsigned i = 1, n = P.getNumOperands(); i < n; i += 2) { 2948 const MachineOperand &OpB = P.getOperand(i+1); 2949 if (OpB.getMBB() == &B) { 2950 LR = P.getOperand(i); 2951 continue; 2952 } 2953 PB = OpB.getMBB(); 2954 PR = P.getOperand(i); 2955 } 2956 } 2957 2958 unsigned HexagonLoopRescheduling::getDefReg(const MachineInstr *MI) { 2959 RegisterSet Defs; 2960 HBS::getInstrDefs(*MI, Defs); 2961 if (Defs.count() != 1) 2962 return 0; 2963 return Defs.find_first(); 2964 } 2965 2966 bool HexagonLoopRescheduling::isConst(unsigned Reg) const { 2967 if (!BTP->has(Reg)) 2968 return false; 2969 const BitTracker::RegisterCell &RC = BTP->lookup(Reg); 2970 for (unsigned i = 0, w = RC.width(); i < w; ++i) { 2971 const BitTracker::BitValue &V = RC[i]; 2972 if (!V.is(0) && !V.is(1)) 2973 return false; 2974 } 2975 return true; 2976 } 2977 2978 bool HexagonLoopRescheduling::isBitShuffle(const MachineInstr *MI, 2979 unsigned DefR) const { 2980 unsigned Opc = MI->getOpcode(); 2981 switch (Opc) { 2982 case TargetOpcode::COPY: 2983 case Hexagon::S2_lsr_i_r: 2984 case Hexagon::S2_asr_i_r: 2985 case Hexagon::S2_asl_i_r: 2986 case Hexagon::S2_lsr_i_p: 2987 case Hexagon::S2_asr_i_p: 2988 case Hexagon::S2_asl_i_p: 2989 case Hexagon::S2_insert: 2990 case Hexagon::A2_or: 2991 case Hexagon::A2_orp: 2992 case Hexagon::A2_and: 2993 case Hexagon::A2_andp: 2994 case Hexagon::A2_combinew: 2995 case Hexagon::A4_combineri: 2996 case Hexagon::A4_combineir: 2997 case Hexagon::A2_combineii: 2998 case Hexagon::A4_combineii: 2999 case Hexagon::A2_combine_ll: 3000 case Hexagon::A2_combine_lh: 3001 case Hexagon::A2_combine_hl: 3002 case Hexagon::A2_combine_hh: 3003 return true; 3004 } 3005 return false; 3006 } 3007 3008 bool HexagonLoopRescheduling::isStoreInput(const MachineInstr *MI, 3009 unsigned InpR) const { 3010 for (unsigned i = 0, n = MI->getNumOperands(); i < n; ++i) { 3011 const MachineOperand &Op = MI->getOperand(i); 3012 if (!Op.isReg()) 3013 continue; 3014 if (Op.getReg() == InpR) 3015 return i == n-1; 3016 } 3017 return false; 3018 } 3019 3020 bool HexagonLoopRescheduling::isShuffleOf(unsigned OutR, unsigned InpR) const { 3021 if (!BTP->has(OutR) || !BTP->has(InpR)) 3022 return false; 3023 const BitTracker::RegisterCell &OutC = BTP->lookup(OutR); 3024 for (unsigned i = 0, w = OutC.width(); i < w; ++i) { 3025 const BitTracker::BitValue &V = OutC[i]; 3026 if (V.Type != BitTracker::BitValue::Ref) 3027 continue; 3028 if (V.RefI.Reg != InpR) 3029 return false; 3030 } 3031 return true; 3032 } 3033 3034 bool HexagonLoopRescheduling::isSameShuffle(unsigned OutR1, unsigned InpR1, 3035 unsigned OutR2, unsigned &InpR2) const { 3036 if (!BTP->has(OutR1) || !BTP->has(InpR1) || !BTP->has(OutR2)) 3037 return false; 3038 
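  // Check, bit by bit, that OutR2 is obtained from some single register in
  // the same way that OutR1 is obtained from InpR1 (same bit positions in
  // the same order); if so, return that register in InpR2.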
const BitTracker::RegisterCell &OutC1 = BTP->lookup(OutR1); 3039 const BitTracker::RegisterCell &OutC2 = BTP->lookup(OutR2); 3040 unsigned W = OutC1.width(); 3041 unsigned MatchR = 0; 3042 if (W != OutC2.width()) 3043 return false; 3044 for (unsigned i = 0; i < W; ++i) { 3045 const BitTracker::BitValue &V1 = OutC1[i], &V2 = OutC2[i]; 3046 if (V1.Type != V2.Type || V1.Type == BitTracker::BitValue::One) 3047 return false; 3048 if (V1.Type != BitTracker::BitValue::Ref) 3049 continue; 3050 if (V1.RefI.Pos != V2.RefI.Pos) 3051 return false; 3052 if (V1.RefI.Reg != InpR1) 3053 return false; 3054 if (V2.RefI.Reg == 0 || V2.RefI.Reg == OutR2) 3055 return false; 3056 if (!MatchR) 3057 MatchR = V2.RefI.Reg; 3058 else if (V2.RefI.Reg != MatchR) 3059 return false; 3060 } 3061 InpR2 = MatchR; 3062 return true; 3063 } 3064 3065 void HexagonLoopRescheduling::moveGroup(InstrGroup &G, MachineBasicBlock &LB, 3066 MachineBasicBlock &PB, MachineBasicBlock::iterator At, unsigned OldPhiR, 3067 unsigned NewPredR) { 3068 DenseMap<unsigned,unsigned> RegMap; 3069 3070 const TargetRegisterClass *PhiRC = MRI->getRegClass(NewPredR); 3071 Register PhiR = MRI->createVirtualRegister(PhiRC); 3072 BuildMI(LB, At, At->getDebugLoc(), HII->get(TargetOpcode::PHI), PhiR) 3073 .addReg(NewPredR) 3074 .addMBB(&PB) 3075 .addReg(G.Inp.Reg) 3076 .addMBB(&LB); 3077 RegMap.insert(std::make_pair(G.Inp.Reg, PhiR)); 3078 3079 for (unsigned i = G.Ins.size(); i > 0; --i) { 3080 const MachineInstr *SI = G.Ins[i-1]; 3081 unsigned DR = getDefReg(SI); 3082 const TargetRegisterClass *RC = MRI->getRegClass(DR); 3083 Register NewDR = MRI->createVirtualRegister(RC); 3084 DebugLoc DL = SI->getDebugLoc(); 3085 3086 auto MIB = BuildMI(LB, At, DL, HII->get(SI->getOpcode()), NewDR); 3087 for (unsigned j = 0, m = SI->getNumOperands(); j < m; ++j) { 3088 const MachineOperand &Op = SI->getOperand(j); 3089 if (!Op.isReg()) { 3090 MIB.add(Op); 3091 continue; 3092 } 3093 if (!Op.isUse()) 3094 continue; 3095 unsigned UseR = RegMap[Op.getReg()]; 3096 MIB.addReg(UseR, 0, Op.getSubReg()); 3097 } 3098 RegMap.insert(std::make_pair(DR, NewDR)); 3099 } 3100 3101 HBS::replaceReg(OldPhiR, RegMap[G.Out.Reg], *MRI); 3102 } 3103 3104 bool HexagonLoopRescheduling::processLoop(LoopCand &C) { 3105 LLVM_DEBUG(dbgs() << "Processing loop in " << printMBBReference(*C.LB) 3106 << "\n"); 3107 std::vector<PhiInfo> Phis; 3108 for (auto &I : *C.LB) { 3109 if (!I.isPHI()) 3110 break; 3111 unsigned PR = getDefReg(&I); 3112 if (isConst(PR)) 3113 continue; 3114 bool BadUse = false, GoodUse = false; 3115 for (auto UI = MRI->use_begin(PR), UE = MRI->use_end(); UI != UE; ++UI) { 3116 MachineInstr *UseI = UI->getParent(); 3117 if (UseI->getParent() != C.LB) { 3118 BadUse = true; 3119 break; 3120 } 3121 if (isBitShuffle(UseI, PR) || isStoreInput(UseI, PR)) 3122 GoodUse = true; 3123 } 3124 if (BadUse || !GoodUse) 3125 continue; 3126 3127 Phis.push_back(PhiInfo(I, *C.LB)); 3128 } 3129 3130 LLVM_DEBUG({ 3131 dbgs() << "Phis: {"; 3132 for (auto &I : Phis) { 3133 dbgs() << ' ' << printReg(I.DefR, HRI) << "=phi(" 3134 << printReg(I.PR.Reg, HRI, I.PR.Sub) << ":b" << I.PB->getNumber() 3135 << ',' << printReg(I.LR.Reg, HRI, I.LR.Sub) << ":b" 3136 << I.LB->getNumber() << ')'; 3137 } 3138 dbgs() << " }\n"; 3139 }); 3140 3141 if (Phis.empty()) 3142 return false; 3143 3144 bool Changed = false; 3145 InstrList ShufIns; 3146 3147 // Go backwards in the block: for each bit shuffling instruction, check 3148 // if that instruction could potentially be moved to the front of the loop: 3149 // the output of the 
loop cannot be used in a non-shuffling instruction 3150 // in this loop. 3151 for (auto I = C.LB->rbegin(), E = C.LB->rend(); I != E; ++I) { 3152 if (I->isTerminator()) 3153 continue; 3154 if (I->isPHI()) 3155 break; 3156 3157 RegisterSet Defs; 3158 HBS::getInstrDefs(*I, Defs); 3159 if (Defs.count() != 1) 3160 continue; 3161 unsigned DefR = Defs.find_first(); 3162 if (!Register::isVirtualRegister(DefR)) 3163 continue; 3164 if (!isBitShuffle(&*I, DefR)) 3165 continue; 3166 3167 bool BadUse = false; 3168 for (auto UI = MRI->use_begin(DefR), UE = MRI->use_end(); UI != UE; ++UI) { 3169 MachineInstr *UseI = UI->getParent(); 3170 if (UseI->getParent() == C.LB) { 3171 if (UseI->isPHI()) { 3172 // If the use is in a phi node in this loop, then it should be 3173 // the value corresponding to the back edge. 3174 unsigned Idx = UI.getOperandNo(); 3175 if (UseI->getOperand(Idx+1).getMBB() != C.LB) 3176 BadUse = true; 3177 } else { 3178 auto F = find(ShufIns, UseI); 3179 if (F == ShufIns.end()) 3180 BadUse = true; 3181 } 3182 } else { 3183 // There is a use outside of the loop, but there is no epilog block 3184 // suitable for a copy-out. 3185 if (C.EB == nullptr) 3186 BadUse = true; 3187 } 3188 if (BadUse) 3189 break; 3190 } 3191 3192 if (BadUse) 3193 continue; 3194 ShufIns.push_back(&*I); 3195 } 3196 3197 // Partition the list of shuffling instructions into instruction groups, 3198 // where each group has to be moved as a whole (i.e. a group is a chain of 3199 // dependent instructions). A group produces a single live output register, 3200 // which is meant to be the input of the loop phi node (although this is 3201 // not checked here yet). It also uses a single register as its input, 3202 // which is some value produced in the loop body. After moving the group 3203 // to the beginning of the loop, that input register would need to be 3204 // the loop-carried register (through a phi node) instead of the (currently 3205 // loop-carried) output register. 3206 using InstrGroupList = std::vector<InstrGroup>; 3207 InstrGroupList Groups; 3208 3209 for (unsigned i = 0, n = ShufIns.size(); i < n; ++i) { 3210 MachineInstr *SI = ShufIns[i]; 3211 if (SI == nullptr) 3212 continue; 3213 3214 InstrGroup G; 3215 G.Ins.push_back(SI); 3216 G.Out.Reg = getDefReg(SI); 3217 RegisterSet Inputs; 3218 HBS::getInstrUses(*SI, Inputs); 3219 3220 for (unsigned j = i+1; j < n; ++j) { 3221 MachineInstr *MI = ShufIns[j]; 3222 if (MI == nullptr) 3223 continue; 3224 RegisterSet Defs; 3225 HBS::getInstrDefs(*MI, Defs); 3226 // If this instruction does not define any pending inputs, skip it. 3227 if (!Defs.intersects(Inputs)) 3228 continue; 3229 // Otherwise, add it to the current group and remove the inputs that 3230 // are defined by MI. 3231 G.Ins.push_back(MI); 3232 Inputs.remove(Defs); 3233 // Then add all registers used by MI. 3234 HBS::getInstrUses(*MI, Inputs); 3235 ShufIns[j] = nullptr; 3236 } 3237 3238 // Only add a group if it requires at most one register. 
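    // At this point Inputs holds the registers that the group reads but does
    // not define itself. The single remaining register (if any) becomes
    // G.Inp, i.e. the value that will be fed into the group through a new
    // phi once the group is moved to the top of the loop (see moveGroup).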
3239 if (Inputs.count() > 1) 3240 continue; 3241 auto LoopInpEq = [G] (const PhiInfo &P) -> bool { 3242 return G.Out.Reg == P.LR.Reg; 3243 }; 3244 if (llvm::find_if(Phis, LoopInpEq) == Phis.end()) 3245 continue; 3246 3247 G.Inp.Reg = Inputs.find_first(); 3248 Groups.push_back(G); 3249 } 3250 3251 LLVM_DEBUG({ 3252 for (unsigned i = 0, n = Groups.size(); i < n; ++i) { 3253 InstrGroup &G = Groups[i]; 3254 dbgs() << "Group[" << i << "] inp: " 3255 << printReg(G.Inp.Reg, HRI, G.Inp.Sub) 3256 << " out: " << printReg(G.Out.Reg, HRI, G.Out.Sub) << "\n"; 3257 for (unsigned j = 0, m = G.Ins.size(); j < m; ++j) 3258 dbgs() << " " << *G.Ins[j]; 3259 } 3260 }); 3261 3262 for (unsigned i = 0, n = Groups.size(); i < n; ++i) { 3263 InstrGroup &G = Groups[i]; 3264 if (!isShuffleOf(G.Out.Reg, G.Inp.Reg)) 3265 continue; 3266 auto LoopInpEq = [G] (const PhiInfo &P) -> bool { 3267 return G.Out.Reg == P.LR.Reg; 3268 }; 3269 auto F = llvm::find_if(Phis, LoopInpEq); 3270 if (F == Phis.end()) 3271 continue; 3272 unsigned PrehR = 0; 3273 if (!isSameShuffle(G.Out.Reg, G.Inp.Reg, F->PR.Reg, PrehR)) { 3274 const MachineInstr *DefPrehR = MRI->getVRegDef(F->PR.Reg); 3275 unsigned Opc = DefPrehR->getOpcode(); 3276 if (Opc != Hexagon::A2_tfrsi && Opc != Hexagon::A2_tfrpi) 3277 continue; 3278 if (!DefPrehR->getOperand(1).isImm()) 3279 continue; 3280 if (DefPrehR->getOperand(1).getImm() != 0) 3281 continue; 3282 const TargetRegisterClass *RC = MRI->getRegClass(G.Inp.Reg); 3283 if (RC != MRI->getRegClass(F->PR.Reg)) { 3284 PrehR = MRI->createVirtualRegister(RC); 3285 unsigned TfrI = (RC == &Hexagon::IntRegsRegClass) ? Hexagon::A2_tfrsi 3286 : Hexagon::A2_tfrpi; 3287 auto T = C.PB->getFirstTerminator(); 3288 DebugLoc DL = (T != C.PB->end()) ? T->getDebugLoc() : DebugLoc(); 3289 BuildMI(*C.PB, T, DL, HII->get(TfrI), PrehR) 3290 .addImm(0); 3291 } else { 3292 PrehR = F->PR.Reg; 3293 } 3294 } 3295 // isSameShuffle could match with PrehR being of a wider class than 3296 // G.Inp.Reg, for example if G shuffles the low 32 bits of its input, 3297 // it would match for the input being a 32-bit register, and PrehR 3298 // being a 64-bit register (where the low 32 bits match). This could 3299 // be handled, but for now skip these cases. 3300 if (MRI->getRegClass(PrehR) != MRI->getRegClass(G.Inp.Reg)) 3301 continue; 3302 moveGroup(G, *F->LB, *F->PB, F->LB->getFirstNonPHI(), F->DefR, PrehR); 3303 Changed = true; 3304 } 3305 3306 return Changed; 3307 } 3308 3309 bool HexagonLoopRescheduling::runOnMachineFunction(MachineFunction &MF) { 3310 if (skipFunction(MF.getFunction())) 3311 return false; 3312 3313 auto &HST = MF.getSubtarget<HexagonSubtarget>(); 3314 HII = HST.getInstrInfo(); 3315 HRI = HST.getRegisterInfo(); 3316 MRI = &MF.getRegInfo(); 3317 const HexagonEvaluator HE(*HRI, *MRI, *HII, MF); 3318 BitTracker BT(HE, MF); 3319 LLVM_DEBUG(BT.trace(true)); 3320 BT.run(); 3321 BTP = &BT; 3322 3323 std::vector<LoopCand> Cand; 3324 3325 for (auto &B : MF) { 3326 if (B.pred_size() != 2 || B.succ_size() != 2) 3327 continue; 3328 MachineBasicBlock *PB = nullptr; 3329 bool IsLoop = false; 3330 for (auto PI = B.pred_begin(), PE = B.pred_end(); PI != PE; ++PI) { 3331 if (*PI != &B) 3332 PB = *PI; 3333 else 3334 IsLoop = true; 3335 } 3336 if (!IsLoop) 3337 continue; 3338 3339 MachineBasicBlock *EB = nullptr; 3340 for (auto SI = B.succ_begin(), SE = B.succ_end(); SI != SE; ++SI) { 3341 if (*SI == &B) 3342 continue; 3343 // Set EP to the epilog block, if it has only 1 predecessor (i.e. the 3344 // edge from B to EP is non-critical. 
3345 if ((*SI)->pred_size() == 1) 3346 EB = *SI; 3347 break; 3348 } 3349 3350 Cand.push_back(LoopCand(&B, PB, EB)); 3351 } 3352 3353 bool Changed = false; 3354 for (auto &C : Cand) 3355 Changed |= processLoop(C); 3356 3357 return Changed; 3358 } 3359 3360 //===----------------------------------------------------------------------===// 3361 // Public Constructor Functions 3362 //===----------------------------------------------------------------------===// 3363 3364 FunctionPass *llvm::createHexagonLoopRescheduling() { 3365 return new HexagonLoopRescheduling(); 3366 } 3367 3368 FunctionPass *llvm::createHexagonBitSimplify() { 3369 return new HexagonBitSimplify(); 3370 } 3371