//===- HexagonBitTracker.cpp ----------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "HexagonBitTracker.h"
#include "HexagonInstrInfo.h"
#include "HexagonRegisterInfo.h"
#include "HexagonSubtarget.h"
#include "llvm/ADT/Twine.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <cstdlib>
#include <utility>
#include <vector>

using namespace llvm;

using BT = BitTracker;

HexagonEvaluator::HexagonEvaluator(const HexagonRegisterInfo &tri,
                                   MachineRegisterInfo &mri,
                                   const HexagonInstrInfo &tii,
                                   MachineFunction &mf)
    : MachineEvaluator(tri, mri), MF(mf), MFI(mf.getFrameInfo()), TII(tii) {
  // Populate the VRX map (VR to extension-type).
  // Go over all the formal parameters of the function. If a given parameter
  // P is sign- or zero-extended, locate the virtual register holding that
  // parameter and create an entry in the VRX map indicating the type of
  // extension (and the source type).
  // This is a bit complicated to do accurately, since the memory layout
  // information is necessary to precisely determine whether an aggregate
  // parameter will be passed in a register or in memory. What is given in
  // MRI is the association between the physical register that is live-in
  // (i.e. holds an argument), and the virtual register that this value will
  // be copied into. This, by itself, is not sufficient to map the virtual
  // register back to a formal parameter from Function (since consecutive
  // live-ins from MRI may not correspond to consecutive formal parameters
  // from Function). To avoid the complications with in-memory arguments,
  // only consider the initial sequence of formal parameters that are known
  // to be passed via registers.
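  // For example, for a function declared as "i32 @f(i16 signext %a,
  // i8 zeroext %b)", %a would arrive in R0 and %b in R1, so the virtual
  // registers receiving the live-in copies of R0 and R1 would get the
  // VRX entries ExtType(SExt, 16) and ExtType(ZExt, 8), respectively.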
  unsigned InVirtReg, InPhysReg = 0;

  for (const Argument &Arg : MF.getFunction().args()) {
    Type *ATy = Arg.getType();
    unsigned Width = 0;
    if (ATy->isIntegerTy())
      Width = ATy->getIntegerBitWidth();
    else if (ATy->isPointerTy())
      Width = 32;
    // If pointer size is not set through target data, it will default to
    // Module::AnyPointerSize.
    if (Width == 0 || Width > 64)
      break;
    if (Arg.hasAttribute(Attribute::ByVal))
      continue;
    InPhysReg = getNextPhysReg(InPhysReg, Width);
    if (!InPhysReg)
      break;
    InVirtReg = getVirtRegFor(InPhysReg);
    if (!InVirtReg)
      continue;
    if (Arg.hasAttribute(Attribute::SExt))
      VRX.insert(std::make_pair(InVirtReg, ExtType(ExtType::SExt, Width)));
    else if (Arg.hasAttribute(Attribute::ZExt))
      VRX.insert(std::make_pair(InVirtReg, ExtType(ExtType::ZExt, Width)));
  }
}

BT::BitMask HexagonEvaluator::mask(Register Reg, unsigned Sub) const {
  if (Sub == 0)
    return MachineEvaluator::mask(Reg, 0);
  const TargetRegisterClass &RC = *MRI.getRegClass(Reg);
  unsigned ID = RC.getID();
  uint16_t RW = getRegBitWidth(RegisterRef(Reg, Sub));
  const auto &HRI = static_cast<const HexagonRegisterInfo&>(TRI);
  bool IsSubLo = (Sub == HRI.getHexagonSubRegIndex(RC, Hexagon::ps_sub_lo));
  switch (ID) {
  case Hexagon::DoubleRegsRegClassID:
  case Hexagon::HvxWRRegClassID:
  case Hexagon::HvxVQRRegClassID:
    return IsSubLo ? BT::BitMask(0, RW-1)
                   : BT::BitMask(RW, 2*RW-1);
  default:
    break;
  }
#ifndef NDEBUG
  dbgs() << printReg(Reg, &TRI, Sub) << " in reg class "
         << TRI.getRegClassName(&RC) << '\n';
#endif
  llvm_unreachable("Unexpected register/subregister");
}
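
// Illustration for mask(): for a 64-bit virtual register in DoubleRegs,
// RW is the 32-bit width of the subregister, so the low subregister covers
// bits [0,31] (BitMask(0,31)) and the high one bits [32,63] (BitMask(32,63)).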

uint16_t HexagonEvaluator::getPhysRegBitWidth(MCRegister Reg) const {
  using namespace Hexagon;
  const auto &HST = MF.getSubtarget<HexagonSubtarget>();
  if (HST.useHVXOps()) {
    for (auto &RC : {HvxVRRegClass, HvxWRRegClass, HvxQRRegClass,
                     HvxVQRRegClass})
      if (RC.contains(Reg))
        return TRI.getRegSizeInBits(RC);
  }
  // Default treatment for other physical registers.
  if (const TargetRegisterClass *RC = TRI.getMinimalPhysRegClass(Reg))
    return TRI.getRegSizeInBits(*RC);

  llvm_unreachable(
      (Twine("Unhandled physical register: ") + TRI.getName(Reg))
          .str().c_str());
}

const TargetRegisterClass &HexagonEvaluator::composeWithSubRegIndex(
      const TargetRegisterClass &RC, unsigned Idx) const {
  if (Idx == 0)
    return RC;

#ifndef NDEBUG
  const auto &HRI = static_cast<const HexagonRegisterInfo&>(TRI);
  bool IsSubLo = (Idx == HRI.getHexagonSubRegIndex(RC, Hexagon::ps_sub_lo));
  bool IsSubHi = (Idx == HRI.getHexagonSubRegIndex(RC, Hexagon::ps_sub_hi));
  assert(IsSubLo != IsSubHi && "Must refer to either low or high subreg");
#endif

  switch (RC.getID()) {
  case Hexagon::DoubleRegsRegClassID:
    return Hexagon::IntRegsRegClass;
  case Hexagon::HvxWRRegClassID:
    return Hexagon::HvxVRRegClass;
  case Hexagon::HvxVQRRegClassID:
    return Hexagon::HvxWRRegClass;
  default:
    break;
  }
#ifndef NDEBUG
  dbgs() << "Reg class id: " << RC.getID() << " idx: " << Idx << '\n';
#endif
  llvm_unreachable("Unimplemented combination of reg class/subreg idx");
}

namespace {

class RegisterRefs {
  std::vector<BT::RegisterRef> Vector;

public:
  RegisterRefs(const MachineInstr &MI) : Vector(MI.getNumOperands()) {
    for (unsigned i = 0, n = Vector.size(); i < n; ++i) {
      const MachineOperand &MO = MI.getOperand(i);
      if (MO.isReg())
        Vector[i] = BT::RegisterRef(MO);
      // For indices that don't correspond to registers, the entry will
      // remain constructed via the default constructor.
    }
  }

  size_t size() const { return Vector.size(); }

  const BT::RegisterRef &operator[](unsigned n) const {
    // The main purpose of this operator is to assert when given a bad
    // argument.
    assert(n < Vector.size());
    return Vector[n];
  }
};

} // end anonymous namespace

bool HexagonEvaluator::evaluate(const MachineInstr &MI,
                                const CellMapType &Inputs,
                                CellMapType &Outputs) const {
  using namespace Hexagon;

  unsigned NumDefs = 0;

  // Basic correctness check: there should not be any defs with subregisters.
  for (const MachineOperand &MO : MI.operands()) {
    if (!MO.isReg() || !MO.isDef())
      continue;
    NumDefs++;
    assert(MO.getSubReg() == 0);
  }

  if (NumDefs == 0)
    return false;

  unsigned Opc = MI.getOpcode();

  if (MI.mayLoad()) {
    switch (Opc) {
    // These instructions may be marked as mayLoad, but they are generating
    // immediate values, so skip them.
    case CONST32:
    case CONST64:
      break;
    default:
      return evaluateLoad(MI, Inputs, Outputs);
    }
  }

  // Check COPY instructions that copy formal parameters into virtual
  // registers. Such parameters can be sign- or zero-extended at the
  // call site, and we should take advantage of this knowledge. The MRI
  // keeps a list of pairs of live-in physical and virtual registers,
  // which provides information about which virtual registers will hold
  // the argument values. The function will still contain instructions
  // defining those virtual registers, and in practice those are COPY
  // instructions from a physical to a virtual register. In such cases,
  // applying the argument extension to the virtual register can be seen
  // as simply mirroring the extension that had already been applied to
  // the physical register at the call site. If the defining instruction
  // was not a COPY, it would not be clear how to mirror that extension
  // on the callee's side. For that reason, only check COPY instructions
  // for potential extensions.
  if (MI.isCopy()) {
    if (evaluateFormalCopy(MI, Inputs, Outputs))
      return true;
  }

  // Beyond this point, if any operand is a global, skip that instruction.
  // The reason is that certain instructions that can take an immediate
  // operand can also have a global symbol in that operand. To avoid
  // checking what kind of operand a given instruction has individually
  // for each instruction, do it here. Global symbols as operands generally
  // do not provide any useful information.
  for (const MachineOperand &MO : MI.operands()) {
    if (MO.isGlobal() || MO.isBlockAddress() || MO.isSymbol() || MO.isJTI() ||
        MO.isCPI())
      return false;
  }

  RegisterRefs Reg(MI);
#define op(i) MI.getOperand(i)
#define rc(i) RegisterCell::ref(getCell(Reg[i], Inputs))
#define im(i) MI.getOperand(i).getImm()
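  // For illustration: given "A2_addi Rd, Rs, #s16" (operand 0 = Rd,
  // operand 1 = Rs, operand 2 = the immediate), rc(1) is the bit cell
  // currently known for Rs and im(2) is the immediate value; the cop
  // helper below accepts either kind of operand.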

  // If the instruction has no register operands, skip it.
  if (Reg.size() == 0)
    return false;

  // Record result for register in operand 0.
  auto rr0 = [this,Reg] (const BT::RegisterCell &Val, CellMapType &Outputs)
        -> bool {
    putCell(Reg[0], Val, Outputs);
    return true;
  };
  // Get the cell corresponding to the N-th operand.
  auto cop = [this, &Reg, &MI, &Inputs](unsigned N,
                                        uint16_t W) -> BT::RegisterCell {
    const MachineOperand &Op = MI.getOperand(N);
    if (Op.isImm())
      return eIMM(Op.getImm(), W);
    if (!Op.isReg())
      return RegisterCell::self(0, W);
    assert(getRegBitWidth(Reg[N]) == W && "Register width mismatch");
    return rc(N);
  };
  // Extract RW low bits of the cell.
  auto lo = [this] (const BT::RegisterCell &RC, uint16_t RW)
        -> BT::RegisterCell {
    assert(RW <= RC.width());
    return eXTR(RC, 0, RW);
  };
  // Extract RW high bits of the cell.
  auto hi = [this] (const BT::RegisterCell &RC, uint16_t RW)
        -> BT::RegisterCell {
    uint16_t W = RC.width();
    assert(RW <= W);
    return eXTR(RC, W-RW, W);
  };
  // Extract N-th halfword (counting from the least significant position).
  auto half = [this] (const BT::RegisterCell &RC, unsigned N)
        -> BT::RegisterCell {
    assert(N*16+16 <= RC.width());
    return eXTR(RC, N*16, N*16+16);
  };
  // Shuffle bits (pick even/odd from cells and merge into result).
  auto shuffle = [this] (const BT::RegisterCell &Rs, const BT::RegisterCell &Rt,
                         uint16_t BW, bool Odd) -> BT::RegisterCell {
    uint16_t I = Odd, Ws = Rs.width();
    assert(Ws == Rt.width());
    RegisterCell RC = eXTR(Rt, I*BW, I*BW+BW).cat(eXTR(Rs, I*BW, I*BW+BW));
    I += 2;
    while (I*BW < Ws) {
      RC.cat(eXTR(Rt, I*BW, I*BW+BW)).cat(eXTR(Rs, I*BW, I*BW+BW));
      I += 2;
    }
    return RC;
  };
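  // Illustration of shuffle: with BW = 8 and Odd = false, the result is
  // built from the even-numbered bytes of both inputs, starting at the
  // least significant end: Rt.b0, Rs.b0, Rt.b2, Rs.b2, ... (each cat()
  // appends at the more significant end). This is the pattern used for
  // S2_shuffeb below; Odd = true picks the odd-numbered units instead.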

  // The bitwidth of the 0th operand. In most (if not all) of the
  // instructions below, the 0th operand is the defined register.
  // Pre-compute the bitwidth here, because it is needed in many cases
  // below.
  uint16_t W0 = (Reg[0].Reg != 0) ? getRegBitWidth(Reg[0]) : 0;

  // Register id of the 0th operand. It can be 0.
  unsigned Reg0 = Reg[0].Reg;

  switch (Opc) {
  // Transfer immediate:

  case A2_tfrsi:
  case A2_tfrpi:
  case CONST32:
  case CONST64:
    return rr0(eIMM(im(1), W0), Outputs);
  case PS_false:
    return rr0(RegisterCell(W0).fill(0, W0, BT::BitValue::Zero), Outputs);
  case PS_true:
    return rr0(RegisterCell(W0).fill(0, W0, BT::BitValue::One), Outputs);
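  // PS_fi computes the address of a frame object. The object's alignment,
  // combined with the immediate offset, guarantees a number of trailing
  // zero bits in the address: e.g. an object aligned to 8 with offset 4
  // gives A = 8 + 4 = 12 = 0b1100, so the low two bits of the computed
  // address are known to be zero.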
  case PS_fi: {
    int FI = op(1).getIndex();
    int Off = op(2).getImm();
    unsigned A = MFI.getObjectAlign(FI).value() + std::abs(Off);
    unsigned L = llvm::countr_zero(A);
    RegisterCell RC = RegisterCell::self(Reg[0].Reg, W0);
    RC.fill(0, L, BT::BitValue::Zero);
    return rr0(RC, Outputs);
  }

  // Transfer register:

  case A2_tfr:
  case A2_tfrp:
  case C2_pxfer_map:
    return rr0(rc(1), Outputs);
  case C2_tfrpr: {
    uint16_t RW = W0;
    uint16_t PW = 8; // XXX Pred size: getRegBitWidth(Reg[1]);
    assert(PW <= RW);
    RegisterCell PC = eXTR(rc(1), 0, PW);
    RegisterCell RC = RegisterCell(RW).insert(PC, BT::BitMask(0, PW-1));
    RC.fill(PW, RW, BT::BitValue::Zero);
    return rr0(RC, Outputs);
  }
  case C2_tfrrp: {
    uint16_t RW = W0;
    uint16_t PW = 8; // XXX Pred size: getRegBitWidth(Reg[1]);
    RegisterCell RC = RegisterCell::self(Reg[0].Reg, RW);
    RC.fill(PW, RW, BT::BitValue::Zero);
    return rr0(eINS(RC, eXTR(rc(1), 0, PW), 0), Outputs);
  }

  // Arithmetic:

  case A2_abs:
  case A2_absp:
    // TODO
    break;

  case A2_addsp: {
    uint16_t W1 = getRegBitWidth(Reg[1]);
    assert(W0 == 64 && W1 == 32);
    RegisterCell CW = RegisterCell(W0).insert(rc(1), BT::BitMask(0, W1-1));
    RegisterCell RC = eADD(eSXT(CW, W1), rc(2));
    return rr0(RC, Outputs);
  }
  case A2_add:
  case A2_addp:
    return rr0(eADD(rc(1), rc(2)), Outputs);
  case A2_addi:
    return rr0(eADD(rc(1), eIMM(im(2), W0)), Outputs);
  case S4_addi_asl_ri: {
    RegisterCell RC = eADD(eIMM(im(1), W0), eASL(rc(2), im(3)));
    return rr0(RC, Outputs);
  }
  case S4_addi_lsr_ri: {
    RegisterCell RC = eADD(eIMM(im(1), W0), eLSR(rc(2), im(3)));
    return rr0(RC, Outputs);
  }
  case S4_addaddi: {
    RegisterCell RC = eADD(rc(1), eADD(rc(2), eIMM(im(3), W0)));
    return rr0(RC, Outputs);
  }
  case M4_mpyri_addi: {
    RegisterCell M = eMLS(rc(2), eIMM(im(3), W0));
    RegisterCell RC = eADD(eIMM(im(1), W0), lo(M, W0));
    return rr0(RC, Outputs);
  }
  case M4_mpyrr_addi: {
    RegisterCell M = eMLS(rc(2), rc(3));
    RegisterCell RC = eADD(eIMM(im(1), W0), lo(M, W0));
    return rr0(RC, Outputs);
  }
  case M4_mpyri_addr_u2: {
    RegisterCell M = eMLS(eIMM(im(2), W0), rc(3));
    RegisterCell RC = eADD(rc(1), lo(M, W0));
    return rr0(RC, Outputs);
  }
  case M4_mpyri_addr: {
    RegisterCell M = eMLS(rc(2), eIMM(im(3), W0));
    RegisterCell RC = eADD(rc(1), lo(M, W0));
    return rr0(RC, Outputs);
  }
  case M4_mpyrr_addr: {
    RegisterCell M = eMLS(rc(2), rc(3));
    RegisterCell RC = eADD(rc(1), lo(M, W0));
    return rr0(RC, Outputs);
  }
  case S4_subaddi: {
    RegisterCell RC = eADD(rc(1), eSUB(eIMM(im(2), W0), rc(3)));
    return rr0(RC, Outputs);
  }
  case M2_accii: {
    RegisterCell RC = eADD(rc(1), eADD(rc(2), eIMM(im(3), W0)));
    return rr0(RC, Outputs);
  }
  case M2_acci: {
    RegisterCell RC = eADD(rc(1), eADD(rc(2), rc(3)));
    return rr0(RC, Outputs);
  }
  case M2_subacc: {
    RegisterCell RC = eADD(rc(1), eSUB(rc(2), rc(3)));
    return rr0(RC, Outputs);
  }
  case S2_addasl_rrri: {
    RegisterCell RC = eADD(rc(1), eASL(rc(2), im(3)));
    return rr0(RC, Outputs);
  }
  case C4_addipc: {
    RegisterCell RPC = RegisterCell::self(Reg[0].Reg, W0);
    RPC.fill(0, 2, BT::BitValue::Zero);
    return rr0(eADD(RPC, eIMM(im(2), W0)), Outputs);
  }
  case A2_sub:
  case A2_subp:
    return rr0(eSUB(rc(1), rc(2)), Outputs);
  case A2_subri:
    return rr0(eSUB(eIMM(im(1), W0), rc(2)), Outputs);
  case S4_subi_asl_ri: {
    RegisterCell RC = eSUB(eIMM(im(1), W0), eASL(rc(2), im(3)));
    return rr0(RC, Outputs);
  }
  case S4_subi_lsr_ri: {
    RegisterCell RC = eSUB(eIMM(im(1), W0), eLSR(rc(2), im(3)));
    return rr0(RC, Outputs);
  }
  case M2_naccii: {
    RegisterCell RC = eSUB(rc(1), eADD(rc(2), eIMM(im(3), W0)));
    return rr0(RC, Outputs);
  }
  case M2_nacci: {
    RegisterCell RC = eSUB(rc(1), eADD(rc(2), rc(3)));
    return rr0(RC, Outputs);
  }
  // 32-bit negation is done by "Rd = A2_subri 0, Rs"
  case A2_negp:
    return rr0(eSUB(eIMM(0, W0), rc(1)), Outputs);

  case M2_mpy_up: {
    RegisterCell M = eMLS(rc(1), rc(2));
    return rr0(hi(M, W0), Outputs);
  }
  case M2_dpmpyss_s0:
    return rr0(eMLS(rc(1), rc(2)), Outputs);
  case M2_dpmpyss_acc_s0:
    return rr0(eADD(rc(1), eMLS(rc(2), rc(3))), Outputs);
  case M2_dpmpyss_nac_s0:
    return rr0(eSUB(rc(1), eMLS(rc(2), rc(3))), Outputs);
  case M2_mpyi: {
    RegisterCell M = eMLS(rc(1), rc(2));
    return rr0(lo(M, W0), Outputs);
  }
  case M2_macsip: {
    RegisterCell M = eMLS(rc(2), eIMM(im(3), W0));
    RegisterCell RC = eADD(rc(1), lo(M, W0));
    return rr0(RC, Outputs);
  }
  case M2_macsin: {
    RegisterCell M = eMLS(rc(2), eIMM(im(3), W0));
    RegisterCell RC = eSUB(rc(1), lo(M, W0));
    return rr0(RC, Outputs);
  }
  case M2_maci: {
    RegisterCell M = eMLS(rc(2), rc(3));
    RegisterCell RC = eADD(rc(1), lo(M, W0));
    return rr0(RC, Outputs);
  }
  case M2_mnaci: {
    RegisterCell M = eMLS(rc(2), rc(3));
    RegisterCell RC = eSUB(rc(1), lo(M, W0));
    return rr0(RC, Outputs);
  }
  case M2_mpysmi: {
    RegisterCell M = eMLS(rc(1), eIMM(im(2), W0));
    return rr0(lo(M, 32), Outputs);
  }
  case M2_mpysin: {
    RegisterCell M = eMLS(rc(1), eIMM(-im(2), W0));
    return rr0(lo(M, 32), Outputs);
  }
  case M2_mpysip: {
    RegisterCell M = eMLS(rc(1), eIMM(im(2), W0));
    return rr0(lo(M, 32), Outputs);
  }
  case M2_mpyu_up: {
    RegisterCell M = eMLU(rc(1), rc(2));
    return rr0(hi(M, W0), Outputs);
  }
  case M2_dpmpyuu_s0:
    return rr0(eMLU(rc(1), rc(2)), Outputs);
  case M2_dpmpyuu_acc_s0:
    return rr0(eADD(rc(1), eMLU(rc(2), rc(3))), Outputs);
  case M2_dpmpyuu_nac_s0:
    return rr0(eSUB(rc(1), eMLU(rc(2), rc(3))), Outputs);
  //case M2_mpysu_up:

  // Logical/bitwise:

  case A2_andir:
    return rr0(eAND(rc(1), eIMM(im(2), W0)), Outputs);
  case A2_and:
  case A2_andp:
    return rr0(eAND(rc(1), rc(2)), Outputs);
  case A4_andn:
  case A4_andnp:
    return rr0(eAND(rc(1), eNOT(rc(2))), Outputs);
  case S4_andi_asl_ri: {
    RegisterCell RC = eAND(eIMM(im(1), W0), eASL(rc(2), im(3)));
    return rr0(RC, Outputs);
  }
  case S4_andi_lsr_ri: {
    RegisterCell RC = eAND(eIMM(im(1), W0), eLSR(rc(2), im(3)));
    return rr0(RC, Outputs);
  }
  case M4_and_and:
    return rr0(eAND(rc(1), eAND(rc(2), rc(3))), Outputs);
  case M4_and_andn:
    return rr0(eAND(rc(1), eAND(rc(2), eNOT(rc(3)))), Outputs);
  case M4_and_or:
    return rr0(eAND(rc(1), eORL(rc(2), rc(3))), Outputs);
  case M4_and_xor:
    return rr0(eAND(rc(1), eXOR(rc(2), rc(3))), Outputs);
  case A2_orir:
    return rr0(eORL(rc(1), eIMM(im(2), W0)), Outputs);
  case A2_or:
  case A2_orp:
    return rr0(eORL(rc(1), rc(2)), Outputs);
  case A4_orn:
  case A4_ornp:
    return rr0(eORL(rc(1), eNOT(rc(2))), Outputs);
  case S4_ori_asl_ri: {
    RegisterCell RC = eORL(eIMM(im(1), W0), eASL(rc(2), im(3)));
    return rr0(RC, Outputs);
  }
  case S4_ori_lsr_ri: {
    RegisterCell RC = eORL(eIMM(im(1), W0), eLSR(rc(2), im(3)));
    return rr0(RC, Outputs);
  }
  case M4_or_and:
    return rr0(eORL(rc(1), eAND(rc(2), rc(3))), Outputs);
  case M4_or_andn:
    return rr0(eORL(rc(1), eAND(rc(2), eNOT(rc(3)))), Outputs);
  case S4_or_andi:
  case S4_or_andix: {
    RegisterCell RC = eORL(rc(1), eAND(rc(2), eIMM(im(3), W0)));
    return rr0(RC, Outputs);
  }
  case S4_or_ori: {
    RegisterCell RC = eORL(rc(1), eORL(rc(2), eIMM(im(3), W0)));
    return rr0(RC, Outputs);
  }
  case M4_or_or:
    return rr0(eORL(rc(1), eORL(rc(2), rc(3))), Outputs);
  case M4_or_xor:
    return rr0(eORL(rc(1), eXOR(rc(2), rc(3))), Outputs);
  case A2_xor:
  case A2_xorp:
    return rr0(eXOR(rc(1), rc(2)), Outputs);
  case M4_xor_and:
    return rr0(eXOR(rc(1), eAND(rc(2), rc(3))), Outputs);
  case M4_xor_andn:
    return rr0(eXOR(rc(1), eAND(rc(2), eNOT(rc(3)))), Outputs);
  case M4_xor_or:
    return rr0(eXOR(rc(1), eORL(rc(2), rc(3))), Outputs);
  case M4_xor_xacc:
    return rr0(eXOR(rc(1), eXOR(rc(2), rc(3))), Outputs);
  case A2_not:
  case A2_notp:
    return rr0(eNOT(rc(1)), Outputs);

  case S2_asl_i_r:
  case S2_asl_i_p:
    return rr0(eASL(rc(1), im(2)), Outputs);
  case A2_aslh:
    return rr0(eASL(rc(1), 16), Outputs);
  case S2_asl_i_r_acc:
  case S2_asl_i_p_acc:
    return rr0(eADD(rc(1), eASL(rc(2), im(3))), Outputs);
  case S2_asl_i_r_nac:
  case S2_asl_i_p_nac:
    return rr0(eSUB(rc(1), eASL(rc(2), im(3))), Outputs);
  case S2_asl_i_r_and:
  case S2_asl_i_p_and:
    return rr0(eAND(rc(1), eASL(rc(2), im(3))), Outputs);
  case S2_asl_i_r_or:
  case S2_asl_i_p_or:
    return rr0(eORL(rc(1), eASL(rc(2), im(3))), Outputs);
  case S2_asl_i_r_xacc:
  case S2_asl_i_p_xacc:
    return rr0(eXOR(rc(1), eASL(rc(2), im(3))), Outputs);
  case S2_asl_i_vh:
  case S2_asl_i_vw:
    // TODO
    break;

  case S2_asr_i_r:
  case S2_asr_i_p:
    return rr0(eASR(rc(1), im(2)), Outputs);
  case A2_asrh:
    return rr0(eASR(rc(1), 16), Outputs);
  case S2_asr_i_r_acc:
  case S2_asr_i_p_acc:
    return rr0(eADD(rc(1), eASR(rc(2), im(3))), Outputs);
  case S2_asr_i_r_nac:
  case S2_asr_i_p_nac:
    return rr0(eSUB(rc(1), eASR(rc(2), im(3))), Outputs);
  case S2_asr_i_r_and:
  case S2_asr_i_p_and:
    return rr0(eAND(rc(1), eASR(rc(2), im(3))), Outputs);
  case S2_asr_i_r_or:
  case S2_asr_i_p_or:
    return rr0(eORL(rc(1), eASR(rc(2), im(3))), Outputs);
  case S2_asr_i_r_rnd: {
    // The input is first sign-extended to 64 bits, then the output
    // is truncated back to 32 bits.
    assert(W0 == 32);
    RegisterCell XC = eSXT(rc(1).cat(eIMM(0, W0)), W0);
    RegisterCell RC = eASR(eADD(eASR(XC, im(2)), eIMM(1, 2*W0)), 1);
    return rr0(eXTR(RC, 0, W0), Outputs);
  }
  case S2_asr_i_r_rnd_goodsyntax: {
    int64_t S = im(2);
    if (S == 0)
      return rr0(rc(1), Outputs);
    // Result: S2_asr_i_r_rnd Rs, u5-1
    RegisterCell XC = eSXT(rc(1).cat(eIMM(0, W0)), W0);
    RegisterCell RC = eLSR(eADD(eASR(XC, S-1), eIMM(1, 2*W0)), 1);
    return rr0(eXTR(RC, 0, W0), Outputs);
  }
  case S2_asr_r_vh:
  case S2_asr_i_vw:
  case S2_asr_i_svw_trun:
    // TODO
    break;

  case S2_lsr_i_r:
  case S2_lsr_i_p:
    return rr0(eLSR(rc(1), im(2)), Outputs);
  case S2_lsr_i_r_acc:
  case S2_lsr_i_p_acc:
    return rr0(eADD(rc(1), eLSR(rc(2), im(3))), Outputs);
  case S2_lsr_i_r_nac:
  case S2_lsr_i_p_nac:
    return rr0(eSUB(rc(1), eLSR(rc(2), im(3))), Outputs);
  case S2_lsr_i_r_and:
  case S2_lsr_i_p_and:
    return rr0(eAND(rc(1), eLSR(rc(2), im(3))), Outputs);
  case S2_lsr_i_r_or:
  case S2_lsr_i_p_or:
    return rr0(eORL(rc(1), eLSR(rc(2), im(3))), Outputs);
  case S2_lsr_i_r_xacc:
  case S2_lsr_i_p_xacc:
    return rr0(eXOR(rc(1), eLSR(rc(2), im(3))), Outputs);

  case S2_clrbit_i: {
    RegisterCell RC = rc(1);
    RC[im(2)] = BT::BitValue::Zero;
    return rr0(RC, Outputs);
  }
  case S2_setbit_i: {
    RegisterCell RC = rc(1);
    RC[im(2)] = BT::BitValue::One;
    return rr0(RC, Outputs);
  }
  case S2_togglebit_i: {
    RegisterCell RC = rc(1);
    uint16_t BX = im(2);
    RC[BX] = RC[BX].is(0) ? BT::BitValue::One
             : RC[BX].is(1) ? BT::BitValue::Zero
                            : BT::BitValue::self();
    return rr0(RC, Outputs);
  }

  case A4_bitspliti: {
    uint16_t W1 = getRegBitWidth(Reg[1]);
    uint16_t BX = im(2);
    // Res.uw[1] = Rs[bx+1:], Res.uw[0] = Rs[0:bx]
    const BT::BitValue Zero = BT::BitValue::Zero;
    RegisterCell RZ = RegisterCell(W0).fill(BX, W1, Zero)
                                      .fill(W1+(W1-BX), W0, Zero);
    RegisterCell BF1 = eXTR(rc(1), 0, BX), BF2 = eXTR(rc(1), BX, W1);
    RegisterCell RC = eINS(eINS(RZ, BF1, 0), BF2, W1);
    return rr0(RC, Outputs);
  }
  case S4_extract:
  case S4_extractp:
  case S2_extractu:
  case S2_extractup: {
    uint16_t Wd = im(2), Of = im(3);
    assert(Wd <= W0);
    if (Wd == 0)
      return rr0(eIMM(0, W0), Outputs);
    // If the width extends beyond the register size, pad the register
    // with 0 bits.
    RegisterCell Pad = (Wd+Of > W0) ? rc(1).cat(eIMM(0, Wd+Of-W0)) : rc(1);
    RegisterCell Ext = eXTR(Pad, Of, Wd+Of);
    // Ext is short, need to extend it with 0s or sign bit.
    RegisterCell RC = RegisterCell(W0).insert(Ext, BT::BitMask(0, Wd-1));
    if (Opc == S2_extractu || Opc == S2_extractup)
      return rr0(eZXT(RC, Wd), Outputs);
    return rr0(eSXT(RC, Wd), Outputs);
  }
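  // S2_insert replaces the Wd-bit field of the first source starting at
  // bit Of with the low Wd bits of the second source; e.g. with Wd = 8 and
  // Of = 16, bits [23:16] of the result come from bits [7:0] of the second
  // source, and everything else from the first.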
  case S2_insert:
  case S2_insertp: {
    uint16_t Wd = im(3), Of = im(4);
    assert(Wd < W0 && Of < W0);
    // If Wd+Of exceeds W0, the inserted bits are truncated.
    if (Wd+Of > W0)
      Wd = W0-Of;
    if (Wd == 0)
      return rr0(rc(1), Outputs);
    return rr0(eINS(rc(1), eXTR(rc(2), 0, Wd), Of), Outputs);
  }

  // Bit permutations:

  case A2_combineii:
  case A4_combineii:
  case A4_combineir:
  case A4_combineri:
  case A2_combinew:
  case V6_vcombine:
    assert(W0 % 2 == 0);
    return rr0(cop(2, W0/2).cat(cop(1, W0/2)), Outputs);
  case A2_combine_ll:
  case A2_combine_lh:
  case A2_combine_hl:
  case A2_combine_hh: {
    assert(W0 == 32);
    assert(getRegBitWidth(Reg[1]) == 32 && getRegBitWidth(Reg[2]) == 32);
    // Low half in the output is 0 for _ll and _hl, 1 otherwise:
    unsigned LoH = !(Opc == A2_combine_ll || Opc == A2_combine_hl);
    // High half in the output is 0 for _ll and _lh, 1 otherwise:
    unsigned HiH = !(Opc == A2_combine_ll || Opc == A2_combine_lh);
    RegisterCell R1 = rc(1);
    RegisterCell R2 = rc(2);
    RegisterCell RC = half(R2, LoH).cat(half(R1, HiH));
    return rr0(RC, Outputs);
  }
  case S2_packhl: {
    assert(W0 == 64);
    assert(getRegBitWidth(Reg[1]) == 32 && getRegBitWidth(Reg[2]) == 32);
    RegisterCell R1 = rc(1);
    RegisterCell R2 = rc(2);
    RegisterCell RC = half(R2, 0).cat(half(R1, 0)).cat(half(R2, 1))
                                 .cat(half(R1, 1));
    return rr0(RC, Outputs);
  }
  case S2_shuffeb: {
    RegisterCell RC = shuffle(rc(1), rc(2), 8, false);
    return rr0(RC, Outputs);
  }
  case S2_shuffeh: {
    RegisterCell RC = shuffle(rc(1), rc(2), 16, false);
    return rr0(RC, Outputs);
  }
  case S2_shuffob: {
    RegisterCell RC = shuffle(rc(1), rc(2), 8, true);
    return rr0(RC, Outputs);
  }
  case S2_shuffoh: {
    RegisterCell RC = shuffle(rc(1), rc(2), 16, true);
    return rr0(RC, Outputs);
  }
  case C2_mask: {
    uint16_t WR = W0;
    uint16_t WP = 8; // XXX Pred size: getRegBitWidth(Reg[1]);
    assert(WR == 64 && WP == 8);
    RegisterCell R1 = rc(1);
    RegisterCell RC(WR);
    for (uint16_t i = 0; i < WP; ++i) {
      const BT::BitValue &V = R1[i];
      BT::BitValue F = (V.is(0) || V.is(1)) ? V : BT::BitValue::self();
      RC.fill(i*8, i*8+8, F);
    }
    return rr0(RC, Outputs);
  }

  // Mux:
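  // If the predicate bit is known, the mux result is simply one of the two
  // inputs. If it is unknown, the inputs are combined with meet(): roughly,
  // bit positions where the two cells agree keep their value, while the
  // rest become opaque self-references (effectively unknown).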

  case C2_muxii:
  case C2_muxir:
  case C2_muxri:
  case C2_mux: {
    BT::BitValue PC0 = rc(1)[0];
    RegisterCell R2 = cop(2, W0);
    RegisterCell R3 = cop(3, W0);
    if (PC0.is(0) || PC0.is(1))
      return rr0(RegisterCell::ref(PC0 ? R2 : R3), Outputs);
    R2.meet(R3, Reg[0].Reg);
    return rr0(R2, Outputs);
  }
  case C2_vmux:
    // TODO
    break;

  // Sign- and zero-extension:

  case A2_sxtb:
    return rr0(eSXT(rc(1), 8), Outputs);
  case A2_sxth:
    return rr0(eSXT(rc(1), 16), Outputs);
  case A2_sxtw: {
    uint16_t W1 = getRegBitWidth(Reg[1]);
    assert(W0 == 64 && W1 == 32);
    RegisterCell RC = eSXT(rc(1).cat(eIMM(0, W1)), W1);
    return rr0(RC, Outputs);
  }
  case A2_zxtb:
    return rr0(eZXT(rc(1), 8), Outputs);
  case A2_zxth:
    return rr0(eZXT(rc(1), 16), Outputs);

  // Saturations

  case A2_satb:
    return rr0(eSXT(RegisterCell::self(0, W0).regify(Reg0), 8), Outputs);
  case A2_sath:
    return rr0(eSXT(RegisterCell::self(0, W0).regify(Reg0), 16), Outputs);
  case A2_satub:
    return rr0(eZXT(RegisterCell::self(0, W0).regify(Reg0), 8), Outputs);
  case A2_satuh:
    return rr0(eZXT(RegisterCell::self(0, W0).regify(Reg0), 16), Outputs);

  // Bit count:

  case S2_cl0:
  case S2_cl0p:
    // Always produce a 32-bit result.
    return rr0(eCLB(rc(1), false/*bit*/, 32), Outputs);
  case S2_cl1:
  case S2_cl1p:
    return rr0(eCLB(rc(1), true/*bit*/, 32), Outputs);
  case S2_clb:
  case S2_clbp: {
    uint16_t W1 = getRegBitWidth(Reg[1]);
    RegisterCell R1 = rc(1);
    BT::BitValue TV = R1[W1-1];
    if (TV.is(0) || TV.is(1))
      return rr0(eCLB(R1, TV, 32), Outputs);
    break;
  }
  case S2_ct0:
  case S2_ct0p:
    return rr0(eCTB(rc(1), false/*bit*/, 32), Outputs);
  case S2_ct1:
  case S2_ct1p:
    return rr0(eCTB(rc(1), true/*bit*/, 32), Outputs);
  case S5_popcountp:
    // TODO
    break;

  case C2_all8: {
    RegisterCell P1 = rc(1);
    bool Has0 = false, All1 = true;
    for (uint16_t i = 0; i < 8/*XXX*/; ++i) {
      if (!P1[i].is(1))
        All1 = false;
      if (!P1[i].is(0))
        continue;
      Has0 = true;
      break;
    }
    if (!Has0 && !All1)
      break;
    RegisterCell RC(W0);
    RC.fill(0, W0, (All1 ? BT::BitValue::One : BT::BitValue::Zero));
    return rr0(RC, Outputs);
  }
  case C2_any8: {
    RegisterCell P1 = rc(1);
    bool Has1 = false, All0 = true;
    for (uint16_t i = 0; i < 8/*XXX*/; ++i) {
      if (!P1[i].is(0))
        All0 = false;
      if (!P1[i].is(1))
        continue;
      Has1 = true;
      break;
    }
    if (!Has1 && !All0)
      break;
    RegisterCell RC(W0);
    RC.fill(0, W0, (Has1 ? BT::BitValue::One : BT::BitValue::Zero));
    return rr0(RC, Outputs);
  }
  case C2_and:
    return rr0(eAND(rc(1), rc(2)), Outputs);
  case C2_andn:
    return rr0(eAND(rc(1), eNOT(rc(2))), Outputs);
  case C2_not:
    return rr0(eNOT(rc(1)), Outputs);
  case C2_or:
    return rr0(eORL(rc(1), rc(2)), Outputs);
  case C2_orn:
    return rr0(eORL(rc(1), eNOT(rc(2))), Outputs);
  case C2_xor:
    return rr0(eXOR(rc(1), rc(2)), Outputs);
  case C4_and_and:
    return rr0(eAND(rc(1), eAND(rc(2), rc(3))), Outputs);
  case C4_and_andn:
    return rr0(eAND(rc(1), eAND(rc(2), eNOT(rc(3)))), Outputs);
  case C4_and_or:
    return rr0(eAND(rc(1), eORL(rc(2), rc(3))), Outputs);
  case C4_and_orn:
    return rr0(eAND(rc(1), eORL(rc(2), eNOT(rc(3)))), Outputs);
  case C4_or_and:
    return rr0(eORL(rc(1), eAND(rc(2), rc(3))), Outputs);
  case C4_or_andn:
    return rr0(eORL(rc(1), eAND(rc(2), eNOT(rc(3)))), Outputs);
  case C4_or_or:
    return rr0(eORL(rc(1), eORL(rc(2), rc(3))), Outputs);
  case C4_or_orn:
    return rr0(eORL(rc(1), eORL(rc(2), eNOT(rc(3)))), Outputs);
  case C2_bitsclr:
  case C2_bitsclri:
  case C2_bitsset:
  case C4_nbitsclr:
  case C4_nbitsclri:
  case C4_nbitsset:
    // TODO
    break;
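  // S2_tstbit_i tests a single bit of the source register (S4_ntstbit_i is
  // the negated form). When the tested bit is a known constant, the whole
  // predicate result becomes all-ones or all-zeros accordingly.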
  case S2_tstbit_i:
  case S4_ntstbit_i: {
    BT::BitValue V = rc(1)[im(2)];
    if (V.is(0) || V.is(1)) {
      // If instruction is S2_tstbit_i, test for 1, otherwise test for 0.
      bool TV = (Opc == S2_tstbit_i);
      BT::BitValue F = V.is(TV) ? BT::BitValue::One : BT::BitValue::Zero;
      return rr0(RegisterCell(W0).fill(0, W0, F), Outputs);
    }
    break;
  }

  default:
    // For instructions that define a single predicate register, store
    // the low 8 bits of the register only.
    if (unsigned DefR = getUniqueDefVReg(MI)) {
      if (MRI.getRegClass(DefR) == &Hexagon::PredRegsRegClass) {
        BT::RegisterRef PD(DefR, 0);
        uint16_t RW = getRegBitWidth(PD);
        uint16_t PW = 8; // XXX Pred size: getRegBitWidth(Reg[1]);
        RegisterCell RC = RegisterCell::self(DefR, RW);
        RC.fill(PW, RW, BT::BitValue::Zero);
        putCell(PD, RC, Outputs);
        return true;
      }
    }
    return MachineEvaluator::evaluate(MI, Inputs, Outputs);
  }
#undef im
#undef rc
#undef op
  return false;
}

bool HexagonEvaluator::evaluate(const MachineInstr &BI,
                                const CellMapType &Inputs,
                                BranchTargetList &Targets,
                                bool &FallsThru) const {
  // We need to evaluate one branch at a time. TII::analyzeBranch checks
  // all the branches in a basic block at once, so we cannot use it.
  unsigned Opc = BI.getOpcode();
  bool SimpleBranch = false;
  bool Negated = false;
  switch (Opc) {
  case Hexagon::J2_jumpf:
  case Hexagon::J2_jumpfpt:
  case Hexagon::J2_jumpfnew:
  case Hexagon::J2_jumpfnewpt:
    Negated = true;
    [[fallthrough]];
  case Hexagon::J2_jumpt:
  case Hexagon::J2_jumptpt:
  case Hexagon::J2_jumptnew:
  case Hexagon::J2_jumptnewpt:
    // Simple branch: if([!]Pn) jump ...
    // i.e. Op0 = predicate, Op1 = branch target.
    SimpleBranch = true;
    break;
  case Hexagon::J2_jump:
    Targets.insert(BI.getOperand(0).getMBB());
    FallsThru = false;
    return true;
  default:
    // If the branch is of unknown type, assume that all successors are
    // executable.
    return false;
  }

  if (!SimpleBranch)
    return false;

  // BI is a conditional branch if we got here.
  RegisterRef PR = BI.getOperand(0);
  RegisterCell PC = getCell(PR, Inputs);
  const BT::BitValue &Test = PC[0];

  // If the condition is neither true nor false, then it's unknown.
  if (!Test.is(0) && !Test.is(1))
    return false;

  // "Test.is(!Negated)" means "branch condition is true".
  if (!Test.is(!Negated)) {
    // Condition known to be false.
    FallsThru = true;
    return true;
  }

  Targets.insert(BI.getOperand(1).getMBB());
  FallsThru = false;
  return true;
}
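
// Return the virtual register defined by MI if it is the instruction's only
// def; return 0 if there is no virtual-register def or more than one.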
unsigned HexagonEvaluator::getUniqueDefVReg(const MachineInstr &MI) const {
  unsigned DefReg = 0;
  for (const MachineOperand &Op : MI.operands()) {
    if (!Op.isReg() || !Op.isDef())
      continue;
    Register R = Op.getReg();
    if (!R.isVirtual())
      continue;
    if (DefReg != 0)
      return 0;
    DefReg = R;
  }
  return DefReg;
}

bool HexagonEvaluator::evaluateLoad(const MachineInstr &MI,
                                    const CellMapType &Inputs,
                                    CellMapType &Outputs) const {
  using namespace Hexagon;

  if (TII.isPredicated(MI))
    return false;
  assert(MI.mayLoad() && "A load that mayn't?");
  unsigned Opc = MI.getOpcode();

  uint16_t BitNum;
  bool SignEx;

  switch (Opc) {
  default:
    return false;

#if 0
  // memb_fifo
  case L2_loadalignb_pbr:
  case L2_loadalignb_pcr:
  case L2_loadalignb_pi:
  // memh_fifo
  case L2_loadalignh_pbr:
  case L2_loadalignh_pcr:
  case L2_loadalignh_pi:
  // membh
  case L2_loadbsw2_pbr:
  case L2_loadbsw2_pci:
  case L2_loadbsw2_pcr:
  case L2_loadbsw2_pi:
  case L2_loadbsw4_pbr:
  case L2_loadbsw4_pci:
  case L2_loadbsw4_pcr:
  case L2_loadbsw4_pi:
  // memubh
  case L2_loadbzw2_pbr:
  case L2_loadbzw2_pci:
  case L2_loadbzw2_pcr:
  case L2_loadbzw2_pi:
  case L2_loadbzw4_pbr:
  case L2_loadbzw4_pci:
  case L2_loadbzw4_pcr:
  case L2_loadbzw4_pi:
#endif

  case L2_loadrbgp:
  case L2_loadrb_io:
  case L2_loadrb_pbr:
  case L2_loadrb_pci:
  case L2_loadrb_pcr:
  case L2_loadrb_pi:
  case PS_loadrbabs:
  case L4_loadrb_ap:
  case L4_loadrb_rr:
  case L4_loadrb_ur:
    BitNum = 8;
    SignEx = true;
    break;

  case L2_loadrubgp:
  case L2_loadrub_io:
  case L2_loadrub_pbr:
  case L2_loadrub_pci:
  case L2_loadrub_pcr:
  case L2_loadrub_pi:
  case PS_loadrubabs:
  case L4_loadrub_ap:
  case L4_loadrub_rr:
  case L4_loadrub_ur:
    BitNum = 8;
    SignEx = false;
    break;

  case L2_loadrhgp:
  case L2_loadrh_io:
  case L2_loadrh_pbr:
  case L2_loadrh_pci:
  case L2_loadrh_pcr:
  case L2_loadrh_pi:
  case PS_loadrhabs:
  case L4_loadrh_ap:
  case L4_loadrh_rr:
  case L4_loadrh_ur:
    BitNum = 16;
    SignEx = true;
    break;

  case L2_loadruhgp:
  case L2_loadruh_io:
  case L2_loadruh_pbr:
  case L2_loadruh_pci:
  case L2_loadruh_pcr:
  case L2_loadruh_pi:
  case L4_loadruh_rr:
  case PS_loadruhabs:
  case L4_loadruh_ap:
  case L4_loadruh_ur:
    BitNum = 16;
    SignEx = false;
    break;

  case L2_loadrigp:
  case L2_loadri_io:
  case L2_loadri_pbr:
  case L2_loadri_pci:
  case L2_loadri_pcr:
  case L2_loadri_pi:
  case L2_loadw_locked:
  case PS_loadriabs:
  case L4_loadri_ap:
  case L4_loadri_rr:
  case L4_loadri_ur:
  case LDriw_pred:
    BitNum = 32;
    SignEx = true;
    break;

  case L2_loadrdgp:
  case L2_loadrd_io:
  case L2_loadrd_pbr:
  case L2_loadrd_pci:
  case L2_loadrd_pcr:
  case L2_loadrd_pi:
  case L4_loadd_locked:
  case PS_loadrdabs:
  case L4_loadrd_ap:
  case L4_loadrd_rr:
  case L4_loadrd_ur:
    BitNum = 64;
    SignEx = true;
    break;
  }

  const MachineOperand &MD = MI.getOperand(0);
  assert(MD.isReg() && MD.isDef());
  RegisterRef RD = MD;

  uint16_t W = getRegBitWidth(RD);
  assert(W >= BitNum && BitNum > 0);
  RegisterCell Res(W);

  for (uint16_t i = 0; i < BitNum; ++i)
    Res[i] = BT::BitValue::self(BT::BitRef(RD.Reg, i));

  if (SignEx) {
    const BT::BitValue &Sign = Res[BitNum-1];
    for (uint16_t i = BitNum; i < W; ++i)
      Res[i] = BT::BitValue::ref(Sign);
  } else {
    for (uint16_t i = BitNum; i < W; ++i)
      Res[i] = BT::BitValue::Zero;
  }

  putCell(RD, Res, Outputs);
  return true;
}

bool HexagonEvaluator::evaluateFormalCopy(const MachineInstr &MI,
                                          const CellMapType &Inputs,
                                          CellMapType &Outputs) const {
  // If MI defines a formal parameter, but is not a copy (loads are handled
  // in evaluateLoad), then it's not clear what to do.
  assert(MI.isCopy());

  RegisterRef RD = MI.getOperand(0);
  RegisterRef RS = MI.getOperand(1);
  assert(RD.Sub == 0);
  if (!RS.Reg.isPhysical())
    return false;
  RegExtMap::const_iterator F = VRX.find(RD.Reg);
  if (F == VRX.end())
    return false;

  uint16_t EW = F->second.Width;
  // Store RD's cell into the map. This will associate the cell with a
  // virtual register, and make zero-/sign-extends possible (otherwise we
  // would be extending "self" bit values, which will have no effect, since
  // "self" values cannot be references to anything).
  putCell(RD, getCell(RS, Inputs), Outputs);

  RegisterCell Res;
  // Read RD's cell from the outputs instead of RS's cell from the inputs:
  if (F->second.Type == ExtType::SExt)
    Res = eSXT(getCell(RD, Outputs), EW);
  else if (F->second.Type == ExtType::ZExt)
    Res = eZXT(getCell(RD, Outputs), EW);

  putCell(RD, Res, Outputs);
  return true;
}
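
// Walk the fixed argument-register sequences (R0-R5 for 32-bit values,
// D0-D2 for 64-bit values) and return the register following PReg that can
// hold a value of the given width. For instance, if the previous argument
// went into R0 and the next one is 64 bits wide, the next register is D1,
// since D0 overlaps R0/R1 and a 64-bit argument cannot start in the middle
// of a register pair.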
unsigned HexagonEvaluator::getNextPhysReg(unsigned PReg, unsigned Width) const {
  using namespace Hexagon;

  bool Is64 = DoubleRegsRegClass.contains(PReg);
  assert(PReg == 0 || Is64 || IntRegsRegClass.contains(PReg));

  static const unsigned Phys32[] = { R0, R1, R2, R3, R4, R5 };
  static const unsigned Phys64[] = { D0, D1, D2 };
  const unsigned Num32 = sizeof(Phys32)/sizeof(unsigned);
  const unsigned Num64 = sizeof(Phys64)/sizeof(unsigned);

  // Return the first parameter register of the required width.
  if (PReg == 0)
    return (Width <= 32) ? Phys32[0] : Phys64[0];

  // Set Idx32, Idx64 in such a way that Idx+1 would give the index of the
  // next register.
  unsigned Idx32 = 0, Idx64 = 0;
  if (!Is64) {
    while (Idx32 < Num32) {
      if (Phys32[Idx32] == PReg)
        break;
      Idx32++;
    }
    Idx64 = Idx32/2;
  } else {
    while (Idx64 < Num64) {
      if (Phys64[Idx64] == PReg)
        break;
      Idx64++;
    }
    Idx32 = Idx64*2+1;
  }

  if (Width <= 32)
    return (Idx32+1 < Num32) ? Phys32[Idx32+1] : 0;
  return (Idx64+1 < Num64) ? Phys64[Idx64+1] : 0;
}

unsigned HexagonEvaluator::getVirtRegFor(unsigned PReg) const {
  for (std::pair<MCRegister, Register> P : MRI.liveins())
    if (P.first == PReg)
      return P.second;
  return 0;
}