//===- SIMemoryLegalizer.cpp ----------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// \brief Memory legalizer - implements memory model. More information can be
/// found here:
/// http://llvm.org/docs/AMDGPUUsage.html#memory-model
//
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "AMDGPUMachineModuleInfo.h"
#include "AMDGPUSubtarget.h"
#include "SIDefines.h"
#include "SIInstrInfo.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/Pass.h"
#include "llvm/Support/AtomicOrdering.h"
#include <cassert>
#include <list>

using namespace llvm;
using namespace llvm::AMDGPU;

#define DEBUG_TYPE "si-memory-legalizer"
#define PASS_NAME "SI Memory Legalizer"

namespace {

class SIMemOpInfo final {
private:
  SyncScope::ID SSID = SyncScope::System;
  AtomicOrdering Ordering = AtomicOrdering::NotAtomic;
  AtomicOrdering FailureOrdering = AtomicOrdering::NotAtomic;

  SIMemOpInfo(SyncScope::ID SSID, AtomicOrdering Ordering)
      : SSID(SSID), Ordering(Ordering) {}

  SIMemOpInfo(SyncScope::ID SSID, AtomicOrdering Ordering,
              AtomicOrdering FailureOrdering)
      : SSID(SSID), Ordering(Ordering), FailureOrdering(FailureOrdering) {}

  /// \returns Info constructed from \p MI, which has at least one machine
  /// memory operand.
  static Optional<SIMemOpInfo> constructFromMIWithMMO(
      const MachineBasicBlock::iterator &MI);

public:
  /// \returns Synchronization scope ID of the machine instruction used to
  /// create this SIMemOpInfo.
  SyncScope::ID getSSID() const {
    return SSID;
  }
  /// \returns Ordering constraint of the machine instruction used to
  /// create this SIMemOpInfo.
  AtomicOrdering getOrdering() const {
    return Ordering;
  }
  /// \returns Failure ordering constraint of the machine instruction used to
  /// create this SIMemOpInfo.
  AtomicOrdering getFailureOrdering() const {
    return FailureOrdering;
  }

  /// \returns True if ordering constraint of the machine instruction used to
  /// create this SIMemOpInfo is unordered or higher, false otherwise.
  bool isAtomic() const {
    return Ordering != AtomicOrdering::NotAtomic;
  }

  /// \returns Load info if \p MI is a load operation, "None" otherwise.
  static Optional<SIMemOpInfo> getLoadInfo(
      const MachineBasicBlock::iterator &MI);
  /// \returns Store info if \p MI is a store operation, "None" otherwise.
  static Optional<SIMemOpInfo> getStoreInfo(
      const MachineBasicBlock::iterator &MI);
  /// \returns Atomic fence info if \p MI is an atomic fence operation,
  /// "None" otherwise.
  static Optional<SIMemOpInfo> getAtomicFenceInfo(
      const MachineBasicBlock::iterator &MI);
  /// \returns Atomic cmpxchg info if \p MI is an atomic cmpxchg operation,
  /// "None" otherwise.
  static Optional<SIMemOpInfo> getAtomicCmpxchgInfo(
      const MachineBasicBlock::iterator &MI);
  /// \returns Atomic rmw info if \p MI is an atomic rmw operation,
  /// "None" otherwise.
  static Optional<SIMemOpInfo> getAtomicRmwInfo(
      const MachineBasicBlock::iterator &MI);

  /// \brief Reports unknown synchronization scope used in \p MI to LLVM
  /// context.
  static void reportUnknownSyncScope(
      const MachineBasicBlock::iterator &MI);
};

class SIMemoryLegalizer final : public MachineFunctionPass {
private:
  /// \brief Machine module info.
  const AMDGPUMachineModuleInfo *MMI = nullptr;

  /// \brief Instruction info.
  const SIInstrInfo *TII = nullptr;

  /// \brief Immediate for "vmcnt(0)".
  unsigned Vmcnt0Immediate = 0;

  /// \brief Opcode for cache invalidation instruction (L1).
  unsigned Wbinvl1Opcode = 0;

  /// \brief List of atomic pseudo instructions.
  std::list<MachineBasicBlock::iterator> AtomicPseudoMIs;

  /// \brief Inserts "buffer_wbinvl1_vol" instruction \p Before or after \p MI.
  /// Always returns true.
  bool insertBufferWbinvl1Vol(MachineBasicBlock::iterator &MI,
                              bool Before = true) const;
  /// \brief Inserts "s_waitcnt vmcnt(0)" instruction \p Before or after \p MI.
  /// Always returns true.
  bool insertWaitcntVmcnt0(MachineBasicBlock::iterator &MI,
                           bool Before = true) const;

  /// \brief Sets GLC bit if present in \p MI. Returns true if \p MI is
  /// modified, false otherwise.
  bool setGLC(const MachineBasicBlock::iterator &MI) const;

  /// \brief Removes all processed atomic pseudo instructions from the current
  /// function. Returns true if current function is modified, false otherwise.
  bool removeAtomicPseudoMIs();

  /// \brief Expands load operation \p MI. Returns true if instructions are
  /// added/deleted or \p MI is modified, false otherwise.
  bool expandLoad(const SIMemOpInfo &MOI,
                  MachineBasicBlock::iterator &MI);
  /// \brief Expands store operation \p MI. Returns true if instructions are
  /// added/deleted or \p MI is modified, false otherwise.
  bool expandStore(const SIMemOpInfo &MOI,
                   MachineBasicBlock::iterator &MI);
  /// \brief Expands atomic fence operation \p MI. Returns true if
  /// instructions are added/deleted or \p MI is modified, false otherwise.
  bool expandAtomicFence(const SIMemOpInfo &MOI,
                         MachineBasicBlock::iterator &MI);
  /// \brief Expands atomic cmpxchg operation \p MI. Returns true if
  /// instructions are added/deleted or \p MI is modified, false otherwise.
  bool expandAtomicCmpxchg(const SIMemOpInfo &MOI,
                           MachineBasicBlock::iterator &MI);
  /// \brief Expands atomic rmw operation \p MI. Returns true if
  /// instructions are added/deleted or \p MI is modified, false otherwise.
  bool expandAtomicRmw(const SIMemOpInfo &MOI,
                       MachineBasicBlock::iterator &MI);

public:
  static char ID;

  SIMemoryLegalizer() : MachineFunctionPass(ID) {}

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    MachineFunctionPass::getAnalysisUsage(AU);
  }

  StringRef getPassName() const override {
    return PASS_NAME;
  }

  bool runOnMachineFunction(MachineFunction &MF) override;
};

} // end namespace anonymous

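// Commentary (not part of the original file): constructFromMIWithMMO folds
// all memory operands of an instruction into a single (scope, ordering,
// failure ordering) triple by keeping the most inclusive synchronization
// scope and the strongest ordering seen across the MMOs. For example,
// combining one MMO with ("workgroup", monotonic) and another with
// ("agent", acquire) yields ("agent", acquire).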
/* static */
Optional<SIMemOpInfo> SIMemOpInfo::constructFromMIWithMMO(
    const MachineBasicBlock::iterator &MI) {
  assert(MI->getNumMemOperands() > 0);

  const MachineFunction *MF = MI->getParent()->getParent();
  const AMDGPUMachineModuleInfo *MMI =
      &MF->getMMI().getObjFileInfo<AMDGPUMachineModuleInfo>();

  SyncScope::ID SSID = SyncScope::SingleThread;
  AtomicOrdering Ordering = AtomicOrdering::NotAtomic;
  AtomicOrdering FailureOrdering = AtomicOrdering::NotAtomic;

  // Validator should check whether or not MMOs cover the entire set of
  // locations accessed by the memory instruction.
  for (const auto &MMO : MI->memoperands()) {
    const auto &IsSyncScopeInclusion =
        MMI->isSyncScopeInclusion(SSID, MMO->getSyncScopeID());
    if (!IsSyncScopeInclusion) {
      reportUnknownSyncScope(MI);
      return None;
    }

    SSID = IsSyncScopeInclusion.getValue() ? SSID : MMO->getSyncScopeID();
    Ordering =
        isStrongerThan(Ordering, MMO->getOrdering()) ?
            Ordering : MMO->getOrdering();
    FailureOrdering =
        isStrongerThan(FailureOrdering, MMO->getFailureOrdering()) ?
            FailureOrdering : MMO->getFailureOrdering();
  }

  return SIMemOpInfo(SSID, Ordering, FailureOrdering);
}

/* static */
Optional<SIMemOpInfo> SIMemOpInfo::getLoadInfo(
    const MachineBasicBlock::iterator &MI) {
  assert(MI->getDesc().TSFlags & SIInstrFlags::maybeAtomic);

  if (!(MI->mayLoad() && !MI->mayStore()))
    return None;

  // Be conservative if there are no memory operands.
  if (MI->getNumMemOperands() == 0)
    return SIMemOpInfo(SyncScope::System,
                       AtomicOrdering::SequentiallyConsistent);

  return SIMemOpInfo::constructFromMIWithMMO(MI);
}

/* static */
Optional<SIMemOpInfo> SIMemOpInfo::getStoreInfo(
    const MachineBasicBlock::iterator &MI) {
  assert(MI->getDesc().TSFlags & SIInstrFlags::maybeAtomic);

  if (!(!MI->mayLoad() && MI->mayStore()))
    return None;

  // Be conservative if there are no memory operands.
  if (MI->getNumMemOperands() == 0)
    return SIMemOpInfo(SyncScope::System,
                       AtomicOrdering::SequentiallyConsistent);

  return SIMemOpInfo::constructFromMIWithMMO(MI);
}

/* static */
Optional<SIMemOpInfo> SIMemOpInfo::getAtomicFenceInfo(
    const MachineBasicBlock::iterator &MI) {
  assert(MI->getDesc().TSFlags & SIInstrFlags::maybeAtomic);

  if (MI->getOpcode() != AMDGPU::ATOMIC_FENCE)
    return None;

  SyncScope::ID SSID =
      static_cast<SyncScope::ID>(MI->getOperand(1).getImm());
  AtomicOrdering Ordering =
      static_cast<AtomicOrdering>(MI->getOperand(0).getImm());
  return SIMemOpInfo(SSID, Ordering);
}

/* static */
Optional<SIMemOpInfo> SIMemOpInfo::getAtomicCmpxchgInfo(
    const MachineBasicBlock::iterator &MI) {
  assert(MI->getDesc().TSFlags & SIInstrFlags::maybeAtomic);

  if (!(MI->mayLoad() && MI->mayStore()))
    return None;

  // Be conservative if there are no memory operands.
  if (MI->getNumMemOperands() == 0)
    return SIMemOpInfo(SyncScope::System,
                       AtomicOrdering::SequentiallyConsistent,
                       AtomicOrdering::SequentiallyConsistent);

  return SIMemOpInfo::constructFromMIWithMMO(MI);
}

/* static */
Optional<SIMemOpInfo> SIMemOpInfo::getAtomicRmwInfo(
    const MachineBasicBlock::iterator &MI) {
  assert(MI->getDesc().TSFlags & SIInstrFlags::maybeAtomic);

  if (!(MI->mayLoad() && MI->mayStore()))
    return None;

  // Be conservative if there are no memory operands.
  if (MI->getNumMemOperands() == 0)
    return SIMemOpInfo(SyncScope::System,
                       AtomicOrdering::SequentiallyConsistent);

  return SIMemOpInfo::constructFromMIWithMMO(MI);
}

/* static */
void SIMemOpInfo::reportUnknownSyncScope(
    const MachineBasicBlock::iterator &MI) {
  DiagnosticInfoUnsupported Diag(*MI->getParent()->getParent()->getFunction(),
                                 "Unsupported synchronization scope");
  LLVMContext *CTX = &MI->getParent()->getParent()->getFunction()->getContext();
  CTX->diagnose(Diag);
}

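// Commentary (not part of the original file) on the two helpers below:
// "s_waitcnt vmcnt(0)" stalls the wave until all of its outstanding vector
// memory operations have completed, and "buffer_wbinvl1_vol" invalidates the
// volatile lines of the L1 vector cache. Roughly speaking, the former
// provides the release half and the latter the acquire half of the model
// described at http://llvm.org/docs/AMDGPUUsage.html#memory-model.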
bool SIMemoryLegalizer::insertBufferWbinvl1Vol(MachineBasicBlock::iterator &MI,
                                               bool Before) const {
  MachineBasicBlock &MBB = *MI->getParent();
  DebugLoc DL = MI->getDebugLoc();

  if (!Before)
    ++MI;

  BuildMI(MBB, MI, DL, TII->get(Wbinvl1Opcode));

  if (!Before)
    --MI;

  return true;
}

bool SIMemoryLegalizer::insertWaitcntVmcnt0(MachineBasicBlock::iterator &MI,
                                            bool Before) const {
  MachineBasicBlock &MBB = *MI->getParent();
  DebugLoc DL = MI->getDebugLoc();

  if (!Before)
    ++MI;

  BuildMI(MBB, MI, DL, TII->get(AMDGPU::S_WAITCNT)).addImm(Vmcnt0Immediate);

  if (!Before)
    --MI;

  return true;
}

bool SIMemoryLegalizer::setGLC(const MachineBasicBlock::iterator &MI) const {
  int GLCIdx = AMDGPU::getNamedOperandIdx(MI->getOpcode(), AMDGPU::OpName::glc);
  if (GLCIdx == -1)
    return false;

  MachineOperand &GLC = MI->getOperand(GLCIdx);
  if (GLC.getImm() == 1)
    return false;

  GLC.setImm(1);
  return true;
}

bool SIMemoryLegalizer::removeAtomicPseudoMIs() {
  if (AtomicPseudoMIs.empty())
    return false;

  for (auto &MI : AtomicPseudoMIs)
    MI->eraseFromParent();

  AtomicPseudoMIs.clear();
  return true;
}

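// Commentary (not part of the original file): at system and agent scope, an
// acquire load gets its glc bit set, so the value is not served from a
// possibly stale L1 line, and is trailed by an "s_waitcnt vmcnt(0)" /
// "buffer_wbinvl1_vol" pair; seq_cst additionally prepends
// "s_waitcnt vmcnt(0)". A seq_cst load is thus rewritten roughly as:
//
//   s_waitcnt vmcnt(0)
//   <load> glc
//   s_waitcnt vmcnt(0)
//   buffer_wbinvl1_vol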
bool SIMemoryLegalizer::expandLoad(const SIMemOpInfo &MOI,
                                   MachineBasicBlock::iterator &MI) {
  assert(MI->mayLoad() && !MI->mayStore());

  bool Changed = false;

  if (MOI.isAtomic()) {
    if (MOI.getSSID() == SyncScope::System ||
        MOI.getSSID() == MMI->getAgentSSID()) {
      if (MOI.getOrdering() == AtomicOrdering::Acquire ||
          MOI.getOrdering() == AtomicOrdering::SequentiallyConsistent)
        Changed |= setGLC(MI);

      if (MOI.getOrdering() == AtomicOrdering::SequentiallyConsistent)
        Changed |= insertWaitcntVmcnt0(MI);

      if (MOI.getOrdering() == AtomicOrdering::Acquire ||
          MOI.getOrdering() == AtomicOrdering::SequentiallyConsistent) {
        Changed |= insertWaitcntVmcnt0(MI, false);
        Changed |= insertBufferWbinvl1Vol(MI, false);
      }

      return Changed;
    }

    if (MOI.getSSID() == SyncScope::SingleThread ||
        MOI.getSSID() == MMI->getWorkgroupSSID() ||
        MOI.getSSID() == MMI->getWavefrontSSID()) {
      return Changed;
    }

    llvm_unreachable("Unsupported synchronization scope");
  }

  return Changed;
}

bool SIMemoryLegalizer::expandStore(const SIMemOpInfo &MOI,
                                    MachineBasicBlock::iterator &MI) {
  assert(!MI->mayLoad() && MI->mayStore());

  bool Changed = false;

  if (MOI.isAtomic()) {
    if (MOI.getSSID() == SyncScope::System ||
        MOI.getSSID() == MMI->getAgentSSID()) {
      if (MOI.getOrdering() == AtomicOrdering::Release ||
          MOI.getOrdering() == AtomicOrdering::SequentiallyConsistent)
        Changed |= insertWaitcntVmcnt0(MI);

      return Changed;
    }

    if (MOI.getSSID() == SyncScope::SingleThread ||
        MOI.getSSID() == MMI->getWorkgroupSSID() ||
        MOI.getSSID() == MMI->getWavefrontSSID()) {
      return Changed;
    }

    llvm_unreachable("Unsupported synchronization scope");
  }

  return Changed;
}

bool SIMemoryLegalizer::expandAtomicFence(const SIMemOpInfo &MOI,
                                          MachineBasicBlock::iterator &MI) {
  assert(MI->getOpcode() == AMDGPU::ATOMIC_FENCE);

  bool Changed = false;

  if (MOI.isAtomic()) {
    if (MOI.getSSID() == SyncScope::System ||
        MOI.getSSID() == MMI->getAgentSSID()) {
      if (MOI.getOrdering() == AtomicOrdering::Acquire ||
          MOI.getOrdering() == AtomicOrdering::Release ||
          MOI.getOrdering() == AtomicOrdering::AcquireRelease ||
          MOI.getOrdering() == AtomicOrdering::SequentiallyConsistent)
        Changed |= insertWaitcntVmcnt0(MI);

      if (MOI.getOrdering() == AtomicOrdering::Acquire ||
          MOI.getOrdering() == AtomicOrdering::AcquireRelease ||
          MOI.getOrdering() == AtomicOrdering::SequentiallyConsistent)
        Changed |= insertBufferWbinvl1Vol(MI);

      AtomicPseudoMIs.push_back(MI);
      return Changed;
    }

    if (MOI.getSSID() == SyncScope::SingleThread ||
        MOI.getSSID() == MMI->getWorkgroupSSID() ||
        MOI.getSSID() == MMI->getWavefrontSSID()) {
      AtomicPseudoMIs.push_back(MI);
      return Changed;
    }

    SIMemOpInfo::reportUnknownSyncScope(MI);
  }

  return Changed;
}

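// Commentary (not part of the original file): the cmpxchg expansion below
// consults MOI.getFailureOrdering() in addition to MOI.getOrdering(), because
// a compare-exchange that fails its comparison still synchronizes according
// to its failure ordering; effectively the stronger of the two decides which
// waits and invalidates are emitted.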
bool SIMemoryLegalizer::expandAtomicCmpxchg(const SIMemOpInfo &MOI,
                                            MachineBasicBlock::iterator &MI) {
  assert(MI->mayLoad() && MI->mayStore());

  bool Changed = false;

  if (MOI.isAtomic()) {
    if (MOI.getSSID() == SyncScope::System ||
        MOI.getSSID() == MMI->getAgentSSID()) {
      if (MOI.getOrdering() == AtomicOrdering::Release ||
          MOI.getOrdering() == AtomicOrdering::AcquireRelease ||
          MOI.getOrdering() == AtomicOrdering::SequentiallyConsistent ||
          MOI.getFailureOrdering() == AtomicOrdering::SequentiallyConsistent)
        Changed |= insertWaitcntVmcnt0(MI);

      if (MOI.getOrdering() == AtomicOrdering::Acquire ||
          MOI.getOrdering() == AtomicOrdering::AcquireRelease ||
          MOI.getOrdering() == AtomicOrdering::SequentiallyConsistent ||
          MOI.getFailureOrdering() == AtomicOrdering::Acquire ||
          MOI.getFailureOrdering() == AtomicOrdering::SequentiallyConsistent) {
        Changed |= insertWaitcntVmcnt0(MI, false);
        Changed |= insertBufferWbinvl1Vol(MI, false);
      }

      return Changed;
    }

    if (MOI.getSSID() == SyncScope::SingleThread ||
        MOI.getSSID() == MMI->getWorkgroupSSID() ||
        MOI.getSSID() == MMI->getWavefrontSSID()) {
      Changed |= setGLC(MI);
      return Changed;
    }

    llvm_unreachable("Unsupported synchronization scope");
  }

  return Changed;
}

bool SIMemoryLegalizer::expandAtomicRmw(const SIMemOpInfo &MOI,
                                        MachineBasicBlock::iterator &MI) {
  assert(MI->mayLoad() && MI->mayStore());

  bool Changed = false;

  if (MOI.isAtomic()) {
    if (MOI.getSSID() == SyncScope::System ||
        MOI.getSSID() == MMI->getAgentSSID()) {
      if (MOI.getOrdering() == AtomicOrdering::Release ||
          MOI.getOrdering() == AtomicOrdering::AcquireRelease ||
          MOI.getOrdering() == AtomicOrdering::SequentiallyConsistent)
        Changed |= insertWaitcntVmcnt0(MI);

      if (MOI.getOrdering() == AtomicOrdering::Acquire ||
          MOI.getOrdering() == AtomicOrdering::AcquireRelease ||
          MOI.getOrdering() == AtomicOrdering::SequentiallyConsistent) {
        Changed |= insertWaitcntVmcnt0(MI, false);
        Changed |= insertBufferWbinvl1Vol(MI, false);
      }

      return Changed;
    }

    if (MOI.getSSID() == SyncScope::SingleThread ||
        MOI.getSSID() == MMI->getWorkgroupSSID() ||
        MOI.getSSID() == MMI->getWavefrontSSID()) {
      Changed |= setGLC(MI);
      return Changed;
    }

    llvm_unreachable("Unsupported synchronization scope");
  }

  return Changed;
}

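// Commentary (not part of the original file): the driver below walks every
// instruction marked maybeAtomic and dispatches to the matching expansion.
// Note the vmcnt(0) encoding: expcnt and lgkmcnt are passed as their all-ones
// bit masks, i.e. "do not wait" on those counters, so the emitted s_waitcnt
// waits only for vector memory operations.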
bool SIMemoryLegalizer::runOnMachineFunction(MachineFunction &MF) {
  bool Changed = false;
  const SISubtarget &ST = MF.getSubtarget<SISubtarget>();
  const IsaInfo::IsaVersion IV = IsaInfo::getIsaVersion(ST.getFeatureBits());

  MMI = &MF.getMMI().getObjFileInfo<AMDGPUMachineModuleInfo>();
  TII = ST.getInstrInfo();

  Vmcnt0Immediate =
      AMDGPU::encodeWaitcnt(IV, 0, getExpcntBitMask(IV), getLgkmcntBitMask(IV));
  Wbinvl1Opcode = ST.getGeneration() <= AMDGPUSubtarget::SOUTHERN_ISLANDS ?
      AMDGPU::BUFFER_WBINVL1 : AMDGPU::BUFFER_WBINVL1_VOL;

  for (auto &MBB : MF) {
    for (auto MI = MBB.begin(); MI != MBB.end(); ++MI) {
      if (!(MI->getDesc().TSFlags & SIInstrFlags::maybeAtomic))
        continue;

      if (const auto &MOI = SIMemOpInfo::getLoadInfo(MI))
        Changed |= expandLoad(MOI.getValue(), MI);
      else if (const auto &MOI = SIMemOpInfo::getStoreInfo(MI))
        Changed |= expandStore(MOI.getValue(), MI);
      else if (const auto &MOI = SIMemOpInfo::getAtomicFenceInfo(MI))
        Changed |= expandAtomicFence(MOI.getValue(), MI);
      else if (const auto &MOI = SIMemOpInfo::getAtomicCmpxchgInfo(MI))
        Changed |= expandAtomicCmpxchg(MOI.getValue(), MI);
      else if (const auto &MOI = SIMemOpInfo::getAtomicRmwInfo(MI))
        Changed |= expandAtomicRmw(MOI.getValue(), MI);
    }
  }

  Changed |= removeAtomicPseudoMIs();
  return Changed;
}

INITIALIZE_PASS(SIMemoryLegalizer, DEBUG_TYPE, PASS_NAME, false, false)

char SIMemoryLegalizer::ID = 0;
char &llvm::SIMemoryLegalizerID = SIMemoryLegalizer::ID;

FunctionPass *llvm::createSIMemoryLegalizerPass() {
  return new SIMemoryLegalizer();
}