//===-- RISCVInstrInfoA.td - RISC-V 'A' instructions -------*- tablegen -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file describes the RISC-V instructions from the standard 'A', Atomic
// Instructions extension.
//
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// Instruction class templates
//===----------------------------------------------------------------------===//

let hasSideEffects = 0, mayLoad = 1, mayStore = 0 in
class LR_r<bit aq, bit rl, bits<3> funct3, string opcodestr>
    : RVInstRAtomic<0b00010, aq, rl, funct3, OPC_AMO,
                    (outs GPR:$rd), (ins GPRMemZeroOffset:$rs1),
                    opcodestr, "$rd, $rs1"> {
  let rs2 = 0;
}

multiclass LR_r_aq_rl<bits<3> funct3, string opcodestr> {
  def ""     : LR_r<0, 0, funct3, opcodestr>;
  def _AQ    : LR_r<1, 0, funct3, opcodestr # ".aq">;
  def _RL    : LR_r<0, 1, funct3, opcodestr # ".rl">;
  def _AQ_RL : LR_r<1, 1, funct3, opcodestr # ".aqrl">;
}

let hasSideEffects = 0, mayLoad = 0, mayStore = 1 in
class SC_r<bit aq, bit rl, bits<3> funct3, string opcodestr>
    : RVInstRAtomic<0b00011, aq, rl, funct3, OPC_AMO,
                    (outs GPR:$rd), (ins GPRMemZeroOffset:$rs1, GPR:$rs2),
                    opcodestr, "$rd, $rs2, $rs1">;

multiclass SC_r_aq_rl<bits<3> funct3, string opcodestr> {
  def ""     : SC_r<0, 0, funct3, opcodestr>;
  def _AQ    : SC_r<1, 0, funct3, opcodestr # ".aq">;
  def _RL    : SC_r<0, 1, funct3, opcodestr # ".rl">;
  def _AQ_RL : SC_r<1, 1, funct3, opcodestr # ".aqrl">;
}

let hasSideEffects = 0, mayLoad = 1, mayStore = 1 in
class AMO_rr<bits<5> funct5, bit aq, bit rl, bits<3> funct3, string opcodestr>
    : RVInstRAtomic<funct5, aq, rl, funct3, OPC_AMO,
                    (outs GPR:$rd), (ins GPRMemZeroOffset:$rs1, GPR:$rs2),
                    opcodestr, "$rd, $rs2, $rs1">;

multiclass AMO_rr_aq_rl<bits<5> funct5, bits<3> funct3, string opcodestr> {
  def ""     : AMO_rr<funct5, 0, 0, funct3, opcodestr>;
  def _AQ    : AMO_rr<funct5, 1, 0, funct3, opcodestr # ".aq">;
  def _RL    : AMO_rr<funct5, 0, 1, funct3, opcodestr # ".rl">;
  def _AQ_RL : AMO_rr<funct5, 1, 1, funct3, opcodestr # ".aqrl">;
}

//===----------------------------------------------------------------------===//
// Instructions
//===----------------------------------------------------------------------===//

let Predicates = [HasStdExtZalrsc], IsSignExtendingOpW = 1 in {
defm LR_W : LR_r_aq_rl<0b010, "lr.w">, Sched<[WriteAtomicLDW, ReadAtomicLDW]>;
defm SC_W : SC_r_aq_rl<0b010, "sc.w">,
            Sched<[WriteAtomicSTW, ReadAtomicSTW, ReadAtomicSTW]>;
} // Predicates = [HasStdExtZalrsc], IsSignExtendingOpW = 1

let Predicates = [HasStdExtZaamo], IsSignExtendingOpW = 1 in {
defm AMOSWAP_W  : AMO_rr_aq_rl<0b00001, 0b010, "amoswap.w">,
                  Sched<[WriteAtomicW, ReadAtomicWA, ReadAtomicWD]>;
defm AMOADD_W   : AMO_rr_aq_rl<0b00000, 0b010, "amoadd.w">,
                  Sched<[WriteAtomicW, ReadAtomicWA, ReadAtomicWD]>;
defm AMOXOR_W   : AMO_rr_aq_rl<0b00100, 0b010, "amoxor.w">,
                  Sched<[WriteAtomicW, ReadAtomicWA, ReadAtomicWD]>;
defm AMOAND_W   : AMO_rr_aq_rl<0b01100, 0b010, "amoand.w">,
                  Sched<[WriteAtomicW, ReadAtomicWA, ReadAtomicWD]>;
defm AMOOR_W    : AMO_rr_aq_rl<0b01000, 0b010, "amoor.w">,
                  Sched<[WriteAtomicW, ReadAtomicWA, ReadAtomicWD]>;
defm AMOMIN_W   : AMO_rr_aq_rl<0b10000, 0b010, "amomin.w">,
                  Sched<[WriteAtomicW, ReadAtomicWA, ReadAtomicWD]>;
defm AMOMAX_W   : AMO_rr_aq_rl<0b10100, 0b010, "amomax.w">,
                  Sched<[WriteAtomicW, ReadAtomicWA, ReadAtomicWD]>;
defm AMOMINU_W  : AMO_rr_aq_rl<0b11000, 0b010, "amominu.w">,
                  Sched<[WriteAtomicW, ReadAtomicWA, ReadAtomicWD]>;
defm AMOMAXU_W  : AMO_rr_aq_rl<0b11100, 0b010, "amomaxu.w">,
                  Sched<[WriteAtomicW, ReadAtomicWA, ReadAtomicWD]>;
} // Predicates = [HasStdExtZaamo], IsSignExtendingOpW = 1

let Predicates = [HasStdExtZalrsc, IsRV64] in {
defm LR_D : LR_r_aq_rl<0b011, "lr.d">, Sched<[WriteAtomicLDD, ReadAtomicLDD]>;
defm SC_D : SC_r_aq_rl<0b011, "sc.d">,
            Sched<[WriteAtomicSTD, ReadAtomicSTD, ReadAtomicSTD]>;
} // Predicates = [HasStdExtZalrsc, IsRV64]

let Predicates = [HasStdExtZaamo, IsRV64] in {
defm AMOSWAP_D  : AMO_rr_aq_rl<0b00001, 0b011, "amoswap.d">,
                  Sched<[WriteAtomicD, ReadAtomicDA, ReadAtomicDD]>;
defm AMOADD_D   : AMO_rr_aq_rl<0b00000, 0b011, "amoadd.d">,
                  Sched<[WriteAtomicD, ReadAtomicDA, ReadAtomicDD]>;
defm AMOXOR_D   : AMO_rr_aq_rl<0b00100, 0b011, "amoxor.d">,
                  Sched<[WriteAtomicD, ReadAtomicDA, ReadAtomicDD]>;
defm AMOAND_D   : AMO_rr_aq_rl<0b01100, 0b011, "amoand.d">,
                  Sched<[WriteAtomicD, ReadAtomicDA, ReadAtomicDD]>;
defm AMOOR_D    : AMO_rr_aq_rl<0b01000, 0b011, "amoor.d">,
                  Sched<[WriteAtomicD, ReadAtomicDA, ReadAtomicDD]>;
defm AMOMIN_D   : AMO_rr_aq_rl<0b10000, 0b011, "amomin.d">,
                  Sched<[WriteAtomicD, ReadAtomicDA, ReadAtomicDD]>;
defm AMOMAX_D   : AMO_rr_aq_rl<0b10100, 0b011, "amomax.d">,
                  Sched<[WriteAtomicD, ReadAtomicDA, ReadAtomicDD]>;
defm AMOMINU_D  : AMO_rr_aq_rl<0b11000, 0b011, "amominu.d">,
                  Sched<[WriteAtomicD, ReadAtomicDA, ReadAtomicDD]>;
defm AMOMAXU_D  : AMO_rr_aq_rl<0b11100, 0b011, "amomaxu.d">,
                  Sched<[WriteAtomicD, ReadAtomicDA, ReadAtomicDD]>;
} // Predicates = [HasStdExtZaamo, IsRV64]

//===----------------------------------------------------------------------===//
// Pseudo-instructions and codegen patterns
//===----------------------------------------------------------------------===//

let IsAtomic = 1 in {
// An atomic load operation that does not need either acquire or release
// semantics.
class relaxed_load<PatFrags base>
  : PatFrag<(ops node:$ptr), (base node:$ptr)> {
  let IsAtomicOrderingAcquireOrStronger = 0;
}

// An atomic load operation that actually needs acquire semantics.
class acquiring_load<PatFrags base>
  : PatFrag<(ops node:$ptr), (base node:$ptr)> {
  let IsAtomicOrderingAcquire = 1;
}

// An atomic load operation that needs sequential consistency.
class seq_cst_load<PatFrags base>
  : PatFrag<(ops node:$ptr), (base node:$ptr)> {
  let IsAtomicOrderingSequentiallyConsistent = 1;
}

// An atomic store operation that does not need either acquire or release
// semantics.
class relaxed_store<PatFrag base>
  : PatFrag<(ops node:$val, node:$ptr), (base node:$val, node:$ptr)> {
  let IsAtomicOrderingReleaseOrStronger = 0;
}

// A store operation that actually needs release semantics.
class releasing_store<PatFrag base>
  : PatFrag<(ops node:$val, node:$ptr), (base node:$val, node:$ptr)> {
  let IsAtomicOrderingRelease = 1;
}

// A store operation that actually needs sequential consistency.
class seq_cst_store<PatFrag base>
  : PatFrag<(ops node:$val, node:$ptr), (base node:$val, node:$ptr)> {
  let IsAtomicOrderingSequentiallyConsistent = 1;
}
} // IsAtomic = 1

// Atomic load/store are available under both +a and +force-atomics.
// Fences will be inserted for atomic load/stores according to the logic in
// RISCVTargetLowering::{emitLeadingFence,emitTrailingFence}.
// The normal loads/stores are relaxed (unordered) loads/stores that don't have
// any ordering. This is necessary because AtomicExpandPass has added fences to
// atomic load/stores and changed them to unordered ones.
let Predicates = [HasAtomicLdSt] in {
  def : LdPat<relaxed_load<atomic_load_8>,  LB>;
  def : LdPat<relaxed_load<atomic_load_16>, LH>;
  def : LdPat<relaxed_load<atomic_load_32>, LW>;

  def : StPat<relaxed_store<atomic_store_8>,  SB, GPR, XLenVT>;
  def : StPat<relaxed_store<atomic_store_16>, SH, GPR, XLenVT>;
  def : StPat<relaxed_store<atomic_store_32>, SW, GPR, XLenVT>;
}

let Predicates = [HasAtomicLdSt, IsRV64] in {
  def : LdPat<relaxed_load<atomic_load_64>, LD, i64>;
  def : StPat<relaxed_store<atomic_store_64>, SD, GPR, i64>;
}

/// AMOs

multiclass AMOPat<string AtomicOp, string BaseInst, ValueType vt = XLenVT,
                  list<Predicate> ExtraPreds = []> {
let Predicates = !listconcat([HasStdExtA, NotHasStdExtZtso], ExtraPreds) in {
  def : PatGprGpr<!cast<PatFrag>(AtomicOp#"_monotonic"),
                  !cast<RVInst>(BaseInst), vt>;
  def : PatGprGpr<!cast<PatFrag>(AtomicOp#"_acquire"),
                  !cast<RVInst>(BaseInst#"_AQ"), vt>;
  def : PatGprGpr<!cast<PatFrag>(AtomicOp#"_release"),
                  !cast<RVInst>(BaseInst#"_RL"), vt>;
  def : PatGprGpr<!cast<PatFrag>(AtomicOp#"_acq_rel"),
                  !cast<RVInst>(BaseInst#"_AQ_RL"), vt>;
  def : PatGprGpr<!cast<PatFrag>(AtomicOp#"_seq_cst"),
                  !cast<RVInst>(BaseInst#"_AQ_RL"), vt>;
}
let Predicates = !listconcat([HasStdExtA, HasStdExtZtso], ExtraPreds) in {
  def : PatGprGpr<!cast<PatFrag>(AtomicOp#"_monotonic"),
                  !cast<RVInst>(BaseInst), vt>;
  def : PatGprGpr<!cast<PatFrag>(AtomicOp#"_acquire"),
                  !cast<RVInst>(BaseInst), vt>;
  def : PatGprGpr<!cast<PatFrag>(AtomicOp#"_release"),
                  !cast<RVInst>(BaseInst), vt>;
  def : PatGprGpr<!cast<PatFrag>(AtomicOp#"_acq_rel"),
                  !cast<RVInst>(BaseInst), vt>;
  def : PatGprGpr<!cast<PatFrag>(AtomicOp#"_seq_cst"),
                  !cast<RVInst>(BaseInst), vt>;
}
}

defm : AMOPat<"atomic_swap_i32", "AMOSWAP_W">;
defm : AMOPat<"atomic_load_add_i32", "AMOADD_W">;
defm : AMOPat<"atomic_load_and_i32", "AMOAND_W">;
defm : AMOPat<"atomic_load_or_i32", "AMOOR_W">;
defm : AMOPat<"atomic_load_xor_i32", "AMOXOR_W">;
defm : AMOPat<"atomic_load_max_i32", "AMOMAX_W">;
defm : AMOPat<"atomic_load_min_i32", "AMOMIN_W">;
defm : AMOPat<"atomic_load_umax_i32", "AMOMAXU_W">;
defm : AMOPat<"atomic_load_umin_i32", "AMOMINU_W">;

defm : AMOPat<"atomic_swap_i64", "AMOSWAP_D", i64, [IsRV64]>;
defm : AMOPat<"atomic_load_add_i64", "AMOADD_D", i64, [IsRV64]>;
defm : AMOPat<"atomic_load_and_i64", "AMOAND_D", i64, [IsRV64]>;
defm : AMOPat<"atomic_load_or_i64", "AMOOR_D", i64, [IsRV64]>;
defm : AMOPat<"atomic_load_xor_i64", "AMOXOR_D", i64, [IsRV64]>;
defm : AMOPat<"atomic_load_max_i64", "AMOMAX_D", i64, [IsRV64]>;
defm : AMOPat<"atomic_load_min_i64", "AMOMIN_D", i64, [IsRV64]>;
defm : AMOPat<"atomic_load_umax_i64", "AMOMAXU_D", i64, [IsRV64]>;
defm : AMOPat<"atomic_load_umin_i64", "AMOMINU_D", i64, [IsRV64]>;
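
// For illustration only (a reading of the AMOPat multiclass above, not extra
// patterns): under WMO (NotHasStdExtZtso), AMOPat<"atomic_swap_i32",
// "AMOSWAP_W"> selects AMOSWAP_W for monotonic, AMOSWAP_W_AQ for acquire,
// AMOSWAP_W_RL for release, and AMOSWAP_W_AQ_RL for both acq_rel and seq_cst;
// under Ztso every ordering maps to the plain AMOSWAP_W encoding.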

/// Pseudo AMOs

class PseudoAMO : Pseudo<(outs GPR:$res, GPR:$scratch),
                         (ins GPR:$addr, GPR:$incr, ixlenimm:$ordering), []> {
  let Constraints = "@earlyclobber $res,@earlyclobber $scratch";
  let mayLoad = 1;
  let mayStore = 1;
  let hasSideEffects = 0;
}

class PseudoMaskedAMO
    : Pseudo<(outs GPR:$res, GPR:$scratch),
             (ins GPR:$addr, GPR:$incr, GPR:$mask, ixlenimm:$ordering), []> {
  let Constraints = "@earlyclobber $res,@earlyclobber $scratch";
  let mayLoad = 1;
  let mayStore = 1;
  let hasSideEffects = 0;
}

class PseudoMaskedAMOMinMax
    : Pseudo<(outs GPR:$res, GPR:$scratch1, GPR:$scratch2),
             (ins GPR:$addr, GPR:$incr, GPR:$mask, ixlenimm:$sextshamt,
              ixlenimm:$ordering), []> {
  let Constraints = "@earlyclobber $res,@earlyclobber $scratch1,"
                    "@earlyclobber $scratch2";
  let mayLoad = 1;
  let mayStore = 1;
  let hasSideEffects = 0;
}

class PseudoMaskedAMOUMinUMax
    : Pseudo<(outs GPR:$res, GPR:$scratch1, GPR:$scratch2),
             (ins GPR:$addr, GPR:$incr, GPR:$mask, ixlenimm:$ordering), []> {
  let Constraints = "@earlyclobber $res,@earlyclobber $scratch1,"
                    "@earlyclobber $scratch2";
  let mayLoad = 1;
  let mayStore = 1;
  let hasSideEffects = 0;
}

class PseudoMaskedAMOPat<Intrinsic intrin, Pseudo AMOInst>
    : Pat<(intrin GPR:$addr, GPR:$incr, GPR:$mask, timm:$ordering),
          (AMOInst GPR:$addr, GPR:$incr, GPR:$mask, timm:$ordering)>;

class PseudoMaskedAMOMinMaxPat<Intrinsic intrin, Pseudo AMOInst>
    : Pat<(intrin GPR:$addr, GPR:$incr, GPR:$mask, GPR:$shiftamt,
           timm:$ordering),
          (AMOInst GPR:$addr, GPR:$incr, GPR:$mask, GPR:$shiftamt,
           timm:$ordering)>;

let Predicates = [HasStdExtA] in {

let Size = 20 in
def PseudoAtomicLoadNand32 : PseudoAMO;
// Ordering constants must be kept in sync with the AtomicOrdering enum in
// AtomicOrdering.h.
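// For reference, the constants used in the patterns below correspond to
// Monotonic = 2, Acquire = 4, Release = 5, AcquireRelease = 6, and
// SequentiallyConsistent = 7. PseudoAtomicLoadNand32 exists because the 'A'
// extension has no native AMO for nand; the pseudo is expanded later to an
// LR.W/SC.W retry loop (Size = 20 corresponds to five 4-byte instructions).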
def : Pat<(XLenVT (atomic_load_nand_i32_monotonic GPR:$addr, GPR:$incr)),
          (PseudoAtomicLoadNand32 GPR:$addr, GPR:$incr, 2)>;
def : Pat<(XLenVT (atomic_load_nand_i32_acquire GPR:$addr, GPR:$incr)),
          (PseudoAtomicLoadNand32 GPR:$addr, GPR:$incr, 4)>;
def : Pat<(XLenVT (atomic_load_nand_i32_release GPR:$addr, GPR:$incr)),
          (PseudoAtomicLoadNand32 GPR:$addr, GPR:$incr, 5)>;
def : Pat<(XLenVT (atomic_load_nand_i32_acq_rel GPR:$addr, GPR:$incr)),
          (PseudoAtomicLoadNand32 GPR:$addr, GPR:$incr, 6)>;
def : Pat<(XLenVT (atomic_load_nand_i32_seq_cst GPR:$addr, GPR:$incr)),
          (PseudoAtomicLoadNand32 GPR:$addr, GPR:$incr, 7)>;

let Size = 28 in
def PseudoMaskedAtomicSwap32 : PseudoMaskedAMO;
def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_xchg_i32,
                         PseudoMaskedAtomicSwap32>;
let Size = 28 in
def PseudoMaskedAtomicLoadAdd32 : PseudoMaskedAMO;
def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_add_i32,
                         PseudoMaskedAtomicLoadAdd32>;
let Size = 28 in
def PseudoMaskedAtomicLoadSub32 : PseudoMaskedAMO;
def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_sub_i32,
                         PseudoMaskedAtomicLoadSub32>;
let Size = 32 in
def PseudoMaskedAtomicLoadNand32 : PseudoMaskedAMO;
def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_nand_i32,
                         PseudoMaskedAtomicLoadNand32>;
let Size = 44 in
def PseudoMaskedAtomicLoadMax32 : PseudoMaskedAMOMinMax;
def : PseudoMaskedAMOMinMaxPat<int_riscv_masked_atomicrmw_max_i32,
                               PseudoMaskedAtomicLoadMax32>;
let Size = 44 in
def PseudoMaskedAtomicLoadMin32 : PseudoMaskedAMOMinMax;
def : PseudoMaskedAMOMinMaxPat<int_riscv_masked_atomicrmw_min_i32,
                               PseudoMaskedAtomicLoadMin32>;
let Size = 36 in
def PseudoMaskedAtomicLoadUMax32 : PseudoMaskedAMOUMinUMax;
def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_umax_i32,
                         PseudoMaskedAtomicLoadUMax32>;
let Size = 36 in
def PseudoMaskedAtomicLoadUMin32 : PseudoMaskedAMOUMinUMax;
def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_umin_i32,
                         PseudoMaskedAtomicLoadUMin32>;
} // Predicates = [HasStdExtA]

let Predicates = [HasStdExtA, IsRV64] in {

let Size = 20 in
def PseudoAtomicLoadNand64 : PseudoAMO;
// Ordering constants must be kept in sync with the AtomicOrdering enum in
// AtomicOrdering.h.
def : Pat<(i64 (atomic_load_nand_i64_monotonic GPR:$addr, GPR:$incr)),
          (PseudoAtomicLoadNand64 GPR:$addr, GPR:$incr, 2)>;
def : Pat<(i64 (atomic_load_nand_i64_acquire GPR:$addr, GPR:$incr)),
          (PseudoAtomicLoadNand64 GPR:$addr, GPR:$incr, 4)>;
def : Pat<(i64 (atomic_load_nand_i64_release GPR:$addr, GPR:$incr)),
          (PseudoAtomicLoadNand64 GPR:$addr, GPR:$incr, 5)>;
def : Pat<(i64 (atomic_load_nand_i64_acq_rel GPR:$addr, GPR:$incr)),
          (PseudoAtomicLoadNand64 GPR:$addr, GPR:$incr, 6)>;
def : Pat<(i64 (atomic_load_nand_i64_seq_cst GPR:$addr, GPR:$incr)),
          (PseudoAtomicLoadNand64 GPR:$addr, GPR:$incr, 7)>;

def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_xchg_i64,
                         PseudoMaskedAtomicSwap32>;
def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_add_i64,
                         PseudoMaskedAtomicLoadAdd32>;
def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_sub_i64,
                         PseudoMaskedAtomicLoadSub32>;
def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_nand_i64,
                         PseudoMaskedAtomicLoadNand32>;
def : PseudoMaskedAMOMinMaxPat<int_riscv_masked_atomicrmw_max_i64,
                               PseudoMaskedAtomicLoadMax32>;
def : PseudoMaskedAMOMinMaxPat<int_riscv_masked_atomicrmw_min_i64,
                               PseudoMaskedAtomicLoadMin32>;
def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_umax_i64,
                         PseudoMaskedAtomicLoadUMax32>;
def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_umin_i64,
                         PseudoMaskedAtomicLoadUMin32>;
} // Predicates = [HasStdExtA, IsRV64]


/// Compare and exchange

class PseudoCmpXchg
    : Pseudo<(outs GPR:$res, GPR:$scratch),
             (ins GPR:$addr, GPR:$cmpval, GPR:$newval, ixlenimm:$ordering), []> {
  let Constraints = "@earlyclobber $res,@earlyclobber $scratch";
  let mayLoad = 1;
  let mayStore = 1;
  let hasSideEffects = 0;
  let Size = 16;
}

// Ordering constants must be kept in sync with the AtomicOrdering enum in
// AtomicOrdering.h.
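// As a usage sketch (derived from the patterns below, not new definitions):
// PseudoCmpXchgPat<"atomic_cmp_swap_i32", PseudoCmpXchg32> produces one
// pattern per ordering, e.g. atomic_cmp_swap_i32_acquire is selected to
// (PseudoCmpXchg32 GPR:$addr, GPR:$cmp, GPR:$new, 4); the pseudo is later
// expanded to an LR/SC retry loop (see RISCVExpandAtomicPseudoInsts.cpp).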
multiclass PseudoCmpXchgPat<string Op, Pseudo CmpXchgInst,
                            ValueType vt = XLenVT> {
  def : Pat<(vt (!cast<PatFrag>(Op#"_monotonic") GPR:$addr, GPR:$cmp, GPR:$new)),
            (CmpXchgInst GPR:$addr, GPR:$cmp, GPR:$new, 2)>;
  def : Pat<(vt (!cast<PatFrag>(Op#"_acquire") GPR:$addr, GPR:$cmp, GPR:$new)),
            (CmpXchgInst GPR:$addr, GPR:$cmp, GPR:$new, 4)>;
  def : Pat<(vt (!cast<PatFrag>(Op#"_release") GPR:$addr, GPR:$cmp, GPR:$new)),
            (CmpXchgInst GPR:$addr, GPR:$cmp, GPR:$new, 5)>;
  def : Pat<(vt (!cast<PatFrag>(Op#"_acq_rel") GPR:$addr, GPR:$cmp, GPR:$new)),
            (CmpXchgInst GPR:$addr, GPR:$cmp, GPR:$new, 6)>;
  def : Pat<(vt (!cast<PatFrag>(Op#"_seq_cst") GPR:$addr, GPR:$cmp, GPR:$new)),
            (CmpXchgInst GPR:$addr, GPR:$cmp, GPR:$new, 7)>;
}

let Predicates = [HasStdExtA, NoStdExtZacas] in {
def PseudoCmpXchg32 : PseudoCmpXchg;
defm : PseudoCmpXchgPat<"atomic_cmp_swap_i32", PseudoCmpXchg32>;
}

let Predicates = [HasStdExtA, NoStdExtZacas, IsRV64] in {
def PseudoCmpXchg64 : PseudoCmpXchg;
defm : PseudoCmpXchgPat<"atomic_cmp_swap_i64", PseudoCmpXchg64, i64>;
}

let Predicates = [HasStdExtA] in {
def PseudoMaskedCmpXchg32
    : Pseudo<(outs GPR:$res, GPR:$scratch),
             (ins GPR:$addr, GPR:$cmpval, GPR:$newval, GPR:$mask,
              ixlenimm:$ordering), []> {
  let Constraints = "@earlyclobber $res,@earlyclobber $scratch";
  let mayLoad = 1;
  let mayStore = 1;
  let hasSideEffects = 0;
  let Size = 32;
}

def : Pat<(int_riscv_masked_cmpxchg_i32
            GPR:$addr, GPR:$cmpval, GPR:$newval, GPR:$mask, timm:$ordering),
          (PseudoMaskedCmpXchg32
            GPR:$addr, GPR:$cmpval, GPR:$newval, GPR:$mask, timm:$ordering)>;
} // Predicates = [HasStdExtA]

let Predicates = [HasStdExtA, IsRV64] in {
def : Pat<(int_riscv_masked_cmpxchg_i64
            GPR:$addr, GPR:$cmpval, GPR:$newval, GPR:$mask, timm:$ordering),
          (PseudoMaskedCmpXchg32
            GPR:$addr, GPR:$cmpval, GPR:$newval, GPR:$mask, timm:$ordering)>;
} // Predicates = [HasStdExtA, IsRV64]
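
// Note (a reading of the patterns above, not additional definitions): the _i64
// masked intrinsics reuse the PseudoMasked*32 pseudos because the masked forms
// exist to implement sub-word (i8/i16) atomics; on RV64 only the XLen-typed
// operands widen to i64, while the memory access in the expanded sequence is
// presumably still a 32-bit word access (LR.W/SC.W).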