//===--- CGAtomic.cpp - Emit LLVM IR for atomic operations ----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the code for emitting atomic operations.
//
//===----------------------------------------------------------------------===//

#include "CGCall.h"
#include "CGRecordLayout.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "TargetInfo.h"
#include "clang/AST/ASTContext.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "clang/Frontend/FrontendDiagnostic.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Intrinsics.h"

using namespace clang;
using namespace CodeGen;

namespace {
  class AtomicInfo {
    CodeGenFunction &CGF;
    QualType AtomicTy;
    QualType ValueTy;
    uint64_t AtomicSizeInBits;
    uint64_t ValueSizeInBits;
    CharUnits AtomicAlign;
    CharUnits ValueAlign;
    TypeEvaluationKind EvaluationKind;
    bool UseLibcall;
    LValue LVal;
    CGBitFieldInfo BFI;
  public:
    AtomicInfo(CodeGenFunction &CGF, LValue &lvalue)
        : CGF(CGF), AtomicSizeInBits(0), ValueSizeInBits(0),
          EvaluationKind(TEK_Scalar), UseLibcall(true) {
      assert(!lvalue.isGlobalReg());
      ASTContext &C = CGF.getContext();
      if (lvalue.isSimple()) {
        AtomicTy = lvalue.getType();
        if (auto *ATy = AtomicTy->getAs<AtomicType>())
          ValueTy = ATy->getValueType();
        else
          ValueTy = AtomicTy;
        EvaluationKind = CGF.getEvaluationKind(ValueTy);

        uint64_t ValueAlignInBits;
        uint64_t AtomicAlignInBits;
        TypeInfo ValueTI = C.getTypeInfo(ValueTy);
        ValueSizeInBits = ValueTI.Width;
        ValueAlignInBits = ValueTI.Align;

        TypeInfo AtomicTI = C.getTypeInfo(AtomicTy);
        AtomicSizeInBits = AtomicTI.Width;
        AtomicAlignInBits = AtomicTI.Align;

        assert(ValueSizeInBits <= AtomicSizeInBits);
        assert(ValueAlignInBits <= AtomicAlignInBits);

        AtomicAlign = C.toCharUnitsFromBits(AtomicAlignInBits);
        ValueAlign = C.toCharUnitsFromBits(ValueAlignInBits);
        if (lvalue.getAlignment().isZero())
          lvalue.setAlignment(AtomicAlign);

        LVal = lvalue;
      } else if (lvalue.isBitField()) {
        ValueTy = lvalue.getType();
        ValueSizeInBits = C.getTypeSize(ValueTy);
        auto &OrigBFI = lvalue.getBitFieldInfo();
        auto Offset = OrigBFI.Offset % C.toBits(lvalue.getAlignment());
        AtomicSizeInBits = C.toBits(
            C.toCharUnitsFromBits(Offset + OrigBFI.Size + C.getCharWidth() - 1)
                .alignTo(lvalue.getAlignment()));
        llvm::Value *BitFieldPtr = lvalue.getRawBitFieldPointer(CGF);
        auto OffsetInChars =
            (C.toCharUnitsFromBits(OrigBFI.Offset) / lvalue.getAlignment()) *
            lvalue.getAlignment();
        llvm::Value *StoragePtr = CGF.Builder.CreateConstGEP1_64(
            CGF.Int8Ty, BitFieldPtr, OffsetInChars.getQuantity());
        StoragePtr = CGF.Builder.CreateAddrSpaceCast(
            StoragePtr, CGF.UnqualPtrTy, "atomic_bitfield_base");
        BFI = OrigBFI;
        BFI.Offset = Offset;
        BFI.StorageSize = AtomicSizeInBits;
        BFI.StorageOffset += OffsetInChars;
        llvm::Type *StorageTy = CGF.Builder.getIntNTy(AtomicSizeInBits);
        LVal = LValue::MakeBitfield(
            Address(StoragePtr, StorageTy, lvalue.getAlignment()), BFI,
            lvalue.getType(), lvalue.getBaseInfo(), lvalue.getTBAAInfo());
        AtomicTy =
            C.getIntTypeForBitwidth(AtomicSizeInBits, OrigBFI.IsSigned);
        if (AtomicTy.isNull()) {
          llvm::APInt Size(
              /*numBits=*/32,
              C.toCharUnitsFromBits(AtomicSizeInBits).getQuantity());
          AtomicTy = C.getConstantArrayType(C.CharTy, Size, nullptr,
                                            ArraySizeModifier::Normal,
                                            /*IndexTypeQuals=*/0);
        }
        AtomicAlign = ValueAlign = lvalue.getAlignment();
      } else if (lvalue.isVectorElt()) {
        ValueTy = lvalue.getType()->castAs<VectorType>()->getElementType();
        ValueSizeInBits = C.getTypeSize(ValueTy);
        AtomicTy = lvalue.getType();
        AtomicSizeInBits = C.getTypeSize(AtomicTy);
        AtomicAlign = ValueAlign = lvalue.getAlignment();
        LVal = lvalue;
      } else {
        assert(lvalue.isExtVectorElt());
        ValueTy = lvalue.getType();
        ValueSizeInBits = C.getTypeSize(ValueTy);
        AtomicTy = ValueTy = CGF.getContext().getExtVectorType(
            lvalue.getType(), cast<llvm::FixedVectorType>(
                                  lvalue.getExtVectorAddress().getElementType())
                                  ->getNumElements());
        AtomicSizeInBits = C.getTypeSize(AtomicTy);
        AtomicAlign = ValueAlign = lvalue.getAlignment();
        LVal = lvalue;
      }
      UseLibcall = !C.getTargetInfo().hasBuiltinAtomic(
          AtomicSizeInBits, C.toBits(lvalue.getAlignment()));
    }

    QualType getAtomicType() const { return AtomicTy; }
    QualType getValueType() const { return ValueTy; }
    CharUnits getAtomicAlignment() const { return AtomicAlign; }
    uint64_t getAtomicSizeInBits() const { return AtomicSizeInBits; }
    uint64_t getValueSizeInBits() const { return ValueSizeInBits; }
    TypeEvaluationKind getEvaluationKind() const { return EvaluationKind; }
    bool shouldUseLibcall() const { return UseLibcall; }
    const LValue &getAtomicLValue() const { return LVal; }
    llvm::Value *getAtomicPointer() const {
      if (LVal.isSimple())
        return LVal.emitRawPointer(CGF);
      else if (LVal.isBitField())
        return LVal.getRawBitFieldPointer(CGF);
      else if (LVal.isVectorElt())
        return LVal.getRawVectorPointer(CGF);
      assert(LVal.isExtVectorElt());
      return LVal.getRawExtVectorPointer(CGF);
    }
    Address getAtomicAddress() const {
      llvm::Type *ElTy;
      if (LVal.isSimple())
        ElTy = LVal.getAddress().getElementType();
      else if (LVal.isBitField())
        ElTy = LVal.getBitFieldAddress().getElementType();
      else if (LVal.isVectorElt())
        ElTy = LVal.getVectorAddress().getElementType();
      else
        ElTy = LVal.getExtVectorAddress().getElementType();
      return Address(getAtomicPointer(), ElTy, getAtomicAlignment());
    }

    Address getAtomicAddressAsAtomicIntPointer() const {
      return castToAtomicIntPointer(getAtomicAddress());
    }

    /// Is the atomic size larger than the underlying value type?
    ///
    /// Note that the absence of padding does not mean that atomic
    /// objects are completely interchangeable with non-atomic
    /// objects: we might have promoted the alignment of a type
    /// without making it bigger.
    bool hasPadding() const {
      return (ValueSizeInBits != AtomicSizeInBits);
    }
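
    // Illustrative example (exact numbers are target-dependent): wrapping a
    // three-byte struct in _Atomic typically rounds it up to a four-byte,
    // four-aligned object, so ValueSizeInBits (24) != AtomicSizeInBits (32)
    // and hasPadding() is true:
    //
    //   struct S { char c[3]; };
    //   _Atomic(struct S) s;   // usually size 4, align 4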

    bool emitMemSetZeroIfNecessary() const;

    llvm::Value *getAtomicSizeValue() const {
      CharUnits size = CGF.getContext().toCharUnitsFromBits(AtomicSizeInBits);
      return CGF.CGM.getSize(size);
    }

    /// Cast the given pointer to an integer pointer suitable for atomic
    /// operations.
    Address castToAtomicIntPointer(Address Addr) const;

    /// If Addr is compatible with the iN that will be used for an atomic
    /// operation, bitcast it. Otherwise, create a temporary that is suitable
    /// and copy the value across.
    Address convertToAtomicIntPointer(Address Addr) const;

    /// Turn an atomic-layout object into an r-value.
    RValue convertAtomicTempToRValue(Address addr, AggValueSlot resultSlot,
                                     SourceLocation loc, bool AsValue) const;

    llvm::Value *getScalarRValValueOrNull(RValue RVal) const;

    /// Converts an rvalue to integer value if needed.
    llvm::Value *convertRValueToInt(RValue RVal, bool CmpXchg = false) const;

    RValue ConvertToValueOrAtomic(llvm::Value *IntVal, AggValueSlot ResultSlot,
                                  SourceLocation Loc, bool AsValue,
                                  bool CmpXchg = false) const;

    /// Copy an atomic r-value into atomic-layout memory.
    void emitCopyIntoMemory(RValue rvalue) const;

    /// Project an l-value down to the value field.
    LValue projectValue() const {
      assert(LVal.isSimple());
      Address addr = getAtomicAddress();
      if (hasPadding())
        addr = CGF.Builder.CreateStructGEP(addr, 0);

      return LValue::MakeAddr(addr, getValueType(), CGF.getContext(),
                              LVal.getBaseInfo(), LVal.getTBAAInfo());
    }

    /// Emits atomic load.
    /// \returns Loaded value.
    RValue EmitAtomicLoad(AggValueSlot ResultSlot, SourceLocation Loc,
                          bool AsValue, llvm::AtomicOrdering AO,
                          bool IsVolatile);

    /// Emits atomic compare-and-exchange sequence.
    /// \param Expected Expected value.
    /// \param Desired Desired value.
    /// \param Success Atomic ordering for success operation.
    /// \param Failure Atomic ordering for failed operation.
    /// \param IsWeak true if atomic operation is weak, false otherwise.
    /// \returns Pair of values: previous value from storage (value type) and
    /// boolean flag (i1 type) with true if success and false otherwise.
    std::pair<RValue, llvm::Value *>
    EmitAtomicCompareExchange(RValue Expected, RValue Desired,
                              llvm::AtomicOrdering Success =
                                  llvm::AtomicOrdering::SequentiallyConsistent,
                              llvm::AtomicOrdering Failure =
                                  llvm::AtomicOrdering::SequentiallyConsistent,
                              bool IsWeak = false);

    /// Emits atomic update.
    /// \param AO Atomic ordering.
    /// \param UpdateOp Update operation for the current lvalue.
    void EmitAtomicUpdate(llvm::AtomicOrdering AO,
                          const llvm::function_ref<RValue(RValue)> &UpdateOp,
                          bool IsVolatile);
    /// Emits atomic update.
    /// \param AO Atomic ordering.
    void EmitAtomicUpdate(llvm::AtomicOrdering AO, RValue UpdateRVal,
                          bool IsVolatile);

    /// Materialize an atomic r-value in atomic-layout memory.
    Address materializeRValue(RValue rvalue) const;

    /// Creates temp alloca for intermediate operations on atomic value.
    Address CreateTempAlloca() const;
  private:
    bool requiresMemSetZero(llvm::Type *type) const;

    /// Emits atomic load as a libcall.
    void EmitAtomicLoadLibcall(llvm::Value *AddForLoaded,
                               llvm::AtomicOrdering AO, bool IsVolatile);
    /// Emits atomic load as LLVM instruction.
    llvm::Value *EmitAtomicLoadOp(llvm::AtomicOrdering AO, bool IsVolatile,
                                  bool CmpXchg = false);
    /// Emits atomic compare-and-exchange op as a libcall.
    llvm::Value *EmitAtomicCompareExchangeLibcall(
        llvm::Value *ExpectedAddr, llvm::Value *DesiredAddr,
        llvm::AtomicOrdering Success =
            llvm::AtomicOrdering::SequentiallyConsistent,
        llvm::AtomicOrdering Failure =
            llvm::AtomicOrdering::SequentiallyConsistent);
    /// Emits atomic compare-and-exchange op as LLVM instruction.
    std::pair<llvm::Value *, llvm::Value *> EmitAtomicCompareExchangeOp(
        llvm::Value *ExpectedVal, llvm::Value *DesiredVal,
        llvm::AtomicOrdering Success =
            llvm::AtomicOrdering::SequentiallyConsistent,
        llvm::AtomicOrdering Failure =
            llvm::AtomicOrdering::SequentiallyConsistent,
        bool IsWeak = false);
    /// Emit atomic update as libcalls.
    void
    EmitAtomicUpdateLibcall(llvm::AtomicOrdering AO,
                            const llvm::function_ref<RValue(RValue)> &UpdateOp,
                            bool IsVolatile);
    /// Emit atomic update as LLVM instructions.
    void EmitAtomicUpdateOp(llvm::AtomicOrdering AO,
                            const llvm::function_ref<RValue(RValue)> &UpdateOp,
                            bool IsVolatile);
    /// Emit atomic update as libcalls.
    void EmitAtomicUpdateLibcall(llvm::AtomicOrdering AO, RValue UpdateRVal,
                                 bool IsVolatile);
    /// Emit atomic update as LLVM instructions.
    void EmitAtomicUpdateOp(llvm::AtomicOrdering AO, RValue UpdateRVal,
                            bool IsVolatile);
  };
}

Address AtomicInfo::CreateTempAlloca() const {
  Address TempAlloca = CGF.CreateMemTemp(
      (LVal.isBitField() && ValueSizeInBits > AtomicSizeInBits) ? ValueTy
                                                                : AtomicTy,
      getAtomicAlignment(),
      "atomic-temp");
  // Cast to pointer to value type for bitfields.
  if (LVal.isBitField())
    return CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
        TempAlloca, getAtomicAddress().getType(),
        getAtomicAddress().getElementType());
  return TempAlloca;
}

static RValue emitAtomicLibcall(CodeGenFunction &CGF,
                                StringRef fnName,
                                QualType resultType,
                                CallArgList &args) {
  const CGFunctionInfo &fnInfo =
      CGF.CGM.getTypes().arrangeBuiltinFunctionCall(resultType, args);
  llvm::FunctionType *fnTy = CGF.CGM.getTypes().GetFunctionType(fnInfo);
  llvm::AttrBuilder fnAttrB(CGF.getLLVMContext());
  fnAttrB.addAttribute(llvm::Attribute::NoUnwind);
  fnAttrB.addAttribute(llvm::Attribute::WillReturn);
  llvm::AttributeList fnAttrs = llvm::AttributeList::get(
      CGF.getLLVMContext(), llvm::AttributeList::FunctionIndex, fnAttrB);

  llvm::FunctionCallee fn =
      CGF.CGM.CreateRuntimeFunction(fnTy, fnName, fnAttrs);
  auto callee = CGCallee::forDirect(fn);
  return CGF.EmitCall(fnInfo, callee, ReturnValueSlot(), args);
}

/// Does a store of the given IR type modify the full expected width?
static bool isFullSizeType(CodeGenModule &CGM, llvm::Type *type,
                           uint64_t expectedSize) {
  return (CGM.getDataLayout().getTypeStoreSize(type) * 8 == expectedSize);
}

/// Does the atomic type require memsetting to zero before initialization?
///
/// The IR type is provided as a way of making certain queries faster.
bool AtomicInfo::requiresMemSetZero(llvm::Type *type) const {
  // If the atomic type has size padding, we definitely need a memset.
  if (hasPadding()) return true;

  // Otherwise, do some simple heuristics to try to avoid it:
  switch (getEvaluationKind()) {
  // For scalars and complexes, check whether the store size of the
  // type uses the full size.
  case TEK_Scalar:
    return !isFullSizeType(CGF.CGM, type, AtomicSizeInBits);
  case TEK_Complex:
    return !isFullSizeType(CGF.CGM, type->getStructElementType(0),
                           AtomicSizeInBits / 2);

  // Padding in structs has an undefined bit pattern. User beware.
  case TEK_Aggregate:
    return false;
  }
  llvm_unreachable("bad evaluation kind");
}
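
// Illustrative example (assuming x86-64): for 'long double' the IR type
// x86_fp80 has a 10-byte store size inside a 16-byte atomic, so
// isFullSizeType is false and initialization must first memset the object
// to give the trailing padding bytes a defined value.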

bool AtomicInfo::emitMemSetZeroIfNecessary() const {
  assert(LVal.isSimple());
  Address addr = LVal.getAddress();
  if (!requiresMemSetZero(addr.getElementType()))
    return false;

  CGF.Builder.CreateMemSet(
      addr.emitRawPointer(CGF), llvm::ConstantInt::get(CGF.Int8Ty, 0),
      CGF.getContext().toCharUnitsFromBits(AtomicSizeInBits).getQuantity(),
      LVal.getAlignment().getAsAlign());
  return true;
}

static void emitAtomicCmpXchg(CodeGenFunction &CGF, AtomicExpr *E, bool IsWeak,
                              Address Dest, Address Ptr,
                              Address Val1, Address Val2,
                              uint64_t Size,
                              llvm::AtomicOrdering SuccessOrder,
                              llvm::AtomicOrdering FailureOrder,
                              llvm::SyncScope::ID Scope) {
  // Load the expected and desired values; the cmpxchg instruction takes them
  // by value.
  llvm::Value *Expected = CGF.Builder.CreateLoad(Val1);
  llvm::Value *Desired = CGF.Builder.CreateLoad(Val2);

  llvm::AtomicCmpXchgInst *Pair = CGF.Builder.CreateAtomicCmpXchg(
      Ptr, Expected, Desired, SuccessOrder, FailureOrder, Scope);
  Pair->setVolatile(E->isVolatile());
  Pair->setWeak(IsWeak);
  CGF.getTargetHooks().setTargetAtomicMetadata(CGF, *Pair, E);

  // Cmp holds the result of the compare-exchange operation: true on success,
  // false on failure.
  llvm::Value *Old = CGF.Builder.CreateExtractValue(Pair, 0);
  llvm::Value *Cmp = CGF.Builder.CreateExtractValue(Pair, 1);

  // This basic block is used to hold the store instruction if the operation
  // failed.
  llvm::BasicBlock *StoreExpectedBB =
      CGF.createBasicBlock("cmpxchg.store_expected", CGF.CurFn);

  // This basic block is the exit point of the operation; we should end up
  // here regardless of whether or not the operation succeeded.
  llvm::BasicBlock *ContinueBB =
      CGF.createBasicBlock("cmpxchg.continue", CGF.CurFn);

  // Update Expected if Expected isn't equal to Old, otherwise branch to the
  // exit point.
  CGF.Builder.CreateCondBr(Cmp, ContinueBB, StoreExpectedBB);

  CGF.Builder.SetInsertPoint(StoreExpectedBB);
  // Update the memory at Expected with Old's value.
  CGF.Builder.CreateStore(Old, Val1);
  // Finally, branch to the exit point.
  CGF.Builder.CreateBr(ContinueBB);

  CGF.Builder.SetInsertPoint(ContinueBB);
  // Update the memory at Dest with Cmp's value.
  CGF.EmitStoreOfScalar(Cmp, CGF.MakeAddrLValue(Dest, E->getType()));
}
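
// For reference, the sequence emitted above has roughly this shape in IR
// (illustrative; iN is the integer type sized to the atomic):
//
//   %pair = cmpxchg ptr %ptr, iN %expected, iN %desired succ_ord fail_ord
//   %old  = extractvalue { iN, i1 } %pair, 0
//   %cmp  = extractvalue { iN, i1 } %pair, 1
//   br i1 %cmp, label %cmpxchg.continue, label %cmpxchg.store_expected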

/// Given an ordering required on success, emit all possible cmpxchg
/// instructions to cope with the provided (but possibly only dynamically
/// known) FailureOrder.
static void emitAtomicCmpXchgFailureSet(CodeGenFunction &CGF, AtomicExpr *E,
                                        bool IsWeak, Address Dest, Address Ptr,
                                        Address Val1, Address Val2,
                                        llvm::Value *FailureOrderVal,
                                        uint64_t Size,
                                        llvm::AtomicOrdering SuccessOrder,
                                        llvm::SyncScope::ID Scope) {
  llvm::AtomicOrdering FailureOrder;
  if (llvm::ConstantInt *FO = dyn_cast<llvm::ConstantInt>(FailureOrderVal)) {
    auto FOS = FO->getSExtValue();
    if (!llvm::isValidAtomicOrderingCABI(FOS))
      FailureOrder = llvm::AtomicOrdering::Monotonic;
    else
      switch ((llvm::AtomicOrderingCABI)FOS) {
      case llvm::AtomicOrderingCABI::relaxed:
        // 31.7.2.18: "The failure argument shall not be memory_order_release
        // nor memory_order_acq_rel". Fall back to monotonic.
      case llvm::AtomicOrderingCABI::release:
      case llvm::AtomicOrderingCABI::acq_rel:
        FailureOrder = llvm::AtomicOrdering::Monotonic;
        break;
      case llvm::AtomicOrderingCABI::consume:
      case llvm::AtomicOrderingCABI::acquire:
        FailureOrder = llvm::AtomicOrdering::Acquire;
        break;
      case llvm::AtomicOrderingCABI::seq_cst:
        FailureOrder = llvm::AtomicOrdering::SequentiallyConsistent;
        break;
      }
    // Prior to C++17, "the failure argument shall be no stronger than the
    // success argument". This condition has been lifted and the only
    // precondition is 31.7.2.18. Effectively treat this as a DR and skip
    // language version checks.
    emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, Size, SuccessOrder,
                      FailureOrder, Scope);
    return;
  }

  // Create all the relevant BB's
  auto *MonotonicBB = CGF.createBasicBlock("monotonic_fail", CGF.CurFn);
  auto *AcquireBB = CGF.createBasicBlock("acquire_fail", CGF.CurFn);
  auto *SeqCstBB = CGF.createBasicBlock("seqcst_fail", CGF.CurFn);
  auto *ContBB = CGF.createBasicBlock("atomic.continue", CGF.CurFn);

  // MonotonicBB is arbitrarily chosen as the default case; in practice, this
  // doesn't matter unless someone is crazy enough to use something that
  // doesn't fold to a constant for the ordering.
  llvm::SwitchInst *SI = CGF.Builder.CreateSwitch(FailureOrderVal, MonotonicBB);
  // Implemented as acquire, since it's the closest in LLVM.
  SI->addCase(CGF.Builder.getInt32((int)llvm::AtomicOrderingCABI::consume),
              AcquireBB);
  SI->addCase(CGF.Builder.getInt32((int)llvm::AtomicOrderingCABI::acquire),
              AcquireBB);
  SI->addCase(CGF.Builder.getInt32((int)llvm::AtomicOrderingCABI::seq_cst),
              SeqCstBB);

  // Emit all the different atomics
  CGF.Builder.SetInsertPoint(MonotonicBB);
  emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2,
                    Size, SuccessOrder, llvm::AtomicOrdering::Monotonic, Scope);
  CGF.Builder.CreateBr(ContBB);

  CGF.Builder.SetInsertPoint(AcquireBB);
  emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, Size, SuccessOrder,
                    llvm::AtomicOrdering::Acquire, Scope);
  CGF.Builder.CreateBr(ContBB);

  CGF.Builder.SetInsertPoint(SeqCstBB);
  emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, Size, SuccessOrder,
                    llvm::AtomicOrdering::SequentiallyConsistent, Scope);
  CGF.Builder.CreateBr(ContBB);

  CGF.Builder.SetInsertPoint(ContBB);
}

/// Duplicate the atomic min/max operation in conventional IR for the builtin
/// variants that return the new rather than the original value.
static llvm::Value *EmitPostAtomicMinMax(CGBuilderTy &Builder,
                                         AtomicExpr::AtomicOp Op,
                                         bool IsSigned,
                                         llvm::Value *OldVal,
                                         llvm::Value *RHS) {
  llvm::CmpInst::Predicate Pred;
  switch (Op) {
  default:
    llvm_unreachable("Unexpected min/max operation");
  case AtomicExpr::AO__atomic_max_fetch:
  case AtomicExpr::AO__scoped_atomic_max_fetch:
    Pred = IsSigned ? llvm::CmpInst::ICMP_SGT : llvm::CmpInst::ICMP_UGT;
    break;
  case AtomicExpr::AO__atomic_min_fetch:
  case AtomicExpr::AO__scoped_atomic_min_fetch:
    Pred = IsSigned ? llvm::CmpInst::ICMP_SLT : llvm::CmpInst::ICMP_ULT;
    break;
  }
  llvm::Value *Cmp = Builder.CreateICmp(Pred, OldVal, RHS, "tst");
  return Builder.CreateSelect(Cmp, OldVal, RHS, "newval");
}
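
// Illustrative example: __atomic_max_fetch(p, v, order) lowers to an
// 'atomicrmw max' (which yields the value previously stored) followed by
// the icmp/select above to recompute max(old, v) as the result.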

static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, Address Dest,
                         Address Ptr, Address Val1, Address Val2,
                         llvm::Value *IsWeak, llvm::Value *FailureOrder,
                         uint64_t Size, llvm::AtomicOrdering Order,
                         llvm::SyncScope::ID Scope) {
  llvm::AtomicRMWInst::BinOp Op = llvm::AtomicRMWInst::Add;
  bool PostOpMinMax = false;
  unsigned PostOp = 0;

  switch (E->getOp()) {
  case AtomicExpr::AO__c11_atomic_init:
  case AtomicExpr::AO__opencl_atomic_init:
    llvm_unreachable("Already handled!");

  case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
  case AtomicExpr::AO__hip_atomic_compare_exchange_strong:
  case AtomicExpr::AO__opencl_atomic_compare_exchange_strong:
    emitAtomicCmpXchgFailureSet(CGF, E, false, Dest, Ptr, Val1, Val2,
                                FailureOrder, Size, Order, Scope);
    return;
  case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
  case AtomicExpr::AO__opencl_atomic_compare_exchange_weak:
  case AtomicExpr::AO__hip_atomic_compare_exchange_weak:
    emitAtomicCmpXchgFailureSet(CGF, E, true, Dest, Ptr, Val1, Val2,
                                FailureOrder, Size, Order, Scope);
    return;
  case AtomicExpr::AO__atomic_compare_exchange:
  case AtomicExpr::AO__atomic_compare_exchange_n:
  case AtomicExpr::AO__scoped_atomic_compare_exchange:
  case AtomicExpr::AO__scoped_atomic_compare_exchange_n: {
    if (llvm::ConstantInt *IsWeakC = dyn_cast<llvm::ConstantInt>(IsWeak)) {
      emitAtomicCmpXchgFailureSet(CGF, E, IsWeakC->getZExtValue(), Dest, Ptr,
                                  Val1, Val2, FailureOrder, Size, Order, Scope);
    } else {
      // Create all the relevant BB's
      llvm::BasicBlock *StrongBB =
          CGF.createBasicBlock("cmpxchg.strong", CGF.CurFn);
      llvm::BasicBlock *WeakBB =
          CGF.createBasicBlock("cmpxchg.weak", CGF.CurFn);
      llvm::BasicBlock *ContBB =
          CGF.createBasicBlock("cmpxchg.continue", CGF.CurFn);

      llvm::SwitchInst *SI = CGF.Builder.CreateSwitch(IsWeak, WeakBB);
      SI->addCase(CGF.Builder.getInt1(false), StrongBB);

      CGF.Builder.SetInsertPoint(StrongBB);
      emitAtomicCmpXchgFailureSet(CGF, E, false, Dest, Ptr, Val1, Val2,
                                  FailureOrder, Size, Order, Scope);
      CGF.Builder.CreateBr(ContBB);

      CGF.Builder.SetInsertPoint(WeakBB);
      emitAtomicCmpXchgFailureSet(CGF, E, true, Dest, Ptr, Val1, Val2,
                                  FailureOrder, Size, Order, Scope);
      CGF.Builder.CreateBr(ContBB);

      CGF.Builder.SetInsertPoint(ContBB);
    }
    return;
  }
  case AtomicExpr::AO__c11_atomic_load:
  case AtomicExpr::AO__opencl_atomic_load:
  case AtomicExpr::AO__hip_atomic_load:
  case AtomicExpr::AO__atomic_load_n:
  case AtomicExpr::AO__atomic_load:
  case AtomicExpr::AO__scoped_atomic_load_n:
  case AtomicExpr::AO__scoped_atomic_load: {
    llvm::LoadInst *Load = CGF.Builder.CreateLoad(Ptr);
    Load->setAtomic(Order, Scope);
    Load->setVolatile(E->isVolatile());
    CGF.Builder.CreateStore(Load, Dest);
    return;
  }

  case AtomicExpr::AO__c11_atomic_store:
  case AtomicExpr::AO__opencl_atomic_store:
  case AtomicExpr::AO__hip_atomic_store:
  case AtomicExpr::AO__atomic_store:
  case AtomicExpr::AO__atomic_store_n:
  case AtomicExpr::AO__scoped_atomic_store:
  case AtomicExpr::AO__scoped_atomic_store_n: {
    llvm::Value *LoadVal1 = CGF.Builder.CreateLoad(Val1);
    llvm::StoreInst *Store = CGF.Builder.CreateStore(LoadVal1, Ptr);
    Store->setAtomic(Order, Scope);
    Store->setVolatile(E->isVolatile());
    return;
  }

  case AtomicExpr::AO__c11_atomic_exchange:
  case AtomicExpr::AO__hip_atomic_exchange:
  case AtomicExpr::AO__opencl_atomic_exchange:
  case AtomicExpr::AO__atomic_exchange_n:
  case AtomicExpr::AO__atomic_exchange:
  case AtomicExpr::AO__scoped_atomic_exchange_n:
  case AtomicExpr::AO__scoped_atomic_exchange:
    Op = llvm::AtomicRMWInst::Xchg;
    break;

  case AtomicExpr::AO__atomic_add_fetch:
  case AtomicExpr::AO__scoped_atomic_add_fetch:
    PostOp = E->getValueType()->isFloatingType() ? llvm::Instruction::FAdd
                                                 : llvm::Instruction::Add;
    [[fallthrough]];
  case AtomicExpr::AO__c11_atomic_fetch_add:
  case AtomicExpr::AO__hip_atomic_fetch_add:
  case AtomicExpr::AO__opencl_atomic_fetch_add:
  case AtomicExpr::AO__atomic_fetch_add:
  case AtomicExpr::AO__scoped_atomic_fetch_add:
    Op = E->getValueType()->isFloatingType() ? llvm::AtomicRMWInst::FAdd
                                             : llvm::AtomicRMWInst::Add;
    break;

  case AtomicExpr::AO__atomic_sub_fetch:
  case AtomicExpr::AO__scoped_atomic_sub_fetch:
    PostOp = E->getValueType()->isFloatingType() ? llvm::Instruction::FSub
                                                 : llvm::Instruction::Sub;
    [[fallthrough]];
  case AtomicExpr::AO__c11_atomic_fetch_sub:
  case AtomicExpr::AO__hip_atomic_fetch_sub:
  case AtomicExpr::AO__opencl_atomic_fetch_sub:
  case AtomicExpr::AO__atomic_fetch_sub:
  case AtomicExpr::AO__scoped_atomic_fetch_sub:
    Op = E->getValueType()->isFloatingType() ? llvm::AtomicRMWInst::FSub
                                             : llvm::AtomicRMWInst::Sub;
    break;

  case AtomicExpr::AO__atomic_min_fetch:
  case AtomicExpr::AO__scoped_atomic_min_fetch:
    PostOpMinMax = true;
    [[fallthrough]];
  case AtomicExpr::AO__c11_atomic_fetch_min:
  case AtomicExpr::AO__hip_atomic_fetch_min:
  case AtomicExpr::AO__opencl_atomic_fetch_min:
  case AtomicExpr::AO__atomic_fetch_min:
  case AtomicExpr::AO__scoped_atomic_fetch_min:
    Op = E->getValueType()->isFloatingType()
             ? llvm::AtomicRMWInst::FMin
             : (E->getValueType()->isSignedIntegerType()
                    ? llvm::AtomicRMWInst::Min
                    : llvm::AtomicRMWInst::UMin);
    break;

  case AtomicExpr::AO__atomic_max_fetch:
  case AtomicExpr::AO__scoped_atomic_max_fetch:
    PostOpMinMax = true;
    [[fallthrough]];
  case AtomicExpr::AO__c11_atomic_fetch_max:
  case AtomicExpr::AO__hip_atomic_fetch_max:
  case AtomicExpr::AO__opencl_atomic_fetch_max:
  case AtomicExpr::AO__atomic_fetch_max:
  case AtomicExpr::AO__scoped_atomic_fetch_max:
    Op = E->getValueType()->isFloatingType()
             ? llvm::AtomicRMWInst::FMax
             : (E->getValueType()->isSignedIntegerType()
                    ? llvm::AtomicRMWInst::Max
                    : llvm::AtomicRMWInst::UMax);
    break;

  case AtomicExpr::AO__atomic_and_fetch:
  case AtomicExpr::AO__scoped_atomic_and_fetch:
    PostOp = llvm::Instruction::And;
    [[fallthrough]];
  case AtomicExpr::AO__c11_atomic_fetch_and:
  case AtomicExpr::AO__hip_atomic_fetch_and:
  case AtomicExpr::AO__opencl_atomic_fetch_and:
  case AtomicExpr::AO__atomic_fetch_and:
  case AtomicExpr::AO__scoped_atomic_fetch_and:
    Op = llvm::AtomicRMWInst::And;
    break;

  case AtomicExpr::AO__atomic_or_fetch:
  case AtomicExpr::AO__scoped_atomic_or_fetch:
    PostOp = llvm::Instruction::Or;
    [[fallthrough]];
  case AtomicExpr::AO__c11_atomic_fetch_or:
  case AtomicExpr::AO__hip_atomic_fetch_or:
  case AtomicExpr::AO__opencl_atomic_fetch_or:
  case AtomicExpr::AO__atomic_fetch_or:
  case AtomicExpr::AO__scoped_atomic_fetch_or:
    Op = llvm::AtomicRMWInst::Or;
    break;

  case AtomicExpr::AO__atomic_xor_fetch:
  case AtomicExpr::AO__scoped_atomic_xor_fetch:
    PostOp = llvm::Instruction::Xor;
    [[fallthrough]];
  case AtomicExpr::AO__c11_atomic_fetch_xor:
  case AtomicExpr::AO__hip_atomic_fetch_xor:
  case AtomicExpr::AO__opencl_atomic_fetch_xor:
  case AtomicExpr::AO__atomic_fetch_xor:
  case AtomicExpr::AO__scoped_atomic_fetch_xor:
    Op = llvm::AtomicRMWInst::Xor;
    break;

  case AtomicExpr::AO__atomic_nand_fetch:
  case AtomicExpr::AO__scoped_atomic_nand_fetch:
    PostOp = llvm::Instruction::And; // the NOT is special cased below
    [[fallthrough]];
  case AtomicExpr::AO__c11_atomic_fetch_nand:
  case AtomicExpr::AO__atomic_fetch_nand:
  case AtomicExpr::AO__scoped_atomic_fetch_nand:
    Op = llvm::AtomicRMWInst::Nand;
    break;

  case AtomicExpr::AO__atomic_test_and_set: {
    llvm::AtomicRMWInst *RMWI =
        CGF.emitAtomicRMWInst(llvm::AtomicRMWInst::Xchg, Ptr,
                              CGF.Builder.getInt8(1), Order, Scope, E);
    RMWI->setVolatile(E->isVolatile());
    llvm::Value *Result = CGF.Builder.CreateIsNotNull(RMWI, "tobool");
    CGF.Builder.CreateStore(Result, Dest);
    return;
  }

  case AtomicExpr::AO__atomic_clear: {
    llvm::StoreInst *Store =
        CGF.Builder.CreateStore(CGF.Builder.getInt8(0), Ptr);
    Store->setAtomic(Order, Scope);
    Store->setVolatile(E->isVolatile());
    return;
  }
  }

  llvm::Value *LoadVal1 = CGF.Builder.CreateLoad(Val1);
  llvm::AtomicRMWInst *RMWI =
      CGF.emitAtomicRMWInst(Op, Ptr, LoadVal1, Order, Scope, E);
  RMWI->setVolatile(E->isVolatile());

  // For __atomic_*_fetch operations, perform the operation again to
  // determine the value which was written.
  llvm::Value *Result = RMWI;
  if (PostOpMinMax)
    Result = EmitPostAtomicMinMax(CGF.Builder, E->getOp(),
                                  E->getValueType()->isSignedIntegerType(),
                                  RMWI, LoadVal1);
  else if (PostOp)
    Result = CGF.Builder.CreateBinOp((llvm::Instruction::BinaryOps)PostOp, RMWI,
                                     LoadVal1);
  if (E->getOp() == AtomicExpr::AO__atomic_nand_fetch ||
      E->getOp() == AtomicExpr::AO__scoped_atomic_nand_fetch)
    Result = CGF.Builder.CreateNot(Result);
  CGF.Builder.CreateStore(Result, Dest);
}
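
// Illustrative example: __atomic_nand_fetch(p, v, order) lowers to
// 'atomicrmw nand' followed by an in-IR 'and' plus 'not' of the returned
// value, since the RMW instruction yields the value previously stored.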

// This function emits any expression (scalar, complex, or aggregate)
// into a temporary alloca.
static Address EmitValToTemp(CodeGenFunction &CGF, Expr *E) {
  Address DeclPtr = CGF.CreateMemTemp(E->getType(), ".atomictmp");
  CGF.EmitAnyExprToMem(E, DeclPtr, E->getType().getQualifiers(),
                       /*Init*/ true);
  return DeclPtr;
}

static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *Expr, Address Dest,
                         Address Ptr, Address Val1, Address Val2,
                         llvm::Value *IsWeak, llvm::Value *FailureOrder,
                         uint64_t Size, llvm::AtomicOrdering Order,
                         llvm::Value *Scope) {
  auto ScopeModel = Expr->getScopeModel();

  // LLVM atomic instructions always have a sync scope. If the clang atomic
  // expression has no scope operand, use the default LLVM sync scope.
  if (!ScopeModel) {
    llvm::SyncScope::ID SS;
    if (CGF.getLangOpts().OpenCL)
      // The OpenCL approach is: "The functions that do not have memory_scope
      // argument have the same semantics as the corresponding functions with
      // the memory_scope argument set to memory_scope_device." See ref.:
      // https://registry.khronos.org/OpenCL/specs/3.0-unified/html/OpenCL_C.html#atomic-functions
      SS = CGF.getTargetHooks().getLLVMSyncScopeID(CGF.getLangOpts(),
                                                   SyncScope::OpenCLDevice,
                                                   Order, CGF.getLLVMContext());
    else
      SS = llvm::SyncScope::System;
    EmitAtomicOp(CGF, Expr, Dest, Ptr, Val1, Val2, IsWeak, FailureOrder, Size,
                 Order, SS);
    return;
  }

  // Handle constant scope.
  if (auto SC = dyn_cast<llvm::ConstantInt>(Scope)) {
    auto SCID = CGF.getTargetHooks().getLLVMSyncScopeID(
        CGF.CGM.getLangOpts(), ScopeModel->map(SC->getZExtValue()),
        Order, CGF.CGM.getLLVMContext());
    EmitAtomicOp(CGF, Expr, Dest, Ptr, Val1, Val2, IsWeak, FailureOrder, Size,
                 Order, SCID);
    return;
  }

  // Handle non-constant scope.
  auto &Builder = CGF.Builder;
  auto Scopes = ScopeModel->getRuntimeValues();
  llvm::DenseMap<unsigned, llvm::BasicBlock *> BB;
  for (auto S : Scopes)
    BB[S] = CGF.createBasicBlock(getAsString(ScopeModel->map(S)), CGF.CurFn);

  llvm::BasicBlock *ContBB =
      CGF.createBasicBlock("atomic.scope.continue", CGF.CurFn);

  auto *SC = Builder.CreateIntCast(Scope, Builder.getInt32Ty(), false);
  // If an unsupported sync scope is encountered at run time, assume a
  // fallback sync scope value.
  auto FallBack = ScopeModel->getFallBackValue();
  llvm::SwitchInst *SI = Builder.CreateSwitch(SC, BB[FallBack]);
  for (auto S : Scopes) {
    auto *B = BB[S];
    if (S != FallBack)
      SI->addCase(Builder.getInt32(S), B);

    Builder.SetInsertPoint(B);
    EmitAtomicOp(CGF, Expr, Dest, Ptr, Val1, Val2, IsWeak, FailureOrder, Size,
                 Order,
                 CGF.getTargetHooks().getLLVMSyncScopeID(CGF.CGM.getLangOpts(),
                                                         ScopeModel->map(S),
                                                         Order,
                                                         CGF.getLLVMContext()));
    Builder.CreateBr(ContBB);
  }

  Builder.SetInsertPoint(ContBB);
}
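
// Illustrative example: __opencl_atomic_load(p, order, scope) with a
// non-constant scope is emitted as a switch over the scope values known to
// the model, one basic block per scope, defaulting to the fallback scope.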

RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
  QualType AtomicTy = E->getPtr()->getType()->getPointeeType();
  QualType MemTy = AtomicTy;
  if (const AtomicType *AT = AtomicTy->getAs<AtomicType>())
    MemTy = AT->getValueType();
  llvm::Value *IsWeak = nullptr, *OrderFail = nullptr;

  Address Val1 = Address::invalid();
  Address Val2 = Address::invalid();
  Address Dest = Address::invalid();
  Address Ptr = EmitPointerWithAlignment(E->getPtr());

  if (E->getOp() == AtomicExpr::AO__c11_atomic_init ||
      E->getOp() == AtomicExpr::AO__opencl_atomic_init) {
    LValue lvalue = MakeAddrLValue(Ptr, AtomicTy);
    EmitAtomicInit(E->getVal1(), lvalue);
    return RValue::get(nullptr);
  }

  auto TInfo = getContext().getTypeInfoInChars(AtomicTy);
  uint64_t Size = TInfo.Width.getQuantity();
  unsigned MaxInlineWidthInBits = getTarget().getMaxAtomicInlineWidth();

  CharUnits MaxInlineWidth =
      getContext().toCharUnitsFromBits(MaxInlineWidthInBits);
  DiagnosticsEngine &Diags = CGM.getDiags();
  bool Misaligned = (Ptr.getAlignment() % TInfo.Width) != 0;
  bool Oversized = getContext().toBits(TInfo.Width) > MaxInlineWidthInBits;
  if (Misaligned) {
    Diags.Report(E->getBeginLoc(), diag::warn_atomic_op_misaligned)
        << (int)TInfo.Width.getQuantity()
        << (int)Ptr.getAlignment().getQuantity();
  }
  if (Oversized) {
    Diags.Report(E->getBeginLoc(), diag::warn_atomic_op_oversized)
        << (int)TInfo.Width.getQuantity() << (int)MaxInlineWidth.getQuantity();
  }

  llvm::Value *Order = EmitScalarExpr(E->getOrder());
  llvm::Value *Scope =
      E->getScopeModel() ? EmitScalarExpr(E->getScope()) : nullptr;
  bool ShouldCastToIntPtrTy = true;

  switch (E->getOp()) {
  case AtomicExpr::AO__c11_atomic_init:
  case AtomicExpr::AO__opencl_atomic_init:
    llvm_unreachable("Already handled above with EmitAtomicInit!");

  case AtomicExpr::AO__atomic_load_n:
  case AtomicExpr::AO__scoped_atomic_load_n:
  case AtomicExpr::AO__c11_atomic_load:
  case AtomicExpr::AO__opencl_atomic_load:
  case AtomicExpr::AO__hip_atomic_load:
  case AtomicExpr::AO__atomic_test_and_set:
  case AtomicExpr::AO__atomic_clear:
    break;

  case AtomicExpr::AO__atomic_load:
  case AtomicExpr::AO__scoped_atomic_load:
    Dest = EmitPointerWithAlignment(E->getVal1());
    break;

  case AtomicExpr::AO__atomic_store:
  case AtomicExpr::AO__scoped_atomic_store:
    Val1 = EmitPointerWithAlignment(E->getVal1());
    break;

  case AtomicExpr::AO__atomic_exchange:
  case AtomicExpr::AO__scoped_atomic_exchange:
    Val1 = EmitPointerWithAlignment(E->getVal1());
    Dest = EmitPointerWithAlignment(E->getVal2());
    break;

  case AtomicExpr::AO__atomic_compare_exchange:
  case AtomicExpr::AO__atomic_compare_exchange_n:
  case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
  case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
  case AtomicExpr::AO__hip_atomic_compare_exchange_weak:
  case AtomicExpr::AO__hip_atomic_compare_exchange_strong:
  case AtomicExpr::AO__opencl_atomic_compare_exchange_weak:
  case AtomicExpr::AO__opencl_atomic_compare_exchange_strong:
  case AtomicExpr::AO__scoped_atomic_compare_exchange:
  case AtomicExpr::AO__scoped_atomic_compare_exchange_n:
    Val1 = EmitPointerWithAlignment(E->getVal1());
    if (E->getOp() == AtomicExpr::AO__atomic_compare_exchange ||
        E->getOp() == AtomicExpr::AO__scoped_atomic_compare_exchange)
      Val2 = EmitPointerWithAlignment(E->getVal2());
    else
      Val2 = EmitValToTemp(*this, E->getVal2());
    OrderFail = EmitScalarExpr(E->getOrderFail());
    if (E->getOp() == AtomicExpr::AO__atomic_compare_exchange_n ||
        E->getOp() == AtomicExpr::AO__atomic_compare_exchange ||
        E->getOp() == AtomicExpr::AO__scoped_atomic_compare_exchange_n ||
        E->getOp() == AtomicExpr::AO__scoped_atomic_compare_exchange)
      IsWeak = EmitScalarExpr(E->getWeak());
    break;

  case AtomicExpr::AO__c11_atomic_fetch_add:
  case AtomicExpr::AO__c11_atomic_fetch_sub:
  case AtomicExpr::AO__hip_atomic_fetch_add:
  case AtomicExpr::AO__hip_atomic_fetch_sub:
  case AtomicExpr::AO__opencl_atomic_fetch_add:
  case AtomicExpr::AO__opencl_atomic_fetch_sub:
    if (MemTy->isPointerType()) {
      // For pointer arithmetic, we're required to do a bit of math:
      // adding 1 to an int* is not the same as adding 1 to a uintptr_t.
      // ... but only for the C11 builtins. The GNU builtins expect the
      // user to multiply by sizeof(T).
      QualType Val1Ty = E->getVal1()->getType();
      llvm::Value *Val1Scalar = EmitScalarExpr(E->getVal1());
      CharUnits PointeeIncAmt =
          getContext().getTypeSizeInChars(MemTy->getPointeeType());
      Val1Scalar = Builder.CreateMul(Val1Scalar, CGM.getSize(PointeeIncAmt));
      auto Temp = CreateMemTemp(Val1Ty, ".atomictmp");
      Val1 = Temp;
      EmitStoreOfScalar(Val1Scalar, MakeAddrLValue(Temp, Val1Ty));
      break;
    }
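    // Illustrative example: __c11_atomic_fetch_add(&p, 2, order) on an
    // _Atomic(int *) adds 2 * sizeof(int) bytes to the pointer, whereas the
    // GNU __atomic_fetch_add on the same pointer would add exactly 2 bytes
    // and expects the caller to scale by sizeof(int) themselves.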
    [[fallthrough]];
  case AtomicExpr::AO__atomic_fetch_add:
  case AtomicExpr::AO__atomic_fetch_max:
  case AtomicExpr::AO__atomic_fetch_min:
  case AtomicExpr::AO__atomic_fetch_sub:
  case AtomicExpr::AO__atomic_add_fetch:
  case AtomicExpr::AO__atomic_max_fetch:
  case AtomicExpr::AO__atomic_min_fetch:
  case AtomicExpr::AO__atomic_sub_fetch:
  case AtomicExpr::AO__c11_atomic_fetch_max:
  case AtomicExpr::AO__c11_atomic_fetch_min:
  case AtomicExpr::AO__opencl_atomic_fetch_max:
  case AtomicExpr::AO__opencl_atomic_fetch_min:
  case AtomicExpr::AO__hip_atomic_fetch_max:
  case AtomicExpr::AO__hip_atomic_fetch_min:
  case AtomicExpr::AO__scoped_atomic_fetch_add:
  case AtomicExpr::AO__scoped_atomic_fetch_max:
  case AtomicExpr::AO__scoped_atomic_fetch_min:
  case AtomicExpr::AO__scoped_atomic_fetch_sub:
  case AtomicExpr::AO__scoped_atomic_add_fetch:
  case AtomicExpr::AO__scoped_atomic_max_fetch:
  case AtomicExpr::AO__scoped_atomic_min_fetch:
  case AtomicExpr::AO__scoped_atomic_sub_fetch:
    ShouldCastToIntPtrTy = !MemTy->isFloatingType();
    [[fallthrough]];

  case AtomicExpr::AO__atomic_fetch_and:
  case AtomicExpr::AO__atomic_fetch_nand:
  case AtomicExpr::AO__atomic_fetch_or:
  case AtomicExpr::AO__atomic_fetch_xor:
  case AtomicExpr::AO__atomic_and_fetch:
  case AtomicExpr::AO__atomic_nand_fetch:
  case AtomicExpr::AO__atomic_or_fetch:
  case AtomicExpr::AO__atomic_xor_fetch:
  case AtomicExpr::AO__atomic_store_n:
  case AtomicExpr::AO__atomic_exchange_n:
  case AtomicExpr::AO__c11_atomic_fetch_and:
  case AtomicExpr::AO__c11_atomic_fetch_nand:
  case AtomicExpr::AO__c11_atomic_fetch_or:
  case AtomicExpr::AO__c11_atomic_fetch_xor:
  case AtomicExpr::AO__c11_atomic_store:
  case AtomicExpr::AO__c11_atomic_exchange:
  case AtomicExpr::AO__hip_atomic_fetch_and:
  case AtomicExpr::AO__hip_atomic_fetch_or:
  case AtomicExpr::AO__hip_atomic_fetch_xor:
  case AtomicExpr::AO__hip_atomic_store:
  case AtomicExpr::AO__hip_atomic_exchange:
  case AtomicExpr::AO__opencl_atomic_fetch_and:
  case AtomicExpr::AO__opencl_atomic_fetch_or:
  case AtomicExpr::AO__opencl_atomic_fetch_xor:
  case AtomicExpr::AO__opencl_atomic_store:
  case AtomicExpr::AO__opencl_atomic_exchange:
  case AtomicExpr::AO__scoped_atomic_fetch_and:
  case AtomicExpr::AO__scoped_atomic_fetch_nand:
  case AtomicExpr::AO__scoped_atomic_fetch_or:
  case AtomicExpr::AO__scoped_atomic_fetch_xor:
  case AtomicExpr::AO__scoped_atomic_and_fetch:
  case AtomicExpr::AO__scoped_atomic_nand_fetch:
  case AtomicExpr::AO__scoped_atomic_or_fetch:
  case AtomicExpr::AO__scoped_atomic_xor_fetch:
  case AtomicExpr::AO__scoped_atomic_store_n:
  case AtomicExpr::AO__scoped_atomic_exchange_n:
    Val1 = EmitValToTemp(*this, E->getVal1());
    break;
  }

  QualType RValTy = E->getType().getUnqualifiedType();

  // The inlined atomics only function on iN types, where N is a power of 2.
  // We need to make sure (via temporaries if necessary) that all incoming
  // values are compatible.
  LValue AtomicVal = MakeAddrLValue(Ptr, AtomicTy);
  AtomicInfo Atomics(*this, AtomicVal);

  if (ShouldCastToIntPtrTy) {
    Ptr = Atomics.castToAtomicIntPointer(Ptr);
    if (Val1.isValid())
      Val1 = Atomics.convertToAtomicIntPointer(Val1);
    if (Val2.isValid())
      Val2 = Atomics.convertToAtomicIntPointer(Val2);
  }
  if (Dest.isValid()) {
    if (ShouldCastToIntPtrTy)
      Dest = Atomics.castToAtomicIntPointer(Dest);
  } else if (E->isCmpXChg())
    Dest = CreateMemTemp(RValTy, "cmpxchg.bool");
  else if (!RValTy->isVoidType()) {
    Dest = Atomics.CreateTempAlloca();
    if (ShouldCastToIntPtrTy)
      Dest = Atomics.castToAtomicIntPointer(Dest);
  }

  bool PowerOf2Size = (Size & (Size - 1)) == 0;
  bool UseLibcall = !PowerOf2Size || (Size > 16);

  // For atomics larger than 16 bytes, emit a libcall from the frontend. This
  // avoids the overhead of dealing with excessively-large value types in IR.
  // Non-power-of-2 values also lower to libcall here, as they are not
  // currently permitted in IR instructions (although that constraint could be
  // relaxed in the future). For other cases where a libcall is required on a
  // given platform, we let the backend handle it (this includes handling for
  // all of the size-optimized libcall variants, which are only valid up to 16
  // bytes.)
  //
  // See: https://llvm.org/docs/Atomics.html#libcalls-atomic
  if (UseLibcall) {
    CallArgList Args;
    // For non-optimized library calls, the size is the first parameter.
    Args.add(RValue::get(llvm::ConstantInt::get(SizeTy, Size)),
             getContext().getSizeType());

    // The atomic address is the second parameter.
    // The OpenCL atomic library functions only accept pointer arguments to
    // generic address space.
    auto CastToGenericAddrSpace = [&](llvm::Value *V, QualType PT) {
      if (!E->isOpenCL())
        return V;
      auto AS = PT->castAs<PointerType>()->getPointeeType().getAddressSpace();
      if (AS == LangAS::opencl_generic)
        return V;
      auto DestAS = getContext().getTargetAddressSpace(LangAS::opencl_generic);
      auto *DestType = llvm::PointerType::get(getLLVMContext(), DestAS);

      return getTargetHooks().performAddrSpaceCast(
          *this, V, AS, LangAS::opencl_generic, DestType, false);
    };

    Args.add(RValue::get(CastToGenericAddrSpace(Ptr.emitRawPointer(*this),
                                                E->getPtr()->getType())),
             getContext().VoidPtrTy);

    // The next 1-3 parameters are op-dependent.
    std::string LibCallName;
    QualType RetTy;
    bool HaveRetTy = false;
    switch (E->getOp()) {
    case AtomicExpr::AO__c11_atomic_init:
    case AtomicExpr::AO__opencl_atomic_init:
      llvm_unreachable("Already handled!");

    // There is only one libcall for compare and exchange, because there is
    // no optimisation benefit possible from a libcall version of a weak
    // compare and exchange.
    // bool __atomic_compare_exchange(size_t size, void *mem, void *expected,
    //                                void *desired, int success, int failure)
    case AtomicExpr::AO__atomic_compare_exchange:
    case AtomicExpr::AO__atomic_compare_exchange_n:
    case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
    case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
    case AtomicExpr::AO__hip_atomic_compare_exchange_weak:
    case AtomicExpr::AO__hip_atomic_compare_exchange_strong:
    case AtomicExpr::AO__opencl_atomic_compare_exchange_weak:
    case AtomicExpr::AO__opencl_atomic_compare_exchange_strong:
    case AtomicExpr::AO__scoped_atomic_compare_exchange:
    case AtomicExpr::AO__scoped_atomic_compare_exchange_n:
      LibCallName = "__atomic_compare_exchange";
      RetTy = getContext().BoolTy;
      HaveRetTy = true;
      Args.add(RValue::get(CastToGenericAddrSpace(Val1.emitRawPointer(*this),
                                                  E->getVal1()->getType())),
               getContext().VoidPtrTy);
      Args.add(RValue::get(CastToGenericAddrSpace(Val2.emitRawPointer(*this),
                                                  E->getVal2()->getType())),
               getContext().VoidPtrTy);
      Args.add(RValue::get(Order), getContext().IntTy);
      Order = OrderFail;
      break;
    // void __atomic_exchange(size_t size, void *mem, void *val, void *return,
    //                        int order)
    case AtomicExpr::AO__atomic_exchange:
    case AtomicExpr::AO__atomic_exchange_n:
    case AtomicExpr::AO__c11_atomic_exchange:
    case AtomicExpr::AO__hip_atomic_exchange:
    case AtomicExpr::AO__opencl_atomic_exchange:
    case AtomicExpr::AO__scoped_atomic_exchange:
    case AtomicExpr::AO__scoped_atomic_exchange_n:
      LibCallName = "__atomic_exchange";
      Args.add(RValue::get(CastToGenericAddrSpace(Val1.emitRawPointer(*this),
                                                  E->getVal1()->getType())),
               getContext().VoidPtrTy);
      break;
    // void __atomic_store(size_t size, void *mem, void *val, int order)
    case AtomicExpr::AO__atomic_store:
    case AtomicExpr::AO__atomic_store_n:
    case AtomicExpr::AO__c11_atomic_store:
    case AtomicExpr::AO__hip_atomic_store:
    case AtomicExpr::AO__opencl_atomic_store:
    case AtomicExpr::AO__scoped_atomic_store:
    case AtomicExpr::AO__scoped_atomic_store_n:
      LibCallName = "__atomic_store";
      RetTy = getContext().VoidTy;
      HaveRetTy = true;
      Args.add(RValue::get(CastToGenericAddrSpace(Val1.emitRawPointer(*this),
                                                  E->getVal1()->getType())),
               getContext().VoidPtrTy);
      break;
    // void __atomic_load(size_t size, void *mem, void *return, int order)
    case AtomicExpr::AO__atomic_load:
    case AtomicExpr::AO__atomic_load_n:
    case AtomicExpr::AO__c11_atomic_load:
    case AtomicExpr::AO__hip_atomic_load:
    case AtomicExpr::AO__opencl_atomic_load:
    case AtomicExpr::AO__scoped_atomic_load:
    case AtomicExpr::AO__scoped_atomic_load_n:
      LibCallName = "__atomic_load";
      break;
    case AtomicExpr::AO__atomic_add_fetch:
    case AtomicExpr::AO__scoped_atomic_add_fetch:
    case AtomicExpr::AO__atomic_fetch_add:
    case AtomicExpr::AO__c11_atomic_fetch_add:
    case AtomicExpr::AO__hip_atomic_fetch_add:
    case AtomicExpr::AO__opencl_atomic_fetch_add:
    case AtomicExpr::AO__scoped_atomic_fetch_add:
    case AtomicExpr::AO__atomic_and_fetch:
    case AtomicExpr::AO__scoped_atomic_and_fetch:
    case AtomicExpr::AO__atomic_fetch_and:
    case AtomicExpr::AO__c11_atomic_fetch_and:
    case AtomicExpr::AO__hip_atomic_fetch_and:
    case AtomicExpr::AO__opencl_atomic_fetch_and:
    case AtomicExpr::AO__scoped_atomic_fetch_and:
    case AtomicExpr::AO__atomic_or_fetch:
    case AtomicExpr::AO__scoped_atomic_or_fetch:
    case AtomicExpr::AO__atomic_fetch_or:
    case AtomicExpr::AO__c11_atomic_fetch_or:
    case AtomicExpr::AO__hip_atomic_fetch_or:
    case AtomicExpr::AO__opencl_atomic_fetch_or:
    case AtomicExpr::AO__scoped_atomic_fetch_or:
    case AtomicExpr::AO__atomic_sub_fetch:
    case AtomicExpr::AO__scoped_atomic_sub_fetch:
    case AtomicExpr::AO__atomic_fetch_sub:
    case AtomicExpr::AO__c11_atomic_fetch_sub:
    case AtomicExpr::AO__hip_atomic_fetch_sub:
    case AtomicExpr::AO__opencl_atomic_fetch_sub:
    case AtomicExpr::AO__scoped_atomic_fetch_sub:
    case AtomicExpr::AO__atomic_xor_fetch:
    case AtomicExpr::AO__scoped_atomic_xor_fetch:
    case AtomicExpr::AO__atomic_fetch_xor:
    case AtomicExpr::AO__c11_atomic_fetch_xor:
    case AtomicExpr::AO__hip_atomic_fetch_xor:
    case AtomicExpr::AO__opencl_atomic_fetch_xor:
    case AtomicExpr::AO__scoped_atomic_fetch_xor:
    case AtomicExpr::AO__atomic_nand_fetch:
    case AtomicExpr::AO__atomic_fetch_nand:
    case AtomicExpr::AO__c11_atomic_fetch_nand:
    case AtomicExpr::AO__scoped_atomic_fetch_nand:
    case AtomicExpr::AO__scoped_atomic_nand_fetch:
    case AtomicExpr::AO__atomic_min_fetch:
    case AtomicExpr::AO__atomic_fetch_min:
    case AtomicExpr::AO__c11_atomic_fetch_min:
    case AtomicExpr::AO__hip_atomic_fetch_min:
    case AtomicExpr::AO__opencl_atomic_fetch_min:
    case AtomicExpr::AO__scoped_atomic_fetch_min:
    case AtomicExpr::AO__scoped_atomic_min_fetch:
    case AtomicExpr::AO__atomic_max_fetch:
    case AtomicExpr::AO__atomic_fetch_max:
    case AtomicExpr::AO__c11_atomic_fetch_max:
    case AtomicExpr::AO__hip_atomic_fetch_max:
    case AtomicExpr::AO__opencl_atomic_fetch_max:
    case AtomicExpr::AO__scoped_atomic_fetch_max:
    case AtomicExpr::AO__scoped_atomic_max_fetch:
    case AtomicExpr::AO__atomic_test_and_set:
    case AtomicExpr::AO__atomic_clear:
      llvm_unreachable("Integral atomic operations always become atomicrmw!");
    }

    if (E->isOpenCL()) {
      LibCallName =
          std::string("__opencl") + StringRef(LibCallName).drop_front(1).str();
    }
    // By default, assume we return a value of the atomic type.
    if (!HaveRetTy) {
      // The value is returned through a parameter placed before the order.
      RetTy = getContext().VoidTy;
      Args.add(RValue::get(
                   CastToGenericAddrSpace(Dest.emitRawPointer(*this), RetTy)),
               getContext().VoidPtrTy);
    }
    // Order is always the last parameter.
    Args.add(RValue::get(Order),
             getContext().IntTy);
    if (E->isOpenCL())
      Args.add(RValue::get(Scope), getContext().IntTy);

    RValue Res = emitAtomicLibcall(*this, LibCallName, RetTy, Args);
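
    // The argument list built above follows the generic __atomic_* libcall
    // ABI, e.g. for an exchange (illustrative):
    //   void __atomic_exchange(size_t size, void *mem, void *val, void *ret,
    //                          int order);
    // with an extra trailing 'int scope' argument in the OpenCL variants.
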
    // The value is returned directly from the libcall.
    if (E->isCmpXChg())
      return Res;

    if (RValTy->isVoidType())
      return RValue::get(nullptr);

    return convertTempToRValue(Dest.withElementType(ConvertTypeForMem(RValTy)),
                               RValTy, E->getExprLoc());
  }

  bool IsStore = E->getOp() == AtomicExpr::AO__c11_atomic_store ||
                 E->getOp() == AtomicExpr::AO__opencl_atomic_store ||
                 E->getOp() == AtomicExpr::AO__hip_atomic_store ||
                 E->getOp() == AtomicExpr::AO__atomic_store ||
                 E->getOp() == AtomicExpr::AO__atomic_store_n ||
                 E->getOp() == AtomicExpr::AO__scoped_atomic_store ||
                 E->getOp() == AtomicExpr::AO__scoped_atomic_store_n ||
                 E->getOp() == AtomicExpr::AO__atomic_clear;
  bool IsLoad = E->getOp() == AtomicExpr::AO__c11_atomic_load ||
                E->getOp() == AtomicExpr::AO__opencl_atomic_load ||
                E->getOp() == AtomicExpr::AO__hip_atomic_load ||
                E->getOp() == AtomicExpr::AO__atomic_load ||
                E->getOp() == AtomicExpr::AO__atomic_load_n ||
                E->getOp() == AtomicExpr::AO__scoped_atomic_load ||
                E->getOp() == AtomicExpr::AO__scoped_atomic_load_n;

  if (isa<llvm::ConstantInt>(Order)) {
    auto ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
    // We should not ever get to a case where the ordering isn't a valid C ABI
    // value, but it's hard to enforce that in general.
    if (llvm::isValidAtomicOrderingCABI(ord))
      switch ((llvm::AtomicOrderingCABI)ord) {
      case llvm::AtomicOrderingCABI::relaxed:
        EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
                     llvm::AtomicOrdering::Monotonic, Scope);
        break;
      case llvm::AtomicOrderingCABI::consume:
      case llvm::AtomicOrderingCABI::acquire:
        if (IsStore)
          break; // Avoid crashing on code with undefined behavior
        EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
                     llvm::AtomicOrdering::Acquire, Scope);
        break;
      case llvm::AtomicOrderingCABI::release:
        if (IsLoad)
          break; // Avoid crashing on code with undefined behavior
        EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
                     llvm::AtomicOrdering::Release, Scope);
        break;
      case llvm::AtomicOrderingCABI::acq_rel:
        if (IsLoad || IsStore)
          break; // Avoid crashing on code with undefined behavior
        EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
                     llvm::AtomicOrdering::AcquireRelease, Scope);
        break;
      case llvm::AtomicOrderingCABI::seq_cst:
        EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
                     llvm::AtomicOrdering::SequentiallyConsistent, Scope);
        break;
      }
    if (RValTy->isVoidType())
      return RValue::get(nullptr);

    return convertTempToRValue(Dest.withElementType(ConvertTypeForMem(RValTy)),
                               RValTy, E->getExprLoc());
  }

  // Long case, when Order isn't obviously constant.
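
  // Illustrative example: atomic_load_explicit(p, ord) with a runtime 'ord'
  // cannot pick a single LLVM ordering statically, so we emit one block per
  // valid ordering and dispatch on the C ABI ordering value, as built below.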

  // Create all the relevant BB's
  llvm::BasicBlock *MonotonicBB = nullptr, *AcquireBB = nullptr,
                   *ReleaseBB = nullptr, *AcqRelBB = nullptr,
                   *SeqCstBB = nullptr;
  MonotonicBB = createBasicBlock("monotonic", CurFn);
  if (!IsStore)
    AcquireBB = createBasicBlock("acquire", CurFn);
  if (!IsLoad)
    ReleaseBB = createBasicBlock("release", CurFn);
  if (!IsLoad && !IsStore)
    AcqRelBB = createBasicBlock("acqrel", CurFn);
  SeqCstBB = createBasicBlock("seqcst", CurFn);
  llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);

  // Create the switch for the split
  // MonotonicBB is arbitrarily chosen as the default case; in practice, this
  // doesn't matter unless someone is crazy enough to use something that
  // doesn't fold to a constant for the ordering.
  Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
  llvm::SwitchInst *SI = Builder.CreateSwitch(Order, MonotonicBB);

  // Emit all the different atomics
  Builder.SetInsertPoint(MonotonicBB);
  EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
               llvm::AtomicOrdering::Monotonic, Scope);
  Builder.CreateBr(ContBB);
  if (!IsStore) {
    Builder.SetInsertPoint(AcquireBB);
    EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
                 llvm::AtomicOrdering::Acquire, Scope);
    Builder.CreateBr(ContBB);
    SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::consume),
                AcquireBB);
    SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::acquire),
                AcquireBB);
  }
  if (!IsLoad) {
    Builder.SetInsertPoint(ReleaseBB);
    EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
                 llvm::AtomicOrdering::Release, Scope);
    Builder.CreateBr(ContBB);
    SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::release),
                ReleaseBB);
  }
  if (!IsLoad && !IsStore) {
    Builder.SetInsertPoint(AcqRelBB);
    EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
                 llvm::AtomicOrdering::AcquireRelease, Scope);
    Builder.CreateBr(ContBB);
    SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::acq_rel),
                AcqRelBB);
  }
  Builder.SetInsertPoint(SeqCstBB);
  EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
               llvm::AtomicOrdering::SequentiallyConsistent, Scope);
  Builder.CreateBr(ContBB);
  SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::seq_cst),
              SeqCstBB);

  // Cleanup and return
  Builder.SetInsertPoint(ContBB);
  if (RValTy->isVoidType())
    return RValue::get(nullptr);

  assert(Atomics.getValueSizeInBits() <= Atomics.getAtomicSizeInBits());
  return convertTempToRValue(Dest.withElementType(ConvertTypeForMem(RValTy)),
                             RValTy, E->getExprLoc());
}

Address AtomicInfo::castToAtomicIntPointer(Address addr) const {
  llvm::IntegerType *ty =
      llvm::IntegerType::get(CGF.getLLVMContext(), AtomicSizeInBits);
  return addr.withElementType(ty);
}

Address AtomicInfo::convertToAtomicIntPointer(Address Addr) const {
  llvm::Type *Ty = Addr.getElementType();
  uint64_t SourceSizeInBits = CGF.CGM.getDataLayout().getTypeSizeInBits(Ty);
  if (SourceSizeInBits != AtomicSizeInBits) {
    Address Tmp = CreateTempAlloca();
    CGF.Builder.CreateMemCpy(Tmp, Addr,
                             std::min(AtomicSizeInBits, SourceSizeInBits) / 8);
    Addr = Tmp;
  }

  return castToAtomicIntPointer(Addr);
}

  return castToAtomicIntPointer(Addr);
}

RValue AtomicInfo::convertAtomicTempToRValue(Address addr,
                                             AggValueSlot resultSlot,
                                             SourceLocation loc,
                                             bool asValue) const {
  if (LVal.isSimple()) {
    if (EvaluationKind == TEK_Aggregate)
      return resultSlot.asRValue();

    // Drill into the padding structure if we have one.
    if (hasPadding())
      addr = CGF.Builder.CreateStructGEP(addr, 0);

    // Otherwise, just convert the temporary to an r-value using the
    // normal conversion routine.
    return CGF.convertTempToRValue(addr, getValueType(), loc);
  }
  if (!asValue)
    // Get RValue from temp memory as atomic for non-simple lvalues.
    return RValue::get(CGF.Builder.CreateLoad(addr));
  if (LVal.isBitField())
    return CGF.EmitLoadOfBitfieldLValue(
        LValue::MakeBitfield(addr, LVal.getBitFieldInfo(), LVal.getType(),
                             LVal.getBaseInfo(), TBAAAccessInfo()), loc);
  if (LVal.isVectorElt())
    return CGF.EmitLoadOfLValue(
        LValue::MakeVectorElt(addr, LVal.getVectorIdx(), LVal.getType(),
                              LVal.getBaseInfo(), TBAAAccessInfo()), loc);
  assert(LVal.isExtVectorElt());
  return CGF.EmitLoadOfExtVectorElementLValue(LValue::MakeExtVectorElt(
      addr, LVal.getExtVectorElts(), LVal.getType(),
      LVal.getBaseInfo(), TBAAAccessInfo()));
}

/// Return true if \param ValTy is a type that should be cast to an integer
/// type around the atomic memory operation. If \param CmpXchg is true,
/// floating-point types are also cast, since the cmpxchg instruction cannot
/// have floating-point operands.
/// TODO: Allow compare-and-exchange and FP - see comment in
/// AtomicExpandPass.cpp.
static bool shouldCastToInt(llvm::Type *ValTy, bool CmpXchg) {
  if (ValTy->isFloatingPointTy())
    return ValTy->isX86_FP80Ty() || CmpXchg;
  return !ValTy->isIntegerTy() && !ValTy->isPointerTy();
}

RValue AtomicInfo::ConvertToValueOrAtomic(llvm::Value *Val,
                                          AggValueSlot ResultSlot,
                                          SourceLocation Loc, bool AsValue,
                                          bool CmpXchg) const {
  // Try to avoid going through memory in some easy cases.
  assert((Val->getType()->isIntegerTy() || Val->getType()->isPointerTy() ||
          Val->getType()->isIEEELikeFPTy()) &&
         "Expected integer, pointer or floating point value when converting "
         "result.");
  if (getEvaluationKind() == TEK_Scalar &&
      (((!LVal.isBitField() ||
         LVal.getBitFieldInfo().Size == ValueSizeInBits) &&
        !hasPadding()) ||
       !AsValue)) {
    auto *ValTy = AsValue
                      ? CGF.ConvertTypeForMem(ValueTy)
                      : getAtomicAddress().getElementType();
    if (!shouldCastToInt(ValTy, CmpXchg)) {
      assert((!ValTy->isIntegerTy() || Val->getType() == ValTy) &&
             "Different integer types.");
      return RValue::get(CGF.EmitFromMemory(Val, ValueTy));
    }
    if (llvm::CastInst::isBitCastable(Val->getType(), ValTy))
      return RValue::get(CGF.Builder.CreateBitCast(Val, ValTy));
  }

  // Create a temporary. This needs to be big enough to hold the
  // atomic integer.
  Address Temp = Address::invalid();
  bool TempIsVolatile = false;
  if (AsValue && getEvaluationKind() == TEK_Aggregate) {
    assert(!ResultSlot.isIgnored());
    Temp = ResultSlot.getAddress();
    TempIsVolatile = ResultSlot.isVolatile();
  } else {
    Temp = CreateTempAlloca();
  }

  // Slam the integer into the temporary.
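  // (For instance - a sketch, independent of any particular target - an i32
  // load result destined to become a 'float' value is stored through an
  // i32-typed view of the temporary here and then reloaded with its natural
  // type by convertAtomicTempToRValue below.)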
  Address CastTemp = castToAtomicIntPointer(Temp);
  CGF.Builder.CreateStore(Val, CastTemp)->setVolatile(TempIsVolatile);

  return convertAtomicTempToRValue(Temp, ResultSlot, Loc, AsValue);
}

void AtomicInfo::EmitAtomicLoadLibcall(llvm::Value *AddrForLoaded,
                                       llvm::AtomicOrdering AO, bool) {
  // void __atomic_load(size_t size, void *mem, void *ret, int order);
  CallArgList Args;
  Args.add(RValue::get(getAtomicSizeValue()), CGF.getContext().getSizeType());
  Args.add(RValue::get(getAtomicPointer()), CGF.getContext().VoidPtrTy);
  Args.add(RValue::get(AddrForLoaded), CGF.getContext().VoidPtrTy);
  Args.add(
      RValue::get(llvm::ConstantInt::get(CGF.IntTy, (int)llvm::toCABI(AO))),
      CGF.getContext().IntTy);
  emitAtomicLibcall(CGF, "__atomic_load", CGF.getContext().VoidTy, Args);
}

llvm::Value *AtomicInfo::EmitAtomicLoadOp(llvm::AtomicOrdering AO,
                                          bool IsVolatile, bool CmpXchg) {
  // Okay, we're doing this natively.
  Address Addr = getAtomicAddress();
  if (shouldCastToInt(Addr.getElementType(), CmpXchg))
    Addr = castToAtomicIntPointer(Addr);
  llvm::LoadInst *Load = CGF.Builder.CreateLoad(Addr, "atomic-load");
  Load->setAtomic(AO);

  // Other decoration.
  if (IsVolatile)
    Load->setVolatile(true);
  CGF.CGM.DecorateInstructionWithTBAA(Load, LVal.getTBAAInfo());
  return Load;
}

/// An LValue is a candidate for having its loads and stores be made atomic if
/// we are operating under /volatile:ms *and* the LValue itself is volatile and
/// such an operation can be performed without a libcall.
bool CodeGenFunction::LValueIsSuitableForInlineAtomic(LValue LV) {
  if (!CGM.getLangOpts().MSVolatile) return false;
  AtomicInfo AI(*this, LV);
  bool IsVolatile = LV.isVolatile() || hasVolatileMember(LV.getType());
  // An atomic is inline if we don't need to use a libcall.
  bool AtomicIsInline = !AI.shouldUseLibcall();
  // MSVC doesn't seem to do this for types wider than a pointer.
  if (getContext().getTypeSize(LV.getType()) >
      getContext().getTypeSize(getContext().getIntPtrType()))
    return false;
  return IsVolatile && AtomicIsInline;
}

RValue CodeGenFunction::EmitAtomicLoad(LValue LV, SourceLocation SL,
                                       AggValueSlot Slot) {
  llvm::AtomicOrdering AO;
  bool IsVolatile = LV.isVolatileQualified();
  if (LV.getType()->isAtomicType()) {
    AO = llvm::AtomicOrdering::SequentiallyConsistent;
  } else {
    AO = llvm::AtomicOrdering::Acquire;
    IsVolatile = true;
  }
  return EmitAtomicLoad(LV, SL, AO, IsVolatile, Slot);
}

RValue AtomicInfo::EmitAtomicLoad(AggValueSlot ResultSlot, SourceLocation Loc,
                                  bool AsValue, llvm::AtomicOrdering AO,
                                  bool IsVolatile) {
  // Check whether we should use a library call.
  if (shouldUseLibcall()) {
    Address TempAddr = Address::invalid();
    if (LVal.isSimple() && !ResultSlot.isIgnored()) {
      assert(getEvaluationKind() == TEK_Aggregate);
      TempAddr = ResultSlot.getAddress();
    } else
      TempAddr = CreateTempAlloca();

    EmitAtomicLoadLibcall(TempAddr.emitRawPointer(CGF), AO, IsVolatile);

    // Okay, turn that back into the original value or whole atomic (for
    // non-simple lvalues) type.
    return convertAtomicTempToRValue(TempAddr, ResultSlot, Loc, AsValue);
  }

  // Okay, we're doing this natively.
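  // For a lock-free object such as a 4-byte _Atomic(int), the load emitted
  // by EmitAtomicLoadOp below reduces to a single instruction along the
  // lines of (a sketch; ordering and alignment vary):
  //
  //   %atomic-load = load atomic i32, ptr %obj seq_cst, align 4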
  auto *Load = EmitAtomicLoadOp(AO, IsVolatile);

  // If we're ignoring an aggregate return, don't do anything.
  if (getEvaluationKind() == TEK_Aggregate && ResultSlot.isIgnored())
    return RValue::getAggregate(Address::invalid(), false);

  // Okay, turn that back into the original value or atomic (for non-simple
  // lvalues) type.
  return ConvertToValueOrAtomic(Load, ResultSlot, Loc, AsValue);
}

/// Emit a load from an l-value of atomic type. Note that the r-value
/// we produce is an r-value of the atomic *value* type.
RValue CodeGenFunction::EmitAtomicLoad(LValue src, SourceLocation loc,
                                       llvm::AtomicOrdering AO, bool IsVolatile,
                                       AggValueSlot resultSlot) {
  AtomicInfo Atomics(*this, src);
  return Atomics.EmitAtomicLoad(resultSlot, loc, /*AsValue=*/true, AO,
                                IsVolatile);
}

/// Copy an r-value into memory as part of storing to an atomic type.
/// This needs to create a bit-pattern suitable for atomic operations.
void AtomicInfo::emitCopyIntoMemory(RValue rvalue) const {
  assert(LVal.isSimple());
  // If we have an r-value, the rvalue should be of the atomic type,
  // which means that the caller is responsible for having zeroed
  // any padding. Just do an aggregate copy of that type.
  if (rvalue.isAggregate()) {
    LValue Dest = CGF.MakeAddrLValue(getAtomicAddress(), getAtomicType());
    LValue Src = CGF.MakeAddrLValue(rvalue.getAggregateAddress(),
                                    getAtomicType());
    bool IsVolatile = rvalue.isVolatileQualified() ||
                      LVal.isVolatileQualified();
    CGF.EmitAggregateCopy(Dest, Src, getAtomicType(),
                          AggValueSlot::DoesNotOverlap, IsVolatile);
    return;
  }

  // Okay, otherwise we're copying stuff.

  // Zero out the buffer if necessary.
  emitMemSetZeroIfNecessary();

  // Drill past the padding if present.
  LValue TempLVal = projectValue();

  // Okay, store the rvalue in.
  if (rvalue.isScalar()) {
    CGF.EmitStoreOfScalar(rvalue.getScalarVal(), TempLVal, /*init*/ true);
  } else {
    CGF.EmitStoreOfComplex(rvalue.getComplexVal(), TempLVal, /*init*/ true);
  }
}

/// Materialize an r-value into memory for the purposes of storing it
/// to an atomic type.
Address AtomicInfo::materializeRValue(RValue rvalue) const {
  // Aggregate r-values are already in memory, and EmitAtomicStore
  // requires them to be values of the atomic type.
  if (rvalue.isAggregate())
    return rvalue.getAggregateAddress();

  // Otherwise, make a temporary and materialize into it.
  LValue TempLV = CGF.MakeAddrLValue(CreateTempAlloca(), getAtomicType());
  AtomicInfo Atomics(CGF, TempLV);
  Atomics.emitCopyIntoMemory(rvalue);
  return TempLV.getAddress();
}

llvm::Value *AtomicInfo::getScalarRValValueOrNull(RValue RVal) const {
  if (RVal.isScalar() && (!hasPadding() || !LVal.isSimple()))
    return RVal.getScalarVal();
  return nullptr;
}

llvm::Value *AtomicInfo::convertRValueToInt(RValue RVal, bool CmpXchg) const {
  // If we've got a scalar value of the right size, try to avoid going
  // through memory. Floats get cast if needed by AtomicExpandPass.
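  // (Sketch of the cmpxchg case: a scalar 'double' is bitcast to i64 here,
  // since the cmpxchg instruction cannot take floating-point operands; the
  // integer result is bitcast back by ConvertToValueOrAtomic afterwards.)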
  if (llvm::Value *Value = getScalarRValValueOrNull(RVal)) {
    if (!shouldCastToInt(Value->getType(), CmpXchg))
      return CGF.EmitToMemory(Value, ValueTy);
    else {
      llvm::IntegerType *InputIntTy = llvm::IntegerType::get(
          CGF.getLLVMContext(),
          LVal.isSimple() ? getValueSizeInBits() : getAtomicSizeInBits());
      if (llvm::BitCastInst::isBitCastable(Value->getType(), InputIntTy))
        return CGF.Builder.CreateBitCast(Value, InputIntTy);
    }
  }
  // Otherwise, we need to go through memory.
  // Put the r-value in memory.
  Address Addr = materializeRValue(RVal);

  // Cast the temporary to the atomic int type and pull a value out.
  Addr = castToAtomicIntPointer(Addr);
  return CGF.Builder.CreateLoad(Addr);
}

std::pair<llvm::Value *, llvm::Value *> AtomicInfo::EmitAtomicCompareExchangeOp(
    llvm::Value *ExpectedVal, llvm::Value *DesiredVal,
    llvm::AtomicOrdering Success, llvm::AtomicOrdering Failure, bool IsWeak) {
  // Do the atomic compare-and-exchange.
  Address Addr = getAtomicAddressAsAtomicIntPointer();
  auto *Inst = CGF.Builder.CreateAtomicCmpXchg(Addr, ExpectedVal, DesiredVal,
                                               Success, Failure);
  // Other decoration.
  Inst->setVolatile(LVal.isVolatileQualified());
  Inst->setWeak(IsWeak);

  // Unpack the result pair: the previous value and the success flag.
  auto *PreviousVal = CGF.Builder.CreateExtractValue(Inst, /*Idxs=*/0);
  auto *SuccessFailureVal = CGF.Builder.CreateExtractValue(Inst, /*Idxs=*/1);
  return std::make_pair(PreviousVal, SuccessFailureVal);
}

llvm::Value *
AtomicInfo::EmitAtomicCompareExchangeLibcall(llvm::Value *ExpectedAddr,
                                             llvm::Value *DesiredAddr,
                                             llvm::AtomicOrdering Success,
                                             llvm::AtomicOrdering Failure) {
  // bool __atomic_compare_exchange(size_t size, void *obj, void *expected,
  //                                void *desired, int success, int failure);
  CallArgList Args;
  Args.add(RValue::get(getAtomicSizeValue()), CGF.getContext().getSizeType());
  Args.add(RValue::get(getAtomicPointer()), CGF.getContext().VoidPtrTy);
  Args.add(RValue::get(ExpectedAddr), CGF.getContext().VoidPtrTy);
  Args.add(RValue::get(DesiredAddr), CGF.getContext().VoidPtrTy);
  Args.add(RValue::get(
               llvm::ConstantInt::get(CGF.IntTy, (int)llvm::toCABI(Success))),
           CGF.getContext().IntTy);
  Args.add(RValue::get(
               llvm::ConstantInt::get(CGF.IntTy, (int)llvm::toCABI(Failure))),
           CGF.getContext().IntTy);
  auto SuccessFailureRVal = emitAtomicLibcall(CGF, "__atomic_compare_exchange",
                                              CGF.getContext().BoolTy, Args);

  return SuccessFailureRVal.getScalarVal();
}

std::pair<RValue, llvm::Value *> AtomicInfo::EmitAtomicCompareExchange(
    RValue Expected, RValue Desired, llvm::AtomicOrdering Success,
    llvm::AtomicOrdering Failure, bool IsWeak) {
  // Check whether we should use a library call.
  if (shouldUseLibcall()) {
    // Produce a source address.
    Address ExpectedAddr = materializeRValue(Expected);
    llvm::Value *ExpectedPtr = ExpectedAddr.emitRawPointer(CGF);
    llvm::Value *DesiredPtr = materializeRValue(Desired).emitRawPointer(CGF);
    auto *Res = EmitAtomicCompareExchangeLibcall(ExpectedPtr, DesiredPtr,
                                                 Success, Failure);
    return std::make_pair(
        convertAtomicTempToRValue(ExpectedAddr, AggValueSlot::ignored(),
                                  SourceLocation(), /*AsValue=*/false),
        Res);
  }

  // If we've got a scalar value of the right size, try to avoid going
  // through memory.
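  // The native path below boils down to a single instruction of the shape
  // (a sketch, for a lock-free 8-byte object):
  //
  //   %pair = cmpxchg ptr %obj, i64 %expected, i64 %desired seq_cst seq_cst
  //
  // whose { i64, i1 } result carries the previous value and a success flag.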
  auto *ExpectedVal = convertRValueToInt(Expected, /*CmpXchg=*/true);
  auto *DesiredVal = convertRValueToInt(Desired, /*CmpXchg=*/true);
  auto Res = EmitAtomicCompareExchangeOp(ExpectedVal, DesiredVal, Success,
                                         Failure, IsWeak);
  return std::make_pair(
      ConvertToValueOrAtomic(Res.first, AggValueSlot::ignored(),
                             SourceLocation(), /*AsValue=*/false,
                             /*CmpXchg=*/true),
      Res.second);
}

static void
EmitAtomicUpdateValue(CodeGenFunction &CGF, AtomicInfo &Atomics, RValue OldRVal,
                      const llvm::function_ref<RValue(RValue)> &UpdateOp,
                      Address DesiredAddr) {
  RValue UpRVal;
  LValue AtomicLVal = Atomics.getAtomicLValue();
  LValue DesiredLVal;
  if (AtomicLVal.isSimple()) {
    UpRVal = OldRVal;
    DesiredLVal = CGF.MakeAddrLValue(DesiredAddr, AtomicLVal.getType());
  } else {
    // Build new lvalue for temp address.
    Address Ptr = Atomics.materializeRValue(OldRVal);
    LValue UpdateLVal;
    if (AtomicLVal.isBitField()) {
      UpdateLVal =
          LValue::MakeBitfield(Ptr, AtomicLVal.getBitFieldInfo(),
                               AtomicLVal.getType(),
                               AtomicLVal.getBaseInfo(),
                               AtomicLVal.getTBAAInfo());
      DesiredLVal =
          LValue::MakeBitfield(DesiredAddr, AtomicLVal.getBitFieldInfo(),
                               AtomicLVal.getType(), AtomicLVal.getBaseInfo(),
                               AtomicLVal.getTBAAInfo());
    } else if (AtomicLVal.isVectorElt()) {
      UpdateLVal = LValue::MakeVectorElt(Ptr, AtomicLVal.getVectorIdx(),
                                         AtomicLVal.getType(),
                                         AtomicLVal.getBaseInfo(),
                                         AtomicLVal.getTBAAInfo());
      DesiredLVal = LValue::MakeVectorElt(
          DesiredAddr, AtomicLVal.getVectorIdx(), AtomicLVal.getType(),
          AtomicLVal.getBaseInfo(), AtomicLVal.getTBAAInfo());
    } else {
      assert(AtomicLVal.isExtVectorElt());
      UpdateLVal = LValue::MakeExtVectorElt(Ptr, AtomicLVal.getExtVectorElts(),
                                            AtomicLVal.getType(),
                                            AtomicLVal.getBaseInfo(),
                                            AtomicLVal.getTBAAInfo());
      DesiredLVal = LValue::MakeExtVectorElt(
          DesiredAddr, AtomicLVal.getExtVectorElts(), AtomicLVal.getType(),
          AtomicLVal.getBaseInfo(), AtomicLVal.getTBAAInfo());
    }
    UpRVal = CGF.EmitLoadOfLValue(UpdateLVal, SourceLocation());
  }
  // Store new value in the corresponding memory area.
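  // (For example - a sketch - when this is reached from an OpenMP-style
  // 'atomic update' of a bit-field, UpdateOp receives the old bit-field
  // value loaded above and its result is stored through DesiredLVal, which
  // views only the bit-field's bits inside the desired-value buffer.)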
  RValue NewRVal = UpdateOp(UpRVal);
  if (NewRVal.isScalar()) {
    CGF.EmitStoreThroughLValue(NewRVal, DesiredLVal);
  } else {
    assert(NewRVal.isComplex());
    CGF.EmitStoreOfComplex(NewRVal.getComplexVal(), DesiredLVal,
                           /*isInit=*/false);
  }
}

void AtomicInfo::EmitAtomicUpdateLibcall(
    llvm::AtomicOrdering AO, const llvm::function_ref<RValue(RValue)> &UpdateOp,
    bool IsVolatile) {
  auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);

  Address ExpectedAddr = CreateTempAlloca();

  EmitAtomicLoadLibcall(ExpectedAddr.emitRawPointer(CGF), AO, IsVolatile);
  auto *ContBB = CGF.createBasicBlock("atomic_cont");
  auto *ExitBB = CGF.createBasicBlock("atomic_exit");
  CGF.EmitBlock(ContBB);
  Address DesiredAddr = CreateTempAlloca();
  if ((LVal.isBitField() && BFI.Size != ValueSizeInBits) ||
      requiresMemSetZero(getAtomicAddress().getElementType())) {
    auto *OldVal = CGF.Builder.CreateLoad(ExpectedAddr);
    CGF.Builder.CreateStore(OldVal, DesiredAddr);
  }
  auto OldRVal = convertAtomicTempToRValue(ExpectedAddr,
                                           AggValueSlot::ignored(),
                                           SourceLocation(), /*AsValue=*/false);
  EmitAtomicUpdateValue(CGF, *this, OldRVal, UpdateOp, DesiredAddr);
  llvm::Value *ExpectedPtr = ExpectedAddr.emitRawPointer(CGF);
  llvm::Value *DesiredPtr = DesiredAddr.emitRawPointer(CGF);
  auto *Res =
      EmitAtomicCompareExchangeLibcall(ExpectedPtr, DesiredPtr, AO, Failure);
  CGF.Builder.CreateCondBr(Res, ExitBB, ContBB);
  CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
}

void AtomicInfo::EmitAtomicUpdateOp(
    llvm::AtomicOrdering AO, const llvm::function_ref<RValue(RValue)> &UpdateOp,
    bool IsVolatile) {
  auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);

  // Do the atomic load.
  auto *OldVal = EmitAtomicLoadOp(Failure, IsVolatile, /*CmpXchg=*/true);
  // For non-simple lvalues perform compare-and-swap procedure.
  auto *ContBB = CGF.createBasicBlock("atomic_cont");
  auto *ExitBB = CGF.createBasicBlock("atomic_exit");
  auto *CurBB = CGF.Builder.GetInsertBlock();
  CGF.EmitBlock(ContBB);
  llvm::PHINode *PHI = CGF.Builder.CreatePHI(OldVal->getType(),
                                             /*NumReservedValues=*/2);
  PHI->addIncoming(OldVal, CurBB);
  Address NewAtomicAddr = CreateTempAlloca();
  Address NewAtomicIntAddr =
      shouldCastToInt(NewAtomicAddr.getElementType(), /*CmpXchg=*/true)
          ? castToAtomicIntPointer(NewAtomicAddr)
          : NewAtomicAddr;

  if ((LVal.isBitField() && BFI.Size != ValueSizeInBits) ||
      requiresMemSetZero(getAtomicAddress().getElementType())) {
    CGF.Builder.CreateStore(PHI, NewAtomicIntAddr);
  }
  auto OldRVal = ConvertToValueOrAtomic(PHI, AggValueSlot::ignored(),
                                        SourceLocation(), /*AsValue=*/false,
                                        /*CmpXchg=*/true);
  EmitAtomicUpdateValue(CGF, *this, OldRVal, UpdateOp, NewAtomicAddr);
  auto *DesiredVal = CGF.Builder.CreateLoad(NewAtomicIntAddr);
  // Try to write new value using cmpxchg operation.
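  // Taken together, this function builds the classic CAS loop (a sketch;
  // the labels match the blocks created above, the value names are
  // illustrative):
  //
  //   atomic_cont:
  //     %old = phi [ %initial.load, %entry ], [ %cmpxchg.old, %atomic_cont ]
  //     ; apply UpdateOp to %old and store the result into NewAtomicAddr
  //     %pair = cmpxchg ptr %obj, %old, %desired ...
  //     %ok = extractvalue { iN, i1 } %pair, 1
  //     br i1 %ok, label %atomic_exit, label %atomic_cont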
  auto Res = EmitAtomicCompareExchangeOp(PHI, DesiredVal, AO, Failure);
  PHI->addIncoming(Res.first, CGF.Builder.GetInsertBlock());
  CGF.Builder.CreateCondBr(Res.second, ExitBB, ContBB);
  CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
}

static void EmitAtomicUpdateValue(CodeGenFunction &CGF, AtomicInfo &Atomics,
                                  RValue UpdateRVal, Address DesiredAddr) {
  LValue AtomicLVal = Atomics.getAtomicLValue();
  LValue DesiredLVal;
  // Build new lvalue for temp address.
  if (AtomicLVal.isBitField()) {
    DesiredLVal =
        LValue::MakeBitfield(DesiredAddr, AtomicLVal.getBitFieldInfo(),
                             AtomicLVal.getType(), AtomicLVal.getBaseInfo(),
                             AtomicLVal.getTBAAInfo());
  } else if (AtomicLVal.isVectorElt()) {
    DesiredLVal =
        LValue::MakeVectorElt(DesiredAddr, AtomicLVal.getVectorIdx(),
                              AtomicLVal.getType(), AtomicLVal.getBaseInfo(),
                              AtomicLVal.getTBAAInfo());
  } else {
    assert(AtomicLVal.isExtVectorElt());
    DesiredLVal = LValue::MakeExtVectorElt(
        DesiredAddr, AtomicLVal.getExtVectorElts(), AtomicLVal.getType(),
        AtomicLVal.getBaseInfo(), AtomicLVal.getTBAAInfo());
  }
  // Store new value in the corresponding memory area.
  assert(UpdateRVal.isScalar());
  CGF.EmitStoreThroughLValue(UpdateRVal, DesiredLVal);
}

void AtomicInfo::EmitAtomicUpdateLibcall(llvm::AtomicOrdering AO,
                                         RValue UpdateRVal, bool IsVolatile) {
  auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);

  Address ExpectedAddr = CreateTempAlloca();

  EmitAtomicLoadLibcall(ExpectedAddr.emitRawPointer(CGF), AO, IsVolatile);
  auto *ContBB = CGF.createBasicBlock("atomic_cont");
  auto *ExitBB = CGF.createBasicBlock("atomic_exit");
  CGF.EmitBlock(ContBB);
  Address DesiredAddr = CreateTempAlloca();
  if ((LVal.isBitField() && BFI.Size != ValueSizeInBits) ||
      requiresMemSetZero(getAtomicAddress().getElementType())) {
    auto *OldVal = CGF.Builder.CreateLoad(ExpectedAddr);
    CGF.Builder.CreateStore(OldVal, DesiredAddr);
  }
  EmitAtomicUpdateValue(CGF, *this, UpdateRVal, DesiredAddr);
  llvm::Value *ExpectedPtr = ExpectedAddr.emitRawPointer(CGF);
  llvm::Value *DesiredPtr = DesiredAddr.emitRawPointer(CGF);
  auto *Res =
      EmitAtomicCompareExchangeLibcall(ExpectedPtr, DesiredPtr, AO, Failure);
  CGF.Builder.CreateCondBr(Res, ExitBB, ContBB);
  CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
}

void AtomicInfo::EmitAtomicUpdateOp(llvm::AtomicOrdering AO, RValue UpdateRVal,
                                    bool IsVolatile) {
  auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);

  // Do the atomic load.
  auto *OldVal = EmitAtomicLoadOp(Failure, IsVolatile, /*CmpXchg=*/true);
  // For non-simple lvalues perform compare-and-swap procedure.
  auto *ContBB = CGF.createBasicBlock("atomic_cont");
  auto *ExitBB = CGF.createBasicBlock("atomic_exit");
  auto *CurBB = CGF.Builder.GetInsertBlock();
  CGF.EmitBlock(ContBB);
  llvm::PHINode *PHI = CGF.Builder.CreatePHI(OldVal->getType(),
                                             /*NumReservedValues=*/2);
  PHI->addIncoming(OldVal, CurBB);
  Address NewAtomicAddr = CreateTempAlloca();
  Address NewAtomicIntAddr = castToAtomicIntPointer(NewAtomicAddr);
  if ((LVal.isBitField() && BFI.Size != ValueSizeInBits) ||
      requiresMemSetZero(getAtomicAddress().getElementType())) {
    CGF.Builder.CreateStore(PHI, NewAtomicIntAddr);
  }
  EmitAtomicUpdateValue(CGF, *this, UpdateRVal, NewAtomicAddr);
  auto *DesiredVal = CGF.Builder.CreateLoad(NewAtomicIntAddr);
  // Try to write new value using cmpxchg operation.
  auto Res = EmitAtomicCompareExchangeOp(PHI, DesiredVal, AO, Failure);
  PHI->addIncoming(Res.first, CGF.Builder.GetInsertBlock());
  CGF.Builder.CreateCondBr(Res.second, ExitBB, ContBB);
  CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
}

void AtomicInfo::EmitAtomicUpdate(
    llvm::AtomicOrdering AO, const llvm::function_ref<RValue(RValue)> &UpdateOp,
    bool IsVolatile) {
  if (shouldUseLibcall()) {
    EmitAtomicUpdateLibcall(AO, UpdateOp, IsVolatile);
  } else {
    EmitAtomicUpdateOp(AO, UpdateOp, IsVolatile);
  }
}

void AtomicInfo::EmitAtomicUpdate(llvm::AtomicOrdering AO, RValue UpdateRVal,
                                  bool IsVolatile) {
  if (shouldUseLibcall()) {
    EmitAtomicUpdateLibcall(AO, UpdateRVal, IsVolatile);
  } else {
    EmitAtomicUpdateOp(AO, UpdateRVal, IsVolatile);
  }
}

void CodeGenFunction::EmitAtomicStore(RValue rvalue, LValue lvalue,
                                      bool isInit) {
  bool IsVolatile = lvalue.isVolatileQualified();
  llvm::AtomicOrdering AO;
  if (lvalue.getType()->isAtomicType()) {
    AO = llvm::AtomicOrdering::SequentiallyConsistent;
  } else {
    AO = llvm::AtomicOrdering::Release;
    IsVolatile = true;
  }
  return EmitAtomicStore(rvalue, lvalue, AO, IsVolatile, isInit);
}

/// Emit a store to an l-value of atomic type.
///
/// Note that the r-value is expected to be an r-value *of the atomic
/// type*; this means that for aggregate r-values, it should include
/// storage for any padding that was necessary.
void CodeGenFunction::EmitAtomicStore(RValue rvalue, LValue dest,
                                      llvm::AtomicOrdering AO, bool IsVolatile,
                                      bool isInit) {
  // If this is an aggregate r-value, it should agree in type except
  // maybe for address-space qualification.
  assert(!rvalue.isAggregate() ||
         rvalue.getAggregateAddress().getElementType() ==
             dest.getAddress().getElementType());

  AtomicInfo atomics(*this, dest);
  LValue LVal = atomics.getAtomicLValue();

  // If this is an initialization, just put the value there normally.
  if (LVal.isSimple()) {
    if (isInit) {
      atomics.emitCopyIntoMemory(rvalue);
      return;
    }

    // Check whether we should use a library call.
    if (atomics.shouldUseLibcall()) {
      // Produce a source address.
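      // (For a non-lock-free object - say a 16-byte struct - the call built
      // below looks like, as a sketch:
      //
      //   call void @__atomic_store(i64 16, ptr %obj, ptr %src.tmp, i32 5)
      //
      // where 5 is the C ABI encoding of seq_cst.)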
      Address srcAddr = atomics.materializeRValue(rvalue);

      // void __atomic_store(size_t size, void *mem, void *val, int order)
      CallArgList args;
      args.add(RValue::get(atomics.getAtomicSizeValue()),
               getContext().getSizeType());
      args.add(RValue::get(atomics.getAtomicPointer()), getContext().VoidPtrTy);
      args.add(RValue::get(srcAddr.emitRawPointer(*this)),
               getContext().VoidPtrTy);
      args.add(
          RValue::get(llvm::ConstantInt::get(IntTy, (int)llvm::toCABI(AO))),
          getContext().IntTy);
      emitAtomicLibcall(*this, "__atomic_store", getContext().VoidTy, args);
      return;
    }

    // Okay, we're doing this natively.
    llvm::Value *ValToStore = atomics.convertRValueToInt(rvalue);

    // Do the atomic store.
    Address Addr = atomics.getAtomicAddress();
    if (llvm::Value *Value = atomics.getScalarRValValueOrNull(rvalue))
      if (shouldCastToInt(Value->getType(), /*CmpXchg=*/false)) {
        Addr = atomics.castToAtomicIntPointer(Addr);
        ValToStore = Builder.CreateIntCast(ValToStore, Addr.getElementType(),
                                           /*isSigned=*/false);
      }
    llvm::StoreInst *store = Builder.CreateStore(ValToStore, Addr);

    // A store cannot have acquire semantics, so drop the acquire half of
    // the requested ordering.
    if (AO == llvm::AtomicOrdering::Acquire)
      AO = llvm::AtomicOrdering::Monotonic;
    else if (AO == llvm::AtomicOrdering::AcquireRelease)
      AO = llvm::AtomicOrdering::Release;
    // Initializations don't need to be atomic.
    if (!isInit)
      store->setAtomic(AO);

    // Other decoration.
    if (IsVolatile)
      store->setVolatile(true);
    CGM.DecorateInstructionWithTBAA(store, dest.getTBAAInfo());
    return;
  }

  // Emit simple atomic update operation.
  atomics.EmitAtomicUpdate(AO, rvalue, IsVolatile);
}

/// Emit a compare-and-exchange op for an atomic type.
std::pair<RValue, llvm::Value *> CodeGenFunction::EmitAtomicCompareExchange(
    LValue Obj, RValue Expected, RValue Desired, SourceLocation Loc,
    llvm::AtomicOrdering Success, llvm::AtomicOrdering Failure, bool IsWeak,
    AggValueSlot Slot) {
  // If this is an aggregate r-value, it should agree in type except
  // maybe for address-space qualification.
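  // (E.g. for an _Atomic(struct S) object, both 'Expected' and 'Obj' should
  // have element type %struct.S, though their address spaces may differ.)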
  assert(!Expected.isAggregate() ||
         Expected.getAggregateAddress().getElementType() ==
             Obj.getAddress().getElementType());
  assert(!Desired.isAggregate() ||
         Desired.getAggregateAddress().getElementType() ==
             Obj.getAddress().getElementType());
  AtomicInfo Atomics(*this, Obj);

  return Atomics.EmitAtomicCompareExchange(Expected, Desired, Success, Failure,
                                           IsWeak);
}

llvm::AtomicRMWInst *
CodeGenFunction::emitAtomicRMWInst(llvm::AtomicRMWInst::BinOp Op, Address Addr,
                                   llvm::Value *Val, llvm::AtomicOrdering Order,
                                   llvm::SyncScope::ID SSID,
                                   const AtomicExpr *AE) {
  llvm::AtomicRMWInst *RMW =
      Builder.CreateAtomicRMW(Op, Addr, Val, Order, SSID);
  getTargetHooks().setTargetAtomicMetadata(*this, *RMW, AE);
  return RMW;
}

void CodeGenFunction::EmitAtomicUpdate(
    LValue LVal, llvm::AtomicOrdering AO,
    const llvm::function_ref<RValue(RValue)> &UpdateOp, bool IsVolatile) {
  AtomicInfo Atomics(*this, LVal);
  Atomics.EmitAtomicUpdate(AO, UpdateOp, IsVolatile);
}

void CodeGenFunction::EmitAtomicInit(Expr *init, LValue dest) {
  AtomicInfo atomics(*this, dest);

  switch (atomics.getEvaluationKind()) {
  case TEK_Scalar: {
    llvm::Value *value = EmitScalarExpr(init);
    atomics.emitCopyIntoMemory(RValue::get(value));
    return;
  }

  case TEK_Complex: {
    ComplexPairTy value = EmitComplexExpr(init);
    atomics.emitCopyIntoMemory(RValue::getComplex(value));
    return;
  }

  case TEK_Aggregate: {
    // Fix up the destination if the initializer isn't an expression
    // of atomic type.
    bool Zeroed = false;
    if (!init->getType()->isAtomicType()) {
      Zeroed = atomics.emitMemSetZeroIfNecessary();
      dest = atomics.projectValue();
    }

    // Evaluate the expression directly into the destination.
    AggValueSlot slot = AggValueSlot::forLValue(
        dest, AggValueSlot::IsNotDestructed,
        AggValueSlot::DoesNotNeedGCBarriers, AggValueSlot::IsNotAliased,
        AggValueSlot::DoesNotOverlap,
        Zeroed ? AggValueSlot::IsZeroed : AggValueSlot::IsNotZeroed);

    EmitAggExpr(init, slot);
    return;
  }
  }
  llvm_unreachable("bad evaluation kind");
}
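
// (A sketch of EmitAtomicInit's aggregate path: given
//
//   _Atomic(struct S) a = (struct S){...};
//
// where the initializer has the plain 'struct S' type rather than the
// atomic type, any padding the atomic representation adds is zeroed first
// and the initializer is then evaluated directly into the projected value
// part of the destination.)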