//===--- CGAtomic.cpp - Emit LLVM IR for atomic operations ----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the code for emitting atomic operations.
//
//===----------------------------------------------------------------------===//

#include "CodeGenFunction.h"
#include "CGCall.h"
#include "CodeGenModule.h"
#include "clang/AST/ASTContext.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Operator.h"

using namespace clang;
using namespace CodeGen;

// The ABI values for various atomic memory orderings.
enum AtomicOrderingKind {
  AO_ABI_memory_order_relaxed = 0,
  AO_ABI_memory_order_consume = 1,
  AO_ABI_memory_order_acquire = 2,
  AO_ABI_memory_order_release = 3,
  AO_ABI_memory_order_acq_rel = 4,
  AO_ABI_memory_order_seq_cst = 5
};
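// These values match C11 memory_order (and C++11 std::memory_order), which
// is what lets a constant ordering argument from user code be switched on
// directly in EmitAtomicExpr below.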

namespace {
  class AtomicInfo {
    CodeGenFunction &CGF;
    QualType AtomicTy;
    QualType ValueTy;
    uint64_t AtomicSizeInBits;
    uint64_t ValueSizeInBits;
    CharUnits AtomicAlign;
    CharUnits ValueAlign;
    CharUnits LValueAlign;
    TypeEvaluationKind EvaluationKind;
    bool UseLibcall;
  public:
    AtomicInfo(CodeGenFunction &CGF, LValue &lvalue) : CGF(CGF) {
      assert(lvalue.isSimple());

      AtomicTy = lvalue.getType();
      ValueTy = AtomicTy->castAs<AtomicType>()->getValueType();
      EvaluationKind = CGF.getEvaluationKind(ValueTy);

      ASTContext &C = CGF.getContext();

      uint64_t valueAlignInBits;
      llvm::tie(ValueSizeInBits, valueAlignInBits) = C.getTypeInfo(ValueTy);

      uint64_t atomicAlignInBits;
      llvm::tie(AtomicSizeInBits, atomicAlignInBits) = C.getTypeInfo(AtomicTy);

      assert(ValueSizeInBits <= AtomicSizeInBits);
      assert(valueAlignInBits <= atomicAlignInBits);

      AtomicAlign = C.toCharUnitsFromBits(atomicAlignInBits);
      ValueAlign = C.toCharUnitsFromBits(valueAlignInBits);
      if (lvalue.getAlignment().isZero())
        lvalue.setAlignment(AtomicAlign);

      UseLibcall =
        (AtomicSizeInBits > uint64_t(C.toBits(lvalue.getAlignment())) ||
         AtomicSizeInBits > C.getTargetInfo().getMaxAtomicInlineWidth());
    }

    QualType getAtomicType() const { return AtomicTy; }
    QualType getValueType() const { return ValueTy; }
    CharUnits getAtomicAlignment() const { return AtomicAlign; }
    CharUnits getValueAlignment() const { return ValueAlign; }
    uint64_t getAtomicSizeInBits() const { return AtomicSizeInBits; }
    uint64_t getValueSizeInBits() const { return ValueSizeInBits; }
    TypeEvaluationKind getEvaluationKind() const { return EvaluationKind; }
    bool shouldUseLibcall() const { return UseLibcall; }

    /// Is the atomic size larger than the underlying value type?
    ///
    /// Note that the absence of padding does not mean that atomic
    /// objects are completely interchangeable with non-atomic
    /// objects: we might have promoted the alignment of a type
    /// without making it bigger.
    bool hasPadding() const {
      return (ValueSizeInBits != AtomicSizeInBits);
    }
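    // For instance, a three-byte struct wrapped in _Atomic is typically
    // rounded up to a four-byte atomic object, so ValueSizeInBits (24)
    // differs from AtomicSizeInBits (32) and the padding byte must be
    // zeroed before whole-object operations like cmpxchg can compare
    // objects bytewise. (Illustrative; the exact layout is
    // target-dependent.)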

    bool emitMemSetZeroIfNecessary(LValue dest) const;

    llvm::Value *getAtomicSizeValue() const {
      CharUnits size = CGF.getContext().toCharUnitsFromBits(AtomicSizeInBits);
      return CGF.CGM.getSize(size);
    }

    /// Cast the given pointer to an integer pointer suitable for
    /// atomic operations.
    llvm::Value *emitCastToAtomicIntPointer(llvm::Value *addr) const;

    /// Turn an atomic-layout object into an r-value.
    RValue convertTempToRValue(llvm::Value *addr,
                               AggValueSlot resultSlot,
                               SourceLocation loc) const;

    /// Copy an atomic r-value into atomic-layout memory.
    void emitCopyIntoMemory(RValue rvalue, LValue lvalue) const;

    /// Project an l-value down to the value field.
    LValue projectValue(LValue lvalue) const {
      llvm::Value *addr = lvalue.getAddress();
      if (hasPadding())
        addr = CGF.Builder.CreateStructGEP(addr, 0);

      return LValue::MakeAddr(addr, getValueType(), lvalue.getAlignment(),
                              CGF.getContext(), lvalue.getTBAAInfo());
    }

    /// Materialize an atomic r-value in atomic-layout memory.
    llvm::Value *materializeRValue(RValue rvalue) const;

  private:
    bool requiresMemSetZero(llvm::Type *type) const;
  };
}
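// A rough sketch of how AtomicInfo is driven by the entry points below
// (EmitAtomicLoad / EmitAtomicStore):
//
//   AtomicInfo atomics(*this, lvalue);
//   if (atomics.shouldUseLibcall()) {
//     // marshal size/address/order arguments and call __atomic_load or
//     // __atomic_store in the support library
//   } else {
//     // cast the address to an iN* of the atomic width and emit a
//     // native atomic instruction
//   }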

static RValue emitAtomicLibcall(CodeGenFunction &CGF,
                                StringRef fnName,
                                QualType resultType,
                                CallArgList &args) {
  const CGFunctionInfo &fnInfo =
    CGF.CGM.getTypes().arrangeFreeFunctionCall(resultType, args,
            FunctionType::ExtInfo(), RequiredArgs::All);
  llvm::FunctionType *fnTy = CGF.CGM.getTypes().GetFunctionType(fnInfo);
  llvm::Constant *fn = CGF.CGM.CreateRuntimeFunction(fnTy, fnName);
  return CGF.EmitCall(fnInfo, fn, ReturnValueSlot(), args);
}
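// CreateRuntimeFunction get-or-creates a declaration of the named
// __atomic_* routine; the definition is expected to come from the atomics
// support library at link time (e.g. libatomic or compiler-rt, depending
// on the toolchain).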

/// Does a store of the given IR type modify the full expected width?
static bool isFullSizeType(CodeGenModule &CGM, llvm::Type *type,
                           uint64_t expectedSize) {
  return (CGM.getDataLayout().getTypeStoreSize(type) * 8 == expectedSize);
}
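// For example, on x86-64 an x86_fp80 value has an 80-bit store size inside
// a 128-bit atomic slot, so it is not "full size" and the trailing bytes
// would otherwise be left undefined. (Illustrative; store sizes are
// target-specific.)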

/// Does the atomic type require memsetting to zero before initialization?
///
/// The IR type is provided as a way of making certain queries faster.
bool AtomicInfo::requiresMemSetZero(llvm::Type *type) const {
  // If the atomic type has size padding, we definitely need a memset.
  if (hasPadding()) return true;

  // Otherwise, do some simple heuristics to try to avoid it:
  switch (getEvaluationKind()) {
  // For scalars and complexes, check whether the store size of the
  // type uses the full size.
  case TEK_Scalar:
    return !isFullSizeType(CGF.CGM, type, AtomicSizeInBits);
  case TEK_Complex:
    return !isFullSizeType(CGF.CGM, type->getStructElementType(0),
                           AtomicSizeInBits / 2);

  // Padding in structs has an undefined bit pattern.  User beware.
  case TEK_Aggregate:
    return false;
  }
  llvm_unreachable("bad evaluation kind");
}

bool AtomicInfo::emitMemSetZeroIfNecessary(LValue dest) const {
  llvm::Value *addr = dest.getAddress();
  if (!requiresMemSetZero(addr->getType()->getPointerElementType()))
    return false;

  CGF.Builder.CreateMemSet(addr, llvm::ConstantInt::get(CGF.Int8Ty, 0),
                           AtomicSizeInBits / 8,
                           dest.getAlignment().getQuantity());
  return true;
}

static void
EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, llvm::Value *Dest,
             llvm::Value *Ptr, llvm::Value *Val1, llvm::Value *Val2,
             uint64_t Size, unsigned Align, llvm::AtomicOrdering Order) {
  llvm::AtomicRMWInst::BinOp Op = llvm::AtomicRMWInst::Add;
  llvm::Instruction::BinaryOps PostOp = (llvm::Instruction::BinaryOps)0;

  switch (E->getOp()) {
  case AtomicExpr::AO__c11_atomic_init:
    llvm_unreachable("Already handled!");

  case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
  case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
  case AtomicExpr::AO__atomic_compare_exchange:
  case AtomicExpr::AO__atomic_compare_exchange_n: {
    // Note that cmpxchg only supports specifying one ordering and
    // doesn't support weak cmpxchg, at least at the moment.
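    // In this version of LLVM, cmpxchg returns only the value loaded from
    // memory, so the boolean "success" result is recomputed below by
    // comparing that value against the expected one.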
    llvm::LoadInst *LoadVal1 = CGF.Builder.CreateLoad(Val1);
    LoadVal1->setAlignment(Align);
    llvm::LoadInst *LoadVal2 = CGF.Builder.CreateLoad(Val2);
    LoadVal2->setAlignment(Align);
    llvm::AtomicCmpXchgInst *CXI =
        CGF.Builder.CreateAtomicCmpXchg(Ptr, LoadVal1, LoadVal2, Order);
    CXI->setVolatile(E->isVolatile());
    llvm::StoreInst *StoreVal1 = CGF.Builder.CreateStore(CXI, Val1);
    StoreVal1->setAlignment(Align);
    llvm::Value *Cmp = CGF.Builder.CreateICmpEQ(CXI, LoadVal1);
    CGF.EmitStoreOfScalar(Cmp, CGF.MakeAddrLValue(Dest, E->getType()));
    return;
  }

  case AtomicExpr::AO__c11_atomic_load:
  case AtomicExpr::AO__atomic_load_n:
  case AtomicExpr::AO__atomic_load: {
    llvm::LoadInst *Load = CGF.Builder.CreateLoad(Ptr);
    Load->setAtomic(Order);
    Load->setAlignment(Size);
    Load->setVolatile(E->isVolatile());
    llvm::StoreInst *StoreDest = CGF.Builder.CreateStore(Load, Dest);
    StoreDest->setAlignment(Align);
    return;
  }

  case AtomicExpr::AO__c11_atomic_store:
  case AtomicExpr::AO__atomic_store:
  case AtomicExpr::AO__atomic_store_n: {
    assert(!Dest && "Store does not return a value");
    llvm::LoadInst *LoadVal1 = CGF.Builder.CreateLoad(Val1);
    LoadVal1->setAlignment(Align);
    llvm::StoreInst *Store = CGF.Builder.CreateStore(LoadVal1, Ptr);
    Store->setAtomic(Order);
    Store->setAlignment(Size);
    Store->setVolatile(E->isVolatile());
    return;
  }

  case AtomicExpr::AO__c11_atomic_exchange:
  case AtomicExpr::AO__atomic_exchange_n:
  case AtomicExpr::AO__atomic_exchange:
    Op = llvm::AtomicRMWInst::Xchg;
    break;

  case AtomicExpr::AO__atomic_add_fetch:
    PostOp = llvm::Instruction::Add;
    // Fall through.
  case AtomicExpr::AO__c11_atomic_fetch_add:
  case AtomicExpr::AO__atomic_fetch_add:
    Op = llvm::AtomicRMWInst::Add;
    break;

  case AtomicExpr::AO__atomic_sub_fetch:
    PostOp = llvm::Instruction::Sub;
    // Fall through.
  case AtomicExpr::AO__c11_atomic_fetch_sub:
  case AtomicExpr::AO__atomic_fetch_sub:
    Op = llvm::AtomicRMWInst::Sub;
    break;

  case AtomicExpr::AO__atomic_and_fetch:
    PostOp = llvm::Instruction::And;
    // Fall through.
  case AtomicExpr::AO__c11_atomic_fetch_and:
  case AtomicExpr::AO__atomic_fetch_and:
    Op = llvm::AtomicRMWInst::And;
    break;

  case AtomicExpr::AO__atomic_or_fetch:
    PostOp = llvm::Instruction::Or;
    // Fall through.
  case AtomicExpr::AO__c11_atomic_fetch_or:
  case AtomicExpr::AO__atomic_fetch_or:
    Op = llvm::AtomicRMWInst::Or;
    break;

  case AtomicExpr::AO__atomic_xor_fetch:
    PostOp = llvm::Instruction::Xor;
    // Fall through.
  case AtomicExpr::AO__c11_atomic_fetch_xor:
  case AtomicExpr::AO__atomic_fetch_xor:
    Op = llvm::AtomicRMWInst::Xor;
    break;

  case AtomicExpr::AO__atomic_nand_fetch:
    PostOp = llvm::Instruction::And;
    // Fall through.
  case AtomicExpr::AO__atomic_fetch_nand:
    Op = llvm::AtomicRMWInst::Nand;
    break;
  }

  llvm::LoadInst *LoadVal1 = CGF.Builder.CreateLoad(Val1);
  LoadVal1->setAlignment(Align);
  llvm::AtomicRMWInst *RMWI =
      CGF.Builder.CreateAtomicRMW(Op, Ptr, LoadVal1, Order);
  RMWI->setVolatile(E->isVolatile());

  // For __atomic_*_fetch operations, perform the operation again to
  // determine the value which was written.
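  // For example, an atomicrmw add returns the *old* value, so
  // __atomic_add_fetch must re-apply the addition to yield old + val;
  // nand additionally needs the CreateNot below because LLVM's Nand
  // stores ~(old & val).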
  llvm::Value *Result = RMWI;
  if (PostOp)
    Result = CGF.Builder.CreateBinOp(PostOp, RMWI, LoadVal1);
  if (E->getOp() == AtomicExpr::AO__atomic_nand_fetch)
    Result = CGF.Builder.CreateNot(Result);
  llvm::StoreInst *StoreDest = CGF.Builder.CreateStore(Result, Dest);
  StoreDest->setAlignment(Align);
}

// This function emits any expression (scalar, complex, or aggregate)
// into a temporary alloca.
static llvm::Value *
EmitValToTemp(CodeGenFunction &CGF, Expr *E) {
  llvm::Value *DeclPtr = CGF.CreateMemTemp(E->getType(), ".atomictmp");
  CGF.EmitAnyExprToMem(E, DeclPtr, E->getType().getQualifiers(),
                       /*Init*/ true);
  return DeclPtr;
}
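// The temporaries produced here are passed by address into EmitAtomicOp,
// which bitcasts them to an integer pointer of the atomic width before
// loading from them.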

static void
AddDirectArgument(CodeGenFunction &CGF, CallArgList &Args,
                  bool UseOptimizedLibcall, llvm::Value *Val, QualType ValTy,
                  SourceLocation Loc) {
  if (UseOptimizedLibcall) {
    // Load value and pass it to the function directly.
    unsigned Align = CGF.getContext().getTypeAlignInChars(ValTy).getQuantity();
    Val = CGF.EmitLoadOfScalar(Val, false, Align, ValTy, Loc);
    Args.add(RValue::get(Val), ValTy);
  } else {
    // Non-optimized functions always take a reference.
    Args.add(RValue::get(CGF.EmitCastToVoidPtr(Val)),
                         CGF.getContext().VoidPtrTy);
  }
}

RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest) {
  QualType AtomicTy = E->getPtr()->getType()->getPointeeType();
  QualType MemTy = AtomicTy;
  if (const AtomicType *AT = AtomicTy->getAs<AtomicType>())
    MemTy = AT->getValueType();
  CharUnits sizeChars = getContext().getTypeSizeInChars(AtomicTy);
  uint64_t Size = sizeChars.getQuantity();
  CharUnits alignChars = getContext().getTypeAlignInChars(AtomicTy);
  unsigned Align = alignChars.getQuantity();
  unsigned MaxInlineWidthInBits =
    getTarget().getMaxAtomicInlineWidth();
  bool UseLibcall = (Size != Align ||
                     getContext().toBits(sizeChars) > MaxInlineWidthInBits);
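  // For instance, an under-aligned atomic (Size != Align, e.g. a member of
  // a packed struct) or one wider than the target's maximum inline atomic
  // width has to go through the __atomic_* library calls handled below.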

  llvm::Value *Ptr, *Order, *OrderFail = 0, *Val1 = 0, *Val2 = 0;
  Ptr = EmitScalarExpr(E->getPtr());

  if (E->getOp() == AtomicExpr::AO__c11_atomic_init) {
    assert(!Dest && "Init does not return a value");
    LValue lvalue = LValue::MakeAddr(Ptr, AtomicTy, alignChars, getContext());
    EmitAtomicInit(E->getVal1(), lvalue);
    return RValue::get(0);
  }

  Order = EmitScalarExpr(E->getOrder());

  switch (E->getOp()) {
  case AtomicExpr::AO__c11_atomic_init:
    llvm_unreachable("Already handled!");

  case AtomicExpr::AO__c11_atomic_load:
  case AtomicExpr::AO__atomic_load_n:
    break;

  case AtomicExpr::AO__atomic_load:
    Dest = EmitScalarExpr(E->getVal1());
    break;

  case AtomicExpr::AO__atomic_store:
    Val1 = EmitScalarExpr(E->getVal1());
    break;

  case AtomicExpr::AO__atomic_exchange:
    Val1 = EmitScalarExpr(E->getVal1());
    Dest = EmitScalarExpr(E->getVal2());
    break;

  case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
  case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
  case AtomicExpr::AO__atomic_compare_exchange_n:
  case AtomicExpr::AO__atomic_compare_exchange:
    Val1 = EmitScalarExpr(E->getVal1());
    if (E->getOp() == AtomicExpr::AO__atomic_compare_exchange)
      Val2 = EmitScalarExpr(E->getVal2());
    else
      Val2 = EmitValToTemp(*this, E->getVal2());
    OrderFail = EmitScalarExpr(E->getOrderFail());
    // Evaluate and discard the 'weak' argument.
    if (E->getNumSubExprs() == 6)
      EmitScalarExpr(E->getWeak());
    break;

  case AtomicExpr::AO__c11_atomic_fetch_add:
  case AtomicExpr::AO__c11_atomic_fetch_sub:
    if (MemTy->isPointerType()) {
      // For pointer arithmetic, we're required to do a bit of math:
      // adding 1 to an int* is not the same as adding 1 to a uintptr_t.
      // ... but only for the C11 builtins. The GNU builtins expect the
      // user to multiply by sizeof(T).
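      // E.g. for an _Atomic(int *) object, __c11_atomic_fetch_add(&p, 1, o)
      // must advance the pointer by sizeof(int) bytes, hence the multiply
      // below; the GNU __atomic_fetch_add takes the byte offset directly.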
      QualType Val1Ty = E->getVal1()->getType();
      llvm::Value *Val1Scalar = EmitScalarExpr(E->getVal1());
      CharUnits PointeeIncAmt =
          getContext().getTypeSizeInChars(MemTy->getPointeeType());
      Val1Scalar = Builder.CreateMul(Val1Scalar, CGM.getSize(PointeeIncAmt));
      Val1 = CreateMemTemp(Val1Ty, ".atomictmp");
      EmitStoreOfScalar(Val1Scalar, MakeAddrLValue(Val1, Val1Ty));
      break;
    }
    // Fall through.
  case AtomicExpr::AO__atomic_fetch_add:
  case AtomicExpr::AO__atomic_fetch_sub:
  case AtomicExpr::AO__atomic_add_fetch:
  case AtomicExpr::AO__atomic_sub_fetch:
  case AtomicExpr::AO__c11_atomic_store:
  case AtomicExpr::AO__c11_atomic_exchange:
  case AtomicExpr::AO__atomic_store_n:
  case AtomicExpr::AO__atomic_exchange_n:
  case AtomicExpr::AO__c11_atomic_fetch_and:
  case AtomicExpr::AO__c11_atomic_fetch_or:
  case AtomicExpr::AO__c11_atomic_fetch_xor:
  case AtomicExpr::AO__atomic_fetch_and:
  case AtomicExpr::AO__atomic_fetch_or:
  case AtomicExpr::AO__atomic_fetch_xor:
  case AtomicExpr::AO__atomic_fetch_nand:
  case AtomicExpr::AO__atomic_and_fetch:
  case AtomicExpr::AO__atomic_or_fetch:
  case AtomicExpr::AO__atomic_xor_fetch:
  case AtomicExpr::AO__atomic_nand_fetch:
    Val1 = EmitValToTemp(*this, E->getVal1());
    break;
  }

  if (!E->getType()->isVoidType() && !Dest)
    Dest = CreateMemTemp(E->getType(), ".atomicdst");

  // Use a library call.  See: http://gcc.gnu.org/wiki/Atomic/GCCMM/LIbrary .
  if (UseLibcall) {
    bool UseOptimizedLibcall = false;
    switch (E->getOp()) {
    case AtomicExpr::AO__c11_atomic_fetch_add:
    case AtomicExpr::AO__atomic_fetch_add:
    case AtomicExpr::AO__c11_atomic_fetch_and:
    case AtomicExpr::AO__atomic_fetch_and:
    case AtomicExpr::AO__c11_atomic_fetch_or:
    case AtomicExpr::AO__atomic_fetch_or:
    case AtomicExpr::AO__c11_atomic_fetch_sub:
    case AtomicExpr::AO__atomic_fetch_sub:
    case AtomicExpr::AO__c11_atomic_fetch_xor:
    case AtomicExpr::AO__atomic_fetch_xor:
      // For these, only library calls for certain sizes exist.
      UseOptimizedLibcall = true;
      break;
    default:
      // Only use optimized library calls for sizes for which they exist.
      if (Size == 1 || Size == 2 || Size == 4 || Size == 8)
        UseOptimizedLibcall = true;
      break;
    }

    CallArgList Args;
    if (!UseOptimizedLibcall) {
      // For non-optimized library calls, the size is the first parameter
      Args.add(RValue::get(llvm::ConstantInt::get(SizeTy, Size)),
               getContext().getSizeType());
    }
    // Atomic address is the first or second parameter
    Args.add(RValue::get(EmitCastToVoidPtr(Ptr)), getContext().VoidPtrTy);

    std::string LibCallName;
    QualType RetTy;
    bool HaveRetTy = false;
    switch (E->getOp()) {
    // There is only one libcall for compare and exchange, because there is
    // no optimisation benefit possible from a libcall version of a weak
    // compare and exchange.
    // bool __atomic_compare_exchange(size_t size, void *mem, void *expected,
    //                                void *desired, int success, int failure)
    // bool __atomic_compare_exchange_N(T *mem, T *expected, T desired,
    //                                  int success, int failure)
    case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
    case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
    case AtomicExpr::AO__atomic_compare_exchange:
    case AtomicExpr::AO__atomic_compare_exchange_n:
      LibCallName = "__atomic_compare_exchange";
      RetTy = getContext().BoolTy;
      HaveRetTy = true;
      Args.add(RValue::get(EmitCastToVoidPtr(Val1)), getContext().VoidPtrTy);
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val2, MemTy,
                        E->getExprLoc());
      Args.add(RValue::get(Order), getContext().IntTy);
      Order = OrderFail;
      break;
    // void __atomic_exchange(size_t size, void *mem, void *val, void *return,
    //                        int order)
    // T __atomic_exchange_N(T *mem, T val, int order)
    case AtomicExpr::AO__c11_atomic_exchange:
    case AtomicExpr::AO__atomic_exchange_n:
    case AtomicExpr::AO__atomic_exchange:
      LibCallName = "__atomic_exchange";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
                        E->getExprLoc());
      break;
    // void __atomic_store(size_t size, void *mem, void *val, int order)
    // void __atomic_store_N(T *mem, T val, int order)
    case AtomicExpr::AO__c11_atomic_store:
    case AtomicExpr::AO__atomic_store:
    case AtomicExpr::AO__atomic_store_n:
      LibCallName = "__atomic_store";
      RetTy = getContext().VoidTy;
      HaveRetTy = true;
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
                        E->getExprLoc());
      break;
    // void __atomic_load(size_t size, void *mem, void *return, int order)
    // T __atomic_load_N(T *mem, int order)
    case AtomicExpr::AO__c11_atomic_load:
    case AtomicExpr::AO__atomic_load:
    case AtomicExpr::AO__atomic_load_n:
      LibCallName = "__atomic_load";
      break;
    // T __atomic_fetch_add_N(T *mem, T val, int order)
    case AtomicExpr::AO__c11_atomic_fetch_add:
    case AtomicExpr::AO__atomic_fetch_add:
      LibCallName = "__atomic_fetch_add";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
                        E->getExprLoc());
      break;
    // T __atomic_fetch_and_N(T *mem, T val, int order)
    case AtomicExpr::AO__c11_atomic_fetch_and:
    case AtomicExpr::AO__atomic_fetch_and:
      LibCallName = "__atomic_fetch_and";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
                        E->getExprLoc());
      break;
    // T __atomic_fetch_or_N(T *mem, T val, int order)
    case AtomicExpr::AO__c11_atomic_fetch_or:
    case AtomicExpr::AO__atomic_fetch_or:
      LibCallName = "__atomic_fetch_or";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
                        E->getExprLoc());
      break;
    // T __atomic_fetch_sub_N(T *mem, T val, int order)
    case AtomicExpr::AO__c11_atomic_fetch_sub:
    case AtomicExpr::AO__atomic_fetch_sub:
      LibCallName = "__atomic_fetch_sub";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
                        E->getExprLoc());
      break;
    // T __atomic_fetch_xor_N(T *mem, T val, int order)
    case AtomicExpr::AO__c11_atomic_fetch_xor:
    case AtomicExpr::AO__atomic_fetch_xor:
      LibCallName = "__atomic_fetch_xor";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
                        E->getExprLoc());
      break;
    default: return EmitUnsupportedRValue(E, "atomic library call");
    }

    // Optimized functions have the size in their name.
    if (UseOptimizedLibcall)
      LibCallName += "_" + llvm::utostr(Size);
    // By default, assume we return a value of the atomic type.
    if (!HaveRetTy) {
      if (UseOptimizedLibcall) {
        // Value is returned directly.
        RetTy = MemTy;
      } else {
        // Value is returned through parameter before the order.
        RetTy = getContext().VoidTy;
        Args.add(RValue::get(EmitCastToVoidPtr(Dest)),
                 getContext().VoidPtrTy);
      }
    }
    // order is always the last parameter
    Args.add(RValue::get(Order),
             getContext().IntTy);

    const CGFunctionInfo &FuncInfo =
        CGM.getTypes().arrangeFreeFunctionCall(RetTy, Args,
            FunctionType::ExtInfo(), RequiredArgs::All);
    llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FuncInfo);
    llvm::Constant *Func = CGM.CreateRuntimeFunction(FTy, LibCallName);
    RValue Res = EmitCall(FuncInfo, Func, ReturnValueSlot(), Args);
    if (!RetTy->isVoidType())
      return Res;
    if (E->getType()->isVoidType())
      return RValue::get(0);
    return convertTempToRValue(Dest, E->getType(), E->getExprLoc());
  }

  bool IsStore = E->getOp() == AtomicExpr::AO__c11_atomic_store ||
                 E->getOp() == AtomicExpr::AO__atomic_store ||
                 E->getOp() == AtomicExpr::AO__atomic_store_n;
  bool IsLoad = E->getOp() == AtomicExpr::AO__c11_atomic_load ||
                E->getOp() == AtomicExpr::AO__atomic_load ||
                E->getOp() == AtomicExpr::AO__atomic_load_n;

  llvm::Type *IPtrTy =
      llvm::IntegerType::get(getLLVMContext(), Size * 8)->getPointerTo();
  llvm::Value *OrigDest = Dest;
  Ptr = Builder.CreateBitCast(Ptr, IPtrTy);
  if (Val1) Val1 = Builder.CreateBitCast(Val1, IPtrTy);
  if (Val2) Val2 = Builder.CreateBitCast(Val2, IPtrTy);
  if (Dest && !E->isCmpXChg()) Dest = Builder.CreateBitCast(Dest, IPtrTy);

  if (isa<llvm::ConstantInt>(Order)) {
    int ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
    switch (ord) {
    case AO_ABI_memory_order_relaxed:
      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
                   llvm::Monotonic);
      break;
    case AO_ABI_memory_order_consume:
    case AO_ABI_memory_order_acquire:
      if (IsStore)
        break; // Avoid crashing on code with undefined behavior
      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
                   llvm::Acquire);
      break;
    case AO_ABI_memory_order_release:
      if (IsLoad)
        break; // Avoid crashing on code with undefined behavior
      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
                   llvm::Release);
      break;
    case AO_ABI_memory_order_acq_rel:
      if (IsLoad || IsStore)
        break; // Avoid crashing on code with undefined behavior
      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
                   llvm::AcquireRelease);
      break;
    case AO_ABI_memory_order_seq_cst:
      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
                   llvm::SequentiallyConsistent);
      break;
    default: // invalid order
      // We should not ever get here normally, but it's hard to
      // enforce that in general.
      break;
    }
    if (E->getType()->isVoidType())
      return RValue::get(0);
    return convertTempToRValue(OrigDest, E->getType(), E->getExprLoc());
  }

  // Long case, when Order isn't obviously constant.

  // Create all the relevant BB's
  llvm::BasicBlock *MonotonicBB = 0, *AcquireBB = 0, *ReleaseBB = 0,
                   *AcqRelBB = 0, *SeqCstBB = 0;
  MonotonicBB = createBasicBlock("monotonic", CurFn);
  if (!IsStore)
    AcquireBB = createBasicBlock("acquire", CurFn);
  if (!IsLoad)
    ReleaseBB = createBasicBlock("release", CurFn);
  if (!IsLoad && !IsStore)
    AcqRelBB = createBasicBlock("acqrel", CurFn);
  SeqCstBB = createBasicBlock("seqcst", CurFn);
  llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);

  // Create the switch for the split
  // MonotonicBB is arbitrarily chosen as the default case; in practice, this
  // doesn't matter unless someone is crazy enough to use something that
  // doesn't fold to a constant for the ordering.
  Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
  llvm::SwitchInst *SI = Builder.CreateSwitch(Order, MonotonicBB);

  // Emit all the different atomics
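  // The case values below are the C11 ABI ordering values from
  // AtomicOrderingKind above; consume (1) and acquire (2) both lower to
  // llvm::Acquire.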
  Builder.SetInsertPoint(MonotonicBB);
  EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
               llvm::Monotonic);
  Builder.CreateBr(ContBB);
  if (!IsStore) {
    Builder.SetInsertPoint(AcquireBB);
    EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
                 llvm::Acquire);
    Builder.CreateBr(ContBB);
    SI->addCase(Builder.getInt32(1), AcquireBB);
    SI->addCase(Builder.getInt32(2), AcquireBB);
  }
  if (!IsLoad) {
    Builder.SetInsertPoint(ReleaseBB);
    EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
                 llvm::Release);
    Builder.CreateBr(ContBB);
    SI->addCase(Builder.getInt32(3), ReleaseBB);
  }
  if (!IsLoad && !IsStore) {
    Builder.SetInsertPoint(AcqRelBB);
    EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
                 llvm::AcquireRelease);
    Builder.CreateBr(ContBB);
    SI->addCase(Builder.getInt32(4), AcqRelBB);
  }
  Builder.SetInsertPoint(SeqCstBB);
  EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
               llvm::SequentiallyConsistent);
  Builder.CreateBr(ContBB);
  SI->addCase(Builder.getInt32(5), SeqCstBB);

  // Cleanup and return
  Builder.SetInsertPoint(ContBB);
  if (E->getType()->isVoidType())
    return RValue::get(0);
  return convertTempToRValue(OrigDest, E->getType(), E->getExprLoc());
}

llvm::Value *AtomicInfo::emitCastToAtomicIntPointer(llvm::Value *addr) const {
  unsigned addrspace =
    cast<llvm::PointerType>(addr->getType())->getAddressSpace();
  llvm::IntegerType *ty =
    llvm::IntegerType::get(CGF.getLLVMContext(), AtomicSizeInBits);
  return CGF.Builder.CreateBitCast(addr, ty->getPointerTo(addrspace));
}
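// For example, an _Atomic(float) is accessed through an i32* here; callers
// such as EmitAtomicLoad below then bitcast the loaded integer back to the
// value type.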

RValue AtomicInfo::convertTempToRValue(llvm::Value *addr,
                                       AggValueSlot resultSlot,
                                       SourceLocation loc) const {
  if (EvaluationKind == TEK_Aggregate)
    return resultSlot.asRValue();

  // Drill into the padding structure if we have one.
  if (hasPadding())
    addr = CGF.Builder.CreateStructGEP(addr, 0);

  // Otherwise, just convert the temporary to an r-value using the
  // normal conversion routine.
  return CGF.convertTempToRValue(addr, getValueType(), loc);
}

/// Emit a load from an l-value of atomic type.  Note that the r-value
/// we produce is an r-value of the atomic *value* type.
RValue CodeGenFunction::EmitAtomicLoad(LValue src, SourceLocation loc,
                                       AggValueSlot resultSlot) {
  AtomicInfo atomics(*this, src);

  // Check whether we should use a library call.
  if (atomics.shouldUseLibcall()) {
    llvm::Value *tempAddr;
    if (!resultSlot.isIgnored()) {
      assert(atomics.getEvaluationKind() == TEK_Aggregate);
      tempAddr = resultSlot.getAddr();
    } else {
      tempAddr = CreateMemTemp(atomics.getAtomicType(), "atomic-load-temp");
    }

    // void __atomic_load(size_t size, void *mem, void *return, int order);
    CallArgList args;
    args.add(RValue::get(atomics.getAtomicSizeValue()),
             getContext().getSizeType());
    args.add(RValue::get(EmitCastToVoidPtr(src.getAddress())),
             getContext().VoidPtrTy);
    args.add(RValue::get(EmitCastToVoidPtr(tempAddr)),
             getContext().VoidPtrTy);
    args.add(RValue::get(llvm::ConstantInt::get(IntTy,
                                                AO_ABI_memory_order_seq_cst)),
             getContext().IntTy);
    emitAtomicLibcall(*this, "__atomic_load", getContext().VoidTy, args);

    // Produce the r-value.
    return atomics.convertTempToRValue(tempAddr, resultSlot, loc);
  }

  // Okay, we're doing this natively.
  llvm::Value *addr = atomics.emitCastToAtomicIntPointer(src.getAddress());
  llvm::LoadInst *load = Builder.CreateLoad(addr, "atomic-load");
  load->setAtomic(llvm::SequentiallyConsistent);

  // Other decoration.
  load->setAlignment(src.getAlignment().getQuantity());
  if (src.isVolatileQualified())
    load->setVolatile(true);
  if (src.getTBAAInfo())
    CGM.DecorateInstruction(load, src.getTBAAInfo());

  // Okay, turn that back into the original value type.
  QualType valueType = atomics.getValueType();
  llvm::Value *result = load;

  // If we're ignoring an aggregate return, don't do anything.
  if (atomics.getEvaluationKind() == TEK_Aggregate && resultSlot.isIgnored())
    return RValue::getAggregate(0, false);

  // The easiest way to do this is to go through memory, but we
  // try not to in some easy cases.
795*f4a2713aSLionel Sambuc   if (atomics.getEvaluationKind() == TEK_Scalar && !atomics.hasPadding()) {
796*f4a2713aSLionel Sambuc     llvm::Type *resultTy = CGM.getTypes().ConvertTypeForMem(valueType);
797*f4a2713aSLionel Sambuc     if (isa<llvm::IntegerType>(resultTy)) {
798*f4a2713aSLionel Sambuc       assert(result->getType() == resultTy);
799*f4a2713aSLionel Sambuc       result = EmitFromMemory(result, valueType);
800*f4a2713aSLionel Sambuc     } else if (isa<llvm::PointerType>(resultTy)) {
801*f4a2713aSLionel Sambuc       result = Builder.CreateIntToPtr(result, resultTy);
802*f4a2713aSLionel Sambuc     } else {
803*f4a2713aSLionel Sambuc       result = Builder.CreateBitCast(result, resultTy);
804*f4a2713aSLionel Sambuc     }
805*f4a2713aSLionel Sambuc     return RValue::get(result);
806*f4a2713aSLionel Sambuc   }
807*f4a2713aSLionel Sambuc 
808*f4a2713aSLionel Sambuc   // Create a temporary.  This needs to be big enough to hold the
809*f4a2713aSLionel Sambuc   // atomic integer.
810*f4a2713aSLionel Sambuc   llvm::Value *temp;
811*f4a2713aSLionel Sambuc   bool tempIsVolatile = false;
812*f4a2713aSLionel Sambuc   CharUnits tempAlignment;
813*f4a2713aSLionel Sambuc   if (atomics.getEvaluationKind() == TEK_Aggregate) {
814*f4a2713aSLionel Sambuc     assert(!resultSlot.isIgnored());
815*f4a2713aSLionel Sambuc     temp = resultSlot.getAddr();
816*f4a2713aSLionel Sambuc     tempAlignment = atomics.getValueAlignment();
817*f4a2713aSLionel Sambuc     tempIsVolatile = resultSlot.isVolatile();
818*f4a2713aSLionel Sambuc   } else {
819*f4a2713aSLionel Sambuc     temp = CreateMemTemp(atomics.getAtomicType(), "atomic-load-temp");
820*f4a2713aSLionel Sambuc     tempAlignment = atomics.getAtomicAlignment();
821*f4a2713aSLionel Sambuc   }
822*f4a2713aSLionel Sambuc 
823*f4a2713aSLionel Sambuc   // Slam the integer into the temporary.
824*f4a2713aSLionel Sambuc   llvm::Value *castTemp = atomics.emitCastToAtomicIntPointer(temp);
825*f4a2713aSLionel Sambuc   Builder.CreateAlignedStore(result, castTemp, tempAlignment.getQuantity())
826*f4a2713aSLionel Sambuc     ->setVolatile(tempIsVolatile);
827*f4a2713aSLionel Sambuc 
828*f4a2713aSLionel Sambuc   return atomics.convertTempToRValue(temp, resultSlot, loc);
829*f4a2713aSLionel Sambuc }
830*f4a2713aSLionel Sambuc 
831*f4a2713aSLionel Sambuc 
832*f4a2713aSLionel Sambuc 
833*f4a2713aSLionel Sambuc /// Copy an r-value into memory as part of storing to an atomic type.
834*f4a2713aSLionel Sambuc /// This needs to create a bit-pattern suitable for atomic operations.
835*f4a2713aSLionel Sambuc void AtomicInfo::emitCopyIntoMemory(RValue rvalue, LValue dest) const {
836*f4a2713aSLionel Sambuc   // If we have an r-value, the rvalue should be of the atomic type,
837*f4a2713aSLionel Sambuc   // which means that the caller is responsible for having zeroed
838*f4a2713aSLionel Sambuc   // any padding.  Just do an aggregate copy of that type.
839*f4a2713aSLionel Sambuc   if (rvalue.isAggregate()) {
840*f4a2713aSLionel Sambuc     CGF.EmitAggregateCopy(dest.getAddress(),
841*f4a2713aSLionel Sambuc                           rvalue.getAggregateAddr(),
842*f4a2713aSLionel Sambuc                           getAtomicType(),
843*f4a2713aSLionel Sambuc                           (rvalue.isVolatileQualified()
844*f4a2713aSLionel Sambuc                            || dest.isVolatileQualified()),
845*f4a2713aSLionel Sambuc                           dest.getAlignment());
846*f4a2713aSLionel Sambuc     return;
847*f4a2713aSLionel Sambuc   }
848*f4a2713aSLionel Sambuc 
849*f4a2713aSLionel Sambuc   // Okay, otherwise we're copying stuff.
850*f4a2713aSLionel Sambuc 
851*f4a2713aSLionel Sambuc   // Zero out the buffer if necessary.
852*f4a2713aSLionel Sambuc   emitMemSetZeroIfNecessary(dest);
853*f4a2713aSLionel Sambuc 
854*f4a2713aSLionel Sambuc   // Drill past the padding if present.
855*f4a2713aSLionel Sambuc   dest = projectValue(dest);
856*f4a2713aSLionel Sambuc 
857*f4a2713aSLionel Sambuc   // Okay, store the rvalue in.
858*f4a2713aSLionel Sambuc   if (rvalue.isScalar()) {
859*f4a2713aSLionel Sambuc     CGF.EmitStoreOfScalar(rvalue.getScalarVal(), dest, /*init*/ true);
860*f4a2713aSLionel Sambuc   } else {
861*f4a2713aSLionel Sambuc     CGF.EmitStoreOfComplex(rvalue.getComplexVal(), dest, /*init*/ true);
862*f4a2713aSLionel Sambuc   }
863*f4a2713aSLionel Sambuc }

/// Materialize an r-value into memory for the purposes of storing it
/// to an atomic type.
llvm::Value *AtomicInfo::materializeRValue(RValue rvalue) const {
  // Aggregate r-values are already in memory, and EmitAtomicStore
  // requires them to be values of the atomic type.
  if (rvalue.isAggregate())
    return rvalue.getAggregateAddr();

  // Otherwise, make a temporary and materialize into it.
  llvm::Value *temp = CGF.CreateMemTemp(getAtomicType(), "atomic-store-temp");
  LValue tempLV =
      CGF.MakeAddrLValue(temp, getAtomicType(), getAtomicAlignment());
  emitCopyIntoMemory(rvalue, tempLV);
  return temp;
}
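
// Usage sketch: EmitAtomicStore below goes through this path whenever it
// must take a value's address, e.g.
// \code
//   llvm::Value *srcAddr = atomics.materializeRValue(rvalue);
//   // srcAddr now points at storage of the full atomic type, with any
//   // padding zeroed by emitCopyIntoMemory.
// \endcode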

/// Emit a store to an l-value of atomic type.
///
/// Note that the r-value is expected to be an r-value *of the atomic
/// type*; this means that for aggregate r-values, it should include
/// storage for any padding that was necessary.
void CodeGenFunction::EmitAtomicStore(RValue rvalue, LValue dest, bool isInit) {
  // If this is an aggregate r-value, it should agree in type except
  // maybe for address-space qualification.
  assert(!rvalue.isAggregate() ||
         rvalue.getAggregateAddr()->getType()->getPointerElementType()
           == dest.getAddress()->getType()->getPointerElementType());

  AtomicInfo atomics(*this, dest);

  // If this is an initialization, just put the value there normally.
  if (isInit) {
    atomics.emitCopyIntoMemory(rvalue, dest);
    return;
  }

  // Check whether we should use a library call.
  if (atomics.shouldUseLibcall()) {
    // Produce a source address.
    llvm::Value *srcAddr = atomics.materializeRValue(rvalue);

    // void __atomic_store(size_t size, void *mem, void *val, int order)
    CallArgList args;
    args.add(RValue::get(atomics.getAtomicSizeValue()),
             getContext().getSizeType());
    args.add(RValue::get(EmitCastToVoidPtr(dest.getAddress())),
             getContext().VoidPtrTy);
    args.add(RValue::get(EmitCastToVoidPtr(srcAddr)),
             getContext().VoidPtrTy);
    args.add(RValue::get(llvm::ConstantInt::get(IntTy,
                                                AO_ABI_memory_order_seq_cst)),
             getContext().IntTy);
    emitAtomicLibcall(*this, "__atomic_store", getContext().VoidTy, args);
    return;
  }
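
  // For reference, a source-level sketch of what the libcall path handles
  // (hypothetical type; the runtime function is provided by libatomic or
  // compiler-rt):
  // \code
  //   struct Big { char bytes[32]; };
  //   _Atomic struct Big g;
  //   void set(struct Big v) { g = v; }
  //   // lowers to roughly:
  //   //   __atomic_store(32, &g, &v, /*memory_order_seq_cst*/ 5);
  // \endcode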

  // Okay, we're doing this natively with an ordinary store instruction
  // marked atomic.
  llvm::Value *intValue;

  // If we've got a scalar value of the right size, try to avoid going
  // through memory.
  if (rvalue.isScalar() && !atomics.hasPadding()) {
    llvm::Value *value = rvalue.getScalarVal();
    if (isa<llvm::IntegerType>(value->getType())) {
      intValue = value;
    } else {
      llvm::IntegerType *inputIntTy =
        llvm::IntegerType::get(getLLVMContext(), atomics.getValueSizeInBits());
      if (isa<llvm::PointerType>(value->getType())) {
        intValue = Builder.CreatePtrToInt(value, inputIntTy);
      } else {
        intValue = Builder.CreateBitCast(value, inputIntTy);
      }
    }

  // Otherwise, we need to go through memory.
  } else {
    // Put the r-value in memory.
    llvm::Value *addr = atomics.materializeRValue(rvalue);

    // Cast the temporary to the atomic int type and pull a value out.
    addr = atomics.emitCastToAtomicIntPointer(addr);
    intValue = Builder.CreateAlignedLoad(addr,
                                 atomics.getAtomicAlignment().getQuantity());
  }
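
  // Illustration (sketch): a 'float' takes the CreateBitCast path above
  // (float -> i32), an 'int *' takes CreatePtrToInt (ptr -> i64 on LP64
  // targets), and anything with padding is bounced through memory.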

  // Do the atomic store.
  llvm::Value *addr = atomics.emitCastToAtomicIntPointer(dest.getAddress());
  llvm::StoreInst *store = Builder.CreateStore(intValue, addr);

  // Initializations don't need to be atomic.  (We returned early above when
  // isInit was set, so in practice this store is always marked atomic.)
  if (!isInit) store->setAtomic(llvm::SequentiallyConsistent);

  // Other decoration: alignment, volatility, and TBAA metadata.
  store->setAlignment(dest.getAlignment().getQuantity());
  if (dest.isVolatileQualified())
    store->setVolatile(true);
  if (dest.getTBAAInfo())
    CGM.DecorateInstruction(store, dest.getTBAAInfo());
}
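
// Illustrative result of the native path (a sketch, assuming a target with
// lock-free 32-bit atomics):
// \code
//   _Atomic int g;
//   void set(int v) { g = v; }
//   // emits roughly:
//   //   store atomic i32 %v, i32* @g seq_cst, align 4
// \endcode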

void CodeGenFunction::EmitAtomicInit(Expr *init, LValue dest) {
  AtomicInfo atomics(*this, dest);

  switch (atomics.getEvaluationKind()) {
  case TEK_Scalar: {
    llvm::Value *value = EmitScalarExpr(init);
    atomics.emitCopyIntoMemory(RValue::get(value), dest);
    return;
  }

  case TEK_Complex: {
    ComplexPairTy value = EmitComplexExpr(init);
    atomics.emitCopyIntoMemory(RValue::getComplex(value), dest);
    return;
  }

  case TEK_Aggregate: {
    // Fix up the destination if the initializer isn't an expression
    // of atomic type.
    bool Zeroed = false;
    if (!init->getType()->isAtomicType()) {
      Zeroed = atomics.emitMemSetZeroIfNecessary(dest);
      dest = atomics.projectValue(dest);
    }

    // Evaluate the expression directly into the destination.
    AggValueSlot slot = AggValueSlot::forLValue(dest,
                                        AggValueSlot::IsNotDestructed,
                                        AggValueSlot::DoesNotNeedGCBarriers,
                                        AggValueSlot::IsNotAliased,
                                        Zeroed ? AggValueSlot::IsZeroed :
                                                 AggValueSlot::IsNotZeroed);

    EmitAggExpr(init, slot);
    return;
  }
  }
  llvm_unreachable("bad evaluation kind");
}
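
// Usage sketch: EmitAtomicInit is reached for initializations rather than
// assignments, e.g.
// \code
//   _Atomic int x = 42;               // TEK_Scalar
//   _Atomic _Complex float z = 1.0f;  // TEK_Complex
//   struct P { int a, b; };
//   _Atomic struct P p = {1, 2};      // TEK_Aggregate
// \endcode
// No atomic instruction or fence is needed here: during initialization the
// object is not yet visible to other threads.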