1 | |
2 | |
3 | |
4 | |
5 | |
6 | |
7 | |
8 | |
9 | |
10 | |
11 | |
12 | |
13 | #include "CGCall.h" |
14 | #include "CGRecordLayout.h" |
15 | #include "CodeGenFunction.h" |
16 | #include "CodeGenModule.h" |
17 | #include "TargetInfo.h" |
18 | #include "clang/AST/ASTContext.h" |
19 | #include "clang/CodeGen/CGFunctionInfo.h" |
20 | #include "clang/Frontend/FrontendDiagnostic.h" |
21 | #include "llvm/ADT/DenseMap.h" |
22 | #include "llvm/IR/DataLayout.h" |
23 | #include "llvm/IR/Intrinsics.h" |
24 | #include "llvm/IR/Operator.h" |
25 | |
26 | using namespace clang; |
27 | using namespace CodeGen; |
28 | |
29 | namespace { |
30 | class AtomicInfo { |
31 | CodeGenFunction &CGF; |
32 | QualType AtomicTy; |
33 | QualType ValueTy; |
34 | uint64_t AtomicSizeInBits; |
35 | uint64_t ValueSizeInBits; |
36 | CharUnits AtomicAlign; |
37 | CharUnits ValueAlign; |
38 | TypeEvaluationKind EvaluationKind; |
39 | bool UseLibcall; |
40 | LValue LVal; |
41 | CGBitFieldInfo BFI; |
42 | public: |
43 | AtomicInfo(CodeGenFunction &CGF, LValue &lvalue) |
44 | : CGF(CGF), AtomicSizeInBits(0), ValueSizeInBits(0), |
45 | EvaluationKind(TEK_Scalar), UseLibcall(true) { |
46 | assert(!lvalue.isGlobalReg()); |
47 | ASTContext &C = CGF.getContext(); |
48 | if (lvalue.isSimple()) { |
49 | AtomicTy = lvalue.getType(); |
50 | if (auto *ATy = AtomicTy->getAs<AtomicType>()) |
51 | ValueTy = ATy->getValueType(); |
52 | else |
53 | ValueTy = AtomicTy; |
54 | EvaluationKind = CGF.getEvaluationKind(ValueTy); |
55 | |
56 | uint64_t ValueAlignInBits; |
57 | uint64_t AtomicAlignInBits; |
58 | TypeInfo ValueTI = C.getTypeInfo(ValueTy); |
59 | ValueSizeInBits = ValueTI.Width; |
60 | ValueAlignInBits = ValueTI.Align; |
61 | |
62 | TypeInfo AtomicTI = C.getTypeInfo(AtomicTy); |
63 | AtomicSizeInBits = AtomicTI.Width; |
64 | AtomicAlignInBits = AtomicTI.Align; |
65 | |
66 | assert(ValueSizeInBits <= AtomicSizeInBits); |
67 | assert(ValueAlignInBits <= AtomicAlignInBits); |
68 | |
69 | AtomicAlign = C.toCharUnitsFromBits(AtomicAlignInBits); |
70 | ValueAlign = C.toCharUnitsFromBits(ValueAlignInBits); |
71 | if (lvalue.getAlignment().isZero()) |
72 | lvalue.setAlignment(AtomicAlign); |
73 | |
74 | LVal = lvalue; |
75 | } else if (lvalue.isBitField()) { |
76 | ValueTy = lvalue.getType(); |
77 | ValueSizeInBits = C.getTypeSize(ValueTy); |
78 | auto &OrigBFI = lvalue.getBitFieldInfo(); |
79 | auto Offset = OrigBFI.Offset % C.toBits(lvalue.getAlignment()); |
80 | AtomicSizeInBits = C.toBits( |
81 | C.toCharUnitsFromBits(Offset + OrigBFI.Size + C.getCharWidth() - 1) |
82 | .alignTo(lvalue.getAlignment())); |
83 | auto VoidPtrAddr = CGF.EmitCastToVoidPtr(lvalue.getBitFieldPointer()); |
84 | auto OffsetInChars = |
85 | (C.toCharUnitsFromBits(OrigBFI.Offset) / lvalue.getAlignment()) * |
86 | lvalue.getAlignment(); |
87 | VoidPtrAddr = CGF.Builder.CreateConstGEP1_64( |
88 | VoidPtrAddr, OffsetInChars.getQuantity()); |
89 | auto Addr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast( |
90 | VoidPtrAddr, |
91 | CGF.Builder.getIntNTy(AtomicSizeInBits)->getPointerTo(), |
92 | "atomic_bitfield_base"); |
93 | BFI = OrigBFI; |
94 | BFI.Offset = Offset; |
95 | BFI.StorageSize = AtomicSizeInBits; |
96 | BFI.StorageOffset += OffsetInChars; |
97 | LVal = LValue::MakeBitfield(Address(Addr, lvalue.getAlignment()), |
98 | BFI, lvalue.getType(), lvalue.getBaseInfo(), |
99 | lvalue.getTBAAInfo()); |
100 | AtomicTy = C.getIntTypeForBitwidth(AtomicSizeInBits, OrigBFI.IsSigned); |
101 | if (AtomicTy.isNull()) { |
102 | llvm::APInt Size( |
103 | , |
104 | C.toCharUnitsFromBits(AtomicSizeInBits).getQuantity()); |
105 | AtomicTy = C.getConstantArrayType(C.CharTy, Size, ArrayType::Normal, |
106 | ); |
107 | } |
108 | AtomicAlign = ValueAlign = lvalue.getAlignment(); |
109 | } else if (lvalue.isVectorElt()) { |
110 | ValueTy = lvalue.getType()->getAs<VectorType>()->getElementType(); |
111 | ValueSizeInBits = C.getTypeSize(ValueTy); |
112 | AtomicTy = lvalue.getType(); |
113 | AtomicSizeInBits = C.getTypeSize(AtomicTy); |
114 | AtomicAlign = ValueAlign = lvalue.getAlignment(); |
115 | LVal = lvalue; |
116 | } else { |
117 | assert(lvalue.isExtVectorElt()); |
118 | ValueTy = lvalue.getType(); |
119 | ValueSizeInBits = C.getTypeSize(ValueTy); |
120 | AtomicTy = ValueTy = CGF.getContext().getExtVectorType( |
121 | lvalue.getType(), lvalue.getExtVectorAddress() |
122 | .getElementType()->getVectorNumElements()); |
123 | AtomicSizeInBits = C.getTypeSize(AtomicTy); |
124 | AtomicAlign = ValueAlign = lvalue.getAlignment(); |
125 | LVal = lvalue; |
126 | } |
127 | UseLibcall = !C.getTargetInfo().hasBuiltinAtomic( |
128 | AtomicSizeInBits, C.toBits(lvalue.getAlignment())); |
129 | } |
130 | |
131 | QualType getAtomicType() const { return AtomicTy; } |
132 | QualType getValueType() const { return ValueTy; } |
133 | CharUnits getAtomicAlignment() const { return AtomicAlign; } |
134 | uint64_t getAtomicSizeInBits() const { return AtomicSizeInBits; } |
135 | uint64_t getValueSizeInBits() const { return ValueSizeInBits; } |
136 | TypeEvaluationKind getEvaluationKind() const { return EvaluationKind; } |
137 | bool shouldUseLibcall() const { return UseLibcall; } |
138 | const LValue &getAtomicLValue() const { return LVal; } |
139 | llvm::Value *getAtomicPointer() const { |
140 | if (LVal.isSimple()) |
141 | return LVal.getPointer(); |
142 | else if (LVal.isBitField()) |
143 | return LVal.getBitFieldPointer(); |
144 | else if (LVal.isVectorElt()) |
145 | return LVal.getVectorPointer(); |
146 | assert(LVal.isExtVectorElt()); |
147 | return LVal.getExtVectorPointer(); |
148 | } |
149 | Address getAtomicAddress() const { |
150 | return Address(getAtomicPointer(), getAtomicAlignment()); |
151 | } |
152 | |
153 | Address getAtomicAddressAsAtomicIntPointer() const { |
154 | return emitCastToAtomicIntPointer(getAtomicAddress()); |
155 | } |
156 | |
157 | |
158 | |
159 | |
160 | |
161 | |
162 | |
163 | bool hasPadding() const { |
164 | return (ValueSizeInBits != AtomicSizeInBits); |
165 | } |
166 | |
167 | bool emitMemSetZeroIfNecessary() const; |
168 | |
169 | llvm::Value *getAtomicSizeValue() const { |
170 | CharUnits size = CGF.getContext().toCharUnitsFromBits(AtomicSizeInBits); |
171 | return CGF.CGM.getSize(size); |
172 | } |
173 | |
174 | |
175 | |
176 | Address emitCastToAtomicIntPointer(Address Addr) const; |
177 | |
178 | |
179 | |
180 | |
181 | Address convertToAtomicIntPointer(Address Addr) const; |
182 | |
183 | |
184 | RValue convertAtomicTempToRValue(Address addr, AggValueSlot resultSlot, |
185 | SourceLocation loc, bool AsValue) const; |
186 | |
187 | |
188 | llvm::Value *convertRValueToInt(RValue RVal) const; |
189 | |
190 | RValue ConvertIntToValueOrAtomic(llvm::Value *IntVal, |
191 | AggValueSlot ResultSlot, |
192 | SourceLocation Loc, bool AsValue) const; |
193 | |
194 | |
195 | void emitCopyIntoMemory(RValue rvalue) const; |
196 | |
197 | |
198 | LValue projectValue() const { |
199 | assert(LVal.isSimple()); |
200 | Address addr = getAtomicAddress(); |
201 | if (hasPadding()) |
202 | addr = CGF.Builder.CreateStructGEP(addr, 0); |
203 | |
204 | return LValue::MakeAddr(addr, getValueType(), CGF.getContext(), |
205 | LVal.getBaseInfo(), LVal.getTBAAInfo()); |
206 | } |
207 | |
208 | |
209 | |
210 | RValue EmitAtomicLoad(AggValueSlot ResultSlot, SourceLocation Loc, |
211 | bool AsValue, llvm::AtomicOrdering AO, |
212 | bool IsVolatile); |
213 | |
214 | |
215 | |
216 | |
217 | |
218 | |
219 | |
220 | |
221 | |
222 | std::pair<RValue, llvm::Value *> |
223 | EmitAtomicCompareExchange(RValue Expected, RValue Desired, |
224 | llvm::AtomicOrdering Success = |
225 | llvm::AtomicOrdering::SequentiallyConsistent, |
226 | llvm::AtomicOrdering Failure = |
227 | llvm::AtomicOrdering::SequentiallyConsistent, |
228 | bool IsWeak = false); |
229 | |
230 | |
231 | |
232 | |
233 | void EmitAtomicUpdate(llvm::AtomicOrdering AO, |
234 | const llvm::function_ref<RValue(RValue)> &UpdateOp, |
235 | bool IsVolatile); |
236 | |
237 | |
238 | void EmitAtomicUpdate(llvm::AtomicOrdering AO, RValue UpdateRVal, |
239 | bool IsVolatile); |
240 | |
241 | |
242 | Address materializeRValue(RValue rvalue) const; |
243 | |
244 | |
245 | Address CreateTempAlloca() const; |
246 | private: |
247 | bool requiresMemSetZero(llvm::Type *type) const; |
248 | |
249 | |
250 | |
251 | void EmitAtomicLoadLibcall(llvm::Value *AddForLoaded, |
252 | llvm::AtomicOrdering AO, bool IsVolatile); |
253 | |
254 | llvm::Value *EmitAtomicLoadOp(llvm::AtomicOrdering AO, bool IsVolatile); |
255 | |
256 | llvm::Value *EmitAtomicCompareExchangeLibcall( |
257 | llvm::Value *ExpectedAddr, llvm::Value *DesiredAddr, |
258 | llvm::AtomicOrdering Success = |
259 | llvm::AtomicOrdering::SequentiallyConsistent, |
260 | llvm::AtomicOrdering Failure = |
261 | llvm::AtomicOrdering::SequentiallyConsistent); |
262 | |
263 | std::pair<llvm::Value *, llvm::Value *> EmitAtomicCompareExchangeOp( |
264 | llvm::Value *ExpectedVal, llvm::Value *DesiredVal, |
265 | llvm::AtomicOrdering Success = |
266 | llvm::AtomicOrdering::SequentiallyConsistent, |
267 | llvm::AtomicOrdering Failure = |
268 | llvm::AtomicOrdering::SequentiallyConsistent, |
269 | bool IsWeak = false); |
270 | |
271 | void |
272 | EmitAtomicUpdateLibcall(llvm::AtomicOrdering AO, |
273 | const llvm::function_ref<RValue(RValue)> &UpdateOp, |
274 | bool IsVolatile); |
275 | |
276 | void EmitAtomicUpdateOp(llvm::AtomicOrdering AO, |
277 | const llvm::function_ref<RValue(RValue)> &UpdateOp, |
278 | bool IsVolatile); |
279 | |
280 | void EmitAtomicUpdateLibcall(llvm::AtomicOrdering AO, RValue UpdateRVal, |
281 | bool IsVolatile); |
282 | |
283 | void EmitAtomicUpdateOp(llvm::AtomicOrdering AO, RValue UpdateRal, |
284 | bool IsVolatile); |
285 | }; |
286 | } |
287 | |
288 | Address AtomicInfo::CreateTempAlloca() const { |
289 | Address TempAlloca = CGF.CreateMemTemp( |
290 | (LVal.isBitField() && ValueSizeInBits > AtomicSizeInBits) ? ValueTy |
291 | : AtomicTy, |
292 | getAtomicAlignment(), |
293 | "atomic-temp"); |
294 | |
295 | if (LVal.isBitField()) |
296 | return CGF.Builder.CreatePointerBitCastOrAddrSpaceCast( |
297 | TempAlloca, getAtomicAddress().getType()); |
298 | return TempAlloca; |
299 | } |
300 | |
301 | static RValue emitAtomicLibcall(CodeGenFunction &CGF, |
302 | StringRef fnName, |
303 | QualType resultType, |
304 | CallArgList &args) { |
305 | const CGFunctionInfo &fnInfo = |
306 | CGF.CGM.getTypes().arrangeBuiltinFunctionCall(resultType, args); |
307 | llvm::FunctionType *fnTy = CGF.CGM.getTypes().GetFunctionType(fnInfo); |
308 | llvm::FunctionCallee fn = CGF.CGM.CreateRuntimeFunction(fnTy, fnName); |
309 | auto callee = CGCallee::forDirect(fn); |
310 | return CGF.EmitCall(fnInfo, callee, ReturnValueSlot(), args); |
311 | } |
312 | |
313 | |
314 | static bool isFullSizeType(CodeGenModule &CGM, llvm::Type *type, |
315 | uint64_t expectedSize) { |
316 | return (CGM.getDataLayout().getTypeStoreSize(type) * 8 == expectedSize); |
317 | } |
318 | |
319 | |
320 | |
321 | |
322 | bool AtomicInfo::requiresMemSetZero(llvm::Type *type) const { |
323 | |
324 | if (hasPadding()) return true; |
325 | |
326 | |
327 | switch (getEvaluationKind()) { |
328 | |
329 | |
330 | case TEK_Scalar: |
331 | return !isFullSizeType(CGF.CGM, type, AtomicSizeInBits); |
332 | case TEK_Complex: |
333 | return !isFullSizeType(CGF.CGM, type->getStructElementType(0), |
334 | AtomicSizeInBits / 2); |
335 | |
336 | |
337 | case TEK_Aggregate: |
338 | return false; |
339 | } |
340 | llvm_unreachable("bad evaluation kind"); |
341 | } |
342 | |
343 | bool AtomicInfo::emitMemSetZeroIfNecessary() const { |
344 | assert(LVal.isSimple()); |
345 | llvm::Value *addr = LVal.getPointer(); |
346 | if (!requiresMemSetZero(addr->getType()->getPointerElementType())) |
347 | return false; |
348 | |
349 | CGF.Builder.CreateMemSet( |
350 | addr, llvm::ConstantInt::get(CGF.Int8Ty, 0), |
351 | CGF.getContext().toCharUnitsFromBits(AtomicSizeInBits).getQuantity(), |
352 | LVal.getAlignment().getQuantity()); |
353 | return true; |
354 | } |
355 | |
356 | static void emitAtomicCmpXchg(CodeGenFunction &CGF, AtomicExpr *E, bool IsWeak, |
357 | Address Dest, Address Ptr, |
358 | Address Val1, Address Val2, |
359 | uint64_t Size, |
360 | llvm::AtomicOrdering SuccessOrder, |
361 | llvm::AtomicOrdering FailureOrder, |
362 | llvm::SyncScope::ID Scope) { |
363 | |
364 | llvm::Value *Expected = CGF.Builder.CreateLoad(Val1); |
365 | llvm::Value *Desired = CGF.Builder.CreateLoad(Val2); |
366 | |
367 | llvm::AtomicCmpXchgInst *Pair = CGF.Builder.CreateAtomicCmpXchg( |
368 | Ptr.getPointer(), Expected, Desired, SuccessOrder, FailureOrder, |
369 | Scope); |
370 | Pair->setVolatile(E->isVolatile()); |
371 | Pair->setWeak(IsWeak); |
372 | |
373 | |
374 | |
375 | llvm::Value *Old = CGF.Builder.CreateExtractValue(Pair, 0); |
376 | llvm::Value *Cmp = CGF.Builder.CreateExtractValue(Pair, 1); |
377 | |
378 | |
379 | |
380 | llvm::BasicBlock *StoreExpectedBB = |
381 | CGF.createBasicBlock("cmpxchg.store_expected", CGF.CurFn); |
382 | |
383 | |
384 | |
385 | llvm::BasicBlock *ContinueBB = |
386 | CGF.createBasicBlock("cmpxchg.continue", CGF.CurFn); |
387 | |
388 | |
389 | |
390 | CGF.Builder.CreateCondBr(Cmp, ContinueBB, StoreExpectedBB); |
391 | |
392 | CGF.Builder.SetInsertPoint(StoreExpectedBB); |
393 | |
394 | CGF.Builder.CreateStore(Old, Val1); |
395 | |
396 | CGF.Builder.CreateBr(ContinueBB); |
397 | |
398 | CGF.Builder.SetInsertPoint(ContinueBB); |
399 | |
400 | CGF.EmitStoreOfScalar(Cmp, CGF.MakeAddrLValue(Dest, E->getType())); |
401 | } |
402 | |
403 | |
404 | |
405 | |
406 | static void emitAtomicCmpXchgFailureSet(CodeGenFunction &CGF, AtomicExpr *E, |
407 | bool IsWeak, Address Dest, Address Ptr, |
408 | Address Val1, Address Val2, |
409 | llvm::Value *FailureOrderVal, |
410 | uint64_t Size, |
411 | llvm::AtomicOrdering SuccessOrder, |
412 | llvm::SyncScope::ID Scope) { |
413 | llvm::AtomicOrdering FailureOrder; |
414 | if (llvm::ConstantInt *FO = dyn_cast<llvm::ConstantInt>(FailureOrderVal)) { |
415 | auto FOS = FO->getSExtValue(); |
416 | if (!llvm::isValidAtomicOrderingCABI(FOS)) |
417 | FailureOrder = llvm::AtomicOrdering::Monotonic; |
418 | else |
419 | switch ((llvm::AtomicOrderingCABI)FOS) { |
420 | case llvm::AtomicOrderingCABI::relaxed: |
421 | case llvm::AtomicOrderingCABI::release: |
422 | case llvm::AtomicOrderingCABI::acq_rel: |
423 | FailureOrder = llvm::AtomicOrdering::Monotonic; |
424 | break; |
425 | case llvm::AtomicOrderingCABI::consume: |
426 | case llvm::AtomicOrderingCABI::acquire: |
427 | FailureOrder = llvm::AtomicOrdering::Acquire; |
428 | break; |
429 | case llvm::AtomicOrderingCABI::seq_cst: |
430 | FailureOrder = llvm::AtomicOrdering::SequentiallyConsistent; |
431 | break; |
432 | } |
433 | if (isStrongerThan(FailureOrder, SuccessOrder)) { |
434 | |
435 | |
436 | FailureOrder = |
437 | llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(SuccessOrder); |
438 | } |
439 | emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, Size, SuccessOrder, |
440 | FailureOrder, Scope); |
441 | return; |
442 | } |
443 | |
444 | |
445 | llvm::BasicBlock *MonotonicBB = nullptr, *AcquireBB = nullptr, |
446 | *SeqCstBB = nullptr; |
447 | MonotonicBB = CGF.createBasicBlock("monotonic_fail", CGF.CurFn); |
448 | if (SuccessOrder != llvm::AtomicOrdering::Monotonic && |
449 | SuccessOrder != llvm::AtomicOrdering::Release) |
450 | AcquireBB = CGF.createBasicBlock("acquire_fail", CGF.CurFn); |
451 | if (SuccessOrder == llvm::AtomicOrdering::SequentiallyConsistent) |
452 | SeqCstBB = CGF.createBasicBlock("seqcst_fail", CGF.CurFn); |
453 | |
454 | llvm::BasicBlock *ContBB = CGF.createBasicBlock("atomic.continue", CGF.CurFn); |
455 | |
456 | llvm::SwitchInst *SI = CGF.Builder.CreateSwitch(FailureOrderVal, MonotonicBB); |
457 | |
458 | |
459 | |
460 | |
461 | |
462 | |
463 | CGF.Builder.SetInsertPoint(MonotonicBB); |
464 | emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, |
465 | Size, SuccessOrder, llvm::AtomicOrdering::Monotonic, Scope); |
466 | CGF.Builder.CreateBr(ContBB); |
467 | |
468 | if (AcquireBB) { |
469 | CGF.Builder.SetInsertPoint(AcquireBB); |
470 | emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, |
471 | Size, SuccessOrder, llvm::AtomicOrdering::Acquire, Scope); |
472 | CGF.Builder.CreateBr(ContBB); |
473 | SI->addCase(CGF.Builder.getInt32((int)llvm::AtomicOrderingCABI::consume), |
474 | AcquireBB); |
475 | SI->addCase(CGF.Builder.getInt32((int)llvm::AtomicOrderingCABI::acquire), |
476 | AcquireBB); |
477 | } |
478 | if (SeqCstBB) { |
479 | CGF.Builder.SetInsertPoint(SeqCstBB); |
480 | emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, Size, SuccessOrder, |
481 | llvm::AtomicOrdering::SequentiallyConsistent, Scope); |
482 | CGF.Builder.CreateBr(ContBB); |
483 | SI->addCase(CGF.Builder.getInt32((int)llvm::AtomicOrderingCABI::seq_cst), |
484 | SeqCstBB); |
485 | } |
486 | |
487 | CGF.Builder.SetInsertPoint(ContBB); |
488 | } |
489 | |
490 | static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, Address Dest, |
491 | Address Ptr, Address Val1, Address Val2, |
492 | llvm::Value *IsWeak, llvm::Value *FailureOrder, |
493 | uint64_t Size, llvm::AtomicOrdering Order, |
494 | llvm::SyncScope::ID Scope) { |
495 | llvm::AtomicRMWInst::BinOp Op = llvm::AtomicRMWInst::Add; |
496 | llvm::Instruction::BinaryOps PostOp = (llvm::Instruction::BinaryOps)0; |
497 | |
498 | switch (E->getOp()) { |
499 | case AtomicExpr::AO__c11_atomic_init: |
500 | case AtomicExpr::AO__opencl_atomic_init: |
501 | llvm_unreachable("Already handled!"); |
502 | |
503 | case AtomicExpr::AO__c11_atomic_compare_exchange_strong: |
504 | case AtomicExpr::AO__opencl_atomic_compare_exchange_strong: |
505 | emitAtomicCmpXchgFailureSet(CGF, E, false, Dest, Ptr, Val1, Val2, |
506 | FailureOrder, Size, Order, Scope); |
507 | return; |
508 | case AtomicExpr::AO__c11_atomic_compare_exchange_weak: |
509 | case AtomicExpr::AO__opencl_atomic_compare_exchange_weak: |
510 | emitAtomicCmpXchgFailureSet(CGF, E, true, Dest, Ptr, Val1, Val2, |
511 | FailureOrder, Size, Order, Scope); |
512 | return; |
513 | case AtomicExpr::AO__atomic_compare_exchange: |
514 | case AtomicExpr::AO__atomic_compare_exchange_n: { |
515 | if (llvm::ConstantInt *IsWeakC = dyn_cast<llvm::ConstantInt>(IsWeak)) { |
516 | emitAtomicCmpXchgFailureSet(CGF, E, IsWeakC->getZExtValue(), Dest, Ptr, |
517 | Val1, Val2, FailureOrder, Size, Order, Scope); |
518 | } else { |
519 | |
520 | llvm::BasicBlock *StrongBB = |
521 | CGF.createBasicBlock("cmpxchg.strong", CGF.CurFn); |
522 | llvm::BasicBlock *WeakBB = CGF.createBasicBlock("cmxchg.weak", CGF.CurFn); |
523 | llvm::BasicBlock *ContBB = |
524 | CGF.createBasicBlock("cmpxchg.continue", CGF.CurFn); |
525 | |
526 | llvm::SwitchInst *SI = CGF.Builder.CreateSwitch(IsWeak, WeakBB); |
527 | SI->addCase(CGF.Builder.getInt1(false), StrongBB); |
528 | |
529 | CGF.Builder.SetInsertPoint(StrongBB); |
530 | emitAtomicCmpXchgFailureSet(CGF, E, false, Dest, Ptr, Val1, Val2, |
531 | FailureOrder, Size, Order, Scope); |
532 | CGF.Builder.CreateBr(ContBB); |
533 | |
534 | CGF.Builder.SetInsertPoint(WeakBB); |
535 | emitAtomicCmpXchgFailureSet(CGF, E, true, Dest, Ptr, Val1, Val2, |
536 | FailureOrder, Size, Order, Scope); |
537 | CGF.Builder.CreateBr(ContBB); |
538 | |
539 | CGF.Builder.SetInsertPoint(ContBB); |
540 | } |
541 | return; |
542 | } |
543 | case AtomicExpr::AO__c11_atomic_load: |
544 | case AtomicExpr::AO__opencl_atomic_load: |
545 | case AtomicExpr::AO__atomic_load_n: |
546 | case AtomicExpr::AO__atomic_load: { |
547 | llvm::LoadInst *Load = CGF.Builder.CreateLoad(Ptr); |
548 | Load->setAtomic(Order, Scope); |
549 | Load->setVolatile(E->isVolatile()); |
550 | CGF.Builder.CreateStore(Load, Dest); |
551 | return; |
552 | } |
553 | |
554 | case AtomicExpr::AO__c11_atomic_store: |
555 | case AtomicExpr::AO__opencl_atomic_store: |
556 | case AtomicExpr::AO__atomic_store: |
557 | case AtomicExpr::AO__atomic_store_n: { |
558 | llvm::Value *LoadVal1 = CGF.Builder.CreateLoad(Val1); |
559 | llvm::StoreInst *Store = CGF.Builder.CreateStore(LoadVal1, Ptr); |
560 | Store->setAtomic(Order, Scope); |
561 | Store->setVolatile(E->isVolatile()); |
562 | return; |
563 | } |
564 | |
565 | case AtomicExpr::AO__c11_atomic_exchange: |
566 | case AtomicExpr::AO__opencl_atomic_exchange: |
567 | case AtomicExpr::AO__atomic_exchange_n: |
568 | case AtomicExpr::AO__atomic_exchange: |
569 | Op = llvm::AtomicRMWInst::Xchg; |
570 | break; |
571 | |
572 | case AtomicExpr::AO__atomic_add_fetch: |
573 | PostOp = llvm::Instruction::Add; |
574 | LLVM_FALLTHROUGH; |
575 | case AtomicExpr::AO__c11_atomic_fetch_add: |
576 | case AtomicExpr::AO__opencl_atomic_fetch_add: |
577 | case AtomicExpr::AO__atomic_fetch_add: |
578 | Op = llvm::AtomicRMWInst::Add; |
579 | break; |
580 | |
581 | case AtomicExpr::AO__atomic_sub_fetch: |
582 | PostOp = llvm::Instruction::Sub; |
583 | LLVM_FALLTHROUGH; |
584 | case AtomicExpr::AO__c11_atomic_fetch_sub: |
585 | case AtomicExpr::AO__opencl_atomic_fetch_sub: |
586 | case AtomicExpr::AO__atomic_fetch_sub: |
587 | Op = llvm::AtomicRMWInst::Sub; |
588 | break; |
589 | |
590 | case AtomicExpr::AO__opencl_atomic_fetch_min: |
591 | case AtomicExpr::AO__atomic_fetch_min: |
592 | Op = E->getValueType()->isSignedIntegerType() ? llvm::AtomicRMWInst::Min |
593 | : llvm::AtomicRMWInst::UMin; |
594 | break; |
595 | |
596 | case AtomicExpr::AO__opencl_atomic_fetch_max: |
597 | case AtomicExpr::AO__atomic_fetch_max: |
598 | Op = E->getValueType()->isSignedIntegerType() ? llvm::AtomicRMWInst::Max |
599 | : llvm::AtomicRMWInst::UMax; |
600 | break; |
601 | |
602 | case AtomicExpr::AO__atomic_and_fetch: |
603 | PostOp = llvm::Instruction::And; |
604 | LLVM_FALLTHROUGH; |
605 | case AtomicExpr::AO__c11_atomic_fetch_and: |
606 | case AtomicExpr::AO__opencl_atomic_fetch_and: |
607 | case AtomicExpr::AO__atomic_fetch_and: |
608 | Op = llvm::AtomicRMWInst::And; |
609 | break; |
610 | |
611 | case AtomicExpr::AO__atomic_or_fetch: |
612 | PostOp = llvm::Instruction::Or; |
613 | LLVM_FALLTHROUGH; |
614 | case AtomicExpr::AO__c11_atomic_fetch_or: |
615 | case AtomicExpr::AO__opencl_atomic_fetch_or: |
616 | case AtomicExpr::AO__atomic_fetch_or: |
617 | Op = llvm::AtomicRMWInst::Or; |
618 | break; |
619 | |
620 | case AtomicExpr::AO__atomic_xor_fetch: |
621 | PostOp = llvm::Instruction::Xor; |
622 | LLVM_FALLTHROUGH; |
623 | case AtomicExpr::AO__c11_atomic_fetch_xor: |
624 | case AtomicExpr::AO__opencl_atomic_fetch_xor: |
625 | case AtomicExpr::AO__atomic_fetch_xor: |
626 | Op = llvm::AtomicRMWInst::Xor; |
627 | break; |
628 | |
629 | case AtomicExpr::AO__atomic_nand_fetch: |
630 | PostOp = llvm::Instruction::And; |
631 | LLVM_FALLTHROUGH; |
632 | case AtomicExpr::AO__atomic_fetch_nand: |
633 | Op = llvm::AtomicRMWInst::Nand; |
634 | break; |
635 | } |
636 | |
637 | llvm::Value *LoadVal1 = CGF.Builder.CreateLoad(Val1); |
638 | llvm::AtomicRMWInst *RMWI = |
639 | CGF.Builder.CreateAtomicRMW(Op, Ptr.getPointer(), LoadVal1, Order, Scope); |
640 | RMWI->setVolatile(E->isVolatile()); |
641 | |
642 | |
643 | |
644 | llvm::Value *Result = RMWI; |
645 | if (PostOp) |
646 | Result = CGF.Builder.CreateBinOp(PostOp, RMWI, LoadVal1); |
647 | if (E->getOp() == AtomicExpr::AO__atomic_nand_fetch) |
648 | Result = CGF.Builder.CreateNot(Result); |
649 | CGF.Builder.CreateStore(Result, Dest); |
650 | } |
651 | |
652 | |
653 | |
654 | static Address |
655 | EmitValToTemp(CodeGenFunction &CGF, Expr *E) { |
656 | Address DeclPtr = CGF.CreateMemTemp(E->getType(), ".atomictmp"); |
657 | CGF.EmitAnyExprToMem(E, DeclPtr, E->getType().getQualifiers(), |
658 | true); |
659 | return DeclPtr; |
660 | } |
661 | |
662 | static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *Expr, Address Dest, |
663 | Address Ptr, Address Val1, Address Val2, |
664 | llvm::Value *IsWeak, llvm::Value *FailureOrder, |
665 | uint64_t Size, llvm::AtomicOrdering Order, |
666 | llvm::Value *Scope) { |
667 | auto ScopeModel = Expr->getScopeModel(); |
668 | |
669 | |
670 | |
671 | if (!ScopeModel) { |
672 | EmitAtomicOp(CGF, Expr, Dest, Ptr, Val1, Val2, IsWeak, FailureOrder, Size, |
673 | Order, CGF.CGM.getLLVMContext().getOrInsertSyncScopeID("")); |
674 | return; |
675 | } |
676 | |
677 | |
678 | if (auto SC = dyn_cast<llvm::ConstantInt>(Scope)) { |
679 | auto SCID = CGF.getTargetHooks().getLLVMSyncScopeID( |
680 | CGF.CGM.getLangOpts(), ScopeModel->map(SC->getZExtValue()), |
681 | Order, CGF.CGM.getLLVMContext()); |
682 | EmitAtomicOp(CGF, Expr, Dest, Ptr, Val1, Val2, IsWeak, FailureOrder, Size, |
683 | Order, SCID); |
684 | return; |
685 | } |
686 | |
687 | |
688 | auto &Builder = CGF.Builder; |
689 | auto Scopes = ScopeModel->getRuntimeValues(); |
690 | llvm::DenseMap<unsigned, llvm::BasicBlock *> BB; |
691 | for (auto S : Scopes) |
692 | BB[S] = CGF.createBasicBlock(getAsString(ScopeModel->map(S)), CGF.CurFn); |
693 | |
694 | llvm::BasicBlock *ContBB = |
695 | CGF.createBasicBlock("atomic.scope.continue", CGF.CurFn); |
696 | |
697 | auto *SC = Builder.CreateIntCast(Scope, Builder.getInt32Ty(), false); |
698 | |
699 | |
700 | auto FallBack = ScopeModel->getFallBackValue(); |
701 | llvm::SwitchInst *SI = Builder.CreateSwitch(SC, BB[FallBack]); |
702 | for (auto S : Scopes) { |
703 | auto *B = BB[S]; |
704 | if (S != FallBack) |
705 | SI->addCase(Builder.getInt32(S), B); |
706 | |
707 | Builder.SetInsertPoint(B); |
708 | EmitAtomicOp(CGF, Expr, Dest, Ptr, Val1, Val2, IsWeak, FailureOrder, Size, |
709 | Order, |
710 | CGF.getTargetHooks().getLLVMSyncScopeID(CGF.CGM.getLangOpts(), |
711 | ScopeModel->map(S), |
712 | Order, |
713 | CGF.getLLVMContext())); |
714 | Builder.CreateBr(ContBB); |
715 | } |
716 | |
717 | Builder.SetInsertPoint(ContBB); |
718 | } |
719 | |
720 | static void |
721 | AddDirectArgument(CodeGenFunction &CGF, CallArgList &Args, |
722 | bool UseOptimizedLibcall, llvm::Value *Val, QualType ValTy, |
723 | SourceLocation Loc, CharUnits SizeInChars) { |
724 | if (UseOptimizedLibcall) { |
725 | |
726 | CharUnits Align = CGF.getContext().getTypeAlignInChars(ValTy); |
727 | int64_t SizeInBits = CGF.getContext().toBits(SizeInChars); |
728 | ValTy = |
729 | CGF.getContext().getIntTypeForBitwidth(SizeInBits, ); |
730 | llvm::Type *IPtrTy = llvm::IntegerType::get(CGF.getLLVMContext(), |
731 | SizeInBits)->getPointerTo(); |
732 | Address Ptr = Address(CGF.Builder.CreateBitCast(Val, IPtrTy), Align); |
733 | Val = CGF.EmitLoadOfScalar(Ptr, false, |
734 | CGF.getContext().getPointerType(ValTy), |
735 | Loc); |
736 | |
737 | Args.add(RValue::get(Val), ValTy); |
738 | } else { |
739 | |
740 | Args.add(RValue::get(CGF.EmitCastToVoidPtr(Val)), |
741 | CGF.getContext().VoidPtrTy); |
742 | } |
743 | } |
744 | |
745 | RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) { |
746 | QualType AtomicTy = E->getPtr()->getType()->getPointeeType(); |
747 | QualType MemTy = AtomicTy; |
748 | if (const AtomicType *AT = AtomicTy->getAs<AtomicType>()) |
749 | MemTy = AT->getValueType(); |
750 | llvm::Value *IsWeak = nullptr, *OrderFail = nullptr; |
751 | |
752 | Address Val1 = Address::invalid(); |
753 | Address Val2 = Address::invalid(); |
754 | Address Dest = Address::invalid(); |
755 | Address Ptr = EmitPointerWithAlignment(E->getPtr()); |
756 | |
757 | if (E->getOp() == AtomicExpr::AO__c11_atomic_init || |
758 | E->getOp() == AtomicExpr::AO__opencl_atomic_init) { |
759 | LValue lvalue = MakeAddrLValue(Ptr, AtomicTy); |
760 | EmitAtomicInit(E->getVal1(), lvalue); |
761 | return RValue::get(nullptr); |
762 | } |
763 | |
764 | CharUnits sizeChars, alignChars; |
765 | std::tie(sizeChars, alignChars) = getContext().getTypeInfoInChars(AtomicTy); |
766 | uint64_t Size = sizeChars.getQuantity(); |
767 | unsigned MaxInlineWidthInBits = getTarget().getMaxAtomicInlineWidth(); |
768 | |
769 | bool Oversized = getContext().toBits(sizeChars) > MaxInlineWidthInBits; |
770 | bool Misaligned = (Ptr.getAlignment() % sizeChars) != 0; |
771 | bool UseLibcall = Misaligned | Oversized; |
772 | |
773 | if (UseLibcall) { |
774 | CGM.getDiags().Report(E->getBeginLoc(), diag::warn_atomic_op_misaligned) |
775 | << !Oversized; |
776 | } |
777 | |
778 | llvm::Value *Order = EmitScalarExpr(E->getOrder()); |
779 | llvm::Value *Scope = |
780 | E->getScopeModel() ? EmitScalarExpr(E->getScope()) : nullptr; |
781 | |
782 | switch (E->getOp()) { |
783 | case AtomicExpr::AO__c11_atomic_init: |
784 | case AtomicExpr::AO__opencl_atomic_init: |
785 | llvm_unreachable("Already handled above with EmitAtomicInit!"); |
786 | |
787 | case AtomicExpr::AO__c11_atomic_load: |
788 | case AtomicExpr::AO__opencl_atomic_load: |
789 | case AtomicExpr::AO__atomic_load_n: |
790 | break; |
791 | |
792 | case AtomicExpr::AO__atomic_load: |
793 | Dest = EmitPointerWithAlignment(E->getVal1()); |
794 | break; |
795 | |
796 | case AtomicExpr::AO__atomic_store: |
797 | Val1 = EmitPointerWithAlignment(E->getVal1()); |
798 | break; |
799 | |
800 | case AtomicExpr::AO__atomic_exchange: |
801 | Val1 = EmitPointerWithAlignment(E->getVal1()); |
802 | Dest = EmitPointerWithAlignment(E->getVal2()); |
803 | break; |
804 | |
805 | case AtomicExpr::AO__c11_atomic_compare_exchange_strong: |
806 | case AtomicExpr::AO__c11_atomic_compare_exchange_weak: |
807 | case AtomicExpr::AO__opencl_atomic_compare_exchange_strong: |
808 | case AtomicExpr::AO__opencl_atomic_compare_exchange_weak: |
809 | case AtomicExpr::AO__atomic_compare_exchange_n: |
810 | case AtomicExpr::AO__atomic_compare_exchange: |
811 | Val1 = EmitPointerWithAlignment(E->getVal1()); |
812 | if (E->getOp() == AtomicExpr::AO__atomic_compare_exchange) |
813 | Val2 = EmitPointerWithAlignment(E->getVal2()); |
814 | else |
815 | Val2 = EmitValToTemp(*this, E->getVal2()); |
816 | OrderFail = EmitScalarExpr(E->getOrderFail()); |
817 | if (E->getOp() == AtomicExpr::AO__atomic_compare_exchange_n || |
818 | E->getOp() == AtomicExpr::AO__atomic_compare_exchange) |
819 | IsWeak = EmitScalarExpr(E->getWeak()); |
820 | break; |
821 | |
822 | case AtomicExpr::AO__c11_atomic_fetch_add: |
823 | case AtomicExpr::AO__c11_atomic_fetch_sub: |
824 | case AtomicExpr::AO__opencl_atomic_fetch_add: |
825 | case AtomicExpr::AO__opencl_atomic_fetch_sub: |
826 | if (MemTy->isPointerType()) { |
827 | |
828 | |
829 | |
830 | |
831 | QualType Val1Ty = E->getVal1()->getType(); |
832 | llvm::Value *Val1Scalar = EmitScalarExpr(E->getVal1()); |
833 | CharUnits PointeeIncAmt = |
834 | getContext().getTypeSizeInChars(MemTy->getPointeeType()); |
835 | Val1Scalar = Builder.CreateMul(Val1Scalar, CGM.getSize(PointeeIncAmt)); |
836 | auto Temp = CreateMemTemp(Val1Ty, ".atomictmp"); |
837 | Val1 = Temp; |
838 | EmitStoreOfScalar(Val1Scalar, MakeAddrLValue(Temp, Val1Ty)); |
839 | break; |
840 | } |
841 | LLVM_FALLTHROUGH; |
842 | case AtomicExpr::AO__atomic_fetch_add: |
843 | case AtomicExpr::AO__atomic_fetch_sub: |
844 | case AtomicExpr::AO__atomic_add_fetch: |
845 | case AtomicExpr::AO__atomic_sub_fetch: |
846 | case AtomicExpr::AO__c11_atomic_store: |
847 | case AtomicExpr::AO__c11_atomic_exchange: |
848 | case AtomicExpr::AO__opencl_atomic_store: |
849 | case AtomicExpr::AO__opencl_atomic_exchange: |
850 | case AtomicExpr::AO__atomic_store_n: |
851 | case AtomicExpr::AO__atomic_exchange_n: |
852 | case AtomicExpr::AO__c11_atomic_fetch_and: |
853 | case AtomicExpr::AO__c11_atomic_fetch_or: |
854 | case AtomicExpr::AO__c11_atomic_fetch_xor: |
855 | case AtomicExpr::AO__opencl_atomic_fetch_and: |
856 | case AtomicExpr::AO__opencl_atomic_fetch_or: |
857 | case AtomicExpr::AO__opencl_atomic_fetch_xor: |
858 | case AtomicExpr::AO__opencl_atomic_fetch_min: |
859 | case AtomicExpr::AO__opencl_atomic_fetch_max: |
860 | case AtomicExpr::AO__atomic_fetch_and: |
861 | case AtomicExpr::AO__atomic_fetch_or: |
862 | case AtomicExpr::AO__atomic_fetch_xor: |
863 | case AtomicExpr::AO__atomic_fetch_nand: |
864 | case AtomicExpr::AO__atomic_and_fetch: |
865 | case AtomicExpr::AO__atomic_or_fetch: |
866 | case AtomicExpr::AO__atomic_xor_fetch: |
867 | case AtomicExpr::AO__atomic_nand_fetch: |
868 | case AtomicExpr::AO__atomic_fetch_min: |
869 | case AtomicExpr::AO__atomic_fetch_max: |
870 | Val1 = EmitValToTemp(*this, E->getVal1()); |
871 | break; |
872 | } |
873 | |
874 | QualType RValTy = E->getType().getUnqualifiedType(); |
875 | |
876 | |
877 | |
878 | |
879 | LValue AtomicVal = MakeAddrLValue(Ptr, AtomicTy); |
880 | AtomicInfo Atomics(*this, AtomicVal); |
881 | |
882 | Ptr = Atomics.emitCastToAtomicIntPointer(Ptr); |
883 | if (Val1.isValid()) Val1 = Atomics.convertToAtomicIntPointer(Val1); |
884 | if (Val2.isValid()) Val2 = Atomics.convertToAtomicIntPointer(Val2); |
885 | if (Dest.isValid()) |
886 | Dest = Atomics.emitCastToAtomicIntPointer(Dest); |
887 | else if (E->isCmpXChg()) |
888 | Dest = CreateMemTemp(RValTy, "cmpxchg.bool"); |
889 | else if (!RValTy->isVoidType()) |
890 | Dest = Atomics.emitCastToAtomicIntPointer(Atomics.CreateTempAlloca()); |
891 | |
892 | |
893 | if (UseLibcall) { |
894 | bool UseOptimizedLibcall = false; |
895 | switch (E->getOp()) { |
896 | case AtomicExpr::AO__c11_atomic_init: |
897 | case AtomicExpr::AO__opencl_atomic_init: |
898 | llvm_unreachable("Already handled above with EmitAtomicInit!"); |
899 | |
900 | case AtomicExpr::AO__c11_atomic_fetch_add: |
901 | case AtomicExpr::AO__opencl_atomic_fetch_add: |
902 | case AtomicExpr::AO__atomic_fetch_add: |
903 | case AtomicExpr::AO__c11_atomic_fetch_and: |
904 | case AtomicExpr::AO__opencl_atomic_fetch_and: |
905 | case AtomicExpr::AO__atomic_fetch_and: |
906 | case AtomicExpr::AO__c11_atomic_fetch_or: |
907 | case AtomicExpr::AO__opencl_atomic_fetch_or: |
908 | case AtomicExpr::AO__atomic_fetch_or: |
909 | case AtomicExpr::AO__atomic_fetch_nand: |
910 | case AtomicExpr::AO__c11_atomic_fetch_sub: |
911 | case AtomicExpr::AO__opencl_atomic_fetch_sub: |
912 | case AtomicExpr::AO__atomic_fetch_sub: |
913 | case AtomicExpr::AO__c11_atomic_fetch_xor: |
914 | case AtomicExpr::AO__opencl_atomic_fetch_xor: |
915 | case AtomicExpr::AO__opencl_atomic_fetch_min: |
916 | case AtomicExpr::AO__opencl_atomic_fetch_max: |
917 | case AtomicExpr::AO__atomic_fetch_xor: |
918 | case AtomicExpr::AO__atomic_add_fetch: |
919 | case AtomicExpr::AO__atomic_and_fetch: |
920 | case AtomicExpr::AO__atomic_nand_fetch: |
921 | case AtomicExpr::AO__atomic_or_fetch: |
922 | case AtomicExpr::AO__atomic_sub_fetch: |
923 | case AtomicExpr::AO__atomic_xor_fetch: |
924 | case AtomicExpr::AO__atomic_fetch_min: |
925 | case AtomicExpr::AO__atomic_fetch_max: |
926 | |
927 | UseOptimizedLibcall = true; |
928 | break; |
929 | |
930 | case AtomicExpr::AO__atomic_load: |
931 | case AtomicExpr::AO__atomic_store: |
932 | case AtomicExpr::AO__atomic_exchange: |
933 | case AtomicExpr::AO__atomic_compare_exchange: |
934 | |
935 | |
936 | if (Misaligned) |
937 | break; |
938 | LLVM_FALLTHROUGH; |
939 | case AtomicExpr::AO__c11_atomic_load: |
940 | case AtomicExpr::AO__c11_atomic_store: |
941 | case AtomicExpr::AO__c11_atomic_exchange: |
942 | case AtomicExpr::AO__c11_atomic_compare_exchange_weak: |
943 | case AtomicExpr::AO__c11_atomic_compare_exchange_strong: |
944 | case AtomicExpr::AO__opencl_atomic_load: |
945 | case AtomicExpr::AO__opencl_atomic_store: |
946 | case AtomicExpr::AO__opencl_atomic_exchange: |
947 | case AtomicExpr::AO__opencl_atomic_compare_exchange_weak: |
948 | case AtomicExpr::AO__opencl_atomic_compare_exchange_strong: |
949 | case AtomicExpr::AO__atomic_load_n: |
950 | case AtomicExpr::AO__atomic_store_n: |
951 | case AtomicExpr::AO__atomic_exchange_n: |
952 | case AtomicExpr::AO__atomic_compare_exchange_n: |
953 | |
954 | |
955 | if (Size == 1 || Size == 2 || Size == 4 || Size == 8) |
956 | UseOptimizedLibcall = true; |
957 | break; |
958 | } |
959 | |
960 | CallArgList Args; |
961 | if (!UseOptimizedLibcall) { |
962 | |
963 | Args.add(RValue::get(llvm::ConstantInt::get(SizeTy, Size)), |
964 | getContext().getSizeType()); |
965 | } |
966 | |
967 | |
968 | |
969 | auto CastToGenericAddrSpace = [&](llvm::Value *V, QualType PT) { |
970 | if (!E->isOpenCL()) |
971 | return V; |
972 | auto AS = PT->getAs<PointerType>()->getPointeeType().getAddressSpace(); |
973 | if (AS == LangAS::opencl_generic) |
974 | return V; |
975 | auto DestAS = getContext().getTargetAddressSpace(LangAS::opencl_generic); |
976 | auto T = V->getType(); |
977 | auto *DestType = T->getPointerElementType()->getPointerTo(DestAS); |
978 | |
979 | return getTargetHooks().performAddrSpaceCast( |
980 | *this, V, AS, LangAS::opencl_generic, DestType, false); |
981 | }; |
982 | |
983 | Args.add(RValue::get(CastToGenericAddrSpace( |
984 | EmitCastToVoidPtr(Ptr.getPointer()), E->getPtr()->getType())), |
985 | getContext().VoidPtrTy); |
986 | |
987 | std::string LibCallName; |
988 | QualType LoweredMemTy = |
989 | MemTy->isPointerType() ? getContext().getIntPtrType() : MemTy; |
990 | QualType RetTy; |
991 | bool HaveRetTy = false; |
992 | llvm::Instruction::BinaryOps PostOp = (llvm::Instruction::BinaryOps)0; |
993 | switch (E->getOp()) { |
994 | case AtomicExpr::AO__c11_atomic_init: |
995 | case AtomicExpr::AO__opencl_atomic_init: |
996 | llvm_unreachable("Already handled!"); |
997 | |
998 | |
999 | |
1000 | |
1001 | |
1002 | |
1003 | |
1004 | |
1005 | case AtomicExpr::AO__c11_atomic_compare_exchange_weak: |
1006 | case AtomicExpr::AO__c11_atomic_compare_exchange_strong: |
1007 | case AtomicExpr::AO__opencl_atomic_compare_exchange_weak: |
1008 | case AtomicExpr::AO__opencl_atomic_compare_exchange_strong: |
1009 | case AtomicExpr::AO__atomic_compare_exchange: |
1010 | case AtomicExpr::AO__atomic_compare_exchange_n: |
1011 | LibCallName = "__atomic_compare_exchange"; |
1012 | RetTy = getContext().BoolTy; |
1013 | HaveRetTy = true; |
1014 | Args.add( |
1015 | RValue::get(CastToGenericAddrSpace( |
1016 | EmitCastToVoidPtr(Val1.getPointer()), E->getVal1()->getType())), |
1017 | getContext().VoidPtrTy); |
1018 | AddDirectArgument(*this, Args, UseOptimizedLibcall, Val2.getPointer(), |
1019 | MemTy, E->getExprLoc(), sizeChars); |
1020 | Args.add(RValue::get(Order), getContext().IntTy); |
1021 | Order = OrderFail; |
1022 | break; |
1023 | |
1024 | |
1025 | |
1026 | case AtomicExpr::AO__c11_atomic_exchange: |
1027 | case AtomicExpr::AO__opencl_atomic_exchange: |
1028 | case AtomicExpr::AO__atomic_exchange_n: |
1029 | case AtomicExpr::AO__atomic_exchange: |
1030 | LibCallName = "__atomic_exchange"; |
1031 | AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(), |
1032 | MemTy, E->getExprLoc(), sizeChars); |
1033 | break; |
1034 | |
1035 | |
1036 | case AtomicExpr::AO__c11_atomic_store: |
1037 | case AtomicExpr::AO__opencl_atomic_store: |
1038 | case AtomicExpr::AO__atomic_store: |
1039 | case AtomicExpr::AO__atomic_store_n: |
1040 | LibCallName = "__atomic_store"; |
1041 | RetTy = getContext().VoidTy; |
1042 | HaveRetTy = true; |
1043 | AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(), |
1044 | MemTy, E->getExprLoc(), sizeChars); |
1045 | break; |
1046 | |
1047 | |
1048 | case AtomicExpr::AO__c11_atomic_load: |
1049 | case AtomicExpr::AO__opencl_atomic_load: |
1050 | case AtomicExpr::AO__atomic_load: |
1051 | case AtomicExpr::AO__atomic_load_n: |
1052 | LibCallName = "__atomic_load"; |
1053 | break; |
1054 | |
1055 | |
1056 | case AtomicExpr::AO__atomic_add_fetch: |
1057 | PostOp = llvm::Instruction::Add; |
1058 | LLVM_FALLTHROUGH; |
1059 | case AtomicExpr::AO__c11_atomic_fetch_add: |
1060 | case AtomicExpr::AO__opencl_atomic_fetch_add: |
1061 | case AtomicExpr::AO__atomic_fetch_add: |
1062 | LibCallName = "__atomic_fetch_add"; |
1063 | AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(), |
1064 | LoweredMemTy, E->getExprLoc(), sizeChars); |
1065 | break; |
1066 | |
1067 | |
1068 | case AtomicExpr::AO__atomic_and_fetch: |
1069 | PostOp = llvm::Instruction::And; |
1070 | LLVM_FALLTHROUGH; |
1071 | case AtomicExpr::AO__c11_atomic_fetch_and: |
1072 | case AtomicExpr::AO__opencl_atomic_fetch_and: |
1073 | case AtomicExpr::AO__atomic_fetch_and: |
1074 | LibCallName = "__atomic_fetch_and"; |
1075 | AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(), |
1076 | MemTy, E->getExprLoc(), sizeChars); |
1077 | break; |
1078 | |
1079 | |
1080 | case AtomicExpr::AO__atomic_or_fetch: |
1081 | PostOp = llvm::Instruction::Or; |
1082 | LLVM_FALLTHROUGH; |
1083 | case AtomicExpr::AO__c11_atomic_fetch_or: |
1084 | case AtomicExpr::AO__opencl_atomic_fetch_or: |
1085 | case AtomicExpr::AO__atomic_fetch_or: |
1086 | LibCallName = "__atomic_fetch_or"; |
1087 | AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(), |
1088 | MemTy, E->getExprLoc(), sizeChars); |
1089 | break; |
1090 | |
1091 | |
1092 | case AtomicExpr::AO__atomic_sub_fetch: |
1093 | PostOp = llvm::Instruction::Sub; |
1094 | LLVM_FALLTHROUGH; |
1095 | case AtomicExpr::AO__c11_atomic_fetch_sub: |
1096 | case AtomicExpr::AO__opencl_atomic_fetch_sub: |
1097 | case AtomicExpr::AO__atomic_fetch_sub: |
1098 | LibCallName = "__atomic_fetch_sub"; |
1099 | AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(), |
1100 | LoweredMemTy, E->getExprLoc(), sizeChars); |
1101 | break; |
1102 | |
1103 | |
1104 | case AtomicExpr::AO__atomic_xor_fetch: |
1105 | PostOp = llvm::Instruction::Xor; |
1106 | LLVM_FALLTHROUGH; |
1107 | case AtomicExpr::AO__c11_atomic_fetch_xor: |
1108 | case AtomicExpr::AO__opencl_atomic_fetch_xor: |
1109 | case AtomicExpr::AO__atomic_fetch_xor: |
1110 | LibCallName = "__atomic_fetch_xor"; |
1111 | AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(), |
1112 | MemTy, E->getExprLoc(), sizeChars); |
1113 | break; |
1114 | case AtomicExpr::AO__atomic_fetch_min: |
1115 | case AtomicExpr::AO__opencl_atomic_fetch_min: |
1116 | LibCallName = E->getValueType()->isSignedIntegerType() |
1117 | ? "__atomic_fetch_min" |
1118 | : "__atomic_fetch_umin"; |
1119 | AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(), |
1120 | LoweredMemTy, E->getExprLoc(), sizeChars); |
1121 | break; |
1122 | case AtomicExpr::AO__atomic_fetch_max: |
1123 | case AtomicExpr::AO__opencl_atomic_fetch_max: |
1124 | LibCallName = E->getValueType()->isSignedIntegerType() |
1125 | ? "__atomic_fetch_max" |
1126 | : "__atomic_fetch_umax"; |
1127 | AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(), |
1128 | LoweredMemTy, E->getExprLoc(), sizeChars); |
1129 | break; |
1130 | |
1131 | |
1132 | case AtomicExpr::AO__atomic_nand_fetch: |
1133 | PostOp = llvm::Instruction::And; |
1134 | LLVM_FALLTHROUGH; |
1135 | case AtomicExpr::AO__atomic_fetch_nand: |
1136 | LibCallName = "__atomic_fetch_nand"; |
1137 | AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(), |
1138 | MemTy, E->getExprLoc(), sizeChars); |
1139 | break; |
1140 | } |
1141 | |
1142 | if (E->isOpenCL()) { |
1143 | LibCallName = std::string("__opencl") + |
1144 | StringRef(LibCallName).drop_front(1).str(); |
1145 | |
1146 | } |
1147 | |
1148 | if (UseOptimizedLibcall) |
1149 | LibCallName += "_" + llvm::utostr(Size); |
1150 | |
1151 | if (!HaveRetTy) { |
1152 | if (UseOptimizedLibcall) { |
1153 | |
1154 | |
1155 | RetTy = getContext().getIntTypeForBitwidth( |
1156 | getContext().toBits(sizeChars), ); |
1157 | } else { |
1158 | |
1159 | RetTy = getContext().VoidTy; |
1160 | Args.add(RValue::get(EmitCastToVoidPtr(Dest.getPointer())), |
1161 | getContext().VoidPtrTy); |
1162 | } |
1163 | } |
1164 | |
1165 | Args.add(RValue::get(Order), |
1166 | getContext().IntTy); |
1167 | if (E->isOpenCL()) |
1168 | Args.add(RValue::get(Scope), getContext().IntTy); |
1169 | |
1170 | |
1171 | |
1172 | |
1173 | assert(UseOptimizedLibcall || !PostOp); |
1174 | |
1175 | RValue Res = emitAtomicLibcall(*this, LibCallName, RetTy, Args); |
1176 | |
1177 | if (E->isCmpXChg()) |
1178 | return Res; |
1179 | |
1180 | |
1181 | |
1182 | if (UseOptimizedLibcall && Res.getScalarVal()) { |
1183 | llvm::Value *ResVal = Res.getScalarVal(); |
1184 | if (PostOp) { |
1185 | llvm::Value *LoadVal1 = Args[1].getRValue(*this).getScalarVal(); |
1186 | ResVal = Builder.CreateBinOp(PostOp, ResVal, LoadVal1); |
1187 | } |
1188 | if (E->getOp() == AtomicExpr::AO__atomic_nand_fetch) |
1189 | ResVal = Builder.CreateNot(ResVal); |
1190 | |
1191 | Builder.CreateStore( |
1192 | ResVal, |
1193 | Builder.CreateBitCast(Dest, ResVal->getType()->getPointerTo())); |
1194 | } |
1195 | |
1196 | if (RValTy->isVoidType()) |
1197 | return RValue::get(nullptr); |
1198 | |
1199 | return convertTempToRValue( |
1200 | Builder.CreateBitCast(Dest, ConvertTypeForMem(RValTy)->getPointerTo()), |
1201 | RValTy, E->getExprLoc()); |
1202 | } |
1203 | |
1204 | bool IsStore = E->getOp() == AtomicExpr::AO__c11_atomic_store || |
1205 | E->getOp() == AtomicExpr::AO__opencl_atomic_store || |
1206 | E->getOp() == AtomicExpr::AO__atomic_store || |
1207 | E->getOp() == AtomicExpr::AO__atomic_store_n; |
1208 | bool IsLoad = E->getOp() == AtomicExpr::AO__c11_atomic_load || |
1209 | E->getOp() == AtomicExpr::AO__opencl_atomic_load || |
1210 | E->getOp() == AtomicExpr::AO__atomic_load || |
1211 | E->getOp() == AtomicExpr::AO__atomic_load_n; |
1212 | |
1213 | if (isa<llvm::ConstantInt>(Order)) { |
1214 | auto ord = cast<llvm::ConstantInt>(Order)->getZExtValue(); |
1215 | |
1216 | |
1217 | if (llvm::isValidAtomicOrderingCABI(ord)) |
1218 | switch ((llvm::AtomicOrderingCABI)ord) { |
1219 | case llvm::AtomicOrderingCABI::relaxed: |
1220 | EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size, |
1221 | llvm::AtomicOrdering::Monotonic, Scope); |
1222 | break; |
1223 | case llvm::AtomicOrderingCABI::consume: |
1224 | case llvm::AtomicOrderingCABI::acquire: |
1225 | if (IsStore) |
1226 | break; |
1227 | EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size, |
1228 | llvm::AtomicOrdering::Acquire, Scope); |
1229 | break; |
1230 | case llvm::AtomicOrderingCABI::release: |
1231 | if (IsLoad) |
1232 | break; |
1233 | EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size, |
1234 | llvm::AtomicOrdering::Release, Scope); |
1235 | break; |
1236 | case llvm::AtomicOrderingCABI::acq_rel: |
1237 | if (IsLoad || IsStore) |
1238 | break; |
1239 | EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size, |
1240 | llvm::AtomicOrdering::AcquireRelease, Scope); |
1241 | break; |
1242 | case llvm::AtomicOrderingCABI::seq_cst: |
1243 | EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size, |
1244 | llvm::AtomicOrdering::SequentiallyConsistent, Scope); |
1245 | break; |
1246 | } |
1247 | if (RValTy->isVoidType()) |
1248 | return RValue::get(nullptr); |
1249 | |
1250 | return convertTempToRValue( |
1251 | Builder.CreateBitCast(Dest, ConvertTypeForMem(RValTy)->getPointerTo( |
1252 | Dest.getAddressSpace())), |
1253 | RValTy, E->getExprLoc()); |
1254 | } |
1255 | |
1256 | |
1257 | |
1258 | |
1259 | llvm::BasicBlock *MonotonicBB = nullptr, *AcquireBB = nullptr, |
1260 | *ReleaseBB = nullptr, *AcqRelBB = nullptr, |
1261 | *SeqCstBB = nullptr; |
1262 | MonotonicBB = createBasicBlock("monotonic", CurFn); |
1263 | if (!IsStore) |
1264 | AcquireBB = createBasicBlock("acquire", CurFn); |
1265 | if (!IsLoad) |
1266 | ReleaseBB = createBasicBlock("release", CurFn); |
1267 | if (!IsLoad && !IsStore) |
1268 | AcqRelBB = createBasicBlock("acqrel", CurFn); |
1269 | SeqCstBB = createBasicBlock("seqcst", CurFn); |
1270 | llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn); |
1271 | |
1272 | |
1273 | |
1274 | |
1275 | |
1276 | Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false); |
1277 | llvm::SwitchInst *SI = Builder.CreateSwitch(Order, MonotonicBB); |
1278 | |
1279 | |
1280 | Builder.SetInsertPoint(MonotonicBB); |
1281 | EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size, |
1282 | llvm::AtomicOrdering::Monotonic, Scope); |
1283 | Builder.CreateBr(ContBB); |
1284 | if (!IsStore) { |
1285 | Builder.SetInsertPoint(AcquireBB); |
1286 | EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size, |
1287 | llvm::AtomicOrdering::Acquire, Scope); |
1288 | Builder.CreateBr(ContBB); |
1289 | SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::consume), |
1290 | AcquireBB); |
1291 | SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::acquire), |
1292 | AcquireBB); |
1293 | } |
1294 | if (!IsLoad) { |
1295 | Builder.SetInsertPoint(ReleaseBB); |
1296 | EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size, |
1297 | llvm::AtomicOrdering::Release, Scope); |
1298 | Builder.CreateBr(ContBB); |
1299 | SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::release), |
1300 | ReleaseBB); |
1301 | } |
1302 | if (!IsLoad && !IsStore) { |
1303 | Builder.SetInsertPoint(AcqRelBB); |
1304 | EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size, |
1305 | llvm::AtomicOrdering::AcquireRelease, Scope); |
1306 | Builder.CreateBr(ContBB); |
1307 | SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::acq_rel), |
1308 | AcqRelBB); |
1309 | } |
1310 | Builder.SetInsertPoint(SeqCstBB); |
1311 | EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size, |
1312 | llvm::AtomicOrdering::SequentiallyConsistent, Scope); |
1313 | Builder.CreateBr(ContBB); |
1314 | SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::seq_cst), |
1315 | SeqCstBB); |
1316 | |
1317 | |
1318 | Builder.SetInsertPoint(ContBB); |
1319 | if (RValTy->isVoidType()) |
1320 | return RValue::get(nullptr); |
1321 | |
1322 | assert(Atomics.getValueSizeInBits() <= Atomics.getAtomicSizeInBits()); |
1323 | return convertTempToRValue( |
1324 | Builder.CreateBitCast(Dest, ConvertTypeForMem(RValTy)->getPointerTo( |
1325 | Dest.getAddressSpace())), |
1326 | RValTy, E->getExprLoc()); |
1327 | } |
1328 | |
1329 | Address AtomicInfo::emitCastToAtomicIntPointer(Address addr) const { |
1330 | unsigned addrspace = |
1331 | cast<llvm::PointerType>(addr.getPointer()->getType())->getAddressSpace(); |
1332 | llvm::IntegerType *ty = |
1333 | llvm::IntegerType::get(CGF.getLLVMContext(), AtomicSizeInBits); |
1334 | return CGF.Builder.CreateBitCast(addr, ty->getPointerTo(addrspace)); |
1335 | } |
1336 | |
1337 | Address AtomicInfo::convertToAtomicIntPointer(Address Addr) const { |
1338 | llvm::Type *Ty = Addr.getElementType(); |
1339 | uint64_t SourceSizeInBits = CGF.CGM.getDataLayout().getTypeSizeInBits(Ty); |
1340 | if (SourceSizeInBits != AtomicSizeInBits) { |
1341 | Address Tmp = CreateTempAlloca(); |
1342 | CGF.Builder.CreateMemCpy(Tmp, Addr, |
1343 | std::min(AtomicSizeInBits, SourceSizeInBits) / 8); |
1344 | Addr = Tmp; |
1345 | } |
1346 | |
1347 | return emitCastToAtomicIntPointer(Addr); |
1348 | } |
1349 | |
1350 | RValue AtomicInfo::convertAtomicTempToRValue(Address addr, |
1351 | AggValueSlot resultSlot, |
1352 | SourceLocation loc, |
1353 | bool asValue) const { |
1354 | if (LVal.isSimple()) { |
1355 | if (EvaluationKind == TEK_Aggregate) |
1356 | return resultSlot.asRValue(); |
1357 | |
1358 | |
1359 | if (hasPadding()) |
1360 | addr = CGF.Builder.CreateStructGEP(addr, 0); |
1361 | |
1362 | |
1363 | |
1364 | return CGF.convertTempToRValue(addr, getValueType(), loc); |
1365 | } |
1366 | if (!asValue) |
1367 | |
1368 | return RValue::get(CGF.Builder.CreateLoad(addr)); |
1369 | if (LVal.isBitField()) |
1370 | return CGF.EmitLoadOfBitfieldLValue( |
1371 | LValue::MakeBitfield(addr, LVal.getBitFieldInfo(), LVal.getType(), |
1372 | LVal.getBaseInfo(), TBAAAccessInfo()), loc); |
1373 | if (LVal.isVectorElt()) |
1374 | return CGF.EmitLoadOfLValue( |
1375 | LValue::MakeVectorElt(addr, LVal.getVectorIdx(), LVal.getType(), |
1376 | LVal.getBaseInfo(), TBAAAccessInfo()), loc); |
1377 | assert(LVal.isExtVectorElt()); |
1378 | return CGF.EmitLoadOfExtVectorElementLValue(LValue::MakeExtVectorElt( |
1379 | addr, LVal.getExtVectorElts(), LVal.getType(), |
1380 | LVal.getBaseInfo(), TBAAAccessInfo())); |
1381 | } |
1382 | |
1383 | RValue AtomicInfo::ConvertIntToValueOrAtomic(llvm::Value *IntVal, |
1384 | AggValueSlot ResultSlot, |
1385 | SourceLocation Loc, |
1386 | bool AsValue) const { |
1387 | |
1388 | (0) . __assert_fail ("IntVal->getType()->isIntegerTy() && \"Expected integer value\"", "/home/seafit/code_projects/clang_source/clang/lib/CodeGen/CGAtomic.cpp", 1388, __PRETTY_FUNCTION__))" file_link="../../../include/assert.h.html#88" macro="true">assert(IntVal->getType()->isIntegerTy() && "Expected integer value"); |
1389 | if (getEvaluationKind() == TEK_Scalar && |
1390 | (((!LVal.isBitField() || |
1391 | LVal.getBitFieldInfo().Size == ValueSizeInBits) && |
1392 | !hasPadding()) || |
1393 | !AsValue)) { |
1394 | auto *ValTy = AsValue |
1395 | ? CGF.ConvertTypeForMem(ValueTy) |
1396 | : getAtomicAddress().getType()->getPointerElementType(); |
1397 | if (ValTy->isIntegerTy()) { |
1398 | (0) . __assert_fail ("IntVal->getType() == ValTy && \"Different integer types.\"", "/home/seafit/code_projects/clang_source/clang/lib/CodeGen/CGAtomic.cpp", 1398, __PRETTY_FUNCTION__))" file_link="../../../include/assert.h.html#88" macro="true">assert(IntVal->getType() == ValTy && "Different integer types."); |
1399 | return RValue::get(CGF.EmitFromMemory(IntVal, ValueTy)); |
1400 | } else if (ValTy->isPointerTy()) |
1401 | return RValue::get(CGF.Builder.CreateIntToPtr(IntVal, ValTy)); |
1402 | else if (llvm::CastInst::isBitCastable(IntVal->getType(), ValTy)) |
1403 | return RValue::get(CGF.Builder.CreateBitCast(IntVal, ValTy)); |
1404 | } |
1405 | |
1406 | |
1407 | |
1408 | Address Temp = Address::invalid(); |
1409 | bool TempIsVolatile = false; |
1410 | if (AsValue && getEvaluationKind() == TEK_Aggregate) { |
1411 | assert(!ResultSlot.isIgnored()); |
1412 | Temp = ResultSlot.getAddress(); |
1413 | TempIsVolatile = ResultSlot.isVolatile(); |
1414 | } else { |
1415 | Temp = CreateTempAlloca(); |
1416 | } |
1417 | |
1418 | |
1419 | Address CastTemp = emitCastToAtomicIntPointer(Temp); |
1420 | CGF.Builder.CreateStore(IntVal, CastTemp) |
1421 | ->setVolatile(TempIsVolatile); |
1422 | |
1423 | return convertAtomicTempToRValue(Temp, ResultSlot, Loc, AsValue); |
1424 | } |
1425 | |
1426 | void AtomicInfo::EmitAtomicLoadLibcall(llvm::Value *AddForLoaded, |
1427 | llvm::AtomicOrdering AO, bool) { |
1428 | |
1429 | CallArgList Args; |
1430 | Args.add(RValue::get(getAtomicSizeValue()), CGF.getContext().getSizeType()); |
1431 | Args.add(RValue::get(CGF.EmitCastToVoidPtr(getAtomicPointer())), |
1432 | CGF.getContext().VoidPtrTy); |
1433 | Args.add(RValue::get(CGF.EmitCastToVoidPtr(AddForLoaded)), |
1434 | CGF.getContext().VoidPtrTy); |
1435 | Args.add( |
1436 | RValue::get(llvm::ConstantInt::get(CGF.IntTy, (int)llvm::toCABI(AO))), |
1437 | CGF.getContext().IntTy); |
1438 | emitAtomicLibcall(CGF, "__atomic_load", CGF.getContext().VoidTy, Args); |
1439 | } |
1440 | |
1441 | llvm::Value *AtomicInfo::EmitAtomicLoadOp(llvm::AtomicOrdering AO, |
1442 | bool IsVolatile) { |
1443 | |
1444 | Address Addr = getAtomicAddressAsAtomicIntPointer(); |
1445 | llvm::LoadInst *Load = CGF.Builder.CreateLoad(Addr, "atomic-load"); |
1446 | Load->setAtomic(AO); |
1447 | |
1448 | |
1449 | if (IsVolatile) |
1450 | Load->setVolatile(true); |
1451 | CGF.CGM.DecorateInstructionWithTBAA(Load, LVal.getTBAAInfo()); |
1452 | return Load; |
1453 | } |
1454 | |
1455 | |
1456 | |
1457 | |
1458 | bool CodeGenFunction::LValueIsSuitableForInlineAtomic(LValue LV) { |
1459 | if (!CGM.getCodeGenOpts().MSVolatile) return false; |
1460 | AtomicInfo AI(*this, LV); |
1461 | bool IsVolatile = LV.isVolatile() || hasVolatileMember(LV.getType()); |
1462 | |
1463 | bool AtomicIsInline = !AI.shouldUseLibcall(); |
1464 | |
1465 | if (getContext().getTypeSize(LV.getType()) > |
1466 | getContext().getTypeSize(getContext().getIntPtrType())) |
1467 | return false; |
1468 | return IsVolatile && AtomicIsInline; |
1469 | } |
1470 | |
1471 | RValue CodeGenFunction::EmitAtomicLoad(LValue LV, SourceLocation SL, |
1472 | AggValueSlot Slot) { |
1473 | llvm::AtomicOrdering AO; |
1474 | bool IsVolatile = LV.isVolatileQualified(); |
1475 | if (LV.getType()->isAtomicType()) { |
1476 | AO = llvm::AtomicOrdering::SequentiallyConsistent; |
1477 | } else { |
1478 | AO = llvm::AtomicOrdering::Acquire; |
1479 | IsVolatile = true; |
1480 | } |
1481 | return EmitAtomicLoad(LV, SL, AO, IsVolatile, Slot); |
1482 | } |
1483 | |
1484 | RValue AtomicInfo::EmitAtomicLoad(AggValueSlot ResultSlot, SourceLocation Loc, |
1485 | bool AsValue, llvm::AtomicOrdering AO, |
1486 | bool IsVolatile) { |
1487 | |
1488 | if (shouldUseLibcall()) { |
1489 | Address TempAddr = Address::invalid(); |
1490 | if (LVal.isSimple() && !ResultSlot.isIgnored()) { |
1491 | assert(getEvaluationKind() == TEK_Aggregate); |
1492 | TempAddr = ResultSlot.getAddress(); |
1493 | } else |
1494 | TempAddr = CreateTempAlloca(); |
1495 | |
1496 | EmitAtomicLoadLibcall(TempAddr.getPointer(), AO, IsVolatile); |
1497 | |
1498 | |
1499 | |
1500 | return convertAtomicTempToRValue(TempAddr, ResultSlot, Loc, AsValue); |
1501 | } |
1502 | |
1503 | |
1504 | auto *Load = EmitAtomicLoadOp(AO, IsVolatile); |
1505 | |
1506 | |
1507 | if (getEvaluationKind() == TEK_Aggregate && ResultSlot.isIgnored()) |
1508 | return RValue::getAggregate(Address::invalid(), false); |
1509 | |
1510 | |
1511 | |
1512 | return ConvertIntToValueOrAtomic(Load, ResultSlot, Loc, AsValue); |
1513 | } |
1514 | |
1515 | |
1516 | |
1517 | RValue CodeGenFunction::EmitAtomicLoad(LValue src, SourceLocation loc, |
1518 | llvm::AtomicOrdering AO, bool IsVolatile, |
1519 | AggValueSlot resultSlot) { |
1520 | AtomicInfo Atomics(*this, src); |
1521 | return Atomics.EmitAtomicLoad(resultSlot, loc, , AO, |
1522 | IsVolatile); |
1523 | } |
1524 | |
1525 | |
1526 | |
1527 | void AtomicInfo::emitCopyIntoMemory(RValue rvalue) const { |
1528 | assert(LVal.isSimple()); |
1529 | |
1530 | |
1531 | |
1532 | if (rvalue.isAggregate()) { |
1533 | LValue Dest = CGF.MakeAddrLValue(getAtomicAddress(), getAtomicType()); |
1534 | LValue Src = CGF.MakeAddrLValue(rvalue.getAggregateAddress(), |
1535 | getAtomicType()); |
1536 | bool IsVolatile = rvalue.isVolatileQualified() || |
1537 | LVal.isVolatileQualified(); |
1538 | CGF.EmitAggregateCopy(Dest, Src, getAtomicType(), |
1539 | AggValueSlot::DoesNotOverlap, IsVolatile); |
1540 | return; |
1541 | } |
1542 | |
1543 | |
1544 | |
1545 | |
1546 | emitMemSetZeroIfNecessary(); |
1547 | |
1548 | |
1549 | LValue TempLVal = projectValue(); |
1550 | |
1551 | |
1552 | if (rvalue.isScalar()) { |
1553 | CGF.EmitStoreOfScalar(rvalue.getScalarVal(), TempLVal, true); |
1554 | } else { |
1555 | CGF.EmitStoreOfComplex(rvalue.getComplexVal(), TempLVal, true); |
1556 | } |
1557 | } |
1558 | |
1559 | |
1560 | |
1561 | |
1562 | Address AtomicInfo::materializeRValue(RValue rvalue) const { |
1563 | |
1564 | |
1565 | if (rvalue.isAggregate()) |
1566 | return rvalue.getAggregateAddress(); |
1567 | |
1568 | |
1569 | LValue TempLV = CGF.MakeAddrLValue(CreateTempAlloca(), getAtomicType()); |
1570 | AtomicInfo Atomics(CGF, TempLV); |
1571 | Atomics.emitCopyIntoMemory(rvalue); |
1572 | return TempLV.getAddress(); |
1573 | } |
1574 | |
1575 | llvm::Value *AtomicInfo::convertRValueToInt(RValue RVal) const { |
1576 | |
1577 | |
1578 | if (RVal.isScalar() && (!hasPadding() || !LVal.isSimple())) { |
1579 | llvm::Value *Value = RVal.getScalarVal(); |
1580 | if (isa<llvm::IntegerType>(Value->getType())) |
1581 | return CGF.EmitToMemory(Value, ValueTy); |
1582 | else { |
1583 | llvm::IntegerType *InputIntTy = llvm::IntegerType::get( |
1584 | CGF.getLLVMContext(), |
1585 | LVal.isSimple() ? getValueSizeInBits() : getAtomicSizeInBits()); |
1586 | if (isa<llvm::PointerType>(Value->getType())) |
1587 | return CGF.Builder.CreatePtrToInt(Value, InputIntTy); |
1588 | else if (llvm::BitCastInst::isBitCastable(Value->getType(), InputIntTy)) |
1589 | return CGF.Builder.CreateBitCast(Value, InputIntTy); |
1590 | } |
1591 | } |
1592 | |
1593 | |
1594 | Address Addr = materializeRValue(RVal); |
1595 | |
1596 | |
1597 | Addr = emitCastToAtomicIntPointer(Addr); |
1598 | return CGF.Builder.CreateLoad(Addr); |
1599 | } |
1600 | |
1601 | std::pair<llvm::Value *, llvm::Value *> AtomicInfo::EmitAtomicCompareExchangeOp( |
1602 | llvm::Value *ExpectedVal, llvm::Value *DesiredVal, |
1603 | llvm::AtomicOrdering Success, llvm::AtomicOrdering Failure, bool IsWeak) { |
1604 | |
1605 | Address Addr = getAtomicAddressAsAtomicIntPointer(); |
1606 | auto *Inst = CGF.Builder.CreateAtomicCmpXchg(Addr.getPointer(), |
1607 | ExpectedVal, DesiredVal, |
1608 | Success, Failure); |
1609 | |
1610 | Inst->setVolatile(LVal.isVolatileQualified()); |
1611 | Inst->setWeak(IsWeak); |
1612 | |
1613 | |
1614 | auto *PreviousVal = CGF.Builder.CreateExtractValue(Inst, ); |
1615 | auto *SuccessFailureVal = CGF.Builder.CreateExtractValue(Inst, ); |
1616 | return std::make_pair(PreviousVal, SuccessFailureVal); |
1617 | } |
1618 | |
1619 | llvm::Value * |
1620 | AtomicInfo::EmitAtomicCompareExchangeLibcall(llvm::Value *ExpectedAddr, |
1621 | llvm::Value *DesiredAddr, |
1622 | llvm::AtomicOrdering Success, |
1623 | llvm::AtomicOrdering Failure) { |
1624 | |
1625 | |
1626 | CallArgList Args; |
1627 | Args.add(RValue::get(getAtomicSizeValue()), CGF.getContext().getSizeType()); |
1628 | Args.add(RValue::get(CGF.EmitCastToVoidPtr(getAtomicPointer())), |
1629 | CGF.getContext().VoidPtrTy); |
1630 | Args.add(RValue::get(CGF.EmitCastToVoidPtr(ExpectedAddr)), |
1631 | CGF.getContext().VoidPtrTy); |
1632 | Args.add(RValue::get(CGF.EmitCastToVoidPtr(DesiredAddr)), |
1633 | CGF.getContext().VoidPtrTy); |
1634 | Args.add(RValue::get( |
1635 | llvm::ConstantInt::get(CGF.IntTy, (int)llvm::toCABI(Success))), |
1636 | CGF.getContext().IntTy); |
1637 | Args.add(RValue::get( |
1638 | llvm::ConstantInt::get(CGF.IntTy, (int)llvm::toCABI(Failure))), |
1639 | CGF.getContext().IntTy); |
1640 | auto SuccessFailureRVal = emitAtomicLibcall(CGF, "__atomic_compare_exchange", |
1641 | CGF.getContext().BoolTy, Args); |
1642 | |
1643 | return SuccessFailureRVal.getScalarVal(); |
1644 | } |
1645 | |
1646 | std::pair<RValue, llvm::Value *> AtomicInfo::EmitAtomicCompareExchange( |
1647 | RValue Expected, RValue Desired, llvm::AtomicOrdering Success, |
1648 | llvm::AtomicOrdering Failure, bool IsWeak) { |
1649 | if (isStrongerThan(Failure, Success)) |
1650 | |
1651 | |
1652 | Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(Success); |
1653 | |
1654 | |
1655 | if (shouldUseLibcall()) { |
1656 | |
1657 | Address ExpectedAddr = materializeRValue(Expected); |
1658 | Address DesiredAddr = materializeRValue(Desired); |
1659 | auto *Res = EmitAtomicCompareExchangeLibcall(ExpectedAddr.getPointer(), |
1660 | DesiredAddr.getPointer(), |
1661 | Success, Failure); |
1662 | return std::make_pair( |
1663 | convertAtomicTempToRValue(ExpectedAddr, AggValueSlot::ignored(), |
1664 | SourceLocation(), ), |
1665 | Res); |
1666 | } |
1667 | |
1668 | |
1669 | |
1670 | auto *ExpectedVal = convertRValueToInt(Expected); |
1671 | auto *DesiredVal = convertRValueToInt(Desired); |
1672 | auto Res = EmitAtomicCompareExchangeOp(ExpectedVal, DesiredVal, Success, |
1673 | Failure, IsWeak); |
1674 | return std::make_pair( |
1675 | ConvertIntToValueOrAtomic(Res.first, AggValueSlot::ignored(), |
1676 | SourceLocation(), ), |
1677 | Res.second); |
1678 | } |
1679 | |
1680 | static void |
1681 | EmitAtomicUpdateValue(CodeGenFunction &CGF, AtomicInfo &Atomics, RValue OldRVal, |
1682 | const llvm::function_ref<RValue(RValue)> &UpdateOp, |
1683 | Address DesiredAddr) { |
1684 | RValue UpRVal; |
1685 | LValue AtomicLVal = Atomics.getAtomicLValue(); |
1686 | LValue DesiredLVal; |
1687 | if (AtomicLVal.isSimple()) { |
1688 | UpRVal = OldRVal; |
1689 | DesiredLVal = CGF.MakeAddrLValue(DesiredAddr, AtomicLVal.getType()); |
1690 | } else { |
1691 | |
1692 | Address Ptr = Atomics.materializeRValue(OldRVal); |
1693 | LValue UpdateLVal; |
1694 | if (AtomicLVal.isBitField()) { |
1695 | UpdateLVal = |
1696 | LValue::MakeBitfield(Ptr, AtomicLVal.getBitFieldInfo(), |
1697 | AtomicLVal.getType(), |
1698 | AtomicLVal.getBaseInfo(), |
1699 | AtomicLVal.getTBAAInfo()); |
1700 | DesiredLVal = |
1701 | LValue::MakeBitfield(DesiredAddr, AtomicLVal.getBitFieldInfo(), |
1702 | AtomicLVal.getType(), AtomicLVal.getBaseInfo(), |
1703 | AtomicLVal.getTBAAInfo()); |
1704 | } else if (AtomicLVal.isVectorElt()) { |
1705 | UpdateLVal = LValue::MakeVectorElt(Ptr, AtomicLVal.getVectorIdx(), |
1706 | AtomicLVal.getType(), |
1707 | AtomicLVal.getBaseInfo(), |
1708 | AtomicLVal.getTBAAInfo()); |
1709 | DesiredLVal = LValue::MakeVectorElt( |
1710 | DesiredAddr, AtomicLVal.getVectorIdx(), AtomicLVal.getType(), |
1711 | AtomicLVal.getBaseInfo(), AtomicLVal.getTBAAInfo()); |
1712 | } else { |
1713 | assert(AtomicLVal.isExtVectorElt()); |
1714 | UpdateLVal = LValue::MakeExtVectorElt(Ptr, AtomicLVal.getExtVectorElts(), |
1715 | AtomicLVal.getType(), |
1716 | AtomicLVal.getBaseInfo(), |
1717 | AtomicLVal.getTBAAInfo()); |
1718 | DesiredLVal = LValue::MakeExtVectorElt( |
1719 | DesiredAddr, AtomicLVal.getExtVectorElts(), AtomicLVal.getType(), |
1720 | AtomicLVal.getBaseInfo(), AtomicLVal.getTBAAInfo()); |
1721 | } |
1722 | UpRVal = CGF.EmitLoadOfLValue(UpdateLVal, SourceLocation()); |
1723 | } |
1724 | |
1725 | RValue NewRVal = UpdateOp(UpRVal); |
1726 | if (NewRVal.isScalar()) { |
1727 | CGF.EmitStoreThroughLValue(NewRVal, DesiredLVal); |
1728 | } else { |
1729 | assert(NewRVal.isComplex()); |
1730 | CGF.EmitStoreOfComplex(NewRVal.getComplexVal(), DesiredLVal, |
1731 | ); |
1732 | } |
1733 | } |
1734 | |
1735 | void AtomicInfo::EmitAtomicUpdateLibcall( |
1736 | llvm::AtomicOrdering AO, const llvm::function_ref<RValue(RValue)> &UpdateOp, |
1737 | bool IsVolatile) { |
1738 | auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO); |
1739 | |
1740 | Address ExpectedAddr = CreateTempAlloca(); |
1741 | |
1742 | EmitAtomicLoadLibcall(ExpectedAddr.getPointer(), AO, IsVolatile); |
1743 | auto *ContBB = CGF.createBasicBlock("atomic_cont"); |
1744 | auto *ExitBB = CGF.createBasicBlock("atomic_exit"); |
1745 | CGF.EmitBlock(ContBB); |
1746 | Address DesiredAddr = CreateTempAlloca(); |
1747 | if ((LVal.isBitField() && BFI.Size != ValueSizeInBits) || |
1748 | requiresMemSetZero(getAtomicAddress().getElementType())) { |
1749 | auto *OldVal = CGF.Builder.CreateLoad(ExpectedAddr); |
1750 | CGF.Builder.CreateStore(OldVal, DesiredAddr); |
1751 | } |
1752 | auto OldRVal = convertAtomicTempToRValue(ExpectedAddr, |
1753 | AggValueSlot::ignored(), |
1754 | SourceLocation(), ); |
1755 | EmitAtomicUpdateValue(CGF, *this, OldRVal, UpdateOp, DesiredAddr); |
1756 | auto *Res = |
1757 | EmitAtomicCompareExchangeLibcall(ExpectedAddr.getPointer(), |
1758 | DesiredAddr.getPointer(), |
1759 | AO, Failure); |
1760 | CGF.Builder.CreateCondBr(Res, ExitBB, ContBB); |
1761 | CGF.EmitBlock(ExitBB, ); |
1762 | } |
1763 | |
1764 | void AtomicInfo::EmitAtomicUpdateOp( |
1765 | llvm::AtomicOrdering AO, const llvm::function_ref<RValue(RValue)> &UpdateOp, |
1766 | bool IsVolatile) { |
1767 | auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO); |
1768 | |
1769 | |
1770 | auto *OldVal = EmitAtomicLoadOp(AO, IsVolatile); |
1771 | |
1772 | auto *ContBB = CGF.createBasicBlock("atomic_cont"); |
1773 | auto *ExitBB = CGF.createBasicBlock("atomic_exit"); |
1774 | auto *CurBB = CGF.Builder.GetInsertBlock(); |
1775 | CGF.EmitBlock(ContBB); |
1776 | llvm::PHINode *PHI = CGF.Builder.CreatePHI(OldVal->getType(), |
1777 | ); |
1778 | PHI->addIncoming(OldVal, CurBB); |
1779 | Address NewAtomicAddr = CreateTempAlloca(); |
1780 | Address NewAtomicIntAddr = emitCastToAtomicIntPointer(NewAtomicAddr); |
1781 | if ((LVal.isBitField() && BFI.Size != ValueSizeInBits) || |
1782 | requiresMemSetZero(getAtomicAddress().getElementType())) { |
1783 | CGF.Builder.CreateStore(PHI, NewAtomicIntAddr); |
1784 | } |
1785 | auto OldRVal = ConvertIntToValueOrAtomic(PHI, AggValueSlot::ignored(), |
1786 | SourceLocation(), ); |
1787 | EmitAtomicUpdateValue(CGF, *this, OldRVal, UpdateOp, NewAtomicAddr); |
1788 | auto *DesiredVal = CGF.Builder.CreateLoad(NewAtomicIntAddr); |
1789 | |
1790 | auto Res = EmitAtomicCompareExchangeOp(PHI, DesiredVal, AO, Failure); |
1791 | PHI->addIncoming(Res.first, CGF.Builder.GetInsertBlock()); |
1792 | CGF.Builder.CreateCondBr(Res.second, ExitBB, ContBB); |
1793 | CGF.EmitBlock(ExitBB, ); |
1794 | } |
1795 | |
1796 | static void EmitAtomicUpdateValue(CodeGenFunction &CGF, AtomicInfo &Atomics, |
1797 | RValue UpdateRVal, Address DesiredAddr) { |
1798 | LValue AtomicLVal = Atomics.getAtomicLValue(); |
1799 | LValue DesiredLVal; |
1800 | |
1801 | if (AtomicLVal.isBitField()) { |
1802 | DesiredLVal = |
1803 | LValue::MakeBitfield(DesiredAddr, AtomicLVal.getBitFieldInfo(), |
1804 | AtomicLVal.getType(), AtomicLVal.getBaseInfo(), |
1805 | AtomicLVal.getTBAAInfo()); |
1806 | } else if (AtomicLVal.isVectorElt()) { |
1807 | DesiredLVal = |
1808 | LValue::MakeVectorElt(DesiredAddr, AtomicLVal.getVectorIdx(), |
1809 | AtomicLVal.getType(), AtomicLVal.getBaseInfo(), |
1810 | AtomicLVal.getTBAAInfo()); |
1811 | } else { |
1812 | assert(AtomicLVal.isExtVectorElt()); |
1813 | DesiredLVal = LValue::MakeExtVectorElt( |
1814 | DesiredAddr, AtomicLVal.getExtVectorElts(), AtomicLVal.getType(), |
1815 | AtomicLVal.getBaseInfo(), AtomicLVal.getTBAAInfo()); |
1816 | } |
1817 | |
1818 | assert(UpdateRVal.isScalar()); |
1819 | CGF.EmitStoreThroughLValue(UpdateRVal, DesiredLVal); |
1820 | } |
1821 | |
1822 | void AtomicInfo::EmitAtomicUpdateLibcall(llvm::AtomicOrdering AO, |
1823 | RValue UpdateRVal, bool IsVolatile) { |
1824 | auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO); |
1825 | |
1826 | Address ExpectedAddr = CreateTempAlloca(); |
1827 | |
1828 | EmitAtomicLoadLibcall(ExpectedAddr.getPointer(), AO, IsVolatile); |
1829 | auto *ContBB = CGF.createBasicBlock("atomic_cont"); |
1830 | auto *ExitBB = CGF.createBasicBlock("atomic_exit"); |
1831 | CGF.EmitBlock(ContBB); |
1832 | Address DesiredAddr = CreateTempAlloca(); |
1833 | if ((LVal.isBitField() && BFI.Size != ValueSizeInBits) || |
1834 | requiresMemSetZero(getAtomicAddress().getElementType())) { |
1835 | auto *OldVal = CGF.Builder.CreateLoad(ExpectedAddr); |
1836 | CGF.Builder.CreateStore(OldVal, DesiredAddr); |
1837 | } |
1838 | EmitAtomicUpdateValue(CGF, *this, UpdateRVal, DesiredAddr); |
1839 | auto *Res = |
1840 | EmitAtomicCompareExchangeLibcall(ExpectedAddr.getPointer(), |
1841 | DesiredAddr.getPointer(), |
1842 | AO, Failure); |
1843 | CGF.Builder.CreateCondBr(Res, ExitBB, ContBB); |
1844 | CGF.EmitBlock(ExitBB, ); |
1845 | } |
1846 | |
1847 | void AtomicInfo::EmitAtomicUpdateOp(llvm::AtomicOrdering AO, RValue UpdateRVal, |
1848 | bool IsVolatile) { |
1849 | auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO); |
1850 | |
1851 | |
1852 | auto *OldVal = EmitAtomicLoadOp(AO, IsVolatile); |
1853 | |
1854 | auto *ContBB = CGF.createBasicBlock("atomic_cont"); |
1855 | auto *ExitBB = CGF.createBasicBlock("atomic_exit"); |
1856 | auto *CurBB = CGF.Builder.GetInsertBlock(); |
1857 | CGF.EmitBlock(ContBB); |
1858 | llvm::PHINode *PHI = CGF.Builder.CreatePHI(OldVal->getType(), |
1859 | ); |
1860 | PHI->addIncoming(OldVal, CurBB); |
1861 | Address NewAtomicAddr = CreateTempAlloca(); |
1862 | Address NewAtomicIntAddr = emitCastToAtomicIntPointer(NewAtomicAddr); |
1863 | if ((LVal.isBitField() && BFI.Size != ValueSizeInBits) || |
1864 | requiresMemSetZero(getAtomicAddress().getElementType())) { |
1865 | CGF.Builder.CreateStore(PHI, NewAtomicIntAddr); |
1866 | } |
1867 | EmitAtomicUpdateValue(CGF, *this, UpdateRVal, NewAtomicAddr); |
1868 | auto *DesiredVal = CGF.Builder.CreateLoad(NewAtomicIntAddr); |
1869 | |
1870 | auto Res = EmitAtomicCompareExchangeOp(PHI, DesiredVal, AO, Failure); |
1871 | PHI->addIncoming(Res.first, CGF.Builder.GetInsertBlock()); |
1872 | CGF.Builder.CreateCondBr(Res.second, ExitBB, ContBB); |
1873 | CGF.EmitBlock(ExitBB, ); |
1874 | } |
1875 | |
1876 | void AtomicInfo::EmitAtomicUpdate( |
1877 | llvm::AtomicOrdering AO, const llvm::function_ref<RValue(RValue)> &UpdateOp, |
1878 | bool IsVolatile) { |
1879 | if (shouldUseLibcall()) { |
1880 | EmitAtomicUpdateLibcall(AO, UpdateOp, IsVolatile); |
1881 | } else { |
1882 | EmitAtomicUpdateOp(AO, UpdateOp, IsVolatile); |
1883 | } |
1884 | } |
1885 | |
1886 | void AtomicInfo::EmitAtomicUpdate(llvm::AtomicOrdering AO, RValue UpdateRVal, |
1887 | bool IsVolatile) { |
1888 | if (shouldUseLibcall()) { |
1889 | EmitAtomicUpdateLibcall(AO, UpdateRVal, IsVolatile); |
1890 | } else { |
1891 | EmitAtomicUpdateOp(AO, UpdateRVal, IsVolatile); |
1892 | } |
1893 | } |
1894 | |
1895 | void CodeGenFunction::EmitAtomicStore(RValue rvalue, LValue lvalue, |
1896 | bool isInit) { |
1897 | bool IsVolatile = lvalue.isVolatileQualified(); |
1898 | llvm::AtomicOrdering AO; |
1899 | if (lvalue.getType()->isAtomicType()) { |
1900 | AO = llvm::AtomicOrdering::SequentiallyConsistent; |
1901 | } else { |
1902 | AO = llvm::AtomicOrdering::Release; |
1903 | IsVolatile = true; |
1904 | } |
1905 | return EmitAtomicStore(rvalue, lvalue, AO, IsVolatile, isInit); |
1906 | } |
1907 | |
1908 | |
1909 | |
1910 | |
1911 | |
1912 | |
1913 | void CodeGenFunction::EmitAtomicStore(RValue rvalue, LValue dest, |
1914 | llvm::AtomicOrdering AO, bool IsVolatile, |
1915 | bool isInit) { |
1916 | |
1917 | |
1918 | assert(!rvalue.isAggregate() || |
1919 | rvalue.getAggregateAddress().getElementType() |
1920 | == dest.getAddress().getElementType()); |
1921 | |
1922 | AtomicInfo atomics(*this, dest); |
1923 | LValue LVal = atomics.getAtomicLValue(); |
1924 | |
1925 | |
1926 | if (LVal.isSimple()) { |
1927 | if (isInit) { |
1928 | atomics.emitCopyIntoMemory(rvalue); |
1929 | return; |
1930 | } |
1931 | |
1932 | |
1933 | if (atomics.shouldUseLibcall()) { |
1934 | |
1935 | Address srcAddr = atomics.materializeRValue(rvalue); |
1936 | |
1937 | |
1938 | CallArgList args; |
1939 | args.add(RValue::get(atomics.getAtomicSizeValue()), |
1940 | getContext().getSizeType()); |
1941 | args.add(RValue::get(EmitCastToVoidPtr(atomics.getAtomicPointer())), |
1942 | getContext().VoidPtrTy); |
1943 | args.add(RValue::get(EmitCastToVoidPtr(srcAddr.getPointer())), |
1944 | getContext().VoidPtrTy); |
1945 | args.add( |
1946 | RValue::get(llvm::ConstantInt::get(IntTy, (int)llvm::toCABI(AO))), |
1947 | getContext().IntTy); |
1948 | emitAtomicLibcall(*this, "__atomic_store", getContext().VoidTy, args); |
1949 | return; |
1950 | } |
1951 | |
1952 | |
1953 | llvm::Value *intValue = atomics.convertRValueToInt(rvalue); |
1954 | |
1955 | |
1956 | Address addr = |
1957 | atomics.emitCastToAtomicIntPointer(atomics.getAtomicAddress()); |
1958 | intValue = Builder.CreateIntCast( |
1959 | intValue, addr.getElementType(), ); |
1960 | llvm::StoreInst *store = Builder.CreateStore(intValue, addr); |
1961 | |
1962 | |
1963 | if (!isInit) |
1964 | store->setAtomic(AO); |
1965 | |
1966 | |
1967 | if (IsVolatile) |
1968 | store->setVolatile(true); |
1969 | CGM.DecorateInstructionWithTBAA(store, dest.getTBAAInfo()); |
1970 | return; |
1971 | } |
1972 | |
1973 | |
1974 | atomics.EmitAtomicUpdate(AO, rvalue, IsVolatile); |
1975 | } |
1976 | |
1977 | |
1978 | |
1979 | std::pair<RValue, llvm::Value *> CodeGenFunction::EmitAtomicCompareExchange( |
1980 | LValue Obj, RValue Expected, RValue Desired, SourceLocation Loc, |
1981 | llvm::AtomicOrdering Success, llvm::AtomicOrdering Failure, bool IsWeak, |
1982 | AggValueSlot Slot) { |
1983 | |
1984 | |
1985 | assert(!Expected.isAggregate() || |
1986 | Expected.getAggregateAddress().getElementType() == |
1987 | Obj.getAddress().getElementType()); |
1988 | assert(!Desired.isAggregate() || |
1989 | Desired.getAggregateAddress().getElementType() == |
1990 | Obj.getAddress().getElementType()); |
1991 | AtomicInfo Atomics(*this, Obj); |
1992 | |
1993 | return Atomics.EmitAtomicCompareExchange(Expected, Desired, Success, Failure, |
1994 | IsWeak); |
1995 | } |
1996 | |
1997 | void CodeGenFunction::EmitAtomicUpdate( |
1998 | LValue LVal, llvm::AtomicOrdering AO, |
1999 | const llvm::function_ref<RValue(RValue)> &UpdateOp, bool IsVolatile) { |
2000 | AtomicInfo Atomics(*this, LVal); |
2001 | Atomics.EmitAtomicUpdate(AO, UpdateOp, IsVolatile); |
2002 | } |
2003 | |
2004 | void CodeGenFunction::EmitAtomicInit(Expr *init, LValue dest) { |
2005 | AtomicInfo atomics(*this, dest); |
2006 | |
2007 | switch (atomics.getEvaluationKind()) { |
2008 | case TEK_Scalar: { |
2009 | llvm::Value *value = EmitScalarExpr(init); |
2010 | atomics.emitCopyIntoMemory(RValue::get(value)); |
2011 | return; |
2012 | } |
2013 | |
2014 | case TEK_Complex: { |
2015 | ComplexPairTy value = EmitComplexExpr(init); |
2016 | atomics.emitCopyIntoMemory(RValue::getComplex(value)); |
2017 | return; |
2018 | } |
2019 | |
2020 | case TEK_Aggregate: { |
2021 | |
2022 | |
2023 | bool Zeroed = false; |
2024 | if (!init->getType()->isAtomicType()) { |
2025 | Zeroed = atomics.emitMemSetZeroIfNecessary(); |
2026 | dest = atomics.projectValue(); |
2027 | } |
2028 | |
2029 | |
2030 | AggValueSlot slot = AggValueSlot::forLValue(dest, |
2031 | AggValueSlot::IsNotDestructed, |
2032 | AggValueSlot::DoesNotNeedGCBarriers, |
2033 | AggValueSlot::IsNotAliased, |
2034 | AggValueSlot::DoesNotOverlap, |
2035 | Zeroed ? AggValueSlot::IsZeroed : |
2036 | AggValueSlot::IsNotZeroed); |
2037 | |
2038 | EmitAggExpr(init, slot); |
2039 | return; |
2040 | } |
2041 | } |
2042 | llvm_unreachable("bad evaluation kind"); |
2043 | } |
2044 | |