Implement the missing pieces needed to support libstdc++4.7's <atomic>:
__atomic_test_and_set, __atomic_clear, plus a pile of undocumented __GCC_*
predefined macros.
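
A minimal usage sketch (not part of the patch; it assumes only the usual
__ATOMIC_* memory-order macros): __atomic_test_and_set returns the previous
state of a byte-sized flag and __atomic_clear resets it, which is what
libstdc++ needs to implement std::atomic_flag.

    static volatile char flag;   // any byte-sized object works

    void lock() {
      // Returns true while the flag was already set, so spin until we
      // observe it clear and set it ourselves.
      while (__atomic_test_and_set(&flag, __ATOMIC_ACQUIRE))
        ;
    }

    void unlock() {
      __atomic_clear(&flag, __ATOMIC_RELEASE);
    }

The new __GCC_* macros (for example __GCC_ATOMIC_INT_LOCK_FREE) are the hooks
libstdc++'s <atomic> uses to define ATOMIC_*_LOCK_FREE and friends.
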
Implement library fallback for __atomic_is_lock_free and
__c11_atomic_is_lock_free, and implement __atomic_always_lock_free.
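
The distinction, roughly (a sketch, not part of the patch; the size 16 is
only illustrative): __atomic_always_lock_free must fold to a constant, while
__atomic_is_lock_free may defer to the runtime library when the answer is not
known at compile time.

    // Folds to a constant; a null object pointer means "assume a suitably
    // aligned object", as _Atomic(T) always is.
    constexpr bool small_ok = __atomic_always_lock_free(sizeof(int), 0);

    bool big_ok(void *p) {
      // When the answer is not a compile-time constant, this now emits a
      // call to the __atomic_is_lock_free library routine.
      return __atomic_is_lock_free(16, p);
    }
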
Contrary to their documentation, GCC's __atomic_fetch_add family of builtins
does not multiply the operand by sizeof(T) when operating on a pointer type,
and libstdc++ relies on this quirk. Remove the sizeof(T) scaling for all but
the __c11_atomic_fetch_add and __c11_atomic_fetch_sub builtins.
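
A sketch of the difference as it looks to a caller (the explicit scaling in
the first call is what libstdc++'s atomic<T*>::fetch_add does internally):

    void bump(int **pp, _Atomic(int *) *qp) {
      // GCC-style builtin: the addend is in bytes, so advancing an int* by
      // one element means passing sizeof(int) explicitly.
      __atomic_fetch_add(pp, sizeof(int), __ATOMIC_SEQ_CST);

      // C11-style builtin: the addend is in elements, like ordinary pointer
      // arithmetic, so this too advances by one int.
      __c11_atomic_fetch_add(qp, 1, __ATOMIC_SEQ_CST);
    }
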
Contrary to their documentation, __atomic_test_and_set and __atomic_clear
take a first argument of type 'volatile void *', not 'void *' or 'bool *',
and __atomic_is_lock_free and __atomic_always_lock_free have an argument
of type 'const volatile void *', not 'void *'.
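
In other words, the signatures as accepted are effectively (these are
builtins, so the prototypes below are only a shorthand):

    bool __atomic_test_and_set(volatile void *ptr, int memorder);
    void __atomic_clear(volatile void *ptr, int memorder);
    bool __atomic_is_lock_free(size_t size, const volatile void *ptr);
    bool __atomic_always_lock_free(size_t size, const volatile void *ptr);
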
With this change, libstdc++4.7's <atomic> passes libc++'s atomic test suite,
except for a couple of libstdc++ bugs and some cases where libc++'s test
suite checks properties that implementations have latitude to vary.
git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@154640 91177308-0d34-0410-b5e6-96231b3b80d8
diff --git a/lib/AST/ExprConstant.cpp b/lib/AST/ExprConstant.cpp
index ce41308..01c9fe7 100644
--- a/lib/AST/ExprConstant.cpp
+++ b/lib/AST/ExprConstant.cpp
@@ -4306,7 +4306,7 @@
}
bool IntExprEvaluator::VisitCallExpr(const CallExpr *E) {
- switch (E->isBuiltinCall()) {
+ switch (unsigned BuiltinOp = E->isBuiltinCall()) {
default:
return ExprEvaluatorBaseTy::VisitCallExpr(E);
@@ -4365,6 +4365,7 @@
return Error(E);
+ case Builtin::BI__atomic_always_lock_free:
case Builtin::BI__atomic_is_lock_free:
case Builtin::BI__c11_atomic_is_lock_free: {
APSInt SizeVal;
@@ -4382,32 +4383,31 @@
// Check power-of-two.
CharUnits Size = CharUnits::fromQuantity(SizeVal.getZExtValue());
- if (!Size.isPowerOfTwo())
-#if 0
- // FIXME: Suppress this folding until the ABI for the promotion width
- // settles.
- return Success(0, E);
-#else
- return Error(E);
-#endif
+ if (Size.isPowerOfTwo()) {
+ // Check against inlining width.
+ unsigned InlineWidthBits =
+ Info.Ctx.getTargetInfo().getMaxAtomicInlineWidth();
+ if (Size <= Info.Ctx.toCharUnitsFromBits(InlineWidthBits)) {
+ if (BuiltinOp == Builtin::BI__c11_atomic_is_lock_free ||
+ Size == CharUnits::One() ||
+ E->getArg(1)->isNullPointerConstant(Info.Ctx,
+ Expr::NPC_NeverValueDependent))
+ // OK, we will inline appropriately-aligned operations of this size,
+ // and _Atomic(T) is appropriately-aligned.
+ return Success(1, E);
-#if 0
- // Check against promotion width.
- // FIXME: Suppress this folding until the ABI for the promotion width
- // settles.
- unsigned PromoteWidthBits =
- Info.Ctx.getTargetInfo().getMaxAtomicPromoteWidth();
- if (Size > Info.Ctx.toCharUnitsFromBits(PromoteWidthBits))
- return Success(0, E);
-#endif
+ QualType PointeeType = E->getArg(1)->IgnoreImpCasts()->getType()->
+ castAs<PointerType>()->getPointeeType();
+ if (!PointeeType->isIncompleteType() &&
+ Info.Ctx.getTypeAlignInChars(PointeeType) >= Size) {
+ // OK, we will inline operations on this object.
+ return Success(1, E);
+ }
+ }
+ }
- // Check against inlining width.
- unsigned InlineWidthBits =
- Info.Ctx.getTargetInfo().getMaxAtomicInlineWidth();
- if (Size <= Info.Ctx.toCharUnitsFromBits(InlineWidthBits))
- return Success(1, E);
-
- return Error(E);
+ return BuiltinOp == Builtin::BI__atomic_always_lock_free ?
+ Success(0, E) : Error(E);
}
}
}
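
For example, a check like the one below now constant-folds on typical targets
(illustrative only; not part of the patch):

    // sizeof(int) is a power of two that fits within the target's inline
    // atomic width, and a null second argument means "assume the object is
    // suitably aligned", so this evaluates to true at compile time.
    static_assert(__atomic_always_lock_free(sizeof(int), 0),
                  "int should always be lock-free");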