Add a __sync_swap builtin to fill out the rest of the __sync builtins.
Patch by Dave Zarzycki!
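
For reference, a minimal usage sketch (the variable names are illustrative):

  int old = __sync_swap(&val, new_val);  /* atomically store new_val, return the prior value */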


git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@129189 91177308-0d34-0410-b5e6-96231b3b80d8
diff --git a/docs/LanguageExtensions.html b/docs/LanguageExtensions.html
index 1cd77a1..b16c38c 100644
--- a/docs/LanguageExtensions.html
+++ b/docs/LanguageExtensions.html
@@ -56,6 +56,7 @@
   <ul>
   <li><a href="#__builtin_shufflevector">__builtin_shufflevector</a></li>
   <li><a href="#__builtin_unreachable">__builtin_unreachable</a></li>
+  <li><a href="#__sync_swap">__sync_swap</a></li>
  </ul>
 </li>
 <li><a href="#targetspecific">Target-Specific Extensions</a>
@@ -711,6 +712,36 @@
 
 <p>Query for this feature with __has_builtin(__builtin_unreachable).</p>
 
+<!-- ======================================================================= -->
+<h3 id="__sync_swap">__sync_swap</h3>
+<!-- ======================================================================= -->
+
+<p><tt>__sync_swap</tt> is used to atomically swap integers or pointers in
+memory.
+</p>
+
+<p><b>Syntax:</b></p>
+
+<pre>
+<i>type</i> __sync_swap(<i>type</i> *ptr, <i>type</i> value, ...)
+</pre>
+
+<p><b>Example of Use:</b></p>
+
+<pre>
+int old_value = __sync_swap(&value, new_value);
+</pre>
+
+<p><b>Description:</b></p>
+
+<p>The __sync_swap() builtin extends the existing __sync_*() family of atomic
+intrinsics to let code atomically swap the current value in memory with a new
+value.  More importantly, it helps developers write more efficient and correct
+code by avoiding expensive loops around __sync_bool_compare_and_swap() and by
+not relying on the platform-specific implementation details of
+__sync_lock_test_and_set(). The __sync_swap() builtin is a full barrier.
+</p>
+
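+<p>For illustration, a retry loop around __sync_bool_compare_and_swap(),
+sketched below with placeholder variable names, can be replaced by a single
+__sync_swap() call:</p>
+
+<pre>
+/* retry loop: */
+int old;
+do {
+  old = value;
+} while (!__sync_bool_compare_and_swap(&value, old, new_value));
+
+/* equivalent single call: */
+old = __sync_swap(&value, new_value);
+</pre>
+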
 
 <!-- ======================================================================= -->
 <h2 id="targetspecific">Target-Specific Extensions</h2>
diff --git a/include/clang/Basic/Builtins.def b/include/clang/Basic/Builtins.def
index 3f97b78..9a4c768 100644
--- a/include/clang/Basic/Builtins.def
+++ b/include/clang/Basic/Builtins.def
@@ -577,6 +577,13 @@
 BUILTIN(__sync_lock_release_8, "vLLiD*.", "n")
 BUILTIN(__sync_lock_release_16, "vLLLiD*.", "n")
 
+BUILTIN(__sync_swap, "v.", "")
+BUILTIN(__sync_swap_1, "ccD*c.", "n")
+BUILTIN(__sync_swap_2, "ssD*s.", "n")
+BUILTIN(__sync_swap_4, "iiD*i.", "n")
+BUILTIN(__sync_swap_8, "LLiLLiD*LLi.", "n")
+BUILTIN(__sync_swap_16, "LLLiLLLiD*LLLi.", "n")
+
 
 
 // Non-overloaded atomic builtins.
diff --git a/lib/CodeGen/CGBuiltin.cpp b/lib/CodeGen/CGBuiltin.cpp
index 35507b9..af8d37a 100644
--- a/lib/CodeGen/CGBuiltin.cpp
+++ b/lib/CodeGen/CGBuiltin.cpp
@@ -721,6 +721,7 @@
   case Builtin::BI__sync_bool_compare_and_swap:
   case Builtin::BI__sync_lock_test_and_set:
   case Builtin::BI__sync_lock_release:
+  case Builtin::BI__sync_swap:
     assert(0 && "Shouldn't make it through sema");
   case Builtin::BI__sync_fetch_and_add_1:
   case Builtin::BI__sync_fetch_and_add_2:
@@ -860,6 +861,13 @@
     return RValue::get(Result);
   }
 
+  case Builtin::BI__sync_swap_1:
+  case Builtin::BI__sync_swap_2:
+  case Builtin::BI__sync_swap_4:
+  case Builtin::BI__sync_swap_8:
+  case Builtin::BI__sync_swap_16:
+    return EmitBinaryAtomic(*this, Intrinsic::atomic_swap, E);
+
   case Builtin::BI__sync_lock_test_and_set_1:
   case Builtin::BI__sync_lock_test_and_set_2:
   case Builtin::BI__sync_lock_test_and_set_4:
diff --git a/lib/Sema/SemaChecking.cpp b/lib/Sema/SemaChecking.cpp
index 105fb52..e1adfd4 100644
--- a/lib/Sema/SemaChecking.cpp
+++ b/lib/Sema/SemaChecking.cpp
@@ -180,6 +180,7 @@
   case Builtin::BI__sync_bool_compare_and_swap:
   case Builtin::BI__sync_lock_test_and_set:
   case Builtin::BI__sync_lock_release:
+  case Builtin::BI__sync_swap:
     return SemaBuiltinAtomicOverloaded(move(TheCallResult));
   }
   
@@ -415,7 +416,8 @@
     BUILTIN_ROW(__sync_val_compare_and_swap),
     BUILTIN_ROW(__sync_bool_compare_and_swap),
     BUILTIN_ROW(__sync_lock_test_and_set),
-    BUILTIN_ROW(__sync_lock_release)
+    BUILTIN_ROW(__sync_lock_release),
+    BUILTIN_ROW(__sync_swap)
   };
 #undef BUILTIN_ROW
 
@@ -468,6 +470,7 @@
     NumFixed = 0;
     ResultType = Context.VoidTy;
     break;
+  case Builtin::BI__sync_swap: BuiltinIndex = 14; break;
   }
 
   // Now that we know how many fixed arguments we expect, first check that we
diff --git a/test/CodeGen/atomic.c b/test/CodeGen/atomic.c
index 4a7c13f..8ce2d96 100644
--- a/test/CodeGen/atomic.c
+++ b/test/CodeGen/atomic.c
@@ -44,6 +44,11 @@
   // CHECK: call i32 @llvm.atomic.swap.i32.p0i32(i32* %val, i32 7)
   // CHECK: call void @llvm.memory.barrier(i1 true, i1 true, i1 true, i1 true, i1 true)
   
+  old = __sync_swap(&val, 8);
+  // CHECK: call void @llvm.memory.barrier(i1 true, i1 true, i1 true, i1 true, i1 true)
+  // CHECK: call i32 @llvm.atomic.swap.i32.p0i32(i32* %val, i32 8)
+  // CHECK: call void @llvm.memory.barrier(i1 true, i1 true, i1 true, i1 true, i1 true)
+  
   old = __sync_val_compare_and_swap(&val, 4, 1976);
   // CHECK: call void @llvm.memory.barrier(i1 true, i1 true, i1 true, i1 true, i1 true)
   // CHECK: call i32 @llvm.atomic.cmp.swap.i32.p0i32(i32* %val, i32 4, i32 1976)