Atomic op support.  If any gcc test uses __sync builtins, it might start failing on archs that haven't implemented them yet

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@47430 91177308-0d34-0410-b5e6-96231b3b80d8
diff --git a/docs/LangRef.html b/docs/LangRef.html
index af88237..d09d348 100644
--- a/docs/LangRef.html
+++ b/docs/LangRef.html
@@ -206,7 +206,10 @@
       </li>
           <li><a href="#int_atomics">Atomic intrinsics</a>
             <ol>
-              <li><a href="#int_memory_barrier"><tt>llvm.memory_barrier</tt></li>
+              <li><a href="#int_memory_barrier"><tt>llvm.memory_barrier</tt></a></li>
+              <li><a href="#int_atomic_lcs"><tt>llvm.atomic.lcs</tt></a></li>
+              <li><a href="#int_atomic_las"><tt>llvm.atomic.las</tt></a></li>
+              <li><a href="#int_atomic_swap"><tt>llvm.atomic.swap</tt></a></li>
             </ol>
           </li>
       <li><a href="#int_general">General intrinsics</a>
@@ -5339,6 +5342,176 @@
 </pre>
 </div>
 
+<!-- _______________________________________________________________________ -->
+<div class="doc_subsubsection">
+  <a name="int_atomic_lcs">'<tt>llvm.atomic.lcs.*</tt>' Intrinsic</a>
+</div>
+<div class="doc_text">
+<h5>Syntax:</h5>
+<p>
+  This is an overloaded intrinsic. You can use <tt>llvm.atomic.lcs</tt> on any 
+  integer bit width. Not all targets support all bit widths, however.</p>
+
+<pre>
+declare i8 @llvm.atomic.lcs.i8( i8* &lt;ptr&gt;, i8 &lt;cmp&gt;, i8 &lt;val&gt; )
+declare i16 @llvm.atomic.lcs.i16( i16* &lt;ptr&gt;, i16 &lt;cmp&gt;, i16 &lt;val&gt; )
+declare i32 @llvm.atomic.lcs.i32( i32* &lt;ptr&gt;, i32 &lt;cmp&gt;, i32 &lt;val&gt; )
+declare i64 @llvm.atomic.lcs.i64( i64* &lt;ptr&gt;, i64 &lt;cmp&gt;, i64 &lt;val&gt; )
+
+</pre>
+<h5>Overview:</h5>
+<p>
+  This intrinsic loads a value from memory and compares it to a given value. 
+  If they are equal, it stores a new value into the memory.
+</p>
+<h5>Arguments:</h5>
+<p>
+  The <tt>llvm.atomic.lcs</tt> intrinsic takes three arguments. The result as 
+  well as both <tt>cmp</tt> and <tt>val</tt> must be integer values with the 
+  same bit width. The <tt>ptr</tt> argument must be a pointer to a value of 
+  this integer type. While any integer bit width may be used, targets may only 
+  lower bit widths they support in hardware.
+
+</p>
+<h5>Semantics:</h5>
+<p>
+  This entire intrinsic must be executed atomically. It first loads the value 
+  in memory pointed to by <tt>ptr</tt> and compares it with the value 
+  <tt>cmp</tt>. If they are equal, <tt>val</tt> is stored into the memory. The 
+  loaded value is yielded in all cases. This provides the equivalent of an 
+  atomic compare-and-swap operation within the SSA framework.
+</p>
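+<p>
+  As an illustrative sketch only (shown for the <tt>i32</tt> variant; the 
+  block and value names below are hypothetical), the operation corresponds to 
+  the following instruction sequence, with the difference that the intrinsic 
+  performs it atomically:
+</p>
+<pre>
+entry:
+  %orig  = load i32* %ptr              <i>; load the current value</i>
+  %equal = icmp eq i32 %orig, %cmp     <i>; compare it with %cmp</i>
+  br i1 %equal, label %swap, label %done
+
+swap:
+  store i32 %val, i32* %ptr            <i>; store %val only if the values matched</i>
+  br label %done
+
+done:
+  <i>; %orig is the value yielded in both cases</i>
+</pre>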
+<h5>Examples:</h5>
+
+<pre>
+%ptr      = malloc i32
+            store i32 4, i32* %ptr
+
+%val1     = add i32 4, 4
+%result1  = call i32 @llvm.atomic.lcs.i32( i32* %ptr, i32 4, i32 %val1 )
+                                          <i>; yields {i32}:result1 = 4</i>
+%stored1  = icmp eq i32 %result1, 4       <i>; yields {i1}:stored1 = true</i>
+%memval1  = load i32* %ptr                <i>; yields {i32}:memval1 = 8</i>
+
+%val2     = add i32 1, 1
+%result2  = call i32 @llvm.atomic.lcs.i32( i32* %ptr, i32 5, i32 %val2 )
+                                          <i>; yields {i32}:result2 = 8</i>
+%stored2  = icmp eq i32 %result2, 5       <i>; yields {i1}:stored2 = false</i>
+
+%memval2  = load i32* %ptr                <i>; yields {i32}:memval2 = 8</i>
+</pre>
+</div>
+
+<!-- _______________________________________________________________________ -->
+<div class="doc_subsubsection">
+  <a name="int_atomic_swap">'<tt>llvm.atomic.swap.*</tt>' Intrinsic</a>
+</div>
+<div class="doc_text">
+<h5>Syntax:</h5>
+
+<p>
+  This is an overloaded intrinsic. You can use <tt>llvm.atomic.swap</tt> on any 
+  integer bit width. Not all targets support all bit widths, however.</p>
+<pre>
+declare i8 @llvm.atomic.swap.i8( i8* &lt;ptr&gt;, i8 &lt;val&gt; )
+declare i16 @llvm.atomic.swap.i16( i16* &lt;ptr&gt;, i16 &lt;val&gt; )
+declare i32 @llvm.atomic.swap.i32( i32* &lt;ptr&gt;, i32 &lt;val&gt; )
+declare i64 @llvm.atomic.swap.i64( i64* &lt;ptr&gt;, i64 &lt;val&gt; )
+
+</pre>
+<h5>Overview:</h5>
+<p>
+  This intrinsic loads the value stored in memory at <tt>ptr</tt> and yields 
+  it. It then stores the value in <tt>val</tt> into the memory at 
+  <tt>ptr</tt>.
+</p>
+<h5>Arguments:</h5>
+
+<p>
+  The <tt>llvm.atomic.swap</tt> intrinsic takes two arguments. Both the 
+  <tt>val</tt> argument and the result must be integers of the same bit width. 
+  The first argument, <tt>ptr</tt>, must be a pointer to a value of this 
+  integer type. Targets may only lower integer bit widths they support in 
+  hardware.
+</p>
+<h5>Semantics:</h5>
+<p>
+  This intrinsic loads the value pointed to by <tt>ptr</tt>, yields it, and 
+  stores <tt>val</tt> back into <tt>ptr</tt> atomically. This provides the 
+  equivalent of an atomic swap operation within the SSA framework.
+
+</p>
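+<p>
+  As an illustrative sketch only (shown for the <tt>i32</tt> variant; the 
+  value names below are hypothetical), the effect is that of the following 
+  sequence, performed atomically by the intrinsic:
+</p>
+<pre>
+%orig     = load i32* %ptr             <i>; %orig is the value yielded</i>
+            store i32 %val, i32* %ptr  <i>; unconditionally store the new value</i>
+</pre>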
+<h5>Examples:</h5>
+<pre>
+%ptr      = malloc i32
+            store i32 4, i32* %ptr
+
+%val1     = add i32 4, 4
+%result1  = call i32 @llvm.atomic.swap.i32( i32* %ptr, i32 %val1 )
+                                        <i>; yields {i32}:result1 = 4</i>
+%stored1  = icmp eq i32 %result1, 4     <i>; yields {i1}:stored1 = true</i>
+%memval1  = load i32* %ptr              <i>; yields {i32}:memval1 = 8</i>
+
+%val2     = add i32 1, 1
+%result2  = call i32 @llvm.atomic.swap.i32( i32* %ptr, i32 %val2 )
+                                        <i>; yields {i32}:result2 = 8</i>
+
+%stored2  = icmp eq i32 %result2, 8     <i>; yields {i1}:stored2 = true</i>
+%memval2  = load i32* %ptr              <i>; yields {i32}:memval2 = 2</i>
+</pre>
+</div>
+
+<!-- _______________________________________________________________________ -->
+<div class="doc_subsubsection">
+  <a name="int_atomic_las">'<tt>llvm.atomic.las.*</tt>' Intrinsic</a>
+
+</div>
+<div class="doc_text">
+<h5>Syntax:</h5>
+<p>
+  This is an overloaded intrinsic. You can use <tt>llvm.atomic.las</tt> on any 
+  integer bit width. Not all targets support all bit widths, however.</p>
+<pre>
+declare i8 @llvm.atomic.las.i8( i8* &lt;ptr&gt;, i8 &lt;delta&gt; )
+declare i16 @llvm.atomic.las.i16( i16* &lt;ptr&gt;, i16 &lt;delta&gt; )
+declare i32 @llvm.atomic.las.i32( i32* &lt;ptr&gt;, i32 &lt;delta&gt; )
+declare i64 @llvm.atomic.las.i64( i64* &lt;ptr&gt;, i64 &lt;delta&gt; )
+
+</pre>
+<h5>Overview:</h5>
+<p>
+  This intrinsic adds <tt>delta</tt> to the value stored in memory at 
+  <tt>ptr</tt>. It yields the original value at <tt>ptr</tt>.
+</p>
+<h5>Arguments:</h5>
+<p>
+
+  The <tt>llvm.atomic.las</tt> intrinsic takes two arguments: the first is a 
+  pointer to an integer value, and the second is an integer value. The result 
+  is also an integer value. These integers can have any bit width, but they 
+  must all have the same bit width. Targets may only lower integer bit widths 
+  they support in hardware.
+</p>
+<h5>Semantics:</h5>
+<p>
+  This intrinsic performs a series of operations atomically. It first loads 
+  the value stored at <tt>ptr</tt>, then adds <tt>delta</tt> to it and stores 
+  the result back to <tt>ptr</tt>. It yields the original value stored at 
+  <tt>ptr</tt>.
+</p>
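+<p>
+  As an illustrative sketch only (shown for the <tt>i32</tt> variant; the 
+  value names below are hypothetical), the effect is that of the following 
+  sequence, performed atomically by the intrinsic:
+</p>
+<pre>
+%orig     = load i32* %ptr             <i>; %orig is the value yielded</i>
+%sum      = add i32 %orig, %delta      <i>; add the delta</i>
+            store i32 %sum, i32* %ptr  <i>; store the new value back</i>
+</pre>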
+
+<h5>Examples:</h5>
+<pre>
+%ptr      = malloc i32
+            store i32 4, i32* %ptr
+%result1  = call i32 @llvm.atomic.las.i32( i32* %ptr, i32 4 )
+                                <i>; yields {i32}:result1 = 4</i>
+%result2  = call i32 @llvm.atomic.las.i32( i32* %ptr, i32 2 )
+                                <i>; yields {i32}:result2 = 8</i>
+%result3  = call i32 @llvm.atomic.las.i32( i32* %ptr, i32 5 )
+                                <i>; yields {i32}:result3 = 10</i>
+%memval   = load i32* %ptr      <i>; yields {i32}:memval = 15</i>
+</pre>
+</div>
+
 
 <!-- ======================================================================= -->
 <div class="doc_subsection">