Linux-2.6.12-rc2

Initial git repository build. I'm not bothering with the full history,
even though we have it. We can create a separate "historical" git
archive of that later if we want to, and in the meantime it's about
3.2GB when imported into git - space that would just make the early
git days unnecessarily complicated, when we don't have a lot of good
infrastructure for it.

Let it rip!

diff --git a/include/asm-sh64/atomic.h b/include/asm-sh64/atomic.h
new file mode 100644
index 0000000..8c3872d
--- /dev/null
+++ b/include/asm-sh64/atomic.h
@@ -0,0 +1,165 @@
+#ifndef __ASM_SH64_ATOMIC_H
+#define __ASM_SH64_ATOMIC_H
+
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * include/asm-sh64/atomic.h
+ *
+ * Copyright (C) 2000, 2001  Paolo Alberelli
+ * Copyright (C) 2003  Paul Mundt
+ *
+ */
+
+/*
+ * Atomic operations that C can't guarantee us.  Useful for
+ * resource counting etc.
+ *
+ */
+
+typedef struct { volatile int counter; } atomic_t;
+
+#define ATOMIC_INIT(i)	( (atomic_t) { (i) } )
+
+#define atomic_read(v)		((v)->counter)
+#define atomic_set(v,i)		((v)->counter = (i))
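+
+/*
+ * Usage sketch (illustrative only; release_object() is a placeholder
+ * for a caller-supplied cleanup function, not defined by this header):
+ *
+ *	static atomic_t refs = ATOMIC_INIT(1);
+ *
+ *	atomic_inc(&refs);
+ *	if (atomic_dec_and_test(&refs))
+ *		release_object();
+ */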
+
+#include <asm/system.h>
+
+/*
+ * There are no load-locked/store-conditional primitives here, so these
+ * operations are implemented by disabling interrupts around a plain
+ * read-modify-write.  That is sufficient on uniprocessor systems only.
+ */
+
+static __inline__ void atomic_add(int i, atomic_t *v)
+{
+	unsigned long flags;
+
+	local_irq_save(flags);
+	v->counter += i;
+	local_irq_restore(flags);
+}
+
+static __inline__ void atomic_sub(int i, atomic_t *v)
+{
+	unsigned long flags;
+
+	local_irq_save(flags);
+	v->counter -= i;
+	local_irq_restore(flags);
+}
+
+static __inline__ int atomic_add_return(int i, atomic_t *v)
+{
+	unsigned long temp, flags;
+
+	local_irq_save(flags);
+	temp = v->counter;
+	temp += i;
+	v->counter = temp;
+	local_irq_restore(flags);
+
+	return temp;
+}
+
+#define atomic_add_negative(a, v)	(atomic_add_return((a), (v)) < 0)
+
+static __inline__ int atomic_sub_return(int i, atomic_t *v)
+{
+	unsigned long temp, flags;
+
+	local_irq_save(flags);
+	temp = v->counter;
+	temp -= i;
+	v->counter = temp;
+	local_irq_restore(flags);
+
+	return temp;
+}
+
+#define atomic_dec_return(v) atomic_sub_return(1,(v))
+#define atomic_inc_return(v) atomic_add_return(1,(v))
+
+/*
+ * atomic_inc_and_test - increment and test
+ * @v: pointer of type atomic_t
+ *
+ * Atomically increments @v by 1
+ * and returns true if the result is zero, or false for all
+ * other cases.
+ */
+#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
+
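+/*
+ * atomic_sub_and_test - subtract value from variable and test result
+ * @i: integer value to subtract
+ * @v: pointer of type atomic_t
+ *
+ * Atomically subtracts @i from @v
+ * and returns true if the result is zero, or false for all
+ * other cases.
+ */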
+#define atomic_sub_and_test(i,v) (atomic_sub_return((i), (v)) == 0)
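+
+/*
+ * atomic_dec_and_test - decrement and test
+ * @v: pointer of type atomic_t
+ *
+ * Atomically decrements @v by 1
+ * and returns true if the result is zero, or false for all
+ * other cases.
+ */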
+#define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0)
+
+#define atomic_inc(v) atomic_add(1,(v))
+#define atomic_dec(v) atomic_sub(1,(v))
+
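+/*
+ * atomic_clear_mask - atomically clear bits in an atomic_t
+ * @mask: mask of bits to clear
+ * @v: pointer of type atomic_t
+ */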
+static __inline__ void atomic_clear_mask(unsigned int mask, atomic_t *v)
+{
+	unsigned long flags;
+
+	local_irq_save(flags);
+	v->counter &= ~mask;
+	local_irq_restore(flags);
+}
+
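+/*
+ * atomic_set_mask - atomically set bits in an atomic_t
+ * @mask: mask of bits to set
+ * @v: pointer of type atomic_t
+ */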
+static __inline__ void atomic_set_mask(unsigned int mask, atomic_t *v)
+{
+	unsigned long flags;
+
+	local_irq_save(flags);
+	v->counter |= mask;
+	local_irq_restore(flags);
+}
+
+/* Atomic operations are already serializing on SH */
+#define smp_mb__before_atomic_dec()	barrier()
+#define smp_mb__after_atomic_dec()	barrier()
+#define smp_mb__before_atomic_inc()	barrier()
+#define smp_mb__after_atomic_inc()	barrier()
+
+#endif /* __ASM_SH64_ATOMIC_H */