Linux-2.6.12-rc2

Initial git repository build. I'm not bothering with the full history,
even though we have it. We can create a separate "historical" git
archive of that later if we want to, and in the meantime it's about
3.2GB when imported into git - space that would just make the early
git days unnecessarily complicated, when we don't have a lot of good
infrastructure for it.

Let it rip!
diff --git a/include/asm-arm26/semaphore-helper.h b/include/asm-arm26/semaphore-helper.h
new file mode 100644
index 0000000..1d7f198
--- /dev/null
+++ b/include/asm-arm26/semaphore-helper.h
@@ -0,0 +1,84 @@
+#ifndef ASMARM_SEMAPHORE_HELPER_H
+#define ASMARM_SEMAPHORE_HELPER_H
+
+/*
+ * The wake and wait helpers below _must_ execute atomically wrt each other.
+ */
+static inline void wake_one_more(struct semaphore *sem)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&semaphore_wake_lock, flags);
+	if (atomic_read(&sem->count) <= 0)
+		sem->waking++;
+	spin_unlock_irqrestore(&semaphore_wake_lock, flags);
+}
+
+static inline int waking_non_zero(struct semaphore *sem)
+{
+	unsigned long flags;
+	int ret = 0;
+
+	spin_lock_irqsave(&semaphore_wake_lock, flags);
+	if (sem->waking > 0) {
+		sem->waking--;
+		ret = 1;
+	}
+	spin_unlock_irqrestore(&semaphore_wake_lock, flags);
+	return ret;
+}
+
+/*
+ * waking_non_zero_interruptible:
+ *	1	got the lock
+ *	0	go to sleep
+ *	-EINTR	interrupted
+ *
+ * We must undo the sem->count decrement done by down_interruptible() while
+ * we are protected by the spinlock, so that this atomic_inc() is atomic with
+ * respect to the atomic_read() in wake_one_more(); otherwise we can race. -arca
+ */
+static inline int waking_non_zero_interruptible(struct semaphore *sem,
+						struct task_struct *tsk)
+{
+	unsigned long flags;
+	int ret = 0;
+
+	spin_lock_irqsave(&semaphore_wake_lock, flags);
+	if (sem->waking > 0) {
+		sem->waking--;
+		ret = 1;
+	} else if (signal_pending(tsk)) {
+		atomic_inc(&sem->count);
+		ret = -EINTR;
+	}
+	spin_unlock_irqrestore(&semaphore_wake_lock, flags);
+	return ret;
+}
+
+/*
+ * waking_non_zero_try_lock:
+ *	1	failed to lock
+ *	0	got the lock
+ *
+ * We must undo the sem->count decrement done by down_trylock() while we are
+ * protected by the spinlock, so that this atomic_inc() is atomic with respect
+ * to the atomic_read() in wake_one_more(); otherwise we can race. -arca
+ */
+static inline int waking_non_zero_trylock(struct semaphore *sem)
+{
+	unsigned long flags;
+	int ret = 1;
+
+	spin_lock_irqsave(&semaphore_wake_lock, flags);
+	if (sem->waking <= 0) {
+		atomic_inc(&sem->count);
+	} else {
+		sem->waking--;
+		ret = 0;
+	}
+	spin_unlock_irqrestore(&semaphore_wake_lock, flags);
+	return ret;
+}
+
+#endif
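
For context (not part of the patch): a minimal sketch of how an architecture's
2.6-era semaphore slow paths would typically consume these helpers. It assumes
the usual struct semaphore layout of that era (count, waking, wait) and that
the inline fast path has already decremented sem->count before falling into
the slow path. The function bodies below are illustrative assumptions, not the
arm26 implementation.

#include <linux/sched.h>
#include <linux/wait.h>
#include <asm/semaphore.h>
#include <asm/semaphore-helper.h>

/*
 * Sketch: wake side.  wake_one_more() records the wakeup under
 * semaphore_wake_lock; wake_up() then kicks a sleeper on the queue.
 */
void __up(struct semaphore *sem)
{
	wake_one_more(sem);
	wake_up(&sem->wait);
}

/*
 * Sketch: trylock slow path just consults the helper, which undoes the
 * optimistic count decrement on failure.
 */
int __down_trylock(struct semaphore *sem)
{
	return waking_non_zero_trylock(sem);	/* 0 = acquired, 1 = failed */
}

/*
 * Sketch: interruptible wait side.  Loops on waking_non_zero_interruptible()
 * while sleeping on the semaphore's wait queue.
 */
int __down_interruptible(struct semaphore *sem)
{
	int ret = 0;
	DECLARE_WAITQUEUE(wait, current);

	add_wait_queue_exclusive(&sem->wait, &wait);
	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);

		/* 1: got it, 0: keep sleeping, -EINTR: signal pending */
		ret = waking_non_zero_interruptible(sem, current);
		if (ret) {
			if (ret == 1)
				ret = 0;	/* success */
			break;
		}
		schedule();
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(&sem->wait, &wait);
	return ret;
}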