[TIPC]: Overhaul object reference table code
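
Move the definitions of struct reference and struct ref_table from ref.h
into ref.c and make the table itself static, since no other file needs
them.  Collapse the old next_plus_upper/reference union into a single
'ref' field and initialize table entries lazily: the table is allocated
zeroed with __vmalloc(), entry 0 and all entries at or beyond 'init_point'
remain uninitialized until first handed out, and freed entries are
recycled through the free list as before.  The number of usable entries is
now capped at the caller-requested size rather than at the rounded-up
power of two.  tipc_ref_acquire() now always returns the entry's lock
through *lock, and tipc_ref_lock()/tipc_ref_unlock()/tipc_ref_deref()
become out-of-line functions that validate the reference (and, for the
locking variants, the object pointer) before touching the entry, so stale
or uninitialized references fail cleanly.

As an illustration only (not part of this patch), a user of the registry
is expected to follow roughly the pattern below; "struct my_obj",
my_obj_create() and my_obj_delete() are invented names, and the pattern
mirrors how existing users such as net/tipc/port.c keep the lock pointer
returned by tipc_ref_acquire():

	/* Hypothetical registry user -- illustrative sketch only */
	struct my_obj {
		spinlock_t *lock;	/* entry lock from tipc_ref_acquire() */
		u32 ref;
	};

	static u32 my_obj_create(void)
	{
		struct my_obj *obj = kzalloc(sizeof(*obj), GFP_ATOMIC);

		if (!obj)
			return 0;
		obj->ref = tipc_ref_acquire(obj, &obj->lock);
		if (!obj->ref) {
			kfree(obj);		/* reference table exhausted */
			return 0;
		}
		return obj->ref;
	}

	static void my_obj_delete(u32 ref)
	{
		struct my_obj *obj = tipc_ref_lock(ref);  /* NULL if stale */

		if (!obj)
			return;
		tipc_ref_discard(ref);		/* later lookups of ref fail */
		spin_unlock_bh(obj->lock);	/* tipc_ref_unlock() would
						 * refuse a discarded ref */
		kfree(obj);
	}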
diff --git a/net/tipc/ref.c b/net/tipc/ref.c
index c38744c..89cbab2 100644
--- a/net/tipc/ref.c
+++ b/net/tipc/ref.c
@@ -2,7 +2,7 @@
  * net/tipc/ref.c: TIPC object registry code
  *
  * Copyright (c) 1991-2006, Ericsson AB
- * Copyright (c) 2004-2005, Wind River Systems
+ * Copyright (c) 2004-2007, Wind River Systems
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -36,32 +36,60 @@
 
 #include "core.h"
 #include "ref.h"
-#include "port.h"
-#include "subscr.h"
-#include "name_distr.h"
-#include "name_table.h"
-#include "config.h"
-#include "discover.h"
-#include "bearer.h"
-#include "node.h"
-#include "bcast.h"
+
+/**
+ * struct reference - TIPC object reference entry
+ * @object: pointer to object associated with reference entry
+ * @lock: spinlock controlling access to object
+ * @ref: reference value for object (combines instance & array index info)
+ */
+
+struct reference {
+	void *object;
+	spinlock_t lock;
+	u32 ref;
+};
+
+/**
+ * struct ref_table - table of TIPC object reference entries
+ * @entries: pointer to array of reference entries
+ * @capacity: array index of first unusable entry
+ * @init_point: array index of first uninitialized entry
+ * @first_free: array index of first unused object reference entry
+ * @last_free: array index of last unused object reference entry
+ * @index_mask: bitmask for array index portion of reference values
+ * @start_mask: initial value for instance value portion of reference values
+ */
+
+struct ref_table {
+	struct reference *entries;
+	u32 capacity;
+	u32 init_point;
+	u32 first_free;
+	u32 last_free;
+	u32 index_mask;
+	u32 start_mask;
+};
 
 /*
  * Object reference table consists of 2**N entries.
  *
- * A used entry has object ptr != 0, reference == XXXX|own index
- *				     (XXXX changes each time entry is acquired)
- * A free entry has object ptr == 0, reference == YYYY|next free index
- *				     (YYYY is one more than last used XXXX)
+ * State	Object ptr	Reference
+ * -----        ----------      ---------
+ * In use        non-NULL       XXXX|own index
+ *				(XXXX changes each time entry is acquired)
+ * Free            NULL         YYYY|next free index
+ *				(YYYY is one more than last used XXXX)
+ * Uninitialized   NULL         0
  *
- * Free list is initially chained from entry (2**N)-1 to entry 1.
- * Entry 0 is not used to allow index 0 to indicate the end of the free list.
+ * Entry 0 is not used; this allows index 0 to denote the end of the free list.
  *
- * Note: Any accidental reference of the form XXXX|0--0 won't match entry 0
- * because entry 0's reference field has the form XXXX|1--1.
+ * Note that a reference value of 0 does not necessarily indicate that an
+ * entry is uninitialized, since the last entry in the free list could also
+ * have a reference value of 0 (although this is unlikely).
  */
 
-struct ref_table tipc_ref_table = { NULL };
+static struct ref_table tipc_ref_table = { NULL };
 
 static DEFINE_RWLOCK(ref_table_lock);
 
@@ -72,29 +100,29 @@
 int tipc_ref_table_init(u32 requested_size, u32 start)
 {
 	struct reference *table;
-	u32 sz = 1 << 4;
-	u32 index_mask;
-	int i;
+	u32 actual_size;
 
-	while (sz < requested_size) {
-		sz <<= 1;
-	}
-	table = vmalloc(sz * sizeof(*table));
+	/* account for unused entry, then round up size to a power of 2 */
+
+	requested_size++;
+	for (actual_size = 16; actual_size < requested_size; actual_size <<= 1)
+		/* do nothing */ ;
+
+	/* allocate table & mark all entries as uninitialized */
+
+	table = __vmalloc(actual_size * sizeof(struct reference),
+			  GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL);
 	if (table == NULL)
 		return -ENOMEM;
 
-	write_lock_bh(&ref_table_lock);
-	index_mask = sz - 1;
-	for (i = sz - 1; i >= 0; i--) {
-		table[i].object = NULL;
-		spin_lock_init(&table[i].lock);
-		table[i].data.next_plus_upper = (start & ~index_mask) + i - 1;
-	}
 	tipc_ref_table.entries = table;
-	tipc_ref_table.index_mask = index_mask;
-	tipc_ref_table.first_free = sz - 1;
-	tipc_ref_table.last_free = 1;
-	write_unlock_bh(&ref_table_lock);
+	tipc_ref_table.capacity = requested_size;
+	tipc_ref_table.init_point = 1;
+	tipc_ref_table.first_free = 0;
+	tipc_ref_table.last_free = 0;
+	tipc_ref_table.index_mask = actual_size - 1;
+	tipc_ref_table.start_mask = start & ~tipc_ref_table.index_mask;
+
 	return TIPC_OK;
 }
 
@@ -125,7 +153,7 @@
 	u32 index;
 	u32 index_mask;
 	u32 next_plus_upper;
-	u32 reference = 0;
+	u32 ref;
 
 	if (!object) {
 		err("Attempt to acquire reference to non-existent object\n");
@@ -136,6 +164,8 @@
 		return 0;
 	}
 
+	/* take a free entry, if available; otherwise initialize a new entry */
+
 	write_lock_bh(&ref_table_lock);
 	if (tipc_ref_table.first_free) {
 		index = tipc_ref_table.first_free;
@@ -143,17 +173,29 @@
 		index_mask = tipc_ref_table.index_mask;
 		/* take lock in case a previous user of entry still holds it */
 		spin_lock_bh(&entry->lock);
-		next_plus_upper = entry->data.next_plus_upper;
+		next_plus_upper = entry->ref;
 		tipc_ref_table.first_free = next_plus_upper & index_mask;
-		reference = (next_plus_upper & ~index_mask) + index;
-		entry->data.reference = reference;
+		ref = (next_plus_upper & ~index_mask) + index;
+		entry->ref = ref;
 		entry->object = object;
-		if (lock != NULL)
-			*lock = &entry->lock;
 		spin_unlock_bh(&entry->lock);
+		*lock = &entry->lock;
+	}
+	else if (tipc_ref_table.init_point < tipc_ref_table.capacity) {
+		index = tipc_ref_table.init_point++;
+		entry = &(tipc_ref_table.entries[index]);
+		spin_lock_init(&entry->lock);
+		ref = tipc_ref_table.start_mask + index;
+		entry->ref = ref;
+		entry->object = object;
+		*lock = &entry->lock;
+	}
+	else {
+		ref = 0;
 	}
 	write_unlock_bh(&ref_table_lock);
-	return reference;
+
+	return ref;
 }
 
 /**
@@ -169,42 +211,99 @@
 	u32 index;
 	u32 index_mask;
 
-	if (!ref) {
-		err("Attempt to discard reference 0\n");
-		return;
-	}
 	if (!tipc_ref_table.entries) {
 		err("Reference table not found during discard attempt\n");
 		return;
 	}
 
-	write_lock_bh(&ref_table_lock);
 	index_mask = tipc_ref_table.index_mask;
 	index = ref & index_mask;
 	entry = &(tipc_ref_table.entries[index]);
 
+	write_lock_bh(&ref_table_lock);
+
 	if (!entry->object) {
 		err("Attempt to discard reference to non-existent object\n");
 		goto exit;
 	}
-	if (entry->data.reference != ref) {
+	if (entry->ref != ref) {
 		err("Attempt to discard non-existent reference\n");
 		goto exit;
 	}
 
-	/* mark entry as unused */
+	/*
+	 * mark entry as unused; increment instance part of entry's reference
+	 * to invalidate any subsequent references
+	 */
+
 	entry->object = NULL;
+	entry->ref = (ref & ~index_mask) + (index_mask + 1);
+
+	/* append entry to free entry list */
+
 	if (tipc_ref_table.first_free == 0)
 		tipc_ref_table.first_free = index;
 	else
-		/* next_plus_upper is always XXXX|0--0 for last free entry */
-		tipc_ref_table.entries[tipc_ref_table.last_free].data.next_plus_upper
-			|= index;
+		tipc_ref_table.entries[tipc_ref_table.last_free].ref |= index;
 	tipc_ref_table.last_free = index;
 
-	/* increment upper bits of entry to invalidate subsequent references */
-	entry->data.next_plus_upper = (ref & ~index_mask) + (index_mask + 1);
 exit:
 	write_unlock_bh(&ref_table_lock);
 }
 
+/**
+ * tipc_ref_lock - lock referenced object and return pointer to it
+ */
+
+void *tipc_ref_lock(u32 ref)
+{
+	if (likely(tipc_ref_table.entries)) {
+		struct reference *entry;
+
+		entry = &tipc_ref_table.entries[ref &
+						tipc_ref_table.index_mask];
+		if (likely(entry->ref != 0)) {
+			spin_lock_bh(&entry->lock);
+			if (likely((entry->ref == ref) && (entry->object)))
+				return entry->object;
+			spin_unlock_bh(&entry->lock);
+		}
+	}
+	return NULL;
+}
+
+/**
+ * tipc_ref_unlock - unlock referenced object
+ */
+
+void tipc_ref_unlock(u32 ref)
+{
+	if (likely(tipc_ref_table.entries)) {
+		struct reference *entry;
+
+		entry = &tipc_ref_table.entries[ref &
+						tipc_ref_table.index_mask];
+		if (likely((entry->ref == ref) && (entry->object)))
+			spin_unlock_bh(&entry->lock);
+		else
+			err("Attempt to unlock non-existent reference\n");
+	}
+}
+
+/**
+ * tipc_ref_deref - return pointer to referenced object (without locking it)
+ */
+
+void *tipc_ref_deref(u32 ref)
+{
+	if (likely(tipc_ref_table.entries)) {
+		struct reference *entry;
+
+		entry = &tipc_ref_table.entries[ref &
+						tipc_ref_table.index_mask];
+		if (likely(entry->ref == ref))
+			return entry->object;
+	}
+	return NULL;
+}
+
diff --git a/net/tipc/ref.h b/net/tipc/ref.h
index 38f3a7f..7e3798e 100644
--- a/net/tipc/ref.h
+++ b/net/tipc/ref.h
@@ -2,7 +2,7 @@
  * net/tipc/ref.h: Include file for TIPC object registry code
  *
  * Copyright (c) 1991-2006, Ericsson AB
- * Copyright (c) 2005, Wind River Systems
+ * Copyright (c) 2005-2006, Wind River Systems
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -37,95 +37,14 @@
 #ifndef _TIPC_REF_H
 #define _TIPC_REF_H
 
-/**
- * struct reference - TIPC object reference entry
- * @object: pointer to object associated with reference entry
- * @lock: spinlock controlling access to object
- * @data: reference value associated with object (or link to next unused entry)
- */
-
-struct reference {
-	void *object;
-	spinlock_t lock;
-	union {
-		u32 next_plus_upper;
-		u32 reference;
-	} data;
-};
-
-/**
- * struct tipc_ref_table - table of TIPC object reference entries
- * @entries: pointer to array of reference entries
- * @index_mask: bitmask for array index portion of reference values
- * @first_free: array index of first unused object reference entry
- * @last_free: array index of last unused object reference entry
- */
-
-struct ref_table {
-	struct reference *entries;
-	u32 index_mask;
-	u32 first_free;
-	u32 last_free;
-};
-
-extern struct ref_table tipc_ref_table;
-
 int tipc_ref_table_init(u32 requested_size, u32 start);
 void tipc_ref_table_stop(void);
 
 u32 tipc_ref_acquire(void *object, spinlock_t **lock);
 void tipc_ref_discard(u32 ref);
 
-
-/**
- * tipc_ref_lock - lock referenced object and return pointer to it
- */
-
-static inline void *tipc_ref_lock(u32 ref)
-{
-	if (likely(tipc_ref_table.entries)) {
-		struct reference *r =
-			&tipc_ref_table.entries[ref & tipc_ref_table.index_mask];
-
-		spin_lock_bh(&r->lock);
-		if (likely(r->data.reference == ref))
-			return r->object;
-		spin_unlock_bh(&r->lock);
-	}
-	return NULL;
-}
-
-/**
- * tipc_ref_unlock - unlock referenced object
- */
-
-static inline void tipc_ref_unlock(u32 ref)
-{
-	if (likely(tipc_ref_table.entries)) {
-		struct reference *r =
-			&tipc_ref_table.entries[ref & tipc_ref_table.index_mask];
-
-		if (likely(r->data.reference == ref))
-			spin_unlock_bh(&r->lock);
-		else
-			err("tipc_ref_unlock() invoked using obsolete reference\n");
-	}
-}
-
-/**
- * tipc_ref_deref - return pointer referenced object (without locking it)
- */
-
-static inline void *tipc_ref_deref(u32 ref)
-{
-	if (likely(tipc_ref_table.entries)) {
-		struct reference *r =
-			&tipc_ref_table.entries[ref & tipc_ref_table.index_mask];
-
-		if (likely(r->data.reference == ref))
-			return r->object;
-	}
-	return NULL;
-}
+void *tipc_ref_lock(u32 ref);
+void tipc_ref_unlock(u32 ref);
+void *tipc_ref_deref(u32 ref);
 
 #endif