[AX.25]: Optimize AX.25 socket list lock

Right now all uses of ax25_list_lock are _bh locks, but since some of the
code is only ever invoked from _bh context we can do better and use plain
spin_lock()/spin_unlock() there.
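
For illustration only (not part of this patch, hypothetical names), the
sketch below shows the general pattern: a function that is only ever
entered from softirq (_bh) context may take the lock with plain
spin_lock(), because bottom halves are already disabled there, while a
process-context caller must keep spin_lock_bh() so a softirq on the same
CPU cannot deadlock against it.

	/* Illustrative sketch only -- hypothetical names, not from af_ax25.c. */
	#include <linux/spinlock.h>
	#include <linux/list.h>

	static DEFINE_SPINLOCK(example_list_lock);
	static LIST_HEAD(example_list);

	struct example_entry {
		struct list_head node;
	};

	/* Only ever called from softirq (_bh) context, e.g. a receive path:
	 * bottom halves are already disabled, so plain spin_lock() suffices. */
	static void example_rx_lookup(void)
	{
		struct example_entry *e;

		spin_lock(&example_list_lock);
		list_for_each_entry(e, &example_list, node) {
			/* ... match entry against the incoming frame ... */
		}
		spin_unlock(&example_list_lock);
	}

	/* Called from process context (e.g. a socket call): must use the
	 * _bh variant to keep softirqs on this CPU from deadlocking. */
	static void example_insert(struct example_entry *e)
	{
		spin_lock_bh(&example_list_lock);
		list_add(&e->node, &example_list);
		spin_unlock_bh(&example_list_lock);
	}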

Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
index f12be2a..000695c 100644
--- a/net/ax25/af_ax25.c
+++ b/net/ax25/af_ax25.c
@@ -145,7 +145,7 @@
 	ax25_cb *s;
 	struct hlist_node *node;
 
-	spin_lock_bh(&ax25_list_lock);
+	spin_lock(&ax25_list_lock);
 	ax25_for_each(s, node, &ax25_list) {
 		if ((s->iamdigi && !digi) || (!s->iamdigi && digi))
 			continue;
@@ -154,12 +154,12 @@
 			/* If device is null we match any device */
 			if (s->ax25_dev == NULL || s->ax25_dev->dev == dev) {
 				sock_hold(s->sk);
-				spin_unlock_bh(&ax25_list_lock);
+				spin_unlock(&ax25_list_lock);
 				return s->sk;
 			}
 		}
 	}
-	spin_unlock_bh(&ax25_list_lock);
+	spin_unlock(&ax25_list_lock);
 
 	return NULL;
 }
@@ -174,7 +174,7 @@
 	ax25_cb *s;
 	struct hlist_node *node;
 
-	spin_lock_bh(&ax25_list_lock);
+	spin_lock(&ax25_list_lock);
 	ax25_for_each(s, node, &ax25_list) {
 		if (s->sk && !ax25cmp(&s->source_addr, my_addr) &&
 		    !ax25cmp(&s->dest_addr, dest_addr) &&
@@ -185,7 +185,7 @@
 		}
 	}
 
-	spin_unlock_bh(&ax25_list_lock);
+	spin_unlock(&ax25_list_lock);
 
 	return sk;
 }
@@ -235,7 +235,7 @@
 	struct sk_buff *copy;
 	struct hlist_node *node;
 
-	spin_lock_bh(&ax25_list_lock);
+	spin_lock(&ax25_list_lock);
 	ax25_for_each(s, node, &ax25_list) {
 		if (s->sk != NULL && ax25cmp(&s->source_addr, addr) == 0 &&
 		    s->sk->sk_type == SOCK_RAW &&
@@ -248,7 +248,7 @@
 				kfree_skb(copy);
 		}
 	}
-	spin_unlock_bh(&ax25_list_lock);
+	spin_unlock(&ax25_list_lock);
 }
 
 /*