Merge branch 'tipc-next'

Jon Maloy says:

====================
tipc: bearer and link improvements

The first commit adds an 'up' flag to struct tipc_bearer so that the
blocked state of a bearer can be set and checked directly from the
generic bearer layer, rather than being toggled via the device's
tipc_ptr pointer. The second commit is a small improvement to the link
congestion mechanism, giving each packet importance level a guaranteed
minimum backlog limit.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
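
The gating introduced by the first commit can be modeled in plain
userspace C as below. This is a minimal sketch only: struct bearer,
bearer_enable(), bearer_block() and bearer_xmit() are illustrative
names, and the C11 atomics merely approximate the semantics of the
kernel's test_and_set_bit_lock()/clear_bit_unlock() on the new 'up'
bit; it is not the kernel code itself.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Illustrative userspace model of the bearer 'up' bit: a blocked
 * bearer drops ordinary data but still lets RESET messages through.
 */
struct bearer {
	atomic_bool up;
};

/* models test_and_set_bit_lock(0, &b->up) */
static void bearer_enable(struct bearer *b)
{
	atomic_exchange_explicit(&b->up, true, memory_order_acquire);
}

/* models clear_bit_unlock(0, &b->up) */
static void bearer_block(struct bearer *b)
{
	atomic_store_explicit(&b->up, false, memory_order_release);
}

/* models the check added to tipc_bearer_xmit_skb()/tipc_bearer_xmit() */
static bool bearer_xmit(struct bearer *b, bool is_reset)
{
	return atomic_load_explicit(&b->up, memory_order_acquire) || is_reset;
}

int main(void)
{
	struct bearer b;

	atomic_init(&b.up, false);
	bearer_enable(&b);
	printf("data,  bearer up:      %s\n", bearer_xmit(&b, false) ? "sent" : "dropped");
	bearer_block(&b);
	printf("data,  bearer blocked: %s\n", bearer_xmit(&b, false) ? "sent" : "dropped");
	printf("reset, bearer blocked: %s\n", bearer_xmit(&b, true) ? "sent" : "dropped");
	return 0;
}

Compiled with any C11 compiler, the sketch shows that ordinary data
passes only while the bearer is up, while RESET messages still pass
when it is blocked, mirroring the checks added in the diff below.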
diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c
index 65b1bbf..6fc4e3c 100644
--- a/net/tipc/bearer.c
+++ b/net/tipc/bearer.c
@@ -56,6 +56,13 @@
 	NULL
 };
 
+static struct tipc_bearer *bearer_get(struct net *net, int bearer_id)
+{
+	struct tipc_net *tn = tipc_net(net);
+
+	return rcu_dereference_rtnl(tn->bearer_list[bearer_id]);
+}
+
 static void bearer_disable(struct net *net, struct tipc_bearer *b);
 
 /**
@@ -323,6 +330,7 @@
 	b->domain = disc_domain;
 	b->net_plane = bearer_id + 'A';
 	b->priority = priority;
+	test_and_set_bit_lock(0, &b->up);
 
 	res = tipc_disc_create(net, b, &b->bcast_addr, &skb);
 	if (res) {
@@ -360,15 +368,24 @@
  */
 void tipc_bearer_reset_all(struct net *net)
 {
-	struct tipc_net *tn = tipc_net(net);
 	struct tipc_bearer *b;
 	int i;
 
 	for (i = 0; i < MAX_BEARERS; i++) {
-		b = rcu_dereference_rtnl(tn->bearer_list[i]);
+		b = bearer_get(net, i);
+		if (b)
+			clear_bit_unlock(0, &b->up);
+	}
+	for (i = 0; i < MAX_BEARERS; i++) {
+		b = bearer_get(net, i);
 		if (b)
 			tipc_reset_bearer(net, b);
 	}
+	for (i = 0; i < MAX_BEARERS; i++) {
+		b = bearer_get(net, i);
+		if (b)
+			test_and_set_bit_lock(0, &b->up);
+	}
 }
 
 /**
@@ -382,8 +399,9 @@
 	int bearer_id = b->identity;
 
 	pr_info("Disabling bearer <%s>\n", b->name);
-	b->media->disable_media(b);
+	clear_bit_unlock(0, &b->up);
 	tipc_node_delete_links(net, bearer_id);
+	b->media->disable_media(b);
 	RCU_INIT_POINTER(b->media_ptr, NULL);
 	if (b->link_req)
 		tipc_disc_delete(b->link_req);
@@ -440,22 +458,16 @@
 {
 	struct net_device *dev;
 	int delta;
-	void *tipc_ptr;
 
 	dev = (struct net_device *)rcu_dereference_rtnl(b->media_ptr);
 	if (!dev)
 		return 0;
 
-	/* Send RESET message even if bearer is detached from device */
-	tipc_ptr = rcu_dereference_rtnl(dev->tipc_ptr);
-	if (unlikely(!tipc_ptr && !msg_is_reset(buf_msg(skb))))
-		goto drop;
-
-	delta = dev->hard_header_len - skb_headroom(skb);
-	if ((delta > 0) &&
-	    pskb_expand_head(skb, SKB_DATA_ALIGN(delta), 0, GFP_ATOMIC))
-		goto drop;
-
+	delta = SKB_DATA_ALIGN(dev->hard_header_len - skb_headroom(skb));
+	if ((delta > 0) && pskb_expand_head(skb, delta, 0, GFP_ATOMIC)) {
+		kfree_skb(skb);
+		return 0;
+	}
 	skb_reset_network_header(skb);
 	skb->dev = dev;
 	skb->protocol = htons(ETH_P_TIPC);
@@ -463,9 +475,6 @@
 			dev->dev_addr, skb->len);
 	dev_queue_xmit(skb);
 	return 0;
-drop:
-	kfree_skb(skb);
-	return 0;
 }
 
 int tipc_bearer_mtu(struct net *net, u32 bearer_id)
@@ -487,12 +496,12 @@
 			  struct sk_buff *skb,
 			  struct tipc_media_addr *dest)
 {
-	struct tipc_net *tn = tipc_net(net);
+	struct tipc_msg *hdr = buf_msg(skb);
 	struct tipc_bearer *b;
 
 	rcu_read_lock();
-	b = rcu_dereference_rtnl(tn->bearer_list[bearer_id]);
-	if (likely(b))
+	b = bearer_get(net, bearer_id);
+	if (likely(b && (test_bit(0, &b->up) || msg_is_reset(hdr))))
 		b->media->send_msg(net, skb, b, dest);
 	else
 		kfree_skb(skb);
@@ -505,7 +514,6 @@
 		      struct sk_buff_head *xmitq,
 		      struct tipc_media_addr *dst)
 {
-	struct tipc_net *tn = net_generic(net, tipc_net_id);
 	struct tipc_bearer *b;
 	struct sk_buff *skb, *tmp;
 
@@ -513,12 +521,15 @@
 		return;
 
 	rcu_read_lock();
-	b = rcu_dereference_rtnl(tn->bearer_list[bearer_id]);
+	b = bearer_get(net, bearer_id);
 	if (unlikely(!b))
 		__skb_queue_purge(xmitq);
 	skb_queue_walk_safe(xmitq, skb, tmp) {
 		__skb_dequeue(xmitq);
-		b->media->send_msg(net, skb, b, dst);
+		if (likely(test_bit(0, &b->up) || msg_is_reset(buf_msg(skb))))
+			b->media->send_msg(net, skb, b, dst);
+		else
+			kfree_skb(skb);
 	}
 	rcu_read_unlock();
 }
@@ -535,8 +546,8 @@
 	struct tipc_msg *hdr;
 
 	rcu_read_lock();
-	b = rcu_dereference_rtnl(tn->bearer_list[bearer_id]);
-	if (unlikely(!b))
+	b = bearer_get(net, bearer_id);
+	if (unlikely(!b || !test_bit(0, &b->up)))
 		__skb_queue_purge(xmitq);
 	skb_queue_walk_safe(xmitq, skb, tmp) {
 		hdr = buf_msg(skb);
@@ -566,7 +577,8 @@
 
 	rcu_read_lock();
 	b = rcu_dereference_rtnl(dev->tipc_ptr);
-	if (likely(b && (skb->pkt_type <= PACKET_BROADCAST))) {
+	if (likely(b && test_bit(0, &b->up) &&
+		   (skb->pkt_type <= PACKET_BROADCAST))) {
 		skb->next = NULL;
 		tipc_rcv(dev_net(dev), skb, b);
 		rcu_read_unlock();
@@ -591,18 +603,9 @@
 {
 	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
 	struct net *net = dev_net(dev);
-	struct tipc_net *tn = tipc_net(net);
 	struct tipc_bearer *b;
-	int i;
 
 	b = rtnl_dereference(dev->tipc_ptr);
-	if (!b) {
-		for (i = 0; i < MAX_BEARERS; b = NULL, i++) {
-			b = rtnl_dereference(tn->bearer_list[i]);
-			if (b && (b->media_ptr == dev))
-				break;
-		}
-	}
 	if (!b)
 		return NOTIFY_DONE;
 
@@ -613,11 +616,10 @@
 		if (netif_carrier_ok(dev))
 			break;
 	case NETDEV_UP:
-		rcu_assign_pointer(dev->tipc_ptr, b);
+		test_and_set_bit_lock(0, &b->up);
 		break;
 	case NETDEV_GOING_DOWN:
-		RCU_INIT_POINTER(dev->tipc_ptr, NULL);
-		synchronize_net();
+		clear_bit_unlock(0, &b->up);
 		tipc_reset_bearer(net, b);
 		break;
 	case NETDEV_CHANGEMTU:
diff --git a/net/tipc/bearer.h b/net/tipc/bearer.h
index 43757f1..83a9abb 100644
--- a/net/tipc/bearer.h
+++ b/net/tipc/bearer.h
@@ -150,6 +150,7 @@
 	u32 identity;
 	struct tipc_link_req *link_req;
 	char net_plane;
+	unsigned long up;
 };
 
 struct tipc_bearer_names {
diff --git a/net/tipc/link.c b/net/tipc/link.c
index 877d94f..2c6e1b9 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -807,7 +807,7 @@
 
 	skb_queue_walk_safe(&l->wakeupq, skb, tmp) {
 		imp = TIPC_SKB_CB(skb)->chain_imp;
-		lim = l->window + l->backlog[imp].limit;
+		lim = l->backlog[imp].limit;
 		pnd[imp] += TIPC_SKB_CB(skb)->chain_sz;
 		if ((pnd[imp] + l->backlog[imp].len) >= lim)
 			break;
@@ -873,9 +873,11 @@
 	struct sk_buff *skb, *_skb, *bskb;
 
 	/* Match msg importance against this and all higher backlog limits: */
-	for (i = imp; i <= TIPC_SYSTEM_IMPORTANCE; i++) {
-		if (unlikely(l->backlog[i].len >= l->backlog[i].limit))
-			return link_schedule_user(l, list);
+	if (!skb_queue_empty(backlogq)) {
+		for (i = imp; i <= TIPC_SYSTEM_IMPORTANCE; i++) {
+			if (unlikely(l->backlog[i].len >= l->backlog[i].limit))
+				return link_schedule_user(l, list);
+		}
 	}
 	if (unlikely(msg_size(hdr) > mtu)) {
 		skb_queue_purge(list);
@@ -1692,10 +1694,10 @@
 	int max_bulk = TIPC_MAX_PUBLICATIONS / (l->mtu / ITEM_SIZE);
 
 	l->window = win;
-	l->backlog[TIPC_LOW_IMPORTANCE].limit      = win / 2;
-	l->backlog[TIPC_MEDIUM_IMPORTANCE].limit   = win;
-	l->backlog[TIPC_HIGH_IMPORTANCE].limit     = win / 2 * 3;
-	l->backlog[TIPC_CRITICAL_IMPORTANCE].limit = win * 2;
+	l->backlog[TIPC_LOW_IMPORTANCE].limit      = max_t(u16, 50, win);
+	l->backlog[TIPC_MEDIUM_IMPORTANCE].limit   = max_t(u16, 100, win * 2);
+	l->backlog[TIPC_HIGH_IMPORTANCE].limit     = max_t(u16, 150, win * 3);
+	l->backlog[TIPC_CRITICAL_IMPORTANCE].limit = max_t(u16, 200, win * 4);
 	l->backlog[TIPC_SYSTEM_IMPORTANCE].limit   = max_bulk;
 }
 
diff --git a/net/tipc/udp_media.c b/net/tipc/udp_media.c
index b016c01..33bdf54 100644
--- a/net/tipc/udp_media.c
+++ b/net/tipc/udp_media.c
@@ -224,7 +224,7 @@
 	rcu_read_lock();
 	b = rcu_dereference_rtnl(ub->bearer);
 
-	if (b) {
+	if (b && test_bit(0, &b->up)) {
 		tipc_rcv(sock_net(sk), skb, b);
 		rcu_read_unlock();
 		return 0;