net: bonding: factor out rlock(bond->lock) in xmit path
Pull read_lock(&bond->lock) and the BOND_IS_OK() check out of the
mode-dependent xmit functions and into bond_start_xmit().
netif_running() is always true in hard_start_xmit.
Signed-off-by: Michał Mirosław <mirq-linux@rere.qmqm.pl>
Signed-off-by: David S. Miller <davem@davemloft.net>
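For reference, a condensed sketch of the resulting entry point, assembled
from the bond_main.c hunks below; the per-mode dispatch inside
__bond_start_xmit() is elided, and the comments are descriptive additions
rather than part of the patch itself:

static netdev_tx_t bond_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bonding *bond = netdev_priv(dev);
	netdev_tx_t ret = NETDEV_TX_OK;

	/* If transmitting here risks a deadlock in the netpoll path,
	 * ask netpoll to queue the frame for later transmission.
	 */
	if (is_netpoll_tx_blocked(dev))
		return NETDEV_TX_BUSY;

	/* One read_lock now covers the slave list for every mode;
	 * the mode-dependent handlers run with it already held.
	 */
	read_lock(&bond->lock);

	if (bond->slave_cnt)
		ret = __bond_start_xmit(skb, dev);
	else
		/* No slaves: drop the frame instead of calling a mode handler. */
		dev_kfree_skb(skb);

	read_unlock(&bond->lock);

	return ret;
}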
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
index d4160f8..c7537abc 100644
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -2403,14 +2403,6 @@
struct ad_info ad_info;
int res = 1;
- /* make sure that the slaves list will
- * not change during tx
- */
- read_lock(&bond->lock);
-
- if (!BOND_IS_OK(bond))
- goto out;
-
if (bond_3ad_get_active_agg_info(bond, &ad_info)) {
pr_debug("%s: Error: bond_3ad_get_active_agg_info failed\n",
dev->name);
@@ -2464,7 +2456,7 @@
/* no suitable interface, frame not sent */
dev_kfree_skb(skb);
}
- read_unlock(&bond->lock);
+
return NETDEV_TX_OK;
}
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index 3b7b040..8f2d2e7 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -1225,16 +1225,10 @@
skb_reset_mac_header(skb);
eth_data = eth_hdr(skb);
- /* make sure that the curr_active_slave and the slaves list do
- * not change during tx
+ /* make sure that the curr_active_slave does not change during tx
*/
- read_lock(&bond->lock);
read_lock(&bond->curr_slave_lock);
- if (!BOND_IS_OK(bond)) {
- goto out;
- }
-
switch (ntohs(skb->protocol)) {
case ETH_P_IP: {
const struct iphdr *iph = ip_hdr(skb);
@@ -1334,13 +1328,12 @@
}
}
-out:
if (res) {
/* no suitable interface, frame not sent */
dev_kfree_skb(skb);
}
read_unlock(&bond->curr_slave_lock);
- read_unlock(&bond->lock);
+
return NETDEV_TX_OK;
}
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 9a5feaf..6312db1 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -4004,10 +4004,6 @@
int i, slave_no, res = 1;
struct iphdr *iph = ip_hdr(skb);
- read_lock(&bond->lock);
-
- if (!BOND_IS_OK(bond))
- goto out;
/*
* Start with the curr_active_slave that joined the bond as the
* default for sending IGMP traffic. For failover purposes one
@@ -4054,7 +4050,7 @@
/* no suitable interface, frame not sent */
dev_kfree_skb(skb);
}
- read_unlock(&bond->lock);
+
return NETDEV_TX_OK;
}
@@ -4068,24 +4064,18 @@
struct bonding *bond = netdev_priv(bond_dev);
int res = 1;
- read_lock(&bond->lock);
read_lock(&bond->curr_slave_lock);
- if (!BOND_IS_OK(bond))
- goto out;
+ if (bond->curr_active_slave)
+ res = bond_dev_queue_xmit(bond, skb,
+ bond->curr_active_slave->dev);
- if (!bond->curr_active_slave)
- goto out;
-
- res = bond_dev_queue_xmit(bond, skb, bond->curr_active_slave->dev);
-
-out:
if (res)
/* no suitable interface, frame not sent */
dev_kfree_skb(skb);
read_unlock(&bond->curr_slave_lock);
- read_unlock(&bond->lock);
+
return NETDEV_TX_OK;
}
@@ -4102,11 +4092,6 @@
int i;
int res = 1;
- read_lock(&bond->lock);
-
- if (!BOND_IS_OK(bond))
- goto out;
-
slave_no = bond->xmit_hash_policy(skb, bond->slave_cnt);
bond_for_each_slave(bond, slave, i) {
@@ -4126,12 +4111,11 @@
}
}
-out:
if (res) {
/* no suitable interface, frame not sent */
dev_kfree_skb(skb);
}
- read_unlock(&bond->lock);
+
return NETDEV_TX_OK;
}
@@ -4146,11 +4130,6 @@
int i;
int res = 1;
- read_lock(&bond->lock);
-
- if (!BOND_IS_OK(bond))
- goto out;
-
read_lock(&bond->curr_slave_lock);
start_at = bond->curr_active_slave;
read_unlock(&bond->curr_slave_lock);
@@ -4189,7 +4168,6 @@
dev_kfree_skb(skb);
/* frame sent to all suitable interfaces */
- read_unlock(&bond->lock);
return NETDEV_TX_OK;
}
@@ -4221,10 +4199,8 @@
struct slave *slave = NULL;
struct slave *check_slave;
- read_lock(&bond->lock);
-
- if (!BOND_IS_OK(bond) || !skb->queue_mapping)
- goto out;
+ if (!skb->queue_mapping)
+ return 1;
/* Find out if any slaves have the same mapping as this skb. */
bond_for_each_slave(bond, check_slave, i) {
@@ -4240,8 +4216,6 @@
res = bond_dev_queue_xmit(bond, skb, slave->dev);
}
-out:
- read_unlock(&bond->lock);
return res;
}
@@ -4263,17 +4237,10 @@
return txq;
}
-static netdev_tx_t bond_start_xmit(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t __bond_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct bonding *bond = netdev_priv(dev);
- /*
- * If we risk deadlock from transmitting this in the
- * netpoll path, tell netpoll to queue the frame for later tx
- */
- if (is_netpoll_tx_blocked(dev))
- return NETDEV_TX_BUSY;
-
if (TX_QUEUE_OVERRIDE(bond->params.mode)) {
if (!bond_slave_override(bond, skb))
return NETDEV_TX_OK;
@@ -4303,6 +4270,29 @@
}
}
+static netdev_tx_t bond_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct bonding *bond = netdev_priv(dev);
+ netdev_tx_t ret = NETDEV_TX_OK;
+
+ /*
+ * If we risk deadlock from transmitting this in the
+ * netpoll path, tell netpoll to queue the frame for later tx
+ */
+ if (is_netpoll_tx_blocked(dev))
+ return NETDEV_TX_BUSY;
+
+ read_lock(&bond->lock);
+
+ if (bond->slave_cnt)
+ ret = __bond_start_xmit(skb, dev);
+ else
+ dev_kfree_skb(skb);
+
+ read_unlock(&bond->lock);
+
+ return ret;
+}
/*
* set bond mode specific net device operations