net: remove skb recycling
Over time, the skb recycling infrastructure got little interest and
many bugs. The generic rx path now allocates skbs from page
fragments for efficient GRO / TCP coalescing, and recycling
a tx skb for the rx path is not worth the pain.
The last identified bug is that fat skbs can be recycled,
ending up using high-order pages after a few iterations.
Thanks to Maxime Bizon, who pointed out that commit
87151b8689d ("net: allow pskb_expand_head() to get maximum tailroom")
introduced this regression for recycled skbs.
Instead of fixing this bug, let's remove skb recycling.
Drivers wanting really hot skbs should use build_skb() anyway,
to allocate and populate the sk_buff right before netif_receive_skb().
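
A minimal sketch of that pattern, assuming the rx buffer is a page
fragment (e.g. from netdev_alloc_frag()) already filled by the
hardware; the my_rx_ring / my_rx_one names are hypothetical, not from
any driver touched here:

  #include <linux/etherdevice.h>
  #include <linux/netdevice.h>
  #include <linux/skbuff.h>

  struct my_rx_ring {			/* hypothetical driver state */
  	struct net_device *netdev;
  };

  static void my_rx_one(struct my_rx_ring *ring, void *data,
  			unsigned int frag_size, unsigned int len)
  {
  	struct sk_buff *skb;

  	/* Wrap the already-filled fragment in an sk_buff; frag_size
  	 * must cover the frame plus struct skb_shared_info, matching
  	 * how the fragment was originally allocated.
  	 */
  	skb = build_skb(data, frag_size);
  	if (unlikely(!skb)) {
  		put_page(virt_to_head_page(data));
  		return;
  	}
  	skb_reserve(skb, NET_SKB_PAD);	/* headroom the driver left */
  	skb_put(skb, len);
  	skb->protocol = eth_type_trans(skb, ring->netdev);
  	netif_receive_skb(skb);
  }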
Signed-off-by: Eric Dumazet <edumazet@google.com>
Cc: Maxime Bizon <mbizon@freebox.fr>
Signed-off-by: David S. Miller <davem@davemloft.net>
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index a1b52ec..1d03dcd 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -1765,7 +1765,6 @@
sizeof(struct rxbd8) * priv->total_rx_ring_size,
priv->tx_queue[0]->tx_bd_base,
priv->tx_queue[0]->tx_bd_dma_base);
- skb_queue_purge(&priv->rx_recycle);
}
void gfar_start(struct net_device *dev)
@@ -1943,8 +1942,6 @@
enable_napi(priv);
- skb_queue_head_init(&priv->rx_recycle);
-
/* Initialize a bunch of registers */
init_registers(dev);
@@ -2533,16 +2530,7 @@
bytes_sent += skb->len;
- /* If there's room in the queue (limit it to rx_buffer_size)
- * we add this skb back into the pool, if it's the right size
- */
- if (skb_queue_len(&priv->rx_recycle) < rx_queue->rx_ring_size &&
- skb_recycle_check(skb, priv->rx_buffer_size +
- RXBUF_ALIGNMENT)) {
- gfar_align_skb(skb);
- skb_queue_head(&priv->rx_recycle, skb);
- } else
- dev_kfree_skb_any(skb);
+ dev_kfree_skb_any(skb);
tx_queue->tx_skbuff[skb_dirtytx] = NULL;
@@ -2608,7 +2596,7 @@
static struct sk_buff *gfar_alloc_skb(struct net_device *dev)
{
struct gfar_private *priv = netdev_priv(dev);
- struct sk_buff *skb = NULL;
+ struct sk_buff *skb;
skb = netdev_alloc_skb(dev, priv->rx_buffer_size + RXBUF_ALIGNMENT);
if (!skb)
@@ -2621,14 +2609,7 @@
struct sk_buff *gfar_new_skb(struct net_device *dev)
{
- struct gfar_private *priv = netdev_priv(dev);
- struct sk_buff *skb = NULL;
-
- skb = skb_dequeue(&priv->rx_recycle);
- if (!skb)
- skb = gfar_alloc_skb(dev);
-
- return skb;
+ return gfar_alloc_skb(dev);
}
static inline void count_errors(unsigned short status, struct net_device *dev)
@@ -2787,7 +2768,7 @@
if (unlikely(!newskb))
newskb = skb;
else if (skb)
- skb_queue_head(&priv->rx_recycle, skb);
+ dev_kfree_skb(skb);
} else {
/* Increment the number of packets */
rx_queue->stats.rx_packets++;