blob: d345c61d476cd49a28a143e4578ac2d8429462c2 [file] [log] [blame]
Jeff Garzik6aa20a22006-09-13 13:24:59 -04001/* drivers/net/ifb.c:
Jamal Hadi Salim253af422006-01-08 22:34:25 -08002
3 The purpose of this driver is to provide a device that allows
4 for sharing of resources:
5
6 1) qdiscs/policies that are per device as opposed to system wide.
7 ifb allows for a device which can be redirected to thus providing
8 an impression of sharing.
9
10 2) Allows for queueing incoming traffic for shaping instead of
Jeff Garzik6aa20a22006-09-13 13:24:59 -040011 dropping.
12
Jamal Hadi Salim253af422006-01-08 22:34:25 -080013 The original concept is based on what is known as the IMQ
14 driver initially written by Martin Devera, later rewritten
15 by Patrick McHardy and then maintained by Andre Correa.
16
17 You need the tc action mirror or redirect to feed this device
18 packets.
19
20 This program is free software; you can redistribute it and/or
21 modify it under the terms of the GNU General Public License
22 as published by the Free Software Foundation; either version
23 2 of the License, or (at your option) any later version.
Jeff Garzik6aa20a22006-09-13 13:24:59 -040024
Jamal Hadi Salim253af422006-01-08 22:34:25 -080025 Authors: Jamal Hadi Salim (2005)
Jeff Garzik6aa20a22006-09-13 13:24:59 -040026
Jamal Hadi Salim253af422006-01-08 22:34:25 -080027*/
28
29
Jamal Hadi Salim253af422006-01-08 22:34:25 -080030#include <linux/module.h>
31#include <linux/kernel.h>
32#include <linux/netdevice.h>
33#include <linux/etherdevice.h>
34#include <linux/init.h>
Alexey Dobriyana6b7a402011-06-06 10:43:46 +000035#include <linux/interrupt.h>
Jamal Hadi Salim253af422006-01-08 22:34:25 -080036#include <linux/moduleparam.h>
Jeff Garzik6aa20a22006-09-13 13:24:59 -040037#include <net/pkt_sched.h>
Eric W. Biederman881d9662007-09-17 11:56:21 -070038#include <net/net_namespace.h>
Jamal Hadi Salim253af422006-01-08 22:34:25 -080039
/* Max number of skbs allowed to sit on a queue's rq before the
 * corresponding netdev tx queue is stopped (see ifb_xmit()).
 */
#define TX_Q_LIMIT	32

/* Per-tx-queue state.  ifb_xmit() stashes redirected skbs on @rq (under
 * the tx queue lock) and schedules @ifb_tasklet; the tasklet splices @rq
 * into @tq and replays each skb out of softirq context.
 */
struct ifb_q_private {
	struct net_device	*dev;		/* owning ifb device */
	struct tasklet_struct	ifb_tasklet;	/* drains rq/tq, see ifb_ri_tasklet() */
	int			tasklet_pending; /* set while a tasklet run is scheduled */
	int			txqnum;		/* index of this queue on @dev */
	struct sk_buff_head	rq;		/* skbs queued by ifb_xmit(), not yet replayed */
	u64			rx_packets;	/* ingress counters, guarded by @rsync */
	u64			rx_bytes;
	struct u64_stats_sync	rsync;

	struct u64_stats_sync	tsync;		/* guards the egress counters below */
	u64			tx_packets;
	u64			tx_bytes;
	struct sk_buff_head	tq;		/* skbs currently being replayed by the tasklet */
} ____cacheline_aligned_in_smp;
56
/* Device private area: the kcalloc'ed array of per-queue state, one
 * ifb_q_private per tx queue.  Allocated in ifb_dev_init(), freed in
 * ifb_dev_free().
 */
struct ifb_dev_private {
	struct ifb_q_private *tx_private;
};
60
Stephen Hemminger424efe92009-08-31 19:50:51 +000061static netdev_tx_t ifb_xmit(struct sk_buff *skb, struct net_device *dev);
Jamal Hadi Salim253af422006-01-08 22:34:25 -080062static int ifb_open(struct net_device *dev);
63static int ifb_close(struct net_device *dev);
64
/* Per-queue tasklet: replay the skbs that ifb_xmit() queued on rq.
 *
 * @_txp: the queue's ifb_q_private, cast from the tasklet's data word.
 *
 * tq is private to this tasklet, so it is drained without a lock; rq is
 * shared with ifb_xmit() and is only touched under the tx queue lock.
 * Whenever the lock cannot be taken with trylock, the tasklet simply
 * reschedules itself instead of spinning (the "resched" label).
 */
static void ifb_ri_tasklet(unsigned long _txp)
{
	struct ifb_q_private *txp = (struct ifb_q_private *)_txp;
	struct netdev_queue *txq;
	struct sk_buff *skb;

	txq = netdev_get_tx_queue(txp->dev, txp->txqnum);
	skb = skb_peek(&txp->tq);
	if (!skb) {
		/* tq is empty: refill it from rq under the tx queue lock. */
		if (!__netif_tx_trylock(txq))
			goto resched;
		skb_queue_splice_tail_init(&txp->rq, &txp->tq);
		__netif_tx_unlock(txq);
	}

	while ((skb = __skb_dequeue(&txp->tq)) != NULL) {
		/* Clear the redirect mark and flag the skb so that it is
		 * not classified again when we re-inject it below.
		 */
		skb->tc_redirected = 0;
		skb->tc_skip_classify = 1;

		u64_stats_update_begin(&txp->tsync);
		txp->tx_packets++;
		txp->tx_bytes += skb->len;
		u64_stats_update_end(&txp->tsync);

		/* Look up the device the skb originally arrived on; it may
		 * have disappeared while the skb sat on our queues.
		 */
		rcu_read_lock();
		skb->dev = dev_get_by_index_rcu(dev_net(txp->dev), skb->skb_iif);
		if (!skb->dev) {
			/* Original device is gone: drop, and keep going if
			 * more skbs are pending.
			 */
			rcu_read_unlock();
			dev_kfree_skb(skb);
			txp->dev->stats.tx_dropped++;
			if (skb_queue_len(&txp->tq) != 0)
				goto resched;
			break;
		}
		rcu_read_unlock();
		/* Record that the skb passed through this ifb device. */
		skb->skb_iif = txp->dev->ifindex;

		if (!skb->tc_from_ingress) {
			/* Redirected from egress: transmit normally. */
			dev_queue_xmit(skb);
		} else {
			/* Redirected from ingress: strip the link-layer
			 * header (and fix the checksum) before handing the
			 * skb back to the receive path.
			 */
			skb_pull_rcsum(skb, skb->mac_len);
			netif_receive_skb(skb);
		}
	}

	if (__netif_tx_trylock(txq)) {
		skb = skb_peek(&txp->rq);
		if (!skb) {
			/* Nothing pending: clear the flag (still under the
			 * queue lock, so no race with ifb_xmit()) and wake
			 * the queue if it was flow-controlled.
			 */
			txp->tasklet_pending = 0;
			if (netif_tx_queue_stopped(txq))
				netif_tx_wake_queue(txq);
		} else {
			__netif_tx_unlock(txq);
			goto resched;
		}
		__netif_tx_unlock(txq);
	} else {
resched:
		/* Could not take the lock or more work remains: run again. */
		txp->tasklet_pending = 1;
		tasklet_schedule(&txp->ifb_tasklet);
	}

}
128
stephen hemmingerbc1f4472017-01-06 19:12:52 -0800129static void ifb_stats64(struct net_device *dev,
130 struct rtnl_link_stats64 *stats)
stephen hemminger3b0c9cb2011-06-20 11:42:30 +0000131{
Eric Dumazet9e29e21a2015-07-06 22:05:28 +0200132 struct ifb_dev_private *dp = netdev_priv(dev);
133 struct ifb_q_private *txp = dp->tx_private;
stephen hemminger3b0c9cb2011-06-20 11:42:30 +0000134 unsigned int start;
Eric Dumazet9e29e21a2015-07-06 22:05:28 +0200135 u64 packets, bytes;
136 int i;
stephen hemminger3b0c9cb2011-06-20 11:42:30 +0000137
Eric Dumazet9e29e21a2015-07-06 22:05:28 +0200138 for (i = 0; i < dev->num_tx_queues; i++,txp++) {
139 do {
140 start = u64_stats_fetch_begin_irq(&txp->rsync);
141 packets = txp->rx_packets;
142 bytes = txp->rx_bytes;
143 } while (u64_stats_fetch_retry_irq(&txp->rsync, start));
144 stats->rx_packets += packets;
145 stats->rx_bytes += bytes;
stephen hemminger3b0c9cb2011-06-20 11:42:30 +0000146
Eric Dumazet9e29e21a2015-07-06 22:05:28 +0200147 do {
148 start = u64_stats_fetch_begin_irq(&txp->tsync);
149 packets = txp->tx_packets;
150 bytes = txp->tx_bytes;
151 } while (u64_stats_fetch_retry_irq(&txp->tsync, start));
152 stats->tx_packets += packets;
153 stats->tx_bytes += bytes;
154 }
stephen hemminger3b0c9cb2011-06-20 11:42:30 +0000155 stats->rx_dropped = dev->stats.rx_dropped;
156 stats->tx_dropped = dev->stats.tx_dropped;
stephen hemminger3b0c9cb2011-06-20 11:42:30 +0000157}
158
Eric Dumazet9e29e21a2015-07-06 22:05:28 +0200159static int ifb_dev_init(struct net_device *dev)
160{
161 struct ifb_dev_private *dp = netdev_priv(dev);
162 struct ifb_q_private *txp;
163 int i;
164
165 txp = kcalloc(dev->num_tx_queues, sizeof(*txp), GFP_KERNEL);
166 if (!txp)
167 return -ENOMEM;
168 dp->tx_private = txp;
169 for (i = 0; i < dev->num_tx_queues; i++,txp++) {
170 txp->txqnum = i;
171 txp->dev = dev;
172 __skb_queue_head_init(&txp->rq);
173 __skb_queue_head_init(&txp->tq);
174 u64_stats_init(&txp->rsync);
175 u64_stats_init(&txp->tsync);
176 tasklet_init(&txp->ifb_tasklet, ifb_ri_tasklet,
177 (unsigned long)txp);
178 netif_tx_start_queue(netdev_get_tx_queue(dev, i));
179 }
180 return 0;
181}
stephen hemminger3b0c9cb2011-06-20 11:42:30 +0000182
/* Netdevice callbacks for ifb devices. */
static const struct net_device_ops ifb_netdev_ops = {
	.ndo_open	= ifb_open,
	.ndo_stop	= ifb_close,
	.ndo_get_stats64 = ifb_stats64,
	.ndo_start_xmit	= ifb_xmit,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_init	= ifb_dev_init,
};
191
/* Offload features advertised by ifb devices; VLAN tx offloads are
 * excluded from vlan_features in ifb_setup().
 */
#define IFB_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG  | NETIF_F_FRAGLIST	| \
		      NETIF_F_TSO_ECN | NETIF_F_TSO | NETIF_F_TSO6	| \
		      NETIF_F_GSO_ENCAP_ALL 				| \
		      NETIF_F_HIGHDMA | NETIF_F_HW_VLAN_CTAG_TX		| \
		      NETIF_F_HW_VLAN_STAG_TX)
Eric Dumazet39980292011-01-03 10:35:22 +0000197
Eric Dumazet9e29e21a2015-07-06 22:05:28 +0200198static void ifb_dev_free(struct net_device *dev)
199{
200 struct ifb_dev_private *dp = netdev_priv(dev);
201 struct ifb_q_private *txp = dp->tx_private;
202 int i;
203
204 for (i = 0; i < dev->num_tx_queues; i++,txp++) {
205 tasklet_kill(&txp->ifb_tasklet);
206 __skb_queue_purge(&txp->rq);
207 __skb_queue_purge(&txp->tq);
208 }
209 kfree(dp->tx_private);
Eric Dumazet9e29e21a2015-07-06 22:05:28 +0200210}
211
/* Set up an ifb net_device: ethernet-like, no ARP, no multicast, with
 * a random MAC and the offload feature set from IFB_FEATURES.
 */
static void ifb_setup(struct net_device *dev)
{
	/* Initialize the device structure. */
	dev->netdev_ops = &ifb_netdev_ops;

	/* Fill in device structure with ethernet-generic values. */
	ether_setup(dev);
	dev->tx_queue_len = TX_Q_LIMIT;

	dev->features |= IFB_FEATURES;
	dev->hw_features |= dev->features;
	dev->hw_enc_features |= dev->features;
	/* VLAN tx offloads are not propagated to vlan devices on top. */
	dev->vlan_features |= IFB_FEATURES & ~(NETIF_F_HW_VLAN_CTAG_TX |
					       NETIF_F_HW_VLAN_STAG_TX);

	dev->flags |= IFF_NOARP;
	dev->flags &= ~IFF_MULTICAST;
	dev->priv_flags &= ~IFF_TX_SKB_SHARING;
	netif_keep_dst(dev);
	eth_hw_addr_random(dev);
	/* Core frees the netdev; our destructor releases queue state. */
	dev->needs_free_netdev = true;
	dev->priv_destructor = ifb_dev_free;

	/* No MTU restrictions beyond the core defaults. */
	dev->min_mtu = 0;
	dev->max_mtu = 0;
}
238
/* ndo_start_xmit: accept an skb redirected to the ifb device.
 *
 * The skb is queued on the per-queue rq and handed to the tasklet for
 * replay.  Skbs that did not arrive via tc redirect (or lack a valid
 * originating ifindex) are dropped.  Note the rx byte/packet counters
 * are bumped before the redirect check, so drops are still counted as
 * received; the drop itself shows up in rx_dropped.
 */
static netdev_tx_t ifb_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ifb_dev_private *dp = netdev_priv(dev);
	struct ifb_q_private *txp = dp->tx_private + skb_get_queue_mapping(skb);

	u64_stats_update_begin(&txp->rsync);
	txp->rx_packets++;
	txp->rx_bytes += skb->len;
	u64_stats_update_end(&txp->rsync);

	if (!skb->tc_redirected || !skb->skb_iif) {
		dev_kfree_skb(skb);
		dev->stats.rx_dropped++;
		return NETDEV_TX_OK;
	}

	/* Flow control: stop the queue once rq reaches the queue limit;
	 * the tasklet wakes it again after draining.
	 */
	if (skb_queue_len(&txp->rq) >= dev->tx_queue_len)
		netif_tx_stop_queue(netdev_get_tx_queue(dev, txp->txqnum));

	/* Safe without extra locking: we run under this tx queue's lock. */
	__skb_queue_tail(&txp->rq, skb);
	if (!txp->tasklet_pending) {
		txp->tasklet_pending = 1;
		tasklet_schedule(&txp->ifb_tasklet);
	}

	return NETDEV_TX_OK;
}
266
/* ndo_stop: stop all tx queues; pending skbs are purged at destroy time. */
static int ifb_close(struct net_device *dev)
{
	netif_tx_stop_all_queues(dev);
	return 0;
}
272
/* ndo_open: start all tx queues so redirected traffic can flow. */
static int ifb_open(struct net_device *dev)
{
	netif_tx_start_all_queues(dev);
	return 0;
}
278
Matthias Schiffera8b8a8892017-06-25 23:56:01 +0200279static int ifb_validate(struct nlattr *tb[], struct nlattr *data[],
280 struct netlink_ext_ack *extack)
Patrick McHardy0e068772007-07-11 19:42:31 -0700281{
282 if (tb[IFLA_ADDRESS]) {
283 if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
284 return -EINVAL;
285 if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
286 return -EADDRNOTAVAIL;
287 }
288 return 0;
289}
290
/* rtnetlink ops: lets "ip link add ... type ifb" create devices. */
static struct rtnl_link_ops ifb_link_ops __read_mostly = {
	.kind		= "ifb",
	.priv_size	= sizeof(struct ifb_dev_private),
	.setup		= ifb_setup,
	.validate	= ifb_validate,
};
297
/* Number of ifb devices to be set up by this module.
 * Note that these legacy devices have one queue.
 * Prefer something like : ip link add ifb10 numtxqueues 8 type ifb
 */
static int numifbs = 2;
module_param(numifbs, int, 0);
MODULE_PARM_DESC(numifbs, "Number of ifb devices");
305
Jamal Hadi Salim253af422006-01-08 22:34:25 -0800306static int __init ifb_init_one(int index)
307{
308 struct net_device *dev_ifb;
309 int err;
310
Eric Dumazet9e29e21a2015-07-06 22:05:28 +0200311 dev_ifb = alloc_netdev(sizeof(struct ifb_dev_private), "ifb%d",
Tom Gundersenc835a672014-07-14 16:37:24 +0200312 NET_NAME_UNKNOWN, ifb_setup);
Jamal Hadi Salim253af422006-01-08 22:34:25 -0800313
314 if (!dev_ifb)
315 return -ENOMEM;
316
Patrick McHardy9ba2cd62007-06-13 12:05:06 -0700317 dev_ifb->rtnl_link_ops = &ifb_link_ops;
318 err = register_netdevice(dev_ifb);
319 if (err < 0)
320 goto err;
Jarek Poplawski94833df2008-03-20 17:05:13 -0700321
Patrick McHardy9ba2cd62007-06-13 12:05:06 -0700322 return 0;
323
324err:
325 free_netdev(dev_ifb);
Jamal Hadi Salim253af422006-01-08 22:34:25 -0800326 return err;
327}
328
/* Module init: register the link ops and create numifbs devices.
 *
 * pernet_ops_rwsem is taken before rtnl (matching the locking order used
 * by the __rtnl_link_register()/__rtnl_link_unregister() variants).  If
 * any device creation fails, the link ops are unregistered again, which
 * also tears down the devices created so far.
 */
static int __init ifb_init_module(void)
{
	int i, err;

	down_write(&pernet_ops_rwsem);
	rtnl_lock();
	err = __rtnl_link_register(&ifb_link_ops);
	if (err < 0)
		goto out;

	for (i = 0; i < numifbs && !err; i++) {
		err = ifb_init_one(i);
		/* Be nice to the scheduler when creating many devices. */
		cond_resched();
	}
	if (err)
		__rtnl_link_unregister(&ifb_link_ops);

out:
	rtnl_unlock();
	up_write(&pernet_ops_rwsem);

	return err;
}
Jamal Hadi Salim253af422006-01-08 22:34:25 -0800352
/* Module exit: unregistering the link ops destroys every remaining ifb
 * device (both module-created and rtnetlink-created ones).
 */
static void __exit ifb_cleanup_module(void)
{
	rtnl_link_unregister(&ifb_link_ops);
}
357
358module_init(ifb_init_module);
359module_exit(ifb_cleanup_module);
360MODULE_LICENSE("GPL");
361MODULE_AUTHOR("Jamal Hadi Salim");
Patrick McHardy9ba2cd62007-06-13 12:05:06 -0700362MODULE_ALIAS_RTNL_LINK("ifb");