// SPDX-License-Identifier: GPL-2.0-or-later
/* drivers/net/ifb.c:

	The purpose of this driver is to provide a device that allows
	for sharing of resources:

	1) qdiscs/policies that are per device as opposed to system wide.
	ifb allows for a device which can be redirected to, thus providing
	an impression of sharing.

	2) Allows for queueing incoming traffic for shaping instead of
	dropping.

	The original concept is based on what is known as the IMQ
	driver, initially written by Martin Devera, later rewritten
	by Patrick McHardy and then maintained by Andre Correa.

	You need the tc action mirror or redirect to feed this device
	packets.

	Authors:	Jamal Hadi Salim (2005)
*/
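
/* A minimal usage sketch (illustrative only; the interface names and
 * the matchall filter below are assumptions, not part of this driver):
 *
 *	modprobe ifb numifbs=1
 *	ip link set dev ifb0 up
 *	tc qdisc add dev eth0 handle ffff: ingress
 *	tc filter add dev eth0 parent ffff: matchall \
 *		action mirred egress redirect dev ifb0
 *	tc qdisc add dev ifb0 root sfq perturb 10
 *
 * Ingress traffic arriving on eth0 is redirected to ifb0, where an
 * ordinary egress qdisc (sfq here) can queue and shape it.
 */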

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/moduleparam.h>
#include <net/pkt_sched.h>
#include <net/net_namespace.h>

#define TX_Q_LIMIT	32

struct ifb_q_private {
	struct net_device	*dev;
	struct tasklet_struct	ifb_tasklet;
	int			tasklet_pending;
	int			txqnum;
	struct sk_buff_head	rq;
	u64			rx_packets;
	u64			rx_bytes;
	struct u64_stats_sync	rsync;

	struct u64_stats_sync	tsync;
	u64			tx_packets;
	u64			tx_bytes;
	struct sk_buff_head	tq;
} ____cacheline_aligned_in_smp;

struct ifb_dev_private {
	struct ifb_q_private *tx_private;
};

static netdev_tx_t ifb_xmit(struct sk_buff *skb, struct net_device *dev);
static int ifb_open(struct net_device *dev);
static int ifb_close(struct net_device *dev);
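
/* ifb_ri_tasklet() drains the packets that ifb_xmit() queued on rq:
 * under the tx queue lock it splices rq onto tq, then re-injects each
 * skb, either up the stack via netif_receive_skb() for packets that
 * were redirected from ingress, or back out via dev_queue_xmit()
 * otherwise.
 */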
static void ifb_ri_tasklet(struct tasklet_struct *t)
{
	struct ifb_q_private *txp = from_tasklet(txp, t, ifb_tasklet);
	struct netdev_queue *txq;
	struct sk_buff *skb;

	txq = netdev_get_tx_queue(txp->dev, txp->txqnum);
	skb = skb_peek(&txp->tq);
	if (!skb) {
		if (!__netif_tx_trylock(txq))
			goto resched;
		skb_queue_splice_tail_init(&txp->rq, &txp->tq);
		__netif_tx_unlock(txq);
	}

	while ((skb = __skb_dequeue(&txp->tq)) != NULL) {
		skb->redirected = 0;
		skb->tc_skip_classify = 1;

		u64_stats_update_begin(&txp->tsync);
		txp->tx_packets++;
		txp->tx_bytes += skb->len;
		u64_stats_update_end(&txp->tsync);

		rcu_read_lock();
		skb->dev = dev_get_by_index_rcu(dev_net(txp->dev), skb->skb_iif);
		if (!skb->dev) {
			rcu_read_unlock();
			dev_kfree_skb(skb);
			txp->dev->stats.tx_dropped++;
			if (skb_queue_len(&txp->tq) != 0)
				goto resched;
			break;
		}
		rcu_read_unlock();
		skb->skb_iif = txp->dev->ifindex;

		if (!skb->from_ingress) {
			dev_queue_xmit(skb);
		} else {
			skb_pull_rcsum(skb, skb->mac_len);
			netif_receive_skb(skb);
		}
	}

	if (__netif_tx_trylock(txq)) {
		skb = skb_peek(&txp->rq);
		if (!skb) {
			txp->tasklet_pending = 0;
			if (netif_tx_queue_stopped(txq))
				netif_tx_wake_queue(txq);
		} else {
			__netif_tx_unlock(txq);
			goto resched;
		}
		__netif_tx_unlock(txq);
	} else {
resched:
		txp->tasklet_pending = 1;
		tasklet_schedule(&txp->ifb_tasklet);
	}
}
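
/* Per-queue counters are read with the u64_stats fetch/retry pair,
 * which rereads a counter snapshot if a writer updated it mid-read.
 * This yields tear-free 64-bit stats on 32-bit hosts without taking
 * a lock on the hot path.
 */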
static void ifb_stats64(struct net_device *dev,
			struct rtnl_link_stats64 *stats)
{
	struct ifb_dev_private *dp = netdev_priv(dev);
	struct ifb_q_private *txp = dp->tx_private;
	unsigned int start;
	u64 packets, bytes;
	int i;

	for (i = 0; i < dev->num_tx_queues; i++, txp++) {
		do {
			start = u64_stats_fetch_begin_irq(&txp->rsync);
			packets = txp->rx_packets;
			bytes = txp->rx_bytes;
		} while (u64_stats_fetch_retry_irq(&txp->rsync, start));
		stats->rx_packets += packets;
		stats->rx_bytes += bytes;

		do {
			start = u64_stats_fetch_begin_irq(&txp->tsync);
			packets = txp->tx_packets;
			bytes = txp->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&txp->tsync, start));
		stats->tx_packets += packets;
		stats->tx_bytes += bytes;
	}
	stats->rx_dropped = dev->stats.rx_dropped;
	stats->tx_dropped = dev->stats.tx_dropped;
}

static int ifb_dev_init(struct net_device *dev)
{
	struct ifb_dev_private *dp = netdev_priv(dev);
	struct ifb_q_private *txp;
	int i;

	txp = kcalloc(dev->num_tx_queues, sizeof(*txp), GFP_KERNEL);
	if (!txp)
		return -ENOMEM;
	dp->tx_private = txp;
	for (i = 0; i < dev->num_tx_queues; i++, txp++) {
		txp->txqnum = i;
		txp->dev = dev;
		__skb_queue_head_init(&txp->rq);
		__skb_queue_head_init(&txp->tq);
		u64_stats_init(&txp->rsync);
		u64_stats_init(&txp->tsync);
		tasklet_setup(&txp->ifb_tasklet, ifb_ri_tasklet);
		netif_tx_start_queue(netdev_get_tx_queue(dev, i));
	}
	return 0;
}

static const struct net_device_ops ifb_netdev_ops = {
	.ndo_open		= ifb_open,
	.ndo_stop		= ifb_close,
	.ndo_get_stats64	= ifb_stats64,
	.ndo_start_xmit		= ifb_xmit,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_init		= ifb_dev_init,
};

#define IFB_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG | NETIF_F_FRAGLIST	| \
		      NETIF_F_GSO_SOFTWARE | NETIF_F_GSO_ENCAP_ALL	| \
		      NETIF_F_HIGHDMA | NETIF_F_HW_VLAN_CTAG_TX		| \
		      NETIF_F_HW_VLAN_STAG_TX)

static void ifb_dev_free(struct net_device *dev)
{
	struct ifb_dev_private *dp = netdev_priv(dev);
	struct ifb_q_private *txp = dp->tx_private;
	int i;

	for (i = 0; i < dev->num_tx_queues; i++, txp++) {
		tasklet_kill(&txp->ifb_tasklet);
		__skb_queue_purge(&txp->rq);
		__skb_queue_purge(&txp->tq);
	}
	kfree(dp->tx_private);
}

static void ifb_setup(struct net_device *dev)
{
	/* Initialize the device structure. */
	dev->netdev_ops = &ifb_netdev_ops;

	/* Fill in device structure with ethernet-generic values. */
	ether_setup(dev);
	dev->tx_queue_len = TX_Q_LIMIT;

	dev->features |= IFB_FEATURES;
	dev->hw_features |= dev->features;
	dev->hw_enc_features |= dev->features;
	dev->vlan_features |= IFB_FEATURES & ~(NETIF_F_HW_VLAN_CTAG_TX |
					       NETIF_F_HW_VLAN_STAG_TX);

	dev->flags |= IFF_NOARP;
	dev->flags &= ~IFF_MULTICAST;
	dev->priv_flags &= ~IFF_TX_SKB_SHARING;
	netif_keep_dst(dev);
	eth_hw_addr_random(dev);
	dev->needs_free_netdev = true;
	dev->priv_destructor = ifb_dev_free;

	dev->min_mtu = 0;
	dev->max_mtu = 0;
}
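
/* ifb_xmit() is the entry point for skbs redirected here by the mirred
 * action. Anything that did not arrive via a redirect is dropped;
 * accepted skbs are queued on rq and the tasklet is kicked, with the
 * tx queue stopped for backpressure once rq reaches tx_queue_len.
 */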
static netdev_tx_t ifb_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ifb_dev_private *dp = netdev_priv(dev);
	struct ifb_q_private *txp = dp->tx_private + skb_get_queue_mapping(skb);

	u64_stats_update_begin(&txp->rsync);
	txp->rx_packets++;
	txp->rx_bytes += skb->len;
	u64_stats_update_end(&txp->rsync);

	if (!skb->redirected || !skb->skb_iif) {
		dev_kfree_skb(skb);
		dev->stats.rx_dropped++;
		return NETDEV_TX_OK;
	}

	if (skb_queue_len(&txp->rq) >= dev->tx_queue_len)
		netif_tx_stop_queue(netdev_get_tx_queue(dev, txp->txqnum));

	__skb_queue_tail(&txp->rq, skb);
	if (!txp->tasklet_pending) {
		txp->tasklet_pending = 1;
		tasklet_schedule(&txp->ifb_tasklet);
	}

	return NETDEV_TX_OK;
}

static int ifb_close(struct net_device *dev)
{
	netif_tx_stop_all_queues(dev);
	return 0;
}

static int ifb_open(struct net_device *dev)
{
	netif_tx_start_all_queues(dev);
	return 0;
}

static int ifb_validate(struct nlattr *tb[], struct nlattr *data[],
			struct netlink_ext_ack *extack)
{
	if (tb[IFLA_ADDRESS]) {
		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
			return -EINVAL;
		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
			return -EADDRNOTAVAIL;
	}
	return 0;
}

static struct rtnl_link_ops ifb_link_ops __read_mostly = {
	.kind		= "ifb",
	.priv_size	= sizeof(struct ifb_dev_private),
	.setup		= ifb_setup,
	.validate	= ifb_validate,
};

/* Number of ifb devices to be set up by this module.
 * Note that these legacy devices have one queue.
 * Prefer something like: ip link add ifb10 numtxqueues 8 type ifb
 */
static int numifbs = 2;
module_param(numifbs, int, 0);
MODULE_PARM_DESC(numifbs, "Number of ifb devices");
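
/* Illustrative: "modprobe ifb numifbs=4" would create ifb0..ifb3 at
 * load time, each single-queue, as noted above.
 */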

static int __init ifb_init_one(int index)
{
	struct net_device *dev_ifb;
	int err;

	dev_ifb = alloc_netdev(sizeof(struct ifb_dev_private), "ifb%d",
			       NET_NAME_UNKNOWN, ifb_setup);
	if (!dev_ifb)
		return -ENOMEM;

	dev_ifb->rtnl_link_ops = &ifb_link_ops;
	err = register_netdevice(dev_ifb);
	if (err < 0)
		goto err;

	return 0;

err:
	free_netdev(dev_ifb);
	return err;
}

static int __init ifb_init_module(void)
{
	int i, err;

	down_write(&pernet_ops_rwsem);
	rtnl_lock();
	err = __rtnl_link_register(&ifb_link_ops);
	if (err < 0)
		goto out;

	for (i = 0; i < numifbs && !err; i++) {
		err = ifb_init_one(i);
		cond_resched();
	}
	if (err)
		__rtnl_link_unregister(&ifb_link_ops);

out:
	rtnl_unlock();
	up_write(&pernet_ops_rwsem);

	return err;
}

static void __exit ifb_cleanup_module(void)
{
	rtnl_link_unregister(&ifb_link_ops);
}

module_init(ifb_init_module);
module_exit(ifb_cleanup_module);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jamal Hadi Salim");
MODULE_ALIAS_RTNL_LINK("ifb");