/* drivers/net/ifb.c:

	The purpose of this driver is to provide a device that allows
	for sharing of resources:

	1) qdiscs/policies that are per device as opposed to system wide.
	ifb allows for a device which can be redirected to, thus providing
	an impression of sharing.

	2) Allows for queueing incoming traffic for shaping instead of
	dropping.

	The original concept is based on what is known as the IMQ
	driver initially written by Martin Devera, later rewritten
	by Patrick McHardy and then maintained by Andre Correa.

	You need the tc action mirror or redirect to feed this device
	packets.

	This program is free software; you can redistribute it and/or
	modify it under the terms of the GNU General Public License
	as published by the Free Software Foundation; either version
	2 of the License, or (at your option) any later version.

	Authors:	Jamal Hadi Salim (2005)

*/
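
/* Example usage (illustrative only; the interface names, handle and the
 * catch-all u32 match below are placeholders, not the only way to feed
 * this device): redirect ingress traffic from eth0 into ifb0 so the qdisc
 * attached to ifb0 can shape it before re-injection.
 *
 *	ip link set dev ifb0 up
 *	tc qdisc add dev eth0 handle ffff: ingress
 *	tc filter add dev eth0 parent ffff: protocol ip u32 match u32 0 0 \
 *		action mirred egress redirect dev ifb0
 */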

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/moduleparam.h>
#include <net/pkt_sched.h>
#include <net/net_namespace.h>

#define TX_Q_LIMIT 32
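/* Per-tx-queue state (one instance per netdev tx queue, see ifb_dev_init()):
 * ifb_xmit() queues redirected packets on rq; the tasklet splices rq onto tq
 * and re-injects the packets, with the rx and tx counters protected by the
 * u64_stats sync points.
 */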
struct ifb_q_private {
	struct net_device	*dev;
	struct tasklet_struct	ifb_tasklet;
	int			tasklet_pending;
	int			txqnum;
	struct sk_buff_head	rq;
	u64			rx_packets;
	u64			rx_bytes;
	struct u64_stats_sync	rsync;

	struct u64_stats_sync	tsync;
	u64			tx_packets;
	u64			tx_bytes;
	struct sk_buff_head	tq;
} ____cacheline_aligned_in_smp;

struct ifb_dev_private {
	struct ifb_q_private *tx_private;
};

static netdev_tx_t ifb_xmit(struct sk_buff *skb, struct net_device *dev);
static int ifb_open(struct net_device *dev);
static int ifb_close(struct net_device *dev);

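/* Per-queue tasklet: splice the packets queued by ifb_xmit() from rq onto tq
 * under the tx queue lock, look the original device up via skb->skb_iif, and
 * re-inject each packet with dev_queue_xmit() (came from egress) or
 * netif_receive_skb() (came from ingress, after pulling the mac header).
 */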
static void ifb_ri_tasklet(unsigned long _txp)
{
	struct ifb_q_private *txp = (struct ifb_q_private *)_txp;
	struct netdev_queue *txq;
	struct sk_buff *skb;

	txq = netdev_get_tx_queue(txp->dev, txp->txqnum);
	skb = skb_peek(&txp->tq);
	if (!skb) {
		if (!__netif_tx_trylock(txq))
			goto resched;
		skb_queue_splice_tail_init(&txp->rq, &txp->tq);
		__netif_tx_unlock(txq);
	}

	while ((skb = __skb_dequeue(&txp->tq)) != NULL) {
		u32 from = G_TC_FROM(skb->tc_verd);

		skb->tc_verd = 0;
		skb->tc_verd = SET_TC_NCLS(skb->tc_verd);

		u64_stats_update_begin(&txp->tsync);
		txp->tx_packets++;
		txp->tx_bytes += skb->len;
		u64_stats_update_end(&txp->tsync);

		rcu_read_lock();
		skb->dev = dev_get_by_index_rcu(dev_net(txp->dev), skb->skb_iif);
		if (!skb->dev) {
			rcu_read_unlock();
			dev_kfree_skb(skb);
			txp->dev->stats.tx_dropped++;
			if (skb_queue_len(&txp->tq) != 0)
				goto resched;
			break;
		}
		rcu_read_unlock();
		skb->skb_iif = txp->dev->ifindex;

		if (from & AT_EGRESS) {
			dev_queue_xmit(skb);
		} else if (from & AT_INGRESS) {
			skb_pull(skb, skb->mac_len);
			netif_receive_skb(skb);
		} else
			BUG();
	}

	if (__netif_tx_trylock(txq)) {
		skb = skb_peek(&txp->rq);
		if (!skb) {
			txp->tasklet_pending = 0;
			if (netif_tx_queue_stopped(txq))
				netif_tx_wake_queue(txq);
		} else {
			__netif_tx_unlock(txq);
			goto resched;
		}
		__netif_tx_unlock(txq);
	} else {
resched:
		txp->tasklet_pending = 1;
		tasklet_schedule(&txp->ifb_tasklet);
	}

}

static struct rtnl_link_stats64 *ifb_stats64(struct net_device *dev,
					     struct rtnl_link_stats64 *stats)
{
	struct ifb_dev_private *dp = netdev_priv(dev);
	struct ifb_q_private *txp = dp->tx_private;
	unsigned int start;
	u64 packets, bytes;
	int i;

	for (i = 0; i < dev->num_tx_queues; i++, txp++) {
		do {
			start = u64_stats_fetch_begin_irq(&txp->rsync);
			packets = txp->rx_packets;
			bytes = txp->rx_bytes;
		} while (u64_stats_fetch_retry_irq(&txp->rsync, start));
		stats->rx_packets += packets;
		stats->rx_bytes += bytes;

		do {
			start = u64_stats_fetch_begin_irq(&txp->tsync);
			packets = txp->tx_packets;
			bytes = txp->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&txp->tsync, start));
		stats->tx_packets += packets;
		stats->tx_bytes += bytes;
	}
	stats->rx_dropped = dev->stats.rx_dropped;
	stats->tx_dropped = dev->stats.tx_dropped;

	return stats;
}

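/* Allocate one ifb_q_private per tx queue and bind its tasklet to it. */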
static int ifb_dev_init(struct net_device *dev)
{
	struct ifb_dev_private *dp = netdev_priv(dev);
	struct ifb_q_private *txp;
	int i;

	txp = kcalloc(dev->num_tx_queues, sizeof(*txp), GFP_KERNEL);
	if (!txp)
		return -ENOMEM;
	dp->tx_private = txp;
	for (i = 0; i < dev->num_tx_queues; i++, txp++) {
		txp->txqnum = i;
		txp->dev = dev;
		__skb_queue_head_init(&txp->rq);
		__skb_queue_head_init(&txp->tq);
		u64_stats_init(&txp->rsync);
		u64_stats_init(&txp->tsync);
		tasklet_init(&txp->ifb_tasklet, ifb_ri_tasklet,
			     (unsigned long)txp);
		netif_tx_start_queue(netdev_get_tx_queue(dev, i));
	}
	return 0;
}

static const struct net_device_ops ifb_netdev_ops = {
	.ndo_open	= ifb_open,
	.ndo_stop	= ifb_close,
	.ndo_get_stats64 = ifb_stats64,
	.ndo_start_xmit	= ifb_xmit,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_init	= ifb_dev_init,
};

#define IFB_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG | NETIF_F_FRAGLIST	| \
		      NETIF_F_TSO_ECN | NETIF_F_TSO | NETIF_F_TSO6	| \
		      NETIF_F_GSO_ENCAP_ALL				| \
		      NETIF_F_HIGHDMA | NETIF_F_HW_VLAN_CTAG_TX		| \
		      NETIF_F_HW_VLAN_STAG_TX)

static void ifb_dev_free(struct net_device *dev)
{
	struct ifb_dev_private *dp = netdev_priv(dev);
	struct ifb_q_private *txp = dp->tx_private;
	int i;

	for (i = 0; i < dev->num_tx_queues; i++, txp++) {
		tasklet_kill(&txp->ifb_tasklet);
		__skb_queue_purge(&txp->rq);
		__skb_queue_purge(&txp->tq);
	}
	kfree(dp->tx_private);
	free_netdev(dev);
}

static void ifb_setup(struct net_device *dev)
{
	/* Initialize the device structure. */
	dev->netdev_ops = &ifb_netdev_ops;

	/* Fill in device structure with ethernet-generic values. */
	ether_setup(dev);
	dev->tx_queue_len = TX_Q_LIMIT;

	dev->features |= IFB_FEATURES;
	dev->hw_features |= dev->features;
	dev->hw_enc_features |= dev->features;
	dev->vlan_features |= IFB_FEATURES & ~(NETIF_F_HW_VLAN_CTAG_TX |
					       NETIF_F_HW_VLAN_STAG_TX);

	dev->flags |= IFF_NOARP;
	dev->flags &= ~IFF_MULTICAST;
	dev->priv_flags &= ~IFF_TX_SKB_SHARING;
	netif_keep_dst(dev);
	eth_hw_addr_random(dev);
	dev->destructor = ifb_dev_free;
}

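/* Receive packets redirected here by the tc mirred action: account them on
 * the per-queue rx counters, drop anything that did not come through the
 * ingress/egress hooks, otherwise queue on rq and schedule the tasklet.
 */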
static netdev_tx_t ifb_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ifb_dev_private *dp = netdev_priv(dev);
	u32 from = G_TC_FROM(skb->tc_verd);
	struct ifb_q_private *txp = dp->tx_private + skb_get_queue_mapping(skb);

	u64_stats_update_begin(&txp->rsync);
	txp->rx_packets++;
	txp->rx_bytes += skb->len;
	u64_stats_update_end(&txp->rsync);

	if (!(from & (AT_INGRESS|AT_EGRESS)) || !skb->skb_iif) {
		dev_kfree_skb(skb);
		dev->stats.rx_dropped++;
		return NETDEV_TX_OK;
	}

	if (skb_queue_len(&txp->rq) >= dev->tx_queue_len)
		netif_tx_stop_queue(netdev_get_tx_queue(dev, txp->txqnum));

	__skb_queue_tail(&txp->rq, skb);
	if (!txp->tasklet_pending) {
		txp->tasklet_pending = 1;
		tasklet_schedule(&txp->ifb_tasklet);
	}

	return NETDEV_TX_OK;
}

static int ifb_close(struct net_device *dev)
{
	netif_tx_stop_all_queues(dev);
	return 0;
}

static int ifb_open(struct net_device *dev)
{
	netif_tx_start_all_queues(dev);
	return 0;
}

static int ifb_validate(struct nlattr *tb[], struct nlattr *data[])
{
	if (tb[IFLA_ADDRESS]) {
		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
			return -EINVAL;
		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
			return -EADDRNOTAVAIL;
	}
	return 0;
}

static struct rtnl_link_ops ifb_link_ops __read_mostly = {
	.kind		= "ifb",
	.priv_size	= sizeof(struct ifb_dev_private),
	.setup		= ifb_setup,
	.validate	= ifb_validate,
};

/* Number of ifb devices to be set up by this module.
 * Note that these legacy devices have one queue.
 * Prefer something like: ip link add ifb10 numtxqueues 8 type ifb
 */
static int numifbs = 2;
module_param(numifbs, int, 0);
MODULE_PARM_DESC(numifbs, "Number of ifb devices");

static int __init ifb_init_one(int index)
{
	struct net_device *dev_ifb;
	int err;

	dev_ifb = alloc_netdev(sizeof(struct ifb_dev_private), "ifb%d",
			       NET_NAME_UNKNOWN, ifb_setup);

	if (!dev_ifb)
		return -ENOMEM;

	dev_ifb->rtnl_link_ops = &ifb_link_ops;
	err = register_netdevice(dev_ifb);
	if (err < 0)
		goto err;

	return 0;

err:
	free_netdev(dev_ifb);
	return err;
}

static int __init ifb_init_module(void)
{
	int i, err;

	rtnl_lock();
	err = __rtnl_link_register(&ifb_link_ops);
	if (err < 0)
		goto out;

	for (i = 0; i < numifbs && !err; i++) {
		err = ifb_init_one(i);
		cond_resched();
	}
	if (err)
		__rtnl_link_unregister(&ifb_link_ops);

out:
	rtnl_unlock();

	return err;
}

static void __exit ifb_cleanup_module(void)
{
	rtnl_link_unregister(&ifb_link_ops);
}

module_init(ifb_init_module);
module_exit(ifb_cleanup_module);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jamal Hadi Salim");
MODULE_ALIAS_RTNL_LINK("ifb");