/* drivers/net/ifb.c:

	The purpose of this driver is to provide a device that allows
	for sharing of resources:

	1) qdiscs/policies that are per device as opposed to system wide.
	ifb provides a device that traffic can be redirected to, thus
	giving the impression of sharing.

	2) Allows incoming traffic to be queued for shaping instead of
	dropped.

	The original concept is based on what is known as the IMQ
	driver, initially written by Martin Devera, later rewritten
	by Patrick McHardy and then maintained by Andre Correa.

	You need the tc mirred action (mirror or redirect) to feed this
	device packets.

	This program is free software; you can redistribute it and/or
	modify it under the terms of the GNU General Public License
	as published by the Free Software Foundation; either version
	2 of the License, or (at your option) any later version.

	Authors:	Jamal Hadi Salim (2005)

*/
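
/* Illustrative setup (not part of the original header; the interface
 * names and exact tc filter syntax below are examples only): redirect
 * eth0 ingress traffic through ifb0 so a shaping qdisc can be attached
 * to ifb0's egress.
 *
 *	modprobe ifb
 *	ip link set dev ifb0 up
 *	tc qdisc add dev eth0 handle ffff: ingress
 *	tc filter add dev eth0 parent ffff: protocol ip u32 match u32 0 0 \
 *		action mirred egress redirect dev ifb0
 *	tc qdisc add dev ifb0 root tbf rate 1mbit burst 32kbit latency 400ms
 */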

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/moduleparam.h>
#include <net/pkt_sched.h>
#include <net/net_namespace.h>

#define TX_Q_LIMIT	32
struct ifb_q_private {
	struct net_device	*dev;
	struct tasklet_struct	ifb_tasklet;
	int			tasklet_pending;
	int			txqnum;
	struct sk_buff_head	rq;
	u64			rx_packets;
	u64			rx_bytes;
	struct u64_stats_sync	rsync;

	struct u64_stats_sync	tsync;
	u64			tx_packets;
	u64			tx_bytes;
	struct sk_buff_head	tq;
} ____cacheline_aligned_in_smp;

struct ifb_dev_private {
	struct ifb_q_private *tx_private;
};

static netdev_tx_t ifb_xmit(struct sk_buff *skb, struct net_device *dev);
static int ifb_open(struct net_device *dev);
static int ifb_close(struct net_device *dev);

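/* Per-queue worker: under the tx queue lock, splice the receive queue
 * (rq) onto the transfer queue (tq), then reinject each packet according
 * to where it was grabbed from: dev_queue_xmit() for packets redirected
 * at egress, netif_receive_skb() for packets redirected at ingress.
 */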
static void ifb_ri_tasklet(unsigned long _txp)
{
	struct ifb_q_private *txp = (struct ifb_q_private *)_txp;
	struct netdev_queue *txq;
	struct sk_buff *skb;

	txq = netdev_get_tx_queue(txp->dev, txp->txqnum);
	skb = skb_peek(&txp->tq);
	if (!skb) {
		if (!__netif_tx_trylock(txq))
			goto resched;
		skb_queue_splice_tail_init(&txp->rq, &txp->tq);
		__netif_tx_unlock(txq);
	}

	while ((skb = __skb_dequeue(&txp->tq)) != NULL) {
		u32 from = G_TC_FROM(skb->tc_verd);

		skb->tc_verd = 0;
		skb->tc_verd = SET_TC_NCLS(skb->tc_verd);

		u64_stats_update_begin(&txp->tsync);
		txp->tx_packets++;
		txp->tx_bytes += skb->len;
		u64_stats_update_end(&txp->tsync);

		rcu_read_lock();
		skb->dev = dev_get_by_index_rcu(dev_net(txp->dev), skb->skb_iif);
		if (!skb->dev) {
			rcu_read_unlock();
			dev_kfree_skb(skb);
			txp->dev->stats.tx_dropped++;
			if (skb_queue_len(&txp->tq) != 0)
				goto resched;
			break;
		}
		rcu_read_unlock();
		skb->skb_iif = txp->dev->ifindex;

		if (from & AT_EGRESS) {
			dev_queue_xmit(skb);
		} else if (from & AT_INGRESS) {
			skb_pull(skb, skb->mac_len);
			netif_receive_skb(skb);
		} else
			BUG();
	}

	if (__netif_tx_trylock(txq)) {
		skb = skb_peek(&txp->rq);
		if (!skb) {
			txp->tasklet_pending = 0;
			if (netif_tx_queue_stopped(txq))
				netif_tx_wake_queue(txq);
		} else {
			__netif_tx_unlock(txq);
			goto resched;
		}
		__netif_tx_unlock(txq);
	} else {
resched:
		txp->tasklet_pending = 1;
		tasklet_schedule(&txp->ifb_tasklet);
	}

}

static struct rtnl_link_stats64 *ifb_stats64(struct net_device *dev,
					     struct rtnl_link_stats64 *stats)
{
	struct ifb_dev_private *dp = netdev_priv(dev);
	struct ifb_q_private *txp = dp->tx_private;
	unsigned int start;
	u64 packets, bytes;
	int i;

	for (i = 0; i < dev->num_tx_queues; i++, txp++) {
		do {
			start = u64_stats_fetch_begin_irq(&txp->rsync);
			packets = txp->rx_packets;
			bytes = txp->rx_bytes;
		} while (u64_stats_fetch_retry_irq(&txp->rsync, start));
		stats->rx_packets += packets;
		stats->rx_bytes += bytes;

		do {
			start = u64_stats_fetch_begin_irq(&txp->tsync);
			packets = txp->tx_packets;
			bytes = txp->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&txp->tsync, start));
		stats->tx_packets += packets;
		stats->tx_bytes += bytes;
	}
	stats->rx_dropped = dev->stats.rx_dropped;
	stats->tx_dropped = dev->stats.tx_dropped;

	return stats;
}

static int ifb_dev_init(struct net_device *dev)
{
	struct ifb_dev_private *dp = netdev_priv(dev);
	struct ifb_q_private *txp;
	int i;

	txp = kcalloc(dev->num_tx_queues, sizeof(*txp), GFP_KERNEL);
	if (!txp)
		return -ENOMEM;
	dp->tx_private = txp;
	for (i = 0; i < dev->num_tx_queues; i++, txp++) {
		txp->txqnum = i;
		txp->dev = dev;
		__skb_queue_head_init(&txp->rq);
		__skb_queue_head_init(&txp->tq);
		u64_stats_init(&txp->rsync);
		u64_stats_init(&txp->tsync);
		tasklet_init(&txp->ifb_tasklet, ifb_ri_tasklet,
			     (unsigned long)txp);
		netif_tx_start_queue(netdev_get_tx_queue(dev, i));
	}
	return 0;
}

static const struct net_device_ops ifb_netdev_ops = {
	.ndo_open	= ifb_open,
	.ndo_stop	= ifb_close,
	.ndo_get_stats64 = ifb_stats64,
	.ndo_start_xmit	= ifb_xmit,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_init	= ifb_dev_init,
};

#define IFB_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG | NETIF_F_FRAGLIST | \
		      NETIF_F_TSO_ECN | NETIF_F_TSO | NETIF_F_TSO6 | \
		      NETIF_F_HIGHDMA | NETIF_F_HW_VLAN_CTAG_TX | \
		      NETIF_F_HW_VLAN_STAG_TX)

static void ifb_dev_free(struct net_device *dev)
{
	struct ifb_dev_private *dp = netdev_priv(dev);
	struct ifb_q_private *txp = dp->tx_private;
	int i;

	for (i = 0; i < dev->num_tx_queues; i++, txp++) {
		tasklet_kill(&txp->ifb_tasklet);
		__skb_queue_purge(&txp->rq);
		__skb_queue_purge(&txp->tq);
	}
	kfree(dp->tx_private);
	free_netdev(dev);
}

static void ifb_setup(struct net_device *dev)
{
	/* Initialize the device structure. */
	dev->netdev_ops = &ifb_netdev_ops;

	/* Fill in device structure with ethernet-generic values. */
	ether_setup(dev);
	dev->tx_queue_len = TX_Q_LIMIT;

	dev->features |= IFB_FEATURES;
	dev->vlan_features |= IFB_FEATURES & ~(NETIF_F_HW_VLAN_CTAG_TX |
					       NETIF_F_HW_VLAN_STAG_TX);

	dev->flags |= IFF_NOARP;
	dev->flags &= ~IFF_MULTICAST;
	dev->priv_flags &= ~IFF_TX_SKB_SHARING;
	netif_keep_dst(dev);
	eth_hw_addr_random(dev);
	dev->destructor = ifb_dev_free;
}

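/* ndo_start_xmit handler: account the packet, drop it if it did not come
 * from a tc mirror/redirect action (no AT_INGRESS/AT_EGRESS mark or no
 * originating ifindex), otherwise queue it on the per-queue rq and kick
 * the tasklet to process it.
 */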
static netdev_tx_t ifb_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ifb_dev_private *dp = netdev_priv(dev);
	u32 from = G_TC_FROM(skb->tc_verd);
	struct ifb_q_private *txp = dp->tx_private + skb_get_queue_mapping(skb);

	u64_stats_update_begin(&txp->rsync);
	txp->rx_packets++;
	txp->rx_bytes += skb->len;
	u64_stats_update_end(&txp->rsync);

	if (!(from & (AT_INGRESS|AT_EGRESS)) || !skb->skb_iif) {
		dev_kfree_skb(skb);
		dev->stats.rx_dropped++;
		return NETDEV_TX_OK;
	}

	if (skb_queue_len(&txp->rq) >= dev->tx_queue_len)
		netif_tx_stop_queue(netdev_get_tx_queue(dev, txp->txqnum));

	__skb_queue_tail(&txp->rq, skb);
	if (!txp->tasklet_pending) {
		txp->tasklet_pending = 1;
		tasklet_schedule(&txp->ifb_tasklet);
	}

	return NETDEV_TX_OK;
}

static int ifb_close(struct net_device *dev)
{
	netif_tx_stop_all_queues(dev);
	return 0;
}

static int ifb_open(struct net_device *dev)
{
	netif_tx_start_all_queues(dev);
	return 0;
}

static int ifb_validate(struct nlattr *tb[], struct nlattr *data[])
{
	if (tb[IFLA_ADDRESS]) {
		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
			return -EINVAL;
		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
			return -EADDRNOTAVAIL;
	}
	return 0;
}

static struct rtnl_link_ops ifb_link_ops __read_mostly = {
	.kind		= "ifb",
	.priv_size	= sizeof(struct ifb_dev_private),
	.setup		= ifb_setup,
	.validate	= ifb_validate,
};

/* Number of ifb devices to be set up by this module.
 * Note that these legacy devices have one queue.
 * Prefer something like: ip link add ifb10 numtxqueues 8 type ifb
 */
static int numifbs = 2;
module_param(numifbs, int, 0);
MODULE_PARM_DESC(numifbs, "Number of ifb devices");

static int __init ifb_init_one(int index)
{
	struct net_device *dev_ifb;
	int err;

	dev_ifb = alloc_netdev(sizeof(struct ifb_dev_private), "ifb%d",
			       NET_NAME_UNKNOWN, ifb_setup);

	if (!dev_ifb)
		return -ENOMEM;

	dev_ifb->rtnl_link_ops = &ifb_link_ops;
	err = register_netdevice(dev_ifb);
	if (err < 0)
		goto err;

	return 0;

err:
	free_netdev(dev_ifb);
	return err;
}

static int __init ifb_init_module(void)
{
	int i, err;

	rtnl_lock();
	err = __rtnl_link_register(&ifb_link_ops);
	if (err < 0)
		goto out;

	for (i = 0; i < numifbs && !err; i++) {
		err = ifb_init_one(i);
		cond_resched();
	}
	if (err)
		__rtnl_link_unregister(&ifb_link_ops);

out:
	rtnl_unlock();

	return err;
}

static void __exit ifb_cleanup_module(void)
{
	rtnl_link_unregister(&ifb_link_ops);
}

module_init(ifb_init_module);
module_exit(ifb_cleanup_module);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jamal Hadi Salim");
MODULE_ALIAS_RTNL_LINK("ifb");