treewide: Add SPDX license identifier for missed files
[linux-block.git] / net / core / netpoll.c
CommitLineData
457c8996 1// SPDX-License-Identifier: GPL-2.0-only
1da177e4
LT
2/*
3 * Common framework for low-level network console, dump, and debugger code
4 *
5 * Sep 8 2003 Matt Mackall <mpm@selenic.com>
6 *
7 * based on the netconsole code from:
8 *
9 * Copyright (C) 2001 Ingo Molnar <mingo@redhat.com>
10 * Copyright (C) 2002 Red Hat, Inc.
11 */
12
e6ec2693
JP
13#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
14
bff38771 15#include <linux/moduleparam.h>
4cd5773a 16#include <linux/kernel.h>
1da177e4
LT
17#include <linux/netdevice.h>
18#include <linux/etherdevice.h>
19#include <linux/string.h>
14c85021 20#include <linux/if_arp.h>
1da177e4
LT
21#include <linux/inetdevice.h>
22#include <linux/inet.h>
23#include <linux/interrupt.h>
24#include <linux/netpoll.h>
25#include <linux/sched.h>
26#include <linux/delay.h>
27#include <linux/rcupdate.h>
28#include <linux/workqueue.h>
5a0e3ad6 29#include <linux/slab.h>
bc3b2d7f 30#include <linux/export.h>
689971b4 31#include <linux/if_vlan.h>
1da177e4
LT
32#include <net/tcp.h>
33#include <net/udp.h>
b3d936f3
CW
34#include <net/addrconf.h>
35#include <net/ndisc.h>
36#include <net/ip6_checksum.h>
1da177e4 37#include <asm/unaligned.h>
9cbc1cb8 38#include <trace/events/napi.h>
1da177e4
LT
39
/*
 * We maintain a small pool of fully-sized skbs, to make sure the
 * message gets out even in extreme OOM situations.
 */

#define MAX_UDP_CHUNK 1460
#define MAX_SKBS 32

/* Emergency pool of preallocated skbs, topped up by refill_skbs(). */
static struct sk_buff_head skb_pool;

/* Serializes netpoll polling against dev_open()/dev_close() transitions
 * (see netpoll_poll_disable()/netpoll_poll_enable() below).
 */
DEFINE_STATIC_SRCU(netpoll_srcu);

/* Spacing of the busy-wait retries in netpoll_send_skb_on_dev(). */
#define USEC_PER_POLL	50

/* Worst-case frame size a pooled skb must be able to hold:
 * Ethernet + IPv4 + UDP headers plus one MAX_UDP_CHUNK payload.
 */
#define MAX_SKB_SIZE							\
	(sizeof(struct ethhdr) +					\
	 sizeof(struct iphdr) +						\
	 sizeof(struct udphdr) +					\
	 MAX_UDP_CHUNK)

static void zap_completion_queue(void);

/* Seconds netpoll_setup() waits for carrier after forcing a device up. */
static unsigned int carrier_timeout = 4;
module_param(carrier_timeout, uint, 0644);

/* Logging helpers prefixed with the netpoll instance name. */
#define np_info(np, fmt, ...)				\
	pr_info("%s: " fmt, np->name, ##__VA_ARGS__)
#define np_err(np, fmt, ...)				\
	pr_err("%s: " fmt, np->name, ##__VA_ARGS__)
#define np_notice(np, fmt, ...)				\
	pr_notice("%s: " fmt, np->name, ##__VA_ARGS__)
944e2948
EB
72static int netpoll_start_xmit(struct sk_buff *skb, struct net_device *dev,
73 struct netdev_queue *txq)
74{
944e2948
EB
75 int status = NETDEV_TX_OK;
76 netdev_features_t features;
77
78 features = netif_skb_features(skb);
79
df8a39de 80 if (skb_vlan_tag_present(skb) &&
944e2948 81 !vlan_hw_offload_capable(features, skb->vlan_proto)) {
5968250c 82 skb = __vlan_hwaccel_push_inside(skb);
944e2948
EB
83 if (unlikely(!skb)) {
84 /* This is actually a packet drop, but we
85 * don't want the code that calls this
86 * function to try and operate on a NULL skb.
87 */
88 goto out;
89 }
944e2948
EB
90 }
91
fa2dbdc2 92 status = netdev_start_xmit(skb, dev, txq, false);
944e2948
EB
93
94out:
95 return status;
96}
97
/* Delayed-work handler draining npinfo->txq: packets that could not be
 * sent synchronously by netpoll_send_skb_on_dev() are retried here.
 *
 * On any failure (device gone busy/frozen, or the driver refusing the
 * packet) the skb is put back at the head of the queue and the work is
 * rescheduled HZ/10 later, which preserves packet ordering.
 */
static void queue_process(struct work_struct *work)
{
	struct netpoll_info *npinfo =
		container_of(work, struct netpoll_info, tx_work.work);
	struct sk_buff *skb;
	unsigned long flags;

	while ((skb = skb_dequeue(&npinfo->txq))) {
		struct net_device *dev = skb->dev;
		struct netdev_queue *txq;
		unsigned int q_index;

		/* Device unregistered or downed since the skb was queued. */
		if (!netif_device_present(dev) || !netif_running(dev)) {
			kfree_skb(skb);
			continue;
		}

		local_irq_save(flags);
		/* check if skb->queue_mapping is still valid: the number of
		 * real tx queues may have shrunk since the skb was queued
		 */
		q_index = skb_get_queue_mapping(skb);
		if (unlikely(q_index >= dev->real_num_tx_queues)) {
			q_index = q_index % dev->real_num_tx_queues;
			skb_set_queue_mapping(skb, q_index);
		}
		txq = netdev_get_tx_queue(dev, q_index);
		HARD_TX_LOCK(dev, txq, smp_processor_id());
		if (netif_xmit_frozen_or_stopped(txq) ||
		    netpoll_start_xmit(skb, dev, txq) != NETDEV_TX_OK) {
			/* Requeue at the head so ordering is kept, then
			 * back off and retry later.
			 */
			skb_queue_head(&npinfo->txq, skb);
			HARD_TX_UNLOCK(dev, txq);
			local_irq_restore(flags);

			schedule_delayed_work(&npinfo->tx_work, HZ/10);
			return;
		}
		HARD_TX_UNLOCK(dev, txq);
		local_irq_restore(flags);
	}
}
137
/* Run one NAPI poll routine with a budget of zero (Tx-clean only).
 * NAPI_STATE_NPSVC marks the context as in service by netpoll; if it
 * is already set, NAPI is being disabled and we must not touch it.
 */
static void poll_one_napi(struct napi_struct *napi)
{
	int work;

	/* If we set this bit but see that it has already been set,
	 * that indicates that napi has been disabled and we need
	 * to abort this operation
	 */
	if (test_and_set_bit(NAPI_STATE_NPSVC, &napi->state))
		return;

	/* We explicitly pass the polling call a budget of 0 to
	 * indicate that we are clearing the Tx path only.
	 */
	work = napi->poll(napi, 0);
	/* A zero budget means the poll routine must not report progress. */
	WARN_ONCE(work, "%pS exceeded budget in poll\n", napi->poll);
	trace_napi_poll(napi, work, 0);

	clear_bit(NAPI_STATE_NPSVC, &napi->state);
}
158
/* Poll every NAPI context of @dev that is not already being polled.
 * Ownership is claimed by cmpxchg()ing poll_owner from -1 to this CPU,
 * so netpoll never re-enters a poll routine running elsewhere (nor one
 * it interrupted on this CPU).
 */
static void poll_napi(struct net_device *dev)
{
	struct napi_struct *napi;
	int cpu = smp_processor_id();

	list_for_each_entry(napi, &dev->napi_list, dev_list) {
		if (cmpxchg(&napi->poll_owner, -1, cpu) == -1) {
			poll_one_napi(napi);
			/* Release ownership after the poll completes. */
			smp_store_release(&napi->poll_owner, -1);
		}
	}
}
171
/* Service @dev on behalf of netpoll: kick the driver's poll controller
 * (if any), run its NAPI contexts with a zero budget, and reap this
 * CPU's skb completion queue.  Silently does nothing while netpoll
 * activity is blocked via ni->dev_lock or the device is not running.
 */
void netpoll_poll_dev(struct net_device *dev)
{
	struct netpoll_info *ni = rcu_dereference_bh(dev->npinfo);
	const struct net_device_ops *ops;

	/* Don't do any rx activity if the dev_lock mutex is held
	 * the dev_open/close paths use this to block netpoll activity
	 * while changing device state
	 */
	if (!ni || down_trylock(&ni->dev_lock))
		return;

	if (!netif_running(dev)) {
		up(&ni->dev_lock);
		return;
	}

	ops = dev->netdev_ops;
	if (ops->ndo_poll_controller)
		ops->ndo_poll_controller(dev);

	poll_napi(dev);

	up(&ni->dev_lock);

	zap_completion_queue();
}
EXPORT_SYMBOL(netpoll_poll_dev);
1da177e4 200
/* Block netpoll activity on @dev (used by the dev_open/dev_close paths).
 * Takes ni->dev_lock so netpoll_poll_dev()'s down_trylock() fails until
 * netpoll_poll_enable() is called.  The SRCU read side pairs with the
 * synchronize_srcu() in __netpoll_cleanup() to keep npinfo stable here.
 * May sleep (down() on a semaphore).
 */
void netpoll_poll_disable(struct net_device *dev)
{
	struct netpoll_info *ni;
	int idx;
	might_sleep();
	idx = srcu_read_lock(&netpoll_srcu);
	ni = srcu_dereference(dev->npinfo, &netpoll_srcu);
	if (ni)
		down(&ni->dev_lock);
	srcu_read_unlock(&netpoll_srcu, idx);
}
EXPORT_SYMBOL(netpoll_poll_disable);
ca99ca14 213
/* Re-enable netpoll activity on @dev: releases the dev_lock taken by
 * netpoll_poll_disable().  Plain RCU suffices here since up() does not
 * sleep.
 */
void netpoll_poll_enable(struct net_device *dev)
{
	struct netpoll_info *ni;
	rcu_read_lock();
	ni = rcu_dereference(dev->npinfo);
	if (ni)
		up(&ni->dev_lock);
	rcu_read_unlock();
}
EXPORT_SYMBOL(netpoll_poll_enable);
ca99ca14 224
1da177e4
LT
225static void refill_skbs(void)
226{
227 struct sk_buff *skb;
228 unsigned long flags;
229
a1bcfacd
SH
230 spin_lock_irqsave(&skb_pool.lock, flags);
231 while (skb_pool.qlen < MAX_SKBS) {
1da177e4
LT
232 skb = alloc_skb(MAX_SKB_SIZE, GFP_ATOMIC);
233 if (!skb)
234 break;
235
a1bcfacd 236 __skb_queue_tail(&skb_pool, skb);
1da177e4 237 }
a1bcfacd 238 spin_unlock_irqrestore(&skb_pool.lock, flags);
1da177e4
LT
239}
240
3578b0c8
DM
/* Drain this CPU's skb completion queue.  Netpoll may run with
 * softirqs unable to fire, so the queue would otherwise grow without
 * bound.  skbs that are not safe to free from hard-IRQ context are
 * handed back to dev_kfree_skb_any() instead of being freed directly.
 */
static void zap_completion_queue(void)
{
	unsigned long flags;
	struct softnet_data *sd = &get_cpu_var(softnet_data);

	if (sd->completion_queue) {
		struct sk_buff *clist;

		/* Detach the whole list atomically w.r.t. interrupts. */
		local_irq_save(flags);
		clist = sd->completion_queue;
		sd->completion_queue = NULL;
		local_irq_restore(flags);

		while (clist != NULL) {
			struct sk_buff *skb = clist;
			clist = clist->next;
			if (!skb_irq_freeable(skb)) {
				/* Re-arm the refcount dropped by the driver
				 * and let the normal path free it later.
				 */
				refcount_set(&skb->users, 1);
				dev_kfree_skb_any(skb); /* put this one back */
			} else {
				__kfree_skb(skb);
			}
		}
	}

	put_cpu_var(softnet_data);
}
268
/* Get an skb with @len bytes of space and @reserve bytes of headroom.
 *
 * Tries a fresh GFP_ATOMIC allocation first, then falls back to the
 * emergency pool.  If both fail, polls the device (to reap completed
 * transmits and the completion queue) and retries up to 10 times before
 * returning NULL.
 */
static struct sk_buff *find_skb(struct netpoll *np, int len, int reserve)
{
	int count = 0;
	struct sk_buff *skb;

	zap_completion_queue();
	refill_skbs();
repeat:

	skb = alloc_skb(len, GFP_ATOMIC);
	if (!skb)
		skb = skb_dequeue(&skb_pool);

	if (!skb) {
		if (++count < 10) {
			/* Tickle the device; memory may free up. */
			netpoll_poll_dev(np->dev);
			goto repeat;
		}
		return NULL;
	}

	refcount_set(&skb->users, 1);
	skb_reserve(skb, reserve);
	return skb;
}
294
bea3348e
SH
295static int netpoll_owner_active(struct net_device *dev)
296{
297 struct napi_struct *napi;
298
299 list_for_each_entry(napi, &dev->napi_list, dev_list) {
300 if (napi->poll_owner == smp_processor_id())
301 return 1;
302 }
303 return 0;
304}
305
/* Transmit @skb for netpoll instance @np via @dev.
 *
 * Call with IRQs disabled (asserted below).  If nothing is pending on
 * npinfo->txq and this CPU does not own a NAPI poll on the device, the
 * skb is pushed straight at the driver, busy-waiting for up to one
 * clock tick in USEC_PER_POLL steps; otherwise — or if every try
 * fails — it is queued on npinfo->txq for queue_process() to retry
 * from workqueue context.
 */
void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
			     struct net_device *dev)
{
	int status = NETDEV_TX_BUSY;
	unsigned long tries;
	/* It is up to the caller to keep npinfo alive. */
	struct netpoll_info *npinfo;

	lockdep_assert_irqs_disabled();

	npinfo = rcu_dereference_bh(np->dev->npinfo);
	if (!npinfo || !netif_running(dev) || !netif_device_present(dev)) {
		dev_kfree_skb_irq(skb);
		return;
	}

	/* don't get messages out of order, and no recursion */
	if (skb_queue_len(&npinfo->txq) == 0 && !netpoll_owner_active(dev)) {
		struct netdev_queue *txq;

		txq = netdev_core_pick_tx(dev, skb, NULL);

		/* try until next clock tick */
		for (tries = jiffies_to_usecs(1)/USEC_PER_POLL;
		     tries > 0; --tries) {
			if (HARD_TX_TRYLOCK(dev, txq)) {
				if (!netif_xmit_stopped(txq))
					status = netpoll_start_xmit(skb, dev, txq);

				HARD_TX_UNLOCK(dev, txq);

				if (status == NETDEV_TX_OK)
					break;

			}

			/* tickle device maybe there is some cleanup */
			netpoll_poll_dev(np->dev);

			udelay(USEC_PER_POLL);
		}

		/* A buggy ndo_start_xmit may have re-enabled IRQs. */
		WARN_ONCE(!irqs_disabled(),
			  "netpoll_send_skb_on_dev(): %s enabled interrupts in poll (%pS)\n",
			  dev->name, dev->netdev_ops->ndo_start_xmit);

	}

	if (status != NETDEV_TX_OK) {
		skb_queue_tail(&npinfo->txq, skb);
		schedule_delayed_work(&npinfo->tx_work,0);
	}
}
EXPORT_SYMBOL(netpoll_send_skb_on_dev);
1da177e4
LT
361
/* Build and send one UDP datagram carrying @len bytes of @msg through
 * netpoll instance @np.
 *
 * The packet (Ethernet + IPv4/IPv6 + UDP headers) is hand-assembled
 * back-to-front in an skb obtained from find_skb(); if no skb can be
 * had even from the emergency pool, the message is silently dropped.
 * Must be called with IRQs disabled.
 */
void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
{
	int total_len, ip_len, udp_len;
	struct sk_buff *skb;
	struct udphdr *udph;
	struct iphdr *iph;
	struct ethhdr *eth;
	static atomic_t ip_ident;
	struct ipv6hdr *ip6h;

	WARN_ON_ONCE(!irqs_disabled());

	udp_len = len + sizeof(*udph);
	if (np->ipv6)
		ip_len = udp_len + sizeof(*ip6h);
	else
		ip_len = udp_len + sizeof(*iph);

	total_len = ip_len + LL_RESERVED_SPACE(np->dev);

	skb = find_skb(np, total_len + np->dev->needed_tailroom,
		       total_len - len);
	if (!skb)
		return;

	/* Payload first; headers are then pushed in front of it. */
	skb_copy_to_linear_data(skb, msg, len);
	skb_put(skb, len);

	skb_push(skb, sizeof(*udph));
	skb_reset_transport_header(skb);
	udph = udp_hdr(skb);
	udph->source = htons(np->local_port);
	udph->dest = htons(np->remote_port);
	udph->len = htons(udp_len);

	if (np->ipv6) {
		udph->check = 0;
		udph->check = csum_ipv6_magic(&np->local_ip.in6,
					      &np->remote_ip.in6,
					      udp_len, IPPROTO_UDP,
					      csum_partial(udph, udp_len, 0));
		if (udph->check == 0)
			udph->check = CSUM_MANGLED_0;

		skb_push(skb, sizeof(*ip6h));
		skb_reset_network_header(skb);
		ip6h = ipv6_hdr(skb);

		/* ip6h->version = 6; ip6h->priority = 0; */
		put_unaligned(0x60, (unsigned char *)ip6h);
		ip6h->flow_lbl[0] = 0;
		ip6h->flow_lbl[1] = 0;
		ip6h->flow_lbl[2] = 0;

		ip6h->payload_len = htons(sizeof(struct udphdr) + len);
		ip6h->nexthdr = IPPROTO_UDP;
		ip6h->hop_limit = 32;
		ip6h->saddr = np->local_ip.in6;
		ip6h->daddr = np->remote_ip.in6;

		eth = skb_push(skb, ETH_HLEN);
		skb_reset_mac_header(skb);
		skb->protocol = eth->h_proto = htons(ETH_P_IPV6);
	} else {
		udph->check = 0;
		udph->check = csum_tcpudp_magic(np->local_ip.ip,
						np->remote_ip.ip,
						udp_len, IPPROTO_UDP,
						csum_partial(udph, udp_len, 0));
		if (udph->check == 0)
			udph->check = CSUM_MANGLED_0;

		skb_push(skb, sizeof(*iph));
		skb_reset_network_header(skb);
		iph = ip_hdr(skb);

		/* iph->version = 4; iph->ihl = 5; */
		put_unaligned(0x45, (unsigned char *)iph);
		iph->tos      = 0;
		put_unaligned(htons(ip_len), &(iph->tot_len));
		iph->id       = htons(atomic_inc_return(&ip_ident));
		iph->frag_off = 0;
		iph->ttl      = 64;
		iph->protocol = IPPROTO_UDP;
		iph->check    = 0;
		put_unaligned(np->local_ip.ip, &(iph->saddr));
		put_unaligned(np->remote_ip.ip, &(iph->daddr));
		iph->check    = ip_fast_csum((unsigned char *)iph, iph->ihl);

		eth = skb_push(skb, ETH_HLEN);
		skb_reset_mac_header(skb);
		skb->protocol = eth->h_proto = htons(ETH_P_IP);
	}

	ether_addr_copy(eth->h_source, np->dev->dev_addr);
	ether_addr_copy(eth->h_dest, np->remote_mac);

	skb->dev = np->dev;

	netpoll_send_skb(np, skb);
}
EXPORT_SYMBOL(netpoll_send_udp);
1da177e4 464
0bcc1816
SS
/* Log the fully-resolved configuration of @np (ports, addresses,
 * interface, remote MAC), one np_info() line per field.
 */
void netpoll_print_options(struct netpoll *np)
{
	np_info(np, "local port %d\n", np->local_port);
	if (np->ipv6)
		np_info(np, "local IPv6 address %pI6c\n", &np->local_ip.in6);
	else
		np_info(np, "local IPv4 address %pI4\n", &np->local_ip.ip);
	np_info(np, "interface '%s'\n", np->dev_name);
	np_info(np, "remote port %d\n", np->remote_port);
	if (np->ipv6)
		np_info(np, "remote IPv6 address %pI6c\n", &np->remote_ip.in6);
	else
		np_info(np, "remote IPv4 address %pI4\n", &np->remote_ip.ip);
	np_info(np, "remote ethernet address %pM\n", np->remote_mac);
}
EXPORT_SYMBOL(netpoll_print_options);
0bcc1816 481
b7394d24
CW
482static int netpoll_parse_ip_addr(const char *str, union inet_addr *addr)
483{
484 const char *end;
485
486 if (!strchr(str, ':') &&
487 in4_pton(str, -1, (void *)addr, -1, &end) > 0) {
488 if (!*end)
489 return 0;
490 }
491 if (in6_pton(str, -1, addr->in6.s6_addr, -1, &end) > 0) {
492#if IS_ENABLED(CONFIG_IPV6)
493 if (!*end)
494 return 1;
495#else
496 return -1;
497#endif
498 }
499 return -1;
500}
501
1da177e4
LT
/* Parse a netpoll configuration string of the form
 *
 *   [src_port]@[src_ip]/[dev],[dst_port]@<dst_ip>/[dst_mac]
 *
 * Empty fields keep their current/default values.  The string is
 * modified in place (delimiters are NUL-terminated).  Mixing an IPv4
 * local address with an IPv6 remote (or vice versa) fails.
 * Returns 0 on success, -1 on malformed input.
 */
int netpoll_parse_options(struct netpoll *np, char *opt)
{
	char *cur=opt, *delim;
	int ipv6;
	bool ipversion_set = false;

	/* src port, terminated by '@' */
	if (*cur != '@') {
		if ((delim = strchr(cur, '@')) == NULL)
			goto parse_failed;
		*delim = 0;
		if (kstrtou16(cur, 10, &np->local_port))
			goto parse_failed;
		cur = delim;
	}
	cur++;

	/* src ip, terminated by '/' */
	if (*cur != '/') {
		ipversion_set = true;
		if ((delim = strchr(cur, '/')) == NULL)
			goto parse_failed;
		*delim = 0;
		ipv6 = netpoll_parse_ip_addr(cur, &np->local_ip);
		if (ipv6 < 0)
			goto parse_failed;
		else
			np->ipv6 = (bool)ipv6;
		cur = delim;
	}
	cur++;

	if (*cur != ',') {
		/* parse out dev name */
		if ((delim = strchr(cur, ',')) == NULL)
			goto parse_failed;
		*delim = 0;
		strlcpy(np->dev_name, cur, sizeof(np->dev_name));
		cur = delim;
	}
	cur++;

	if (*cur != '@') {
		/* dst port */
		if ((delim = strchr(cur, '@')) == NULL)
			goto parse_failed;
		*delim = 0;
		if (*cur == ' ' || *cur == '\t')
			np_info(np, "warning: whitespace is not allowed\n");
		if (kstrtou16(cur, 10, &np->remote_port))
			goto parse_failed;
		cur = delim;
	}
	cur++;

	/* dst ip */
	if ((delim = strchr(cur, '/')) == NULL)
		goto parse_failed;
	*delim = 0;
	ipv6 = netpoll_parse_ip_addr(cur, &np->remote_ip);
	if (ipv6 < 0)
		goto parse_failed;
	else if (ipversion_set && np->ipv6 != (bool)ipv6)
		goto parse_failed;	/* local/remote families disagree */
	else
		np->ipv6 = (bool)ipv6;
	cur = delim + 1;

	if (*cur != 0) {
		/* MAC address */
		if (!mac_pton(cur, np->remote_mac))
			goto parse_failed;
	}

	netpoll_print_options(np);

	return 0;

 parse_failed:
	np_info(np, "couldn't parse config at '%s'!\n", cur);
	return -1;
}
EXPORT_SYMBOL(netpoll_parse_options);
1da177e4 583
/* Attach netpoll instance @np to @ndev.  Caller holds RTNL.
 *
 * Allocates (or refcount-shares) the device's netpoll_info, lets the
 * driver do its own setup via ndo_netpoll_setup(), and finally
 * publishes npinfo with rcu_assign_pointer() so readers only ever see
 * a fully-initialized structure.  Returns 0 or a negative errno.
 */
int __netpoll_setup(struct netpoll *np, struct net_device *ndev)
{
	struct netpoll_info *npinfo;
	const struct net_device_ops *ops;
	int err;

	np->dev = ndev;
	strlcpy(np->dev_name, ndev->name, IFNAMSIZ);

	if (ndev->priv_flags & IFF_DISABLE_NETPOLL) {
		np_err(np, "%s doesn't support polling, aborting\n",
		       np->dev_name);
		err = -ENOTSUPP;
		goto out;
	}

	if (!ndev->npinfo) {
		/* First netpoll user of this device. */
		npinfo = kmalloc(sizeof(*npinfo), GFP_KERNEL);
		if (!npinfo) {
			err = -ENOMEM;
			goto out;
		}

		sema_init(&npinfo->dev_lock, 1);
		skb_queue_head_init(&npinfo->txq);
		INIT_DELAYED_WORK(&npinfo->tx_work, queue_process);

		refcount_set(&npinfo->refcnt, 1);

		ops = np->dev->netdev_ops;
		if (ops->ndo_netpoll_setup) {
			err = ops->ndo_netpoll_setup(ndev, npinfo);
			if (err)
				goto free_npinfo;
		}
	} else {
		/* Share the existing npinfo; just take a reference. */
		npinfo = rtnl_dereference(ndev->npinfo);
		refcount_inc(&npinfo->refcnt);
	}

	npinfo->netpoll = np;

	/* last thing to do is link it to the net device structure */
	rcu_assign_pointer(ndev->npinfo, npinfo);

	return 0;

free_npinfo:
	kfree(npinfo);
out:
	return err;
}
EXPORT_SYMBOL_GPL(__netpoll_setup);
637
/* Full netpoll bring-up: resolve np->dev_name to a device, force it up
 * if necessary (waiting up to carrier_timeout seconds for carrier),
 * pick a local IPv4/IPv6 address if none was configured, pre-fill the
 * emergency skb pool, and attach via __netpoll_setup().
 *
 * RTNL is dropped around the carrier wait (msleep loop) and retaken
 * afterwards; the device is pinned by dev_hold() across that window.
 * Returns 0 or a negative errno.
 */
int netpoll_setup(struct netpoll *np)
{
	struct net_device *ndev = NULL;
	struct in_device *in_dev;
	int err;

	rtnl_lock();
	if (np->dev_name[0]) {
		struct net *net = current->nsproxy->net_ns;
		ndev = __dev_get_by_name(net, np->dev_name);
	}
	if (!ndev) {
		np_err(np, "%s doesn't exist, aborting\n", np->dev_name);
		err = -ENODEV;
		goto unlock;
	}
	dev_hold(ndev);

	/* Bonding/teaming slaves are driven by their master; refuse. */
	if (netdev_master_upper_dev_get(ndev)) {
		np_err(np, "%s is a slave device, aborting\n", np->dev_name);
		err = -EBUSY;
		goto put;
	}

	if (!netif_running(ndev)) {
		unsigned long atmost, atleast;

		np_info(np, "device %s not up yet, forcing it\n", np->dev_name);

		err = dev_open(ndev, NULL);

		if (err) {
			np_err(np, "failed to open %s\n", ndev->name);
			goto put;
		}

		rtnl_unlock();
		atleast = jiffies + HZ/10;
		atmost = jiffies + carrier_timeout * HZ;
		while (!netif_carrier_ok(ndev)) {
			if (time_after(jiffies, atmost)) {
				np_notice(np, "timeout waiting for carrier\n");
				break;
			}
			msleep(1);
		}

		/* If carrier appears to come up instantly, we don't
		 * trust it and pause so that we don't pump all our
		 * queued console messages into the bitbucket.
		 */

		if (time_before(jiffies, atleast)) {
			np_notice(np, "carrier detect appears untrustworthy, waiting 4 seconds\n");
			msleep(4000);
		}
		rtnl_lock();
	}

	if (!np->local_ip.ip) {
		if (!np->ipv6) {
			/* Take the device's first IPv4 address. */
			in_dev = __in_dev_get_rtnl(ndev);

			if (!in_dev || !in_dev->ifa_list) {
				np_err(np, "no IP address for %s, aborting\n",
				       np->dev_name);
				err = -EDESTADDRREQ;
				goto put;
			}

			np->local_ip.ip = in_dev->ifa_list->ifa_local;
			np_info(np, "local IP %pI4\n", &np->local_ip.ip);
		} else {
#if IS_ENABLED(CONFIG_IPV6)
			struct inet6_dev *idev;

			err = -EDESTADDRREQ;
			idev = __in6_dev_get(ndev);
			if (idev) {
				struct inet6_ifaddr *ifp;

				read_lock_bh(&idev->lock);
				list_for_each_entry(ifp, &idev->addr_list, if_list) {
					/* Match the link-local-ness of the
					 * remote address.
					 */
					if (!!(ipv6_addr_type(&ifp->addr) & IPV6_ADDR_LINKLOCAL) !=
					    !!(ipv6_addr_type(&np->remote_ip.in6) & IPV6_ADDR_LINKLOCAL))
						continue;
					np->local_ip.in6 = ifp->addr;
					err = 0;
					break;
				}
				read_unlock_bh(&idev->lock);
			}
			if (err) {
				np_err(np, "no IPv6 address for %s, aborting\n",
				       np->dev_name);
				goto put;
			} else
				np_info(np, "local IPv6 %pI6c\n", &np->local_ip.in6);
#else
			np_err(np, "IPv6 is not supported %s, aborting\n",
			       np->dev_name);
			err = -EINVAL;
			goto put;
#endif
		}
	}

	/* fill up the skb queue */
	refill_skbs();

	err = __netpoll_setup(np, ndev);
	if (err)
		goto put;

	rtnl_unlock();
	return 0;

put:
	dev_put(ndev);
unlock:
	rtnl_unlock();
	return err;
}
EXPORT_SYMBOL(netpoll_setup);
1da177e4 762
c68b9070
DM
/* Early init: prepare the (empty) emergency skb pool head.  The pool
 * itself is filled lazily by refill_skbs() when netpoll is set up.
 */
static int __init netpoll_init(void)
{
	skb_queue_head_init(&skb_pool);
	return 0;
}
core_initcall(netpoll_init);
769
38e6bc18
AW
/* RCU callback freeing a netpoll_info after the last reference is gone.
 * Runs in softirq context, hence the purge/cancel/purge/cancel dance
 * instead of cancel_delayed_work_sync().
 */
static void rcu_cleanup_netpoll_info(struct rcu_head *rcu_head)
{
	struct netpoll_info *npinfo =
		container_of(rcu_head, struct netpoll_info, rcu);

	skb_queue_purge(&npinfo->txq);

	/* we can't call cancel_delayed_work_sync here, as we are in softirq */
	cancel_delayed_work(&npinfo->tx_work);

	/* clean after last, unfinished work */
	__skb_queue_purge(&npinfo->txq);
	/* now cancel it again */
	cancel_delayed_work(&npinfo->tx_work);
	kfree(npinfo);
}
786
/* Detach @np from its device.  Caller holds RTNL.
 *
 * synchronize_srcu() waits out any netpoll_poll_disable() critical
 * section before npinfo may go away.  On the last reference the
 * driver's ndo_netpoll_cleanup() is called and npinfo is freed via
 * call_rcu(); otherwise only the device's npinfo pointer is cleared.
 */
void __netpoll_cleanup(struct netpoll *np)
{
	struct netpoll_info *npinfo;

	npinfo = rtnl_dereference(np->dev->npinfo);
	if (!npinfo)
		return;

	synchronize_srcu(&netpoll_srcu);

	if (refcount_dec_and_test(&npinfo->refcnt)) {
		const struct net_device_ops *ops;

		ops = np->dev->netdev_ops;
		if (ops->ndo_netpoll_cleanup)
			ops->ndo_netpoll_cleanup(np->dev);

		RCU_INIT_POINTER(np->dev->npinfo, NULL);
		call_rcu(&npinfo->rcu, rcu_cleanup_netpoll_info);
	} else
		RCU_INIT_POINTER(np->dev->npinfo, NULL);
}
EXPORT_SYMBOL_GPL(__netpoll_cleanup);
de85d99e 810
/* Clean up and kfree() a heap-allocated netpoll.  Caller holds RTNL
 * (asserted).  The synchronize_rcu() lets in-flight transmit paths
 * that reference @np drain before it is freed.
 */
void __netpoll_free(struct netpoll *np)
{
	ASSERT_RTNL();

	/* Wait for transmitting packets to finish before freeing. */
	synchronize_rcu();
	__netpoll_cleanup(np);
	kfree(np);
}
EXPORT_SYMBOL_GPL(__netpoll_free);
fbeec2e1 821
8fdd95ec
HX
/* Public teardown counterpart of netpoll_setup(): takes RTNL, detaches
 * @np, drops the device reference taken at setup, and clears np->dev.
 * Safe to call on an instance that was never (or already) attached.
 */
void netpoll_cleanup(struct netpoll *np)
{
	rtnl_lock();
	if (!np->dev)
		goto out;
	__netpoll_cleanup(np);
	dev_put(np->dev);
	np->dev = NULL;
out:
	rtnl_unlock();
}
EXPORT_SYMBOL(netpoll_cleanup);