include/net/busy_poll.h
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * net busy poll support
 * Copyright(c) 2013 Intel Corporation.
 *
 * Author: Eliezer Tamir
 *
 * Contact Information:
 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
 */

#ifndef _LINUX_NET_BUSY_POLL_H
#define _LINUX_NET_BUSY_POLL_H

#include <linux/netdevice.h>
#include <linux/sched/clock.h>
#include <linux/sched/signal.h>
#include <net/ip.h>

/* 0 - Reserved to indicate value not set
 * 1..NR_CPUS - Reserved for sender_cpu
 * NR_CPUS+1..~0 - Region available for NAPI IDs
 */
#define MIN_NAPI_ID ((unsigned int)(NR_CPUS + 1))
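
/* Worked example (illustrative): with NR_CPUS == 64, values 1..64 are
 * reserved for skb->sender_cpu (which shares storage with skb->napi_id),
 * so the first value that can actually identify a NAPI instance is
 * MIN_NAPI_ID == 65; any ID below MIN_NAPI_ID is treated as "not a NAPI ID".
 */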

#define BUSY_POLL_BUDGET 8

#ifdef CONFIG_NET_RX_BUSY_POLL

struct napi_struct;
extern unsigned int sysctl_net_busy_read __read_mostly;
extern unsigned int sysctl_net_busy_poll __read_mostly;

static inline bool net_busy_loop_on(void)
{
	return READ_ONCE(sysctl_net_busy_poll);
}

static inline bool sk_can_busy_loop(const struct sock *sk)
{
	return READ_ONCE(sk->sk_ll_usec) && !signal_pending(current);
}

bool sk_busy_loop_end(void *p, unsigned long start_time);

void napi_busy_loop(unsigned int napi_id,
		    bool (*loop_end)(void *, unsigned long),
		    void *loop_end_arg, bool prefer_busy_poll, u16 budget);

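/* napi_busy_loop() repeatedly polls the NAPI context identified by napi_id
 * and calls loop_end(loop_end_arg, start_time) between polls, stopping once
 * the callback returns true.  As a rough sketch (modelled on the helper in
 * net/core/sock.c), the sk_busy_loop_end() callback declared above ends the
 * loop when data has arrived or the per-socket timeout has elapsed:
 *
 *	bool sk_busy_loop_end(void *p, unsigned long start_time)
 *	{
 *		struct sock *sk = p;
 *
 *		return !skb_queue_empty_lockless(&sk->sk_receive_queue) ||
 *		       sk_busy_loop_timeout(sk, start_time);
 *	}
 */
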
#else /* CONFIG_NET_RX_BUSY_POLL */
static inline unsigned long net_busy_loop_on(void)
{
	return 0;
}

static inline bool sk_can_busy_loop(struct sock *sk)
{
	return false;
}

#endif /* CONFIG_NET_RX_BUSY_POLL */

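/* Current time for busy poll timeouts: local_clock() returns nanoseconds,
 * and shifting right by 10 divides by 1024, giving a cheap approximation of
 * microseconds that is compared against the microsecond-based busy poll
 * budgets (sysctl_net_busy_poll / sk->sk_ll_usec).
 */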
static inline unsigned long busy_loop_current_time(void)
{
#ifdef CONFIG_NET_RX_BUSY_POLL
	return (unsigned long)(local_clock() >> 10);
#else
	return 0;
#endif
}

/* in poll/select we use the global sysctl_net_busy_poll value */
static inline bool busy_loop_timeout(unsigned long start_time)
{
#ifdef CONFIG_NET_RX_BUSY_POLL
	unsigned long bp_usec = READ_ONCE(sysctl_net_busy_poll);

	if (bp_usec) {
		unsigned long end_time = start_time + bp_usec;
		unsigned long now = busy_loop_current_time();

		return time_after(now, end_time);
	}
#endif
	return true;
}

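/* Rough usage sketch (modelled on the poll/select loops in fs/select.c;
 * busy_start is the caller's own variable): record a timestamp the first
 * time busy polling is attempted, then stop once the global budget is spent:
 *
 *	if (!busy_start)
 *		busy_start = busy_loop_current_time();
 *	else if (busy_loop_timeout(busy_start))
 *		break;	// stop busy polling, fall back to sleeping
 */
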
static inline bool sk_busy_loop_timeout(struct sock *sk,
					unsigned long start_time)
{
#ifdef CONFIG_NET_RX_BUSY_POLL
	unsigned long bp_usec = READ_ONCE(sk->sk_ll_usec);

	if (bp_usec) {
		unsigned long end_time = start_time + bp_usec;
		unsigned long now = busy_loop_current_time();

		return time_after(now, end_time);
	}
#endif
	return true;
}

static inline void sk_busy_loop(struct sock *sk, int nonblock)
{
#ifdef CONFIG_NET_RX_BUSY_POLL
	unsigned int napi_id = READ_ONCE(sk->sk_napi_id);

	if (napi_id >= MIN_NAPI_ID)
		napi_busy_loop(napi_id, nonblock ? NULL : sk_busy_loop_end, sk,
			       READ_ONCE(sk->sk_prefer_busy_poll),
			       READ_ONCE(sk->sk_busy_poll_budget) ?: BUSY_POLL_BUDGET);
#endif
}
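
/* Typical call site (illustrative, modelled on the TCP/UDP recvmsg paths):
 * a protocol busy polls before sleeping when its receive queue is empty and
 * the socket has opted in:
 *
 *	if (sk_can_busy_loop(sk) &&
 *	    skb_queue_empty_lockless(&sk->sk_receive_queue))
 *		sk_busy_loop(sk, flags & MSG_DONTWAIT);
 */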

/* used in the NIC receive handler to mark the skb */
static inline void skb_mark_napi_id(struct sk_buff *skb,
				    struct napi_struct *napi)
{
#ifdef CONFIG_NET_RX_BUSY_POLL
	/* If the skb was already marked with a valid NAPI ID, avoid
	 * overwriting it.
	 */
	if (skb->napi_id < MIN_NAPI_ID)
		skb->napi_id = napi->napi_id;
#endif
}
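
/* Illustrative driver-side usage: skbs fed through napi_gro_receive() are
 * typically marked automatically; a driver handing skbs straight to the
 * stack from its NAPI poll routine would mark them itself, roughly
 * (rx_ring is a hypothetical driver structure):
 *
 *	skb_mark_napi_id(skb, &rx_ring->napi);
 *	netif_receive_skb(skb);
 */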

/* used in the protocol handler to propagate the napi_id to the socket */
static inline void sk_mark_napi_id(struct sock *sk, const struct sk_buff *skb)
{
#ifdef CONFIG_NET_RX_BUSY_POLL
	if (unlikely(READ_ONCE(sk->sk_napi_id) != skb->napi_id))
		WRITE_ONCE(sk->sk_napi_id, skb->napi_id);
#endif
	sk_rx_queue_update(sk, skb);
}
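
/* Illustrative usage (modelled on the UDP/TCP receive paths): once the
 * owning socket has been looked up for an incoming skb, the protocol
 * propagates the NAPI ID that the driver recorded on the skb:
 *
 *	sk_mark_napi_id(sk, skb);
 */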

/* Variant of sk_mark_napi_id() for passive flow setup,
 * as both sk->sk_napi_id and sk->sk_rx_queue_mapping
 * need to be set.
 */
static inline void sk_mark_napi_id_set(struct sock *sk,
				       const struct sk_buff *skb)
{
#ifdef CONFIG_NET_RX_BUSY_POLL
	WRITE_ONCE(sk->sk_napi_id, skb->napi_id);
#endif
	sk_rx_queue_set(sk, skb);
}

static inline void __sk_mark_napi_id_once(struct sock *sk, unsigned int napi_id)
{
#ifdef CONFIG_NET_RX_BUSY_POLL
	if (!READ_ONCE(sk->sk_napi_id))
		WRITE_ONCE(sk->sk_napi_id, napi_id);
#endif
}

/* variant used for unconnected sockets */
static inline void sk_mark_napi_id_once(struct sock *sk,
					const struct sk_buff *skb)
{
#ifdef CONFIG_NET_RX_BUSY_POLL
	__sk_mark_napi_id_once(sk, skb->napi_id);
#endif
}

static inline void sk_mark_napi_id_once_xdp(struct sock *sk,
					    const struct xdp_buff *xdp)
{
#ifdef CONFIG_NET_RX_BUSY_POLL
	__sk_mark_napi_id_once(sk, xdp->rxq->napi_id);
#endif
}
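
/* The _once variants are intended for unconnected receive paths where the
 * first packet (or XDP buffer) should pin the NAPI ID and later traffic must
 * not keep rewriting it.  Illustrative calls (modelled on the UDP enqueue and
 * AF_XDP receive paths; xs is a hypothetical xdp_sock pointer):
 *
 *	sk_mark_napi_id_once(sk, skb);
 *	sk_mark_napi_id_once_xdp(&xs->sk, xdp);
 */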

#endif /* _LINUX_NET_BUSY_POLL_H */