/*
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#ifndef __XEN_NETBACK__COMMON_H__
#define __XEN_NETBACK__COMMON_H__

#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__

#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/ip.h>
#include <linux/in.h>
#include <linux/io.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/wait.h>
#include <linux/sched.h>

#include <xen/interface/io/netif.h>
#include <xen/interface/grant_table.h>
#include <xen/grant_table.h>
#include <xen/xenbus.h>
#include <xen/page.h>
#include <linux/debugfs.h>

typedef unsigned int pending_ring_idx_t;

struct pending_tx_info {
        struct xen_netif_tx_request req; /* tx request */
        unsigned int extra_count;
        /* Callback data for released SKBs. The callback is always
         * xenvif_zerocopy_callback, desc contains the pending_idx, which is
         * also an index into the pending_tx_info array. It is initialized in
         * xenvif_alloc and it never changes.
         * skb_shinfo(skb)->destructor_arg points to the first mapped slot's
         * callback_struct in this array of struct pending_tx_info's; ctx then
         * points to the next slot's, or is NULL when there are no more slots
         * for this skb.
         * ubuf_to_vif is a helper which finds the struct xenvif from a
         * pointer to this field.
         */
        struct ubuf_info_msgzc callback_struct;
};

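/*
 * Illustrative sketch (not part of this header; variable names are
 * hypothetical): given the layout above, a zerocopy completion can recover
 * its slot from the ubuf_info_msgzc pointer alone:
 *
 *      struct pending_tx_info *info =
 *              container_of(ubuf, struct pending_tx_info, callback_struct);
 *      u16 pending_idx = ubuf->desc;
 *
 * This is the container_of() relationship that ubuf_to_vif, mentioned in
 * the comment above, relies on.
 */
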
#define XEN_NETIF_TX_RING_SIZE __CONST_RING_SIZE(xen_netif_tx, XEN_PAGE_SIZE)
#define XEN_NETIF_RX_RING_SIZE __CONST_RING_SIZE(xen_netif_rx, XEN_PAGE_SIZE)

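/*
 * For reference: __CONST_RING_SIZE() yields the largest power-of-two number
 * of slots whose requests/responses fit in one grant alongside the shared
 * ring header. With 4 KiB Xen pages both rings work out to 256 entries.
 */
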
struct xenvif_rx_meta {
        int id;
        int size;
        int gso_type;
        int gso_size;
};

#define GSO_BIT(type) \
        (1 << XEN_NETIF_GSO_TYPE_ ## type)

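/*
 * Example expansion: GSO_BIT(TCPV4) becomes (1 << XEN_NETIF_GSO_TYPE_TCPV4),
 * suitable for testing against a frontend's gso_mask below.
 */
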
/* Discriminate from any valid pending_idx value. */
#define INVALID_PENDING_IDX 0xFFFF

#define MAX_PENDING_REQS XEN_NETIF_TX_RING_SIZE

/* The maximum number of frags is derived from the size of a grant (same
 * as a Xen page size for now).
 */
#define MAX_XEN_SKB_FRAGS (65536 / XEN_PAGE_SIZE + 1)

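/*
 * Worked example: with XEN_PAGE_SIZE == 4096 this is 65536 / 4096 + 1 = 17,
 * i.e. enough slots for a maximally-sized 64 KiB packet plus one extra slot
 * to cover data that is not page-aligned.
 */
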
#define NETBACK_INVALID_HANDLE -1

/* To avoid confusion, we define XEN_NETBK_LEGACY_SLOTS_MAX indicating
 * the maximum slots a valid packet can use. Now this value is defined
 * to be XEN_NETIF_NR_SLOTS_MIN, which is supposed to be supported by
 * all backends.
 */
#define XEN_NETBK_LEGACY_SLOTS_MAX XEN_NETIF_NR_SLOTS_MIN

/* Queue name is interface name with "-qNNN" appended */
#define QUEUE_NAME_SIZE (IFNAMSIZ + 5)

/* IRQ name is queue name with "-tx" or "-rx" appended */
#define IRQ_NAME_SIZE (QUEUE_NAME_SIZE + 3)

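/*
 * Illustrative use (a hedged sketch; the real formatting lives in
 * interface.c and may differ in detail):
 *
 *      snprintf(queue->name, QUEUE_NAME_SIZE, "%s-q%u",
 *               queue->vif->dev->name, queue->id);
 *      snprintf(queue->tx_irq_name, IRQ_NAME_SIZE, "%s-tx", queue->name);
 */
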
struct xenvif;

struct xenvif_stats {
        /* Stats fields to be updated per-queue.
         * A subset of struct net_device_stats that contains only the
         * fields that are updated in netback.c for each queue.
         */
        u64 rx_bytes;
        u64 rx_packets;
        u64 tx_bytes;
        u64 tx_packets;

        /* Additional stats used by xenvif */
        unsigned long rx_gso_checksum_fixup;
        unsigned long tx_zerocopy_sent;
        unsigned long tx_zerocopy_success;
        unsigned long tx_zerocopy_fail;
        unsigned long tx_frag_overflow;
};

#define COPY_BATCH_SIZE 64

struct xenvif_copy_state {
        struct gnttab_copy op[COPY_BATCH_SIZE];
        RING_IDX idx[COPY_BATCH_SIZE];
        unsigned int num;
        struct sk_buff_head *completed;
};

struct xenvif_queue { /* Per-queue data for xenvif */
        unsigned int id; /* Queue ID, 0-based */
        char name[QUEUE_NAME_SIZE]; /* DEVNAME-qN */
        struct xenvif *vif; /* Parent VIF */

        /*
         * TX/RX common EOI handling.
         * When feature-split-event-channels = 0, the interrupt handler sets
         * NETBK_COMMON_EOI, otherwise NETBK_RX_EOI and NETBK_TX_EOI are set
         * by the RX and TX interrupt handlers.
         * The RX and TX handler threads will issue an EOI when either
         * NETBK_COMMON_EOI or their specific bits (NETBK_RX_EOI or
         * NETBK_TX_EOI) are set, and they will reset those bits.
         */
        atomic_t eoi_pending;
#define NETBK_RX_EOI     0x01
#define NETBK_TX_EOI     0x02
#define NETBK_COMMON_EOI 0x04
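        /*
         * Illustrative flow under the scheme above (a sketch only; see the
         * real handlers in interface.c): a split-channel TX interrupt would
         * set its bit with atomic_fetch_or(NETBK_TX_EOI, &queue->eoi_pending),
         * and the TX handler thread, once there is no more work, would clear
         * the bits it owns and call xen_irq_lateeoi(queue->tx_irq, 0) to
         * re-enable event delivery.
         */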

        /* Use NAPI for guest TX */
        struct napi_struct napi;
        /* When feature-split-event-channels = 0, tx_irq = rx_irq. */
        unsigned int tx_irq;
        /* Only used when feature-split-event-channels = 1 */
        char tx_irq_name[IRQ_NAME_SIZE]; /* DEVNAME-qN-tx */
        struct xen_netif_tx_back_ring tx;
        struct sk_buff_head tx_queue;
        struct page *mmap_pages[MAX_PENDING_REQS];
        pending_ring_idx_t pending_prod;
        pending_ring_idx_t pending_cons;
        u16 pending_ring[MAX_PENDING_REQS];
        struct pending_tx_info pending_tx_info[MAX_PENDING_REQS];
        grant_handle_t grant_tx_handle[MAX_PENDING_REQS];

        struct gnttab_copy tx_copy_ops[2 * MAX_PENDING_REQS];
        struct gnttab_map_grant_ref tx_map_ops[MAX_PENDING_REQS];
        struct gnttab_unmap_grant_ref tx_unmap_ops[MAX_PENDING_REQS];
        /* passed to gnttab_[un]map_refs with pages under (un)mapping */
        struct page *pages_to_map[MAX_PENDING_REQS];
        struct page *pages_to_unmap[MAX_PENDING_REQS];

        /* This prevents zerocopy callbacks from racing over dealloc_ring */
        spinlock_t callback_lock;
        /* This prevents the dealloc thread and the NAPI instance from racing
         * over response creation and pending_ring in xenvif_idx_release. In
         * xenvif_tx_err it only protects response creation.
         */
        spinlock_t response_lock;
        pending_ring_idx_t dealloc_prod;
        pending_ring_idx_t dealloc_cons;
        u16 dealloc_ring[MAX_PENDING_REQS];
        struct task_struct *dealloc_task;
        wait_queue_head_t dealloc_wq;
        atomic_t inflight_packets;

        /* Use kthread for guest RX */
        struct task_struct *task;
        wait_queue_head_t wq;
        /* When feature-split-event-channels = 0, tx_irq = rx_irq. */
        unsigned int rx_irq;
        /* Only used when feature-split-event-channels = 1 */
        char rx_irq_name[IRQ_NAME_SIZE]; /* DEVNAME-qN-rx */
        struct xen_netif_rx_back_ring rx;
        struct sk_buff_head rx_queue;

        unsigned int rx_queue_max;
        unsigned int rx_queue_len;
        unsigned long last_rx_time;
        unsigned int rx_slots_needed;
        bool stalled;

        struct xenvif_copy_state rx_copy;

        /* Transmit shaping: allow 'credit_bytes' every 'credit_usec'. */
        unsigned long credit_bytes;
        unsigned long credit_usec;
        unsigned long remaining_credit;
        struct timer_list credit_timeout;
        u64 credit_window_start;
        bool rate_limited;

        /* Statistics */
        struct xenvif_stats stats;
};
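
/*
 * Worked example for the shaping fields above: credit_bytes = 125000 and
 * credit_usec = 1000 permit 125000 bytes per millisecond, roughly 1 Gbit/s.
 * remaining_credit is drained as packets are sent and replenished once the
 * credit window elapses. (The values are illustrative; real ones come from
 * xenstore, not from this header.)
 */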

enum state_bit_shift {
        /* This bit marks that the vif is connected */
        VIF_STATUS_CONNECTED,
};

struct xenvif_mcast_addr {
        struct list_head entry;
        struct rcu_head rcu;
        u8 addr[6];
};

#define XEN_NETBK_MCAST_MAX 64

#define XEN_NETBK_MAX_HASH_KEY_SIZE 40
#define XEN_NETBK_MAX_HASH_MAPPING_SIZE 128
#define XEN_NETBK_HASH_TAG_SIZE 40

struct xenvif_hash_cache_entry {
        struct list_head link;
        struct rcu_head rcu;
        u8 tag[XEN_NETBK_HASH_TAG_SIZE];
        unsigned int len;
        u32 val;
        int seq;
};

struct xenvif_hash_cache {
        spinlock_t lock;
        struct list_head list;
        unsigned int count;
        atomic_t seq;
};

struct xenvif_hash {
        unsigned int alg;
        u32 flags;
        bool mapping_sel;
        u8 key[XEN_NETBK_MAX_HASH_KEY_SIZE];
        u32 mapping[2][XEN_NETBK_MAX_HASH_MAPPING_SIZE];
        unsigned int size;
        struct xenvif_hash_cache cache;
};

struct backend_info {
        struct xenbus_device *dev;
        struct xenvif *vif;

        /* This is the state that will be reflected in xenstore when any
         * active hotplug script completes.
         */
        enum xenbus_state state;

        enum xenbus_state frontend_state;
        struct xenbus_watch hotplug_status_watch;
        u8 have_hotplug_status_watch:1;

        const char *hotplug_script;
};

struct xenvif {
        /* Unique identifier for this interface. */
        domid_t domid;
        unsigned int handle;

        u8 fe_dev_addr[6];
        struct list_head fe_mcast_addr;
        unsigned int fe_mcast_count;

        /* Frontend feature information. */
        int gso_mask;

        u8 can_sg:1;
        u8 ip_csum:1;
        u8 ipv6_csum:1;
        u8 multicast_control:1;

        /* headroom requested by xen-netfront */
        u16 xdp_headroom;

        /* Is this interface disabled? True when the backend discovers
         * the frontend is rogue.
         */
        bool disabled;
        unsigned long status;
        unsigned long drain_timeout;
        unsigned long stall_timeout;

        /* Queues */
        struct xenvif_queue *queues;
        unsigned int num_queues; /* active queues, resource allocated */
        unsigned int stalled_queues;

        struct xenvif_hash hash;

        struct xenbus_watch credit_watch;
        struct xenbus_watch mcast_ctrl_watch;

        struct backend_info *be;

        spinlock_t lock;

#ifdef CONFIG_DEBUG_FS
        struct dentry *xenvif_dbg_root;
#endif

        struct xen_netif_ctrl_back_ring ctrl;
        unsigned int ctrl_irq;

        /* Miscellaneous private stuff. */
        struct net_device *dev;
};

struct xenvif_rx_cb {
        unsigned long expires;
        int meta_slots_used;
};

#define XENVIF_RX_CB(skb) ((struct xenvif_rx_cb *)(skb)->cb)

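/*
 * Illustrative use of the cb-area cast above (a sketch; see rx.c for the
 * real call sites). On queueing:
 *
 *      XENVIF_RX_CB(skb)->expires = jiffies + queue->vif->drain_timeout;
 *
 * and later, to drop packets that have sat in rx_queue too long:
 *
 *      if (time_after(jiffies, XENVIF_RX_CB(skb)->expires))
 *              kfree_skb(__skb_dequeue(&queue->rx_queue));
 */
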
static inline struct xenbus_device *xenvif_to_xenbus_device(struct xenvif *vif)
{
        return to_xenbus_device(vif->dev->dev.parent);
}

void xenvif_tx_credit_callback(struct timer_list *t);

struct xenvif *xenvif_alloc(struct device *parent,
                            domid_t domid,
                            unsigned int handle);

int xenvif_init_queue(struct xenvif_queue *queue);
void xenvif_deinit_queue(struct xenvif_queue *queue);

int xenvif_connect_data(struct xenvif_queue *queue,
                        unsigned long tx_ring_ref,
                        unsigned long rx_ring_ref,
                        unsigned int tx_evtchn,
                        unsigned int rx_evtchn);
void xenvif_disconnect_data(struct xenvif *vif);
int xenvif_connect_ctrl(struct xenvif *vif, grant_ref_t ring_ref,
                        unsigned int evtchn);
void xenvif_disconnect_ctrl(struct xenvif *vif);
void xenvif_free(struct xenvif *vif);

int xenvif_xenbus_init(void);
void xenvif_xenbus_fini(void);

/* (Un)Map communication rings. */
void xenvif_unmap_frontend_data_rings(struct xenvif_queue *queue);
int xenvif_map_frontend_data_rings(struct xenvif_queue *queue,
                                   grant_ref_t tx_ring_ref,
                                   grant_ref_t rx_ring_ref);

/* Check for SKBs from frontend and schedule backend processing */
void xenvif_napi_schedule_or_enable_events(struct xenvif_queue *queue);

/* Prevent the device from generating any further traffic. */
void xenvif_carrier_off(struct xenvif *vif);

int xenvif_tx_action(struct xenvif_queue *queue, int budget);

int xenvif_kthread_guest_rx(void *data);
void xenvif_kick_thread(struct xenvif_queue *queue);

int xenvif_dealloc_kthread(void *data);

irqreturn_t xenvif_ctrl_irq_fn(int irq, void *data);

bool xenvif_have_rx_work(struct xenvif_queue *queue, bool test_kthread);
bool xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb);

void xenvif_carrier_on(struct xenvif *vif);

/* Callback from stack when TX packet can be released */
void xenvif_zerocopy_callback(struct sk_buff *skb, struct ubuf_info *ubuf,
                              bool zerocopy_success);

static inline pending_ring_idx_t nr_pending_reqs(struct xenvif_queue *queue)
{
        return MAX_PENDING_REQS -
                queue->pending_prod + queue->pending_cons;
}

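/*
 * Worked example: pending_prod and pending_cons are free-running unsigned
 * counters, so (pending_prod - pending_cons) counts free slots correctly
 * even across wraparound. With MAX_PENDING_REQS == 256 (the 4 KiB-page
 * value), pending_prod == 300 and pending_cons == 260, there are 40 free
 * slots, so nr_pending_reqs() returns 256 - 300 + 260 = 216 in-flight
 * requests.
 */
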
irqreturn_t xenvif_interrupt(int irq, void *dev_id);

extern bool separate_tx_rx_irq;
extern bool provides_xdp_headroom;

extern unsigned int rx_drain_timeout_msecs;
extern unsigned int rx_stall_timeout_msecs;
extern unsigned int xenvif_max_queues;
extern unsigned int xenvif_hash_cache_size;

#ifdef CONFIG_DEBUG_FS
extern struct dentry *xen_netback_dbg_root;
#endif

void xenvif_skb_zerocopy_prepare(struct xenvif_queue *queue,
                                 struct sk_buff *skb);
void xenvif_skb_zerocopy_complete(struct xenvif_queue *queue);

/* Multicast control */
bool xenvif_mcast_match(struct xenvif *vif, const u8 *addr);
void xenvif_mcast_addr_list_free(struct xenvif *vif);

/* Hash */
void xenvif_init_hash(struct xenvif *vif);
void xenvif_deinit_hash(struct xenvif *vif);

u32 xenvif_set_hash_alg(struct xenvif *vif, u32 alg);
u32 xenvif_get_hash_flags(struct xenvif *vif, u32 *flags);
u32 xenvif_set_hash_flags(struct xenvif *vif, u32 flags);
u32 xenvif_set_hash_key(struct xenvif *vif, u32 gref, u32 len);
u32 xenvif_set_hash_mapping_size(struct xenvif *vif, u32 size);
u32 xenvif_set_hash_mapping(struct xenvif *vif, u32 gref, u32 len,
                            u32 off);

void xenvif_set_skb_hash(struct xenvif *vif, struct sk_buff *skb);

#ifdef CONFIG_DEBUG_FS
void xenvif_dump_hash_info(struct xenvif *vif, struct seq_file *m);
#endif

#endif /* __XEN_NETBACK__COMMON_H__ */