/* SPDX-License-Identifier: (GPL-2.0 OR MIT)
 * Google virtual Ethernet (gve) driver
 *
 * Copyright (C) 2015-2021 Google, Inc.
 */

#ifndef _GVE_H_
#define _GVE_H_

#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/u64_stats_sync.h>
#include <net/xdp.h>

#include "gve_desc.h"
#include "gve_desc_dqo.h"

#ifndef PCI_VENDOR_ID_GOOGLE
#define PCI_VENDOR_ID_GOOGLE	0x1ae0
#endif

#define PCI_DEV_ID_GVNIC	0x0042

#define GVE_REGISTER_BAR	0
#define GVE_DOORBELL_BAR	2

/* Driver can alloc up to 2 segments for the header and 2 for the payload. */
#define GVE_TX_MAX_IOVEC 4
/* 1 for management, 1 for rx, 1 for tx */
#define GVE_MIN_MSIX 3

/* Numbers of gve tx/rx stats in stats report. */
#define GVE_TX_STATS_REPORT_NUM	6
#define GVE_RX_STATS_REPORT_NUM	2

/* Interval to schedule a stats report update, 20000ms. */
#define GVE_STATS_REPORT_TIMER_PERIOD	20000

/* Numbers of NIC tx/rx stats in stats report. */
#define NIC_TX_STATS_REPORT_NUM	0
#define NIC_RX_STATS_REPORT_NUM	4

#define GVE_DATA_SLOT_ADDR_PAGE_MASK (~(PAGE_SIZE - 1))
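
/* Illustrative use of the mask (a sketch, not a driver helper): masking a
 * data-slot address recovers the page-aligned DMA address, dropping any
 * in-page offset carried in the low bits:
 *
 *	dma_addr_t page_addr = addr & GVE_DATA_SLOT_ADDR_PAGE_MASK;
 */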

/* PTYPEs are always 10 bits. */
#define GVE_NUM_PTYPES	1024

#define GVE_RX_BUFFER_SIZE_DQO	2048

#define GVE_XDP_ACTIONS	5

#define GVE_GQ_TX_MIN_PKT_DESC_BYTES	182

#define DQO_QPL_DEFAULT_TX_PAGES	512
#define DQO_QPL_DEFAULT_RX_PAGES	2048

/* Maximum TSO size supported on DQO */
#define GVE_DQO_TX_MAX	0x3FFFF

/* Each slot in the desc ring has a 1:1 mapping to a slot in the data ring */
struct gve_rx_desc_queue {
	struct gve_rx_desc *desc_ring; /* the descriptor ring */
	dma_addr_t bus; /* the bus for the desc_ring */
	u8 seqno; /* the next expected seqno for this desc */
};

/* The page info for a single slot in the RX data queue */
struct gve_rx_slot_page_info {
	struct page *page;
	void *page_address;
	u32 page_offset; /* offset to write to in page */
	int pagecnt_bias; /* expected pagecnt if only the driver has a ref */
	u16 pad; /* adjustment for rx padding */
	u8 can_flip; /* tracks if the networking stack is using the page */
};

/* A list of pages registered with the device during setup and used by a queue
 * as buffers
 */
struct gve_queue_page_list {
	u32 id; /* unique id */
	u32 num_entries;
	struct page **pages; /* list of num_entries pages */
	dma_addr_t *page_buses; /* the dma addrs of the pages */
};

/* Each slot in the data ring has a 1:1 mapping to a slot in the desc ring */
struct gve_rx_data_queue {
	union gve_rx_data_slot *data_ring; /* read by NIC */
	dma_addr_t data_bus; /* dma mapping of the slots */
	struct gve_rx_slot_page_info *page_info; /* page info of the buffers */
	struct gve_queue_page_list *qpl; /* qpl assigned to this queue */
	u8 raw_addressing; /* use raw_addressing? */
};

struct gve_priv;

/* RX buffer queue for posting buffers to HW.
 * Each RX (completion) queue has a corresponding buffer queue.
 */
struct gve_rx_buf_queue_dqo {
	struct gve_rx_desc_dqo *desc_ring;
	dma_addr_t bus;
	u32 head; /* Pointer to start cleaning buffers at. */
	u32 tail; /* Last posted buffer index + 1 */
	u32 mask; /* Mask for indices to the size of the ring */
};

/* RX completion queue to receive packets from HW. */
struct gve_rx_compl_queue_dqo {
	struct gve_rx_compl_desc_dqo *desc_ring;
	dma_addr_t bus;

	/* Number of slots which did not have a buffer posted yet. We should not
	 * post more buffers than the queue size to avoid HW overrunning the
	 * queue.
	 */
	int num_free_slots;

	/* HW uses a "generation bit" to notify SW of new descriptors. When a
	 * descriptor's generation bit is different from the current generation,
	 * that descriptor is ready to be consumed by SW.
	 */
	u8 cur_gen_bit;

	/* Pointer into desc_ring where the next completion descriptor will be
	 * received.
	 */
	u32 head;
	u32 mask; /* Mask for indices to the size of the ring */
};
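
/* Illustrative consumption loop for the generation bit (a sketch; the
 * `generation` field name on the completion descriptor is assumed here,
 * see gve_desc_dqo.h for the real layout):
 *
 *	desc = &complq->desc_ring[complq->head];
 *	if (desc->generation == complq->cur_gen_bit)
 *		return;	// not ready: same generation as the current pass
 *	// ...process desc...
 *	complq->head = (complq->head + 1) & complq->mask;
 *	if (complq->head == 0)
 *		complq->cur_gen_bit ^= 1; // flip on wrap-around
 */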

/* Stores state for tracking buffers posted to HW */
struct gve_rx_buf_state_dqo {
	/* The page posted to HW. */
	struct gve_rx_slot_page_info page_info;

	/* The DMA address corresponding to `page_info`. */
	dma_addr_t addr;

	/* Last offset into the page when it only had a single reference, at
	 * which point every other offset is free to be reused.
	 */
	u32 last_single_ref_offset;

	/* Linked list index to next element in the list, or -1 if none */
	s16 next;
};

/* `head` and `tail` are indices into an array, or -1 if empty. */
struct gve_index_list {
	s16 head;
	s16 tail;
};
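
/* Illustrative pop from the head of one of these lists (a sketch, not a
 * driver helper), where `states` is the ring's buf_states array and the
 * chain runs through each element's `next` index:
 *
 *	s16 idx = list->head;
 *	if (idx != -1) {
 *		list->head = states[idx].next;
 *		if (list->head == -1)
 *			list->tail = -1;
 *	}
 */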

/* A single received packet split across multiple buffers may be
 * reconstructed using the information in this structure.
 */
struct gve_rx_ctx {
	/* head and tail of skb chain for the current packet or NULL if none */
	struct sk_buff *skb_head;
	struct sk_buff *skb_tail;
	u32 total_size;
	u8 frag_cnt;
	bool drop_pkt;
};

struct gve_rx_cnts {
	u32 ok_pkt_bytes;
	u16 ok_pkt_cnt;
	u16 total_pkt_cnt;
	u16 cont_pkt_cnt;
	u16 desc_err_pkt_cnt;
};

/* Contains datapath state used to represent an RX queue. */
struct gve_rx_ring {
	struct gve_priv *gve;
	union {
		/* GQI fields */
		struct {
			struct gve_rx_desc_queue desc;
			struct gve_rx_data_queue data;

			/* threshold for posting new buffs and descs */
			u32 db_threshold;
			u16 packet_buffer_size;

			u32 qpl_copy_pool_mask;
			u32 qpl_copy_pool_head;
			struct gve_rx_slot_page_info *qpl_copy_pool;
		};

		/* DQO fields. */
		struct {
			struct gve_rx_buf_queue_dqo bufq;
			struct gve_rx_compl_queue_dqo complq;

			struct gve_rx_buf_state_dqo *buf_states;
			u16 num_buf_states;

			/* Linked list of gve_rx_buf_state_dqo. Index into
			 * buf_states, or -1 if empty.
			 */
			s16 free_buf_states;

			/* Linked list of gve_rx_buf_state_dqo. Indexes into
			 * buf_states, or -1 if empty.
			 *
			 * This list contains buf_states which are pointing to
			 * valid buffers.
			 *
			 * We use a FIFO here in order to increase the
			 * probability that buffers can be reused by increasing
			 * the time between usages.
			 */
			struct gve_index_list recycled_buf_states;

			/* Linked list of gve_rx_buf_state_dqo. Indexes into
			 * buf_states, or -1 if empty.
			 *
			 * This list contains buf_states which have buffers
			 * which cannot be reused yet.
			 */
			struct gve_index_list used_buf_states;

			/* qpl assigned to this queue */
			struct gve_queue_page_list *qpl;
		} dqo;
	};

	u64 rbytes; /* free-running bytes received */
	u64 rpackets; /* free-running packets received */
	u32 cnt; /* free-running total number of completed packets */
	u32 fill_cnt; /* free-running total number of descs and buffs posted */
	u32 mask; /* masks the cnt and fill_cnt to the size of the ring */
	u64 rx_copybreak_pkt; /* free-running count of copybreak packets */
	u64 rx_copied_pkt; /* free-running total number of copied packets */
	u64 rx_skb_alloc_fail; /* free-running count of skb alloc fails */
	u64 rx_buf_alloc_fail; /* free-running count of buffer alloc fails */
	u64 rx_desc_err_dropped_pkt; /* free-running count of packets dropped by descriptor error */
	u64 rx_cont_packet_cnt; /* free-running multi-fragment packets received */
	u64 rx_frag_flip_cnt; /* free-running count of rx segments where page_flip was used */
	u64 rx_frag_copy_cnt; /* free-running count of rx segments copied */
	u64 rx_frag_alloc_cnt; /* free-running count of rx page allocations */
	u64 xdp_tx_errors;
	u64 xdp_redirect_errors;
	u64 xdp_alloc_fails;
	u64 xdp_actions[GVE_XDP_ACTIONS];
	u32 q_num; /* queue index */
	u32 ntfy_id; /* notification block index */
	struct gve_queue_resources *q_resources; /* head and tail pointer idx */
	dma_addr_t q_resources_bus; /* dma address for the queue resources */
	struct u64_stats_sync statss; /* sync stats for 32bit archs */

	struct gve_rx_ctx ctx; /* Info for packet currently being processed in this ring. */

	/* XDP stuff */
	struct xdp_rxq_info xdp_rxq;
	struct xdp_rxq_info xsk_rxq;
	struct xsk_buff_pool *xsk_pool;
	struct page_frag_cache page_cache; /* Page cache to allocate XDP frames */
};
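
/* Illustrative stats update under `statss` (standard u64_stats_sync usage,
 * sketched here rather than copied from the datapath):
 *
 *	u64_stats_update_begin(&rx->statss);
 *	rx->rpackets++;
 *	rx->rbytes += len;
 *	u64_stats_update_end(&rx->statss);
 */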

/* A TX desc ring entry */
union gve_tx_desc {
	struct gve_tx_pkt_desc pkt; /* first desc for a packet */
	struct gve_tx_mtd_desc mtd; /* optional metadata descriptor */
	struct gve_tx_seg_desc seg; /* subsequent descs for a packet */
};

/* Tracks the memory in the fifo occupied by a segment of a packet */
struct gve_tx_iovec {
	u32 iov_offset; /* offset into this segment */
	u32 iov_len; /* length */
	u32 iov_padding; /* padding associated with this segment */
};

/* Tracks the memory in the fifo occupied by the skb. Mapped 1:1 to a desc
 * ring entry but only used for a pkt_desc not a seg_desc
 */
struct gve_tx_buffer_state {
	union {
		struct sk_buff *skb; /* skb for this pkt */
		struct xdp_frame *xdp_frame; /* xdp_frame */
	};
	struct {
		u16 size; /* size of xmitted xdp pkt */
		u8 is_xsk; /* xsk buff */
	} xdp;
	union {
		struct gve_tx_iovec iov[GVE_TX_MAX_IOVEC]; /* segments of this pkt */
		struct {
			DEFINE_DMA_UNMAP_ADDR(dma);
			DEFINE_DMA_UNMAP_LEN(len);
		};
	};
};

/* A TX buffer - each queue has one */
struct gve_tx_fifo {
	void *base; /* address of base of FIFO */
	u32 size; /* total size */
	atomic_t available; /* how much space is still available */
	u32 head; /* offset to write at */
	struct gve_queue_page_list *qpl; /* QPL mapped into this FIFO */
};

/* TX descriptor for DQO format */
union gve_tx_desc_dqo {
	struct gve_tx_pkt_desc_dqo pkt;
	struct gve_tx_tso_context_desc_dqo tso_ctx;
	struct gve_tx_general_context_desc_dqo general_ctx;
};

enum gve_packet_state {
	/* Packet is in free list, available to be allocated.
	 * This should always be zero since state is not explicitly initialized.
	 */
	GVE_PACKET_STATE_UNALLOCATED,
	/* Packet is expecting a regular data completion or miss completion */
	GVE_PACKET_STATE_PENDING_DATA_COMPL,
	/* Packet has received a miss completion and is expecting a
	 * re-injection completion.
	 */
	GVE_PACKET_STATE_PENDING_REINJECT_COMPL,
	/* No valid completion received within the specified timeout. */
	GVE_PACKET_STATE_TIMED_OUT_COMPL,
};

struct gve_tx_pending_packet_dqo {
	struct sk_buff *skb; /* skb for this packet */

	/* 0th element corresponds to the linear portion of `skb`, should be
	 * unmapped with `dma_unmap_single`.
	 *
	 * All others correspond to `skb`'s frags and should be unmapped with
	 * `dma_unmap_page`.
	 */
	DEFINE_DMA_UNMAP_ADDR(dma[MAX_SKB_FRAGS + 1]);
	DEFINE_DMA_UNMAP_LEN(len[MAX_SKB_FRAGS + 1]);
	u16 num_bufs;

	/* Linked list index to next element in the list, or -1 if none */
	s16 next;

	/* Linked list index to prev element in the list, or -1 if none.
	 * Used for tracking either outstanding miss completions or prematurely
	 * freed packets.
	 */
	s16 prev;

	/* Identifies the current state of the packet as defined in
	 * `enum gve_packet_state`.
	 */
	u8 state;

	/* If packet is an outstanding miss completion, then the packet is
	 * freed if the corresponding re-injection completion is not received
	 * before kernel jiffies exceeds timeout_jiffies.
	 */
	unsigned long timeout_jiffies;
};
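
/* Illustrative timeout check (a sketch of the rule described above, not
 * the driver's actual cleanup code):
 *
 *	if (time_after(jiffies, pending_packet->timeout_jiffies)) {
 *		pending_packet->state = GVE_PACKET_STATE_TIMED_OUT_COMPL;
 *		// move it from miss_completions to timed_out_completions
 *	}
 */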

/* Contains datapath state used to represent a TX queue. */
struct gve_tx_ring {
	/* Cacheline 0 -- Accessed & dirtied during transmit */
	union {
		/* GQI fields */
		struct {
			struct gve_tx_fifo tx_fifo;
			u32 req; /* driver tracked head pointer */
			u32 done; /* driver tracked tail pointer */
		};

		/* DQO fields. */
		struct {
			/* Linked list of gve_tx_pending_packet_dqo. Index into
			 * pending_packets, or -1 if empty.
			 *
			 * This is a consumer list owned by the TX path. When it
			 * runs out, the producer list is stolen from the
			 * completion handling path
			 * (dqo_compl.free_pending_packets).
			 */
			s16 free_pending_packets;

			/* Cached value of `dqo_compl.hw_tx_head` */
			u32 head;
			u32 tail; /* Last posted buffer index + 1 */

			/* Index of the last descriptor with "report event" bit
			 * set.
			 */
			u32 last_re_idx;
		} dqo_tx;
	};

	/* Cacheline 1 -- Accessed & dirtied during gve_clean_tx_done */
	union {
		/* GQI fields */
		struct {
			/* Spinlock held while cleanup is in progress */
			spinlock_t clean_lock;
			/* Spinlock for XDP tx traffic */
			spinlock_t xdp_lock;
		};

		/* DQO fields. */
		struct {
			u32 head; /* Last read on compl_desc */

			/* Tracks the current gen bit of compl_q */
			u8 cur_gen_bit;

			/* Linked list of gve_tx_pending_packet_dqo. Index into
			 * pending_packets, or -1 if empty.
			 *
			 * This is the producer list, owned by the completion
			 * handling path. When the consumer list
			 * (dqo_tx.free_pending_packets) runs out, this list
			 * will be stolen.
			 */
			atomic_t free_pending_packets;

			/* Last TX ring index fetched by HW */
			atomic_t hw_tx_head;

			/* List to track pending packets which received a miss
			 * completion but not a corresponding reinjection.
			 */
			struct gve_index_list miss_completions;

			/* List to track pending packets that were completed
			 * before receiving a valid completion because they
			 * reached a specified timeout.
			 */
			struct gve_index_list timed_out_completions;
		} dqo_compl;
	} ____cacheline_aligned;
	u64 pkt_done; /* free-running - total packets completed */
	u64 bytes_done; /* free-running - total bytes completed */
	u64 dropped_pkt; /* free-running - total packets dropped */
	u64 dma_mapping_error; /* count of dma mapping errors */

	/* Cacheline 2 -- Read-mostly fields */
	union {
		/* GQI fields */
		struct {
			union gve_tx_desc *desc;

			/* Maps 1:1 to a desc */
			struct gve_tx_buffer_state *info;
		};

		/* DQO fields. */
		struct {
			union gve_tx_desc_dqo *tx_ring;
			struct gve_tx_compl_desc *compl_ring;

			struct gve_tx_pending_packet_dqo *pending_packets;
			s16 num_pending_packets;

			u32 complq_mask; /* complq size is complq_mask + 1 */

			/* QPL fields */
			struct {
				/* qpl assigned to this queue */
				struct gve_queue_page_list *qpl;
			};
		} dqo;
	} ____cacheline_aligned;
	struct netdev_queue *netdev_txq;
	struct gve_queue_resources *q_resources; /* head and tail pointer idx */
	struct device *dev;
	u32 mask; /* masks req and done down to queue size */
	u8 raw_addressing; /* use raw_addressing? */

	/* Slow-path fields */
	u32 q_num ____cacheline_aligned; /* queue idx */
	u32 stop_queue; /* count of queue stops */
	u32 wake_queue; /* count of queue wakes */
	u32 queue_timeout; /* count of queue timeouts */
	u32 ntfy_id; /* notification block index */
	u32 last_kick_msec; /* Last time the queue was kicked */
	dma_addr_t bus; /* dma address of the descr ring */
	dma_addr_t q_resources_bus; /* dma address of the queue resources */
	dma_addr_t complq_bus_dqo; /* dma address of the dqo.compl_ring */
	struct u64_stats_sync statss; /* sync stats for 32bit archs */
	struct xsk_buff_pool *xsk_pool;
	u32 xdp_xsk_wakeup;
	u32 xdp_xsk_done;
	u64 xdp_xsk_sent;
	u64 xdp_xmit;
	u64 xdp_xmit_errors;
} ____cacheline_aligned;
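
/* Illustrative sketch of the free-list "steal" described above (not a
 * driver function): when the TX path's consumer list runs out, it can take
 * the whole producer list in one atomic exchange:
 *
 *	s16 head = tx->dqo_tx.free_pending_packets;
 *	if (head == -1)
 *		head = atomic_xchg(&tx->dqo_compl.free_pending_packets, -1);
 */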

/* Wraps the info for one irq including the napi struct and the queues
 * associated with that irq.
 */
struct gve_notify_block {
	__be32 *irq_db_index; /* pointer to idx into Bar2 */
	char name[IFNAMSIZ + 16]; /* name registered with the kernel */
	struct napi_struct napi; /* kernel napi struct for this block */
	struct gve_priv *priv;
	struct gve_tx_ring *tx; /* tx rings on this block */
	struct gve_rx_ring *rx; /* rx rings on this block */
};

/* Tracks allowed and current queue settings */
struct gve_queue_config {
	u16 max_queues;
	u16 num_queues; /* current */
};

/* Tracks the available and used qpl IDs */
struct gve_qpl_config {
	u32 qpl_map_size; /* map memory size */
	unsigned long *qpl_id_map; /* bitmap of used qpl ids */
};

struct gve_options_dqo_rda {
	u16 tx_comp_ring_entries; /* number of tx_comp descriptors */
	u16 rx_buff_ring_entries; /* number of rx_buff descriptors */
};

struct gve_irq_db {
	__be32 index;
} ____cacheline_aligned;

struct gve_ptype {
	u8 l3_type; /* `gve_l3_type` in gve_adminq.h */
	u8 l4_type; /* `gve_l4_type` in gve_adminq.h */
};

struct gve_ptype_lut {
	struct gve_ptype ptypes[GVE_NUM_PTYPES];
};

/* GVE_QUEUE_FORMAT_UNSPECIFIED must be zero since 0 is the default value
 * when the entire configure_device_resources command is zeroed out and the
 * queue_format is not specified.
 */
enum gve_queue_format {
	GVE_QUEUE_FORMAT_UNSPECIFIED	= 0x0,
	GVE_GQI_RDA_FORMAT		= 0x1,
	GVE_GQI_QPL_FORMAT		= 0x2,
	GVE_DQO_RDA_FORMAT		= 0x3,
	GVE_DQO_QPL_FORMAT		= 0x4,
};

struct gve_priv {
	struct net_device *dev;
	struct gve_tx_ring *tx; /* array of tx_cfg.num_queues */
	struct gve_rx_ring *rx; /* array of rx_cfg.num_queues */
	struct gve_queue_page_list *qpls; /* array of num qpls */
	struct gve_notify_block *ntfy_blocks; /* array of num_ntfy_blks */
	struct gve_irq_db *irq_db_indices; /* array of num_ntfy_blks */
	dma_addr_t irq_db_indices_bus;
	struct msix_entry *msix_vectors; /* array of num_ntfy_blks + 1 */
	char mgmt_msix_name[IFNAMSIZ + 16];
	u32 mgmt_msix_idx;
	__be32 *counter_array; /* array of num_event_counters */
	dma_addr_t counter_array_bus;

	u16 num_event_counters;
	u16 tx_desc_cnt; /* num desc per ring */
	u16 rx_desc_cnt; /* num desc per ring */
	u16 tx_pages_per_qpl; /* Suggested number of pages per qpl for TX queues by NIC */
	u16 rx_pages_per_qpl; /* Suggested number of pages per qpl for RX queues by NIC */
	u16 rx_data_slot_cnt; /* rx buffer length */
	u64 max_registered_pages;
	u64 num_registered_pages; /* num pages registered with NIC */
	struct bpf_prog *xdp_prog; /* XDP BPF program */
	u32 rx_copybreak; /* copy packets smaller than this */
	u16 default_num_queues; /* default num queues to set up */

	u16 num_xdp_queues;
	struct gve_queue_config tx_cfg;
	struct gve_queue_config rx_cfg;
	struct gve_qpl_config qpl_cfg; /* map used QPL ids */
	u32 num_ntfy_blks; /* split between TX and RX so must be even */

	struct gve_registers __iomem *reg_bar0; /* see gve_register.h */
	__be32 __iomem *db_bar2; /* "array" of doorbells */
	u32 msg_enable; /* level for netif* netdev print macros */
	struct pci_dev *pdev;

	/* metrics */
	u32 tx_timeo_cnt;

	/* Admin queue - see gve_adminq.h */
	union gve_adminq_command *adminq;
	dma_addr_t adminq_bus_addr;
	u32 adminq_mask; /* masks prod_cnt to adminq size */
	u32 adminq_prod_cnt; /* free-running count of AQ cmds executed */
	u32 adminq_cmd_fail; /* free-running count of AQ cmds failed */
	u32 adminq_timeouts; /* free-running count of AQ cmd timeouts */
	/* free-running count of each distinct AQ cmd executed */
	u32 adminq_describe_device_cnt;
	u32 adminq_cfg_device_resources_cnt;
	u32 adminq_register_page_list_cnt;
	u32 adminq_unregister_page_list_cnt;
	u32 adminq_create_tx_queue_cnt;
	u32 adminq_create_rx_queue_cnt;
	u32 adminq_destroy_tx_queue_cnt;
	u32 adminq_destroy_rx_queue_cnt;
	u32 adminq_dcfg_device_resources_cnt;
	u32 adminq_set_driver_parameter_cnt;
	u32 adminq_report_stats_cnt;
	u32 adminq_report_link_speed_cnt;
	u32 adminq_get_ptype_map_cnt;
	u32 adminq_verify_driver_compatibility_cnt;

	/* Global stats */
	u32 interface_up_cnt; /* count of times interface turned up since last reset */
	u32 interface_down_cnt; /* count of times interface turned down since last reset */
	u32 reset_cnt; /* count of resets */
	u32 page_alloc_fail; /* count of page alloc fails */
	u32 dma_mapping_error; /* count of dma mapping errors */
	u32 stats_report_trigger_cnt; /* count of device-requested stats-reports since last reset */
	u32 suspend_cnt; /* count of times suspended */
	u32 resume_cnt; /* count of times resumed */
	struct workqueue_struct *gve_wq;
	struct work_struct service_task;
	struct work_struct stats_report_task;
	unsigned long service_task_flags;
	unsigned long state_flags;

	struct gve_stats_report *stats_report;
	u64 stats_report_len;
	dma_addr_t stats_report_bus; /* dma address for the stats report */
	unsigned long ethtool_flags;

	unsigned long stats_report_timer_period;
	struct timer_list stats_report_timer;

	/* Gvnic device link speed from hypervisor. */
	u64 link_speed;
	bool up_before_suspend; /* True if dev was up before suspend */

	struct gve_options_dqo_rda options_dqo_rda;
	struct gve_ptype_lut *ptype_lut_dqo;

	/* Must be a power of two. */
	int data_buffer_size_dqo;

	enum gve_queue_format queue_format;

	/* Interrupt coalescing settings */
	u32 tx_coalesce_usecs;
	u32 rx_coalesce_usecs;
};

enum gve_service_task_flags_bit {
	GVE_PRIV_FLAGS_DO_RESET			= 1,
	GVE_PRIV_FLAGS_RESET_IN_PROGRESS	= 2,
	GVE_PRIV_FLAGS_PROBE_IN_PROGRESS	= 3,
	GVE_PRIV_FLAGS_DO_REPORT_STATS		= 4,
};

enum gve_state_flags_bit {
	GVE_PRIV_FLAGS_ADMIN_QUEUE_OK		= 1,
	GVE_PRIV_FLAGS_DEVICE_RESOURCES_OK	= 2,
	GVE_PRIV_FLAGS_DEVICE_RINGS_OK		= 3,
	GVE_PRIV_FLAGS_NAPI_ENABLED		= 4,
};

enum gve_ethtool_flags_bit {
	GVE_PRIV_FLAGS_REPORT_STATS		= 0,
};

static inline bool gve_get_do_reset(struct gve_priv *priv)
{
	return test_bit(GVE_PRIV_FLAGS_DO_RESET, &priv->service_task_flags);
}

static inline void gve_set_do_reset(struct gve_priv *priv)
{
	set_bit(GVE_PRIV_FLAGS_DO_RESET, &priv->service_task_flags);
}

static inline void gve_clear_do_reset(struct gve_priv *priv)
{
	clear_bit(GVE_PRIV_FLAGS_DO_RESET, &priv->service_task_flags);
}

static inline bool gve_get_reset_in_progress(struct gve_priv *priv)
{
	return test_bit(GVE_PRIV_FLAGS_RESET_IN_PROGRESS,
			&priv->service_task_flags);
}

static inline void gve_set_reset_in_progress(struct gve_priv *priv)
{
	set_bit(GVE_PRIV_FLAGS_RESET_IN_PROGRESS, &priv->service_task_flags);
}

static inline void gve_clear_reset_in_progress(struct gve_priv *priv)
{
	clear_bit(GVE_PRIV_FLAGS_RESET_IN_PROGRESS, &priv->service_task_flags);
}

static inline bool gve_get_probe_in_progress(struct gve_priv *priv)
{
	return test_bit(GVE_PRIV_FLAGS_PROBE_IN_PROGRESS,
			&priv->service_task_flags);
}

static inline void gve_set_probe_in_progress(struct gve_priv *priv)
{
	set_bit(GVE_PRIV_FLAGS_PROBE_IN_PROGRESS, &priv->service_task_flags);
}

static inline void gve_clear_probe_in_progress(struct gve_priv *priv)
{
	clear_bit(GVE_PRIV_FLAGS_PROBE_IN_PROGRESS, &priv->service_task_flags);
}

static inline bool gve_get_do_report_stats(struct gve_priv *priv)
{
	return test_bit(GVE_PRIV_FLAGS_DO_REPORT_STATS,
			&priv->service_task_flags);
}

static inline void gve_set_do_report_stats(struct gve_priv *priv)
{
	set_bit(GVE_PRIV_FLAGS_DO_REPORT_STATS, &priv->service_task_flags);
}

static inline void gve_clear_do_report_stats(struct gve_priv *priv)
{
	clear_bit(GVE_PRIV_FLAGS_DO_REPORT_STATS, &priv->service_task_flags);
}

static inline bool gve_get_admin_queue_ok(struct gve_priv *priv)
{
	return test_bit(GVE_PRIV_FLAGS_ADMIN_QUEUE_OK, &priv->state_flags);
}

static inline void gve_set_admin_queue_ok(struct gve_priv *priv)
{
	set_bit(GVE_PRIV_FLAGS_ADMIN_QUEUE_OK, &priv->state_flags);
}

static inline void gve_clear_admin_queue_ok(struct gve_priv *priv)
{
	clear_bit(GVE_PRIV_FLAGS_ADMIN_QUEUE_OK, &priv->state_flags);
}

static inline bool gve_get_device_resources_ok(struct gve_priv *priv)
{
	return test_bit(GVE_PRIV_FLAGS_DEVICE_RESOURCES_OK, &priv->state_flags);
}

static inline void gve_set_device_resources_ok(struct gve_priv *priv)
{
	set_bit(GVE_PRIV_FLAGS_DEVICE_RESOURCES_OK, &priv->state_flags);
}

static inline void gve_clear_device_resources_ok(struct gve_priv *priv)
{
	clear_bit(GVE_PRIV_FLAGS_DEVICE_RESOURCES_OK, &priv->state_flags);
}

static inline bool gve_get_device_rings_ok(struct gve_priv *priv)
{
	return test_bit(GVE_PRIV_FLAGS_DEVICE_RINGS_OK, &priv->state_flags);
}

static inline void gve_set_device_rings_ok(struct gve_priv *priv)
{
	set_bit(GVE_PRIV_FLAGS_DEVICE_RINGS_OK, &priv->state_flags);
}

static inline void gve_clear_device_rings_ok(struct gve_priv *priv)
{
	clear_bit(GVE_PRIV_FLAGS_DEVICE_RINGS_OK, &priv->state_flags);
}

static inline bool gve_get_napi_enabled(struct gve_priv *priv)
{
	return test_bit(GVE_PRIV_FLAGS_NAPI_ENABLED, &priv->state_flags);
}

static inline void gve_set_napi_enabled(struct gve_priv *priv)
{
	set_bit(GVE_PRIV_FLAGS_NAPI_ENABLED, &priv->state_flags);
}

static inline void gve_clear_napi_enabled(struct gve_priv *priv)
{
	clear_bit(GVE_PRIV_FLAGS_NAPI_ENABLED, &priv->state_flags);
}

static inline bool gve_get_report_stats(struct gve_priv *priv)
{
	return test_bit(GVE_PRIV_FLAGS_REPORT_STATS, &priv->ethtool_flags);
}

static inline void gve_clear_report_stats(struct gve_priv *priv)
{
	clear_bit(GVE_PRIV_FLAGS_REPORT_STATS, &priv->ethtool_flags);
}

/* Returns the address of the ntfy_block's irq doorbell */
static inline __be32 __iomem *gve_irq_doorbell(struct gve_priv *priv,
					       struct gve_notify_block *block)
{
	return &priv->db_bar2[be32_to_cpu(*block->irq_db_index)];
}

/* Returns the index into ntfy_blocks of the given tx ring's block */
static inline u32 gve_tx_idx_to_ntfy(struct gve_priv *priv, u32 queue_idx)
{
	return queue_idx;
}

/* Returns the index into ntfy_blocks of the given rx ring's block */
static inline u32 gve_rx_idx_to_ntfy(struct gve_priv *priv, u32 queue_idx)
{
	return (priv->num_ntfy_blks / 2) + queue_idx;
}
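
/* Worked example of the TX/RX split (illustrative numbers only): with
 * num_ntfy_blks == 8, TX queues 0-3 map to notify blocks 0-3 and RX
 * queues 0-3 map to notify blocks 4-7.
 */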

static inline bool gve_is_qpl(struct gve_priv *priv)
{
	return priv->queue_format == GVE_GQI_QPL_FORMAT ||
	       priv->queue_format == GVE_DQO_QPL_FORMAT;
}

/* Returns the number of tx queue page lists */
static inline u32 gve_num_tx_qpls(struct gve_priv *priv)
{
	if (!gve_is_qpl(priv))
		return 0;

	return priv->tx_cfg.num_queues + priv->num_xdp_queues;
}

/* Returns the number of XDP tx queue page lists */
static inline u32 gve_num_xdp_qpls(struct gve_priv *priv)
{
	if (priv->queue_format != GVE_GQI_QPL_FORMAT)
		return 0;

	return priv->num_xdp_queues;
}

/* Returns the number of rx queue page lists */
static inline u32 gve_num_rx_qpls(struct gve_priv *priv)
{
	if (!gve_is_qpl(priv))
		return 0;

	return priv->rx_cfg.num_queues;
}

static inline u32 gve_tx_qpl_id(struct gve_priv *priv, int tx_qid)
{
	return tx_qid;
}

static inline u32 gve_rx_qpl_id(struct gve_priv *priv, int rx_qid)
{
	return priv->tx_cfg.max_queues + rx_qid;
}

static inline u32 gve_tx_start_qpl_id(struct gve_priv *priv)
{
	return gve_tx_qpl_id(priv, 0);
}

static inline u32 gve_rx_start_qpl_id(struct gve_priv *priv)
{
	return gve_rx_qpl_id(priv, 0);
}
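
/* Resulting QPL id layout (derived from the helpers above): ids in
 * [0, tx_cfg.max_queues) belong to TX queues, and ids starting at
 * tx_cfg.max_queues belong to RX queues; gve_qpl_dma_dir() below relies
 * on this split.
 */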

/* Returns a pointer to the next available tx qpl in the list of qpls */
static inline
struct gve_queue_page_list *gve_assign_tx_qpl(struct gve_priv *priv, int tx_qid)
{
	int id = gve_tx_qpl_id(priv, tx_qid);

	/* QPL already in use */
	if (test_bit(id, priv->qpl_cfg.qpl_id_map))
		return NULL;

	set_bit(id, priv->qpl_cfg.qpl_id_map);
	return &priv->qpls[id];
}

/* Returns a pointer to the next available rx qpl in the list of qpls */
static inline
struct gve_queue_page_list *gve_assign_rx_qpl(struct gve_priv *priv, int rx_qid)
{
	int id = gve_rx_qpl_id(priv, rx_qid);

	/* QPL already in use */
	if (test_bit(id, priv->qpl_cfg.qpl_id_map))
		return NULL;

	set_bit(id, priv->qpl_cfg.qpl_id_map);
	return &priv->qpls[id];
}

/* Unassigns the qpl with the given id */
static inline void gve_unassign_qpl(struct gve_priv *priv, int id)
{
	clear_bit(id, priv->qpl_cfg.qpl_id_map);
}

/* Returns the correct dma direction for tx and rx qpls */
static inline enum dma_data_direction gve_qpl_dma_dir(struct gve_priv *priv,
						      int id)
{
	if (id < gve_rx_start_qpl_id(priv))
		return DMA_TO_DEVICE;
	else
		return DMA_FROM_DEVICE;
}

static inline bool gve_is_gqi(struct gve_priv *priv)
{
	return priv->queue_format == GVE_GQI_RDA_FORMAT ||
	       priv->queue_format == GVE_GQI_QPL_FORMAT;
}

static inline u32 gve_num_tx_queues(struct gve_priv *priv)
{
	return priv->tx_cfg.num_queues + priv->num_xdp_queues;
}

static inline u32 gve_xdp_tx_queue_id(struct gve_priv *priv, u32 queue_id)
{
	return priv->tx_cfg.num_queues + queue_id;
}

static inline u32 gve_xdp_tx_start_queue_id(struct gve_priv *priv)
{
	return gve_xdp_tx_queue_id(priv, 0);
}
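
/* Worked example of the TX queue id layout (illustrative numbers only):
 * with tx_cfg.num_queues == 4 and num_xdp_queues == 2, regular TX queues
 * are 0-3 and XDP TX queues are 4-5, so gve_xdp_tx_start_queue_id() == 4.
 */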

/* buffers */
int gve_alloc_page(struct gve_priv *priv, struct device *dev,
		   struct page **page, dma_addr_t *dma,
		   enum dma_data_direction, gfp_t gfp_flags);
void gve_free_page(struct device *dev, struct page *page, dma_addr_t dma,
		   enum dma_data_direction);
/* tx handling */
netdev_tx_t gve_tx(struct sk_buff *skb, struct net_device *dev);
int gve_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
		 u32 flags);
int gve_xdp_xmit_one(struct gve_priv *priv, struct gve_tx_ring *tx,
		     void *data, int len, void *frame_p);
void gve_xdp_tx_flush(struct gve_priv *priv, u32 xdp_qid);
bool gve_tx_poll(struct gve_notify_block *block, int budget);
bool gve_xdp_poll(struct gve_notify_block *block, int budget);
int gve_tx_alloc_rings(struct gve_priv *priv, int start_id, int num_rings);
void gve_tx_free_rings_gqi(struct gve_priv *priv, int start_id, int num_rings);
u32 gve_tx_load_event_counter(struct gve_priv *priv,
			      struct gve_tx_ring *tx);
bool gve_tx_clean_pending(struct gve_priv *priv, struct gve_tx_ring *tx);
/* rx handling */
void gve_rx_write_doorbell(struct gve_priv *priv, struct gve_rx_ring *rx);
int gve_rx_poll(struct gve_notify_block *block, int budget);
bool gve_rx_work_pending(struct gve_rx_ring *rx);
int gve_rx_alloc_rings(struct gve_priv *priv);
void gve_rx_free_rings_gqi(struct gve_priv *priv);
/* Reset */
void gve_schedule_reset(struct gve_priv *priv);
int gve_reset(struct gve_priv *priv, bool attempt_teardown);
int gve_adjust_queues(struct gve_priv *priv,
		      struct gve_queue_config new_rx_config,
		      struct gve_queue_config new_tx_config);
/* report stats handling */
void gve_handle_report_stats(struct gve_priv *priv);
/* exported by ethtool.c */
extern const struct ethtool_ops gve_ethtool_ops;
/* needed by ethtool */
extern char gve_driver_name[];
extern const char gve_version_str[];
#endif /* _GVE_H_ */