Commit | Line | Data |
---|---|---|
ca9c54d2 DC |
1 | /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ |
2 | /* Copyright (c) 2021, Microsoft Corporation. */ | |
3 | ||
4 | #ifndef _MANA_H | |
5 | #define _MANA_H | |
6 | ||
7 | #include "gdma.h" | |
8 | #include "hw_channel.h" | |
9 | ||
10 | /* Microsoft Azure Network Adapter (MANA)'s definitions | |
11 | * | |
12 | * Structures labeled with "HW DATA" are exchanged with the hardware. All of | |
13 | * them are naturally aligned and hence don't need __packed. | |
14 | */ | |
15 | ||
/* MANA protocol version negotiated with the hardware at probe time */
#define MANA_MAJOR_VERSION	0
#define MANA_MINOR_VERSION	1
#define MANA_MICRO_VERSION	1

/* Opaque 64-bit handle identifying an object created on the hardware
 * (vport, RX object, WQ object, filter, ...).
 */
typedef u64 mana_handle_t;
#define INVALID_MANA_HANDLE	((mana_handle_t)-1)
23 | ||
/* Tri-state value: used e.g. for a port's RSS state (see
 * mana_port_context.rss_state and mana_config_rss()).
 */
enum TRI_STATE {
	TRI_STATE_UNKNOWN = -1,
	TRI_STATE_FALSE = 0,
	TRI_STATE_TRUE = 1
};
29 | ||
/* Number of entries for hardware indirection table must be in power of 2 */
#define MANA_INDIRECT_TABLE_SIZE 64
#define MANA_INDIRECT_TABLE_MASK (MANA_INDIRECT_TABLE_SIZE - 1)

/* The Toeplitz hash key's length in bytes: should be multiple of 8 */
#define MANA_HASH_KEY_SIZE 40

/* Size in bytes of one hardware completion-queue entry */
#define COMP_ENTRY_SIZE 64

/* Number of receive buffers posted per RX queue */
#define RX_BUFFERS_PER_QUEUE 512

/* Maximum number of in-flight TX buffers per send queue */
#define MAX_SEND_BUFFERS_PER_QUEUE 256

/* Event queue size in bytes and log2 of the EQ interrupt throttle */
#define EQ_SIZE (8 * PAGE_SIZE)
#define LOG2_EQ_THROTTLE 3

/* Upper bound on net_device ports per MANA device (see mana_context.ports) */
#define MAX_PORTS_IN_MANA_DEV 256

/* Update this count whenever the respective structures are changed:
 * these must equal the number of u64 counters in mana_stats_rx and
 * mana_stats_tx, respectively (used by ethtool stats reporting).
 */
#define MANA_STATS_RX_COUNT 5
#define MANA_STATS_TX_COUNT 11
/* Per-RX-queue software counters. Keep MANA_STATS_RX_COUNT in sync with
 * the number of u64 fields below.
 */
struct mana_stats_rx {
	u64 packets;
	u64 bytes;
	u64 xdp_drop;		/* packets dropped by the XDP program */
	u64 xdp_tx;		/* packets transmitted via XDP_TX */
	u64 xdp_redirect;	/* packets redirected via XDP_REDIRECT */
	struct u64_stats_sync syncp;	/* protects the counters above */
};
60 | ||
/* Per-TX-queue software counters. Keep MANA_STATS_TX_COUNT in sync with
 * the number of u64 fields below.
 */
struct mana_stats_tx {
	u64 packets;
	u64 bytes;
	u64 xdp_xmit;		/* frames sent via ndo_xdp_xmit */
	u64 tso_packets;
	u64 tso_bytes;
	u64 tso_inner_packets;	/* TSO on encapsulated (inner) headers */
	u64 tso_inner_bytes;
	u64 short_pkt_fmt;	/* WQEs posted with MANA_SHORT_PKT_FMT */
	u64 long_pkt_fmt;	/* WQEs posted with MANA_LONG_PKT_FMT */
	u64 csum_partial;	/* skbs with CHECKSUM_PARTIAL offloaded */
	u64 mana_map_err;	/* DMA mapping failures on transmit */
	struct u64_stats_sync syncp;	/* protects the counters above */
};
75 | ||
/* Software state for one transmit queue */
struct mana_txq {
	struct gdma_queue *gdma_sq;	/* underlying GDMA send queue */

	/* The GDMA SQ id encodes the vsq_frame used in TX OOBs */
	union {
		u32 gdma_txq_id;
		struct {
			u32 reserved1 : 10;
			u32 vsq_frame : 14;
			u32 reserved2 : 8;
		};
	};

	u16 vp_offset;	/* vport offset used in TX OOBs */

	struct net_device *ndev;

	/* The SKBs are sent to the HW and we are waiting for the CQEs. */
	struct sk_buff_head pending_skbs;
	struct netdev_queue *net_txq;	/* the stack-level TX queue */

	/* Number of WQEs posted but not yet completed */
	atomic_t pending_sends;

	struct mana_stats_tx stats;
};
100 | ||
/* skb data and frags dma mappings: one entry for the linear part plus
 * one per fragment. Stored in the skb headroom (see MANA_HEADROOM).
 */
struct mana_skb_head {
	dma_addr_t dma_handle[MAX_SKB_FRAGS + 1];

	u32 size[MAX_SKB_FRAGS + 1];	/* length of each DMA mapping */
};

/* Headroom reserved in TX skbs to hold the DMA mapping records above */
#define MANA_HEADROOM sizeof(struct mana_skb_head)
109 | ||
/* TX WQE OOB format selector: short form carries only mana_tx_short_oob,
 * long form additionally carries mana_tx_long_oob.
 */
enum mana_tx_pkt_format {
	MANA_SHORT_PKT_FMT = 0,
	MANA_LONG_PKT_FMT = 1,
};
114 | ||
/* Short-form TX out-of-band data placed in every TX WQE */
struct mana_tx_short_oob {
	u32 pkt_fmt		: 2;	/* enum mana_tx_pkt_format */
	u32 is_outer_ipv4	: 1;
	u32 is_outer_ipv6	: 1;
	u32 comp_iphdr_csum	: 1;	/* request IP header csum offload */
	u32 comp_tcp_csum	: 1;	/* request TCP csum offload */
	u32 comp_udp_csum	: 1;	/* request UDP csum offload */
	/* NOTE(review): "supress" is a misspelling of "suppress"; kept
	 * as-is because renaming would break all users of this header.
	 */
	u32 supress_txcqe_gen	: 1;
	u32 vcq_num		: 24;	/* completion queue to signal */

	u32 trans_off		: 10;	/* Transport header offset */
	u32 vsq_frame		: 14;
	u32 short_vp_offset	: 8;
}; /* HW DATA */
129 | ||
/* Long-form TX out-of-band data: appended after mana_tx_short_oob when
 * pkt_fmt == MANA_LONG_PKT_FMT (encapsulation / VLAN insertion).
 */
struct mana_tx_long_oob {
	u32 is_encap		: 1;
	u32 inner_is_ipv6	: 1;
	u32 inner_tcp_opt	: 1;
	u32 inject_vlan_pri_tag : 1;	/* HW inserts the 802.1Q tag below */
	u32 reserved1		: 12;
	u32 pcp			: 3;	/* 802.1Q */
	u32 dei			: 1;	/* 802.1Q */
	u32 vlan_id		: 12;	/* 802.1Q */

	u32 inner_frame_offset	: 10;
	u32 inner_ip_rel_offset : 6;
	u32 long_vp_offset	: 12;
	u32 reserved2		: 4;

	u32 reserved3;
	u32 reserved4;
}; /* HW DATA */
148 | ||
/* Full TX OOB as laid out in the WQE: short form always present, long
 * form only consumed by HW when s_oob.pkt_fmt == MANA_LONG_PKT_FMT.
 */
struct mana_tx_oob {
	struct mana_tx_short_oob s_oob;
	struct mana_tx_long_oob l_oob;
}; /* HW DATA */
153 | ||
/* Discriminator for struct mana_cq: RX CQs reference a mana_rxq, TX CQs
 * reference a mana_txq.
 */
enum mana_cq_type {
	MANA_CQ_TYPE_RX,
	MANA_CQ_TYPE_TX,
};
158 | ||
/* CQE type codes reported in mana_cqe_header.cqe_type.
 * Values 1-4 are RX completions; 32+ are TX completions (33-41 are the
 * TX error codes).
 */
enum mana_cqe_type {
	CQE_INVALID			= 0,
	CQE_RX_OKAY			= 1,
	CQE_RX_COALESCED_4		= 2,	/* 4 packets in one CQE */
	CQE_RX_OBJECT_FENCE		= 3,	/* RQ fence completed */
	CQE_RX_TRUNCATED		= 4,

	CQE_TX_OKAY			= 32,
	CQE_TX_SA_DROP			= 33,
	CQE_TX_MTU_DROP			= 34,
	CQE_TX_INVALID_OOB		= 35,
	CQE_TX_INVALID_ETH_TYPE		= 36,
	CQE_TX_HDR_PROCESSING_ERROR	= 37,
	CQE_TX_VF_DISABLED		= 38,
	CQE_TX_VPORT_IDX_OUT_OF_RANGE	= 39,
	CQE_TX_VPORT_DISABLED		= 40,
	CQE_TX_VLAN_TAGGING_VIOLATION	= 41,
};
177 | ||
/* client_type value for ordinary completion CQEs */
#define MANA_CQE_COMPLETION 1

/* Common header at the start of every CQE */
struct mana_cqe_header {
	u32 cqe_type	: 6;	/* enum mana_cqe_type */
	u32 client_type : 2;	/* e.g. MANA_CQE_COMPLETION */
	u32 vendor_err	: 24;	/* vendor-specific error detail */
}; /* HW DATA */
185 | ||
/* NDIS HASH Types: bit flags reported in mana_rxcomp_oob.rx_hashtype and
 * used to configure RSS hashing.
 */
#define NDIS_HASH_IPV4		BIT(0)
#define NDIS_HASH_TCP_IPV4	BIT(1)
#define NDIS_HASH_UDP_IPV4	BIT(2)
#define NDIS_HASH_IPV6		BIT(3)
#define NDIS_HASH_TCP_IPV6	BIT(4)
#define NDIS_HASH_UDP_IPV6	BIT(5)
#define NDIS_HASH_IPV6_EX	BIT(6)
#define NDIS_HASH_TCP_IPV6_EX	BIT(7)
#define NDIS_HASH_UDP_IPV6_EX	BIT(8)

/* Hash types covering only L3 headers vs. those covering L4 ports */
#define MANA_HASH_L3 (NDIS_HASH_IPV4 | NDIS_HASH_IPV6 | NDIS_HASH_IPV6_EX)
#define MANA_HASH_L4                                                         \
	(NDIS_HASH_TCP_IPV4 | NDIS_HASH_UDP_IPV4 | NDIS_HASH_TCP_IPV6 |      \
	 NDIS_HASH_UDP_IPV6 | NDIS_HASH_TCP_IPV6_EX | NDIS_HASH_UDP_IPV6_EX)
201 | ||
/* Per-packet info inside an RX completion; an RX CQE carries up to
 * MANA_RXCOMP_OOB_NUM_PPI of these (for coalesced completions).
 */
struct mana_rxcomp_perpkt_info {
	u32 pkt_len	: 16;
	u32 reserved1	: 16;
	u32 reserved2;
	u32 pkt_hash;	/* RSS hash value; type is in rx_hashtype */
}; /* HW DATA */
208 | ||
/* Max packets described by one RX completion (see CQE_RX_COALESCED_4) */
#define MANA_RXCOMP_OOB_NUM_PPI 4

/* Receive completion OOB */
struct mana_rxcomp_oob {
	struct mana_cqe_header cqe_hdr;

	u32 rx_vlan_id			: 12;
	u32 rx_vlantag_present		: 1;
	u32 rx_outer_iphdr_csum_succeed : 1;
	u32 rx_outer_iphdr_csum_fail	: 1;
	u32 reserved1			: 1;
	u32 rx_hashtype			: 9;	/* NDIS_HASH_* flags */
	u32 rx_iphdr_csum_succeed	: 1;
	u32 rx_iphdr_csum_fail		: 1;
	u32 rx_tcp_csum_succeed		: 1;
	u32 rx_tcp_csum_fail		: 1;
	u32 rx_udp_csum_succeed		: 1;
	u32 rx_udp_csum_fail		: 1;
	u32 reserved2			: 1;

	struct mana_rxcomp_perpkt_info ppi[MANA_RXCOMP_OOB_NUM_PPI];

	u32 rx_wqe_offset;	/* offset of the consumed WQE in the RQ */
}; /* HW DATA */
233 | ||
/* Transmit completion OOB */
struct mana_tx_comp_oob {
	struct mana_cqe_header cqe_hdr;

	u32 tx_data_offset;

	u32 tx_sgl_offset : 5;
	u32 tx_wqe_offset : 27;	/* offset of the completed WQE in the SQ */

	u32 reserved[12];
}; /* HW DATA */
244 | ||
struct mana_rxq;

/* Max CQEs drained per poll into gdma_comp_buf */
#define CQE_POLLING_BUFFER 512

/* Software state for one completion queue (RX or TX) */
struct mana_cq {
	struct gdma_queue *gdma_cq;

	/* Cache the CQ id (used to verify if each CQE comes to the right CQ). */
	u32 gdma_id;

	/* Type of the CQ: TX or RX */
	enum mana_cq_type type;

	/* Pointer to the mana_rxq that is pushing RX CQEs to the queue.
	 * Only and must be non-NULL if type is MANA_CQ_TYPE_RX.
	 */
	struct mana_rxq *rxq;

	/* Pointer to the mana_txq that is pushing TX CQEs to the queue.
	 * Only and must be non-NULL if type is MANA_CQ_TYPE_TX.
	 */
	struct mana_txq *txq;

	/* Buffer which the CQ handler can copy the CQE's into. */
	struct gdma_comp gdma_comp_buf[CQE_POLLING_BUFFER];

	/* NAPI data */
	struct napi_struct napi;
	int work_done;	/* CQEs processed in the current NAPI poll */
	int budget;	/* NAPI budget for the current poll */
};
276 | ||
ca9c54d2 DC |
277 | struct mana_recv_buf_oob { |
278 | /* A valid GDMA work request representing the data buffer. */ | |
279 | struct gdma_wqe_request wqe_req; | |
280 | ||
281 | void *buf_va; | |
ca9c54d2 DC |
282 | |
283 | /* SGL of the buffer going to be sent has part of the work request. */ | |
284 | u32 num_sge; | |
aa565497 | 285 | struct gdma_sge sgl[MAX_RX_WQE_SGL_ENTRIES]; |
ca9c54d2 DC |
286 | |
287 | /* Required to store the result of mana_gd_post_work_request. | |
288 | * gdma_posted_wqe_info.wqe_size_in_bu is required for progressing the | |
289 | * work queue when the WQE is consumed. | |
290 | */ | |
291 | struct gdma_posted_wqe_info wqe_inf; | |
292 | }; | |
293 | ||
/* Software state for one receive queue */
struct mana_rxq {
	struct gdma_queue *gdma_rq;
	/* Cache the gdma receive queue id */
	u32 gdma_id;

	/* Index of RQ in the vPort, not gdma receive queue id */
	u32 rxq_idx;

	u32 datasize;	/* size of each posted receive buffer */

	mana_handle_t rxobj;	/* HW RX object handle for this queue */

	struct mana_cq rx_cq;

	/* Signaled when a CQE_RX_OBJECT_FENCE completion arrives */
	struct completion fence_event;

	struct net_device *ndev;

	/* Total number of receive buffers to be allocated */
	u32 num_rx_buf;

	u32 buf_index;	/* next rx_oobs[] slot to refill */

	struct mana_stats_rx stats;

	/* XDP state; bpf_prog is RCU-protected and may be NULL */
	struct bpf_prog __rcu *bpf_prog;
	struct xdp_rxq_info xdp_rxq;
	void *xdp_save_va; /* for reusing */
	bool xdp_flush;	/* xdp_do_flush() needed at end of poll */
	int xdp_rc; /* XDP redirect return code */

	/* MUST BE THE LAST MEMBER:
	 * Each receive buffer has an associated mana_recv_buf_oob.
	 */
	struct mana_recv_buf_oob rx_oobs[];
};
330 | ||
/* A TX queue pair: the send queue, its completion queue, and the HW WQ
 * object that binds them.
 */
struct mana_tx_qp {
	struct mana_txq txq;

	struct mana_cq tx_cq;

	mana_handle_t tx_object;
};
338 | ||
/* Port-wide counters reported through ethtool -S */
struct mana_ethtool_stats {
	u64 stop_queue;		/* times a TX queue was stopped */
	u64 wake_queue;		/* times a TX queue was restarted */
	u64 tx_cqes;
	u64 tx_cqe_err;
	u64 tx_cqe_unknown_type;
	u64 rx_cqes;
	u64 rx_coalesced_err;
	u64 rx_cqe_unknown_type;
};
349 | ||
/* Per-device (adapter) context shared by all of its ports */
struct mana_context {
	struct gdma_dev *gdma_dev;

	u16 num_ports;

	struct mana_eq *eqs;	/* event queues shared across the device */

	struct net_device *ports[MAX_PORTS_IN_MANA_DEV];
};
359 | ||
/* Per-port (net_device) context */
struct mana_port_context {
	struct mana_context *ac;	/* owning adapter context */
	struct net_device *ndev;

	u8 mac_addr[ETH_ALEN];

	enum TRI_STATE rss_state;

	mana_handle_t default_rxobj;	/* RX object for non-RSS traffic */
	bool tx_shortform_allowed;	/* may use MANA_SHORT_PKT_FMT */
	u16 tx_vp_offset;

	struct mana_tx_qp *tx_qp;	/* array of num_queues TX queue pairs */

	/* Indirection Table for RX & TX. The values are queue indexes */
	u32 indir_table[MANA_INDIRECT_TABLE_SIZE];

	/* Indirection table containing RxObject Handles */
	mana_handle_t rxobj_table[MANA_INDIRECT_TABLE_SIZE];

	/* Hash key used by the NIC */
	u8 hashkey[MANA_HASH_KEY_SIZE];

	/* This points to an array of num_queues of RQ pointers. */
	struct mana_rxq **rxqs;

	/* Attached XDP program (see also mana_rxq.bpf_prog) */
	struct bpf_prog *bpf_prog;

	/* Create num_queues EQs, SQs, SQ-CQs, RQs and RQ-CQs, respectively. */
	unsigned int max_queues;
	unsigned int num_queues;

	mana_handle_t port_handle;
	mana_handle_t pf_filter_handle;	/* used only in PF mode */

	/* Mutex for sharing access to vport_use_count */
	struct mutex vport_mutex;
	int vport_use_count;

	u16 port_idx;

	bool port_is_up;
	bool port_st_save; /* Saved port state */

	struct mana_ethtool_stats eth_stats;
};
406 | ||
/* Main datapath and lifecycle entry points (implemented in mana_en.c) */
netdev_tx_t mana_start_xmit(struct sk_buff *skb, struct net_device *ndev);
int mana_config_rss(struct mana_port_context *ac, enum TRI_STATE rx,
		    bool update_hash, bool update_tab);

int mana_alloc_queues(struct net_device *ndev);
int mana_attach(struct net_device *ndev);
int mana_detach(struct net_device *ndev, bool from_close);

int mana_probe(struct gdma_dev *gd, bool resuming);
void mana_remove(struct gdma_dev *gd, bool suspending);

/* XDP support (implemented in mana_bpf.c) */
void mana_xdp_tx(struct sk_buff *skb, struct net_device *ndev);
int mana_xdp_xmit(struct net_device *ndev, int n, struct xdp_frame **frames,
		  u32 flags);
u32 mana_run_xdp(struct net_device *ndev, struct mana_rxq *rxq,
		 struct xdp_buff *xdp, void *buf_va, uint pkt_len);
struct bpf_prog *mana_xdp_get(struct mana_port_context *apc);
void mana_chn_setxdp(struct mana_port_context *apc, struct bpf_prog *prog);
int mana_bpf(struct net_device *ndev, struct netdev_bpf *bpf);
426 | ||
extern const struct ethtool_ops mana_ethtool_ops;

/* A CQ can be created not associated with any EQ */
#define GDMA_CQ_NO_EQ 0xffff

/* Specification of a queue passed to/from mana_create_wq_obj() */
struct mana_obj_spec {
	u32 queue_index;
	u64 gdma_region;
	u32 queue_size;
	u32 attached_eq;	/* EQ index, or GDMA_CQ_NO_EQ */
	u32 modr_ctx_id;
};
439 | ||
/* Management command opcodes sent over the HW channel */
enum mana_command_code {
	MANA_QUERY_DEV_CONFIG	= 0x20001,
	MANA_QUERY_GF_STAT	= 0x20002,
	MANA_CONFIG_VPORT_TX	= 0x20003,
	MANA_CREATE_WQ_OBJ	= 0x20004,
	MANA_DESTROY_WQ_OBJ	= 0x20005,
	MANA_FENCE_RQ		= 0x20006,
	MANA_CONFIG_VPORT_RX	= 0x20007,
	MANA_QUERY_VPORT_CONFIG	= 0x20008,

	/* Privileged commands for the PF mode */
	MANA_REGISTER_FILTER	= 0x28000,
	MANA_DEREGISTER_FILTER	= 0x28001,
	MANA_REGISTER_HW_PORT	= 0x28003,
	MANA_DEREGISTER_HW_PORT	= 0x28004,
};
456 | ||
/* Query Device Configuration (MANA_QUERY_DEV_CONFIG) */
struct mana_query_device_cfg_req {
	struct gdma_req_hdr hdr;

	/* MANA Nic Driver Capability flags */
	u64 mn_drv_cap_flags1;
	u64 mn_drv_cap_flags2;
	u64 mn_drv_cap_flags3;
	u64 mn_drv_cap_flags4;

	/* Protocol version the driver supports (MANA_*_VERSION) */
	u32 proto_major_ver;
	u32 proto_minor_ver;
	u32 proto_micro_ver;

	u32 reserved;
}; /* HW DATA */

struct mana_query_device_cfg_resp {
	struct gdma_resp_hdr hdr;

	/* PF capability flags returned by the hardware */
	u64 pf_cap_flags1;
	u64 pf_cap_flags2;
	u64 pf_cap_flags3;
	u64 pf_cap_flags4;

	u16 max_num_vports;
	u16 reserved;
	u32 max_num_eqs;
}; /* HW DATA */
486 | ||
/* Query vPort Configuration (MANA_QUERY_VPORT_CONFIG) */
struct mana_query_vport_cfg_req {
	struct gdma_req_hdr hdr;
	u32 vport_index;
}; /* HW DATA */

struct mana_query_vport_cfg_resp {
	struct gdma_resp_hdr hdr;
	u32 max_num_sq;
	u32 max_num_rq;
	u32 num_indirection_ent;	/* indirection table entries supported */
	u32 reserved1;
	u8 mac_addr[6];
	u8 reserved2[2];
	mana_handle_t vport;
}; /* HW DATA */
503 | ||
/* Configure vPort (MANA_CONFIG_VPORT_TX) */
struct mana_config_vport_req {
	struct gdma_req_hdr hdr;
	mana_handle_t vport;
	u32 pdid;		/* protection domain id */
	u32 doorbell_pageid;
}; /* HW DATA */

struct mana_config_vport_resp {
	struct gdma_resp_hdr hdr;
	u16 tx_vport_offset;		/* becomes tx_vp_offset on the port */
	u8 short_form_allowed;		/* MANA_SHORT_PKT_FMT permitted */
	u8 reserved;
}; /* HW DATA */
518 | ||
/* Create WQ Object (MANA_CREATE_WQ_OBJ): binds a WQ and its CQ into one
 * HW object.
 */
struct mana_create_wqobj_req {
	struct gdma_req_hdr hdr;
	mana_handle_t vport;
	u32 wq_type;
	u32 reserved;
	u64 wq_gdma_region;
	u64 cq_gdma_region;
	u32 wq_size;
	u32 cq_size;
	u32 cq_moderation_ctx_id;
	u32 cq_parent_qid;
}; /* HW DATA */

struct mana_create_wqobj_resp {
	struct gdma_resp_hdr hdr;
	u32 wq_id;
	u32 cq_id;
	mana_handle_t wq_obj;
}; /* HW DATA */
539 | ||
/* Destroy WQ Object (MANA_DESTROY_WQ_OBJ) */
struct mana_destroy_wqobj_req {
	struct gdma_req_hdr hdr;
	u32 wq_type;
	u32 reserved;
	mana_handle_t wq_obj_handle;
}; /* HW DATA */

struct mana_destroy_wqobj_resp {
	struct gdma_resp_hdr hdr;
}; /* HW DATA */
551 | ||
/* Fence RQ (MANA_FENCE_RQ): completion is signaled by a
 * CQE_RX_OBJECT_FENCE CQE (see mana_rxq.fence_event).
 */
struct mana_fence_rq_req {
	struct gdma_req_hdr hdr;
	mana_handle_t wq_obj_handle;
}; /* HW DATA */

struct mana_fence_rq_resp {
	struct gdma_resp_hdr hdr;
}; /* HW DATA */
561 | ||
/* Configure vPort Rx Steering (MANA_CONFIG_VPORT_RX): the indirection
 * table itself is appended at indir_tab_offset in the message.
 */
struct mana_cfg_rx_steer_req {
	struct gdma_req_hdr hdr;
	mana_handle_t vport;
	u16 num_indir_entries;
	u16 indir_tab_offset;	/* byte offset of the table in this request */
	u32 rx_enable;
	u32 rss_enable;
	u8 update_default_rxobj;
	u8 update_hashkey;
	u8 update_indir_tab;
	u8 reserved;
	mana_handle_t default_rxobj;
	u8 hashkey[MANA_HASH_KEY_SIZE];
}; /* HW DATA */

struct mana_cfg_rx_steer_resp {
	struct gdma_resp_hdr hdr;
}; /* HW DATA */
581 | ||
/* Register HW vPort (MANA_REGISTER_HW_PORT, PF mode only) */
struct mana_register_hw_vport_req {
	struct gdma_req_hdr hdr;
	u16 attached_gfid;
	u8 is_pf_default_vport;
	u8 reserved1;
	u8 allow_all_ether_types;
	u8 reserved2;
	u8 reserved3;
	u8 reserved4;
}; /* HW DATA */

struct mana_register_hw_vport_resp {
	struct gdma_resp_hdr hdr;
	mana_handle_t hw_vport_handle;
}; /* HW DATA */

/* Deregister HW vPort (MANA_DEREGISTER_HW_PORT, PF mode only) */
struct mana_deregister_hw_vport_req {
	struct gdma_req_hdr hdr;
	mana_handle_t hw_vport_handle;
}; /* HW DATA */

struct mana_deregister_hw_vport_resp {
	struct gdma_resp_hdr hdr;
}; /* HW DATA */
608 | ||
/* Register filter (MANA_REGISTER_FILTER, PF mode only) */
struct mana_register_filter_req {
	struct gdma_req_hdr hdr;
	mana_handle_t vport;
	u8 mac_addr[6];
	u8 reserved1;
	u8 reserved2;
	u8 reserved3;
	u8 reserved4;
	u16 reserved5;
	u32 reserved6;
	u32 reserved7;
	u32 reserved8;
}; /* HW DATA */

struct mana_register_filter_resp {
	struct gdma_resp_hdr hdr;
	mana_handle_t filter_handle;	/* saved as pf_filter_handle */
}; /* HW DATA */

/* Deregister filter (MANA_DEREGISTER_FILTER, PF mode only) */
struct mana_deregister_filter_req {
	struct gdma_req_hdr hdr;
	mana_handle_t filter_handle;
}; /* HW DATA */

struct mana_deregister_filter_resp {
	struct gdma_resp_hdr hdr;
}; /* HW DATA */
638 | ||
/* Upper bound on queue pairs per port */
#define MANA_MAX_NUM_QUEUES 64

/* Largest vport offset expressible in the 8-bit short-form OOB field */
#define MANA_SHORT_VPORT_OFFSET_MAX ((1U << 8) - 1)

/* Everything needed to post one TX WQE; built on the stack in the xmit
 * path. sgl_array covers the common case; sgl_ptr may point to a larger,
 * separately-allocated SGL.
 */
struct mana_tx_package {
	struct gdma_wqe_request wqe_req;
	struct gdma_sge sgl_array[5];
	struct gdma_sge *sgl_ptr;

	struct mana_tx_oob tx_oob;

	struct gdma_posted_wqe_info wqe_info;
};
652 | ||
/* WQ object and vport configuration helpers (implemented in mana_en.c) */
int mana_create_wq_obj(struct mana_port_context *apc,
		       mana_handle_t vport,
		       u32 wq_type, struct mana_obj_spec *wq_spec,
		       struct mana_obj_spec *cq_spec,
		       mana_handle_t *wq_obj);

void mana_destroy_wq_obj(struct mana_port_context *apc, u32 wq_type,
			 mana_handle_t wq_obj);

/* Vport use is refcounted; see vport_mutex / vport_use_count */
int mana_cfg_vport(struct mana_port_context *apc, u32 protection_dom_id,
		   u32 doorbell_pg_id);
void mana_uncfg_vport(struct mana_port_context *apc);
ca9c54d2 | 665 | #endif /* _MANA_H */ |