/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/* Copyright (c) 2021, Microsoft Corporation. */

#ifndef _MANA_H
#define _MANA_H

#include "gdma.h"
#include "hw_channel.h"

/* Microsoft Azure Network Adapter (MANA)'s definitions
 *
 * Structures labeled with "HW DATA" are exchanged with the hardware. All of
 * them are naturally aligned and hence don't need __packed.
 */

/* MANA protocol version */
#define MANA_MAJOR_VERSION 0
#define MANA_MINOR_VERSION 1
#define MANA_MICRO_VERSION 1

typedef u64 mana_handle_t;
#define INVALID_MANA_HANDLE ((mana_handle_t)-1)

enum TRI_STATE {
	TRI_STATE_UNKNOWN = -1,
	TRI_STATE_FALSE = 0,
	TRI_STATE_TRUE = 1,
};

/* The number of entries in the hardware indirection table must be a power of 2 */
#define MANA_INDIRECT_TABLE_SIZE 64
#define MANA_INDIRECT_TABLE_MASK (MANA_INDIRECT_TABLE_SIZE - 1)
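
/* Example (illustrative, not part of the driver): RSS masks the packet's
 * Toeplitz hash into the indirection table to pick a receive object.
 * "pkt_hash" and "apc" (a struct mana_port_context, defined below) stand
 * in for values available at the call site:
 *
 *	u32 idx = pkt_hash & MANA_INDIRECT_TABLE_MASK;
 *	mana_handle_t rxobj = apc->rxobj_table[idx];
 */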

/* The Toeplitz hash key's length in bytes: should be a multiple of 8 */
#define MANA_HASH_KEY_SIZE 40

#define COMP_ENTRY_SIZE 64

#define RX_BUFFERS_PER_QUEUE 512
#define MANA_RX_DATA_ALIGN 64

#define MAX_SEND_BUFFERS_PER_QUEUE 256

#define EQ_SIZE (8 * PAGE_SIZE)
#define LOG2_EQ_THROTTLE 3

#define MAX_PORTS_IN_MANA_DEV 256

/* Update this count whenever the respective structures are changed */
#define MANA_STATS_RX_COUNT 5
#define MANA_STATS_TX_COUNT 11

struct mana_stats_rx {
	u64 packets;
	u64 bytes;
	u64 xdp_drop;
	u64 xdp_tx;
	u64 xdp_redirect;
	struct u64_stats_sync syncp;
};

struct mana_stats_tx {
	u64 packets;
	u64 bytes;
	u64 xdp_xmit;
	u64 tso_packets;
	u64 tso_bytes;
	u64 tso_inner_packets;
	u64 tso_inner_bytes;
	u64 short_pkt_fmt;
	u64 long_pkt_fmt;
	u64 csum_partial;
	u64 mana_map_err;
	struct u64_stats_sync syncp;
};
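
/* Example (illustrative): the counters above are updated inside a
 * u64_stats_sync section so that 64-bit reads stay consistent even on
 * 32-bit systems; "txq" is assumed to be a struct mana_txq:
 *
 *	u64_stats_update_begin(&txq->stats.syncp);
 *	txq->stats.packets++;
 *	txq->stats.bytes += skb->len;
 *	u64_stats_update_end(&txq->stats.syncp);
 */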

struct mana_txq {
	struct gdma_queue *gdma_sq;

	union {
		u32 gdma_txq_id;
		struct {
			u32 reserved1 : 10;
			u32 vsq_frame : 14;
			u32 reserved2 : 8;
		};
	};

	u16 vp_offset;

	struct net_device *ndev;

	/* The SKBs are sent to the HW and we are waiting for the CQEs. */
	struct sk_buff_head pending_skbs;
	struct netdev_queue *net_txq;

	atomic_t pending_sends;

	struct mana_stats_tx stats;
};

/* skb data and frags dma mappings */
struct mana_skb_head {
	dma_addr_t dma_handle[MAX_SKB_FRAGS + 1];

	u32 size[MAX_SKB_FRAGS + 1];
};

#define MANA_HEADROOM sizeof(struct mana_skb_head)
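
/* Example (illustrative): the TX path can use headroom sized by
 * MANA_HEADROOM to keep the DMA mapping of each skb part at skb->head;
 * "dev" is assumed to be the device doing the mapping:
 *
 *	struct mana_skb_head *ash = (struct mana_skb_head *)skb->head;
 *
 *	ash->dma_handle[0] = dma_map_single(dev, skb->data,
 *					    skb_headlen(skb), DMA_TO_DEVICE);
 *	ash->size[0] = skb_headlen(skb);
 */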

enum mana_tx_pkt_format {
	MANA_SHORT_PKT_FMT = 0,
	MANA_LONG_PKT_FMT = 1,
};

struct mana_tx_short_oob {
	u32 pkt_fmt : 2;
	u32 is_outer_ipv4 : 1;
	u32 is_outer_ipv6 : 1;
	u32 comp_iphdr_csum : 1;
	u32 comp_tcp_csum : 1;
	u32 comp_udp_csum : 1;
	u32 supress_txcqe_gen : 1;
	u32 vcq_num : 24;

	u32 trans_off : 10; /* Transport header offset */
	u32 vsq_frame : 14;
	u32 short_vp_offset : 8;
}; /* HW DATA */

struct mana_tx_long_oob {
	u32 is_encap : 1;
	u32 inner_is_ipv6 : 1;
	u32 inner_tcp_opt : 1;
	u32 inject_vlan_pri_tag : 1;
	u32 reserved1 : 12;
	u32 pcp : 3;  /* 802.1Q */
	u32 dei : 1;  /* 802.1Q */
	u32 vlan_id : 12; /* 802.1Q */

	u32 inner_frame_offset : 10;
	u32 inner_ip_rel_offset : 6;
	u32 long_vp_offset : 12;
	u32 reserved2 : 4;

	u32 reserved3;
	u32 reserved4;
}; /* HW DATA */

struct mana_tx_oob {
	struct mana_tx_short_oob s_oob;
	struct mana_tx_long_oob l_oob;
}; /* HW DATA */
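
/* Example (illustrative): requesting TCP checksum offload for an IPv4
 * packet through the short OOB; "pkt" is assumed to be a struct
 * mana_tx_package (defined near the end of this header):
 *
 *	pkt.tx_oob.s_oob.is_outer_ipv4 = 1;
 *	pkt.tx_oob.s_oob.comp_tcp_csum = 1;
 *	pkt.tx_oob.s_oob.trans_off = skb_transport_offset(skb);
 */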

enum mana_cq_type {
	MANA_CQ_TYPE_RX,
	MANA_CQ_TYPE_TX,
};

enum mana_cqe_type {
	CQE_INVALID                   = 0,
	CQE_RX_OKAY                   = 1,
	CQE_RX_COALESCED_4            = 2,
	CQE_RX_OBJECT_FENCE           = 3,
	CQE_RX_TRUNCATED              = 4,

	CQE_TX_OKAY                   = 32,
	CQE_TX_SA_DROP                = 33,
	CQE_TX_MTU_DROP               = 34,
	CQE_TX_INVALID_OOB            = 35,
	CQE_TX_INVALID_ETH_TYPE       = 36,
	CQE_TX_HDR_PROCESSING_ERROR   = 37,
	CQE_TX_VF_DISABLED            = 38,
	CQE_TX_VPORT_IDX_OUT_OF_RANGE = 39,
	CQE_TX_VPORT_DISABLED         = 40,
	CQE_TX_VLAN_TAGGING_VIOLATION = 41,
};

#define MANA_CQE_COMPLETION 1

struct mana_cqe_header {
	u32 cqe_type : 6;
	u32 client_type : 2;
	u32 vendor_err : 24;
}; /* HW DATA */
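
/* Example (illustrative): every CQE starts with this header, and cqe_type
 * selects how the rest of the CQE is interpreted. A TX completion handler
 * might accept only CQE_TX_OKAY; "comp" is assumed to be a polled struct
 * gdma_comp:
 *
 *	struct mana_tx_comp_oob *oob;
 *
 *	oob = (struct mana_tx_comp_oob *)comp->cqe_data;
 *	if (oob->cqe_hdr.cqe_type != CQE_TX_OKAY)
 *		return;
 *
 * The remaining CQE_TX_* values report per-packet errors.
 */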

/* NDIS HASH Types */
#define NDIS_HASH_IPV4		BIT(0)
#define NDIS_HASH_TCP_IPV4	BIT(1)
#define NDIS_HASH_UDP_IPV4	BIT(2)
#define NDIS_HASH_IPV6		BIT(3)
#define NDIS_HASH_TCP_IPV6	BIT(4)
#define NDIS_HASH_UDP_IPV6	BIT(5)
#define NDIS_HASH_IPV6_EX	BIT(6)
#define NDIS_HASH_TCP_IPV6_EX	BIT(7)
#define NDIS_HASH_UDP_IPV6_EX	BIT(8)

#define MANA_HASH_L3 (NDIS_HASH_IPV4 | NDIS_HASH_IPV6 | NDIS_HASH_IPV6_EX)
#define MANA_HASH_L4 \
	(NDIS_HASH_TCP_IPV4 | NDIS_HASH_UDP_IPV4 | NDIS_HASH_TCP_IPV6 | \
	 NDIS_HASH_UDP_IPV6 | NDIS_HASH_TCP_IPV6_EX | NDIS_HASH_UDP_IPV6_EX)
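
/* Example (illustrative): an RX handler can map the completion's hash type
 * to the skb hash level; "cqe" is assumed to be a struct mana_rxcomp_oob
 * (defined below) and "hash_value" the reported hash:
 *
 *	if (cqe->rx_hashtype & MANA_HASH_L4)
 *		skb_set_hash(skb, hash_value, PKT_HASH_TYPE_L4);
 *	else if (cqe->rx_hashtype & MANA_HASH_L3)
 *		skb_set_hash(skb, hash_value, PKT_HASH_TYPE_L3);
 */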

struct mana_rxcomp_perpkt_info {
	u32 pkt_len : 16;
	u32 reserved1 : 16;
	u32 reserved2;
	u32 pkt_hash;
}; /* HW DATA */

#define MANA_RXCOMP_OOB_NUM_PPI 4

/* Receive completion OOB */
struct mana_rxcomp_oob {
	struct mana_cqe_header cqe_hdr;

	u32 rx_vlan_id : 12;
	u32 rx_vlantag_present : 1;
	u32 rx_outer_iphdr_csum_succeed : 1;
	u32 rx_outer_iphdr_csum_fail : 1;
	u32 reserved1 : 1;
	u32 rx_hashtype : 9;
	u32 rx_iphdr_csum_succeed : 1;
	u32 rx_iphdr_csum_fail : 1;
	u32 rx_tcp_csum_succeed : 1;
	u32 rx_tcp_csum_fail : 1;
	u32 rx_udp_csum_succeed : 1;
	u32 rx_udp_csum_fail : 1;
	u32 reserved2 : 1;

	struct mana_rxcomp_perpkt_info ppi[MANA_RXCOMP_OOB_NUM_PPI];

	u32 rx_wqe_offset;
}; /* HW DATA */
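
/* Example (illustrative): translating the checksum bits into the skb's
 * checksum state, as an RX handler might do; "oob" is the completion above:
 *
 *	if (oob->rx_iphdr_csum_succeed &&
 *	    (oob->rx_tcp_csum_succeed || oob->rx_udp_csum_succeed))
 *		skb->ip_summed = CHECKSUM_UNNECESSARY;
 *	else
 *		skb->ip_summed = CHECKSUM_NONE;
 */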

struct mana_tx_comp_oob {
	struct mana_cqe_header cqe_hdr;

	u32 tx_data_offset;

	u32 tx_sgl_offset : 5;
	u32 tx_wqe_offset : 27;

	u32 reserved[12];
}; /* HW DATA */

struct mana_rxq;

#define CQE_POLLING_BUFFER 512

struct mana_cq {
	struct gdma_queue *gdma_cq;

	/* Cache the CQ id (used to verify if each CQE comes to the right CQ) */
	u32 gdma_id;

	/* Type of the CQ: TX or RX */
	enum mana_cq_type type;

	/* Pointer to the mana_rxq that is pushing RX CQEs to the queue.
	 * Non-NULL if and only if type is MANA_CQ_TYPE_RX.
	 */
	struct mana_rxq *rxq;

	/* Pointer to the mana_txq that is pushing TX CQEs to the queue.
	 * Non-NULL if and only if type is MANA_CQ_TYPE_TX.
	 */
	struct mana_txq *txq;

	/* Buffer which the CQ handler can copy the CQEs into. */
	struct gdma_comp gdma_comp_buf[CQE_POLLING_BUFFER];

	/* NAPI data */
	struct napi_struct napi;
	int work_done;
	int budget;
};
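
/* Example (illustrative): draining completions into gdma_comp_buf from a
 * NAPI poll handler. mana_gd_poll_cq() is the GDMA poll helper (assumed
 * here; see the GDMA definitions), and process_rx_cqe()/process_tx_cqe()
 * are hypothetical per-type handlers:
 *
 *	int comp_read, i;
 *
 *	comp_read = mana_gd_poll_cq(cq->gdma_cq, cq->gdma_comp_buf,
 *				    CQE_POLLING_BUFFER);
 *	for (i = 0; i < comp_read; i++) {
 *		if (cq->type == MANA_CQ_TYPE_RX)
 *			process_rx_cqe(cq->rxq, &cq->gdma_comp_buf[i]);
 *		else
 *			process_tx_cqe(cq->txq, &cq->gdma_comp_buf[i]);
 *	}
 */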

struct mana_recv_buf_oob {
	/* A valid GDMA work request representing the data buffer. */
	struct gdma_wqe_request wqe_req;

	void *buf_va;
	dma_addr_t buf_dma_addr;

	/* SGL of the buffer going to be sent as part of the work request. */
	u32 num_sge;
	struct gdma_sge sgl[MAX_RX_WQE_SGL_ENTRIES];

	/* Required to store the result of mana_gd_post_work_request.
	 * gdma_posted_wqe_info.wqe_size_in_bu is required for progressing the
	 * work queue when the WQE is consumed.
	 */
	struct gdma_posted_wqe_info wqe_inf;
};

#define MANA_RXBUF_PAD (SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) \
			+ ETH_HLEN)

#define MANA_XDP_MTU_MAX (PAGE_SIZE - MANA_RXBUF_PAD - XDP_PACKET_HEADROOM)
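
/* Example: on a 4K-page system with 256 bytes of XDP_PACKET_HEADROOM and a
 * (config-dependent) aligned skb_shared_info of about 320 bytes, this
 * allows an XDP MTU of roughly 4096 - (320 + 14) - 256 = 3506 bytes.
 */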

struct mana_rxq {
	struct gdma_queue *gdma_rq;
	/* Cache the gdma receive queue id */
	u32 gdma_id;

	/* Index of RQ in the vPort, not gdma receive queue id */
	u32 rxq_idx;

	u32 datasize;
	u32 alloc_size;
	u32 headroom;

	mana_handle_t rxobj;

	struct mana_cq rx_cq;

	struct completion fence_event;

	struct net_device *ndev;

	/* Total number of receive buffers to be allocated */
	u32 num_rx_buf;

	u32 buf_index;

	struct mana_stats_rx stats;

	struct bpf_prog __rcu *bpf_prog;
	struct xdp_rxq_info xdp_rxq;
	void *xdp_save_va; /* for reusing */
	bool xdp_flush;
	int xdp_rc; /* XDP redirect return code */

	/* MUST BE THE LAST MEMBER:
	 * Each receive buffer has an associated mana_recv_buf_oob.
	 */
	struct mana_recv_buf_oob rx_oobs[];
};

struct mana_tx_qp {
	struct mana_txq txq;

	struct mana_cq tx_cq;

	mana_handle_t tx_object;
};

struct mana_ethtool_stats {
	u64 stop_queue;
	u64 wake_queue;
	u64 tx_cqes;
	u64 tx_cqe_err;
	u64 tx_cqe_unknown_type;
	u64 rx_cqes;
	u64 rx_coalesced_err;
	u64 rx_cqe_unknown_type;
};

struct mana_context {
	struct gdma_dev *gdma_dev;

	u16 num_ports;

	struct mana_eq *eqs;

	struct net_device *ports[MAX_PORTS_IN_MANA_DEV];
};

struct mana_port_context {
	struct mana_context *ac;
	struct net_device *ndev;

	u8 mac_addr[ETH_ALEN];

	enum TRI_STATE rss_state;

	mana_handle_t default_rxobj;
	bool tx_shortform_allowed;
	u16 tx_vp_offset;

	struct mana_tx_qp *tx_qp;

	/* Indirection Table for RX & TX. The values are queue indexes */
	u32 indir_table[MANA_INDIRECT_TABLE_SIZE];

	/* Indirection table containing RxObject Handles */
	mana_handle_t rxobj_table[MANA_INDIRECT_TABLE_SIZE];

	/* Hash key used by the NIC */
	u8 hashkey[MANA_HASH_KEY_SIZE];

	/* This points to an array of num_queues of RQ pointers. */
	struct mana_rxq **rxqs;

	/* pre-allocated rx buffer array */
	void **rxbufs_pre;
	dma_addr_t *das_pre;
	int rxbpre_total;
	u32 rxbpre_datasize;
	u32 rxbpre_alloc_size;
	u32 rxbpre_headroom;

	struct bpf_prog *bpf_prog;

	/* Create num_queues EQs, SQs, SQ-CQs, RQs and RQ-CQs, respectively. */
	unsigned int max_queues;
	unsigned int num_queues;

	mana_handle_t port_handle;
	mana_handle_t pf_filter_handle;

	/* Mutex for sharing access to vport_use_count */
	struct mutex vport_mutex;
	int vport_use_count;

	u16 port_idx;

	bool port_is_up;
	bool port_st_save; /* Saved port state */

	struct mana_ethtool_stats eth_stats;
};

netdev_tx_t mana_start_xmit(struct sk_buff *skb, struct net_device *ndev);
int mana_config_rss(struct mana_port_context *ac, enum TRI_STATE rx,
		    bool update_hash, bool update_tab);
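
/* Example (illustrative): repopulating the indirection table and pushing
 * it to the NIC; "apc" is assumed to be an initialized port context:
 *
 *	int i, err;
 *
 *	for (i = 0; i < MANA_INDIRECT_TABLE_SIZE; i++)
 *		apc->indir_table[i] = i % apc->num_queues;
 *
 *	err = mana_config_rss(apc, TRI_STATE_TRUE, false, true);
 */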

int mana_alloc_queues(struct net_device *ndev);
int mana_attach(struct net_device *ndev);
int mana_detach(struct net_device *ndev, bool from_close);

int mana_probe(struct gdma_dev *gd, bool resuming);
void mana_remove(struct gdma_dev *gd, bool suspending);

void mana_xdp_tx(struct sk_buff *skb, struct net_device *ndev);
int mana_xdp_xmit(struct net_device *ndev, int n, struct xdp_frame **frames,
		  u32 flags);
u32 mana_run_xdp(struct net_device *ndev, struct mana_rxq *rxq,
		 struct xdp_buff *xdp, void *buf_va, uint pkt_len);
struct bpf_prog *mana_xdp_get(struct mana_port_context *apc);
void mana_chn_setxdp(struct mana_port_context *apc, struct bpf_prog *prog);
int mana_bpf(struct net_device *ndev, struct netdev_bpf *bpf);

extern const struct ethtool_ops mana_ethtool_ops;

/* A CQ can be created not associated with any EQ */
#define GDMA_CQ_NO_EQ 0xffff

struct mana_obj_spec {
	u32 queue_index;
	u64 gdma_region;
	u32 queue_size;
	u32 attached_eq;
	u32 modr_ctx_id;
};

enum mana_command_code {
	MANA_QUERY_DEV_CONFIG	= 0x20001,
	MANA_QUERY_GF_STAT	= 0x20002,
	MANA_CONFIG_VPORT_TX	= 0x20003,
	MANA_CREATE_WQ_OBJ	= 0x20004,
	MANA_DESTROY_WQ_OBJ	= 0x20005,
	MANA_FENCE_RQ		= 0x20006,
	MANA_CONFIG_VPORT_RX	= 0x20007,
	MANA_QUERY_VPORT_CONFIG	= 0x20008,

	/* Privileged commands for the PF mode */
	MANA_REGISTER_FILTER	= 0x28000,
	MANA_DEREGISTER_FILTER	= 0x28001,
	MANA_REGISTER_HW_PORT	= 0x28003,
	MANA_DEREGISTER_HW_PORT	= 0x28004,
};

/* Query Device Configuration */
struct mana_query_device_cfg_req {
	struct gdma_req_hdr hdr;

	/* MANA Nic Driver Capability flags */
	u64 mn_drv_cap_flags1;
	u64 mn_drv_cap_flags2;
	u64 mn_drv_cap_flags3;
	u64 mn_drv_cap_flags4;

	u32 proto_major_ver;
	u32 proto_minor_ver;
	u32 proto_micro_ver;

	u32 reserved;
}; /* HW DATA */

struct mana_query_device_cfg_resp {
	struct gdma_resp_hdr hdr;

	u64 pf_cap_flags1;
	u64 pf_cap_flags2;
	u64 pf_cap_flags3;
	u64 pf_cap_flags4;

	u16 max_num_vports;
	u16 reserved;
	u32 max_num_eqs;

	/* response v2: */
	u16 adapter_mtu;
	u16 reserved2;
	u32 reserved3;
}; /* HW DATA */
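
/* Example (illustrative): issuing the query over the HW channel. The
 * helpers mana_gd_init_req_hdr() and mana_gd_send_request() belong to the
 * GDMA layer and are assumed here; "gc" is the gdma_context:
 *
 *	struct mana_query_device_cfg_resp resp = {};
 *	struct mana_query_device_cfg_req req = {};
 *	int err;
 *
 *	mana_gd_init_req_hdr(&req.hdr, MANA_QUERY_DEV_CONFIG,
 *			     sizeof(req), sizeof(resp));
 *	req.proto_major_ver = MANA_MAJOR_VERSION;
 *	req.proto_minor_ver = MANA_MINOR_VERSION;
 *	req.proto_micro_ver = MANA_MICRO_VERSION;
 *
 *	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp),
 *				   &resp);
 */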

/* Query vPort Configuration */
struct mana_query_vport_cfg_req {
	struct gdma_req_hdr hdr;
	u32 vport_index;
}; /* HW DATA */

struct mana_query_vport_cfg_resp {
	struct gdma_resp_hdr hdr;
	u32 max_num_sq;
	u32 max_num_rq;
	u32 num_indirection_ent;
	u32 reserved1;
	u8 mac_addr[6];
	u8 reserved2[2];
	mana_handle_t vport;
}; /* HW DATA */

/* Configure vPort */
struct mana_config_vport_req {
	struct gdma_req_hdr hdr;
	mana_handle_t vport;
	u32 pdid;
	u32 doorbell_pageid;
}; /* HW DATA */

struct mana_config_vport_resp {
	struct gdma_resp_hdr hdr;
	u16 tx_vport_offset;
	u8 short_form_allowed;
	u8 reserved;
}; /* HW DATA */

/* Create WQ Object */
struct mana_create_wqobj_req {
	struct gdma_req_hdr hdr;
	mana_handle_t vport;
	u32 wq_type;
	u32 reserved;
	u64 wq_gdma_region;
	u64 cq_gdma_region;
	u32 wq_size;
	u32 cq_size;
	u32 cq_moderation_ctx_id;
	u32 cq_parent_qid;
}; /* HW DATA */

struct mana_create_wqobj_resp {
	struct gdma_resp_hdr hdr;
	u32 wq_id;
	u32 cq_id;
	mana_handle_t wq_obj;
}; /* HW DATA */

/* Destroy WQ Object */
struct mana_destroy_wqobj_req {
	struct gdma_req_hdr hdr;
	u32 wq_type;
	u32 reserved;
	mana_handle_t wq_obj_handle;
}; /* HW DATA */

struct mana_destroy_wqobj_resp {
	struct gdma_resp_hdr hdr;
}; /* HW DATA */

/* Fence RQ */
struct mana_fence_rq_req {
	struct gdma_req_hdr hdr;
	mana_handle_t wq_obj_handle;
}; /* HW DATA */

struct mana_fence_rq_resp {
	struct gdma_resp_hdr hdr;
}; /* HW DATA */

/* Configure vPort Rx Steering */
struct mana_cfg_rx_steer_req {
	struct gdma_req_hdr hdr;
	mana_handle_t vport;
	u16 num_indir_entries;
	u16 indir_tab_offset;
	u32 rx_enable;
	u32 rss_enable;
	u8 update_default_rxobj;
	u8 update_hashkey;
	u8 update_indir_tab;
	u8 reserved;
	mana_handle_t default_rxobj;
	u8 hashkey[MANA_HASH_KEY_SIZE];
}; /* HW DATA */

struct mana_cfg_rx_steer_resp {
	struct gdma_resp_hdr hdr;
}; /* HW DATA */
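
/* Example (illustrative): the indirection table of RxObject handles is not
 * part of the fixed request layout; it trails the request in the same
 * buffer, at the byte offset recorded in indir_tab_offset. A sketch of
 * building such a request:
 *
 *	req_buf_size = sizeof(*req) +
 *		       sizeof(mana_handle_t) * MANA_INDIRECT_TABLE_SIZE;
 *	req = kzalloc(req_buf_size, GFP_KERNEL);
 *
 *	req->indir_tab_offset = sizeof(*req);
 *	req->num_indir_entries = MANA_INDIRECT_TABLE_SIZE;
 *	memcpy(req + 1, apc->rxobj_table,
 *	       MANA_INDIRECT_TABLE_SIZE * sizeof(mana_handle_t));
 */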

/* Register HW vPort */
struct mana_register_hw_vport_req {
	struct gdma_req_hdr hdr;
	u16 attached_gfid;
	u8 is_pf_default_vport;
	u8 reserved1;
	u8 allow_all_ether_types;
	u8 reserved2;
	u8 reserved3;
	u8 reserved4;
}; /* HW DATA */

struct mana_register_hw_vport_resp {
	struct gdma_resp_hdr hdr;
	mana_handle_t hw_vport_handle;
}; /* HW DATA */

/* Deregister HW vPort */
struct mana_deregister_hw_vport_req {
	struct gdma_req_hdr hdr;
	mana_handle_t hw_vport_handle;
}; /* HW DATA */

struct mana_deregister_hw_vport_resp {
	struct gdma_resp_hdr hdr;
}; /* HW DATA */

/* Register filter */
struct mana_register_filter_req {
	struct gdma_req_hdr hdr;
	mana_handle_t vport;
	u8 mac_addr[6];
	u8 reserved1;
	u8 reserved2;
	u8 reserved3;
	u8 reserved4;
	u16 reserved5;
	u32 reserved6;
	u32 reserved7;
	u32 reserved8;
}; /* HW DATA */

struct mana_register_filter_resp {
	struct gdma_resp_hdr hdr;
	mana_handle_t filter_handle;
}; /* HW DATA */

/* Deregister filter */
struct mana_deregister_filter_req {
	struct gdma_req_hdr hdr;
	mana_handle_t filter_handle;
}; /* HW DATA */

struct mana_deregister_filter_resp {
	struct gdma_resp_hdr hdr;
}; /* HW DATA */

#define MANA_MAX_NUM_QUEUES 64

#define MANA_SHORT_VPORT_OFFSET_MAX ((1U << 8) - 1)
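
/* Example (illustrative): the short packet format can only carry an 8-bit
 * vPort offset, so a larger offset forces the long format:
 *
 *	if (apc->tx_vp_offset > MANA_SHORT_VPORT_OFFSET_MAX)
 *		apc->tx_shortform_allowed = false;
 */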

struct mana_tx_package {
	struct gdma_wqe_request wqe_req;
	struct gdma_sge sgl_array[5];
	struct gdma_sge *sgl_ptr;

	struct mana_tx_oob tx_oob;

	struct gdma_posted_wqe_info wqe_info;
};

int mana_create_wq_obj(struct mana_port_context *apc,
		       mana_handle_t vport,
		       u32 wq_type, struct mana_obj_spec *wq_spec,
		       struct mana_obj_spec *cq_spec,
		       mana_handle_t *wq_obj);

void mana_destroy_wq_obj(struct mana_port_context *apc, u32 wq_type,
			 mana_handle_t wq_obj);

int mana_cfg_vport(struct mana_port_context *apc, u32 protection_dom_id,
		   u32 doorbell_pg_id);
void mana_uncfg_vport(struct mana_port_context *apc);

#endif /* _MANA_H */