/*
2 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
 * conditions are met:
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
 * disclaimer.
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef MLX5_DEVICE_H
#define MLX5_DEVICE_H
36 #include <linux/types.h>
37 #include <rdma/ib_verbs.h>
38 #include <linux/mlx5/mlx5_ifc.h>
40 #if defined(__LITTLE_ENDIAN)
41 #define MLX5_SET_HOST_ENDIANNESS 0
42 #elif defined(__BIG_ENDIAN)
43 #define MLX5_SET_HOST_ENDIANNESS 0x80
#else
45 #error Host endianness not defined
#endif
49 #define __mlx5_nullp(typ) ((struct mlx5_ifc_##typ##_bits *)0)
50 #define __mlx5_bit_sz(typ, fld) sizeof(__mlx5_nullp(typ)->fld)
51 #define __mlx5_bit_off(typ, fld) ((unsigned)(unsigned long)(&(__mlx5_nullp(typ)->fld)))
52 #define __mlx5_dw_off(typ, fld) (__mlx5_bit_off(typ, fld) / 32)
53 #define __mlx5_64_off(typ, fld) (__mlx5_bit_off(typ, fld) / 64)
54 #define __mlx5_dw_bit_off(typ, fld) (32 - __mlx5_bit_sz(typ, fld) - (__mlx5_bit_off(typ, fld) & 0x1f))
55 #define __mlx5_mask(typ, fld) ((u32)((1ull << __mlx5_bit_sz(typ, fld)) - 1))
56 #define __mlx5_dw_mask(typ, fld) (__mlx5_mask(typ, fld) << __mlx5_dw_bit_off(typ, fld))
57 #define __mlx5_st_sz_bits(typ) sizeof(struct mlx5_ifc_##typ##_bits)
59 #define MLX5_FLD_SZ_BYTES(typ, fld) (__mlx5_bit_sz(typ, fld) / 8)
60 #define MLX5_ST_SZ_BYTES(typ) (sizeof(struct mlx5_ifc_##typ##_bits) / 8)
61 #define MLX5_ST_SZ_DW(typ) (sizeof(struct mlx5_ifc_##typ##_bits) / 32)
62 #define MLX5_ST_SZ_QW(typ) (sizeof(struct mlx5_ifc_##typ##_bits) / 64)
63 #define MLX5_UN_SZ_BYTES(typ) (sizeof(union mlx5_ifc_##typ##_bits) / 8)
64 #define MLX5_UN_SZ_DW(typ) (sizeof(union mlx5_ifc_##typ##_bits) / 32)
65 #define MLX5_BYTE_OFF(typ, fld) (__mlx5_bit_off(typ, fld) / 8)
66 #define MLX5_ADDR_OF(typ, p, fld) ((char *)(p) + MLX5_BYTE_OFF(typ, fld))
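/* An illustrative sketch (not part of the original header) of how the layout
 * macros above are used: the mlx5_ifc_*_bits structures in mlx5_ifc.h describe
 * firmware layouts bit by bit, so MLX5_ST_SZ_DW() yields a structure size in
 * 32-bit words, MLX5_ST_SZ_BYTES() the size in bytes, and MLX5_ADDR_OF() the
 * byte address of a byte-aligned field inside a command buffer, e.g.:
 *
 *	u32 in[MLX5_ST_SZ_DW(create_cq_in)] = {0};
 *	void *cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);
 */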
68 /* insert a value to a struct */
69 #define MLX5_SET(typ, p, fld, v) do { \
70 BUILD_BUG_ON(__mlx5_st_sz_bits(typ) % 32); \
71 *((__be32 *)(p) + __mlx5_dw_off(typ, fld)) = \
72 cpu_to_be32((be32_to_cpu(*((__be32 *)(p) + __mlx5_dw_off(typ, fld))) & \
73 (~__mlx5_dw_mask(typ, fld))) | (((v) & __mlx5_mask(typ, fld)) \
74 << __mlx5_dw_bit_off(typ, fld))); \
} while (0)
77 #define MLX5_SET_TO_ONES(typ, p, fld) do { \
78 BUILD_BUG_ON(__mlx5_st_sz_bits(typ) % 32); \
79 *((__be32 *)(p) + __mlx5_dw_off(typ, fld)) = \
80 cpu_to_be32((be32_to_cpu(*((__be32 *)(p) + __mlx5_dw_off(typ, fld))) & \
81 (~__mlx5_dw_mask(typ, fld))) | ((__mlx5_mask(typ, fld)) \
82 << __mlx5_dw_bit_off(typ, fld))); \
} while (0)
85 #define MLX5_GET(typ, p, fld) ((be32_to_cpu(*((__be32 *)(p) +\
86 __mlx5_dw_off(typ, fld))) >> __mlx5_dw_bit_off(typ, fld)) & \
87 __mlx5_mask(typ, fld))
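/* Illustrative usage of the accessors above (a sketch assuming the
 * create_cq_in/cqc layouts from mlx5_ifc.h; nent, uar_index and log_sz are
 * hypothetical locals). MLX5_SET() read-modify-writes a single big-endian
 * dword, masking the value to the field width, and MLX5_GET() returns the
 * field in CPU endianness:
 *
 *	void *cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);
 *
 *	MLX5_SET(cqc, cqc, log_cq_size, ilog2(nent));
 *	MLX5_SET(cqc, cqc, uar_page, uar_index);
 *	log_sz = MLX5_GET(cqc, cqc, log_cq_size);
 */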
89 #define MLX5_GET_PR(typ, p, fld) ({ \
90 u32 ___t = MLX5_GET(typ, p, fld); \
91 pr_debug(#fld " = 0x%x\n", ___t); \
___t; \
})
95 #define MLX5_SET64(typ, p, fld, v) do { \
96 BUILD_BUG_ON(__mlx5_bit_sz(typ, fld) != 64); \
97 BUILD_BUG_ON(__mlx5_bit_off(typ, fld) % 64); \
98 *((__be64 *)(p) + __mlx5_64_off(typ, fld)) = cpu_to_be64(v); \
} while (0)
101 #define MLX5_GET64(typ, p, fld) be64_to_cpu(*((__be64 *)(p) + __mlx5_64_off(typ, fld)))
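/* MLX5_SET64()/MLX5_GET64() only work on naturally aligned 64-bit fields; the
 * BUILD_BUG_ONs above reject anything else at compile time. A sketch assuming
 * the mkc layout from mlx5_ifc.h and hypothetical start_addr/len variables:
 *
 *	MLX5_SET64(mkc, mkc, start_addr, start_addr);
 *	len = MLX5_GET64(mkc, mkc, len);
 */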
103 #define MLX5_GET64_PR(typ, p, fld) ({ \
104 u64 ___t = MLX5_GET64(typ, p, fld); \
105 pr_debug(#fld " = 0x%llx\n", ___t); \
___t; \
})
109 /* Big endian getters */
110 #define MLX5_GET64_BE(typ, p, fld) (*((__be64 *)(p) +\
111 __mlx5_64_off(typ, fld)))
113 #define MLX5_GET_BE(type_t, typ, p, fld) ({ \
115 switch (sizeof(tmp)) { \
117 tmp = (__force type_t)MLX5_GET(typ, p, fld); \
120 tmp = (__force type_t)cpu_to_be16(MLX5_GET(typ, p, fld)); \
123 tmp = (__force type_t)cpu_to_be32(MLX5_GET(typ, p, fld)); \
126 tmp = (__force type_t)MLX5_GET64_BE(typ, p, fld); \
132 enum mlx5_inline_modes {
133 MLX5_INLINE_MODE_NONE,
136 MLX5_INLINE_MODE_TCP_UDP,
140 MLX5_MAX_COMMANDS = 32,
141 MLX5_CMD_DATA_BLOCK_SIZE = 512,
142 MLX5_PCI_CMD_XPORT = 7,
143 MLX5_MKEY_BSF_OCTO_SIZE = 4,
148 MLX5_EXTENDED_UD_AV = 0x80000000,
152 MLX5_CQ_STATE_ARMED = 9,
153 MLX5_CQ_STATE_ALWAYS_ARMED = 0xb,
154 MLX5_CQ_STATE_FIRED = 0xa,
158 MLX5_STAT_RATE_OFFSET = 5,
162 MLX5_INLINE_SEG = 0x80000000,
166 MLX5_HW_START_PADDING = MLX5_INLINE_SEG,
170 MLX5_MIN_PKEY_TABLE_SIZE = 128,
171 MLX5_MAX_LOG_PKEY_TABLE = 5,
175 MLX5_MKEY_INBOX_PG_ACCESS = 1 << 31
179 MLX5_PFAULT_SUBTYPE_WQE = 0,
180 MLX5_PFAULT_SUBTYPE_RDMA = 1,
184 MLX5_PERM_LOCAL_READ = 1 << 2,
185 MLX5_PERM_LOCAL_WRITE = 1 << 3,
186 MLX5_PERM_REMOTE_READ = 1 << 4,
187 MLX5_PERM_REMOTE_WRITE = 1 << 5,
188 MLX5_PERM_ATOMIC = 1 << 6,
189 MLX5_PERM_UMR_EN = 1 << 7,
193 MLX5_PCIE_CTRL_SMALL_FENCE = 1 << 0,
194 MLX5_PCIE_CTRL_RELAXED_ORDERING = 1 << 2,
195 MLX5_PCIE_CTRL_NO_SNOOP = 1 << 3,
196 MLX5_PCIE_CTRL_TLP_PROCE_EN = 1 << 6,
197 MLX5_PCIE_CTRL_TPH_MASK = 3 << 4,
201 MLX5_ACCESS_MODE_PA = 0,
202 MLX5_ACCESS_MODE_MTT = 1,
203 MLX5_ACCESS_MODE_KLM = 2
207 MLX5_MKEY_REMOTE_INVAL = 1 << 24,
208 MLX5_MKEY_FLAG_SYNC_UMR = 1 << 29,
209 MLX5_MKEY_BSF_EN = 1 << 30,
210 MLX5_MKEY_LEN64 = 1 << 31,
219 MLX5_BF_REGS_PER_PAGE = 4,
220 MLX5_MAX_UAR_PAGES = 1 << 8,
221 MLX5_NON_FP_BF_REGS_PER_PAGE = 2,
222 MLX5_MAX_UUARS = MLX5_MAX_UAR_PAGES * MLX5_NON_FP_BF_REGS_PER_PAGE,
226 MLX5_MKEY_MASK_LEN = 1ull << 0,
227 MLX5_MKEY_MASK_PAGE_SIZE = 1ull << 1,
228 MLX5_MKEY_MASK_START_ADDR = 1ull << 6,
229 MLX5_MKEY_MASK_PD = 1ull << 7,
230 MLX5_MKEY_MASK_EN_RINVAL = 1ull << 8,
231 MLX5_MKEY_MASK_EN_SIGERR = 1ull << 9,
232 MLX5_MKEY_MASK_BSF_EN = 1ull << 12,
233 MLX5_MKEY_MASK_KEY = 1ull << 13,
234 MLX5_MKEY_MASK_QPN = 1ull << 14,
235 MLX5_MKEY_MASK_LR = 1ull << 17,
236 MLX5_MKEY_MASK_LW = 1ull << 18,
237 MLX5_MKEY_MASK_RR = 1ull << 19,
238 MLX5_MKEY_MASK_RW = 1ull << 20,
239 MLX5_MKEY_MASK_A = 1ull << 21,
240 MLX5_MKEY_MASK_SMALL_FENCE = 1ull << 23,
241 MLX5_MKEY_MASK_FREE = 1ull << 29,
245 MLX5_UMR_TRANSLATION_OFFSET_EN = (1 << 4),
247 MLX5_UMR_CHECK_NOT_FREE = (1 << 5),
248 MLX5_UMR_CHECK_FREE = (2 << 5),
250 MLX5_UMR_INLINE = (1 << 7),
253 #define MLX5_UMR_MTT_ALIGNMENT 0x40
254 #define MLX5_UMR_MTT_MASK (MLX5_UMR_MTT_ALIGNMENT - 1)
255 #define MLX5_UMR_MTT_MIN_CHUNK_SIZE MLX5_UMR_MTT_ALIGNMENT
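/* Example of the alignment rule above (illustrative only; nentries and
 * xlt_size are hypothetical): MTT entries are 8 bytes each and a UMR transfer
 * must be a multiple of MLX5_UMR_MTT_ALIGNMENT, so
 *
 *	xlt_size = ALIGN(nentries * sizeof(__be64), MLX5_UMR_MTT_ALIGNMENT);
 */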
257 #define MLX5_USER_INDEX_LEN (MLX5_FLD_SZ_BYTES(qpc, user_index) * 8)
260 MLX5_EVENT_QUEUE_TYPE_QP = 0,
261 MLX5_EVENT_QUEUE_TYPE_RQ = 1,
262 MLX5_EVENT_QUEUE_TYPE_SQ = 2,
266 MLX5_EVENT_TYPE_COMP = 0x0,
268 MLX5_EVENT_TYPE_PATH_MIG = 0x01,
269 MLX5_EVENT_TYPE_COMM_EST = 0x02,
270 MLX5_EVENT_TYPE_SQ_DRAINED = 0x03,
271 MLX5_EVENT_TYPE_SRQ_LAST_WQE = 0x13,
272 MLX5_EVENT_TYPE_SRQ_RQ_LIMIT = 0x14,
274 MLX5_EVENT_TYPE_CQ_ERROR = 0x04,
275 MLX5_EVENT_TYPE_WQ_CATAS_ERROR = 0x05,
276 MLX5_EVENT_TYPE_PATH_MIG_FAILED = 0x07,
277 MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR = 0x10,
278 MLX5_EVENT_TYPE_WQ_ACCESS_ERROR = 0x11,
279 MLX5_EVENT_TYPE_SRQ_CATAS_ERROR = 0x12,
281 MLX5_EVENT_TYPE_INTERNAL_ERROR = 0x08,
282 MLX5_EVENT_TYPE_PORT_CHANGE = 0x09,
283 MLX5_EVENT_TYPE_GPIO_EVENT = 0x15,
284 MLX5_EVENT_TYPE_REMOTE_CONFIG = 0x19,
286 MLX5_EVENT_TYPE_DB_BF_CONGESTION = 0x1a,
287 MLX5_EVENT_TYPE_STALL_EVENT = 0x1b,
289 MLX5_EVENT_TYPE_CMD = 0x0a,
290 MLX5_EVENT_TYPE_PAGE_REQUEST = 0xb,
292 MLX5_EVENT_TYPE_PAGE_FAULT = 0xc,
293 MLX5_EVENT_TYPE_NIC_VPORT_CHANGE = 0xd,
297 MLX5_PORT_CHANGE_SUBTYPE_DOWN = 1,
298 MLX5_PORT_CHANGE_SUBTYPE_ACTIVE = 4,
299 MLX5_PORT_CHANGE_SUBTYPE_INITIALIZED = 5,
300 MLX5_PORT_CHANGE_SUBTYPE_LID = 6,
301 MLX5_PORT_CHANGE_SUBTYPE_PKEY = 7,
302 MLX5_PORT_CHANGE_SUBTYPE_GUID = 8,
303 MLX5_PORT_CHANGE_SUBTYPE_CLIENT_REREG = 9,
307 MLX5_DEV_CAP_FLAG_XRC = 1LL << 3,
308 MLX5_DEV_CAP_FLAG_BAD_PKEY_CNTR = 1LL << 8,
309 MLX5_DEV_CAP_FLAG_BAD_QKEY_CNTR = 1LL << 9,
310 MLX5_DEV_CAP_FLAG_APM = 1LL << 17,
311 MLX5_DEV_CAP_FLAG_ATOMIC = 1LL << 18,
312 MLX5_DEV_CAP_FLAG_BLOCK_MCAST = 1LL << 23,
313 MLX5_DEV_CAP_FLAG_ON_DMND_PG = 1LL << 24,
314 MLX5_DEV_CAP_FLAG_CQ_MODER = 1LL << 29,
315 MLX5_DEV_CAP_FLAG_RESIZE_CQ = 1LL << 30,
316 MLX5_DEV_CAP_FLAG_DCT = 1LL << 37,
317 MLX5_DEV_CAP_FLAG_SIG_HAND_OVER = 1LL << 40,
318 MLX5_DEV_CAP_FLAG_CMDIF_CSUM = 3LL << 46,
322 MLX5_ROCE_VERSION_1 = 0,
323 MLX5_ROCE_VERSION_2 = 2,
327 MLX5_ROCE_VERSION_1_CAP = 1 << MLX5_ROCE_VERSION_1,
328 MLX5_ROCE_VERSION_2_CAP = 1 << MLX5_ROCE_VERSION_2,
332 MLX5_ROCE_L3_TYPE_IPV4 = 0,
333 MLX5_ROCE_L3_TYPE_IPV6 = 1,
337 MLX5_ROCE_L3_TYPE_IPV4_CAP = 1 << 1,
338 MLX5_ROCE_L3_TYPE_IPV6_CAP = 1 << 2,
342 MLX5_OPCODE_NOP = 0x00,
343 MLX5_OPCODE_SEND_INVAL = 0x01,
344 MLX5_OPCODE_RDMA_WRITE = 0x08,
345 MLX5_OPCODE_RDMA_WRITE_IMM = 0x09,
346 MLX5_OPCODE_SEND = 0x0a,
347 MLX5_OPCODE_SEND_IMM = 0x0b,
348 MLX5_OPCODE_LSO = 0x0e,
349 MLX5_OPCODE_RDMA_READ = 0x10,
350 MLX5_OPCODE_ATOMIC_CS = 0x11,
351 MLX5_OPCODE_ATOMIC_FA = 0x12,
352 MLX5_OPCODE_ATOMIC_MASKED_CS = 0x14,
353 MLX5_OPCODE_ATOMIC_MASKED_FA = 0x15,
354 MLX5_OPCODE_BIND_MW = 0x18,
355 MLX5_OPCODE_CONFIG_CMD = 0x1f,
357 MLX5_RECV_OPCODE_RDMA_WRITE_IMM = 0x00,
358 MLX5_RECV_OPCODE_SEND = 0x01,
359 MLX5_RECV_OPCODE_SEND_IMM = 0x02,
360 MLX5_RECV_OPCODE_SEND_INVAL = 0x03,
362 MLX5_CQE_OPCODE_ERROR = 0x1e,
363 MLX5_CQE_OPCODE_RESIZE = 0x16,
365 MLX5_OPCODE_SET_PSV = 0x20,
366 MLX5_OPCODE_GET_PSV = 0x21,
367 MLX5_OPCODE_CHECK_PSV = 0x22,
368 MLX5_OPCODE_RGET_PSV = 0x26,
369 MLX5_OPCODE_RCHECK_PSV = 0x27,
371 MLX5_OPCODE_UMR = 0x25,
376 MLX5_SET_PORT_RESET_QKEY = 0,
377 MLX5_SET_PORT_GUID0 = 16,
378 MLX5_SET_PORT_NODE_GUID = 17,
379 MLX5_SET_PORT_SYS_GUID = 18,
380 MLX5_SET_PORT_GID_TABLE = 19,
381 MLX5_SET_PORT_PKEY_TABLE = 20,
385 MLX5_BW_NO_LIMIT = 0,
386 MLX5_100_MBPS_UNIT = 3,
391 MLX5_MAX_PAGE_SHIFT = 31
395 MLX5_ADAPTER_PAGE_SHIFT = 12,
396 MLX5_ADAPTER_PAGE_SIZE = 1 << MLX5_ADAPTER_PAGE_SHIFT,
400 MLX5_CAP_OFF_CMDIF_CSUM = 46,
405 * Max wqe size for rdma read is 512 bytes, so this
406 * limits our max_sge_rd as the wqe needs to fit:
407 * - ctrl segment (16 bytes)
408 * - rdma segment (16 bytes)
409 * - scatter elements (16 bytes each)
411 MLX5_MAX_SGE_RD = (512 - 16 - 16) / 16
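/* Worked out: (512 - 16 - 16) / 16 = 30, i.e. an RDMA READ WQE has room for at
 * most 30 scatter entries after its ctrl and rdma segments.
 */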
414 struct mlx5_inbox_hdr {
420 struct mlx5_outbox_hdr {
426 struct mlx5_cmd_query_adapter_mbox_in {
427 struct mlx5_inbox_hdr hdr;
431 struct mlx5_cmd_query_adapter_mbox_out {
432 struct mlx5_outbox_hdr hdr;
436 __be16 vsd_vendor_id;
441 enum mlx5_odp_transport_cap_bits {
442 MLX5_ODP_SUPPORT_SEND = 1 << 31,
443 MLX5_ODP_SUPPORT_RECV = 1 << 30,
444 MLX5_ODP_SUPPORT_WRITE = 1 << 29,
445 MLX5_ODP_SUPPORT_READ = 1 << 28,
448 struct mlx5_odp_caps {
454 } per_transport_caps;
455 char reserved2[0xe4];
458 struct mlx5_cmd_layout {
474 struct health_buffer {
475 __be32 assert_var[5];
477 __be32 assert_exit_ptr;
478 __be32 assert_callra;
488 struct mlx5_init_seg {
490 __be32 cmdif_rev_fw_sub;
493 __be32 cmdq_addr_l_sz;
497 struct health_buffer health;
499 __be32 internal_timer_h;
500 __be32 internal_timer_l;
502 __be32 health_counter;
505 __be32 ieee1588_clk_type;
509 struct mlx5_eqe_comp {
514 struct mlx5_eqe_qp_srq {
521 struct mlx5_eqe_cq_err {
527 struct mlx5_eqe_port_state {
532 struct mlx5_eqe_gpio {
537 struct mlx5_eqe_congestion {
543 struct mlx5_eqe_stall_vl {
548 struct mlx5_eqe_cmd {
553 struct mlx5_eqe_page_req {
560 struct mlx5_eqe_page_fault {
561 __be32 bytes_committed;
567 __be16 packet_length;
573 __be16 packet_length;
581 struct mlx5_eqe_vport_change {
589 struct mlx5_eqe_cmd cmd;
590 struct mlx5_eqe_comp comp;
591 struct mlx5_eqe_qp_srq qp_srq;
592 struct mlx5_eqe_cq_err cq_err;
593 struct mlx5_eqe_port_state port;
594 struct mlx5_eqe_gpio gpio;
595 struct mlx5_eqe_congestion cong;
596 struct mlx5_eqe_stall_vl stall_vl;
597 struct mlx5_eqe_page_req req_pages;
598 struct mlx5_eqe_page_fault page_fault;
599 struct mlx5_eqe_vport_change vport_change;
614 struct mlx5_cmd_prot_block {
615 u8 data[MLX5_CMD_DATA_BLOCK_SIZE];
626 MLX5_CQE_SYND_FLUSHED_IN_ERROR = 5,
629 struct mlx5_err_cqe {
635 __be32 s_wqe_opcode_qpn;
642 u8 outer_l3_tunneled;
645 u8 lro_tcppsh_abort_dupack;
648 __be32 lro_ack_seq_num;
649 __be32 rss_hash_result;
659 __be32 srqn; /* [31:24]: lro_num_seg, [23:0]: srqn */
660 __be32 imm_inval_pkey;
671 struct mlx5_mini_cqe8 {
673 __be32 rx_hash_result;
689 MLX5_INLINE_DATA32_SEG,
690 MLX5_INLINE_DATA64_SEG,
695 MLX5_CQE_FORMAT_CSUM = 0x1,
698 #define MLX5_MINI_CQE_ARRAY_SIZE 8
700 static inline int mlx5_get_cqe_format(struct mlx5_cqe64 *cqe)
702 return (cqe->op_own >> 2) & 0x3;
705 static inline int get_cqe_lro_tcppsh(struct mlx5_cqe64 *cqe)
707 return (cqe->lro_tcppsh_abort_dupack >> 6) & 1;
710 static inline u8 get_cqe_l4_hdr_type(struct mlx5_cqe64 *cqe)
712 return (cqe->l4_l3_hdr_type >> 4) & 0x7;
715 static inline u8 get_cqe_l3_hdr_type(struct mlx5_cqe64 *cqe)
717 return (cqe->l4_l3_hdr_type >> 2) & 0x3;
720 static inline u8 cqe_is_tunneled(struct mlx5_cqe64 *cqe)
722 return cqe->outer_l3_tunneled & 0x1;
725 static inline int cqe_has_vlan(struct mlx5_cqe64 *cqe)
727 return !!(cqe->l4_l3_hdr_type & 0x1);
730 static inline u64 get_cqe_ts(struct mlx5_cqe64 *cqe)
734 hi = be32_to_cpu(cqe->timestamp_h);
735 lo = be32_to_cpu(cqe->timestamp_l);
737 return (u64)lo | ((u64)hi << 32);
740 struct mpwrq_cqe_bc {
741 __be16 filler_consumed_strides;
745 static inline u16 mpwrq_get_cqe_byte_cnt(struct mlx5_cqe64 *cqe)
747 struct mpwrq_cqe_bc *bc = (struct mpwrq_cqe_bc *)&cqe->byte_cnt;
749 return be16_to_cpu(bc->byte_cnt);
752 static inline u16 mpwrq_get_cqe_bc_consumed_strides(struct mpwrq_cqe_bc *bc)
754 return 0x7fff & be16_to_cpu(bc->filler_consumed_strides);
757 static inline u16 mpwrq_get_cqe_consumed_strides(struct mlx5_cqe64 *cqe)
759 struct mpwrq_cqe_bc *bc = (struct mpwrq_cqe_bc *)&cqe->byte_cnt;
761 return mpwrq_get_cqe_bc_consumed_strides(bc);
764 static inline bool mpwrq_is_filler_cqe(struct mlx5_cqe64 *cqe)
766 struct mpwrq_cqe_bc *bc = (struct mpwrq_cqe_bc *)&cqe->byte_cnt;
768 return 0x8000 & be16_to_cpu(bc->filler_consumed_strides);
771 static inline u16 mpwrq_get_cqe_stride_index(struct mlx5_cqe64 *cqe)
773 return be16_to_cpu(cqe->wqe_counter);
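/* Sketch of how a multi-packet RQ completion might be parsed with the helpers
 * above (illustrative only; cqe points at a struct mlx5_cqe64 of an MPWQE RQ
 * and the locals are hypothetical):
 *
 *	if (mpwrq_is_filler_cqe(cqe)) {
 *		consumed = mpwrq_get_cqe_consumed_strides(cqe);
 *	} else {
 *		byte_cnt  = mpwrq_get_cqe_byte_cnt(cqe);
 *		stride_ix = mpwrq_get_cqe_stride_index(cqe);
 *	}
 */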
777 CQE_L4_HDR_TYPE_NONE = 0x0,
778 CQE_L4_HDR_TYPE_TCP_NO_ACK = 0x1,
779 CQE_L4_HDR_TYPE_UDP = 0x2,
780 CQE_L4_HDR_TYPE_TCP_ACK_NO_DATA = 0x3,
781 CQE_L4_HDR_TYPE_TCP_ACK_AND_DATA = 0x4,
785 CQE_RSS_HTYPE_IP = 0x3 << 6,
786 CQE_RSS_HTYPE_L4 = 0x3 << 2,
790 MLX5_CQE_ROCE_L3_HEADER_TYPE_GRH = 0x0,
791 MLX5_CQE_ROCE_L3_HEADER_TYPE_IPV6 = 0x1,
792 MLX5_CQE_ROCE_L3_HEADER_TYPE_IPV4 = 0x2,
801 struct mlx5_sig_err_cqe {
803 __be32 expected_trans_sig;
804 __be32 actual_trans_sig;
805 __be32 expected_reftag;
806 __be32 actual_reftag;
818 struct mlx5_wqe_srq_next_seg {
820 __be16 next_wqe_index;
831 union mlx5_ext_cqe inl_grh;
832 struct mlx5_cqe64 cqe64;
835 struct mlx5_srq_ctx {
850 struct mlx5_create_srq_mbox_in {
851 struct mlx5_inbox_hdr hdr;
854 struct mlx5_srq_ctx ctx;
859 struct mlx5_create_srq_mbox_out {
860 struct mlx5_outbox_hdr hdr;
865 struct mlx5_destroy_srq_mbox_in {
866 struct mlx5_inbox_hdr hdr;
871 struct mlx5_destroy_srq_mbox_out {
872 struct mlx5_outbox_hdr hdr;
876 struct mlx5_query_srq_mbox_in {
877 struct mlx5_inbox_hdr hdr;
882 struct mlx5_query_srq_mbox_out {
883 struct mlx5_outbox_hdr hdr;
885 struct mlx5_srq_ctx ctx;
890 struct mlx5_arm_srq_mbox_in {
891 struct mlx5_inbox_hdr hdr;
897 struct mlx5_arm_srq_mbox_out {
898 struct mlx5_outbox_hdr hdr;
902 struct mlx5_cq_context {
909 __be32 log_sz_usr_page;
916 __be32 last_notified_index;
917 __be32 solicit_producer_index;
918 __be32 consumer_counter;
919 __be32 producer_counter;
921 __be64 db_record_addr;
924 struct mlx5_create_cq_mbox_in {
925 struct mlx5_inbox_hdr hdr;
928 struct mlx5_cq_context ctx;
933 struct mlx5_create_cq_mbox_out {
934 struct mlx5_outbox_hdr hdr;
939 struct mlx5_destroy_cq_mbox_in {
940 struct mlx5_inbox_hdr hdr;
945 struct mlx5_destroy_cq_mbox_out {
946 struct mlx5_outbox_hdr hdr;
950 struct mlx5_query_cq_mbox_in {
951 struct mlx5_inbox_hdr hdr;
956 struct mlx5_query_cq_mbox_out {
957 struct mlx5_outbox_hdr hdr;
959 struct mlx5_cq_context ctx;
964 struct mlx5_modify_cq_mbox_in {
965 struct mlx5_inbox_hdr hdr;
968 struct mlx5_cq_context ctx;
973 struct mlx5_modify_cq_mbox_out {
974 struct mlx5_outbox_hdr hdr;
978 struct mlx5_enable_hca_mbox_in {
979 struct mlx5_inbox_hdr hdr;
983 struct mlx5_enable_hca_mbox_out {
984 struct mlx5_outbox_hdr hdr;
988 struct mlx5_disable_hca_mbox_in {
989 struct mlx5_inbox_hdr hdr;
993 struct mlx5_disable_hca_mbox_out {
994 struct mlx5_outbox_hdr hdr;
998 struct mlx5_eq_context {
1004 __be32 log_sz_usr_page;
1009 __be32 consumer_counter;
1010 __be32 produser_counter;
1014 struct mlx5_create_eq_mbox_in {
1015 struct mlx5_inbox_hdr hdr;
1019 struct mlx5_eq_context ctx;
1026 struct mlx5_create_eq_mbox_out {
1027 struct mlx5_outbox_hdr hdr;
1033 struct mlx5_destroy_eq_mbox_in {
1034 struct mlx5_inbox_hdr hdr;
1040 struct mlx5_destroy_eq_mbox_out {
1041 struct mlx5_outbox_hdr hdr;
1045 struct mlx5_map_eq_mbox_in {
1046 struct mlx5_inbox_hdr hdr;
1054 struct mlx5_map_eq_mbox_out {
1055 struct mlx5_outbox_hdr hdr;
1059 struct mlx5_query_eq_mbox_in {
1060 struct mlx5_inbox_hdr hdr;
1066 struct mlx5_query_eq_mbox_out {
1067 struct mlx5_outbox_hdr hdr;
1069 struct mlx5_eq_context ctx;
1073 MLX5_MKEY_STATUS_FREE = 1 << 6,
1076 struct mlx5_mkey_seg {
1077 /* This is a two bit field occupying bits 31-30.
1078 * bit 31 is always 0,
1079 * bit 30 is zero for regular MRs and 1 (i.e. free) for UMRs that do not have translation
1090 __be32 bsfs_octo_size;
1092 __be32 xlt_oct_size;
1098 struct mlx5_query_special_ctxs_mbox_in {
1099 struct mlx5_inbox_hdr hdr;
1103 struct mlx5_query_special_ctxs_mbox_out {
1104 struct mlx5_outbox_hdr hdr;
1105 __be32 dump_fill_mkey;
1106 __be32 reserved_lkey;
1109 struct mlx5_create_mkey_mbox_in {
1110 struct mlx5_inbox_hdr hdr;
1111 __be32 input_mkey_index;
1113 struct mlx5_mkey_seg seg;
1115 __be32 xlat_oct_act_size;
1121 struct mlx5_create_mkey_mbox_out {
1122 struct mlx5_outbox_hdr hdr;
1127 struct mlx5_destroy_mkey_mbox_in {
1128 struct mlx5_inbox_hdr hdr;
1133 struct mlx5_destroy_mkey_mbox_out {
1134 struct mlx5_outbox_hdr hdr;
1138 struct mlx5_query_mkey_mbox_in {
1139 struct mlx5_inbox_hdr hdr;
1143 struct mlx5_query_mkey_mbox_out {
1144 struct mlx5_outbox_hdr hdr;
1148 struct mlx5_modify_mkey_mbox_in {
1149 struct mlx5_inbox_hdr hdr;
1154 struct mlx5_modify_mkey_mbox_out {
1155 struct mlx5_outbox_hdr hdr;
1159 struct mlx5_dump_mkey_mbox_in {
1160 struct mlx5_inbox_hdr hdr;
1163 struct mlx5_dump_mkey_mbox_out {
1164 struct mlx5_outbox_hdr hdr;
1168 struct mlx5_mad_ifc_mbox_in {
1169 struct mlx5_inbox_hdr hdr;
1177 struct mlx5_mad_ifc_mbox_out {
1178 struct mlx5_outbox_hdr hdr;
1183 struct mlx5_access_reg_mbox_in {
1184 struct mlx5_inbox_hdr hdr;
1191 struct mlx5_access_reg_mbox_out {
1192 struct mlx5_outbox_hdr hdr;
1197 #define MLX5_ATTR_EXTENDED_PORT_INFO cpu_to_be16(0xff90)
1200 MLX_EXT_PORT_CAP_FLAG_EXTENDED_PORT_INFO = 1 << 0
1203 struct mlx5_allocate_psv_in {
1204 struct mlx5_inbox_hdr hdr;
1209 struct mlx5_allocate_psv_out {
1210 struct mlx5_outbox_hdr hdr;
1215 struct mlx5_destroy_psv_in {
1216 struct mlx5_inbox_hdr hdr;
1221 struct mlx5_destroy_psv_out {
1222 struct mlx5_outbox_hdr hdr;
1227 VPORT_STATE_DOWN = 0x0,
1228 VPORT_STATE_UP = 0x1,
1232 MLX5_ESW_VPORT_ADMIN_STATE_DOWN = 0x0,
1233 MLX5_ESW_VPORT_ADMIN_STATE_UP = 0x1,
1234 MLX5_ESW_VPORT_ADMIN_STATE_AUTO = 0x2,
1238 MLX5_L3_PROT_TYPE_IPV4 = 0,
1239 MLX5_L3_PROT_TYPE_IPV6 = 1,
1243 MLX5_L4_PROT_TYPE_TCP = 0,
1244 MLX5_L4_PROT_TYPE_UDP = 1,
1248 MLX5_HASH_FIELD_SEL_SRC_IP = 1 << 0,
1249 MLX5_HASH_FIELD_SEL_DST_IP = 1 << 1,
1250 MLX5_HASH_FIELD_SEL_L4_SPORT = 1 << 2,
1251 MLX5_HASH_FIELD_SEL_L4_DPORT = 1 << 3,
1252 MLX5_HASH_FIELD_SEL_IPSEC_SPI = 1 << 4,
1256 MLX5_MATCH_OUTER_HEADERS = 1 << 0,
1257 MLX5_MATCH_MISC_PARAMETERS = 1 << 1,
1258 MLX5_MATCH_INNER_HEADERS = 1 << 2,
1263 MLX5_FLOW_TABLE_TYPE_NIC_RCV = 0,
1264 MLX5_FLOW_TABLE_TYPE_ESWITCH = 4,
1268 MLX5_FLOW_CONTEXT_DEST_TYPE_VPORT = 0,
1269 MLX5_FLOW_CONTEXT_DEST_TYPE_FLOW_TABLE = 1,
1270 MLX5_FLOW_CONTEXT_DEST_TYPE_TIR = 2,
1273 enum mlx5_list_type {
1274 MLX5_NVPRT_LIST_TYPE_UC = 0x0,
1275 MLX5_NVPRT_LIST_TYPE_MC = 0x1,
1276 MLX5_NVPRT_LIST_TYPE_VLAN = 0x2,
1280 MLX5_RQC_RQ_TYPE_MEMORY_RQ_INLINE = 0x0,
1281 MLX5_RQC_RQ_TYPE_MEMORY_RQ_RPM = 0x1,
1284 enum mlx5_wol_mode {
1285 MLX5_WOL_DISABLE = 0,
1286 MLX5_WOL_SECURED_MAGIC = 1 << 1,
1287 MLX5_WOL_MAGIC = 1 << 2,
1288 MLX5_WOL_ARP = 1 << 3,
1289 MLX5_WOL_BROADCAST = 1 << 4,
1290 MLX5_WOL_MULTICAST = 1 << 5,
1291 MLX5_WOL_UNICAST = 1 << 6,
1292 MLX5_WOL_PHY_ACTIVITY = 1 << 7,
1298 enum mlx5_cap_mode {
1299 HCA_CAP_OPMOD_GET_MAX = 0,
1300 HCA_CAP_OPMOD_GET_CUR = 1,
1303 enum mlx5_cap_type {
1304 MLX5_CAP_GENERAL = 0,
1305 MLX5_CAP_ETHERNET_OFFLOADS,
1309 MLX5_CAP_IPOIB_OFFLOADS,
1310 MLX5_CAP_EOIB_OFFLOADS,
1311 MLX5_CAP_FLOW_TABLE,
1312 MLX5_CAP_ESWITCH_FLOW_TABLE,
1315 MLX5_CAP_VECTOR_CALC,
1317 /* NUM OF CAP Types */
1321 /* GET Dev Caps macros */
1322 #define MLX5_CAP_GEN(mdev, cap) \
1323 MLX5_GET(cmd_hca_cap, mdev->hca_caps_cur[MLX5_CAP_GENERAL], cap)
1325 #define MLX5_CAP_GEN_MAX(mdev, cap) \
1326 MLX5_GET(cmd_hca_cap, mdev->hca_caps_max[MLX5_CAP_GENERAL], cap)
1328 #define MLX5_CAP_ETH(mdev, cap) \
1329 MLX5_GET(per_protocol_networking_offload_caps,\
1330 mdev->hca_caps_cur[MLX5_CAP_ETHERNET_OFFLOADS], cap)
1332 #define MLX5_CAP_ETH_MAX(mdev, cap) \
1333 MLX5_GET(per_protocol_networking_offload_caps,\
1334 mdev->hca_caps_max[MLX5_CAP_ETHERNET_OFFLOADS], cap)
1336 #define MLX5_CAP_ROCE(mdev, cap) \
1337 MLX5_GET(roce_cap, mdev->hca_caps_cur[MLX5_CAP_ROCE], cap)
1339 #define MLX5_CAP_ROCE_MAX(mdev, cap) \
1340 MLX5_GET(roce_cap, mdev->hca_caps_max[MLX5_CAP_ROCE], cap)
1342 #define MLX5_CAP_ATOMIC(mdev, cap) \
1343 MLX5_GET(atomic_caps, mdev->hca_caps_cur[MLX5_CAP_ATOMIC], cap)
1345 #define MLX5_CAP_ATOMIC_MAX(mdev, cap) \
1346 MLX5_GET(atomic_caps, mdev->hca_caps_max[MLX5_CAP_ATOMIC], cap)
1348 #define MLX5_CAP_FLOWTABLE(mdev, cap) \
1349 MLX5_GET(flow_table_nic_cap, mdev->hca_caps_cur[MLX5_CAP_FLOW_TABLE], cap)
1351 #define MLX5_CAP_FLOWTABLE_MAX(mdev, cap) \
1352 MLX5_GET(flow_table_nic_cap, mdev->hca_caps_max[MLX5_CAP_FLOW_TABLE], cap)
1354 #define MLX5_CAP_FLOWTABLE_NIC_RX(mdev, cap) \
1355 MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_receive.cap)
1357 #define MLX5_CAP_FLOWTABLE_NIC_RX_MAX(mdev, cap) \
1358 MLX5_CAP_FLOWTABLE_MAX(mdev, flow_table_properties_nic_receive.cap)
1360 #define MLX5_CAP_ESW_FLOWTABLE(mdev, cap) \
1361 MLX5_GET(flow_table_eswitch_cap, \
1362 mdev->hca_caps_cur[MLX5_CAP_ESWITCH_FLOW_TABLE], cap)
1364 #define MLX5_CAP_ESW_FLOWTABLE_MAX(mdev, cap) \
1365 MLX5_GET(flow_table_eswitch_cap, \
1366 mdev->hca_caps_max[MLX5_CAP_ESWITCH_FLOW_TABLE], cap)
1368 #define MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, cap) \
1369 MLX5_CAP_ESW_FLOWTABLE(mdev, flow_table_properties_nic_esw_fdb.cap)
1371 #define MLX5_CAP_ESW_FLOWTABLE_FDB_MAX(mdev, cap) \
1372 MLX5_CAP_ESW_FLOWTABLE_MAX(mdev, flow_table_properties_nic_esw_fdb.cap)
1374 #define MLX5_CAP_ESW_EGRESS_ACL(mdev, cap) \
1375 MLX5_CAP_ESW_FLOWTABLE(mdev, flow_table_properties_esw_acl_egress.cap)
1377 #define MLX5_CAP_ESW_EGRESS_ACL_MAX(mdev, cap) \
1378 MLX5_CAP_ESW_FLOWTABLE_MAX(mdev, flow_table_properties_esw_acl_egress.cap)
1380 #define MLX5_CAP_ESW_INGRESS_ACL(mdev, cap) \
1381 MLX5_CAP_ESW_FLOWTABLE(mdev, flow_table_properties_esw_acl_ingress.cap)
1383 #define MLX5_CAP_ESW_INGRESS_ACL_MAX(mdev, cap) \
1384 MLX5_CAP_ESW_FLOWTABLE_MAX(mdev, flow_table_properties_esw_acl_ingress.cap)
1386 #define MLX5_CAP_ESW(mdev, cap) \
1387 MLX5_GET(e_switch_cap, \
1388 mdev->hca_caps_cur[MLX5_CAP_ESWITCH], cap)
1390 #define MLX5_CAP_ESW_MAX(mdev, cap) \
1391 MLX5_GET(e_switch_cap, \
1392 mdev->hca_caps_max[MLX5_CAP_ESWITCH], cap)
1394 #define MLX5_CAP_ODP(mdev, cap)\
1395 MLX5_GET(odp_cap, mdev->hca_caps_cur[MLX5_CAP_ODP], cap)
1397 #define MLX5_CAP_VECTOR_CALC(mdev, cap) \
1398 MLX5_GET(vector_calc_cap, \
1399 mdev->hca_caps_cur[MLX5_CAP_VECTOR_CALC], cap)
1401 #define MLX5_CAP_QOS(mdev, cap)\
1402 MLX5_GET(qos_cap, mdev->hca_caps_cur[MLX5_CAP_QOS], cap)
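/* Typical capability check (a sketch, assuming a struct mlx5_core_dev *mdev
 * whose hca_caps_cur[]/hca_caps_max[] arrays were filled from QUERY_HCA_CAP;
 * the *_MAX variants report the device maximum rather than the currently
 * enabled value):
 *
 *	if (!MLX5_CAP_GEN(mdev, cq_moderation))
 *		return -EOPNOTSUPP;
 *	csum_cap = MLX5_CAP_ETH(mdev, csum_cap);
 */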
1405 MLX5_CMD_STAT_OK = 0x0,
1406 MLX5_CMD_STAT_INT_ERR = 0x1,
1407 MLX5_CMD_STAT_BAD_OP_ERR = 0x2,
1408 MLX5_CMD_STAT_BAD_PARAM_ERR = 0x3,
1409 MLX5_CMD_STAT_BAD_SYS_STATE_ERR = 0x4,
1410 MLX5_CMD_STAT_BAD_RES_ERR = 0x5,
1411 MLX5_CMD_STAT_RES_BUSY = 0x6,
1412 MLX5_CMD_STAT_LIM_ERR = 0x8,
1413 MLX5_CMD_STAT_BAD_RES_STATE_ERR = 0x9,
1414 MLX5_CMD_STAT_IX_ERR = 0xa,
1415 MLX5_CMD_STAT_NO_RES_ERR = 0xf,
1416 MLX5_CMD_STAT_BAD_INP_LEN_ERR = 0x50,
1417 MLX5_CMD_STAT_BAD_OUTP_LEN_ERR = 0x51,
1418 MLX5_CMD_STAT_BAD_QP_STATE_ERR = 0x10,
1419 MLX5_CMD_STAT_BAD_PKT_ERR = 0x30,
1420 MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR = 0x40,
1424 MLX5_IEEE_802_3_COUNTERS_GROUP = 0x0,
1425 MLX5_RFC_2863_COUNTERS_GROUP = 0x1,
1426 MLX5_RFC_2819_COUNTERS_GROUP = 0x2,
1427 MLX5_RFC_3635_COUNTERS_GROUP = 0x3,
1428 MLX5_ETHERNET_EXTENDED_COUNTERS_GROUP = 0x5,
1429 MLX5_PER_PRIORITY_COUNTERS_GROUP = 0x10,
1430 MLX5_PER_TRAFFIC_CLASS_COUNTERS_GROUP = 0x11,
1431 MLX5_PHYSICAL_LAYER_COUNTERS_GROUP = 0x12,
1432 MLX5_INFINIBAND_PORT_COUNTERS_GROUP = 0x20,
1435 static inline u16 mlx5_to_sw_pkey_sz(int pkey_sz)
1437 if (pkey_sz > MLX5_MAX_LOG_PKEY_TABLE)
	return 0;
1439 return MLX5_MIN_PKEY_TABLE_SIZE << pkey_sz;
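/* For example, a reported pkey_sz of 2 maps to 128 << 2 = 512 P_Key table
 * entries; anything above MLX5_MAX_LOG_PKEY_TABLE (5) is treated as invalid.
 */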
1442 #define MLX5_BY_PASS_NUM_REGULAR_PRIOS 8
1443 #define MLX5_BY_PASS_NUM_DONT_TRAP_PRIOS 8
1444 #define MLX5_BY_PASS_NUM_MULTICAST_PRIOS 1
1445 #define MLX5_BY_PASS_NUM_PRIOS (MLX5_BY_PASS_NUM_REGULAR_PRIOS +\
1446 MLX5_BY_PASS_NUM_DONT_TRAP_PRIOS +\
1447 MLX5_BY_PASS_NUM_MULTICAST_PRIOS)
1449 #endif /* MLX5_DEVICE_H */