2 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
36 #include <linux/mlx5/device.h>
37 #include <linux/mlx5/driver.h>
/* NOTE(review): this view is a line-sampled extract; the stray leading
 * integers on each line are original-file line numbers left over from
 * extraction, and interior lines of most definitions below are missing.
 */
39 #define MLX5_INVALID_LKEY 0x100
/* A signature WQE occupies 5 send-queue basic blocks (BBs). */
40 #define MLX5_SIG_WQE_SIZE (MLX5_SEND_WQE_BB * 5)
41 #define MLX5_DIF_SIZE 8
42 #define MLX5_STRIDE_BLOCK_OP 0x400
/* Bit masks selecting the guard/app/ref tag copy bits (names GRD/APP/REF). */
43 #define MLX5_CPY_GRD_MASK 0xc0
44 #define MLX5_CPY_APP_MASK 0x30
45 #define MLX5_CPY_REF_MASK 0x0f
/* BSF (signature) control flags. */
46 #define MLX5_BSF_INC_REFTAG (1 << 6)
47 #define MLX5_BSF_INL_VALID (1 << 15)
48 #define MLX5_BSF_REFRESH_DIF (1 << 14)
49 #define MLX5_BSF_REPEAT_BLOCK (1 << 7)
50 #define MLX5_BSF_APPTAG_ESCAPE 0x1
51 #define MLX5_BSF_APPREF_ESCAPE 0x2
/* QP numbers are 24 bits wide; the mask is derived from the bit count. */
53 #define MLX5_QPN_BITS 24
54 #define MLX5_QPN_MASK ((1 << MLX5_QPN_BITS) - 1)
/* Optional-parameter flags (enum opener not visible in this extract).
 * NOTE(review): presumably an optional-parameter mask for modify-QP
 * commands — confirm against the full header.  Bits 11, 15 and 17 are
 * unassigned in the visible members.
 */
57 MLX5_QP_OPTPAR_ALT_ADDR_PATH = 1 << 0,
58 MLX5_QP_OPTPAR_RRE = 1 << 1,
59 MLX5_QP_OPTPAR_RAE = 1 << 2,
60 MLX5_QP_OPTPAR_RWE = 1 << 3,
61 MLX5_QP_OPTPAR_PKEY_INDEX = 1 << 4,
62 MLX5_QP_OPTPAR_Q_KEY = 1 << 5,
63 MLX5_QP_OPTPAR_RNR_TIMEOUT = 1 << 6,
64 MLX5_QP_OPTPAR_PRIMARY_ADDR_PATH = 1 << 7,
65 MLX5_QP_OPTPAR_SRA_MAX = 1 << 8,
66 MLX5_QP_OPTPAR_RRA_MAX = 1 << 9,
67 MLX5_QP_OPTPAR_PM_STATE = 1 << 10,
68 MLX5_QP_OPTPAR_RETRY_COUNT = 1 << 12,
69 MLX5_QP_OPTPAR_RNR_RETRY = 1 << 13,
70 MLX5_QP_OPTPAR_ACK_TIMEOUT = 1 << 14,
71 MLX5_QP_OPTPAR_PRI_PORT = 1 << 16,
72 MLX5_QP_OPTPAR_SRQN = 1 << 18,
73 MLX5_QP_OPTPAR_CQN_RCV = 1 << 19,
74 MLX5_QP_OPTPAR_DC_HS = 1 << 20,
75 MLX5_QP_OPTPAR_DC_KEY = 1 << 21,
/* QP hardware states (enum opener not visible in this extract).
 * Note: value 8 is intentionally skipped between SQ_DRAINING (7) and
 * SUSPENDED (9) in the visible members.
 */
79 MLX5_QP_STATE_RST = 0,
80 MLX5_QP_STATE_INIT = 1,
81 MLX5_QP_STATE_RTR = 2,
82 MLX5_QP_STATE_RTS = 3,
83 MLX5_QP_STATE_SQER = 4,
84 MLX5_QP_STATE_SQD = 5,
85 MLX5_QP_STATE_ERR = 6,
86 MLX5_QP_STATE_SQ_DRAINING = 7,
87 MLX5_QP_STATE_SUSPENDED = 9,
/* QP service/transport types (tail of the enum; values 0x0-0x7, e.g.
 * RC/UC/UD/XRC referenced by mlx5_qp_type_str(), precede this extract).
 * The members are listed out of numeric order (0xe before 0xd/0xc) in
 * the original.
 */
100 MLX5_QP_ST_QP1 = 0x8,
101 MLX5_QP_ST_RAW_ETHERTYPE = 0x9,
102 MLX5_QP_ST_RAW_IPV6 = 0xa,
103 MLX5_QP_ST_SNIFFER = 0xb,
104 MLX5_QP_ST_SYNC_UMR = 0xe,
105 MLX5_QP_ST_PTP_1588 = 0xd,
106 MLX5_QP_ST_REG_UMR = 0xc,
/* Path-migration states (separate enum; opener/closer truncated). */
111 MLX5_QP_PM_MIGRATED = 0x3,
112 MLX5_QP_PM_ARMED = 0x0,
113 MLX5_QP_PM_REARM = 0x1
/* Receive-queue type selector, encoded in bits 24-25 of a control word. */
117 MLX5_NON_ZERO_RQ = 0 << 24,
118 MLX5_SRQ_RQ = 1 << 24,
119 MLX5_CRQ_RQ = 2 << 24,
120 MLX5_ZERO_LEN_RQ = 3 << 24
/* QP context access bits.  The S* (send-side) and R* (receive-side)
 * groups reuse bit positions 15/14/13 — NOTE(review): presumably they
 * live in different context fields; confirm against the full header.
 */
125 MLX5_QP_BIT_SRE = 1 << 15,
126 MLX5_QP_BIT_SWE = 1 << 14,
127 MLX5_QP_BIT_SAE = 1 << 13,
129 MLX5_QP_BIT_RRE = 1 << 15,
130 MLX5_QP_BIT_RWE = 1 << 14,
131 MLX5_QP_BIT_RAE = 1 << 13,
132 MLX5_QP_BIT_RIC = 1 << 4,
133 MLX5_QP_BIT_CC_SLAVE_RECV = 1 << 2,
134 MLX5_QP_BIT_CC_SLAVE_SEND = 1 << 1,
135 MLX5_QP_BIT_CC_MASTER = 1 << 0
/* WQE control-segment completion/solicit flags. */
139 MLX5_WQE_CTRL_CQ_UPDATE = 2 << 2,
140 MLX5_WQE_CTRL_CQ_UPDATE_AND_EQE = 3 << 2,
141 MLX5_WQE_CTRL_SOLICITED = 1 << 1,
/* Send WQE geometry: 16-byte data segments, 64-byte basic blocks. */
145 MLX5_SEND_WQE_DS = 16,
146 MLX5_SEND_WQE_BB = 64,
/* Data segments per basic block = 64 / 16 = 4. */
149 #define MLX5_SEND_WQEBB_NUM_DS (MLX5_SEND_WQE_BB / MLX5_SEND_WQE_DS)
152 MLX5_SEND_WQE_MAX_WQEBBS = 16,
/* FMR permission bits (bits 27-31 of the segment's flags word).
 * NOTE(review): `1 << 31` overflows a signed int (undefined behavior
 * per ISO C when used as an enum constant expression); the kernel
 * tolerates this, but `1U << 31` would be strictly correct.
 */
156 MLX5_WQE_FMR_PERM_LOCAL_READ = 1 << 27,
157 MLX5_WQE_FMR_PERM_LOCAL_WRITE = 1 << 28,
158 MLX5_WQE_FMR_PERM_REMOTE_READ = 1 << 29,
159 MLX5_WQE_FMR_PERM_REMOTE_WRITE = 1 << 30,
160 MLX5_WQE_FMR_PERM_ATOMIC = 1 << 31
/* Fence modes, encoded in bits 5-7. */
164 MLX5_FENCE_MODE_NONE = 0 << 5,
165 MLX5_FENCE_MODE_INITIATOR_SMALL = 1 << 5,
166 MLX5_FENCE_MODE_STRONG_ORDERING = 3 << 5,
167 MLX5_FENCE_MODE_SMALL_AND_FENCE = 4 << 5,
/* Create-QP feature flags. */
171 MLX5_QP_LAT_SENSITIVE = 1 << 28,
172 MLX5_QP_BLOCK_MCAST = 1 << 30,
173 MLX5_QP_ENABLE_SIG = 1 << 31,
/* Per-WQE flags (separate enum in the original). */
182 MLX5_FLAGS_INLINE = 1<<7,
183 MLX5_FLAGS_CHECK_FREE = 1<<5,
/* NOTE(review): every struct body in this region is truncated by the
 * extract (original line numbers jump); members and closing braces are
 * missing.  Do not edit these wire-format definitions without the full
 * file — field offsets are hardware ABI.
 */
186 struct mlx5_wqe_fmr_seg {
/* Send WQE control segment; opmod_idx_opcode packs opmod/index/opcode
 * per the MLX5_WQE_CTRL_* masks below. */
197 struct mlx5_wqe_ctrl_seg {
198 __be32 opmod_idx_opcode;
206 #define MLX5_WQE_CTRL_DS_MASK 0x3f
207 #define MLX5_WQE_CTRL_QPN_MASK 0xffffff00
208 #define MLX5_WQE_CTRL_QPN_SHIFT 8
209 #define MLX5_WQE_DS_UNITS 16
210 #define MLX5_WQE_CTRL_OPCODE_MASK 0xff
211 #define MLX5_WQE_CTRL_WQE_INDEX_MASK 0x00ffff00
212 #define MLX5_WQE_CTRL_WQE_INDEX_SHIFT 8
/* Ethernet-segment checksum offload flags. */
215 MLX5_ETH_WQE_L3_INNER_CSUM = 1 << 4,
216 MLX5_ETH_WQE_L4_INNER_CSUM = 1 << 5,
217 MLX5_ETH_WQE_L3_CSUM = 1 << 6,
218 MLX5_ETH_WQE_L4_CSUM = 1 << 7,
221 struct mlx5_wqe_eth_seg {
227 __be16 inline_hdr_sz;
228 u8 inline_hdr_start[2];
231 struct mlx5_wqe_xrc_seg {
236 struct mlx5_wqe_masked_atomic_seg {
239 __be64 swap_add_mask;
266 struct mlx5_wqe_datagram_seg {
270 struct mlx5_wqe_raddr_seg {
276 struct mlx5_wqe_atomic_seg {
281 struct mlx5_wqe_data_seg {
/* UMR control segment; klm/bsf sizes are in 16-byte octowords. */
287 struct mlx5_wqe_umr_ctrl_seg {
290 __be16 klm_octowords;
291 __be16 bsf_octowords;
296 struct mlx5_seg_set_psv {
300 __be32 transient_sig;
304 struct mlx5_seg_get_psv {
312 struct mlx5_seg_check_psv {
314 __be16 err_coalescing_op;
318 __be16 xport_err_mask;
326 struct mlx5_rwqe_sig {
332 struct mlx5_wqe_signature_seg {
338 #define MLX5_WQE_INLINE_SEG_BYTE_COUNT_MASK 0x3ff
340 struct mlx5_wqe_inline_seg {
/* BSF (byte-stream format) signature descriptors. */
349 struct mlx5_bsf_inl {
356 u8 dif_inc_ref_guard_check;
357 __be16 dif_app_bitmask_check;
361 struct mlx5_bsf_basic {
373 __be32 raw_data_size;
377 struct mlx5_bsf_ext {
378 __be32 t_init_gen_pro_size;
379 __be32 rsvd_epi_size;
383 struct mlx5_bsf_inl w_inl;
384 struct mlx5_bsf_inl m_inl;
393 struct mlx5_stride_block_entry {
400 struct mlx5_stride_block_ctrl_seg {
401 __be32 bcount_per_cycle;
/* Page-fault event flags (closing brace truncated in this extract). */
408 enum mlx5_pagefault_flags {
409 MLX5_PFAULT_REQUESTOR = 1 << 0,
410 MLX5_PFAULT_WRITE = 1 << 1,
411 MLX5_PFAULT_RDMA = 1 << 2,
414 /* Contains the details of a pagefault. */
/* NOTE(review): struct body truncated; only the flags member and the
 * original explanatory comments survive in this extract. */
415 struct mlx5_pagefault {
418 enum mlx5_pagefault_flags flags;
420 /* Initiator or send message responder pagefault details. */
422 /* Received packet size, only valid for responders. */
425 * WQE index. Refers to either the send queue or
426 * receive queue, according to event_subtype.
430 /* RDMA responder pagefault details */
434 * Received packet size, minimal size page fault
435 * resolution required for forward progress.
/* Core QP object; `common` must stay first so the struct can be
 * treated as a generic mlx5 resource.  Body truncated in this view. */
444 struct mlx5_core_qp {
445 struct mlx5_core_rsc_common common; /* must be first */
446 void (*event) (struct mlx5_core_qp *, int);
447 void (*pfault_handler)(struct mlx5_core_qp *, struct mlx5_pagefault *);
449 struct mlx5_rsc_debug *dbg;
/* NOTE(review): the qp_path/qp_context and all mailbox struct bodies
 * below are truncated by the extract; do not edit without the full
 * file — these map command-interface wire layouts.
 */
453 struct mlx5_qp_path {
465 __be32 tclass_flowlabel;
/* Hardware QP context; embedded in create/modify/query mailboxes. */
478 struct mlx5_qp_context {
484 __be32 qp_counter_set_usr_page;
486 __be32 log_pg_sz_remote_qpn;
487 struct mlx5_qp_path pri_path;
488 struct mlx5_qp_path alt_path;
491 __be32 next_send_psn;
494 __be32 last_acked_psn;
497 __be32 rnr_nextrecvpsn;
/* Hardware vs. software producer counters for SQ and RQ. */
504 __be16 hw_sq_wqe_counter;
505 __be16 sw_sq_wqe_counter;
506 __be16 hw_rcyclic_byte_counter;
507 __be16 hw_rq_counter;
508 __be16 sw_rcyclic_byte_counter;
509 __be16 sw_rq_counter;
514 __be64 dc_access_key;
/* Firmware command mailboxes: each pairs an inbox/outbox header with
 * the command-specific payload. */
518 struct mlx5_create_qp_mbox_in {
519 struct mlx5_inbox_hdr hdr;
522 __be32 opt_param_mask;
524 struct mlx5_qp_context ctx;
529 struct mlx5_create_qp_mbox_out {
530 struct mlx5_outbox_hdr hdr;
535 struct mlx5_destroy_qp_mbox_in {
536 struct mlx5_inbox_hdr hdr;
541 struct mlx5_destroy_qp_mbox_out {
542 struct mlx5_outbox_hdr hdr;
546 struct mlx5_modify_qp_mbox_in {
547 struct mlx5_inbox_hdr hdr;
552 struct mlx5_qp_context ctx;
555 struct mlx5_modify_qp_mbox_out {
556 struct mlx5_outbox_hdr hdr;
560 struct mlx5_query_qp_mbox_in {
561 struct mlx5_inbox_hdr hdr;
566 struct mlx5_query_qp_mbox_out {
567 struct mlx5_outbox_hdr hdr;
571 struct mlx5_qp_context ctx;
576 struct mlx5_conf_sqp_mbox_in {
577 struct mlx5_inbox_hdr hdr;
583 struct mlx5_conf_sqp_mbox_out {
584 struct mlx5_outbox_hdr hdr;
588 struct mlx5_alloc_xrcd_mbox_in {
589 struct mlx5_inbox_hdr hdr;
593 struct mlx5_alloc_xrcd_mbox_out {
594 struct mlx5_outbox_hdr hdr;
599 struct mlx5_dealloc_xrcd_mbox_in {
600 struct mlx5_inbox_hdr hdr;
605 struct mlx5_dealloc_xrcd_mbox_out {
606 struct mlx5_outbox_hdr hdr;
/* Look up a QP object by QP number in the device's radix tree; returns
 * NULL when absent.  NOTE(review): function braces are truncated in
 * this extract; locking requirements (presumably the qp_table lock)
 * are not visible — confirm against callers.
 */
610 static inline struct mlx5_core_qp *__mlx5_qp_lookup(struct mlx5_core_dev *dev, u32 qpn)
612 return radix_tree_lookup(&dev->priv.qp_table.tree, qpn);
/* Look up a memory region by key in the device's MR radix tree. */
615 static inline struct mlx5_core_mr *__mlx5_mr_lookup(struct mlx5_core_dev *dev, u32 key)
617 return radix_tree_lookup(&dev->priv.mr_table.tree, key);
/* Page-fault-resume command mailboxes (bodies truncated in extract). */
620 struct mlx5_page_fault_resume_mbox_in {
621 struct mlx5_inbox_hdr hdr;
626 struct mlx5_page_fault_resume_mbox_out {
627 struct mlx5_outbox_hdr hdr;
/* Core QP command API, implemented in the mlx5_core driver. */
631 int mlx5_core_create_qp(struct mlx5_core_dev *dev,
632 struct mlx5_core_qp *qp,
633 struct mlx5_create_qp_mbox_in *in,
/* Drive the QP state machine from cur_state to new_state. */
635 int mlx5_core_qp_modify(struct mlx5_core_dev *dev, enum mlx5_qp_state cur_state,
636 enum mlx5_qp_state new_state,
637 struct mlx5_modify_qp_mbox_in *in, int sqd_event,
638 struct mlx5_core_qp *qp);
639 int mlx5_core_destroy_qp(struct mlx5_core_dev *dev,
640 struct mlx5_core_qp *qp);
641 int mlx5_core_qp_query(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp,
642 struct mlx5_query_qp_mbox_out *out, int outlen);
644 int mlx5_core_xrcd_alloc(struct mlx5_core_dev *dev, u32 *xrcdn);
645 int mlx5_core_xrcd_dealloc(struct mlx5_core_dev *dev, u32 xrcdn);
646 void mlx5_init_qp_table(struct mlx5_core_dev *dev);
647 void mlx5_cleanup_qp_table(struct mlx5_core_dev *dev);
648 int mlx5_debug_qp_add(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp);
649 void mlx5_debug_qp_remove(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp);
/* NOTE(review): the matching #endif for this #ifdef is not visible in
 * this extract. */
650 #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
651 int mlx5_core_page_fault_resume(struct mlx5_core_dev *dev, u32 qpn,
652 u8 context, int error);
/* Tracked RQ/SQ create/destroy (RQ/SQ modeled as mlx5_core_qp). */
654 int mlx5_core_create_rq_tracked(struct mlx5_core_dev *dev, u32 *in, int inlen,
655 struct mlx5_core_qp *rq);
656 void mlx5_core_destroy_rq_tracked(struct mlx5_core_dev *dev,
657 struct mlx5_core_qp *rq);
658 int mlx5_core_create_sq_tracked(struct mlx5_core_dev *dev, u32 *in, int inlen,
659 struct mlx5_core_qp *sq);
660 void mlx5_core_destroy_sq_tracked(struct mlx5_core_dev *dev,
661 struct mlx5_core_qp *sq);
663 static inline const char *mlx5_qp_type_str(int type)
666 case MLX5_QP_ST_RC: return "RC";
667 case MLX5_QP_ST_UC: return "C";
668 case MLX5_QP_ST_UD: return "UD";
669 case MLX5_QP_ST_XRC: return "XRC";
670 case MLX5_QP_ST_MLX: return "MLX";
671 case MLX5_QP_ST_QP0: return "QP0";
672 case MLX5_QP_ST_QP1: return "QP1";
673 case MLX5_QP_ST_RAW_ETHERTYPE: return "RAW_ETHERTYPE";
674 case MLX5_QP_ST_RAW_IPV6: return "RAW_IPV6";
675 case MLX5_QP_ST_SNIFFER: return "SNIFFER";
676 case MLX5_QP_ST_SYNC_UMR: return "SYNC_UMR";
677 case MLX5_QP_ST_PTP_1588: return "PTP_1588";
678 case MLX5_QP_ST_REG_UMR: return "REG_UMR";
679 default: return "Invalid transport type";
/* Map an MLX5_QP_STATE_* value to a printable name.
 * NOTE(review): the switch scaffolding and most `return "...";` lines
 * are missing from this extract (only the SQ_DRAINING return and the
 * default survive), so the runtime strings cannot be reconstructed
 * here without guessing — restore from the full file.
 */
683 static inline const char *mlx5_qp_state_str(int state)
686 case MLX5_QP_STATE_RST:
688 case MLX5_QP_STATE_INIT:
690 case MLX5_QP_STATE_RTR:
692 case MLX5_QP_STATE_RTS:
694 case MLX5_QP_STATE_SQER:
696 case MLX5_QP_STATE_SQD:
698 case MLX5_QP_STATE_ERR:
700 case MLX5_QP_STATE_SQ_DRAINING:
701 return "SQ_DRAINING";
702 case MLX5_QP_STATE_SUSPENDED:
704 default: return "Invalid QP state";
708 #endif /* MLX5_QP_H */