/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef MLX5_IB_H
#define MLX5_IB_H

#include <linux/kernel.h>
#include <linux/sched.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_smi.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/cq.h>
#include <linux/mlx5/fs.h>
#include <linux/mlx5/qp.h>
#include <linux/types.h>
#include <linux/mlx5/transobj.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/mlx5-abi.h>
#include <rdma/uverbs_ioctl.h>
#include <rdma/mlx5_user_ioctl_cmds.h>
#include <rdma/mlx5_user_ioctl_verbs.h>
#define mlx5_ib_dbg(_dev, format, arg...)                                      \
	dev_dbg(&(_dev)->ib_dev.dev, "%s:%d:(pid %d): " format, __func__,     \
		__LINE__, current->pid, ##arg)

#define mlx5_ib_err(_dev, format, arg...)                                      \
	dev_err(&(_dev)->ib_dev.dev, "%s:%d:(pid %d): " format, __func__,     \
		__LINE__, current->pid, ##arg)

#define mlx5_ib_warn(_dev, format, arg...)                                     \
	dev_warn(&(_dev)->ib_dev.dev, "%s:%d:(pid %d): " format, __func__,    \
		 __LINE__, current->pid, ##arg)
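/*
 * Illustrative usage only: each macro routes through the matching dev_*()
 * helper, so a call such as
 *
 *	mlx5_ib_warn(dev, "failed to create CQ, err %d\n", err);
 *
 * prints the message prefixed with the device name plus the calling
 * function, line number and current pid.
 */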
#define MLX5_IB_DEFAULT_UIDX 0xffffff
#define MLX5_USER_ASSIGNED_UIDX_MASK __mlx5_mask(qpc, user_index)

#define MLX5_MKEY_PAGE_SHIFT_MASK __mlx5_mask(mkc, log_page_size)
enum {
	MLX5_IB_MMAP_OFFSET_START = 9,
	MLX5_IB_MMAP_OFFSET_END = 255,
};

enum {
	MLX5_IB_MMAP_CMD_SHIFT	= 8,
	MLX5_IB_MMAP_CMD_MASK	= 0xff,
};

enum {
	MLX5_RES_SCAT_DATA32_CQE	= 0x1,
	MLX5_RES_SCAT_DATA64_CQE	= 0x2,
	MLX5_REQ_SCAT_DATA32_CQE	= 0x11,
	MLX5_REQ_SCAT_DATA64_CQE	= 0x22,
};
enum mlx5_ib_mad_ifc_flags {
	MLX5_MAD_IFC_IGNORE_MKEY	= 1,
	MLX5_MAD_IFC_IGNORE_BKEY	= 2,
	MLX5_MAD_IFC_NET_VIEW		= 4,
};
enum {
	MLX5_CROSS_CHANNEL_BFREG = 0,
};

enum {
	MLX5_TM_MAX_RNDV_MSG_SIZE	= 64,
};

enum {
	MLX5_IB_INVALID_UAR_INDEX	= BIT(31),
	MLX5_IB_INVALID_BFREG		= BIT(31),
};
enum {
	MLX5_MAX_MEMIC_PAGES = 0x100,
	MLX5_MEMIC_ALLOC_SIZE_MASK = 0x3f,
};

enum {
	MLX5_MEMIC_BASE_ALIGN	= 6,
	MLX5_MEMIC_BASE_SIZE	= 1 << MLX5_MEMIC_BASE_ALIGN,
};
enum mlx5_ib_mmap_type {
	MLX5_IB_MMAP_TYPE_MEMIC = 1,
	MLX5_IB_MMAP_TYPE_VAR = 2,
	MLX5_IB_MMAP_TYPE_UAR_WC = 3,
	MLX5_IB_MMAP_TYPE_UAR_NC = 4,
};
struct mlx5_bfreg_info {
	int			num_low_latency_bfregs;

	/* protect bfreg allocation data structs */
	struct mutex		lock;

	u8			lib_uar_4k : 1;
	u32			num_static_sys_pages;
	u32			total_num_bfregs;
};
struct mlx5_ib_ucontext {
	struct ib_ucontext	ibucontext;
	struct list_head	db_page_list;

	/* protect doorbell record alloc/free */
	struct mutex		db_page_mutex;
	struct mlx5_bfreg_info	bfregi;
	u8			cqe_version;
	/* Transport Domain number */
	u32			tdn;

	/* For RoCE LAG TX affinity */
	atomic_t		tx_port_affinity;
};
static inline struct mlx5_ib_ucontext *to_mucontext(struct ib_ucontext *ibucontext)
{
	return container_of(ibucontext, struct mlx5_ib_ucontext, ibucontext);
}
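/*
 * All of the to_*() helpers in this file follow the same pattern: the
 * driver embeds the generic ib_* object inside its private struct, so
 * container_of() recovers the private struct from the pointer uverbs
 * hands back. A hypothetical caller (sketch only):
 *
 *	static u32 example_total_bfregs(struct ib_ucontext *ibucontext)
 *	{
 *		struct mlx5_ib_ucontext *ctx = to_mucontext(ibucontext);
 *
 *		return ctx->bfregi.total_num_bfregs;
 *	}
 */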
enum {
	MLX5_IB_FLOW_ACTION_MODIFY_HEADER,
	MLX5_IB_FLOW_ACTION_PACKET_REFORMAT,
	MLX5_IB_FLOW_ACTION_DECAP,
};
#define MLX5_IB_FLOW_MCAST_PRIO		(MLX5_BY_PASS_NUM_PRIOS - 1)
#define MLX5_IB_FLOW_LAST_PRIO		(MLX5_BY_PASS_NUM_REGULAR_PRIOS - 1)
#if (MLX5_IB_FLOW_LAST_PRIO <= 0)
#error "Invalid number of bypass priorities"
#endif
#define MLX5_IB_FLOW_LEFTOVERS_PRIO	(MLX5_IB_FLOW_MCAST_PRIO + 1)

#define MLX5_IB_NUM_FLOW_FT		(MLX5_IB_FLOW_LEFTOVERS_PRIO + 1)
#define MLX5_IB_NUM_SNIFFER_FTS		2
#define MLX5_IB_NUM_EGRESS_FTS		1
struct mlx5_ib_flow_prio {
	struct mlx5_flow_table		*flow_table;
	unsigned int			refcount;
};
struct mlx5_ib_flow_handler {
	struct list_head		list;
	struct ib_flow			ibflow;
	struct mlx5_ib_flow_prio	*prio;
	struct mlx5_flow_handle		*rule;
	struct ib_counters		*ibcounters;
	struct mlx5_ib_dev		*dev;
	struct mlx5_ib_flow_matcher	*flow_matcher;
};
struct mlx5_ib_flow_matcher {
	struct mlx5_ib_match_params matcher_mask;
	enum mlx5_ib_flow_type	flow_type;
	enum mlx5_flow_namespace_type ns_type;
	struct mlx5_core_dev	*mdev;
	u8			match_criteria_enable;
};
struct mlx5_ib_pp {
	struct mlx5_core_dev *mdev;
};
struct mlx5_ib_flow_db {
	struct mlx5_ib_flow_prio	prios[MLX5_IB_NUM_FLOW_FT];
	struct mlx5_ib_flow_prio	egress_prios[MLX5_IB_NUM_FLOW_FT];
	struct mlx5_ib_flow_prio	sniffer[MLX5_IB_NUM_SNIFFER_FTS];
	struct mlx5_ib_flow_prio	egress[MLX5_IB_NUM_EGRESS_FTS];
	struct mlx5_ib_flow_prio	fdb;
	struct mlx5_ib_flow_prio	rdma_rx[MLX5_IB_NUM_FLOW_FT];
	struct mlx5_ib_flow_prio	rdma_tx[MLX5_IB_NUM_FLOW_FT];
	struct mlx5_flow_table		*lag_demux_ft;
	/* Protect flow steering bypass flow tables
	 * when add/del flow rules.
	 * Only a single add/removal of a flow steering rule may be
	 * in flight at a time.
	 */
	struct mutex			lock;
};
/* Use macros here so that we don't have to duplicate
 * enum ib_send_flags and enum ib_qp_type for low-level driver use.
 */
#define MLX5_IB_SEND_UMR_ENABLE_MR	       (IB_SEND_RESERVED_START << 0)
#define MLX5_IB_SEND_UMR_DISABLE_MR	       (IB_SEND_RESERVED_START << 1)
#define MLX5_IB_SEND_UMR_FAIL_IF_FREE	       (IB_SEND_RESERVED_START << 2)
#define MLX5_IB_SEND_UMR_UPDATE_XLT	       (IB_SEND_RESERVED_START << 3)
#define MLX5_IB_SEND_UMR_UPDATE_TRANSLATION    (IB_SEND_RESERVED_START << 4)
#define MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS	IB_SEND_RESERVED_END

#define MLX5_IB_QPT_REG_UMR	IB_QPT_RESERVED1
/*
 * IB_QPT_GSI creates the software wrapper around GSI, and MLX5_IB_QPT_HW_GSI
 * creates the actual hardware QP.
 */
#define MLX5_IB_QPT_HW_GSI	IB_QPT_RESERVED2
#define MLX5_IB_QPT_DCI		IB_QPT_RESERVED3
#define MLX5_IB_QPT_DCT		IB_QPT_RESERVED4
#define MLX5_IB_WR_UMR		IB_WR_RESERVED1

#define MLX5_IB_UMR_OCTOWORD	       16
#define MLX5_IB_UMR_XLT_ALIGNMENT      64
#define MLX5_IB_UPD_XLT_ZAP	      BIT(0)
#define MLX5_IB_UPD_XLT_ENABLE	      BIT(1)
#define MLX5_IB_UPD_XLT_ATOMIC	      BIT(2)
#define MLX5_IB_UPD_XLT_ADDR	      BIT(3)
#define MLX5_IB_UPD_XLT_PD	      BIT(4)
#define MLX5_IB_UPD_XLT_ACCESS	      BIT(5)
#define MLX5_IB_UPD_XLT_INDIRECT      BIT(6)
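/*
 * Illustrative only: the MLX5_IB_UPD_XLT_* bits are OR'ed together and
 * passed as the flags argument of mlx5_ib_update_xlt() (declared below).
 * For example, zapping a range of translation entries without sleeping,
 * as an invalidation path might, could look like:
 *
 *	err = mlx5_ib_update_xlt(mr, idx, npages, page_shift,
 *				 MLX5_IB_UPD_XLT_ZAP | MLX5_IB_UPD_XLT_ATOMIC);
 */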
/* Private QP creation flags to be passed in ib_qp_init_attr.create_flags.
 *
 * These flags are intended for internal use by the mlx5_ib driver, and they
 * rely on the range reserved for that use in the ib_qp_create_flags enum.
 */
#define MLX5_IB_QP_CREATE_SQPN_QP1	IB_QP_CREATE_RESERVED_START
#define MLX5_IB_QP_CREATE_WC_TEST	(IB_QP_CREATE_RESERVED_START << 1)
enum mlx5_ib_rq_flags {
	MLX5_IB_RQ_CVLAN_STRIPPING	= 1 << 0,
	MLX5_IB_RQ_PCI_WRITE_END_PADDING = 1 << 1,
};
struct mlx5_ib_wq {
	struct mlx5_frag_buf_ctrl fbc;
	struct wr_list	       *w_list;

	/* serialize post to the work queue */
	spinlock_t		lock;
};
enum mlx5_ib_wq_flags {
	MLX5_IB_WQ_FLAGS_DELAY_DROP = 0x1,
	MLX5_IB_WQ_FLAGS_STRIDING_RQ = 0x2,
};

#define MLX5_MIN_SINGLE_WQE_LOG_NUM_STRIDES 9
#define MLX5_MAX_SINGLE_WQE_LOG_NUM_STRIDES 16
#define MLX5_MIN_SINGLE_STRIDE_LOG_NUM_BYTES 6
#define MLX5_MAX_SINGLE_STRIDE_LOG_NUM_BYTES 13
#define MLX5_EXT_MIN_SINGLE_WQE_LOG_NUM_STRIDES 3
struct mlx5_ib_rwq {
	struct mlx5_core_qp	core_qp;
	u32			two_byte_shift_en;
	u32			single_stride_log_num_of_bytes;
	struct ib_umem		*umem;
	unsigned int		page_shift;
	u32			create_flags; /* Use enum mlx5_ib_wq_flags */
};
struct mlx5_ib_rwq_ind_table {
	struct ib_rwq_ind_table ib_rwq_ind_tbl;
};

struct mlx5_ib_ubuffer {
	struct ib_umem	       *umem;
};

struct mlx5_ib_qp_base {
	struct mlx5_ib_qp	*container_mibqp;
	struct mlx5_core_qp	mqp;
	struct mlx5_ib_ubuffer	ubuffer;
};

struct mlx5_ib_qp_trans {
	struct mlx5_ib_qp_base	base;
};
struct mlx5_ib_rss_qp {
};

struct mlx5_ib_rq {
	struct mlx5_ib_qp_base base;
	struct mlx5_ib_wq	*rq;
	struct mlx5_ib_ubuffer	ubuffer;
	struct mlx5_db		*doorbell;
};
struct mlx5_ib_sq {
	struct mlx5_ib_qp_base base;
	struct mlx5_ib_wq	*sq;
	struct mlx5_ib_ubuffer  ubuffer;
	struct mlx5_db		*doorbell;
	struct mlx5_flow_handle	*flow_rule;
};

struct mlx5_ib_raw_packet_qp {
	struct mlx5_ib_sq sq;
	struct mlx5_ib_rq rq;
};
struct mlx5_bf {
	unsigned long		offset;
	struct mlx5_sq_bfreg   *bfreg;
};

struct mlx5_ib_dct {
	struct mlx5_core_dct	mdct;
};
struct mlx5_ib_qp {
	struct ib_qp		ibqp;
	union {
		struct mlx5_ib_qp_trans trans_qp;
		struct mlx5_ib_raw_packet_qp raw_packet_qp;
		struct mlx5_ib_rss_qp rss_qp;
		struct mlx5_ib_dct dct;
	};
	struct mlx5_frag_buf	buf;

	struct mlx5_ib_wq	rq;

	struct mlx5_ib_wq	sq;

	/* serialize qp state modifications */
	struct mutex		mutex;

	/* only for user space QPs. For kernel
	 * we have it from the bf object
	 */
	int			bfregn;

	struct list_head	qps_list;
	struct list_head	cq_recv_list;
	struct list_head	cq_send_list;
	struct mlx5_rate_limit	rl;

	/* storage for qp sub type when core qp type is IB_QPT_DRIVER */
	enum ib_qp_type		qp_sub_type;
	/* A flag to indicate that a new counter has been configured but is
	 * not yet in effect
	 */
	u32			counter_pending;
};
struct mlx5_ib_cq_buf {
	struct mlx5_frag_buf_ctrl fbc;
	struct mlx5_frag_buf    frag_buf;
	struct ib_umem		*umem;
};
enum mlx5_ib_qp_flags {
	MLX5_IB_QP_LSO                          = IB_QP_CREATE_IPOIB_UD_LSO,
	MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK     = IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK,
	MLX5_IB_QP_CROSS_CHANNEL		= IB_QP_CREATE_CROSS_CHANNEL,
	MLX5_IB_QP_MANAGED_SEND			= IB_QP_CREATE_MANAGED_SEND,
	MLX5_IB_QP_MANAGED_RECV			= IB_QP_CREATE_MANAGED_RECV,
	MLX5_IB_QP_SIGNATURE_HANDLING           = 1 << 5,
	/* QP uses 1 as its source QP number */
	MLX5_IB_QP_SQPN_QP1			= 1 << 6,
	MLX5_IB_QP_CAP_SCATTER_FCS		= 1 << 7,
	MLX5_IB_QP_RSS				= 1 << 8,
	MLX5_IB_QP_CVLAN_STRIPPING		= 1 << 9,
	MLX5_IB_QP_UNDERLAY			= 1 << 10,
	MLX5_IB_QP_PCI_WRITE_END_PADDING	= 1 << 11,
	MLX5_IB_QP_TUNNEL_OFFLOAD		= 1 << 12,
	MLX5_IB_QP_PACKET_BASED_CREDIT		= 1 << 13,
};
struct mlx5_umr_wr {
	struct ib_send_wr		wr;
	unsigned int			page_shift;
	unsigned int			xlt_size;
	u8				ignore_free_state:1;
};

static inline const struct mlx5_umr_wr *umr_wr(const struct ib_send_wr *wr)
{
	return container_of(wr, struct mlx5_umr_wr, wr);
}
struct mlx5_shared_mr_info {
	struct ib_umem		*umem;
};

enum mlx5_ib_cq_pr_flags {
	MLX5_IB_CQ_PR_FLAGS_CQE_128_PAD	= 1 << 0,
};
struct mlx5_ib_cq {
	struct ib_cq		ibcq;
	struct mlx5_core_cq	mcq;
	struct mlx5_ib_cq_buf	buf;

	/* serialize access to the CQ */
	spinlock_t		lock;

	struct mutex		resize_mutex;
	struct mlx5_ib_cq_buf  *resize_buf;
	struct ib_umem	       *resize_umem;

	struct list_head	list_send_qp;
	struct list_head	list_recv_qp;

	struct list_head	wc_list;
	enum ib_cq_notify_flags notify_flags;
	struct work_struct	notify_work;
	u16			private_flags; /* Use mlx5_ib_cq_pr_flags */
};
struct mlx5_ib_wc {
	struct list_head list;
};
struct mlx5_ib_srq {
	struct ib_srq		ibsrq;
	struct mlx5_core_srq	msrq;
	struct mlx5_frag_buf	buf;
	struct mlx5_frag_buf_ctrl fbc;

	/* protect SRQ handling */
	spinlock_t		lock;

	struct ib_umem	       *umem;
	/* serialize arming a SRQ */
	struct mutex		mutex;
};

struct mlx5_ib_xrcd {
	struct ib_xrcd		ibxrcd;
};
enum mlx5_ib_mtt_access_flags {
	MLX5_IB_MTT_READ  = (1 << 0),
	MLX5_IB_MTT_WRITE = (1 << 1),
};
struct mlx5_user_mmap_entry {
	struct rdma_user_mmap_entry rdma_entry;
};

struct mlx5_ib_dm {
	struct ib_dm		ibdm;
	phys_addr_t		dev_addr;
	/* other DM-type-specific params should be added here */
	struct mlx5_user_mmap_entry mentry;
};
#define MLX5_IB_MTT_PRESENT (MLX5_IB_MTT_READ | MLX5_IB_MTT_WRITE)

#define MLX5_IB_DM_MEMIC_ALLOWED_ACCESS (IB_ACCESS_LOCAL_WRITE   |\
					 IB_ACCESS_REMOTE_WRITE  |\
					 IB_ACCESS_REMOTE_READ   |\
					 IB_ACCESS_REMOTE_ATOMIC |\
					 IB_ZERO_BASED)

#define MLX5_IB_DM_SW_ICM_ALLOWED_ACCESS (IB_ACCESS_LOCAL_WRITE   |\
					  IB_ACCESS_REMOTE_WRITE  |\
					  IB_ACCESS_REMOTE_READ   |\
					  IB_ZERO_BASED)

#define mlx5_update_odp_stats(mr, counter_name, value)		\
	atomic64_add(value, &((mr)->odp_stats.counter_name))
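/*
 * Usage sketch (assumes a "faults" counter in struct ib_odp_counters, as
 * maintained by the ODP page-fault path):
 *
 *	mlx5_update_odp_stats(mr, faults, npages);
 */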
struct mlx5_ib_mr {
	struct ib_mr		ibmr;
	struct mlx5_core_mkey	mmkey;
	struct ib_umem	       *umem;
	struct mlx5_shared_mr_info	*smr_info;
	struct list_head	list;
	struct mlx5_cache_ent  *cache_ent;
	struct mlx5_ib_dev     *dev;
	u32 out[MLX5_ST_SZ_DW(create_mkey_out)];
	struct mlx5_core_sig_ctx    *sig;
	int			access_flags; /* Needed for rereg MR */

	struct mlx5_ib_mr      *parent;
	/* Needed for IB_MR_TYPE_INTEGRITY */
	struct mlx5_ib_mr      *pi_mr;
	struct mlx5_ib_mr      *klm_mr;
	struct mlx5_ib_mr      *mtt_mr;

	/* For ODP and implicit */
	atomic_t		num_deferred_work;
	wait_queue_head_t       q_deferred_work;
	struct xarray		implicit_children;
	union {
		struct list_head elm;
		struct work_struct work;
	} odp_destroy;
	struct ib_odp_counters	odp_stats;
	bool			is_odp_implicit;

	struct mlx5_async_work  cb_work;
};
static inline bool is_odp_mr(struct mlx5_ib_mr *mr)
{
	return IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING) && mr->umem &&
	       mr->umem->is_odp;
}
struct mlx5_ib_mw {
	struct ib_mw		ibmw;
	struct mlx5_core_mkey	mmkey;
};

struct mlx5_ib_devx_mr {
	struct mlx5_core_mkey	mmkey;
};

struct mlx5_ib_umr_context {
	enum ib_wc_status	status;
	struct completion	done;
};

struct umr_common {
	/* control access to UMR QP */
	struct semaphore	sem;
};
struct mlx5_cache_ent {
	struct list_head	head;
	/* sync access to the cache entry */
	spinlock_t		lock;

	u8 fill_to_high_water:1;

	/*
	 * - available_mrs is the length of list head, i.e. the number of MRs
	 *   available for immediate allocation.
	 * - total_mrs is available_mrs plus all in-use MRs that could be
	 *   returned to the cache.
	 * - limit is the low water mark for available_mrs, 2 * limit is the
	 *   high water mark.
	 * - pending is the number of MRs currently being created.
	 */
	u32 total_mrs;
	u32 available_mrs;
	u32 limit;
	u32 pending;

	struct mlx5_ib_dev     *dev;
	struct work_struct	work;
	struct delayed_work	dwork;
};
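/*
 * Worked example of the water marks above (illustrative numbers only):
 * with limit = 8 the background worker tries to hold available_mrs in
 * the [8, 16] band; dropping below limit queues creation of new cache
 * MRs, while growing past 2 * limit lets the shrink path return MRs to
 * the device.
 */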
struct mlx5_mr_cache {
	struct workqueue_struct *wq;
	struct mlx5_cache_ent	ent[MAX_MR_CACHE_ENTRIES];
	unsigned long		last_add;
};

struct mlx5_ib_gsi_qp;
struct mlx5_ib_port_resources {
	struct mlx5_ib_resources *devr;
	struct mlx5_ib_gsi_qp *gsi;
	struct work_struct pkey_change_work;
};

struct mlx5_ib_resources {
	struct mlx5_ib_port_resources ports[2];
	/* Protects changes to the port resources */
	struct mutex	mutex;
};
struct mlx5_ib_counters {
	u32 num_cong_counters;
	u32 num_ext_ppcnt_counters;
};

struct mlx5_ib_multiport_info;

struct mlx5_ib_multiport {
	struct mlx5_ib_multiport_info *mpi;
	/* To be held when accessing the multiport info */
	spinlock_t mpi_lock;
};
struct mlx5_roce {
	/* Protect mlx5_ib_get_netdev from invoking dev_hold() with a NULL
	 * netdev pointer
	 */
	rwlock_t		netdev_lock;
	struct net_device	*netdev;
	struct notifier_block	nb;
	atomic_t		tx_port_affinity;
	enum ib_port_state last_port_state;
	struct mlx5_ib_dev	*dev;
};

struct mlx5_ib_port {
	struct mlx5_ib_counters cnts;
	struct mlx5_ib_multiport mp;
	struct mlx5_ib_dbg_cc_params *dbg_cc_params;
	struct mlx5_roce roce;
	struct mlx5_eswitch_rep		*rep;
};
struct mlx5_ib_dbg_param {
	struct mlx5_ib_dev	*dev;
	struct dentry		*dentry;
};
enum mlx5_ib_dbg_cc_types {
	MLX5_IB_DBG_CC_RP_CLAMP_TGT_RATE,
	MLX5_IB_DBG_CC_RP_CLAMP_TGT_RATE_ATI,
	MLX5_IB_DBG_CC_RP_TIME_RESET,
	MLX5_IB_DBG_CC_RP_BYTE_RESET,
	MLX5_IB_DBG_CC_RP_THRESHOLD,
	MLX5_IB_DBG_CC_RP_AI_RATE,
	MLX5_IB_DBG_CC_RP_MAX_RATE,
	MLX5_IB_DBG_CC_RP_HAI_RATE,
	MLX5_IB_DBG_CC_RP_MIN_DEC_FAC,
	MLX5_IB_DBG_CC_RP_MIN_RATE,
	MLX5_IB_DBG_CC_RP_RATE_TO_SET_ON_FIRST_CNP,
	MLX5_IB_DBG_CC_RP_DCE_TCP_G,
	MLX5_IB_DBG_CC_RP_DCE_TCP_RTT,
	MLX5_IB_DBG_CC_RP_RATE_REDUCE_MONITOR_PERIOD,
	MLX5_IB_DBG_CC_RP_INITIAL_ALPHA_VALUE,
	MLX5_IB_DBG_CC_RP_GD,
	MLX5_IB_DBG_CC_NP_MIN_TIME_BETWEEN_CNPS,
	MLX5_IB_DBG_CC_NP_CNP_DSCP,
	MLX5_IB_DBG_CC_NP_CNP_PRIO_MODE,
	MLX5_IB_DBG_CC_NP_CNP_PRIO,
	MLX5_IB_DBG_CC_MAX,
};

struct mlx5_ib_dbg_cc_params {
	struct mlx5_ib_dbg_param params[MLX5_IB_DBG_CC_MAX];
};
enum {
	MLX5_MAX_DELAY_DROP_TIMEOUT_MS = 100,
};

struct mlx5_ib_delay_drop {
	struct mlx5_ib_dev     *dev;
	struct work_struct	delay_drop_work;
	/* serialize setting of delay drop */
	struct mutex		lock;
	struct dentry		*dir_debugfs;
};
enum mlx5_ib_stages {
	MLX5_IB_STAGE_FLOW_DB,
	MLX5_IB_STAGE_NON_DEFAULT_CB,
	MLX5_IB_STAGE_DEVICE_RESOURCES,
	MLX5_IB_STAGE_DEVICE_NOTIFIER,
	MLX5_IB_STAGE_COUNTERS,
	MLX5_IB_STAGE_CONG_DEBUGFS,
	MLX5_IB_STAGE_PRE_IB_REG_UMR,
	MLX5_IB_STAGE_WHITELIST_UID,
	MLX5_IB_STAGE_IB_REG,
	MLX5_IB_STAGE_POST_IB_REG_UMR,
	MLX5_IB_STAGE_DELAY_DROP,
	MLX5_IB_STAGE_CLASS_ATTR,
	MLX5_IB_STAGE_MAX,
};
struct mlx5_ib_stage {
	int (*init)(struct mlx5_ib_dev *dev);
	void (*cleanup)(struct mlx5_ib_dev *dev);
};

#define STAGE_CREATE(_stage, _init, _cleanup) \
	.stage[_stage] = {.init = _init, .cleanup = _cleanup}

struct mlx5_ib_profile {
	struct mlx5_ib_stage stage[MLX5_IB_STAGE_MAX];
};
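/*
 * Sketch of how a profile is assembled with STAGE_CREATE() (hypothetical
 * callbacks, modelled on the driver's real profiles); a NULL cleanup is
 * allowed for stages that need no teardown:
 *
 *	static const struct mlx5_ib_profile example_profile = {
 *		STAGE_CREATE(MLX5_IB_STAGE_FLOW_DB,
 *			     example_stage_flow_db_init,
 *			     example_stage_flow_db_cleanup),
 *		STAGE_CREATE(MLX5_IB_STAGE_IB_REG,
 *			     example_stage_ib_reg_init,
 *			     example_stage_ib_reg_cleanup),
 *	};
 */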
struct mlx5_ib_multiport_info {
	struct list_head list;
	struct mlx5_ib_dev *ibdev;
	struct mlx5_core_dev *mdev;
	struct notifier_block mdev_events;
	struct completion unref_comp;
};
struct mlx5_ib_flow_action {
	struct ib_flow_action		ib_action;
	union {
		struct {
			struct mlx5_accel_esp_xfrm *ctx;
		} esp_aes_gcm;
		struct {
			struct mlx5_ib_dev *dev;
			union {
				struct mlx5_modify_hdr *modify_hdr;
				struct mlx5_pkt_reformat *pkt_reformat;
			};
		} flow_action_raw;
	};
};
struct mlx5_dm {
	struct mlx5_core_dev *dev;
	/* This lock is used to protect the access to the shared
	 * allocation map when concurrent requests by different
	 * processes are handled.
	 */
	spinlock_t lock;
	DECLARE_BITMAP(memic_alloc_pages, MLX5_MAX_MEMIC_PAGES);
};
struct mlx5_read_counters_attr {
	struct mlx5_fc *hw_cntrs_hndl;
};

enum mlx5_ib_counters_type {
	MLX5_IB_COUNTERS_FLOW,
};
struct mlx5_ib_mcounters {
	struct ib_counters ibcntrs;
	enum mlx5_ib_counters_type type;
	/* number of counters supported for this counters type */
	u32 counters_num;
	struct mlx5_fc *hw_cntrs_hndl;
	/* read function for this counters type */
	int (*read_counters)(struct ib_device *ibdev,
			     struct mlx5_read_counters_attr *read_attr);
	/* max index set as part of create_flow */
	u32 cntrs_max_index;
	/* number of counters data entries (<description,index> pair) */
	u32 ncounters;
	/* counters data array for descriptions and indexes */
	struct mlx5_ib_flow_counters_desc *counters_data;
	/* protects access to mcounters internal data */
	struct mutex mcntrs_mutex;
};
static inline struct mlx5_ib_mcounters *
to_mcounters(struct ib_counters *ibcntrs)
{
	return container_of(ibcntrs, struct mlx5_ib_mcounters, ibcntrs);
}
int parse_flow_flow_action(struct mlx5_ib_flow_action *maction,
			   bool is_egress,
			   struct mlx5_flow_act *action);
struct mlx5_ib_lb_state {
	/* protect the user_td */
	struct mutex		mutex;
};
struct mlx5_ib_pf_eq {
	struct notifier_block irq_nb;
	struct mlx5_ib_dev *dev;
	struct mlx5_eq *core;
	struct work_struct work;
	spinlock_t lock; /* Pagefaults spinlock */
	struct workqueue_struct *wq;
};
struct mlx5_devx_event_table {
	struct mlx5_nb devx_nb;
	/* serialize updating the event_xa */
	struct mutex event_xa_lock;
	struct xarray event_xa;
};
struct mlx5_var_table {
	/* serialize updating the bitmap */
	struct mutex bitmap_lock;
	unsigned long *bitmap;
	u64 num_var_hw_entries;
};
struct mlx5_ib_dev {
	struct ib_device		ib_dev;
	struct mlx5_core_dev		*mdev;
	struct notifier_block		mdev_events;

	/* serialize update of capability mask */
	struct mutex			cap_mask_mutex;

	struct umr_common		umrc;
	/* sync used page count stats */
	struct mlx5_ib_resources	devr;

	struct mlx5_mr_cache		cache;
	struct timer_list		delay_timer;
	/* Prevents soft lock on massive reg MRs */
	struct mutex			slow_path_mutex;
	struct ib_odp_caps		odp_caps;
	struct mlx5_ib_pf_eq		odp_pf_eq;

	/*
	 * Sleepable RCU that prevents destruction of MRs while they are still
	 * being used by a page fault handler.
	 */
	struct srcu_struct		odp_srcu;
	struct xarray			odp_mkeys;

	struct mlx5_ib_flow_db		*flow_db;
	/* protect resources needed as part of reset flow */
	spinlock_t			reset_flow_resource_lock;
	struct list_head		qp_list;
	/* Array with num_ports elements */
	struct mlx5_ib_port		*port;
	struct mlx5_sq_bfreg		bfreg;
	struct mlx5_sq_bfreg		wc_bfreg;
	struct mlx5_sq_bfreg		fp_bfreg;
	struct mlx5_ib_delay_drop	delay_drop;
	const struct mlx5_ib_profile	*profile;

	struct mlx5_ib_lb_state		lb;
	struct list_head		ib_dev_list;
	u16				devx_whitelist_uid;
	struct mlx5_srq_table		srq_table;
	struct mlx5_async_ctx		async_ctx;
	struct mlx5_devx_event_table	devx_event_table;
	struct mlx5_var_table		var_table;

	struct xarray			sig_mrs;
};
static inline struct mlx5_ib_cq *to_mibcq(struct mlx5_core_cq *mcq)
{
	return container_of(mcq, struct mlx5_ib_cq, mcq);
}

static inline struct mlx5_ib_xrcd *to_mxrcd(struct ib_xrcd *ibxrcd)
{
	return container_of(ibxrcd, struct mlx5_ib_xrcd, ibxrcd);
}

static inline struct mlx5_ib_dev *to_mdev(struct ib_device *ibdev)
{
	return container_of(ibdev, struct mlx5_ib_dev, ib_dev);
}

static inline struct mlx5_ib_dev *mlx5_udata_to_mdev(struct ib_udata *udata)
{
	struct mlx5_ib_ucontext *context = rdma_udata_to_drv_context(
		udata, struct mlx5_ib_ucontext, ibucontext);

	return to_mdev(context->ibucontext.device);
}

static inline struct mlx5_ib_cq *to_mcq(struct ib_cq *ibcq)
{
	return container_of(ibcq, struct mlx5_ib_cq, ibcq);
}

static inline struct mlx5_ib_qp *to_mibqp(struct mlx5_core_qp *mqp)
{
	return container_of(mqp, struct mlx5_ib_qp_base, mqp)->container_mibqp;
}

static inline struct mlx5_ib_rwq *to_mibrwq(struct mlx5_core_qp *core_qp)
{
	return container_of(core_qp, struct mlx5_ib_rwq, core_qp);
}

static inline struct mlx5_ib_mr *to_mibmr(struct mlx5_core_mkey *mmkey)
{
	return container_of(mmkey, struct mlx5_ib_mr, mmkey);
}

static inline struct mlx5_ib_pd *to_mpd(struct ib_pd *ibpd)
{
	return container_of(ibpd, struct mlx5_ib_pd, ibpd);
}

static inline struct mlx5_ib_srq *to_msrq(struct ib_srq *ibsrq)
{
	return container_of(ibsrq, struct mlx5_ib_srq, ibsrq);
}

static inline struct mlx5_ib_qp *to_mqp(struct ib_qp *ibqp)
{
	return container_of(ibqp, struct mlx5_ib_qp, ibqp);
}

static inline struct mlx5_ib_rwq *to_mrwq(struct ib_wq *ibwq)
{
	return container_of(ibwq, struct mlx5_ib_rwq, ibwq);
}

static inline struct mlx5_ib_rwq_ind_table *to_mrwq_ind_table(struct ib_rwq_ind_table *ib_rwq_ind_tbl)
{
	return container_of(ib_rwq_ind_tbl, struct mlx5_ib_rwq_ind_table, ib_rwq_ind_tbl);
}

static inline struct mlx5_ib_srq *to_mibsrq(struct mlx5_core_srq *msrq)
{
	return container_of(msrq, struct mlx5_ib_srq, msrq);
}

static inline struct mlx5_ib_dm *to_mdm(struct ib_dm *ibdm)
{
	return container_of(ibdm, struct mlx5_ib_dm, ibdm);
}

static inline struct mlx5_ib_mr *to_mmr(struct ib_mr *ibmr)
{
	return container_of(ibmr, struct mlx5_ib_mr, ibmr);
}

static inline struct mlx5_ib_mw *to_mmw(struct ib_mw *ibmw)
{
	return container_of(ibmw, struct mlx5_ib_mw, ibmw);
}

static inline struct mlx5_ib_flow_action *
to_mflow_act(struct ib_flow_action *ibact)
{
	return container_of(ibact, struct mlx5_ib_flow_action, ib_action);
}

static inline struct mlx5_user_mmap_entry *
to_mmmap(struct rdma_user_mmap_entry *rdma_entry)
{
	return container_of(rdma_entry,
			    struct mlx5_user_mmap_entry, rdma_entry);
}
int mlx5_ib_db_map_user(struct mlx5_ib_ucontext *context,
			struct ib_udata *udata, unsigned long virt,
			struct mlx5_db *db);
void mlx5_ib_db_unmap_user(struct mlx5_ib_ucontext *context, struct mlx5_db *db);
void __mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 qpn, struct mlx5_ib_srq *srq);
void mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 qpn, struct mlx5_ib_srq *srq);
void mlx5_ib_free_srq_wqe(struct mlx5_ib_srq *srq, int wqe_index);
int mlx5_ib_create_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr, u32 flags,
		      struct ib_udata *udata);
int mlx5_ib_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr);
void mlx5_ib_destroy_ah(struct ib_ah *ah, u32 flags);
int mlx5_ib_create_srq(struct ib_srq *srq, struct ib_srq_init_attr *init_attr,
		       struct ib_udata *udata);
int mlx5_ib_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
		       enum ib_srq_attr_mask attr_mask, struct ib_udata *udata);
int mlx5_ib_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr);
void mlx5_ib_destroy_srq(struct ib_srq *srq, struct ib_udata *udata);
int mlx5_ib_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
			  const struct ib_recv_wr **bad_wr);
int mlx5_ib_enable_lb(struct mlx5_ib_dev *dev, bool td, bool qp);
void mlx5_ib_disable_lb(struct mlx5_ib_dev *dev, bool td, bool qp);
struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd,
				struct ib_qp_init_attr *init_attr,
				struct ib_udata *udata);
int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		      int attr_mask, struct ib_udata *udata);
int mlx5_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_mask,
		     struct ib_qp_init_attr *qp_init_attr);
int mlx5_ib_destroy_qp(struct ib_qp *qp, struct ib_udata *udata);
void mlx5_ib_drain_sq(struct ib_qp *qp);
void mlx5_ib_drain_rq(struct ib_qp *qp);
int mlx5_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
		      const struct ib_send_wr **bad_wr);
int mlx5_ib_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
		      const struct ib_recv_wr **bad_wr);
int mlx5_ib_read_wqe_sq(struct mlx5_ib_qp *qp, int wqe_index, void *buffer,
			size_t buflen, size_t *bc);
int mlx5_ib_read_wqe_rq(struct mlx5_ib_qp *qp, int wqe_index, void *buffer,
			size_t buflen, size_t *bc);
int mlx5_ib_read_wqe_srq(struct mlx5_ib_srq *srq, int wqe_index, void *buffer,
			 size_t buflen, size_t *bc);
int mlx5_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
		      struct ib_udata *udata);
void mlx5_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata);
int mlx5_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
int mlx5_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
int mlx5_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period);
int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata);
struct ib_mr *mlx5_ib_get_dma_mr(struct ib_pd *pd, int acc);
struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
				  u64 virt_addr, int access_flags,
				  struct ib_udata *udata);
int mlx5_ib_advise_mr(struct ib_pd *pd,
		      enum ib_uverbs_advise_mr_advice advice,
		      u32 flags,
		      struct ib_sge *sg_list,
		      u32 num_sge,
		      struct uverbs_attr_bundle *attrs);
struct ib_mw *mlx5_ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
			       struct ib_udata *udata);
int mlx5_ib_dealloc_mw(struct ib_mw *mw);
int mlx5_ib_update_xlt(struct mlx5_ib_mr *mr, u64 idx, int npages,
		       int page_shift, int flags);
struct mlx5_ib_mr *mlx5_ib_alloc_implicit_mr(struct mlx5_ib_pd *pd,
					     struct ib_udata *udata,
					     int access_flags);
void mlx5_ib_free_implicit_mr(struct mlx5_ib_mr *mr);
void mlx5_ib_fence_odp_mr(struct mlx5_ib_mr *mr);
int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
			  u64 length, u64 virt_addr, int access_flags,
			  struct ib_pd *pd, struct ib_udata *udata);
int mlx5_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata);
struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
			       u32 max_num_sg, struct ib_udata *udata);
struct ib_mr *mlx5_ib_alloc_mr_integrity(struct ib_pd *pd,
					 u32 max_num_sg,
					 u32 max_num_meta_sg);
int mlx5_ib_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
		      unsigned int *sg_offset);
int mlx5_ib_map_mr_sg_pi(struct ib_mr *ibmr, struct scatterlist *data_sg,
			 int data_sg_nents, unsigned int *data_sg_offset,
			 struct scatterlist *meta_sg, int meta_sg_nents,
			 unsigned int *meta_sg_offset);
int mlx5_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
			const struct ib_wc *in_wc, const struct ib_grh *in_grh,
			const struct ib_mad *in, struct ib_mad *out,
			size_t *out_mad_size, u16 *out_mad_pkey_index);
struct ib_xrcd *mlx5_ib_alloc_xrcd(struct ib_device *ibdev,
				   struct ib_udata *udata);
int mlx5_ib_dealloc_xrcd(struct ib_xrcd *xrcd, struct ib_udata *udata);
int mlx5_ib_get_buf_offset(u64 addr, int page_shift, u32 *offset);
int mlx5_query_ext_port_caps(struct mlx5_ib_dev *dev, u8 port);
int mlx5_query_mad_ifc_smp_attr_node_info(struct ib_device *ibdev,
					  struct ib_smp *out_mad);
int mlx5_query_mad_ifc_system_image_guid(struct ib_device *ibdev,
					 __be64 *sys_image_guid);
int mlx5_query_mad_ifc_max_pkeys(struct ib_device *ibdev,
				 u16 *max_pkeys);
int mlx5_query_mad_ifc_vendor_id(struct ib_device *ibdev,
				 u32 *vendor_id);
int mlx5_query_mad_ifc_node_desc(struct mlx5_ib_dev *dev, char *node_desc);
int mlx5_query_mad_ifc_node_guid(struct mlx5_ib_dev *dev, __be64 *node_guid);
int mlx5_query_mad_ifc_pkey(struct ib_device *ibdev, u8 port, u16 index,
			    u16 *pkey);
int mlx5_query_mad_ifc_gids(struct ib_device *ibdev, u8 port, int index,
			    union ib_gid *gid);
int mlx5_query_mad_ifc_port(struct ib_device *ibdev, u8 port,
			    struct ib_port_attr *props);
int mlx5_ib_query_port(struct ib_device *ibdev, u8 port,
		       struct ib_port_attr *props);
int mlx5_ib_init_fmr(struct mlx5_ib_dev *dev);
void mlx5_ib_cleanup_fmr(struct mlx5_ib_dev *dev);
void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr,
			unsigned long max_page_shift,
			int *count, int *shift,
			int *ncont, int *order);
void __mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
			    int page_shift, size_t offset, size_t num_pages,
			    __be64 *pas, int access_flags);
void mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
			  int page_shift, __be64 *pas, int access_flags);
void mlx5_ib_copy_pas(u64 *old, u64 *new, int step, int num);
int mlx5_ib_get_cqe_size(struct ib_cq *ibcq);
int mlx5_mr_cache_init(struct mlx5_ib_dev *dev);
int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev);

struct mlx5_ib_mr *mlx5_mr_cache_alloc(struct mlx5_ib_dev *dev,
				       unsigned int entry);
void mlx5_mr_cache_free(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
int mlx5_mr_cache_invalidate(struct mlx5_ib_mr *mr);
int mlx5_ib_check_mr_status(struct ib_mr *ibmr, u32 check_mask,
			    struct ib_mr_status *mr_status);
struct ib_wq *mlx5_ib_create_wq(struct ib_pd *pd,
				struct ib_wq_init_attr *init_attr,
				struct ib_udata *udata);
void mlx5_ib_destroy_wq(struct ib_wq *wq, struct ib_udata *udata);
int mlx5_ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *wq_attr,
		      u32 wq_attr_mask, struct ib_udata *udata);
struct ib_rwq_ind_table *mlx5_ib_create_rwq_ind_table(struct ib_device *device,
						      struct ib_rwq_ind_table_init_attr *init_attr,
						      struct ib_udata *udata);
int mlx5_ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *wq_ind_table);
struct ib_dm *mlx5_ib_alloc_dm(struct ib_device *ibdev,
			       struct ib_ucontext *context,
			       struct ib_dm_alloc_attr *attr,
			       struct uverbs_attr_bundle *attrs);
int mlx5_ib_dealloc_dm(struct ib_dm *ibdm, struct uverbs_attr_bundle *attrs);
struct ib_mr *mlx5_ib_reg_dm_mr(struct ib_pd *pd, struct ib_dm *dm,
				struct ib_dm_mr_attr *attr,
				struct uverbs_attr_bundle *attrs);
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev);
int mlx5_ib_odp_init_one(struct mlx5_ib_dev *ibdev);
void mlx5_ib_odp_cleanup_one(struct mlx5_ib_dev *ibdev);
int __init mlx5_ib_odp_init(void);
void mlx5_ib_odp_cleanup(void);
void mlx5_odp_init_mr_cache_entry(struct mlx5_cache_ent *ent);
void mlx5_odp_populate_xlt(void *xlt, size_t idx, size_t nentries,
			   struct mlx5_ib_mr *mr, int flags);

int mlx5_ib_advise_mr_prefetch(struct ib_pd *pd,
			       enum ib_uverbs_advise_mr_advice advice,
			       u32 flags, struct ib_sge *sg_list, u32 num_sge);
#else /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */
static inline void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev)
{
	return;
}

static inline int mlx5_ib_odp_init_one(struct mlx5_ib_dev *ibdev) { return 0; }
static inline void mlx5_ib_odp_cleanup_one(struct mlx5_ib_dev *ibdev) {}
static inline int mlx5_ib_odp_init(void) { return 0; }
static inline void mlx5_ib_odp_cleanup(void) {}
static inline void mlx5_odp_init_mr_cache_entry(struct mlx5_cache_ent *ent) {}
static inline void mlx5_odp_populate_xlt(void *xlt, size_t idx, size_t nentries,
					 struct mlx5_ib_mr *mr, int flags) {}

static inline int
mlx5_ib_advise_mr_prefetch(struct ib_pd *pd,
			   enum ib_uverbs_advise_mr_advice advice, u32 flags,
			   struct ib_sge *sg_list, u32 num_sge)
{
	return -EOPNOTSUPP;
}
#endif /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */
extern const struct mmu_interval_notifier_ops mlx5_mn_ops;

/* Needed for rep profile */
void __mlx5_ib_remove(struct mlx5_ib_dev *dev,
		      const struct mlx5_ib_profile *profile,
		      int stage);
void *__mlx5_ib_add(struct mlx5_ib_dev *dev,
		    const struct mlx5_ib_profile *profile);
int mlx5_ib_get_vf_config(struct ib_device *device, int vf,
			  u8 port, struct ifla_vf_info *info);
int mlx5_ib_set_vf_link_state(struct ib_device *device, int vf,
			      u8 port, int state);
int mlx5_ib_get_vf_stats(struct ib_device *device, int vf,
			 u8 port, struct ifla_vf_stats *stats);
int mlx5_ib_get_vf_guid(struct ib_device *device, int vf, u8 port,
			struct ifla_vf_guid *node_guid,
			struct ifla_vf_guid *port_guid);
int mlx5_ib_set_vf_guid(struct ib_device *device, int vf, u8 port,
			u64 guid, int type);
__be16 mlx5_get_roce_udp_sport(struct mlx5_ib_dev *dev,
			       const struct ib_gid_attr *attr);

void mlx5_ib_cleanup_cong_debugfs(struct mlx5_ib_dev *dev, u8 port_num);
void mlx5_ib_init_cong_debugfs(struct mlx5_ib_dev *dev, u8 port_num);
/* GSI QP helper functions */
struct ib_qp *mlx5_ib_gsi_create_qp(struct ib_pd *pd,
				    struct ib_qp_init_attr *init_attr);
int mlx5_ib_gsi_destroy_qp(struct ib_qp *qp);
int mlx5_ib_gsi_modify_qp(struct ib_qp *qp, struct ib_qp_attr *attr,
			  int attr_mask);
int mlx5_ib_gsi_query_qp(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
			 int qp_attr_mask,
			 struct ib_qp_init_attr *qp_init_attr);
int mlx5_ib_gsi_post_send(struct ib_qp *qp, const struct ib_send_wr *wr,
			  const struct ib_send_wr **bad_wr);
int mlx5_ib_gsi_post_recv(struct ib_qp *qp, const struct ib_recv_wr *wr,
			  const struct ib_recv_wr **bad_wr);
void mlx5_ib_gsi_pkey_change(struct mlx5_ib_gsi_qp *gsi);

int mlx5_ib_generate_wc(struct ib_cq *ibcq, struct ib_wc *wc);
void mlx5_ib_free_bfreg(struct mlx5_ib_dev *dev, struct mlx5_bfreg_info *bfregi,
			int bfregn);
struct mlx5_ib_dev *mlx5_ib_get_ibdev_from_mpi(struct mlx5_ib_multiport_info *mpi);
struct mlx5_core_dev *mlx5_ib_get_native_port_mdev(struct mlx5_ib_dev *dev,
						   u8 ib_port_num,
						   u8 *native_port_num);
void mlx5_ib_put_native_port_mdev(struct mlx5_ib_dev *dev,
				  u8 port_num);
int mlx5_ib_fill_res_entry(struct sk_buff *msg,
			   struct rdma_restrack_entry *res);
int mlx5_ib_fill_stat_entry(struct sk_buff *msg,
			    struct rdma_restrack_entry *res);
extern const struct uapi_definition mlx5_ib_devx_defs[];
extern const struct uapi_definition mlx5_ib_flow_defs[];
extern const struct uapi_definition mlx5_ib_qos_defs[];
#if IS_ENABLED(CONFIG_INFINIBAND_USER_ACCESS)
int mlx5_ib_devx_create(struct mlx5_ib_dev *dev, bool is_user);
void mlx5_ib_devx_destroy(struct mlx5_ib_dev *dev, u16 uid);
void mlx5_ib_devx_init_event_table(struct mlx5_ib_dev *dev);
void mlx5_ib_devx_cleanup_event_table(struct mlx5_ib_dev *dev);
struct mlx5_ib_flow_handler *mlx5_ib_raw_fs_rule_add(
	struct mlx5_ib_dev *dev, struct mlx5_ib_flow_matcher *fs_matcher,
	struct mlx5_flow_context *flow_context,
	struct mlx5_flow_act *flow_act, u32 counter_id,
	void *cmd_in, int inlen, int dest_id, int dest_type);
bool mlx5_ib_devx_is_flow_dest(void *obj, int *dest_id, int *dest_type);
bool mlx5_ib_devx_is_flow_counter(void *obj, u32 offset, u32 *counter_id);
void mlx5_ib_destroy_flow_action_raw(struct mlx5_ib_flow_action *maction);
#else
static inline int
mlx5_ib_devx_create(struct mlx5_ib_dev *dev,
		    bool is_user) { return -EOPNOTSUPP; }
static inline void mlx5_ib_devx_destroy(struct mlx5_ib_dev *dev, u16 uid) {}
static inline void mlx5_ib_devx_init_event_table(struct mlx5_ib_dev *dev) {}
static inline void mlx5_ib_devx_cleanup_event_table(struct mlx5_ib_dev *dev) {}
static inline bool mlx5_ib_devx_is_flow_dest(void *obj, int *dest_id,
					     int *dest_type)
{
	return false;
}

static inline void
mlx5_ib_destroy_flow_action_raw(struct mlx5_ib_flow_action *maction)
{
}
#endif
static inline void init_query_mad(struct ib_smp *mad)
{
	mad->base_version  = 1;
	mad->mgmt_class    = IB_MGMT_CLASS_SUBN_LID_ROUTED;
	mad->class_version = 1;
	mad->method	   = IB_MGMT_METHOD_GET;
}
static inline u8 convert_access(int acc)
{
	return (acc & IB_ACCESS_REMOTE_ATOMIC ? MLX5_PERM_ATOMIC       : 0) |
	       (acc & IB_ACCESS_REMOTE_WRITE  ? MLX5_PERM_REMOTE_WRITE : 0) |
	       (acc & IB_ACCESS_REMOTE_READ   ? MLX5_PERM_REMOTE_READ  : 0) |
	       (acc & IB_ACCESS_LOCAL_WRITE   ? MLX5_PERM_LOCAL_WRITE  : 0) |
	       MLX5_PERM_LOCAL_READ;
}
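/*
 * Worked example: convert_access(IB_ACCESS_LOCAL_WRITE |
 * IB_ACCESS_REMOTE_READ) yields MLX5_PERM_LOCAL_WRITE |
 * MLX5_PERM_REMOTE_READ | MLX5_PERM_LOCAL_READ; local read permission
 * is always granted.
 */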
static inline int is_qp1(enum ib_qp_type qp_type)
{
	return qp_type == MLX5_IB_QPT_HW_GSI;
}

#define MLX5_MAX_UMR_SHIFT 16
#define MLX5_MAX_UMR_PAGES (1 << MLX5_MAX_UMR_SHIFT)
static inline u32 check_cq_create_flags(u32 flags)
{
	/*
	 * Returns a non-zero value for unsupported CQ create flags,
	 * zero otherwise.
	 */
	return (flags & ~(IB_UVERBS_CQ_FLAGS_IGNORE_OVERRUN |
			  IB_UVERBS_CQ_FLAGS_TIMESTAMP_COMPLETION));
}
static inline int verify_assign_uidx(u8 cqe_version, u32 cmd_uidx,
				     u32 *user_index)
{
	if (cqe_version) {
		if ((cmd_uidx == MLX5_IB_DEFAULT_UIDX) ||
		    (cmd_uidx & ~MLX5_USER_ASSIGNED_UIDX_MASK))
			return -EINVAL;
		*user_index = cmd_uidx;
	} else {
		*user_index = MLX5_IB_DEFAULT_UIDX;
	}

	return 0;
}
static inline int get_qp_user_index(struct mlx5_ib_ucontext *ucontext,
				    struct mlx5_ib_create_qp *ucmd,
				    int inlen,
				    u32 *user_index)
{
	u8 cqe_version = ucontext->cqe_version;

	if ((offsetofend(typeof(*ucmd), uidx) <= inlen) && !cqe_version &&
	    (ucmd->uidx == MLX5_IB_DEFAULT_UIDX))
		return 0;

	if ((offsetofend(typeof(*ucmd), uidx) <= inlen) != !!cqe_version)
		return -EINVAL;

	return verify_assign_uidx(cqe_version, ucmd->uidx, user_index);
}
static inline int get_srq_user_index(struct mlx5_ib_ucontext *ucontext,
				     struct mlx5_ib_create_srq *ucmd,
				     int inlen,
				     u32 *user_index)
{
	u8 cqe_version = ucontext->cqe_version;

	if ((offsetofend(typeof(*ucmd), uidx) <= inlen) && !cqe_version &&
	    (ucmd->uidx == MLX5_IB_DEFAULT_UIDX))
		return 0;

	if ((offsetofend(typeof(*ucmd), uidx) <= inlen) != !!cqe_version)
		return -EINVAL;

	return verify_assign_uidx(cqe_version, ucmd->uidx, user_index);
}
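/*
 * Both helpers above enforce the same contract (summary, not new API):
 * a user that negotiated cqe_version 1 must supply a valid uidx, a
 * cqe_version 0 user must either omit the field (short inlen) or pass
 * MLX5_IB_DEFAULT_UIDX, and any mismatch between inlen and cqe_version
 * is rejected with -EINVAL.
 */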
static inline int get_uars_per_sys_page(struct mlx5_ib_dev *dev, bool lib_support)
{
	return lib_support && MLX5_CAP_GEN(dev->mdev, uar_4k) ?
				MLX5_UARS_IN_PAGE : 1;
}
static inline int get_num_static_uars(struct mlx5_ib_dev *dev,
				      struct mlx5_bfreg_info *bfregi)
{
	return get_uars_per_sys_page(dev, bfregi->lib_uar_4k) * bfregi->num_static_sys_pages;
}
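/*
 * Arithmetic sketch (illustrative numbers): on a device with the uar_4k
 * capability and lib_uar_4k set, MLX5_UARS_IN_PAGE UARs share one
 * system page, so num_static_sys_pages = 2 yields
 * 2 * MLX5_UARS_IN_PAGE static UARs; otherwise each system page holds
 * exactly one UAR.
 */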
unsigned long mlx5_ib_get_xlt_emergency_page(void);
void mlx5_ib_put_xlt_emergency_page(void);

int bfregn_to_uar_index(struct mlx5_ib_dev *dev,
			struct mlx5_bfreg_info *bfregi, u32 bfregn,
			bool dyn_bfreg);

int mlx5_ib_qp_set_counter(struct ib_qp *qp, struct rdma_counter *counter);
u16 mlx5_ib_get_counters_id(struct mlx5_ib_dev *dev, u8 port_num);
static inline bool mlx5_ib_can_use_umr(struct mlx5_ib_dev *dev,
				       bool do_modify_atomic, int access_flags)
{
	if (MLX5_CAP_GEN(dev->mdev, umr_modify_entity_size_disabled))
		return false;

	if (do_modify_atomic &&
	    MLX5_CAP_GEN(dev->mdev, atomic) &&
	    MLX5_CAP_GEN(dev->mdev, umr_modify_atomic_disabled))
		return false;

	if (access_flags & IB_ACCESS_RELAXED_ORDERING &&
	    (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_write) ||
	     MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read)))
		return false;

	return true;
}
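/*
 * Typical call site (sketch, modelled on the MR registration paths):
 * callers fall back to a non-UMR path or fail when the capability
 * checks above reject UMR use, e.g.
 *
 *	if (!mlx5_ib_can_use_umr(dev, true, access_flags))
 *		return ERR_PTR(-EOPNOTSUPP);
 */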
int mlx5_ib_enable_driver(struct ib_device *dev);
int mlx5_ib_test_wc(struct mlx5_ib_dev *dev);

#endif /* MLX5_IB_H */