/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef MLX5_IB_H
#define MLX5_IB_H
#include <linux/kernel.h>
#include <linux/sched.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_smi.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/cq.h>
#include <linux/mlx5/fs.h>
#include <linux/mlx5/qp.h>
#include <linux/mlx5/srq.h>
#include <linux/types.h>
#include <linux/mlx5/transobj.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/mlx5-abi.h>
#include <rdma/uverbs_ioctl.h>
#include <rdma/mlx5_user_ioctl_cmds.h>
#define mlx5_ib_dbg(_dev, format, arg...)				\
	dev_dbg(&(_dev)->ib_dev.dev, "%s:%d:(pid %d): " format, __func__, \
		__LINE__, current->pid, ##arg)

#define mlx5_ib_err(_dev, format, arg...)				\
	dev_err(&(_dev)->ib_dev.dev, "%s:%d:(pid %d): " format, __func__, \
		__LINE__, current->pid, ##arg)

#define mlx5_ib_warn(_dev, format, arg...)				\
	dev_warn(&(_dev)->ib_dev.dev, "%s:%d:(pid %d): " format, __func__, \
		 __LINE__, current->pid, ##arg)
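/*
 * Example: the helpers above stamp every message with function, line and
 * pid, so a call such as
 *
 *	mlx5_ib_dbg(dev, "umem get failed (%ld)\n", PTR_ERR(umem));
 *
 * logs "<function>:<line>:(pid <pid>): umem get failed (...)".
 */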
#define field_avail(type, fld, sz) (offsetof(type, fld) +		\
				    sizeof(((type *)0)->fld) <= (sz))
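/*
 * Example: field_avail() tells whether a user command buffer of 'sz' bytes
 * is long enough to carry field 'fld'; get_qp_user_index() below uses it
 * to decide whether ucmd->uidx may be read:
 *
 *	if (field_avail(struct mlx5_ib_create_qp, uidx, inlen))
 *		... ucmd->uidx is within the buffer ...
 */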
#define MLX5_IB_DEFAULT_UIDX 0xffffff
#define MLX5_USER_ASSIGNED_UIDX_MASK __mlx5_mask(qpc, user_index)

#define MLX5_MKEY_PAGE_SHIFT_MASK __mlx5_mask(mkc, log_page_size)

enum {
	MLX5_IB_MMAP_CMD_SHIFT	= 8,
	MLX5_IB_MMAP_CMD_MASK	= 0xff,
};

enum {
	MLX5_RES_SCAT_DATA32_CQE	= 0x1,
	MLX5_RES_SCAT_DATA64_CQE	= 0x2,
	MLX5_REQ_SCAT_DATA32_CQE	= 0x11,
	MLX5_REQ_SCAT_DATA64_CQE	= 0x22,
};
enum mlx5_ib_mad_ifc_flags {
	MLX5_MAD_IFC_IGNORE_MKEY	= 1,
	MLX5_MAD_IFC_IGNORE_BKEY	= 2,
	MLX5_MAD_IFC_NET_VIEW		= 4,
};

enum {
	MLX5_CROSS_CHANNEL_BFREG	= 0,
};

enum {
	MLX5_TM_MAX_RNDV_MSG_SIZE	= 64,
};

enum {
	MLX5_IB_INVALID_UAR_INDEX	= BIT(31),
	MLX5_IB_INVALID_BFREG		= BIT(31),
};

enum {
	MLX5_MAX_MEMIC_PAGES		= 0x100,
	MLX5_MEMIC_ALLOC_SIZE_MASK	= 0x3f,
};

enum {
	MLX5_MEMIC_BASE_ALIGN	= 6,
	MLX5_MEMIC_BASE_SIZE	= 1 << MLX5_MEMIC_BASE_ALIGN,
};
struct mlx5_ib_ucontext {
	struct ib_ucontext	ibucontext;
	struct list_head	db_page_list;

	/* protect doorbell record alloc/free
	 */
	struct mutex		db_page_mutex;
	struct mlx5_bfreg_info	bfregi;
	u8			cqe_version;
	/* Transport Domain number */
	u32			tdn;

	DECLARE_BITMAP(dm_pages, MLX5_MAX_MEMIC_PAGES);
	/* For RoCE LAG TX affinity */
	atomic_t		tx_port_affinity;
};

static inline struct mlx5_ib_ucontext *to_mucontext(struct ib_ucontext *ibucontext)
{
	return container_of(ibucontext, struct mlx5_ib_ucontext, ibucontext);
}
struct mlx5_ib_pd {
	struct ib_pd		ibpd;
	u32			pdn;
};

enum {
	MLX5_IB_FLOW_ACTION_MODIFY_HEADER,
	MLX5_IB_FLOW_ACTION_PACKET_REFORMAT,
	MLX5_IB_FLOW_ACTION_DECAP,
};
#define MLX5_IB_FLOW_MCAST_PRIO		(MLX5_BY_PASS_NUM_PRIOS - 1)
#define MLX5_IB_FLOW_LAST_PRIO		(MLX5_BY_PASS_NUM_REGULAR_PRIOS - 1)
#if (MLX5_IB_FLOW_LAST_PRIO <= 0)
#error "Invalid number of bypass priorities"
#endif
#define MLX5_IB_FLOW_LEFTOVERS_PRIO	(MLX5_IB_FLOW_MCAST_PRIO + 1)

#define MLX5_IB_NUM_FLOW_FT		(MLX5_IB_FLOW_LEFTOVERS_PRIO + 1)
#define MLX5_IB_NUM_SNIFFER_FTS		2
#define MLX5_IB_NUM_EGRESS_FTS		1
struct mlx5_ib_flow_prio {
	struct mlx5_flow_table		*flow_table;
	unsigned int			refcount;
};

struct mlx5_ib_flow_handler {
	struct list_head		list;
	struct ib_flow			ibflow;
	struct mlx5_ib_flow_prio	*prio;
	struct mlx5_flow_handle		*rule;
	struct ib_counters		*ibcounters;
	struct mlx5_ib_dev		*dev;
	struct mlx5_ib_flow_matcher	*flow_matcher;
};

struct mlx5_ib_flow_matcher {
	struct mlx5_ib_match_params	matcher_mask;
	enum mlx5_ib_flow_type		flow_type;
	enum mlx5_flow_namespace_type	ns_type;
	struct mlx5_core_dev		*mdev;
	u8				match_criteria_enable;
};

struct mlx5_ib_flow_db {
	struct mlx5_ib_flow_prio	prios[MLX5_IB_NUM_FLOW_FT];
	struct mlx5_ib_flow_prio	egress_prios[MLX5_IB_NUM_FLOW_FT];
	struct mlx5_ib_flow_prio	sniffer[MLX5_IB_NUM_SNIFFER_FTS];
	struct mlx5_ib_flow_prio	egress[MLX5_IB_NUM_EGRESS_FTS];
	struct mlx5_flow_table		*lag_demux_ft;
	/* Protect flow steering bypass flow tables
	 * when adding/deleting flow rules: only a single add/removal of a
	 * flow steering rule can be in progress at a time.
	 */
	struct mutex			lock;
};
/* Use macros here so that we don't have to duplicate
 * enum ib_send_flags and enum ib_qp_type for low-level driver
 */

#define MLX5_IB_SEND_UMR_ENABLE_MR	       (IB_SEND_RESERVED_START << 0)
#define MLX5_IB_SEND_UMR_DISABLE_MR	       (IB_SEND_RESERVED_START << 1)
#define MLX5_IB_SEND_UMR_FAIL_IF_FREE	       (IB_SEND_RESERVED_START << 2)
#define MLX5_IB_SEND_UMR_UPDATE_XLT	       (IB_SEND_RESERVED_START << 3)
#define MLX5_IB_SEND_UMR_UPDATE_TRANSLATION    (IB_SEND_RESERVED_START << 4)
#define MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS	IB_SEND_RESERVED_END

#define MLX5_IB_QPT_REG_UMR	IB_QPT_RESERVED1
/*
 * IB_QPT_GSI creates the software wrapper around GSI, and MLX5_IB_QPT_HW_GSI
 * creates the actual hardware QP.
 */
#define MLX5_IB_QPT_HW_GSI	IB_QPT_RESERVED2
#define MLX5_IB_QPT_DCI		IB_QPT_RESERVED3
#define MLX5_IB_QPT_DCT		IB_QPT_RESERVED4
#define MLX5_IB_WR_UMR		IB_WR_RESERVED1

#define MLX5_IB_UMR_OCTOWORD	       16
#define MLX5_IB_UMR_XLT_ALIGNMENT      64

#define MLX5_IB_UPD_XLT_ZAP	      BIT(0)
#define MLX5_IB_UPD_XLT_ENABLE	      BIT(1)
#define MLX5_IB_UPD_XLT_ATOMIC	      BIT(2)
#define MLX5_IB_UPD_XLT_ADDR	      BIT(3)
#define MLX5_IB_UPD_XLT_PD	      BIT(4)
#define MLX5_IB_UPD_XLT_ACCESS	      BIT(5)
#define MLX5_IB_UPD_XLT_INDIRECT      BIT(6)
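/*
 * Example: the MLX5_IB_UPD_XLT_* bits are OR-ed into the 'flags' argument
 * of mlx5_ib_update_xlt() (declared below), e.g. to invalidate a range of
 * translation entries without sleeping:
 *
 *	mlx5_ib_update_xlt(mr, idx, npages, page_shift,
 *			   MLX5_IB_UPD_XLT_ZAP | MLX5_IB_UPD_XLT_ATOMIC);
 */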
/* Private QP creation flags to be passed in ib_qp_init_attr.create_flags.
 *
 * These flags are intended for internal use by the mlx5_ib driver, and they
 * rely on the range reserved for that use in the ib_qp_create_flags enum.
 */

/* Create a UD QP whose source QP number is 1 */
static inline enum ib_qp_create_flags mlx5_ib_create_qp_sqpn_qp1(void)
{
	return IB_QP_CREATE_RESERVED_START;
}
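/*
 * Example: since the value lands in the reserved create-flags range, QP
 * creation code can test it like any other bit in
 * ib_qp_init_attr.create_flags:
 *
 *	if (init_attr->create_flags & mlx5_ib_create_qp_sqpn_qp1())
 *		... create a UD QP with source QPN 1
 *		    (see MLX5_IB_QP_SQPN_QP1 below) ...
 */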
enum mlx5_ib_rq_flags {
	MLX5_IB_RQ_CVLAN_STRIPPING	= 1 << 0,
	MLX5_IB_RQ_PCI_WRITE_END_PADDING = 1 << 1,
};

struct wr_list {
	u16	opcode;
	u16	next;
};

struct mlx5_ib_wq {
	struct wr_list	       *w_list;

	/* serialize post to the work queue
	 */
	spinlock_t		lock;
};

enum mlx5_ib_wq_flags {
	MLX5_IB_WQ_FLAGS_DELAY_DROP = 0x1,
	MLX5_IB_WQ_FLAGS_STRIDING_RQ = 0x2,
};

#define MLX5_MIN_SINGLE_WQE_LOG_NUM_STRIDES 9
#define MLX5_MAX_SINGLE_WQE_LOG_NUM_STRIDES 16
#define MLX5_MIN_SINGLE_STRIDE_LOG_NUM_BYTES 6
#define MLX5_MAX_SINGLE_STRIDE_LOG_NUM_BYTES 13
struct mlx5_ib_rwq {
	struct ib_wq		ibwq;
	struct mlx5_core_qp	core_qp;
	u32			two_byte_shift_en;
	u32			single_stride_log_num_of_bytes;
	struct ib_umem		*umem;
	unsigned int		page_shift;
	u32			create_flags; /* Use enum mlx5_ib_wq_flags */
};

struct mlx5_ib_rwq_ind_table {
	struct ib_rwq_ind_table ib_rwq_ind_tbl;
	u32			rqtn;
};

struct mlx5_ib_ubuffer {
	struct ib_umem	       *umem;
	int			buf_size;
	u64			buf_addr;
};
struct mlx5_ib_qp_base {
	struct mlx5_ib_qp	*container_mibqp;
	struct mlx5_core_qp	mqp;
	struct mlx5_ib_ubuffer	ubuffer;
};

struct mlx5_ib_qp_trans {
	struct mlx5_ib_qp_base	base;
};

struct mlx5_ib_rss_qp {
	u32	tirn;
};

struct mlx5_ib_rq {
	struct mlx5_ib_qp_base base;
	struct mlx5_ib_wq	*rq;
	struct mlx5_ib_ubuffer	ubuffer;
	struct mlx5_db		*doorbell;
};

struct mlx5_ib_sq {
	struct mlx5_ib_qp_base base;
	struct mlx5_ib_wq	*sq;
	struct mlx5_ib_ubuffer  ubuffer;
	struct mlx5_db		*doorbell;
	struct mlx5_flow_handle	*flow_rule;
};

struct mlx5_ib_raw_packet_qp {
	struct mlx5_ib_sq sq;
	struct mlx5_ib_rq rq;
};

struct mlx5_bf {
	unsigned long		offset;
	struct mlx5_sq_bfreg   *bfreg;
};

struct mlx5_ib_dct {
	struct mlx5_core_dct	mdct;
	u32			*in;
};
struct mlx5_ib_qp {
	struct ib_qp		ibqp;
	union {
		struct mlx5_ib_qp_trans trans_qp;
		struct mlx5_ib_raw_packet_qp raw_packet_qp;
		struct mlx5_ib_rss_qp rss_qp;
		struct mlx5_ib_dct dct;
	};
	struct mlx5_frag_buf	buf;

	struct mlx5_db		db;
	struct mlx5_ib_wq	rq;
	struct mlx5_ib_wq	sq;

	/* serialize qp state modifications
	 */
	struct mutex		mutex;
	u32			flags;

	/* only for user space QPs. For kernel
	 * we have it from the bf object
	 */
	int			bfregn;

	/* Store signature errors */
	bool			signature_en;

	struct list_head	qps_list;
	struct list_head	cq_recv_list;
	struct list_head	cq_send_list;
	struct mlx5_rate_limit	rl;

	/* storage for qp sub type when core qp type is IB_QPT_DRIVER */
	enum ib_qp_type		qp_sub_type;
};
struct mlx5_ib_cq_buf {
	struct mlx5_frag_buf_ctrl fbc;
	struct ib_umem		*umem;
	int			cqe_size;
	int			nent;
};

enum mlx5_ib_qp_flags {
	MLX5_IB_QP_LSO				= IB_QP_CREATE_IPOIB_UD_LSO,
	MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK	= IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK,
	MLX5_IB_QP_CROSS_CHANNEL		= IB_QP_CREATE_CROSS_CHANNEL,
	MLX5_IB_QP_MANAGED_SEND			= IB_QP_CREATE_MANAGED_SEND,
	MLX5_IB_QP_MANAGED_RECV			= IB_QP_CREATE_MANAGED_RECV,
	MLX5_IB_QP_SIGNATURE_HANDLING		= 1 << 5,
	/* QP uses 1 as its source QP number */
	MLX5_IB_QP_SQPN_QP1			= 1 << 6,
	MLX5_IB_QP_CAP_SCATTER_FCS		= 1 << 7,
	MLX5_IB_QP_RSS				= 1 << 8,
	MLX5_IB_QP_CVLAN_STRIPPING		= 1 << 9,
	MLX5_IB_QP_UNDERLAY			= 1 << 10,
	MLX5_IB_QP_PCI_WRITE_END_PADDING	= 1 << 11,
	MLX5_IB_QP_TUNNEL_OFFLOAD		= 1 << 12,
};
struct mlx5_umr_wr {
	struct ib_send_wr		wr;
	unsigned int			page_shift;
	unsigned int			xlt_size;
};

static inline const struct mlx5_umr_wr *umr_wr(const struct ib_send_wr *wr)
{
	return container_of(wr, struct mlx5_umr_wr, wr);
}
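/*
 * Example: UMR work requests travel through the post-send path as a plain
 * ib_send_wr; umr_wr() recovers the enclosing driver structure:
 *
 *	const struct mlx5_umr_wr *umrwr = umr_wr(wr);
 */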
struct mlx5_shared_mr_info {
	int			mr_id;
	struct ib_umem		*umem;
};
enum mlx5_ib_cq_pr_flags {
	MLX5_IB_CQ_PR_FLAGS_CQE_128_PAD	= 1 << 0,
};

struct mlx5_ib_cq {
	struct ib_cq		ibcq;
	struct mlx5_core_cq	mcq;
	struct mlx5_ib_cq_buf	buf;
	struct mlx5_db		db;

	/* serialize access to the CQ
	 */
	spinlock_t		lock;

	struct mutex		resize_mutex;
	struct mlx5_ib_cq_buf  *resize_buf;
	struct ib_umem	       *resize_umem;
	int			cqe_size;
	struct list_head	list_send_qp;
	struct list_head	list_recv_qp;
	struct list_head	wc_list;
	enum ib_cq_notify_flags notify_flags;
	struct work_struct	notify_work;
	u16			private_flags; /* Use mlx5_ib_cq_pr_flags */
};

struct mlx5_ib_wc {
	struct ib_wc wc;
	struct list_head list;
};
struct mlx5_ib_srq {
	struct ib_srq		ibsrq;
	struct mlx5_core_srq	msrq;
	struct mlx5_frag_buf	buf;
	/* protect SRQ handling
	 */
	spinlock_t		lock;
	struct ib_umem	       *umem;
	/* serialize arming a SRQ
	 */
	struct mutex		mutex;
};

struct mlx5_ib_xrcd {
	struct ib_xrcd		ibxrcd;
	u32			xrcdn;
};

enum mlx5_ib_mtt_access_flags {
	MLX5_IB_MTT_READ  = (1 << 0),
	MLX5_IB_MTT_WRITE = (1 << 1),
};

struct mlx5_ib_dm {
	struct ib_dm		ibdm;
	phys_addr_t		dev_addr;
};

#define MLX5_IB_MTT_PRESENT (MLX5_IB_MTT_READ | MLX5_IB_MTT_WRITE)
#define MLX5_IB_DM_ALLOWED_ACCESS (IB_ACCESS_LOCAL_WRITE   |\
				   IB_ACCESS_REMOTE_WRITE  |\
				   IB_ACCESS_REMOTE_READ   |\
				   IB_ACCESS_REMOTE_ATOMIC |\
				   IB_ZERO_BASED)
struct mlx5_ib_mr {
	struct ib_mr		ibmr;
	struct mlx5_core_mkey	mmkey;
	struct ib_umem	       *umem;
	struct mlx5_shared_mr_info	*smr_info;
	struct list_head	list;
	bool			allocated_from_cache;
	struct mlx5_ib_dev     *dev;
	u32			out[MLX5_ST_SZ_DW(create_mkey_out)];
	struct mlx5_core_sig_ctx    *sig;
	int			access_flags; /* Needed for rereg MR */

	struct mlx5_ib_mr      *parent;
	atomic_t		num_leaf_free;
	wait_queue_head_t	q_leaf_free;
};

struct mlx5_ib_mw {
	struct ib_mw		ibmw;
	struct mlx5_core_mkey	mmkey;
};

struct mlx5_ib_umr_context {
	struct ib_cqe		cqe;
	enum ib_wc_status	status;
	struct completion	done;
};
struct umr_common {
	struct ib_pd	*pd;
	struct ib_cq	*cq;
	struct ib_qp	*qp;
	/* control access to UMR QP
	 */
	struct semaphore	sem;
};
struct mlx5_cache_ent {
	struct list_head	head;
	/* sync access to the cache entry
	 */
	spinlock_t		lock;

	struct dentry	       *fsize;
	struct dentry	       *fmiss;
	struct dentry	       *flimit;

	struct mlx5_ib_dev     *dev;
	struct work_struct	work;
	struct delayed_work	dwork;
	struct completion	compl;
};

struct mlx5_mr_cache {
	struct workqueue_struct *wq;
	struct mlx5_cache_ent	ent[MAX_MR_CACHE_ENTRIES];
	unsigned long		last_add;
};
struct mlx5_ib_gsi_qp;

struct mlx5_ib_port_resources {
	struct mlx5_ib_resources *devr;
	struct mlx5_ib_gsi_qp *gsi;
	struct work_struct pkey_change_work;
};

struct mlx5_ib_resources {
	struct ib_cq	*c0;
	struct ib_xrcd	*x0;
	struct ib_xrcd	*x1;
	struct ib_pd	*p0;
	struct ib_srq	*s0;
	struct ib_srq	*s1;
	struct mlx5_ib_port_resources ports[2];
	/* Protects changes to the port resources */
	struct mutex	mutex;
};
struct mlx5_ib_counters {
	const char **names;
	size_t *offsets;
	u32 num_q_counters;
	u32 num_cong_counters;
	u32 num_ext_ppcnt_counters;
};

struct mlx5_ib_multiport_info;

struct mlx5_ib_multiport {
	struct mlx5_ib_multiport_info *mpi;
	/* To be held when accessing the multiport info */
	spinlock_t mpi_lock;
};
struct mlx5_ib_port {
	struct mlx5_ib_counters cnts;
	struct mlx5_ib_multiport mp;
	struct mlx5_ib_dbg_cc_params *dbg_cc_params;
};

struct mlx5_roce {
	/* Protect mlx5_ib_get_netdev from invoking dev_hold() with a NULL
	 * netdev pointer
	 */
	rwlock_t		netdev_lock;
	struct net_device	*netdev;
	struct notifier_block	nb;
	atomic_t		tx_port_affinity;
	enum ib_port_state	last_port_state;
	struct mlx5_ib_dev	*dev;
	u8			native_port_num;
};
struct mlx5_ib_dbg_param {
	int			offset;
	struct mlx5_ib_dev	*dev;
	struct dentry		*dentry;
	u8			port_num;
};
enum mlx5_ib_dbg_cc_types {
	MLX5_IB_DBG_CC_RP_CLAMP_TGT_RATE,
	MLX5_IB_DBG_CC_RP_CLAMP_TGT_RATE_ATI,
	MLX5_IB_DBG_CC_RP_TIME_RESET,
	MLX5_IB_DBG_CC_RP_BYTE_RESET,
	MLX5_IB_DBG_CC_RP_THRESHOLD,
	MLX5_IB_DBG_CC_RP_AI_RATE,
	MLX5_IB_DBG_CC_RP_HAI_RATE,
	MLX5_IB_DBG_CC_RP_MIN_DEC_FAC,
	MLX5_IB_DBG_CC_RP_MIN_RATE,
	MLX5_IB_DBG_CC_RP_RATE_TO_SET_ON_FIRST_CNP,
	MLX5_IB_DBG_CC_RP_DCE_TCP_G,
	MLX5_IB_DBG_CC_RP_DCE_TCP_RTT,
	MLX5_IB_DBG_CC_RP_RATE_REDUCE_MONITOR_PERIOD,
	MLX5_IB_DBG_CC_RP_INITIAL_ALPHA_VALUE,
	MLX5_IB_DBG_CC_RP_GD,
	MLX5_IB_DBG_CC_NP_CNP_DSCP,
	MLX5_IB_DBG_CC_NP_CNP_PRIO_MODE,
	MLX5_IB_DBG_CC_NP_CNP_PRIO,
	MLX5_IB_DBG_CC_MAX,
};

struct mlx5_ib_dbg_cc_params {
	struct dentry			*root;
	struct mlx5_ib_dbg_param	params[MLX5_IB_DBG_CC_MAX];
};

enum {
	MLX5_MAX_DELAY_DROP_TIMEOUT_MS = 100,
};
struct mlx5_ib_dbg_delay_drop {
	struct dentry		*dir_debugfs;
	struct dentry		*rqs_cnt_debugfs;
	struct dentry		*events_cnt_debugfs;
	struct dentry		*timeout_debugfs;
};

struct mlx5_ib_delay_drop {
	struct mlx5_ib_dev     *dev;
	struct work_struct	delay_drop_work;
	/* serialize setting of delay drop */
	struct mutex		lock;
	atomic_t		events_cnt;
	atomic_t		rqs_cnt;
	struct mlx5_ib_dbg_delay_drop *dbg;
};
enum mlx5_ib_stages {
	MLX5_IB_STAGE_INIT,
	MLX5_IB_STAGE_FLOW_DB,
	MLX5_IB_STAGE_NON_DEFAULT_CB,
	MLX5_IB_STAGE_DEVICE_RESOURCES,
	MLX5_IB_STAGE_COUNTERS,
	MLX5_IB_STAGE_CONG_DEBUGFS,
	MLX5_IB_STAGE_PRE_IB_REG_UMR,
	MLX5_IB_STAGE_IB_REG,
	MLX5_IB_STAGE_POST_IB_REG_UMR,
	MLX5_IB_STAGE_DELAY_DROP,
	MLX5_IB_STAGE_CLASS_ATTR,
	MLX5_IB_STAGE_REP_REG,
	MLX5_IB_STAGE_MAX,
};

struct mlx5_ib_stage {
	int (*init)(struct mlx5_ib_dev *dev);
	void (*cleanup)(struct mlx5_ib_dev *dev);
};

#define STAGE_CREATE(_stage, _init, _cleanup) \
	.stage[_stage] = {.init = _init, .cleanup = _cleanup}

struct mlx5_ib_profile {
	struct mlx5_ib_stage stage[MLX5_IB_STAGE_MAX];
};
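/*
 * Example (sketch): a device profile is a table of init/cleanup pairs
 * built with STAGE_CREATE(), run in stage order by __mlx5_ib_add() and
 * unwound stage by stage by __mlx5_ib_remove():
 *
 *	static const struct mlx5_ib_profile pf_profile = {
 *		STAGE_CREATE(MLX5_IB_STAGE_INIT,
 *			     mlx5_ib_stage_init_init,
 *			     mlx5_ib_stage_init_cleanup),
 *	};
 */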
struct mlx5_ib_multiport_info {
	struct list_head list;
	struct mlx5_ib_dev *ibdev;
	struct mlx5_core_dev *mdev;
	struct completion unref_comp;
	u64 sys_image_guid;
	u32 mdev_refcnt;
	bool is_master;
	bool unaffiliate;
};

struct mlx5_ib_flow_action {
	struct ib_flow_action		ib_action;
	union {
		struct {
			u64			    ib_flags;
			struct mlx5_accel_esp_xfrm *ctx;
		} esp_aes_gcm;
		struct {
			struct mlx5_ib_dev *dev;
			u32 sub_type;
			u32 action_id;
		} flow_action_raw;
	};
};

struct mlx5_memic {
	struct mlx5_core_dev *dev;
	spinlock_t memic_lock;
	DECLARE_BITMAP(memic_alloc_pages, MLX5_MAX_MEMIC_PAGES);
};
struct mlx5_read_counters_attr {
	struct mlx5_fc *hw_cntrs_hndl;
	u64 *out;
	u32 flags;
};

enum mlx5_ib_counters_type {
	MLX5_IB_COUNTERS_FLOW,
};

struct mlx5_ib_mcounters {
	struct ib_counters ibcntrs;
	enum mlx5_ib_counters_type type;
	/* number of counters supported for this counters type */
	u32 counters_num;
	struct mlx5_fc *hw_cntrs_hndl;
	/* read function for this counters type */
	int (*read_counters)(struct ib_device *ibdev,
			     struct mlx5_read_counters_attr *read_attr);
	/* max index set as part of create_flow */
	u32 cntrs_max_index;
	/* number of counters data entries (<description,index> pair) */
	u32 ncounters;
	/* counters data array for descriptions and indexes */
	struct mlx5_ib_flow_counters_desc *counters_data;
	/* protects access to mcounters internal data */
	struct mutex mcntrs_mutex;
};

static inline struct mlx5_ib_mcounters *
to_mcounters(struct ib_counters *ibcntrs)
{
	return container_of(ibcntrs, struct mlx5_ib_mcounters, ibcntrs);
}
int parse_flow_flow_action(struct mlx5_ib_flow_action *maction,
			   bool is_egress,
			   struct mlx5_flow_act *action);

struct mlx5_ib_lb_state {
	/* protect the user_td */
	struct mutex		mutex;
	u32			user_td;
	int			qps;
	bool			enabled;
};
struct mlx5_ib_dev {
	struct ib_device		ib_dev;
	const struct uverbs_object_tree_def *driver_trees[7];
	struct mlx5_core_dev		*mdev;
	struct mlx5_roce		roce[MLX5_MAX_PORTS];
	int				num_ports;
	/* serialize update of capability mask
	 */
	struct mutex			cap_mask_mutex;
	struct umr_common		umrc;
	/* sync used page count stats
	 */
	struct mlx5_ib_resources	devr;
	struct mlx5_mr_cache		cache;
	struct timer_list		delay_timer;
	/* Prevents soft lock on massive reg MRs */
	struct mutex			slow_path_mutex;
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	struct ib_odp_caps	odp_caps;
	/*
	 * Sleepable RCU that prevents destruction of MRs while they are still
	 * being used by a page fault handler.
	 */
	struct srcu_struct	mr_srcu;
#endif
	struct mlx5_ib_flow_db	*flow_db;
	/* protect resources needed as part of reset flow */
	spinlock_t		reset_flow_resource_lock;
	struct list_head	qp_list;
	/* Array with num_ports elements */
	struct mlx5_ib_port	*port;
	struct mlx5_sq_bfreg	bfreg;
	struct mlx5_sq_bfreg	fp_bfreg;
	struct mlx5_ib_delay_drop	delay_drop;
	const struct mlx5_ib_profile	*profile;
	struct mlx5_eswitch_rep		*rep;

	struct mlx5_ib_lb_state		lb;
	struct list_head	ib_dev_list;
	struct mlx5_memic	memic;
};
static inline struct mlx5_ib_cq *to_mibcq(struct mlx5_core_cq *mcq)
{
	return container_of(mcq, struct mlx5_ib_cq, mcq);
}

static inline struct mlx5_ib_xrcd *to_mxrcd(struct ib_xrcd *ibxrcd)
{
	return container_of(ibxrcd, struct mlx5_ib_xrcd, ibxrcd);
}

static inline struct mlx5_ib_dev *to_mdev(struct ib_device *ibdev)
{
	return container_of(ibdev, struct mlx5_ib_dev, ib_dev);
}

static inline struct mlx5_ib_cq *to_mcq(struct ib_cq *ibcq)
{
	return container_of(ibcq, struct mlx5_ib_cq, ibcq);
}

static inline struct mlx5_ib_qp *to_mibqp(struct mlx5_core_qp *mqp)
{
	return container_of(mqp, struct mlx5_ib_qp_base, mqp)->container_mibqp;
}

static inline struct mlx5_ib_rwq *to_mibrwq(struct mlx5_core_qp *core_qp)
{
	return container_of(core_qp, struct mlx5_ib_rwq, core_qp);
}

static inline struct mlx5_ib_mr *to_mibmr(struct mlx5_core_mkey *mmkey)
{
	return container_of(mmkey, struct mlx5_ib_mr, mmkey);
}

static inline struct mlx5_ib_pd *to_mpd(struct ib_pd *ibpd)
{
	return container_of(ibpd, struct mlx5_ib_pd, ibpd);
}

static inline struct mlx5_ib_srq *to_msrq(struct ib_srq *ibsrq)
{
	return container_of(ibsrq, struct mlx5_ib_srq, ibsrq);
}

static inline struct mlx5_ib_qp *to_mqp(struct ib_qp *ibqp)
{
	return container_of(ibqp, struct mlx5_ib_qp, ibqp);
}

static inline struct mlx5_ib_rwq *to_mrwq(struct ib_wq *ibwq)
{
	return container_of(ibwq, struct mlx5_ib_rwq, ibwq);
}

static inline struct mlx5_ib_rwq_ind_table *to_mrwq_ind_table(struct ib_rwq_ind_table *ib_rwq_ind_tbl)
{
	return container_of(ib_rwq_ind_tbl, struct mlx5_ib_rwq_ind_table, ib_rwq_ind_tbl);
}

static inline struct mlx5_ib_srq *to_mibsrq(struct mlx5_core_srq *msrq)
{
	return container_of(msrq, struct mlx5_ib_srq, msrq);
}

static inline struct mlx5_ib_dm *to_mdm(struct ib_dm *ibdm)
{
	return container_of(ibdm, struct mlx5_ib_dm, ibdm);
}

static inline struct mlx5_ib_mr *to_mmr(struct ib_mr *ibmr)
{
	return container_of(ibmr, struct mlx5_ib_mr, ibmr);
}

static inline struct mlx5_ib_mw *to_mmw(struct ib_mw *ibmw)
{
	return container_of(ibmw, struct mlx5_ib_mw, ibmw);
}

static inline struct mlx5_ib_flow_action *
to_mflow_act(struct ib_flow_action *ibact)
{
	return container_of(ibact, struct mlx5_ib_flow_action, ib_action);
}
int mlx5_ib_db_map_user(struct mlx5_ib_ucontext *context, unsigned long virt,
			struct mlx5_db *db);
void mlx5_ib_db_unmap_user(struct mlx5_ib_ucontext *context, struct mlx5_db *db);
void __mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 qpn, struct mlx5_ib_srq *srq);
void mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 qpn, struct mlx5_ib_srq *srq);
void mlx5_ib_free_srq_wqe(struct mlx5_ib_srq *srq, int wqe_index);
int mlx5_MAD_IFC(struct mlx5_ib_dev *dev, int ignore_mkey, int ignore_bkey,
		 u8 port, const struct ib_wc *in_wc, const struct ib_grh *in_grh,
		 const void *in_mad, void *response_mad);
struct ib_ah *mlx5_ib_create_ah(struct ib_pd *pd, struct rdma_ah_attr *ah_attr,
				struct ib_udata *udata);
int mlx5_ib_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr);
int mlx5_ib_destroy_ah(struct ib_ah *ah);
struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
				  struct ib_srq_init_attr *init_attr,
				  struct ib_udata *udata);
int mlx5_ib_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
		       enum ib_srq_attr_mask attr_mask, struct ib_udata *udata);
int mlx5_ib_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr);
int mlx5_ib_destroy_srq(struct ib_srq *srq);
int mlx5_ib_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
			  const struct ib_recv_wr **bad_wr);
int mlx5_ib_enable_lb(struct mlx5_ib_dev *dev, bool td, bool qp);
void mlx5_ib_disable_lb(struct mlx5_ib_dev *dev, bool td, bool qp);
struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd,
				struct ib_qp_init_attr *init_attr,
				struct ib_udata *udata);
int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		      int attr_mask, struct ib_udata *udata);
int mlx5_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_mask,
		     struct ib_qp_init_attr *qp_init_attr);
int mlx5_ib_destroy_qp(struct ib_qp *qp);
void mlx5_ib_drain_sq(struct ib_qp *qp);
void mlx5_ib_drain_rq(struct ib_qp *qp);
int mlx5_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
		      const struct ib_send_wr **bad_wr);
int mlx5_ib_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
		      const struct ib_recv_wr **bad_wr);
void *mlx5_get_send_wqe(struct mlx5_ib_qp *qp, int n);
int mlx5_ib_read_user_wqe(struct mlx5_ib_qp *qp, int send, int wqe_index,
			  void *buffer, u32 length,
			  struct mlx5_ib_qp_base *base);
struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev,
				const struct ib_cq_init_attr *attr,
				struct ib_ucontext *context,
				struct ib_udata *udata);
int mlx5_ib_destroy_cq(struct ib_cq *cq);
int mlx5_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
int mlx5_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
int mlx5_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period);
int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata);
struct ib_mr *mlx5_ib_get_dma_mr(struct ib_pd *pd, int acc);
struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
				  u64 virt_addr, int access_flags,
				  struct ib_udata *udata);
struct ib_mw *mlx5_ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
			       struct ib_udata *udata);
int mlx5_ib_dealloc_mw(struct ib_mw *mw);
int mlx5_ib_update_xlt(struct mlx5_ib_mr *mr, u64 idx, int npages,
		       int page_shift, int flags);
struct mlx5_ib_mr *mlx5_ib_alloc_implicit_mr(struct mlx5_ib_pd *pd,
					     int access_flags);
void mlx5_ib_free_implicit_mr(struct mlx5_ib_mr *mr);
int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
			  u64 length, u64 virt_addr, int access_flags,
			  struct ib_pd *pd, struct ib_udata *udata);
int mlx5_ib_dereg_mr(struct ib_mr *ibmr);
struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd,
			       enum ib_mr_type mr_type,
			       u32 max_num_sg);
int mlx5_ib_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
		      unsigned int *sg_offset);
int mlx5_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
			const struct ib_wc *in_wc, const struct ib_grh *in_grh,
			const struct ib_mad_hdr *in, size_t in_mad_size,
			struct ib_mad_hdr *out, size_t *out_mad_size,
			u16 *out_mad_pkey_index);
struct ib_xrcd *mlx5_ib_alloc_xrcd(struct ib_device *ibdev,
				   struct ib_ucontext *context,
				   struct ib_udata *udata);
int mlx5_ib_dealloc_xrcd(struct ib_xrcd *xrcd);
int mlx5_ib_get_buf_offset(u64 addr, int page_shift, u32 *offset);
int mlx5_query_ext_port_caps(struct mlx5_ib_dev *dev, u8 port);
int mlx5_query_mad_ifc_smp_attr_node_info(struct ib_device *ibdev,
					  struct ib_smp *out_mad);
int mlx5_query_mad_ifc_system_image_guid(struct ib_device *ibdev,
					 __be64 *sys_image_guid);
int mlx5_query_mad_ifc_max_pkeys(struct ib_device *ibdev,
				 u16 *max_pkeys);
int mlx5_query_mad_ifc_vendor_id(struct ib_device *ibdev,
				 u32 *vendor_id);
int mlx5_query_mad_ifc_node_desc(struct mlx5_ib_dev *dev, char *node_desc);
int mlx5_query_mad_ifc_node_guid(struct mlx5_ib_dev *dev, __be64 *node_guid);
int mlx5_query_mad_ifc_pkey(struct ib_device *ibdev, u8 port, u16 index,
			    u16 *pkey);
int mlx5_query_mad_ifc_gids(struct ib_device *ibdev, u8 port, int index,
			    union ib_gid *gid);
int mlx5_query_mad_ifc_port(struct ib_device *ibdev, u8 port,
			    struct ib_port_attr *props);
int mlx5_ib_query_port(struct ib_device *ibdev, u8 port,
		       struct ib_port_attr *props);
int mlx5_ib_init_fmr(struct mlx5_ib_dev *dev);
void mlx5_ib_cleanup_fmr(struct mlx5_ib_dev *dev);
void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr,
			unsigned long max_page_shift,
			int *count, int *shift,
			int *ncont, int *order);
void __mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
			    int page_shift, size_t offset, size_t num_pages,
			    __be64 *pas, int access_flags);
void mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
			  int page_shift, __be64 *pas, int access_flags);
void mlx5_ib_copy_pas(u64 *old, u64 *new, int step, int num);
int mlx5_ib_get_cqe_size(struct mlx5_ib_dev *dev, struct ib_cq *ibcq);
int mlx5_mr_cache_init(struct mlx5_ib_dev *dev);
int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev);

struct mlx5_ib_mr *mlx5_mr_cache_alloc(struct mlx5_ib_dev *dev, int entry);
void mlx5_mr_cache_free(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
int mlx5_ib_check_mr_status(struct ib_mr *ibmr, u32 check_mask,
			    struct ib_mr_status *mr_status);
struct ib_wq *mlx5_ib_create_wq(struct ib_pd *pd,
				struct ib_wq_init_attr *init_attr,
				struct ib_udata *udata);
int mlx5_ib_destroy_wq(struct ib_wq *wq);
int mlx5_ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *wq_attr,
		      u32 wq_attr_mask, struct ib_udata *udata);
struct ib_rwq_ind_table *mlx5_ib_create_rwq_ind_table(struct ib_device *device,
						      struct ib_rwq_ind_table_init_attr *init_attr,
						      struct ib_udata *udata);
int mlx5_ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *wq_ind_table);
bool mlx5_ib_dc_atomic_is_supported(struct mlx5_ib_dev *dev);
struct ib_dm *mlx5_ib_alloc_dm(struct ib_device *ibdev,
			       struct ib_ucontext *context,
			       struct ib_dm_alloc_attr *attr,
			       struct uverbs_attr_bundle *attrs);
int mlx5_ib_dealloc_dm(struct ib_dm *ibdm);
struct ib_mr *mlx5_ib_reg_dm_mr(struct ib_pd *pd, struct ib_dm *dm,
				struct ib_dm_mr_attr *attr,
				struct uverbs_attr_bundle *attrs);
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev);
void mlx5_ib_pfault(struct mlx5_core_dev *mdev, void *context,
		    struct mlx5_pagefault *pfault);
int mlx5_ib_odp_init_one(struct mlx5_ib_dev *ibdev);
int __init mlx5_ib_odp_init(void);
void mlx5_ib_odp_cleanup(void);
void mlx5_ib_invalidate_range(struct ib_umem_odp *umem_odp, unsigned long start,
			      unsigned long end);
void mlx5_odp_init_mr_cache_entry(struct mlx5_cache_ent *ent);
void mlx5_odp_populate_klm(struct mlx5_klm *pklm, size_t offset,
			   size_t nentries, struct mlx5_ib_mr *mr, int flags);
#else /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */
static inline void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev) {}
static inline int mlx5_ib_odp_init_one(struct mlx5_ib_dev *ibdev) { return 0; }
static inline int mlx5_ib_odp_init(void) { return 0; }
static inline void mlx5_ib_odp_cleanup(void) {}
static inline void mlx5_odp_init_mr_cache_entry(struct mlx5_cache_ent *ent) {}
static inline void mlx5_odp_populate_klm(struct mlx5_klm *pklm, size_t offset,
					 size_t nentries, struct mlx5_ib_mr *mr,
					 int flags) {}

#endif /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */
/* Needed for rep profile */
int mlx5_ib_stage_init_init(struct mlx5_ib_dev *dev);
void mlx5_ib_stage_init_cleanup(struct mlx5_ib_dev *dev);
int mlx5_ib_stage_rep_flow_db_init(struct mlx5_ib_dev *dev);
int mlx5_ib_stage_caps_init(struct mlx5_ib_dev *dev);
int mlx5_ib_stage_rep_non_default_cb(struct mlx5_ib_dev *dev);
int mlx5_ib_stage_rep_roce_init(struct mlx5_ib_dev *dev);
void mlx5_ib_stage_rep_roce_cleanup(struct mlx5_ib_dev *dev);
int mlx5_ib_stage_dev_res_init(struct mlx5_ib_dev *dev);
void mlx5_ib_stage_dev_res_cleanup(struct mlx5_ib_dev *dev);
int mlx5_ib_stage_counters_init(struct mlx5_ib_dev *dev);
void mlx5_ib_stage_counters_cleanup(struct mlx5_ib_dev *dev);
int mlx5_ib_stage_bfrag_init(struct mlx5_ib_dev *dev);
void mlx5_ib_stage_bfrag_cleanup(struct mlx5_ib_dev *dev);
void mlx5_ib_stage_pre_ib_reg_umr_cleanup(struct mlx5_ib_dev *dev);
int mlx5_ib_stage_ib_reg_init(struct mlx5_ib_dev *dev);
void mlx5_ib_stage_ib_reg_cleanup(struct mlx5_ib_dev *dev);
int mlx5_ib_stage_post_ib_reg_umr_init(struct mlx5_ib_dev *dev);
int mlx5_ib_stage_class_attr_init(struct mlx5_ib_dev *dev);
void __mlx5_ib_remove(struct mlx5_ib_dev *dev,
		      const struct mlx5_ib_profile *profile,
		      int stage);
void *__mlx5_ib_add(struct mlx5_ib_dev *dev,
		    const struct mlx5_ib_profile *profile);
int mlx5_ib_get_vf_config(struct ib_device *device, int vf,
			  u8 port, struct ifla_vf_info *info);
int mlx5_ib_set_vf_link_state(struct ib_device *device, int vf,
			      u8 port, int state);
int mlx5_ib_get_vf_stats(struct ib_device *device, int vf,
			 u8 port, struct ifla_vf_stats *stats);
int mlx5_ib_set_vf_guid(struct ib_device *device, int vf, u8 port,
			u64 guid, int type);

__be16 mlx5_get_roce_udp_sport(struct mlx5_ib_dev *dev,
			       const struct ib_gid_attr *attr);

void mlx5_ib_cleanup_cong_debugfs(struct mlx5_ib_dev *dev, u8 port_num);
int mlx5_ib_init_cong_debugfs(struct mlx5_ib_dev *dev, u8 port_num);
/* GSI QP helper functions */
struct ib_qp *mlx5_ib_gsi_create_qp(struct ib_pd *pd,
				    struct ib_qp_init_attr *init_attr);
int mlx5_ib_gsi_destroy_qp(struct ib_qp *qp);
int mlx5_ib_gsi_modify_qp(struct ib_qp *qp, struct ib_qp_attr *attr,
			  int attr_mask);
int mlx5_ib_gsi_query_qp(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
			 int qp_attr_mask,
			 struct ib_qp_init_attr *qp_init_attr);
int mlx5_ib_gsi_post_send(struct ib_qp *qp, const struct ib_send_wr *wr,
			  const struct ib_send_wr **bad_wr);
int mlx5_ib_gsi_post_recv(struct ib_qp *qp, const struct ib_recv_wr *wr,
			  const struct ib_recv_wr **bad_wr);
void mlx5_ib_gsi_pkey_change(struct mlx5_ib_gsi_qp *gsi);
int mlx5_ib_generate_wc(struct ib_cq *ibcq, struct ib_wc *wc);

void mlx5_ib_free_bfreg(struct mlx5_ib_dev *dev, struct mlx5_bfreg_info *bfregi,
			int bfregn);
struct mlx5_ib_dev *mlx5_ib_get_ibdev_from_mpi(struct mlx5_ib_multiport_info *mpi);
struct mlx5_core_dev *mlx5_ib_get_native_port_mdev(struct mlx5_ib_dev *dev,
						   u8 ib_port_num,
						   u8 *native_port_num);
void mlx5_ib_put_native_port_mdev(struct mlx5_ib_dev *dev,
				  u8 port_num);
#if IS_ENABLED(CONFIG_INFINIBAND_USER_ACCESS)
int mlx5_ib_devx_create(struct mlx5_ib_dev *dev,
			struct mlx5_ib_ucontext *context);
void mlx5_ib_devx_destroy(struct mlx5_ib_dev *dev,
			  struct mlx5_ib_ucontext *context);
const struct uverbs_object_tree_def *mlx5_ib_get_devx_tree(void);
struct mlx5_ib_flow_handler *mlx5_ib_raw_fs_rule_add(
	struct mlx5_ib_dev *dev, struct mlx5_ib_flow_matcher *fs_matcher,
	struct mlx5_flow_act *flow_act, void *cmd_in, int inlen,
	int dest_id, int dest_type);
bool mlx5_ib_devx_is_flow_dest(void *obj, int *dest_id, int *dest_type);
int mlx5_ib_get_flow_trees(const struct uverbs_object_tree_def **root);
void mlx5_ib_destroy_flow_action_raw(struct mlx5_ib_flow_action *maction);
#else
static inline int
mlx5_ib_devx_create(struct mlx5_ib_dev *dev,
		    struct mlx5_ib_ucontext *context) { return -EOPNOTSUPP; }
static inline void mlx5_ib_devx_destroy(struct mlx5_ib_dev *dev,
					struct mlx5_ib_ucontext *context) {}
static inline const struct uverbs_object_tree_def *
mlx5_ib_get_devx_tree(void) { return NULL; }
static inline bool mlx5_ib_devx_is_flow_dest(void *obj, int *dest_id,
					     int *dest_type)
{
	return false;
}
static inline int
mlx5_ib_get_flow_trees(const struct uverbs_object_tree_def **root)
{
	return 0;
}
static inline void
mlx5_ib_destroy_flow_action_raw(struct mlx5_ib_flow_action *maction)
{
}
#endif
static inline void init_query_mad(struct ib_smp *mad)
{
	mad->base_version  = 1;
	mad->mgmt_class    = IB_MGMT_CLASS_SUBN_LID_ROUTED;
	mad->class_version = 1;
	mad->method	   = IB_MGMT_METHOD_GET;
}
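/*
 * Example: callers allocate the MAD, initialize the common header with
 * init_query_mad() and then pick the attribute to query:
 *
 *	struct ib_smp *in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL);
 *
 *	init_query_mad(in_mad);
 *	in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;
 */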
static inline u8 convert_access(int acc)
{
	return (acc & IB_ACCESS_REMOTE_ATOMIC ? MLX5_PERM_ATOMIC       : 0) |
	       (acc & IB_ACCESS_REMOTE_WRITE  ? MLX5_PERM_REMOTE_WRITE : 0) |
	       (acc & IB_ACCESS_REMOTE_READ   ? MLX5_PERM_REMOTE_READ  : 0) |
	       (acc & IB_ACCESS_LOCAL_WRITE   ? MLX5_PERM_LOCAL_WRITE  : 0) |
	       MLX5_PERM_LOCAL_READ;
}
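/*
 * Example: convert_access() folds IB access flags into the MLX5_PERM_*
 * bits of an mkey context; local read access is always granted:
 *
 *	u8 perms = convert_access(IB_ACCESS_LOCAL_WRITE |
 *				  IB_ACCESS_REMOTE_READ);
 */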
static inline int is_qp1(enum ib_qp_type qp_type)
{
	return qp_type == MLX5_IB_QPT_HW_GSI;
}

#define MLX5_MAX_UMR_SHIFT 16
#define MLX5_MAX_UMR_PAGES (1 << MLX5_MAX_UMR_SHIFT)
static inline u32 check_cq_create_flags(u32 flags)
{
	/*
	 * It returns non-zero value for unsupported CQ
	 * create flags, otherwise it returns zero.
	 */
	return (flags & ~(IB_UVERBS_CQ_FLAGS_IGNORE_OVERRUN |
			  IB_UVERBS_CQ_FLAGS_TIMESTAMP_COMPLETION));
}
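/*
 * Example (sketch): a CQ creation path can reject user-supplied flags that
 * the driver does not implement:
 *
 *	if (check_cq_create_flags(flags))
 *		... fail the create request ...
 */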
static inline int verify_assign_uidx(u8 cqe_version, u32 cmd_uidx,
				     u32 *user_index)
{
	if (cqe_version) {
		if ((cmd_uidx == MLX5_IB_DEFAULT_UIDX) ||
		    (cmd_uidx & ~MLX5_USER_ASSIGNED_UIDX_MASK))
			return -EINVAL;
		*user_index = cmd_uidx;
	} else {
		*user_index = MLX5_IB_DEFAULT_UIDX;
	}

	return 0;
}
static inline int get_qp_user_index(struct mlx5_ib_ucontext *ucontext,
				    struct mlx5_ib_create_qp *ucmd,
				    int inlen,
				    u32 *user_index)
{
	u8 cqe_version = ucontext->cqe_version;

	if (field_avail(struct mlx5_ib_create_qp, uidx, inlen) &&
	    !cqe_version && (ucmd->uidx == MLX5_IB_DEFAULT_UIDX))
		return 0;

	if (!!(field_avail(struct mlx5_ib_create_qp, uidx, inlen) !=
	       !!cqe_version))
		return -EINVAL;

	return verify_assign_uidx(cqe_version, ucmd->uidx, user_index);
}
static inline int get_srq_user_index(struct mlx5_ib_ucontext *ucontext,
				     struct mlx5_ib_create_srq *ucmd,
				     int inlen,
				     u32 *user_index)
{
	u8 cqe_version = ucontext->cqe_version;

	if (field_avail(struct mlx5_ib_create_srq, uidx, inlen) &&
	    !cqe_version && (ucmd->uidx == MLX5_IB_DEFAULT_UIDX))
		return 0;

	if (!!(field_avail(struct mlx5_ib_create_srq, uidx, inlen) !=
	       !!cqe_version))
		return -EINVAL;

	return verify_assign_uidx(cqe_version, ucmd->uidx, user_index);
}
static inline int get_uars_per_sys_page(struct mlx5_ib_dev *dev, bool lib_support)
{
	return lib_support && MLX5_CAP_GEN(dev->mdev, uar_4k) ?
				MLX5_UARS_IN_PAGE : 1;
}

static inline int get_num_static_uars(struct mlx5_ib_dev *dev,
				      struct mlx5_bfreg_info *bfregi)
{
	return get_uars_per_sys_page(dev, bfregi->lib_uar_4k) * bfregi->num_static_sys_pages;
}
unsigned long mlx5_ib_get_xlt_emergency_page(void);
void mlx5_ib_put_xlt_emergency_page(void);

int bfregn_to_uar_index(struct mlx5_ib_dev *dev,
			struct mlx5_bfreg_info *bfregi, u32 bfregn,
			bool dyn_bfreg);

#endif /* MLX5_IB_H */