/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef MLX5_DRIVER_H
#define MLX5_DRIVER_H

#include <linux/kernel.h>
#include <linux/completion.h>
#include <linux/pci.h>
#include <linux/irq.h>
#include <linux/spinlock_types.h>
#include <linux/semaphore.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/xarray.h>
#include <linux/workqueue.h>
#include <linux/mempool.h>
#include <linux/interrupt.h>
#include <linux/idr.h>
#include <linux/notifier.h>

#include <linux/mlx5/device.h>
#include <linux/mlx5/doorbell.h>
#include <linux/mlx5/eq.h>
#include <linux/timecounter.h>
#include <linux/ptp_clock_kernel.h>
#include <net/devlink.h>

enum {
	MLX5_BOARD_ID_LEN = 64,
};

enum {
	/* One minute for the sake of bringup. Generally, commands must
	 * always complete, and we may need to increase this timeout value.
	 */
	MLX5_CMD_TIMEOUT_MSEC	= 60 * 1000,
	MLX5_CMD_WQ_MAX_NAME	= 32,
};

enum {
	CMD_STATUS_SUCCESS = 0,
};

enum {
	MLX5_SQP_IEEE_1588	= 2,
	MLX5_SQP_SYNC_UMR	= 4,
};

enum {
	MLX5_ATOMIC_MODE_OFFSET	 = 16,
	MLX5_ATOMIC_MODE_IB_COMP = 1,
	MLX5_ATOMIC_MODE_CX	 = 2,
	MLX5_ATOMIC_MODE_8B	 = 3,
	MLX5_ATOMIC_MODE_16B	 = 4,
	MLX5_ATOMIC_MODE_32B	 = 5,
	MLX5_ATOMIC_MODE_64B	 = 6,
	MLX5_ATOMIC_MODE_128B	 = 7,
	MLX5_ATOMIC_MODE_256B	 = 8,
};

enum {
	MLX5_REG_QPTS		 = 0x4002,
	MLX5_REG_QETCR		 = 0x4005,
	MLX5_REG_QTCT		 = 0x400a,
	MLX5_REG_QPDPM		 = 0x4013,
	MLX5_REG_QCAM		 = 0x4019,
	MLX5_REG_DCBX_PARAM	 = 0x4020,
	MLX5_REG_DCBX_APP	 = 0x4021,
	MLX5_REG_FPGA_CAP	 = 0x4022,
	MLX5_REG_FPGA_CTRL	 = 0x4023,
	MLX5_REG_FPGA_ACCESS_REG = 0x4024,
	MLX5_REG_CORE_DUMP	 = 0x402e,
	MLX5_REG_PCAP		 = 0x5001,
	MLX5_REG_PMTU		 = 0x5003,
	MLX5_REG_PTYS		 = 0x5004,
	MLX5_REG_PAOS		 = 0x5006,
	MLX5_REG_PFCC		 = 0x5007,
	MLX5_REG_PPCNT		 = 0x5008,
	MLX5_REG_PPTB		 = 0x500b,
	MLX5_REG_PBMC		 = 0x500c,
	MLX5_REG_PMAOS		 = 0x5012,
	MLX5_REG_PUDE		 = 0x5009,
	MLX5_REG_PMPE		 = 0x5010,
	MLX5_REG_PELC		 = 0x500e,
	MLX5_REG_PVLC		 = 0x500f,
	MLX5_REG_PCMR		 = 0x5041,
	MLX5_REG_PMLP		 = 0x5002,
	MLX5_REG_PPLM		 = 0x5023,
	MLX5_REG_PCAM		 = 0x507f,
	MLX5_REG_NODE_DESC	 = 0x6001,
	MLX5_REG_HOST_ENDIANNESS = 0x7004,
	MLX5_REG_MCIA		 = 0x9014,
	MLX5_REG_MLCR		 = 0x902b,
	MLX5_REG_MTRC_CAP	 = 0x9040,
	MLX5_REG_MTRC_CONF	 = 0x9041,
	MLX5_REG_MTRC_STDB	 = 0x9042,
	MLX5_REG_MTRC_CTRL	 = 0x9043,
	MLX5_REG_MPEIN		 = 0x9050,
	MLX5_REG_MPCNT		 = 0x9051,
	MLX5_REG_MTPPS		 = 0x9053,
	MLX5_REG_MTPPSE		 = 0x9054,
	MLX5_REG_MPEGC		 = 0x9056,
	MLX5_REG_MCQS		 = 0x9060,
	MLX5_REG_MCQI		 = 0x9061,
	MLX5_REG_MCC		 = 0x9062,
	MLX5_REG_MCDA		 = 0x9063,
	MLX5_REG_MCAM		 = 0x907f,
};

enum mlx5_qpts_trust_state {
	MLX5_QPTS_TRUST_PCP  = 1,
	MLX5_QPTS_TRUST_DSCP = 2,
};

enum mlx5_dcbx_oper_mode {
	MLX5E_DCBX_PARAM_VER_OPER_HOST = 0x0,
	MLX5E_DCBX_PARAM_VER_OPER_AUTO = 0x3,
};

enum {
	MLX5_ATOMIC_OPS_CMP_SWAP		= 1 << 0,
	MLX5_ATOMIC_OPS_FETCH_ADD		= 1 << 1,
	MLX5_ATOMIC_OPS_EXTENDED_CMP_SWAP	= 1 << 2,
	MLX5_ATOMIC_OPS_EXTENDED_FETCH_ADD	= 1 << 3,
};

enum mlx5_page_fault_resume_flags {
	MLX5_PAGE_FAULT_RESUME_REQUESTOR = 1 << 0,
	MLX5_PAGE_FAULT_RESUME_WRITE	 = 1 << 1,
	MLX5_PAGE_FAULT_RESUME_RDMA	 = 1 << 2,
	MLX5_PAGE_FAULT_RESUME_ERROR	 = 1 << 7,
};

enum port_state_policy {
	MLX5_POLICY_DOWN	= 0,
	MLX5_POLICY_FOLLOW	= 2,
	MLX5_POLICY_INVALID	= 0xffffffff
};

enum mlx5_coredev_type {
	MLX5_COREDEV_PF,
	MLX5_COREDEV_VF
};

struct mlx5_field_desc {
	struct dentry	       *dent;
	int			i;
};

struct mlx5_rsc_debug {
	struct mlx5_core_dev   *dev;
	enum dbg_rsc_type	type;
	struct mlx5_field_desc	fields[0];
};

enum mlx5_dev_event {
	MLX5_DEV_EVENT_SYS_ERROR = 128, /* 0 - 127 are FW events */
	MLX5_DEV_EVENT_PORT_AFFINITY = 129,
};

enum mlx5_port_status {
	MLX5_PORT_UP	= 1,
	MLX5_PORT_DOWN	= 2,
};

struct mlx5_bfreg_info {
	int			num_low_latency_bfregs;

	/* protects bfreg allocation data structs */
	struct mutex		lock;
	u32			num_static_sys_pages;
	u32			total_num_bfregs;
};

struct mlx5_cmd_first {
	__be32		data[4];
};

struct mlx5_cmd_msg {
	struct list_head	list;
	struct cmd_msg_cache   *parent;
	struct mlx5_cmd_first	first;
	struct mlx5_cmd_mailbox *next;
};

struct mlx5_cmd_debug {
	struct dentry  *dbg_root;
	struct dentry  *dbg_in;
	struct dentry  *dbg_out;
	struct dentry  *dbg_outlen;
	struct dentry  *dbg_status;
	struct dentry  *dbg_run;
};

struct cmd_msg_cache {
	/* protects block chain allocations */
	spinlock_t		lock;
	struct list_head	head;
	unsigned int		max_inbox_size;
	unsigned int		num_ent;
};

enum {
	MLX5_NUM_COMMAND_CACHES = 5,
};

struct mlx5_cmd_stats {
	struct dentry  *count;
	/* protect command average calculations */
	spinlock_t	lock;
};

struct mlx5_cmd {
	dma_addr_t	alloc_dma;

	/* protect command queue allocations */
	spinlock_t	alloc_lock;

	/* protect token allocations */
	spinlock_t	token_lock;

	unsigned long	bitmask;
	char		wq_name[MLX5_CMD_WQ_MAX_NAME];
	struct workqueue_struct *wq;
	struct semaphore sem;
	struct semaphore pages_sem;
	struct mlx5_cmd_work_ent *ent_arr[MLX5_MAX_COMMANDS];
	struct dma_pool *pool;
	struct mlx5_cmd_debug dbg;
	struct cmd_msg_cache cache[MLX5_NUM_COMMAND_CACHES];
	int		checksum_disabled;
	struct mlx5_cmd_stats stats[MLX5_CMD_OP_MAX];
};

struct mlx5_port_caps {
	int	gid_table_len;
	int	pkey_table_len;
	u8	ext_port_cap;
	bool	has_smi;
};

struct mlx5_cmd_mailbox {
	void		       *buf;
	dma_addr_t		dma;
	struct mlx5_cmd_mailbox *next;
};

struct mlx5_buf_list {
	void		       *buf;
	dma_addr_t		map;
};

struct mlx5_frag_buf {
	struct mlx5_buf_list   *frags;
};

struct mlx5_frag_buf_ctrl {
	struct mlx5_buf_list   *frags;
	u32			sz_m1;
	u16			frag_sz_m1;
	u16			strides_offset;
	u8			log_sz;
	u8			log_stride;
	u8			log_frag_strides;
};

struct mlx5_core_psv {
	u32			psv_idx;
};

struct mlx5_core_sig_ctx {
	struct mlx5_core_psv	psv_memory;
	struct mlx5_core_psv	psv_wire;
	struct ib_sig_err	err_item;
	bool			sig_status_checked;
};

enum {
	MLX5_MKEY_MR = 1,
	MLX5_MKEY_MW,
	MLX5_MKEY_INDIRECT_DEVX,
};

struct mlx5_core_mkey {
	u64			iova;
	u64			size;
	u32			key;
	u32			pd;
};

#define MLX5_24BIT_MASK		((1 << 24) - 1)

enum mlx5_res_type {
	MLX5_RES_QP	= MLX5_EVENT_QUEUE_TYPE_QP,
	MLX5_RES_RQ	= MLX5_EVENT_QUEUE_TYPE_RQ,
	MLX5_RES_SQ	= MLX5_EVENT_QUEUE_TYPE_SQ,
	MLX5_RES_DCT	= MLX5_EVENT_QUEUE_TYPE_DCT,
};

struct mlx5_core_rsc_common {
	enum mlx5_res_type	res;
	struct completion	free;
};

struct mlx5_uars_page {
	struct list_head	list;
	unsigned long	       *reg_bitmap; /* for non fast path bf regs */
	unsigned long	       *fp_bitmap;
	unsigned int		reg_avail;
	unsigned int		fp_avail;
	struct kref		ref_count;
	struct mlx5_core_dev   *mdev;
};

struct mlx5_bfreg_head {
	/* protect blue flame registers allocations */
	struct mutex		lock;
	struct list_head	list;
};

struct mlx5_bfreg_data {
	struct mlx5_bfreg_head	reg_head;
	struct mlx5_bfreg_head	wc_head;
};

struct mlx5_sq_bfreg {
	struct mlx5_uars_page  *up;
};

struct mlx5_core_health {
	struct health_buffer __iomem   *health;
	__be32 __iomem		       *health_counter;
	struct timer_list		timer;

	/* wq spinlock to synchronize draining */
	spinlock_t			wq_lock;
	struct workqueue_struct	       *wq;
	struct work_struct		fatal_report_work;
	struct work_struct		report_work;
	struct delayed_work		recover_work;
	struct devlink_health_reporter *fw_reporter;
	struct devlink_health_reporter *fw_fatal_reporter;
};

struct mlx5_qp_table {
	struct notifier_block	nb;

	/* protect radix tree */
	spinlock_t		lock;
	struct radix_tree_root	tree;
};

struct mlx5_vf_context {
	enum port_state_policy	policy;
};

struct mlx5_core_sriov {
	struct mlx5_vf_context *vfs_ctx;
	u16			max_vfs;
};

struct mlx5_fc_stats {
	spinlock_t counters_idr_lock; /* protects counters_idr */
	struct idr counters_idr;
	struct list_head counters;
	struct llist_head addlist;
	struct llist_head dellist;

	struct workqueue_struct *wq;
	struct delayed_work work;
	unsigned long next_query;
	unsigned long sampling_interval; /* jiffies */
};

struct mlx5_eq_table;
struct mlx5_irq_table;

struct mlx5_rate_limit {
	u32			rate;
	u32			max_burst_sz;
	u16			typical_pkt_sz;
};

struct mlx5_rl_entry {
	struct mlx5_rate_limit	rl;
};

struct mlx5_rl_table {
	/* protect rate limit table */
	struct mutex		rl_lock;
	u16			max_size;
	struct mlx5_rl_entry   *rl_entry;
};

struct mlx5_core_roce {
	struct mlx5_flow_table	*ft;
	struct mlx5_flow_group	*fg;
	struct mlx5_flow_handle	*allow_rule;
};

struct mlx5_priv {
	/* IRQ table valid only for real pci devices PF or VF */
	struct mlx5_irq_table	*irq_table;
	struct mlx5_eq_table	*eq_table;

	struct mlx5_nb		pg_nb;
	struct workqueue_struct *pg_wq;
	struct rb_root		page_root;
	struct list_head	free_list;

	struct mlx5_core_health health;

	/* start: qp stuff */
	struct mlx5_qp_table	qp_table;
	struct dentry	       *qp_debugfs;
	struct dentry	       *eq_debugfs;
	struct dentry	       *cq_debugfs;
	struct dentry	       *cmdif_debugfs;

	struct xarray		mkey_table;

	/* start: alloc stuff */
	/* protect buffer allocation according to numa node */
	struct mutex		alloc_mutex;
	struct mutex		pgdir_mutex;
	struct list_head	pgdir_list;
	/* end: alloc stuff */
	struct dentry	       *dbg_root;

	/* protect mkey key part */
	spinlock_t		mkey_lock;

	struct list_head	dev_list;
	struct list_head	ctx_list;

	struct mlx5_events     *events;

	struct mlx5_flow_steering *steering;
	struct mlx5_mpfs       *mpfs;
	struct mlx5_eswitch    *eswitch;
	struct mlx5_core_sriov	sriov;
	struct mlx5_lag	       *lag;
	struct mlx5_devcom     *devcom;
	struct mlx5_core_roce	roce;
	struct mlx5_fc_stats	fc_stats;
	struct mlx5_rl_table	rl_table;

	struct mlx5_bfreg_data	bfregs;
	struct mlx5_uars_page  *uar;
};

enum mlx5_device_state {
	MLX5_DEVICE_STATE_UNINITIALIZED,
	MLX5_DEVICE_STATE_UP,
	MLX5_DEVICE_STATE_INTERNAL_ERROR,
};

enum mlx5_interface_state {
	MLX5_INTERFACE_STATE_UP = BIT(0),
};

enum mlx5_pci_status {
	MLX5_PCI_STATUS_DISABLED,
	MLX5_PCI_STATUS_ENABLED,
};

enum mlx5_pagefault_type_flags {
	MLX5_PFAULT_REQUESTOR = 1 << 0,
	MLX5_PFAULT_WRITE     = 1 << 1,
	MLX5_PFAULT_RDMA      = 1 << 2,
};

struct mlx5_td {
	/* protects tirs list changes while tirs refresh */
	struct mutex		list_lock;
	struct list_head	tirs_list;
};

struct mlx5e_resources {
	struct mlx5_core_mkey	mkey;
	struct mlx5_sq_bfreg	bfreg;
};

#define MLX5_MAX_RESERVED_GIDS 8

struct mlx5_rsvd_gids {
	unsigned int	start;
	unsigned int	count;
	struct ida	ida;
};

#define MAX_PIN_NUM	8
struct mlx5_pps {
	u8			pin_caps[MAX_PIN_NUM];
	struct work_struct	out_work;
	u64			start[MAX_PIN_NUM];
};

struct mlx5_clock {
	struct mlx5_core_dev   *mdev;
	struct mlx5_nb		pps_nb;
	struct cyclecounter	cycles;
	struct timecounter	tc;
	struct hwtstamp_config	hwtstamp_config;
	unsigned long		overflow_period;
	struct delayed_work	overflow_work;
	struct ptp_clock       *ptp;
	struct ptp_clock_info	ptp_info;
	struct mlx5_pps		pps_info;
};

struct mlx5_fw_tracer;

struct mlx5_core_dev {
	struct device	       *device;
	enum mlx5_coredev_type	coredev_type;
	struct pci_dev	       *pdev;
	struct mutex		pci_status_mutex;
	enum mlx5_pci_status	pci_status;
	char			board_id[MLX5_BOARD_ID_LEN];
	struct mlx5_port_caps	port_caps[MLX5_MAX_PORTS];
	struct {
		u32 hca_cur[MLX5_CAP_NUM][MLX5_UN_SZ_DW(hca_cap_union)];
		u32 hca_max[MLX5_CAP_NUM][MLX5_UN_SZ_DW(hca_cap_union)];
		u32 pcam[MLX5_ST_SZ_DW(pcam_reg)];
		u32 mcam[MLX5_ST_SZ_DW(mcam_reg)];
		u32 fpga[MLX5_ST_SZ_DW(fpga_cap)];
		u32 qcam[MLX5_ST_SZ_DW(qcam_reg)];
		u8  embedded_cpu;
	} caps;
	phys_addr_t		iseg_base;
	struct mlx5_init_seg __iomem *iseg;
	phys_addr_t		bar_addr;
	enum mlx5_device_state	state;
	/* sync interface state */
	struct mutex		intf_state_mutex;
	unsigned long		intf_state;
	struct mlx5_priv	priv;
	struct mlx5_profile    *profile;
	struct mlx5e_resources	mlx5e_res;
	struct mlx5_vxlan      *vxlan;
	struct mlx5_geneve     *geneve;
	struct mlx5_rsvd_gids	reserved_gids;
#ifdef CONFIG_MLX5_FPGA
	struct mlx5_fpga_device *fpga;
#endif
	struct mlx5_clock	clock;
	struct mlx5_ib_clock_info *clock_info;
	struct mlx5_fw_tracer  *tracer;
};

struct mlx5_db {
	__be32		       *db;
	union {
		struct mlx5_db_pgdir		*pgdir;
		struct mlx5_ib_user_db_page	*user_page;
	} u;
	dma_addr_t		dma;
	int			index;
};

enum {
	MLX5_COMP_EQ_SIZE = 1024,
};

enum {
	MLX5_PTYS_IB = 1 << 0,
	MLX5_PTYS_EN = 1 << 2,
};

typedef void (*mlx5_cmd_cbk_t)(int status, void *context);

enum {
	MLX5_CMD_ENT_STATE_PENDING_COMP,
};

struct mlx5_cmd_work_ent {
	struct mlx5_cmd_msg    *in;
	struct mlx5_cmd_msg    *out;
	mlx5_cmd_cbk_t		callback;
	struct delayed_work	cb_timeout_work;
	struct completion	done;
	struct mlx5_cmd	       *cmd;
	struct work_struct	work;
	struct mlx5_cmd_layout *lay;
};

enum phy_port_state {
	MLX5_AAA_111
};

struct mlx5_hca_vport_context {
	enum port_state_policy	policy;
	enum phy_port_state	phys_state;
	enum ib_port_state	vport_state;
	u8			port_physical_state;
	u8			init_type_reply; /* bitmask: see ib spec 14.2.5.6 InitTypeReply */
	u16			qkey_violation_counter;
	u16			pkey_violation_counter;
};

static inline void *mlx5_buf_offset(struct mlx5_frag_buf *buf, int offset)
{
	return buf->frags->buf + offset;
}

#define STRUCT_FIELD(header, field) \
	.struct_offset_bytes = offsetof(struct ib_unpacked_ ## header, field),      \
	.struct_size_bytes   = sizeof((struct ib_unpacked_ ## header *)0)->field

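/*
 * Usage sketch (editor's addition, not part of the original header):
 * STRUCT_FIELD pairs a field's offset and size for the ib_pack()-style
 * tables built around struct ib_field in <rdma/ib_pack.h>. The offsets and
 * bit widths below are illustrative, not taken from this header:
 *
 *	static const struct ib_field lrh_table[] = {
 *		{ STRUCT_FIELD(lrh, virtual_lane),
 *		  .offset_words = 0, .offset_bits = 0, .size_bits = 4 },
 *		{ STRUCT_FIELD(lrh, link_version),
 *		  .offset_words = 0, .offset_bits = 4, .size_bits = 4 },
 *	};
 */
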
static inline struct mlx5_core_dev *pci2mlx5_core_dev(struct pci_dev *pdev)
{
	return pci_get_drvdata(pdev);
}

extern struct dentry *mlx5_debugfs_root;

static inline u16 fw_rev_maj(struct mlx5_core_dev *dev)
{
	return ioread32be(&dev->iseg->fw_rev) & 0xffff;
}

static inline u16 fw_rev_min(struct mlx5_core_dev *dev)
{
	return ioread32be(&dev->iseg->fw_rev) >> 16;
}

static inline u16 fw_rev_sub(struct mlx5_core_dev *dev)
{
	return ioread32be(&dev->iseg->cmdif_rev_fw_sub) & 0xffff;
}

static inline u16 cmdif_rev(struct mlx5_core_dev *dev)
{
	return ioread32be(&dev->iseg->cmdif_rev_fw_sub) >> 16;
}

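/*
 * Usage sketch (editor's addition): the four helpers above decode the
 * big-endian words of the init segment. A driver might log them as below;
 * the function name is illustrative.
 */
static inline void mlx5_example_log_fw_rev(struct mlx5_core_dev *dev)
{
	pr_info("fw %u.%u.%u, cmdif rev %u\n",
		fw_rev_maj(dev), fw_rev_min(dev),
		fw_rev_sub(dev), cmdif_rev(dev));
}
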
static inline u32 mlx5_base_mkey(const u32 key)
{
	return key & 0xffffff00u;
}

static inline void mlx5_init_fbc_offset(struct mlx5_buf_list *frags,
					u8 log_stride, u8 log_sz,
					u16 strides_offset,
					struct mlx5_frag_buf_ctrl *fbc)
{
	fbc->frags		= frags;
	fbc->log_stride		= log_stride;
	fbc->log_sz		= log_sz;
	fbc->sz_m1		= (1 << fbc->log_sz) - 1;
	fbc->log_frag_strides	= PAGE_SHIFT - fbc->log_stride;
	fbc->frag_sz_m1		= (1 << fbc->log_frag_strides) - 1;
	fbc->strides_offset	= strides_offset;
}

static inline void mlx5_init_fbc(struct mlx5_buf_list *frags,
				 u8 log_stride, u8 log_sz,
				 struct mlx5_frag_buf_ctrl *fbc)
{
	mlx5_init_fbc_offset(frags, log_stride, log_sz, 0, fbc);
}

static inline void *mlx5_frag_buf_get_wqe(struct mlx5_frag_buf_ctrl *fbc,
					  u32 ix)
{
	unsigned int frag;

	ix  += fbc->strides_offset;
	frag = ix >> fbc->log_frag_strides;

	return fbc->frags[frag].buf + ((fbc->frag_sz_m1 & ix) << fbc->log_stride);
}

static inline u32
mlx5_frag_buf_get_idx_last_contig_stride(struct mlx5_frag_buf_ctrl *fbc, u32 ix)
{
	u32 last_frag_stride_idx = (ix + fbc->strides_offset) | fbc->frag_sz_m1;

	return min_t(u32, last_frag_stride_idx - fbc->strides_offset, fbc->sz_m1);
}

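/*
 * Usage sketch (editor's addition): a fragment-buffer control structure maps
 * a linear stride index onto the page fragments of a queue. Here a queue of
 * 256 entries (log 8) with 64-byte strides (log 6) is assumed; the function
 * name is illustrative and the buffer must already cover all 256 strides.
 */
static inline void *mlx5_example_wqe(struct mlx5_frag_buf *buf, u32 ix)
{
	struct mlx5_frag_buf_ctrl fbc;

	mlx5_init_fbc(buf->frags, 6, 8, &fbc);	/* 64B stride, 256 entries */
	return mlx5_frag_buf_get_wqe(&fbc, ix & fbc.sz_m1);
}
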
int mlx5_cmd_init(struct mlx5_core_dev *dev);
void mlx5_cmd_cleanup(struct mlx5_core_dev *dev);
void mlx5_cmd_use_events(struct mlx5_core_dev *dev);
void mlx5_cmd_use_polling(struct mlx5_core_dev *dev);

struct mlx5_async_ctx {
	struct mlx5_core_dev *dev;
	atomic_t num_inflight;
	struct wait_queue_head wait;
};

struct mlx5_async_work;

typedef void (*mlx5_async_cbk_t)(int status, struct mlx5_async_work *context);

struct mlx5_async_work {
	struct mlx5_async_ctx *ctx;
	mlx5_async_cbk_t user_callback;
};

void mlx5_cmd_init_async_ctx(struct mlx5_core_dev *dev,
			     struct mlx5_async_ctx *ctx);
void mlx5_cmd_cleanup_async_ctx(struct mlx5_async_ctx *ctx);
int mlx5_cmd_exec_cb(struct mlx5_async_ctx *ctx, void *in, int in_size,
		     void *out, int out_size, mlx5_async_cbk_t callback,
		     struct mlx5_async_work *work);

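/*
 * Usage sketch (editor's addition): callers embed struct mlx5_async_work in
 * their own request object and recover it with container_of() in the
 * callback passed to mlx5_cmd_exec_cb(). The struct and function names here
 * are illustrative.
 */
struct mlx5_example_req {
	struct mlx5_async_work work;	/* must stay alive until the callback */
	u32 out[16];
};

static inline void mlx5_example_cb(int status, struct mlx5_async_work *work)
{
	struct mlx5_example_req *req =
		container_of(work, struct mlx5_example_req, work);

	if (status)
		pr_warn("async command failed: %d\n", status);
	else
		pr_debug("req %p completed, parse req->out here\n", req);
}
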
int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
		  int out_size);
int mlx5_cmd_exec_polling(struct mlx5_core_dev *dev, void *in, int in_size,
			  void *out, int out_size);
void mlx5_cmd_mbox_status(void *out, u8 *status, u32 *syndrome);

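/*
 * Usage sketch (editor's addition): the canonical mlx5_cmd_exec() pattern
 * builds commands with the MLX5_SET()/MLX5_GET() accessors over the layouts
 * in mlx5/mlx5_ifc.h. ALLOC_PD is used only as a familiar example; it
 * mirrors mlx5_core_alloc_pd(), declared further down.
 */
static inline int mlx5_example_alloc_pd(struct mlx5_core_dev *dev, u32 *pdn)
{
	u32 out[MLX5_ST_SZ_DW(alloc_pd_out)] = {};
	u32 in[MLX5_ST_SZ_DW(alloc_pd_in)] = {};
	int err;

	MLX5_SET(alloc_pd_in, in, opcode, MLX5_CMD_OP_ALLOC_PD);
	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
	if (!err)
		*pdn = MLX5_GET(alloc_pd_out, out, pd);
	return err;
}
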
int mlx5_core_get_caps(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type);
int mlx5_cmd_alloc_uar(struct mlx5_core_dev *dev, u32 *uarn);
int mlx5_cmd_free_uar(struct mlx5_core_dev *dev, u32 uarn);
void mlx5_health_flush(struct mlx5_core_dev *dev);
void mlx5_health_cleanup(struct mlx5_core_dev *dev);
int mlx5_health_init(struct mlx5_core_dev *dev);
void mlx5_start_health_poll(struct mlx5_core_dev *dev);
void mlx5_stop_health_poll(struct mlx5_core_dev *dev, bool disable_health);
void mlx5_drain_health_wq(struct mlx5_core_dev *dev);
void mlx5_trigger_health_work(struct mlx5_core_dev *dev);
int mlx5_buf_alloc_node(struct mlx5_core_dev *dev, int size,
			struct mlx5_frag_buf *buf, int node);
int mlx5_buf_alloc(struct mlx5_core_dev *dev,
		   int size, struct mlx5_frag_buf *buf);
void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_frag_buf *buf);
int mlx5_frag_buf_alloc_node(struct mlx5_core_dev *dev, int size,
			     struct mlx5_frag_buf *buf, int node);
void mlx5_frag_buf_free(struct mlx5_core_dev *dev, struct mlx5_frag_buf *buf);
struct mlx5_cmd_mailbox *mlx5_alloc_cmd_mailbox_chain(struct mlx5_core_dev *dev,
						      gfp_t flags, int npages);
void mlx5_free_cmd_mailbox_chain(struct mlx5_core_dev *dev,
				 struct mlx5_cmd_mailbox *head);
void mlx5_init_mkey_table(struct mlx5_core_dev *dev);
void mlx5_cleanup_mkey_table(struct mlx5_core_dev *dev);
int mlx5_core_create_mkey_cb(struct mlx5_core_dev *dev,
			     struct mlx5_core_mkey *mkey,
			     struct mlx5_async_ctx *async_ctx, u32 *in,
			     int inlen, u32 *out, int outlen,
			     mlx5_async_cbk_t callback,
			     struct mlx5_async_work *context);
int mlx5_core_create_mkey(struct mlx5_core_dev *dev,
			  struct mlx5_core_mkey *mkey,
			  u32 *in, int inlen);
int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev,
			   struct mlx5_core_mkey *mkey);
int mlx5_core_query_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mkey *mkey,
			 u32 *out, int outlen);
int mlx5_core_alloc_pd(struct mlx5_core_dev *dev, u32 *pdn);
int mlx5_core_dealloc_pd(struct mlx5_core_dev *dev, u32 pdn);
int mlx5_pagealloc_init(struct mlx5_core_dev *dev);
void mlx5_pagealloc_cleanup(struct mlx5_core_dev *dev);
void mlx5_pagealloc_start(struct mlx5_core_dev *dev);
void mlx5_pagealloc_stop(struct mlx5_core_dev *dev);
void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id,
				 s32 npages, bool ec_function);
int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot);
int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev);
void mlx5_register_debugfs(void);
void mlx5_unregister_debugfs(void);

void mlx5_fill_page_array(struct mlx5_frag_buf *buf, __be64 *pas);
void mlx5_fill_page_frag_array(struct mlx5_frag_buf *frag_buf, __be64 *pas);

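/*
 * Usage sketch (editor's addition): a typical pattern allocates a buffer,
 * copies its page addresses into a command's PAS array with
 * mlx5_fill_page_array(), and frees it with mlx5_buf_free() when the object
 * is torn down. "pas" stands for a pointer into already-allocated command
 * memory; the function name is illustrative.
 */
static inline int mlx5_example_buf(struct mlx5_core_dev *dev, int size,
				   struct mlx5_frag_buf *buf, __be64 *pas)
{
	int err;

	err = mlx5_buf_alloc(dev, size, buf);
	if (err)
		return err;

	mlx5_fill_page_array(buf, pas);	/* hand the pages to firmware */
	return 0;
}
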
int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn,
		    unsigned int *irqn);
int mlx5_core_attach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn);
int mlx5_core_detach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn);

int mlx5_qp_debugfs_init(struct mlx5_core_dev *dev);
void mlx5_qp_debugfs_cleanup(struct mlx5_core_dev *dev);
int mlx5_core_access_reg(struct mlx5_core_dev *dev, void *data_in,
			 int size_in, void *data_out, int size_out,
			 u16 reg_num, int arg, int write);

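/*
 * Usage sketch (editor's addition): reading a port register through
 * mlx5_core_access_reg(). PMTU (MLX5_REG_PMTU, defined above) is shown; the
 * pmtu_reg layout and its local_port/max_mtu fields come from mlx5_ifc.h,
 * and the last argument selects read (0) vs. write (1). Function name is
 * illustrative.
 */
static inline int mlx5_example_query_max_mtu(struct mlx5_core_dev *dev,
					     u16 *max_mtu, u8 port)
{
	u32 out[MLX5_ST_SZ_DW(pmtu_reg)] = {};
	u32 in[MLX5_ST_SZ_DW(pmtu_reg)] = {};
	int err;

	MLX5_SET(pmtu_reg, in, local_port, port);
	err = mlx5_core_access_reg(dev, in, sizeof(in), out, sizeof(out),
				   MLX5_REG_PMTU, 0, 0);
	if (!err)
		*max_mtu = MLX5_GET(pmtu_reg, out, max_mtu);
	return err;
}
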
int mlx5_db_alloc(struct mlx5_core_dev *dev, struct mlx5_db *db);
int mlx5_db_alloc_node(struct mlx5_core_dev *dev, struct mlx5_db *db,
		       int node);
void mlx5_db_free(struct mlx5_core_dev *dev, struct mlx5_db *db);

const char *mlx5_command_str(int command);
int mlx5_cmdif_debugfs_init(struct mlx5_core_dev *dev);
void mlx5_cmdif_debugfs_cleanup(struct mlx5_core_dev *dev);
int mlx5_core_create_psv(struct mlx5_core_dev *dev, u32 pdn,
			 int npsvs, u32 *sig_index);
int mlx5_core_destroy_psv(struct mlx5_core_dev *dev, int psv_num);
void mlx5_core_put_rsc(struct mlx5_core_rsc_common *common);
int mlx5_query_odp_caps(struct mlx5_core_dev *dev,
			struct mlx5_odp_caps *odp_caps);
int mlx5_core_query_ib_ppcnt(struct mlx5_core_dev *dev,
			     u8 port_num, void *out, size_t sz);

int mlx5_init_rl_table(struct mlx5_core_dev *dev);
void mlx5_cleanup_rl_table(struct mlx5_core_dev *dev);
int mlx5_rl_add_rate(struct mlx5_core_dev *dev, u16 *index,
		     struct mlx5_rate_limit *rl);
void mlx5_rl_remove_rate(struct mlx5_core_dev *dev, struct mlx5_rate_limit *rl);
bool mlx5_rl_is_in_range(struct mlx5_core_dev *dev, u32 rate);
bool mlx5_rl_are_equal(struct mlx5_rate_limit *rl_0,
		       struct mlx5_rate_limit *rl_1);

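/*
 * Usage sketch (editor's addition): rate limits are matched by value, so the
 * same struct mlx5_rate_limit is passed to add and remove. The rate figure
 * and function name are illustrative.
 */
static inline int mlx5_example_set_rate(struct mlx5_core_dev *dev, u16 *index)
{
	struct mlx5_rate_limit rl = { .rate = 100000 };

	if (!mlx5_rl_is_in_range(dev, rl.rate))
		return -EINVAL;
	/* pair with mlx5_rl_remove_rate(dev, &rl) when done */
	return mlx5_rl_add_rate(dev, index, &rl);
}
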
int mlx5_alloc_bfreg(struct mlx5_core_dev *mdev, struct mlx5_sq_bfreg *bfreg,
		     bool map_wc, bool fast_path);
void mlx5_free_bfreg(struct mlx5_core_dev *mdev, struct mlx5_sq_bfreg *bfreg);

unsigned int mlx5_comp_vectors_count(struct mlx5_core_dev *dev);
struct cpumask *
mlx5_comp_irq_get_affinity_mask(struct mlx5_core_dev *dev, int vector);
unsigned int mlx5_core_reserved_gids_count(struct mlx5_core_dev *dev);
int mlx5_core_roce_gid_set(struct mlx5_core_dev *dev, unsigned int index,
			   u8 roce_version, u8 roce_l3_type, const u8 *gid,
			   const u8 *mac, bool vlan, u16 vlan_id, u8 port_num);

static inline int fw_initializing(struct mlx5_core_dev *dev)
{
	return ioread32be(&dev->iseg->initializing) >> 31;
}

static inline u32 mlx5_mkey_to_idx(u32 mkey)
{
	return mkey >> 8;
}

static inline u32 mlx5_idx_to_mkey(u32 mkey_idx)
{
	return mkey_idx << 8;
}

static inline u8 mlx5_mkey_variant(u32 mkey)
{
	return mkey & 0xff;
}

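/*
 * Usage sketch (editor's addition): an mkey is a 24-bit index in the high
 * bits plus an 8-bit variant in the low byte, so the helpers above compose
 * and decompose as follows. Function name is illustrative.
 */
static inline u32 mlx5_example_mkey(u32 mkey_idx, u8 variant)
{
	u32 mkey = mlx5_idx_to_mkey(mkey_idx) | variant;

	/* round-trips: mlx5_mkey_to_idx(mkey) == mkey_idx,
	 * mlx5_mkey_variant(mkey) == variant,
	 * mlx5_base_mkey(mkey) == (mkey & 0xffffff00)
	 */
	return mkey;
}
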
enum {
	MLX5_PROF_MASK_QP_SIZE		= (u64)1 << 0,
	MLX5_PROF_MASK_MR_CACHE		= (u64)1 << 1,
};

enum {
	MR_CACHE_LAST_STD_ENTRY = 20,
	MLX5_IMR_MTT_CACHE_ENTRY,
	MLX5_IMR_KSM_CACHE_ENTRY,
	MAX_MR_CACHE_ENTRIES
};

enum {
	MLX5_INTERFACE_PROTOCOL_IB  = 0,
	MLX5_INTERFACE_PROTOCOL_ETH = 1,
};

struct mlx5_interface {
	void *			(*add)(struct mlx5_core_dev *dev);
	void			(*remove)(struct mlx5_core_dev *dev, void *context);
	int			(*attach)(struct mlx5_core_dev *dev, void *context);
	void			(*detach)(struct mlx5_core_dev *dev, void *context);
	struct list_head	list;
};

int mlx5_register_interface(struct mlx5_interface *intf);
void mlx5_unregister_interface(struct mlx5_interface *intf);

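/*
 * Usage sketch (editor's addition): sub-drivers (e.g. the Ethernet and IB
 * drivers) plug into the core by registering an mlx5_interface; whatever
 * add() returns is handed back to the other callbacks as "context". Names
 * are illustrative, and such code would live in a sub-driver, not here.
 *
 *	static void *mlx5_example_add(struct mlx5_core_dev *dev)
 *	{
 *		return dev;	// per-device context
 *	}
 *
 *	static void mlx5_example_remove(struct mlx5_core_dev *dev, void *context)
 *	{
 *	}
 *
 *	static struct mlx5_interface mlx5_example_intf = {
 *		.add	= mlx5_example_add,
 *		.remove	= mlx5_example_remove,
 *	};
 *
 * Module init calls mlx5_register_interface(&mlx5_example_intf) and module
 * exit calls mlx5_unregister_interface(&mlx5_example_intf).
 */
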
int mlx5_notifier_register(struct mlx5_core_dev *dev, struct notifier_block *nb);
int mlx5_notifier_unregister(struct mlx5_core_dev *dev, struct notifier_block *nb);
int mlx5_eq_notifier_register(struct mlx5_core_dev *dev, struct mlx5_nb *nb);
int mlx5_eq_notifier_unregister(struct mlx5_core_dev *dev, struct mlx5_nb *nb);

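/*
 * Usage sketch (editor's addition): mlx5_notifier_register() follows the
 * standard notifier_block calling convention; the event code (such as
 * MLX5_DEV_EVENT_SYS_ERROR, defined above) arrives in "event" and any
 * event-specific payload in "data". Register with a notifier_block whose
 * notifier_call points at a handler like this; the name is illustrative.
 */
static inline int mlx5_example_event(struct notifier_block *nb,
				     unsigned long event, void *data)
{
	if (event == MLX5_DEV_EVENT_SYS_ERROR)
		pr_err("mlx5: fatal system error\n");
	return NOTIFY_OK;
}
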
int mlx5_core_query_vendor_id(struct mlx5_core_dev *mdev, u32 *vendor_id);

int mlx5_cmd_create_vport_lag(struct mlx5_core_dev *dev);
int mlx5_cmd_destroy_vport_lag(struct mlx5_core_dev *dev);
bool mlx5_lag_is_roce(struct mlx5_core_dev *dev);
bool mlx5_lag_is_sriov(struct mlx5_core_dev *dev);
bool mlx5_lag_is_multipath(struct mlx5_core_dev *dev);
bool mlx5_lag_is_active(struct mlx5_core_dev *dev);
struct net_device *mlx5_lag_get_roce_netdev(struct mlx5_core_dev *dev);
int mlx5_lag_query_cong_counters(struct mlx5_core_dev *dev,
				 u64 *values,
				 int num_counters,
				 size_t *offsets);
struct mlx5_uars_page *mlx5_get_uars_page(struct mlx5_core_dev *mdev);
void mlx5_put_uars_page(struct mlx5_core_dev *mdev, struct mlx5_uars_page *up);

#ifdef CONFIG_MLX5_CORE_IPOIB
struct net_device *mlx5_rdma_netdev_alloc(struct mlx5_core_dev *mdev,
					  struct ib_device *ibdev,
					  const char *name,
					  void (*setup)(struct net_device *));
#endif /* CONFIG_MLX5_CORE_IPOIB */
int mlx5_rdma_rn_get_params(struct mlx5_core_dev *mdev,
			    struct ib_device *device,
			    struct rdma_netdev_alloc_params *params);

struct mlx5_profile {
	u64	mask;
	u8	log_max_qp;
	struct {
		int	size;
		int	limit;
	} mr_cache[MAX_MR_CACHE_ENTRIES];
};

enum {
	MLX5_PCI_DEV_IS_VF = 1 << 0,
};

static inline bool mlx5_core_is_pf(const struct mlx5_core_dev *dev)
{
	return dev->coredev_type == MLX5_COREDEV_PF;
}

static inline bool mlx5_core_is_ecpf(struct mlx5_core_dev *dev)
{
	return dev->caps.embedded_cpu;
}

static inline bool
mlx5_core_is_ecpf_esw_manager(const struct mlx5_core_dev *dev)
{
	return dev->caps.embedded_cpu && MLX5_CAP_GEN(dev, eswitch_manager);
}

static inline bool mlx5_ecpf_vport_exists(const struct mlx5_core_dev *dev)
{
	return mlx5_core_is_pf(dev) && MLX5_CAP_ESW(dev, ecpf_vport_exists);
}

static inline u16 mlx5_core_max_vfs(const struct mlx5_core_dev *dev)
{
	return dev->priv.sriov.max_vfs;
}

static inline int mlx5_get_gid_table_len(u16 param)
{
	if (param > 4) {
		pr_warn("gid table length is zero\n");
		return 0;
	}

	return 8 * (1 << param);
}

static inline bool mlx5_rl_is_supported(struct mlx5_core_dev *dev)
{
	return !!(dev->priv.rl_table.max_size);
}

static inline int mlx5_core_is_mp_slave(struct mlx5_core_dev *dev)
{
	return MLX5_CAP_GEN(dev, affiliate_nic_vport_criteria) &&
	       MLX5_CAP_GEN(dev, num_vhca_ports) <= 1;
}

static inline int mlx5_core_is_mp_master(struct mlx5_core_dev *dev)
{
	return MLX5_CAP_GEN(dev, num_vhca_ports) > 1;
}

static inline int mlx5_core_mp_enabled(struct mlx5_core_dev *dev)
{
	return mlx5_core_is_mp_slave(dev) ||
	       mlx5_core_is_mp_master(dev);
}

static inline int mlx5_core_native_port_num(struct mlx5_core_dev *dev)
{
	if (!mlx5_core_mp_enabled(dev))
		return 1;

	return MLX5_CAP_GEN(dev, native_port_num);
}

enum {
	MLX5_TRIGGERED_CMD_COMP = (u64)1 << 32,
};

#endif /* MLX5_DRIVER_H */