/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef MLX5_DRIVER_H
#define MLX5_DRIVER_H
#include <linux/kernel.h>
#include <linux/completion.h>
#include <linux/pci.h>
#include <linux/irq.h>
#include <linux/spinlock_types.h>
#include <linux/semaphore.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/radix-tree.h>
#include <linux/workqueue.h>
#include <linux/mempool.h>
#include <linux/interrupt.h>
#include <linux/idr.h>
#include <linux/notifier.h>

#include <linux/mlx5/device.h>
#include <linux/mlx5/doorbell.h>
#include <linux/mlx5/srq.h>
#include <linux/mlx5/eq.h>
#include <linux/timecounter.h>
#include <linux/ptp_clock_kernel.h>
enum {
	MLX5_BOARD_ID_LEN = 64,
	MLX5_MAX_NAME_LEN = 16,
};

enum {
	/* one minute for the sake of bringup. Generally, commands must always
	 * complete, and we may need to increase this timeout value.
	 */
	MLX5_CMD_TIMEOUT_MSEC	= 60 * 1000,
	MLX5_CMD_WQ_MAX_NAME	= 32,
};

enum {
	CMD_OWNER_SW		= 0x0,
	CMD_OWNER_HW		= 0x1,
	CMD_STATUS_SUCCESS	= 0,
};

enum mlx5_sqp_t {
	MLX5_SQP_SMI		= 0,
	MLX5_SQP_GSI		= 1,
	MLX5_SQP_IEEE_1588	= 2,
	MLX5_SQP_SNIFFER	= 3,
	MLX5_SQP_SYNC_UMR	= 4,
};

enum {
	MLX5_MAX_PORTS	= 2,
};
enum {
	MLX5_ATOMIC_MODE_OFFSET		= 16,
	MLX5_ATOMIC_MODE_IB_COMP	= 1,
	MLX5_ATOMIC_MODE_CX		= 2,
	MLX5_ATOMIC_MODE_8B		= 3,
	MLX5_ATOMIC_MODE_16B		= 4,
	MLX5_ATOMIC_MODE_32B		= 5,
	MLX5_ATOMIC_MODE_64B		= 6,
	MLX5_ATOMIC_MODE_128B		= 7,
	MLX5_ATOMIC_MODE_256B		= 8,
};
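/*
 * Illustrative sketch (not code from this header, and "qpc_flags" is a
 * hypothetical variable): an atomic mode value is typically combined with
 * MLX5_ATOMIC_MODE_OFFSET as a shift when programming a QP context, e.g.:
 *
 *	qpc_flags |= MLX5_ATOMIC_MODE_8B << MLX5_ATOMIC_MODE_OFFSET;
 */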
enum {
	MLX5_REG_QPTS		 = 0x4002,
	MLX5_REG_QETCR		 = 0x4005,
	MLX5_REG_QTCT		 = 0x400a,
	MLX5_REG_QPDPM		 = 0x4013,
	MLX5_REG_QCAM		 = 0x4019,
	MLX5_REG_DCBX_PARAM	 = 0x4020,
	MLX5_REG_DCBX_APP	 = 0x4021,
	MLX5_REG_FPGA_CAP	 = 0x4022,
	MLX5_REG_FPGA_CTRL	 = 0x4023,
	MLX5_REG_FPGA_ACCESS_REG = 0x4024,
	MLX5_REG_PCAP		 = 0x5001,
	MLX5_REG_PMTU		 = 0x5003,
	MLX5_REG_PTYS		 = 0x5004,
	MLX5_REG_PAOS		 = 0x5006,
	MLX5_REG_PFCC		 = 0x5007,
	MLX5_REG_PPCNT		 = 0x5008,
	MLX5_REG_PPTB		 = 0x500b,
	MLX5_REG_PBMC		 = 0x500c,
	MLX5_REG_PMAOS		 = 0x5012,
	MLX5_REG_PUDE		 = 0x5009,
	MLX5_REG_PMPE		 = 0x5010,
	MLX5_REG_PELC		 = 0x500e,
	MLX5_REG_PVLC		 = 0x500f,
	MLX5_REG_PCMR		 = 0x5041,
	MLX5_REG_PMLP		 = 0x5002,
	MLX5_REG_PPLM		 = 0x5023,
	MLX5_REG_PCAM		 = 0x507f,
	MLX5_REG_NODE_DESC	 = 0x6001,
	MLX5_REG_HOST_ENDIANNESS = 0x7004,
	MLX5_REG_MCIA		 = 0x9014,
	MLX5_REG_MLCR		 = 0x902b,
	MLX5_REG_MTRC_CAP	 = 0x9040,
	MLX5_REG_MTRC_CONF	 = 0x9041,
	MLX5_REG_MTRC_STDB	 = 0x9042,
	MLX5_REG_MTRC_CTRL	 = 0x9043,
	MLX5_REG_MPCNT		 = 0x9051,
	MLX5_REG_MTPPS		 = 0x9053,
	MLX5_REG_MTPPSE		 = 0x9054,
	MLX5_REG_MPEGC		 = 0x9056,
	MLX5_REG_MCQI		 = 0x9061,
	MLX5_REG_MCC		 = 0x9062,
	MLX5_REG_MCDA		 = 0x9063,
	MLX5_REG_MCAM		 = 0x907f,
};
enum mlx5_qpts_trust_state {
	MLX5_QPTS_TRUST_PCP  = 1,
	MLX5_QPTS_TRUST_DSCP = 2,
};

enum mlx5_dcbx_oper_mode {
	MLX5E_DCBX_PARAM_VER_OPER_HOST = 0x0,
	MLX5E_DCBX_PARAM_VER_OPER_AUTO = 0x3,
};
enum {
	MLX5_ATOMIC_OPS_CMP_SWAP		= 1 << 0,
	MLX5_ATOMIC_OPS_FETCH_ADD		= 1 << 1,
	MLX5_ATOMIC_OPS_EXTENDED_CMP_SWAP	= 1 << 2,
	MLX5_ATOMIC_OPS_EXTENDED_FETCH_ADD	= 1 << 3,
};
enum mlx5_page_fault_resume_flags {
	MLX5_PAGE_FAULT_RESUME_REQUESTOR = 1 << 0,
	MLX5_PAGE_FAULT_RESUME_WRITE	 = 1 << 1,
	MLX5_PAGE_FAULT_RESUME_RDMA	 = 1 << 2,
	MLX5_PAGE_FAULT_RESUME_ERROR	 = 1 << 7,
};

enum port_state_policy {
	MLX5_POLICY_DOWN	= 0,
	MLX5_POLICY_UP		= 1,
	MLX5_POLICY_FOLLOW	= 2,
	MLX5_POLICY_INVALID	= 0xffffffff
};
struct mlx5_field_desc {
	struct dentry	       *dent;
	int			i;
};

struct mlx5_rsc_debug {
	struct mlx5_core_dev   *dev;
	void		       *object;
	enum dbg_rsc_type	type;
	struct dentry	       *root;
	struct mlx5_field_desc	fields[0];
};
enum mlx5_dev_event {
	MLX5_DEV_EVENT_SYS_ERROR = 128, /* 0 - 127 are FW events */
	MLX5_DEV_EVENT_PORT_UP,
	MLX5_DEV_EVENT_PORT_DOWN,
	MLX5_DEV_EVENT_PORT_INITIALIZED,
	MLX5_DEV_EVENT_LID_CHANGE,
	MLX5_DEV_EVENT_PKEY_CHANGE,
	MLX5_DEV_EVENT_GUID_CHANGE,
	MLX5_DEV_EVENT_CLIENT_REREG,
	MLX5_DEV_EVENT_PPS,
	MLX5_DEV_EVENT_DELAY_DROP_TIMEOUT,
};

enum mlx5_port_status {
	MLX5_PORT_UP	= 1,
	MLX5_PORT_DOWN	= 2,
};
struct mlx5_bfreg_info {
	u32		       *sys_pages;
	int			num_low_latency_bfregs;
	unsigned int	       *count;

	/* protect bfreg allocation data structs */
	struct mutex		lock;
	u32			ver;
	u8			lib_uar_4k : 1;
	u32			num_sys_pages;
	u32			num_static_sys_pages;
	u32			total_num_bfregs;
	u32			num_dyn_bfregs;
};
struct mlx5_cmd_first {
	__be32		data[4];
};

struct mlx5_cmd_msg {
	struct list_head	list;
	struct cmd_msg_cache   *parent;
	u32			len;
	struct mlx5_cmd_first	first;
	struct mlx5_cmd_mailbox *next;
};
struct mlx5_cmd_debug {
	struct dentry	       *dbg_root;
	struct dentry	       *dbg_in;
	struct dentry	       *dbg_out;
	struct dentry	       *dbg_outlen;
	struct dentry	       *dbg_status;
	struct dentry	       *dbg_run;
	void		       *in_msg;
	void		       *out_msg;
	u8			status;
	u16			inlen;
	u16			outlen;
};

struct cmd_msg_cache {
	/* protect block chain allocations */
	spinlock_t		lock;
	struct list_head	head;
	unsigned int		max_inbox_size;
	unsigned int		num_ent;
};
enum {
	MLX5_NUM_COMMAND_CACHES = 5,
};
struct mlx5_cmd_stats {
	u64		sum;
	u64		n;
	struct dentry  *root;
	struct dentry  *avg;
	struct dentry  *count;
	/* protect command average calculations */
	spinlock_t	lock;
};
struct mlx5_cmd {
	void	       *cmd_alloc_buf;
	dma_addr_t	alloc_dma;
	int		alloc_size;
	void	       *cmd_buf;
	dma_addr_t	dma;
	u16		cmdif_rev;
	u8		log_sz;
	u8		log_stride;
	int		max_reg_cmds;
	int		events;
	u32 __iomem    *vector;

	/* protect command queue allocations */
	spinlock_t	alloc_lock;

	/* protect token allocations */
	spinlock_t	token_lock;
	u8		token;
	unsigned long	bitmask;
	char		wq_name[MLX5_CMD_WQ_MAX_NAME];
	struct workqueue_struct *wq;
	struct semaphore sem;
	struct semaphore pages_sem;
	int		mode;
	struct mlx5_cmd_work_ent *ent_arr[MLX5_MAX_COMMANDS];
	struct dma_pool *pool;
	struct mlx5_cmd_debug dbg;
	struct cmd_msg_cache cache[MLX5_NUM_COMMAND_CACHES];
	int		checksum_disabled;
	struct mlx5_cmd_stats stats[MLX5_CMD_OP_MAX];
};
struct mlx5_port_caps {
	int	gid_table_len;
	int	pkey_table_len;
	u8	ext_port_cap;
	bool	has_smi;
};

struct mlx5_cmd_mailbox {
	void	       *buf;
	dma_addr_t	dma;
	struct mlx5_cmd_mailbox *next;
};

struct mlx5_buf_list {
	void	       *buf;
	dma_addr_t	map;
};

struct mlx5_frag_buf {
	struct mlx5_buf_list	*frags;
	int			npages;
	int			size;
	u8			page_shift;
};

struct mlx5_frag_buf_ctrl {
	struct mlx5_buf_list   *frags;
	u32			sz_m1;
	u16			frag_sz_m1;
	u16			strides_offset;
	u8			log_sz;
	u8			log_stride;
	u8			log_frag_strides;
};
struct mlx5_core_psv {
	u32	psv_idx;
	struct psv_layout {
		u32	pd;
		u16	syndrome;
		u16	reserved;
		u16	bg;
		u16	sig_size;
	} psv;
};

struct mlx5_core_sig_ctx {
	struct mlx5_core_psv	psv_memory;
	struct mlx5_core_psv	psv_wire;
	struct ib_sig_err	err_item;
	bool			sig_status_checked;
	bool			sig_err_exists;
	u32			sigerr_count;
};

struct mlx5_core_mkey {
	u64			iova;
	u64			size;
	u32			key;
	u32			pd;
};
#define MLX5_24BIT_MASK ((1 << 24) - 1)
enum mlx5_res_type {
	MLX5_RES_QP	= MLX5_EVENT_QUEUE_TYPE_QP,
	MLX5_RES_RQ	= MLX5_EVENT_QUEUE_TYPE_RQ,
	MLX5_RES_SQ	= MLX5_EVENT_QUEUE_TYPE_SQ,
	MLX5_RES_SRQ	= 3,
	MLX5_RES_XSRQ	= 4,
	MLX5_RES_XRQ	= 5,
	MLX5_RES_DCT	= MLX5_EVENT_QUEUE_TYPE_DCT,
};

struct mlx5_core_rsc_common {
	enum mlx5_res_type	res;
	atomic_t		refcount;
	struct completion	free;
};
struct mlx5_core_srq {
	struct mlx5_core_rsc_common	common; /* must be first */
	u32				srqn;
	int				max;
	size_t				max_gs;
	size_t				max_avail_gather;
	int				wqe_shift;
	void				(*event)(struct mlx5_core_srq *, enum mlx5_event);

	atomic_t			refcount;
	struct completion		free;
	u16				uid;
};
struct mlx5_uars_page {
	void __iomem	       *map;
	bool			wc;
	u32			index;
	struct list_head	list;
	unsigned int		bfregs;
	unsigned long	       *reg_bitmap; /* for non fast path bf regs */
	unsigned long	       *fp_bitmap;
	unsigned int		reg_avail;
	unsigned int		fp_avail;
	struct kref		ref_count;
	struct mlx5_core_dev   *mdev;
};

struct mlx5_bfreg_head {
	/* protect blue flame registers allocations */
	struct mutex		lock;
	struct list_head	list;
};

struct mlx5_bfreg_data {
	struct mlx5_bfreg_head	reg_head;
	struct mlx5_bfreg_head	wc_head;
};

struct mlx5_sq_bfreg {
	void __iomem	       *map;
	struct mlx5_uars_page  *up;
	bool			wc;
	u32			index;
	unsigned int		offset;
};
struct mlx5_core_health {
	struct health_buffer __iomem   *health;
	__be32 __iomem		       *health_counter;
	struct timer_list		timer;
	u32				prev;
	int				miss_counter;
	bool				sick;
	/* wq spinlock to synchronize draining */
	spinlock_t			wq_lock;
	struct workqueue_struct	       *wq;
	unsigned long			flags;
	struct work_struct		work;
	struct delayed_work		recover_work;
};
struct mlx5_qp_table {
	struct notifier_block	nb;

	/* protect radix tree */
	spinlock_t		lock;
	struct radix_tree_root	tree;
};

struct mlx5_srq_table {
	struct mlx5_nb		catas_err_nb;
	struct mlx5_nb		rq_limit_nb;
	/* protect radix tree */
	spinlock_t		lock;
	struct radix_tree_root	tree;
};

struct mlx5_mkey_table {
	/* protect radix tree */
	rwlock_t		lock;
	struct radix_tree_root	tree;
};
struct mlx5_vf_context {
	int	enabled;
	u64	port_guid;
	u64	node_guid;
	enum port_state_policy	policy;
};

struct mlx5_core_sriov {
	struct mlx5_vf_context	*vfs_ctx;
	int			num_vfs;
	int			enabled_vfs;
};
struct mlx5_fc_stats {
	spinlock_t counters_idr_lock; /* protects counters_idr */
	struct idr counters_idr;
	struct list_head counters;
	struct llist_head addlist;
	struct llist_head dellist;

	struct workqueue_struct *wq;
	struct delayed_work work;
	unsigned long next_query;
	unsigned long sampling_interval; /* jiffies */
};

struct mlx5_events;
struct mlx5_mpfs;
struct mlx5_eswitch;
struct mlx5_lag;
struct mlx5_eq_table;
struct mlx5_rate_limit {
	u32			rate;
	u32			max_burst_sz;
	u16			typical_pkt_sz;
};

struct mlx5_rl_entry {
	struct mlx5_rate_limit	rl;
	u16			index;
	u16			refcount;
};

struct mlx5_rl_table {
	/* protect rate limit table */
	struct mutex		rl_lock;
	u16			max_size;
	u32			max_rate;
	u32			min_rate;
	struct mlx5_rl_entry   *rl_entry;
};
struct mlx5_priv {
	char			name[MLX5_MAX_NAME_LEN];
	struct mlx5_eq_table	*eq_table;

	/* pages stuff */
	struct mlx5_nb		pg_nb;
	struct workqueue_struct *pg_wq;
	struct rb_root		page_root;
	int			fw_pages;
	atomic_t		reg_pages;
	struct list_head	free_list;
	int			vfs_pages;

	struct mlx5_core_health health;

	struct mlx5_srq_table	srq_table;

	/* start: qp stuff */
	struct mlx5_qp_table	qp_table;
	struct dentry	       *qp_debugfs;
	struct dentry	       *eq_debugfs;
	struct dentry	       *cq_debugfs;
	struct dentry	       *cmdif_debugfs;
	/* end: qp stuff */

	/* start: mkey stuff */
	struct mlx5_mkey_table	mkey_table;
	/* end: mkey stuff */

	/* start: alloc stuff */
	/* protect buffer allocation according to numa node */
	struct mutex		alloc_mutex;
	int			numa_node;

	struct mutex		pgdir_mutex;
	struct list_head	pgdir_list;
	/* end: alloc stuff */
	struct dentry	       *dbg_root;

	/* protect mkey key part */
	spinlock_t		mkey_lock;
	u8			mkey_key;

	struct list_head	dev_list;
	struct list_head	ctx_list;
	spinlock_t		ctx_lock;

	struct list_head	waiting_events_list;
	bool			is_accum_events;
	struct mlx5_events     *events;

	struct mlx5_flow_steering *steering;
	struct mlx5_mpfs	*mpfs;
	struct mlx5_eswitch	*eswitch;
	struct mlx5_core_sriov	sriov;
	struct mlx5_lag		*lag;
	unsigned long		pci_dev_data;
	struct mlx5_fc_stats	fc_stats;
	struct mlx5_rl_table	rl_table;

	struct mlx5_bfreg_data	bfregs;
	struct mlx5_uars_page  *uar;
};
enum mlx5_device_state {
	MLX5_DEVICE_STATE_UP,
	MLX5_DEVICE_STATE_INTERNAL_ERROR,
};

enum mlx5_interface_state {
	MLX5_INTERFACE_STATE_UP = BIT(0),
};

enum mlx5_pci_status {
	MLX5_PCI_STATUS_DISABLED,
	MLX5_PCI_STATUS_ENABLED,
};

enum mlx5_pagefault_type_flags {
	MLX5_PFAULT_REQUESTOR = 1 << 0,
	MLX5_PFAULT_WRITE     = 1 << 1,
	MLX5_PFAULT_RDMA      = 1 << 2,
};
struct mlx5_td {
	struct list_head	tirs_list;
	u32			tdn;
};

struct mlx5e_resources {
	u32			pdn;
	struct mlx5_td		td;
	struct mlx5_core_mkey	mkey;
	struct mlx5_sq_bfreg	bfreg;
};

#define MLX5_MAX_RESERVED_GIDS 8

struct mlx5_rsvd_gids {
	unsigned int		start;
	unsigned int		count;
	struct ida		ida;
};
#define MAX_PIN_NUM	8
struct mlx5_pps {
	u8			pin_caps[MAX_PIN_NUM];
	struct work_struct	out_work;
	u64			start[MAX_PIN_NUM];
	u8			enabled;
};

struct mlx5_clock {
	struct mlx5_core_dev   *mdev;
	struct mlx5_nb		pps_nb;
	seqlock_t		lock;
	struct cyclecounter	cycles;
	struct timecounter	tc;
	struct hwtstamp_config	hwtstamp_config;
	u32			nominal_c_mult;
	unsigned long		overflow_period;
	struct delayed_work	overflow_work;
	struct ptp_clock       *ptp;
	struct ptp_clock_info	ptp_info;
	struct mlx5_pps		pps_info;
};
struct mlx5_fw_tracer;
struct mlx5_vxlan;
struct mlx5_core_dev {
	struct pci_dev	       *pdev;
	/* sync pci state */
	struct mutex		pci_status_mutex;
	enum mlx5_pci_status	pci_status;
	u8			rev_id;
	char			board_id[MLX5_BOARD_ID_LEN];
	struct mlx5_cmd		cmd;
	struct mlx5_port_caps	port_caps[MLX5_MAX_PORTS];
	struct {
		u32 hca_cur[MLX5_CAP_NUM][MLX5_UN_SZ_DW(hca_cap_union)];
		u32 hca_max[MLX5_CAP_NUM][MLX5_UN_SZ_DW(hca_cap_union)];
		u32 pcam[MLX5_ST_SZ_DW(pcam_reg)];
		u32 mcam[MLX5_ST_SZ_DW(mcam_reg)];
		u32 fpga[MLX5_ST_SZ_DW(fpga_cap)];
		u32 qcam[MLX5_ST_SZ_DW(qcam_reg)];
	} caps;
	u64			sys_image_guid;
	phys_addr_t		iseg_base;
	struct mlx5_init_seg __iomem *iseg;
	enum mlx5_device_state	state;
	/* sync interface state */
	struct mutex		intf_state_mutex;
	unsigned long		intf_state;
	void			(*event) (struct mlx5_core_dev *dev,
					  enum mlx5_dev_event event,
					  unsigned long param);
	struct mlx5_priv	priv;
	struct mlx5_profile	*profile;
	atomic_t		num_qps;
	u32			issi;
	struct mlx5e_resources	mlx5e_res;
	struct mlx5_vxlan      *vxlan;
	struct {
		struct mlx5_rsvd_gids	reserved_gids;
		u32			roce_en;
	} roce;
#ifdef CONFIG_MLX5_FPGA
	struct mlx5_fpga_device *fpga;
#endif
	struct mlx5_clock	clock;
	struct mlx5_ib_clock_info *clock_info;
	struct page	       *clock_info_page;
	struct mlx5_fw_tracer  *tracer;
};
struct mlx5_db {
	__be32			*db;
	union {
		struct mlx5_db_pgdir		*pgdir;
		struct mlx5_ib_user_db_page	*user_page;
	}			u;
	dma_addr_t		dma;
	int			index;
};

enum {
	MLX5_COMP_EQ_SIZE = 1024,
};

enum {
	MLX5_PTYS_IB = 1 << 0,
	MLX5_PTYS_EN = 1 << 2,
};

typedef void (*mlx5_cmd_cbk_t)(int status, void *context);

enum {
	MLX5_CMD_ENT_STATE_PENDING_COMP,
};
struct mlx5_cmd_work_ent {
	unsigned long		state;
	struct mlx5_cmd_msg    *in;
	struct mlx5_cmd_msg    *out;
	void		       *uout;
	int			uout_size;
	mlx5_cmd_cbk_t		callback;
	struct delayed_work	cb_timeout_work;
	void		       *context;
	int			idx;
	struct completion	done;
	struct mlx5_cmd        *cmd;
	struct work_struct	work;
	struct mlx5_cmd_layout *lay;
	u64			ts1;
	u64			ts2;
	u16			op;
	bool			polling;
};
enum phy_port_state {
	MLX5_AAA_111
};

struct mlx5_hca_vport_context {
	u32			field_select;
	bool			sm_virt_aware;
	bool			has_smi;
	bool			has_raw;
	enum port_state_policy	policy;
	enum phy_port_state	phys_state;
	enum ib_port_state	vport_state;
	u8			port_physical_state;
	u64			sys_image_guid;
	u64			port_guid;
	u64			node_guid;
	u32			cap_mask1;
	u32			cap_mask1_perm;
	u32			cap_mask2;
	u32			cap_mask2_perm;
	u16			lid;
	u8			init_type_reply; /* bitmask: see ib spec 14.2.5.6 InitTypeReply */
	u8			lmc;
	u8			subnet_timeout;
	u16			sm_lid;
	u8			sm_sl;
	u16			qkey_violation_counter;
	u16			pkey_violation_counter;
	bool			grh_required;
};
static inline void *mlx5_buf_offset(struct mlx5_frag_buf *buf, int offset)
{
	return buf->frags->buf + offset;
}
#define STRUCT_FIELD(header, field) \
	.struct_offset_bytes = offsetof(struct ib_unpacked_ ## header, field),      \
	.struct_size_bytes   = sizeof((struct ib_unpacked_ ## header *)0)->field
static inline struct mlx5_core_dev *pci2mlx5_core_dev(struct pci_dev *pdev)
{
	return pci_get_drvdata(pdev);
}

extern struct dentry *mlx5_debugfs_root;
static inline u16 fw_rev_maj(struct mlx5_core_dev *dev)
{
	return ioread32be(&dev->iseg->fw_rev) & 0xffff;
}

static inline u16 fw_rev_min(struct mlx5_core_dev *dev)
{
	return ioread32be(&dev->iseg->fw_rev) >> 16;
}

static inline u16 fw_rev_sub(struct mlx5_core_dev *dev)
{
	return ioread32be(&dev->iseg->cmdif_rev_fw_sub) & 0xffff;
}

static inline u16 cmdif_rev(struct mlx5_core_dev *dev)
{
	return ioread32be(&dev->iseg->cmdif_rev_fw_sub) >> 16;
}
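/*
 * Illustrative sketch (not code from this header) of how these accessors
 * are commonly combined to report the firmware version; the "%d.%d.%04d"
 * layout mirrors the core driver's logging:
 *
 *	dev_info(&dev->pdev->dev, "firmware version: %d.%d.%04d\n",
 *		 fw_rev_maj(dev), fw_rev_min(dev), fw_rev_sub(dev));
 */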
static inline u32 mlx5_base_mkey(const u32 key)
{
	return key & 0xffffff00u;
}
static inline void mlx5_init_fbc_offset(struct mlx5_buf_list *frags,
					u8 log_stride, u8 log_sz,
					u16 strides_offset,
					struct mlx5_frag_buf_ctrl *fbc)
{
	fbc->frags	= frags;
	fbc->log_stride = log_stride;
	fbc->log_sz	= log_sz;
	fbc->sz_m1	= (1 << fbc->log_sz) - 1;
	fbc->log_frag_strides = PAGE_SHIFT - fbc->log_stride;
	fbc->frag_sz_m1	= (1 << fbc->log_frag_strides) - 1;
	fbc->strides_offset = strides_offset;
}
static inline void mlx5_init_fbc(struct mlx5_buf_list *frags,
				 u8 log_stride, u8 log_sz,
				 struct mlx5_frag_buf_ctrl *fbc)
{
	mlx5_init_fbc_offset(frags, log_stride, log_sz, 0, fbc);
}
static inline void *mlx5_frag_buf_get_wqe(struct mlx5_frag_buf_ctrl *fbc,
					  u32 ix)
{
	u32 frag;

	ix  += fbc->strides_offset;
	frag = ix >> fbc->log_frag_strides;

	return fbc->frags[frag].buf + ((fbc->frag_sz_m1 & ix) << fbc->log_stride);
}
static inline u32
mlx5_frag_buf_get_idx_last_contig_stride(struct mlx5_frag_buf_ctrl *fbc, u32 ix)
{
	u32 last_frag_stride_idx = (ix + fbc->strides_offset) | fbc->frag_sz_m1;

	return min_t(u32, last_frag_stride_idx - fbc->strides_offset, fbc->sz_m1);
}
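/*
 * Worked example (illustrative, not code from this header). For a queue of
 * 256 entries of 64 bytes each (log_sz = 8, log_stride = 6) on 4K pages,
 * each fragment holds 2^(PAGE_SHIFT - log_stride) = 64 strides, so entry 70
 * lives in fragment 70 >> 6 = 1 at byte offset (70 & 63) << 6 = 384:
 *
 *	struct mlx5_frag_buf_ctrl fbc;
 *	void *entry;
 *
 *	mlx5_init_fbc(buf.frags, 6, 8, &fbc);
 *	entry = mlx5_frag_buf_get_wqe(&fbc, 70);
 */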
int mlx5_cmd_init(struct mlx5_core_dev *dev);
void mlx5_cmd_cleanup(struct mlx5_core_dev *dev);
void mlx5_cmd_use_events(struct mlx5_core_dev *dev);
void mlx5_cmd_use_polling(struct mlx5_core_dev *dev);

int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
		  int out_size);
int mlx5_cmd_exec_cb(struct mlx5_core_dev *dev, void *in, int in_size,
		     void *out, int out_size, mlx5_cmd_cbk_t callback,
		     void *context);
int mlx5_cmd_exec_polling(struct mlx5_core_dev *dev, void *in, int in_size,
			  void *out, int out_size);
void mlx5_cmd_mbox_status(void *out, u8 *status, u32 *syndrome);
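/*
 * Illustrative sketch of the usual calling convention (not code from this
 * header). Inboxes and outboxes are laid out with the MLX5_ST_SZ_DW() and
 * MLX5_SET() macros from linux/mlx5/device.h over the command layouts in
 * mlx5_ifc.h, e.g. for ENABLE_HCA:
 *
 *	u32 out[MLX5_ST_SZ_DW(enable_hca_out)] = {0};
 *	u32 in[MLX5_ST_SZ_DW(enable_hca_in)] = {0};
 *	int err;
 *
 *	MLX5_SET(enable_hca_in, in, opcode, MLX5_CMD_OP_ENABLE_HCA);
 *	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
 *
 * mlx5_cmd_exec() returns 0 on success; a bad mailbox status is translated
 * into a negative errno, and mlx5_cmd_mbox_status() extracts the raw
 * status/syndrome when a caller needs them.
 */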
int mlx5_core_get_caps(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type);
int mlx5_cmd_alloc_uar(struct mlx5_core_dev *dev, u32 *uarn);
int mlx5_cmd_free_uar(struct mlx5_core_dev *dev, u32 uarn);
void mlx5_health_cleanup(struct mlx5_core_dev *dev);
int mlx5_health_init(struct mlx5_core_dev *dev);
void mlx5_start_health_poll(struct mlx5_core_dev *dev);
void mlx5_stop_health_poll(struct mlx5_core_dev *dev, bool disable_health);
void mlx5_drain_health_wq(struct mlx5_core_dev *dev);
void mlx5_trigger_health_work(struct mlx5_core_dev *dev);
void mlx5_drain_health_recovery(struct mlx5_core_dev *dev);
int mlx5_buf_alloc_node(struct mlx5_core_dev *dev, int size,
			struct mlx5_frag_buf *buf, int node);
int mlx5_buf_alloc(struct mlx5_core_dev *dev,
		   int size, struct mlx5_frag_buf *buf);
void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_frag_buf *buf);
int mlx5_frag_buf_alloc_node(struct mlx5_core_dev *dev, int size,
			     struct mlx5_frag_buf *buf, int node);
void mlx5_frag_buf_free(struct mlx5_core_dev *dev, struct mlx5_frag_buf *buf);
struct mlx5_cmd_mailbox *mlx5_alloc_cmd_mailbox_chain(struct mlx5_core_dev *dev,
						      gfp_t flags, int npages);
void mlx5_free_cmd_mailbox_chain(struct mlx5_core_dev *dev,
				 struct mlx5_cmd_mailbox *head);
int mlx5_core_create_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
			 struct mlx5_srq_attr *in);
int mlx5_core_destroy_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq);
int mlx5_core_query_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
			struct mlx5_srq_attr *out);
int mlx5_core_arm_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
		      u16 lwm, int is_srq);
void mlx5_init_mkey_table(struct mlx5_core_dev *dev);
void mlx5_cleanup_mkey_table(struct mlx5_core_dev *dev);
int mlx5_core_create_mkey_cb(struct mlx5_core_dev *dev,
			     struct mlx5_core_mkey *mkey,
			     u32 *in, int inlen,
			     u32 *out, int outlen,
			     mlx5_cmd_cbk_t callback, void *context);
int mlx5_core_create_mkey(struct mlx5_core_dev *dev,
			  struct mlx5_core_mkey *mkey,
			  u32 *in, int inlen);
int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev,
			   struct mlx5_core_mkey *mkey);
int mlx5_core_query_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mkey *mkey,
			 u32 *out, int outlen);
int mlx5_core_alloc_pd(struct mlx5_core_dev *dev, u32 *pdn);
int mlx5_core_dealloc_pd(struct mlx5_core_dev *dev, u32 pdn);
int mlx5_core_mad_ifc(struct mlx5_core_dev *dev, const void *inb, void *outb,
		      u16 opmod, u8 port);
int mlx5_pagealloc_init(struct mlx5_core_dev *dev);
void mlx5_pagealloc_cleanup(struct mlx5_core_dev *dev);
void mlx5_pagealloc_start(struct mlx5_core_dev *dev);
void mlx5_pagealloc_stop(struct mlx5_core_dev *dev);
void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id,
				 s32 npages);
int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot);
int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev);
void mlx5_register_debugfs(void);
void mlx5_unregister_debugfs(void);

void mlx5_fill_page_array(struct mlx5_frag_buf *buf, __be64 *pas);
void mlx5_fill_page_frag_array(struct mlx5_frag_buf *frag_buf, __be64 *pas);
struct mlx5_core_srq *mlx5_core_get_srq(struct mlx5_core_dev *dev, u32 srqn);
int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn,
		    unsigned int *irqn);
int mlx5_core_attach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn);
int mlx5_core_detach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn);

int mlx5_qp_debugfs_init(struct mlx5_core_dev *dev);
void mlx5_qp_debugfs_cleanup(struct mlx5_core_dev *dev);
int mlx5_core_access_reg(struct mlx5_core_dev *dev, void *data_in,
			 int size_in, void *data_out, int size_out,
			 u16 reg_num, int arg, int write);
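/*
 * Illustrative sketch of a register query through the access-register
 * interface (not code from this header); this mirrors how the port helpers
 * read PMTU via MLX5_REG_PMTU with write == 0:
 *
 *	u32 out[MLX5_ST_SZ_DW(pmtu_reg)] = {0};
 *	u32 in[MLX5_ST_SZ_DW(pmtu_reg)] = {0};
 *	int err;
 *
 *	MLX5_SET(pmtu_reg, in, local_port, 1);
 *	err = mlx5_core_access_reg(dev, in, sizeof(in), out, sizeof(out),
 *				   MLX5_REG_PMTU, 0, 0);
 */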
int mlx5_db_alloc(struct mlx5_core_dev *dev, struct mlx5_db *db);
int mlx5_db_alloc_node(struct mlx5_core_dev *dev, struct mlx5_db *db,
		       int node);
void mlx5_db_free(struct mlx5_core_dev *dev, struct mlx5_db *db);
const char *mlx5_command_str(int command);
int mlx5_cmdif_debugfs_init(struct mlx5_core_dev *dev);
void mlx5_cmdif_debugfs_cleanup(struct mlx5_core_dev *dev);
int mlx5_core_create_psv(struct mlx5_core_dev *dev, u32 pdn,
			 int npsvs, u32 *sig_index);
int mlx5_core_destroy_psv(struct mlx5_core_dev *dev, int psv_num);
void mlx5_core_put_rsc(struct mlx5_core_rsc_common *common);
int mlx5_query_odp_caps(struct mlx5_core_dev *dev,
			struct mlx5_odp_caps *odp_caps);
int mlx5_core_query_ib_ppcnt(struct mlx5_core_dev *dev,
			     u8 port_num, void *out, size_t sz);
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
int mlx5_core_page_fault_resume(struct mlx5_core_dev *dev, u32 token,
				u32 wq_num, u8 type, int error);
#endif
int mlx5_init_rl_table(struct mlx5_core_dev *dev);
void mlx5_cleanup_rl_table(struct mlx5_core_dev *dev);
int mlx5_rl_add_rate(struct mlx5_core_dev *dev, u16 *index,
		     struct mlx5_rate_limit *rl);
void mlx5_rl_remove_rate(struct mlx5_core_dev *dev, struct mlx5_rate_limit *rl);
bool mlx5_rl_is_in_range(struct mlx5_core_dev *dev, u32 rate);
bool mlx5_rl_are_equal(struct mlx5_rate_limit *rl_0,
		       struct mlx5_rate_limit *rl_1);
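/*
 * Illustrative usage sketch (not code from this header). A caller validates
 * the requested rate against the device's packet-pacing range, takes a
 * reference on a shared rate-limit table entry, and drops it when done;
 * matching add/remove calls must pass equal mlx5_rate_limit values:
 *
 *	struct mlx5_rate_limit rl = { .rate = rate };
 *	u16 index;
 *	int err;
 *
 *	if (!mlx5_rl_is_in_range(dev, rl.rate))
 *		return -EINVAL;
 *	err = mlx5_rl_add_rate(dev, &index, &rl);
 *	...
 *	mlx5_rl_remove_rate(dev, &rl);
 */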
int mlx5_alloc_bfreg(struct mlx5_core_dev *mdev, struct mlx5_sq_bfreg *bfreg,
		     bool map_wc, bool fast_path);
void mlx5_free_bfreg(struct mlx5_core_dev *mdev, struct mlx5_sq_bfreg *bfreg);
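/*
 * Illustrative sketch (not code from this header) of grabbing a
 * write-combining blue-flame register for a send queue and releasing it on
 * teardown:
 *
 *	struct mlx5_sq_bfreg bfreg;
 *	int err;
 *
 *	err = mlx5_alloc_bfreg(mdev, &bfreg, true, false);
 *	if (err)
 *		return err;
 *	...
 *	mlx5_free_bfreg(mdev, &bfreg);
 */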
unsigned int mlx5_comp_vectors_count(struct mlx5_core_dev *dev);
struct cpumask *
mlx5_comp_irq_get_affinity_mask(struct mlx5_core_dev *dev, int vector);
unsigned int mlx5_core_reserved_gids_count(struct mlx5_core_dev *dev);
int mlx5_core_roce_gid_set(struct mlx5_core_dev *dev, unsigned int index,
			   u8 roce_version, u8 roce_l3_type, const u8 *gid,
			   const u8 *mac, bool vlan, u16 vlan_id, u8 port_num);
static inline int fw_initializing(struct mlx5_core_dev *dev)
{
	return ioread32be(&dev->iseg->initializing) >> 31;
}
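/*
 * Illustrative polling sketch (not code from this header); "max_wait_ms" is
 * a hypothetical timeout chosen by the caller. The core driver loops like
 * this at probe time until firmware clears the initializing bit:
 *
 *	unsigned long end = jiffies + msecs_to_jiffies(max_wait_ms);
 *
 *	while (fw_initializing(dev)) {
 *		if (time_after(jiffies, end))
 *			return -EBUSY;
 *		msleep(20);
 *	}
 */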
static inline u32 mlx5_mkey_to_idx(u32 mkey)
{
	return mkey >> 8;
}

static inline u32 mlx5_idx_to_mkey(u32 mkey_idx)
{
	return mkey_idx << 8;
}

static inline u8 mlx5_mkey_variant(u32 mkey)
{
	return mkey & 0xff;
}
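/*
 * Worked example of the mkey layout (illustrative). An mkey carries the
 * 24-bit table index in its upper bits and an 8-bit variant in the low
 * byte, so for mkey == 0x123456:
 *
 *	mlx5_mkey_to_idx(0x123456)  == 0x1234
 *	mlx5_mkey_variant(0x123456) == 0x56
 *	mlx5_idx_to_mkey(0x1234)    == 0x123400
 *	mlx5_base_mkey(0x123456)    == 0x123400  (variant masked off)
 */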
enum {
	MLX5_PROF_MASK_QP_SIZE		= (u64)1 << 0,
	MLX5_PROF_MASK_MR_CACHE		= (u64)1 << 1,
};

enum {
	MR_CACHE_LAST_STD_ENTRY = 20,
	MLX5_IMR_MTT_CACHE_ENTRY,
	MLX5_IMR_KSM_CACHE_ENTRY,
	MAX_MR_CACHE_ENTRIES
};

enum {
	MLX5_INTERFACE_PROTOCOL_IB  = 0,
	MLX5_INTERFACE_PROTOCOL_ETH = 1,
};
struct mlx5_interface {
	void *			(*add)(struct mlx5_core_dev *dev);
	void			(*remove)(struct mlx5_core_dev *dev, void *context);
	int			(*attach)(struct mlx5_core_dev *dev, void *context);
	void			(*detach)(struct mlx5_core_dev *dev, void *context);
	void			(*event)(struct mlx5_core_dev *dev, void *context,
					 enum mlx5_dev_event event, unsigned long param);
	void *			(*get_dev)(void *context);
	int			protocol;
	struct list_head	list;
};
void *mlx5_get_protocol_dev(struct mlx5_core_dev *mdev, int protocol);
int mlx5_register_interface(struct mlx5_interface *intf);
void mlx5_unregister_interface(struct mlx5_interface *intf);
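/*
 * Illustrative registration sketch (not code from this header); "demo_add"
 * and "demo_remove" are hypothetical callbacks. Once the interface is
 * registered, the core invokes .add for every probed device, and the
 * returned pointer is passed back as "context" to the other callbacks:
 *
 *	static void *demo_add(struct mlx5_core_dev *dev)
 *	{
 *		return dev;
 *	}
 *
 *	static void demo_remove(struct mlx5_core_dev *dev, void *context)
 *	{
 *	}
 *
 *	static struct mlx5_interface demo_intf = {
 *		.add      = demo_add,
 *		.remove   = demo_remove,
 *		.protocol = MLX5_INTERFACE_PROTOCOL_ETH,
 *	};
 *
 *	err = mlx5_register_interface(&demo_intf);
 */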
int mlx5_notifier_register(struct mlx5_core_dev *dev, struct notifier_block *nb);
int mlx5_notifier_unregister(struct mlx5_core_dev *dev, struct notifier_block *nb);
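/*
 * Illustrative sketch of subscribing to device events through the notifier
 * chain (not code from this header); "demo_event" is a hypothetical
 * callback receiving the event type and its payload:
 *
 *	static int demo_event(struct notifier_block *nb, unsigned long type,
 *			      void *data)
 *	{
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block demo_nb = { .notifier_call = demo_event };
 *
 *	mlx5_notifier_register(dev, &demo_nb);
 *	...
 *	mlx5_notifier_unregister(dev, &demo_nb);
 */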
int mlx5_core_query_vendor_id(struct mlx5_core_dev *mdev, u32 *vendor_id);

int mlx5_cmd_create_vport_lag(struct mlx5_core_dev *dev);
int mlx5_cmd_destroy_vport_lag(struct mlx5_core_dev *dev);
bool mlx5_lag_is_active(struct mlx5_core_dev *dev);
struct net_device *mlx5_lag_get_roce_netdev(struct mlx5_core_dev *dev);
int mlx5_lag_query_cong_counters(struct mlx5_core_dev *dev,
				 u64 *values,
				 int num_counters,
				 size_t *offsets);
struct mlx5_uars_page *mlx5_get_uars_page(struct mlx5_core_dev *mdev);
void mlx5_put_uars_page(struct mlx5_core_dev *mdev, struct mlx5_uars_page *up);
#ifdef CONFIG_MLX5_CORE_IPOIB
struct net_device *mlx5_rdma_netdev_alloc(struct mlx5_core_dev *mdev,
					  struct ib_device *ibdev,
					  const char *name,
					  void (*setup)(struct net_device *));
#endif /* CONFIG_MLX5_CORE_IPOIB */
int mlx5_rdma_rn_get_params(struct mlx5_core_dev *mdev,
			    struct ib_device *device,
			    struct rdma_netdev_alloc_params *params);
struct mlx5_profile {
	u64	mask;
	u8	log_max_qp;
	struct {
		int	size;
		int	limit;
	} mr_cache[MAX_MR_CACHE_ENTRIES];
};

enum {
	MLX5_PCI_DEV_IS_VF		= 1 << 0,
};
static inline int mlx5_core_is_pf(struct mlx5_core_dev *dev)
{
	return !(dev->priv.pci_dev_data & MLX5_PCI_DEV_IS_VF);
}

#define MLX5_TOTAL_VPORTS(mdev) (1 + pci_sriov_get_totalvfs((mdev)->pdev))
#define MLX5_VPORT_MANAGER(mdev) \
	(MLX5_CAP_GEN(mdev, vport_group_manager) && \
	 (MLX5_CAP_GEN(mdev, port_type) == MLX5_CAP_PORT_TYPE_ETH) && \
	 mlx5_core_is_pf(mdev))
static inline int mlx5_get_gid_table_len(u16 param)
{
	if (param > 4) {
		pr_warn("gid table length parameter is invalid\n");
		return 0;
	}

	return 8 * (1 << param);
}
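/*
 * Worked example (illustrative): the parameter is the log of the table size
 * in units of 8 entries, so param == 2 yields 8 * (1 << 2) = 32 GID entries.
 */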
static inline bool mlx5_rl_is_supported(struct mlx5_core_dev *dev)
{
	return !!(dev->priv.rl_table.max_size);
}

static inline int mlx5_core_is_mp_slave(struct mlx5_core_dev *dev)
{
	return MLX5_CAP_GEN(dev, affiliate_nic_vport_criteria) &&
	       MLX5_CAP_GEN(dev, num_vhca_ports) <= 1;
}

static inline int mlx5_core_is_mp_master(struct mlx5_core_dev *dev)
{
	return MLX5_CAP_GEN(dev, num_vhca_ports) > 1;
}

static inline int mlx5_core_mp_enabled(struct mlx5_core_dev *dev)
{
	return mlx5_core_is_mp_slave(dev) ||
	       mlx5_core_is_mp_master(dev);
}
static inline int mlx5_core_native_port_num(struct mlx5_core_dev *dev)
{
	if (!mlx5_core_mp_enabled(dev))
		return 1;

	return MLX5_CAP_GEN(dev, native_port_num);
}
enum {
	MLX5_TRIGGERED_CMD_COMP = (u64)1 << 32,
};

#endif /* MLX5_DRIVER_H */