 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * - Redistributions of source code must retain the above
 * copyright notice, this list of conditions and the following
 * - Redistributions in binary form must reproduce the above
 * copyright notice, this list of conditions and the following
 * disclaimer in the documentation and/or other materials
 * provided with the distribution.
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
#include <linux/kernel.h>
#include <linux/completion.h>
#include <linux/pci.h>
#include <linux/spinlock_types.h>
#include <linux/semaphore.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/radix-tree.h>
#include <linux/workqueue.h>
#include <linux/mempool.h>
#include <linux/interrupt.h>
#include <linux/idr.h>

#include <linux/mlx5/device.h>
#include <linux/mlx5/doorbell.h>
#include <linux/mlx5/srq.h>
#include <linux/timecounter.h>
#include <linux/ptp_clock_kernel.h>
        MLX5_BOARD_ID_LEN = 64,
        MLX5_MAX_NAME_LEN = 16,

        /* one minute for the sake of bringup. Generally, commands must always
         * complete and we may need to increase this timeout value
         */
        MLX5_CMD_TIMEOUT_MSEC = 60 * 1000,
        MLX5_CMD_WQ_MAX_NAME = 32,
        CMD_STATUS_SUCCESS = 0,

        MLX5_SQP_IEEE_1588 = 2,
        MLX5_SQP_SYNC_UMR = 4,

        MLX5_EQ_VEC_PAGES = 0,
        MLX5_EQ_VEC_ASYNC = 2,
        MLX5_EQ_VEC_PFAULT = 3,
        MLX5_EQ_VEC_COMP_BASE,

        MLX5_MAX_IRQ_NAME = 32
        MLX5_ATOMIC_MODE_IB_COMP = 1 << 16,
        MLX5_ATOMIC_MODE_CX = 2 << 16,
        MLX5_ATOMIC_MODE_8B = 3 << 16,
        MLX5_ATOMIC_MODE_16B = 4 << 16,
        MLX5_ATOMIC_MODE_32B = 5 << 16,
        MLX5_ATOMIC_MODE_64B = 6 << 16,
        MLX5_ATOMIC_MODE_128B = 7 << 16,
        MLX5_ATOMIC_MODE_256B = 8 << 16,
        MLX5_REG_QPTS = 0x4002,
        MLX5_REG_QETCR = 0x4005,
        MLX5_REG_QTCT = 0x400a,
        MLX5_REG_QPDPM = 0x4013,
        MLX5_REG_QCAM = 0x4019,
        MLX5_REG_DCBX_PARAM = 0x4020,
        MLX5_REG_DCBX_APP = 0x4021,
        MLX5_REG_FPGA_CAP = 0x4022,
        MLX5_REG_FPGA_CTRL = 0x4023,
        MLX5_REG_FPGA_ACCESS_REG = 0x4024,
        MLX5_REG_PCAP = 0x5001,
        MLX5_REG_PMTU = 0x5003,
        MLX5_REG_PTYS = 0x5004,
        MLX5_REG_PAOS = 0x5006,
        MLX5_REG_PFCC = 0x5007,
        MLX5_REG_PPCNT = 0x5008,
        MLX5_REG_PMAOS = 0x5012,
        MLX5_REG_PUDE = 0x5009,
        MLX5_REG_PMPE = 0x5010,
        MLX5_REG_PELC = 0x500e,
        MLX5_REG_PVLC = 0x500f,
        MLX5_REG_PCMR = 0x5041,
        MLX5_REG_PMLP = 0x5002,
        MLX5_REG_PCAM = 0x507f,
        MLX5_REG_NODE_DESC = 0x6001,
        MLX5_REG_HOST_ENDIANNESS = 0x7004,
        MLX5_REG_MCIA = 0x9014,
        MLX5_REG_MLCR = 0x902b,
        MLX5_REG_MPCNT = 0x9051,
        MLX5_REG_MTPPS = 0x9053,
        MLX5_REG_MTPPSE = 0x9054,
        MLX5_REG_MCQI = 0x9061,
        MLX5_REG_MCC = 0x9062,
        MLX5_REG_MCDA = 0x9063,
        MLX5_REG_MCAM = 0x907f,
enum mlx5_qpts_trust_state {
        MLX5_QPTS_TRUST_PCP = 1,
        MLX5_QPTS_TRUST_DSCP = 2,

enum mlx5_dcbx_oper_mode {
        MLX5E_DCBX_PARAM_VER_OPER_HOST = 0x0,
        MLX5E_DCBX_PARAM_VER_OPER_AUTO = 0x3,

enum mlx5_dct_atomic_mode {
        MLX5_ATOMIC_MODE_DCT_OFF = 20,
        MLX5_ATOMIC_MODE_DCT_NONE = 0 << MLX5_ATOMIC_MODE_DCT_OFF,
        MLX5_ATOMIC_MODE_DCT_IB_COMP = 1 << MLX5_ATOMIC_MODE_DCT_OFF,
        MLX5_ATOMIC_MODE_DCT_CX = 2 << MLX5_ATOMIC_MODE_DCT_OFF,

        MLX5_ATOMIC_OPS_CMP_SWAP = 1 << 0,
        MLX5_ATOMIC_OPS_FETCH_ADD = 1 << 1,

enum mlx5_page_fault_resume_flags {
        MLX5_PAGE_FAULT_RESUME_REQUESTOR = 1 << 0,
        MLX5_PAGE_FAULT_RESUME_WRITE = 1 << 1,
        MLX5_PAGE_FAULT_RESUME_RDMA = 1 << 2,
        MLX5_PAGE_FAULT_RESUME_ERROR = 1 << 7,

enum port_state_policy {
        MLX5_POLICY_DOWN = 0,
        MLX5_POLICY_FOLLOW = 2,
        MLX5_POLICY_INVALID = 0xffffffff
struct mlx5_field_desc {

struct mlx5_rsc_debug {
        struct mlx5_core_dev *dev;
        enum dbg_rsc_type type;
        struct mlx5_field_desc fields[0];
enum mlx5_dev_event {
        MLX5_DEV_EVENT_SYS_ERROR,
        MLX5_DEV_EVENT_PORT_UP,
        MLX5_DEV_EVENT_PORT_DOWN,
        MLX5_DEV_EVENT_PORT_INITIALIZED,
        MLX5_DEV_EVENT_LID_CHANGE,
        MLX5_DEV_EVENT_PKEY_CHANGE,
        MLX5_DEV_EVENT_GUID_CHANGE,
        MLX5_DEV_EVENT_CLIENT_REREG,
        MLX5_DEV_EVENT_DELAY_DROP_TIMEOUT,

enum mlx5_port_status {
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING

struct mlx5_bfreg_info {
        int num_low_latency_bfregs;
        /* protect bfreg allocation data structs */
        u32 num_static_sys_pages;
        u32 total_num_bfregs;
struct mlx5_cmd_first {

struct mlx5_cmd_msg {
        struct list_head list;
        struct cmd_msg_cache *parent;
        struct mlx5_cmd_first first;
        struct mlx5_cmd_mailbox *next;

struct mlx5_cmd_debug {
        struct dentry *dbg_root;
        struct dentry *dbg_in;
        struct dentry *dbg_out;
        struct dentry *dbg_outlen;
        struct dentry *dbg_status;
        struct dentry *dbg_run;
struct cmd_msg_cache {
        /* protect block chain allocations */
        struct list_head head;
        unsigned int max_inbox_size;
        unsigned int num_ent;

        MLX5_NUM_COMMAND_CACHES = 5,
struct mlx5_cmd_stats {
        struct dentry *count;
        /* protect command average calculations */

        dma_addr_t alloc_dma;

        /* protect command queue allocations */
        spinlock_t alloc_lock;

        /* protect token allocations */
        spinlock_t token_lock;
        unsigned long bitmask;
        char wq_name[MLX5_CMD_WQ_MAX_NAME];
        struct workqueue_struct *wq;
        struct semaphore sem;
        struct semaphore pages_sem;
        struct mlx5_cmd_work_ent *ent_arr[MLX5_MAX_COMMANDS];
        struct dma_pool *pool;
        struct mlx5_cmd_debug dbg;
        struct cmd_msg_cache cache[MLX5_NUM_COMMAND_CACHES];
        int checksum_disabled;
        struct mlx5_cmd_stats stats[MLX5_CMD_OP_MAX];
struct mlx5_port_caps {

struct mlx5_cmd_mailbox {
        struct mlx5_cmd_mailbox *next;

struct mlx5_buf_list {

        struct mlx5_buf_list direct;

struct mlx5_frag_buf {
        struct mlx5_buf_list *frags;
struct mlx5_eq_tasklet {
        struct list_head list;
        struct list_head process_list;
        struct tasklet_struct task;
        /* lock on completion tasklet list */

struct mlx5_eq_pagefault {
        struct work_struct work;
        /* Pagefaults lock */
        struct workqueue_struct *wq;

        struct mlx5_core_dev *dev;
        __be32 __iomem *doorbell;
        struct list_head list;
        struct mlx5_rsc_debug *dbg;
        enum mlx5_eq_type type;
        struct mlx5_eq_tasklet tasklet_ctx;
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
        struct mlx5_eq_pagefault pf_ctx;
struct mlx5_core_psv {

struct mlx5_core_sig_ctx {
        struct mlx5_core_psv psv_memory;
        struct mlx5_core_psv psv_wire;
        struct ib_sig_err err_item;
        bool sig_status_checked;

struct mlx5_core_mkey {

#define MLX5_24BIT_MASK ((1 << 24) - 1)

        MLX5_RES_QP = MLX5_EVENT_QUEUE_TYPE_QP,
        MLX5_RES_RQ = MLX5_EVENT_QUEUE_TYPE_RQ,
        MLX5_RES_SQ = MLX5_EVENT_QUEUE_TYPE_SQ,
        MLX5_RES_DCT = MLX5_EVENT_QUEUE_TYPE_DCT,

struct mlx5_core_rsc_common {
        enum mlx5_res_type res;
        struct completion free;

struct mlx5_core_srq {
        struct mlx5_core_rsc_common common; /* must be first */
        int max_avail_gather;
        void (*event)(struct mlx5_core_srq *, enum mlx5_event);
        struct completion free;
struct mlx5_eq_table {
        void __iomem *update_ci;
        void __iomem *update_arm_ci;
        struct list_head comp_eqs_list;
        struct mlx5_eq pages_eq;
        struct mlx5_eq async_eq;
        struct mlx5_eq cmd_eq;
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
        struct mlx5_eq pfault_eq;
        int num_comp_vectors;
struct mlx5_uars_page {
        struct list_head list;
        unsigned long *reg_bitmap; /* for non fast path bf regs */
        unsigned long *fp_bitmap;
        unsigned int reg_avail;
        unsigned int fp_avail;
        struct kref ref_count;
        struct mlx5_core_dev *mdev;

struct mlx5_bfreg_head {
        /* protect blue flame register allocations */
        struct list_head list;

struct mlx5_bfreg_data {
        struct mlx5_bfreg_head reg_head;
        struct mlx5_bfreg_head wc_head;

struct mlx5_sq_bfreg {
        struct mlx5_uars_page *up;
struct mlx5_core_health {
        struct health_buffer __iomem *health;
        __be32 __iomem *health_counter;
        struct timer_list timer;
        /* wq spinlock to synchronize draining */
        struct workqueue_struct *wq;
        struct work_struct work;
        struct delayed_work recover_work;
struct mlx5_cq_table {
        /* protect radix tree */
        struct radix_tree_root tree;

struct mlx5_qp_table {
        /* protect radix tree */
        struct radix_tree_root tree;

struct mlx5_srq_table {
        /* protect radix tree */
        struct radix_tree_root tree;

struct mlx5_mkey_table {
        /* protect radix tree */
        struct radix_tree_root tree;
struct mlx5_vf_context {
        enum port_state_policy policy;

struct mlx5_core_sriov {
        struct mlx5_vf_context *vfs_ctx;

struct mlx5_irq_info {
        char name[MLX5_MAX_IRQ_NAME];

struct mlx5_fc_stats {
        struct rb_root counters;
        struct list_head addlist;
        /* protect addlist add/splice operations */
        spinlock_t addlist_lock;
        struct workqueue_struct *wq;
        struct delayed_work work;
        unsigned long next_query;
        unsigned long sampling_interval; /* jiffies */
struct mlx5_pagefault;

struct mlx5_rl_entry {

struct mlx5_rl_table {
        /* protect rate limit table */
        struct mutex rl_lock;
        struct mlx5_rl_entry *rl_entry;

enum port_module_event_status_type {
        MLX5_MODULE_STATUS_PLUGGED = 0x1,
        MLX5_MODULE_STATUS_UNPLUGGED = 0x2,
        MLX5_MODULE_STATUS_ERROR = 0x3,
        MLX5_MODULE_STATUS_NUM = 0x3,
enum port_module_event_error_type {
        MLX5_MODULE_EVENT_ERROR_POWER_BUDGET_EXCEEDED,
        MLX5_MODULE_EVENT_ERROR_LONG_RANGE_FOR_NON_MLNX_CABLE_MODULE,
        MLX5_MODULE_EVENT_ERROR_BUS_STUCK,
        MLX5_MODULE_EVENT_ERROR_NO_EEPROM_RETRY_TIMEOUT,
        MLX5_MODULE_EVENT_ERROR_ENFORCE_PART_NUMBER_LIST,
        MLX5_MODULE_EVENT_ERROR_UNKNOWN_IDENTIFIER,
        MLX5_MODULE_EVENT_ERROR_HIGH_TEMPERATURE,
        MLX5_MODULE_EVENT_ERROR_BAD_CABLE,
        MLX5_MODULE_EVENT_ERROR_UNKNOWN,
        MLX5_MODULE_EVENT_ERROR_NUM,

struct mlx5_port_module_event_stats {
        u64 status_counters[MLX5_MODULE_STATUS_NUM];
        u64 error_counters[MLX5_MODULE_EVENT_ERROR_NUM];
        char name[MLX5_MAX_NAME_LEN];
        struct mlx5_eq_table eq_table;
        struct mlx5_irq_info *irq_info;
        struct workqueue_struct *pg_wq;
        struct rb_root page_root;
        struct list_head free_list;
        struct mlx5_core_health health;
        struct mlx5_srq_table srq_table;

        /* start: qp stuff */
        struct mlx5_qp_table qp_table;
        struct dentry *qp_debugfs;
        struct dentry *eq_debugfs;
        struct dentry *cq_debugfs;
        struct dentry *cmdif_debugfs;

        /* start: cq stuff */
        struct mlx5_cq_table cq_table;

        /* start: mkey stuff */
        struct mlx5_mkey_table mkey_table;
        /* end: mkey stuff */

        /* start: alloc stuff */
        /* protect buffer allocation according to NUMA node */
        struct mutex alloc_mutex;
        struct mutex pgdir_mutex;
        struct list_head pgdir_list;
        /* end: alloc stuff */
        struct dentry *dbg_root;

        /* protect mkey key part */
        spinlock_t mkey_lock;

        struct list_head dev_list;
        struct list_head ctx_list;
        struct list_head waiting_events_list;
        bool is_accum_events;

        struct mlx5_flow_steering *steering;
        struct mlx5_mpfs *mpfs;
        struct mlx5_eswitch *eswitch;
        struct mlx5_core_sriov sriov;
        struct mlx5_lag *lag;
        unsigned long pci_dev_data;
        struct mlx5_fc_stats fc_stats;
        struct mlx5_rl_table rl_table;
        struct mlx5_port_module_event_stats pme_stats;
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
        void (*pfault)(struct mlx5_core_dev *dev,
                       struct mlx5_pagefault *pfault);
        struct srcu_struct pfault_srcu;
        struct mlx5_bfreg_data bfregs;
        struct mlx5_uars_page *uar;
enum mlx5_device_state {
        MLX5_DEVICE_STATE_UP,
        MLX5_DEVICE_STATE_INTERNAL_ERROR,

enum mlx5_interface_state {
        MLX5_INTERFACE_STATE_UP = BIT(0),

enum mlx5_pci_status {
        MLX5_PCI_STATUS_DISABLED,
        MLX5_PCI_STATUS_ENABLED,

enum mlx5_pagefault_type_flags {
        MLX5_PFAULT_REQUESTOR = 1 << 0,
        MLX5_PFAULT_WRITE = 1 << 1,
        MLX5_PFAULT_RDMA = 1 << 2,
/* Contains the details of a pagefault. */
struct mlx5_pagefault {
        /* Initiator or send message responder pagefault details. */
        /* Received packet size, only valid for responders. */
        /*
         * Number of resource holding WQE, depends on type.
         * WQE index. Refers to either the send queue or
         * receive queue, according to event_subtype.
         */
        /* RDMA responder pagefault details */
        /*
         * Received packet size, minimal size page fault
         * resolution required for forward progress.
         */
        struct work_struct work;
        struct list_head tirs_list;

struct mlx5e_resources {
        struct mlx5_core_mkey mkey;
        struct mlx5_sq_bfreg bfreg;

#define MLX5_MAX_RESERVED_GIDS 8

struct mlx5_rsvd_gids {

#define MAX_PIN_NUM 8
        u8 pin_caps[MAX_PIN_NUM];
        struct work_struct out_work;
        u64 start[MAX_PIN_NUM];

        struct cyclecounter cycles;
        struct timecounter tc;
        struct hwtstamp_config hwtstamp_config;
        unsigned long overflow_period;
        struct delayed_work overflow_work;
        struct ptp_clock *ptp;
        struct ptp_clock_info ptp_info;
        struct mlx5_pps pps_info;
struct mlx5_core_dev {
        struct pci_dev *pdev;
        /* sync pci state */
        struct mutex pci_status_mutex;
        enum mlx5_pci_status pci_status;
        char board_id[MLX5_BOARD_ID_LEN];
        struct mlx5_port_caps port_caps[MLX5_MAX_PORTS];
        u32 hca_cur[MLX5_CAP_NUM][MLX5_UN_SZ_DW(hca_cap_union)];
        u32 hca_max[MLX5_CAP_NUM][MLX5_UN_SZ_DW(hca_cap_union)];
        u32 pcam[MLX5_ST_SZ_DW(pcam_reg)];
        u32 mcam[MLX5_ST_SZ_DW(mcam_reg)];
        u32 fpga[MLX5_ST_SZ_DW(fpga_cap)];
        u32 qcam[MLX5_ST_SZ_DW(qcam_reg)];
        phys_addr_t iseg_base;
        struct mlx5_init_seg __iomem *iseg;
        enum mlx5_device_state state;
        /* sync interface state */
        struct mutex intf_state_mutex;
        unsigned long intf_state;
        void (*event)(struct mlx5_core_dev *dev,
                      enum mlx5_dev_event event,
                      unsigned long param);
        struct mlx5_priv priv;
        struct mlx5_profile *profile;
        struct mlx5e_resources mlx5e_res;
        struct mlx5_rsvd_gids reserved_gids;
#ifdef CONFIG_MLX5_FPGA
        struct mlx5_fpga_device *fpga;
#ifdef CONFIG_RFS_ACCEL
        struct cpu_rmap *rmap;
        struct mlx5_clock clock;

        struct mlx5_db_pgdir *pgdir;
        struct mlx5_ib_user_db_page *user_page;
        MLX5_COMP_EQ_SIZE = 1024,

        MLX5_PTYS_IB = 1 << 0,
        MLX5_PTYS_EN = 1 << 2,

typedef void (*mlx5_cmd_cbk_t)(int status, void *context);

        MLX5_CMD_ENT_STATE_PENDING_COMP,
struct mlx5_cmd_work_ent {
        struct mlx5_cmd_msg *in;
        struct mlx5_cmd_msg *out;
        mlx5_cmd_cbk_t callback;
        struct delayed_work cb_timeout_work;
        struct completion done;
        struct mlx5_cmd *cmd;
        struct work_struct work;
        struct mlx5_cmd_layout *lay;
enum phy_port_state {

struct mlx5_hca_vport_context {
        enum port_state_policy policy;
        enum phy_port_state phys_state;
        enum ib_port_state vport_state;
        u8 port_physical_state;
        u8 init_type_reply; /* bitmask: see IB spec 14.2.5.6 InitTypeReply */
        u16 qkey_violation_counter;
        u16 pkey_violation_counter;
static inline void *mlx5_buf_offset(struct mlx5_buf *buf, int offset)
{
        return buf->direct.buf + offset;
}

#define STRUCT_FIELD(header, field) \
        .struct_offset_bytes = offsetof(struct ib_unpacked_ ## header, field), \
        .struct_size_bytes   = sizeof((struct ib_unpacked_ ## header *)0)->field
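/*
 * Illustrative (hypothetical) use of STRUCT_FIELD: it fills the
 * struct_offset_bytes/struct_size_bytes pair of a struct ib_field entry from
 * an rdma/ib_pack.h ib_unpacked_* layout. The table name and the packed
 * offsets below are made up for the sake of the example:
 *
 *	static const struct ib_field lrh_table[] = {
 *		{ STRUCT_FIELD(lrh, virtual_lane),
 *		  .offset_words = 0,
 *		  .offset_bits  = 0,
 *		  .size_bits    = 4 },
 *	};
 */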
static inline struct mlx5_core_dev *pci2mlx5_core_dev(struct pci_dev *pdev)
{
        return pci_get_drvdata(pdev);
}

extern struct dentry *mlx5_debugfs_root;

static inline u16 fw_rev_maj(struct mlx5_core_dev *dev)
{
        return ioread32be(&dev->iseg->fw_rev) & 0xffff;
}

static inline u16 fw_rev_min(struct mlx5_core_dev *dev)
{
        return ioread32be(&dev->iseg->fw_rev) >> 16;
}

static inline u16 fw_rev_sub(struct mlx5_core_dev *dev)
{
        return ioread32be(&dev->iseg->cmdif_rev_fw_sub) & 0xffff;
}

static inline u16 cmdif_rev(struct mlx5_core_dev *dev)
{
        return ioread32be(&dev->iseg->cmdif_rev_fw_sub) >> 16;
}

static inline u32 mlx5_base_mkey(const u32 key)
{
        return key & 0xffffff00u;
}
int mlx5_cmd_init(struct mlx5_core_dev *dev);
void mlx5_cmd_cleanup(struct mlx5_core_dev *dev);
void mlx5_cmd_use_events(struct mlx5_core_dev *dev);
void mlx5_cmd_use_polling(struct mlx5_core_dev *dev);

int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
                  int out_size);
int mlx5_cmd_exec_cb(struct mlx5_core_dev *dev, void *in, int in_size,
                     void *out, int out_size, mlx5_cmd_cbk_t callback,
                     void *context);
int mlx5_cmd_exec_polling(struct mlx5_core_dev *dev, void *in, int in_size,
                          void *out, int out_size);
void mlx5_cmd_mbox_status(void *out, u8 *status, u32 *syndrome);
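/*
 * Minimal usage sketch for the FW command interface (illustrative only, not
 * part of this header's API contract). It assumes the query_adapter command
 * layout and MLX5_SET()/MLX5_ST_SZ_DW() helpers from linux/mlx5/mlx5_ifc.h
 * and linux/mlx5/device.h:
 *
 *	u32 out[MLX5_ST_SZ_DW(query_adapter_out)] = {0};
 *	u32 in[MLX5_ST_SZ_DW(query_adapter_in)] = {0};
 *	int err;
 *
 *	MLX5_SET(query_adapter_in, in, opcode, MLX5_CMD_OP_QUERY_ADAPTER);
 *	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
 *	if (!err)
 *		... read fields back with MLX5_GET(query_adapter_out, out, ...) ...
 */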
int mlx5_core_get_caps(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type);
int mlx5_cmd_alloc_uar(struct mlx5_core_dev *dev, u32 *uarn);
int mlx5_cmd_free_uar(struct mlx5_core_dev *dev, u32 uarn);
void mlx5_health_cleanup(struct mlx5_core_dev *dev);
int mlx5_health_init(struct mlx5_core_dev *dev);
void mlx5_start_health_poll(struct mlx5_core_dev *dev);
void mlx5_stop_health_poll(struct mlx5_core_dev *dev);
void mlx5_drain_health_wq(struct mlx5_core_dev *dev);
void mlx5_trigger_health_work(struct mlx5_core_dev *dev);
void mlx5_drain_health_recovery(struct mlx5_core_dev *dev);
int mlx5_buf_alloc_node(struct mlx5_core_dev *dev, int size,
                        struct mlx5_buf *buf, int node);
int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, struct mlx5_buf *buf);
void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_buf *buf);
int mlx5_frag_buf_alloc_node(struct mlx5_core_dev *dev, int size,
                             struct mlx5_frag_buf *buf, int node);
void mlx5_frag_buf_free(struct mlx5_core_dev *dev, struct mlx5_frag_buf *buf);
struct mlx5_cmd_mailbox *mlx5_alloc_cmd_mailbox_chain(struct mlx5_core_dev *dev,
                                                      gfp_t flags, int npages);
void mlx5_free_cmd_mailbox_chain(struct mlx5_core_dev *dev,
                                 struct mlx5_cmd_mailbox *head);
int mlx5_core_create_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
                         struct mlx5_srq_attr *in);
int mlx5_core_destroy_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq);
int mlx5_core_query_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
                        struct mlx5_srq_attr *out);
int mlx5_core_arm_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
                      u16 lwm, int is_srq);
void mlx5_init_mkey_table(struct mlx5_core_dev *dev);
void mlx5_cleanup_mkey_table(struct mlx5_core_dev *dev);
int mlx5_core_create_mkey_cb(struct mlx5_core_dev *dev,
                             struct mlx5_core_mkey *mkey,
                             u32 *out, int outlen,
                             mlx5_cmd_cbk_t callback, void *context);
int mlx5_core_create_mkey(struct mlx5_core_dev *dev,
                          struct mlx5_core_mkey *mkey,
                          u32 *in, int inlen);
int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev,
                           struct mlx5_core_mkey *mkey);
int mlx5_core_query_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mkey *mkey,
                         u32 *out, int outlen);
int mlx5_core_dump_fill_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mkey *_mkey,
int mlx5_core_alloc_pd(struct mlx5_core_dev *dev, u32 *pdn);
int mlx5_core_dealloc_pd(struct mlx5_core_dev *dev, u32 pdn);
int mlx5_core_mad_ifc(struct mlx5_core_dev *dev, const void *inb, void *outb,
                      u16 opmod, u8 port);
void mlx5_pagealloc_init(struct mlx5_core_dev *dev);
void mlx5_pagealloc_cleanup(struct mlx5_core_dev *dev);
int mlx5_pagealloc_start(struct mlx5_core_dev *dev);
void mlx5_pagealloc_stop(struct mlx5_core_dev *dev);
void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id,
int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot);
int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev);
void mlx5_register_debugfs(void);
void mlx5_unregister_debugfs(void);
int mlx5_eq_init(struct mlx5_core_dev *dev);
void mlx5_eq_cleanup(struct mlx5_core_dev *dev);
void mlx5_fill_page_array(struct mlx5_buf *buf, __be64 *pas);
void mlx5_fill_page_frag_array(struct mlx5_frag_buf *frag_buf, __be64 *pas);
void mlx5_cq_completion(struct mlx5_core_dev *dev, u32 cqn);
void mlx5_rsc_event(struct mlx5_core_dev *dev, u32 rsn, int event_type);
void mlx5_srq_event(struct mlx5_core_dev *dev, u32 srqn, int event_type);
struct mlx5_core_srq *mlx5_core_get_srq(struct mlx5_core_dev *dev, u32 srqn);
void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool forced);
void mlx5_cq_event(struct mlx5_core_dev *dev, u32 cqn, int event_type);
int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx,
                       int nent, u64 mask, const char *name,
                       enum mlx5_eq_type type);
int mlx5_destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
int mlx5_start_eqs(struct mlx5_core_dev *dev);
int mlx5_stop_eqs(struct mlx5_core_dev *dev);
int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn,
                    unsigned int *irqn);
int mlx5_core_attach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn);
int mlx5_core_detach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn);
int mlx5_qp_debugfs_init(struct mlx5_core_dev *dev);
void mlx5_qp_debugfs_cleanup(struct mlx5_core_dev *dev);
int mlx5_core_access_reg(struct mlx5_core_dev *dev, void *data_in,
                         int size_in, void *data_out, int size_out,
                         u16 reg_num, int arg, int write);
int mlx5_debug_eq_add(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
void mlx5_debug_eq_remove(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
int mlx5_core_eq_query(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
                       u32 *out, int outlen);
int mlx5_eq_debugfs_init(struct mlx5_core_dev *dev);
void mlx5_eq_debugfs_cleanup(struct mlx5_core_dev *dev);
int mlx5_cq_debugfs_init(struct mlx5_core_dev *dev);
void mlx5_cq_debugfs_cleanup(struct mlx5_core_dev *dev);
int mlx5_db_alloc(struct mlx5_core_dev *dev, struct mlx5_db *db);
int mlx5_db_alloc_node(struct mlx5_core_dev *dev, struct mlx5_db *db,
void mlx5_db_free(struct mlx5_core_dev *dev, struct mlx5_db *db);

const char *mlx5_command_str(int command);
int mlx5_cmdif_debugfs_init(struct mlx5_core_dev *dev);
void mlx5_cmdif_debugfs_cleanup(struct mlx5_core_dev *dev);
int mlx5_core_create_psv(struct mlx5_core_dev *dev, u32 pdn,
                         int npsvs, u32 *sig_index);
int mlx5_core_destroy_psv(struct mlx5_core_dev *dev, int psv_num);
void mlx5_core_put_rsc(struct mlx5_core_rsc_common *common);
int mlx5_query_odp_caps(struct mlx5_core_dev *dev,
                        struct mlx5_odp_caps *odp_caps);
int mlx5_core_query_ib_ppcnt(struct mlx5_core_dev *dev,
                             u8 port_num, void *out, size_t sz);
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
int mlx5_core_page_fault_resume(struct mlx5_core_dev *dev, u32 token,
                                u32 wq_num, u8 type, int error);
int mlx5_init_rl_table(struct mlx5_core_dev *dev);
void mlx5_cleanup_rl_table(struct mlx5_core_dev *dev);
int mlx5_rl_add_rate(struct mlx5_core_dev *dev, u32 rate, u16 *index);
void mlx5_rl_remove_rate(struct mlx5_core_dev *dev, u32 rate);
bool mlx5_rl_is_in_range(struct mlx5_core_dev *dev, u32 rate);
int mlx5_alloc_bfreg(struct mlx5_core_dev *mdev, struct mlx5_sq_bfreg *bfreg,
                     bool map_wc, bool fast_path);
void mlx5_free_bfreg(struct mlx5_core_dev *mdev, struct mlx5_sq_bfreg *bfreg);

unsigned int mlx5_core_reserved_gids_count(struct mlx5_core_dev *dev);
int mlx5_core_roce_gid_set(struct mlx5_core_dev *dev, unsigned int index,
                           u8 roce_version, u8 roce_l3_type, const u8 *gid,
                           const u8 *mac, bool vlan, u16 vlan_id, u8 port_num);
static inline int fw_initializing(struct mlx5_core_dev *dev)
{
        return ioread32be(&dev->iseg->initializing) >> 31;
}

static inline u32 mlx5_mkey_to_idx(u32 mkey)
{
        return mkey >> 8;
}

static inline u32 mlx5_idx_to_mkey(u32 mkey_idx)
{
        return mkey_idx << 8;
}

static inline u8 mlx5_mkey_variant(u32 mkey)
{
        return mkey & 0xff;
}
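/*
 * Sketch of the mkey layout implied by the helpers above (an inference from
 * mlx5_base_mkey()/mlx5_idx_to_mkey(), not a normative hardware definition):
 * the low 8 bits carry the variant part and the upper 24 bits the index, so
 * for any mkey the round trip holds:
 *
 *	mkey == (mlx5_idx_to_mkey(mlx5_mkey_to_idx(mkey)) | mlx5_mkey_variant(mkey))
 */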
        MLX5_PROF_MASK_QP_SIZE = (u64)1 << 0,
        MLX5_PROF_MASK_MR_CACHE = (u64)1 << 1,

        MR_CACHE_LAST_STD_ENTRY = 20,
        MLX5_IMR_MTT_CACHE_ENTRY,
        MLX5_IMR_KSM_CACHE_ENTRY,
        MAX_MR_CACHE_ENTRIES

        MLX5_INTERFACE_PROTOCOL_IB = 0,
        MLX5_INTERFACE_PROTOCOL_ETH = 1,
struct mlx5_interface {
        void * (*add)(struct mlx5_core_dev *dev);
        void (*remove)(struct mlx5_core_dev *dev, void *context);
        int (*attach)(struct mlx5_core_dev *dev, void *context);
        void (*detach)(struct mlx5_core_dev *dev, void *context);
        void (*event)(struct mlx5_core_dev *dev, void *context,
                      enum mlx5_dev_event event, unsigned long param);
        void (*pfault)(struct mlx5_core_dev *dev,
                       struct mlx5_pagefault *pfault);
        void * (*get_dev)(void *context);
        struct list_head list;

void *mlx5_get_protocol_dev(struct mlx5_core_dev *mdev, int protocol);
int mlx5_register_interface(struct mlx5_interface *intf);
void mlx5_unregister_interface(struct mlx5_interface *intf);
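/*
 * Minimal registration sketch (illustrative only; "my_add" and "my_remove"
 * are hypothetical callbacks, and the .protocol member is assumed from the
 * MLX5_INTERFACE_PROTOCOL_* values above):
 *
 *	static struct mlx5_interface my_intf = {
 *		.add      = my_add,
 *		.remove   = my_remove,
 *		.protocol = MLX5_INTERFACE_PROTOCOL_ETH,
 *	};
 *
 *	mlx5_register_interface(&my_intf);    // typically from module_init()
 *	...
 *	mlx5_unregister_interface(&my_intf);  // from module_exit()
 */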
int mlx5_core_query_vendor_id(struct mlx5_core_dev *mdev, u32 *vendor_id);

int mlx5_cmd_create_vport_lag(struct mlx5_core_dev *dev);
int mlx5_cmd_destroy_vport_lag(struct mlx5_core_dev *dev);
bool mlx5_lag_is_active(struct mlx5_core_dev *dev);
struct net_device *mlx5_lag_get_roce_netdev(struct mlx5_core_dev *dev);
int mlx5_lag_query_cong_counters(struct mlx5_core_dev *dev,
struct mlx5_uars_page *mlx5_get_uars_page(struct mlx5_core_dev *mdev);
void mlx5_put_uars_page(struct mlx5_core_dev *mdev, struct mlx5_uars_page *up);
#ifndef CONFIG_MLX5_CORE_IPOIB
static inline
struct net_device *mlx5_rdma_netdev_alloc(struct mlx5_core_dev *mdev,
                                          struct ib_device *ibdev,
                                          void (*setup)(struct net_device *))
{
        return ERR_PTR(-EOPNOTSUPP);
}
static inline void mlx5_rdma_netdev_free(struct net_device *netdev) {}
#else
struct net_device *mlx5_rdma_netdev_alloc(struct mlx5_core_dev *mdev,
                                          struct ib_device *ibdev,
                                          void (*setup)(struct net_device *));
void mlx5_rdma_netdev_free(struct net_device *netdev);
#endif /* CONFIG_MLX5_CORE_IPOIB */
struct mlx5_profile {
        } mr_cache[MAX_MR_CACHE_ENTRIES];

        MLX5_PCI_DEV_IS_VF = 1 << 0,
static inline int mlx5_core_is_pf(struct mlx5_core_dev *dev)
{
        return !(dev->priv.pci_dev_data & MLX5_PCI_DEV_IS_VF);
}
static inline int mlx5_get_gid_table_len(u16 param)
                pr_warn("gid table length is zero\n");
        return 8 * (1 << param);
static inline bool mlx5_rl_is_supported(struct mlx5_core_dev *dev)
{
        return !!(dev->priv.rl_table.max_size);
}

static inline int mlx5_core_is_mp_slave(struct mlx5_core_dev *dev)
{
        return MLX5_CAP_GEN(dev, affiliate_nic_vport_criteria) &&
               MLX5_CAP_GEN(dev, num_vhca_ports) <= 1;
}

static inline int mlx5_core_is_mp_master(struct mlx5_core_dev *dev)
{
        return MLX5_CAP_GEN(dev, num_vhca_ports) > 1;
}

static inline int mlx5_core_mp_enabled(struct mlx5_core_dev *dev)
{
        return mlx5_core_is_mp_slave(dev) ||
               mlx5_core_is_mp_master(dev);
}
static inline int mlx5_core_native_port_num(struct mlx5_core_dev *dev)
{
        if (!mlx5_core_mp_enabled(dev))
                return 1;

        return MLX5_CAP_GEN(dev, native_port_num);
}
        MLX5_TRIGGERED_CMD_COMP = (u64)1 << 32,
static inline const struct cpumask *
mlx5_get_vector_affinity(struct mlx5_core_dev *dev, int vector)
{
        return pci_irq_get_affinity(dev->pdev, MLX5_EQ_VEC_COMP_BASE + vector);
}

#endif /* MLX5_DRIVER_H */