/*
 * Copyright (c) 2016 Hisilicon Limited.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef _HNS_ROCE_DEVICE_H
#define _HNS_ROCE_DEVICE_H

#include <rdma/ib_verbs.h>
#include <rdma/hns-abi.h>

#define PCI_REVISION_ID_HIP08 0x21
#define PCI_REVISION_ID_HIP09 0x30

#define HNS_ROCE_MAX_MSG_LEN 0x80000000

#define HNS_ROCE_IB_MIN_SQ_STRIDE 6

#define HNS_ROCE_MIN_CQE_NUM 0x40
#define HNS_ROCE_MIN_SRQ_WQE_NUM 1

#define HNS_ROCE_MAX_IRQ_NUM 128

#define HNS_ROCE_SGE_IN_WQE 2
#define HNS_ROCE_SGE_SHIFT 4

#define HNS_ROCE_CEQ 0
#define HNS_ROCE_AEQ 1

#define HNS_ROCE_CEQE_SIZE 0x4
#define HNS_ROCE_AEQE_SIZE 0x10

#define HNS_ROCE_V3_EQE_SIZE 0x40

#define HNS_ROCE_V2_CQE_SIZE 32
#define HNS_ROCE_V3_CQE_SIZE 64

#define HNS_ROCE_V2_QPC_SZ 256
#define HNS_ROCE_V3_QPC_SZ 512

#define HNS_ROCE_MAX_PORTS 6
#define HNS_ROCE_GID_SIZE 16
#define HNS_ROCE_SGE_SIZE 16
#define HNS_ROCE_DWQE_SIZE 65536

#define HNS_ROCE_HOP_NUM_0 0xff

#define MR_TYPE_MR 0x00
#define MR_TYPE_FRMR 0x01
#define MR_TYPE_DMA 0x03

#define HNS_ROCE_FRMR_MAX_PA 512

#define PKEY_ID 0xffff
#define NODE_DESC_SIZE 64
#define DB_REG_OFFSET 0x1000
/* Page shift offset reported to hardware when PAGE_SIZE is larger than 4 KB */
#define PG_SHIFT_OFFSET (PAGE_SHIFT - 12)
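/*
 * For example, a kernel built with 16 KB pages (PAGE_SHIFT == 14) reports an
 * offset of 2 to the hardware, while the default 4 KB pages give an offset
 * of 0.
 */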
#define HNS_ROCE_IDX_QUE_ENTRY_SZ 4
#define SRQ_DB_REG 0x230

#define HNS_ROCE_QP_BANK_NUM 8
#define HNS_ROCE_CQ_BANK_NUM 4

#define CQ_BANKID_SHIFT 2
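/*
 * Illustrative sketch (an assumption, not part of this header): the low
 * CQ_BANKID_SHIFT bits of a CQN are presumed to select one of the
 * HNS_ROCE_CQ_BANK_NUM banks, with the remaining bits indexing into that
 * bank, e.g.:
 *
 *	static inline u8 example_cq_bankid(unsigned long cqn)
 *	{
 *		return (u8)(cqn & (HNS_ROCE_CQ_BANK_NUM - 1));
 *	}
 */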
enum hns_roce_qp_state {
	HNS_ROCE_QP_STATE_RST,
	HNS_ROCE_QP_STATE_INIT,
	HNS_ROCE_QP_STATE_RTR,
	HNS_ROCE_QP_STATE_RTS,
	HNS_ROCE_QP_STATE_SQD,
	HNS_ROCE_QP_STATE_ERR,
	HNS_ROCE_QP_NUM_STATE,
};
enum hns_roce_event {
	HNS_ROCE_EVENT_TYPE_PATH_MIG = 0x01,
	HNS_ROCE_EVENT_TYPE_PATH_MIG_FAILED = 0x02,
	HNS_ROCE_EVENT_TYPE_COMM_EST = 0x03,
	HNS_ROCE_EVENT_TYPE_SQ_DRAINED = 0x04,
	HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR = 0x05,
	HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR = 0x06,
	HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR = 0x07,
	HNS_ROCE_EVENT_TYPE_SRQ_LIMIT_REACH = 0x08,
	HNS_ROCE_EVENT_TYPE_SRQ_LAST_WQE_REACH = 0x09,
	HNS_ROCE_EVENT_TYPE_SRQ_CATAS_ERROR = 0x0a,
	HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR = 0x0b,
	HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW = 0x0c,
	HNS_ROCE_EVENT_TYPE_CQ_ID_INVALID = 0x0d,
	HNS_ROCE_EVENT_TYPE_PORT_CHANGE = 0x0f,
	/* 0x10 and 0x11 are unused in the current application */
	HNS_ROCE_EVENT_TYPE_DB_OVERFLOW = 0x12,
	HNS_ROCE_EVENT_TYPE_MB = 0x13,
	HNS_ROCE_EVENT_TYPE_FLR = 0x15,
	HNS_ROCE_EVENT_TYPE_XRCD_VIOLATION = 0x16,
	HNS_ROCE_EVENT_TYPE_INVALID_XRCETH = 0x17,
};
#define HNS_ROCE_CAP_FLAGS_EX_SHIFT 12

enum {
	HNS_ROCE_CAP_FLAG_REREG_MR = BIT(0),
	HNS_ROCE_CAP_FLAG_ROCE_V1_V2 = BIT(1),
	HNS_ROCE_CAP_FLAG_RQ_INLINE = BIT(2),
	HNS_ROCE_CAP_FLAG_CQ_RECORD_DB = BIT(3),
	HNS_ROCE_CAP_FLAG_QP_RECORD_DB = BIT(4),
	HNS_ROCE_CAP_FLAG_SRQ = BIT(5),
	HNS_ROCE_CAP_FLAG_XRC = BIT(6),
	HNS_ROCE_CAP_FLAG_MW = BIT(7),
	HNS_ROCE_CAP_FLAG_FRMR = BIT(8),
	HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL = BIT(9),
	HNS_ROCE_CAP_FLAG_ATOMIC = BIT(10),
	HNS_ROCE_CAP_FLAG_DIRECT_WQE = BIT(12),
	HNS_ROCE_CAP_FLAG_SDI_MODE = BIT(14),
	HNS_ROCE_CAP_FLAG_STASH = BIT(17),
};
#define HNS_ROCE_DB_TYPE_COUNT 2
#define HNS_ROCE_DB_UNIT_SIZE 4

enum {
	HNS_ROCE_DB_PER_PAGE = PAGE_SIZE / 4
};
enum hns_roce_reset_stage {
	HNS_ROCE_STATE_NON_RST,
	HNS_ROCE_STATE_RST_BEF_DOWN,
	HNS_ROCE_STATE_RST_DOWN,
	HNS_ROCE_STATE_RST_UNINIT,
	HNS_ROCE_STATE_RST_INIT,
	HNS_ROCE_STATE_RST_INITED,
};
enum hns_roce_instance_state {
	HNS_ROCE_STATE_NON_INIT,
	HNS_ROCE_STATE_INITED,
	HNS_ROCE_STATE_UNINIT,
};
enum {
	HNS_ROCE_RST_DIRECT_RETURN = 0,
};

#define HNS_ROCE_CMD_SUCCESS 1

/* The minimum page size supported by the hardware is 4 KB */
#define HNS_HW_PAGE_SHIFT 12
#define HNS_HW_PAGE_SIZE (1 << HNS_HW_PAGE_SHIFT)
struct hns_roce_uar {
	unsigned long logic_idx;
};
enum hns_roce_mmap_type {
	HNS_ROCE_MMAP_TYPE_DB = 1,
	HNS_ROCE_MMAP_TYPE_DWQE,
};
struct hns_user_mmap_entry {
	struct rdma_user_mmap_entry rdma_entry;
	enum hns_roce_mmap_type mmap_type;
};
struct hns_roce_ucontext {
	struct ib_ucontext ibucontext;
	struct hns_roce_uar uar;
	struct list_head page_list;
	struct mutex page_mutex;
	struct hns_user_mmap_entry *db_mmap_entry;
};
struct hns_roce_xrcd {
	struct ib_xrcd ibxrcd;
};
struct hns_roce_bitmap {
	/* tracks the last bit found set to 1 during bitmap traversal */
	unsigned long reserved_top;
	unsigned long *table;
};
struct hns_roce_ida {
	struct ida ida;
	u32 min; /* Lowest ID to allocate. */
	u32 max; /* Highest ID to allocate. */
};
/* For Hardware Entry Memory */
struct hns_roce_hem_table {
	/* HEM type: 0 = qpc, 1 = mtt, 2 = cqc, 3 = srq, 4 = other */
	/* Number of elements in the HEM array */
	unsigned long num_hem;
	/* Size of a single object */
	unsigned long obj_size;
	unsigned long table_chunk_size;
	struct hns_roce_hem **hem;
	dma_addr_t *bt_l1_dma_addr;
	dma_addr_t *bt_l0_dma_addr;
};
struct hns_roce_buf_region {
	u32 offset; /* page offset */
	u32 count; /* page count */
	int hopnum; /* addressing hop num */
};
#define HNS_ROCE_MAX_BT_REGION 3
#define HNS_ROCE_MAX_BT_LEVEL 3

struct hns_roce_hem_list {
	struct list_head root_bt;
	/* links all BT DMA memory according to the hop configuration */
	struct list_head mid_bt[HNS_ROCE_MAX_BT_REGION][HNS_ROCE_MAX_BT_LEVEL];
	struct list_head btm_bt; /* link all bottom bt in @mid_bt */
	dma_addr_t root_ba; /* address of the root BA table */
};
struct hns_roce_buf_attr {
	struct {
		size_t size; /* region size */
		int hopnum; /* multi-hop addressing hop num */
	} region[HNS_ROCE_MAX_BT_REGION];
	unsigned int region_count; /* valid region count */
	unsigned int page_shift; /* buffer page shift */
	unsigned int user_access; /* umem access flag */
	bool mtt_only; /* only alloc buffer-required MTT memory */
};
struct hns_roce_hem_cfg {
	dma_addr_t root_ba; /* root BA table's address */
	bool is_direct; /* addressing without BA table */
	unsigned int ba_pg_shift; /* BA table page shift */
	unsigned int buf_pg_shift; /* buffer page shift */
	unsigned int buf_pg_count; /* buffer page count */
	struct hns_roce_buf_region region[HNS_ROCE_MAX_BT_REGION];
	unsigned int region_count;
};
/* memory translation region */
struct hns_roce_mtr {
	struct hns_roce_hem_list hem_list; /* multi-hop addressing resource */
	struct ib_umem *umem; /* user space buffer */
	struct hns_roce_buf *kmem; /* kernel space buffer */
	struct hns_roce_hem_cfg hem_cfg; /* config for hardware addressing */
};
struct hns_roce_mw {
	struct ib_mw ibmw;
	int enabled; /* MW's active status */
};
struct hns_roce_mr {
	struct ib_mr ibmr;
	u64 iova; /* MR's original virtual address */
	u64 size; /* Address range of MR */
	u32 key; /* Key of MR */
	u32 pd; /* PD num of MR */
	u32 access; /* Access permission of MR */
	int enabled; /* MR's active status */
	int type; /* MR's register type */
	u32 pbl_hop_num; /* multi-hop number */
	struct hns_roce_mtr pbl_mtr;
	dma_addr_t *page_list;
};
struct hns_roce_mr_table {
	struct hns_roce_ida mtpt_ida;
	struct hns_roce_hem_table mtpt_table;
};
struct hns_roce_wq {
	u64 *wrid; /* work request IDs */
	u32 wqe_cnt; /* number of WQEs */
	u32 wqe_shift; /* log2 of the WQE size */
	void __iomem *db_reg;
};
struct hns_roce_sge {
	unsigned int sge_cnt; /* number of SGEs */
	u32 sge_shift; /* log2 of the SGE size */
};
struct hns_roce_buf_list {
	void *buf;
	dma_addr_t map;
};
/*
 * %HNS_ROCE_BUF_DIRECT indicates that all of the memory must be in a
 * continuous address space when the hns_roce_buf is used.
 *
 * %HNS_ROCE_BUF_NOSLEEP indicates that the caller cannot sleep.
 *
 * %HNS_ROCE_BUF_NOFAIL indicates that the allocation only fails when the
 * allocated size is zero, even if the allocated size is smaller than the
 * required size.
 */
enum {
	HNS_ROCE_BUF_DIRECT = BIT(0),
	HNS_ROCE_BUF_NOSLEEP = BIT(1),
	HNS_ROCE_BUF_NOFAIL = BIT(2),
};
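/*
 * Minimal usage sketch for these flags, assuming a valid hr_dev and a 64 KB
 * buffer request (illustrative only; hns_roce_buf_alloc() is declared later
 * in this header):
 *
 *	struct hns_roce_buf *buf;
 *
 *	buf = hns_roce_buf_alloc(hr_dev, 65536, HNS_HW_PAGE_SHIFT,
 *				 HNS_ROCE_BUF_DIRECT | HNS_ROCE_BUF_NOSLEEP);
 *	if (IS_ERR(buf))
 *		return PTR_ERR(buf);
 */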
struct hns_roce_buf {
	struct hns_roce_buf_list *trunk_list;
	unsigned int trunk_shift;
	unsigned int page_shift;
};
struct hns_roce_db_pgdir {
	struct list_head list;
	DECLARE_BITMAP(order0, HNS_ROCE_DB_PER_PAGE);
	DECLARE_BITMAP(order1, HNS_ROCE_DB_PER_PAGE / HNS_ROCE_DB_TYPE_COUNT);
	unsigned long *bits[HNS_ROCE_DB_TYPE_COUNT];
};
struct hns_roce_user_db_page {
	struct list_head list;
	struct ib_umem *umem;
	unsigned long user_virt;
};
struct hns_roce_db {
	struct hns_roce_db_pgdir *pgdir;
	struct hns_roce_user_db_page *user_page;
};
struct hns_roce_cq {
	struct ib_cq ib_cq;
	struct hns_roce_mtr mtr;
	struct hns_roce_db db;
	void __iomem *db_reg;
	struct completion free;
	struct list_head sq_list; /* all QPs using this CQ as send CQ */
	struct list_head rq_list; /* all QPs using this CQ as recv CQ */
	int is_armed; /* CQ is armed */
	struct list_head node; /* all armed CQs are on a list */
};
struct hns_roce_idx_que {
	struct hns_roce_mtr mtr;
	unsigned long *bitmap;
};
struct hns_roce_srq {
	struct ib_srq ibsrq;
	void __iomem *db_reg;
	struct completion free;
	struct hns_roce_mtr buf_mtr;
	struct hns_roce_idx_que idx_que;
	void (*event)(struct hns_roce_srq *srq, enum hns_roce_event event);
};
struct hns_roce_uar_table {
	struct hns_roce_bitmap bitmap;
};
struct hns_roce_bank {
	u32 inuse; /* Number of IDs allocated */
	u32 min; /* Lowest ID to allocate. */
	u32 max; /* Highest ID to allocate. */
	u32 next; /* Next ID to allocate. */
};
struct hns_roce_idx_table {
};
struct hns_roce_qp_table {
	struct hns_roce_hem_table qp_table;
	struct hns_roce_hem_table irrl_table;
	struct hns_roce_hem_table trrl_table;
	struct hns_roce_hem_table sccc_table;
	struct mutex scc_mutex;
	struct hns_roce_bank bank[HNS_ROCE_QP_BANK_NUM];
	struct mutex bank_mutex;
	struct hns_roce_idx_table idx_table;
};
struct hns_roce_cq_table {
	struct hns_roce_hem_table table;
	struct hns_roce_bank bank[HNS_ROCE_CQ_BANK_NUM];
	struct mutex bank_mutex;
};
struct hns_roce_srq_table {
	struct hns_roce_ida srq_ida;
	struct hns_roce_hem_table table;
};
struct hns_roce_av {
	u8 dgid[HNS_ROCE_GID_SIZE];
};
struct hns_roce_ah {
	struct ib_ah ibah;
	struct hns_roce_av av;
};
struct hns_roce_cmd_context {
	struct completion done;
};
struct hns_roce_cmdq {
	struct dma_pool *pool;
	struct semaphore poll_sem;
	/*
	 * Event mode: protects the command registers; ensures the number of
	 * outstanding commands does not exceed max_cmds and that users stay
	 * within their allowed region.
	 */
	struct semaphore event_sem;
	spinlock_t context_lock;
	struct hns_roce_cmd_context *context;
	/*
	 * Indicates whether the command queue uses event mode; initialized
	 * to a non-zero (polling) value. Once the command event queue is
	 * ready, the driver can switch into event mode; when the device is
	 * closed, it switches back to poll (non-event) mode.
	 */
};
struct hns_roce_cmd_mailbox {
};

struct hns_roce_rinl_sge {
};

struct hns_roce_rinl_wqe {
	struct hns_roce_rinl_sge *sg_list;
};

struct hns_roce_rinl_buf {
	struct hns_roce_rinl_wqe *wqe_list;
};

enum {
	HNS_ROCE_FLUSH_FLAG = 0,
};

struct hns_roce_work {
	struct hns_roce_dev *hr_dev;
	struct work_struct work;
};
struct hns_roce_qp {
	struct ib_qp ibqp;
	struct hns_roce_wq rq;
	struct hns_roce_db rdb;
	struct hns_roce_db sdb;
	unsigned long en_flags;
	enum ib_sig_type sq_signal_bits;
	struct hns_roce_wq sq;
	struct hns_roce_mtr mtr;
	void (*event)(struct hns_roce_qp *qp,
		      enum hns_roce_event event_type);
	struct completion free;
	struct hns_roce_sge sge;
	enum ib_mtu path_mtu;
	/* 0: flush needed, 1: unneeded */
	unsigned long flush_flag;
	struct hns_roce_work flush_work;
	struct hns_roce_rinl_buf rq_inl_buf;
	struct list_head node; /* all qps are on a list */
	struct list_head rq_node; /* all recv qps are on a list */
	struct list_head sq_node; /* all send qps are on a list */
	struct hns_user_mmap_entry *dwqe_mmap_entry;
};
struct hns_roce_ib_iboe {
	struct net_device *netdevs[HNS_ROCE_MAX_PORTS];
	struct notifier_block nb;
	u8 phy_port[HNS_ROCE_MAX_PORTS];
};
struct hns_roce_ceqe {
};

struct hns_roce_aeqe {
};
struct hns_roce_eq {
	struct hns_roce_dev *hr_dev;
	void __iomem *db_reg;
	int type_flag; /* AEQ: 1, CEQ: 0 */
	struct hns_roce_mtr mtr;
};
struct hns_roce_eq_table {
	struct hns_roce_eq *eq;
};
struct hns_roce_caps {
	int gid_table_len[HNS_ROCE_MAX_PORTS];
	int pkey_table_len[HNS_ROCE_MAX_PORTS];
	int local_ca_ack_delay;
	int max_qp_init_rdma;
	int max_qp_dest_rdma;
	int num_comp_vectors;
	int num_other_vectors;
	int qpc_timer_entry_sz;
	int cqc_timer_entry_sz;
	u32 qpc_timer_bt_num;
	u32 cqc_timer_bt_num;
	u32 qpc_timer_ba_pg_sz;
	u32 qpc_timer_buf_pg_sz;
	u32 qpc_timer_hop_num;
	u32 cqc_timer_ba_pg_sz;
	u32 cqc_timer_buf_pg_sz;
	u32 cqc_timer_hop_num;
	u32 cqe_ba_pg_sz; /* page_size = 4K * (2 ^ cqe_ba_pg_sz) */
	u32 srqwqe_buf_pg_sz;
	u32 chunk_sz; /* chunk size in non-multihop mode */
	u16 default_ceq_max_cnt;
	u16 default_ceq_period;
	u16 default_aeq_max_cnt;
	u16 default_aeq_period;
	u16 default_aeq_arm_st;
	u16 default_ceq_arm_st;
	enum cong_type cong_type;
};
struct hns_roce_dfx_hw {
	int (*query_cqc_info)(struct hns_roce_dev *hr_dev, u32 cqn,
			      int *buffer);
};
enum hns_roce_device_state {
	HNS_ROCE_DEVICE_STATE_INITED,
	HNS_ROCE_DEVICE_STATE_RST_DOWN,
	HNS_ROCE_DEVICE_STATE_UNINIT,
};
struct hns_roce_hw {
	int (*cmq_init)(struct hns_roce_dev *hr_dev);
	void (*cmq_exit)(struct hns_roce_dev *hr_dev);
	int (*hw_profile)(struct hns_roce_dev *hr_dev);
	int (*hw_init)(struct hns_roce_dev *hr_dev);
	void (*hw_exit)(struct hns_roce_dev *hr_dev);
	int (*post_mbox)(struct hns_roce_dev *hr_dev, u64 in_param,
			 u64 out_param, u32 in_modifier, u8 op_modifier,
			 u16 op, u16 token, int event);
	int (*poll_mbox_done)(struct hns_roce_dev *hr_dev,
			      unsigned int timeout);
	bool (*chk_mbox_avail)(struct hns_roce_dev *hr_dev, bool *is_busy);
	int (*set_gid)(struct hns_roce_dev *hr_dev, int gid_index,
		       const union ib_gid *gid, const struct ib_gid_attr *attr);
	int (*set_mac)(struct hns_roce_dev *hr_dev, u8 phy_port,
		       const u8 *addr);
	int (*write_mtpt)(struct hns_roce_dev *hr_dev, void *mb_buf,
			  struct hns_roce_mr *mr);
	int (*rereg_write_mtpt)(struct hns_roce_dev *hr_dev,
				struct hns_roce_mr *mr, int flags,
				void *mb_buf);
	int (*frmr_write_mtpt)(struct hns_roce_dev *hr_dev, void *mb_buf,
			       struct hns_roce_mr *mr);
	int (*mw_write_mtpt)(void *mb_buf, struct hns_roce_mw *mw);
	void (*write_cqc)(struct hns_roce_dev *hr_dev,
			  struct hns_roce_cq *hr_cq, void *mb_buf, u64 *mtts,
			  dma_addr_t dma_handle);
	int (*set_hem)(struct hns_roce_dev *hr_dev,
		       struct hns_roce_hem_table *table, int obj, int step_idx);
	int (*clear_hem)(struct hns_roce_dev *hr_dev,
			 struct hns_roce_hem_table *table, int obj,
			 int step_idx);
	int (*modify_qp)(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
			 int attr_mask, enum ib_qp_state cur_state,
			 enum ib_qp_state new_state);
	int (*qp_flow_control_init)(struct hns_roce_dev *hr_dev,
				    struct hns_roce_qp *hr_qp);
	int (*init_eq)(struct hns_roce_dev *hr_dev);
	void (*cleanup_eq)(struct hns_roce_dev *hr_dev);
	int (*write_srqc)(struct hns_roce_srq *srq, void *mb_buf);
	const struct ib_device_ops *hns_roce_dev_ops;
	const struct ib_device_ops *hns_roce_dev_srq_ops;
};
struct hns_roce_dev {
	struct ib_device ib_dev;
	struct pci_dev *pci_dev;
	struct hns_roce_uar priv_uar;
	const char *irq_names[HNS_ROCE_MAX_IRQ_NUM];
	unsigned long reset_cnt;
	struct hns_roce_ib_iboe iboe;
	enum hns_roce_device_state state;
	struct list_head qp_list; /* list of all qps on this dev */
	spinlock_t qp_list_lock; /* protect qp_list */
	struct list_head dip_list; /* list of all dest ips on this dev */
	spinlock_t dip_list_lock; /* protect dip_list */
	struct list_head pgdir_list;
	struct mutex pgdir_mutex;
	int irq[HNS_ROCE_MAX_IRQ_NUM];
	u8 __iomem *reg_base;
	void __iomem *mem_base;
	struct hns_roce_caps caps;
	struct xarray qp_table_xa;
	unsigned char dev_addr[HNS_ROCE_MAX_PORTS][ETH_ALEN];
	void __iomem *priv_addr;
	struct hns_roce_cmdq cmd;
	struct hns_roce_ida pd_ida;
	struct hns_roce_ida xrcd_ida;
	struct hns_roce_ida uar_ida;
	struct hns_roce_mr_table mr_table;
	struct hns_roce_cq_table cq_table;
	struct hns_roce_srq_table srq_table;
	struct hns_roce_qp_table qp_table;
	struct hns_roce_eq_table eq_table;
	struct hns_roce_hem_table qpc_timer_table;
	struct hns_roce_hem_table cqc_timer_table;
	/* GMV is the memory area that the driver allocates for the hardware
	 * to store SGID, SMAC and VLAN information.
	 */
	struct hns_roce_hem_table gmv_table;
	const struct hns_roce_hw *hw;
	struct workqueue_struct *irq_workq;
	const struct hns_roce_dfx_hw *dfx;
	u32 cong_algo_tmpl_id;
};
static inline struct hns_roce_dev *to_hr_dev(struct ib_device *ib_dev)
{
	return container_of(ib_dev, struct hns_roce_dev, ib_dev);
}

static inline struct hns_roce_ucontext
			*to_hr_ucontext(struct ib_ucontext *ibucontext)
{
	return container_of(ibucontext, struct hns_roce_ucontext, ibucontext);
}

static inline struct hns_roce_pd *to_hr_pd(struct ib_pd *ibpd)
{
	return container_of(ibpd, struct hns_roce_pd, ibpd);
}

static inline struct hns_roce_xrcd *to_hr_xrcd(struct ib_xrcd *ibxrcd)
{
	return container_of(ibxrcd, struct hns_roce_xrcd, ibxrcd);
}

static inline struct hns_roce_ah *to_hr_ah(struct ib_ah *ibah)
{
	return container_of(ibah, struct hns_roce_ah, ibah);
}

static inline struct hns_roce_mr *to_hr_mr(struct ib_mr *ibmr)
{
	return container_of(ibmr, struct hns_roce_mr, ibmr);
}

static inline struct hns_roce_mw *to_hr_mw(struct ib_mw *ibmw)
{
	return container_of(ibmw, struct hns_roce_mw, ibmw);
}

static inline struct hns_roce_qp *to_hr_qp(struct ib_qp *ibqp)
{
	return container_of(ibqp, struct hns_roce_qp, ibqp);
}

static inline struct hns_roce_cq *to_hr_cq(struct ib_cq *ib_cq)
{
	return container_of(ib_cq, struct hns_roce_cq, ib_cq);
}

static inline struct hns_roce_srq *to_hr_srq(struct ib_srq *ibsrq)
{
	return container_of(ibsrq, struct hns_roce_srq, ibsrq);
}

static inline struct hns_user_mmap_entry *
to_hns_mmap(struct rdma_user_mmap_entry *rdma_entry)
{
	return container_of(rdma_entry, struct hns_user_mmap_entry, rdma_entry);
}
static inline void hns_roce_write64_k(__le32 val[2], void __iomem *dest)
{
	writeq(*(u64 *)val, dest);
}
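/*
 * Illustrative sketch: a doorbell record is built as two little-endian words
 * and written with a single 64-bit MMIO store. The offset and word contents
 * below are hypothetical; real doorbell layouts are hardware specific.
 *
 *	__le32 db[2] = { cpu_to_le32(qpn), cpu_to_le32(cmd) };
 *
 *	hns_roce_write64_k(db, hr_dev->reg_base + DB_REG_OFFSET);
 */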
static inline struct hns_roce_qp
	*__hns_roce_qp_lookup(struct hns_roce_dev *hr_dev, u32 qpn)
{
	return xa_load(&hr_dev->qp_table_xa, qpn);
}
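/*
 * Illustrative sketch of resolving a QPN in an event handler (assumption:
 * the real event path additionally takes the QP table lock and a reference
 * before using the QP):
 *
 *	struct hns_roce_qp *qp = __hns_roce_qp_lookup(hr_dev, qpn);
 *
 *	if (qp && qp->event)
 *		qp->event(qp, HNS_ROCE_EVENT_TYPE_COMM_EST);
 */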
static inline void *hns_roce_buf_offset(struct hns_roce_buf *buf,
					unsigned int offset)
{
	return (char *)(buf->trunk_list[offset >> buf->trunk_shift].buf) +
			(offset & ((1 << buf->trunk_shift) - 1));
}

static inline dma_addr_t hns_roce_buf_dma_addr(struct hns_roce_buf *buf,
					       unsigned int offset)
{
	return buf->trunk_list[offset >> buf->trunk_shift].map +
			(offset & ((1 << buf->trunk_shift) - 1));
}

static inline dma_addr_t hns_roce_buf_page(struct hns_roce_buf *buf, u32 idx)
{
	return hns_roce_buf_dma_addr(buf, idx << buf->page_shift);
}
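/*
 * Example of the trunk arithmetic above: with trunk_shift == 16 (64 KB
 * trunks) and page_shift == 12, offset 0x11000 resolves to byte 0x1000 of
 * trunk 1, and hns_roce_buf_page(buf, 17) returns the DMA address of that
 * same 4 KB page.
 */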
#define hr_hw_page_align(x) ALIGN(x, 1 << HNS_HW_PAGE_SHIFT)

static inline u64 to_hr_hw_page_addr(u64 addr)
{
	return addr >> HNS_HW_PAGE_SHIFT;
}

static inline u32 to_hr_hw_page_shift(u32 page_shift)
{
	return page_shift - HNS_HW_PAGE_SHIFT;
}

static inline u32 to_hr_hem_hopnum(u32 hopnum, u32 count)
{
	return hopnum == HNS_ROCE_HOP_NUM_0 ? 0 : hopnum;
}

static inline u32 to_hr_hem_entries_size(u32 count, u32 buf_shift)
{
	return hr_hw_page_align(count << buf_shift);
}

static inline u32 to_hr_hem_entries_count(u32 count, u32 buf_shift)
{
	return hr_hw_page_align(count << buf_shift) >> buf_shift;
}

static inline u32 to_hr_hem_entries_shift(u32 count, u32 buf_shift)
{
	return ilog2(to_hr_hem_entries_count(count, buf_shift));
}
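/*
 * Worked example for the helpers above: 100 CQEs of 64 bytes (buf_shift == 6)
 * occupy 6400 bytes, which hr_hw_page_align() rounds up to 8192 bytes;
 * to_hr_hem_entries_count() then reports 128 entries and
 * to_hr_hem_entries_shift() returns ilog2(128) == 7.
 */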
#define DSCP_SHIFT 2

static inline u8 get_tclass(const struct ib_global_route *grh)
{
	return grh->sgid_attr->gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP ?
	       grh->traffic_class >> DSCP_SHIFT : grh->traffic_class;
}
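/*
 * Example: for a RoCEv2 GID (IB_GID_TYPE_ROCE_UDP_ENCAP) with traffic_class
 * 0x68, get_tclass() returns the DSCP value 0x1a; for other GID types the
 * traffic class byte is returned unchanged.
 */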
void hns_roce_init_uar_table(struct hns_roce_dev *dev);
int hns_roce_uar_alloc(struct hns_roce_dev *dev, struct hns_roce_uar *uar);

int hns_roce_cmd_init(struct hns_roce_dev *hr_dev);
void hns_roce_cmd_cleanup(struct hns_roce_dev *hr_dev);
void hns_roce_cmd_event(struct hns_roce_dev *hr_dev, u16 token, u8 status,
			u64 out_param);
int hns_roce_cmd_use_events(struct hns_roce_dev *hr_dev);
void hns_roce_cmd_use_polling(struct hns_roce_dev *hr_dev);
/* The hns RoCE hardware needs the current block and the next block address
 * from the MTT.
 */
#define MTT_MIN_COUNT 2
int hns_roce_mtr_find(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
		      u32 offset, u64 *mtt_buf, int mtt_max, u64 *base_addr);
int hns_roce_mtr_create(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
			struct hns_roce_buf_attr *buf_attr,
			unsigned int page_shift, struct ib_udata *udata,
			unsigned long user_addr);
void hns_roce_mtr_destroy(struct hns_roce_dev *hr_dev,
			  struct hns_roce_mtr *mtr);
int hns_roce_mtr_map(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
		     dma_addr_t *pages, unsigned int page_cnt);
void hns_roce_init_pd_table(struct hns_roce_dev *hr_dev);
void hns_roce_init_mr_table(struct hns_roce_dev *hr_dev);
void hns_roce_init_cq_table(struct hns_roce_dev *hr_dev);
int hns_roce_init_qp_table(struct hns_roce_dev *hr_dev);
void hns_roce_init_srq_table(struct hns_roce_dev *hr_dev);
void hns_roce_init_xrcd_table(struct hns_roce_dev *hr_dev);

void hns_roce_cleanup_eq_table(struct hns_roce_dev *hr_dev);
void hns_roce_cleanup_cq_table(struct hns_roce_dev *hr_dev);
void hns_roce_cleanup_qp_table(struct hns_roce_dev *hr_dev);

void hns_roce_cleanup_bitmap(struct hns_roce_dev *hr_dev);

int hns_roce_create_ah(struct ib_ah *ah, struct rdma_ah_init_attr *init_attr,
		       struct ib_udata *udata);
int hns_roce_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr);
static inline int hns_roce_destroy_ah(struct ib_ah *ah, u32 flags)
{
	return 0;
}
int hns_roce_alloc_pd(struct ib_pd *pd, struct ib_udata *udata);
int hns_roce_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata);

struct ib_mr *hns_roce_get_dma_mr(struct ib_pd *pd, int acc);
struct ib_mr *hns_roce_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
				   u64 virt_addr, int access_flags,
				   struct ib_udata *udata);
struct ib_mr *hns_roce_rereg_user_mr(struct ib_mr *mr, int flags, u64 start,
				     u64 length, u64 virt_addr,
				     int mr_access_flags, struct ib_pd *pd,
				     struct ib_udata *udata);
struct ib_mr *hns_roce_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
				u32 max_num_sg);
int hns_roce_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
		       unsigned int *sg_offset);
int hns_roce_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata);
int hns_roce_hw_destroy_mpt(struct hns_roce_dev *hr_dev,
			    struct hns_roce_cmd_mailbox *mailbox,
			    unsigned long mpt_index);
unsigned long key_to_hw_index(u32 key);

int hns_roce_alloc_mw(struct ib_mw *mw, struct ib_udata *udata);
int hns_roce_dealloc_mw(struct ib_mw *ibmw);

void hns_roce_buf_free(struct hns_roce_dev *hr_dev, struct hns_roce_buf *buf);
struct hns_roce_buf *hns_roce_buf_alloc(struct hns_roce_dev *hr_dev, u32 size,
					u32 page_shift, u32 flags);

int hns_roce_get_kmem_bufs(struct hns_roce_dev *hr_dev, dma_addr_t *bufs,
			   int buf_cnt, struct hns_roce_buf *buf,
			   unsigned int page_shift);
int hns_roce_get_umem_bufs(struct hns_roce_dev *hr_dev, dma_addr_t *bufs,
			   int buf_cnt, struct ib_umem *umem,
			   unsigned int page_shift);

int hns_roce_create_srq(struct ib_srq *srq,
			struct ib_srq_init_attr *srq_init_attr,
			struct ib_udata *udata);
int hns_roce_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr,
			enum ib_srq_attr_mask srq_attr_mask,
			struct ib_udata *udata);
int hns_roce_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata);

int hns_roce_alloc_xrcd(struct ib_xrcd *ib_xrcd, struct ib_udata *udata);
int hns_roce_dealloc_xrcd(struct ib_xrcd *ib_xrcd, struct ib_udata *udata);

int hns_roce_create_qp(struct ib_qp *ib_qp, struct ib_qp_init_attr *init_attr,
		       struct ib_udata *udata);
int hns_roce_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		       int attr_mask, struct ib_udata *udata);
void init_flush_work(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp);
void *hns_roce_get_recv_wqe(struct hns_roce_qp *hr_qp, unsigned int n);
void *hns_roce_get_send_wqe(struct hns_roce_qp *hr_qp, unsigned int n);
void *hns_roce_get_extend_sge(struct hns_roce_qp *hr_qp, unsigned int n);
bool hns_roce_wq_overflow(struct hns_roce_wq *hr_wq, u32 nreq,
			  struct ib_cq *ib_cq);
enum hns_roce_qp_state to_hns_roce_state(enum ib_qp_state state);
void hns_roce_lock_cqs(struct hns_roce_cq *send_cq,
		       struct hns_roce_cq *recv_cq);
void hns_roce_unlock_cqs(struct hns_roce_cq *send_cq,
			 struct hns_roce_cq *recv_cq);
void hns_roce_qp_remove(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp);
void hns_roce_qp_destroy(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
			 struct ib_udata *udata);
__be32 send_ieth(const struct ib_send_wr *wr);
int to_hr_qp_type(int qp_type);

int hns_roce_create_cq(struct ib_cq *ib_cq, const struct ib_cq_init_attr *attr,
		       struct ib_udata *udata);
int hns_roce_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata);
int hns_roce_db_map_user(struct hns_roce_ucontext *context, unsigned long virt,
			 struct hns_roce_db *db);
void hns_roce_db_unmap_user(struct hns_roce_ucontext *context,
			    struct hns_roce_db *db);
int hns_roce_alloc_db(struct hns_roce_dev *hr_dev, struct hns_roce_db *db,
		      int order);
void hns_roce_free_db(struct hns_roce_dev *hr_dev, struct hns_roce_db *db);
void hns_roce_cq_completion(struct hns_roce_dev *hr_dev, u32 cqn);
void hns_roce_cq_event(struct hns_roce_dev *hr_dev, u32 cqn, int event_type);
void flush_cqe(struct hns_roce_dev *dev, struct hns_roce_qp *qp);
void hns_roce_qp_event(struct hns_roce_dev *hr_dev, u32 qpn, int event_type);
void hns_roce_srq_event(struct hns_roce_dev *hr_dev, u32 srqn, int event_type);
u8 hns_get_gid_index(struct hns_roce_dev *hr_dev, u32 port, int gid_index);
void hns_roce_handle_device_err(struct hns_roce_dev *hr_dev);
int hns_roce_init(struct hns_roce_dev *hr_dev);
void hns_roce_exit(struct hns_roce_dev *hr_dev);
int hns_roce_fill_res_cq_entry(struct sk_buff *msg,
			       struct ib_cq *ib_cq);
struct hns_user_mmap_entry *
hns_roce_user_mmap_entry_insert(struct ib_ucontext *ucontext, u64 address,
				enum hns_roce_mmap_type mmap_type);

#endif /* _HNS_ROCE_DEVICE_H */