Merge tag 'for-linus-6.3-rc3-tag' of git://git.kernel.org/pub/scm/linux/kernel/git...
[linux-block.git] / drivers / infiniband / hw / hns / hns_roce_device.h
CommitLineData
9a443537 1/*
2 * Copyright (c) 2016 Hisilicon Limited.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#ifndef _HNS_ROCE_DEVICE_H
34#define _HNS_ROCE_DEVICE_H
35
36#include <rdma/ib_verbs.h>
53ef4999 37#include <rdma/hns-abi.h>
9a443537 38
a247fd28 39#define PCI_REVISION_ID_HIP08 0x21
247fc16d 40#define PCI_REVISION_ID_HIP09 0x30
2a3d923f 41
9a443537 42#define HNS_ROCE_MAX_MSG_LEN 0x80000000
43
9a443537 44#define HNS_ROCE_IB_MIN_SQ_STRIDE 6
45
2a3d923f
LO
46#define BA_BYTE_LEN 8
47
9a443537 48#define HNS_ROCE_MIN_CQE_NUM 0x40
6ee00fbf 49#define HNS_ROCE_MIN_SRQ_WQE_NUM 1
9a443537 50
b16f8188 51#define HNS_ROCE_MAX_IRQ_NUM 128
9a443537 52
2a3d923f
LO
53#define HNS_ROCE_SGE_IN_WQE 2
54#define HNS_ROCE_SGE_SHIFT 4
55
b16f8188
YL
56#define EQ_ENABLE 1
57#define EQ_DISABLE 0
9a443537 58
b16f8188
YL
59#define HNS_ROCE_CEQ 0
60#define HNS_ROCE_AEQ 1
61
247fc16d
WL
62#define HNS_ROCE_CEQE_SIZE 0x4
63#define HNS_ROCE_AEQE_SIZE 0x10
64
65#define HNS_ROCE_V3_EQE_SIZE 0x40
9a443537 66
09a5f210
WL
67#define HNS_ROCE_V2_CQE_SIZE 32
68#define HNS_ROCE_V3_CQE_SIZE 64
69
98912ee8
WL
70#define HNS_ROCE_V2_QPC_SZ 256
71#define HNS_ROCE_V3_QPC_SZ 512
72
9a443537 73#define HNS_ROCE_MAX_PORTS 6
9a443537 74#define HNS_ROCE_GID_SIZE 16
2a3d923f 75#define HNS_ROCE_SGE_SIZE 16
01584a5e 76#define HNS_ROCE_DWQE_SIZE 65536
9a443537 77
a25d13cb
SX
78#define HNS_ROCE_HOP_NUM_0 0xff
79
9a443537 80#define MR_TYPE_MR 0x00
68a997c5 81#define MR_TYPE_FRMR 0x01
9a443537 82#define MR_TYPE_DMA 0x03
83
68a997c5
YL
84#define HNS_ROCE_FRMR_MAX_PA 512
85
9a443537 86#define PKEY_ID 0xffff
87#define NODE_DESC_SIZE 64
509bf0c2 88#define DB_REG_OFFSET 0x1000
9a443537 89
5e6e78db
YL
90/* Configure to HW for PAGE_SIZE larger than 4KB */
91#define PG_SHIFT_OFFSET (PAGE_SHIFT - 12)
92
c7bcb134
LO
93#define HNS_ROCE_IDX_QUE_ENTRY_SZ 4
94#define SRQ_DB_REG 0x230
95
71586dd2 96#define HNS_ROCE_QP_BANK_NUM 8
1bbd4380
YL
97#define HNS_ROCE_CQ_BANK_NUM 4
98
99#define CQ_BANKID_SHIFT 2
71586dd2 100
5e049a5d
LO
/*
 * Transport service types as encoded for the hardware.  Values are part
 * of the device interface: 0..3 are in natural order, 4 is unused, and
 * XRC is fixed at 5.
 */
enum {
	SERV_TYPE_RC,
	SERV_TYPE_UC,
	SERV_TYPE_RD,
	SERV_TYPE_UD,
	SERV_TYPE_XRC = 5,
};
108
9a443537 109enum hns_roce_event {
110 HNS_ROCE_EVENT_TYPE_PATH_MIG = 0x01,
111 HNS_ROCE_EVENT_TYPE_PATH_MIG_FAILED = 0x02,
112 HNS_ROCE_EVENT_TYPE_COMM_EST = 0x03,
113 HNS_ROCE_EVENT_TYPE_SQ_DRAINED = 0x04,
114 HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR = 0x05,
115 HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR = 0x06,
116 HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR = 0x07,
117 HNS_ROCE_EVENT_TYPE_SRQ_LIMIT_REACH = 0x08,
118 HNS_ROCE_EVENT_TYPE_SRQ_LAST_WQE_REACH = 0x09,
119 HNS_ROCE_EVENT_TYPE_SRQ_CATAS_ERROR = 0x0a,
120 HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR = 0x0b,
121 HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW = 0x0c,
122 HNS_ROCE_EVENT_TYPE_CQ_ID_INVALID = 0x0d,
123 HNS_ROCE_EVENT_TYPE_PORT_CHANGE = 0x0f,
124 /* 0x10 and 0x11 is unused in currently application case */
125 HNS_ROCE_EVENT_TYPE_DB_OVERFLOW = 0x12,
126 HNS_ROCE_EVENT_TYPE_MB = 0x13,
a5073d60 127 HNS_ROCE_EVENT_TYPE_FLR = 0x15,
32548870
WL
128 HNS_ROCE_EVENT_TYPE_XRCD_VIOLATION = 0x16,
129 HNS_ROCE_EVENT_TYPE_INVALID_XRCETH = 0x17,
9a443537 130};
131
/*
 * Device capability bits kept in hns_roce_caps.flags.  Bit positions
 * are fixed by the firmware interface, hence the gaps (bits 11, 13,
 * 15, 16 and 18 are not used by this driver).
 */
enum {
	HNS_ROCE_CAP_FLAG_REREG_MR = BIT(0),
	HNS_ROCE_CAP_FLAG_ROCE_V1_V2 = BIT(1),
	HNS_ROCE_CAP_FLAG_RQ_INLINE = BIT(2),
	HNS_ROCE_CAP_FLAG_CQ_RECORD_DB = BIT(3),
	HNS_ROCE_CAP_FLAG_QP_RECORD_DB = BIT(4),
	HNS_ROCE_CAP_FLAG_SRQ = BIT(5),
	HNS_ROCE_CAP_FLAG_XRC = BIT(6),
	HNS_ROCE_CAP_FLAG_MW = BIT(7),
	HNS_ROCE_CAP_FLAG_FRMR = BIT(8),
	HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL = BIT(9),
	HNS_ROCE_CAP_FLAG_ATOMIC = BIT(10),
	HNS_ROCE_CAP_FLAG_DIRECT_WQE = BIT(12),
	HNS_ROCE_CAP_FLAG_SDI_MODE = BIT(14),
	HNS_ROCE_CAP_FLAG_STASH = BIT(17),
	HNS_ROCE_CAP_FLAG_CQE_INLINE = BIT(19),
};
149
2a3d923f
LO
150#define HNS_ROCE_DB_TYPE_COUNT 2
151#define HNS_ROCE_DB_UNIT_SIZE 4
152
e088a685
YL
153enum {
154 HNS_ROCE_DB_PER_PAGE = PAGE_SIZE / 4
155};
156
d061effc
WHX
157enum hns_roce_reset_stage {
158 HNS_ROCE_STATE_NON_RST,
159 HNS_ROCE_STATE_RST_BEF_DOWN,
160 HNS_ROCE_STATE_RST_DOWN,
161 HNS_ROCE_STATE_RST_UNINIT,
162 HNS_ROCE_STATE_RST_INIT,
163 HNS_ROCE_STATE_RST_INITED,
164};
165
166enum hns_roce_instance_state {
167 HNS_ROCE_STATE_NON_INIT,
168 HNS_ROCE_STATE_INIT,
169 HNS_ROCE_STATE_INITED,
170 HNS_ROCE_STATE_UNINIT,
171};
172
173enum {
174 HNS_ROCE_RST_DIRECT_RETURN = 0,
175};
176
9a443537 177#define HNS_ROCE_CMD_SUCCESS 1
178
9581a356
XW
179/* The minimum page size is 4K for hardware */
180#define HNS_HW_PAGE_SHIFT 12
181#define HNS_HW_PAGE_SIZE (1 << HNS_HW_PAGE_SHIFT)
9a443537 182
/* User Access Region: a doorbell page assigned to a context. */
struct hns_roce_uar {
	u64 pfn;		 /* page frame number of the doorbell page */
	unsigned long index;	 /* hardware UAR index */
	unsigned long logic_idx; /* driver-side logical UAR index */
};
188
/* Object types that user space may mmap through this driver. */
enum hns_roce_mmap_type {
	HNS_ROCE_MMAP_TYPE_DB = 1,	/* doorbell page */
	HNS_ROCE_MMAP_TYPE_DWQE,	/* direct WQE page */
};
193
/* Driver wrapper around an rdma_user_mmap_entry. */
struct hns_user_mmap_entry {
	struct rdma_user_mmap_entry rdma_entry;
	enum hns_roce_mmap_type mmap_type; /* what @address refers to */
	u64 address;			   /* address to be mapped */
};
199
9a443537 200struct hns_roce_ucontext {
201 struct ib_ucontext ibucontext;
202 struct hns_roce_uar uar;
e088a685
YL
203 struct list_head page_list;
204 struct mutex page_mutex;
6d202d9f 205 struct hns_user_mmap_entry *db_mmap_entry;
0c5e259b 206 u32 config;
9a443537 207};
208
/* Protection domain. */
struct hns_roce_pd {
	struct ib_pd ibpd;
	unsigned long pdn;	/* protection domain number */
};
213
/* eXtended Reliable Connection domain. */
struct hns_roce_xrcd {
	struct ib_xrcd ibxrcd;
	u32 xrcdn;	/* XRC domain number */
};
218
9a443537 219struct hns_roce_bitmap {
220 /* Bitmap Traversal last a bit which is 1 */
221 unsigned long last;
222 unsigned long top;
223 unsigned long max;
224 unsigned long reserved_top;
225 unsigned long mask;
226 spinlock_t lock;
227 unsigned long *table;
228};
229
/* Wrapper around struct ida carrying its allocation range. */
struct hns_roce_ida {
	struct ida ida;
	u32 min; /* Lowest ID to allocate. */
	u32 max; /* Highest ID to allocate. */
};
235
9a443537 236/* For Hardware Entry Memory */
237struct hns_roce_hem_table {
238 /* HEM type: 0 = qpc, 1 = mtt, 2 = cqc, 3 = srq, 4 = other */
239 u32 type;
240 /* HEM array elment num */
241 unsigned long num_hem;
6def7de6 242 /* Single obj size */
9a443537 243 unsigned long obj_size;
29a1fe5d 244 unsigned long table_chunk_size;
9a443537 245 struct mutex mutex;
246 struct hns_roce_hem **hem;
a25d13cb
SX
247 u64 **bt_l1;
248 dma_addr_t *bt_l1_dma_addr;
249 u64 **bt_l0;
250 dma_addr_t *bt_l0_dma_addr;
9a443537 251};
252
/* One region of a multi-hop addressed buffer. */
struct hns_roce_buf_region {
	u32 offset; /* page offset */
	u32 count; /* page count */
	int hopnum; /* addressing hop num */
};
258
259#define HNS_ROCE_MAX_BT_REGION 3
260#define HNS_ROCE_MAX_BT_LEVEL 3
261struct hns_roce_hem_list {
262 struct list_head root_bt;
263 /* link all bt dma mem by hop config */
264 struct list_head mid_bt[HNS_ROCE_MAX_BT_REGION][HNS_ROCE_MAX_BT_LEVEL];
265 struct list_head btm_bt; /* link all bottom bt in @mid_bt */
266 dma_addr_t root_ba; /* pointer to the root ba table */
3c873161
XW
267};
268
269struct hns_roce_buf_attr {
270 struct {
271 size_t size; /* region size */
272 int hopnum; /* multi-hop addressing hop num */
273 } region[HNS_ROCE_MAX_BT_REGION];
dcdc366a 274 unsigned int region_count; /* valid region count */
82d07a4e 275 unsigned int page_shift; /* buffer page shift */
dcdc366a 276 unsigned int user_access; /* umem access flag */
3c873161 277 bool mtt_only; /* only alloc buffer-required MTT memory */
38389eaa
LO
278};
279
cc33b23e
XW
280struct hns_roce_hem_cfg {
281 dma_addr_t root_ba; /* root BA table's address */
282 bool is_direct; /* addressing without BA table */
283 unsigned int ba_pg_shift; /* BA table page shift */
284 unsigned int buf_pg_shift; /* buffer page shift */
285 unsigned int buf_pg_count; /* buffer page count */
286 struct hns_roce_buf_region region[HNS_ROCE_MAX_BT_REGION];
dcdc366a 287 unsigned int region_count;
cc33b23e
XW
288};
289
38389eaa
LO
290/* memory translate region */
291struct hns_roce_mtr {
3c873161 292 struct hns_roce_hem_list hem_list; /* multi-hop addressing resource */
82d07a4e
WL
293 struct ib_umem *umem; /* user space buffer */
294 struct hns_roce_buf *kmem; /* kernel space buffer */
cc33b23e 295 struct hns_roce_hem_cfg hem_cfg; /* config for hardware addressing */
38389eaa
LO
296};
297
c7c28191
YL
298struct hns_roce_mw {
299 struct ib_mw ibmw;
300 u32 pdn;
301 u32 rkey;
302 int enabled; /* MW's active status */
303 u32 pbl_hop_num;
304 u32 pbl_ba_pg_sz;
305 u32 pbl_buf_pg_sz;
306};
307
9a443537 308struct hns_roce_mr {
309 struct ib_mr ibmr;
f176199d 310 u64 iova; /* MR's virtual original addr */
9a443537 311 u64 size; /* Address range of MR */
312 u32 key; /* Key of MR */
313 u32 pd; /* PD num of MR */
3aecfc38 314 u32 access; /* Access permission of MR */
9a443537 315 int enabled; /* MR's active status */
3aecfc38
XL
316 int type; /* MR's register type */
317 u32 pbl_hop_num; /* multi-hop number */
9b2cf76c
XW
318 struct hns_roce_mtr pbl_mtr;
319 u32 npages;
320 dma_addr_t *page_list;
9a443537 321};
322
323struct hns_roce_mr_table {
d38936f0 324 struct hns_roce_ida mtpt_ida;
9a443537 325 struct hns_roce_hem_table mtpt_table;
326};
327
/* Work queue (send or receive queue) of a QP. */
struct hns_roce_wq {
	u64 *wrid;     /* Work request ID */
	spinlock_t lock;
	u32 wqe_cnt;   /* WQE num */
	u32 max_gs;    /* max sge entries per WQE */
	u32 rsv_sge;   /* reserved sge entries */
	u32 offset;    /* NOTE(review): presumably byte offset into the QP buffer — confirm */
	u32 wqe_shift; /* WQE size (log2 of bytes) */
	u32 head;      /* producer cursor */
	u32 tail;      /* consumer cursor */
	void __iomem *db_reg;	/* doorbell register */
	u32 ext_sge_cnt;
};
341
/* Scatter/gather element area descriptor. */
struct hns_roce_sge {
	unsigned int sge_cnt; /* SGE num */
	u32 offset;
	u32 sge_shift; /* SGE size (log2 of bytes) */
};
347
/* One buffer chunk: kernel virtual address plus its DMA address. */
struct hns_roce_buf_list {
	void *buf;	/* kernel virtual address */
	dma_addr_t map;	/* DMA address */
};
352
/*
 * %HNS_ROCE_BUF_DIRECT indicates that all of the memory must be in a
 * contiguous dma address range.
 *
 * %HNS_ROCE_BUF_NOSLEEP indicates that the caller cannot sleep.
 *
 * %HNS_ROCE_BUF_NOFAIL indicates that the allocation only fails when the
 * allocated size is zero, even if the allocated size is smaller than the
 * required size.
 */
enum {
	HNS_ROCE_BUF_DIRECT = BIT(0),
	HNS_ROCE_BUF_NOSLEEP = BIT(1),
	HNS_ROCE_BUF_NOFAIL = BIT(2),
};
367
/*
 * Kernel buffer split into equal-sized trunks; address translation is
 * done by hns_roce_buf_offset()/hns_roce_buf_dma_addr().
 */
struct hns_roce_buf {
	struct hns_roce_buf_list *trunk_list;	/* one entry per trunk */
	u32 ntrunks;				/* number of trunks */
	u32 npages;
	unsigned int trunk_shift;	/* log2 of trunk size in bytes */
	unsigned int page_shift;	/* log2 of page size in bytes */
};
375
e088a685
YL
376struct hns_roce_db_pgdir {
377 struct list_head list;
378 DECLARE_BITMAP(order0, HNS_ROCE_DB_PER_PAGE);
2a3d923f
LO
379 DECLARE_BITMAP(order1, HNS_ROCE_DB_PER_PAGE / HNS_ROCE_DB_TYPE_COUNT);
380 unsigned long *bits[HNS_ROCE_DB_TYPE_COUNT];
e088a685
YL
381 u32 *page;
382 dma_addr_t db_dma;
383};
384
385struct hns_roce_user_db_page {
386 struct list_head list;
387 struct ib_umem *umem;
388 unsigned long user_virt;
389 refcount_t refcount;
390};
391
392struct hns_roce_db {
393 u32 *db_record;
394 union {
395 struct hns_roce_db_pgdir *pgdir;
396 struct hns_roce_user_db_page *user_page;
397 } u;
398 dma_addr_t dma;
0425e3e6 399 void *virt_addr;
dcdc366a
WL
400 unsigned long index;
401 unsigned long order;
e088a685
YL
402};
403
9a443537 404struct hns_roce_cq {
405 struct ib_cq ib_cq;
744b7bdf 406 struct hns_roce_mtr mtr;
9b44703d 407 struct hns_roce_db db;
05e6a5a6 408 u32 flags;
9a443537 409 spinlock_t lock;
9a443537 410 u32 cq_depth;
411 u32 cons_index;
86188a88 412 u32 *set_ci_db;
704d68f5 413 void __iomem *db_reg;
26beb85f 414 int arm_sn;
09a5f210 415 int cqe_size;
9a443537 416 unsigned long cqn;
417 u32 vector;
cc9e5a84 418 refcount_t refcount;
9a443537 419 struct completion free;
626903e9
XW
420 struct list_head sq_list; /* all qps on this send cq */
421 struct list_head rq_list; /* all qps on this recv cq */
422 int is_armed; /* cq is armed */
423 struct list_head node; /* all armed cqs are on a list */
9a443537 424};
425
c7bcb134 426struct hns_roce_idx_que {
6fd610c5 427 struct hns_roce_mtr mtr;
d147583e 428 u32 entry_shift;
97545b10 429 unsigned long *bitmap;
1620f09b
WL
430 u32 head;
431 u32 tail;
c7bcb134
LO
432};
433
9a443537 434struct hns_roce_srq {
435 struct ib_srq ibsrq;
c7bcb134 436 unsigned long srqn;
d938d785 437 u32 wqe_cnt;
c7bcb134 438 int max_gs;
9dd05247 439 u32 rsv_sge;
d147583e 440 u32 wqe_shift;
0fee4516 441 u32 cqn;
32548870 442 u32 xrcdn;
704d68f5 443 void __iomem *db_reg;
c7bcb134 444
33649cd3 445 refcount_t refcount;
c7bcb134
LO
446 struct completion free;
447
6fd610c5
XW
448 struct hns_roce_mtr buf_mtr;
449
c7bcb134 450 u64 *wrid;
c7bcb134
LO
451 struct hns_roce_idx_que idx_que;
452 spinlock_t lock;
c7bcb134 453 struct mutex mutex;
d938d785 454 void (*event)(struct hns_roce_srq *srq, enum hns_roce_event event);
9a443537 455};
456
457struct hns_roce_uar_table {
458 struct hns_roce_bitmap bitmap;
459};
460
/* One bank of a banked id allocator (used for QP and CQ numbers). */
struct hns_roce_bank {
	struct ida ida;
	u32 inuse; /* Number of IDs allocated */
	u32 min; /* Lowest ID to allocate. */
	u32 max; /* Highest ID to allocate. */
	u32 next; /* Next ID to allocate. */
};
468
/* Pool of spare indexes managed as a head/tail ring. */
struct hns_roce_idx_table {
	u32 *spare_idx;
	u32 head;
	u32 tail;
};
474
9a443537 475struct hns_roce_qp_table {
9a443537 476 struct hns_roce_hem_table qp_table;
477 struct hns_roce_hem_table irrl_table;
e92f2c18 478 struct hns_roce_hem_table trrl_table;
6a157f7d 479 struct hns_roce_hem_table sccc_table;
aa84fa18 480 struct mutex scc_mutex;
71586dd2 481 struct hns_roce_bank bank[HNS_ROCE_QP_BANK_NUM];
9293d3fc 482 struct mutex bank_mutex;
eb653eda 483 struct hns_roce_idx_table idx_table;
9a443537 484};
485
486struct hns_roce_cq_table {
27e19f45 487 struct xarray array;
9a443537 488 struct hns_roce_hem_table table;
1bbd4380
YL
489 struct hns_roce_bank bank[HNS_ROCE_CQ_BANK_NUM];
490 struct mutex bank_mutex;
9a443537 491};
492
5c1f167a 493struct hns_roce_srq_table {
c4f11b36 494 struct hns_roce_ida srq_ida;
5c1f167a
LO
495 struct xarray xa;
496 struct hns_roce_hem_table table;
497};
498
9a443537 499struct hns_roce_av {
074bf2c2
WL
500 u8 port;
501 u8 gid_index;
502 u8 stat_rate;
503 u8 hop_limit;
504 u32 flowlabel;
505 u16 udp_sport;
506 u8 sl;
507 u8 tclass;
508 u8 dgid[HNS_ROCE_GID_SIZE];
509 u8 mac[ETH_ALEN];
510 u16 vlan_id;
7406c003 511 u8 vlan_en;
9a443537 512};
513
514struct hns_roce_ah {
515 struct ib_ah ibah;
516 struct hns_roce_av av;
517};
518
519struct hns_roce_cmd_context {
520 struct completion done;
521 int result;
522 int next;
523 u64 out_param;
524 u16 token;
a389d016 525 u16 busy;
9a443537 526};
527
e8ea058e
YL
528enum hns_roce_cmdq_state {
529 HNS_ROCE_CMDQ_STATE_NORMAL,
530 HNS_ROCE_CMDQ_STATE_FATAL_ERR,
531};
532
9a443537 533struct hns_roce_cmdq {
534 struct dma_pool *pool;
9a443537 535 struct semaphore poll_sem;
536 /*
e84e40be
S
537 * Event mode: cmd register mutex protection,
538 * ensure to not exceed max_cmds and user use limit region
539 */
9a443537 540 struct semaphore event_sem;
541 int max_cmds;
542 spinlock_t context_lock;
543 int free_head;
544 struct hns_roce_cmd_context *context;
9a443537 545 /*
e84e40be
S
546 * Process whether use event mode, init default non-zero
547 * After the event queue of cmd event ready,
548 * can switch into event mode
549 * close device, switch into poll mode(non event mode)
550 */
9a443537 551 u8 use_events;
e8ea058e 552 enum hns_roce_cmdq_state state;
9a443537 553};
554
bfcc681b
SX
555struct hns_roce_cmd_mailbox {
556 void *buf;
557 dma_addr_t dma;
558};
559
162e29fe
CT
560struct hns_roce_mbox_msg {
561 u64 in_param;
562 u64 out_param;
563 u8 cmd;
564 u32 tag;
565 u16 token;
566 u8 event_en;
567};
568
9a443537 569struct hns_roce_dev;
570
b5374286
YL
571enum {
572 HNS_ROCE_FLUSH_FLAG = 0,
573};
574
ffd541d4
YL
575struct hns_roce_work {
576 struct hns_roce_dev *hr_dev;
577 struct work_struct work;
ffd541d4
YL
578 int event_type;
579 int sub_type;
d8cc403b 580 u32 queue_num;
ffd541d4
YL
581};
582
9a443537 583struct hns_roce_qp {
584 struct ib_qp ibqp;
9a443537 585 struct hns_roce_wq rq;
e088a685 586 struct hns_roce_db rdb;
0425e3e6 587 struct hns_roce_db sdb;
90ae0b57 588 unsigned long en_flags;
ea4092f3 589 enum ib_sig_type sq_signal_bits;
9a443537 590 struct hns_roce_wq sq;
591
8d18ad83 592 struct hns_roce_mtr mtr;
8d18ad83 593
9a443537 594 u32 buff_size;
595 struct mutex mutex;
596 u8 port;
7716809e 597 u8 phy_port;
9a443537 598 u8 sl;
599 u8 resp_depth;
600 u8 state;
ace1c541 601 u32 atomic_rd_en;
0fa95a9a 602 u32 qkey;
fd012f1c 603 void (*event)(struct hns_roce_qp *qp,
604 enum hns_roce_event event_type);
9a443537 605 unsigned long qpn;
606
32548870
WL
607 u32 xrcdn;
608
8f9513d8 609 refcount_t refcount;
9a443537 610 struct completion free;
926a01dc
WHX
611
612 struct hns_roce_sge sge;
613 u32 next_sge;
30b70788
WL
614 enum ib_mtu path_mtu;
615 u32 max_inline_data;
70f92521 616 u8 free_mr_en;
0009c2db 617
b5374286
YL
618 /* 0: flush needed, 1: unneeded */
619 unsigned long flush_flag;
ffd541d4 620 struct hns_roce_work flush_work;
3aecfc38
XL
621 struct list_head node; /* all qps are on a list */
622 struct list_head rq_node; /* all recv qps are on a list */
623 struct list_head sq_node; /* all send qps are on a list */
0045e0d3 624 struct hns_user_mmap_entry *dwqe_mmap_entry;
0c5e259b 625 u32 config;
9a443537 626};
627
9a443537 628struct hns_roce_ib_iboe {
629 spinlock_t lock;
630 struct net_device *netdevs[HNS_ROCE_MAX_PORTS];
631 struct notifier_block nb;
9a443537 632 u8 phy_port[HNS_ROCE_MAX_PORTS];
633};
634
b16f8188 635struct hns_roce_ceqe {
247fc16d
WL
636 __le32 comp;
637 __le32 rsv[15];
b16f8188
YL
638};
639
813c9802
WL
640#define CEQE_FIELD_LOC(h, l) FIELD_LOC(struct hns_roce_ceqe, h, l)
641
642#define CEQE_CQN CEQE_FIELD_LOC(23, 0)
643#define CEQE_OWNER CEQE_FIELD_LOC(31, 31)
644
b16f8188 645struct hns_roce_aeqe {
0576cbde 646 __le32 asyn;
b16f8188
YL
647 union {
648 struct {
d8cc403b 649 __le32 num;
b16f8188
YL
650 u32 rsv0;
651 u32 rsv1;
d8cc403b 652 } queue_event;
b16f8188
YL
653
654 struct {
655 __le64 out_param;
656 __le16 token;
657 u8 status;
658 u8 rsv0;
659 } __packed cmd;
660 } event;
247fc16d 661 __le32 rsv[12];
b16f8188
YL
662};
663
813c9802
WL
664#define AEQE_FIELD_LOC(h, l) FIELD_LOC(struct hns_roce_aeqe, h, l)
665
666#define AEQE_EVENT_TYPE AEQE_FIELD_LOC(7, 0)
667#define AEQE_SUB_TYPE AEQE_FIELD_LOC(15, 8)
668#define AEQE_OWNER AEQE_FIELD_LOC(31, 31)
669#define AEQE_EVENT_QUEUE_NUM AEQE_FIELD_LOC(55, 32)
670
9a443537 671struct hns_roce_eq {
672 struct hns_roce_dev *hr_dev;
704d68f5 673 void __iomem *db_reg;
9a443537 674
6def7de6 675 int type_flag; /* Aeq:1 ceq:0 */
9a443537 676 int eqn;
677 u32 entries;
9a443537 678 int eqe_size;
679 int irq;
dcdc366a 680 u32 cons_index;
a5073d60
YL
681 int over_ignore;
682 int coalesce;
683 int arm_st;
a5073d60 684 int hop_num;
d7e2d343 685 struct hns_roce_mtr mtr;
13aa13dd 686 u16 eq_max_cnt;
dcdc366a 687 u32 eq_period;
a5073d60 688 int shift;
0425e3e6
YL
689 int event_type;
690 int sub_type;
9a443537 691};
692
693struct hns_roce_eq_table {
694 struct hns_roce_eq *eq;
9a443537 695};
696
f91696f2
YL
697enum cong_type {
698 CONG_TYPE_DCQCN,
699 CONG_TYPE_LDCP,
700 CONG_TYPE_HC3,
701 CONG_TYPE_DIP,
702};
703
9a443537 704struct hns_roce_caps {
3a63c964 705 u64 fw_ver;
9a443537 706 u8 num_ports;
707 int gid_table_len[HNS_ROCE_MAX_PORTS];
708 int pkey_table_len[HNS_ROCE_MAX_PORTS];
709 int local_ca_ack_delay;
710 int num_uars;
711 u32 phy_num_uars;
6def7de6
LC
712 u32 max_sq_sg;
713 u32 max_sq_inline;
714 u32 max_rq_sg;
5436272c 715 u32 rsv0;
dcdc366a 716 u32 num_qps;
61b460d1 717 u32 num_pi_qps;
dcdc366a 718 u32 reserved_qps;
d147583e 719 u32 num_srqs;
6def7de6 720 u32 max_wqes;
d16da119
LO
721 u32 max_srq_wrs;
722 u32 max_srq_sges;
6def7de6
LC
723 u32 max_sq_desc_sz;
724 u32 max_rq_desc_sz;
6649b4a1 725 u32 rsv2;
9a443537 726 int max_qp_init_rdma;
727 int max_qp_dest_rdma;
dcdc366a 728 u32 num_cqs;
e2b2744a
YL
729 u32 max_cqes;
730 u32 min_cqes;
926a01dc 731 u32 min_wqes;
1bbd4380 732 u32 reserved_cqs;
d147583e 733 u32 reserved_srqs;
6def7de6 734 int num_aeq_vectors;
a5073d60 735 int num_comp_vectors;
9a443537 736 int num_other_vectors;
dcdc366a 737 u32 num_mtpts;
5436272c 738 u32 rsv1;
5c1f167a
LO
739 u32 num_srqwqe_segs;
740 u32 num_idx_segs;
9a443537 741 int reserved_mrws;
742 int reserved_uars;
743 int num_pds;
744 int reserved_pds;
32548870
WL
745 u32 num_xrcds;
746 u32 reserved_xrcds;
9a443537 747 u32 mtt_entry_sz;
09a5f210 748 u32 cqe_sz;
9a443537 749 u32 page_size_cap;
750 u32 reserved_lkey;
751 int mtpt_entry_sz;
98912ee8 752 int qpc_sz;
9a443537 753 int irrl_entry_sz;
e92f2c18 754 int trrl_entry_sz;
9a443537 755 int cqc_entry_sz;
3cb2c996 756 int sccc_sz;
0e40dc2f
YL
757 int qpc_timer_entry_sz;
758 int cqc_timer_entry_sz;
5c1f167a
LO
759 int srqc_entry_sz;
760 int idx_entry_sz;
ff795f71
WHX
761 u32 pbl_ba_pg_sz;
762 u32 pbl_buf_pg_sz;
763 u32 pbl_hop_num;
9a443537 764 int aeqe_depth;
b16f8188 765 int ceqe_depth;
247fc16d
WL
766 u32 aeqe_size;
767 u32 ceqe_size;
9a443537 768 enum ib_mtu max_mtu;
cfc85f3e 769 u32 qpc_bt_num;
0e40dc2f 770 u32 qpc_timer_bt_num;
cfc85f3e
WHX
771 u32 srqc_bt_num;
772 u32 cqc_bt_num;
0e40dc2f 773 u32 cqc_timer_bt_num;
cfc85f3e 774 u32 mpt_bt_num;
2a424e1d
WX
775 u32 eqc_bt_num;
776 u32 smac_bt_num;
777 u32 sgid_bt_num;
6a157f7d 778 u32 sccc_bt_num;
d6d91e46 779 u32 gmv_bt_num;
a25d13cb
SX
780 u32 qpc_ba_pg_sz;
781 u32 qpc_buf_pg_sz;
782 u32 qpc_hop_num;
783 u32 srqc_ba_pg_sz;
784 u32 srqc_buf_pg_sz;
785 u32 srqc_hop_num;
786 u32 cqc_ba_pg_sz;
787 u32 cqc_buf_pg_sz;
788 u32 cqc_hop_num;
789 u32 mpt_ba_pg_sz;
790 u32 mpt_buf_pg_sz;
791 u32 mpt_hop_num;
6a93c77a
SX
792 u32 mtt_ba_pg_sz;
793 u32 mtt_buf_pg_sz;
794 u32 mtt_hop_num;
8d18ad83
LO
795 u32 wqe_sq_hop_num;
796 u32 wqe_sge_hop_num;
797 u32 wqe_rq_hop_num;
6a157f7d
YL
798 u32 sccc_ba_pg_sz;
799 u32 sccc_buf_pg_sz;
800 u32 sccc_hop_num;
0e40dc2f
YL
801 u32 qpc_timer_ba_pg_sz;
802 u32 qpc_timer_buf_pg_sz;
803 u32 qpc_timer_hop_num;
804 u32 cqc_timer_ba_pg_sz;
805 u32 cqc_timer_buf_pg_sz;
806 u32 cqc_timer_hop_num;
3aecfc38 807 u32 cqe_ba_pg_sz; /* page_size = 4K*(2^cqe_ba_pg_sz) */
6a93c77a
SX
808 u32 cqe_buf_pg_sz;
809 u32 cqe_hop_num;
c7bcb134
LO
810 u32 srqwqe_ba_pg_sz;
811 u32 srqwqe_buf_pg_sz;
812 u32 srqwqe_hop_num;
813 u32 idx_ba_pg_sz;
814 u32 idx_buf_pg_sz;
815 u32 idx_hop_num;
a5073d60
YL
816 u32 eqe_ba_pg_sz;
817 u32 eqe_buf_pg_sz;
818 u32 eqe_hop_num;
d6d91e46
WL
819 u32 gmv_entry_num;
820 u32 gmv_entry_sz;
821 u32 gmv_ba_pg_sz;
822 u32 gmv_buf_pg_sz;
823 u32 gmv_hop_num;
6b63597d 824 u32 sl_num;
b6989da8 825 u32 llm_buf_pg_sz;
3aecfc38 826 u32 chunk_sz; /* chunk size in non multihop mode */
a2c80b7b 827 u64 flags;
ba6bb7e9
LO
828 u16 default_ceq_max_cnt;
829 u16 default_ceq_period;
830 u16 default_aeq_max_cnt;
831 u16 default_aeq_period;
832 u16 default_aeq_arm_st;
833 u16 default_ceq_arm_st;
f91696f2 834 enum cong_type cong_type;
9a443537 835};
836
626903e9
XW
837enum hns_roce_device_state {
838 HNS_ROCE_DEVICE_STATE_INITED,
839 HNS_ROCE_DEVICE_STATE_RST_DOWN,
840 HNS_ROCE_DEVICE_STATE_UNINIT,
841};
842
9a443537 843struct hns_roce_hw {
a04ff739
WHX
844 int (*cmq_init)(struct hns_roce_dev *hr_dev);
845 void (*cmq_exit)(struct hns_roce_dev *hr_dev);
cfc85f3e 846 int (*hw_profile)(struct hns_roce_dev *hr_dev);
9a443537 847 int (*hw_init)(struct hns_roce_dev *hr_dev);
848 void (*hw_exit)(struct hns_roce_dev *hr_dev);
162e29fe
CT
849 int (*post_mbox)(struct hns_roce_dev *hr_dev,
850 struct hns_roce_mbox_msg *mbox_msg);
0018ed4b 851 int (*poll_mbox_done)(struct hns_roce_dev *hr_dev);
ee82e688 852 bool (*chk_mbox_avail)(struct hns_roce_dev *hr_dev, bool *is_busy);
38d22088 853 int (*set_gid)(struct hns_roce_dev *hr_dev, int gid_index,
f4df9a7c 854 const union ib_gid *gid, const struct ib_gid_attr *attr);
fd92213e
JK
855 int (*set_mac)(struct hns_roce_dev *hr_dev, u8 phy_port,
856 const u8 *addr);
98a61519 857 int (*write_mtpt)(struct hns_roce_dev *hr_dev, void *mb_buf,
38d22088 858 struct hns_roce_mr *mr);
a2c80b7b 859 int (*rereg_write_mtpt)(struct hns_roce_dev *hr_dev,
4e9fc1da 860 struct hns_roce_mr *mr, int flags,
a2c80b7b 861 void *mb_buf);
98a61519
YL
862 int (*frmr_write_mtpt)(struct hns_roce_dev *hr_dev, void *mb_buf,
863 struct hns_roce_mr *mr);
c7c28191 864 int (*mw_write_mtpt)(void *mb_buf, struct hns_roce_mw *mw);
9a443537 865 void (*write_cqc)(struct hns_roce_dev *hr_dev,
866 struct hns_roce_cq *hr_cq, void *mb_buf, u64 *mtts,
e2b2744a 867 dma_addr_t dma_handle);
a25d13cb 868 int (*set_hem)(struct hns_roce_dev *hr_dev,
e50cda2b 869 struct hns_roce_hem_table *table, int obj, u32 step_idx);
97f0e39f 870 int (*clear_hem)(struct hns_roce_dev *hr_dev,
a25d13cb 871 struct hns_roce_hem_table *table, int obj,
e50cda2b 872 u32 step_idx);
9a443537 873 int (*modify_qp)(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
874 int attr_mask, enum ib_qp_state cur_state,
2bb185c6 875 enum ib_qp_state new_state, struct ib_udata *udata);
aa84fa18
YL
876 int (*qp_flow_control_init)(struct hns_roce_dev *hr_dev,
877 struct hns_roce_qp *hr_qp);
70f92521 878 void (*dereg_mr)(struct hns_roce_dev *hr_dev);
b16f8188
YL
879 int (*init_eq)(struct hns_roce_dev *hr_dev);
880 void (*cleanup_eq)(struct hns_roce_dev *hr_dev);
eacb45ca 881 int (*write_srqc)(struct hns_roce_srq *srq, void *mb_buf);
40b4b79c 882 int (*query_cqc)(struct hns_roce_dev *hr_dev, u32 cqn, void *buffer);
3e89d78b 883 int (*query_qpc)(struct hns_roce_dev *hr_dev, u32 qpn, void *buffer);
3d67e7e2 884 int (*query_mpt)(struct hns_roce_dev *hr_dev, u32 key, void *buffer);
7f645a58
KH
885 const struct ib_device_ops *hns_roce_dev_ops;
886 const struct ib_device_ops *hns_roce_dev_srq_ops;
9a443537 887};
888
889struct hns_roce_dev {
890 struct ib_device ib_dev;
dd74282d
WHX
891 struct pci_dev *pci_dev;
892 struct device *dev;
9a443537 893 struct hns_roce_uar priv_uar;
528f1deb 894 const char *irq_names[HNS_ROCE_MAX_IRQ_NUM];
9a443537 895 spinlock_t sm_lock;
cb7a94c9
WHX
896 bool active;
897 bool is_reset;
d3743fa9 898 bool dis_db;
d061effc 899 unsigned long reset_cnt;
9a443537 900 struct hns_roce_ib_iboe iboe;
626903e9
XW
901 enum hns_roce_device_state state;
902 struct list_head qp_list; /* list of all qps on this dev */
903 spinlock_t qp_list_lock; /* protect qp_list */
f91696f2
YL
904 struct list_head dip_list; /* list of all dest ips on this dev */
905 spinlock_t dip_list_lock; /* protect dip_list */
9a443537 906
472bc0fb
YL
907 struct list_head pgdir_list;
908 struct mutex pgdir_mutex;
9a443537 909 int irq[HNS_ROCE_MAX_IRQ_NUM];
910 u8 __iomem *reg_base;
01584a5e 911 void __iomem *mem_base;
9a443537 912 struct hns_roce_caps caps;
736b5a70 913 struct xarray qp_table_xa;
9a443537 914
2a3d923f 915 unsigned char dev_addr[HNS_ROCE_MAX_PORTS][ETH_ALEN];
9a443537 916 u64 sys_image_guid;
917 u32 vendor_id;
918 u32 vendor_part_id;
919 u32 hw_rev;
920 void __iomem *priv_addr;
921
922 struct hns_roce_cmdq cmd;
645f0593 923 struct hns_roce_ida pd_ida;
da43b7be 924 struct hns_roce_ida xrcd_ida;
8feafd90 925 struct hns_roce_ida uar_ida;
9a443537 926 struct hns_roce_mr_table mr_table;
927 struct hns_roce_cq_table cq_table;
5c1f167a 928 struct hns_roce_srq_table srq_table;
9a443537 929 struct hns_roce_qp_table qp_table;
930 struct hns_roce_eq_table eq_table;
0e40dc2f
YL
931 struct hns_roce_hem_table qpc_timer_table;
932 struct hns_roce_hem_table cqc_timer_table;
d6d91e46
WL
933 /* GMV is the memory area that the driver allocates for the hardware
934 * to store SGID, SMAC and VLAN information.
935 */
936 struct hns_roce_hem_table gmv_table;
9a443537 937
938 int cmd_mod;
939 int loop_idc;
2d407888
WHX
940 u32 sdb_offset;
941 u32 odb_offset;
08805fdb 942 const struct hns_roce_hw *hw;
016a0059 943 void *priv;
0425e3e6 944 struct workqueue_struct *irq_workq;
2de949ab 945 struct work_struct ecc_work;
5b03a422 946 u32 func_num;
0b567cde 947 u32 is_vf;
e079d87d 948 u32 cong_algo_tmpl_id;
0045e0d3 949 u64 dwqe_page;
9a443537 950};
951
/* Convert an ib_device to its containing hns_roce_dev. */
static inline struct hns_roce_dev *to_hr_dev(struct ib_device *ib_dev)
{
	return container_of(ib_dev, struct hns_roce_dev, ib_dev);
}
956
/* Convert an ib_ucontext to its containing hns_roce_ucontext. */
static inline struct hns_roce_ucontext
	*to_hr_ucontext(struct ib_ucontext *ibucontext)
{
	return container_of(ibucontext, struct hns_roce_ucontext, ibucontext);
}
962
/* Convert an ib_pd to its containing hns_roce_pd. */
static inline struct hns_roce_pd *to_hr_pd(struct ib_pd *ibpd)
{
	return container_of(ibpd, struct hns_roce_pd, ibpd);
}
967
/* Convert an ib_xrcd to its containing hns_roce_xrcd. */
static inline struct hns_roce_xrcd *to_hr_xrcd(struct ib_xrcd *ibxrcd)
{
	return container_of(ibxrcd, struct hns_roce_xrcd, ibxrcd);
}
972
/* Convert an ib_ah to its containing hns_roce_ah. */
static inline struct hns_roce_ah *to_hr_ah(struct ib_ah *ibah)
{
	return container_of(ibah, struct hns_roce_ah, ibah);
}
977
/* Convert an ib_mr to its containing hns_roce_mr. */
static inline struct hns_roce_mr *to_hr_mr(struct ib_mr *ibmr)
{
	return container_of(ibmr, struct hns_roce_mr, ibmr);
}
982
/* Convert an ib_mw to its containing hns_roce_mw. */
static inline struct hns_roce_mw *to_hr_mw(struct ib_mw *ibmw)
{
	return container_of(ibmw, struct hns_roce_mw, ibmw);
}
987
/* Convert an ib_qp to its containing hns_roce_qp. */
static inline struct hns_roce_qp *to_hr_qp(struct ib_qp *ibqp)
{
	return container_of(ibqp, struct hns_roce_qp, ibqp);
}
992
/* Convert an ib_cq to its containing hns_roce_cq. */
static inline struct hns_roce_cq *to_hr_cq(struct ib_cq *ib_cq)
{
	return container_of(ib_cq, struct hns_roce_cq, ib_cq);
}
997
/* Convert an ib_srq to its containing hns_roce_srq. */
static inline struct hns_roce_srq *to_hr_srq(struct ib_srq *ibsrq)
{
	return container_of(ibsrq, struct hns_roce_srq, ibsrq);
}
1002
/* Convert an rdma_user_mmap_entry to its containing hns_user_mmap_entry. */
static inline struct hns_user_mmap_entry *
to_hns_mmap(struct rdma_user_mmap_entry *rdma_entry)
{
	return container_of(rdma_entry, struct hns_user_mmap_entry, rdma_entry);
}
1008
/*
 * Write a 64-bit doorbell (two consecutive __le32 words) to MMIO with a
 * single 64-bit store so both words reach the device in one access.
 * NOTE(review): assumes @val is 8-byte aligned for the u64 load — confirm
 * at call sites on strict-alignment architectures.
 */
static inline void hns_roce_write64_k(__le32 val[2], void __iomem *dest)
{
	writeq(*(u64 *)val, dest);
}
1013
/* Look up a QP by number in the device XArray; returns NULL if absent. */
static inline struct hns_roce_qp
	*__hns_roce_qp_lookup(struct hns_roce_dev *hr_dev, u32 qpn)
{
	return xa_load(&hr_dev->qp_table_xa, qpn);
}
1019
dcdc366a
WL
1020static inline void *hns_roce_buf_offset(struct hns_roce_buf *buf,
1021 unsigned int offset)
9a443537 1022{
6f6e2dcb
XW
1023 return (char *)(buf->trunk_list[offset >> buf->trunk_shift].buf) +
1024 (offset & ((1 << buf->trunk_shift) - 1));
cc23267a
XW
1025}
1026
7b0006db
XW
1027static inline dma_addr_t hns_roce_buf_dma_addr(struct hns_roce_buf *buf,
1028 unsigned int offset)
cc23267a 1029{
6f6e2dcb
XW
1030 return buf->trunk_list[offset >> buf->trunk_shift].map +
1031 (offset & ((1 << buf->trunk_shift) - 1));
9a443537 1032}
1033
7b0006db
XW
1034static inline dma_addr_t hns_roce_buf_page(struct hns_roce_buf *buf, u32 idx)
1035{
1036 return hns_roce_buf_dma_addr(buf, idx << buf->page_shift);
1037}
1038
9581a356 1039#define hr_hw_page_align(x) ALIGN(x, 1 << HNS_HW_PAGE_SHIFT)
54d66387 1040
/* Convert a byte address into a hardware page frame number. */
static inline u64 to_hr_hw_page_addr(u64 addr)
{
	return addr >> HNS_HW_PAGE_SHIFT;
}
1045
/*
 * Rebase a page shift so it is expressed relative to the hardware page
 * size instead of bytes (presumably @page_shift >= HNS_HW_PAGE_SHIFT —
 * TODO confirm at call sites, otherwise this underflows).
 */
static inline u32 to_hr_hw_page_shift(u32 page_shift)
{
	return page_shift - HNS_HW_PAGE_SHIFT;
}
1050
54d66387
XW
1051static inline u32 to_hr_hem_hopnum(u32 hopnum, u32 count)
1052{
1053 if (count > 0)
1054 return hopnum == HNS_ROCE_HOP_NUM_0 ? 0 : hopnum;
1055
1056 return 0;
1057}
1058
1059static inline u32 to_hr_hem_entries_size(u32 count, u32 buf_shift)
1060{
1061 return hr_hw_page_align(count << buf_shift);
1062}
1063
1064static inline u32 to_hr_hem_entries_count(u32 count, u32 buf_shift)
1065{
1066 return hr_hw_page_align(count << buf_shift) >> buf_shift;
1067}
1068
1069static inline u32 to_hr_hem_entries_shift(u32 count, u32 buf_shift)
1070{
d4d81387
WL
1071 if (!count)
1072 return 0;
1073
54d66387
XW
1074 return ilog2(to_hr_hem_entries_count(count, buf_shift));
1075}
1076
603bee93
WL
1077#define DSCP_SHIFT 2
1078
1079static inline u8 get_tclass(const struct ib_global_route *grh)
1080{
1081 return grh->sgid_attr->gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP ?
1082 grh->traffic_class >> DSCP_SHIFT : grh->traffic_class;
1083}
1084
8feafd90 1085void hns_roce_init_uar_table(struct hns_roce_dev *dev);
9a443537 1086int hns_roce_uar_alloc(struct hns_roce_dev *dev, struct hns_roce_uar *uar);
9a443537 1087
1088int hns_roce_cmd_init(struct hns_roce_dev *hr_dev);
1089void hns_roce_cmd_cleanup(struct hns_roce_dev *hr_dev);
1090void hns_roce_cmd_event(struct hns_roce_dev *hr_dev, u16 token, u8 status,
1091 u64 out_param);
1092int hns_roce_cmd_use_events(struct hns_roce_dev *hr_dev);
1093void hns_roce_cmd_use_polling(struct hns_roce_dev *hr_dev);
1094
38389eaa
LO
1095/* hns roce hw need current block and next block addr from mtt */
1096#define MTT_MIN_COUNT 2
1097int hns_roce_mtr_find(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
d147583e 1098 u32 offset, u64 *mtt_buf, int mtt_max, u64 *base_addr);
3c873161 1099int hns_roce_mtr_create(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
82d07a4e
WL
1100 struct hns_roce_buf_attr *buf_attr,
1101 unsigned int page_shift, struct ib_udata *udata,
1102 unsigned long user_addr);
3c873161
XW
1103void hns_roce_mtr_destroy(struct hns_roce_dev *hr_dev,
1104 struct hns_roce_mtr *mtr);
1105int hns_roce_mtr_map(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
dcdc366a 1106 dma_addr_t *pages, unsigned int page_cnt);
38389eaa 1107
645f0593 1108void hns_roce_init_pd_table(struct hns_roce_dev *hr_dev);
d38936f0 1109void hns_roce_init_mr_table(struct hns_roce_dev *hr_dev);
1bbd4380 1110void hns_roce_init_cq_table(struct hns_roce_dev *hr_dev);
eb653eda 1111int hns_roce_init_qp_table(struct hns_roce_dev *hr_dev);
c4f11b36 1112void hns_roce_init_srq_table(struct hns_roce_dev *hr_dev);
da43b7be 1113void hns_roce_init_xrcd_table(struct hns_roce_dev *hr_dev);
9a443537 1114
9a443537 1115void hns_roce_cleanup_eq_table(struct hns_roce_dev *hr_dev);
1116void hns_roce_cleanup_cq_table(struct hns_roce_dev *hr_dev);
1117void hns_roce_cleanup_qp_table(struct hns_roce_dev *hr_dev);
1118
9a443537 1119void hns_roce_cleanup_bitmap(struct hns_roce_dev *hr_dev);
9a443537 1120
fa5d010c
MG
1121int hns_roce_create_ah(struct ib_ah *ah, struct rdma_ah_init_attr *init_attr,
1122 struct ib_udata *udata);
90898850 1123int hns_roce_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr);
9a9ebf8c
LR
1124static inline int hns_roce_destroy_ah(struct ib_ah *ah, u32 flags)
1125{
1126 return 0;
1127}
9a443537 1128
ff23dfa1 1129int hns_roce_alloc_pd(struct ib_pd *pd, struct ib_udata *udata);
91a7c58f 1130int hns_roce_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata);
9a443537 1131
1132struct ib_mr *hns_roce_get_dma_mr(struct ib_pd *pd, int acc);
1133struct ib_mr *hns_roce_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
1134 u64 virt_addr, int access_flags,
1135 struct ib_udata *udata);
6e0954b1
JG
1136struct ib_mr *hns_roce_rereg_user_mr(struct ib_mr *mr, int flags, u64 start,
1137 u64 length, u64 virt_addr,
1138 int mr_access_flags, struct ib_pd *pd,
1139 struct ib_udata *udata);
68a997c5 1140struct ib_mr *hns_roce_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
42a3b153 1141 u32 max_num_sg);
68a997c5
YL
1142int hns_roce_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
1143 unsigned int *sg_offset);
c4367a26 1144int hns_roce_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata);
bfcc681b 1145unsigned long key_to_hw_index(u32 key);
9a443537 1146
d18bb3e1 1147int hns_roce_alloc_mw(struct ib_mw *mw, struct ib_udata *udata);
c7c28191
YL
1148int hns_roce_dealloc_mw(struct ib_mw *ibmw);
1149
cc23267a 1150void hns_roce_buf_free(struct hns_roce_dev *hr_dev, struct hns_roce_buf *buf);
6f6e2dcb
XW
1151struct hns_roce_buf *hns_roce_buf_alloc(struct hns_roce_dev *hr_dev, u32 size,
1152 u32 page_shift, u32 flags);
9a443537 1153
2ac0bc5e 1154int hns_roce_get_kmem_bufs(struct hns_roce_dev *hr_dev, dma_addr_t *bufs,
7b0006db
XW
1155 int buf_cnt, struct hns_roce_buf *buf,
1156 unsigned int page_shift);
2ac0bc5e 1157int hns_roce_get_umem_bufs(struct hns_roce_dev *hr_dev, dma_addr_t *bufs,
7b0006db 1158 int buf_cnt, struct ib_umem *umem,
82d07a4e 1159 unsigned int page_shift);
2ac0bc5e 1160
68e326de
LR
1161int hns_roce_create_srq(struct ib_srq *srq,
1162 struct ib_srq_init_attr *srq_init_attr,
1163 struct ib_udata *udata);
c7bcb134
LO
1164int hns_roce_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr,
1165 enum ib_srq_attr_mask srq_attr_mask,
1166 struct ib_udata *udata);
119181d1 1167int hns_roce_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata);
c7bcb134 1168
32548870
WL
1169int hns_roce_alloc_xrcd(struct ib_xrcd *ib_xrcd, struct ib_udata *udata);
1170int hns_roce_dealloc_xrcd(struct ib_xrcd *ib_xrcd, struct ib_udata *udata);
1171
514aee66
LR
1172int hns_roce_create_qp(struct ib_qp *ib_qp, struct ib_qp_init_attr *init_attr,
1173 struct ib_udata *udata);
9a443537 1174int hns_roce_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
1175 int attr_mask, struct ib_udata *udata);
ffd541d4 1176void init_flush_work(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp);
dcdc366a
WL
1177void *hns_roce_get_recv_wqe(struct hns_roce_qp *hr_qp, unsigned int n);
1178void *hns_roce_get_send_wqe(struct hns_roce_qp *hr_qp, unsigned int n);
1179void *hns_roce_get_extend_sge(struct hns_roce_qp *hr_qp, unsigned int n);
1180bool hns_roce_wq_overflow(struct hns_roce_wq *hr_wq, u32 nreq,
9a443537 1181 struct ib_cq *ib_cq);
9a443537 1182void hns_roce_lock_cqs(struct hns_roce_cq *send_cq,
1183 struct hns_roce_cq *recv_cq);
1184void hns_roce_unlock_cqs(struct hns_roce_cq *send_cq,
1185 struct hns_roce_cq *recv_cq);
1186void hns_roce_qp_remove(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp);
e365b26c
XW
1187void hns_roce_qp_destroy(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
1188 struct ib_udata *udata);
f696bf6d 1189__be32 send_ieth(const struct ib_send_wr *wr);
9a443537 1190int to_hr_qp_type(int qp_type);
1191
707783ab
YL
1192int hns_roce_create_cq(struct ib_cq *ib_cq, const struct ib_cq_init_attr *attr,
1193 struct ib_udata *udata);
9a443537 1194
43d781b9 1195int hns_roce_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata);
69e0a42f 1196int hns_roce_db_map_user(struct hns_roce_ucontext *context, unsigned long virt,
e088a685
YL
1197 struct hns_roce_db *db);
1198void hns_roce_db_unmap_user(struct hns_roce_ucontext *context,
1199 struct hns_roce_db *db);
472bc0fb
YL
1200int hns_roce_alloc_db(struct hns_roce_dev *hr_dev, struct hns_roce_db *db,
1201 int order);
1202void hns_roce_free_db(struct hns_roce_dev *hr_dev, struct hns_roce_db *db);
1203
9a443537 1204void hns_roce_cq_completion(struct hns_roce_dev *hr_dev, u32 cqn);
1205void hns_roce_cq_event(struct hns_roce_dev *hr_dev, u32 cqn, int event_type);
c462a024 1206void flush_cqe(struct hns_roce_dev *dev, struct hns_roce_qp *qp);
9a443537 1207void hns_roce_qp_event(struct hns_roce_dev *hr_dev, u32 qpn, int event_type);
81fce629 1208void hns_roce_srq_event(struct hns_roce_dev *hr_dev, u32 srqn, int event_type);
1fb7f897 1209u8 hns_get_gid_index(struct hns_roce_dev *hr_dev, u32 port, int gid_index);
626903e9 1210void hns_roce_handle_device_err(struct hns_roce_dev *hr_dev);
08805fdb
WHX
1211int hns_roce_init(struct hns_roce_dev *hr_dev);
1212void hns_roce_exit(struct hns_roce_dev *hr_dev);
40b4b79c 1213int hns_roce_fill_res_cq_entry(struct sk_buff *msg, struct ib_cq *ib_cq);
f2b070f3 1214int hns_roce_fill_res_cq_entry_raw(struct sk_buff *msg, struct ib_cq *ib_cq);
e198d65d 1215int hns_roce_fill_res_qp_entry(struct sk_buff *msg, struct ib_qp *ib_qp);
3e89d78b 1216int hns_roce_fill_res_qp_entry_raw(struct sk_buff *msg, struct ib_qp *ib_qp);
dc9981ef 1217int hns_roce_fill_res_mr_entry(struct sk_buff *msg, struct ib_mr *ib_mr);
3d67e7e2 1218int hns_roce_fill_res_mr_entry_raw(struct sk_buff *msg, struct ib_mr *ib_mr);
6d202d9f
CT
1219struct hns_user_mmap_entry *
1220hns_roce_user_mmap_entry_insert(struct ib_ucontext *ucontext, u64 address,
1221 size_t length,
1222 enum hns_roce_mmap_type mmap_type);
9a443537 1223#endif /* _HNS_ROCE_DEVICE_H */