RDMA/hns: Use flush framework for the case in aeq
[linux-2.6-block.git] / drivers / infiniband / hw / hns / hns_roce_hw_v2.c
CommitLineData
dd74282d
WHX
1/*
2 * Copyright (c) 2016-2017 Hisilicon Limited.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#include <linux/acpi.h>
34#include <linux/etherdevice.h>
35#include <linux/interrupt.h>
36#include <linux/kernel.h>
0b25c9cc 37#include <linux/types.h>
d4994d2f 38#include <net/addrconf.h>
610b8967 39#include <rdma/ib_addr.h>
a70c0739 40#include <rdma/ib_cache.h>
dd74282d 41#include <rdma/ib_umem.h>
bdeacabd 42#include <rdma/uverbs_ioctl.h>
dd74282d
WHX
43
44#include "hnae3.h"
45#include "hns_roce_common.h"
46#include "hns_roce_device.h"
47#include "hns_roce_cmd.h"
48#include "hns_roce_hem.h"
a04ff739 49#include "hns_roce_hw_v2.h"
dd74282d 50
2d407888
WHX
51static void set_data_seg_v2(struct hns_roce_v2_wqe_data_seg *dseg,
52 struct ib_sge *sg)
53{
54 dseg->lkey = cpu_to_le32(sg->lkey);
55 dseg->addr = cpu_to_le64(sg->addr);
56 dseg->len = cpu_to_le32(sg->length);
57}
58
68a997c5
YL
/* Build the fast-register-MR (FRMR) send WQE from an IB_WR_REG_MR request.
 * The RC send WQE layout is reused for FRMR, so several fields carry
 * FRMR-specific meanings (see the note below).
 */
static void set_frmr_seg(struct hns_roce_v2_rc_send_wqe *rc_sq_wqe,
			 struct hns_roce_wqe_frmr_seg *fseg,
			 const struct ib_reg_wr *wr)
{
	struct hns_roce_mr *mr = to_hr_mr(wr->mr);

	/* Translate the generic ib_access_flags into WQE access bits. */
	roce_set_bit(rc_sq_wqe->byte_4, V2_RC_FRMR_WQE_BYTE_4_BIND_EN_S,
		     wr->access & IB_ACCESS_MW_BIND ? 1 : 0);
	roce_set_bit(rc_sq_wqe->byte_4, V2_RC_FRMR_WQE_BYTE_4_ATOMIC_S,
		     wr->access & IB_ACCESS_REMOTE_ATOMIC ? 1 : 0);
	roce_set_bit(rc_sq_wqe->byte_4, V2_RC_FRMR_WQE_BYTE_4_RR_S,
		     wr->access & IB_ACCESS_REMOTE_READ ? 1 : 0);
	roce_set_bit(rc_sq_wqe->byte_4, V2_RC_FRMR_WQE_BYTE_4_RW_S,
		     wr->access & IB_ACCESS_REMOTE_WRITE ? 1 : 0);
	roce_set_bit(rc_sq_wqe->byte_4, V2_RC_FRMR_WQE_BYTE_4_LW_S,
		     wr->access & IB_ACCESS_LOCAL_WRITE ? 1 : 0);

	/* Data structure reuse may lead to confusion: in FRMR mode the
	 * msg_len/inv_key pair holds the PBL base address (low/high 32 bits),
	 * and byte_16/byte_20 hold the MR length instead of their usual roles.
	 */
	rc_sq_wqe->msg_len = cpu_to_le32(mr->pbl_ba & 0xffffffff);
	rc_sq_wqe->inv_key = cpu_to_le32(mr->pbl_ba >> 32);

	rc_sq_wqe->byte_16 = cpu_to_le32(wr->mr->length & 0xffffffff);
	rc_sq_wqe->byte_20 = cpu_to_le32(wr->mr->length >> 32);
	rc_sq_wqe->rkey = cpu_to_le32(wr->key);
	rc_sq_wqe->va = cpu_to_le64(wr->mr->iova);

	fseg->pbl_size = cpu_to_le32(mr->pbl_size);
	/* Hardware expects the PBL page size with a fixed offset applied. */
	roce_set_field(fseg->mode_buf_pg_sz,
		       V2_RC_FRMR_WQE_BYTE_40_PBL_BUF_PG_SZ_M,
		       V2_RC_FRMR_WQE_BYTE_40_PBL_BUF_PG_SZ_S,
		       mr->pbl_buf_pg_sz + PG_SHIFT_OFFSET);
	roce_set_bit(fseg->mode_buf_pg_sz,
		     V2_RC_FRMR_WQE_BYTE_40_BLK_MODE_S, 0);
}
94
384f8818
LO
95static void set_atomic_seg(struct hns_roce_wqe_atomic_seg *aseg,
96 const struct ib_atomic_wr *wr)
97{
98 if (wr->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
99 aseg->fetchadd_swap_data = cpu_to_le64(wr->swap);
100 aseg->cmp_data = cpu_to_le64(wr->compare_add);
101 } else {
102 aseg->fetchadd_swap_data = cpu_to_le64(wr->compare_add);
103 aseg->cmp_data = 0;
104 }
105}
106
/* Write the SGEs that do not fit in the WQE proper into the extended SGE
 * area.  The extended area may wrap over a buffer page boundary, so the
 * entries are split into a first-page run and a second-page run.
 *
 * @sge_ind is advanced by the number of extended SGEs consumed.
 */
static void set_extend_sge(struct hns_roce_qp *qp, const struct ib_send_wr *wr,
			   unsigned int *sge_ind, int valid_num_sge)
{
	struct hns_roce_v2_wqe_data_seg *dseg;
	struct ib_sge *sg;
	int num_in_wqe = 0;
	int extend_sge_num;
	int fi_sge_num;
	int se_sge_num;
	int shift;
	int i;

	/* RC/UC WQEs already carry a fixed number of SGEs inline. */
	if (qp->ibqp.qp_type == IB_QPT_RC || qp->ibqp.qp_type == IB_QPT_UC)
		num_in_wqe = HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE;
	extend_sge_num = valid_num_sge - num_in_wqe;
	sg = wr->sg_list + num_in_wqe;
	shift = qp->hr_buf.page_shift;

	/*
	 * Check whether wr->num_sge sges are in the same page. If not, we
	 * should calculate how many sges in the first page and the second
	 * page.
	 */
	dseg = get_send_extend_sge(qp, (*sge_ind) & (qp->sge.sge_cnt - 1));
	/* Entries that still fit before the next page boundary. */
	fi_sge_num = (round_up((uintptr_t)dseg, 1 << shift) -
		      (uintptr_t)dseg) /
		      sizeof(struct hns_roce_v2_wqe_data_seg);
	if (extend_sge_num > fi_sge_num) {
		se_sge_num = extend_sge_num - fi_sge_num;
		for (i = 0; i < fi_sge_num; i++) {
			set_data_seg_v2(dseg++, sg + i);
			(*sge_ind)++;
		}
		/* Re-fetch the descriptor pointer for the next page. */
		dseg = get_send_extend_sge(qp,
					   (*sge_ind) & (qp->sge.sge_cnt - 1));
		for (i = 0; i < se_sge_num; i++) {
			set_data_seg_v2(dseg++, sg + fi_sge_num + i);
			(*sge_ind)++;
		}
	} else {
		for (i = 0; i < extend_sge_num; i++) {
			set_data_seg_v2(dseg++, sg + i);
			(*sge_ind)++;
		}
	}
}
153
/* Place the data payload of an RC send WQE: either copy the SG data inline
 * (IB_SEND_INLINE) or emit data segments, spilling into the extended SGE
 * area when there are more valid SGEs than fit in the WQE.
 *
 * Returns 0 on success or -EINVAL (with *bad_wr set) when inline data is
 * too long or inline is requested for RDMA READ.
 */
static int set_rwqe_data_seg(struct ib_qp *ibqp, const struct ib_send_wr *wr,
			     struct hns_roce_v2_rc_send_wqe *rc_sq_wqe,
			     void *wqe, unsigned int *sge_ind,
			     int valid_num_sge,
			     const struct ib_send_wr **bad_wr)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
	struct hns_roce_v2_wqe_data_seg *dseg = wqe;
	struct hns_roce_qp *qp = to_hr_qp(ibqp);
	int j = 0;
	int i;

	if (wr->send_flags & IB_SEND_INLINE && valid_num_sge) {
		/* msg_len was accumulated by the caller before this call. */
		if (le32_to_cpu(rc_sq_wqe->msg_len) >
		    hr_dev->caps.max_sq_inline) {
			*bad_wr = wr;
			dev_err(hr_dev->dev, "inline len(1-%d)=%d, illegal",
				rc_sq_wqe->msg_len, hr_dev->caps.max_sq_inline);
			return -EINVAL;
		}

		if (wr->opcode == IB_WR_RDMA_READ) {
			*bad_wr = wr;
			dev_err(hr_dev->dev, "Not support inline data!\n");
			return -EINVAL;
		}

		/* Copy the payload bytes directly into the WQE. */
		for (i = 0; i < wr->num_sge; i++) {
			memcpy(wqe, ((void *)wr->sg_list[i].addr),
			       wr->sg_list[i].length);
			wqe += wr->sg_list[i].length;
		}

		roce_set_bit(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_INLINE_S,
			     1);
	} else {
		if (valid_num_sge <= HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE) {
			/* All valid SGEs fit directly in the WQE. */
			for (i = 0; i < wr->num_sge; i++) {
				if (likely(wr->sg_list[i].length)) {
					set_data_seg_v2(dseg, wr->sg_list + i);
					dseg++;
				}
			}
		} else {
			/* Record where the overflow SGEs start, then fill the
			 * in-WQE slots and spill the rest via set_extend_sge().
			 */
			roce_set_field(rc_sq_wqe->byte_20,
				     V2_RC_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_M,
				     V2_RC_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_S,
				     (*sge_ind) & (qp->sge.sge_cnt - 1));

			for (i = 0; i < wr->num_sge &&
			     j < HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE; i++) {
				if (likely(wr->sg_list[i].length)) {
					set_data_seg_v2(dseg, wr->sg_list + i);
					dseg++;
					j++;
				}
			}

			set_extend_sge(qp, wr, sge_ind, valid_num_sge);
		}

		roce_set_field(rc_sq_wqe->byte_16,
			       V2_RC_SEND_WQE_BYTE_16_SGE_NUM_M,
			       V2_RC_SEND_WQE_BYTE_16_SGE_NUM_S, valid_num_sge);
	}

	return 0;
}
222
626903e9
XW
223static int check_send_valid(struct hns_roce_dev *hr_dev,
224 struct hns_roce_qp *hr_qp)
225{
226 struct ib_qp *ibqp = &hr_qp->ibqp;
227 struct device *dev = hr_dev->dev;
228
229 if (unlikely(ibqp->qp_type != IB_QPT_RC &&
230 ibqp->qp_type != IB_QPT_GSI &&
231 ibqp->qp_type != IB_QPT_UD)) {
232 dev_err(dev, "Not supported QP(0x%x)type!\n", ibqp->qp_type);
233 return -EOPNOTSUPP;
234 } else if (unlikely(hr_qp->state == IB_QPS_RESET ||
235 hr_qp->state == IB_QPS_INIT ||
236 hr_qp->state == IB_QPS_RTR)) {
237 dev_err(dev, "Post WQE fail, QP state %d!\n", hr_qp->state);
238 return -EINVAL;
239 } else if (unlikely(hr_dev->state >= HNS_ROCE_DEVICE_STATE_RST_DOWN)) {
240 dev_err(dev, "Post WQE fail, dev state %d!\n", hr_dev->state);
241 return -EIO;
242 }
243
244 return 0;
245}
246
d34ac5cd
BVA
/* Post a chain of send work requests to the SQ.
 *
 * Builds one hardware WQE per request (UD layout for GSI QPs, RC layout for
 * RC QPs), then rings the SQ doorbell once for the whole chain.  On any
 * failure *bad_wr points at the offending request and already-built WQEs
 * are still submitted via the doorbell path below.
 *
 * NOTE(review): if called with wr == NULL the loop never runs and 'ret' is
 * returned uninitialized — callers appear to always pass a non-NULL chain;
 * confirm against the verbs core.
 */
static int hns_roce_v2_post_send(struct ib_qp *ibqp,
				 const struct ib_send_wr *wr,
				 const struct ib_send_wr **bad_wr)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
	struct hns_roce_ah *ah = to_hr_ah(ud_wr(wr)->ah);
	struct hns_roce_v2_ud_send_wqe *ud_sq_wqe;
	struct hns_roce_v2_rc_send_wqe *rc_sq_wqe;
	struct hns_roce_qp *qp = to_hr_qp(ibqp);
	struct hns_roce_wqe_frmr_seg *fseg;
	struct device *dev = hr_dev->dev;
	struct hns_roce_v2_db sq_db = {};
	unsigned int owner_bit;
	unsigned int sge_idx;
	unsigned int wqe_idx;
	unsigned long flags;
	int valid_num_sge;
	void *wqe = NULL;
	bool loopback;
	u32 tmp_len;
	u32 hr_op;
	u8 *smac;
	int nreq;
	int ret;
	int i;

	spin_lock_irqsave(&qp->sq.lock, flags);

	ret = check_send_valid(hr_dev, qp);
	if (ret) {
		*bad_wr = wr;
		nreq = 0;
		goto out;
	}

	sge_idx = qp->next_sge;

	for (nreq = 0; wr; ++nreq, wr = wr->next) {
		if (hns_roce_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) {
			ret = -ENOMEM;
			*bad_wr = wr;
			goto out;
		}

		wqe_idx = (qp->sq.head + nreq) & (qp->sq.wqe_cnt - 1);

		if (unlikely(wr->num_sge > qp->sq.max_gs)) {
			dev_err(dev, "num_sge=%d > qp->sq.max_gs=%d\n",
				wr->num_sge, qp->sq.max_gs);
			ret = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		wqe = get_send_wqe(qp, wqe_idx);
		qp->sq.wrid[wqe_idx] = wr->wr_id;
		/* Owner bit flips each time the producer index wraps. */
		owner_bit =
		       ~(((qp->sq.head + nreq) >> ilog2(qp->sq.wqe_cnt)) & 0x1);
		valid_num_sge = 0;
		tmp_len = 0;

		/* Zero-length SGEs are skipped everywhere below. */
		for (i = 0; i < wr->num_sge; i++) {
			if (likely(wr->sg_list[i].length)) {
				tmp_len += wr->sg_list[i].length;
				valid_num_sge++;
			}
		}

		/* Corresponding to the QP type, wqe process separately */
		if (ibqp->qp_type == IB_QPT_GSI) {
			ud_sq_wqe = wqe;
			memset(ud_sq_wqe, 0, sizeof(*ud_sq_wqe));

			/* Destination MAC, spread over two WQE words. */
			roce_set_field(ud_sq_wqe->dmac, V2_UD_SEND_WQE_DMAC_0_M,
				       V2_UD_SEND_WQE_DMAC_0_S, ah->av.mac[0]);
			roce_set_field(ud_sq_wqe->dmac, V2_UD_SEND_WQE_DMAC_1_M,
				       V2_UD_SEND_WQE_DMAC_1_S, ah->av.mac[1]);
			roce_set_field(ud_sq_wqe->dmac, V2_UD_SEND_WQE_DMAC_2_M,
				       V2_UD_SEND_WQE_DMAC_2_S, ah->av.mac[2]);
			roce_set_field(ud_sq_wqe->dmac, V2_UD_SEND_WQE_DMAC_3_M,
				       V2_UD_SEND_WQE_DMAC_3_S, ah->av.mac[3]);
			roce_set_field(ud_sq_wqe->byte_48,
				       V2_UD_SEND_WQE_BYTE_48_DMAC_4_M,
				       V2_UD_SEND_WQE_BYTE_48_DMAC_4_S,
				       ah->av.mac[4]);
			roce_set_field(ud_sq_wqe->byte_48,
				       V2_UD_SEND_WQE_BYTE_48_DMAC_5_M,
				       V2_UD_SEND_WQE_BYTE_48_DMAC_5_S,
				       ah->av.mac[5]);

			/* MAC loopback: destination equals our own port MAC. */
			smac = (u8 *)hr_dev->dev_addr[qp->port];
			loopback = ether_addr_equal_unaligned(ah->av.mac,
							      smac) ? 1 : 0;

			roce_set_bit(ud_sq_wqe->byte_40,
				     V2_UD_SEND_WQE_BYTE_40_LBI_S, loopback);

			roce_set_field(ud_sq_wqe->byte_4,
				       V2_UD_SEND_WQE_BYTE_4_OPCODE_M,
				       V2_UD_SEND_WQE_BYTE_4_OPCODE_S,
				       HNS_ROCE_V2_WQE_OP_SEND);

			ud_sq_wqe->msg_len =
			 cpu_to_le32(le32_to_cpu(ud_sq_wqe->msg_len) + tmp_len);

			switch (wr->opcode) {
			case IB_WR_SEND_WITH_IMM:
			case IB_WR_RDMA_WRITE_WITH_IMM:
				ud_sq_wqe->immtdata =
				      cpu_to_le32(be32_to_cpu(wr->ex.imm_data));
				break;
			default:
				ud_sq_wqe->immtdata = 0;
				break;
			}

			/* Set sig attr */
			roce_set_bit(ud_sq_wqe->byte_4,
				   V2_UD_SEND_WQE_BYTE_4_CQE_S,
				   (wr->send_flags & IB_SEND_SIGNALED) ? 1 : 0);

			/* Set se attr */
			roce_set_bit(ud_sq_wqe->byte_4,
				  V2_UD_SEND_WQE_BYTE_4_SE_S,
				  (wr->send_flags & IB_SEND_SOLICITED) ? 1 : 0);

			roce_set_bit(ud_sq_wqe->byte_4,
				     V2_UD_SEND_WQE_BYTE_4_OWNER_S, owner_bit);

			roce_set_field(ud_sq_wqe->byte_16,
				       V2_UD_SEND_WQE_BYTE_16_PD_M,
				       V2_UD_SEND_WQE_BYTE_16_PD_S,
				       to_hr_pd(ibqp->pd)->pdn);

			roce_set_field(ud_sq_wqe->byte_16,
				       V2_UD_SEND_WQE_BYTE_16_SGE_NUM_M,
				       V2_UD_SEND_WQE_BYTE_16_SGE_NUM_S,
				       valid_num_sge);

			/* UD payload always lives in the extended SGE area. */
			roce_set_field(ud_sq_wqe->byte_20,
				     V2_UD_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_M,
				     V2_UD_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_S,
				     sge_idx & (qp->sge.sge_cnt - 1));

			roce_set_field(ud_sq_wqe->byte_24,
				       V2_UD_SEND_WQE_BYTE_24_UDPSPN_M,
				       V2_UD_SEND_WQE_BYTE_24_UDPSPN_S, 0);
			/* High bit set in remote_qkey selects the local qkey. */
			ud_sq_wqe->qkey =
			     cpu_to_le32(ud_wr(wr)->remote_qkey & 0x80000000 ?
			     qp->qkey : ud_wr(wr)->remote_qkey);
			roce_set_field(ud_sq_wqe->byte_32,
				       V2_UD_SEND_WQE_BYTE_32_DQPN_M,
				       V2_UD_SEND_WQE_BYTE_32_DQPN_S,
				       ud_wr(wr)->remote_qpn);

			roce_set_field(ud_sq_wqe->byte_36,
				       V2_UD_SEND_WQE_BYTE_36_VLAN_M,
				       V2_UD_SEND_WQE_BYTE_36_VLAN_S,
				       ah->av.vlan_id);
			roce_set_field(ud_sq_wqe->byte_36,
				       V2_UD_SEND_WQE_BYTE_36_HOPLIMIT_M,
				       V2_UD_SEND_WQE_BYTE_36_HOPLIMIT_S,
				       ah->av.hop_limit);
			roce_set_field(ud_sq_wqe->byte_36,
				       V2_UD_SEND_WQE_BYTE_36_TCLASS_M,
				       V2_UD_SEND_WQE_BYTE_36_TCLASS_S,
				       ah->av.tclass);
			roce_set_field(ud_sq_wqe->byte_40,
				       V2_UD_SEND_WQE_BYTE_40_FLOW_LABEL_M,
				       V2_UD_SEND_WQE_BYTE_40_FLOW_LABEL_S,
				       ah->av.flowlabel);
			roce_set_field(ud_sq_wqe->byte_40,
				       V2_UD_SEND_WQE_BYTE_40_SL_M,
				       V2_UD_SEND_WQE_BYTE_40_SL_S,
				       ah->av.sl);
			roce_set_field(ud_sq_wqe->byte_40,
				       V2_UD_SEND_WQE_BYTE_40_PORTN_M,
				       V2_UD_SEND_WQE_BYTE_40_PORTN_S,
				       qp->port);

			roce_set_bit(ud_sq_wqe->byte_40,
				     V2_UD_SEND_WQE_BYTE_40_UD_VLAN_EN_S,
				     ah->av.vlan_en ? 1 : 0);
			roce_set_field(ud_sq_wqe->byte_48,
				       V2_UD_SEND_WQE_BYTE_48_SGID_INDX_M,
				       V2_UD_SEND_WQE_BYTE_48_SGID_INDX_S,
				       hns_get_gid_index(hr_dev, qp->phy_port,
							 ah->av.gid_index));

			memcpy(&ud_sq_wqe->dgid[0], &ah->av.dgid[0],
			       GID_LEN_V2);

			set_extend_sge(qp, wr, &sge_idx, valid_num_sge);
		} else if (ibqp->qp_type == IB_QPT_RC) {
			rc_sq_wqe = wqe;
			memset(rc_sq_wqe, 0, sizeof(*rc_sq_wqe));

			rc_sq_wqe->msg_len =
			 cpu_to_le32(le32_to_cpu(rc_sq_wqe->msg_len) + tmp_len);

			switch (wr->opcode) {
			case IB_WR_SEND_WITH_IMM:
			case IB_WR_RDMA_WRITE_WITH_IMM:
				rc_sq_wqe->immtdata =
				      cpu_to_le32(be32_to_cpu(wr->ex.imm_data));
				break;
			case IB_WR_SEND_WITH_INV:
				rc_sq_wqe->inv_key =
					cpu_to_le32(wr->ex.invalidate_rkey);
				break;
			default:
				rc_sq_wqe->immtdata = 0;
				break;
			}

			roce_set_bit(rc_sq_wqe->byte_4,
				     V2_RC_SEND_WQE_BYTE_4_FENCE_S,
				     (wr->send_flags & IB_SEND_FENCE) ? 1 : 0);

			roce_set_bit(rc_sq_wqe->byte_4,
				  V2_RC_SEND_WQE_BYTE_4_SE_S,
				  (wr->send_flags & IB_SEND_SOLICITED) ? 1 : 0);

			roce_set_bit(rc_sq_wqe->byte_4,
				   V2_RC_SEND_WQE_BYTE_4_CQE_S,
				   (wr->send_flags & IB_SEND_SIGNALED) ? 1 : 0);

			roce_set_bit(rc_sq_wqe->byte_4,
				     V2_RC_SEND_WQE_BYTE_4_OWNER_S, owner_bit);

			/* 'wqe' now points past the fixed RC WQE header, at
			 * the opcode-specific segment (FRMR/atomic/data).
			 */
			wqe += sizeof(struct hns_roce_v2_rc_send_wqe);
			switch (wr->opcode) {
			case IB_WR_RDMA_READ:
				hr_op = HNS_ROCE_V2_WQE_OP_RDMA_READ;
				rc_sq_wqe->rkey =
					cpu_to_le32(rdma_wr(wr)->rkey);
				rc_sq_wqe->va =
					cpu_to_le64(rdma_wr(wr)->remote_addr);
				break;
			case IB_WR_RDMA_WRITE:
				hr_op = HNS_ROCE_V2_WQE_OP_RDMA_WRITE;
				rc_sq_wqe->rkey =
					cpu_to_le32(rdma_wr(wr)->rkey);
				rc_sq_wqe->va =
					cpu_to_le64(rdma_wr(wr)->remote_addr);
				break;
			case IB_WR_RDMA_WRITE_WITH_IMM:
				hr_op = HNS_ROCE_V2_WQE_OP_RDMA_WRITE_WITH_IMM;
				rc_sq_wqe->rkey =
					cpu_to_le32(rdma_wr(wr)->rkey);
				rc_sq_wqe->va =
					cpu_to_le64(rdma_wr(wr)->remote_addr);
				break;
			case IB_WR_SEND:
				hr_op = HNS_ROCE_V2_WQE_OP_SEND;
				break;
			case IB_WR_SEND_WITH_INV:
				hr_op = HNS_ROCE_V2_WQE_OP_SEND_WITH_INV;
				break;
			case IB_WR_SEND_WITH_IMM:
				hr_op = HNS_ROCE_V2_WQE_OP_SEND_WITH_IMM;
				break;
			case IB_WR_LOCAL_INV:
				hr_op = HNS_ROCE_V2_WQE_OP_LOCAL_INV;
				roce_set_bit(rc_sq_wqe->byte_4,
					     V2_RC_SEND_WQE_BYTE_4_SO_S, 1);
				rc_sq_wqe->inv_key =
					cpu_to_le32(wr->ex.invalidate_rkey);
				break;
			case IB_WR_REG_MR:
				hr_op = HNS_ROCE_V2_WQE_OP_FAST_REG_PMR;
				fseg = wqe;
				set_frmr_seg(rc_sq_wqe, fseg, reg_wr(wr));
				break;
			case IB_WR_ATOMIC_CMP_AND_SWP:
				hr_op = HNS_ROCE_V2_WQE_OP_ATOM_CMP_AND_SWAP;
				rc_sq_wqe->rkey =
					cpu_to_le32(atomic_wr(wr)->rkey);
				rc_sq_wqe->va =
					cpu_to_le64(atomic_wr(wr)->remote_addr);
				break;
			case IB_WR_ATOMIC_FETCH_AND_ADD:
				hr_op = HNS_ROCE_V2_WQE_OP_ATOM_FETCH_AND_ADD;
				rc_sq_wqe->rkey =
					cpu_to_le32(atomic_wr(wr)->rkey);
				rc_sq_wqe->va =
					cpu_to_le64(atomic_wr(wr)->remote_addr);
				break;
			case IB_WR_MASKED_ATOMIC_CMP_AND_SWP:
				hr_op =
				      HNS_ROCE_V2_WQE_OP_ATOM_MSK_CMP_AND_SWAP;
				break;
			case IB_WR_MASKED_ATOMIC_FETCH_AND_ADD:
				hr_op =
				     HNS_ROCE_V2_WQE_OP_ATOM_MSK_FETCH_AND_ADD;
				break;
			default:
				hr_op = HNS_ROCE_V2_WQE_OP_MASK;
				break;
			}

			roce_set_field(rc_sq_wqe->byte_4,
				       V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
				       V2_RC_SEND_WQE_BYTE_4_OPCODE_S, hr_op);

			if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
			    wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD) {
				struct hns_roce_v2_wqe_data_seg *dseg;

				/* Atomics carry one data segment followed by
				 * the atomic operand segment.
				 */
				dseg = wqe;
				set_data_seg_v2(dseg, wr->sg_list);
				wqe += sizeof(struct hns_roce_v2_wqe_data_seg);
				set_atomic_seg(wqe, atomic_wr(wr));
				roce_set_field(rc_sq_wqe->byte_16,
					       V2_RC_SEND_WQE_BYTE_16_SGE_NUM_M,
					       V2_RC_SEND_WQE_BYTE_16_SGE_NUM_S,
					       valid_num_sge);
			} else if (wr->opcode != IB_WR_REG_MR) {
				ret = set_rwqe_data_seg(ibqp, wr, rc_sq_wqe,
							wqe, &sge_idx,
							valid_num_sge, bad_wr);
				if (ret)
					goto out;
			}
		} else {
			dev_err(dev, "Illegal qp_type(0x%x)\n", ibqp->qp_type);
			spin_unlock_irqrestore(&qp->sq.lock, flags);
			*bad_wr = wr;
			return -EOPNOTSUPP;
		}
	}

out:
	if (likely(nreq)) {
		qp->sq.head += nreq;
		/* Ensure all WQE stores are visible before the doorbell. */
		wmb();

		roce_set_field(sq_db.byte_4, V2_DB_BYTE_4_TAG_M,
			       V2_DB_BYTE_4_TAG_S, qp->doorbell_qpn);
		roce_set_field(sq_db.byte_4, V2_DB_BYTE_4_CMD_M,
			       V2_DB_BYTE_4_CMD_S, HNS_ROCE_V2_SQ_DB);
		roce_set_field(sq_db.parameter, V2_DB_PARAMETER_IDX_M,
			       V2_DB_PARAMETER_IDX_S,
			       qp->sq.head & ((qp->sq.wqe_cnt << 1) - 1));
		roce_set_field(sq_db.parameter, V2_DB_PARAMETER_SL_M,
			       V2_DB_PARAMETER_SL_S, qp->sl);

		hns_roce_write64(hr_dev, (__le32 *)&sq_db, qp->sq.db_reg_l);

		qp->next_sge = sge_idx;

		/*
		 * Hip08 hardware cannot flush the WQEs in SQ if the QP state
		 * gets into errored mode. Hence, as a workaround to this
		 * hardware limitation, driver needs to assist in flushing. But
		 * the flushing operation uses mailbox to convey the QP state to
		 * the hardware and which can sleep due to the mutex protection
		 * around the mailbox calls. Hence, use the deferred flush for
		 * now.
		 */
		if (qp->state == IB_QPS_ERR)
			if (!test_and_set_bit(HNS_ROCE_FLUSH_FLAG,
					      &qp->flush_flag))
				init_flush_work(hr_dev, qp);
	}

	spin_unlock_irqrestore(&qp->sq.lock, flags);

	return ret;
}
619
626903e9
XW
620static int check_recv_valid(struct hns_roce_dev *hr_dev,
621 struct hns_roce_qp *hr_qp)
622{
623 if (unlikely(hr_dev->state >= HNS_ROCE_DEVICE_STATE_RST_DOWN))
624 return -EIO;
625 else if (hr_qp->state == IB_QPS_RESET)
626 return -EINVAL;
627
628 return 0;
629}
630
d34ac5cd
BVA
/* Post a chain of receive work requests to the RQ, then update the RQ
 * doorbell record once for the whole chain.  On failure *bad_wr points at
 * the offending request.
 */
static int hns_roce_v2_post_recv(struct ib_qp *ibqp,
				 const struct ib_recv_wr *wr,
				 const struct ib_recv_wr **bad_wr)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
	struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
	struct hns_roce_v2_wqe_data_seg *dseg;
	struct hns_roce_rinl_sge *sge_list;
	struct device *dev = hr_dev->dev;
	unsigned long flags;
	void *wqe = NULL;
	u32 wqe_idx;
	int nreq;
	int ret;
	int i;

	spin_lock_irqsave(&hr_qp->rq.lock, flags);

	ret = check_recv_valid(hr_dev, hr_qp);
	if (ret) {
		*bad_wr = wr;
		nreq = 0;
		goto out;
	}

	for (nreq = 0; wr; ++nreq, wr = wr->next) {
		if (hns_roce_wq_overflow(&hr_qp->rq, nreq,
					 hr_qp->ibqp.recv_cq)) {
			ret = -ENOMEM;
			*bad_wr = wr;
			goto out;
		}

		wqe_idx = (hr_qp->rq.head + nreq) & (hr_qp->rq.wqe_cnt - 1);

		if (unlikely(wr->num_sge > hr_qp->rq.max_gs)) {
			dev_err(dev, "rq:num_sge=%d > qp->sq.max_gs=%d\n",
				wr->num_sge, hr_qp->rq.max_gs);
			ret = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		wqe = get_recv_wqe(hr_qp, wqe_idx);
		dseg = (struct hns_roce_v2_wqe_data_seg *)wqe;
		/* Zero-length SGEs are skipped. */
		for (i = 0; i < wr->num_sge; i++) {
			if (!wr->sg_list[i].length)
				continue;
			set_data_seg_v2(dseg, wr->sg_list + i);
			dseg++;
		}

		/* Terminate a short SGE list with an invalid-lkey sentinel. */
		if (i < hr_qp->rq.max_gs) {
			dseg->lkey = cpu_to_le32(HNS_ROCE_INVALID_LKEY);
			dseg->addr = 0;
		}

		/* rq support inline data: remember the SG addresses so the
		 * completion path can copy inline payloads back out.
		 */
		if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) {
			sge_list = hr_qp->rq_inl_buf.wqe_list[wqe_idx].sg_list;
			hr_qp->rq_inl_buf.wqe_list[wqe_idx].sge_cnt =
							       (u32)wr->num_sge;
			for (i = 0; i < wr->num_sge; i++) {
				sge_list[i].addr =
					       (void *)(u64)wr->sg_list[i].addr;
				sge_list[i].len = wr->sg_list[i].length;
			}
		}

		hr_qp->rq.wrid[wqe_idx] = wr->wr_id;
	}

out:
	if (likely(nreq)) {
		hr_qp->rq.head += nreq;
		/* Ensure WQE stores are visible before the doorbell record. */
		wmb();

		*hr_qp->rdb.db_record = hr_qp->rq.head & 0xffff;

		/*
		 * Hip08 hardware cannot flush the WQEs in RQ if the QP state
		 * gets into errored mode. Hence, as a workaround to this
		 * hardware limitation, driver needs to assist in flushing. But
		 * the flushing operation uses mailbox to convey the QP state to
		 * the hardware and which can sleep due to the mutex protection
		 * around the mailbox calls. Hence, use the deferred flush for
		 * now.
		 */
		if (hr_qp->state == IB_QPS_ERR)
			if (!test_and_set_bit(HNS_ROCE_FLUSH_FLAG,
					      &hr_qp->flush_flag))
				init_flush_work(hr_dev, hr_qp);
	}
	spin_unlock_irqrestore(&hr_qp->rq.lock, flags);

	return ret;
}
729
6a04aed6
WHX
/* Handle a command issued after at least one hardware reset has completed.
 * Returns CMD_RST_PRC_EBUSY while instance init / reset handling is still
 * in progress, CMD_RST_PRC_SUCCESS otherwise.
 */
static int hns_roce_v2_cmd_hw_reseted(struct hns_roce_dev *hr_dev,
				      unsigned long instance_stage,
				      unsigned long reset_stage)
{
	/* When hardware reset has been completed once or more, we should stop
	 * sending mailbox&cmq&doorbell to hardware. If now in .init_instance()
	 * function, we should exit with error. If now at HNAE3_INIT_CLIENT
	 * stage of soft reset process, we should exit with error, and then
	 * HNAE3_INIT_CLIENT related process can rollback the operation like
	 * notifing hardware to free resources, HNAE3_INIT_CLIENT related
	 * process will exit with error to notify NIC driver to reschedule soft
	 * reset process once again.
	 */
	hr_dev->is_reset = true;
	hr_dev->dis_db = true;

	if (reset_stage == HNS_ROCE_STATE_RST_INIT ||
	    instance_stage == HNS_ROCE_STATE_INIT)
		return CMD_RST_PRC_EBUSY;

	return CMD_RST_PRC_SUCCESS;
}
752
/* Handle a command issued while a hardware reset is in progress.  Disables
 * doorbells and queries the hnae3 layer to decide whether the reset has
 * already finished.
 */
static int hns_roce_v2_cmd_hw_resetting(struct hns_roce_dev *hr_dev,
					unsigned long instance_stage,
					unsigned long reset_stage)
{
	struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
	struct hnae3_handle *handle = priv->handle;
	const struct hnae3_ae_ops *ops = handle->ae_algo->ops;

	/* When hardware reset is detected, we should stop sending mailbox&cmq&
	 * doorbell to hardware. If now in .init_instance() function, we should
	 * exit with error. If now at HNAE3_INIT_CLIENT stage of soft reset
	 * process, we should exit with error, and then HNAE3_INIT_CLIENT
	 * related process can rollback the operation like notifing hardware to
	 * free resources, HNAE3_INIT_CLIENT related process will exit with
	 * error to notify NIC driver to reschedule soft reset process once
	 * again.
	 */
	hr_dev->dis_db = true;
	if (!ops->get_hw_reset_stat(handle))
		hr_dev->is_reset = true;

	if (!hr_dev->is_reset || reset_stage == HNS_ROCE_STATE_RST_INIT ||
	    instance_stage == HNS_ROCE_STATE_INIT)
		return CMD_RST_PRC_EBUSY;

	return CMD_RST_PRC_SUCCESS;
}
780
/* Handle a command issued while the NIC driver's software reset is running.
 * Always returns CMD_RST_PRC_EBUSY so the caller retries/aborts.
 */
static int hns_roce_v2_cmd_sw_resetting(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
	struct hnae3_handle *handle = priv->handle;
	const struct hnae3_ae_ops *ops = handle->ae_algo->ops;

	/* When software reset is detected at .init_instance() function, we
	 * should stop sending mailbox&cmq&doorbell to hardware, and exit
	 * with error.
	 */
	hr_dev->dis_db = true;
	/* A changed reset count means a full reset completed meanwhile. */
	if (ops->ae_dev_reset_cnt(handle) != hr_dev->reset_cnt)
		hr_dev->is_reset = true;

	return CMD_RST_PRC_EBUSY;
}
797
/* Decide how a pending command should be processed with respect to the
 * device's reset status.  Dispatches to the hw-reseted / hw-resetting /
 * sw-resetting handlers above; returns 0 when no reset is in play.
 */
static int hns_roce_v2_rst_process_cmd(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
	struct hnae3_handle *handle = priv->handle;
	const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
	unsigned long instance_stage; /* the current instance stage */
	unsigned long reset_stage; /* the current reset stage */
	unsigned long reset_cnt;
	bool sw_resetting;
	bool hw_resetting;

	if (hr_dev->is_reset)
		return CMD_RST_PRC_SUCCESS;

	/* Get information about reset from NIC driver or RoCE driver itself,
	 * the meaning of the following variables from NIC driver are described
	 * as below:
	 * reset_cnt -- The count value of completed hardware reset.
	 * hw_resetting -- Whether hardware device is resetting now.
	 * sw_resetting -- Whether NIC's software reset process is running now.
	 */
	instance_stage = handle->rinfo.instance_state;
	reset_stage = handle->rinfo.reset_state;
	reset_cnt = ops->ae_dev_reset_cnt(handle);
	hw_resetting = ops->get_hw_reset_stat(handle);
	sw_resetting = ops->ae_dev_resetting(handle);

	if (reset_cnt != hr_dev->reset_cnt)
		return hns_roce_v2_cmd_hw_reseted(hr_dev, instance_stage,
						  reset_stage);
	else if (hw_resetting)
		return hns_roce_v2_cmd_hw_resetting(hr_dev, instance_stage,
						    reset_stage);
	else if (sw_resetting && instance_stage == HNS_ROCE_STATE_INIT)
		return hns_roce_v2_cmd_sw_resetting(hr_dev);

	return 0;
}
836
a04ff739
WHX
837static int hns_roce_cmq_space(struct hns_roce_v2_cmq_ring *ring)
838{
839 int ntu = ring->next_to_use;
840 int ntc = ring->next_to_clean;
841 int used = (ntu - ntc + ring->desc_num) % ring->desc_num;
842
843 return ring->desc_num - used - 1;
844}
845
/* Allocate and DMA-map the descriptor array of a command queue ring.
 * Returns 0 on success or -ENOMEM; on mapping failure the allocation is
 * released and ring->desc is reset to NULL.
 */
static int hns_roce_alloc_cmq_desc(struct hns_roce_dev *hr_dev,
				   struct hns_roce_v2_cmq_ring *ring)
{
	int size = ring->desc_num * sizeof(struct hns_roce_cmq_desc);

	ring->desc = kzalloc(size, GFP_KERNEL);
	if (!ring->desc)
		return -ENOMEM;

	ring->desc_dma_addr = dma_map_single(hr_dev->dev, ring->desc, size,
					     DMA_BIDIRECTIONAL);
	if (dma_mapping_error(hr_dev->dev, ring->desc_dma_addr)) {
		ring->desc_dma_addr = 0;
		kfree(ring->desc);
		ring->desc = NULL;
		return -ENOMEM;
	}

	return 0;
}
866
/* Unmap and free a command queue ring's descriptor array (inverse of
 * hns_roce_alloc_cmq_desc()).
 */
static void hns_roce_free_cmq_desc(struct hns_roce_dev *hr_dev,
				   struct hns_roce_v2_cmq_ring *ring)
{
	dma_unmap_single(hr_dev->dev, ring->desc_dma_addr,
			 ring->desc_num * sizeof(struct hns_roce_cmq_desc),
			 DMA_BIDIRECTIONAL);

	ring->desc_dma_addr = 0;
	kfree(ring->desc);
}
877
878static int hns_roce_init_cmq_ring(struct hns_roce_dev *hr_dev, bool ring_type)
879{
880 struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
881 struct hns_roce_v2_cmq_ring *ring = (ring_type == TYPE_CSQ) ?
882 &priv->cmq.csq : &priv->cmq.crq;
883
884 ring->flag = ring_type;
885 ring->next_to_clean = 0;
886 ring->next_to_use = 0;
887
888 return hns_roce_alloc_cmq_desc(hr_dev, ring);
889}
890
/* Program the hardware registers of a command queue ring: DMA base address
 * (split low/high), depth, and zeroed head/tail pointers.
 */
static void hns_roce_cmq_init_regs(struct hns_roce_dev *hr_dev, bool ring_type)
{
	struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
	struct hns_roce_v2_cmq_ring *ring = (ring_type == TYPE_CSQ) ?
					    &priv->cmq.csq : &priv->cmq.crq;
	dma_addr_t dma = ring->desc_dma_addr;

	if (ring_type == TYPE_CSQ) {
		roce_write(hr_dev, ROCEE_TX_CMQ_BASEADDR_L_REG, (u32)dma);
		roce_write(hr_dev, ROCEE_TX_CMQ_BASEADDR_H_REG,
			   upper_32_bits(dma));
		/* Depth register takes the descriptor count scaled down. */
		roce_write(hr_dev, ROCEE_TX_CMQ_DEPTH_REG,
			   ring->desc_num >> HNS_ROCE_CMQ_DESC_NUM_S);
		roce_write(hr_dev, ROCEE_TX_CMQ_HEAD_REG, 0);
		roce_write(hr_dev, ROCEE_TX_CMQ_TAIL_REG, 0);
	} else {
		roce_write(hr_dev, ROCEE_RX_CMQ_BASEADDR_L_REG, (u32)dma);
		roce_write(hr_dev, ROCEE_RX_CMQ_BASEADDR_H_REG,
			   upper_32_bits(dma));
		roce_write(hr_dev, ROCEE_RX_CMQ_DEPTH_REG,
			   ring->desc_num >> HNS_ROCE_CMQ_DESC_NUM_S);
		roce_write(hr_dev, ROCEE_RX_CMQ_HEAD_REG, 0);
		roce_write(hr_dev, ROCEE_RX_CMQ_TAIL_REG, 0);
	}
}
916
/* Initialize the command queue pair (CSQ + CRQ): sizes, locks, timeout,
 * ring memory, and hardware registers.  Uses goto cleanup so the CSQ is
 * freed if CRQ init fails.
 */
static int hns_roce_v2_cmq_init(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
	int ret;

	/* Setup the queue entries for command queue */
	priv->cmq.csq.desc_num = CMD_CSQ_DESC_NUM;
	priv->cmq.crq.desc_num = CMD_CRQ_DESC_NUM;

	/* Setup the lock for command queue */
	spin_lock_init(&priv->cmq.csq.lock);
	spin_lock_init(&priv->cmq.crq.lock);

	/* Setup Tx write back timeout */
	priv->cmq.tx_timeout = HNS_ROCE_CMQ_TX_TIMEOUT;

	/* Init CSQ */
	ret = hns_roce_init_cmq_ring(hr_dev, TYPE_CSQ);
	if (ret) {
		dev_err(hr_dev->dev, "Init CSQ error, ret = %d.\n", ret);
		return ret;
	}

	/* Init CRQ */
	ret = hns_roce_init_cmq_ring(hr_dev, TYPE_CRQ);
	if (ret) {
		dev_err(hr_dev->dev, "Init CRQ error, ret = %d.\n", ret);
		goto err_crq;
	}

	/* Init CSQ REG */
	hns_roce_cmq_init_regs(hr_dev, TYPE_CSQ);

	/* Init CRQ REG */
	hns_roce_cmq_init_regs(hr_dev, TYPE_CRQ);

	return 0;

err_crq:
	hns_roce_free_cmq_desc(hr_dev, &priv->cmq.csq);

	return ret;
}
960
961static void hns_roce_v2_cmq_exit(struct hns_roce_dev *hr_dev)
962{
963 struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
964
965 hns_roce_free_cmq_desc(hr_dev, &priv->cmq.csq);
966 hns_roce_free_cmq_desc(hr_dev, &priv->cmq.crq);
967}
968
281d0ccf
CIK
969static void hns_roce_cmq_setup_basic_desc(struct hns_roce_cmq_desc *desc,
970 enum hns_roce_opcode_type opcode,
971 bool is_read)
a04ff739
WHX
972{
973 memset((void *)desc, 0, sizeof(struct hns_roce_cmq_desc));
974 desc->opcode = cpu_to_le16(opcode);
975 desc->flag =
976 cpu_to_le16(HNS_ROCE_CMD_FLAG_NO_INTR | HNS_ROCE_CMD_FLAG_IN);
977 if (is_read)
978 desc->flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_WR);
979 else
980 desc->flag &= cpu_to_le16(~HNS_ROCE_CMD_FLAG_WR);
981}
982
983static int hns_roce_cmq_csq_done(struct hns_roce_dev *hr_dev)
984{
985 struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
986 u32 head = roce_read(hr_dev, ROCEE_TX_CMQ_HEAD_REG);
987
988 return head == priv->cmq.csq.next_to_use;
989}
990
991static int hns_roce_cmq_csq_clean(struct hns_roce_dev *hr_dev)
992{
993 struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
994 struct hns_roce_v2_cmq_ring *csq = &priv->cmq.csq;
995 struct hns_roce_cmq_desc *desc;
996 u16 ntc = csq->next_to_clean;
997 u32 head;
998 int clean = 0;
999
1000 desc = &csq->desc[ntc];
1001 head = roce_read(hr_dev, ROCEE_TX_CMQ_HEAD_REG);
1002 while (head != ntc) {
1003 memset(desc, 0, sizeof(*desc));
1004 ntc++;
1005 if (ntc == csq->desc_num)
1006 ntc = 0;
1007 desc = &csq->desc[ntc];
1008 clean++;
1009 }
1010 csq->next_to_clean = ntc;
1011
1012 return clean;
1013}
1014
6a04aed6
WHX
1015static int __hns_roce_cmq_send(struct hns_roce_dev *hr_dev,
1016 struct hns_roce_cmq_desc *desc, int num)
a04ff739
WHX
1017{
1018 struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
1019 struct hns_roce_v2_cmq_ring *csq = &priv->cmq.csq;
1020 struct hns_roce_cmq_desc *desc_to_use;
1021 bool complete = false;
1022 u32 timeout = 0;
1023 int handle = 0;
1024 u16 desc_ret;
1025 int ret = 0;
1026 int ntc;
1027
1028 spin_lock_bh(&csq->lock);
1029
1030 if (num > hns_roce_cmq_space(csq)) {
1031 spin_unlock_bh(&csq->lock);
1032 return -EBUSY;
1033 }
1034
1035 /*
1036 * Record the location of desc in the cmq for this time
1037 * which will be use for hardware to write back
1038 */
1039 ntc = csq->next_to_use;
1040
1041 while (handle < num) {
1042 desc_to_use = &csq->desc[csq->next_to_use];
1043 *desc_to_use = desc[handle];
1044 dev_dbg(hr_dev->dev, "set cmq desc:\n");
1045 csq->next_to_use++;
1046 if (csq->next_to_use == csq->desc_num)
1047 csq->next_to_use = 0;
1048 handle++;
1049 }
1050
1051 /* Write to hardware */
1052 roce_write(hr_dev, ROCEE_TX_CMQ_TAIL_REG, csq->next_to_use);
1053
1054 /*
1055 * If the command is sync, wait for the firmware to write back,
1056 * if multi descriptors to be sent, use the first one to check
1057 */
bfe86035 1058 if (le16_to_cpu(desc->flag) & HNS_ROCE_CMD_FLAG_NO_INTR) {
a04ff739
WHX
1059 do {
1060 if (hns_roce_cmq_csq_done(hr_dev))
1061 break;
988e175b 1062 udelay(1);
a04ff739
WHX
1063 timeout++;
1064 } while (timeout < priv->cmq.tx_timeout);
1065 }
1066
1067 if (hns_roce_cmq_csq_done(hr_dev)) {
1068 complete = true;
1069 handle = 0;
1070 while (handle < num) {
1071 /* get the result of hardware write back */
1072 desc_to_use = &csq->desc[ntc];
1073 desc[handle] = *desc_to_use;
1074 dev_dbg(hr_dev->dev, "Get cmq desc:\n");
bfe86035 1075 desc_ret = le16_to_cpu(desc[handle].retval);
a04ff739
WHX
1076 if (desc_ret == CMD_EXEC_SUCCESS)
1077 ret = 0;
1078 else
1079 ret = -EIO;
1080 priv->cmq.last_status = desc_ret;
1081 ntc++;
1082 handle++;
1083 if (ntc == csq->desc_num)
1084 ntc = 0;
1085 }
1086 }
1087
1088 if (!complete)
1089 ret = -EAGAIN;
1090
1091 /* clean the command send queue */
1092 handle = hns_roce_cmq_csq_clean(hr_dev);
1093 if (handle != num)
1094 dev_warn(hr_dev->dev, "Cleaned %d, need to clean %d\n",
1095 handle, num);
1096
1097 spin_unlock_bh(&csq->lock);
1098
1099 return ret;
1100}
1101
e95e52a1 1102static int hns_roce_cmq_send(struct hns_roce_dev *hr_dev,
6a04aed6
WHX
1103 struct hns_roce_cmq_desc *desc, int num)
1104{
1105 int retval;
1106 int ret;
1107
1108 ret = hns_roce_v2_rst_process_cmd(hr_dev);
1109 if (ret == CMD_RST_PRC_SUCCESS)
1110 return 0;
1111 if (ret == CMD_RST_PRC_EBUSY)
b417c087 1112 return -EBUSY;
6a04aed6
WHX
1113
1114 ret = __hns_roce_cmq_send(hr_dev, desc, num);
1115 if (ret) {
1116 retval = hns_roce_v2_rst_process_cmd(hr_dev);
1117 if (retval == CMD_RST_PRC_SUCCESS)
1118 return 0;
1119 else if (retval == CMD_RST_PRC_EBUSY)
b417c087 1120 return -EBUSY;
6a04aed6
WHX
1121 }
1122
1123 return ret;
1124}
1125
281d0ccf 1126static int hns_roce_cmq_query_hw_info(struct hns_roce_dev *hr_dev)
cfc85f3e
WHX
1127{
1128 struct hns_roce_query_version *resp;
1129 struct hns_roce_cmq_desc desc;
1130 int ret;
1131
1132 hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_QUERY_HW_VER, true);
1133 ret = hns_roce_cmq_send(hr_dev, &desc, 1);
1134 if (ret)
1135 return ret;
1136
1137 resp = (struct hns_roce_query_version *)desc.data;
bfe86035 1138 hr_dev->hw_rev = le16_to_cpu(resp->rocee_hw_version);
3a63c964
LO
1139 hr_dev->vendor_id = hr_dev->pci_dev->vendor;
1140
1141 return 0;
1142}
1143
e075da5e
LC
1144static bool hns_roce_func_clr_chk_rst(struct hns_roce_dev *hr_dev)
1145{
1146 struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
1147 struct hnae3_handle *handle = priv->handle;
1148 const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
1149 unsigned long reset_cnt;
1150 bool sw_resetting;
1151 bool hw_resetting;
1152
1153 reset_cnt = ops->ae_dev_reset_cnt(handle);
1154 hw_resetting = ops->get_hw_reset_stat(handle);
1155 sw_resetting = ops->ae_dev_resetting(handle);
1156
1157 if (reset_cnt != hr_dev->reset_cnt || hw_resetting || sw_resetting)
1158 return true;
1159
1160 return false;
1161}
1162
/* Handle a function-clear attempt that raced with a reset.  Depending
 * on which kind of reset is observed, either declare the clear done
 * (the reset wiped the function state) or poll until the reset
 * finishes, disabling doorbells in the meantime.  @retval is the error
 * from the failed read, @flag indicates the initial write had failed.
 */
static void hns_roce_func_clr_rst_prc(struct hns_roce_dev *hr_dev, int retval,
				      int flag)
{
	struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
	struct hnae3_handle *handle = priv->handle;
	const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
	unsigned long instance_stage;
	unsigned long reset_cnt;
	unsigned long end;
	bool sw_resetting;
	bool hw_resetting;

	/* Sample all reset indicators once before deciding which case applies */
	instance_stage = handle->rinfo.instance_state;
	reset_cnt = ops->ae_dev_reset_cnt(handle);
	hw_resetting = ops->get_hw_reset_stat(handle);
	sw_resetting = ops->ae_dev_resetting(handle);

	if (reset_cnt != hr_dev->reset_cnt) {
		/* A full reset already completed; function state is gone */
		hr_dev->dis_db = true;
		hr_dev->is_reset = true;
		dev_info(hr_dev->dev, "Func clear success after reset.\n");
	} else if (hw_resetting) {
		hr_dev->dis_db = true;

		dev_warn(hr_dev->dev,
			 "Func clear is pending, device in resetting state.\n");
		/* Poll until the hardware reset finishes or we give up */
		end = HNS_ROCE_V2_HW_RST_TIMEOUT;
		while (end) {
			if (!ops->get_hw_reset_stat(handle)) {
				hr_dev->is_reset = true;
				dev_info(hr_dev->dev,
					 "Func clear success after reset.\n");
				return;
			}
			msleep(HNS_ROCE_V2_HW_RST_COMPLETION_WAIT);
			end -= HNS_ROCE_V2_HW_RST_COMPLETION_WAIT;
		}

		dev_warn(hr_dev->dev, "Func clear failed.\n");
	} else if (sw_resetting && instance_stage == HNS_ROCE_STATE_INIT) {
		hr_dev->dis_db = true;

		dev_warn(hr_dev->dev,
			 "Func clear is pending, device in resetting state.\n");
		/* Poll until the reset counter advances, i.e. sw reset done */
		end = HNS_ROCE_V2_HW_RST_TIMEOUT;
		while (end) {
			if (ops->ae_dev_reset_cnt(handle) !=
			    hr_dev->reset_cnt) {
				hr_dev->is_reset = true;
				dev_info(hr_dev->dev,
					 "Func clear success after sw reset\n");
				return;
			}
			msleep(HNS_ROCE_V2_HW_RST_COMPLETION_WAIT);
			end -= HNS_ROCE_V2_HW_RST_COMPLETION_WAIT;
		}

		dev_warn(hr_dev->dev, "Func clear failed because of unfinished sw reset\n");
	} else {
		/* No reset in sight: the command itself failed */
		if (retval && !flag)
			dev_warn(hr_dev->dev,
				 "Func clear read failed, ret = %d.\n", retval);

		dev_warn(hr_dev->dev, "Func clear failed.\n");
	}
}
/* Ask firmware to clear this function's hardware state, then poll a
 * read command until firmware reports FUNC_CLEAR_RST_FUN_DONE or the
 * timeout expires.  Any reset observed before or during the poll is
 * delegated to hns_roce_func_clr_rst_prc() via the out path.
 */
static void hns_roce_function_clear(struct hns_roce_dev *hr_dev)
{
	bool fclr_write_fail_flag = false;
	struct hns_roce_func_clear *resp;
	struct hns_roce_cmq_desc desc;
	unsigned long end;
	int ret = 0;

	/* Bail out early if a reset already makes the clear moot */
	if (hns_roce_func_clr_chk_rst(hr_dev))
		goto out;

	hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_FUNC_CLEAR, false);
	resp = (struct hns_roce_func_clear *)desc.data;

	/* Kick off the clear with a write command */
	ret = hns_roce_cmq_send(hr_dev, &desc, 1);
	if (ret) {
		fclr_write_fail_flag = true;
		dev_err(hr_dev->dev, "Func clear write failed, ret = %d.\n",
			ret);
		goto out;
	}

	msleep(HNS_ROCE_V2_READ_FUNC_CLEAR_FLAG_INTERVAL);
	end = HNS_ROCE_V2_FUNC_CLEAR_TIMEOUT_MSECS;
	while (end) {
		if (hns_roce_func_clr_chk_rst(hr_dev))
			goto out;
		msleep(HNS_ROCE_V2_READ_FUNC_CLEAR_FLAG_FAIL_WAIT);
		end -= HNS_ROCE_V2_READ_FUNC_CLEAR_FLAG_FAIL_WAIT;

		/* Re-issue the same opcode as a read to poll completion */
		hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_FUNC_CLEAR,
					      true);

		ret = hns_roce_cmq_send(hr_dev, &desc, 1);
		if (ret)
			continue;

		if (roce_get_bit(resp->func_done, FUNC_CLEAR_RST_FUN_DONE_S)) {
			hr_dev->is_reset = true;
			return;
		}
	}

out:
	hns_roce_func_clr_rst_prc(hr_dev, ret, fclr_write_fail_flag);
}
1275
3a63c964
LO
1276static int hns_roce_query_fw_ver(struct hns_roce_dev *hr_dev)
1277{
1278 struct hns_roce_query_fw_info *resp;
1279 struct hns_roce_cmq_desc desc;
1280 int ret;
1281
1282 hns_roce_cmq_setup_basic_desc(&desc, HNS_QUERY_FW_VER, true);
1283 ret = hns_roce_cmq_send(hr_dev, &desc, 1);
1284 if (ret)
1285 return ret;
1286
1287 resp = (struct hns_roce_query_fw_info *)desc.data;
1288 hr_dev->caps.fw_ver = (u64)(le32_to_cpu(resp->fw_ver));
cfc85f3e
WHX
1289
1290 return 0;
1291}
1292
/* Configure device-global parameters: the 1us time reference and the
 * UDP destination port used for RoCEv2 traffic.
 */
static int hns_roce_config_global_param(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_cfg_global_param *req;
	struct hns_roce_cmq_desc desc;

	hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_GLOBAL_PARAM,
				      false);

	req = (struct hns_roce_cfg_global_param *)desc.data;
	memset(req, 0, sizeof(*req));
	/* 0x3e8 = 1000 — presumably clock ticks per 1us (1ns tick);
	 * NOTE(review): confirm against the hip08 hardware manual.
	 */
	roce_set_field(req->time_cfg_udp_port,
		       CFG_GLOBAL_PARAM_DATA_0_ROCEE_TIME_1US_CFG_M,
		       CFG_GLOBAL_PARAM_DATA_0_ROCEE_TIME_1US_CFG_S, 0x3e8);
	/* 0x12b7 = 4791, the IANA-assigned RoCEv2 UDP port */
	roce_set_field(req->time_cfg_udp_port,
		       CFG_GLOBAL_PARAM_DATA_0_ROCEE_UDP_PORT_M,
		       CFG_GLOBAL_PARAM_DATA_0_ROCEE_UDP_PORT_S, 0x12b7);

	return hns_roce_cmq_send(hr_dev, &desc, 1);
}
1312
/* Query the PF's hardware resource quotas (BT entry counts for QPC,
 * SRQC, CQC, MPT and SCCC, plus the SL count) with a two-descriptor
 * command and decode them into hr_dev->caps.
 */
static int hns_roce_query_pf_resource(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_cmq_desc desc[2];
	struct hns_roce_pf_res_a *req_a;
	struct hns_roce_pf_res_b *req_b;
	int ret;
	int i;

	/* Chain two descriptors: all but the last carry the NEXT flag */
	for (i = 0; i < 2; i++) {
		hns_roce_cmq_setup_basic_desc(&desc[i],
					      HNS_ROCE_OPC_QUERY_PF_RES, true);

		if (i == 0)
			desc[i].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
		else
			desc[i].flag &= ~cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
	}

	ret = hns_roce_cmq_send(hr_dev, desc, 2);
	if (ret)
		return ret;

	req_a = (struct hns_roce_pf_res_a *)desc[0].data;
	req_b = (struct hns_roce_pf_res_b *)desc[1].data;

	hr_dev->caps.qpc_bt_num = roce_get_field(req_a->qpc_bt_idx_num,
						 PF_RES_DATA_1_PF_QPC_BT_NUM_M,
						 PF_RES_DATA_1_PF_QPC_BT_NUM_S);
	hr_dev->caps.srqc_bt_num = roce_get_field(req_a->srqc_bt_idx_num,
						PF_RES_DATA_2_PF_SRQC_BT_NUM_M,
						PF_RES_DATA_2_PF_SRQC_BT_NUM_S);
	hr_dev->caps.cqc_bt_num = roce_get_field(req_a->cqc_bt_idx_num,
						 PF_RES_DATA_3_PF_CQC_BT_NUM_M,
						 PF_RES_DATA_3_PF_CQC_BT_NUM_S);
	hr_dev->caps.mpt_bt_num = roce_get_field(req_a->mpt_bt_idx_num,
						 PF_RES_DATA_4_PF_MPT_BT_NUM_M,
						 PF_RES_DATA_4_PF_MPT_BT_NUM_S);

	hr_dev->caps.sl_num = roce_get_field(req_b->qid_idx_sl_num,
					     PF_RES_DATA_3_PF_SL_NUM_M,
					     PF_RES_DATA_3_PF_SL_NUM_S);
	hr_dev->caps.sccc_bt_num = roce_get_field(req_b->sccc_bt_idx_num,
					     PF_RES_DATA_4_PF_SCCC_BT_NUM_M,
					     PF_RES_DATA_4_PF_SCCC_BT_NUM_S);

	return 0;
}
1360
/* Query the PF's QPC/CQC timer base-address-table entry counts with a
 * two-descriptor command; only the first response descriptor carries
 * the fields we decode.
 */
static int hns_roce_query_pf_timer_resource(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_pf_timer_res_a *req_a;
	struct hns_roce_cmq_desc desc[2];
	int ret, i;

	/* Chain two descriptors: all but the last carry the NEXT flag */
	for (i = 0; i < 2; i++) {
		hns_roce_cmq_setup_basic_desc(&desc[i],
					      HNS_ROCE_OPC_QUERY_PF_TIMER_RES,
					      true);

		if (i == 0)
			desc[i].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
		else
			desc[i].flag &= ~cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
	}

	ret = hns_roce_cmq_send(hr_dev, desc, 2);
	if (ret)
		return ret;

	req_a = (struct hns_roce_pf_timer_res_a *)desc[0].data;

	hr_dev->caps.qpc_timer_bt_num =
				roce_get_field(req_a->qpc_timer_bt_idx_num,
					PF_RES_DATA_1_PF_QPC_TIMER_BT_NUM_M,
					PF_RES_DATA_1_PF_QPC_TIMER_BT_NUM_S);
	hr_dev->caps.cqc_timer_bt_num =
				roce_get_field(req_a->cqc_timer_bt_idx_num,
					PF_RES_DATA_2_PF_CQC_TIMER_BT_NUM_M,
					PF_RES_DATA_2_PF_CQC_TIMER_BT_NUM_S);

	return 0;
}
1395
/* Configure switch parameters for @vf_id using a read-modify-write:
 * first read the current switch config into the descriptor, then flip
 * the same descriptor into a write command, adjust the loopback /
 * destination-override bits, and send it back.
 */
static int hns_roce_set_vf_switch_param(struct hns_roce_dev *hr_dev, int vf_id)
{
	struct hns_roce_cmq_desc desc;
	struct hns_roce_vf_switch *swt;
	int ret;

	swt = (struct hns_roce_vf_switch *)desc.data;
	hns_roce_cmq_setup_basic_desc(&desc, HNS_SWITCH_PARAMETER_CFG, true);
	swt->rocee_sel |= cpu_to_le32(HNS_ICL_SWITCH_CMD_ROCEE_SEL);
	roce_set_field(swt->fun_id, VF_SWITCH_DATA_FUN_ID_VF_ID_M,
		       VF_SWITCH_DATA_FUN_ID_VF_ID_S, vf_id);
	ret = hns_roce_cmq_send(hr_dev, &desc, 1);
	if (ret)
		return ret;

	/* Convert the descriptor (still holding the read-back data) into
	 * a write command: rebuild the flag word without the WR bit.
	 */
	desc.flag =
		cpu_to_le16(HNS_ROCE_CMD_FLAG_NO_INTR | HNS_ROCE_CMD_FLAG_IN);
	desc.flag &= cpu_to_le16(~HNS_ROCE_CMD_FLAG_WR);
	roce_set_bit(swt->cfg, VF_SWITCH_DATA_CFG_ALW_LPBK_S, 1);
	roce_set_bit(swt->cfg, VF_SWITCH_DATA_CFG_ALW_LCL_LPBK_S, 0);
	roce_set_bit(swt->cfg, VF_SWITCH_DATA_CFG_ALW_DST_OVRD_S, 1);

	return hns_roce_cmq_send(hr_dev, &desc, 1);
}
1420
/* Allocate hardware resources for the VF with a two-descriptor command:
 * descriptor 0 carries the context BT index/count pairs (QPC, SRQC,
 * CQC, MPT, EQC), descriptor 1 the SMAC/SGID/QID/SCCC pairs.  All
 * start indexes are 0 and counts come from the HNS_ROCE_VF_* constants.
 */
static int hns_roce_alloc_vf_resource(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_cmq_desc desc[2];
	struct hns_roce_vf_res_a *req_a;
	struct hns_roce_vf_res_b *req_b;
	int i;

	req_a = (struct hns_roce_vf_res_a *)desc[0].data;
	req_b = (struct hns_roce_vf_res_b *)desc[1].data;
	memset(req_a, 0, sizeof(*req_a));
	memset(req_b, 0, sizeof(*req_b));
	for (i = 0; i < 2; i++) {
		hns_roce_cmq_setup_basic_desc(&desc[i],
					      HNS_ROCE_OPC_ALLOC_VF_RES, false);

		/* Chain the pair: only the first carries the NEXT flag */
		if (i == 0)
			desc[i].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
		else
			desc[i].flag &= ~cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);

		if (i == 0) {
			roce_set_field(req_a->vf_qpc_bt_idx_num,
				       VF_RES_A_DATA_1_VF_QPC_BT_IDX_M,
				       VF_RES_A_DATA_1_VF_QPC_BT_IDX_S, 0);
			roce_set_field(req_a->vf_qpc_bt_idx_num,
				       VF_RES_A_DATA_1_VF_QPC_BT_NUM_M,
				       VF_RES_A_DATA_1_VF_QPC_BT_NUM_S,
				       HNS_ROCE_VF_QPC_BT_NUM);

			roce_set_field(req_a->vf_srqc_bt_idx_num,
				       VF_RES_A_DATA_2_VF_SRQC_BT_IDX_M,
				       VF_RES_A_DATA_2_VF_SRQC_BT_IDX_S, 0);
			roce_set_field(req_a->vf_srqc_bt_idx_num,
				       VF_RES_A_DATA_2_VF_SRQC_BT_NUM_M,
				       VF_RES_A_DATA_2_VF_SRQC_BT_NUM_S,
				       HNS_ROCE_VF_SRQC_BT_NUM);

			roce_set_field(req_a->vf_cqc_bt_idx_num,
				       VF_RES_A_DATA_3_VF_CQC_BT_IDX_M,
				       VF_RES_A_DATA_3_VF_CQC_BT_IDX_S, 0);
			roce_set_field(req_a->vf_cqc_bt_idx_num,
				       VF_RES_A_DATA_3_VF_CQC_BT_NUM_M,
				       VF_RES_A_DATA_3_VF_CQC_BT_NUM_S,
				       HNS_ROCE_VF_CQC_BT_NUM);

			roce_set_field(req_a->vf_mpt_bt_idx_num,
				       VF_RES_A_DATA_4_VF_MPT_BT_IDX_M,
				       VF_RES_A_DATA_4_VF_MPT_BT_IDX_S, 0);
			roce_set_field(req_a->vf_mpt_bt_idx_num,
				       VF_RES_A_DATA_4_VF_MPT_BT_NUM_M,
				       VF_RES_A_DATA_4_VF_MPT_BT_NUM_S,
				       HNS_ROCE_VF_MPT_BT_NUM);

			roce_set_field(req_a->vf_eqc_bt_idx_num,
				       VF_RES_A_DATA_5_VF_EQC_IDX_M,
				       VF_RES_A_DATA_5_VF_EQC_IDX_S, 0);
			roce_set_field(req_a->vf_eqc_bt_idx_num,
				       VF_RES_A_DATA_5_VF_EQC_NUM_M,
				       VF_RES_A_DATA_5_VF_EQC_NUM_S,
				       HNS_ROCE_VF_EQC_NUM);
		} else {
			roce_set_field(req_b->vf_smac_idx_num,
				       VF_RES_B_DATA_1_VF_SMAC_IDX_M,
				       VF_RES_B_DATA_1_VF_SMAC_IDX_S, 0);
			roce_set_field(req_b->vf_smac_idx_num,
				       VF_RES_B_DATA_1_VF_SMAC_NUM_M,
				       VF_RES_B_DATA_1_VF_SMAC_NUM_S,
				       HNS_ROCE_VF_SMAC_NUM);

			roce_set_field(req_b->vf_sgid_idx_num,
				       VF_RES_B_DATA_2_VF_SGID_IDX_M,
				       VF_RES_B_DATA_2_VF_SGID_IDX_S, 0);
			roce_set_field(req_b->vf_sgid_idx_num,
				       VF_RES_B_DATA_2_VF_SGID_NUM_M,
				       VF_RES_B_DATA_2_VF_SGID_NUM_S,
				       HNS_ROCE_VF_SGID_NUM);

			roce_set_field(req_b->vf_qid_idx_sl_num,
				       VF_RES_B_DATA_3_VF_QID_IDX_M,
				       VF_RES_B_DATA_3_VF_QID_IDX_S, 0);
			roce_set_field(req_b->vf_qid_idx_sl_num,
				       VF_RES_B_DATA_3_VF_SL_NUM_M,
				       VF_RES_B_DATA_3_VF_SL_NUM_S,
				       HNS_ROCE_VF_SL_NUM);

			roce_set_field(req_b->vf_sccc_idx_num,
				       VF_RES_B_DATA_4_VF_SCCC_BT_IDX_M,
				       VF_RES_B_DATA_4_VF_SCCC_BT_IDX_S, 0);
			roce_set_field(req_b->vf_sccc_idx_num,
				       VF_RES_B_DATA_4_VF_SCCC_BT_NUM_M,
				       VF_RES_B_DATA_4_VF_SCCC_BT_NUM_S,
				       HNS_ROCE_VF_SCCC_BT_NUM);
		}
	}

	return hns_roce_cmq_send(hr_dev, desc, 2);
}
1518
/* Program the base-address-table attributes (BA page size, buffer page
 * size and hop count) for the QPC, SRQC, CQC, MPT and SCCC tables.
 * Page-size fields are encoded as a shift offset by PG_SHIFT_OFFSET,
 * and HNS_ROCE_HOP_NUM_0 is translated to the hardware's 0 encoding.
 */
static int hns_roce_v2_set_bt(struct hns_roce_dev *hr_dev)
{
	u8 srqc_hop_num = hr_dev->caps.srqc_hop_num;
	u8 qpc_hop_num = hr_dev->caps.qpc_hop_num;
	u8 cqc_hop_num = hr_dev->caps.cqc_hop_num;
	u8 mpt_hop_num = hr_dev->caps.mpt_hop_num;
	u8 sccc_hop_num = hr_dev->caps.sccc_hop_num;
	struct hns_roce_cfg_bt_attr *req;
	struct hns_roce_cmq_desc desc;

	hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_BT_ATTR, false);
	req = (struct hns_roce_cfg_bt_attr *)desc.data;
	memset(req, 0, sizeof(*req));

	roce_set_field(req->vf_qpc_cfg, CFG_BT_ATTR_DATA_0_VF_QPC_BA_PGSZ_M,
		       CFG_BT_ATTR_DATA_0_VF_QPC_BA_PGSZ_S,
		       hr_dev->caps.qpc_ba_pg_sz + PG_SHIFT_OFFSET);
	roce_set_field(req->vf_qpc_cfg, CFG_BT_ATTR_DATA_0_VF_QPC_BUF_PGSZ_M,
		       CFG_BT_ATTR_DATA_0_VF_QPC_BUF_PGSZ_S,
		       hr_dev->caps.qpc_buf_pg_sz + PG_SHIFT_OFFSET);
	roce_set_field(req->vf_qpc_cfg, CFG_BT_ATTR_DATA_0_VF_QPC_HOPNUM_M,
		       CFG_BT_ATTR_DATA_0_VF_QPC_HOPNUM_S,
		       qpc_hop_num == HNS_ROCE_HOP_NUM_0 ? 0 : qpc_hop_num);

	roce_set_field(req->vf_srqc_cfg, CFG_BT_ATTR_DATA_1_VF_SRQC_BA_PGSZ_M,
		       CFG_BT_ATTR_DATA_1_VF_SRQC_BA_PGSZ_S,
		       hr_dev->caps.srqc_ba_pg_sz + PG_SHIFT_OFFSET);
	roce_set_field(req->vf_srqc_cfg, CFG_BT_ATTR_DATA_1_VF_SRQC_BUF_PGSZ_M,
		       CFG_BT_ATTR_DATA_1_VF_SRQC_BUF_PGSZ_S,
		       hr_dev->caps.srqc_buf_pg_sz + PG_SHIFT_OFFSET);
	roce_set_field(req->vf_srqc_cfg, CFG_BT_ATTR_DATA_1_VF_SRQC_HOPNUM_M,
		       CFG_BT_ATTR_DATA_1_VF_SRQC_HOPNUM_S,
		       srqc_hop_num == HNS_ROCE_HOP_NUM_0 ? 0 : srqc_hop_num);

	roce_set_field(req->vf_cqc_cfg, CFG_BT_ATTR_DATA_2_VF_CQC_BA_PGSZ_M,
		       CFG_BT_ATTR_DATA_2_VF_CQC_BA_PGSZ_S,
		       hr_dev->caps.cqc_ba_pg_sz + PG_SHIFT_OFFSET);
	roce_set_field(req->vf_cqc_cfg, CFG_BT_ATTR_DATA_2_VF_CQC_BUF_PGSZ_M,
		       CFG_BT_ATTR_DATA_2_VF_CQC_BUF_PGSZ_S,
		       hr_dev->caps.cqc_buf_pg_sz + PG_SHIFT_OFFSET);
	roce_set_field(req->vf_cqc_cfg, CFG_BT_ATTR_DATA_2_VF_CQC_HOPNUM_M,
		       CFG_BT_ATTR_DATA_2_VF_CQC_HOPNUM_S,
		       cqc_hop_num == HNS_ROCE_HOP_NUM_0 ? 0 : cqc_hop_num);

	roce_set_field(req->vf_mpt_cfg, CFG_BT_ATTR_DATA_3_VF_MPT_BA_PGSZ_M,
		       CFG_BT_ATTR_DATA_3_VF_MPT_BA_PGSZ_S,
		       hr_dev->caps.mpt_ba_pg_sz + PG_SHIFT_OFFSET);
	roce_set_field(req->vf_mpt_cfg, CFG_BT_ATTR_DATA_3_VF_MPT_BUF_PGSZ_M,
		       CFG_BT_ATTR_DATA_3_VF_MPT_BUF_PGSZ_S,
		       hr_dev->caps.mpt_buf_pg_sz + PG_SHIFT_OFFSET);
	roce_set_field(req->vf_mpt_cfg, CFG_BT_ATTR_DATA_3_VF_MPT_HOPNUM_M,
		       CFG_BT_ATTR_DATA_3_VF_MPT_HOPNUM_S,
		       mpt_hop_num == HNS_ROCE_HOP_NUM_0 ? 0 : mpt_hop_num);

	roce_set_field(req->vf_sccc_cfg,
		       CFG_BT_ATTR_DATA_4_VF_SCCC_BA_PGSZ_M,
		       CFG_BT_ATTR_DATA_4_VF_SCCC_BA_PGSZ_S,
		       hr_dev->caps.sccc_ba_pg_sz + PG_SHIFT_OFFSET);
	roce_set_field(req->vf_sccc_cfg,
		       CFG_BT_ATTR_DATA_4_VF_SCCC_BUF_PGSZ_M,
		       CFG_BT_ATTR_DATA_4_VF_SCCC_BUF_PGSZ_S,
		       hr_dev->caps.sccc_buf_pg_sz + PG_SHIFT_OFFSET);
	roce_set_field(req->vf_sccc_cfg,
		       CFG_BT_ATTR_DATA_4_VF_SCCC_HOPNUM_M,
		       CFG_BT_ATTR_DATA_4_VF_SCCC_HOPNUM_S,
		       sccc_hop_num ==
			      HNS_ROCE_HOP_NUM_0 ? 0 : sccc_hop_num);

	return hns_roce_cmq_send(hr_dev, &desc, 1);
}
1589
/* Fill hr_dev->caps with the compile-time default capability set for
 * hip08, used when capabilities are not queried from firmware.  The
 * hip08-B (and later) revision additionally enables atomic/MW/SRQ/FRMR/
 * flow-control features and the timer and SCC context tables.
 */
static void set_default_caps(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_caps *caps = &hr_dev->caps;

	caps->num_qps		= HNS_ROCE_V2_MAX_QP_NUM;
	caps->max_wqes		= HNS_ROCE_V2_MAX_WQE_NUM;
	caps->num_cqs		= HNS_ROCE_V2_MAX_CQ_NUM;
	caps->num_srqs		= HNS_ROCE_V2_MAX_SRQ_NUM;
	caps->min_cqes		= HNS_ROCE_MIN_CQE_NUM;
	caps->max_cqes		= HNS_ROCE_V2_MAX_CQE_NUM;
	caps->max_sq_sg		= HNS_ROCE_V2_MAX_SQ_SGE_NUM;
	caps->max_extend_sg	= HNS_ROCE_V2_MAX_EXTEND_SGE_NUM;
	caps->max_rq_sg		= HNS_ROCE_V2_MAX_RQ_SGE_NUM;
	caps->max_sq_inline	= HNS_ROCE_V2_MAX_SQ_INLINE;
	caps->num_uars		= HNS_ROCE_V2_UAR_NUM;
	caps->phy_num_uars	= HNS_ROCE_V2_PHY_UAR_NUM;
	caps->num_aeq_vectors	= HNS_ROCE_V2_AEQE_VEC_NUM;
	caps->num_comp_vectors	= HNS_ROCE_V2_COMP_VEC_NUM;
	caps->num_other_vectors = HNS_ROCE_V2_ABNORMAL_VEC_NUM;
	caps->num_mtpts		= HNS_ROCE_V2_MAX_MTPT_NUM;
	caps->num_mtt_segs	= HNS_ROCE_V2_MAX_MTT_SEGS;
	caps->num_cqe_segs	= HNS_ROCE_V2_MAX_CQE_SEGS;
	caps->num_srqwqe_segs	= HNS_ROCE_V2_MAX_SRQWQE_SEGS;
	caps->num_idx_segs	= HNS_ROCE_V2_MAX_IDX_SEGS;
	caps->num_pds		= HNS_ROCE_V2_MAX_PD_NUM;
	caps->max_qp_init_rdma	= HNS_ROCE_V2_MAX_QP_INIT_RDMA;
	caps->max_qp_dest_rdma	= HNS_ROCE_V2_MAX_QP_DEST_RDMA;
	caps->max_sq_desc_sz	= HNS_ROCE_V2_MAX_SQ_DESC_SZ;
	caps->max_rq_desc_sz	= HNS_ROCE_V2_MAX_RQ_DESC_SZ;
	caps->max_srq_desc_sz	= HNS_ROCE_V2_MAX_SRQ_DESC_SZ;
	caps->qpc_entry_sz	= HNS_ROCE_V2_QPC_ENTRY_SZ;
	caps->irrl_entry_sz	= HNS_ROCE_V2_IRRL_ENTRY_SZ;
	caps->trrl_entry_sz	= HNS_ROCE_V2_EXT_ATOMIC_TRRL_ENTRY_SZ;
	caps->cqc_entry_sz	= HNS_ROCE_V2_CQC_ENTRY_SZ;
	caps->srqc_entry_sz	= HNS_ROCE_V2_SRQC_ENTRY_SZ;
	caps->mtpt_entry_sz	= HNS_ROCE_V2_MTPT_ENTRY_SZ;
	caps->mtt_entry_sz	= HNS_ROCE_V2_MTT_ENTRY_SZ;
	caps->idx_entry_sz	= HNS_ROCE_V2_IDX_ENTRY_SZ;
	caps->cq_entry_sz	= HNS_ROCE_V2_CQE_ENTRY_SIZE;
	caps->page_size_cap	= HNS_ROCE_V2_PAGE_SIZE_SUPPORTED;
	caps->reserved_lkey	= 0;
	caps->reserved_pds	= 0;
	caps->reserved_mrws	= 1;
	caps->reserved_uars	= 0;
	caps->reserved_cqs	= 0;
	caps->reserved_srqs	= 0;
	caps->reserved_qps	= HNS_ROCE_V2_RSV_QPS;

	/* Page-size fields are shifts relative to 4K; hop counts select
	 * the number of BT indirection levels per table.
	 */
	caps->qpc_ba_pg_sz	= 0;
	caps->qpc_buf_pg_sz	= 0;
	caps->qpc_hop_num	= HNS_ROCE_CONTEXT_HOP_NUM;
	caps->srqc_ba_pg_sz	= 0;
	caps->srqc_buf_pg_sz	= 0;
	caps->srqc_hop_num	= HNS_ROCE_CONTEXT_HOP_NUM;
	caps->cqc_ba_pg_sz	= 0;
	caps->cqc_buf_pg_sz	= 0;
	caps->cqc_hop_num	= HNS_ROCE_CONTEXT_HOP_NUM;
	caps->mpt_ba_pg_sz	= 0;
	caps->mpt_buf_pg_sz	= 0;
	caps->mpt_hop_num	= HNS_ROCE_CONTEXT_HOP_NUM;
	caps->mtt_ba_pg_sz	= 0;
	caps->mtt_buf_pg_sz	= 0;
	caps->mtt_hop_num	= HNS_ROCE_MTT_HOP_NUM;
	caps->wqe_sq_hop_num	= HNS_ROCE_SQWQE_HOP_NUM;
	caps->wqe_sge_hop_num	= HNS_ROCE_EXT_SGE_HOP_NUM;
	caps->wqe_rq_hop_num	= HNS_ROCE_RQWQE_HOP_NUM;
	caps->cqe_ba_pg_sz	= HNS_ROCE_BA_PG_SZ_SUPPORTED_256K;
	caps->cqe_buf_pg_sz	= 0;
	caps->cqe_hop_num	= HNS_ROCE_CQE_HOP_NUM;
	caps->srqwqe_ba_pg_sz	= 0;
	caps->srqwqe_buf_pg_sz	= 0;
	caps->srqwqe_hop_num	= HNS_ROCE_SRQWQE_HOP_NUM;
	caps->idx_ba_pg_sz	= 0;
	caps->idx_buf_pg_sz	= 0;
	caps->idx_hop_num	= HNS_ROCE_IDX_HOP_NUM;
	caps->chunk_sz		= HNS_ROCE_V2_TABLE_CHUNK_SIZE;

	caps->flags		= HNS_ROCE_CAP_FLAG_REREG_MR |
				  HNS_ROCE_CAP_FLAG_ROCE_V1_V2 |
				  HNS_ROCE_CAP_FLAG_RQ_INLINE |
				  HNS_ROCE_CAP_FLAG_RECORD_DB |
				  HNS_ROCE_CAP_FLAG_SQ_RECORD_DB;

	caps->pkey_table_len[0] = 1;
	caps->gid_table_len[0]	= HNS_ROCE_V2_GID_INDEX_NUM;
	caps->ceqe_depth	= HNS_ROCE_V2_COMP_EQE_NUM;
	caps->aeqe_depth	= HNS_ROCE_V2_ASYNC_EQE_NUM;
	caps->local_ca_ack_delay = 0;
	caps->max_mtu = IB_MTU_4096;

	caps->max_srq_wrs	= HNS_ROCE_V2_MAX_SRQ_WR;
	caps->max_srq_sges	= HNS_ROCE_V2_MAX_SRQ_SGE;

	/* hip08-B adds atomics, memory windows, SRQ, FRMR, QP flow
	 * control, and the QPC/CQC timer and SCC context tables.
	 */
	if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP08_B) {
		caps->flags |= HNS_ROCE_CAP_FLAG_ATOMIC | HNS_ROCE_CAP_FLAG_MW |
			       HNS_ROCE_CAP_FLAG_SRQ | HNS_ROCE_CAP_FLAG_FRMR |
			       HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL;

		caps->num_qpc_timer	  = HNS_ROCE_V2_MAX_QPC_TIMER_NUM;
		caps->qpc_timer_entry_sz  = HNS_ROCE_V2_QPC_TIMER_ENTRY_SZ;
		caps->qpc_timer_ba_pg_sz  = 0;
		caps->qpc_timer_buf_pg_sz = 0;
		caps->qpc_timer_hop_num   = HNS_ROCE_HOP_NUM_0;
		caps->num_cqc_timer	  = HNS_ROCE_V2_MAX_CQC_TIMER_NUM;
		caps->cqc_timer_entry_sz  = HNS_ROCE_V2_CQC_TIMER_ENTRY_SZ;
		caps->cqc_timer_ba_pg_sz  = 0;
		caps->cqc_timer_buf_pg_sz = 0;
		caps->cqc_timer_hop_num   = HNS_ROCE_HOP_NUM_0;

		caps->sccc_entry_sz	= HNS_ROCE_V2_SCCC_ENTRY_SZ;
		caps->sccc_ba_pg_sz	= 0;
		caps->sccc_buf_pg_sz    = 0;
		caps->sccc_hop_num	= HNS_ROCE_SCCC_HOP_NUM;
	}
}
1705
1706static void calc_pg_sz(int obj_num, int obj_size, int hop_num, int ctx_bt_num,
1707 int *buf_page_size, int *bt_page_size, u32 hem_type)
1708{
1709 u64 obj_per_chunk;
1710 int bt_chunk_size = 1 << PAGE_SHIFT;
1711 int buf_chunk_size = 1 << PAGE_SHIFT;
1712 int obj_per_chunk_default = buf_chunk_size / obj_size;
1713
1714 *buf_page_size = 0;
1715 *bt_page_size = 0;
1716
1717 switch (hop_num) {
1718 case 3:
1719 obj_per_chunk = ctx_bt_num * (bt_chunk_size / BA_BYTE_LEN) *
1720 (bt_chunk_size / BA_BYTE_LEN) *
1721 (bt_chunk_size / BA_BYTE_LEN) *
1722 obj_per_chunk_default;
1723 break;
1724 case 2:
1725 obj_per_chunk = ctx_bt_num * (bt_chunk_size / BA_BYTE_LEN) *
1726 (bt_chunk_size / BA_BYTE_LEN) *
1727 obj_per_chunk_default;
1728 break;
1729 case 1:
1730 obj_per_chunk = ctx_bt_num * (bt_chunk_size / BA_BYTE_LEN) *
1731 obj_per_chunk_default;
1732 break;
1733 case HNS_ROCE_HOP_NUM_0:
1734 obj_per_chunk = ctx_bt_num * obj_per_chunk_default;
1735 break;
1736 default:
1737 pr_err("Table %d not support hop_num = %d!\n", hem_type,
1738 hop_num);
1739 return;
1740 }
1741
1742 if (hem_type >= HEM_TYPE_MTT)
1743 *bt_page_size = ilog2(DIV_ROUND_UP(obj_num, obj_per_chunk));
1744 else
1745 *buf_page_size = ilog2(DIV_ROUND_UP(obj_num, obj_per_chunk));
1746}
1747
1748static int hns_roce_query_pf_caps(struct hns_roce_dev *hr_dev)
1749{
1750 struct hns_roce_cmq_desc desc[HNS_ROCE_QUERY_PF_CAPS_CMD_NUM];
1751 struct hns_roce_caps *caps = &hr_dev->caps;
1752 struct hns_roce_query_pf_caps_a *resp_a;
1753 struct hns_roce_query_pf_caps_b *resp_b;
1754 struct hns_roce_query_pf_caps_c *resp_c;
1755 struct hns_roce_query_pf_caps_d *resp_d;
1756 struct hns_roce_query_pf_caps_e *resp_e;
1757 int ctx_hop_num;
1758 int pbl_hop_num;
1759 int ret;
1760 int i;
1761
1762 for (i = 0; i < HNS_ROCE_QUERY_PF_CAPS_CMD_NUM; i++) {
1763 hns_roce_cmq_setup_basic_desc(&desc[i],
1764 HNS_ROCE_OPC_QUERY_PF_CAPS_NUM,
1765 true);
1766 if (i < (HNS_ROCE_QUERY_PF_CAPS_CMD_NUM - 1))
1767 desc[i].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
1768 else
1769 desc[i].flag &= ~cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
1770 }
1771
1772 ret = hns_roce_cmq_send(hr_dev, desc, HNS_ROCE_QUERY_PF_CAPS_CMD_NUM);
1773 if (ret)
1774 return ret;
1775
1776 resp_a = (struct hns_roce_query_pf_caps_a *)desc[0].data;
1777 resp_b = (struct hns_roce_query_pf_caps_b *)desc[1].data;
1778 resp_c = (struct hns_roce_query_pf_caps_c *)desc[2].data;
1779 resp_d = (struct hns_roce_query_pf_caps_d *)desc[3].data;
1780 resp_e = (struct hns_roce_query_pf_caps_e *)desc[4].data;
1781
1782 caps->local_ca_ack_delay = resp_a->local_ca_ack_delay;
1783 caps->max_sq_sg = le16_to_cpu(resp_a->max_sq_sg);
1784 caps->max_sq_inline = le16_to_cpu(resp_a->max_sq_inline);
1785 caps->max_rq_sg = le16_to_cpu(resp_a->max_rq_sg);
1786 caps->max_extend_sg = le32_to_cpu(resp_a->max_extend_sg);
1787 caps->num_qpc_timer = le16_to_cpu(resp_a->num_qpc_timer);
1788 caps->num_cqc_timer = le16_to_cpu(resp_a->num_cqc_timer);
1789 caps->max_srq_sges = le16_to_cpu(resp_a->max_srq_sges);
1790 caps->num_aeq_vectors = resp_a->num_aeq_vectors;
1791 caps->num_other_vectors = resp_a->num_other_vectors;
1792 caps->max_sq_desc_sz = resp_a->max_sq_desc_sz;
1793 caps->max_rq_desc_sz = resp_a->max_rq_desc_sz;
1794 caps->max_srq_desc_sz = resp_a->max_srq_desc_sz;
1795 caps->cq_entry_sz = resp_a->cq_entry_sz;
1796
1797 caps->mtpt_entry_sz = resp_b->mtpt_entry_sz;
1798 caps->irrl_entry_sz = resp_b->irrl_entry_sz;
1799 caps->trrl_entry_sz = resp_b->trrl_entry_sz;
1800 caps->cqc_entry_sz = resp_b->cqc_entry_sz;
1801 caps->srqc_entry_sz = resp_b->srqc_entry_sz;
1802 caps->idx_entry_sz = resp_b->idx_entry_sz;
1803 caps->sccc_entry_sz = resp_b->scc_ctx_entry_sz;
1804 caps->max_mtu = resp_b->max_mtu;
1805 caps->qpc_entry_sz = le16_to_cpu(resp_b->qpc_entry_sz);
1806 caps->min_cqes = resp_b->min_cqes;
1807 caps->min_wqes = resp_b->min_wqes;
1808 caps->page_size_cap = le32_to_cpu(resp_b->page_size_cap);
1809 caps->pkey_table_len[0] = resp_b->pkey_table_len;
1810 caps->phy_num_uars = resp_b->phy_num_uars;
1811 ctx_hop_num = resp_b->ctx_hop_num;
1812 pbl_hop_num = resp_b->pbl_hop_num;
1813
1814 caps->num_pds = 1 << roce_get_field(resp_c->cap_flags_num_pds,
1815 V2_QUERY_PF_CAPS_C_NUM_PDS_M,
1816 V2_QUERY_PF_CAPS_C_NUM_PDS_S);
1817 caps->flags = roce_get_field(resp_c->cap_flags_num_pds,
1818 V2_QUERY_PF_CAPS_C_CAP_FLAGS_M,
1819 V2_QUERY_PF_CAPS_C_CAP_FLAGS_S);
1820 caps->num_cqs = 1 << roce_get_field(resp_c->max_gid_num_cqs,
1821 V2_QUERY_PF_CAPS_C_NUM_CQS_M,
1822 V2_QUERY_PF_CAPS_C_NUM_CQS_S);
1823 caps->gid_table_len[0] = roce_get_field(resp_c->max_gid_num_cqs,
1824 V2_QUERY_PF_CAPS_C_MAX_GID_M,
1825 V2_QUERY_PF_CAPS_C_MAX_GID_S);
1826 caps->max_cqes = 1 << roce_get_field(resp_c->cq_depth,
1827 V2_QUERY_PF_CAPS_C_CQ_DEPTH_M,
1828 V2_QUERY_PF_CAPS_C_CQ_DEPTH_S);
1829 caps->num_mtpts = 1 << roce_get_field(resp_c->num_mrws,
1830 V2_QUERY_PF_CAPS_C_NUM_MRWS_M,
1831 V2_QUERY_PF_CAPS_C_NUM_MRWS_S);
1832 caps->num_qps = 1 << roce_get_field(resp_c->ord_num_qps,
1833 V2_QUERY_PF_CAPS_C_NUM_QPS_M,
1834 V2_QUERY_PF_CAPS_C_NUM_QPS_S);
1835 caps->max_qp_init_rdma = roce_get_field(resp_c->ord_num_qps,
1836 V2_QUERY_PF_CAPS_C_MAX_ORD_M,
1837 V2_QUERY_PF_CAPS_C_MAX_ORD_S);
1838 caps->max_qp_dest_rdma = caps->max_qp_init_rdma;
1839 caps->max_wqes = 1 << le16_to_cpu(resp_c->sq_depth);
1840 caps->num_srqs = 1 << roce_get_field(resp_d->wq_hop_num_max_srqs,
1841 V2_QUERY_PF_CAPS_D_NUM_SRQS_M,
1842 V2_QUERY_PF_CAPS_D_NUM_SRQS_S);
1843 caps->max_srq_wrs = 1 << le16_to_cpu(resp_d->srq_depth);
1844 caps->ceqe_depth = 1 << roce_get_field(resp_d->num_ceqs_ceq_depth,
1845 V2_QUERY_PF_CAPS_D_CEQ_DEPTH_M,
1846 V2_QUERY_PF_CAPS_D_CEQ_DEPTH_S);
1847 caps->num_comp_vectors = roce_get_field(resp_d->num_ceqs_ceq_depth,
1848 V2_QUERY_PF_CAPS_D_NUM_CEQS_M,
1849 V2_QUERY_PF_CAPS_D_NUM_CEQS_S);
1850 caps->aeqe_depth = 1 << roce_get_field(resp_d->arm_st_aeq_depth,
1851 V2_QUERY_PF_CAPS_D_AEQ_DEPTH_M,
1852 V2_QUERY_PF_CAPS_D_AEQ_DEPTH_S);
1853 caps->default_aeq_arm_st = roce_get_field(resp_d->arm_st_aeq_depth,
1854 V2_QUERY_PF_CAPS_D_AEQ_ARM_ST_M,
1855 V2_QUERY_PF_CAPS_D_AEQ_ARM_ST_S);
1856 caps->default_ceq_arm_st = roce_get_field(resp_d->arm_st_aeq_depth,
1857 V2_QUERY_PF_CAPS_D_CEQ_ARM_ST_M,
1858 V2_QUERY_PF_CAPS_D_CEQ_ARM_ST_S);
1859 caps->reserved_pds = roce_get_field(resp_d->num_uars_rsv_pds,
1860 V2_QUERY_PF_CAPS_D_RSV_PDS_M,
1861 V2_QUERY_PF_CAPS_D_RSV_PDS_S);
1862 caps->num_uars = 1 << roce_get_field(resp_d->num_uars_rsv_pds,
1863 V2_QUERY_PF_CAPS_D_NUM_UARS_M,
1864 V2_QUERY_PF_CAPS_D_NUM_UARS_S);
1865 caps->reserved_qps = roce_get_field(resp_d->rsv_uars_rsv_qps,
1866 V2_QUERY_PF_CAPS_D_RSV_QPS_M,
1867 V2_QUERY_PF_CAPS_D_RSV_QPS_S);
1868 caps->reserved_uars = roce_get_field(resp_d->rsv_uars_rsv_qps,
1869 V2_QUERY_PF_CAPS_D_RSV_UARS_M,
1870 V2_QUERY_PF_CAPS_D_RSV_UARS_S);
1871 caps->reserved_mrws = roce_get_field(resp_e->chunk_size_shift_rsv_mrws,
1872 V2_QUERY_PF_CAPS_E_RSV_MRWS_M,
1873 V2_QUERY_PF_CAPS_E_RSV_MRWS_S);
1874 caps->chunk_sz = 1 << roce_get_field(resp_e->chunk_size_shift_rsv_mrws,
1875 V2_QUERY_PF_CAPS_E_CHUNK_SIZE_SHIFT_M,
1876 V2_QUERY_PF_CAPS_E_CHUNK_SIZE_SHIFT_S);
1877 caps->reserved_cqs = roce_get_field(resp_e->rsv_cqs,
1878 V2_QUERY_PF_CAPS_E_RSV_CQS_M,
1879 V2_QUERY_PF_CAPS_E_RSV_CQS_S);
1880 caps->reserved_srqs = roce_get_field(resp_e->rsv_srqs,
1881 V2_QUERY_PF_CAPS_E_RSV_SRQS_M,
1882 V2_QUERY_PF_CAPS_E_RSV_SRQS_S);
1883 caps->reserved_lkey = roce_get_field(resp_e->rsv_lkey,
1884 V2_QUERY_PF_CAPS_E_RSV_LKEYS_M,
1885 V2_QUERY_PF_CAPS_E_RSV_LKEYS_S);
1886 caps->default_ceq_max_cnt = le16_to_cpu(resp_e->ceq_max_cnt);
1887 caps->default_ceq_period = le16_to_cpu(resp_e->ceq_period);
1888 caps->default_aeq_max_cnt = le16_to_cpu(resp_e->aeq_max_cnt);
1889 caps->default_aeq_period = le16_to_cpu(resp_e->aeq_period);
1890
1891 caps->qpc_timer_entry_sz = HNS_ROCE_V2_QPC_TIMER_ENTRY_SZ;
1892 caps->cqc_timer_entry_sz = HNS_ROCE_V2_CQC_TIMER_ENTRY_SZ;
1893 caps->mtt_entry_sz = HNS_ROCE_V2_MTT_ENTRY_SZ;
1894 caps->num_mtt_segs = HNS_ROCE_V2_MAX_MTT_SEGS;
1895 caps->mtt_ba_pg_sz = 0;
1896 caps->num_cqe_segs = HNS_ROCE_V2_MAX_CQE_SEGS;
1897 caps->num_srqwqe_segs = HNS_ROCE_V2_MAX_SRQWQE_SEGS;
1898 caps->num_idx_segs = HNS_ROCE_V2_MAX_IDX_SEGS;
1899
1900 caps->qpc_hop_num = ctx_hop_num;
1901 caps->srqc_hop_num = ctx_hop_num;
1902 caps->cqc_hop_num = ctx_hop_num;
1903 caps->mpt_hop_num = ctx_hop_num;
1904 caps->mtt_hop_num = pbl_hop_num;
1905 caps->cqe_hop_num = pbl_hop_num;
1906 caps->srqwqe_hop_num = pbl_hop_num;
1907 caps->idx_hop_num = pbl_hop_num;
1908 caps->wqe_sq_hop_num = roce_get_field(resp_d->wq_hop_num_max_srqs,
1909 V2_QUERY_PF_CAPS_D_SQWQE_HOP_NUM_M,
1910 V2_QUERY_PF_CAPS_D_SQWQE_HOP_NUM_S);
1911 caps->wqe_sge_hop_num = roce_get_field(resp_d->wq_hop_num_max_srqs,
1912 V2_QUERY_PF_CAPS_D_EX_SGE_HOP_NUM_M,
1913 V2_QUERY_PF_CAPS_D_EX_SGE_HOP_NUM_S);
1914 caps->wqe_rq_hop_num = roce_get_field(resp_d->wq_hop_num_max_srqs,
1915 V2_QUERY_PF_CAPS_D_RQWQE_HOP_NUM_M,
1916 V2_QUERY_PF_CAPS_D_RQWQE_HOP_NUM_S);
1917
1918 calc_pg_sz(caps->num_qps, caps->qpc_entry_sz, caps->qpc_hop_num,
1919 caps->qpc_bt_num, &caps->qpc_buf_pg_sz, &caps->qpc_ba_pg_sz,
1920 HEM_TYPE_QPC);
1921 calc_pg_sz(caps->num_mtpts, caps->mtpt_entry_sz, caps->mpt_hop_num,
1922 caps->mpt_bt_num, &caps->mpt_buf_pg_sz, &caps->mpt_ba_pg_sz,
1923 HEM_TYPE_MTPT);
1924 calc_pg_sz(caps->num_cqs, caps->cqc_entry_sz, caps->cqc_hop_num,
1925 caps->cqc_bt_num, &caps->cqc_buf_pg_sz, &caps->cqc_ba_pg_sz,
1926 HEM_TYPE_CQC);
1927 calc_pg_sz(caps->num_srqs, caps->srqc_entry_sz, caps->srqc_hop_num,
1928 caps->srqc_bt_num, &caps->srqc_buf_pg_sz,
1929 &caps->srqc_ba_pg_sz, HEM_TYPE_SRQC);
1930
dfaf2854 1931 if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP08_B) {
ba6bb7e9
LO
1932 caps->sccc_hop_num = ctx_hop_num;
1933 caps->qpc_timer_hop_num = HNS_ROCE_HOP_NUM_0;
1934 caps->cqc_timer_hop_num = HNS_ROCE_HOP_NUM_0;
1935
1936 calc_pg_sz(caps->num_qps, caps->sccc_entry_sz,
1937 caps->sccc_hop_num, caps->sccc_bt_num,
1938 &caps->sccc_buf_pg_sz, &caps->sccc_ba_pg_sz,
1939 HEM_TYPE_SCCC);
1940 calc_pg_sz(caps->num_cqc_timer, caps->cqc_timer_entry_sz,
1941 caps->cqc_timer_hop_num, caps->cqc_timer_bt_num,
1942 &caps->cqc_timer_buf_pg_sz,
1943 &caps->cqc_timer_ba_pg_sz, HEM_TYPE_CQC_TIMER);
1944 }
1945
1946 calc_pg_sz(caps->num_cqe_segs, caps->mtt_entry_sz, caps->cqe_hop_num,
1947 1, &caps->cqe_buf_pg_sz, &caps->cqe_ba_pg_sz, HEM_TYPE_CQE);
1948 calc_pg_sz(caps->num_srqwqe_segs, caps->mtt_entry_sz,
1949 caps->srqwqe_hop_num, 1, &caps->srqwqe_buf_pg_sz,
1950 &caps->srqwqe_ba_pg_sz, HEM_TYPE_SRQWQE);
1951 calc_pg_sz(caps->num_idx_segs, caps->idx_entry_sz, caps->idx_hop_num,
1952 1, &caps->idx_buf_pg_sz, &caps->idx_ba_pg_sz, HEM_TYPE_IDX);
1953
1954 return 0;
1955}
1956
/* Query and configure the PF profile during device bring-up.
 *
 * The steps run in a fixed order required by firmware: query HW/FW
 * versions, configure global parameters, query PF resources, then
 * (HIP08-B and later) query timer resources and set the VF switch
 * parameter, and finally allocate VF resources. Each step aborts the
 * whole profile on failure.
 *
 * Returns 0 on success or a negative errno from the failing command.
 */
static int hns_roce_v2_profile(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_caps *caps = &hr_dev->caps;
	int ret;

	ret = hns_roce_cmq_query_hw_info(hr_dev);
	if (ret) {
		dev_err(hr_dev->dev, "Query hardware version fail, ret = %d.\n",
			ret);
		return ret;
	}

	ret = hns_roce_query_fw_ver(hr_dev);
	if (ret) {
		dev_err(hr_dev->dev, "Query firmware version fail, ret = %d.\n",
			ret);
		return ret;
	}

	ret = hns_roce_config_global_param(hr_dev);
	if (ret) {
		dev_err(hr_dev->dev, "Configure global param fail, ret = %d.\n",
			ret);
		return ret;
	}

	/* Get pf resource owned by every pf */
	ret = hns_roce_query_pf_resource(hr_dev);
	if (ret) {
		dev_err(hr_dev->dev, "Query pf resource fail, ret = %d.\n",
			ret);
		return ret;
	}

	/* QPC/CQC timers and the VF switch exist only on HIP08-B and later */
	if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP08_B) {
		ret = hns_roce_query_pf_timer_resource(hr_dev);
		if (ret) {
			dev_err(hr_dev->dev,
				"Query pf timer resource fail, ret = %d.\n",
				ret);
			return ret;
		}

		ret = hns_roce_set_vf_switch_param(hr_dev, 0);
		if (ret) {
			dev_err(hr_dev->dev,
				"Set function switch param fail, ret = %d.\n",
				ret);
			return ret;
		}
	}

	ret = hns_roce_alloc_vf_resource(hr_dev);
	if (ret) {
		dev_err(hr_dev->dev, "Allocate vf resource fail, ret = %d.\n",
			ret);
		return ret;
	}

	hr_dev->vendor_part_id = hr_dev->pci_dev->device;
	hr_dev->sys_image_guid = be64_to_cpu(hr_dev->ib_dev.node_guid);

	/* Fixed driver-side capabilities, not reported by firmware */
	caps->num_mtt_segs = HNS_ROCE_V2_MAX_MTT_SEGS;
	caps->num_cqe_segs = HNS_ROCE_V2_MAX_CQE_SEGS;
	caps->num_srqwqe_segs = HNS_ROCE_V2_MAX_SRQWQE_SEGS;
	caps->num_idx_segs = HNS_ROCE_V2_MAX_IDX_SEGS;

	caps->pbl_ba_pg_sz = HNS_ROCE_BA_PG_SZ_SUPPORTED_16K;
	caps->pbl_buf_pg_sz = 0;
	caps->pbl_hop_num = HNS_ROCE_PBL_HOP_NUM;
	caps->eqe_ba_pg_sz = 0;
	caps->eqe_buf_pg_sz = 0;
	caps->eqe_hop_num = HNS_ROCE_EQE_HOP_NUM;
	caps->tsq_buf_pg_sz = 0;

	/* Prefer firmware-reported caps; fall back to built-in defaults */
	ret = hns_roce_query_pf_caps(hr_dev);
	if (ret)
		set_default_caps(hr_dev);

	ret = hns_roce_v2_set_bt(hr_dev);
	if (ret)
		dev_err(hr_dev->dev, "Configure bt attribute fail, ret = %d.\n",
			ret);

	return ret;
}
2043
/* Write one link table (TSQ or TPQ) to hardware via a two-descriptor
 * CMQ command. Descriptor 0 (req_a) carries the table base address,
 * depth, page size and the head entry; descriptor 1 (req_b) carries
 * the tail entry. The INIT_EN bit is set only after both descriptors
 * are filled so the HW never sees a half-described table.
 *
 * Returns 0 on success, -EINVAL for an unknown table type, or the
 * CMQ send error.
 */
static int hns_roce_config_link_table(struct hns_roce_dev *hr_dev,
				      enum hns_roce_link_table_type type)
{
	struct hns_roce_cmq_desc desc[2];
	struct hns_roce_cfg_llm_a *req_a =
				(struct hns_roce_cfg_llm_a *)desc[0].data;
	struct hns_roce_cfg_llm_b *req_b =
				(struct hns_roce_cfg_llm_b *)desc[1].data;
	struct hns_roce_v2_priv *priv = hr_dev->priv;
	struct hns_roce_link_table *link_tbl;
	struct hns_roce_link_table_entry *entry;
	enum hns_roce_opcode_type opcode;
	u32 page_num;
	int i;

	switch (type) {
	case TSQ_LINK_TABLE:
		link_tbl = &priv->tsq;
		opcode = HNS_ROCE_OPC_CFG_EXT_LLM;
		break;
	case TPQ_LINK_TABLE:
		link_tbl = &priv->tpq;
		opcode = HNS_ROCE_OPC_CFG_TMOUT_LLM;
		break;
	default:
		return -EINVAL;
	}

	page_num = link_tbl->npages;
	entry = link_tbl->table.buf;
	memset(req_a, 0, sizeof(*req_a));
	memset(req_b, 0, sizeof(*req_b));

	for (i = 0; i < 2; i++) {
		hns_roce_cmq_setup_basic_desc(&desc[i], opcode, false);

		/* Chain descriptor 0 to descriptor 1 via the NEXT flag */
		if (i == 0)
			desc[i].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
		else
			desc[i].flag &= ~cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);

		if (i == 0) {
			req_a->base_addr_l =
				cpu_to_le32(link_tbl->table.map & 0xffffffff);
			req_a->base_addr_h =
				cpu_to_le32(link_tbl->table.map >> 32);
			roce_set_field(req_a->depth_pgsz_init_en,
				       CFG_LLM_QUE_DEPTH_M, CFG_LLM_QUE_DEPTH_S,
				       link_tbl->npages);
			roce_set_field(req_a->depth_pgsz_init_en,
				       CFG_LLM_QUE_PGSZ_M, CFG_LLM_QUE_PGSZ_S,
				       link_tbl->pg_sz);
			req_a->head_ba_l = cpu_to_le32(entry[0].blk_ba0);
			req_a->head_ba_h_nxtptr =
				cpu_to_le32(entry[0].blk_ba1_nxt_ptr);
			roce_set_field(req_a->head_ptr, CFG_LLM_HEAD_PTR_M,
				       CFG_LLM_HEAD_PTR_S, 0);
		} else {
			/* NOTE(review): entry[page_num - 2] underflows if
			 * page_num < 2; hns_roce_init_link_table always sizes
			 * pg_num_b >= 2, so this currently holds -- verify if
			 * the sizing formulas ever change.
			 */
			req_b->tail_ba_l =
				cpu_to_le32(entry[page_num - 1].blk_ba0);
			roce_set_field(req_b->tail_ba_h, CFG_LLM_TAIL_BA_H_M,
				       CFG_LLM_TAIL_BA_H_S,
				       entry[page_num - 1].blk_ba1_nxt_ptr &
				       HNS_ROCE_LINK_TABLE_BA1_M);
			roce_set_field(req_b->tail_ptr, CFG_LLM_TAIL_PTR_M,
				       CFG_LLM_TAIL_PTR_S,
				       (entry[page_num - 2].blk_ba1_nxt_ptr &
				       HNS_ROCE_LINK_TABLE_NXT_PTR_M) >>
				       HNS_ROCE_LINK_TABLE_NXT_PTR_S);
		}
	}
	/* Arm the table only after every field above is populated */
	roce_set_field(req_a->depth_pgsz_init_en, CFG_LLM_INIT_EN_M,
		       CFG_LLM_INIT_EN_S, 1);

	return hns_roce_cmq_send(hr_dev, desc, 2);
}
2120
/* Allocate and chain the DMA pages of a link table (TSQ or TPQ), then
 * push the table to hardware via hns_roce_config_link_table().
 *
 * Each table entry records one page's bus address split into blk_ba0
 * (bits 12..43) and blk_ba1_nxt_ptr (bits 44..), with the next-page
 * index OR'ed into the nxt_ptr field to form a singly linked chain.
 *
 * Returns 0 on success, -EINVAL for an unknown type, or -ENOMEM with
 * all partial allocations unwound.
 */
static int hns_roce_init_link_table(struct hns_roce_dev *hr_dev,
				    enum hns_roce_link_table_type type)
{
	struct hns_roce_v2_priv *priv = hr_dev->priv;
	struct hns_roce_link_table *link_tbl;
	struct hns_roce_link_table_entry *entry;
	struct device *dev = hr_dev->dev;
	u32 buf_chk_sz;
	dma_addr_t t;
	int func_num = 1;
	int pg_num_a;
	int pg_num_b;
	int pg_num;
	int size;
	int i;

	switch (type) {
	case TSQ_LINK_TABLE:
		link_tbl = &priv->tsq;
		buf_chk_sz = 1 << (hr_dev->caps.tsq_buf_pg_sz + PAGE_SHIFT);
		/* NOTE(review): truncating division; assumes pg_num_b or an
		 * exact multiple covers the remainder -- TODO confirm.
		 */
		pg_num_a = hr_dev->caps.num_qps * 8 / buf_chk_sz;
		pg_num_b = hr_dev->caps.sl_num * 4 + 2;
		break;
	case TPQ_LINK_TABLE:
		link_tbl = &priv->tpq;
		buf_chk_sz = 1 << (hr_dev->caps.tpq_buf_pg_sz + PAGE_SHIFT);
		pg_num_a = hr_dev->caps.num_cqs * 4 / buf_chk_sz;
		pg_num_b = 2 * 4 * func_num + 2;
		break;
	default:
		return -EINVAL;
	}

	pg_num = max(pg_num_a, pg_num_b);
	size = pg_num * sizeof(struct hns_roce_link_table_entry);

	link_tbl->table.buf = dma_alloc_coherent(dev, size,
						 &link_tbl->table.map,
						 GFP_KERNEL);
	if (!link_tbl->table.buf)
		goto out;

	link_tbl->pg_list = kcalloc(pg_num, sizeof(*link_tbl->pg_list),
				    GFP_KERNEL);
	if (!link_tbl->pg_list)
		goto err_kcalloc_failed;

	entry = link_tbl->table.buf;
	for (i = 0; i < pg_num; ++i) {
		link_tbl->pg_list[i].buf = dma_alloc_coherent(dev, buf_chk_sz,
							      &t, GFP_KERNEL);
		if (!link_tbl->pg_list[i].buf)
			goto err_alloc_buf_failed;

		link_tbl->pg_list[i].map = t;

		/* Split the bus address as the HW entry layout requires */
		entry[i].blk_ba0 = (u32)(t >> 12);
		entry[i].blk_ba1_nxt_ptr = (u32)(t >> 44);

		/* Chain each page to its successor; the last has no next */
		if (i < (pg_num - 1))
			entry[i].blk_ba1_nxt_ptr |=
				(i + 1) << HNS_ROCE_LINK_TABLE_NXT_PTR_S;

	}
	link_tbl->npages = pg_num;
	link_tbl->pg_sz = buf_chk_sz;

	return hns_roce_config_link_table(hr_dev, type);

err_alloc_buf_failed:
	for (i -= 1; i >= 0; i--)
		dma_free_coherent(dev, buf_chk_sz,
				  link_tbl->pg_list[i].buf,
				  link_tbl->pg_list[i].map);
	kfree(link_tbl->pg_list);

err_kcalloc_failed:
	dma_free_coherent(dev, size, link_tbl->table.buf,
			  link_tbl->table.map);

out:
	return -ENOMEM;
}
2204
2205static void hns_roce_free_link_table(struct hns_roce_dev *hr_dev,
2206 struct hns_roce_link_table *link_tbl)
2207{
2208 struct device *dev = hr_dev->dev;
2209 int size;
2210 int i;
2211
2212 size = link_tbl->npages * sizeof(struct hns_roce_link_table_entry);
2213
2214 for (i = 0; i < link_tbl->npages; ++i)
2215 if (link_tbl->pg_list[i].buf)
2216 dma_free_coherent(dev, link_tbl->pg_sz,
2217 link_tbl->pg_list[i].buf,
2218 link_tbl->pg_list[i].map);
2219 kfree(link_tbl->pg_list);
2220
2221 dma_free_coherent(dev, size, link_tbl->table.buf,
2222 link_tbl->table.map);
2223}
2224
/* hw_v2 init hook: set up the TSQ and TPQ link tables, then pre-fetch
 * the HEM chunks backing the QPC and CQC timer tables.
 *
 * On any failure all previously acquired resources are released in
 * reverse order via the goto chain. Returns 0 or a negative errno.
 */
static int hns_roce_v2_init(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_v2_priv *priv = hr_dev->priv;
	int qpc_count, cqc_count;
	int ret, i;

	/* TSQ includes SQ doorbell and ack doorbell */
	ret = hns_roce_init_link_table(hr_dev, TSQ_LINK_TABLE);
	if (ret) {
		dev_err(hr_dev->dev, "TSQ init failed, ret = %d.\n", ret);
		return ret;
	}

	ret = hns_roce_init_link_table(hr_dev, TPQ_LINK_TABLE);
	if (ret) {
		dev_err(hr_dev->dev, "TPQ init failed, ret = %d.\n", ret);
		goto err_tpq_init_failed;
	}

	/* Alloc memory for QPC Timer buffer space chunk */
	for (qpc_count = 0; qpc_count < hr_dev->caps.qpc_timer_bt_num;
	     qpc_count++) {
		ret = hns_roce_table_get(hr_dev, &hr_dev->qpc_timer_table,
					 qpc_count);
		if (ret) {
			dev_err(hr_dev->dev, "QPC Timer get failed\n");
			goto err_qpc_timer_failed;
		}
	}

	/* Alloc memory for CQC Timer buffer space chunk */
	for (cqc_count = 0; cqc_count < hr_dev->caps.cqc_timer_bt_num;
	     cqc_count++) {
		ret = hns_roce_table_get(hr_dev, &hr_dev->cqc_timer_table,
					 cqc_count);
		if (ret) {
			dev_err(hr_dev->dev, "CQC Timer get failed\n");
			goto err_cqc_timer_failed;
		}
	}

	return 0;

err_cqc_timer_failed:
	/* Put back only the chunks acquired before the failure */
	for (i = 0; i < cqc_count; i++)
		hns_roce_table_put(hr_dev, &hr_dev->cqc_timer_table, i);

err_qpc_timer_failed:
	for (i = 0; i < qpc_count; i++)
		hns_roce_table_put(hr_dev, &hr_dev->qpc_timer_table, i);

	hns_roce_free_link_table(hr_dev, &priv->tpq);

err_tpq_init_failed:
	hns_roce_free_link_table(hr_dev, &priv->tsq);

	return ret;
}
2283
/* hw_v2 teardown hook: on HIP08-B and later ask firmware to clear the
 * function's state, then free the TPQ and TSQ link tables.
 */
static void hns_roce_v2_exit(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_v2_priv *priv = hr_dev->priv;

	if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP08_B)
		hns_roce_function_clear(hr_dev);

	hns_roce_free_link_table(hr_dev, &priv->tpq);
	hns_roce_free_link_table(hr_dev, &priv->tsq);
}
2294
f747b689
LO
2295static int hns_roce_query_mbox_status(struct hns_roce_dev *hr_dev)
2296{
2297 struct hns_roce_cmq_desc desc;
2298 struct hns_roce_mbox_status *mb_st =
2299 (struct hns_roce_mbox_status *)desc.data;
2300 enum hns_roce_cmd_return_status status;
2301
2302 hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_QUERY_MB_ST, true);
2303
2304 status = hns_roce_cmq_send(hr_dev, &desc, 1);
2305 if (status)
2306 return status;
2307
bfe86035 2308 return le32_to_cpu(mb_st->mb_status_hw_run);
f747b689
LO
2309}
2310
a680f2f3
WHX
2311static int hns_roce_v2_cmd_pending(struct hns_roce_dev *hr_dev)
2312{
f747b689 2313 u32 status = hns_roce_query_mbox_status(hr_dev);
a680f2f3
WHX
2314
2315 return status >> HNS_ROCE_HW_RUN_BIT_SHIFT;
2316}
2317
2318static int hns_roce_v2_cmd_complete(struct hns_roce_dev *hr_dev)
2319{
f747b689 2320 u32 status = hns_roce_query_mbox_status(hr_dev);
a680f2f3
WHX
2321
2322 return status & HNS_ROCE_HW_MB_STATUS_MASK;
2323}
2324
f747b689
LO
2325static int hns_roce_mbox_post(struct hns_roce_dev *hr_dev, u64 in_param,
2326 u64 out_param, u32 in_modifier, u8 op_modifier,
2327 u16 op, u16 token, int event)
2328{
2329 struct hns_roce_cmq_desc desc;
2330 struct hns_roce_post_mbox *mb = (struct hns_roce_post_mbox *)desc.data;
2331
2332 hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_POST_MB, false);
2333
bfe86035
LC
2334 mb->in_param_l = cpu_to_le32(in_param);
2335 mb->in_param_h = cpu_to_le32(in_param >> 32);
2336 mb->out_param_l = cpu_to_le32(out_param);
2337 mb->out_param_h = cpu_to_le32(out_param >> 32);
f747b689
LO
2338 mb->cmd_tag = cpu_to_le32(in_modifier << 8 | op);
2339 mb->token_event_en = cpu_to_le32(event << 16 | token);
2340
2341 return hns_roce_cmq_send(hr_dev, &desc, 1);
2342}
2343
a680f2f3
WHX
2344static int hns_roce_v2_post_mbox(struct hns_roce_dev *hr_dev, u64 in_param,
2345 u64 out_param, u32 in_modifier, u8 op_modifier,
2346 u16 op, u16 token, int event)
2347{
2348 struct device *dev = hr_dev->dev;
a680f2f3 2349 unsigned long end;
f747b689 2350 int ret;
a680f2f3
WHX
2351
2352 end = msecs_to_jiffies(HNS_ROCE_V2_GO_BIT_TIMEOUT_MSECS) + jiffies;
2353 while (hns_roce_v2_cmd_pending(hr_dev)) {
2354 if (time_after(jiffies, end)) {
2355 dev_dbg(dev, "jiffies=%d end=%d\n", (int)jiffies,
2356 (int)end);
2357 return -EAGAIN;
2358 }
2359 cond_resched();
2360 }
2361
f747b689
LO
2362 ret = hns_roce_mbox_post(hr_dev, in_param, out_param, in_modifier,
2363 op_modifier, op, token, event);
2364 if (ret)
2365 dev_err(dev, "Post mailbox fail(%d)\n", ret);
a680f2f3 2366
f747b689 2367 return ret;
a680f2f3
WHX
2368}
2369
/* Poll for completion of the last mailbox command.
 *
 * Waits up to @timeout ms for the HW run bit to clear, then checks the
 * completion status field. Returns 0 on success, -ETIMEDOUT if the
 * engine never went idle, CMD_RST_PRC_EBUSY (positive) when a reset is
 * in progress so the caller can retry, or -EBUSY on any other failure
 * status.
 */
static int hns_roce_v2_chk_mbox(struct hns_roce_dev *hr_dev,
				unsigned long timeout)
{
	struct device *dev = hr_dev->dev;
	unsigned long end;
	u32 status;

	end = msecs_to_jiffies(timeout) + jiffies;
	while (hns_roce_v2_cmd_pending(hr_dev) && time_before(jiffies, end))
		cond_resched();

	if (hns_roce_v2_cmd_pending(hr_dev)) {
		dev_err(dev, "[cmd_poll]hw run cmd TIMEDOUT!\n");
		return -ETIMEDOUT;
	}

	/* 0x1 is the HW's "command succeeded" status code */
	status = hns_roce_v2_cmd_complete(hr_dev);
	if (status != 0x1) {
		if (status == CMD_RST_PRC_EBUSY)
			return status;

		dev_err(dev, "mailbox status 0x%x!\n", status);
		return -EBUSY;
	}

	return 0;
}
2397
4db134a3 2398static int hns_roce_config_sgid_table(struct hns_roce_dev *hr_dev,
2399 int gid_index, const union ib_gid *gid,
2400 enum hns_roce_sgid_type sgid_type)
2401{
2402 struct hns_roce_cmq_desc desc;
2403 struct hns_roce_cfg_sgid_tb *sgid_tb =
2404 (struct hns_roce_cfg_sgid_tb *)desc.data;
2405 u32 *p;
2406
2407 hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_SGID_TB, false);
2408
60262b10 2409 roce_set_field(sgid_tb->table_idx_rsv, CFG_SGID_TB_TABLE_IDX_M,
4db134a3 2410 CFG_SGID_TB_TABLE_IDX_S, gid_index);
60262b10 2411 roce_set_field(sgid_tb->vf_sgid_type_rsv, CFG_SGID_TB_VF_SGID_TYPE_M,
4db134a3 2412 CFG_SGID_TB_VF_SGID_TYPE_S, sgid_type);
2413
2414 p = (u32 *)&gid->raw[0];
2415 sgid_tb->vf_sgid_l = cpu_to_le32(*p);
2416
2417 p = (u32 *)&gid->raw[4];
2418 sgid_tb->vf_sgid_ml = cpu_to_le32(*p);
2419
2420 p = (u32 *)&gid->raw[8];
2421 sgid_tb->vf_sgid_mh = cpu_to_le32(*p);
2422
2423 p = (u32 *)&gid->raw[0xc];
2424 sgid_tb->vf_sgid_h = cpu_to_le32(*p);
2425
2426 return hns_roce_cmq_send(hr_dev, &desc, 1);
2427}
2428
b5ff0f61 2429static int hns_roce_v2_set_gid(struct hns_roce_dev *hr_dev, u8 port,
f4df9a7c 2430 int gid_index, const union ib_gid *gid,
b5ff0f61 2431 const struct ib_gid_attr *attr)
7afddafa 2432{
b5ff0f61 2433 enum hns_roce_sgid_type sgid_type = GID_TYPE_FLAG_ROCE_V1;
4db134a3 2434 int ret;
7afddafa 2435
b5ff0f61
WHX
2436 if (!gid || !attr)
2437 return -EINVAL;
2438
2439 if (attr->gid_type == IB_GID_TYPE_ROCE)
2440 sgid_type = GID_TYPE_FLAG_ROCE_V1;
2441
2442 if (attr->gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) {
2443 if (ipv6_addr_v4mapped((void *)gid))
2444 sgid_type = GID_TYPE_FLAG_ROCE_V2_IPV4;
2445 else
2446 sgid_type = GID_TYPE_FLAG_ROCE_V2_IPV6;
2447 }
2448
4db134a3 2449 ret = hns_roce_config_sgid_table(hr_dev, gid_index, gid, sgid_type);
2450 if (ret)
2451 dev_err(hr_dev->dev, "Configure sgid table failed(%d)!\n", ret);
b5ff0f61 2452
4db134a3 2453 return ret;
7afddafa
WHX
2454}
2455
a74dc41d
WHX
2456static int hns_roce_v2_set_mac(struct hns_roce_dev *hr_dev, u8 phy_port,
2457 u8 *addr)
7afddafa 2458{
e8e8b652 2459 struct hns_roce_cmq_desc desc;
2460 struct hns_roce_cfg_smac_tb *smac_tb =
2461 (struct hns_roce_cfg_smac_tb *)desc.data;
7afddafa
WHX
2462 u16 reg_smac_h;
2463 u32 reg_smac_l;
e8e8b652 2464
2465 hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_SMAC_TB, false);
7afddafa
WHX
2466
2467 reg_smac_l = *(u32 *)(&addr[0]);
e8e8b652 2468 reg_smac_h = *(u16 *)(&addr[4]);
7afddafa 2469
e8e8b652 2470 memset(smac_tb, 0, sizeof(*smac_tb));
2471 roce_set_field(smac_tb->tb_idx_rsv,
2472 CFG_SMAC_TB_IDX_M,
2473 CFG_SMAC_TB_IDX_S, phy_port);
2474 roce_set_field(smac_tb->vf_smac_h_rsv,
2475 CFG_SMAC_TB_VF_SMAC_H_M,
2476 CFG_SMAC_TB_VF_SMAC_H_S, reg_smac_h);
bfe86035 2477 smac_tb->vf_smac_l = cpu_to_le32(reg_smac_l);
a74dc41d 2478
e8e8b652 2479 return hns_roce_cmq_send(hr_dev, &desc, 1);
7afddafa
WHX
2480}
2481
/* Fill the PBL-related fields of an MPT entry from @mr.
 *
 * Writes the PBL size and base address (BA is stored >> 3 per the HW
 * layout), then walks the umem's DMA pages and records the first two
 * page addresses directly in the MPT (pa0/pa1, each stored >> 6).
 * A scratch page is used to stage the addresses.
 *
 * Returns 0 on success or -ENOMEM if the scratch page allocation fails.
 */
static int set_mtpt_pbl(struct hns_roce_v2_mpt_entry *mpt_entry,
			struct hns_roce_mr *mr)
{
	struct sg_dma_page_iter sg_iter;
	u64 page_addr;
	u64 *pages;
	int i;

	mpt_entry->pbl_size = cpu_to_le32(mr->pbl_size);
	mpt_entry->pbl_ba_l = cpu_to_le32(lower_32_bits(mr->pbl_ba >> 3));
	roce_set_field(mpt_entry->byte_48_mode_ba,
		       V2_MPT_BYTE_48_PBL_BA_H_M, V2_MPT_BYTE_48_PBL_BA_H_S,
		       upper_32_bits(mr->pbl_ba >> 3));

	pages = (u64 *)__get_free_page(GFP_KERNEL);
	if (!pages)
		return -ENOMEM;

	i = 0;
	for_each_sg_dma_page(mr->umem->sg_head.sgl, &sg_iter, mr->umem->nmap, 0) {
		page_addr = sg_page_iter_dma_address(&sg_iter);
		pages[i] = page_addr >> 6;

		/* Record the first 2 entry directly to MTPT table */
		if (i >= HNS_ROCE_V2_MAX_INNER_MTPT_NUM - 1)
			goto found;
		i++;
	}
found:
	mpt_entry->pa0_l = cpu_to_le32(lower_32_bits(pages[0]));
	roce_set_field(mpt_entry->byte_56_pa0_h, V2_MPT_BYTE_56_PA0_H_M,
		       V2_MPT_BYTE_56_PA0_H_S, upper_32_bits(pages[0]));

	mpt_entry->pa1_l = cpu_to_le32(lower_32_bits(pages[1]));
	roce_set_field(mpt_entry->byte_64_buf_pa1, V2_MPT_BYTE_64_PA1_H_M,
		       V2_MPT_BYTE_64_PA1_H_S, upper_32_bits(pages[1]));
	roce_set_field(mpt_entry->byte_64_buf_pa1,
		       V2_MPT_BYTE_64_PBL_BUF_PG_SZ_M,
		       V2_MPT_BYTE_64_PBL_BUF_PG_SZ_S,
		       mr->pbl_buf_pg_sz + PG_SHIFT_OFFSET);

	free_page((unsigned long)pages);

	return 0;
}
2527
/* Write a full MPT entry for a regular (or DMA) memory region into the
 * mailbox buffer @mb_buf.
 *
 * Programs state, hop number, page sizes, PD, access flags and the
 * VA/length/key of @mr. DMA MRs have no PBL, so the function returns
 * before calling set_mtpt_pbl() for them.
 *
 * Returns 0 on success or the error from set_mtpt_pbl().
 */
static int hns_roce_v2_write_mtpt(void *mb_buf, struct hns_roce_mr *mr,
				  unsigned long mtpt_idx)
{
	struct hns_roce_v2_mpt_entry *mpt_entry;
	int ret;

	mpt_entry = mb_buf;
	memset(mpt_entry, 0, sizeof(*mpt_entry));

	roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_MPT_ST_M,
		       V2_MPT_BYTE_4_MPT_ST_S, V2_MPT_ST_VALID);
	/* HOP_NUM_0 is encoded as 0 in the MPT */
	roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PBL_HOP_NUM_M,
		       V2_MPT_BYTE_4_PBL_HOP_NUM_S, mr->pbl_hop_num ==
		       HNS_ROCE_HOP_NUM_0 ? 0 : mr->pbl_hop_num);
	roce_set_field(mpt_entry->byte_4_pd_hop_st,
		       V2_MPT_BYTE_4_PBL_BA_PG_SZ_M,
		       V2_MPT_BYTE_4_PBL_BA_PG_SZ_S,
		       mr->pbl_ba_pg_sz + PG_SHIFT_OFFSET);
	roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PD_M,
		       V2_MPT_BYTE_4_PD_S, mr->pd);

	/* Translate ib_access_flags into the individual MPT enable bits */
	roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RA_EN_S, 0);
	roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_R_INV_EN_S, 0);
	roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_L_INV_EN_S, 1);
	roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_BIND_EN_S,
		     (mr->access & IB_ACCESS_MW_BIND ? 1 : 0));
	roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_ATOMIC_EN_S,
		     mr->access & IB_ACCESS_REMOTE_ATOMIC ? 1 : 0);
	roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RR_EN_S,
		     (mr->access & IB_ACCESS_REMOTE_READ ? 1 : 0));
	roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RW_EN_S,
		     (mr->access & IB_ACCESS_REMOTE_WRITE ? 1 : 0));
	roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_LW_EN_S,
		     (mr->access & IB_ACCESS_LOCAL_WRITE ? 1 : 0));

	/* DMA MRs cover physical addresses directly (PA mode) */
	roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_PA_S,
		     mr->type == MR_TYPE_MR ? 0 : 1);
	roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_INNER_PA_VLD_S,
		     1);

	mpt_entry->len_l = cpu_to_le32(lower_32_bits(mr->size));
	mpt_entry->len_h = cpu_to_le32(upper_32_bits(mr->size));
	mpt_entry->lkey = cpu_to_le32(mr->key);
	mpt_entry->va_l = cpu_to_le32(lower_32_bits(mr->iova));
	mpt_entry->va_h = cpu_to_le32(upper_32_bits(mr->iova));

	if (mr->type == MR_TYPE_DMA)
		return 0;

	ret = set_mtpt_pbl(mpt_entry, mr);

	return ret;
}
2581
/* Update an existing MPT entry in @mb_buf for ib_rereg_user_mr.
 *
 * Only the aspects selected by @flags are rewritten: the PD
 * (IB_MR_REREG_PD), the access-permission bits (IB_MR_REREG_ACCESS),
 * or the translation -- VA, length and a fresh PBL (IB_MR_REREG_TRANS).
 * The entry is unconditionally marked VALID. The in-kernel @mr fields
 * are kept in sync with what is written to HW.
 *
 * Returns 0 on success or the error from set_mtpt_pbl().
 */
static int hns_roce_v2_rereg_write_mtpt(struct hns_roce_dev *hr_dev,
					struct hns_roce_mr *mr, int flags,
					u32 pdn, int mr_access_flags, u64 iova,
					u64 size, void *mb_buf)
{
	struct hns_roce_v2_mpt_entry *mpt_entry = mb_buf;
	int ret = 0;

	roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_MPT_ST_M,
		       V2_MPT_BYTE_4_MPT_ST_S, V2_MPT_ST_VALID);

	if (flags & IB_MR_REREG_PD) {
		roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PD_M,
			       V2_MPT_BYTE_4_PD_S, pdn);
		mr->pd = pdn;
	}

	if (flags & IB_MR_REREG_ACCESS) {
		roce_set_bit(mpt_entry->byte_8_mw_cnt_en,
			     V2_MPT_BYTE_8_BIND_EN_S,
			     (mr_access_flags & IB_ACCESS_MW_BIND ? 1 : 0));
		roce_set_bit(mpt_entry->byte_8_mw_cnt_en,
			     V2_MPT_BYTE_8_ATOMIC_EN_S,
			     mr_access_flags & IB_ACCESS_REMOTE_ATOMIC ? 1 : 0);
		roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RR_EN_S,
			     mr_access_flags & IB_ACCESS_REMOTE_READ ? 1 : 0);
		roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RW_EN_S,
			     mr_access_flags & IB_ACCESS_REMOTE_WRITE ? 1 : 0);
		roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_LW_EN_S,
			     mr_access_flags & IB_ACCESS_LOCAL_WRITE ? 1 : 0);
	}

	if (flags & IB_MR_REREG_TRANS) {
		mpt_entry->va_l = cpu_to_le32(lower_32_bits(iova));
		mpt_entry->va_h = cpu_to_le32(upper_32_bits(iova));
		mpt_entry->len_l = cpu_to_le32(lower_32_bits(size));
		mpt_entry->len_h = cpu_to_le32(upper_32_bits(size));

		mr->iova = iova;
		mr->size = size;

		ret = set_mtpt_pbl(mpt_entry, mr);
	}

	return ret;
}
2628
/* Write the MPT entry for a fast-register MR (FRMR) into @mb_buf.
 *
 * FRMR entries start in the FREE state with a fixed one-hop PBL and
 * the FRE (fast-register enable) and BPD (bind to PD) bits set; the
 * actual mapping is supplied later by the fast-register WQE.
 *
 * Always returns 0.
 */
static int hns_roce_v2_frmr_write_mtpt(void *mb_buf, struct hns_roce_mr *mr)
{
	struct hns_roce_v2_mpt_entry *mpt_entry;

	mpt_entry = mb_buf;
	memset(mpt_entry, 0, sizeof(*mpt_entry));

	roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_MPT_ST_M,
		       V2_MPT_BYTE_4_MPT_ST_S, V2_MPT_ST_FREE);
	roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PBL_HOP_NUM_M,
		       V2_MPT_BYTE_4_PBL_HOP_NUM_S, 1);
	roce_set_field(mpt_entry->byte_4_pd_hop_st,
		       V2_MPT_BYTE_4_PBL_BA_PG_SZ_M,
		       V2_MPT_BYTE_4_PBL_BA_PG_SZ_S,
		       mr->pbl_ba_pg_sz + PG_SHIFT_OFFSET);
	roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PD_M,
		       V2_MPT_BYTE_4_PD_S, mr->pd);

	roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RA_EN_S, 1);
	roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_R_INV_EN_S, 1);
	roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_L_INV_EN_S, 1);

	roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_FRE_S, 1);
	roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_PA_S, 0);
	roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_MR_MW_S, 0);
	roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_BPD_S, 1);

	mpt_entry->pbl_size = cpu_to_le32(mr->pbl_size);

	/* PBL base address is stored right-shifted by 3 per the HW layout */
	mpt_entry->pbl_ba_l = cpu_to_le32(lower_32_bits(mr->pbl_ba >> 3));
	roce_set_field(mpt_entry->byte_48_mode_ba, V2_MPT_BYTE_48_PBL_BA_H_M,
		       V2_MPT_BYTE_48_PBL_BA_H_S,
		       upper_32_bits(mr->pbl_ba >> 3));

	roce_set_field(mpt_entry->byte_64_buf_pa1,
		       V2_MPT_BYTE_64_PBL_BUF_PG_SZ_M,
		       V2_MPT_BYTE_64_PBL_BUF_PG_SZ_S,
		       mr->pbl_buf_pg_sz + PG_SHIFT_OFFSET);

	return 0;
}
2670
/* Write the MPT entry for a memory window (MW) into @mb_buf.
 *
 * The entry starts FREE with the MR_MW bit set; BQP (bind to QP) is
 * set only for type-2 windows, which are QP-bound by definition.
 * The window's rkey is stored in the lkey field of the entry.
 *
 * Always returns 0.
 */
static int hns_roce_v2_mw_write_mtpt(void *mb_buf, struct hns_roce_mw *mw)
{
	struct hns_roce_v2_mpt_entry *mpt_entry;

	mpt_entry = mb_buf;
	memset(mpt_entry, 0, sizeof(*mpt_entry));

	roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_MPT_ST_M,
		       V2_MPT_BYTE_4_MPT_ST_S, V2_MPT_ST_FREE);
	roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PD_M,
		       V2_MPT_BYTE_4_PD_S, mw->pdn);
	/* HOP_NUM_0 is encoded as 0 in the MPT */
	roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PBL_HOP_NUM_M,
		       V2_MPT_BYTE_4_PBL_HOP_NUM_S,
		       mw->pbl_hop_num == HNS_ROCE_HOP_NUM_0 ? 0 :
		       mw->pbl_hop_num);
	roce_set_field(mpt_entry->byte_4_pd_hop_st,
		       V2_MPT_BYTE_4_PBL_BA_PG_SZ_M,
		       V2_MPT_BYTE_4_PBL_BA_PG_SZ_S,
		       mw->pbl_ba_pg_sz + PG_SHIFT_OFFSET);

	roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_R_INV_EN_S, 1);
	roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_L_INV_EN_S, 1);

	roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_PA_S, 0);
	roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_MR_MW_S, 1);
	roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_BPD_S, 1);
	roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_BQP_S,
		     mw->ibmw.type == IB_MW_TYPE_1 ? 0 : 1);

	roce_set_field(mpt_entry->byte_64_buf_pa1,
		       V2_MPT_BYTE_64_PBL_BUF_PG_SZ_M,
		       V2_MPT_BYTE_64_PBL_BUF_PG_SZ_S,
		       mw->pbl_buf_pg_sz + PG_SHIFT_OFFSET);

	mpt_entry->lkey = cpu_to_le32(mw->rkey);

	return 0;
}
2709
93aa2187
WHX
2710static void *get_cqe_v2(struct hns_roce_cq *hr_cq, int n)
2711{
18a96d25 2712 return hns_roce_buf_offset(&hr_cq->buf, n * HNS_ROCE_V2_CQE_ENTRY_SIZE);
93aa2187
WHX
2713}
2714
2715static void *get_sw_cqe_v2(struct hns_roce_cq *hr_cq, int n)
2716{
2717 struct hns_roce_v2_cqe *cqe = get_cqe_v2(hr_cq, n & hr_cq->ib_cq.cqe);
2718
2719 /* Get cqe when Owner bit is Conversely with the MSB of cons_idx */
2720 return (roce_get_bit(cqe->byte_4, V2_CQE_BYTE_4_OWNER_S) ^
e2b2744a 2721 !!(n & hr_cq->cq_depth)) ? cqe : NULL;
93aa2187
WHX
2722}
2723
2724static struct hns_roce_v2_cqe *next_cqe_sw_v2(struct hns_roce_cq *hr_cq)
2725{
2726 return get_sw_cqe_v2(hr_cq, hr_cq->cons_index);
2727}
2728
c7bcb134
LO
2729static void *get_srq_wqe(struct hns_roce_srq *srq, int n)
2730{
2731 return hns_roce_buf_offset(&srq->buf, n << srq->wqe_shift);
2732}
2733
2734static void hns_roce_free_srq_wqe(struct hns_roce_srq *srq, int wqe_index)
2735{
c7bcb134
LO
2736 /* always called with interrupts disabled. */
2737 spin_lock(&srq->lock);
2738
97545b10 2739 bitmap_clear(srq->idx_que.bitmap, wqe_index, 1);
c7bcb134
LO
2740 srq->tail++;
2741
2742 spin_unlock(&srq->lock);
2743}
2744
93aa2187
WHX
2745static void hns_roce_v2_cq_set_ci(struct hns_roce_cq *hr_cq, u32 cons_index)
2746{
b14c95be 2747 *hr_cq->set_ci_db = cons_index & V2_CQ_DB_PARAMETER_CONS_IDX_M;
93aa2187
WHX
2748}
2749
926a01dc
WHX
/*
 * Remove every CQE belonging to QP @qpn from @hr_cq, compacting the queue
 * by sliding the surviving entries back over the freed slots. If @srq is
 * non-NULL, receive completions also return their WQE index to the SRQ.
 * Caller must hold hr_cq->lock (see hns_roce_v2_cq_clean()).
 */
static void __hns_roce_v2_cq_clean(struct hns_roce_cq *hr_cq, u32 qpn,
				   struct hns_roce_srq *srq)
{
	struct hns_roce_v2_cqe *cqe, *dest;
	u32 prod_index;
	int nfreed = 0;
	int wqe_index;
	u8 owner_bit;

	/* Walk forward from the consumer index to find the first CQE not
	 * yet owned by software, bounded by one full queue length.
	 */
	for (prod_index = hr_cq->cons_index; get_sw_cqe_v2(hr_cq, prod_index);
	     ++prod_index) {
		if (prod_index > hr_cq->cons_index + hr_cq->ib_cq.cqe)
			break;
	}

	/*
	 * Now backwards through the CQ, removing CQ entries
	 * that match our QP by overwriting them with next entries.
	 */
	while ((int) --prod_index - (int) hr_cq->cons_index >= 0) {
		cqe = get_cqe_v2(hr_cq, prod_index & hr_cq->ib_cq.cqe);
		if ((roce_get_field(cqe->byte_16, V2_CQE_BYTE_16_LCL_QPN_M,
				    V2_CQE_BYTE_16_LCL_QPN_S) &
				    HNS_ROCE_V2_CQE_QPN_MASK) == qpn) {
			/* RQ CQE on an SRQ: give the indexed WQE back */
			if (srq &&
			    roce_get_bit(cqe->byte_4, V2_CQE_BYTE_4_S_R_S)) {
				wqe_index = roce_get_field(cqe->byte_4,
						V2_CQE_BYTE_4_WQE_INDX_M,
						V2_CQE_BYTE_4_WQE_INDX_S);
				hns_roce_free_srq_wqe(srq, wqe_index);
			}
			++nfreed;
		} else if (nfreed) {
			/* Slide this CQE forward by @nfreed slots while
			 * preserving the destination slot's owner bit.
			 */
			dest = get_cqe_v2(hr_cq, (prod_index + nfreed) &
					  hr_cq->ib_cq.cqe);
			owner_bit = roce_get_bit(dest->byte_4,
						 V2_CQE_BYTE_4_OWNER_S);
			memcpy(dest, cqe, sizeof(*cqe));
			roce_set_bit(dest->byte_4, V2_CQE_BYTE_4_OWNER_S,
				     owner_bit);
		}
	}

	if (nfreed) {
		hr_cq->cons_index += nfreed;
		/*
		 * Make sure update of buffer contents is done before
		 * updating consumer index.
		 */
		wmb();
		hns_roce_v2_cq_set_ci(hr_cq, hr_cq->cons_index);
	}
}
2803
/* Locked wrapper around __hns_roce_v2_cq_clean(). */
static void hns_roce_v2_cq_clean(struct hns_roce_cq *hr_cq, u32 qpn,
				 struct hns_roce_srq *srq)
{
	spin_lock_irq(&hr_cq->lock);
	__hns_roce_v2_cq_clean(hr_cq, qpn, srq);
	spin_unlock_irq(&hr_cq->lock);
}
2811
93aa2187
WHX
/*
 * Build the CQ context (CQC) in the mailbox buffer @mb_buf for hardware.
 * @mtts holds the DMA addresses of the first two CQE buffer blocks;
 * @dma_handle is the base address of the CQE MTT/BA table.
 */
static void hns_roce_v2_write_cqc(struct hns_roce_dev *hr_dev,
				  struct hns_roce_cq *hr_cq, void *mb_buf,
				  u64 *mtts, dma_addr_t dma_handle)
{
	struct hns_roce_v2_cq_context *cq_context;

	cq_context = mb_buf;
	memset(cq_context, 0, sizeof(*cq_context));

	/* State, depth and completion event queue binding */
	roce_set_field(cq_context->byte_4_pg_ceqn, V2_CQC_BYTE_4_CQ_ST_M,
		       V2_CQC_BYTE_4_CQ_ST_S, V2_CQ_STATE_VALID);
	roce_set_field(cq_context->byte_4_pg_ceqn, V2_CQC_BYTE_4_ARM_ST_M,
		       V2_CQC_BYTE_4_ARM_ST_S, REG_NXT_CEQE);
	roce_set_field(cq_context->byte_4_pg_ceqn, V2_CQC_BYTE_4_SHIFT_M,
		       V2_CQC_BYTE_4_SHIFT_S, ilog2(hr_cq->cq_depth));
	roce_set_field(cq_context->byte_4_pg_ceqn, V2_CQC_BYTE_4_CEQN_M,
		       V2_CQC_BYTE_4_CEQN_S, hr_cq->vector);

	roce_set_field(cq_context->byte_8_cqn, V2_CQC_BYTE_8_CQN_M,
		       V2_CQC_BYTE_8_CQN_S, hr_cq->cqn);

	/* Current CQE block address, split low/high across two fields */
	cq_context->cqe_cur_blk_addr = cpu_to_le32(mtts[0] >> PAGE_ADDR_SHIFT);

	roce_set_field(cq_context->byte_16_hop_addr,
		       V2_CQC_BYTE_16_CQE_CUR_BLK_ADDR_M,
		       V2_CQC_BYTE_16_CQE_CUR_BLK_ADDR_S,
		       mtts[0] >> (32 + PAGE_ADDR_SHIFT));
	roce_set_field(cq_context->byte_16_hop_addr,
		       V2_CQC_BYTE_16_CQE_HOP_NUM_M,
		       V2_CQC_BYTE_16_CQE_HOP_NUM_S, hr_dev->caps.cqe_hop_num ==
		       HNS_ROCE_HOP_NUM_0 ? 0 : hr_dev->caps.cqe_hop_num);

	/* Next CQE block address plus BA/buffer page size configuration */
	cq_context->cqe_nxt_blk_addr = cpu_to_le32(mtts[1] >> PAGE_ADDR_SHIFT);
	roce_set_field(cq_context->byte_24_pgsz_addr,
		       V2_CQC_BYTE_24_CQE_NXT_BLK_ADDR_M,
		       V2_CQC_BYTE_24_CQE_NXT_BLK_ADDR_S,
		       mtts[1] >> (32 + PAGE_ADDR_SHIFT));
	roce_set_field(cq_context->byte_24_pgsz_addr,
		       V2_CQC_BYTE_24_CQE_BA_PG_SZ_M,
		       V2_CQC_BYTE_24_CQE_BA_PG_SZ_S,
		       hr_dev->caps.cqe_ba_pg_sz + PG_SHIFT_OFFSET);
	roce_set_field(cq_context->byte_24_pgsz_addr,
		       V2_CQC_BYTE_24_CQE_BUF_PG_SZ_M,
		       V2_CQC_BYTE_24_CQE_BUF_PG_SZ_S,
		       hr_dev->caps.cqe_buf_pg_sz + PG_SHIFT_OFFSET);

	/* BA table address is stored shifted right by 3 (8-byte units) */
	cq_context->cqe_ba = cpu_to_le32(dma_handle >> 3);

	roce_set_field(cq_context->byte_40_cqe_ba, V2_CQC_BYTE_40_CQE_BA_M,
		       V2_CQC_BYTE_40_CQE_BA_S, (dma_handle >> (32 + 3)));

	/* Optional doorbell-record area for the consumer index */
	if (hr_cq->db_en)
		roce_set_bit(cq_context->byte_44_db_record,
			     V2_CQC_BYTE_44_DB_RECORD_EN_S, 1);

	roce_set_field(cq_context->byte_44_db_record,
		       V2_CQC_BYTE_44_DB_RECORD_ADDR_M,
		       V2_CQC_BYTE_44_DB_RECORD_ADDR_S,
		       ((u32)hr_cq->db.dma) >> 1);
	cq_context->db_record_addr = cpu_to_le32(hr_cq->db.dma >> 32);

	/* Default interrupt moderation: burst count and period */
	roce_set_field(cq_context->byte_56_cqe_period_maxcnt,
		       V2_CQC_BYTE_56_CQ_MAX_CNT_M,
		       V2_CQC_BYTE_56_CQ_MAX_CNT_S,
		       HNS_ROCE_V2_CQ_DEFAULT_BURST_NUM);
	roce_set_field(cq_context->byte_56_cqe_period_maxcnt,
		       V2_CQC_BYTE_56_CQ_PERIOD_M,
		       V2_CQC_BYTE_56_CQ_PERIOD_S,
		       HNS_ROCE_V2_CQ_DEFAULT_INTERVAL);
}
2882
/*
 * Arm the CQ for a completion notification by ringing the CQ doorbell.
 * @flags selects solicited-only vs. next-completion notification.
 */
static int hns_roce_v2_req_notify_cq(struct ib_cq *ibcq,
				     enum ib_cq_notify_flags flags)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibcq->device);
	struct hns_roce_cq *hr_cq = to_hr_cq(ibcq);
	u32 notification_flag;
	__le32 doorbell[2];

	doorbell[0] = 0;
	doorbell[1] = 0;

	notification_flag = (flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED ?
			    V2_CQ_DB_REQ_NOT : V2_CQ_DB_REQ_NOT_SOL;
	/*
	 * flags = 0: Notification Flag = 1, notify on next completion
	 * flags = 1: Notification Flag = 0, notify on solicited only
	 */
	roce_set_field(doorbell[0], V2_CQ_DB_BYTE_4_TAG_M, V2_DB_BYTE_4_TAG_S,
		       hr_cq->cqn);
	roce_set_field(doorbell[0], V2_CQ_DB_BYTE_4_CMD_M, V2_DB_BYTE_4_CMD_S,
		       HNS_ROCE_V2_CQ_DB_NTR);
	/* Consumer index is reported modulo twice the CQ depth */
	roce_set_field(doorbell[1], V2_CQ_DB_PARAMETER_CONS_IDX_M,
		       V2_CQ_DB_PARAMETER_CONS_IDX_S,
		       hr_cq->cons_index & ((hr_cq->cq_depth << 1) - 1));
	roce_set_field(doorbell[1], V2_CQ_DB_PARAMETER_CMD_SN_M,
		       V2_CQ_DB_PARAMETER_CMD_SN_S, hr_cq->arm_sn & 0x3);
	roce_set_bit(doorbell[1], V2_CQ_DB_PARAMETER_NOTIFY_S,
		     notification_flag);

	hns_roce_write64(hr_dev, doorbell, hr_cq->cq_db_l);

	return 0;
}
2916
0009c2db 2917static int hns_roce_handle_recv_inl_wqe(struct hns_roce_v2_cqe *cqe,
2918 struct hns_roce_qp **cur_qp,
2919 struct ib_wc *wc)
2920{
2921 struct hns_roce_rinl_sge *sge_list;
2922 u32 wr_num, wr_cnt, sge_num;
2923 u32 sge_cnt, data_len, size;
2924 void *wqe_buf;
2925
2926 wr_num = roce_get_field(cqe->byte_4, V2_CQE_BYTE_4_WQE_INDX_M,
2927 V2_CQE_BYTE_4_WQE_INDX_S) & 0xffff;
2928 wr_cnt = wr_num & ((*cur_qp)->rq.wqe_cnt - 1);
2929
2930 sge_list = (*cur_qp)->rq_inl_buf.wqe_list[wr_cnt].sg_list;
2931 sge_num = (*cur_qp)->rq_inl_buf.wqe_list[wr_cnt].sge_cnt;
2932 wqe_buf = get_recv_wqe(*cur_qp, wr_cnt);
2933 data_len = wc->byte_len;
2934
2935 for (sge_cnt = 0; (sge_cnt < sge_num) && (data_len); sge_cnt++) {
2936 size = min(sge_list[sge_cnt].len, data_len);
2937 memcpy((void *)sge_list[sge_cnt].addr, wqe_buf, size);
2938
2939 data_len -= size;
2940 wqe_buf += size;
2941 }
2942
2943 if (data_len) {
2944 wc->status = IB_WC_LOC_LEN_ERR;
2945 return -EAGAIN;
2946 }
2947
2948 return 0;
2949}
2950
626903e9
XW
2951static int sw_comp(struct hns_roce_qp *hr_qp, struct hns_roce_wq *wq,
2952 int num_entries, struct ib_wc *wc)
2953{
2954 unsigned int left;
2955 int npolled = 0;
2956
2957 left = wq->head - wq->tail;
2958 if (left == 0)
2959 return 0;
2960
2961 left = min_t(unsigned int, (unsigned int)num_entries, left);
2962 while (npolled < left) {
2963 wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
2964 wc->status = IB_WC_WR_FLUSH_ERR;
2965 wc->vendor_err = 0;
2966 wc->qp = &hr_qp->ibqp;
2967
2968 wq->tail++;
2969 wc++;
2970 npolled++;
2971 }
2972
2973 return npolled;
2974}
2975
2976static int hns_roce_v2_sw_poll_cq(struct hns_roce_cq *hr_cq, int num_entries,
2977 struct ib_wc *wc)
2978{
2979 struct hns_roce_qp *hr_qp;
2980 int npolled = 0;
2981
2982 list_for_each_entry(hr_qp, &hr_cq->sq_list, sq_node) {
2983 npolled += sw_comp(hr_qp, &hr_qp->sq,
2984 num_entries - npolled, wc + npolled);
2985 if (npolled >= num_entries)
2986 goto out;
2987 }
2988
2989 list_for_each_entry(hr_qp, &hr_cq->rq_list, rq_node) {
2990 npolled += sw_comp(hr_qp, &hr_qp->rq,
2991 num_entries - npolled, wc + npolled);
2992 if (npolled >= num_entries)
2993 goto out;
2994 }
2995
2996out:
2997 return npolled;
2998}
2999
93aa2187
WHX
/*
 * Consume one CQE from @hr_cq and translate it into the ib_wc @wc.
 * @cur_qp caches the QP of the previous CQE to avoid repeated lookups.
 * Returns 0 on success, -EAGAIN when no CQE is available, -EINVAL when
 * the CQE references an unknown QPN.
 */
static int hns_roce_v2_poll_one(struct hns_roce_cq *hr_cq,
				struct hns_roce_qp **cur_qp, struct ib_wc *wc)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(hr_cq->ib_cq.device);
	struct hns_roce_srq *srq = NULL;
	struct hns_roce_v2_cqe *cqe;
	struct hns_roce_qp *hr_qp;
	struct hns_roce_wq *wq;
	int is_send;
	u16 wqe_ctr;
	u32 opcode;
	u32 status;
	int qpn;
	int ret;

	/* Find cqe according to consumer index */
	cqe = next_cqe_sw_v2(hr_cq);
	if (!cqe)
		return -EAGAIN;

	++hr_cq->cons_index;
	/* Ensure the CQE contents are read only after the owner bit check */
	rmb();

	/* 0->SQ, 1->RQ */
	is_send = !roce_get_bit(cqe->byte_4, V2_CQE_BYTE_4_S_R_S);

	qpn = roce_get_field(cqe->byte_16, V2_CQE_BYTE_16_LCL_QPN_M,
			     V2_CQE_BYTE_16_LCL_QPN_S);

	/* Refresh the cached QP pointer when the CQE targets another QP */
	if (!*cur_qp || (qpn & HNS_ROCE_V2_CQE_QPN_MASK) != (*cur_qp)->qpn) {
		hr_qp = __hns_roce_qp_lookup(hr_dev, qpn);
		if (unlikely(!hr_qp)) {
			dev_err(hr_dev->dev, "CQ %06lx with entry for unknown QPN %06x\n",
				hr_cq->cqn, (qpn & HNS_ROCE_V2_CQE_QPN_MASK));
			return -EINVAL;
		}
		*cur_qp = hr_qp;
	}

	hr_qp = *cur_qp;
	wc->qp = &(*cur_qp)->ibqp;
	wc->vendor_err = 0;

	if (is_send) {
		wq = &(*cur_qp)->sq;
		if ((*cur_qp)->sq_signal_bits) {
			/*
			 * If sg_signal_bit is 1,
			 * firstly tail pointer updated to wqe
			 * which current cqe correspond to
			 */
			wqe_ctr = (u16)roce_get_field(cqe->byte_4,
						      V2_CQE_BYTE_4_WQE_INDX_M,
						      V2_CQE_BYTE_4_WQE_INDX_S);
			wq->tail += (wqe_ctr - (u16)wq->tail) &
				    (wq->wqe_cnt - 1);
		}

		wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
		++wq->tail;
	} else if ((*cur_qp)->ibqp.srq) {
		/* SRQ receive: the CQE names the exact WQE to recycle */
		srq = to_hr_srq((*cur_qp)->ibqp.srq);
		wqe_ctr = (u16)roce_get_field(cqe->byte_4,
					      V2_CQE_BYTE_4_WQE_INDX_M,
					      V2_CQE_BYTE_4_WQE_INDX_S);
		wc->wr_id = srq->wrid[wqe_ctr];
		hns_roce_free_srq_wqe(srq, wqe_ctr);
	} else {
		/* Update tail pointer, record wr_id */
		wq = &(*cur_qp)->rq;
		wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
		++wq->tail;
	}

	/* Map the hardware CQE status onto the ib_wc status codes */
	status = roce_get_field(cqe->byte_4, V2_CQE_BYTE_4_STATUS_M,
				V2_CQE_BYTE_4_STATUS_S);
	switch (status & HNS_ROCE_V2_CQE_STATUS_MASK) {
	case HNS_ROCE_CQE_V2_SUCCESS:
		wc->status = IB_WC_SUCCESS;
		break;
	case HNS_ROCE_CQE_V2_LOCAL_LENGTH_ERR:
		wc->status = IB_WC_LOC_LEN_ERR;
		break;
	case HNS_ROCE_CQE_V2_LOCAL_QP_OP_ERR:
		wc->status = IB_WC_LOC_QP_OP_ERR;
		break;
	case HNS_ROCE_CQE_V2_LOCAL_PROT_ERR:
		wc->status = IB_WC_LOC_PROT_ERR;
		break;
	case HNS_ROCE_CQE_V2_WR_FLUSH_ERR:
		wc->status = IB_WC_WR_FLUSH_ERR;
		break;
	case HNS_ROCE_CQE_V2_MW_BIND_ERR:
		wc->status = IB_WC_MW_BIND_ERR;
		break;
	case HNS_ROCE_CQE_V2_BAD_RESP_ERR:
		wc->status = IB_WC_BAD_RESP_ERR;
		break;
	case HNS_ROCE_CQE_V2_LOCAL_ACCESS_ERR:
		wc->status = IB_WC_LOC_ACCESS_ERR;
		break;
	case HNS_ROCE_CQE_V2_REMOTE_INVAL_REQ_ERR:
		wc->status = IB_WC_REM_INV_REQ_ERR;
		break;
	case HNS_ROCE_CQE_V2_REMOTE_ACCESS_ERR:
		wc->status = IB_WC_REM_ACCESS_ERR;
		break;
	case HNS_ROCE_CQE_V2_REMOTE_OP_ERR:
		wc->status = IB_WC_REM_OP_ERR;
		break;
	case HNS_ROCE_CQE_V2_TRANSPORT_RETRY_EXC_ERR:
		wc->status = IB_WC_RETRY_EXC_ERR;
		break;
	case HNS_ROCE_CQE_V2_RNR_RETRY_EXC_ERR:
		wc->status = IB_WC_RNR_RETRY_EXC_ERR;
		break;
	case HNS_ROCE_CQE_V2_REMOTE_ABORT_ERR:
		wc->status = IB_WC_REM_ABORT_ERR;
		break;
	default:
		wc->status = IB_WC_GENERAL_ERR;
		break;
	}

	/*
	 * Hip08 hardware cannot flush the WQEs in SQ/RQ if the QP state gets
	 * into errored mode. Hence, as a workaround to this hardware
	 * limitation, driver needs to assist in flushing. But the flushing
	 * operation uses mailbox to convey the QP state to the hardware and
	 * which can sleep due to the mutex protection around the mailbox calls.
	 * Hence, use the deferred flush for now. Once wc error detected, the
	 * flushing operation is needed.
	 */
	if (wc->status != IB_WC_SUCCESS &&
	    wc->status != IB_WC_WR_FLUSH_ERR) {
		dev_err(hr_dev->dev, "error cqe status is: 0x%x\n",
			status & HNS_ROCE_V2_CQE_STATUS_MASK);

		/* Schedule the flush work only once per QP */
		if (!test_and_set_bit(HNS_ROCE_FLUSH_FLAG, &hr_qp->flush_flag))
			init_flush_work(hr_dev, hr_qp);

		return 0;
	}

	/* Flush-error completions carry no further information to decode */
	if (wc->status == IB_WC_WR_FLUSH_ERR)
		return 0;

	if (is_send) {
		wc->wc_flags = 0;
		/* SQ corresponding to CQE */
		switch (roce_get_field(cqe->byte_4, V2_CQE_BYTE_4_OPCODE_M,
				       V2_CQE_BYTE_4_OPCODE_S) & 0x1f) {
		case HNS_ROCE_SQ_OPCODE_SEND:
			wc->opcode = IB_WC_SEND;
			break;
		case HNS_ROCE_SQ_OPCODE_SEND_WITH_INV:
			wc->opcode = IB_WC_SEND;
			break;
		case HNS_ROCE_SQ_OPCODE_SEND_WITH_IMM:
			wc->opcode = IB_WC_SEND;
			wc->wc_flags |= IB_WC_WITH_IMM;
			break;
		case HNS_ROCE_SQ_OPCODE_RDMA_READ:
			wc->opcode = IB_WC_RDMA_READ;
			wc->byte_len = le32_to_cpu(cqe->byte_cnt);
			break;
		case HNS_ROCE_SQ_OPCODE_RDMA_WRITE:
			wc->opcode = IB_WC_RDMA_WRITE;
			break;
		case HNS_ROCE_SQ_OPCODE_RDMA_WRITE_WITH_IMM:
			wc->opcode = IB_WC_RDMA_WRITE;
			wc->wc_flags |= IB_WC_WITH_IMM;
			break;
		case HNS_ROCE_SQ_OPCODE_LOCAL_INV:
			wc->opcode = IB_WC_LOCAL_INV;
			wc->wc_flags |= IB_WC_WITH_INVALIDATE;
			break;
		case HNS_ROCE_SQ_OPCODE_ATOMIC_COMP_AND_SWAP:
			wc->opcode = IB_WC_COMP_SWAP;
			wc->byte_len  = 8;
			break;
		case HNS_ROCE_SQ_OPCODE_ATOMIC_FETCH_AND_ADD:
			wc->opcode = IB_WC_FETCH_ADD;
			wc->byte_len  = 8;
			break;
		case HNS_ROCE_SQ_OPCODE_ATOMIC_MASK_COMP_AND_SWAP:
			wc->opcode = IB_WC_MASKED_COMP_SWAP;
			wc->byte_len  = 8;
			break;
		case HNS_ROCE_SQ_OPCODE_ATOMIC_MASK_FETCH_AND_ADD:
			wc->opcode = IB_WC_MASKED_FETCH_ADD;
			wc->byte_len  = 8;
			break;
		case HNS_ROCE_SQ_OPCODE_FAST_REG_WR:
			wc->opcode = IB_WC_REG_MR;
			break;
		case HNS_ROCE_SQ_OPCODE_BIND_MW:
			wc->opcode = IB_WC_REG_MR;
			break;
		default:
			wc->status = IB_WC_GENERAL_ERR;
			break;
		}
	} else {
		/* RQ correspond to CQE */
		wc->byte_len = le32_to_cpu(cqe->byte_cnt);

		opcode = roce_get_field(cqe->byte_4, V2_CQE_BYTE_4_OPCODE_M,
					V2_CQE_BYTE_4_OPCODE_S);
		switch (opcode & 0x1f) {
		case HNS_ROCE_V2_OPCODE_RDMA_WRITE_IMM:
			wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
			wc->wc_flags = IB_WC_WITH_IMM;
			wc->ex.imm_data =
				cpu_to_be32(le32_to_cpu(cqe->immtdata));
			break;
		case HNS_ROCE_V2_OPCODE_SEND:
			wc->opcode = IB_WC_RECV;
			wc->wc_flags = 0;
			break;
		case HNS_ROCE_V2_OPCODE_SEND_WITH_IMM:
			wc->opcode = IB_WC_RECV;
			wc->wc_flags = IB_WC_WITH_IMM;
			wc->ex.imm_data =
				cpu_to_be32(le32_to_cpu(cqe->immtdata));
			break;
		case HNS_ROCE_V2_OPCODE_SEND_WITH_INV:
			wc->opcode = IB_WC_RECV;
			wc->wc_flags = IB_WC_WITH_INVALIDATE;
			wc->ex.invalidate_rkey = le32_to_cpu(cqe->rkey);
			break;
		default:
			wc->status = IB_WC_GENERAL_ERR;
			break;
		}

		/* Payload of an inline receive lives in the WQE itself and
		 * must be copied out to the consumer's SGEs.
		 */
		if ((wc->qp->qp_type == IB_QPT_RC ||
		     wc->qp->qp_type == IB_QPT_UC) &&
		    (opcode == HNS_ROCE_V2_OPCODE_SEND ||
		     opcode == HNS_ROCE_V2_OPCODE_SEND_WITH_IMM ||
		     opcode == HNS_ROCE_V2_OPCODE_SEND_WITH_INV) &&
		    (roce_get_bit(cqe->byte_4, V2_CQE_BYTE_4_RQ_INLINE_S))) {
			ret = hns_roce_handle_recv_inl_wqe(cqe, cur_qp, wc);
			if (ret)
				return -EAGAIN;
		}

		/* Remote endpoint/path attributes reported by the CQE */
		wc->sl = (u8)roce_get_field(cqe->byte_32, V2_CQE_BYTE_32_SL_M,
					    V2_CQE_BYTE_32_SL_S);
		wc->src_qp = (u8)roce_get_field(cqe->byte_32,
						V2_CQE_BYTE_32_RMT_QPN_M,
						V2_CQE_BYTE_32_RMT_QPN_S);
		wc->slid = 0;
		wc->wc_flags |= (roce_get_bit(cqe->byte_32,
					      V2_CQE_BYTE_32_GRH_S) ?
					      IB_WC_GRH : 0);
		wc->port_num = roce_get_field(cqe->byte_32,
				V2_CQE_BYTE_32_PORTN_M, V2_CQE_BYTE_32_PORTN_S);
		wc->pkey_index = 0;
		/* Source MAC: bytes 0-3 from cqe->smac, 4-5 from byte_28 */
		memcpy(wc->smac, cqe->smac, 4);
		wc->smac[4] = roce_get_field(cqe->byte_28,
					     V2_CQE_BYTE_28_SMAC_4_M,
					     V2_CQE_BYTE_28_SMAC_4_S);
		wc->smac[5] = roce_get_field(cqe->byte_28,
					     V2_CQE_BYTE_28_SMAC_5_M,
					     V2_CQE_BYTE_28_SMAC_5_S);
		wc->wc_flags |= IB_WC_WITH_SMAC;
		if (roce_get_bit(cqe->byte_28, V2_CQE_BYTE_28_VID_VLD_S)) {
			wc->vlan_id = (u16)roce_get_field(cqe->byte_28,
							  V2_CQE_BYTE_28_VID_M,
							  V2_CQE_BYTE_28_VID_S);
			wc->wc_flags |= IB_WC_WITH_VLAN;
		} else {
			wc->vlan_id = 0xffff;
		}

		wc->network_hdr_type = roce_get_field(cqe->byte_28,
						    V2_CQE_BYTE_28_PORT_TYPE_M,
						    V2_CQE_BYTE_28_PORT_TYPE_S);
	}

	return 0;
}
3284
/*
 * ib_poll_cq() implementation: harvest up to @num_entries completions
 * into @wc. Falls back to pure software polling once the device has
 * finished resetting.
 */
static int hns_roce_v2_poll_cq(struct ib_cq *ibcq, int num_entries,
			       struct ib_wc *wc)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibcq->device);
	struct hns_roce_cq *hr_cq = to_hr_cq(ibcq);
	struct hns_roce_qp *cur_qp = NULL;
	unsigned long flags;
	int npolled;

	spin_lock_irqsave(&hr_cq->lock, flags);

	/*
	 * When the device starts to reset, the state is RST_DOWN. At this time,
	 * there may still be some valid CQEs in the hardware that are not
	 * polled. Therefore, it is not allowed to switch to the software mode
	 * immediately. When the state changes to UNINIT, CQE no longer exists
	 * in the hardware, and then switch to software mode.
	 */
	if (hr_dev->state == HNS_ROCE_DEVICE_STATE_UNINIT) {
		npolled = hns_roce_v2_sw_poll_cq(hr_cq, num_entries, wc);
		goto out;
	}

	for (npolled = 0; npolled < num_entries; ++npolled) {
		if (hns_roce_v2_poll_one(hr_cq, &cur_qp, wc + npolled))
			break;
	}

	if (npolled) {
		/* Order CQE consumption before the consumer-index update */
		wmb();
		hns_roce_v2_cq_set_ci(hr_cq, hr_cq->cons_index);
	}

out:
	spin_unlock_irqrestore(&hr_cq->lock, flags);

	return npolled;
}
3324
260c3b34
YL
3325static int get_op_for_set_hem(struct hns_roce_dev *hr_dev, u32 type,
3326 int step_idx)
3327{
3328 int op;
3329
3330 if (type == HEM_TYPE_SCCC && step_idx)
3331 return -EINVAL;
3332
3333 switch (type) {
3334 case HEM_TYPE_QPC:
3335 op = HNS_ROCE_CMD_WRITE_QPC_BT0;
3336 break;
3337 case HEM_TYPE_MTPT:
3338 op = HNS_ROCE_CMD_WRITE_MPT_BT0;
3339 break;
3340 case HEM_TYPE_CQC:
3341 op = HNS_ROCE_CMD_WRITE_CQC_BT0;
3342 break;
3343 case HEM_TYPE_SRQC:
3344 op = HNS_ROCE_CMD_WRITE_SRQC_BT0;
3345 break;
3346 case HEM_TYPE_SCCC:
3347 op = HNS_ROCE_CMD_WRITE_SCCC_BT0;
3348 break;
3349 case HEM_TYPE_QPC_TIMER:
3350 op = HNS_ROCE_CMD_WRITE_QPC_TIMER_BT0;
3351 break;
3352 case HEM_TYPE_CQC_TIMER:
3353 op = HNS_ROCE_CMD_WRITE_CQC_TIMER_BT0;
3354 break;
3355 default:
3356 dev_warn(hr_dev->dev,
3357 "Table %d not to be written by mailbox!\n", type);
3358 return -EINVAL;
3359 }
3360
3361 return op + step_idx;
3362}
3363
a81fba28
WHX
/*
 * Program one step of a multi-hop HEM table entry's base-address chain
 * into hardware via mailbox. @step_idx selects which level of the BT
 * hierarchy (L0/L1/last) is being written. Returns 0 when the table
 * type needs no mailbox write, or the mailbox status otherwise.
 */
static int hns_roce_v2_set_hem(struct hns_roce_dev *hr_dev,
			       struct hns_roce_hem_table *table, int obj,
			       int step_idx)
{
	struct hns_roce_cmd_mailbox *mailbox;
	struct hns_roce_hem_iter iter;
	struct hns_roce_hem_mhop mhop;
	struct hns_roce_hem *hem;
	unsigned long mhop_obj = obj;
	int i, j, k;
	int ret = 0;
	u64 hem_idx = 0;
	u64 l1_idx = 0;
	u64 bt_ba = 0;
	u32 chunk_ba_num;
	u32 hop_num;
	int op;

	if (!hns_roce_check_whether_mhop(hr_dev, table->type))
		return 0;

	/* Decompose @obj into per-level (L0/L1/L2) table indices */
	hns_roce_calc_hem_mhop(hr_dev, table, &mhop_obj, &mhop);
	i = mhop.l0_idx;
	j = mhop.l1_idx;
	k = mhop.l2_idx;
	hop_num = mhop.hop_num;
	chunk_ba_num = mhop.bt_chunk_size / 8;

	/* Flatten the multi-level indices into a linear hem[] index */
	if (hop_num == 2) {
		hem_idx = i * chunk_ba_num * chunk_ba_num + j * chunk_ba_num +
			  k;
		l1_idx = i * chunk_ba_num + j;
	} else if (hop_num == 1) {
		hem_idx = i * chunk_ba_num + j;
	} else if (hop_num == HNS_ROCE_HOP_NUM_0) {
		hem_idx = i;
	}

	op = get_op_for_set_hem(hr_dev, table->type, step_idx);
	if (op == -EINVAL)
		return 0;

	mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	/* SCCC is addressed by its L0 index rather than the object id */
	if (table->type == HEM_TYPE_SCCC)
		obj = mhop.l0_idx;

	if (check_whether_last_step(hop_num, step_idx)) {
		/* Last step: write the address of each data chunk */
		hem = table->hem[hem_idx];
		for (hns_roce_hem_first(hem, &iter);
		     !hns_roce_hem_last(&iter); hns_roce_hem_next(&iter)) {
			bt_ba = hns_roce_hem_addr(&iter);

			/* configure the ba, tag, and op */
			ret = hns_roce_cmd_mbox(hr_dev, bt_ba, mailbox->dma,
						obj, 0, op,
						HNS_ROCE_CMD_TIMEOUT_MSECS);
		}
	} else {
		/* Intermediate step: write the L0 or L1 BT address */
		if (step_idx == 0)
			bt_ba = table->bt_l0_dma_addr[i];
		else if (step_idx == 1 && hop_num == 2)
			bt_ba = table->bt_l1_dma_addr[l1_idx];

		/* configure the ba, tag, and op */
		ret = hns_roce_cmd_mbox(hr_dev, bt_ba, mailbox->dma, obj,
					0, op, HNS_ROCE_CMD_TIMEOUT_MSECS);
	}

	hns_roce_free_cmd_mailbox(hr_dev, mailbox);
	return ret;
}
3438
3439static int hns_roce_v2_clear_hem(struct hns_roce_dev *hr_dev,
3440 struct hns_roce_hem_table *table, int obj,
3441 int step_idx)
3442{
3443 struct device *dev = hr_dev->dev;
3444 struct hns_roce_cmd_mailbox *mailbox;
617cf24f 3445 int ret;
a81fba28
WHX
3446 u16 op = 0xff;
3447
3448 if (!hns_roce_check_whether_mhop(hr_dev, table->type))
3449 return 0;
3450
3451 switch (table->type) {
3452 case HEM_TYPE_QPC:
3453 op = HNS_ROCE_CMD_DESTROY_QPC_BT0;
3454 break;
3455 case HEM_TYPE_MTPT:
3456 op = HNS_ROCE_CMD_DESTROY_MPT_BT0;
3457 break;
3458 case HEM_TYPE_CQC:
3459 op = HNS_ROCE_CMD_DESTROY_CQC_BT0;
3460 break;
6a157f7d 3461 case HEM_TYPE_SCCC:
0e40dc2f
YL
3462 case HEM_TYPE_QPC_TIMER:
3463 case HEM_TYPE_CQC_TIMER:
6a157f7d 3464 break;
a81fba28
WHX
3465 case HEM_TYPE_SRQC:
3466 op = HNS_ROCE_CMD_DESTROY_SRQC_BT0;
3467 break;
3468 default:
3469 dev_warn(dev, "Table %d not to be destroyed by mailbox!\n",
3470 table->type);
3471 return 0;
3472 }
6a157f7d 3473
0e40dc2f
YL
3474 if (table->type == HEM_TYPE_SCCC ||
3475 table->type == HEM_TYPE_QPC_TIMER ||
3476 table->type == HEM_TYPE_CQC_TIMER)
6a157f7d
YL
3477 return 0;
3478
a81fba28
WHX
3479 op += step_idx;
3480
3481 mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
3482 if (IS_ERR(mailbox))
3483 return PTR_ERR(mailbox);
3484
3485 /* configure the tag and op */
3486 ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, obj, 0, op,
3487 HNS_ROCE_CMD_TIMEOUT_MSECS);
3488
3489 hns_roce_free_cmd_mailbox(hr_dev, mailbox);
3490 return ret;
3491}
3492
/*
 * Post a MODIFY_QPC mailbox command for @hr_qp.
 * NOTE(review): the memcpy of sizeof(*context) * 2 implies @context points
 * at two consecutive contexts (presumably the new QPC followed by its
 * mask) — confirm against the callers before relying on this.
 */
static int hns_roce_v2_qp_modify(struct hns_roce_dev *hr_dev,
				 struct hns_roce_v2_qp_context *context,
				 struct hns_roce_qp *hr_qp)
{
	struct hns_roce_cmd_mailbox *mailbox;
	int ret;

	mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	memcpy(mailbox->buf, context, sizeof(*context) * 2);

	ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, hr_qp->qpn, 0,
				HNS_ROCE_CMD_MODIFY_QPC,
				HNS_ROCE_CMD_TIMEOUT_MSECS);

	hns_roce_free_cmd_mailbox(hr_dev, mailbox);

	return ret;
}
3514
/*
 * Program the QPC remote-access enable bits (RRE/RWE/ATE/EXT_ATE) from
 * the requested attributes, clearing the matching mask bits so hardware
 * applies the new values.
 */
static void set_access_flags(struct hns_roce_qp *hr_qp,
			     struct hns_roce_v2_qp_context *context,
			     struct hns_roce_v2_qp_context *qpc_mask,
			     const struct ib_qp_attr *attr, int attr_mask)
{
	u8 dest_rd_atomic;
	u32 access_flags;

	/* For attributes not present in this modify call, fall back to the
	 * values cached on the QP.
	 */
	dest_rd_atomic = (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) ?
			 attr->max_dest_rd_atomic : hr_qp->resp_depth;

	access_flags = (attr_mask & IB_QP_ACCESS_FLAGS) ?
		       attr->qp_access_flags : hr_qp->atomic_rd_en;

	/* Without inbound RDMA-read/atomic resources, keep only the
	 * remote-write permission bit.
	 */
	if (!dest_rd_atomic)
		access_flags &= IB_ACCESS_REMOTE_WRITE;

	roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RRE_S,
		     !!(access_flags & IB_ACCESS_REMOTE_READ));
	roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RRE_S, 0);

	roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RWE_S,
		     !!(access_flags & IB_ACCESS_REMOTE_WRITE));
	roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RWE_S, 0);

	roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_ATE_S,
		     !!(access_flags & IB_ACCESS_REMOTE_ATOMIC));
	roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_ATE_S, 0);
	roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_EXT_ATE_S,
		     !!(access_flags & IB_ACCESS_REMOTE_ATOMIC));
	roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_EXT_ATE_S, 0);
}
3547
99441ab5
XW
/*
 * Write the SGE/SQ/RQ depth shifts (log2 of the queue sizes) into the
 * QPC and clear the matching mask fields.
 */
static void set_qpc_wqe_cnt(struct hns_roce_qp *hr_qp,
			    struct hns_roce_v2_qp_context *context,
			    struct hns_roce_v2_qp_context *qpc_mask)
{
	/* GSI QPs always use the extended SGE space; UC/RC only when the
	 * WQE's inline SGE slots are not sufficient.
	 */
	if (hr_qp->ibqp.qp_type == IB_QPT_GSI)
		roce_set_field(context->byte_4_sqpn_tst,
			       V2_QPC_BYTE_4_SGE_SHIFT_M,
			       V2_QPC_BYTE_4_SGE_SHIFT_S,
			       ilog2((unsigned int)hr_qp->sge.sge_cnt));
	else
		roce_set_field(context->byte_4_sqpn_tst,
			       V2_QPC_BYTE_4_SGE_SHIFT_M,
			       V2_QPC_BYTE_4_SGE_SHIFT_S,
			       hr_qp->sq.max_gs >
			       HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE ?
			       ilog2((unsigned int)hr_qp->sge.sge_cnt) : 0);

	roce_set_field(qpc_mask->byte_4_sqpn_tst, V2_QPC_BYTE_4_SGE_SHIFT_M,
		       V2_QPC_BYTE_4_SGE_SHIFT_S, 0);

	roce_set_field(context->byte_20_smac_sgid_idx,
		       V2_QPC_BYTE_20_SQ_SHIFT_M, V2_QPC_BYTE_20_SQ_SHIFT_S,
		       ilog2((unsigned int)hr_qp->sq.wqe_cnt));
	roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
		       V2_QPC_BYTE_20_SQ_SHIFT_M, V2_QPC_BYTE_20_SQ_SHIFT_S, 0);

	/* XRC QPs and QPs on an SRQ own no RQ, so their RQ shift is 0 */
	roce_set_field(context->byte_20_smac_sgid_idx,
		       V2_QPC_BYTE_20_RQ_SHIFT_M, V2_QPC_BYTE_20_RQ_SHIFT_S,
		       (hr_qp->ibqp.qp_type == IB_QPT_XRC_INI ||
		       hr_qp->ibqp.qp_type == IB_QPT_XRC_TGT ||
		       hr_qp->ibqp.srq) ? 0 :
		       ilog2((unsigned int)hr_qp->rq.wqe_cnt));

	roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
		       V2_QPC_BYTE_20_RQ_SHIFT_M, V2_QPC_BYTE_20_RQ_SHIFT_S, 0);
}
3584
926a01dc
WHX
3585static void modify_qp_reset_to_init(struct ib_qp *ibqp,
3586 const struct ib_qp_attr *attr,
0fa95a9a 3587 int attr_mask,
926a01dc
WHX
3588 struct hns_roce_v2_qp_context *context,
3589 struct hns_roce_v2_qp_context *qpc_mask)
3590{
ecaaf1e2 3591 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
926a01dc
WHX
3592 struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
3593
3594 /*
3595 * In v2 engine, software pass context and context mask to hardware
3596 * when modifying qp. If software need modify some fields in context,
3597 * we should set all bits of the relevant fields in context mask to
3598 * 0 at the same time, else set them to 0x1.
3599 */
3600 roce_set_field(context->byte_4_sqpn_tst, V2_QPC_BYTE_4_TST_M,
3601 V2_QPC_BYTE_4_TST_S, to_hr_qp_type(hr_qp->ibqp.qp_type));
3602 roce_set_field(qpc_mask->byte_4_sqpn_tst, V2_QPC_BYTE_4_TST_M,
3603 V2_QPC_BYTE_4_TST_S, 0);
3604
926a01dc
WHX
3605 roce_set_field(context->byte_4_sqpn_tst, V2_QPC_BYTE_4_SQPN_M,
3606 V2_QPC_BYTE_4_SQPN_S, hr_qp->qpn);
3607 roce_set_field(qpc_mask->byte_4_sqpn_tst, V2_QPC_BYTE_4_SQPN_M,
3608 V2_QPC_BYTE_4_SQPN_S, 0);
3609
3610 roce_set_field(context->byte_16_buf_ba_pg_sz, V2_QPC_BYTE_16_PD_M,
3611 V2_QPC_BYTE_16_PD_S, to_hr_pd(ibqp->pd)->pdn);
3612 roce_set_field(qpc_mask->byte_16_buf_ba_pg_sz, V2_QPC_BYTE_16_PD_M,
3613 V2_QPC_BYTE_16_PD_S, 0);
3614
3615 roce_set_field(context->byte_20_smac_sgid_idx, V2_QPC_BYTE_20_RQWS_M,
3616 V2_QPC_BYTE_20_RQWS_S, ilog2(hr_qp->rq.max_gs));
3617 roce_set_field(qpc_mask->byte_20_smac_sgid_idx, V2_QPC_BYTE_20_RQWS_M,
3618 V2_QPC_BYTE_20_RQWS_S, 0);
3619
99441ab5 3620 set_qpc_wqe_cnt(hr_qp, context, qpc_mask);
926a01dc
WHX
3621
3622 /* No VLAN need to set 0xFFF */
c8e46f8d
LO
3623 roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_VLAN_ID_M,
3624 V2_QPC_BYTE_24_VLAN_ID_S, 0xfff);
3625 roce_set_field(qpc_mask->byte_24_mtu_tc, V2_QPC_BYTE_24_VLAN_ID_M,
3626 V2_QPC_BYTE_24_VLAN_ID_S, 0);
926a01dc
WHX
3627
3628 /*
3629 * Set some fields in context to zero, Because the default values
3630 * of all fields in context are zero, we need not set them to 0 again.
3631 * but we should set the relevant fields of context mask to 0.
3632 */
3633 roce_set_bit(qpc_mask->byte_56_dqpn_err, V2_QPC_BYTE_56_SQ_TX_ERR_S, 0);
3634 roce_set_bit(qpc_mask->byte_56_dqpn_err, V2_QPC_BYTE_56_SQ_RX_ERR_S, 0);
3635 roce_set_bit(qpc_mask->byte_56_dqpn_err, V2_QPC_BYTE_56_RQ_TX_ERR_S, 0);
3636 roce_set_bit(qpc_mask->byte_56_dqpn_err, V2_QPC_BYTE_56_RQ_RX_ERR_S, 0);
3637
2362ccee
LO
3638 roce_set_field(qpc_mask->byte_60_qpst_tempid, V2_QPC_BYTE_60_TEMPID_M,
3639 V2_QPC_BYTE_60_TEMPID_S, 0);
3640
3641 roce_set_field(qpc_mask->byte_60_qpst_tempid,
3642 V2_QPC_BYTE_60_SCC_TOKEN_M, V2_QPC_BYTE_60_SCC_TOKEN_S,
3643 0);
3644 roce_set_bit(qpc_mask->byte_60_qpst_tempid,
3645 V2_QPC_BYTE_60_SQ_DB_DOING_S, 0);
3646 roce_set_bit(qpc_mask->byte_60_qpst_tempid,
3647 V2_QPC_BYTE_60_RQ_DB_DOING_S, 0);
926a01dc
WHX
3648 roce_set_bit(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_CNP_TX_FLAG_S, 0);
3649 roce_set_bit(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_CE_FLAG_S, 0);
3650
e088a685
YL
3651 if (hr_qp->rdb_en) {
3652 roce_set_bit(context->byte_68_rq_db,
3653 V2_QPC_BYTE_68_RQ_RECORD_EN_S, 1);
3654 roce_set_bit(qpc_mask->byte_68_rq_db,
3655 V2_QPC_BYTE_68_RQ_RECORD_EN_S, 0);
3656 }
3657
3658 roce_set_field(context->byte_68_rq_db,
3659 V2_QPC_BYTE_68_RQ_DB_RECORD_ADDR_M,
3660 V2_QPC_BYTE_68_RQ_DB_RECORD_ADDR_S,
3661 ((u32)hr_qp->rdb.dma) >> 1);
3662 roce_set_field(qpc_mask->byte_68_rq_db,
3663 V2_QPC_BYTE_68_RQ_DB_RECORD_ADDR_M,
3664 V2_QPC_BYTE_68_RQ_DB_RECORD_ADDR_S, 0);
bfe86035 3665 context->rq_db_record_addr = cpu_to_le32(hr_qp->rdb.dma >> 32);
e088a685
YL
3666 qpc_mask->rq_db_record_addr = 0;
3667
ecaaf1e2 3668 roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RQIE_S,
3669 (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) ? 1 : 0);
926a01dc
WHX
3670 roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RQIE_S, 0);
3671
3672 roce_set_field(context->byte_80_rnr_rx_cqn, V2_QPC_BYTE_80_RX_CQN_M,
3673 V2_QPC_BYTE_80_RX_CQN_S, to_hr_cq(ibqp->recv_cq)->cqn);
3674 roce_set_field(qpc_mask->byte_80_rnr_rx_cqn, V2_QPC_BYTE_80_RX_CQN_M,
3675 V2_QPC_BYTE_80_RX_CQN_S, 0);
3676 if (ibqp->srq) {
3677 roce_set_field(context->byte_76_srqn_op_en,
3678 V2_QPC_BYTE_76_SRQN_M, V2_QPC_BYTE_76_SRQN_S,
3679 to_hr_srq(ibqp->srq)->srqn);
3680 roce_set_field(qpc_mask->byte_76_srqn_op_en,
3681 V2_QPC_BYTE_76_SRQN_M, V2_QPC_BYTE_76_SRQN_S, 0);
3682 roce_set_bit(context->byte_76_srqn_op_en,
3683 V2_QPC_BYTE_76_SRQ_EN_S, 1);
3684 roce_set_bit(qpc_mask->byte_76_srqn_op_en,
3685 V2_QPC_BYTE_76_SRQ_EN_S, 0);
3686 }
3687
3688 roce_set_field(qpc_mask->byte_84_rq_ci_pi,
3689 V2_QPC_BYTE_84_RQ_PRODUCER_IDX_M,
3690 V2_QPC_BYTE_84_RQ_PRODUCER_IDX_S, 0);
3691 roce_set_field(qpc_mask->byte_84_rq_ci_pi,
3692 V2_QPC_BYTE_84_RQ_CONSUMER_IDX_M,
3693 V2_QPC_BYTE_84_RQ_CONSUMER_IDX_S, 0);
3694
3695 roce_set_field(qpc_mask->byte_92_srq_info, V2_QPC_BYTE_92_SRQ_INFO_M,
3696 V2_QPC_BYTE_92_SRQ_INFO_S, 0);
3697
3698 roce_set_field(qpc_mask->byte_96_rx_reqmsn, V2_QPC_BYTE_96_RX_REQ_MSN_M,
3699 V2_QPC_BYTE_96_RX_REQ_MSN_S, 0);
3700
3701 roce_set_field(qpc_mask->byte_104_rq_sge,
3702 V2_QPC_BYTE_104_RQ_CUR_WQE_SGE_NUM_M,
3703 V2_QPC_BYTE_104_RQ_CUR_WQE_SGE_NUM_S, 0);
3704
3705 roce_set_bit(qpc_mask->byte_108_rx_reqepsn,
3706 V2_QPC_BYTE_108_RX_REQ_PSN_ERR_S, 0);
3707 roce_set_field(qpc_mask->byte_108_rx_reqepsn,
3708 V2_QPC_BYTE_108_RX_REQ_LAST_OPTYPE_M,
3709 V2_QPC_BYTE_108_RX_REQ_LAST_OPTYPE_S, 0);
3710 roce_set_bit(qpc_mask->byte_108_rx_reqepsn,
3711 V2_QPC_BYTE_108_RX_REQ_RNR_S, 0);
3712
3713 qpc_mask->rq_rnr_timer = 0;
3714 qpc_mask->rx_msg_len = 0;
3715 qpc_mask->rx_rkey_pkt_info = 0;
3716 qpc_mask->rx_va = 0;
3717
3718 roce_set_field(qpc_mask->byte_132_trrl, V2_QPC_BYTE_132_TRRL_HEAD_MAX_M,
3719 V2_QPC_BYTE_132_TRRL_HEAD_MAX_S, 0);
3720 roce_set_field(qpc_mask->byte_132_trrl, V2_QPC_BYTE_132_TRRL_TAIL_MAX_M,
3721 V2_QPC_BYTE_132_TRRL_TAIL_MAX_S, 0);
3722
2362ccee
LO
3723 roce_set_bit(qpc_mask->byte_140_raq, V2_QPC_BYTE_140_RQ_RTY_WAIT_DO_S,
3724 0);
926a01dc
WHX
3725 roce_set_field(qpc_mask->byte_140_raq, V2_QPC_BYTE_140_RAQ_TRRL_HEAD_M,
3726 V2_QPC_BYTE_140_RAQ_TRRL_HEAD_S, 0);
3727 roce_set_field(qpc_mask->byte_140_raq, V2_QPC_BYTE_140_RAQ_TRRL_TAIL_M,
3728 V2_QPC_BYTE_140_RAQ_TRRL_TAIL_S, 0);
3729
3730 roce_set_field(qpc_mask->byte_144_raq,
3731 V2_QPC_BYTE_144_RAQ_RTY_INI_PSN_M,
3732 V2_QPC_BYTE_144_RAQ_RTY_INI_PSN_S, 0);
926a01dc
WHX
3733 roce_set_field(qpc_mask->byte_144_raq, V2_QPC_BYTE_144_RAQ_CREDIT_M,
3734 V2_QPC_BYTE_144_RAQ_CREDIT_S, 0);
3735 roce_set_bit(qpc_mask->byte_144_raq, V2_QPC_BYTE_144_RESP_RTY_FLG_S, 0);
3736
3737 roce_set_field(qpc_mask->byte_148_raq, V2_QPC_BYTE_148_RQ_MSN_M,
3738 V2_QPC_BYTE_148_RQ_MSN_S, 0);
3739 roce_set_field(qpc_mask->byte_148_raq, V2_QPC_BYTE_148_RAQ_SYNDROME_M,
3740 V2_QPC_BYTE_148_RAQ_SYNDROME_S, 0);
3741
3742 roce_set_field(qpc_mask->byte_152_raq, V2_QPC_BYTE_152_RAQ_PSN_M,
3743 V2_QPC_BYTE_152_RAQ_PSN_S, 0);
3744 roce_set_field(qpc_mask->byte_152_raq,
3745 V2_QPC_BYTE_152_RAQ_TRRL_RTY_HEAD_M,
3746 V2_QPC_BYTE_152_RAQ_TRRL_RTY_HEAD_S, 0);
3747
3748 roce_set_field(qpc_mask->byte_156_raq, V2_QPC_BYTE_156_RAQ_USE_PKTN_M,
3749 V2_QPC_BYTE_156_RAQ_USE_PKTN_S, 0);
3750
3751 roce_set_field(qpc_mask->byte_160_sq_ci_pi,
3752 V2_QPC_BYTE_160_SQ_PRODUCER_IDX_M,
3753 V2_QPC_BYTE_160_SQ_PRODUCER_IDX_S, 0);
3754 roce_set_field(qpc_mask->byte_160_sq_ci_pi,
3755 V2_QPC_BYTE_160_SQ_CONSUMER_IDX_M,
3756 V2_QPC_BYTE_160_SQ_CONSUMER_IDX_S, 0);
3757
2362ccee
LO
3758 roce_set_bit(qpc_mask->byte_168_irrl_idx,
3759 V2_QPC_BYTE_168_POLL_DB_WAIT_DO_S, 0);
3760 roce_set_bit(qpc_mask->byte_168_irrl_idx,
3761 V2_QPC_BYTE_168_SCC_TOKEN_FORBID_SQ_DEQ_S, 0);
3762 roce_set_bit(qpc_mask->byte_168_irrl_idx,
3763 V2_QPC_BYTE_168_WAIT_ACK_TIMEOUT_S, 0);
926a01dc
WHX
3764 roce_set_bit(qpc_mask->byte_168_irrl_idx,
3765 V2_QPC_BYTE_168_MSG_RTY_LP_FLG_S, 0);
b5fddb7c 3766 roce_set_bit(qpc_mask->byte_168_irrl_idx,
3767 V2_QPC_BYTE_168_SQ_INVLD_FLG_S, 0);
926a01dc
WHX
3768 roce_set_field(qpc_mask->byte_168_irrl_idx,
3769 V2_QPC_BYTE_168_IRRL_IDX_LSB_M,
3770 V2_QPC_BYTE_168_IRRL_IDX_LSB_S, 0);
3771
3772 roce_set_field(context->byte_172_sq_psn, V2_QPC_BYTE_172_ACK_REQ_FREQ_M,
3773 V2_QPC_BYTE_172_ACK_REQ_FREQ_S, 4);
3774 roce_set_field(qpc_mask->byte_172_sq_psn,
3775 V2_QPC_BYTE_172_ACK_REQ_FREQ_M,
3776 V2_QPC_BYTE_172_ACK_REQ_FREQ_S, 0);
3777
3778 roce_set_bit(qpc_mask->byte_172_sq_psn, V2_QPC_BYTE_172_MSG_RNR_FLG_S,
3779 0);
3780
68a997c5
YL
3781 roce_set_bit(context->byte_172_sq_psn, V2_QPC_BYTE_172_FRE_S, 1);
3782 roce_set_bit(qpc_mask->byte_172_sq_psn, V2_QPC_BYTE_172_FRE_S, 0);
3783
926a01dc
WHX
3784 roce_set_field(qpc_mask->byte_176_msg_pktn,
3785 V2_QPC_BYTE_176_MSG_USE_PKTN_M,
3786 V2_QPC_BYTE_176_MSG_USE_PKTN_S, 0);
3787 roce_set_field(qpc_mask->byte_176_msg_pktn,
3788 V2_QPC_BYTE_176_IRRL_HEAD_PRE_M,
3789 V2_QPC_BYTE_176_IRRL_HEAD_PRE_S, 0);
3790
3791 roce_set_field(qpc_mask->byte_184_irrl_idx,
3792 V2_QPC_BYTE_184_IRRL_IDX_MSB_M,
3793 V2_QPC_BYTE_184_IRRL_IDX_MSB_S, 0);
3794
3795 qpc_mask->cur_sge_offset = 0;
3796
3797 roce_set_field(qpc_mask->byte_192_ext_sge,
3798 V2_QPC_BYTE_192_CUR_SGE_IDX_M,
3799 V2_QPC_BYTE_192_CUR_SGE_IDX_S, 0);
3800 roce_set_field(qpc_mask->byte_192_ext_sge,
3801 V2_QPC_BYTE_192_EXT_SGE_NUM_LEFT_M,
3802 V2_QPC_BYTE_192_EXT_SGE_NUM_LEFT_S, 0);
3803
3804 roce_set_field(qpc_mask->byte_196_sq_psn, V2_QPC_BYTE_196_IRRL_HEAD_M,
3805 V2_QPC_BYTE_196_IRRL_HEAD_S, 0);
3806
3807 roce_set_field(qpc_mask->byte_200_sq_max, V2_QPC_BYTE_200_SQ_MAX_IDX_M,
3808 V2_QPC_BYTE_200_SQ_MAX_IDX_S, 0);
3809 roce_set_field(qpc_mask->byte_200_sq_max,
3810 V2_QPC_BYTE_200_LCL_OPERATED_CNT_M,
3811 V2_QPC_BYTE_200_LCL_OPERATED_CNT_S, 0);
3812
3813 roce_set_bit(qpc_mask->byte_208_irrl, V2_QPC_BYTE_208_PKT_RNR_FLG_S, 0);
3814 roce_set_bit(qpc_mask->byte_208_irrl, V2_QPC_BYTE_208_PKT_RTY_FLG_S, 0);
3815
3816 roce_set_field(qpc_mask->byte_212_lsn, V2_QPC_BYTE_212_CHECK_FLG_M,
3817 V2_QPC_BYTE_212_CHECK_FLG_S, 0);
3818
3819 qpc_mask->sq_timer = 0;
3820
3821 roce_set_field(qpc_mask->byte_220_retry_psn_msn,
3822 V2_QPC_BYTE_220_RETRY_MSG_MSN_M,
3823 V2_QPC_BYTE_220_RETRY_MSG_MSN_S, 0);
3824 roce_set_field(qpc_mask->byte_232_irrl_sge,
3825 V2_QPC_BYTE_232_IRRL_SGE_IDX_M,
3826 V2_QPC_BYTE_232_IRRL_SGE_IDX_S, 0);
3827
2362ccee
LO
3828 roce_set_bit(qpc_mask->byte_232_irrl_sge, V2_QPC_BYTE_232_SO_LP_VLD_S,
3829 0);
3830 roce_set_bit(qpc_mask->byte_232_irrl_sge,
3831 V2_QPC_BYTE_232_FENCE_LP_VLD_S, 0);
3832 roce_set_bit(qpc_mask->byte_232_irrl_sge, V2_QPC_BYTE_232_IRRL_LP_VLD_S,
3833 0);
3834
926a01dc
WHX
3835 qpc_mask->irrl_cur_sge_offset = 0;
3836
3837 roce_set_field(qpc_mask->byte_240_irrl_tail,
3838 V2_QPC_BYTE_240_IRRL_TAIL_REAL_M,
3839 V2_QPC_BYTE_240_IRRL_TAIL_REAL_S, 0);
3840 roce_set_field(qpc_mask->byte_240_irrl_tail,
3841 V2_QPC_BYTE_240_IRRL_TAIL_RD_M,
3842 V2_QPC_BYTE_240_IRRL_TAIL_RD_S, 0);
3843 roce_set_field(qpc_mask->byte_240_irrl_tail,
3844 V2_QPC_BYTE_240_RX_ACK_MSN_M,
3845 V2_QPC_BYTE_240_RX_ACK_MSN_S, 0);
3846
3847 roce_set_field(qpc_mask->byte_248_ack_psn, V2_QPC_BYTE_248_IRRL_PSN_M,
3848 V2_QPC_BYTE_248_IRRL_PSN_S, 0);
3849 roce_set_bit(qpc_mask->byte_248_ack_psn, V2_QPC_BYTE_248_ACK_PSN_ERR_S,
3850 0);
3851 roce_set_field(qpc_mask->byte_248_ack_psn,
3852 V2_QPC_BYTE_248_ACK_LAST_OPTYPE_M,
3853 V2_QPC_BYTE_248_ACK_LAST_OPTYPE_S, 0);
3854 roce_set_bit(qpc_mask->byte_248_ack_psn, V2_QPC_BYTE_248_IRRL_PSN_VLD_S,
3855 0);
3856 roce_set_bit(qpc_mask->byte_248_ack_psn,
3857 V2_QPC_BYTE_248_RNR_RETRY_FLAG_S, 0);
3858 roce_set_bit(qpc_mask->byte_248_ack_psn, V2_QPC_BYTE_248_CQ_ERR_IND_S,
3859 0);
3860
3861 hr_qp->access_flags = attr->qp_access_flags;
926a01dc
WHX
3862 roce_set_field(context->byte_252_err_txcqn, V2_QPC_BYTE_252_TX_CQN_M,
3863 V2_QPC_BYTE_252_TX_CQN_S, to_hr_cq(ibqp->send_cq)->cqn);
3864 roce_set_field(qpc_mask->byte_252_err_txcqn, V2_QPC_BYTE_252_TX_CQN_M,
3865 V2_QPC_BYTE_252_TX_CQN_S, 0);
3866
3867 roce_set_field(qpc_mask->byte_252_err_txcqn, V2_QPC_BYTE_252_ERR_TYPE_M,
3868 V2_QPC_BYTE_252_ERR_TYPE_S, 0);
3869
3870 roce_set_field(qpc_mask->byte_256_sqflush_rqcqe,
3871 V2_QPC_BYTE_256_RQ_CQE_IDX_M,
3872 V2_QPC_BYTE_256_RQ_CQE_IDX_S, 0);
3873 roce_set_field(qpc_mask->byte_256_sqflush_rqcqe,
3874 V2_QPC_BYTE_256_SQ_FLUSH_IDX_M,
3875 V2_QPC_BYTE_256_SQ_FLUSH_IDX_S, 0);
3876}
3877
/*
 * Program the QPC fields for an INIT -> INIT transition.
 *
 * Fills @context with the values to apply and clears the corresponding
 * bits in @qpc_mask (for this hardware, a zero mask bit means "take the
 * new value from context"; mask bits left at 1 keep the old value).
 * Only fields visible in this function are touched: transport type,
 * remote access rights, PD, RX/TX CQNs, optional SRQ binding, source
 * QPN and (optionally) destination QPN.
 */
static void modify_qp_init_to_init(struct ib_qp *ibqp,
				   const struct ib_qp_attr *attr, int attr_mask,
				   struct hns_roce_v2_qp_context *context,
				   struct hns_roce_v2_qp_context *qpc_mask)
{
	struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);

	/*
	 * In v2 engine, software pass context and context mask to hardware
	 * when modifying qp. If software need modify some fields in context,
	 * we should set all bits of the relevant fields in context mask to
	 * 0 at the same time, else set them to 0x1.
	 */
	roce_set_field(context->byte_4_sqpn_tst, V2_QPC_BYTE_4_TST_M,
		       V2_QPC_BYTE_4_TST_S, to_hr_qp_type(hr_qp->ibqp.qp_type));
	roce_set_field(qpc_mask->byte_4_sqpn_tst, V2_QPC_BYTE_4_TST_M,
		       V2_QPC_BYTE_4_TST_S, 0);

	/*
	 * Remote access rights (RRE = remote read, RWE = remote write,
	 * ATE/EXT_ATE = atomics): take them from @attr when the caller
	 * passed IB_QP_ACCESS_FLAGS, otherwise re-apply the rights cached
	 * on the QP at creation time.
	 */
	if (attr_mask & IB_QP_ACCESS_FLAGS) {
		roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RRE_S,
			     !!(attr->qp_access_flags & IB_ACCESS_REMOTE_READ));
		roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RRE_S,
			     0);

		roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RWE_S,
			     !!(attr->qp_access_flags &
			     IB_ACCESS_REMOTE_WRITE));
		roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RWE_S,
			     0);

		roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_ATE_S,
			     !!(attr->qp_access_flags &
			     IB_ACCESS_REMOTE_ATOMIC));
		roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_ATE_S,
			     0);
		/* EXT_ATE mirrors ATE: both follow IB_ACCESS_REMOTE_ATOMIC */
		roce_set_bit(context->byte_76_srqn_op_en,
			     V2_QPC_BYTE_76_EXT_ATE_S,
			     !!(attr->qp_access_flags &
			     IB_ACCESS_REMOTE_ATOMIC));
		roce_set_bit(qpc_mask->byte_76_srqn_op_en,
			     V2_QPC_BYTE_76_EXT_ATE_S, 0);
	} else {
		roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RRE_S,
			     !!(hr_qp->access_flags & IB_ACCESS_REMOTE_READ));
		roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RRE_S,
			     0);

		roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RWE_S,
			     !!(hr_qp->access_flags & IB_ACCESS_REMOTE_WRITE));
		roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RWE_S,
			     0);

		roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_ATE_S,
			     !!(hr_qp->access_flags & IB_ACCESS_REMOTE_ATOMIC));
		roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_ATE_S,
			     0);
		roce_set_bit(context->byte_76_srqn_op_en,
			     V2_QPC_BYTE_76_EXT_ATE_S,
			     !!(hr_qp->access_flags & IB_ACCESS_REMOTE_ATOMIC));
		roce_set_bit(qpc_mask->byte_76_srqn_op_en,
			     V2_QPC_BYTE_76_EXT_ATE_S, 0);
	}

	/* Protection domain number */
	roce_set_field(context->byte_16_buf_ba_pg_sz, V2_QPC_BYTE_16_PD_M,
		       V2_QPC_BYTE_16_PD_S, to_hr_pd(ibqp->pd)->pdn);
	roce_set_field(qpc_mask->byte_16_buf_ba_pg_sz, V2_QPC_BYTE_16_PD_M,
		       V2_QPC_BYTE_16_PD_S, 0);

	/* Completion queues for receive and send directions */
	roce_set_field(context->byte_80_rnr_rx_cqn, V2_QPC_BYTE_80_RX_CQN_M,
		       V2_QPC_BYTE_80_RX_CQN_S, to_hr_cq(ibqp->recv_cq)->cqn);
	roce_set_field(qpc_mask->byte_80_rnr_rx_cqn, V2_QPC_BYTE_80_RX_CQN_M,
		       V2_QPC_BYTE_80_RX_CQN_S, 0);

	roce_set_field(context->byte_252_err_txcqn, V2_QPC_BYTE_252_TX_CQN_M,
		       V2_QPC_BYTE_252_TX_CQN_S, to_hr_cq(ibqp->send_cq)->cqn);
	roce_set_field(qpc_mask->byte_252_err_txcqn, V2_QPC_BYTE_252_TX_CQN_M,
		       V2_QPC_BYTE_252_TX_CQN_S, 0);

	/* Bind the SRQ (enable bit + SRQ number) when this QP uses one */
	if (ibqp->srq) {
		roce_set_bit(context->byte_76_srqn_op_en,
			     V2_QPC_BYTE_76_SRQ_EN_S, 1);
		roce_set_bit(qpc_mask->byte_76_srqn_op_en,
			     V2_QPC_BYTE_76_SRQ_EN_S, 0);
		roce_set_field(context->byte_76_srqn_op_en,
			       V2_QPC_BYTE_76_SRQN_M, V2_QPC_BYTE_76_SRQN_S,
			       to_hr_srq(ibqp->srq)->srqn);
		roce_set_field(qpc_mask->byte_76_srqn_op_en,
			       V2_QPC_BYTE_76_SRQN_M, V2_QPC_BYTE_76_SRQN_S, 0);
	}

	roce_set_field(context->byte_4_sqpn_tst, V2_QPC_BYTE_4_SQPN_M,
		       V2_QPC_BYTE_4_SQPN_S, hr_qp->qpn);
	roce_set_field(qpc_mask->byte_4_sqpn_tst, V2_QPC_BYTE_4_SQPN_M,
		       V2_QPC_BYTE_4_SQPN_S, 0);

	/*
	 * NOTE(review): DQPN is programmed from hr_qp->qpn (the local QPN)
	 * rather than attr->dest_qp_num, unlike modify_qp_init_to_rtr()
	 * which uses attr->dest_qp_num for the same field. Looks
	 * suspicious — confirm this is intentional for INIT -> INIT.
	 */
	if (attr_mask & IB_QP_DEST_QPN) {
		roce_set_field(context->byte_56_dqpn_err, V2_QPC_BYTE_56_DQPN_M,
			       V2_QPC_BYTE_56_DQPN_S, hr_qp->qpn);
		roce_set_field(qpc_mask->byte_56_dqpn_err,
			       V2_QPC_BYTE_56_DQPN_M, V2_QPC_BYTE_56_DQPN_S, 0);
	}
}
3980
8d18ad83
LO
3981static bool check_wqe_rq_mtt_count(struct hns_roce_dev *hr_dev,
3982 struct hns_roce_qp *hr_qp, int mtt_cnt,
3983 u32 page_size)
3984{
3985 struct device *dev = hr_dev->dev;
3986
3987 if (hr_qp->rq.wqe_cnt < 1)
3988 return true;
3989
3990 if (mtt_cnt < 1) {
3991 dev_err(dev, "qp(0x%lx) rqwqe buf ba find failed\n",
3992 hr_qp->qpn);
3993 return false;
3994 }
3995
3996 if (mtt_cnt < MTT_MIN_COUNT &&
3997 (hr_qp->rq.offset + page_size) < hr_qp->buff_size) {
3998 dev_err(dev, "qp(0x%lx) next rqwqe buf ba find failed\n",
3999 hr_qp->qpn);
4000 return false;
4001 }
4002
4003 return true;
4004}
4005
/*
 * Program the QPC fields for an INIT -> RTR (ready-to-receive)
 * transition.
 *
 * Looks up the QP buffer, IRRL and TRRL translation tables, then fills
 * @context / clears @qpc_mask for the receive-path configuration:
 * WQE/SGE base addresses and hop numbers, page sizes, TRRL/IRRL base
 * addresses, loopback detection, destination QPN, SGID index, DMAC,
 * path MTU and RQ producer index.
 *
 * Returns 0 on success or -EINVAL when a table lookup fails or an
 * unsupported attribute (IB_QP_ALT_PATH) is requested.
 */
static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
				 const struct ib_qp_attr *attr, int attr_mask,
				 struct hns_roce_v2_qp_context *context,
				 struct hns_roce_v2_qp_context *qpc_mask)
{
	const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr);
	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
	struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
	struct device *dev = hr_dev->dev;
	u64 mtts[MTT_MIN_COUNT] = { 0 };
	dma_addr_t dma_handle_3;
	dma_addr_t dma_handle_2;
	u64 wqe_sge_ba;
	u32 page_size;
	u8 port_num;
	u64 *mtts_3;
	u64 *mtts_2;
	int count;
	u8 *dmac;
	u8 *smac;
	int port;

	/* Search qp buf's mtts */
	page_size = 1 << (hr_dev->caps.mtt_buf_pg_sz + PAGE_SHIFT);
	count = hns_roce_mtr_find(hr_dev, &hr_qp->mtr,
				  hr_qp->rq.offset / page_size, mtts,
				  MTT_MIN_COUNT, &wqe_sge_ba);
	/* With an SRQ there is no RQ buffer of our own to validate */
	if (!ibqp->srq)
		if (!check_wqe_rq_mtt_count(hr_dev, hr_qp, count, page_size))
			return -EINVAL;

	/* Search IRRL's mtts */
	mtts_2 = hns_roce_table_find(hr_dev, &hr_dev->qp_table.irrl_table,
				     hr_qp->qpn, &dma_handle_2);
	if (!mtts_2) {
		dev_err(dev, "qp irrl_table find failed\n");
		return -EINVAL;
	}

	/* Search TRRL's mtts */
	mtts_3 = hns_roce_table_find(hr_dev, &hr_dev->qp_table.trrl_table,
				     hr_qp->qpn, &dma_handle_3);
	if (!mtts_3) {
		dev_err(dev, "qp trrl_table find failed\n");
		return -EINVAL;
	}

	/* Alternate path is not supported by this hardware */
	if (attr_mask & IB_QP_ALT_PATH) {
		dev_err(dev, "INIT2RTR attr_mask (0x%x) error\n", attr_mask);
		return -EINVAL;
	}

	dmac = (u8 *)attr->ah_attr.roce.dmac;
	/* WQE/SGE base address is stored right-shifted by 3 (8-byte units) */
	context->wqe_sge_ba = cpu_to_le32(wqe_sge_ba >> 3);
	qpc_mask->wqe_sge_ba = 0;

	/*
	 * In v2 engine, software pass context and context mask to hardware
	 * when modifying qp. If software need modify some fields in context,
	 * we should set all bits of the relevant fields in context mask to
	 * 0 at the same time, else set them to 0x1.
	 */
	roce_set_field(context->byte_12_sq_hop, V2_QPC_BYTE_12_WQE_SGE_BA_M,
		       V2_QPC_BYTE_12_WQE_SGE_BA_S, wqe_sge_ba >> (32 + 3));
	roce_set_field(qpc_mask->byte_12_sq_hop, V2_QPC_BYTE_12_WQE_SGE_BA_M,
		       V2_QPC_BYTE_12_WQE_SGE_BA_S, 0);

	/* HNS_ROCE_HOP_NUM_0 is encoded as 0 in the QPC hop-number fields */
	roce_set_field(context->byte_12_sq_hop, V2_QPC_BYTE_12_SQ_HOP_NUM_M,
		       V2_QPC_BYTE_12_SQ_HOP_NUM_S,
		       hr_dev->caps.wqe_sq_hop_num == HNS_ROCE_HOP_NUM_0 ?
		       0 : hr_dev->caps.wqe_sq_hop_num);
	roce_set_field(qpc_mask->byte_12_sq_hop, V2_QPC_BYTE_12_SQ_HOP_NUM_M,
		       V2_QPC_BYTE_12_SQ_HOP_NUM_S, 0);

	/* An extended SGE area exists only for GSI QPs or large max_gs */
	roce_set_field(context->byte_20_smac_sgid_idx,
		       V2_QPC_BYTE_20_SGE_HOP_NUM_M,
		       V2_QPC_BYTE_20_SGE_HOP_NUM_S,
		       ((ibqp->qp_type == IB_QPT_GSI) ||
		       hr_qp->sq.max_gs > HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE) ?
		       hr_dev->caps.wqe_sge_hop_num : 0);
	roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
		       V2_QPC_BYTE_20_SGE_HOP_NUM_M,
		       V2_QPC_BYTE_20_SGE_HOP_NUM_S, 0);

	roce_set_field(context->byte_20_smac_sgid_idx,
		       V2_QPC_BYTE_20_RQ_HOP_NUM_M,
		       V2_QPC_BYTE_20_RQ_HOP_NUM_S,
		       hr_dev->caps.wqe_rq_hop_num == HNS_ROCE_HOP_NUM_0 ?
		       0 : hr_dev->caps.wqe_rq_hop_num);
	roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
		       V2_QPC_BYTE_20_RQ_HOP_NUM_M,
		       V2_QPC_BYTE_20_RQ_HOP_NUM_S, 0);

	/* Page sizes are encoded as a shift offset from 4K */
	roce_set_field(context->byte_16_buf_ba_pg_sz,
		       V2_QPC_BYTE_16_WQE_SGE_BA_PG_SZ_M,
		       V2_QPC_BYTE_16_WQE_SGE_BA_PG_SZ_S,
		       hr_qp->wqe_bt_pg_shift + PG_SHIFT_OFFSET);
	roce_set_field(qpc_mask->byte_16_buf_ba_pg_sz,
		       V2_QPC_BYTE_16_WQE_SGE_BA_PG_SZ_M,
		       V2_QPC_BYTE_16_WQE_SGE_BA_PG_SZ_S, 0);

	roce_set_field(context->byte_16_buf_ba_pg_sz,
		       V2_QPC_BYTE_16_WQE_SGE_BUF_PG_SZ_M,
		       V2_QPC_BYTE_16_WQE_SGE_BUF_PG_SZ_S,
		       hr_dev->caps.mtt_buf_pg_sz + PG_SHIFT_OFFSET);
	roce_set_field(qpc_mask->byte_16_buf_ba_pg_sz,
		       V2_QPC_BYTE_16_WQE_SGE_BUF_PG_SZ_M,
		       V2_QPC_BYTE_16_WQE_SGE_BUF_PG_SZ_S, 0);

	/* Current and next RQ block addresses come from the MTT lookup */
	context->rq_cur_blk_addr = cpu_to_le32(mtts[0] >> PAGE_ADDR_SHIFT);
	qpc_mask->rq_cur_blk_addr = 0;

	roce_set_field(context->byte_92_srq_info,
		       V2_QPC_BYTE_92_RQ_CUR_BLK_ADDR_M,
		       V2_QPC_BYTE_92_RQ_CUR_BLK_ADDR_S,
		       mtts[0] >> (32 + PAGE_ADDR_SHIFT));
	roce_set_field(qpc_mask->byte_92_srq_info,
		       V2_QPC_BYTE_92_RQ_CUR_BLK_ADDR_M,
		       V2_QPC_BYTE_92_RQ_CUR_BLK_ADDR_S, 0);

	context->rq_nxt_blk_addr = cpu_to_le32(mtts[1] >> PAGE_ADDR_SHIFT);
	qpc_mask->rq_nxt_blk_addr = 0;

	roce_set_field(context->byte_104_rq_sge,
		       V2_QPC_BYTE_104_RQ_NXT_BLK_ADDR_M,
		       V2_QPC_BYTE_104_RQ_NXT_BLK_ADDR_S,
		       mtts[1] >> (32 + PAGE_ADDR_SHIFT));
	roce_set_field(qpc_mask->byte_104_rq_sge,
		       V2_QPC_BYTE_104_RQ_NXT_BLK_ADDR_M,
		       V2_QPC_BYTE_104_RQ_NXT_BLK_ADDR_S, 0);

	/*
	 * TRRL base address is split across three fields; the shifts
	 * (4, 16+4, 32+16+4) select successive slices of the DMA address.
	 */
	roce_set_field(context->byte_132_trrl, V2_QPC_BYTE_132_TRRL_BA_M,
		       V2_QPC_BYTE_132_TRRL_BA_S, dma_handle_3 >> 4);
	roce_set_field(qpc_mask->byte_132_trrl, V2_QPC_BYTE_132_TRRL_BA_M,
		       V2_QPC_BYTE_132_TRRL_BA_S, 0);
	context->trrl_ba = cpu_to_le32(dma_handle_3 >> (16 + 4));
	qpc_mask->trrl_ba = 0;
	roce_set_field(context->byte_140_raq, V2_QPC_BYTE_140_TRRL_BA_M,
		       V2_QPC_BYTE_140_TRRL_BA_S,
		       (u32)(dma_handle_3 >> (32 + 16 + 4)));
	roce_set_field(qpc_mask->byte_140_raq, V2_QPC_BYTE_140_TRRL_BA_M,
		       V2_QPC_BYTE_140_TRRL_BA_S, 0);

	/* IRRL base address, 64-byte aligned (hence the >> 6) */
	context->irrl_ba = cpu_to_le32(dma_handle_2 >> 6);
	qpc_mask->irrl_ba = 0;
	roce_set_field(context->byte_208_irrl, V2_QPC_BYTE_208_IRRL_BA_M,
		       V2_QPC_BYTE_208_IRRL_BA_S,
		       dma_handle_2 >> (32 + 6));
	roce_set_field(qpc_mask->byte_208_irrl, V2_QPC_BYTE_208_IRRL_BA_M,
		       V2_QPC_BYTE_208_IRRL_BA_S, 0);

	roce_set_bit(context->byte_208_irrl, V2_QPC_BYTE_208_RMT_E2E_S, 1);
	roce_set_bit(qpc_mask->byte_208_irrl, V2_QPC_BYTE_208_RMT_E2E_S, 0);

	roce_set_bit(context->byte_252_err_txcqn, V2_QPC_BYTE_252_SIG_TYPE_S,
		     hr_qp->sq_signal_bits);
	roce_set_bit(qpc_mask->byte_252_err_txcqn, V2_QPC_BYTE_252_SIG_TYPE_S,
		     0);

	port = (attr_mask & IB_QP_PORT) ? (attr->port_num - 1) : hr_qp->port;

	smac = (u8 *)hr_dev->dev_addr[port];
	/* when dmac equals smac or loop_idc is 1, it should loopback */
	if (ether_addr_equal_unaligned(dmac, smac) ||
	    hr_dev->loop_idc == 0x1) {
		roce_set_bit(context->byte_28_at_fl, V2_QPC_BYTE_28_LBI_S, 1);
		roce_set_bit(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_LBI_S, 0);
	}

	if (attr_mask & IB_QP_DEST_QPN) {
		roce_set_field(context->byte_56_dqpn_err, V2_QPC_BYTE_56_DQPN_M,
			       V2_QPC_BYTE_56_DQPN_S, attr->dest_qp_num);
		roce_set_field(qpc_mask->byte_56_dqpn_err,
			       V2_QPC_BYTE_56_DQPN_M, V2_QPC_BYTE_56_DQPN_S, 0);
	}

	/* Configure GID index */
	port_num = rdma_ah_get_port_num(&attr->ah_attr);
	roce_set_field(context->byte_20_smac_sgid_idx,
		       V2_QPC_BYTE_20_SGID_IDX_M, V2_QPC_BYTE_20_SGID_IDX_S,
		       hns_get_gid_index(hr_dev, port_num - 1,
					 grh->sgid_index));
	roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
		       V2_QPC_BYTE_20_SGID_IDX_M, V2_QPC_BYTE_20_SGID_IDX_S, 0);
	/* First 4 DMAC bytes go into the dedicated field, last 2 into byte_52 */
	memcpy(&(context->dmac), dmac, sizeof(u32));
	roce_set_field(context->byte_52_udpspn_dmac, V2_QPC_BYTE_52_DMAC_M,
		       V2_QPC_BYTE_52_DMAC_S, *((u16 *)(&dmac[4])));
	qpc_mask->dmac = 0;
	roce_set_field(qpc_mask->byte_52_udpspn_dmac, V2_QPC_BYTE_52_DMAC_M,
		       V2_QPC_BYTE_52_DMAC_S, 0);

	/* mtu*(2^LP_PKTN_INI) should not bigger than 1 message length 64kb */
	roce_set_field(context->byte_56_dqpn_err, V2_QPC_BYTE_56_LP_PKTN_INI_M,
		       V2_QPC_BYTE_56_LP_PKTN_INI_S, 4);
	roce_set_field(qpc_mask->byte_56_dqpn_err, V2_QPC_BYTE_56_LP_PKTN_INI_M,
		       V2_QPC_BYTE_56_LP_PKTN_INI_S, 0);

	/* GSI/UD QPs always use the maximum MTU; others honour the caller */
	if (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_UD)
		roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_MTU_M,
			       V2_QPC_BYTE_24_MTU_S, IB_MTU_4096);
	else if (attr_mask & IB_QP_PATH_MTU)
		roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_MTU_M,
			       V2_QPC_BYTE_24_MTU_S, attr->path_mtu);

	roce_set_field(qpc_mask->byte_24_mtu_tc, V2_QPC_BYTE_24_MTU_M,
		       V2_QPC_BYTE_24_MTU_S, 0);

	roce_set_field(context->byte_84_rq_ci_pi,
		       V2_QPC_BYTE_84_RQ_PRODUCER_IDX_M,
		       V2_QPC_BYTE_84_RQ_PRODUCER_IDX_S, hr_qp->rq.head);
	roce_set_field(qpc_mask->byte_84_rq_ci_pi,
		       V2_QPC_BYTE_84_RQ_PRODUCER_IDX_M,
		       V2_QPC_BYTE_84_RQ_PRODUCER_IDX_S, 0);

	/* The remaining fields are cleared to hardware defaults (mask only) */
	roce_set_field(qpc_mask->byte_84_rq_ci_pi,
		       V2_QPC_BYTE_84_RQ_CONSUMER_IDX_M,
		       V2_QPC_BYTE_84_RQ_CONSUMER_IDX_S, 0);
	roce_set_bit(qpc_mask->byte_108_rx_reqepsn,
		     V2_QPC_BYTE_108_RX_REQ_PSN_ERR_S, 0);
	roce_set_field(qpc_mask->byte_96_rx_reqmsn, V2_QPC_BYTE_96_RX_REQ_MSN_M,
		       V2_QPC_BYTE_96_RX_REQ_MSN_S, 0);
	roce_set_field(qpc_mask->byte_108_rx_reqepsn,
		       V2_QPC_BYTE_108_RX_REQ_LAST_OPTYPE_M,
		       V2_QPC_BYTE_108_RX_REQ_LAST_OPTYPE_S, 0);

	context->rq_rnr_timer = 0;
	qpc_mask->rq_rnr_timer = 0;

	roce_set_field(qpc_mask->byte_132_trrl, V2_QPC_BYTE_132_TRRL_HEAD_MAX_M,
		       V2_QPC_BYTE_132_TRRL_HEAD_MAX_S, 0);
	roce_set_field(qpc_mask->byte_132_trrl, V2_QPC_BYTE_132_TRRL_TAIL_MAX_M,
		       V2_QPC_BYTE_132_TRRL_TAIL_MAX_S, 0);

	/* rocee send 2^lp_sgen_ini segs every time */
	roce_set_field(context->byte_168_irrl_idx,
		       V2_QPC_BYTE_168_LP_SGEN_INI_M,
		       V2_QPC_BYTE_168_LP_SGEN_INI_S, 3);
	roce_set_field(qpc_mask->byte_168_irrl_idx,
		       V2_QPC_BYTE_168_LP_SGEN_INI_M,
		       V2_QPC_BYTE_168_LP_SGEN_INI_S, 0);

	return 0;
}
4249
/*
 * Program the QPC fields for an RTR -> RTS (ready-to-send) transition.
 *
 * Looks up the SQ (and, when present, extended SGE) buffer addresses
 * and writes them into the send-path fields of @context, clearing the
 * corresponding bits of @qpc_mask. Alternate path and path migration
 * are rejected as unsupported.
 *
 * Returns 0 on success or -EINVAL on lookup failure / bad attr_mask.
 */
static int modify_qp_rtr_to_rts(struct ib_qp *ibqp,
				const struct ib_qp_attr *attr, int attr_mask,
				struct hns_roce_v2_qp_context *context,
				struct hns_roce_v2_qp_context *qpc_mask)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
	struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
	struct device *dev = hr_dev->dev;
	u64 sge_cur_blk = 0;
	u64 sq_cur_blk = 0;
	u32 page_size;
	int count;

	/* Search qp buf's mtts */
	count = hns_roce_mtr_find(hr_dev, &hr_qp->mtr, 0, &sq_cur_blk, 1, NULL);
	if (count < 1) {
		dev_err(dev, "qp(0x%lx) buf pa find failed\n", hr_qp->qpn);
		return -EINVAL;
	}

	/* A non-zero sge.offset means an extended SGE area exists */
	if (hr_qp->sge.offset) {
		page_size = 1 << (hr_dev->caps.mtt_buf_pg_sz + PAGE_SHIFT);
		count = hns_roce_mtr_find(hr_dev, &hr_qp->mtr,
					  hr_qp->sge.offset / page_size,
					  &sge_cur_blk, 1, NULL);
		if (count < 1) {
			dev_err(dev, "qp(0x%lx) sge pa find failed\n",
				hr_qp->qpn);
			return -EINVAL;
		}
	}

	/* Not support alternate path and path migration */
	if ((attr_mask & IB_QP_ALT_PATH) ||
	    (attr_mask & IB_QP_PATH_MIG_STATE)) {
		dev_err(dev, "RTR2RTS attr_mask (0x%x)error\n", attr_mask);
		return -EINVAL;
	}

	/*
	 * In v2 engine, software pass context and context mask to hardware
	 * when modifying qp. If software need modify some fields in context,
	 * we should set all bits of the relevant fields in context mask to
	 * 0 at the same time, else set them to 0x1.
	 */
	/* SQ current block: low 32 bits in sq_cur_blk_addr, high bits in byte_168 */
	context->sq_cur_blk_addr = cpu_to_le32(sq_cur_blk >> PAGE_ADDR_SHIFT);
	roce_set_field(context->byte_168_irrl_idx,
		       V2_QPC_BYTE_168_SQ_CUR_BLK_ADDR_M,
		       V2_QPC_BYTE_168_SQ_CUR_BLK_ADDR_S,
		       sq_cur_blk >> (32 + PAGE_ADDR_SHIFT));
	qpc_mask->sq_cur_blk_addr = 0;
	roce_set_field(qpc_mask->byte_168_irrl_idx,
		       V2_QPC_BYTE_168_SQ_CUR_BLK_ADDR_M,
		       V2_QPC_BYTE_168_SQ_CUR_BLK_ADDR_S, 0);

	/* SGE block address is only meaningful for GSI QPs or large max_gs */
	context->sq_cur_sge_blk_addr = ((ibqp->qp_type == IB_QPT_GSI) ||
		       hr_qp->sq.max_gs > HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE) ?
				      cpu_to_le32(sge_cur_blk >>
				      PAGE_ADDR_SHIFT) : 0;
	roce_set_field(context->byte_184_irrl_idx,
		       V2_QPC_BYTE_184_SQ_CUR_SGE_BLK_ADDR_M,
		       V2_QPC_BYTE_184_SQ_CUR_SGE_BLK_ADDR_S,
		       ((ibqp->qp_type == IB_QPT_GSI) || hr_qp->sq.max_gs >
		       HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE) ?
		       (sge_cur_blk >>
		       (32 + PAGE_ADDR_SHIFT)) : 0);
	qpc_mask->sq_cur_sge_blk_addr = 0;
	roce_set_field(qpc_mask->byte_184_irrl_idx,
		       V2_QPC_BYTE_184_SQ_CUR_SGE_BLK_ADDR_M,
		       V2_QPC_BYTE_184_SQ_CUR_SGE_BLK_ADDR_S, 0);

	context->rx_sq_cur_blk_addr =
		cpu_to_le32(sq_cur_blk >> PAGE_ADDR_SHIFT);
	roce_set_field(context->byte_232_irrl_sge,
		       V2_QPC_BYTE_232_RX_SQ_CUR_BLK_ADDR_M,
		       V2_QPC_BYTE_232_RX_SQ_CUR_BLK_ADDR_S,
		       sq_cur_blk >> (32 + PAGE_ADDR_SHIFT));
	qpc_mask->rx_sq_cur_blk_addr = 0;
	roce_set_field(qpc_mask->byte_232_irrl_sge,
		       V2_QPC_BYTE_232_RX_SQ_CUR_BLK_ADDR_M,
		       V2_QPC_BYTE_232_RX_SQ_CUR_BLK_ADDR_S, 0);

	/*
	 * Set some fields in context to zero, Because the default values
	 * of all fields in context are zero, we need not set them to 0 again.
	 * but we should set the relevant fields of context mask to 0.
	 */
	roce_set_field(qpc_mask->byte_232_irrl_sge,
		       V2_QPC_BYTE_232_IRRL_SGE_IDX_M,
		       V2_QPC_BYTE_232_IRRL_SGE_IDX_S, 0);

	roce_set_field(qpc_mask->byte_240_irrl_tail,
		       V2_QPC_BYTE_240_RX_ACK_MSN_M,
		       V2_QPC_BYTE_240_RX_ACK_MSN_S, 0);

	roce_set_field(qpc_mask->byte_248_ack_psn,
		       V2_QPC_BYTE_248_ACK_LAST_OPTYPE_M,
		       V2_QPC_BYTE_248_ACK_LAST_OPTYPE_S, 0);
	roce_set_bit(qpc_mask->byte_248_ack_psn,
		     V2_QPC_BYTE_248_IRRL_PSN_VLD_S, 0);
	roce_set_field(qpc_mask->byte_248_ack_psn,
		       V2_QPC_BYTE_248_IRRL_PSN_M,
		       V2_QPC_BYTE_248_IRRL_PSN_S, 0);

	roce_set_field(qpc_mask->byte_240_irrl_tail,
		       V2_QPC_BYTE_240_IRRL_TAIL_REAL_M,
		       V2_QPC_BYTE_240_IRRL_TAIL_REAL_S, 0);

	roce_set_field(qpc_mask->byte_220_retry_psn_msn,
		       V2_QPC_BYTE_220_RETRY_MSG_MSN_M,
		       V2_QPC_BYTE_220_RETRY_MSG_MSN_S, 0);

	roce_set_bit(qpc_mask->byte_248_ack_psn,
		     V2_QPC_BYTE_248_RNR_RETRY_FLAG_S, 0);

	roce_set_field(qpc_mask->byte_212_lsn, V2_QPC_BYTE_212_CHECK_FLG_M,
		       V2_QPC_BYTE_212_CHECK_FLG_S, 0);

	/* Initial limit sequence number; 0x100 is the hardware default start */
	roce_set_field(context->byte_212_lsn, V2_QPC_BYTE_212_LSN_M,
		       V2_QPC_BYTE_212_LSN_S, 0x100);
	roce_set_field(qpc_mask->byte_212_lsn, V2_QPC_BYTE_212_LSN_M,
		       V2_QPC_BYTE_212_LSN_S, 0);

	roce_set_field(qpc_mask->byte_196_sq_psn, V2_QPC_BYTE_196_IRRL_HEAD_M,
		       V2_QPC_BYTE_196_IRRL_HEAD_S, 0);

	return 0;
}
4378
233673e4
LO
4379static inline bool hns_roce_v2_check_qp_stat(enum ib_qp_state cur_state,
4380 enum ib_qp_state new_state)
4381{
4382
4383 if ((cur_state != IB_QPS_RESET &&
4384 (new_state == IB_QPS_ERR || new_state == IB_QPS_RESET)) ||
4385 ((cur_state == IB_QPS_RTS || cur_state == IB_QPS_SQD) &&
4386 (new_state == IB_QPS_RTS || new_state == IB_QPS_SQD)) ||
4387 (cur_state == IB_QPS_SQE && new_state == IB_QPS_RTS))
4388 return true;
4389
4390 return false;
4391
4392}
4393
/*
 * Program the address-vector (path) related QPC fields from the AH
 * attributes: VLAN, UDP source port, SGID index, hop limit, traffic
 * class, flow label, DGID and service level.
 *
 * Only RoCE address handles are accepted; the sgid_index is bounds
 * checked against the port's GID table. Returns 0 on success or a
 * negative errno.
 */
static int hns_roce_v2_set_path(struct ib_qp *ibqp,
				const struct ib_qp_attr *attr,
				int attr_mask,
				struct hns_roce_v2_qp_context *context,
				struct hns_roce_v2_qp_context *qpc_mask)
{
	const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr);
	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
	struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
	const struct ib_gid_attr *gid_attr = NULL;
	int is_roce_protocol;
	u16 vlan_id = 0xffff;	/* 0xffff == no VLAN tag */
	bool is_udp = false;
	u8 ib_port;
	u8 hr_port;
	int ret;

	/* IB ports are 1-based; the driver's port index is 0-based */
	ib_port = (attr_mask & IB_QP_PORT) ? attr->port_num : hr_qp->port + 1;
	hr_port = ib_port - 1;
	is_roce_protocol = rdma_cap_eth_ah(&hr_dev->ib_dev, ib_port) &&
			   rdma_ah_get_ah_flags(&attr->ah_attr) & IB_AH_GRH;

	if (is_roce_protocol) {
		gid_attr = attr->ah_attr.grh.sgid_attr;
		ret = rdma_read_gid_l2_fields(gid_attr, &vlan_id, NULL);
		if (ret)
			return ret;

		/* RoCEv2 GIDs are UDP-encapsulated; v1 GIDs are not */
		if (gid_attr)
			is_udp = (gid_attr->gid_type ==
				 IB_GID_TYPE_ROCE_UDP_ENCAP);
	}

	/* Enable VLAN insertion on both directions when a valid tag exists */
	if (vlan_id < VLAN_N_VID) {
		roce_set_bit(context->byte_76_srqn_op_en,
			     V2_QPC_BYTE_76_RQ_VLAN_EN_S, 1);
		roce_set_bit(qpc_mask->byte_76_srqn_op_en,
			     V2_QPC_BYTE_76_RQ_VLAN_EN_S, 0);
		roce_set_bit(context->byte_168_irrl_idx,
			     V2_QPC_BYTE_168_SQ_VLAN_EN_S, 1);
		roce_set_bit(qpc_mask->byte_168_irrl_idx,
			     V2_QPC_BYTE_168_SQ_VLAN_EN_S, 0);
	}

	roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_VLAN_ID_M,
		       V2_QPC_BYTE_24_VLAN_ID_S, vlan_id);
	roce_set_field(qpc_mask->byte_24_mtu_tc, V2_QPC_BYTE_24_VLAN_ID_M,
		       V2_QPC_BYTE_24_VLAN_ID_S, 0);

	if (grh->sgid_index >= hr_dev->caps.gid_table_len[hr_port]) {
		dev_err(hr_dev->dev, "sgid_index(%u) too large. max is %d\n",
			grh->sgid_index, hr_dev->caps.gid_table_len[hr_port]);
		return -EINVAL;
	}

	if (attr->ah_attr.type != RDMA_AH_ATTR_TYPE_ROCE) {
		dev_err(hr_dev->dev, "ah attr is not RDMA roce type\n");
		return -EINVAL;
	}

	/*
	 * UDP source port for RoCEv2. NOTE(review): hard-coded to 0x12b7
	 * rather than derived from the flow — presumably acceptable for
	 * this hardware; confirm no per-flow entropy is required.
	 */
	roce_set_field(context->byte_52_udpspn_dmac, V2_QPC_BYTE_52_UDPSPN_M,
		       V2_QPC_BYTE_52_UDPSPN_S,
		       is_udp ? 0x12b7 : 0);

	roce_set_field(qpc_mask->byte_52_udpspn_dmac, V2_QPC_BYTE_52_UDPSPN_M,
		       V2_QPC_BYTE_52_UDPSPN_S, 0);

	roce_set_field(context->byte_20_smac_sgid_idx,
		       V2_QPC_BYTE_20_SGID_IDX_M, V2_QPC_BYTE_20_SGID_IDX_S,
		       grh->sgid_index);

	roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
		       V2_QPC_BYTE_20_SGID_IDX_M, V2_QPC_BYTE_20_SGID_IDX_S, 0);

	roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_HOP_LIMIT_M,
		       V2_QPC_BYTE_24_HOP_LIMIT_S, grh->hop_limit);
	roce_set_field(qpc_mask->byte_24_mtu_tc, V2_QPC_BYTE_24_HOP_LIMIT_M,
		       V2_QPC_BYTE_24_HOP_LIMIT_S, 0);

	/* HIP08_B + RoCEv2 expects the DSCP part only (traffic_class >> 2) */
	if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP08_B && is_udp)
		roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_TC_M,
			       V2_QPC_BYTE_24_TC_S, grh->traffic_class >> 2);
	else
		roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_TC_M,
			       V2_QPC_BYTE_24_TC_S, grh->traffic_class);
	roce_set_field(qpc_mask->byte_24_mtu_tc, V2_QPC_BYTE_24_TC_M,
		       V2_QPC_BYTE_24_TC_S, 0);
	roce_set_field(context->byte_28_at_fl, V2_QPC_BYTE_28_FL_M,
		       V2_QPC_BYTE_28_FL_S, grh->flow_label);
	roce_set_field(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_FL_M,
		       V2_QPC_BYTE_28_FL_S, 0);
	memcpy(context->dgid, grh->dgid.raw, sizeof(grh->dgid.raw));
	memset(qpc_mask->dgid, 0, sizeof(grh->dgid.raw));
	roce_set_field(context->byte_28_at_fl, V2_QPC_BYTE_28_SL_M,
		       V2_QPC_BYTE_28_SL_S, rdma_ah_get_sl(&attr->ah_attr));
	roce_set_field(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_SL_M,
		       V2_QPC_BYTE_28_SL_S, 0);
	/* Cache the SL for later CQE/WC reporting */
	hr_qp->sl = rdma_ah_get_sl(&attr->ah_attr);

	return 0;
}
4495
4496static int hns_roce_v2_set_abs_fields(struct ib_qp *ibqp,
4497 const struct ib_qp_attr *attr,
4498 int attr_mask,
4499 enum ib_qp_state cur_state,
4500 enum ib_qp_state new_state,
4501 struct hns_roce_v2_qp_context *context,
4502 struct hns_roce_v2_qp_context *qpc_mask)
4503{
4504 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
4505 int ret = 0;
926a01dc 4506
926a01dc 4507 if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
9f507101 4508 memset(qpc_mask, 0, sizeof(*qpc_mask));
0fa95a9a 4509 modify_qp_reset_to_init(ibqp, attr, attr_mask, context,
4510 qpc_mask);
926a01dc
WHX
4511 } else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_INIT) {
4512 modify_qp_init_to_init(ibqp, attr, attr_mask, context,
4513 qpc_mask);
4514 } else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR) {
4515 ret = modify_qp_init_to_rtr(ibqp, attr, attr_mask, context,
4516 qpc_mask);
4517 if (ret)
4518 goto out;
4519 } else if (cur_state == IB_QPS_RTR && new_state == IB_QPS_RTS) {
4520 ret = modify_qp_rtr_to_rts(ibqp, attr, attr_mask, context,
4521 qpc_mask);
4522 if (ret)
4523 goto out;
233673e4 4524 } else if (hns_roce_v2_check_qp_stat(cur_state, new_state)) {
926a01dc
WHX
4525 /* Nothing */
4526 ;
4527 } else {
606bf89e 4528 dev_err(hr_dev->dev, "Illegal state for QP!\n");
ac7cbf96 4529 ret = -EINVAL;
926a01dc
WHX
4530 goto out;
4531 }
4532
606bf89e
LO
4533out:
4534 return ret;
4535}
9c6ccc03 4536
606bf89e
LO
/*
 * Fill in the optional QPC fields selected by attr_mask.
 *
 * In the v2 engine a field is applied by writing its value into
 * @context and clearing the matching bits in @qpc_mask, so every field
 * below is written as a context/mask pair.
 *
 * Returns 0 on success or a negative errno from hns_roce_v2_set_path().
 */
static int hns_roce_v2_set_opt_fields(struct ib_qp *ibqp,
				      const struct ib_qp_attr *attr,
				      int attr_mask,
				      struct hns_roce_v2_qp_context *context,
				      struct hns_roce_v2_qp_context *qpc_mask)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
	struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
	int ret = 0;

	/* Address vector: GID index, VLAN, dmac etc. handled by set_path */
	if (attr_mask & IB_QP_AV) {
		ret = hns_roce_v2_set_path(ibqp, attr, attr_mask, context,
					   qpc_mask);
		if (ret)
			return ret;
	}

	if (attr_mask & IB_QP_TIMEOUT) {
		if (attr->timeout < 31) {
			roce_set_field(context->byte_28_at_fl,
				       V2_QPC_BYTE_28_AT_M, V2_QPC_BYTE_28_AT_S,
				       attr->timeout);
			roce_set_field(qpc_mask->byte_28_at_fl,
				       V2_QPC_BYTE_28_AT_M, V2_QPC_BYTE_28_AT_S,
				       0);
		} else {
			/* HW only supports 5-bit timeouts; keep old value */
			dev_warn(hr_dev->dev,
				 "Local ACK timeout shall be 0 to 30.\n");
		}
	}

	if (attr_mask & IB_QP_RETRY_CNT) {
		/* Both the initial count and the live counter are (re)loaded */
		roce_set_field(context->byte_212_lsn,
			       V2_QPC_BYTE_212_RETRY_NUM_INIT_M,
			       V2_QPC_BYTE_212_RETRY_NUM_INIT_S,
			       attr->retry_cnt);
		roce_set_field(qpc_mask->byte_212_lsn,
			       V2_QPC_BYTE_212_RETRY_NUM_INIT_M,
			       V2_QPC_BYTE_212_RETRY_NUM_INIT_S, 0);

		roce_set_field(context->byte_212_lsn,
			       V2_QPC_BYTE_212_RETRY_CNT_M,
			       V2_QPC_BYTE_212_RETRY_CNT_S, attr->retry_cnt);
		roce_set_field(qpc_mask->byte_212_lsn,
			       V2_QPC_BYTE_212_RETRY_CNT_M,
			       V2_QPC_BYTE_212_RETRY_CNT_S, 0);
	}

	if (attr_mask & IB_QP_RNR_RETRY) {
		roce_set_field(context->byte_244_rnr_rxack,
			       V2_QPC_BYTE_244_RNR_NUM_INIT_M,
			       V2_QPC_BYTE_244_RNR_NUM_INIT_S, attr->rnr_retry);
		roce_set_field(qpc_mask->byte_244_rnr_rxack,
			       V2_QPC_BYTE_244_RNR_NUM_INIT_M,
			       V2_QPC_BYTE_244_RNR_NUM_INIT_S, 0);

		roce_set_field(context->byte_244_rnr_rxack,
			       V2_QPC_BYTE_244_RNR_CNT_M,
			       V2_QPC_BYTE_244_RNR_CNT_S, attr->rnr_retry);
		roce_set_field(qpc_mask->byte_244_rnr_rxack,
			       V2_QPC_BYTE_244_RNR_CNT_M,
			       V2_QPC_BYTE_244_RNR_CNT_S, 0);
	}

	/* RC&UC&UD required attr */
	if (attr_mask & IB_QP_SQ_PSN) {
		/* The SQ PSN seeds several HW counters at once */
		roce_set_field(context->byte_172_sq_psn,
			       V2_QPC_BYTE_172_SQ_CUR_PSN_M,
			       V2_QPC_BYTE_172_SQ_CUR_PSN_S, attr->sq_psn);
		roce_set_field(qpc_mask->byte_172_sq_psn,
			       V2_QPC_BYTE_172_SQ_CUR_PSN_M,
			       V2_QPC_BYTE_172_SQ_CUR_PSN_S, 0);

		roce_set_field(context->byte_196_sq_psn,
			       V2_QPC_BYTE_196_SQ_MAX_PSN_M,
			       V2_QPC_BYTE_196_SQ_MAX_PSN_S, attr->sq_psn);
		roce_set_field(qpc_mask->byte_196_sq_psn,
			       V2_QPC_BYTE_196_SQ_MAX_PSN_M,
			       V2_QPC_BYTE_196_SQ_MAX_PSN_S, 0);

		roce_set_field(context->byte_220_retry_psn_msn,
			       V2_QPC_BYTE_220_RETRY_MSG_PSN_M,
			       V2_QPC_BYTE_220_RETRY_MSG_PSN_S, attr->sq_psn);
		roce_set_field(qpc_mask->byte_220_retry_psn_msn,
			       V2_QPC_BYTE_220_RETRY_MSG_PSN_M,
			       V2_QPC_BYTE_220_RETRY_MSG_PSN_S, 0);

		/* High bits of the retry PSN live in a second register */
		roce_set_field(context->byte_224_retry_msg,
			       V2_QPC_BYTE_224_RETRY_MSG_PSN_M,
			       V2_QPC_BYTE_224_RETRY_MSG_PSN_S,
			       attr->sq_psn >> V2_QPC_BYTE_220_RETRY_MSG_PSN_S);
		roce_set_field(qpc_mask->byte_224_retry_msg,
			       V2_QPC_BYTE_224_RETRY_MSG_PSN_M,
			       V2_QPC_BYTE_224_RETRY_MSG_PSN_S, 0);

		roce_set_field(context->byte_224_retry_msg,
			       V2_QPC_BYTE_224_RETRY_MSG_FPKT_PSN_M,
			       V2_QPC_BYTE_224_RETRY_MSG_FPKT_PSN_S,
			       attr->sq_psn);
		roce_set_field(qpc_mask->byte_224_retry_msg,
			       V2_QPC_BYTE_224_RETRY_MSG_FPKT_PSN_M,
			       V2_QPC_BYTE_224_RETRY_MSG_FPKT_PSN_S, 0);

		roce_set_field(context->byte_244_rnr_rxack,
			       V2_QPC_BYTE_244_RX_ACK_EPSN_M,
			       V2_QPC_BYTE_244_RX_ACK_EPSN_S, attr->sq_psn);
		roce_set_field(qpc_mask->byte_244_rnr_rxack,
			       V2_QPC_BYTE_244_RX_ACK_EPSN_M,
			       V2_QPC_BYTE_244_RX_ACK_EPSN_S, 0);
	}

	/* HW encodes the depth as ceil(log2), hence fls(n - 1) */
	if ((attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) &&
	     attr->max_dest_rd_atomic) {
		roce_set_field(context->byte_140_raq, V2_QPC_BYTE_140_RR_MAX_M,
			       V2_QPC_BYTE_140_RR_MAX_S,
			       fls(attr->max_dest_rd_atomic - 1));
		roce_set_field(qpc_mask->byte_140_raq, V2_QPC_BYTE_140_RR_MAX_M,
			       V2_QPC_BYTE_140_RR_MAX_S, 0);
	}

	if ((attr_mask & IB_QP_MAX_QP_RD_ATOMIC) && attr->max_rd_atomic) {
		roce_set_field(context->byte_208_irrl, V2_QPC_BYTE_208_SR_MAX_M,
			       V2_QPC_BYTE_208_SR_MAX_S,
			       fls(attr->max_rd_atomic - 1));
		roce_set_field(qpc_mask->byte_208_irrl,
			       V2_QPC_BYTE_208_SR_MAX_M,
			       V2_QPC_BYTE_208_SR_MAX_S, 0);
	}

	if (attr_mask & (IB_QP_ACCESS_FLAGS | IB_QP_MAX_DEST_RD_ATOMIC))
		set_access_flags(hr_qp, context, qpc_mask, attr, attr_mask);

	if (attr_mask & IB_QP_MIN_RNR_TIMER) {
		roce_set_field(context->byte_80_rnr_rx_cqn,
			       V2_QPC_BYTE_80_MIN_RNR_TIME_M,
			       V2_QPC_BYTE_80_MIN_RNR_TIME_S,
			       attr->min_rnr_timer);
		roce_set_field(qpc_mask->byte_80_rnr_rx_cqn,
			       V2_QPC_BYTE_80_MIN_RNR_TIME_M,
			       V2_QPC_BYTE_80_MIN_RNR_TIME_S, 0);
	}

	/* RC&UC required attr */
	if (attr_mask & IB_QP_RQ_PSN) {
		roce_set_field(context->byte_108_rx_reqepsn,
			       V2_QPC_BYTE_108_RX_REQ_EPSN_M,
			       V2_QPC_BYTE_108_RX_REQ_EPSN_S, attr->rq_psn);
		roce_set_field(qpc_mask->byte_108_rx_reqepsn,
			       V2_QPC_BYTE_108_RX_REQ_EPSN_M,
			       V2_QPC_BYTE_108_RX_REQ_EPSN_S, 0);

		/* RAQ PSN trails the expected RQ PSN by one */
		roce_set_field(context->byte_152_raq, V2_QPC_BYTE_152_RAQ_PSN_M,
			       V2_QPC_BYTE_152_RAQ_PSN_S, attr->rq_psn - 1);
		roce_set_field(qpc_mask->byte_152_raq,
			       V2_QPC_BYTE_152_RAQ_PSN_M,
			       V2_QPC_BYTE_152_RAQ_PSN_S, 0);
	}

	if (attr_mask & IB_QP_QKEY) {
		context->qkey_xrcd = cpu_to_le32(attr->qkey);
		qpc_mask->qkey_xrcd = 0;
		hr_qp->qkey = attr->qkey;
	}

	return ret;
}
4703
4704static void hns_roce_v2_record_opt_fields(struct ib_qp *ibqp,
4705 const struct ib_qp_attr *attr,
4706 int attr_mask)
4707{
4708 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
4709 struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
4710
4711 if (attr_mask & IB_QP_ACCESS_FLAGS)
4712 hr_qp->atomic_rd_en = attr->qp_access_flags;
4713
4714 if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
4715 hr_qp->resp_depth = attr->max_dest_rd_atomic;
4716 if (attr_mask & IB_QP_PORT) {
4717 hr_qp->port = attr->port_num - 1;
4718 hr_qp->phy_port = hr_dev->iboe.phy_port[hr_qp->port];
4719 }
4720}
4721
/*
 * Drive a QP state transition on the v2 engine.
 *
 * Builds a context/mask pair (mandatory fields first, then the optional
 * attr_mask-selected fields), posts it to hardware via mailbox, then
 * updates the software QP state.  On a transition to ERR the current
 * SQ/RQ producer indices are latched into the context under the queue
 * locks so outstanding WQEs get flushed.  On a transition to RESET for
 * a kernel QP the CQs are cleaned and all queue indices are rewound.
 *
 * Returns 0 on success or a negative errno.
 */
static int hns_roce_v2_modify_qp(struct ib_qp *ibqp,
				 const struct ib_qp_attr *attr,
				 int attr_mask, enum ib_qp_state cur_state,
				 enum ib_qp_state new_state)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
	struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
	/* ctx[0] = context values, ctx[1] = mask; HW consumes both */
	struct hns_roce_v2_qp_context ctx[2];
	struct hns_roce_v2_qp_context *context = ctx;
	struct hns_roce_v2_qp_context *qpc_mask = ctx + 1;
	struct device *dev = hr_dev->dev;
	unsigned long sq_flag = 0;
	unsigned long rq_flag = 0;
	int ret;

	/*
	 * In v2 engine, software pass context and context mask to hardware
	 * when modifying qp. If software need modify some fields in context,
	 * we should set all bits of the relevant fields in context mask to
	 * 0 at the same time, else set them to 0x1.
	 */
	memset(context, 0, sizeof(*context));
	memset(qpc_mask, 0xff, sizeof(*qpc_mask));
	ret = hns_roce_v2_set_abs_fields(ibqp, attr, attr_mask, cur_state,
					 new_state, context, qpc_mask);
	if (ret)
		goto out;

	/* When QP state is err, SQ and RQ WQE should be flushed */
	if (new_state == IB_QPS_ERR) {
		/* Hold both queue locks so head indices are consistent */
		spin_lock_irqsave(&hr_qp->sq.lock, sq_flag);
		spin_lock_irqsave(&hr_qp->rq.lock, rq_flag);
		hr_qp->state = IB_QPS_ERR;
		roce_set_field(context->byte_160_sq_ci_pi,
			       V2_QPC_BYTE_160_SQ_PRODUCER_IDX_M,
			       V2_QPC_BYTE_160_SQ_PRODUCER_IDX_S,
			       hr_qp->sq.head);
		roce_set_field(qpc_mask->byte_160_sq_ci_pi,
			       V2_QPC_BYTE_160_SQ_PRODUCER_IDX_M,
			       V2_QPC_BYTE_160_SQ_PRODUCER_IDX_S, 0);

		/* A QP with an SRQ has no RQ producer index of its own */
		if (!ibqp->srq) {
			roce_set_field(context->byte_84_rq_ci_pi,
				       V2_QPC_BYTE_84_RQ_PRODUCER_IDX_M,
				       V2_QPC_BYTE_84_RQ_PRODUCER_IDX_S,
				       hr_qp->rq.head);
			roce_set_field(qpc_mask->byte_84_rq_ci_pi,
				       V2_QPC_BYTE_84_RQ_PRODUCER_IDX_M,
				       V2_QPC_BYTE_84_RQ_PRODUCER_IDX_S, 0);
		}
		spin_unlock_irqrestore(&hr_qp->rq.lock, rq_flag);
		spin_unlock_irqrestore(&hr_qp->sq.lock, sq_flag);
	}

	/* Configure the optional fields */
	ret = hns_roce_v2_set_opt_fields(ibqp, attr, attr_mask, context,
					 qpc_mask);
	if (ret)
		goto out;

	roce_set_bit(context->byte_108_rx_reqepsn, V2_QPC_BYTE_108_INV_CREDIT_S,
		     ibqp->srq ? 1 : 0);
	roce_set_bit(qpc_mask->byte_108_rx_reqepsn,
		     V2_QPC_BYTE_108_INV_CREDIT_S, 0);

	/* Every status migrate must change state */
	roce_set_field(context->byte_60_qpst_tempid, V2_QPC_BYTE_60_QP_ST_M,
		       V2_QPC_BYTE_60_QP_ST_S, new_state);
	roce_set_field(qpc_mask->byte_60_qpst_tempid, V2_QPC_BYTE_60_QP_ST_M,
		       V2_QPC_BYTE_60_QP_ST_S, 0);

	/* SW pass context to HW */
	ret = hns_roce_v2_qp_modify(hr_dev, ctx, hr_qp);
	if (ret) {
		dev_err(dev, "hns_roce_qp_modify failed(%d)\n", ret);
		goto out;
	}

	hr_qp->state = new_state;

	hns_roce_v2_record_opt_fields(ibqp, attr, attr_mask);

	/* Kernel QPs reset to a clean slate: drain CQs, rewind indices */
	if (new_state == IB_QPS_RESET && !ibqp->uobject) {
		hns_roce_v2_cq_clean(to_hr_cq(ibqp->recv_cq), hr_qp->qpn,
				     ibqp->srq ? to_hr_srq(ibqp->srq) : NULL);
		if (ibqp->send_cq != ibqp->recv_cq)
			hns_roce_v2_cq_clean(to_hr_cq(ibqp->send_cq),
					     hr_qp->qpn, NULL);

		hr_qp->rq.head = 0;
		hr_qp->rq.tail = 0;
		hr_qp->sq.head = 0;
		hr_qp->sq.tail = 0;
		hr_qp->next_sge = 0;
		if (hr_qp->rq.wqe_cnt)
			*hr_qp->rdb.db_record = 0;
	}

out:
	return ret;
}
4823
4824static inline enum ib_qp_state to_ib_qp_st(enum hns_roce_v2_qp_state state)
4825{
4826 switch (state) {
4827 case HNS_ROCE_QP_ST_RST: return IB_QPS_RESET;
4828 case HNS_ROCE_QP_ST_INIT: return IB_QPS_INIT;
4829 case HNS_ROCE_QP_ST_RTR: return IB_QPS_RTR;
4830 case HNS_ROCE_QP_ST_RTS: return IB_QPS_RTS;
4831 case HNS_ROCE_QP_ST_SQ_DRAINING:
4832 case HNS_ROCE_QP_ST_SQD: return IB_QPS_SQD;
4833 case HNS_ROCE_QP_ST_SQER: return IB_QPS_SQE;
4834 case HNS_ROCE_QP_ST_ERR: return IB_QPS_ERR;
4835 default: return -1;
4836 }
4837}
4838
4839static int hns_roce_v2_query_qpc(struct hns_roce_dev *hr_dev,
4840 struct hns_roce_qp *hr_qp,
4841 struct hns_roce_v2_qp_context *hr_context)
4842{
4843 struct hns_roce_cmd_mailbox *mailbox;
4844 int ret;
4845
4846 mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
4847 if (IS_ERR(mailbox))
4848 return PTR_ERR(mailbox);
4849
4850 ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, hr_qp->qpn, 0,
4851 HNS_ROCE_CMD_QUERY_QPC,
4852 HNS_ROCE_CMD_TIMEOUT_MSECS);
4853 if (ret) {
4854 dev_err(hr_dev->dev, "QUERY QP cmd process error\n");
4855 goto out;
4856 }
4857
4858 memcpy(hr_context, mailbox->buf, sizeof(*hr_context));
4859
4860out:
4861 hns_roce_free_cmd_mailbox(hr_dev, mailbox);
4862 return ret;
4863}
4864
/*
 * ib_query_qp() backend: read the QP context from hardware and unpack
 * it into @qp_attr / @qp_init_attr.
 *
 * A QP known to be in RESET is answered from software state without
 * touching hardware.  Serialized against concurrent modifies by
 * hr_qp->mutex.  Returns 0 on success or a negative errno.
 */
static int hns_roce_v2_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
				int qp_attr_mask,
				struct ib_qp_init_attr *qp_init_attr)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
	struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
	struct hns_roce_v2_qp_context context = {};
	struct device *dev = hr_dev->dev;
	int tmp_qp_state;
	int state;
	int ret;

	memset(qp_attr, 0, sizeof(*qp_attr));
	memset(qp_init_attr, 0, sizeof(*qp_init_attr));

	mutex_lock(&hr_qp->mutex);

	/* No HW context to read for a reset QP */
	if (hr_qp->state == IB_QPS_RESET) {
		qp_attr->qp_state = IB_QPS_RESET;
		ret = 0;
		goto done;
	}

	ret = hns_roce_v2_query_qpc(hr_dev, hr_qp, &context);
	if (ret) {
		dev_err(dev, "query qpc error\n");
		ret = -EINVAL;
		goto out;
	}

	/* Resync software state from the HW state field */
	state = roce_get_field(context.byte_60_qpst_tempid,
			       V2_QPC_BYTE_60_QP_ST_M, V2_QPC_BYTE_60_QP_ST_S);
	tmp_qp_state = to_ib_qp_st((enum hns_roce_v2_qp_state)state);
	if (tmp_qp_state == -1) {
		dev_err(dev, "Illegal ib_qp_state\n");
		ret = -EINVAL;
		goto out;
	}
	hr_qp->state = (u8)tmp_qp_state;
	qp_attr->qp_state = (enum ib_qp_state)hr_qp->state;
	qp_attr->path_mtu = (enum ib_mtu)roce_get_field(context.byte_24_mtu_tc,
							V2_QPC_BYTE_24_MTU_M,
							V2_QPC_BYTE_24_MTU_S);
	qp_attr->path_mig_state = IB_MIG_ARMED;
	qp_attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE;
	if (hr_qp->ibqp.qp_type == IB_QPT_UD)
		qp_attr->qkey = V2_QKEY_VAL;

	qp_attr->rq_psn = roce_get_field(context.byte_108_rx_reqepsn,
					 V2_QPC_BYTE_108_RX_REQ_EPSN_M,
					 V2_QPC_BYTE_108_RX_REQ_EPSN_S);
	qp_attr->sq_psn = (u32)roce_get_field(context.byte_172_sq_psn,
					      V2_QPC_BYTE_172_SQ_CUR_PSN_M,
					      V2_QPC_BYTE_172_SQ_CUR_PSN_S);
	qp_attr->dest_qp_num = (u8)roce_get_field(context.byte_56_dqpn_err,
						  V2_QPC_BYTE_56_DQPN_M,
						  V2_QPC_BYTE_56_DQPN_S);
	/* Reassemble IB access flags from the individual RRE/RWE/ATE bits */
	qp_attr->qp_access_flags = ((roce_get_bit(context.byte_76_srqn_op_en,
						  V2_QPC_BYTE_76_RRE_S)) << V2_QP_RRE_S) |
				   ((roce_get_bit(context.byte_76_srqn_op_en,
						  V2_QPC_BYTE_76_RWE_S)) << V2_QP_RWE_S) |
				   ((roce_get_bit(context.byte_76_srqn_op_en,
						  V2_QPC_BYTE_76_ATE_S)) << V2_QP_ATE_S);

	/* Connected QPs additionally carry a path/GRH in the context */
	if (hr_qp->ibqp.qp_type == IB_QPT_RC ||
	    hr_qp->ibqp.qp_type == IB_QPT_UC) {
		struct ib_global_route *grh =
			rdma_ah_retrieve_grh(&qp_attr->ah_attr);

		rdma_ah_set_sl(&qp_attr->ah_attr,
			       roce_get_field(context.byte_28_at_fl,
					      V2_QPC_BYTE_28_SL_M,
					      V2_QPC_BYTE_28_SL_S));
		grh->flow_label = roce_get_field(context.byte_28_at_fl,
						 V2_QPC_BYTE_28_FL_M,
						 V2_QPC_BYTE_28_FL_S);
		grh->sgid_index = roce_get_field(context.byte_20_smac_sgid_idx,
						 V2_QPC_BYTE_20_SGID_IDX_M,
						 V2_QPC_BYTE_20_SGID_IDX_S);
		grh->hop_limit = roce_get_field(context.byte_24_mtu_tc,
						V2_QPC_BYTE_24_HOP_LIMIT_M,
						V2_QPC_BYTE_24_HOP_LIMIT_S);
		grh->traffic_class = roce_get_field(context.byte_24_mtu_tc,
						    V2_QPC_BYTE_24_TC_M,
						    V2_QPC_BYTE_24_TC_S);

		memcpy(grh->dgid.raw, context.dgid, sizeof(grh->dgid.raw));
	}

	qp_attr->port_num = hr_qp->port + 1;
	qp_attr->sq_draining = 0;
	/* HW stores rd_atomic depths as log2; undo the encoding */
	qp_attr->max_rd_atomic = 1 << roce_get_field(context.byte_208_irrl,
						     V2_QPC_BYTE_208_SR_MAX_M,
						     V2_QPC_BYTE_208_SR_MAX_S);
	qp_attr->max_dest_rd_atomic = 1 << roce_get_field(context.byte_140_raq,
							  V2_QPC_BYTE_140_RR_MAX_M,
							  V2_QPC_BYTE_140_RR_MAX_S);
	qp_attr->min_rnr_timer = (u8)roce_get_field(context.byte_80_rnr_rx_cqn,
						    V2_QPC_BYTE_80_MIN_RNR_TIME_M,
						    V2_QPC_BYTE_80_MIN_RNR_TIME_S);
	qp_attr->timeout = (u8)roce_get_field(context.byte_28_at_fl,
					      V2_QPC_BYTE_28_AT_M,
					      V2_QPC_BYTE_28_AT_S);
	qp_attr->retry_cnt = roce_get_field(context.byte_212_lsn,
					    V2_QPC_BYTE_212_RETRY_CNT_M,
					    V2_QPC_BYTE_212_RETRY_CNT_S);
	qp_attr->rnr_retry = le32_to_cpu(context.rq_rnr_timer);

done:
	qp_attr->cur_qp_state = qp_attr->qp_state;
	qp_attr->cap.max_recv_wr = hr_qp->rq.wqe_cnt;
	qp_attr->cap.max_recv_sge = hr_qp->rq.max_gs;

	/* Userspace QPs own their SQ; report zero for kernel-unknown caps */
	if (!ibqp->uobject) {
		qp_attr->cap.max_send_wr = hr_qp->sq.wqe_cnt;
		qp_attr->cap.max_send_sge = hr_qp->sq.max_gs;
	} else {
		qp_attr->cap.max_send_wr = 0;
		qp_attr->cap.max_send_sge = 0;
	}

	qp_init_attr->cap = qp_attr->cap;

out:
	mutex_unlock(&hr_qp->mutex);
	return ret;
}
4992
/*
 * Tear down a QP: move it to RESET, unlink it from the device and CQ
 * flush lists, clean residual CQEs (kernel QPs only), release its QPN,
 * buffers and doorbells.
 *
 * @udata distinguishes user QPs (unmap user doorbells) from kernel QPs
 * (free wrid arrays, WQE buffer, kernel doorbell).
 *
 * Returns the result of the modify-to-reset step; teardown proceeds
 * even if that step failed.
 */
static int hns_roce_v2_destroy_qp_common(struct hns_roce_dev *hr_dev,
					 struct hns_roce_qp *hr_qp,
					 struct ib_udata *udata)
{
	struct hns_roce_cq *send_cq, *recv_cq;
	struct ib_device *ibdev = &hr_dev->ib_dev;
	unsigned long flags;
	int ret = 0;

	if (hr_qp->ibqp.qp_type == IB_QPT_RC && hr_qp->state != IB_QPS_RESET) {
		/* Modify qp to reset before destroying qp */
		ret = hns_roce_v2_modify_qp(&hr_qp->ibqp, NULL, 0,
					    hr_qp->state, IB_QPS_RESET);
		if (ret)
			ibdev_err(ibdev, "modify QP to Reset failed.\n");
	}

	send_cq = hr_qp->ibqp.send_cq ? to_hr_cq(hr_qp->ibqp.send_cq) : NULL;
	recv_cq = hr_qp->ibqp.recv_cq ? to_hr_cq(hr_qp->ibqp.recv_cq) : NULL;

	/*
	 * Lock order: device qp_list_lock first, then both CQ locks
	 * (hns_roce_lock_cqs handles NULL/equal CQs and ordering).
	 */
	spin_lock_irqsave(&hr_dev->qp_list_lock, flags);
	hns_roce_lock_cqs(send_cq, recv_cq);

	/* Detach from the device-wide and per-CQ flush lists */
	list_del(&hr_qp->node);
	list_del(&hr_qp->sq_node);
	list_del(&hr_qp->rq_node);

	if (!udata) {
		/* Kernel QP: purge any CQEs still addressed to this QPN */
		if (recv_cq)
			__hns_roce_v2_cq_clean(recv_cq, hr_qp->qpn,
					       (hr_qp->ibqp.srq ?
						to_hr_srq(hr_qp->ibqp.srq) :
						NULL));

		if (send_cq && send_cq != recv_cq)
			__hns_roce_v2_cq_clean(send_cq, hr_qp->qpn, NULL);

	}

	hns_roce_qp_remove(hr_dev, hr_qp);

	hns_roce_unlock_cqs(send_cq, recv_cq);
	spin_unlock_irqrestore(&hr_dev->qp_list_lock, flags);

	hns_roce_qp_free(hr_dev, hr_qp);

	/* Not special_QP, free their QPN */
	if ((hr_qp->ibqp.qp_type == IB_QPT_RC) ||
	    (hr_qp->ibqp.qp_type == IB_QPT_UC) ||
	    (hr_qp->ibqp.qp_type == IB_QPT_UD))
		hns_roce_release_range_qp(hr_dev, hr_qp->qpn, 1);

	hns_roce_mtr_cleanup(hr_dev, &hr_qp->mtr);

	if (udata) {
		struct hns_roce_ucontext *context =
			rdma_udata_to_drv_context(
				udata,
				struct hns_roce_ucontext,
				ibucontext);

		/* Unmap user doorbells only if they were mapped at create */
		if (hr_qp->sq.wqe_cnt && (hr_qp->sdb_en == 1))
			hns_roce_db_unmap_user(context, &hr_qp->sdb);

		if (hr_qp->rq.wqe_cnt && (hr_qp->rdb_en == 1))
			hns_roce_db_unmap_user(context, &hr_qp->rdb);
	} else {
		kfree(hr_qp->sq.wrid);
		kfree(hr_qp->rq.wrid);
		hns_roce_buf_free(hr_dev, hr_qp->buff_size, &hr_qp->hr_buf);
		if (hr_qp->rq.wqe_cnt)
			hns_roce_free_db(hr_dev, &hr_qp->rdb);
	}
	ib_umem_release(hr_qp->umem);

	/* RQ-inline scatter lists exist only when the cap is on and RQ != 0 */
	if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) &&
	     hr_qp->rq.wqe_cnt) {
		kfree(hr_qp->rq_inl_buf.wqe_list[0].sg_list);
		kfree(hr_qp->rq_inl_buf.wqe_list);
	}

	return ret;
}
5076
c4367a26 5077static int hns_roce_v2_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
926a01dc
WHX
5078{
5079 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
5080 struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
5081 int ret;
5082
bdeacabd 5083 ret = hns_roce_v2_destroy_qp_common(hr_dev, hr_qp, udata);
d302c6e3 5084 if (ret)
db50077b
LO
5085 ibdev_err(&hr_dev->ib_dev, "Destroy qp 0x%06lx failed(%d)\n",
5086 hr_qp->qpn, ret);
926a01dc 5087
16a11e0b 5088 kfree(hr_qp);
926a01dc
WHX
5089
5090 return 0;
5091}
5092
aa84fa18
YL
/*
 * Reset the SCC (congestion control) context of a QP.
 *
 * Firmware protocol is a strict three-step sequence, serialized by
 * scc_mutex: (1) RESET_SCCC clears the "clear done" flag, (2) CLR_SCCC
 * asks firmware to wipe the per-QPN context, (3) QUERY_SCCC is polled
 * until firmware reports clr_done, sleeping 20ms between attempts.
 *
 * Returns 0 on success, a cmq error, or -ETIMEDOUT if firmware never
 * reports completion.
 */
static int hns_roce_v2_qp_flow_control_init(struct hns_roce_dev *hr_dev,
					    struct hns_roce_qp *hr_qp)
{
	struct hns_roce_sccc_clr_done *resp;
	struct hns_roce_sccc_clr *clr;
	struct hns_roce_cmq_desc desc;
	int ret, i;

	mutex_lock(&hr_dev->qp_table.scc_mutex);

	/* set scc ctx clear done flag */
	hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_RESET_SCCC, false);
	ret = hns_roce_cmq_send(hr_dev, &desc, 1);
	if (ret) {
		dev_err(hr_dev->dev, "Reset SCC ctx failed(%d)\n", ret);
		goto out;
	}

	/* clear scc context */
	hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CLR_SCCC, false);
	clr = (struct hns_roce_sccc_clr *)desc.data;
	clr->qpn = cpu_to_le32(hr_qp->qpn);
	ret = hns_roce_cmq_send(hr_dev, &desc, 1);
	if (ret) {
		dev_err(hr_dev->dev, "Clear SCC ctx failed(%d)\n", ret);
		goto out;
	}

	/* query scc context clear is done or not */
	resp = (struct hns_roce_sccc_clr_done *)desc.data;
	for (i = 0; i <= HNS_ROCE_CMQ_SCC_CLR_DONE_CNT; i++) {
		hns_roce_cmq_setup_basic_desc(&desc,
					      HNS_ROCE_OPC_QUERY_SCCC, true);
		ret = hns_roce_cmq_send(hr_dev, &desc, 1);
		if (ret) {
			dev_err(hr_dev->dev, "Query clr cmq failed(%d)\n", ret);
			goto out;
		}

		if (resp->clr_done)
			goto out;

		msleep(20);
	}

	dev_err(hr_dev->dev, "Query SCC clr done flag overtime.\n");
	ret = -ETIMEDOUT;

out:
	mutex_unlock(&hr_dev->qp_table.scc_mutex);
	return ret;
}
5145
b156269d 5146static int hns_roce_v2_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
5147{
5148 struct hns_roce_dev *hr_dev = to_hr_dev(cq->device);
5149 struct hns_roce_v2_cq_context *cq_context;
5150 struct hns_roce_cq *hr_cq = to_hr_cq(cq);
5151 struct hns_roce_v2_cq_context *cqc_mask;
5152 struct hns_roce_cmd_mailbox *mailbox;
5153 int ret;
5154
5155 mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
5156 if (IS_ERR(mailbox))
5157 return PTR_ERR(mailbox);
5158
5159 cq_context = mailbox->buf;
5160 cqc_mask = (struct hns_roce_v2_cq_context *)mailbox->buf + 1;
5161
5162 memset(cqc_mask, 0xff, sizeof(*cqc_mask));
5163
5164 roce_set_field(cq_context->byte_56_cqe_period_maxcnt,
5165 V2_CQC_BYTE_56_CQ_MAX_CNT_M, V2_CQC_BYTE_56_CQ_MAX_CNT_S,
5166 cq_count);
5167 roce_set_field(cqc_mask->byte_56_cqe_period_maxcnt,
5168 V2_CQC_BYTE_56_CQ_MAX_CNT_M, V2_CQC_BYTE_56_CQ_MAX_CNT_S,
5169 0);
5170 roce_set_field(cq_context->byte_56_cqe_period_maxcnt,
5171 V2_CQC_BYTE_56_CQ_PERIOD_M, V2_CQC_BYTE_56_CQ_PERIOD_S,
5172 cq_period);
5173 roce_set_field(cqc_mask->byte_56_cqe_period_maxcnt,
5174 V2_CQC_BYTE_56_CQ_PERIOD_M, V2_CQC_BYTE_56_CQ_PERIOD_S,
5175 0);
5176
5177 ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, hr_cq->cqn, 1,
5178 HNS_ROCE_CMD_MODIFY_CQC,
5179 HNS_ROCE_CMD_TIMEOUT_MSECS);
5180 hns_roce_free_cmd_mailbox(hr_dev, mailbox);
5181 if (ret)
5182 dev_err(hr_dev->dev, "MODIFY CQ Failed to cmd mailbox.\n");
5183
5184 return ret;
5185}
5186
0425e3e6
YL
/*
 * Deferred (workqueue) half of async event handling: log the event that
 * the AEQ interrupt handler queued via hns_roce_v2_init_irq_work(),
 * then free the work item.  Severity of the log level tracks severity
 * of the event (info/warn/err).
 */
static void hns_roce_irq_work_handle(struct work_struct *work)
{
	struct hns_roce_work *irq_work =
				container_of(work, struct hns_roce_work, work);
	struct device *dev = irq_work->hr_dev->dev;
	u32 qpn = irq_work->qpn;
	u32 cqn = irq_work->cqn;

	switch (irq_work->event_type) {
	case HNS_ROCE_EVENT_TYPE_PATH_MIG:
		dev_info(dev, "Path migrated succeeded.\n");
		break;
	case HNS_ROCE_EVENT_TYPE_PATH_MIG_FAILED:
		dev_warn(dev, "Path migration failed.\n");
		break;
	case HNS_ROCE_EVENT_TYPE_COMM_EST:
		/* Communication established: normal, nothing to report */
		break;
	case HNS_ROCE_EVENT_TYPE_SQ_DRAINED:
		dev_warn(dev, "Send queue drained.\n");
		break;
	case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
		dev_err(dev, "Local work queue 0x%x catas error, sub_type:%d\n",
			qpn, irq_work->sub_type);
		break;
	case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
		dev_err(dev, "Invalid request local work queue 0x%x error.\n",
			qpn);
		break;
	case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
		dev_err(dev, "Local access violation work queue 0x%x error, sub_type:%d\n",
			qpn, irq_work->sub_type);
		break;
	case HNS_ROCE_EVENT_TYPE_SRQ_LIMIT_REACH:
		dev_warn(dev, "SRQ limit reach.\n");
		break;
	case HNS_ROCE_EVENT_TYPE_SRQ_LAST_WQE_REACH:
		dev_warn(dev, "SRQ last wqe reach.\n");
		break;
	case HNS_ROCE_EVENT_TYPE_SRQ_CATAS_ERROR:
		dev_err(dev, "SRQ catas error.\n");
		break;
	case HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR:
		dev_err(dev, "CQ 0x%x access err.\n", cqn);
		break;
	case HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW:
		dev_warn(dev, "CQ 0x%x overflow\n", cqn);
		break;
	case HNS_ROCE_EVENT_TYPE_DB_OVERFLOW:
		dev_warn(dev, "DB overflow.\n");
		break;
	case HNS_ROCE_EVENT_TYPE_FLR:
		dev_warn(dev, "Function level reset.\n");
		break;
	default:
		break;
	}

	kfree(irq_work);
}
5246
5247static void hns_roce_v2_init_irq_work(struct hns_roce_dev *hr_dev,
b00a92c8 5248 struct hns_roce_eq *eq,
5249 u32 qpn, u32 cqn)
0425e3e6
YL
5250{
5251 struct hns_roce_work *irq_work;
5252
5253 irq_work = kzalloc(sizeof(struct hns_roce_work), GFP_ATOMIC);
5254 if (!irq_work)
5255 return;
5256
5257 INIT_WORK(&(irq_work->work), hns_roce_irq_work_handle);
5258 irq_work->hr_dev = hr_dev;
5259 irq_work->qpn = qpn;
b00a92c8 5260 irq_work->cqn = cqn;
0425e3e6
YL
5261 irq_work->event_type = eq->event_type;
5262 irq_work->sub_type = eq->sub_type;
5263 queue_work(hr_dev->irq_workq, &(irq_work->work));
5264}
5265
a5073d60
YL
/*
 * Ring the EQ doorbell: report the new consumer index to hardware and
 * re-arm the EQ.  The doorbell is a two-word register; word 0 carries
 * the command (and, for CEQs, the EQ number tag), word 1 the consumer
 * index.  The *_ARMED command variant is used unless the EQ is
 * configured always-armed.
 */
static void set_eq_cons_index_v2(struct hns_roce_eq *eq)
{
	struct hns_roce_dev *hr_dev = eq->hr_dev;
	__le32 doorbell[2] = {};

	if (eq->type_flag == HNS_ROCE_AEQ) {
		roce_set_field(doorbell[0], HNS_ROCE_V2_EQ_DB_CMD_M,
			       HNS_ROCE_V2_EQ_DB_CMD_S,
			       eq->arm_st == HNS_ROCE_V2_EQ_ALWAYS_ARMED ?
			       HNS_ROCE_EQ_DB_CMD_AEQ :
			       HNS_ROCE_EQ_DB_CMD_AEQ_ARMED);
	} else {
		/* CEQ doorbells additionally identify the EQ by number */
		roce_set_field(doorbell[0], HNS_ROCE_V2_EQ_DB_TAG_M,
			       HNS_ROCE_V2_EQ_DB_TAG_S, eq->eqn);

		roce_set_field(doorbell[0], HNS_ROCE_V2_EQ_DB_CMD_M,
			       HNS_ROCE_V2_EQ_DB_CMD_S,
			       eq->arm_st == HNS_ROCE_V2_EQ_ALWAYS_ARMED ?
			       HNS_ROCE_EQ_DB_CMD_CEQ :
			       HNS_ROCE_EQ_DB_CMD_CEQ_ARMED);
	}

	roce_set_field(doorbell[1], HNS_ROCE_V2_EQ_DB_PARA_M,
		       HNS_ROCE_V2_EQ_DB_PARA_S,
		       (eq->cons_index & HNS_ROCE_V2_CONS_IDX_M));

	hns_roce_write64(hr_dev, doorbell, eq->doorbell);
}
5294
d7e2d343 5295static inline void *get_eqe_buf(struct hns_roce_eq *eq, unsigned long offset)
a5073d60
YL
5296{
5297 u32 buf_chk_sz;
a5073d60
YL
5298
5299 buf_chk_sz = 1 << (eq->eqe_buf_pg_sz + PAGE_SHIFT);
d7e2d343
XW
5300 if (eq->buf.nbufs == 1)
5301 return eq->buf.direct.buf + offset % buf_chk_sz;
a5073d60 5302 else
d7e2d343
XW
5303 return eq->buf.page_list[offset / buf_chk_sz].buf +
5304 offset % buf_chk_sz;
a5073d60
YL
5305}
5306
5307static struct hns_roce_aeqe *next_aeqe_sw_v2(struct hns_roce_eq *eq)
5308{
5309 struct hns_roce_aeqe *aeqe;
5310
d7e2d343
XW
5311 aeqe = get_eqe_buf(eq, (eq->cons_index & (eq->entries - 1)) *
5312 HNS_ROCE_AEQ_ENTRY_SIZE);
a5073d60
YL
5313 return (roce_get_bit(aeqe->asyn, HNS_ROCE_V2_AEQ_AEQE_OWNER_S) ^
5314 !!(eq->cons_index & eq->entries)) ? aeqe : NULL;
5315}
5316
/*
 * AEQ interrupt handler: consume all pending async event queue entries.
 *
 * For each valid AEQE the event is dispatched to the QP/SRQ/CQ/command
 * event handler as appropriate, a work item is queued for deferred
 * logging, and the consumer index is advanced (wrapping at twice the
 * queue size to keep the owner-bit parity scheme working).  Finally the
 * doorbell is rung with the new consumer index.
 *
 * Returns nonzero if at least one AEQE was processed.
 */
static int hns_roce_v2_aeq_int(struct hns_roce_dev *hr_dev,
			       struct hns_roce_eq *eq)
{
	struct device *dev = hr_dev->dev;
	struct hns_roce_aeqe *aeqe = next_aeqe_sw_v2(eq);
	int aeqe_found = 0;
	int event_type;
	int sub_type;
	u32 srqn;
	u32 qpn;
	u32 cqn;

	while (aeqe) {
		/* Make sure we read AEQ entry after we have checked the
		 * ownership bit
		 */
		dma_rmb();

		event_type = roce_get_field(aeqe->asyn,
					    HNS_ROCE_V2_AEQE_EVENT_TYPE_M,
					    HNS_ROCE_V2_AEQE_EVENT_TYPE_S);
		sub_type = roce_get_field(aeqe->asyn,
					  HNS_ROCE_V2_AEQE_SUB_TYPE_M,
					  HNS_ROCE_V2_AEQE_SUB_TYPE_S);
		/* The queue-number field is a union; decode all three views */
		qpn = roce_get_field(aeqe->event.qp_event.qp,
				     HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_M,
				     HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_S);
		cqn = roce_get_field(aeqe->event.cq_event.cq,
				     HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_M,
				     HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_S);
		srqn = roce_get_field(aeqe->event.srq_event.srq,
				      HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_M,
				      HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_S);

		switch (event_type) {
		case HNS_ROCE_EVENT_TYPE_PATH_MIG:
		case HNS_ROCE_EVENT_TYPE_PATH_MIG_FAILED:
		case HNS_ROCE_EVENT_TYPE_COMM_EST:
		case HNS_ROCE_EVENT_TYPE_SQ_DRAINED:
		case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
		case HNS_ROCE_EVENT_TYPE_SRQ_LAST_WQE_REACH:
		case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
		case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
			hns_roce_qp_event(hr_dev, qpn, event_type);
			break;
		case HNS_ROCE_EVENT_TYPE_SRQ_LIMIT_REACH:
		case HNS_ROCE_EVENT_TYPE_SRQ_CATAS_ERROR:
			hns_roce_srq_event(hr_dev, srqn, event_type);
			break;
		case HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR:
		case HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW:
			hns_roce_cq_event(hr_dev, cqn, event_type);
			break;
		case HNS_ROCE_EVENT_TYPE_DB_OVERFLOW:
			break;
		case HNS_ROCE_EVENT_TYPE_MB:
			/* Mailbox completion: wake the waiting command */
			hns_roce_cmd_event(hr_dev,
					le16_to_cpu(aeqe->event.cmd.token),
					aeqe->event.cmd.status,
					le64_to_cpu(aeqe->event.cmd.out_param));
			break;
		case HNS_ROCE_EVENT_TYPE_CEQ_OVERFLOW:
			break;
		case HNS_ROCE_EVENT_TYPE_FLR:
			break;
		default:
			dev_err(dev, "Unhandled event %d on EQ %d at idx %u.\n",
				event_type, eq->eqn, eq->cons_index);
			break;
		}

		/* Stashed for the deferred logging work item */
		eq->event_type = event_type;
		eq->sub_type = sub_type;
		++eq->cons_index;
		aeqe_found = 1;

		/* Index wraps at 2*entries so the owner parity flips per lap */
		if (eq->cons_index > (2 * eq->entries - 1))
			eq->cons_index = 0;

		hns_roce_v2_init_irq_work(hr_dev, eq, qpn, cqn);

		aeqe = next_aeqe_sw_v2(eq);
	}

	set_eq_cons_index_v2(eq);
	return aeqe_found;
}
5404
a5073d60
YL
5405static struct hns_roce_ceqe *next_ceqe_sw_v2(struct hns_roce_eq *eq)
5406{
5407 struct hns_roce_ceqe *ceqe;
5408
d7e2d343
XW
5409 ceqe = get_eqe_buf(eq, (eq->cons_index & (eq->entries - 1)) *
5410 HNS_ROCE_CEQ_ENTRY_SIZE);
a5073d60
YL
5411 return (!!(roce_get_bit(ceqe->comp, HNS_ROCE_V2_CEQ_CEQE_OWNER_S))) ^
5412 (!!(eq->cons_index & eq->entries)) ? ceqe : NULL;
5413}
5414
5415static int hns_roce_v2_ceq_int(struct hns_roce_dev *hr_dev,
5416 struct hns_roce_eq *eq)
5417{
5418 struct device *dev = hr_dev->dev;
e7f40440 5419 struct hns_roce_ceqe *ceqe = next_ceqe_sw_v2(eq);
a5073d60
YL
5420 int ceqe_found = 0;
5421 u32 cqn;
5422
e7f40440 5423 while (ceqe) {
4044a3f4
YL
5424 /* Make sure we read CEQ entry after we have checked the
5425 * ownership bit
5426 */
5427 dma_rmb();
5428
60262b10 5429 cqn = roce_get_field(ceqe->comp, HNS_ROCE_V2_CEQE_COMP_CQN_M,
a5073d60
YL
5430 HNS_ROCE_V2_CEQE_COMP_CQN_S);
5431
5432 hns_roce_cq_completion(hr_dev, cqn);
5433
5434 ++eq->cons_index;
5435 ceqe_found = 1;
5436
90c559b1 5437 if (eq->cons_index > (EQ_DEPTH_COEFF * eq->entries - 1)) {
a5073d60
YL
5438 dev_warn(dev, "cons_index overflow, set back to 0.\n");
5439 eq->cons_index = 0;
5440 }
e7f40440
LC
5441
5442 ceqe = next_ceqe_sw_v2(eq);
a5073d60
YL
5443 }
5444
5445 set_eq_cons_index_v2(eq);
5446
5447 return ceqe_found;
5448}
5449
5450static irqreturn_t hns_roce_v2_msix_interrupt_eq(int irq, void *eq_ptr)
5451{
5452 struct hns_roce_eq *eq = eq_ptr;
5453 struct hns_roce_dev *hr_dev = eq->hr_dev;
5454 int int_work = 0;
5455
5456 if (eq->type_flag == HNS_ROCE_CEQ)
5457 /* Completion event interrupt */
5458 int_work = hns_roce_v2_ceq_int(hr_dev, eq);
5459 else
5460 /* Asychronous event interrupt */
5461 int_work = hns_roce_v2_aeq_int(hr_dev, eq);
5462
5463 return IRQ_RETVAL(int_work);
5464}
5465
5466static irqreturn_t hns_roce_v2_msix_interrupt_abn(int irq, void *dev_id)
5467{
5468 struct hns_roce_dev *hr_dev = dev_id;
5469 struct device *dev = hr_dev->dev;
5470 int int_work = 0;
5471 u32 int_st;
5472 u32 int_en;
5473
5474 /* Abnormal interrupt */
5475 int_st = roce_read(hr_dev, ROCEE_VF_ABN_INT_ST_REG);
5476 int_en = roce_read(hr_dev, ROCEE_VF_ABN_INT_EN_REG);
5477
bfe86035 5478 if (int_st & BIT(HNS_ROCE_V2_VF_INT_ST_AEQ_OVERFLOW_S)) {
2b9acb9a
XT
5479 struct pci_dev *pdev = hr_dev->pci_dev;
5480 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
5481 const struct hnae3_ae_ops *ops = ae_dev->ops;
5482
a5073d60
YL
5483 dev_err(dev, "AEQ overflow!\n");
5484
bfe86035 5485 int_st |= 1 << HNS_ROCE_V2_VF_INT_ST_AEQ_OVERFLOW_S;
a5073d60
YL
5486 roce_write(hr_dev, ROCEE_VF_ABN_INT_ST_REG, int_st);
5487
2b9acb9a
XT
5488 /* Set reset level for reset_event() */
5489 if (ops->set_default_reset_request)
5490 ops->set_default_reset_request(ae_dev,
5491 HNAE3_FUNC_RESET);
5492 if (ops->reset_event)
5493 ops->reset_event(pdev, NULL);
5494
bfe86035 5495 int_en |= 1 << HNS_ROCE_V2_VF_ABN_INT_EN_S;
a5073d60
YL
5496 roce_write(hr_dev, ROCEE_VF_ABN_INT_EN_REG, int_en);
5497
5498 int_work = 1;
bfe86035 5499 } else if (int_st & BIT(HNS_ROCE_V2_VF_INT_ST_BUS_ERR_S)) {
a5073d60
YL
5500 dev_err(dev, "BUS ERR!\n");
5501
bfe86035 5502 int_st |= 1 << HNS_ROCE_V2_VF_INT_ST_BUS_ERR_S;
a5073d60
YL
5503 roce_write(hr_dev, ROCEE_VF_ABN_INT_ST_REG, int_st);
5504
bfe86035 5505 int_en |= 1 << HNS_ROCE_V2_VF_ABN_INT_EN_S;
a5073d60
YL
5506 roce_write(hr_dev, ROCEE_VF_ABN_INT_EN_REG, int_en);
5507
5508 int_work = 1;
bfe86035 5509 } else if (int_st & BIT(HNS_ROCE_V2_VF_INT_ST_OTHER_ERR_S)) {
a5073d60
YL
5510 dev_err(dev, "OTHER ERR!\n");
5511
bfe86035 5512 int_st |= 1 << HNS_ROCE_V2_VF_INT_ST_OTHER_ERR_S;
a5073d60
YL
5513 roce_write(hr_dev, ROCEE_VF_ABN_INT_ST_REG, int_st);
5514
bfe86035 5515 int_en |= 1 << HNS_ROCE_V2_VF_ABN_INT_EN_S;
a5073d60
YL
5516 roce_write(hr_dev, ROCEE_VF_ABN_INT_EN_REG, int_en);
5517
5518 int_work = 1;
5519 } else
5520 dev_err(dev, "There is no abnormal irq found!\n");
5521
5522 return IRQ_RETVAL(int_work);
5523}
5524
5525static void hns_roce_v2_int_mask_enable(struct hns_roce_dev *hr_dev,
5526 int eq_num, int enable_flag)
5527{
5528 int i;
5529
5530 if (enable_flag == EQ_ENABLE) {
5531 for (i = 0; i < eq_num; i++)
5532 roce_write(hr_dev, ROCEE_VF_EVENT_INT_EN_REG +
5533 i * EQ_REG_OFFSET,
5534 HNS_ROCE_V2_VF_EVENT_INT_EN_M);
5535
5536 roce_write(hr_dev, ROCEE_VF_ABN_INT_EN_REG,
5537 HNS_ROCE_V2_VF_ABN_INT_EN_M);
5538 roce_write(hr_dev, ROCEE_VF_ABN_INT_CFG_REG,
5539 HNS_ROCE_V2_VF_ABN_INT_CFG_M);
5540 } else {
5541 for (i = 0; i < eq_num; i++)
5542 roce_write(hr_dev, ROCEE_VF_EVENT_INT_EN_REG +
5543 i * EQ_REG_OFFSET,
5544 HNS_ROCE_V2_VF_EVENT_INT_EN_M & 0x0);
5545
5546 roce_write(hr_dev, ROCEE_VF_ABN_INT_EN_REG,
5547 HNS_ROCE_V2_VF_ABN_INT_EN_M & 0x0);
5548 roce_write(hr_dev, ROCEE_VF_ABN_INT_CFG_REG,
5549 HNS_ROCE_V2_VF_ABN_INT_CFG_M & 0x0);
5550 }
5551}
5552
5553static void hns_roce_v2_destroy_eqc(struct hns_roce_dev *hr_dev, int eqn)
5554{
5555 struct device *dev = hr_dev->dev;
5556 int ret;
5557
5558 if (eqn < hr_dev->caps.num_comp_vectors)
5559 ret = hns_roce_cmd_mbox(hr_dev, 0, 0, eqn & HNS_ROCE_V2_EQN_M,
5560 0, HNS_ROCE_CMD_DESTROY_CEQC,
5561 HNS_ROCE_CMD_TIMEOUT_MSECS);
5562 else
5563 ret = hns_roce_cmd_mbox(hr_dev, 0, 0, eqn & HNS_ROCE_V2_EQN_M,
5564 0, HNS_ROCE_CMD_DESTROY_AEQC,
5565 HNS_ROCE_CMD_TIMEOUT_MSECS);
5566 if (ret)
5567 dev_err(dev, "[mailbox cmd] destroy eqc(%d) failed.\n", eqn);
5568}
5569
d7e2d343 5570static void free_eq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq)
a5073d60 5571{
d7e2d343
XW
5572 if (!eq->hop_num || eq->hop_num == HNS_ROCE_HOP_NUM_0)
5573 hns_roce_mtr_cleanup(hr_dev, &eq->mtr);
5574 hns_roce_buf_free(hr_dev, eq->buf.size, &eq->buf);
a5073d60
YL
5575}
5576
/* Populate the hardware EQ context (EQC) in @mb_buf from the software EQ
 * state; the caller posts it to firmware with a CREATE_AEQC/CREATE_CEQC
 * mailbox command.  Also finishes initializing the software-side fields
 * (doorbell address, hop count, base addresses) that the EQC encodes.
 */
static void hns_roce_config_eqc(struct hns_roce_dev *hr_dev,
				struct hns_roce_eq *eq,
				void *mb_buf)
{
	struct hns_roce_eq_context *eqc;
	u64 ba[MTT_MIN_COUNT] = { 0 };
	int count;

	eqc = mb_buf;
	memset(eqc, 0, sizeof(struct hns_roce_eq_context));

	/* init eqc */
	eq->doorbell = hr_dev->reg_base + ROCEE_VF_EQ_DB_CFG0_REG;
	eq->hop_num = hr_dev->caps.eqe_hop_num;
	eq->cons_index = 0;
	eq->over_ignore = HNS_ROCE_V2_EQ_OVER_IGNORE_0;
	eq->coalesce = HNS_ROCE_V2_EQ_COALESCE_0;
	eq->arm_st = HNS_ROCE_V2_EQ_ALWAYS_ARMED;
	eq->eqe_ba_pg_sz = hr_dev->caps.eqe_ba_pg_sz;
	eq->eqe_buf_pg_sz = hr_dev->caps.eqe_buf_pg_sz;
	eq->shift = ilog2((unsigned int)eq->entries);

	/* if not muti-hop, eqe buffer only use one trunk */
	if (!eq->hop_num || eq->hop_num == HNS_ROCE_HOP_NUM_0) {
		eq->eqe_ba = eq->buf.direct.map;
		eq->cur_eqe_ba = eq->eqe_ba;
		if (eq->buf.npages > 1)
			eq->nxt_eqe_ba = eq->eqe_ba + (1 << eq->eqe_buf_pg_sz);
		else
			eq->nxt_eqe_ba = eq->eqe_ba;
	} else {
		/* multi-hop: fetch the first two block addresses from the
		 * MTR that map_eq_buf() attached.
		 */
		count = hns_roce_mtr_find(hr_dev, &eq->mtr, 0, ba,
					  MTT_MIN_COUNT, &eq->eqe_ba);
		eq->cur_eqe_ba = ba[0];
		if (count > 1)
			eq->nxt_eqe_ba = ba[1];
		else
			eq->nxt_eqe_ba = ba[0];
	}

	/* set eqc state */
	roce_set_field(eqc->byte_4, HNS_ROCE_EQC_EQ_ST_M, HNS_ROCE_EQC_EQ_ST_S,
		       HNS_ROCE_V2_EQ_STATE_VALID);

	/* set eqe hop num */
	roce_set_field(eqc->byte_4, HNS_ROCE_EQC_HOP_NUM_M,
		       HNS_ROCE_EQC_HOP_NUM_S, eq->hop_num);

	/* set eqc over_ignore */
	roce_set_field(eqc->byte_4, HNS_ROCE_EQC_OVER_IGNORE_M,
		       HNS_ROCE_EQC_OVER_IGNORE_S, eq->over_ignore);

	/* set eqc coalesce */
	roce_set_field(eqc->byte_4, HNS_ROCE_EQC_COALESCE_M,
		       HNS_ROCE_EQC_COALESCE_S, eq->coalesce);

	/* set eqc arm_state */
	roce_set_field(eqc->byte_4, HNS_ROCE_EQC_ARM_ST_M,
		       HNS_ROCE_EQC_ARM_ST_S, eq->arm_st);

	/* set eqn */
	roce_set_field(eqc->byte_4, HNS_ROCE_EQC_EQN_M, HNS_ROCE_EQC_EQN_S,
		       eq->eqn);

	/* set eqe_cnt */
	roce_set_field(eqc->byte_4, HNS_ROCE_EQC_EQE_CNT_M,
		       HNS_ROCE_EQC_EQE_CNT_S, HNS_ROCE_EQ_INIT_EQE_CNT);

	/* set eqe_ba_pg_sz */
	roce_set_field(eqc->byte_8, HNS_ROCE_EQC_BA_PG_SZ_M,
		       HNS_ROCE_EQC_BA_PG_SZ_S,
		       eq->eqe_ba_pg_sz + PG_SHIFT_OFFSET);

	/* set eqe_buf_pg_sz */
	roce_set_field(eqc->byte_8, HNS_ROCE_EQC_BUF_PG_SZ_M,
		       HNS_ROCE_EQC_BUF_PG_SZ_S,
		       eq->eqe_buf_pg_sz + PG_SHIFT_OFFSET);

	/* set eq_producer_idx */
	roce_set_field(eqc->byte_8, HNS_ROCE_EQC_PROD_INDX_M,
		       HNS_ROCE_EQC_PROD_INDX_S, HNS_ROCE_EQ_INIT_PROD_IDX);

	/* set eq_max_cnt */
	roce_set_field(eqc->byte_12, HNS_ROCE_EQC_MAX_CNT_M,
		       HNS_ROCE_EQC_MAX_CNT_S, eq->eq_max_cnt);

	/* set eq_period */
	roce_set_field(eqc->byte_12, HNS_ROCE_EQC_PERIOD_M,
		       HNS_ROCE_EQC_PERIOD_S, eq->eq_period);

	/* set eqe_report_timer */
	roce_set_field(eqc->eqe_report_timer, HNS_ROCE_EQC_REPORT_TIMER_M,
		       HNS_ROCE_EQC_REPORT_TIMER_S,
		       HNS_ROCE_EQ_INIT_REPORT_TIMER);

	/* set eqe_ba [34:3] */
	roce_set_field(eqc->eqe_ba0, HNS_ROCE_EQC_EQE_BA_L_M,
		       HNS_ROCE_EQC_EQE_BA_L_S, eq->eqe_ba >> 3);

	/* set eqe_ba [64:35] */
	roce_set_field(eqc->eqe_ba1, HNS_ROCE_EQC_EQE_BA_H_M,
		       HNS_ROCE_EQC_EQE_BA_H_S, eq->eqe_ba >> 35);

	/* set eq shift */
	roce_set_field(eqc->byte_28, HNS_ROCE_EQC_SHIFT_M, HNS_ROCE_EQC_SHIFT_S,
		       eq->shift);

	/* set eq MSI_IDX */
	roce_set_field(eqc->byte_28, HNS_ROCE_EQC_MSI_INDX_M,
		       HNS_ROCE_EQC_MSI_INDX_S, HNS_ROCE_EQ_INIT_MSI_IDX);

	/* set cur_eqe_ba [27:12] */
	roce_set_field(eqc->byte_28, HNS_ROCE_EQC_CUR_EQE_BA_L_M,
		       HNS_ROCE_EQC_CUR_EQE_BA_L_S, eq->cur_eqe_ba >> 12);

	/* set cur_eqe_ba [59:28] */
	roce_set_field(eqc->byte_32, HNS_ROCE_EQC_CUR_EQE_BA_M_M,
		       HNS_ROCE_EQC_CUR_EQE_BA_M_S, eq->cur_eqe_ba >> 28);

	/* set cur_eqe_ba [63:60] */
	roce_set_field(eqc->byte_36, HNS_ROCE_EQC_CUR_EQE_BA_H_M,
		       HNS_ROCE_EQC_CUR_EQE_BA_H_S, eq->cur_eqe_ba >> 60);

	/* set eq consumer idx */
	roce_set_field(eqc->byte_36, HNS_ROCE_EQC_CONS_INDX_M,
		       HNS_ROCE_EQC_CONS_INDX_S, HNS_ROCE_EQ_INIT_CONS_IDX);

	/* set nex_eqe_ba[43:12] */
	roce_set_field(eqc->nxt_eqe_ba0, HNS_ROCE_EQC_NXT_EQE_BA_L_M,
		       HNS_ROCE_EQC_NXT_EQE_BA_L_S, eq->nxt_eqe_ba >> 12);

	/* set nex_eqe_ba[63:44] */
	roce_set_field(eqc->nxt_eqe_ba1, HNS_ROCE_EQC_NXT_EQE_BA_H_M,
		       HNS_ROCE_EQC_NXT_EQE_BA_H_S, eq->nxt_eqe_ba >> 44);
}
5712
d7e2d343
XW
5713static int map_eq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq,
5714 u32 page_shift)
a5073d60 5715{
d7e2d343
XW
5716 struct hns_roce_buf_region region = {};
5717 dma_addr_t *buf_list = NULL;
a5073d60 5718 int ba_num;
d7e2d343 5719 int ret;
a5073d60 5720
2a3d923f 5721 ba_num = DIV_ROUND_UP(PAGE_ALIGN(eq->entries * eq->eqe_size),
d7e2d343
XW
5722 1 << page_shift);
5723 hns_roce_init_buf_region(&region, hr_dev->caps.eqe_hop_num, 0, ba_num);
a5073d60 5724
d7e2d343
XW
5725 /* alloc a tmp list for storing eq buf address */
5726 ret = hns_roce_alloc_buf_list(&region, &buf_list, 1);
5727 if (ret) {
5728 dev_err(hr_dev->dev, "alloc eq buf_list error\n");
5729 return ret;
5730 }
a5073d60 5731
d7e2d343
XW
5732 ba_num = hns_roce_get_kmem_bufs(hr_dev, buf_list, region.count,
5733 region.offset, &eq->buf);
5734 if (ba_num != region.count) {
5735 dev_err(hr_dev->dev, "get eqe buf err,expect %d,ret %d.\n",
5736 region.count, ba_num);
5737 ret = -ENOBUFS;
5738 goto done;
a5073d60
YL
5739 }
5740
d7e2d343
XW
5741 hns_roce_mtr_init(&eq->mtr, PAGE_SHIFT + hr_dev->caps.eqe_ba_pg_sz,
5742 page_shift);
5743 ret = hns_roce_mtr_attach(hr_dev, &eq->mtr, &buf_list, &region, 1);
5744 if (ret)
8d8d2b76 5745 dev_err(hr_dev->dev, "mtr attach error for eqe\n");
a5073d60 5746
d7e2d343 5747 goto done;
a5073d60 5748
d7e2d343
XW
5749 hns_roce_mtr_cleanup(hr_dev, &eq->mtr);
5750done:
5751 hns_roce_free_buf_list(&buf_list, 1);
a5073d60 5752
d7e2d343
XW
5753 return ret;
5754}
a5073d60 5755
d7e2d343
XW
5756static int alloc_eq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq)
5757{
5758 struct hns_roce_buf *buf = &eq->buf;
5759 bool is_mhop = false;
5760 u32 page_shift;
5761 u32 mhop_num;
5762 u32 max_size;
5763 int ret;
a5073d60 5764
d7e2d343
XW
5765 page_shift = PAGE_SHIFT + hr_dev->caps.eqe_buf_pg_sz;
5766 mhop_num = hr_dev->caps.eqe_hop_num;
5767 if (!mhop_num) {
5768 max_size = 1 << page_shift;
5769 buf->size = max_size;
5770 } else if (mhop_num == HNS_ROCE_HOP_NUM_0) {
5771 max_size = eq->entries * eq->eqe_size;
5772 buf->size = max_size;
5773 } else {
5774 max_size = 1 << page_shift;
5775 buf->size = PAGE_ALIGN(eq->entries * eq->eqe_size);
5776 is_mhop = true;
5777 }
a5073d60 5778
d7e2d343
XW
5779 ret = hns_roce_buf_alloc(hr_dev, buf->size, max_size, buf, page_shift);
5780 if (ret) {
5781 dev_err(hr_dev->dev, "alloc eq buf error\n");
5782 return ret;
a5073d60 5783 }
d7e2d343
XW
5784
5785 if (is_mhop) {
5786 ret = map_eq_buf(hr_dev, eq, page_shift);
5787 if (ret) {
5788 dev_err(hr_dev->dev, "map roce buf error\n");
5789 goto err_alloc;
a5073d60
YL
5790 }
5791 }
5792
d7e2d343
XW
5793 return 0;
5794err_alloc:
5795 hns_roce_buf_free(hr_dev, buf->size, buf);
5796 return ret;
a5073d60
YL
5797}
5798
5799static int hns_roce_v2_create_eq(struct hns_roce_dev *hr_dev,
5800 struct hns_roce_eq *eq,
5801 unsigned int eq_cmd)
5802{
a5073d60 5803 struct hns_roce_cmd_mailbox *mailbox;
a5073d60
YL
5804 int ret;
5805
5806 /* Allocate mailbox memory */
5807 mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
5808 if (IS_ERR(mailbox))
5809 return PTR_ERR(mailbox);
5810
d7e2d343
XW
5811 ret = alloc_eq_buf(hr_dev, eq);
5812 if (ret) {
5813 ret = -ENOMEM;
5814 goto free_cmd_mbox;
a5073d60 5815 }
a5073d60
YL
5816 hns_roce_config_eqc(hr_dev, eq, mailbox->buf);
5817
5818 ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, eq->eqn, 0,
5819 eq_cmd, HNS_ROCE_CMD_TIMEOUT_MSECS);
5820 if (ret) {
d7e2d343 5821 dev_err(hr_dev->dev, "[mailbox cmd] create eqc failed.\n");
a5073d60
YL
5822 goto err_cmd_mbox;
5823 }
5824
5825 hns_roce_free_cmd_mailbox(hr_dev, mailbox);
5826
5827 return 0;
5828
5829err_cmd_mbox:
d7e2d343 5830 free_eq_buf(hr_dev, eq);
a5073d60
YL
5831
5832free_cmd_mbox:
5833 hns_roce_free_cmd_mailbox(hr_dev, mailbox);
5834
5835 return ret;
5836}
5837
33db6f94
YL
/* Allocate IRQ name strings and register handlers for every vector.
 *
 * irq_names[] is laid out as [0, other) abnormal, [other, other+aeq)
 * AEQ, [other+aeq, irq_num) CEQ — but eq_table->eq[] stores CEQs first
 * and AEQs after them.  That mismatch is why the name index below is
 * shifted by +aeq_num for CEQs and by -comp_num for AEQs.
 *
 * Returns 0 on success; on failure everything acquired so far is
 * released and a negative errno is returned.
 */
static int __hns_roce_request_irq(struct hns_roce_dev *hr_dev, int irq_num,
				  int comp_num, int aeq_num, int other_num)
{
	struct hns_roce_eq_table *eq_table = &hr_dev->eq_table;
	int i, j;
	int ret;

	for (i = 0; i < irq_num; i++) {
		hr_dev->irq_names[i] = kzalloc(HNS_ROCE_INT_NAME_LEN,
					       GFP_KERNEL);
		if (!hr_dev->irq_names[i]) {
			ret = -ENOMEM;
			goto err_kzalloc_failed;
		}
	}

	/* irq contains: abnormal + AEQ + CEQ */
	for (j = 0; j < other_num; j++)
		snprintf((char *)hr_dev->irq_names[j], HNS_ROCE_INT_NAME_LEN,
			 "hns-abn-%d", j);

	for (j = other_num; j < (other_num + aeq_num); j++)
		snprintf((char *)hr_dev->irq_names[j], HNS_ROCE_INT_NAME_LEN,
			 "hns-aeq-%d", j - other_num);

	for (j = (other_num + aeq_num); j < irq_num; j++)
		snprintf((char *)hr_dev->irq_names[j], HNS_ROCE_INT_NAME_LEN,
			 "hns-ceq-%d", j - other_num - aeq_num);

	for (j = 0; j < irq_num; j++) {
		if (j < other_num)
			/* abnormal-event vectors, handled per device */
			ret = request_irq(hr_dev->irq[j],
					  hns_roce_v2_msix_interrupt_abn,
					  0, hr_dev->irq_names[j], hr_dev);

		else if (j < (other_num + comp_num))
			/* CEQs: eq[0..comp_num), named "hns-ceq-*" */
			ret = request_irq(eq_table->eq[j - other_num].irq,
					  hns_roce_v2_msix_interrupt_eq,
					  0, hr_dev->irq_names[j + aeq_num],
					  &eq_table->eq[j - other_num]);
		else
			/* AEQs: eq[comp_num..), named "hns-aeq-*" */
			ret = request_irq(eq_table->eq[j - other_num].irq,
					  hns_roce_v2_msix_interrupt_eq,
					  0, hr_dev->irq_names[j - comp_num],
					  &eq_table->eq[j - other_num]);
		if (ret) {
			dev_err(hr_dev->dev, "Request irq error!\n");
			goto err_request_failed;
		}
	}

	return 0;

err_request_failed:
	/* j is the index that failed; free only the IRQs before it */
	for (j -= 1; j >= 0; j--)
		if (j < other_num)
			free_irq(hr_dev->irq[j], hr_dev);
		else
			free_irq(eq_table->eq[j - other_num].irq,
				 &eq_table->eq[j - other_num]);

err_kzalloc_failed:
	/* i is the first name slot that failed to allocate (or irq_num) */
	for (i -= 1; i >= 0; i--)
		kfree(hr_dev->irq_names[i]);

	return ret;
}
5905
5906static void __hns_roce_free_irq(struct hns_roce_dev *hr_dev)
5907{
5908 int irq_num;
5909 int eq_num;
5910 int i;
5911
5912 eq_num = hr_dev->caps.num_comp_vectors + hr_dev->caps.num_aeq_vectors;
5913 irq_num = eq_num + hr_dev->caps.num_other_vectors;
5914
5915 for (i = 0; i < hr_dev->caps.num_other_vectors; i++)
5916 free_irq(hr_dev->irq[i], hr_dev);
5917
5918 for (i = 0; i < eq_num; i++)
5919 free_irq(hr_dev->eq_table.eq[i].irq, &hr_dev->eq_table.eq[i]);
5920
5921 for (i = 0; i < irq_num; i++)
5922 kfree(hr_dev->irq_names[i]);
5923}
5924
a5073d60
YL
/* Create all event queues (CEQs first, then AEQs), program their
 * contexts into hardware, enable and request the MSI-X vectors, and
 * create the ordered workqueue used to defer work out of IRQ context.
 * On failure, tears down exactly what was set up so far.
 */
static int hns_roce_v2_init_eq_table(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_eq_table *eq_table = &hr_dev->eq_table;
	struct device *dev = hr_dev->dev;
	struct hns_roce_eq *eq;
	unsigned int eq_cmd;
	int irq_num;
	int eq_num;
	int other_num;
	int comp_num;
	int aeq_num;
	int i;
	int ret;

	other_num = hr_dev->caps.num_other_vectors;
	comp_num = hr_dev->caps.num_comp_vectors;
	aeq_num = hr_dev->caps.num_aeq_vectors;

	eq_num = comp_num + aeq_num;
	irq_num = eq_num + other_num;

	eq_table->eq = kcalloc(eq_num, sizeof(*eq_table->eq), GFP_KERNEL);
	if (!eq_table->eq)
		return -ENOMEM;

	/* create eq */
	for (i = 0; i < eq_num; i++) {
		eq = &eq_table->eq[i];
		eq->hr_dev = hr_dev;
		eq->eqn = i;
		if (i < comp_num) {
			/* CEQ */
			eq_cmd = HNS_ROCE_CMD_CREATE_CEQC;
			eq->type_flag = HNS_ROCE_CEQ;
			eq->entries = hr_dev->caps.ceqe_depth;
			eq->eqe_size = HNS_ROCE_CEQ_ENTRY_SIZE;
			/* CEQ vectors come after abnormal + AEQ vectors */
			eq->irq = hr_dev->irq[i + other_num + aeq_num];
			eq->eq_max_cnt = HNS_ROCE_CEQ_DEFAULT_BURST_NUM;
			eq->eq_period = HNS_ROCE_CEQ_DEFAULT_INTERVAL;
		} else {
			/* AEQ */
			eq_cmd = HNS_ROCE_CMD_CREATE_AEQC;
			eq->type_flag = HNS_ROCE_AEQ;
			eq->entries = hr_dev->caps.aeqe_depth;
			eq->eqe_size = HNS_ROCE_AEQ_ENTRY_SIZE;
			/* AEQ vectors come right after the abnormal ones */
			eq->irq = hr_dev->irq[i - comp_num + other_num];
			eq->eq_max_cnt = HNS_ROCE_AEQ_DEFAULT_BURST_NUM;
			eq->eq_period = HNS_ROCE_AEQ_DEFAULT_INTERVAL;
		}

		ret = hns_roce_v2_create_eq(hr_dev, eq, eq_cmd);
		if (ret) {
			dev_err(dev, "eq create failed.\n");
			goto err_create_eq_fail;
		}
	}

	/* enable irq */
	hns_roce_v2_int_mask_enable(hr_dev, eq_num, EQ_ENABLE);

	ret = __hns_roce_request_irq(hr_dev, irq_num, comp_num,
				     aeq_num, other_num);
	if (ret) {
		dev_err(dev, "Request irq failed.\n");
		goto err_request_irq_fail;
	}

	/* ordered: events are processed in the order they were queued */
	hr_dev->irq_workq = alloc_ordered_workqueue("hns_roce_irq_workq", 0);
	if (!hr_dev->irq_workq) {
		dev_err(dev, "Create irq workqueue failed!\n");
		ret = -ENOMEM;
		goto err_create_wq_fail;
	}

	return 0;

err_create_wq_fail:
	__hns_roce_free_irq(hr_dev);

err_request_irq_fail:
	hns_roce_v2_int_mask_enable(hr_dev, eq_num, EQ_DISABLE);

err_create_eq_fail:
	/* free only the EQ buffers that were successfully created */
	for (i -= 1; i >= 0; i--)
		free_eq_buf(hr_dev, &eq_table->eq[i]);
	kfree(eq_table->eq);

	return ret;
}
6014
6015static void hns_roce_v2_cleanup_eq_table(struct hns_roce_dev *hr_dev)
6016{
6017 struct hns_roce_eq_table *eq_table = &hr_dev->eq_table;
a5073d60
YL
6018 int eq_num;
6019 int i;
6020
6021 eq_num = hr_dev->caps.num_comp_vectors + hr_dev->caps.num_aeq_vectors;
a5073d60
YL
6022
6023 /* Disable irq */
6024 hns_roce_v2_int_mask_enable(hr_dev, eq_num, EQ_DISABLE);
6025
33db6f94 6026 __hns_roce_free_irq(hr_dev);
a5073d60
YL
6027
6028 for (i = 0; i < eq_num; i++) {
6029 hns_roce_v2_destroy_eqc(hr_dev, i);
6030
d7e2d343 6031 free_eq_buf(hr_dev, &eq_table->eq[i]);
a5073d60
YL
6032 }
6033
a5073d60 6034 kfree(eq_table->eq);
0425e3e6
YL
6035
6036 flush_workqueue(hr_dev->irq_workq);
6037 destroy_workqueue(hr_dev->irq_workq);
a5073d60
YL
6038}
6039
c7bcb134
LO
/* Fill the SRQ context (SRQC) in @mb_buf with the queue geometry and the
 * DMA addresses of the WQE buffer and index queue; the caller posts it
 * to hardware via a mailbox command.  Base addresses are split into
 * low/high context fields (>> 3 then >> 35, i.e. bits [34:3]/[63:35]).
 */
static void hns_roce_v2_write_srqc(struct hns_roce_dev *hr_dev,
				   struct hns_roce_srq *srq, u32 pdn, u16 xrcd,
				   u32 cqn, void *mb_buf, u64 *mtts_wqe,
				   u64 *mtts_idx, dma_addr_t dma_handle_wqe,
				   dma_addr_t dma_handle_idx)
{
	struct hns_roce_srq_context *srq_context;

	srq_context = mb_buf;
	memset(srq_context, 0, sizeof(*srq_context));

	/* SRQ state: valid (1) */
	roce_set_field(srq_context->byte_4_srqn_srqst, SRQC_BYTE_4_SRQ_ST_M,
		       SRQC_BYTE_4_SRQ_ST_S, 1);

	/* HNS_ROCE_HOP_NUM_0 is encoded as 0 in the context */
	roce_set_field(srq_context->byte_4_srqn_srqst,
		       SRQC_BYTE_4_SRQ_WQE_HOP_NUM_M,
		       SRQC_BYTE_4_SRQ_WQE_HOP_NUM_S,
		       (hr_dev->caps.srqwqe_hop_num == HNS_ROCE_HOP_NUM_0 ? 0 :
			hr_dev->caps.srqwqe_hop_num));
	roce_set_field(srq_context->byte_4_srqn_srqst,
		       SRQC_BYTE_4_SRQ_SHIFT_M, SRQC_BYTE_4_SRQ_SHIFT_S,
		       ilog2(srq->wqe_cnt));

	roce_set_field(srq_context->byte_4_srqn_srqst, SRQC_BYTE_4_SRQN_M,
		       SRQC_BYTE_4_SRQN_S, srq->srqn);

	/* limit watermark starts at 0; hns_roce_v2_modify_srq() sets it */
	roce_set_field(srq_context->byte_8_limit_wl, SRQC_BYTE_8_SRQ_LIMIT_WL_M,
		       SRQC_BYTE_8_SRQ_LIMIT_WL_S, 0);

	roce_set_field(srq_context->byte_12_xrcd, SRQC_BYTE_12_SRQ_XRCD_M,
		       SRQC_BYTE_12_SRQ_XRCD_S, xrcd);

	srq_context->wqe_bt_ba = cpu_to_le32((u32)(dma_handle_wqe >> 3));

	roce_set_field(srq_context->byte_24_wqe_bt_ba,
		       SRQC_BYTE_24_SRQ_WQE_BT_BA_M,
		       SRQC_BYTE_24_SRQ_WQE_BT_BA_S,
		       dma_handle_wqe >> 35);

	roce_set_field(srq_context->byte_28_rqws_pd, SRQC_BYTE_28_PD_M,
		       SRQC_BYTE_28_PD_S, pdn);
	/* receive WQE size encoded as log2 of the SGE capacity */
	roce_set_field(srq_context->byte_28_rqws_pd, SRQC_BYTE_28_RQWS_M,
		       SRQC_BYTE_28_RQWS_S, srq->max_gs <= 0 ? 0 :
		       fls(srq->max_gs - 1));

	srq_context->idx_bt_ba = cpu_to_le32(dma_handle_idx >> 3);
	roce_set_field(srq_context->rsv_idx_bt_ba,
		       SRQC_BYTE_36_SRQ_IDX_BT_BA_M,
		       SRQC_BYTE_36_SRQ_IDX_BT_BA_S,
		       dma_handle_idx >> 35);

	/* current and next index-queue blocks, low 32 bits then the rest */
	srq_context->idx_cur_blk_addr =
		cpu_to_le32(mtts_idx[0] >> PAGE_ADDR_SHIFT);
	roce_set_field(srq_context->byte_44_idxbufpgsz_addr,
		       SRQC_BYTE_44_SRQ_IDX_CUR_BLK_ADDR_M,
		       SRQC_BYTE_44_SRQ_IDX_CUR_BLK_ADDR_S,
		       mtts_idx[0] >> (32 + PAGE_ADDR_SHIFT));
	roce_set_field(srq_context->byte_44_idxbufpgsz_addr,
		       SRQC_BYTE_44_SRQ_IDX_HOP_NUM_M,
		       SRQC_BYTE_44_SRQ_IDX_HOP_NUM_S,
		       hr_dev->caps.idx_hop_num == HNS_ROCE_HOP_NUM_0 ? 0 :
		       hr_dev->caps.idx_hop_num);

	roce_set_field(srq_context->byte_44_idxbufpgsz_addr,
		       SRQC_BYTE_44_SRQ_IDX_BA_PG_SZ_M,
		       SRQC_BYTE_44_SRQ_IDX_BA_PG_SZ_S,
		       hr_dev->caps.idx_ba_pg_sz + PG_SHIFT_OFFSET);
	roce_set_field(srq_context->byte_44_idxbufpgsz_addr,
		       SRQC_BYTE_44_SRQ_IDX_BUF_PG_SZ_M,
		       SRQC_BYTE_44_SRQ_IDX_BUF_PG_SZ_S,
		       hr_dev->caps.idx_buf_pg_sz + PG_SHIFT_OFFSET);

	srq_context->idx_nxt_blk_addr =
		cpu_to_le32(mtts_idx[1] >> PAGE_ADDR_SHIFT);
	roce_set_field(srq_context->rsv_idxnxtblkaddr,
		       SRQC_BYTE_52_SRQ_IDX_NXT_BLK_ADDR_M,
		       SRQC_BYTE_52_SRQ_IDX_NXT_BLK_ADDR_S,
		       mtts_idx[1] >> (32 + PAGE_ADDR_SHIFT));
	roce_set_field(srq_context->byte_56_xrc_cqn,
		       SRQC_BYTE_56_SRQ_XRC_CQN_M, SRQC_BYTE_56_SRQ_XRC_CQN_S,
		       cqn);
	roce_set_field(srq_context->byte_56_xrc_cqn,
		       SRQC_BYTE_56_SRQ_WQE_BA_PG_SZ_M,
		       SRQC_BYTE_56_SRQ_WQE_BA_PG_SZ_S,
		       hr_dev->caps.srqwqe_ba_pg_sz + PG_SHIFT_OFFSET);
	roce_set_field(srq_context->byte_56_xrc_cqn,
		       SRQC_BYTE_56_SRQ_WQE_BUF_PG_SZ_M,
		       SRQC_BYTE_56_SRQ_WQE_BUF_PG_SZ_S,
		       hr_dev->caps.srqwqe_buf_pg_sz + PG_SHIFT_OFFSET);

	/* doorbell record disabled for this SRQ */
	roce_set_bit(srq_context->db_record_addr_record_en,
		     SRQC_BYTE_60_SRQ_RECORD_EN_S, 0);
}
6133
6134static int hns_roce_v2_modify_srq(struct ib_srq *ibsrq,
6135 struct ib_srq_attr *srq_attr,
6136 enum ib_srq_attr_mask srq_attr_mask,
6137 struct ib_udata *udata)
6138{
6139 struct hns_roce_dev *hr_dev = to_hr_dev(ibsrq->device);
6140 struct hns_roce_srq *srq = to_hr_srq(ibsrq);
6141 struct hns_roce_srq_context *srq_context;
6142 struct hns_roce_srq_context *srqc_mask;
6143 struct hns_roce_cmd_mailbox *mailbox;
6144 int ret;
6145
6146 if (srq_attr_mask & IB_SRQ_LIMIT) {
d938d785 6147 if (srq_attr->srq_limit >= srq->wqe_cnt)
c7bcb134
LO
6148 return -EINVAL;
6149
6150 mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
6151 if (IS_ERR(mailbox))
6152 return PTR_ERR(mailbox);
6153
6154 srq_context = mailbox->buf;
6155 srqc_mask = (struct hns_roce_srq_context *)mailbox->buf + 1;
6156
6157 memset(srqc_mask, 0xff, sizeof(*srqc_mask));
6158
6159 roce_set_field(srq_context->byte_8_limit_wl,
6160 SRQC_BYTE_8_SRQ_LIMIT_WL_M,
6161 SRQC_BYTE_8_SRQ_LIMIT_WL_S, srq_attr->srq_limit);
6162 roce_set_field(srqc_mask->byte_8_limit_wl,
6163 SRQC_BYTE_8_SRQ_LIMIT_WL_M,
6164 SRQC_BYTE_8_SRQ_LIMIT_WL_S, 0);
6165
6166 ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, srq->srqn, 0,
6167 HNS_ROCE_CMD_MODIFY_SRQC,
6168 HNS_ROCE_CMD_TIMEOUT_MSECS);
6169 hns_roce_free_cmd_mailbox(hr_dev, mailbox);
6170 if (ret) {
6171 dev_err(hr_dev->dev,
6172 "MODIFY SRQ Failed to cmd mailbox.\n");
6173 return ret;
6174 }
6175 }
6176
6177 return 0;
6178}
6179
c3c668e7 6180static int hns_roce_v2_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr)
c7bcb134
LO
6181{
6182 struct hns_roce_dev *hr_dev = to_hr_dev(ibsrq->device);
6183 struct hns_roce_srq *srq = to_hr_srq(ibsrq);
6184 struct hns_roce_srq_context *srq_context;
6185 struct hns_roce_cmd_mailbox *mailbox;
6186 int limit_wl;
6187 int ret;
6188
6189 mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
6190 if (IS_ERR(mailbox))
6191 return PTR_ERR(mailbox);
6192
6193 srq_context = mailbox->buf;
6194 ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, srq->srqn, 0,
6195 HNS_ROCE_CMD_QUERY_SRQC,
6196 HNS_ROCE_CMD_TIMEOUT_MSECS);
6197 if (ret) {
6198 dev_err(hr_dev->dev, "QUERY SRQ cmd process error\n");
6199 goto out;
6200 }
6201
6202 limit_wl = roce_get_field(srq_context->byte_8_limit_wl,
6203 SRQC_BYTE_8_SRQ_LIMIT_WL_M,
6204 SRQC_BYTE_8_SRQ_LIMIT_WL_S);
6205
6206 attr->srq_limit = limit_wl;
d938d785 6207 attr->max_wr = srq->wqe_cnt - 1;
c7bcb134
LO
6208 attr->max_sge = srq->max_gs;
6209
6210 memcpy(srq_context, mailbox->buf, sizeof(*srq_context));
6211
6212out:
6213 hns_roce_free_cmd_mailbox(hr_dev, mailbox);
6214 return ret;
6215}
6216
97545b10
LO
6217static int find_empty_entry(struct hns_roce_idx_que *idx_que,
6218 unsigned long size)
c7bcb134 6219{
97545b10 6220 int wqe_idx;
c7bcb134 6221
97545b10
LO
6222 if (unlikely(bitmap_full(idx_que->bitmap, size)))
6223 return -ENOSPC;
6224
6225 wqe_idx = find_first_zero_bit(idx_que->bitmap, size);
6226
6227 bitmap_set(idx_que->bitmap, wqe_idx, 1);
c7bcb134 6228
97545b10 6229 return wqe_idx;
c7bcb134
LO
6230}
6231
/* Record which WQE backs ring slot @cur_idx by writing @wqe_idx into the
 * SRQ index-queue buffer that hardware reads.
 */
static void fill_idx_queue(struct hns_roce_idx_que *idx_que,
			   int cur_idx, int wqe_idx)
{
	unsigned int *addr;

	addr = (unsigned int *)hns_roce_buf_offset(&idx_que->idx_buf,
						   cur_idx * idx_que->entry_sz);
	/* NOTE(review): stored in raw CPU endianness; a device-visible
	 * queue entry would normally need cpu_to_le32() — confirm against
	 * the hardware spec / big-endian hosts.
	 */
	*addr = wqe_idx;
}
6241
/* Post a chain of receive work requests to an SRQ under its lock, then
 * ring the SRQ doorbell once with the new head.  On the first invalid WR
 * the loop stops, *bad_wr points at it, and everything posted before it
 * still stands.
 */
static int hns_roce_v2_post_srq_recv(struct ib_srq *ibsrq,
				     const struct ib_recv_wr *wr,
				     const struct ib_recv_wr **bad_wr)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibsrq->device);
	struct hns_roce_srq *srq = to_hr_srq(ibsrq);
	struct hns_roce_v2_wqe_data_seg *dseg;
	struct hns_roce_v2_db srq_db;
	unsigned long flags;
	int ret = 0;
	int wqe_idx;
	void *wqe;
	int nreq;
	int ind;
	int i;

	spin_lock_irqsave(&srq->lock, flags);

	/* ring slot for the next index-queue entry (wqe_cnt is pow-of-2) */
	ind = srq->head & (srq->wqe_cnt - 1);

	for (nreq = 0; wr; ++nreq, wr = wr->next) {
		if (unlikely(wr->num_sge > srq->max_gs)) {
			ret = -EINVAL;
			*bad_wr = wr;
			break;
		}

		/* head == tail is treated as "no free WQE" here */
		if (unlikely(srq->head == srq->tail)) {
			ret = -ENOMEM;
			*bad_wr = wr;
			break;
		}

		wqe_idx = find_empty_entry(&srq->idx_que, srq->wqe_cnt);
		if (wqe_idx < 0) {
			ret = -ENOMEM;
			*bad_wr = wr;
			break;
		}

		fill_idx_queue(&srq->idx_que, ind, wqe_idx);
		wqe = get_srq_wqe(srq, wqe_idx);
		dseg = (struct hns_roce_v2_wqe_data_seg *)wqe;

		for (i = 0; i < wr->num_sge; ++i) {
			dseg[i].len = cpu_to_le32(wr->sg_list[i].length);
			dseg[i].lkey = cpu_to_le32(wr->sg_list[i].lkey);
			dseg[i].addr = cpu_to_le64(wr->sg_list[i].addr);
		}

		/* short SGE list: terminate with a zero-length entry
		 * (0x100 presumably acts as an invalid-lkey sentinel for
		 * the hardware — confirm against the HIP08 spec)
		 */
		if (i < srq->max_gs) {
			dseg[i].len = 0;
			dseg[i].lkey = cpu_to_le32(0x100);
			dseg[i].addr = 0;
		}

		srq->wrid[wqe_idx] = wr->wr_id;
		ind = (ind + 1) & (srq->wqe_cnt - 1);
	}

	if (likely(nreq)) {
		srq->head += nreq;

		/*
		 * Make sure that descriptors are written before
		 * doorbell record.
		 */
		wmb();

		srq_db.byte_4 =
			cpu_to_le32(HNS_ROCE_V2_SRQ_DB << V2_DB_BYTE_4_CMD_S |
				    (srq->srqn & V2_DB_BYTE_4_TAG_M));
		srq_db.parameter = cpu_to_le32(srq->head);

		hns_roce_write64(hr_dev, (__le32 *)&srq_db, srq->db_reg_l);

	}

	spin_unlock_irqrestore(&srq->lock, flags);

	return ret;
}
6324
e1c9a0dc
LO
/* Diagnostics (DFX) hooks specific to the v2 (HIP08) hardware. */
static const struct hns_roce_dfx_hw hns_roce_dfx_hw_v2 = {
	.query_cqc_info = hns_roce_v2_query_cqc_info,
};
6328
7f645a58
KH
/* ib_device verb implementations registered for v2 (HIP08) devices. */
static const struct ib_device_ops hns_roce_v2_dev_ops = {
	.destroy_qp = hns_roce_v2_destroy_qp,
	.modify_cq = hns_roce_v2_modify_cq,
	.poll_cq = hns_roce_v2_poll_cq,
	.post_recv = hns_roce_v2_post_recv,
	.post_send = hns_roce_v2_post_send,
	.query_qp = hns_roce_v2_query_qp,
	.req_notify_cq = hns_roce_v2_req_notify_cq,
};
6338
6339static const struct ib_device_ops hns_roce_v2_dev_srq_ops = {
6340 .modify_srq = hns_roce_v2_modify_srq,
6341 .post_srq_recv = hns_roce_v2_post_srq_recv,
6342 .query_srq = hns_roce_v2_query_srq,
6343};
6344
a04ff739
WHX
6345static const struct hns_roce_hw hns_roce_hw_v2 = {
6346 .cmq_init = hns_roce_v2_cmq_init,
6347 .cmq_exit = hns_roce_v2_cmq_exit,
cfc85f3e 6348 .hw_profile = hns_roce_v2_profile,
6b63597d 6349 .hw_init = hns_roce_v2_init,
6350 .hw_exit = hns_roce_v2_exit,
a680f2f3
WHX
6351 .post_mbox = hns_roce_v2_post_mbox,
6352 .chk_mbox = hns_roce_v2_chk_mbox,
6a04aed6 6353 .rst_prc_mbox = hns_roce_v2_rst_process_cmd,
7afddafa
WHX
6354 .set_gid = hns_roce_v2_set_gid,
6355 .set_mac = hns_roce_v2_set_mac,
3958cc56 6356 .write_mtpt = hns_roce_v2_write_mtpt,
a2c80b7b 6357 .rereg_write_mtpt = hns_roce_v2_rereg_write_mtpt,
68a997c5 6358 .frmr_write_mtpt = hns_roce_v2_frmr_write_mtpt,
c7c28191 6359 .mw_write_mtpt = hns_roce_v2_mw_write_mtpt,
93aa2187 6360 .write_cqc = hns_roce_v2_write_cqc,
a81fba28
WHX
6361 .set_hem = hns_roce_v2_set_hem,
6362 .clear_hem = hns_roce_v2_clear_hem,
926a01dc
WHX
6363 .modify_qp = hns_roce_v2_modify_qp,
6364 .query_qp = hns_roce_v2_query_qp,
6365 .destroy_qp = hns_roce_v2_destroy_qp,
aa84fa18 6366 .qp_flow_control_init = hns_roce_v2_qp_flow_control_init,
b156269d 6367 .modify_cq = hns_roce_v2_modify_cq,
2d407888
WHX
6368 .post_send = hns_roce_v2_post_send,
6369 .post_recv = hns_roce_v2_post_recv,
93aa2187
WHX
6370 .req_notify_cq = hns_roce_v2_req_notify_cq,
6371 .poll_cq = hns_roce_v2_poll_cq,
a5073d60
YL
6372 .init_eq = hns_roce_v2_init_eq_table,
6373 .cleanup_eq = hns_roce_v2_cleanup_eq_table,
c7bcb134
LO
6374 .write_srqc = hns_roce_v2_write_srqc,
6375 .modify_srq = hns_roce_v2_modify_srq,
6376 .query_srq = hns_roce_v2_query_srq,
6377 .post_srq_recv = hns_roce_v2_post_srq_recv,
7f645a58
KH
6378 .hns_roce_dev_ops = &hns_roce_v2_dev_ops,
6379 .hns_roce_dev_srq_ops = &hns_roce_v2_dev_srq_ops,
a04ff739 6380};
dd74282d
WHX
6381
6382static const struct pci_device_id hns_roce_hw_v2_pci_tbl[] = {
6383 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
6384 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
aaa31567
LO
6385 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
6386 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
dd74282d
WHX
6387 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
6388 /* required last entry */
6389 {0, }
6390};
6391
f97a62c3 6392MODULE_DEVICE_TABLE(pci, hns_roce_hw_v2_pci_tbl);
6393
301cc7eb 6394static void hns_roce_hw_v2_get_cfg(struct hns_roce_dev *hr_dev,
dd74282d
WHX
6395 struct hnae3_handle *handle)
6396{
d061effc 6397 struct hns_roce_v2_priv *priv = hr_dev->priv;
a5073d60 6398 int i;
dd74282d 6399
301cc7eb
LC
6400 hr_dev->pci_dev = handle->pdev;
6401 hr_dev->dev = &handle->pdev->dev;
dd74282d 6402 hr_dev->hw = &hns_roce_hw_v2;
e1c9a0dc 6403 hr_dev->dfx = &hns_roce_dfx_hw_v2;
2d407888
WHX
6404 hr_dev->sdb_offset = ROCEE_DB_SQ_L_0_REG;
6405 hr_dev->odb_offset = hr_dev->sdb_offset;
dd74282d
WHX
6406
6407 /* Get info from NIC driver. */
6408 hr_dev->reg_base = handle->rinfo.roce_io_base;
6409 hr_dev->caps.num_ports = 1;
6410 hr_dev->iboe.netdevs[0] = handle->rinfo.netdev;
6411 hr_dev->iboe.phy_port[0] = 0;
6412
d4994d2f 6413 addrconf_addr_eui48((u8 *)&hr_dev->ib_dev.node_guid,
6414 hr_dev->iboe.netdevs[0]->dev_addr);
6415
a5073d60
YL
6416 for (i = 0; i < HNS_ROCE_V2_MAX_IRQ_NUM; i++)
6417 hr_dev->irq[i] = pci_irq_vector(handle->pdev,
6418 i + handle->rinfo.base_vector);
6419
dd74282d 6420 /* cmd issue mode: 0 is poll, 1 is event */
a5073d60 6421 hr_dev->cmd_mod = 1;
dd74282d
WHX
6422 hr_dev->loop_idc = 0;
6423
d061effc
WHX
6424 hr_dev->reset_cnt = handle->ae_algo->ops->ae_dev_reset_cnt(handle);
6425 priv->handle = handle;
dd74282d
WHX
6426}
6427
d061effc 6428static int __hns_roce_hw_v2_init_instance(struct hnae3_handle *handle)
dd74282d
WHX
6429{
6430 struct hns_roce_dev *hr_dev;
6431 int ret;
6432
459cc69f 6433 hr_dev = ib_alloc_device(hns_roce_dev, ib_dev);
dd74282d
WHX
6434 if (!hr_dev)
6435 return -ENOMEM;
6436
a04ff739
WHX
6437 hr_dev->priv = kzalloc(sizeof(struct hns_roce_v2_priv), GFP_KERNEL);
6438 if (!hr_dev->priv) {
6439 ret = -ENOMEM;
6440 goto error_failed_kzalloc;
6441 }
6442
301cc7eb 6443 hns_roce_hw_v2_get_cfg(hr_dev, handle);
dd74282d
WHX
6444
6445 ret = hns_roce_init(hr_dev);
6446 if (ret) {
6447 dev_err(hr_dev->dev, "RoCE Engine init failed!\n");
6448 goto error_failed_get_cfg;
6449 }
6450
d061effc
WHX
6451 handle->priv = hr_dev;
6452
dd74282d
WHX
6453 return 0;
6454
6455error_failed_get_cfg:
a04ff739
WHX
6456 kfree(hr_dev->priv);
6457
6458error_failed_kzalloc:
dd74282d
WHX
6459 ib_dealloc_device(&hr_dev->ib_dev);
6460
6461 return ret;
6462}
6463
d061effc 6464static void __hns_roce_hw_v2_uninit_instance(struct hnae3_handle *handle,
dd74282d
WHX
6465 bool reset)
6466{
6467 struct hns_roce_dev *hr_dev = (struct hns_roce_dev *)handle->priv;
6468
cb7a94c9
WHX
6469 if (!hr_dev)
6470 return;
6471
d061effc 6472 handle->priv = NULL;
626903e9
XW
6473
6474 hr_dev->state = HNS_ROCE_DEVICE_STATE_UNINIT;
6475 hns_roce_handle_device_err(hr_dev);
6476
dd74282d 6477 hns_roce_exit(hr_dev);
a04ff739 6478 kfree(hr_dev->priv);
dd74282d
WHX
6479 ib_dealloc_device(&hr_dev->ib_dev);
6480}
6481
d061effc
WHX
6482static int hns_roce_hw_v2_init_instance(struct hnae3_handle *handle)
6483{
6484 const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
07c2339a 6485 const struct pci_device_id *id;
d061effc
WHX
6486 struct device *dev = &handle->pdev->dev;
6487 int ret;
6488
6489 handle->rinfo.instance_state = HNS_ROCE_STATE_INIT;
6490
6491 if (ops->ae_dev_resetting(handle) || ops->get_hw_reset_stat(handle)) {
6492 handle->rinfo.instance_state = HNS_ROCE_STATE_NON_INIT;
6493 goto reset_chk_err;
6494 }
6495
07c2339a
LO
6496 id = pci_match_id(hns_roce_hw_v2_pci_tbl, handle->pdev);
6497 if (!id)
6498 return 0;
6499
d061effc
WHX
6500 ret = __hns_roce_hw_v2_init_instance(handle);
6501 if (ret) {
6502 handle->rinfo.instance_state = HNS_ROCE_STATE_NON_INIT;
6503 dev_err(dev, "RoCE instance init failed! ret = %d\n", ret);
6504 if (ops->ae_dev_resetting(handle) ||
6505 ops->get_hw_reset_stat(handle))
6506 goto reset_chk_err;
6507 else
6508 return ret;
6509 }
6510
6511 handle->rinfo.instance_state = HNS_ROCE_STATE_INITED;
6512
6513
6514 return 0;
6515
6516reset_chk_err:
6517 dev_err(dev, "Device is busy in resetting state.\n"
6518 "please retry later.\n");
6519
6520 return -EBUSY;
6521}
6522
6523static void hns_roce_hw_v2_uninit_instance(struct hnae3_handle *handle,
6524 bool reset)
6525{
6526 if (handle->rinfo.instance_state != HNS_ROCE_STATE_INITED)
6527 return;
6528
6529 handle->rinfo.instance_state = HNS_ROCE_STATE_UNINIT;
6530
6531 __hns_roce_hw_v2_uninit_instance(handle, reset);
6532
6533 handle->rinfo.instance_state = HNS_ROCE_STATE_NON_INIT;
6534}
cb7a94c9
WHX
6535static int hns_roce_hw_v2_reset_notify_down(struct hnae3_handle *handle)
6536{
d061effc 6537 struct hns_roce_dev *hr_dev;
cb7a94c9 6538
d061effc
WHX
6539 if (handle->rinfo.instance_state != HNS_ROCE_STATE_INITED) {
6540 set_bit(HNS_ROCE_RST_DIRECT_RETURN, &handle->rinfo.state);
6541 return 0;
cb7a94c9
WHX
6542 }
6543
d061effc
WHX
6544 handle->rinfo.reset_state = HNS_ROCE_STATE_RST_DOWN;
6545 clear_bit(HNS_ROCE_RST_DIRECT_RETURN, &handle->rinfo.state);
6546
6547 hr_dev = (struct hns_roce_dev *)handle->priv;
6548 if (!hr_dev)
6549 return 0;
6550
726be12f 6551 hr_dev->is_reset = true;
cb7a94c9 6552 hr_dev->active = false;
d3743fa9 6553 hr_dev->dis_db = true;
cb7a94c9 6554
626903e9 6555 hr_dev->state = HNS_ROCE_DEVICE_STATE_RST_DOWN;
cb7a94c9
WHX
6556
6557 return 0;
6558}
6559
6560static int hns_roce_hw_v2_reset_notify_init(struct hnae3_handle *handle)
6561{
d061effc 6562 struct device *dev = &handle->pdev->dev;
cb7a94c9
WHX
6563 int ret;
6564
d061effc
WHX
6565 if (test_and_clear_bit(HNS_ROCE_RST_DIRECT_RETURN,
6566 &handle->rinfo.state)) {
6567 handle->rinfo.reset_state = HNS_ROCE_STATE_RST_INITED;
6568 return 0;
6569 }
6570
6571 handle->rinfo.reset_state = HNS_ROCE_STATE_RST_INIT;
6572
6573 dev_info(&handle->pdev->dev, "In reset process RoCE client reinit.\n");
6574 ret = __hns_roce_hw_v2_init_instance(handle);
cb7a94c9
WHX
6575 if (ret) {
6576 /* when reset notify type is HNAE3_INIT_CLIENT In reset notify
6577 * callback function, RoCE Engine reinitialize. If RoCE reinit
6578 * failed, we should inform NIC driver.
6579 */
6580 handle->priv = NULL;
d061effc
WHX
6581 dev_err(dev, "In reset process RoCE reinit failed %d.\n", ret);
6582 } else {
6583 handle->rinfo.reset_state = HNS_ROCE_STATE_RST_INITED;
6584 dev_info(dev, "Reset done, RoCE client reinit finished.\n");
cb7a94c9
WHX
6585 }
6586
6587 return ret;
6588}
6589
6590static int hns_roce_hw_v2_reset_notify_uninit(struct hnae3_handle *handle)
6591{
d061effc
WHX
6592 if (test_bit(HNS_ROCE_RST_DIRECT_RETURN, &handle->rinfo.state))
6593 return 0;
6594
6595 handle->rinfo.reset_state = HNS_ROCE_STATE_RST_UNINIT;
6596 dev_info(&handle->pdev->dev, "In reset process RoCE client uninit.\n");
90c559b1 6597 msleep(HNS_ROCE_V2_HW_RST_UNINT_DELAY);
d061effc
WHX
6598 __hns_roce_hw_v2_uninit_instance(handle, false);
6599
cb7a94c9
WHX
6600 return 0;
6601}
6602
6603static int hns_roce_hw_v2_reset_notify(struct hnae3_handle *handle,
6604 enum hnae3_reset_notify_type type)
6605{
6606 int ret = 0;
6607
6608 switch (type) {
6609 case HNAE3_DOWN_CLIENT:
6610 ret = hns_roce_hw_v2_reset_notify_down(handle);
6611 break;
6612 case HNAE3_INIT_CLIENT:
6613 ret = hns_roce_hw_v2_reset_notify_init(handle);
6614 break;
6615 case HNAE3_UNINIT_CLIENT:
6616 ret = hns_roce_hw_v2_reset_notify_uninit(handle);
6617 break;
6618 default:
6619 break;
6620 }
6621
6622 return ret;
6623}
6624
dd74282d
WHX
6625static const struct hnae3_client_ops hns_roce_hw_v2_ops = {
6626 .init_instance = hns_roce_hw_v2_init_instance,
6627 .uninit_instance = hns_roce_hw_v2_uninit_instance,
cb7a94c9 6628 .reset_notify = hns_roce_hw_v2_reset_notify,
dd74282d
WHX
6629};
6630
6631static struct hnae3_client hns_roce_hw_v2_client = {
6632 .name = "hns_roce_hw_v2",
6633 .type = HNAE3_CLIENT_ROCE,
6634 .ops = &hns_roce_hw_v2_ops,
6635};
6636
6637static int __init hns_roce_hw_v2_init(void)
6638{
6639 return hnae3_register_client(&hns_roce_hw_v2_client);
6640}
6641
6642static void __exit hns_roce_hw_v2_exit(void)
6643{
6644 hnae3_unregister_client(&hns_roce_hw_v2_client);
6645}
6646
6647module_init(hns_roce_hw_v2_init);
6648module_exit(hns_roce_hw_v2_exit);
6649
6650MODULE_LICENSE("Dual BSD/GPL");
6651MODULE_AUTHOR("Wei Hu <xavier.huwei@huawei.com>");
6652MODULE_AUTHOR("Lijun Ou <oulijun@huawei.com>");
6653MODULE_AUTHOR("Shaobo Xu <xushaobo2@huawei.com>");
6654MODULE_DESCRIPTION("Hisilicon Hip08 Family RoCE Driver");