RDMA/hns: fix potential integer overflow on left shift
[linux-2.6-block.git] / drivers / infiniband / hw / hns / hns_roce_hw_v2.c
CommitLineData
dd74282d
WHX
1/*
2 * Copyright (c) 2016-2017 Hisilicon Limited.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#include <linux/acpi.h>
34#include <linux/etherdevice.h>
35#include <linux/interrupt.h>
36#include <linux/kernel.h>
0b25c9cc 37#include <linux/types.h>
d4994d2f 38#include <net/addrconf.h>
610b8967 39#include <rdma/ib_addr.h>
a70c0739 40#include <rdma/ib_cache.h>
dd74282d 41#include <rdma/ib_umem.h>
bdeacabd 42#include <rdma/uverbs_ioctl.h>
dd74282d
WHX
43
44#include "hnae3.h"
45#include "hns_roce_common.h"
46#include "hns_roce_device.h"
47#include "hns_roce_cmd.h"
48#include "hns_roce_hem.h"
a04ff739 49#include "hns_roce_hw_v2.h"
dd74282d 50
2d407888
WHX
51static void set_data_seg_v2(struct hns_roce_v2_wqe_data_seg *dseg,
52 struct ib_sge *sg)
53{
54 dseg->lkey = cpu_to_le32(sg->lkey);
55 dseg->addr = cpu_to_le64(sg->addr);
56 dseg->len = cpu_to_le32(sg->length);
57}
58
68a997c5
YL
59static void set_frmr_seg(struct hns_roce_v2_rc_send_wqe *rc_sq_wqe,
60 struct hns_roce_wqe_frmr_seg *fseg,
61 const struct ib_reg_wr *wr)
62{
63 struct hns_roce_mr *mr = to_hr_mr(wr->mr);
64
65 /* use ib_access_flags */
66 roce_set_bit(rc_sq_wqe->byte_4,
67 V2_RC_FRMR_WQE_BYTE_4_BIND_EN_S,
68 wr->access & IB_ACCESS_MW_BIND ? 1 : 0);
69 roce_set_bit(rc_sq_wqe->byte_4,
70 V2_RC_FRMR_WQE_BYTE_4_ATOMIC_S,
71 wr->access & IB_ACCESS_REMOTE_ATOMIC ? 1 : 0);
72 roce_set_bit(rc_sq_wqe->byte_4,
73 V2_RC_FRMR_WQE_BYTE_4_RR_S,
74 wr->access & IB_ACCESS_REMOTE_READ ? 1 : 0);
75 roce_set_bit(rc_sq_wqe->byte_4,
76 V2_RC_FRMR_WQE_BYTE_4_RW_S,
77 wr->access & IB_ACCESS_REMOTE_WRITE ? 1 : 0);
78 roce_set_bit(rc_sq_wqe->byte_4,
79 V2_RC_FRMR_WQE_BYTE_4_LW_S,
80 wr->access & IB_ACCESS_LOCAL_WRITE ? 1 : 0);
81
82 /* Data structure reuse may lead to confusion */
83 rc_sq_wqe->msg_len = cpu_to_le32(mr->pbl_ba & 0xffffffff);
84 rc_sq_wqe->inv_key = cpu_to_le32(mr->pbl_ba >> 32);
85
86 rc_sq_wqe->byte_16 = cpu_to_le32(wr->mr->length & 0xffffffff);
87 rc_sq_wqe->byte_20 = cpu_to_le32(wr->mr->length >> 32);
88 rc_sq_wqe->rkey = cpu_to_le32(wr->key);
89 rc_sq_wqe->va = cpu_to_le64(wr->mr->iova);
90
91 fseg->pbl_size = cpu_to_le32(mr->pbl_size);
92 roce_set_field(fseg->mode_buf_pg_sz,
93 V2_RC_FRMR_WQE_BYTE_40_PBL_BUF_PG_SZ_M,
94 V2_RC_FRMR_WQE_BYTE_40_PBL_BUF_PG_SZ_S,
95 mr->pbl_buf_pg_sz + PG_SHIFT_OFFSET);
96 roce_set_bit(fseg->mode_buf_pg_sz,
97 V2_RC_FRMR_WQE_BYTE_40_BLK_MODE_S, 0);
98}
99
384f8818
LO
100static void set_atomic_seg(struct hns_roce_wqe_atomic_seg *aseg,
101 const struct ib_atomic_wr *wr)
102{
103 if (wr->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
104 aseg->fetchadd_swap_data = cpu_to_le64(wr->swap);
105 aseg->cmp_data = cpu_to_le64(wr->compare_add);
106 } else {
107 aseg->fetchadd_swap_data = cpu_to_le64(wr->compare_add);
108 aseg->cmp_data = 0;
109 }
110}
111
f696bf6d 112static void set_extend_sge(struct hns_roce_qp *qp, const struct ib_send_wr *wr,
0b25c9cc
WHX
113 unsigned int *sge_ind)
114{
115 struct hns_roce_v2_wqe_data_seg *dseg;
116 struct ib_sge *sg;
117 int num_in_wqe = 0;
118 int extend_sge_num;
119 int fi_sge_num;
120 int se_sge_num;
121 int shift;
122 int i;
123
124 if (qp->ibqp.qp_type == IB_QPT_RC || qp->ibqp.qp_type == IB_QPT_UC)
125 num_in_wqe = HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE;
126 extend_sge_num = wr->num_sge - num_in_wqe;
127 sg = wr->sg_list + num_in_wqe;
128 shift = qp->hr_buf.page_shift;
129
130 /*
131 * Check whether wr->num_sge sges are in the same page. If not, we
132 * should calculate how many sges in the first page and the second
133 * page.
134 */
135 dseg = get_send_extend_sge(qp, (*sge_ind) & (qp->sge.sge_cnt - 1));
136 fi_sge_num = (round_up((uintptr_t)dseg, 1 << shift) -
137 (uintptr_t)dseg) /
138 sizeof(struct hns_roce_v2_wqe_data_seg);
139 if (extend_sge_num > fi_sge_num) {
140 se_sge_num = extend_sge_num - fi_sge_num;
141 for (i = 0; i < fi_sge_num; i++) {
142 set_data_seg_v2(dseg++, sg + i);
143 (*sge_ind)++;
144 }
145 dseg = get_send_extend_sge(qp,
146 (*sge_ind) & (qp->sge.sge_cnt - 1));
147 for (i = 0; i < se_sge_num; i++) {
148 set_data_seg_v2(dseg++, sg + fi_sge_num + i);
149 (*sge_ind)++;
150 }
151 } else {
152 for (i = 0; i < extend_sge_num; i++) {
153 set_data_seg_v2(dseg++, sg + i);
154 (*sge_ind)++;
155 }
156 }
157}
158
f696bf6d 159static int set_rwqe_data_seg(struct ib_qp *ibqp, const struct ib_send_wr *wr,
7bdee415 160 struct hns_roce_v2_rc_send_wqe *rc_sq_wqe,
161 void *wqe, unsigned int *sge_ind,
d34ac5cd 162 const struct ib_send_wr **bad_wr)
7bdee415 163{
164 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
165 struct hns_roce_v2_wqe_data_seg *dseg = wqe;
166 struct hns_roce_qp *qp = to_hr_qp(ibqp);
167 int i;
168
169 if (wr->send_flags & IB_SEND_INLINE && wr->num_sge) {
8b9b8d14 170 if (le32_to_cpu(rc_sq_wqe->msg_len) >
171 hr_dev->caps.max_sq_inline) {
7bdee415 172 *bad_wr = wr;
173 dev_err(hr_dev->dev, "inline len(1-%d)=%d, illegal",
174 rc_sq_wqe->msg_len, hr_dev->caps.max_sq_inline);
175 return -EINVAL;
176 }
177
328d405b 178 if (wr->opcode == IB_WR_RDMA_READ) {
c80e0661 179 *bad_wr = wr;
328d405b 180 dev_err(hr_dev->dev, "Not support inline data!\n");
181 return -EINVAL;
182 }
183
7bdee415 184 for (i = 0; i < wr->num_sge; i++) {
185 memcpy(wqe, ((void *)wr->sg_list[i].addr),
186 wr->sg_list[i].length);
187 wqe += wr->sg_list[i].length;
188 }
189
190 roce_set_bit(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_INLINE_S,
191 1);
192 } else {
0b25c9cc 193 if (wr->num_sge <= HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE) {
7bdee415 194 for (i = 0; i < wr->num_sge; i++) {
195 if (likely(wr->sg_list[i].length)) {
196 set_data_seg_v2(dseg, wr->sg_list + i);
197 dseg++;
198 }
199 }
200 } else {
201 roce_set_field(rc_sq_wqe->byte_20,
202 V2_RC_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_M,
203 V2_RC_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_S,
204 (*sge_ind) & (qp->sge.sge_cnt - 1));
205
0b25c9cc 206 for (i = 0; i < HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE; i++) {
7bdee415 207 if (likely(wr->sg_list[i].length)) {
208 set_data_seg_v2(dseg, wr->sg_list + i);
209 dseg++;
210 }
211 }
212
0b25c9cc 213 set_extend_sge(qp, wr, sge_ind);
7bdee415 214 }
215
216 roce_set_field(rc_sq_wqe->byte_16,
217 V2_RC_SEND_WQE_BYTE_16_SGE_NUM_M,
218 V2_RC_SEND_WQE_BYTE_16_SGE_NUM_S, wr->num_sge);
219 }
220
221 return 0;
222}
223
0425e3e6
YL
224static int hns_roce_v2_modify_qp(struct ib_qp *ibqp,
225 const struct ib_qp_attr *attr,
226 int attr_mask, enum ib_qp_state cur_state,
227 enum ib_qp_state new_state);
228
d34ac5cd
BVA
/* Post a chain of send work requests to @ibqp's send queue.
 *
 * Supports RC, GSI and UD QP types.  For each WR a hardware WQE is built
 * in place (UD or RC layout), then a single doorbell is rung for the
 * whole chain.  On any error *bad_wr is set to the failing WR and the
 * already-built WQEs up to that point are still doorbelled.
 *
 * Returns 0 on success or a negative errno; callers hold no lock (the SQ
 * lock is taken here with IRQs disabled).
 */
static int hns_roce_v2_post_send(struct ib_qp *ibqp,
				 const struct ib_send_wr *wr,
				 const struct ib_send_wr **bad_wr)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
	/* NOTE(review): ah is only valid for UD-style WRs; it is computed
	 * unconditionally here but only dereferenced on the GSI path.
	 */
	struct hns_roce_ah *ah = to_hr_ah(ud_wr(wr)->ah);
	struct hns_roce_v2_ud_send_wqe *ud_sq_wqe;
	struct hns_roce_v2_rc_send_wqe *rc_sq_wqe;
	struct hns_roce_qp *qp = to_hr_qp(ibqp);
	struct hns_roce_wqe_frmr_seg *fseg;
	struct device *dev = hr_dev->dev;
	struct hns_roce_v2_db sq_db;
	struct ib_qp_attr attr;
	unsigned int sge_ind = 0;
	unsigned int owner_bit;
	unsigned long flags;
	unsigned int ind;
	void *wqe = NULL;
	bool loopback;
	int attr_mask;
	u32 tmp_len;
	int ret = 0;
	u32 hr_op;
	u8 *smac;
	int nreq;
	int i;

	if (unlikely(ibqp->qp_type != IB_QPT_RC &&
		     ibqp->qp_type != IB_QPT_GSI &&
		     ibqp->qp_type != IB_QPT_UD)) {
		dev_err(dev, "Not supported QP(0x%x)type!\n", ibqp->qp_type);
		*bad_wr = wr;
		return -EOPNOTSUPP;
	}

	/* Posting is only legal once the QP can actually send */
	if (unlikely(qp->state == IB_QPS_RESET || qp->state == IB_QPS_INIT ||
		     qp->state == IB_QPS_RTR)) {
		dev_err(dev, "Post WQE fail, QP state %d err!\n", qp->state);
		*bad_wr = wr;
		return -EINVAL;
	}

	spin_lock_irqsave(&qp->sq.lock, flags);
	ind = qp->sq_next_wqe;
	sge_ind = qp->next_sge;

	for (nreq = 0; wr; ++nreq, wr = wr->next) {
		if (hns_roce_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) {
			ret = -ENOMEM;
			*bad_wr = wr;
			goto out;
		}

		if (unlikely(wr->num_sge > qp->sq.max_gs)) {
			dev_err(dev, "num_sge=%d > qp->sq.max_gs=%d\n",
				wr->num_sge, qp->sq.max_gs);
			ret = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		wqe = get_send_wqe(qp, ind & (qp->sq.wqe_cnt - 1));
		qp->sq.wrid[(qp->sq.head + nreq) & (qp->sq.wqe_cnt - 1)] =
			wr->wr_id;

		/* Ownership bit flips each time the queue wraps */
		owner_bit =
		       ~(((qp->sq.head + nreq) >> ilog2(qp->sq.wqe_cnt)) & 0x1);
		tmp_len = 0;

		/* Corresponding to the QP type, wqe process separately */
		if (ibqp->qp_type == IB_QPT_GSI) {
			ud_sq_wqe = wqe;
			memset(ud_sq_wqe, 0, sizeof(*ud_sq_wqe));

			/* Destination MAC, split across dmac and byte_48 */
			roce_set_field(ud_sq_wqe->dmac, V2_UD_SEND_WQE_DMAC_0_M,
				       V2_UD_SEND_WQE_DMAC_0_S, ah->av.mac[0]);
			roce_set_field(ud_sq_wqe->dmac, V2_UD_SEND_WQE_DMAC_1_M,
				       V2_UD_SEND_WQE_DMAC_1_S, ah->av.mac[1]);
			roce_set_field(ud_sq_wqe->dmac, V2_UD_SEND_WQE_DMAC_2_M,
				       V2_UD_SEND_WQE_DMAC_2_S, ah->av.mac[2]);
			roce_set_field(ud_sq_wqe->dmac, V2_UD_SEND_WQE_DMAC_3_M,
				       V2_UD_SEND_WQE_DMAC_3_S, ah->av.mac[3]);
			roce_set_field(ud_sq_wqe->byte_48,
				       V2_UD_SEND_WQE_BYTE_48_DMAC_4_M,
				       V2_UD_SEND_WQE_BYTE_48_DMAC_4_S,
				       ah->av.mac[4]);
			roce_set_field(ud_sq_wqe->byte_48,
				       V2_UD_SEND_WQE_BYTE_48_DMAC_5_M,
				       V2_UD_SEND_WQE_BYTE_48_DMAC_5_S,
				       ah->av.mac[5]);

			/* MAC loopback */
			smac = (u8 *)hr_dev->dev_addr[qp->port];
			loopback = ether_addr_equal_unaligned(ah->av.mac,
							      smac) ? 1 : 0;

			roce_set_bit(ud_sq_wqe->byte_40,
				     V2_UD_SEND_WQE_BYTE_40_LBI_S, loopback);

			roce_set_field(ud_sq_wqe->byte_4,
				       V2_UD_SEND_WQE_BYTE_4_OPCODE_M,
				       V2_UD_SEND_WQE_BYTE_4_OPCODE_S,
				       HNS_ROCE_V2_WQE_OP_SEND);

			/* Total message length across all SGEs */
			for (i = 0; i < wr->num_sge; i++)
				tmp_len += wr->sg_list[i].length;

			ud_sq_wqe->msg_len =
			 cpu_to_le32(le32_to_cpu(ud_sq_wqe->msg_len) + tmp_len);

			switch (wr->opcode) {
			case IB_WR_SEND_WITH_IMM:
			case IB_WR_RDMA_WRITE_WITH_IMM:
				ud_sq_wqe->immtdata =
				      cpu_to_le32(be32_to_cpu(wr->ex.imm_data));
				break;
			default:
				ud_sq_wqe->immtdata = 0;
				break;
			}

			/* Set sig attr */
			roce_set_bit(ud_sq_wqe->byte_4,
				   V2_UD_SEND_WQE_BYTE_4_CQE_S,
				   (wr->send_flags & IB_SEND_SIGNALED) ? 1 : 0);

			/* Set se attr */
			roce_set_bit(ud_sq_wqe->byte_4,
				  V2_UD_SEND_WQE_BYTE_4_SE_S,
				  (wr->send_flags & IB_SEND_SOLICITED) ? 1 : 0);

			roce_set_bit(ud_sq_wqe->byte_4,
				     V2_UD_SEND_WQE_BYTE_4_OWNER_S, owner_bit);

			roce_set_field(ud_sq_wqe->byte_16,
				       V2_UD_SEND_WQE_BYTE_16_PD_M,
				       V2_UD_SEND_WQE_BYTE_16_PD_S,
				       to_hr_pd(ibqp->pd)->pdn);

			roce_set_field(ud_sq_wqe->byte_16,
				       V2_UD_SEND_WQE_BYTE_16_SGE_NUM_M,
				       V2_UD_SEND_WQE_BYTE_16_SGE_NUM_S,
				       wr->num_sge);

			/* UD SGEs always live in the extended SGE area */
			roce_set_field(ud_sq_wqe->byte_20,
				     V2_UD_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_M,
				     V2_UD_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_S,
				     sge_ind & (qp->sge.sge_cnt - 1));

			roce_set_field(ud_sq_wqe->byte_24,
				       V2_UD_SEND_WQE_BYTE_24_UDPSPN_M,
				       V2_UD_SEND_WQE_BYTE_24_UDPSPN_S, 0);
			/* High bit set in the WR qkey selects the QP's qkey */
			ud_sq_wqe->qkey =
			     cpu_to_le32(ud_wr(wr)->remote_qkey & 0x80000000 ?
			     qp->qkey : ud_wr(wr)->remote_qkey);
			roce_set_field(ud_sq_wqe->byte_32,
				       V2_UD_SEND_WQE_BYTE_32_DQPN_M,
				       V2_UD_SEND_WQE_BYTE_32_DQPN_S,
				       ud_wr(wr)->remote_qpn);

			roce_set_field(ud_sq_wqe->byte_36,
				       V2_UD_SEND_WQE_BYTE_36_VLAN_M,
				       V2_UD_SEND_WQE_BYTE_36_VLAN_S,
				       le16_to_cpu(ah->av.vlan));
			roce_set_field(ud_sq_wqe->byte_36,
				       V2_UD_SEND_WQE_BYTE_36_HOPLIMIT_M,
				       V2_UD_SEND_WQE_BYTE_36_HOPLIMIT_S,
				       ah->av.hop_limit);
			/* NOTE(review): TCLASS/FLOW_LABEL below use
			 * sl_tclass_flowlabel without le32_to_cpu while the
			 * SL field does convert — confirm the AV field's
			 * stored endianness.
			 */
			roce_set_field(ud_sq_wqe->byte_36,
				       V2_UD_SEND_WQE_BYTE_36_TCLASS_M,
				       V2_UD_SEND_WQE_BYTE_36_TCLASS_S,
				       ah->av.sl_tclass_flowlabel >>
				       HNS_ROCE_TCLASS_SHIFT);
			roce_set_field(ud_sq_wqe->byte_40,
				       V2_UD_SEND_WQE_BYTE_40_FLOW_LABEL_M,
				       V2_UD_SEND_WQE_BYTE_40_FLOW_LABEL_S,
				       ah->av.sl_tclass_flowlabel &
				       HNS_ROCE_FLOW_LABEL_MASK);
			roce_set_field(ud_sq_wqe->byte_40,
				       V2_UD_SEND_WQE_BYTE_40_SL_M,
				       V2_UD_SEND_WQE_BYTE_40_SL_S,
				       le32_to_cpu(ah->av.sl_tclass_flowlabel) >>
				       HNS_ROCE_SL_SHIFT);
			roce_set_field(ud_sq_wqe->byte_40,
				       V2_UD_SEND_WQE_BYTE_40_PORTN_M,
				       V2_UD_SEND_WQE_BYTE_40_PORTN_S,
				       qp->port);

			roce_set_bit(ud_sq_wqe->byte_40,
				     V2_UD_SEND_WQE_BYTE_40_UD_VLAN_EN_S,
				     ah->av.vlan_en ? 1 : 0);
			roce_set_field(ud_sq_wqe->byte_48,
				       V2_UD_SEND_WQE_BYTE_48_SGID_INDX_M,
				       V2_UD_SEND_WQE_BYTE_48_SGID_INDX_S,
				       hns_get_gid_index(hr_dev, qp->phy_port,
							 ah->av.gid_index));

			memcpy(&ud_sq_wqe->dgid[0], &ah->av.dgid[0],
			       GID_LEN_V2);

			set_extend_sge(qp, wr, &sge_ind);
			ind++;
		} else if (ibqp->qp_type == IB_QPT_RC) {
			rc_sq_wqe = wqe;
			memset(rc_sq_wqe, 0, sizeof(*rc_sq_wqe));
			/* Total message length across all SGEs */
			for (i = 0; i < wr->num_sge; i++)
				tmp_len += wr->sg_list[i].length;

			rc_sq_wqe->msg_len =
			 cpu_to_le32(le32_to_cpu(rc_sq_wqe->msg_len) + tmp_len);

			switch (wr->opcode) {
			case IB_WR_SEND_WITH_IMM:
			case IB_WR_RDMA_WRITE_WITH_IMM:
				rc_sq_wqe->immtdata =
				      cpu_to_le32(be32_to_cpu(wr->ex.imm_data));
				break;
			case IB_WR_SEND_WITH_INV:
				rc_sq_wqe->inv_key =
					cpu_to_le32(wr->ex.invalidate_rkey);
				break;
			default:
				rc_sq_wqe->immtdata = 0;
				break;
			}

			roce_set_bit(rc_sq_wqe->byte_4,
				     V2_RC_SEND_WQE_BYTE_4_FENCE_S,
				     (wr->send_flags & IB_SEND_FENCE) ? 1 : 0);

			roce_set_bit(rc_sq_wqe->byte_4,
				  V2_RC_SEND_WQE_BYTE_4_SE_S,
				  (wr->send_flags & IB_SEND_SOLICITED) ? 1 : 0);

			roce_set_bit(rc_sq_wqe->byte_4,
				   V2_RC_SEND_WQE_BYTE_4_CQE_S,
				   (wr->send_flags & IB_SEND_SIGNALED) ? 1 : 0);

			roce_set_bit(rc_sq_wqe->byte_4,
				     V2_RC_SEND_WQE_BYTE_4_OWNER_S, owner_bit);

			/* wqe now points at the segment after the base WQE */
			wqe += sizeof(struct hns_roce_v2_rc_send_wqe);
			switch (wr->opcode) {
			case IB_WR_RDMA_READ:
				hr_op = HNS_ROCE_V2_WQE_OP_RDMA_READ;
				rc_sq_wqe->rkey =
					cpu_to_le32(rdma_wr(wr)->rkey);
				rc_sq_wqe->va =
					cpu_to_le64(rdma_wr(wr)->remote_addr);
				break;
			case IB_WR_RDMA_WRITE:
				hr_op = HNS_ROCE_V2_WQE_OP_RDMA_WRITE;
				rc_sq_wqe->rkey =
					cpu_to_le32(rdma_wr(wr)->rkey);
				rc_sq_wqe->va =
					cpu_to_le64(rdma_wr(wr)->remote_addr);
				break;
			case IB_WR_RDMA_WRITE_WITH_IMM:
				hr_op = HNS_ROCE_V2_WQE_OP_RDMA_WRITE_WITH_IMM;
				rc_sq_wqe->rkey =
					cpu_to_le32(rdma_wr(wr)->rkey);
				rc_sq_wqe->va =
					cpu_to_le64(rdma_wr(wr)->remote_addr);
				break;
			case IB_WR_SEND:
				hr_op = HNS_ROCE_V2_WQE_OP_SEND;
				break;
			case IB_WR_SEND_WITH_INV:
				hr_op = HNS_ROCE_V2_WQE_OP_SEND_WITH_INV;
				break;
			case IB_WR_SEND_WITH_IMM:
				hr_op = HNS_ROCE_V2_WQE_OP_SEND_WITH_IMM;
				break;
			case IB_WR_LOCAL_INV:
				hr_op = HNS_ROCE_V2_WQE_OP_LOCAL_INV;
				roce_set_bit(rc_sq_wqe->byte_4,
					     V2_RC_SEND_WQE_BYTE_4_SO_S, 1);
				rc_sq_wqe->inv_key =
					cpu_to_le32(wr->ex.invalidate_rkey);
				break;
			case IB_WR_REG_MR:
				hr_op = HNS_ROCE_V2_WQE_OP_FAST_REG_PMR;
				fseg = wqe;
				set_frmr_seg(rc_sq_wqe, fseg, reg_wr(wr));
				break;
			case IB_WR_ATOMIC_CMP_AND_SWP:
				hr_op = HNS_ROCE_V2_WQE_OP_ATOM_CMP_AND_SWAP;
				rc_sq_wqe->rkey =
					cpu_to_le32(atomic_wr(wr)->rkey);
				rc_sq_wqe->va =
					cpu_to_le64(atomic_wr(wr)->remote_addr);
				break;
			case IB_WR_ATOMIC_FETCH_AND_ADD:
				hr_op = HNS_ROCE_V2_WQE_OP_ATOM_FETCH_AND_ADD;
				rc_sq_wqe->rkey =
					cpu_to_le32(atomic_wr(wr)->rkey);
				rc_sq_wqe->va =
					cpu_to_le64(atomic_wr(wr)->remote_addr);
				break;
			case IB_WR_MASKED_ATOMIC_CMP_AND_SWP:
				hr_op =
				       HNS_ROCE_V2_WQE_OP_ATOM_MSK_CMP_AND_SWAP;
				break;
			case IB_WR_MASKED_ATOMIC_FETCH_AND_ADD:
				hr_op =
				      HNS_ROCE_V2_WQE_OP_ATOM_MSK_FETCH_AND_ADD;
				break;
			default:
				hr_op = HNS_ROCE_V2_WQE_OP_MASK;
				break;
			}

			roce_set_field(rc_sq_wqe->byte_4,
				       V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
				       V2_RC_SEND_WQE_BYTE_4_OPCODE_S, hr_op);

			if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
			    wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD) {
				struct hns_roce_v2_wqe_data_seg *dseg;

				/* Atomics carry one data seg + atomic seg */
				dseg = wqe;
				set_data_seg_v2(dseg, wr->sg_list);
				wqe += sizeof(struct hns_roce_v2_wqe_data_seg);
				set_atomic_seg(wqe, atomic_wr(wr));
				roce_set_field(rc_sq_wqe->byte_16,
					       V2_RC_SEND_WQE_BYTE_16_SGE_NUM_M,
					       V2_RC_SEND_WQE_BYTE_16_SGE_NUM_S,
					       wr->num_sge);
			} else if (wr->opcode != IB_WR_REG_MR) {
				/* REG_MR already filled its segment above */
				ret = set_rwqe_data_seg(ibqp, wr, rc_sq_wqe,
							wqe, &sge_ind, bad_wr);
				if (ret)
					goto out;
			}

			ind++;
		} else {
			dev_err(dev, "Illegal qp_type(0x%x)\n", ibqp->qp_type);
			spin_unlock_irqrestore(&qp->sq.lock, flags);
			*bad_wr = wr;
			return -EOPNOTSUPP;
		}
	}

out:
	if (likely(nreq)) {
		qp->sq.head += nreq;
		/* Memory barrier */
		wmb();

		/* Ring the SQ doorbell once for the whole chain */
		sq_db.byte_4 = 0;
		sq_db.parameter = 0;

		roce_set_field(sq_db.byte_4, V2_DB_BYTE_4_TAG_M,
			       V2_DB_BYTE_4_TAG_S, qp->doorbell_qpn);
		roce_set_field(sq_db.byte_4, V2_DB_BYTE_4_CMD_M,
			       V2_DB_BYTE_4_CMD_S, HNS_ROCE_V2_SQ_DB);
		roce_set_field(sq_db.parameter, V2_DB_PARAMETER_IDX_M,
			       V2_DB_PARAMETER_IDX_S,
			       qp->sq.head & ((qp->sq.wqe_cnt << 1) - 1));
		roce_set_field(sq_db.parameter, V2_DB_PARAMETER_SL_M,
			       V2_DB_PARAMETER_SL_S, qp->sl);

		hns_roce_write64(hr_dev, (__le32 *)&sq_db, qp->sq.db_reg_l);

		qp->sq_next_wqe = ind;
		qp->next_sge = sge_ind;

		/* If the QP went into error, push it through modify_qp so
		 * the hardware state matches.
		 */
		if (qp->state == IB_QPS_ERR) {
			attr_mask = IB_QP_STATE;
			attr.qp_state = IB_QPS_ERR;

			ret = hns_roce_v2_modify_qp(&qp->ibqp, &attr, attr_mask,
						    qp->state, IB_QPS_ERR);
			if (ret) {
				spin_unlock_irqrestore(&qp->sq.lock, flags);
				*bad_wr = wr;
				return ret;
			}
		}
	}

	spin_unlock_irqrestore(&qp->sq.lock, flags);

	return ret;
}
615
d34ac5cd
BVA
/* Post a chain of receive work requests to @ibqp's receive queue.
 *
 * Each WR's SGEs are copied into the next RQ WQE slot; a terminating
 * invalid-lkey segment is written when fewer than max_gs SGEs are used.
 * The RQ doorbell record is updated once for the whole chain.
 *
 * Returns 0 on success or a negative errno with *bad_wr set.
 */
static int hns_roce_v2_post_recv(struct ib_qp *ibqp,
				 const struct ib_recv_wr *wr,
				 const struct ib_recv_wr **bad_wr)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
	struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
	struct hns_roce_v2_wqe_data_seg *dseg;
	struct hns_roce_rinl_sge *sge_list;
	struct device *dev = hr_dev->dev;
	struct ib_qp_attr attr;
	unsigned long flags;
	void *wqe = NULL;
	int attr_mask;
	int ret = 0;
	int nreq;
	int ind;
	int i;

	spin_lock_irqsave(&hr_qp->rq.lock, flags);
	ind = hr_qp->rq.head & (hr_qp->rq.wqe_cnt - 1);

	if (hr_qp->state == IB_QPS_RESET) {
		spin_unlock_irqrestore(&hr_qp->rq.lock, flags);
		*bad_wr = wr;
		return -EINVAL;
	}

	for (nreq = 0; wr; ++nreq, wr = wr->next) {
		if (hns_roce_wq_overflow(&hr_qp->rq, nreq,
			hr_qp->ibqp.recv_cq)) {
			ret = -ENOMEM;
			*bad_wr = wr;
			goto out;
		}

		if (unlikely(wr->num_sge > hr_qp->rq.max_gs)) {
			dev_err(dev, "rq:num_sge=%d > qp->sq.max_gs=%d\n",
				wr->num_sge, hr_qp->rq.max_gs);
			ret = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		wqe = get_recv_wqe(hr_qp, ind);
		dseg = (struct hns_roce_v2_wqe_data_seg *)wqe;
		/* Copy valid (non-zero-length) SGEs into the WQE */
		for (i = 0; i < wr->num_sge; i++) {
			if (!wr->sg_list[i].length)
				continue;
			set_data_seg_v2(dseg, wr->sg_list + i);
			dseg++;
		}

		/* Terminate the segment list with an invalid lkey so the
		 * hardware stops scanning.
		 */
		if (i < hr_qp->rq.max_gs) {
			dseg->lkey = cpu_to_le32(HNS_ROCE_INVALID_LKEY);
			dseg->addr = 0;
		}

		/* rq support inline data */
		if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) {
			/* Remember the posted buffers so inline receive
			 * data can later be copied into them.
			 */
			sge_list = hr_qp->rq_inl_buf.wqe_list[ind].sg_list;
			hr_qp->rq_inl_buf.wqe_list[ind].sge_cnt =
							       (u32)wr->num_sge;
			for (i = 0; i < wr->num_sge; i++) {
				sge_list[i].addr =
					       (void *)(u64)wr->sg_list[i].addr;
				sge_list[i].len = wr->sg_list[i].length;
			}
		}

		hr_qp->rq.wrid[ind] = wr->wr_id;

		ind = (ind + 1) & (hr_qp->rq.wqe_cnt - 1);
	}

out:
	if (likely(nreq)) {
		hr_qp->rq.head += nreq;
		/* Memory barrier */
		wmb();

		/* Update the doorbell record with the new RQ head */
		*hr_qp->rdb.db_record = hr_qp->rq.head & 0xffff;

		/* If the QP went into error, push it through modify_qp so
		 * the hardware state matches.
		 */
		if (hr_qp->state == IB_QPS_ERR) {
			attr_mask = IB_QP_STATE;
			attr.qp_state = IB_QPS_ERR;

			ret = hns_roce_v2_modify_qp(&hr_qp->ibqp, &attr,
						    attr_mask, hr_qp->state,
						    IB_QPS_ERR);
			if (ret) {
				spin_unlock_irqrestore(&hr_qp->rq.lock, flags);
				*bad_wr = wr;
				return ret;
			}
		}
	}
	spin_unlock_irqrestore(&hr_qp->rq.lock, flags);

	return ret;
}
716
6a04aed6
WHX
717static int hns_roce_v2_cmd_hw_reseted(struct hns_roce_dev *hr_dev,
718 unsigned long instance_stage,
719 unsigned long reset_stage)
720{
721 /* When hardware reset has been completed once or more, we should stop
d3743fa9 722 * sending mailbox&cmq&doorbell to hardware. If now in .init_instance()
6a04aed6
WHX
723 * function, we should exit with error. If now at HNAE3_INIT_CLIENT
724 * stage of soft reset process, we should exit with error, and then
725 * HNAE3_INIT_CLIENT related process can rollback the operation like
726 * notifing hardware to free resources, HNAE3_INIT_CLIENT related
727 * process will exit with error to notify NIC driver to reschedule soft
728 * reset process once again.
729 */
730 hr_dev->is_reset = true;
d3743fa9 731 hr_dev->dis_db = true;
6a04aed6
WHX
732
733 if (reset_stage == HNS_ROCE_STATE_RST_INIT ||
734 instance_stage == HNS_ROCE_STATE_INIT)
735 return CMD_RST_PRC_EBUSY;
736
737 return CMD_RST_PRC_SUCCESS;
738}
739
740static int hns_roce_v2_cmd_hw_resetting(struct hns_roce_dev *hr_dev,
741 unsigned long instance_stage,
742 unsigned long reset_stage)
743{
744 struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
745 struct hnae3_handle *handle = priv->handle;
746 const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
747
d3743fa9
WHX
748 /* When hardware reset is detected, we should stop sending mailbox&cmq&
749 * doorbell to hardware. If now in .init_instance() function, we should
6a04aed6
WHX
750 * exit with error. If now at HNAE3_INIT_CLIENT stage of soft reset
751 * process, we should exit with error, and then HNAE3_INIT_CLIENT
752 * related process can rollback the operation like notifing hardware to
753 * free resources, HNAE3_INIT_CLIENT related process will exit with
754 * error to notify NIC driver to reschedule soft reset process once
755 * again.
756 */
d3743fa9 757 hr_dev->dis_db = true;
6a04aed6
WHX
758 if (!ops->get_hw_reset_stat(handle))
759 hr_dev->is_reset = true;
760
761 if (!hr_dev->is_reset || reset_stage == HNS_ROCE_STATE_RST_INIT ||
762 instance_stage == HNS_ROCE_STATE_INIT)
763 return CMD_RST_PRC_EBUSY;
764
765 return CMD_RST_PRC_SUCCESS;
766}
767
768static int hns_roce_v2_cmd_sw_resetting(struct hns_roce_dev *hr_dev)
769{
770 struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
771 struct hnae3_handle *handle = priv->handle;
772 const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
773
774 /* When software reset is detected at .init_instance() function, we
d3743fa9
WHX
775 * should stop sending mailbox&cmq&doorbell to hardware, and exit
776 * with error.
6a04aed6 777 */
d3743fa9 778 hr_dev->dis_db = true;
6a04aed6
WHX
779 if (ops->ae_dev_reset_cnt(handle) != hr_dev->reset_cnt)
780 hr_dev->is_reset = true;
781
782 return CMD_RST_PRC_EBUSY;
783}
784
785static int hns_roce_v2_rst_process_cmd(struct hns_roce_dev *hr_dev)
786{
787 struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
788 struct hnae3_handle *handle = priv->handle;
789 const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
790 unsigned long instance_stage; /* the current instance stage */
791 unsigned long reset_stage; /* the current reset stage */
792 unsigned long reset_cnt;
793 bool sw_resetting;
794 bool hw_resetting;
795
796 if (hr_dev->is_reset)
797 return CMD_RST_PRC_SUCCESS;
798
799 /* Get information about reset from NIC driver or RoCE driver itself,
800 * the meaning of the following variables from NIC driver are described
801 * as below:
802 * reset_cnt -- The count value of completed hardware reset.
803 * hw_resetting -- Whether hardware device is resetting now.
804 * sw_resetting -- Whether NIC's software reset process is running now.
805 */
806 instance_stage = handle->rinfo.instance_state;
807 reset_stage = handle->rinfo.reset_state;
808 reset_cnt = ops->ae_dev_reset_cnt(handle);
809 hw_resetting = ops->get_hw_reset_stat(handle);
810 sw_resetting = ops->ae_dev_resetting(handle);
811
812 if (reset_cnt != hr_dev->reset_cnt)
813 return hns_roce_v2_cmd_hw_reseted(hr_dev, instance_stage,
814 reset_stage);
815 else if (hw_resetting)
816 return hns_roce_v2_cmd_hw_resetting(hr_dev, instance_stage,
817 reset_stage);
818 else if (sw_resetting && instance_stage == HNS_ROCE_STATE_INIT)
819 return hns_roce_v2_cmd_sw_resetting(hr_dev);
820
821 return 0;
822}
823
a04ff739
WHX
824static int hns_roce_cmq_space(struct hns_roce_v2_cmq_ring *ring)
825{
826 int ntu = ring->next_to_use;
827 int ntc = ring->next_to_clean;
828 int used = (ntu - ntc + ring->desc_num) % ring->desc_num;
829
830 return ring->desc_num - used - 1;
831}
832
833static int hns_roce_alloc_cmq_desc(struct hns_roce_dev *hr_dev,
834 struct hns_roce_v2_cmq_ring *ring)
835{
836 int size = ring->desc_num * sizeof(struct hns_roce_cmq_desc);
837
838 ring->desc = kzalloc(size, GFP_KERNEL);
839 if (!ring->desc)
840 return -ENOMEM;
841
842 ring->desc_dma_addr = dma_map_single(hr_dev->dev, ring->desc, size,
843 DMA_BIDIRECTIONAL);
844 if (dma_mapping_error(hr_dev->dev, ring->desc_dma_addr)) {
845 ring->desc_dma_addr = 0;
846 kfree(ring->desc);
847 ring->desc = NULL;
848 return -ENOMEM;
849 }
850
851 return 0;
852}
853
854static void hns_roce_free_cmq_desc(struct hns_roce_dev *hr_dev,
855 struct hns_roce_v2_cmq_ring *ring)
856{
857 dma_unmap_single(hr_dev->dev, ring->desc_dma_addr,
858 ring->desc_num * sizeof(struct hns_roce_cmq_desc),
859 DMA_BIDIRECTIONAL);
90e7a4d5 860
861 ring->desc_dma_addr = 0;
a04ff739
WHX
862 kfree(ring->desc);
863}
864
865static int hns_roce_init_cmq_ring(struct hns_roce_dev *hr_dev, bool ring_type)
866{
867 struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
868 struct hns_roce_v2_cmq_ring *ring = (ring_type == TYPE_CSQ) ?
869 &priv->cmq.csq : &priv->cmq.crq;
870
871 ring->flag = ring_type;
872 ring->next_to_clean = 0;
873 ring->next_to_use = 0;
874
875 return hns_roce_alloc_cmq_desc(hr_dev, ring);
876}
877
878static void hns_roce_cmq_init_regs(struct hns_roce_dev *hr_dev, bool ring_type)
879{
880 struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
881 struct hns_roce_v2_cmq_ring *ring = (ring_type == TYPE_CSQ) ?
882 &priv->cmq.csq : &priv->cmq.crq;
883 dma_addr_t dma = ring->desc_dma_addr;
884
885 if (ring_type == TYPE_CSQ) {
886 roce_write(hr_dev, ROCEE_TX_CMQ_BASEADDR_L_REG, (u32)dma);
887 roce_write(hr_dev, ROCEE_TX_CMQ_BASEADDR_H_REG,
888 upper_32_bits(dma));
889 roce_write(hr_dev, ROCEE_TX_CMQ_DEPTH_REG,
890 (ring->desc_num >> HNS_ROCE_CMQ_DESC_NUM_S) |
891 HNS_ROCE_CMQ_ENABLE);
892 roce_write(hr_dev, ROCEE_TX_CMQ_HEAD_REG, 0);
893 roce_write(hr_dev, ROCEE_TX_CMQ_TAIL_REG, 0);
894 } else {
895 roce_write(hr_dev, ROCEE_RX_CMQ_BASEADDR_L_REG, (u32)dma);
896 roce_write(hr_dev, ROCEE_RX_CMQ_BASEADDR_H_REG,
897 upper_32_bits(dma));
898 roce_write(hr_dev, ROCEE_RX_CMQ_DEPTH_REG,
899 (ring->desc_num >> HNS_ROCE_CMQ_DESC_NUM_S) |
900 HNS_ROCE_CMQ_ENABLE);
901 roce_write(hr_dev, ROCEE_RX_CMQ_HEAD_REG, 0);
902 roce_write(hr_dev, ROCEE_RX_CMQ_TAIL_REG, 0);
903 }
904}
905
906static int hns_roce_v2_cmq_init(struct hns_roce_dev *hr_dev)
907{
908 struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
909 int ret;
910
911 /* Setup the queue entries for command queue */
426c4146
LO
912 priv->cmq.csq.desc_num = CMD_CSQ_DESC_NUM;
913 priv->cmq.crq.desc_num = CMD_CRQ_DESC_NUM;
a04ff739
WHX
914
915 /* Setup the lock for command queue */
916 spin_lock_init(&priv->cmq.csq.lock);
917 spin_lock_init(&priv->cmq.crq.lock);
918
919 /* Setup Tx write back timeout */
920 priv->cmq.tx_timeout = HNS_ROCE_CMQ_TX_TIMEOUT;
921
922 /* Init CSQ */
923 ret = hns_roce_init_cmq_ring(hr_dev, TYPE_CSQ);
924 if (ret) {
925 dev_err(hr_dev->dev, "Init CSQ error, ret = %d.\n", ret);
926 return ret;
927 }
928
929 /* Init CRQ */
930 ret = hns_roce_init_cmq_ring(hr_dev, TYPE_CRQ);
931 if (ret) {
932 dev_err(hr_dev->dev, "Init CRQ error, ret = %d.\n", ret);
933 goto err_crq;
934 }
935
936 /* Init CSQ REG */
937 hns_roce_cmq_init_regs(hr_dev, TYPE_CSQ);
938
939 /* Init CRQ REG */
940 hns_roce_cmq_init_regs(hr_dev, TYPE_CRQ);
941
942 return 0;
943
944err_crq:
945 hns_roce_free_cmq_desc(hr_dev, &priv->cmq.csq);
946
947 return ret;
948}
949
950static void hns_roce_v2_cmq_exit(struct hns_roce_dev *hr_dev)
951{
952 struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
953
954 hns_roce_free_cmq_desc(hr_dev, &priv->cmq.csq);
955 hns_roce_free_cmq_desc(hr_dev, &priv->cmq.crq);
956}
957
281d0ccf
CIK
958static void hns_roce_cmq_setup_basic_desc(struct hns_roce_cmq_desc *desc,
959 enum hns_roce_opcode_type opcode,
960 bool is_read)
a04ff739
WHX
961{
962 memset((void *)desc, 0, sizeof(struct hns_roce_cmq_desc));
963 desc->opcode = cpu_to_le16(opcode);
964 desc->flag =
965 cpu_to_le16(HNS_ROCE_CMD_FLAG_NO_INTR | HNS_ROCE_CMD_FLAG_IN);
966 if (is_read)
967 desc->flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_WR);
968 else
969 desc->flag &= cpu_to_le16(~HNS_ROCE_CMD_FLAG_WR);
970}
971
972static int hns_roce_cmq_csq_done(struct hns_roce_dev *hr_dev)
973{
974 struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
975 u32 head = roce_read(hr_dev, ROCEE_TX_CMQ_HEAD_REG);
976
977 return head == priv->cmq.csq.next_to_use;
978}
979
980static int hns_roce_cmq_csq_clean(struct hns_roce_dev *hr_dev)
981{
982 struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
983 struct hns_roce_v2_cmq_ring *csq = &priv->cmq.csq;
984 struct hns_roce_cmq_desc *desc;
985 u16 ntc = csq->next_to_clean;
986 u32 head;
987 int clean = 0;
988
989 desc = &csq->desc[ntc];
990 head = roce_read(hr_dev, ROCEE_TX_CMQ_HEAD_REG);
991 while (head != ntc) {
992 memset(desc, 0, sizeof(*desc));
993 ntc++;
994 if (ntc == csq->desc_num)
995 ntc = 0;
996 desc = &csq->desc[ntc];
997 clean++;
998 }
999 csq->next_to_clean = ntc;
1000
1001 return clean;
1002}
1003
6a04aed6
WHX
/*
 * Post @num descriptors from @desc to the command send queue (CSQ), ring the
 * tail doorbell and - for NO_INTR (synchronous) commands - busy-wait for the
 * firmware to consume them, then copy the hardware write-back results into
 * @desc and translate the per-descriptor status into an errno.
 *
 * Returns 0 on success, -EBUSY if the ring lacks space for @num entries,
 * -EIO if the firmware reported a failure, -EAGAIN if it did not answer
 * within priv->cmq.tx_timeout.  The whole submit/poll/clean cycle runs
 * under csq->lock with bottom halves disabled.
 */
static int __hns_roce_cmq_send(struct hns_roce_dev *hr_dev,
			       struct hns_roce_cmq_desc *desc, int num)
{
	struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
	struct hns_roce_v2_cmq_ring *csq = &priv->cmq.csq;
	struct hns_roce_cmq_desc *desc_to_use;
	bool complete = false;
	u32 timeout = 0;
	int handle = 0;
	u16 desc_ret;
	int ret = 0;
	int ntc;

	spin_lock_bh(&csq->lock);

	if (num > hns_roce_cmq_space(csq)) {
		spin_unlock_bh(&csq->lock);
		return -EBUSY;
	}

	/*
	 * Record the location of desc in the cmq for this time
	 * which will be use for hardware to write back
	 */
	ntc = csq->next_to_use;

	/* Copy the caller's descriptors into the ring, wrapping as needed */
	while (handle < num) {
		desc_to_use = &csq->desc[csq->next_to_use];
		*desc_to_use = desc[handle];
		dev_dbg(hr_dev->dev, "set cmq desc:\n");
		csq->next_to_use++;
		if (csq->next_to_use == csq->desc_num)
			csq->next_to_use = 0;
		handle++;
	}

	/* Write to hardware */
	roce_write(hr_dev, ROCEE_TX_CMQ_TAIL_REG, csq->next_to_use);

	/*
	 * If the command is sync, wait for the firmware to write back,
	 * if multi descriptors to be sent, use the first one to check
	 */
	if ((desc->flag) & HNS_ROCE_CMD_FLAG_NO_INTR) {
		do {
			if (hns_roce_cmq_csq_done(hr_dev))
				break;
			udelay(1);
			timeout++;
		} while (timeout < priv->cmq.tx_timeout);
	}

	if (hns_roce_cmq_csq_done(hr_dev)) {
		complete = true;
		handle = 0;
		/* Copy the hardware write-back (starting at the recorded
		 * position) back into the caller's array and fold the
		 * per-descriptor retval into the function result.
		 */
		while (handle < num) {
			/* get the result of hardware write back */
			desc_to_use = &csq->desc[ntc];
			desc[handle] = *desc_to_use;
			dev_dbg(hr_dev->dev, "Get cmq desc:\n");
			desc_ret = desc[handle].retval;
			if (desc_ret == CMD_EXEC_SUCCESS)
				ret = 0;
			else
				ret = -EIO;
			priv->cmq.last_status = desc_ret;
			ntc++;
			handle++;
			if (ntc == csq->desc_num)
				ntc = 0;
		}
	}

	if (!complete)
		ret = -EAGAIN;

	/* clean the command send queue */
	handle = hns_roce_cmq_csq_clean(hr_dev);
	if (handle != num)
		dev_warn(hr_dev->dev, "Cleaned %d, need to clean %d\n",
			 handle, num);

	spin_unlock_bh(&csq->lock);

	return ret;
}
1090
e95e52a1 1091static int hns_roce_cmq_send(struct hns_roce_dev *hr_dev,
6a04aed6
WHX
1092 struct hns_roce_cmq_desc *desc, int num)
1093{
1094 int retval;
1095 int ret;
1096
1097 ret = hns_roce_v2_rst_process_cmd(hr_dev);
1098 if (ret == CMD_RST_PRC_SUCCESS)
1099 return 0;
1100 if (ret == CMD_RST_PRC_EBUSY)
1101 return ret;
1102
1103 ret = __hns_roce_cmq_send(hr_dev, desc, num);
1104 if (ret) {
1105 retval = hns_roce_v2_rst_process_cmd(hr_dev);
1106 if (retval == CMD_RST_PRC_SUCCESS)
1107 return 0;
1108 else if (retval == CMD_RST_PRC_EBUSY)
1109 return retval;
1110 }
1111
1112 return ret;
1113}
1114
281d0ccf 1115static int hns_roce_cmq_query_hw_info(struct hns_roce_dev *hr_dev)
cfc85f3e
WHX
1116{
1117 struct hns_roce_query_version *resp;
1118 struct hns_roce_cmq_desc desc;
1119 int ret;
1120
1121 hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_QUERY_HW_VER, true);
1122 ret = hns_roce_cmq_send(hr_dev, &desc, 1);
1123 if (ret)
1124 return ret;
1125
1126 resp = (struct hns_roce_query_version *)desc.data;
1127 hr_dev->hw_rev = le32_to_cpu(resp->rocee_hw_version);
3a63c964
LO
1128 hr_dev->vendor_id = hr_dev->pci_dev->vendor;
1129
1130 return 0;
1131}
1132
89a6da3c
LC
/*
 * Ask the firmware to clear all state owned by this function (PF) and poll
 * until hardware reports the clear is done.  A first FUNC_CLEAR command
 * (write) kicks the operation off; subsequent FUNC_CLEAR commands (read)
 * poll the RST_FUN_DONE flag.  On success hr_dev->is_reset is set so the
 * teardown path avoids touching dead hardware.  Failures are only logged -
 * the caller cannot recover here.
 */
static void hns_roce_function_clear(struct hns_roce_dev *hr_dev)
{
	/* NOTE(review): set on write failure but never read afterwards -
	 * looks vestigial; confirm before removing.
	 */
	bool fclr_write_fail_flag = false;
	struct hns_roce_func_clear *resp;
	struct hns_roce_cmq_desc desc;
	unsigned long end;
	int ret;

	hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_FUNC_CLEAR, false);
	resp = (struct hns_roce_func_clear *)desc.data;

	ret = hns_roce_cmq_send(hr_dev, &desc, 1);
	if (ret) {
		fclr_write_fail_flag = true;
		dev_err(hr_dev->dev, "Func clear write failed, ret = %d.\n",
			ret);
		return;
	}

	msleep(HNS_ROCE_V2_READ_FUNC_CLEAR_FLAG_INTERVAL);
	/* Poll budget in milliseconds.  NOTE(review): if the timeout is not
	 * an exact multiple of the per-iteration wait, the unsigned "end"
	 * would wrap instead of reaching zero - confirm the constants.
	 */
	end = HNS_ROCE_V2_FUNC_CLEAR_TIMEOUT_MSECS;
	while (end) {
		msleep(HNS_ROCE_V2_READ_FUNC_CLEAR_FLAG_FAIL_WAIT);
		end -= HNS_ROCE_V2_READ_FUNC_CLEAR_FLAG_FAIL_WAIT;

		/* Re-issue the same opcode as a read to poll the done flag */
		hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_FUNC_CLEAR,
					      true);

		ret = hns_roce_cmq_send(hr_dev, &desc, 1);
		if (ret)
			continue;

		if (roce_get_bit(resp->func_done, FUNC_CLEAR_RST_FUN_DONE_S)) {
			hr_dev->is_reset = true;
			return;
		}
	}

	dev_err(hr_dev->dev, "Func clear fail.\n");
}
1173
3a63c964
LO
1174static int hns_roce_query_fw_ver(struct hns_roce_dev *hr_dev)
1175{
1176 struct hns_roce_query_fw_info *resp;
1177 struct hns_roce_cmq_desc desc;
1178 int ret;
1179
1180 hns_roce_cmq_setup_basic_desc(&desc, HNS_QUERY_FW_VER, true);
1181 ret = hns_roce_cmq_send(hr_dev, &desc, 1);
1182 if (ret)
1183 return ret;
1184
1185 resp = (struct hns_roce_query_fw_info *)desc.data;
1186 hr_dev->caps.fw_ver = (u64)(le32_to_cpu(resp->fw_ver));
cfc85f3e
WHX
1187
1188 return 0;
1189}
1190
/*
 * Configure device-global parameters:
 *  - the 1us time-base counter (0x3e8 = 1000; presumably clock ticks per
 *    microsecond - TODO confirm which clock this counts)
 *  - the UDP destination port for RoCEv2 (0x12b7 = 4791, the IANA-assigned
 *    RoCEv2 port)
 */
static int hns_roce_config_global_param(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_cfg_global_param *req;
	struct hns_roce_cmq_desc desc;

	hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_GLOBAL_PARAM,
				      false);

	req = (struct hns_roce_cfg_global_param *)desc.data;
	/* Redundant with the memset in setup_basic_desc, but harmless */
	memset(req, 0, sizeof(*req));
	roce_set_field(req->time_cfg_udp_port,
		       CFG_GLOBAL_PARAM_DATA_0_ROCEE_TIME_1US_CFG_M,
		       CFG_GLOBAL_PARAM_DATA_0_ROCEE_TIME_1US_CFG_S, 0x3e8);
	roce_set_field(req->time_cfg_udp_port,
		       CFG_GLOBAL_PARAM_DATA_0_ROCEE_UDP_PORT_M,
		       CFG_GLOBAL_PARAM_DATA_0_ROCEE_UDP_PORT_S, 0x12b7);

	return hns_roce_cmq_send(hr_dev, &desc, 1);
}
1210
/*
 * Query the resources owned by this PF (QPC/SRQC/CQC/MPT base-address-table
 * entry counts, SL count and SCC context BT count) and cache them in
 * hr_dev->caps.  The query is a two-descriptor chained command: the first
 * descriptor carries the NEXT flag, the second terminates the chain.
 */
static int hns_roce_query_pf_resource(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_cmq_desc desc[2];
	struct hns_roce_pf_res_a *req_a;
	struct hns_roce_pf_res_b *req_b;
	int ret;
	int i;

	for (i = 0; i < 2; i++) {
		hns_roce_cmq_setup_basic_desc(&desc[i],
					      HNS_ROCE_OPC_QUERY_PF_RES, true);

		/* Chain the two descriptors via the NEXT flag */
		if (i == 0)
			desc[i].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
		else
			desc[i].flag &= ~cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
	}

	ret = hns_roce_cmq_send(hr_dev, desc, 2);
	if (ret)
		return ret;

	req_a = (struct hns_roce_pf_res_a *)desc[0].data;
	req_b = (struct hns_roce_pf_res_b *)desc[1].data;

	/* Descriptor A: per-context-type base-address-table entry counts */
	hr_dev->caps.qpc_bt_num = roce_get_field(req_a->qpc_bt_idx_num,
						 PF_RES_DATA_1_PF_QPC_BT_NUM_M,
						 PF_RES_DATA_1_PF_QPC_BT_NUM_S);
	hr_dev->caps.srqc_bt_num = roce_get_field(req_a->srqc_bt_idx_num,
						PF_RES_DATA_2_PF_SRQC_BT_NUM_M,
						PF_RES_DATA_2_PF_SRQC_BT_NUM_S);
	hr_dev->caps.cqc_bt_num = roce_get_field(req_a->cqc_bt_idx_num,
						 PF_RES_DATA_3_PF_CQC_BT_NUM_M,
						 PF_RES_DATA_3_PF_CQC_BT_NUM_S);
	hr_dev->caps.mpt_bt_num = roce_get_field(req_a->mpt_bt_idx_num,
						 PF_RES_DATA_4_PF_MPT_BT_NUM_M,
						 PF_RES_DATA_4_PF_MPT_BT_NUM_S);

	/* Descriptor B: service-level count and SCC context BT count */
	hr_dev->caps.sl_num = roce_get_field(req_b->qid_idx_sl_num,
					     PF_RES_DATA_3_PF_SL_NUM_M,
					     PF_RES_DATA_3_PF_SL_NUM_S);
	hr_dev->caps.sccc_bt_num = roce_get_field(req_b->sccc_bt_idx_num,
					     PF_RES_DATA_4_PF_SCCC_BT_NUM_M,
					     PF_RES_DATA_4_PF_SCCC_BT_NUM_S);

	return 0;
}
1258
0e40dc2f
YL
/*
 * Query the PF's QPC/CQC timer base-address-table entry counts (hip08 rev B
 * feature) and cache them in hr_dev->caps.  Two-descriptor chained command;
 * only the first descriptor's payload is consumed here.
 */
static int hns_roce_query_pf_timer_resource(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_pf_timer_res_a *req_a;
	struct hns_roce_cmq_desc desc[2];
	int ret, i;

	for (i = 0; i < 2; i++) {
		hns_roce_cmq_setup_basic_desc(&desc[i],
					      HNS_ROCE_OPC_QUERY_PF_TIMER_RES,
					      true);

		/* Chain the two descriptors via the NEXT flag */
		if (i == 0)
			desc[i].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
		else
			desc[i].flag &= ~cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
	}

	ret = hns_roce_cmq_send(hr_dev, desc, 2);
	if (ret)
		return ret;

	req_a = (struct hns_roce_pf_timer_res_a *)desc[0].data;

	hr_dev->caps.qpc_timer_bt_num =
				roce_get_field(req_a->qpc_timer_bt_idx_num,
					PF_RES_DATA_1_PF_QPC_TIMER_BT_NUM_M,
					PF_RES_DATA_1_PF_QPC_TIMER_BT_NUM_S);
	hr_dev->caps.cqc_timer_bt_num =
				roce_get_field(req_a->cqc_timer_bt_idx_num,
					PF_RES_DATA_2_PF_CQC_TIMER_BT_NUM_M,
					PF_RES_DATA_2_PF_CQC_TIMER_BT_NUM_S);

	return 0;
}
1293
0c1c3880
LO
/*
 * Enable loopback and destination-override switching for function @vf_id.
 * This is a read-modify-write sequence on a single descriptor: first read
 * the current switch parameters, then rebuild the flag word as a write,
 * set the loopback/override bits in the returned payload and send it back.
 * The statement order is significant - the payload read back by the first
 * send is reused in place for the second.
 */
static int hns_roce_set_vf_switch_param(struct hns_roce_dev *hr_dev,
					int vf_id)
{
	struct hns_roce_cmq_desc desc;
	struct hns_roce_vf_switch *swt;
	int ret;

	swt = (struct hns_roce_vf_switch *)desc.data;
	hns_roce_cmq_setup_basic_desc(&desc, HNS_SWITCH_PARAMETER_CFG, true);
	swt->rocee_sel |= cpu_to_le16(HNS_ICL_SWITCH_CMD_ROCEE_SEL);
	roce_set_field(swt->fun_id,
			VF_SWITCH_DATA_FUN_ID_VF_ID_M,
			VF_SWITCH_DATA_FUN_ID_VF_ID_S,
			vf_id);
	ret = hns_roce_cmq_send(hr_dev, &desc, 1);
	if (ret)
		return ret;
	/* Convert the descriptor into a write without clearing the payload */
	desc.flag =
		cpu_to_le16(HNS_ROCE_CMD_FLAG_NO_INTR | HNS_ROCE_CMD_FLAG_IN);
	desc.flag &= cpu_to_le16(~HNS_ROCE_CMD_FLAG_WR);
	roce_set_bit(swt->cfg, VF_SWITCH_DATA_CFG_ALW_LPBK_S, 1);
	roce_set_bit(swt->cfg, VF_SWITCH_DATA_CFG_ALW_LCL_LPBK_S, 1);
	roce_set_bit(swt->cfg, VF_SWITCH_DATA_CFG_ALW_DST_OVRD_S, 1);

	return hns_roce_cmq_send(hr_dev, &desc, 1);
}
1320
cfc85f3e
WHX
/*
 * Carve out the VF's share of hardware resources (QPC/SRQC/CQC/MPT base
 * address tables, EQ contexts, SMAC/SGID entries, QIDs/SLs and SCC context
 * BT entries), all starting at index 0 with driver-fixed counts.  Like the
 * PF query, this is a two-descriptor chained command: descriptor 0 carries
 * the context-table ranges, descriptor 1 the per-port/per-function ranges.
 */
static int hns_roce_alloc_vf_resource(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_cmq_desc desc[2];
	struct hns_roce_vf_res_a *req_a;
	struct hns_roce_vf_res_b *req_b;
	int i;

	req_a = (struct hns_roce_vf_res_a *)desc[0].data;
	req_b = (struct hns_roce_vf_res_b *)desc[1].data;
	/* Redundant with the memset in setup_basic_desc below, but harmless */
	memset(req_a, 0, sizeof(*req_a));
	memset(req_b, 0, sizeof(*req_b));
	for (i = 0; i < 2; i++) {
		hns_roce_cmq_setup_basic_desc(&desc[i],
					      HNS_ROCE_OPC_ALLOC_VF_RES, false);

		/* Chain the two descriptors via the NEXT flag */
		if (i == 0)
			desc[i].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
		else
			desc[i].flag &= ~cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);

		if (i == 0) {
			/* Descriptor A: context BT ranges, each (idx, num) */
			roce_set_field(req_a->vf_qpc_bt_idx_num,
				       VF_RES_A_DATA_1_VF_QPC_BT_IDX_M,
				       VF_RES_A_DATA_1_VF_QPC_BT_IDX_S, 0);
			roce_set_field(req_a->vf_qpc_bt_idx_num,
				       VF_RES_A_DATA_1_VF_QPC_BT_NUM_M,
				       VF_RES_A_DATA_1_VF_QPC_BT_NUM_S,
				       HNS_ROCE_VF_QPC_BT_NUM);

			roce_set_field(req_a->vf_srqc_bt_idx_num,
				       VF_RES_A_DATA_2_VF_SRQC_BT_IDX_M,
				       VF_RES_A_DATA_2_VF_SRQC_BT_IDX_S, 0);
			roce_set_field(req_a->vf_srqc_bt_idx_num,
				       VF_RES_A_DATA_2_VF_SRQC_BT_NUM_M,
				       VF_RES_A_DATA_2_VF_SRQC_BT_NUM_S,
				       HNS_ROCE_VF_SRQC_BT_NUM);

			roce_set_field(req_a->vf_cqc_bt_idx_num,
				       VF_RES_A_DATA_3_VF_CQC_BT_IDX_M,
				       VF_RES_A_DATA_3_VF_CQC_BT_IDX_S, 0);
			roce_set_field(req_a->vf_cqc_bt_idx_num,
				       VF_RES_A_DATA_3_VF_CQC_BT_NUM_M,
				       VF_RES_A_DATA_3_VF_CQC_BT_NUM_S,
				       HNS_ROCE_VF_CQC_BT_NUM);

			roce_set_field(req_a->vf_mpt_bt_idx_num,
				       VF_RES_A_DATA_4_VF_MPT_BT_IDX_M,
				       VF_RES_A_DATA_4_VF_MPT_BT_IDX_S, 0);
			roce_set_field(req_a->vf_mpt_bt_idx_num,
				       VF_RES_A_DATA_4_VF_MPT_BT_NUM_M,
				       VF_RES_A_DATA_4_VF_MPT_BT_NUM_S,
				       HNS_ROCE_VF_MPT_BT_NUM);

			roce_set_field(req_a->vf_eqc_bt_idx_num,
				       VF_RES_A_DATA_5_VF_EQC_IDX_M,
				       VF_RES_A_DATA_5_VF_EQC_IDX_S, 0);
			roce_set_field(req_a->vf_eqc_bt_idx_num,
				       VF_RES_A_DATA_5_VF_EQC_NUM_M,
				       VF_RES_A_DATA_5_VF_EQC_NUM_S,
				       HNS_ROCE_VF_EQC_NUM);
		} else {
			/* Descriptor B: SMAC/SGID/QID/SCCC ranges */
			roce_set_field(req_b->vf_smac_idx_num,
				       VF_RES_B_DATA_1_VF_SMAC_IDX_M,
				       VF_RES_B_DATA_1_VF_SMAC_IDX_S, 0);
			roce_set_field(req_b->vf_smac_idx_num,
				       VF_RES_B_DATA_1_VF_SMAC_NUM_M,
				       VF_RES_B_DATA_1_VF_SMAC_NUM_S,
				       HNS_ROCE_VF_SMAC_NUM);

			roce_set_field(req_b->vf_sgid_idx_num,
				       VF_RES_B_DATA_2_VF_SGID_IDX_M,
				       VF_RES_B_DATA_2_VF_SGID_IDX_S, 0);
			roce_set_field(req_b->vf_sgid_idx_num,
				       VF_RES_B_DATA_2_VF_SGID_NUM_M,
				       VF_RES_B_DATA_2_VF_SGID_NUM_S,
				       HNS_ROCE_VF_SGID_NUM);

			roce_set_field(req_b->vf_qid_idx_sl_num,
				       VF_RES_B_DATA_3_VF_QID_IDX_M,
				       VF_RES_B_DATA_3_VF_QID_IDX_S, 0);
			roce_set_field(req_b->vf_qid_idx_sl_num,
				       VF_RES_B_DATA_3_VF_SL_NUM_M,
				       VF_RES_B_DATA_3_VF_SL_NUM_S,
				       HNS_ROCE_VF_SL_NUM);

			roce_set_field(req_b->vf_sccc_idx_num,
				       VF_RES_B_DATA_4_VF_SCCC_BT_IDX_M,
				       VF_RES_B_DATA_4_VF_SCCC_BT_IDX_S, 0);
			roce_set_field(req_b->vf_sccc_idx_num,
				       VF_RES_B_DATA_4_VF_SCCC_BT_NUM_M,
				       VF_RES_B_DATA_4_VF_SCCC_BT_NUM_S,
				       HNS_ROCE_VF_SCCC_BT_NUM);
		}
	}

	return hns_roce_cmq_send(hr_dev, desc, 2);
}
1418
a81fba28
WHX
/*
 * Program the hardware base-address-table (BT) attributes for the QPC,
 * SRQC, CQC, MPT and SCCC context tables: the BA page size, the buffer
 * page size (both encoded as exponent + PG_SHIFT_OFFSET) and the hop
 * number.  HNS_ROCE_HOP_NUM_0 is a driver-internal sentinel that the
 * hardware expects encoded as 0.
 */
static int hns_roce_v2_set_bt(struct hns_roce_dev *hr_dev)
{
	u8 srqc_hop_num = hr_dev->caps.srqc_hop_num;
	u8 qpc_hop_num = hr_dev->caps.qpc_hop_num;
	u8 cqc_hop_num = hr_dev->caps.cqc_hop_num;
	u8 mpt_hop_num = hr_dev->caps.mpt_hop_num;
	u8 sccc_hop_num = hr_dev->caps.sccc_hop_num;
	struct hns_roce_cfg_bt_attr *req;
	struct hns_roce_cmq_desc desc;

	hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_BT_ATTR, false);
	req = (struct hns_roce_cfg_bt_attr *)desc.data;
	/* Redundant with the memset in setup_basic_desc, but harmless */
	memset(req, 0, sizeof(*req));

	/* QP context table */
	roce_set_field(req->vf_qpc_cfg, CFG_BT_ATTR_DATA_0_VF_QPC_BA_PGSZ_M,
		       CFG_BT_ATTR_DATA_0_VF_QPC_BA_PGSZ_S,
		       hr_dev->caps.qpc_ba_pg_sz + PG_SHIFT_OFFSET);
	roce_set_field(req->vf_qpc_cfg, CFG_BT_ATTR_DATA_0_VF_QPC_BUF_PGSZ_M,
		       CFG_BT_ATTR_DATA_0_VF_QPC_BUF_PGSZ_S,
		       hr_dev->caps.qpc_buf_pg_sz + PG_SHIFT_OFFSET);
	roce_set_field(req->vf_qpc_cfg, CFG_BT_ATTR_DATA_0_VF_QPC_HOPNUM_M,
		       CFG_BT_ATTR_DATA_0_VF_QPC_HOPNUM_S,
		       qpc_hop_num == HNS_ROCE_HOP_NUM_0 ? 0 : qpc_hop_num);

	/* SRQ context table */
	roce_set_field(req->vf_srqc_cfg, CFG_BT_ATTR_DATA_1_VF_SRQC_BA_PGSZ_M,
		       CFG_BT_ATTR_DATA_1_VF_SRQC_BA_PGSZ_S,
		       hr_dev->caps.srqc_ba_pg_sz + PG_SHIFT_OFFSET);
	roce_set_field(req->vf_srqc_cfg, CFG_BT_ATTR_DATA_1_VF_SRQC_BUF_PGSZ_M,
		       CFG_BT_ATTR_DATA_1_VF_SRQC_BUF_PGSZ_S,
		       hr_dev->caps.srqc_buf_pg_sz + PG_SHIFT_OFFSET);
	roce_set_field(req->vf_srqc_cfg, CFG_BT_ATTR_DATA_1_VF_SRQC_HOPNUM_M,
		       CFG_BT_ATTR_DATA_1_VF_SRQC_HOPNUM_S,
		       srqc_hop_num == HNS_ROCE_HOP_NUM_0 ? 0 : srqc_hop_num);

	/* CQ context table */
	roce_set_field(req->vf_cqc_cfg, CFG_BT_ATTR_DATA_2_VF_CQC_BA_PGSZ_M,
		       CFG_BT_ATTR_DATA_2_VF_CQC_BA_PGSZ_S,
		       hr_dev->caps.cqc_ba_pg_sz + PG_SHIFT_OFFSET);
	roce_set_field(req->vf_cqc_cfg, CFG_BT_ATTR_DATA_2_VF_CQC_BUF_PGSZ_M,
		       CFG_BT_ATTR_DATA_2_VF_CQC_BUF_PGSZ_S,
		       hr_dev->caps.cqc_buf_pg_sz + PG_SHIFT_OFFSET);
	roce_set_field(req->vf_cqc_cfg, CFG_BT_ATTR_DATA_2_VF_CQC_HOPNUM_M,
		       CFG_BT_ATTR_DATA_2_VF_CQC_HOPNUM_S,
		       cqc_hop_num == HNS_ROCE_HOP_NUM_0 ? 0 : cqc_hop_num);

	/* Memory protection table */
	roce_set_field(req->vf_mpt_cfg, CFG_BT_ATTR_DATA_3_VF_MPT_BA_PGSZ_M,
		       CFG_BT_ATTR_DATA_3_VF_MPT_BA_PGSZ_S,
		       hr_dev->caps.mpt_ba_pg_sz + PG_SHIFT_OFFSET);
	roce_set_field(req->vf_mpt_cfg, CFG_BT_ATTR_DATA_3_VF_MPT_BUF_PGSZ_M,
		       CFG_BT_ATTR_DATA_3_VF_MPT_BUF_PGSZ_S,
		       hr_dev->caps.mpt_buf_pg_sz + PG_SHIFT_OFFSET);
	roce_set_field(req->vf_mpt_cfg, CFG_BT_ATTR_DATA_3_VF_MPT_HOPNUM_M,
		       CFG_BT_ATTR_DATA_3_VF_MPT_HOPNUM_S,
		       mpt_hop_num == HNS_ROCE_HOP_NUM_0 ? 0 : mpt_hop_num);

	/* SCC (congestion control) context table */
	roce_set_field(req->vf_sccc_cfg,
		       CFG_BT_ATTR_DATA_4_VF_SCCC_BA_PGSZ_M,
		       CFG_BT_ATTR_DATA_4_VF_SCCC_BA_PGSZ_S,
		       hr_dev->caps.sccc_ba_pg_sz + PG_SHIFT_OFFSET);
	roce_set_field(req->vf_sccc_cfg,
		       CFG_BT_ATTR_DATA_4_VF_SCCC_BUF_PGSZ_M,
		       CFG_BT_ATTR_DATA_4_VF_SCCC_BUF_PGSZ_S,
		       hr_dev->caps.sccc_buf_pg_sz + PG_SHIFT_OFFSET);
	roce_set_field(req->vf_sccc_cfg,
		       CFG_BT_ATTR_DATA_4_VF_SCCC_HOPNUM_M,
		       CFG_BT_ATTR_DATA_4_VF_SCCC_HOPNUM_S,
		       sccc_hop_num ==
			      HNS_ROCE_HOP_NUM_0 ? 0 : sccc_hop_num);

	return hns_roce_cmq_send(hr_dev, &desc, 1);
}
1489
cfc85f3e
WHX
/*
 * Probe-time profiling: query hardware/firmware versions, configure global
 * parameters, query and allocate PF/VF resources, then fill in the static
 * hip08 capability table and program the BT attributes.  Revision 0x21
 * (hip08 rev B) gates the extra features (timer resources, VF switch
 * params, MW/FRMR/atomic/SRQ/flow-control, SCC context).
 *
 * Returns 0 on success or a negative errno from the first failing step.
 */
static int hns_roce_v2_profile(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_caps *caps = &hr_dev->caps;
	int ret;

	ret = hns_roce_cmq_query_hw_info(hr_dev);
	if (ret) {
		dev_err(hr_dev->dev, "Query hardware version fail, ret = %d.\n",
			ret);
		return ret;
	}

	ret = hns_roce_query_fw_ver(hr_dev);
	if (ret) {
		dev_err(hr_dev->dev, "Query firmware version fail, ret = %d.\n",
			ret);
		return ret;
	}

	ret = hns_roce_config_global_param(hr_dev);
	if (ret) {
		dev_err(hr_dev->dev, "Configure global param fail, ret = %d.\n",
			ret);
		return ret;
	}

	/* Get pf resource owned by every pf */
	ret = hns_roce_query_pf_resource(hr_dev);
	if (ret) {
		dev_err(hr_dev->dev, "Query pf resource fail, ret = %d.\n",
			ret);
		return ret;
	}

	if (hr_dev->pci_dev->revision == 0x21) {
		ret = hns_roce_query_pf_timer_resource(hr_dev);
		if (ret) {
			dev_err(hr_dev->dev,
				"Query pf timer resource fail, ret = %d.\n",
				ret);
			return ret;
		}
	}

	ret = hns_roce_alloc_vf_resource(hr_dev);
	if (ret) {
		dev_err(hr_dev->dev, "Allocate vf resource fail, ret = %d.\n",
			ret);
		return ret;
	}

	if (hr_dev->pci_dev->revision == 0x21) {
		ret = hns_roce_set_vf_switch_param(hr_dev, 0);
		if (ret) {
			dev_err(hr_dev->dev,
				"Set function switch param fail, ret = %d.\n",
				ret);
			return ret;
		}
	}

	hr_dev->vendor_part_id = hr_dev->pci_dev->device;
	hr_dev->sys_image_guid = be64_to_cpu(hr_dev->ib_dev.node_guid);

	/* Static hip08 limits: object counts and per-object maximums */
	caps->num_qps		= HNS_ROCE_V2_MAX_QP_NUM;
	caps->max_wqes		= HNS_ROCE_V2_MAX_WQE_NUM;
	caps->num_cqs		= HNS_ROCE_V2_MAX_CQ_NUM;
	caps->num_srqs		= HNS_ROCE_V2_MAX_SRQ_NUM;
	caps->min_cqes		= HNS_ROCE_MIN_CQE_NUM;
	caps->max_cqes		= HNS_ROCE_V2_MAX_CQE_NUM;
	caps->max_srqwqes	= HNS_ROCE_V2_MAX_SRQWQE_NUM;
	caps->max_sq_sg		= HNS_ROCE_V2_MAX_SQ_SGE_NUM;
	caps->max_extend_sg	= HNS_ROCE_V2_MAX_EXTEND_SGE_NUM;
	caps->max_rq_sg		= HNS_ROCE_V2_MAX_RQ_SGE_NUM;
	caps->max_sq_inline	= HNS_ROCE_V2_MAX_SQ_INLINE;
	caps->max_srq_sg	= HNS_ROCE_V2_MAX_SRQ_SGE_NUM;
	caps->num_uars		= HNS_ROCE_V2_UAR_NUM;
	caps->phy_num_uars	= HNS_ROCE_V2_PHY_UAR_NUM;
	caps->num_aeq_vectors	= HNS_ROCE_V2_AEQE_VEC_NUM;
	caps->num_comp_vectors	= HNS_ROCE_V2_COMP_VEC_NUM;
	caps->num_other_vectors	= HNS_ROCE_V2_ABNORMAL_VEC_NUM;
	caps->num_mtpts		= HNS_ROCE_V2_MAX_MTPT_NUM;
	caps->num_mtt_segs	= HNS_ROCE_V2_MAX_MTT_SEGS;
	caps->num_cqe_segs	= HNS_ROCE_V2_MAX_CQE_SEGS;
	caps->num_srqwqe_segs	= HNS_ROCE_V2_MAX_SRQWQE_SEGS;
	caps->num_idx_segs	= HNS_ROCE_V2_MAX_IDX_SEGS;
	caps->num_pds		= HNS_ROCE_V2_MAX_PD_NUM;
	caps->max_qp_init_rdma	= HNS_ROCE_V2_MAX_QP_INIT_RDMA;
	caps->max_qp_dest_rdma	= HNS_ROCE_V2_MAX_QP_DEST_RDMA;
	caps->max_sq_desc_sz	= HNS_ROCE_V2_MAX_SQ_DESC_SZ;
	caps->max_rq_desc_sz	= HNS_ROCE_V2_MAX_RQ_DESC_SZ;
	caps->max_srq_desc_sz	= HNS_ROCE_V2_MAX_SRQ_DESC_SZ;
	caps->qpc_entry_sz	= HNS_ROCE_V2_QPC_ENTRY_SZ;
	caps->irrl_entry_sz	= HNS_ROCE_V2_IRRL_ENTRY_SZ;
	caps->trrl_entry_sz	= HNS_ROCE_V2_TRRL_ENTRY_SZ;
	caps->cqc_entry_sz	= HNS_ROCE_V2_CQC_ENTRY_SZ;
	caps->srqc_entry_sz	= HNS_ROCE_V2_SRQC_ENTRY_SZ;
	caps->mtpt_entry_sz	= HNS_ROCE_V2_MTPT_ENTRY_SZ;
	caps->mtt_entry_sz	= HNS_ROCE_V2_MTT_ENTRY_SZ;
	caps->idx_entry_sz	= 4;
	caps->cq_entry_sz	= HNS_ROCE_V2_CQE_ENTRY_SIZE;
	caps->page_size_cap	= HNS_ROCE_V2_PAGE_SIZE_SUPPORTED;
	caps->reserved_lkey	= 0;
	caps->reserved_pds	= 0;
	caps->reserved_mrws	= 1;
	caps->reserved_uars	= 0;
	caps->reserved_cqs	= 0;
	caps->reserved_srqs	= 0;
	caps->reserved_qps	= HNS_ROCE_V2_RSV_QPS;

	/* Multi-hop addressing: page-size exponents and hop counts per table */
	caps->qpc_ba_pg_sz	= 0;
	caps->qpc_buf_pg_sz	= 0;
	caps->qpc_hop_num	= HNS_ROCE_CONTEXT_HOP_NUM;
	caps->srqc_ba_pg_sz	= 0;
	caps->srqc_buf_pg_sz	= 0;
	caps->srqc_hop_num	= HNS_ROCE_CONTEXT_HOP_NUM;
	caps->cqc_ba_pg_sz	= 0;
	caps->cqc_buf_pg_sz	= 0;
	caps->cqc_hop_num	= HNS_ROCE_CONTEXT_HOP_NUM;
	caps->mpt_ba_pg_sz	= 0;
	caps->mpt_buf_pg_sz	= 0;
	caps->mpt_hop_num	= HNS_ROCE_CONTEXT_HOP_NUM;
	caps->pbl_ba_pg_sz	= 2;
	caps->pbl_buf_pg_sz	= 0;
	caps->pbl_hop_num	= HNS_ROCE_PBL_HOP_NUM;
	caps->mtt_ba_pg_sz	= 0;
	caps->mtt_buf_pg_sz	= 0;
	caps->mtt_hop_num	= HNS_ROCE_MTT_HOP_NUM;
	caps->wqe_sq_hop_num	= 2;
	caps->wqe_sge_hop_num	= 1;
	caps->wqe_rq_hop_num	= 2;
	caps->cqe_ba_pg_sz	= 0;
	caps->cqe_buf_pg_sz	= 0;
	caps->cqe_hop_num	= HNS_ROCE_CQE_HOP_NUM;
	caps->srqwqe_ba_pg_sz	= 0;
	caps->srqwqe_buf_pg_sz	= 0;
	caps->srqwqe_hop_num	= HNS_ROCE_SRQWQE_HOP_NUM;
	caps->idx_ba_pg_sz	= 0;
	caps->idx_buf_pg_sz	= 0;
	caps->idx_hop_num	= HNS_ROCE_IDX_HOP_NUM;
	caps->eqe_ba_pg_sz	= 0;
	caps->eqe_buf_pg_sz	= 0;
	caps->eqe_hop_num	= HNS_ROCE_EQE_HOP_NUM;
	caps->tsq_buf_pg_sz	= 0;
	caps->chunk_sz		= HNS_ROCE_V2_TABLE_CHUNK_SIZE;

	caps->flags		= HNS_ROCE_CAP_FLAG_REREG_MR |
				  HNS_ROCE_CAP_FLAG_ROCE_V1_V2 |
				  HNS_ROCE_CAP_FLAG_RQ_INLINE |
				  HNS_ROCE_CAP_FLAG_RECORD_DB |
				  HNS_ROCE_CAP_FLAG_SQ_RECORD_DB;

	/* hip08 rev B adds memory windows and fast-register MRs */
	if (hr_dev->pci_dev->revision == 0x21)
		caps->flags |= HNS_ROCE_CAP_FLAG_MW |
			       HNS_ROCE_CAP_FLAG_FRMR;

	caps->pkey_table_len[0] = 1;
	caps->gid_table_len[0] = HNS_ROCE_V2_GID_INDEX_NUM;
	caps->ceqe_depth	= HNS_ROCE_V2_COMP_EQE_NUM;
	caps->aeqe_depth	= HNS_ROCE_V2_ASYNC_EQE_NUM;
	caps->local_ca_ack_delay = 0;
	caps->max_mtu = IB_MTU_4096;

	caps->max_srqs		= HNS_ROCE_V2_MAX_SRQ;
	caps->max_srq_wrs	= HNS_ROCE_V2_MAX_SRQ_WR;
	caps->max_srq_sges	= HNS_ROCE_V2_MAX_SRQ_SGE;

	/* hip08 rev B: atomics, SRQ, flow control and timer/SCC contexts */
	if (hr_dev->pci_dev->revision == 0x21) {
		caps->flags |= HNS_ROCE_CAP_FLAG_ATOMIC |
			       HNS_ROCE_CAP_FLAG_SRQ |
			       HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL;

		caps->num_qpc_timer	  = HNS_ROCE_V2_MAX_QPC_TIMER_NUM;
		caps->qpc_timer_entry_sz  = HNS_ROCE_V2_QPC_TIMER_ENTRY_SZ;
		caps->qpc_timer_ba_pg_sz  = 0;
		caps->qpc_timer_buf_pg_sz = 0;
		caps->qpc_timer_hop_num   = HNS_ROCE_HOP_NUM_0;
		caps->num_cqc_timer	  = HNS_ROCE_V2_MAX_CQC_TIMER_NUM;
		caps->cqc_timer_entry_sz  = HNS_ROCE_V2_CQC_TIMER_ENTRY_SZ;
		caps->cqc_timer_ba_pg_sz  = 0;
		caps->cqc_timer_buf_pg_sz = 0;
		caps->cqc_timer_hop_num   = HNS_ROCE_HOP_NUM_0;

		caps->sccc_entry_sz	= HNS_ROCE_V2_SCCC_ENTRY_SZ;
		caps->sccc_ba_pg_sz	= 0;
		caps->sccc_buf_pg_sz    = 0;
		caps->sccc_hop_num	= HNS_ROCE_SCCC_HOP_NUM;
	}

	ret = hns_roce_v2_set_bt(hr_dev);
	if (ret)
		dev_err(hr_dev->dev, "Configure bt attribute fail, ret = %d.\n",
			ret);

	return ret;
}
1686
/*
 * Tell hardware about a previously-built link table (TSQ or TPQ): base
 * address, depth, page size, head entry and tail entry.  The information is
 * split across a two-descriptor chained command (descriptor 0: base/depth/
 * head, descriptor 1: tail), finished off by setting the INIT_EN bit.
 *
 * Assumes link_tbl->npages >= 2 (entry[page_num - 2] is read for the tail
 * next-pointer); hns_roce_init_link_table() guarantees this because
 * pg_num_b is always at least 2.
 */
static int hns_roce_config_link_table(struct hns_roce_dev *hr_dev,
				      enum hns_roce_link_table_type type)
{
	struct hns_roce_cmq_desc desc[2];
	struct hns_roce_cfg_llm_a *req_a =
				(struct hns_roce_cfg_llm_a *)desc[0].data;
	struct hns_roce_cfg_llm_b *req_b =
				(struct hns_roce_cfg_llm_b *)desc[1].data;
	struct hns_roce_v2_priv *priv = hr_dev->priv;
	struct hns_roce_link_table *link_tbl;
	struct hns_roce_link_table_entry *entry;
	enum hns_roce_opcode_type opcode;
	u32 page_num;
	int i;

	switch (type) {
	case TSQ_LINK_TABLE:
		link_tbl = &priv->tsq;
		opcode = HNS_ROCE_OPC_CFG_EXT_LLM;
		break;
	case TPQ_LINK_TABLE:
		link_tbl = &priv->tpq;
		opcode = HNS_ROCE_OPC_CFG_TMOUT_LLM;
		break;
	default:
		return -EINVAL;
	}

	page_num = link_tbl->npages;
	entry = link_tbl->table.buf;
	memset(req_a, 0, sizeof(*req_a));
	memset(req_b, 0, sizeof(*req_b));

	for (i = 0; i < 2; i++) {
		hns_roce_cmq_setup_basic_desc(&desc[i], opcode, false);

		/* Chain the two descriptors via the NEXT flag */
		if (i == 0)
			desc[i].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
		else
			desc[i].flag &= ~cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);

		if (i == 0) {
			/* Descriptor A: table DMA base, depth, page size
			 * and the head entry of the linked page list.
			 */
			req_a->base_addr_l = link_tbl->table.map & 0xffffffff;
			req_a->base_addr_h = (link_tbl->table.map >> 32) &
					     0xffffffff;
			roce_set_field(req_a->depth_pgsz_init_en,
				       CFG_LLM_QUE_DEPTH_M,
				       CFG_LLM_QUE_DEPTH_S,
				       link_tbl->npages);
			roce_set_field(req_a->depth_pgsz_init_en,
				       CFG_LLM_QUE_PGSZ_M,
				       CFG_LLM_QUE_PGSZ_S,
				       link_tbl->pg_sz);
			req_a->head_ba_l = entry[0].blk_ba0;
			req_a->head_ba_h_nxtptr = entry[0].blk_ba1_nxt_ptr;
			roce_set_field(req_a->head_ptr,
				       CFG_LLM_HEAD_PTR_M,
				       CFG_LLM_HEAD_PTR_S, 0);
		} else {
			/* Descriptor B: the tail entry and its pointer,
			 * taken from the second-to-last entry's next field.
			 */
			req_b->tail_ba_l = entry[page_num - 1].blk_ba0;
			roce_set_field(req_b->tail_ba_h,
				       CFG_LLM_TAIL_BA_H_M,
				       CFG_LLM_TAIL_BA_H_S,
				       entry[page_num - 1].blk_ba1_nxt_ptr &
				       HNS_ROCE_LINK_TABLE_BA1_M);
			roce_set_field(req_b->tail_ptr,
				       CFG_LLM_TAIL_PTR_M,
				       CFG_LLM_TAIL_PTR_S,
				       (entry[page_num - 2].blk_ba1_nxt_ptr &
				       HNS_ROCE_LINK_TABLE_NXT_PTR_M) >>
				       HNS_ROCE_LINK_TABLE_NXT_PTR_S);
		}
	}
	/* Arm the table: hardware starts using it once INIT_EN is set */
	roce_set_field(req_a->depth_pgsz_init_en,
		       CFG_LLM_INIT_EN_M, CFG_LLM_INIT_EN_S, 1);

	return hns_roce_cmq_send(hr_dev, desc, 2);
}
1765
1766static int hns_roce_init_link_table(struct hns_roce_dev *hr_dev,
1767 enum hns_roce_link_table_type type)
1768{
1769 struct hns_roce_v2_priv *priv = hr_dev->priv;
1770 struct hns_roce_link_table *link_tbl;
1771 struct hns_roce_link_table_entry *entry;
1772 struct device *dev = hr_dev->dev;
1773 u32 buf_chk_sz;
1774 dma_addr_t t;
ded58ff9 1775 int func_num = 1;
6b63597d 1776 int pg_num_a;
1777 int pg_num_b;
1778 int pg_num;
1779 int size;
1780 int i;
1781
1782 switch (type) {
1783 case TSQ_LINK_TABLE:
1784 link_tbl = &priv->tsq;
1785 buf_chk_sz = 1 << (hr_dev->caps.tsq_buf_pg_sz + PAGE_SHIFT);
1786 pg_num_a = hr_dev->caps.num_qps * 8 / buf_chk_sz;
1787 pg_num_b = hr_dev->caps.sl_num * 4 + 2;
1788 break;
ded58ff9 1789 case TPQ_LINK_TABLE:
1790 link_tbl = &priv->tpq;
1791 buf_chk_sz = 1 << (hr_dev->caps.tpq_buf_pg_sz + PAGE_SHIFT);
1792 pg_num_a = hr_dev->caps.num_cqs * 4 / buf_chk_sz;
1793 pg_num_b = 2 * 4 * func_num + 2;
1794 break;
6b63597d 1795 default:
1796 return -EINVAL;
1797 }
1798
1799 pg_num = max(pg_num_a, pg_num_b);
1800 size = pg_num * sizeof(struct hns_roce_link_table_entry);
1801
1802 link_tbl->table.buf = dma_alloc_coherent(dev, size,
1803 &link_tbl->table.map,
1804 GFP_KERNEL);
1805 if (!link_tbl->table.buf)
1806 goto out;
1807
1808 link_tbl->pg_list = kcalloc(pg_num, sizeof(*link_tbl->pg_list),
1809 GFP_KERNEL);
1810 if (!link_tbl->pg_list)
1811 goto err_kcalloc_failed;
1812
1813 entry = link_tbl->table.buf;
1814 for (i = 0; i < pg_num; ++i) {
1815 link_tbl->pg_list[i].buf = dma_alloc_coherent(dev, buf_chk_sz,
1816 &t, GFP_KERNEL);
1817 if (!link_tbl->pg_list[i].buf)
1818 goto err_alloc_buf_failed;
1819
1820 link_tbl->pg_list[i].map = t;
1821 memset(link_tbl->pg_list[i].buf, 0, buf_chk_sz);
1822
1823 entry[i].blk_ba0 = (t >> 12) & 0xffffffff;
1824 roce_set_field(entry[i].blk_ba1_nxt_ptr,
1825 HNS_ROCE_LINK_TABLE_BA1_M,
1826 HNS_ROCE_LINK_TABLE_BA1_S,
1827 t >> 44);
1828
1829 if (i < (pg_num - 1))
1830 roce_set_field(entry[i].blk_ba1_nxt_ptr,
1831 HNS_ROCE_LINK_TABLE_NXT_PTR_M,
1832 HNS_ROCE_LINK_TABLE_NXT_PTR_S,
1833 i + 1);
1834 }
1835 link_tbl->npages = pg_num;
1836 link_tbl->pg_sz = buf_chk_sz;
1837
1838 return hns_roce_config_link_table(hr_dev, type);
1839
1840err_alloc_buf_failed:
1841 for (i -= 1; i >= 0; i--)
1842 dma_free_coherent(dev, buf_chk_sz,
1843 link_tbl->pg_list[i].buf,
1844 link_tbl->pg_list[i].map);
1845 kfree(link_tbl->pg_list);
1846
1847err_kcalloc_failed:
1848 dma_free_coherent(dev, size, link_tbl->table.buf,
1849 link_tbl->table.map);
1850
1851out:
1852 return -ENOMEM;
1853}
1854
1855static void hns_roce_free_link_table(struct hns_roce_dev *hr_dev,
1856 struct hns_roce_link_table *link_tbl)
1857{
1858 struct device *dev = hr_dev->dev;
1859 int size;
1860 int i;
1861
1862 size = link_tbl->npages * sizeof(struct hns_roce_link_table_entry);
1863
1864 for (i = 0; i < link_tbl->npages; ++i)
1865 if (link_tbl->pg_list[i].buf)
1866 dma_free_coherent(dev, link_tbl->pg_sz,
1867 link_tbl->pg_list[i].buf,
1868 link_tbl->pg_list[i].map);
1869 kfree(link_tbl->pg_list);
1870
1871 dma_free_coherent(dev, size, link_tbl->table.buf,
1872 link_tbl->table.map);
1873}
1874
/*
 * hip08-specific init: build the TSQ and TPQ link tables and pre-fault the
 * QPC/CQC timer context buffers.  The timer loops are effectively no-ops
 * unless hns_roce_query_pf_timer_resource() populated the *_timer_bt_num
 * caps (hip08 rev B only) - presumably those caps stay 0 otherwise; verify
 * against the profile path.
 *
 * Returns 0 on success; on failure everything already set up is torn down
 * in reverse order via the goto chain.
 */
static int hns_roce_v2_init(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_v2_priv *priv = hr_dev->priv;
	int qpc_count, cqc_count;
	int ret, i;

	/* TSQ includes SQ doorbell and ack doorbell */
	ret = hns_roce_init_link_table(hr_dev, TSQ_LINK_TABLE);
	if (ret) {
		dev_err(hr_dev->dev, "TSQ init failed, ret = %d.\n", ret);
		return ret;
	}

	ret = hns_roce_init_link_table(hr_dev, TPQ_LINK_TABLE);
	if (ret) {
		dev_err(hr_dev->dev, "TPQ init failed, ret = %d.\n", ret);
		goto err_tpq_init_failed;
	}

	/* Alloc memory for QPC Timer buffer space chunk*/
	for (qpc_count = 0; qpc_count < hr_dev->caps.qpc_timer_bt_num;
	     qpc_count++) {
		ret = hns_roce_table_get(hr_dev, &hr_dev->qpc_timer_table,
					 qpc_count);
		if (ret) {
			dev_err(hr_dev->dev, "QPC Timer get failed\n");
			goto err_qpc_timer_failed;
		}
	}

	/* Alloc memory for CQC Timer buffer space chunk*/
	for (cqc_count = 0; cqc_count < hr_dev->caps.cqc_timer_bt_num;
	     cqc_count++) {
		ret = hns_roce_table_get(hr_dev, &hr_dev->cqc_timer_table,
					 cqc_count);
		if (ret) {
			dev_err(hr_dev->dev, "CQC Timer get failed\n");
			goto err_cqc_timer_failed;
		}
	}

	return 0;

	/* Unwind: put only the chunks that were successfully gotten */
err_cqc_timer_failed:
	for (i = 0; i < cqc_count; i++)
		hns_roce_table_put(hr_dev, &hr_dev->cqc_timer_table, i);

err_qpc_timer_failed:
	for (i = 0; i < qpc_count; i++)
		hns_roce_table_put(hr_dev, &hr_dev->qpc_timer_table, i);

	hns_roce_free_link_table(hr_dev, &priv->tpq);

err_tpq_init_failed:
	hns_roce_free_link_table(hr_dev, &priv->tsq);

	return ret;
}
1933
/*
 * hns_roce_v2_exit() - hw-v2 teardown hook, mirror of hns_roce_v2_init().
 *
 * On hip08 revision 0x21 hardware the function-clear command is issued
 * first, then the TPQ and TSQ link tables are released in reverse order
 * of their creation.
 */
static void hns_roce_v2_exit(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_v2_priv *priv = hr_dev->priv;

	if (hr_dev->pci_dev->revision == 0x21)
		hns_roce_function_clear(hr_dev);

	hns_roce_free_link_table(hr_dev, &priv->tpq);
	hns_roce_free_link_table(hr_dev, &priv->tsq);
}
1944
f747b689
LO
1945static int hns_roce_query_mbox_status(struct hns_roce_dev *hr_dev)
1946{
1947 struct hns_roce_cmq_desc desc;
1948 struct hns_roce_mbox_status *mb_st =
1949 (struct hns_roce_mbox_status *)desc.data;
1950 enum hns_roce_cmd_return_status status;
1951
1952 hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_QUERY_MB_ST, true);
1953
1954 status = hns_roce_cmq_send(hr_dev, &desc, 1);
1955 if (status)
1956 return status;
1957
1958 return cpu_to_le32(mb_st->mb_status_hw_run);
1959}
1960
a680f2f3
WHX
1961static int hns_roce_v2_cmd_pending(struct hns_roce_dev *hr_dev)
1962{
f747b689 1963 u32 status = hns_roce_query_mbox_status(hr_dev);
a680f2f3
WHX
1964
1965 return status >> HNS_ROCE_HW_RUN_BIT_SHIFT;
1966}
1967
1968static int hns_roce_v2_cmd_complete(struct hns_roce_dev *hr_dev)
1969{
f747b689 1970 u32 status = hns_roce_query_mbox_status(hr_dev);
a680f2f3
WHX
1971
1972 return status & HNS_ROCE_HW_MB_STATUS_MASK;
1973}
1974
f747b689
LO
1975static int hns_roce_mbox_post(struct hns_roce_dev *hr_dev, u64 in_param,
1976 u64 out_param, u32 in_modifier, u8 op_modifier,
1977 u16 op, u16 token, int event)
1978{
1979 struct hns_roce_cmq_desc desc;
1980 struct hns_roce_post_mbox *mb = (struct hns_roce_post_mbox *)desc.data;
1981
1982 hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_POST_MB, false);
1983
1984 mb->in_param_l = cpu_to_le64(in_param);
1985 mb->in_param_h = cpu_to_le64(in_param) >> 32;
1986 mb->out_param_l = cpu_to_le64(out_param);
1987 mb->out_param_h = cpu_to_le64(out_param) >> 32;
1988 mb->cmd_tag = cpu_to_le32(in_modifier << 8 | op);
1989 mb->token_event_en = cpu_to_le32(event << 16 | token);
1990
1991 return hns_roce_cmq_send(hr_dev, &desc, 1);
1992}
1993
a680f2f3
WHX
/*
 * hns_roce_v2_post_mbox() - wait for the mailbox to go idle, then post a
 * command.
 *
 * Spins (with cond_resched()) until the hardware "run" bit clears or the
 * GO_BIT timeout expires; returns -EAGAIN on timeout, otherwise the result
 * of hns_roce_mbox_post().
 */
static int hns_roce_v2_post_mbox(struct hns_roce_dev *hr_dev, u64 in_param,
				 u64 out_param, u32 in_modifier, u8 op_modifier,
				 u16 op, u16 token, int event)
{
	struct device *dev = hr_dev->dev;
	unsigned long end;
	int ret;

	end = msecs_to_jiffies(HNS_ROCE_V2_GO_BIT_TIMEOUT_MSECS) + jiffies;
	while (hns_roce_v2_cmd_pending(hr_dev)) {
		if (time_after(jiffies, end)) {
			dev_dbg(dev, "jiffies=%d end=%d\n", (int)jiffies,
				(int)end);
			return -EAGAIN;
		}
		cond_resched();
	}

	ret = hns_roce_mbox_post(hr_dev, in_param, out_param, in_modifier,
				 op_modifier, op, token, event);
	if (ret)
		dev_err(dev, "Post mailbox fail(%d)\n", ret);

	return ret;
}
2019
/*
 * hns_roce_v2_chk_mbox() - poll for completion of a previously posted
 * mailbox command.
 *
 * Waits up to @timeout ms for the hardware run bit to clear, then checks
 * the completion status.  Returns 0 on success (status 0x1), -ETIMEDOUT
 * if the command never finished, CMD_RST_PRC_EBUSY if a reset is in
 * progress, and -EBUSY for any other failure status.
 */
static int hns_roce_v2_chk_mbox(struct hns_roce_dev *hr_dev,
				unsigned long timeout)
{
	struct device *dev = hr_dev->dev;
	unsigned long end = 0;
	u32 status;

	end = msecs_to_jiffies(timeout) + jiffies;
	while (hns_roce_v2_cmd_pending(hr_dev) && time_before(jiffies, end))
		cond_resched();

	if (hns_roce_v2_cmd_pending(hr_dev)) {
		dev_err(dev, "[cmd_poll]hw run cmd TIMEDOUT!\n");
		return -ETIMEDOUT;
	}

	status = hns_roce_v2_cmd_complete(hr_dev);
	if (status != 0x1) {
		/* A reset-busy status is propagated for the caller to retry */
		if (status == CMD_RST_PRC_EBUSY)
			return status;

		dev_err(dev, "mailbox status 0x%x!\n", status);
		return -EBUSY;
	}

	return 0;
}
2047
/*
 * hns_roce_config_sgid_table() - program one SGID table entry via CMQ.
 *
 * Copies the 16-byte GID into the descriptor as four little-endian 32-bit
 * words along with the table index and SGID type, then sends the
 * CFG_SGID_TB command.
 */
static int hns_roce_config_sgid_table(struct hns_roce_dev *hr_dev,
				      int gid_index, const union ib_gid *gid,
				      enum hns_roce_sgid_type sgid_type)
{
	struct hns_roce_cmq_desc desc;
	struct hns_roce_cfg_sgid_tb *sgid_tb =
		(struct hns_roce_cfg_sgid_tb *)desc.data;
	u32 *p;

	hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_SGID_TB, false);

	roce_set_field(sgid_tb->table_idx_rsv,
		       CFG_SGID_TB_TABLE_IDX_M,
		       CFG_SGID_TB_TABLE_IDX_S, gid_index);
	roce_set_field(sgid_tb->vf_sgid_type_rsv,
		       CFG_SGID_TB_VF_SGID_TYPE_M,
		       CFG_SGID_TB_VF_SGID_TYPE_S, sgid_type);

	/* gid->raw is a byte array; read it 4 bytes at a time */
	p = (u32 *)&gid->raw[0];
	sgid_tb->vf_sgid_l = cpu_to_le32(*p);

	p = (u32 *)&gid->raw[4];
	sgid_tb->vf_sgid_ml = cpu_to_le32(*p);

	p = (u32 *)&gid->raw[8];
	sgid_tb->vf_sgid_mh = cpu_to_le32(*p);

	p = (u32 *)&gid->raw[0xc];
	sgid_tb->vf_sgid_h = cpu_to_le32(*p);

	return hns_roce_cmq_send(hr_dev, &desc, 1);
}
2080
b5ff0f61 2081static int hns_roce_v2_set_gid(struct hns_roce_dev *hr_dev, u8 port,
f4df9a7c 2082 int gid_index, const union ib_gid *gid,
b5ff0f61 2083 const struct ib_gid_attr *attr)
7afddafa 2084{
b5ff0f61 2085 enum hns_roce_sgid_type sgid_type = GID_TYPE_FLAG_ROCE_V1;
4db134a3 2086 int ret;
7afddafa 2087
b5ff0f61
WHX
2088 if (!gid || !attr)
2089 return -EINVAL;
2090
2091 if (attr->gid_type == IB_GID_TYPE_ROCE)
2092 sgid_type = GID_TYPE_FLAG_ROCE_V1;
2093
2094 if (attr->gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) {
2095 if (ipv6_addr_v4mapped((void *)gid))
2096 sgid_type = GID_TYPE_FLAG_ROCE_V2_IPV4;
2097 else
2098 sgid_type = GID_TYPE_FLAG_ROCE_V2_IPV6;
2099 }
2100
4db134a3 2101 ret = hns_roce_config_sgid_table(hr_dev, gid_index, gid, sgid_type);
2102 if (ret)
2103 dev_err(hr_dev->dev, "Configure sgid table failed(%d)!\n", ret);
b5ff0f61 2104
4db134a3 2105 return ret;
7afddafa
WHX
2106}
2107
a74dc41d
WHX
2108static int hns_roce_v2_set_mac(struct hns_roce_dev *hr_dev, u8 phy_port,
2109 u8 *addr)
7afddafa 2110{
e8e8b652 2111 struct hns_roce_cmq_desc desc;
2112 struct hns_roce_cfg_smac_tb *smac_tb =
2113 (struct hns_roce_cfg_smac_tb *)desc.data;
7afddafa
WHX
2114 u16 reg_smac_h;
2115 u32 reg_smac_l;
e8e8b652 2116
2117 hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_SMAC_TB, false);
7afddafa
WHX
2118
2119 reg_smac_l = *(u32 *)(&addr[0]);
e8e8b652 2120 reg_smac_h = *(u16 *)(&addr[4]);
7afddafa 2121
e8e8b652 2122 memset(smac_tb, 0, sizeof(*smac_tb));
2123 roce_set_field(smac_tb->tb_idx_rsv,
2124 CFG_SMAC_TB_IDX_M,
2125 CFG_SMAC_TB_IDX_S, phy_port);
2126 roce_set_field(smac_tb->vf_smac_h_rsv,
2127 CFG_SMAC_TB_VF_SMAC_H_M,
2128 CFG_SMAC_TB_VF_SMAC_H_S, reg_smac_h);
2129 smac_tb->vf_smac_l = reg_smac_l;
a74dc41d 2130
e8e8b652 2131 return hns_roce_cmq_send(hr_dev, &desc, 1);
7afddafa
WHX
2132}
2133
ca088320
YL
/*
 * set_mtpt_pbl() - fill the PBL-related fields of an MPT entry from the
 * MR's umem.
 *
 * Collects DMA page addresses (shifted by 6, per the hardware format used
 * here) into a scratch page and records the first two directly in the MPT
 * entry, along with the PBL base address and page-size fields.
 * Returns 0 on success or -ENOMEM if the scratch page cannot be allocated.
 */
static int set_mtpt_pbl(struct hns_roce_v2_mpt_entry *mpt_entry,
			struct hns_roce_mr *mr)
{
	struct sg_dma_page_iter sg_iter;
	u64 page_addr;
	u64 *pages;
	int i;

	mpt_entry->pbl_size = cpu_to_le32(mr->pbl_size);
	/* PBL base address is recorded >> 3, split into low/high 32 bits */
	mpt_entry->pbl_ba_l = cpu_to_le32(lower_32_bits(mr->pbl_ba >> 3));
	roce_set_field(mpt_entry->byte_48_mode_ba,
		       V2_MPT_BYTE_48_PBL_BA_H_M, V2_MPT_BYTE_48_PBL_BA_H_S,
		       upper_32_bits(mr->pbl_ba >> 3));

	pages = (u64 *)__get_free_page(GFP_KERNEL);
	if (!pages)
		return -ENOMEM;

	i = 0;
	for_each_sg_dma_page(mr->umem->sg_head.sgl, &sg_iter, mr->umem->nmap, 0) {
		page_addr = sg_page_iter_dma_address(&sg_iter);
		pages[i] = page_addr >> 6;

		/* Record the first 2 entry directly to MTPT table */
		if (i >= HNS_ROCE_V2_MAX_INNER_MTPT_NUM - 1)
			goto found;
		i++;
	}
found:
	mpt_entry->pa0_l = cpu_to_le32(lower_32_bits(pages[0]));
	roce_set_field(mpt_entry->byte_56_pa0_h, V2_MPT_BYTE_56_PA0_H_M,
		       V2_MPT_BYTE_56_PA0_H_S, upper_32_bits(pages[0]));

	mpt_entry->pa1_l = cpu_to_le32(lower_32_bits(pages[1]));
	roce_set_field(mpt_entry->byte_64_buf_pa1, V2_MPT_BYTE_64_PA1_H_M,
		       V2_MPT_BYTE_64_PA1_H_S, upper_32_bits(pages[1]));
	roce_set_field(mpt_entry->byte_64_buf_pa1,
		       V2_MPT_BYTE_64_PBL_BUF_PG_SZ_M,
		       V2_MPT_BYTE_64_PBL_BUF_PG_SZ_S,
		       mr->pbl_buf_pg_sz + PG_SHIFT_OFFSET);

	free_page((unsigned long)pages);

	return 0;
}
2179
/*
 * hns_roce_v2_write_mtpt() - build an MPT entry for a regular MR into the
 * mailbox buffer at @mb_buf.
 *
 * Sets the state, hop number, page-size, PD, access-flag, key, address and
 * length fields; DMA MRs skip the PBL setup.  Returns 0 or the error from
 * set_mtpt_pbl().
 */
static int hns_roce_v2_write_mtpt(void *mb_buf, struct hns_roce_mr *mr,
				  unsigned long mtpt_idx)
{
	struct hns_roce_v2_mpt_entry *mpt_entry;
	int ret;

	mpt_entry = mb_buf;
	memset(mpt_entry, 0, sizeof(*mpt_entry));

	roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_MPT_ST_M,
		       V2_MPT_BYTE_4_MPT_ST_S, V2_MPT_ST_VALID);
	/* HNS_ROCE_HOP_NUM_0 is encoded as 0 in the hardware field */
	roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PBL_HOP_NUM_M,
		       V2_MPT_BYTE_4_PBL_HOP_NUM_S, mr->pbl_hop_num ==
		       HNS_ROCE_HOP_NUM_0 ? 0 : mr->pbl_hop_num);
	roce_set_field(mpt_entry->byte_4_pd_hop_st,
		       V2_MPT_BYTE_4_PBL_BA_PG_SZ_M,
		       V2_MPT_BYTE_4_PBL_BA_PG_SZ_S,
		       mr->pbl_ba_pg_sz + PG_SHIFT_OFFSET);
	roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PD_M,
		       V2_MPT_BYTE_4_PD_S, mr->pd);

	/* Translate IB access flags into the individual enable bits */
	roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RA_EN_S, 0);
	roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_R_INV_EN_S, 0);
	roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_L_INV_EN_S, 1);
	roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_BIND_EN_S,
		     (mr->access & IB_ACCESS_MW_BIND ? 1 : 0));
	roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_ATOMIC_EN_S,
		     mr->access & IB_ACCESS_REMOTE_ATOMIC ? 1 : 0);
	roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RR_EN_S,
		     (mr->access & IB_ACCESS_REMOTE_READ ? 1 : 0));
	roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RW_EN_S,
		     (mr->access & IB_ACCESS_REMOTE_WRITE ? 1 : 0));
	roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_LW_EN_S,
		     (mr->access & IB_ACCESS_LOCAL_WRITE ? 1 : 0));

	/* PA mode for anything that is not a normal MR */
	roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_PA_S,
		     mr->type == MR_TYPE_MR ? 0 : 1);
	roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_INNER_PA_VLD_S,
		     1);

	mpt_entry->len_l = cpu_to_le32(lower_32_bits(mr->size));
	mpt_entry->len_h = cpu_to_le32(upper_32_bits(mr->size));
	mpt_entry->lkey = cpu_to_le32(mr->key);
	mpt_entry->va_l = cpu_to_le32(lower_32_bits(mr->iova));
	mpt_entry->va_h = cpu_to_le32(upper_32_bits(mr->iova));

	/* DMA MRs have no page list to program */
	if (mr->type == MR_TYPE_DMA)
		return 0;

	ret = set_mtpt_pbl(mpt_entry, mr);

	return ret;
}
2233
a2c80b7b
WHX
/*
 * hns_roce_v2_rereg_write_mtpt() - update an existing MPT entry for
 * ib_rereg_mr.
 *
 * Only the aspects selected by @flags are rewritten: PD, access flags,
 * and/or the translation (iova/size and the PBL).  The software mr state
 * is kept in sync with the entry.  Returns 0 or the error from
 * set_mtpt_pbl().
 */
static int hns_roce_v2_rereg_write_mtpt(struct hns_roce_dev *hr_dev,
					struct hns_roce_mr *mr, int flags,
					u32 pdn, int mr_access_flags, u64 iova,
					u64 size, void *mb_buf)
{
	struct hns_roce_v2_mpt_entry *mpt_entry = mb_buf;
	int ret = 0;

	roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_MPT_ST_M,
		       V2_MPT_BYTE_4_MPT_ST_S, V2_MPT_ST_VALID);

	if (flags & IB_MR_REREG_PD) {
		roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PD_M,
			       V2_MPT_BYTE_4_PD_S, pdn);
		mr->pd = pdn;
	}

	if (flags & IB_MR_REREG_ACCESS) {
		roce_set_bit(mpt_entry->byte_8_mw_cnt_en,
			     V2_MPT_BYTE_8_BIND_EN_S,
			     (mr_access_flags & IB_ACCESS_MW_BIND ? 1 : 0));
		roce_set_bit(mpt_entry->byte_8_mw_cnt_en,
			     V2_MPT_BYTE_8_ATOMIC_EN_S,
			     mr_access_flags & IB_ACCESS_REMOTE_ATOMIC ? 1 : 0);
		roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RR_EN_S,
			     mr_access_flags & IB_ACCESS_REMOTE_READ ? 1 : 0);
		roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RW_EN_S,
			     mr_access_flags & IB_ACCESS_REMOTE_WRITE ? 1 : 0);
		roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_LW_EN_S,
			     mr_access_flags & IB_ACCESS_LOCAL_WRITE ? 1 : 0);
	}

	if (flags & IB_MR_REREG_TRANS) {
		mpt_entry->va_l = cpu_to_le32(lower_32_bits(iova));
		mpt_entry->va_h = cpu_to_le32(upper_32_bits(iova));
		mpt_entry->len_l = cpu_to_le32(lower_32_bits(size));
		mpt_entry->len_h = cpu_to_le32(upper_32_bits(size));

		mr->iova = iova;
		mr->size = size;

		ret = set_mtpt_pbl(mpt_entry, mr);
	}

	return ret;
}
2280
68a997c5
YL
/*
 * hns_roce_v2_frmr_write_mtpt() - build an MPT entry for a fast-register
 * MR (FRMR).
 *
 * The entry is created in the FREE state with the FRE bit set; the actual
 * registration happens later via a fast-register work request.  Always
 * returns 0.
 */
static int hns_roce_v2_frmr_write_mtpt(void *mb_buf, struct hns_roce_mr *mr)
{
	struct hns_roce_v2_mpt_entry *mpt_entry;

	mpt_entry = mb_buf;
	memset(mpt_entry, 0, sizeof(*mpt_entry));

	roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_MPT_ST_M,
		       V2_MPT_BYTE_4_MPT_ST_S, V2_MPT_ST_FREE);
	roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PBL_HOP_NUM_M,
		       V2_MPT_BYTE_4_PBL_HOP_NUM_S, 1);
	roce_set_field(mpt_entry->byte_4_pd_hop_st,
		       V2_MPT_BYTE_4_PBL_BA_PG_SZ_M,
		       V2_MPT_BYTE_4_PBL_BA_PG_SZ_S,
		       mr->pbl_ba_pg_sz + PG_SHIFT_OFFSET);
	roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PD_M,
		       V2_MPT_BYTE_4_PD_S, mr->pd);

	roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RA_EN_S, 1);
	roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_R_INV_EN_S, 1);
	roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_L_INV_EN_S, 1);

	/* FRE marks the entry as usable by fast-register WRs */
	roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_FRE_S, 1);
	roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_PA_S, 0);
	roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_MR_MW_S, 0);
	roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_BPD_S, 1);

	mpt_entry->pbl_size = cpu_to_le32(mr->pbl_size);

	/* PBL base address is recorded >> 3, split into low/high 32 bits */
	mpt_entry->pbl_ba_l = cpu_to_le32(lower_32_bits(mr->pbl_ba >> 3));
	roce_set_field(mpt_entry->byte_48_mode_ba, V2_MPT_BYTE_48_PBL_BA_H_M,
		       V2_MPT_BYTE_48_PBL_BA_H_S,
		       upper_32_bits(mr->pbl_ba >> 3));

	roce_set_field(mpt_entry->byte_64_buf_pa1,
		       V2_MPT_BYTE_64_PBL_BUF_PG_SZ_M,
		       V2_MPT_BYTE_64_PBL_BUF_PG_SZ_S,
		       mr->pbl_buf_pg_sz + PG_SHIFT_OFFSET);

	return 0;
}
2322
c7c28191
YL
/*
 * hns_roce_v2_mw_write_mtpt() - build an MPT entry for a memory window.
 *
 * The entry is created in the FREE state with the MW bit set; the BQP bit
 * distinguishes type-1 from type-2 windows.  Always returns 0.
 */
static int hns_roce_v2_mw_write_mtpt(void *mb_buf, struct hns_roce_mw *mw)
{
	struct hns_roce_v2_mpt_entry *mpt_entry;

	mpt_entry = mb_buf;
	memset(mpt_entry, 0, sizeof(*mpt_entry));

	roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_MPT_ST_M,
		       V2_MPT_BYTE_4_MPT_ST_S, V2_MPT_ST_FREE);
	roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PD_M,
		       V2_MPT_BYTE_4_PD_S, mw->pdn);
	/* HNS_ROCE_HOP_NUM_0 is encoded as 0 in the hardware field */
	roce_set_field(mpt_entry->byte_4_pd_hop_st,
		       V2_MPT_BYTE_4_PBL_HOP_NUM_M,
		       V2_MPT_BYTE_4_PBL_HOP_NUM_S,
		       mw->pbl_hop_num == HNS_ROCE_HOP_NUM_0 ?
		       0 : mw->pbl_hop_num);
	roce_set_field(mpt_entry->byte_4_pd_hop_st,
		       V2_MPT_BYTE_4_PBL_BA_PG_SZ_M,
		       V2_MPT_BYTE_4_PBL_BA_PG_SZ_S,
		       mw->pbl_ba_pg_sz + PG_SHIFT_OFFSET);

	roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_R_INV_EN_S, 1);
	roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_L_INV_EN_S, 1);

	roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_PA_S, 0);
	roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_MR_MW_S, 1);
	roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_BPD_S, 1);
	/* BQP: 0 for type-1 windows, 1 for type-2 (QP-bound) windows */
	roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_BQP_S,
		     mw->ibmw.type == IB_MW_TYPE_1 ? 0 : 1);

	roce_set_field(mpt_entry->byte_64_buf_pa1,
		       V2_MPT_BYTE_64_PBL_BUF_PG_SZ_M,
		       V2_MPT_BYTE_64_PBL_BUF_PG_SZ_S,
		       mw->pbl_buf_pg_sz + PG_SHIFT_OFFSET);

	mpt_entry->lkey = cpu_to_le32(mw->rkey);

	return 0;
}
2362
93aa2187
WHX
2363static void *get_cqe_v2(struct hns_roce_cq *hr_cq, int n)
2364{
2365 return hns_roce_buf_offset(&hr_cq->hr_buf.hr_buf,
2366 n * HNS_ROCE_V2_CQE_ENTRY_SIZE);
2367}
2368
/*
 * get_sw_cqe_v2() - return the CQE at index @n if it is owned by software.
 *
 * The owner bit flips each time the CQ ring wraps; a CQE is valid for
 * software when its owner bit differs from the phase derived from the
 * consumer index (bit (cqe+1) of @n).  Returns NULL if the CQE still
 * belongs to hardware.
 */
static void *get_sw_cqe_v2(struct hns_roce_cq *hr_cq, int n)
{
	struct hns_roce_v2_cqe *cqe = get_cqe_v2(hr_cq, n & hr_cq->ib_cq.cqe);

	/* Get cqe when Owner bit is Conversely with the MSB of cons_idx */
	return (roce_get_bit(cqe->byte_4, V2_CQE_BYTE_4_OWNER_S) ^
		!!(n & (hr_cq->ib_cq.cqe + 1))) ? cqe : NULL;
}
2377
/* Return the next software-owned CQE at the current consumer index, if any. */
static struct hns_roce_v2_cqe *next_cqe_sw_v2(struct hns_roce_cq *hr_cq)
{
	return get_sw_cqe_v2(hr_cq, hr_cq->cons_index);
}
2382
c7bcb134
LO
/* Return a pointer to the n-th WQE in the SRQ buffer. */
static void *get_srq_wqe(struct hns_roce_srq *srq, int n)
{
	return hns_roce_buf_offset(&srq->buf, n << srq->wqe_shift);
}
2387
/*
 * hns_roce_free_srq_wqe() - return one SRQ WQE to the free pool.
 *
 * Clears the index-queue bitmap bit for @wqe_index and advances the SRQ
 * tail under srq->lock.
 */
static void hns_roce_free_srq_wqe(struct hns_roce_srq *srq, int wqe_index)
{
	/* always called with interrupts disabled. */
	spin_lock(&srq->lock);

	bitmap_clear(srq->idx_que.bitmap, wqe_index, 1);
	srq->tail++;

	spin_unlock(&srq->lock);
}
2398
93aa2187
WHX
/*
 * hns_roce_v2_cq_set_ci() - publish the consumer index to the CQ doorbell
 * record (low 24 bits only).
 */
static void hns_roce_v2_cq_set_ci(struct hns_roce_cq *hr_cq, u32 cons_index)
{
	*hr_cq->set_ci_db = cons_index & 0xffffff;
}
2403
926a01dc
WHX
/*
 * __hns_roce_v2_cq_clean() - purge all CQEs of a given QP from the CQ.
 *
 * Walks forward to find the last software-owned CQE, then sweeps
 * backwards: CQEs belonging to @qpn are dropped (their SRQ WQEs returned,
 * if @srq is set and the CQE is a receive completion), and surviving CQEs
 * are compacted forward in their place, preserving each destination's
 * owner bit.  Caller must hold the CQ lock (see hns_roce_v2_cq_clean()).
 */
static void __hns_roce_v2_cq_clean(struct hns_roce_cq *hr_cq, u32 qpn,
				   struct hns_roce_srq *srq)
{
	struct hns_roce_v2_cqe *cqe, *dest;
	u32 prod_index;
	int nfreed = 0;
	int wqe_index;
	u8 owner_bit;

	/* Find the producer side: one past the last software-owned CQE */
	for (prod_index = hr_cq->cons_index; get_sw_cqe_v2(hr_cq, prod_index);
	     ++prod_index) {
		if (prod_index == hr_cq->cons_index + hr_cq->ib_cq.cqe)
			break;
	}

	/*
	 * Now backwards through the CQ, removing CQ entries
	 * that match our QP by overwriting them with next entries.
	 */
	while ((int) --prod_index - (int) hr_cq->cons_index >= 0) {
		cqe = get_cqe_v2(hr_cq, prod_index & hr_cq->ib_cq.cqe);
		if ((roce_get_field(cqe->byte_16, V2_CQE_BYTE_16_LCL_QPN_M,
				    V2_CQE_BYTE_16_LCL_QPN_S) &
				    HNS_ROCE_V2_CQE_QPN_MASK) == qpn) {
			if (srq &&
			    roce_get_bit(cqe->byte_4, V2_CQE_BYTE_4_S_R_S)) {
				wqe_index = roce_get_field(cqe->byte_4,
						V2_CQE_BYTE_4_WQE_INDX_M,
						V2_CQE_BYTE_4_WQE_INDX_S);
				hns_roce_free_srq_wqe(srq, wqe_index);
			}
			++nfreed;
		} else if (nfreed) {
			/* Shift this CQE forward over the freed slots */
			dest = get_cqe_v2(hr_cq, (prod_index + nfreed) &
					  hr_cq->ib_cq.cqe);
			owner_bit = roce_get_bit(dest->byte_4,
						 V2_CQE_BYTE_4_OWNER_S);
			memcpy(dest, cqe, sizeof(*cqe));
			roce_set_bit(dest->byte_4, V2_CQE_BYTE_4_OWNER_S,
				     owner_bit);
		}
	}

	if (nfreed) {
		hr_cq->cons_index += nfreed;
		/*
		 * Make sure update of buffer contents is done before
		 * updating consumer index.
		 */
		wmb();
		hns_roce_v2_cq_set_ci(hr_cq, hr_cq->cons_index);
	}
}
2457
/* Locked wrapper around __hns_roce_v2_cq_clean(). */
static void hns_roce_v2_cq_clean(struct hns_roce_cq *hr_cq, u32 qpn,
				 struct hns_roce_srq *srq)
{
	spin_lock_irq(&hr_cq->lock);
	__hns_roce_v2_cq_clean(hr_cq, qpn, srq);
	spin_unlock_irq(&hr_cq->lock);
}
2465
93aa2187
WHX
/*
 * hns_roce_v2_write_cqc() - fill the CQ context in the mailbox buffer.
 *
 * @mtts holds the first two MTT page addresses of the CQE buffer;
 * @dma_handle is the MTT base address; @nent is the (power-of-two) number
 * of CQEs; @vector selects the completion EQ.
 *
 * NOTE(review): several fields mix roce_set_field() with an extra
 * cpu_to_le32() on either the whole word or the value (e.g.
 * byte_4_pg_ceqn, the CUR/NXT block addresses) — presumably only correct
 * on little-endian hosts; verify against the sparse-endianness cleanup
 * upstream before relying on big-endian behavior.
 */
static void hns_roce_v2_write_cqc(struct hns_roce_dev *hr_dev,
				  struct hns_roce_cq *hr_cq, void *mb_buf,
				  u64 *mtts, dma_addr_t dma_handle, int nent,
				  u32 vector)
{
	struct hns_roce_v2_cq_context *cq_context;

	cq_context = mb_buf;
	memset(cq_context, 0, sizeof(*cq_context));

	roce_set_field(cq_context->byte_4_pg_ceqn, V2_CQC_BYTE_4_CQ_ST_M,
		       V2_CQC_BYTE_4_CQ_ST_S, V2_CQ_STATE_VALID);
	roce_set_field(cq_context->byte_4_pg_ceqn, V2_CQC_BYTE_4_ARM_ST_M,
		       V2_CQC_BYTE_4_ARM_ST_S, REG_NXT_CEQE);
	roce_set_field(cq_context->byte_4_pg_ceqn, V2_CQC_BYTE_4_SHIFT_M,
		       V2_CQC_BYTE_4_SHIFT_S, ilog2((unsigned int)nent));
	roce_set_field(cq_context->byte_4_pg_ceqn, V2_CQC_BYTE_4_CEQN_M,
		       V2_CQC_BYTE_4_CEQN_S, vector);
	cq_context->byte_4_pg_ceqn = cpu_to_le32(cq_context->byte_4_pg_ceqn);

	roce_set_field(cq_context->byte_8_cqn, V2_CQC_BYTE_8_CQN_M,
		       V2_CQC_BYTE_8_CQN_S, hr_cq->cqn);

	/* Current CQE block address, split at bit 32 after >> PAGE_ADDR_SHIFT */
	cq_context->cqe_cur_blk_addr = (u32)(mtts[0] >> PAGE_ADDR_SHIFT);
	cq_context->cqe_cur_blk_addr =
				cpu_to_le32(cq_context->cqe_cur_blk_addr);

	roce_set_field(cq_context->byte_16_hop_addr,
		       V2_CQC_BYTE_16_CQE_CUR_BLK_ADDR_M,
		       V2_CQC_BYTE_16_CQE_CUR_BLK_ADDR_S,
		       cpu_to_le32((mtts[0]) >> (32 + PAGE_ADDR_SHIFT)));
	roce_set_field(cq_context->byte_16_hop_addr,
		       V2_CQC_BYTE_16_CQE_HOP_NUM_M,
		       V2_CQC_BYTE_16_CQE_HOP_NUM_S, hr_dev->caps.cqe_hop_num ==
		       HNS_ROCE_HOP_NUM_0 ? 0 : hr_dev->caps.cqe_hop_num);

	/* Next CQE block address, same split as above */
	cq_context->cqe_nxt_blk_addr = (u32)(mtts[1] >> PAGE_ADDR_SHIFT);
	roce_set_field(cq_context->byte_24_pgsz_addr,
		       V2_CQC_BYTE_24_CQE_NXT_BLK_ADDR_M,
		       V2_CQC_BYTE_24_CQE_NXT_BLK_ADDR_S,
		       cpu_to_le32((mtts[1]) >> (32 + PAGE_ADDR_SHIFT)));
	roce_set_field(cq_context->byte_24_pgsz_addr,
		       V2_CQC_BYTE_24_CQE_BA_PG_SZ_M,
		       V2_CQC_BYTE_24_CQE_BA_PG_SZ_S,
		       hr_dev->caps.cqe_ba_pg_sz + PG_SHIFT_OFFSET);
	roce_set_field(cq_context->byte_24_pgsz_addr,
		       V2_CQC_BYTE_24_CQE_BUF_PG_SZ_M,
		       V2_CQC_BYTE_24_CQE_BUF_PG_SZ_S,
		       hr_dev->caps.cqe_buf_pg_sz + PG_SHIFT_OFFSET);

	/* CQE base address is recorded >> 3, split at bit 32 */
	cq_context->cqe_ba = (u32)(dma_handle >> 3);

	roce_set_field(cq_context->byte_40_cqe_ba, V2_CQC_BYTE_40_CQE_BA_M,
		       V2_CQC_BYTE_40_CQE_BA_S, (dma_handle >> (32 + 3)));

	if (hr_cq->db_en)
		roce_set_bit(cq_context->byte_44_db_record,
			     V2_CQC_BYTE_44_DB_RECORD_EN_S, 1);

	/* Doorbell-record DMA address, low part recorded >> 1 */
	roce_set_field(cq_context->byte_44_db_record,
		       V2_CQC_BYTE_44_DB_RECORD_ADDR_M,
		       V2_CQC_BYTE_44_DB_RECORD_ADDR_S,
		       ((u32)hr_cq->db.dma) >> 1);
	cq_context->db_record_addr = hr_cq->db.dma >> 32;

	/* Default CQ moderation: burst count and interval */
	roce_set_field(cq_context->byte_56_cqe_period_maxcnt,
		       V2_CQC_BYTE_56_CQ_MAX_CNT_M,
		       V2_CQC_BYTE_56_CQ_MAX_CNT_S,
		       HNS_ROCE_V2_CQ_DEFAULT_BURST_NUM);
	roce_set_field(cq_context->byte_56_cqe_period_maxcnt,
		       V2_CQC_BYTE_56_CQ_PERIOD_M,
		       V2_CQC_BYTE_56_CQ_PERIOD_S,
		       HNS_ROCE_V2_CQ_DEFAULT_INTERVAL);
}
2540
/*
 * hns_roce_v2_req_notify_cq() - req_notify_cq hook: arm the CQ by ringing
 * a notification doorbell.
 *
 * Builds a two-word doorbell carrying the CQ number, the notify command,
 * the masked consumer index, the command sequence number, and the
 * solicited-only flag, then writes it to the CQ doorbell register.
 */
static int hns_roce_v2_req_notify_cq(struct ib_cq *ibcq,
				     enum ib_cq_notify_flags flags)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibcq->device);
	struct hns_roce_cq *hr_cq = to_hr_cq(ibcq);
	u32 notification_flag;
	u32 doorbell[2];

	doorbell[0] = 0;
	doorbell[1] = 0;

	notification_flag = (flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED ?
			     V2_CQ_DB_REQ_NOT : V2_CQ_DB_REQ_NOT_SOL;
	/*
	 * flags = 0; Notification Flag = 1, next
	 * flags = 1; Notification Flag = 0, solocited
	 */
	roce_set_field(doorbell[0], V2_CQ_DB_BYTE_4_TAG_M, V2_DB_BYTE_4_TAG_S,
		       hr_cq->cqn);
	roce_set_field(doorbell[0], V2_CQ_DB_BYTE_4_CMD_M, V2_DB_BYTE_4_CMD_S,
		       HNS_ROCE_V2_CQ_DB_NTR);
	roce_set_field(doorbell[1], V2_CQ_DB_PARAMETER_CONS_IDX_M,
		       V2_CQ_DB_PARAMETER_CONS_IDX_S,
		       hr_cq->cons_index & ((hr_cq->cq_depth << 1) - 1));
	roce_set_field(doorbell[1], V2_CQ_DB_PARAMETER_CMD_SN_M,
		       V2_CQ_DB_PARAMETER_CMD_SN_S, hr_cq->arm_sn & 0x3);
	roce_set_bit(doorbell[1], V2_CQ_DB_PARAMETER_NOTIFY_S,
		     notification_flag);

	hns_roce_write64(hr_dev, doorbell, hr_cq->cq_db_l);

	return 0;
}
2574
/*
 * hns_roce_handle_recv_inl_wqe() - copy inline receive data from the RQ
 * WQE buffer into the poster's scatter list.
 *
 * The WQE index from the CQE selects the recorded inline SGE list; data
 * is copied SGE by SGE until wc->byte_len bytes are consumed.  If the
 * SGEs cannot hold all the data, wc->status is set to IB_WC_LOC_LEN_ERR
 * and -EAGAIN is returned.
 */
static int hns_roce_handle_recv_inl_wqe(struct hns_roce_v2_cqe *cqe,
					struct hns_roce_qp **cur_qp,
					struct ib_wc *wc)
{
	struct hns_roce_rinl_sge *sge_list;
	u32 wr_num, wr_cnt, sge_num;
	u32 sge_cnt, data_len, size;
	void *wqe_buf;

	wr_num = roce_get_field(cqe->byte_4, V2_CQE_BYTE_4_WQE_INDX_M,
				V2_CQE_BYTE_4_WQE_INDX_S) & 0xffff;
	wr_cnt = wr_num & ((*cur_qp)->rq.wqe_cnt - 1);

	sge_list = (*cur_qp)->rq_inl_buf.wqe_list[wr_cnt].sg_list;
	sge_num = (*cur_qp)->rq_inl_buf.wqe_list[wr_cnt].sge_cnt;
	wqe_buf = get_recv_wqe(*cur_qp, wr_cnt);
	data_len = wc->byte_len;

	for (sge_cnt = 0; (sge_cnt < sge_num) && (data_len); sge_cnt++) {
		size = min(sge_list[sge_cnt].len, data_len);
		memcpy((void *)sge_list[sge_cnt].addr, wqe_buf, size);

		data_len -= size;
		wqe_buf += size;
	}

	/* Data left over means the posted SGEs were too small */
	if (data_len) {
		wc->status = IB_WC_LOC_LEN_ERR;
		return -EAGAIN;
	}

	return 0;
}
2608
93aa2187
WHX
/*
 * hns_roce_v2_poll_one() - consume one CQE and translate it into an ib_wc.
 *
 * @cur_qp caches the QP of the previous CQE so repeated completions on
 * the same QP skip the QP lookup.  Advances the CQ consumer index and the
 * matching SQ/RQ/SRQ tail.  Returns -EAGAIN when no software-owned CQE is
 * available, -EINVAL for an unknown QPN, and otherwise 0 (or the result
 * of moving the QP to the error state on a non-flush error completion).
 * Caller must hold hr_cq->lock.
 */
static int hns_roce_v2_poll_one(struct hns_roce_cq *hr_cq,
				struct hns_roce_qp **cur_qp, struct ib_wc *wc)
{
	struct hns_roce_srq *srq = NULL;
	struct hns_roce_dev *hr_dev;
	struct hns_roce_v2_cqe *cqe;
	struct hns_roce_qp *hr_qp;
	struct hns_roce_wq *wq;
	struct ib_qp_attr attr;
	int attr_mask;
	int is_send;
	u16 wqe_ctr;
	u32 opcode;
	u32 status;
	int qpn;
	int ret;

	/* Find cqe according to consumer index */
	cqe = next_cqe_sw_v2(hr_cq);
	if (!cqe)
		return -EAGAIN;

	++hr_cq->cons_index;
	/* Order the ownership check above before reading the CQE contents */
	rmb();

	/* 0->SQ, 1->RQ */
	is_send = !roce_get_bit(cqe->byte_4, V2_CQE_BYTE_4_S_R_S);

	qpn = roce_get_field(cqe->byte_16, V2_CQE_BYTE_16_LCL_QPN_M,
			     V2_CQE_BYTE_16_LCL_QPN_S);

	if (!*cur_qp || (qpn & HNS_ROCE_V2_CQE_QPN_MASK) != (*cur_qp)->qpn) {
		hr_dev = to_hr_dev(hr_cq->ib_cq.device);
		hr_qp = __hns_roce_qp_lookup(hr_dev, qpn);
		if (unlikely(!hr_qp)) {
			dev_err(hr_dev->dev, "CQ %06lx with entry for unknown QPN %06x\n",
				hr_cq->cqn, (qpn & HNS_ROCE_V2_CQE_QPN_MASK));
			return -EINVAL;
		}
		*cur_qp = hr_qp;
	}

	wc->qp = &(*cur_qp)->ibqp;
	wc->vendor_err = 0;

	/* Advance the tail of the queue this completion belongs to */
	if (is_send) {
		wq = &(*cur_qp)->sq;
		if ((*cur_qp)->sq_signal_bits) {
			/*
			 * If sg_signal_bit is 1,
			 * firstly tail pointer updated to wqe
			 * which current cqe correspond to
			 */
			wqe_ctr = (u16)roce_get_field(cqe->byte_4,
						      V2_CQE_BYTE_4_WQE_INDX_M,
						      V2_CQE_BYTE_4_WQE_INDX_S);
			wq->tail += (wqe_ctr - (u16)wq->tail) &
				    (wq->wqe_cnt - 1);
		}

		wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
		++wq->tail;
	} else if ((*cur_qp)->ibqp.srq) {
		srq = to_hr_srq((*cur_qp)->ibqp.srq);
		/*
		 * NOTE(review): le16_to_cpu() is applied to the result of
		 * roce_get_field() here but nowhere else in this function —
		 * verify whether the conversion is actually needed.
		 */
		wqe_ctr = le16_to_cpu(roce_get_field(cqe->byte_4,
						     V2_CQE_BYTE_4_WQE_INDX_M,
						     V2_CQE_BYTE_4_WQE_INDX_S));
		wc->wr_id = srq->wrid[wqe_ctr];
		hns_roce_free_srq_wqe(srq, wqe_ctr);
	} else {
		/* Update tail pointer, record wr_id */
		wq = &(*cur_qp)->rq;
		wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
		++wq->tail;
	}

	/* Map the hardware completion status onto ib_wc_status */
	status = roce_get_field(cqe->byte_4, V2_CQE_BYTE_4_STATUS_M,
				V2_CQE_BYTE_4_STATUS_S);
	switch (status & HNS_ROCE_V2_CQE_STATUS_MASK) {
	case HNS_ROCE_CQE_V2_SUCCESS:
		wc->status = IB_WC_SUCCESS;
		break;
	case HNS_ROCE_CQE_V2_LOCAL_LENGTH_ERR:
		wc->status = IB_WC_LOC_LEN_ERR;
		break;
	case HNS_ROCE_CQE_V2_LOCAL_QP_OP_ERR:
		wc->status = IB_WC_LOC_QP_OP_ERR;
		break;
	case HNS_ROCE_CQE_V2_LOCAL_PROT_ERR:
		wc->status = IB_WC_LOC_PROT_ERR;
		break;
	case HNS_ROCE_CQE_V2_WR_FLUSH_ERR:
		wc->status = IB_WC_WR_FLUSH_ERR;
		break;
	case HNS_ROCE_CQE_V2_MW_BIND_ERR:
		wc->status = IB_WC_MW_BIND_ERR;
		break;
	case HNS_ROCE_CQE_V2_BAD_RESP_ERR:
		wc->status = IB_WC_BAD_RESP_ERR;
		break;
	case HNS_ROCE_CQE_V2_LOCAL_ACCESS_ERR:
		wc->status = IB_WC_LOC_ACCESS_ERR;
		break;
	case HNS_ROCE_CQE_V2_REMOTE_INVAL_REQ_ERR:
		wc->status = IB_WC_REM_INV_REQ_ERR;
		break;
	case HNS_ROCE_CQE_V2_REMOTE_ACCESS_ERR:
		wc->status = IB_WC_REM_ACCESS_ERR;
		break;
	case HNS_ROCE_CQE_V2_REMOTE_OP_ERR:
		wc->status = IB_WC_REM_OP_ERR;
		break;
	case HNS_ROCE_CQE_V2_TRANSPORT_RETRY_EXC_ERR:
		wc->status = IB_WC_RETRY_EXC_ERR;
		break;
	case HNS_ROCE_CQE_V2_RNR_RETRY_EXC_ERR:
		wc->status = IB_WC_RNR_RETRY_EXC_ERR;
		break;
	case HNS_ROCE_CQE_V2_REMOTE_ABORT_ERR:
		wc->status = IB_WC_REM_ABORT_ERR;
		break;
	default:
		wc->status = IB_WC_GENERAL_ERR;
		break;
	}

	/* flush cqe if wc status is error, excluding flush error */
	if ((wc->status != IB_WC_SUCCESS) &&
	    (wc->status != IB_WC_WR_FLUSH_ERR)) {
		attr_mask = IB_QP_STATE;
		attr.qp_state = IB_QPS_ERR;
		return hns_roce_v2_modify_qp(&(*cur_qp)->ibqp,
					     &attr, attr_mask,
					     (*cur_qp)->state, IB_QPS_ERR);
	}

	/* Flushed completions carry no further payload information */
	if (wc->status == IB_WC_WR_FLUSH_ERR)
		return 0;

	if (is_send) {
		wc->wc_flags = 0;
		/* SQ corresponding to CQE */
		switch (roce_get_field(cqe->byte_4, V2_CQE_BYTE_4_OPCODE_M,
				       V2_CQE_BYTE_4_OPCODE_S) & 0x1f) {
		case HNS_ROCE_SQ_OPCODE_SEND:
			wc->opcode = IB_WC_SEND;
			break;
		case HNS_ROCE_SQ_OPCODE_SEND_WITH_INV:
			wc->opcode = IB_WC_SEND;
			break;
		case HNS_ROCE_SQ_OPCODE_SEND_WITH_IMM:
			wc->opcode = IB_WC_SEND;
			wc->wc_flags |= IB_WC_WITH_IMM;
			break;
		case HNS_ROCE_SQ_OPCODE_RDMA_READ:
			wc->opcode = IB_WC_RDMA_READ;
			wc->byte_len = le32_to_cpu(cqe->byte_cnt);
			break;
		case HNS_ROCE_SQ_OPCODE_RDMA_WRITE:
			wc->opcode = IB_WC_RDMA_WRITE;
			break;
		case HNS_ROCE_SQ_OPCODE_RDMA_WRITE_WITH_IMM:
			wc->opcode = IB_WC_RDMA_WRITE;
			wc->wc_flags |= IB_WC_WITH_IMM;
			break;
		case HNS_ROCE_SQ_OPCODE_LOCAL_INV:
			wc->opcode = IB_WC_LOCAL_INV;
			wc->wc_flags |= IB_WC_WITH_INVALIDATE;
			break;
		case HNS_ROCE_SQ_OPCODE_ATOMIC_COMP_AND_SWAP:
			wc->opcode = IB_WC_COMP_SWAP;
			wc->byte_len  = 8;
			break;
		case HNS_ROCE_SQ_OPCODE_ATOMIC_FETCH_AND_ADD:
			wc->opcode = IB_WC_FETCH_ADD;
			wc->byte_len  = 8;
			break;
		case HNS_ROCE_SQ_OPCODE_ATOMIC_MASK_COMP_AND_SWAP:
			wc->opcode = IB_WC_MASKED_COMP_SWAP;
			wc->byte_len  = 8;
			break;
		case HNS_ROCE_SQ_OPCODE_ATOMIC_MASK_FETCH_AND_ADD:
			wc->opcode = IB_WC_MASKED_FETCH_ADD;
			wc->byte_len  = 8;
			break;
		case HNS_ROCE_SQ_OPCODE_FAST_REG_WR:
			wc->opcode = IB_WC_REG_MR;
			break;
		case HNS_ROCE_SQ_OPCODE_BIND_MW:
			wc->opcode = IB_WC_REG_MR;
			break;
		default:
			wc->status = IB_WC_GENERAL_ERR;
			break;
		}
	} else {
		/* RQ correspond to CQE */
		wc->byte_len = le32_to_cpu(cqe->byte_cnt);

		opcode = roce_get_field(cqe->byte_4, V2_CQE_BYTE_4_OPCODE_M,
					V2_CQE_BYTE_4_OPCODE_S);
		switch (opcode & 0x1f) {
		case HNS_ROCE_V2_OPCODE_RDMA_WRITE_IMM:
			wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
			wc->wc_flags = IB_WC_WITH_IMM;
			wc->ex.imm_data =
				cpu_to_be32(le32_to_cpu(cqe->immtdata));
			break;
		case HNS_ROCE_V2_OPCODE_SEND:
			wc->opcode = IB_WC_RECV;
			wc->wc_flags = 0;
			break;
		case HNS_ROCE_V2_OPCODE_SEND_WITH_IMM:
			wc->opcode = IB_WC_RECV;
			wc->wc_flags = IB_WC_WITH_IMM;
			wc->ex.imm_data =
				cpu_to_be32(le32_to_cpu(cqe->immtdata));
			break;
		case HNS_ROCE_V2_OPCODE_SEND_WITH_INV:
			wc->opcode = IB_WC_RECV;
			wc->wc_flags = IB_WC_WITH_INVALIDATE;
			wc->ex.invalidate_rkey = le32_to_cpu(cqe->rkey);
			break;
		default:
			wc->status = IB_WC_GENERAL_ERR;
			break;
		}

		/* Inline-received data must be copied out of the RQ WQE */
		if ((wc->qp->qp_type == IB_QPT_RC ||
		     wc->qp->qp_type == IB_QPT_UC) &&
		    (opcode == HNS_ROCE_V2_OPCODE_SEND ||
		     opcode == HNS_ROCE_V2_OPCODE_SEND_WITH_IMM ||
		     opcode == HNS_ROCE_V2_OPCODE_SEND_WITH_INV) &&
		    (roce_get_bit(cqe->byte_4, V2_CQE_BYTE_4_RQ_INLINE_S))) {
			ret = hns_roce_handle_recv_inl_wqe(cqe, cur_qp, wc);
			if (ret)
				return -EAGAIN;
		}

		wc->sl = (u8)roce_get_field(cqe->byte_32, V2_CQE_BYTE_32_SL_M,
					    V2_CQE_BYTE_32_SL_S);
		wc->src_qp = (u8)roce_get_field(cqe->byte_32,
						V2_CQE_BYTE_32_RMT_QPN_M,
						V2_CQE_BYTE_32_RMT_QPN_S);
		wc->slid = 0;
		wc->wc_flags |= (roce_get_bit(cqe->byte_32,
					      V2_CQE_BYTE_32_GRH_S) ?
					      IB_WC_GRH : 0);
		wc->port_num = roce_get_field(cqe->byte_32,
				V2_CQE_BYTE_32_PORTN_M, V2_CQE_BYTE_32_PORTN_S);
		wc->pkey_index = 0;
		/* Peer MAC: 4 bytes from smac plus 2 bytes from byte_28 */
		memcpy(wc->smac, cqe->smac, 4);
		wc->smac[4] = roce_get_field(cqe->byte_28,
					     V2_CQE_BYTE_28_SMAC_4_M,
					     V2_CQE_BYTE_28_SMAC_4_S);
		wc->smac[5] = roce_get_field(cqe->byte_28,
					     V2_CQE_BYTE_28_SMAC_5_M,
					     V2_CQE_BYTE_28_SMAC_5_S);
		if (roce_get_bit(cqe->byte_28, V2_CQE_BYTE_28_VID_VLD_S)) {
			wc->vlan_id = (u16)roce_get_field(cqe->byte_28,
							  V2_CQE_BYTE_28_VID_M,
							  V2_CQE_BYTE_28_VID_S);
		} else {
			wc->vlan_id = 0xffff;
		}

		wc->wc_flags |= (IB_WC_WITH_VLAN | IB_WC_WITH_SMAC);
		wc->network_hdr_type = roce_get_field(cqe->byte_28,
						      V2_CQE_BYTE_28_PORT_TYPE_M,
						      V2_CQE_BYTE_28_PORT_TYPE_S);
	}

	return 0;
}
2884
2885static int hns_roce_v2_poll_cq(struct ib_cq *ibcq, int num_entries,
2886 struct ib_wc *wc)
2887{
2888 struct hns_roce_cq *hr_cq = to_hr_cq(ibcq);
2889 struct hns_roce_qp *cur_qp = NULL;
2890 unsigned long flags;
2891 int npolled;
2892
2893 spin_lock_irqsave(&hr_cq->lock, flags);
2894
2895 for (npolled = 0; npolled < num_entries; ++npolled) {
2896 if (hns_roce_v2_poll_one(hr_cq, &cur_qp, wc + npolled))
2897 break;
2898 }
2899
2900 if (npolled) {
2901 /* Memory barrier */
2902 wmb();
2903 hns_roce_v2_cq_set_ci(hr_cq, hr_cq->cons_index);
2904 }
2905
2906 spin_unlock_irqrestore(&hr_cq->lock, flags);
2907
2908 return npolled;
2909}
2910
a81fba28
WHX
/*
 * Program one step of a multi-hop HEM (Hardware Entry Memory) base-address
 * table into the device via a mailbox command.
 *
 * @obj:      index of the context object within @table.
 * @step_idx: which hop level (0 = L0 BT, 1 = L1 BT, last = leaf buffer)
 *            is being written; added to the table's base WRITE_*_BT0 opcode.
 *
 * Returns 0 on success (including the "nothing to do" cases) or a negative
 * errno from mailbox allocation / command execution.
 */
static int hns_roce_v2_set_hem(struct hns_roce_dev *hr_dev,
			       struct hns_roce_hem_table *table, int obj,
			       int step_idx)
{
	struct device *dev = hr_dev->dev;
	struct hns_roce_cmd_mailbox *mailbox;
	struct hns_roce_hem_iter iter;
	struct hns_roce_hem_mhop mhop;
	struct hns_roce_hem *hem;
	unsigned long mhop_obj = obj;
	int i, j, k;
	int ret = 0;
	u64 hem_idx = 0;
	u64 l1_idx = 0;
	u64 bt_ba = 0;
	u32 chunk_ba_num;
	u32 hop_num;
	u16 op = 0xff;	/* sentinel: no valid opcode chosen yet */

	/* Tables that are not multi-hop need no BT programming. */
	if (!hns_roce_check_whether_mhop(hr_dev, table->type))
		return 0;

	/* Decompose @obj into per-hop indices (l0/l1/l2). */
	hns_roce_calc_hem_mhop(hr_dev, table, &mhop_obj, &mhop);
	i = mhop.l0_idx;
	j = mhop.l1_idx;
	k = mhop.l2_idx;
	hop_num = mhop.hop_num;
	/* Each BT chunk holds bt_chunk_size / 8 64-bit base addresses. */
	chunk_ba_num = mhop.bt_chunk_size / 8;

	/* Flatten the per-hop indices into the linear hem[] index. */
	if (hop_num == 2) {
		hem_idx = i * chunk_ba_num * chunk_ba_num + j * chunk_ba_num +
			  k;
		l1_idx = i * chunk_ba_num + j;
	} else if (hop_num == 1) {
		hem_idx = i * chunk_ba_num + j;
	} else if (hop_num == HNS_ROCE_HOP_NUM_0) {
		hem_idx = i;
	}

	/* Base opcode per table type; step_idx is added below. */
	switch (table->type) {
	case HEM_TYPE_QPC:
		op = HNS_ROCE_CMD_WRITE_QPC_BT0;
		break;
	case HEM_TYPE_MTPT:
		op = HNS_ROCE_CMD_WRITE_MPT_BT0;
		break;
	case HEM_TYPE_CQC:
		op = HNS_ROCE_CMD_WRITE_CQC_BT0;
		break;
	case HEM_TYPE_SRQC:
		op = HNS_ROCE_CMD_WRITE_SRQC_BT0;
		break;
	case HEM_TYPE_SCCC:
		op = HNS_ROCE_CMD_WRITE_SCCC_BT0;
		break;
	case HEM_TYPE_QPC_TIMER:
		op = HNS_ROCE_CMD_WRITE_QPC_TIMER_BT0;
		break;
	case HEM_TYPE_CQC_TIMER:
		op = HNS_ROCE_CMD_WRITE_CQC_TIMER_BT0;
		break;
	default:
		dev_warn(dev, "Table %d not to be written by mailbox!\n",
			 table->type);
		return 0;
	}

	/* SCCC only supports step 0 — higher steps are silently skipped. */
	if (table->type == HEM_TYPE_SCCC && step_idx)
		return 0;

	op += step_idx;

	mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	/* For SCCC the command tag is the L0 index, not the object index. */
	if (table->type == HEM_TYPE_SCCC)
		obj = mhop.l0_idx;

	if (check_whether_last_step(hop_num, step_idx)) {
		/* Last hop: write each chunk of the leaf HEM buffer. */
		hem = table->hem[hem_idx];
		for (hns_roce_hem_first(hem, &iter);
		     !hns_roce_hem_last(&iter); hns_roce_hem_next(&iter)) {
			bt_ba = hns_roce_hem_addr(&iter);

			/* configure the ba, tag, and op */
			ret = hns_roce_cmd_mbox(hr_dev, bt_ba, mailbox->dma,
						obj, 0, op,
						HNS_ROCE_CMD_TIMEOUT_MSECS);
		}
	} else {
		/* Intermediate hop: write the L0 or L1 BT base address. */
		if (step_idx == 0)
			bt_ba = table->bt_l0_dma_addr[i];
		else if (step_idx == 1 && hop_num == 2)
			bt_ba = table->bt_l1_dma_addr[l1_idx];

		/* configure the ba, tag, and op */
		ret = hns_roce_cmd_mbox(hr_dev, bt_ba, mailbox->dma, obj,
					0, op, HNS_ROCE_CMD_TIMEOUT_MSECS);
	}

	hns_roce_free_cmd_mailbox(hr_dev, mailbox);
	return ret;
}
3015
3016static int hns_roce_v2_clear_hem(struct hns_roce_dev *hr_dev,
3017 struct hns_roce_hem_table *table, int obj,
3018 int step_idx)
3019{
3020 struct device *dev = hr_dev->dev;
3021 struct hns_roce_cmd_mailbox *mailbox;
3022 int ret = 0;
3023 u16 op = 0xff;
3024
3025 if (!hns_roce_check_whether_mhop(hr_dev, table->type))
3026 return 0;
3027
3028 switch (table->type) {
3029 case HEM_TYPE_QPC:
3030 op = HNS_ROCE_CMD_DESTROY_QPC_BT0;
3031 break;
3032 case HEM_TYPE_MTPT:
3033 op = HNS_ROCE_CMD_DESTROY_MPT_BT0;
3034 break;
3035 case HEM_TYPE_CQC:
3036 op = HNS_ROCE_CMD_DESTROY_CQC_BT0;
3037 break;
6a157f7d 3038 case HEM_TYPE_SCCC:
0e40dc2f
YL
3039 case HEM_TYPE_QPC_TIMER:
3040 case HEM_TYPE_CQC_TIMER:
6a157f7d 3041 break;
a81fba28
WHX
3042 case HEM_TYPE_SRQC:
3043 op = HNS_ROCE_CMD_DESTROY_SRQC_BT0;
3044 break;
3045 default:
3046 dev_warn(dev, "Table %d not to be destroyed by mailbox!\n",
3047 table->type);
3048 return 0;
3049 }
6a157f7d 3050
0e40dc2f
YL
3051 if (table->type == HEM_TYPE_SCCC ||
3052 table->type == HEM_TYPE_QPC_TIMER ||
3053 table->type == HEM_TYPE_CQC_TIMER)
6a157f7d
YL
3054 return 0;
3055
a81fba28
WHX
3056 op += step_idx;
3057
3058 mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
3059 if (IS_ERR(mailbox))
3060 return PTR_ERR(mailbox);
3061
3062 /* configure the tag and op */
3063 ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, obj, 0, op,
3064 HNS_ROCE_CMD_TIMEOUT_MSECS);
3065
3066 hns_roce_free_cmd_mailbox(hr_dev, mailbox);
3067 return ret;
3068}
3069
926a01dc 3070static int hns_roce_v2_qp_modify(struct hns_roce_dev *hr_dev,
926a01dc
WHX
3071 enum ib_qp_state cur_state,
3072 enum ib_qp_state new_state,
3073 struct hns_roce_v2_qp_context *context,
3074 struct hns_roce_qp *hr_qp)
3075{
3076 struct hns_roce_cmd_mailbox *mailbox;
3077 int ret;
3078
3079 mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
3080 if (IS_ERR(mailbox))
3081 return PTR_ERR(mailbox);
3082
3083 memcpy(mailbox->buf, context, sizeof(*context) * 2);
3084
3085 ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, hr_qp->qpn, 0,
3086 HNS_ROCE_CMD_MODIFY_QPC,
3087 HNS_ROCE_CMD_TIMEOUT_MSECS);
3088
3089 hns_roce_free_cmd_mailbox(hr_dev, mailbox);
3090
3091 return ret;
3092}
3093
ace1c541 3094static void set_access_flags(struct hns_roce_qp *hr_qp,
3095 struct hns_roce_v2_qp_context *context,
3096 struct hns_roce_v2_qp_context *qpc_mask,
3097 const struct ib_qp_attr *attr, int attr_mask)
3098{
3099 u8 dest_rd_atomic;
3100 u32 access_flags;
3101
c2799119 3102 dest_rd_atomic = (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) ?
ace1c541 3103 attr->max_dest_rd_atomic : hr_qp->resp_depth;
3104
c2799119 3105 access_flags = (attr_mask & IB_QP_ACCESS_FLAGS) ?
ace1c541 3106 attr->qp_access_flags : hr_qp->atomic_rd_en;
3107
3108 if (!dest_rd_atomic)
3109 access_flags &= IB_ACCESS_REMOTE_WRITE;
3110
3111 roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RRE_S,
3112 !!(access_flags & IB_ACCESS_REMOTE_READ));
3113 roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RRE_S, 0);
3114
3115 roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RWE_S,
3116 !!(access_flags & IB_ACCESS_REMOTE_WRITE));
3117 roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RWE_S, 0);
3118
3119 roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_ATE_S,
3120 !!(access_flags & IB_ACCESS_REMOTE_ATOMIC));
3121 roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_ATE_S, 0);
3122}
3123
926a01dc
WHX
/*
 * Build the QP context and context mask for the RESET -> INIT transition.
 *
 * Populates @context with the initial values (QP type, SGE shift, PD, queue
 * shifts, CQNs, doorbell record address, SRQ binding) and clears the
 * corresponding bits in @qpc_mask; mask bits left at 1 tell hardware to
 * keep its current value for that field.
 */
static void modify_qp_reset_to_init(struct ib_qp *ibqp,
				    const struct ib_qp_attr *attr,
				    int attr_mask,
				    struct hns_roce_v2_qp_context *context,
				    struct hns_roce_v2_qp_context *qpc_mask)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
	struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);

	/*
	 * In v2 engine, software pass context and context mask to hardware
	 * when modifying qp. If software need modify some fields in context,
	 * we should set all bits of the relevant fields in context mask to
	 * 0 at the same time, else set them to 0x1.
	 */
	roce_set_field(context->byte_4_sqpn_tst, V2_QPC_BYTE_4_TST_M,
		       V2_QPC_BYTE_4_TST_S, to_hr_qp_type(hr_qp->ibqp.qp_type));
	roce_set_field(qpc_mask->byte_4_sqpn_tst, V2_QPC_BYTE_4_TST_M,
		       V2_QPC_BYTE_4_TST_S, 0);

	/* GSI QPs always use the extended SGE area; UC/RC only when the
	 * WQE's inline SGE slots (2) are exceeded.
	 */
	if (ibqp->qp_type == IB_QPT_GSI)
		roce_set_field(context->byte_4_sqpn_tst,
			       V2_QPC_BYTE_4_SGE_SHIFT_M,
			       V2_QPC_BYTE_4_SGE_SHIFT_S,
			       ilog2((unsigned int)hr_qp->sge.sge_cnt));
	else
		roce_set_field(context->byte_4_sqpn_tst,
			       V2_QPC_BYTE_4_SGE_SHIFT_M,
			       V2_QPC_BYTE_4_SGE_SHIFT_S,
			       hr_qp->sq.max_gs > 2 ?
			       ilog2((unsigned int)hr_qp->sge.sge_cnt) : 0);

	roce_set_field(qpc_mask->byte_4_sqpn_tst, V2_QPC_BYTE_4_SGE_SHIFT_M,
		       V2_QPC_BYTE_4_SGE_SHIFT_S, 0);

	roce_set_field(context->byte_4_sqpn_tst, V2_QPC_BYTE_4_SQPN_M,
		       V2_QPC_BYTE_4_SQPN_S, hr_qp->qpn);
	roce_set_field(qpc_mask->byte_4_sqpn_tst, V2_QPC_BYTE_4_SQPN_M,
		       V2_QPC_BYTE_4_SQPN_S, 0);

	roce_set_field(context->byte_16_buf_ba_pg_sz, V2_QPC_BYTE_16_PD_M,
		       V2_QPC_BYTE_16_PD_S, to_hr_pd(ibqp->pd)->pdn);
	roce_set_field(qpc_mask->byte_16_buf_ba_pg_sz, V2_QPC_BYTE_16_PD_M,
		       V2_QPC_BYTE_16_PD_S, 0);

	roce_set_field(context->byte_20_smac_sgid_idx, V2_QPC_BYTE_20_RQWS_M,
		       V2_QPC_BYTE_20_RQWS_S, ilog2(hr_qp->rq.max_gs));
	roce_set_field(qpc_mask->byte_20_smac_sgid_idx, V2_QPC_BYTE_20_RQWS_M,
		       V2_QPC_BYTE_20_RQWS_S, 0);

	roce_set_field(context->byte_20_smac_sgid_idx,
		       V2_QPC_BYTE_20_SQ_SHIFT_M, V2_QPC_BYTE_20_SQ_SHIFT_S,
		       ilog2((unsigned int)hr_qp->sq.wqe_cnt));
	roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
		       V2_QPC_BYTE_20_SQ_SHIFT_M, V2_QPC_BYTE_20_SQ_SHIFT_S, 0);

	/* XRC and SRQ-attached QPs have no RQ of their own -> shift 0. */
	roce_set_field(context->byte_20_smac_sgid_idx,
		       V2_QPC_BYTE_20_RQ_SHIFT_M, V2_QPC_BYTE_20_RQ_SHIFT_S,
		       (hr_qp->ibqp.qp_type == IB_QPT_XRC_INI ||
		       hr_qp->ibqp.qp_type == IB_QPT_XRC_TGT || ibqp->srq) ? 0 :
		       ilog2((unsigned int)hr_qp->rq.wqe_cnt));
	roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
		       V2_QPC_BYTE_20_RQ_SHIFT_M, V2_QPC_BYTE_20_RQ_SHIFT_S, 0);

	/* No VLAN need to set 0xFFF */
	roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_VLAN_ID_M,
		       V2_QPC_BYTE_24_VLAN_ID_S, 0xfff);
	roce_set_field(qpc_mask->byte_24_mtu_tc, V2_QPC_BYTE_24_VLAN_ID_M,
		       V2_QPC_BYTE_24_VLAN_ID_S, 0);

	/*
	 * Set some fields in context to zero, Because the default values
	 * of all fields in context are zero, we need not set them to 0 again.
	 * but we should set the relevant fields of context mask to 0.
	 */
	roce_set_bit(qpc_mask->byte_56_dqpn_err, V2_QPC_BYTE_56_SQ_TX_ERR_S, 0);
	roce_set_bit(qpc_mask->byte_56_dqpn_err, V2_QPC_BYTE_56_SQ_RX_ERR_S, 0);
	roce_set_bit(qpc_mask->byte_56_dqpn_err, V2_QPC_BYTE_56_RQ_TX_ERR_S, 0);
	roce_set_bit(qpc_mask->byte_56_dqpn_err, V2_QPC_BYTE_56_RQ_RX_ERR_S, 0);

	roce_set_field(qpc_mask->byte_60_qpst_tempid, V2_QPC_BYTE_60_TEMPID_M,
		       V2_QPC_BYTE_60_TEMPID_S, 0);

	roce_set_field(qpc_mask->byte_60_qpst_tempid,
		       V2_QPC_BYTE_60_SCC_TOKEN_M, V2_QPC_BYTE_60_SCC_TOKEN_S,
		       0);
	roce_set_bit(qpc_mask->byte_60_qpst_tempid,
		     V2_QPC_BYTE_60_SQ_DB_DOING_S, 0);
	roce_set_bit(qpc_mask->byte_60_qpst_tempid,
		     V2_QPC_BYTE_60_RQ_DB_DOING_S, 0);
	roce_set_bit(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_CNP_TX_FLAG_S, 0);
	roce_set_bit(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_CE_FLAG_S, 0);

	/* Enable the RQ record doorbell if the QP allocated one. */
	if (hr_qp->rdb_en) {
		roce_set_bit(context->byte_68_rq_db,
			     V2_QPC_BYTE_68_RQ_RECORD_EN_S, 1);
		roce_set_bit(qpc_mask->byte_68_rq_db,
			     V2_QPC_BYTE_68_RQ_RECORD_EN_S, 0);
	}

	/* Record-doorbell DMA address is split: low 32 bits (>>1) in the
	 * bitfield, high 32 bits in rq_db_record_addr.
	 */
	roce_set_field(context->byte_68_rq_db,
		       V2_QPC_BYTE_68_RQ_DB_RECORD_ADDR_M,
		       V2_QPC_BYTE_68_RQ_DB_RECORD_ADDR_S,
		       ((u32)hr_qp->rdb.dma) >> 1);
	roce_set_field(qpc_mask->byte_68_rq_db,
		       V2_QPC_BYTE_68_RQ_DB_RECORD_ADDR_M,
		       V2_QPC_BYTE_68_RQ_DB_RECORD_ADDR_S, 0);
	context->rq_db_record_addr = hr_qp->rdb.dma >> 32;
	qpc_mask->rq_db_record_addr = 0;

	roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RQIE_S,
		    (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) ? 1 : 0);
	roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RQIE_S, 0);

	roce_set_field(context->byte_80_rnr_rx_cqn, V2_QPC_BYTE_80_RX_CQN_M,
		       V2_QPC_BYTE_80_RX_CQN_S, to_hr_cq(ibqp->recv_cq)->cqn);
	roce_set_field(qpc_mask->byte_80_rnr_rx_cqn, V2_QPC_BYTE_80_RX_CQN_M,
		       V2_QPC_BYTE_80_RX_CQN_S, 0);
	if (ibqp->srq) {
		roce_set_field(context->byte_76_srqn_op_en,
			       V2_QPC_BYTE_76_SRQN_M, V2_QPC_BYTE_76_SRQN_S,
			       to_hr_srq(ibqp->srq)->srqn);
		roce_set_field(qpc_mask->byte_76_srqn_op_en,
			       V2_QPC_BYTE_76_SRQN_M, V2_QPC_BYTE_76_SRQN_S, 0);
		roce_set_bit(context->byte_76_srqn_op_en,
			     V2_QPC_BYTE_76_SRQ_EN_S, 1);
		roce_set_bit(qpc_mask->byte_76_srqn_op_en,
			     V2_QPC_BYTE_76_SRQ_EN_S, 0);
	}

	roce_set_field(qpc_mask->byte_84_rq_ci_pi,
		       V2_QPC_BYTE_84_RQ_PRODUCER_IDX_M,
		       V2_QPC_BYTE_84_RQ_PRODUCER_IDX_S, 0);
	roce_set_field(qpc_mask->byte_84_rq_ci_pi,
		       V2_QPC_BYTE_84_RQ_CONSUMER_IDX_M,
		       V2_QPC_BYTE_84_RQ_CONSUMER_IDX_S, 0);

	roce_set_field(qpc_mask->byte_92_srq_info, V2_QPC_BYTE_92_SRQ_INFO_M,
		       V2_QPC_BYTE_92_SRQ_INFO_S, 0);

	roce_set_field(qpc_mask->byte_96_rx_reqmsn, V2_QPC_BYTE_96_RX_REQ_MSN_M,
		       V2_QPC_BYTE_96_RX_REQ_MSN_S, 0);

	roce_set_field(qpc_mask->byte_104_rq_sge,
		       V2_QPC_BYTE_104_RQ_CUR_WQE_SGE_NUM_M,
		       V2_QPC_BYTE_104_RQ_CUR_WQE_SGE_NUM_S, 0);

	roce_set_bit(qpc_mask->byte_108_rx_reqepsn,
		     V2_QPC_BYTE_108_RX_REQ_PSN_ERR_S, 0);
	roce_set_field(qpc_mask->byte_108_rx_reqepsn,
		       V2_QPC_BYTE_108_RX_REQ_LAST_OPTYPE_M,
		       V2_QPC_BYTE_108_RX_REQ_LAST_OPTYPE_S, 0);
	roce_set_bit(qpc_mask->byte_108_rx_reqepsn,
		     V2_QPC_BYTE_108_RX_REQ_RNR_S, 0);

	qpc_mask->rq_rnr_timer = 0;
	qpc_mask->rx_msg_len = 0;
	qpc_mask->rx_rkey_pkt_info = 0;
	qpc_mask->rx_va = 0;

	roce_set_field(qpc_mask->byte_132_trrl, V2_QPC_BYTE_132_TRRL_HEAD_MAX_M,
		       V2_QPC_BYTE_132_TRRL_HEAD_MAX_S, 0);
	roce_set_field(qpc_mask->byte_132_trrl, V2_QPC_BYTE_132_TRRL_TAIL_MAX_M,
		       V2_QPC_BYTE_132_TRRL_TAIL_MAX_S, 0);

	roce_set_bit(qpc_mask->byte_140_raq, V2_QPC_BYTE_140_RQ_RTY_WAIT_DO_S,
		     0);
	roce_set_field(qpc_mask->byte_140_raq, V2_QPC_BYTE_140_RAQ_TRRL_HEAD_M,
		       V2_QPC_BYTE_140_RAQ_TRRL_HEAD_S, 0);
	roce_set_field(qpc_mask->byte_140_raq, V2_QPC_BYTE_140_RAQ_TRRL_TAIL_M,
		       V2_QPC_BYTE_140_RAQ_TRRL_TAIL_S, 0);

	roce_set_field(qpc_mask->byte_144_raq,
		       V2_QPC_BYTE_144_RAQ_RTY_INI_PSN_M,
		       V2_QPC_BYTE_144_RAQ_RTY_INI_PSN_S, 0);
	roce_set_field(qpc_mask->byte_144_raq, V2_QPC_BYTE_144_RAQ_CREDIT_M,
		       V2_QPC_BYTE_144_RAQ_CREDIT_S, 0);
	roce_set_bit(qpc_mask->byte_144_raq, V2_QPC_BYTE_144_RESP_RTY_FLG_S, 0);

	roce_set_field(qpc_mask->byte_148_raq, V2_QPC_BYTE_148_RQ_MSN_M,
		       V2_QPC_BYTE_148_RQ_MSN_S, 0);
	roce_set_field(qpc_mask->byte_148_raq, V2_QPC_BYTE_148_RAQ_SYNDROME_M,
		       V2_QPC_BYTE_148_RAQ_SYNDROME_S, 0);

	roce_set_field(qpc_mask->byte_152_raq, V2_QPC_BYTE_152_RAQ_PSN_M,
		       V2_QPC_BYTE_152_RAQ_PSN_S, 0);
	roce_set_field(qpc_mask->byte_152_raq,
		       V2_QPC_BYTE_152_RAQ_TRRL_RTY_HEAD_M,
		       V2_QPC_BYTE_152_RAQ_TRRL_RTY_HEAD_S, 0);

	roce_set_field(qpc_mask->byte_156_raq, V2_QPC_BYTE_156_RAQ_USE_PKTN_M,
		       V2_QPC_BYTE_156_RAQ_USE_PKTN_S, 0);

	roce_set_field(qpc_mask->byte_160_sq_ci_pi,
		       V2_QPC_BYTE_160_SQ_PRODUCER_IDX_M,
		       V2_QPC_BYTE_160_SQ_PRODUCER_IDX_S, 0);
	roce_set_field(qpc_mask->byte_160_sq_ci_pi,
		       V2_QPC_BYTE_160_SQ_CONSUMER_IDX_M,
		       V2_QPC_BYTE_160_SQ_CONSUMER_IDX_S, 0);

	roce_set_bit(qpc_mask->byte_168_irrl_idx,
		     V2_QPC_BYTE_168_POLL_DB_WAIT_DO_S, 0);
	roce_set_bit(qpc_mask->byte_168_irrl_idx,
		     V2_QPC_BYTE_168_SCC_TOKEN_FORBID_SQ_DEQ_S, 0);
	roce_set_bit(qpc_mask->byte_168_irrl_idx,
		     V2_QPC_BYTE_168_WAIT_ACK_TIMEOUT_S, 0);
	roce_set_bit(qpc_mask->byte_168_irrl_idx,
		     V2_QPC_BYTE_168_MSG_RTY_LP_FLG_S, 0);
	roce_set_bit(qpc_mask->byte_168_irrl_idx,
		     V2_QPC_BYTE_168_SQ_INVLD_FLG_S, 0);
	roce_set_field(qpc_mask->byte_168_irrl_idx,
		       V2_QPC_BYTE_168_IRRL_IDX_LSB_M,
		       V2_QPC_BYTE_168_IRRL_IDX_LSB_S, 0);

	/* ACK request frequency default of 4 — presumably a tuned hardware
	 * default; see hardware programming guide.
	 */
	roce_set_field(context->byte_172_sq_psn, V2_QPC_BYTE_172_ACK_REQ_FREQ_M,
		       V2_QPC_BYTE_172_ACK_REQ_FREQ_S, 4);
	roce_set_field(qpc_mask->byte_172_sq_psn,
		       V2_QPC_BYTE_172_ACK_REQ_FREQ_M,
		       V2_QPC_BYTE_172_ACK_REQ_FREQ_S, 0);

	roce_set_bit(qpc_mask->byte_172_sq_psn, V2_QPC_BYTE_172_MSG_RNR_FLG_S,
		     0);

	roce_set_bit(context->byte_172_sq_psn, V2_QPC_BYTE_172_FRE_S, 1);
	roce_set_bit(qpc_mask->byte_172_sq_psn, V2_QPC_BYTE_172_FRE_S, 0);

	roce_set_field(qpc_mask->byte_176_msg_pktn,
		       V2_QPC_BYTE_176_MSG_USE_PKTN_M,
		       V2_QPC_BYTE_176_MSG_USE_PKTN_S, 0);
	roce_set_field(qpc_mask->byte_176_msg_pktn,
		       V2_QPC_BYTE_176_IRRL_HEAD_PRE_M,
		       V2_QPC_BYTE_176_IRRL_HEAD_PRE_S, 0);

	roce_set_field(qpc_mask->byte_184_irrl_idx,
		       V2_QPC_BYTE_184_IRRL_IDX_MSB_M,
		       V2_QPC_BYTE_184_IRRL_IDX_MSB_S, 0);

	qpc_mask->cur_sge_offset = 0;

	roce_set_field(qpc_mask->byte_192_ext_sge,
		       V2_QPC_BYTE_192_CUR_SGE_IDX_M,
		       V2_QPC_BYTE_192_CUR_SGE_IDX_S, 0);
	roce_set_field(qpc_mask->byte_192_ext_sge,
		       V2_QPC_BYTE_192_EXT_SGE_NUM_LEFT_M,
		       V2_QPC_BYTE_192_EXT_SGE_NUM_LEFT_S, 0);

	roce_set_field(qpc_mask->byte_196_sq_psn, V2_QPC_BYTE_196_IRRL_HEAD_M,
		       V2_QPC_BYTE_196_IRRL_HEAD_S, 0);

	roce_set_field(qpc_mask->byte_200_sq_max, V2_QPC_BYTE_200_SQ_MAX_IDX_M,
		       V2_QPC_BYTE_200_SQ_MAX_IDX_S, 0);
	roce_set_field(qpc_mask->byte_200_sq_max,
		       V2_QPC_BYTE_200_LCL_OPERATED_CNT_M,
		       V2_QPC_BYTE_200_LCL_OPERATED_CNT_S, 0);

	roce_set_bit(qpc_mask->byte_208_irrl, V2_QPC_BYTE_208_PKT_RNR_FLG_S, 0);
	roce_set_bit(qpc_mask->byte_208_irrl, V2_QPC_BYTE_208_PKT_RTY_FLG_S, 0);

	roce_set_field(qpc_mask->byte_212_lsn, V2_QPC_BYTE_212_CHECK_FLG_M,
		       V2_QPC_BYTE_212_CHECK_FLG_S, 0);

	qpc_mask->sq_timer = 0;

	roce_set_field(qpc_mask->byte_220_retry_psn_msn,
		       V2_QPC_BYTE_220_RETRY_MSG_MSN_M,
		       V2_QPC_BYTE_220_RETRY_MSG_MSN_S, 0);
	roce_set_field(qpc_mask->byte_232_irrl_sge,
		       V2_QPC_BYTE_232_IRRL_SGE_IDX_M,
		       V2_QPC_BYTE_232_IRRL_SGE_IDX_S, 0);

	roce_set_bit(qpc_mask->byte_232_irrl_sge, V2_QPC_BYTE_232_SO_LP_VLD_S,
		     0);
	roce_set_bit(qpc_mask->byte_232_irrl_sge,
		     V2_QPC_BYTE_232_FENCE_LP_VLD_S, 0);
	roce_set_bit(qpc_mask->byte_232_irrl_sge, V2_QPC_BYTE_232_IRRL_LP_VLD_S,
		     0);

	qpc_mask->irrl_cur_sge_offset = 0;

	roce_set_field(qpc_mask->byte_240_irrl_tail,
		       V2_QPC_BYTE_240_IRRL_TAIL_REAL_M,
		       V2_QPC_BYTE_240_IRRL_TAIL_REAL_S, 0);
	roce_set_field(qpc_mask->byte_240_irrl_tail,
		       V2_QPC_BYTE_240_IRRL_TAIL_RD_M,
		       V2_QPC_BYTE_240_IRRL_TAIL_RD_S, 0);
	roce_set_field(qpc_mask->byte_240_irrl_tail,
		       V2_QPC_BYTE_240_RX_ACK_MSN_M,
		       V2_QPC_BYTE_240_RX_ACK_MSN_S, 0);

	roce_set_field(qpc_mask->byte_248_ack_psn, V2_QPC_BYTE_248_IRRL_PSN_M,
		       V2_QPC_BYTE_248_IRRL_PSN_S, 0);
	roce_set_bit(qpc_mask->byte_248_ack_psn, V2_QPC_BYTE_248_ACK_PSN_ERR_S,
		     0);
	roce_set_field(qpc_mask->byte_248_ack_psn,
		       V2_QPC_BYTE_248_ACK_LAST_OPTYPE_M,
		       V2_QPC_BYTE_248_ACK_LAST_OPTYPE_S, 0);
	roce_set_bit(qpc_mask->byte_248_ack_psn, V2_QPC_BYTE_248_IRRL_PSN_VLD_S,
		     0);
	roce_set_bit(qpc_mask->byte_248_ack_psn,
		     V2_QPC_BYTE_248_RNR_RETRY_FLAG_S, 0);
	roce_set_bit(qpc_mask->byte_248_ack_psn, V2_QPC_BYTE_248_CQ_ERR_IND_S,
		     0);

	/* Cache the requested access flags for later INIT->INIT fallback. */
	hr_qp->access_flags = attr->qp_access_flags;
	roce_set_field(context->byte_252_err_txcqn, V2_QPC_BYTE_252_TX_CQN_M,
		       V2_QPC_BYTE_252_TX_CQN_S, to_hr_cq(ibqp->send_cq)->cqn);
	roce_set_field(qpc_mask->byte_252_err_txcqn, V2_QPC_BYTE_252_TX_CQN_M,
		       V2_QPC_BYTE_252_TX_CQN_S, 0);

	roce_set_field(qpc_mask->byte_252_err_txcqn, V2_QPC_BYTE_252_ERR_TYPE_M,
		       V2_QPC_BYTE_252_ERR_TYPE_S, 0);

	roce_set_field(qpc_mask->byte_256_sqflush_rqcqe,
		       V2_QPC_BYTE_256_RQ_CQE_IDX_M,
		       V2_QPC_BYTE_256_RQ_CQE_IDX_S, 0);
	roce_set_field(qpc_mask->byte_256_sqflush_rqcqe,
		       V2_QPC_BYTE_256_SQ_FLUSH_IDX_M,
		       V2_QPC_BYTE_256_SQ_FLUSH_IDX_S, 0);
}
3443
/*
 * Build the QP context and context mask for the INIT -> INIT transition
 * (re-applying attributes while the QP stays in INIT).
 *
 * Fields covered: QP type, SGE shift, remote access enables (from @attr if
 * IB_QP_ACCESS_FLAGS is set, otherwise from the cached hr_qp->access_flags),
 * queue shifts, PD, CQNs, SRQ binding, source/destination QPN.
 */
static void modify_qp_init_to_init(struct ib_qp *ibqp,
				   const struct ib_qp_attr *attr, int attr_mask,
				   struct hns_roce_v2_qp_context *context,
				   struct hns_roce_v2_qp_context *qpc_mask)
{
	struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);

	/*
	 * In v2 engine, software pass context and context mask to hardware
	 * when modifying qp. If software need modify some fields in context,
	 * we should set all bits of the relevant fields in context mask to
	 * 0 at the same time, else set them to 0x1.
	 */
	roce_set_field(context->byte_4_sqpn_tst, V2_QPC_BYTE_4_TST_M,
		       V2_QPC_BYTE_4_TST_S, to_hr_qp_type(hr_qp->ibqp.qp_type));
	roce_set_field(qpc_mask->byte_4_sqpn_tst, V2_QPC_BYTE_4_TST_M,
		       V2_QPC_BYTE_4_TST_S, 0);

	/* GSI QPs always use the extended SGE space; UC/RC only when SGEs
	 * exceed the in-WQE slots.
	 */
	if (ibqp->qp_type == IB_QPT_GSI)
		roce_set_field(context->byte_4_sqpn_tst,
			       V2_QPC_BYTE_4_SGE_SHIFT_M,
			       V2_QPC_BYTE_4_SGE_SHIFT_S,
			       ilog2((unsigned int)hr_qp->sge.sge_cnt));
	else
		roce_set_field(context->byte_4_sqpn_tst,
			       V2_QPC_BYTE_4_SGE_SHIFT_M,
			       V2_QPC_BYTE_4_SGE_SHIFT_S,
			       hr_qp->sq.max_gs >
			       HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE ?
			       ilog2((unsigned int)hr_qp->sge.sge_cnt) : 0);

	roce_set_field(qpc_mask->byte_4_sqpn_tst, V2_QPC_BYTE_4_SGE_SHIFT_M,
		       V2_QPC_BYTE_4_SGE_SHIFT_S, 0);

	/* Program RRE/RWE/ATE from the new attributes when supplied,
	 * otherwise re-apply the flags cached at RESET->INIT.
	 */
	if (attr_mask & IB_QP_ACCESS_FLAGS) {
		roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RRE_S,
			     !!(attr->qp_access_flags & IB_ACCESS_REMOTE_READ));
		roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RRE_S,
			     0);

		roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RWE_S,
			     !!(attr->qp_access_flags &
			     IB_ACCESS_REMOTE_WRITE));
		roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RWE_S,
			     0);

		roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_ATE_S,
			     !!(attr->qp_access_flags &
			     IB_ACCESS_REMOTE_ATOMIC));
		roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_ATE_S,
			     0);
	} else {
		roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RRE_S,
			     !!(hr_qp->access_flags & IB_ACCESS_REMOTE_READ));
		roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RRE_S,
			     0);

		roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RWE_S,
			     !!(hr_qp->access_flags & IB_ACCESS_REMOTE_WRITE));
		roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RWE_S,
			     0);

		roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_ATE_S,
			     !!(hr_qp->access_flags & IB_ACCESS_REMOTE_ATOMIC));
		roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_ATE_S,
			     0);
	}

	roce_set_field(context->byte_20_smac_sgid_idx,
		       V2_QPC_BYTE_20_SQ_SHIFT_M, V2_QPC_BYTE_20_SQ_SHIFT_S,
		       ilog2((unsigned int)hr_qp->sq.wqe_cnt));
	roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
		       V2_QPC_BYTE_20_SQ_SHIFT_M, V2_QPC_BYTE_20_SQ_SHIFT_S, 0);

	/* XRC and SRQ-attached QPs have no RQ of their own -> shift 0. */
	roce_set_field(context->byte_20_smac_sgid_idx,
		       V2_QPC_BYTE_20_RQ_SHIFT_M, V2_QPC_BYTE_20_RQ_SHIFT_S,
		       (hr_qp->ibqp.qp_type == IB_QPT_XRC_INI ||
		       hr_qp->ibqp.qp_type == IB_QPT_XRC_TGT || ibqp->srq) ? 0 :
		       ilog2((unsigned int)hr_qp->rq.wqe_cnt));
	roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
		       V2_QPC_BYTE_20_RQ_SHIFT_M, V2_QPC_BYTE_20_RQ_SHIFT_S, 0);

	roce_set_field(context->byte_16_buf_ba_pg_sz, V2_QPC_BYTE_16_PD_M,
		       V2_QPC_BYTE_16_PD_S, to_hr_pd(ibqp->pd)->pdn);
	roce_set_field(qpc_mask->byte_16_buf_ba_pg_sz, V2_QPC_BYTE_16_PD_M,
		       V2_QPC_BYTE_16_PD_S, 0);

	roce_set_field(context->byte_80_rnr_rx_cqn, V2_QPC_BYTE_80_RX_CQN_M,
		       V2_QPC_BYTE_80_RX_CQN_S, to_hr_cq(ibqp->recv_cq)->cqn);
	roce_set_field(qpc_mask->byte_80_rnr_rx_cqn, V2_QPC_BYTE_80_RX_CQN_M,
		       V2_QPC_BYTE_80_RX_CQN_S, 0);

	roce_set_field(context->byte_252_err_txcqn, V2_QPC_BYTE_252_TX_CQN_M,
		       V2_QPC_BYTE_252_TX_CQN_S, to_hr_cq(ibqp->send_cq)->cqn);
	roce_set_field(qpc_mask->byte_252_err_txcqn, V2_QPC_BYTE_252_TX_CQN_M,
		       V2_QPC_BYTE_252_TX_CQN_S, 0);

	if (ibqp->srq) {
		roce_set_bit(context->byte_76_srqn_op_en,
			     V2_QPC_BYTE_76_SRQ_EN_S, 1);
		roce_set_bit(qpc_mask->byte_76_srqn_op_en,
			     V2_QPC_BYTE_76_SRQ_EN_S, 0);
		roce_set_field(context->byte_76_srqn_op_en,
			       V2_QPC_BYTE_76_SRQN_M, V2_QPC_BYTE_76_SRQN_S,
			       to_hr_srq(ibqp->srq)->srqn);
		roce_set_field(qpc_mask->byte_76_srqn_op_en,
			       V2_QPC_BYTE_76_SRQN_M, V2_QPC_BYTE_76_SRQN_S, 0);
	}

	roce_set_field(context->byte_4_sqpn_tst, V2_QPC_BYTE_4_SQPN_M,
		       V2_QPC_BYTE_4_SQPN_S, hr_qp->qpn);
	roce_set_field(qpc_mask->byte_4_sqpn_tst, V2_QPC_BYTE_4_SQPN_M,
		       V2_QPC_BYTE_4_SQPN_S, 0);

	/* NOTE(review): DQPN is set to the QP's own qpn here rather than
	 * attr->dest_qp_num — looks intentional for this hardware path,
	 * but worth confirming against the programming guide.
	 */
	if (attr_mask & IB_QP_DEST_QPN) {
		roce_set_field(context->byte_56_dqpn_err, V2_QPC_BYTE_56_DQPN_M,
			       V2_QPC_BYTE_56_DQPN_S, hr_qp->qpn);
		roce_set_field(qpc_mask->byte_56_dqpn_err,
			       V2_QPC_BYTE_56_DQPN_M, V2_QPC_BYTE_56_DQPN_S, 0);
	}
}
3565
8d18ad83
LO
3566static bool check_wqe_rq_mtt_count(struct hns_roce_dev *hr_dev,
3567 struct hns_roce_qp *hr_qp, int mtt_cnt,
3568 u32 page_size)
3569{
3570 struct device *dev = hr_dev->dev;
3571
3572 if (hr_qp->rq.wqe_cnt < 1)
3573 return true;
3574
3575 if (mtt_cnt < 1) {
3576 dev_err(dev, "qp(0x%lx) rqwqe buf ba find failed\n",
3577 hr_qp->qpn);
3578 return false;
3579 }
3580
3581 if (mtt_cnt < MTT_MIN_COUNT &&
3582 (hr_qp->rq.offset + page_size) < hr_qp->buff_size) {
3583 dev_err(dev, "qp(0x%lx) next rqwqe buf ba find failed\n",
3584 hr_qp->qpn);
3585 return false;
3586 }
3587
3588 return true;
3589}
3590
926a01dc
WHX
3591static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
3592 const struct ib_qp_attr *attr, int attr_mask,
3593 struct hns_roce_v2_qp_context *context,
3594 struct hns_roce_v2_qp_context *qpc_mask)
3595{
3596 const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr);
3597 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
3598 struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
3599 struct device *dev = hr_dev->dev;
8d18ad83 3600 u64 mtts[MTT_MIN_COUNT] = { 0 };
e92f2c18 3601 dma_addr_t dma_handle_3;
926a01dc 3602 dma_addr_t dma_handle_2;
8d18ad83 3603 u64 wqe_sge_ba;
926a01dc
WHX
3604 u32 page_size;
3605 u8 port_num;
e92f2c18 3606 u64 *mtts_3;
926a01dc 3607 u64 *mtts_2;
8d18ad83 3608 int count;
926a01dc
WHX
3609 u8 *dmac;
3610 u8 *smac;
3611 int port;
3612
3613 /* Search qp buf's mtts */
8d18ad83
LO
3614 page_size = 1 << (hr_dev->caps.mtt_buf_pg_sz + PAGE_SHIFT);
3615 count = hns_roce_mtr_find(hr_dev, &hr_qp->mtr,
3616 hr_qp->rq.offset / page_size, mtts,
3617 MTT_MIN_COUNT, &wqe_sge_ba);
3618 if (!ibqp->srq)
3619 if (!check_wqe_rq_mtt_count(hr_dev, hr_qp, count, page_size))
3620 return -EINVAL;
926a01dc
WHX
3621
3622 /* Search IRRL's mtts */
3623 mtts_2 = hns_roce_table_find(hr_dev, &hr_dev->qp_table.irrl_table,
3624 hr_qp->qpn, &dma_handle_2);
3625 if (!mtts_2) {
3626 dev_err(dev, "qp irrl_table find failed\n");
3627 return -EINVAL;
3628 }
3629
e92f2c18 3630 /* Search TRRL's mtts */
3631 mtts_3 = hns_roce_table_find(hr_dev, &hr_dev->qp_table.trrl_table,
3632 hr_qp->qpn, &dma_handle_3);
3633 if (!mtts_3) {
3634 dev_err(dev, "qp trrl_table find failed\n");
3635 return -EINVAL;
3636 }
3637
734f3863 3638 if (attr_mask & IB_QP_ALT_PATH) {
926a01dc
WHX
3639 dev_err(dev, "INIT2RTR attr_mask (0x%x) error\n", attr_mask);
3640 return -EINVAL;
3641 }
3642
3643 dmac = (u8 *)attr->ah_attr.roce.dmac;
8d18ad83 3644 context->wqe_sge_ba = (u32)(wqe_sge_ba >> 3);
926a01dc
WHX
3645 qpc_mask->wqe_sge_ba = 0;
3646
3647 /*
3648 * In v2 engine, software pass context and context mask to hardware
3649 * when modifying qp. If software need modify some fields in context,
3650 * we should set all bits of the relevant fields in context mask to
3651 * 0 at the same time, else set them to 0x1.
3652 */
3653 roce_set_field(context->byte_12_sq_hop, V2_QPC_BYTE_12_WQE_SGE_BA_M,
8d18ad83 3654 V2_QPC_BYTE_12_WQE_SGE_BA_S, wqe_sge_ba >> (32 + 3));
926a01dc
WHX
3655 roce_set_field(qpc_mask->byte_12_sq_hop, V2_QPC_BYTE_12_WQE_SGE_BA_M,
3656 V2_QPC_BYTE_12_WQE_SGE_BA_S, 0);
3657
3658 roce_set_field(context->byte_12_sq_hop, V2_QPC_BYTE_12_SQ_HOP_NUM_M,
3659 V2_QPC_BYTE_12_SQ_HOP_NUM_S,
8d18ad83
LO
3660 hr_dev->caps.wqe_sq_hop_num == HNS_ROCE_HOP_NUM_0 ?
3661 0 : hr_dev->caps.wqe_sq_hop_num);
926a01dc
WHX
3662 roce_set_field(qpc_mask->byte_12_sq_hop, V2_QPC_BYTE_12_SQ_HOP_NUM_M,
3663 V2_QPC_BYTE_12_SQ_HOP_NUM_S, 0);
3664
3665 roce_set_field(context->byte_20_smac_sgid_idx,
3666 V2_QPC_BYTE_20_SGE_HOP_NUM_M,
3667 V2_QPC_BYTE_20_SGE_HOP_NUM_S,
8d18ad83
LO
3668 ((ibqp->qp_type == IB_QPT_GSI) ||
3669 hr_qp->sq.max_gs > HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE) ?
3670 hr_dev->caps.wqe_sge_hop_num : 0);
926a01dc
WHX
3671 roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
3672 V2_QPC_BYTE_20_SGE_HOP_NUM_M,
3673 V2_QPC_BYTE_20_SGE_HOP_NUM_S, 0);
3674
3675 roce_set_field(context->byte_20_smac_sgid_idx,
3676 V2_QPC_BYTE_20_RQ_HOP_NUM_M,
3677 V2_QPC_BYTE_20_RQ_HOP_NUM_S,
8d18ad83
LO
3678 hr_dev->caps.wqe_rq_hop_num == HNS_ROCE_HOP_NUM_0 ?
3679 0 : hr_dev->caps.wqe_rq_hop_num);
926a01dc
WHX
3680 roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
3681 V2_QPC_BYTE_20_RQ_HOP_NUM_M,
3682 V2_QPC_BYTE_20_RQ_HOP_NUM_S, 0);
3683
3684 roce_set_field(context->byte_16_buf_ba_pg_sz,
3685 V2_QPC_BYTE_16_WQE_SGE_BA_PG_SZ_M,
3686 V2_QPC_BYTE_16_WQE_SGE_BA_PG_SZ_S,
8d18ad83 3687 hr_qp->wqe_bt_pg_shift + PG_SHIFT_OFFSET);
926a01dc
WHX
3688 roce_set_field(qpc_mask->byte_16_buf_ba_pg_sz,
3689 V2_QPC_BYTE_16_WQE_SGE_BA_PG_SZ_M,
3690 V2_QPC_BYTE_16_WQE_SGE_BA_PG_SZ_S, 0);
3691
3692 roce_set_field(context->byte_16_buf_ba_pg_sz,
3693 V2_QPC_BYTE_16_WQE_SGE_BUF_PG_SZ_M,
3694 V2_QPC_BYTE_16_WQE_SGE_BUF_PG_SZ_S,
5e6e78db 3695 hr_dev->caps.mtt_buf_pg_sz + PG_SHIFT_OFFSET);
926a01dc
WHX
3696 roce_set_field(qpc_mask->byte_16_buf_ba_pg_sz,
3697 V2_QPC_BYTE_16_WQE_SGE_BUF_PG_SZ_M,
3698 V2_QPC_BYTE_16_WQE_SGE_BUF_PG_SZ_S, 0);
3699
8d18ad83 3700 context->rq_cur_blk_addr = (u32)(mtts[0] >> PAGE_ADDR_SHIFT);
926a01dc
WHX
3701 qpc_mask->rq_cur_blk_addr = 0;
3702
3703 roce_set_field(context->byte_92_srq_info,
3704 V2_QPC_BYTE_92_RQ_CUR_BLK_ADDR_M,
3705 V2_QPC_BYTE_92_RQ_CUR_BLK_ADDR_S,
8d18ad83 3706 mtts[0] >> (32 + PAGE_ADDR_SHIFT));
926a01dc
WHX
3707 roce_set_field(qpc_mask->byte_92_srq_info,
3708 V2_QPC_BYTE_92_RQ_CUR_BLK_ADDR_M,
3709 V2_QPC_BYTE_92_RQ_CUR_BLK_ADDR_S, 0);
3710
8d18ad83 3711 context->rq_nxt_blk_addr = (u32)(mtts[1] >> PAGE_ADDR_SHIFT);
926a01dc
WHX
3712 qpc_mask->rq_nxt_blk_addr = 0;
3713
3714 roce_set_field(context->byte_104_rq_sge,
3715 V2_QPC_BYTE_104_RQ_NXT_BLK_ADDR_M,
3716 V2_QPC_BYTE_104_RQ_NXT_BLK_ADDR_S,
8d18ad83 3717 mtts[1] >> (32 + PAGE_ADDR_SHIFT));
926a01dc
WHX
3718 roce_set_field(qpc_mask->byte_104_rq_sge,
3719 V2_QPC_BYTE_104_RQ_NXT_BLK_ADDR_M,
3720 V2_QPC_BYTE_104_RQ_NXT_BLK_ADDR_S, 0);
3721
e92f2c18 3722 roce_set_field(context->byte_132_trrl, V2_QPC_BYTE_132_TRRL_BA_M,
3723 V2_QPC_BYTE_132_TRRL_BA_S, dma_handle_3 >> 4);
3724 roce_set_field(qpc_mask->byte_132_trrl, V2_QPC_BYTE_132_TRRL_BA_M,
3725 V2_QPC_BYTE_132_TRRL_BA_S, 0);
3726 context->trrl_ba = (u32)(dma_handle_3 >> (16 + 4));
3727 qpc_mask->trrl_ba = 0;
3728 roce_set_field(context->byte_140_raq, V2_QPC_BYTE_140_TRRL_BA_M,
3729 V2_QPC_BYTE_140_TRRL_BA_S,
3730 (u32)(dma_handle_3 >> (32 + 16 + 4)));
3731 roce_set_field(qpc_mask->byte_140_raq, V2_QPC_BYTE_140_TRRL_BA_M,
3732 V2_QPC_BYTE_140_TRRL_BA_S, 0);
3733
d5514246 3734 context->irrl_ba = (u32)(dma_handle_2 >> 6);
926a01dc
WHX
3735 qpc_mask->irrl_ba = 0;
3736 roce_set_field(context->byte_208_irrl, V2_QPC_BYTE_208_IRRL_BA_M,
3737 V2_QPC_BYTE_208_IRRL_BA_S,
d5514246 3738 dma_handle_2 >> (32 + 6));
926a01dc
WHX
3739 roce_set_field(qpc_mask->byte_208_irrl, V2_QPC_BYTE_208_IRRL_BA_M,
3740 V2_QPC_BYTE_208_IRRL_BA_S, 0);
3741
3742 roce_set_bit(context->byte_208_irrl, V2_QPC_BYTE_208_RMT_E2E_S, 1);
3743 roce_set_bit(qpc_mask->byte_208_irrl, V2_QPC_BYTE_208_RMT_E2E_S, 0);
3744
3745 roce_set_bit(context->byte_252_err_txcqn, V2_QPC_BYTE_252_SIG_TYPE_S,
3746 hr_qp->sq_signal_bits);
3747 roce_set_bit(qpc_mask->byte_252_err_txcqn, V2_QPC_BYTE_252_SIG_TYPE_S,
3748 0);
3749
3750 port = (attr_mask & IB_QP_PORT) ? (attr->port_num - 1) : hr_qp->port;
3751
3752 smac = (u8 *)hr_dev->dev_addr[port];
3753 /* when dmac equals smac or loop_idc is 1, it should loopback */
3754 if (ether_addr_equal_unaligned(dmac, smac) ||
3755 hr_dev->loop_idc == 0x1) {
3756 roce_set_bit(context->byte_28_at_fl, V2_QPC_BYTE_28_LBI_S, 1);
3757 roce_set_bit(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_LBI_S, 0);
3758 }
3759
b6dd9b34 3760 if (attr_mask & IB_QP_DEST_QPN) {
3761 roce_set_field(context->byte_56_dqpn_err, V2_QPC_BYTE_56_DQPN_M,
3762 V2_QPC_BYTE_56_DQPN_S, attr->dest_qp_num);
3763 roce_set_field(qpc_mask->byte_56_dqpn_err,
3764 V2_QPC_BYTE_56_DQPN_M, V2_QPC_BYTE_56_DQPN_S, 0);
3765 }
926a01dc
WHX
3766
3767 /* Configure GID index */
3768 port_num = rdma_ah_get_port_num(&attr->ah_attr);
3769 roce_set_field(context->byte_20_smac_sgid_idx,
3770 V2_QPC_BYTE_20_SGID_IDX_M,
3771 V2_QPC_BYTE_20_SGID_IDX_S,
3772 hns_get_gid_index(hr_dev, port_num - 1,
3773 grh->sgid_index));
3774 roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
3775 V2_QPC_BYTE_20_SGID_IDX_M,
3776 V2_QPC_BYTE_20_SGID_IDX_S, 0);
2a3d923f 3777 memcpy(&(context->dmac), dmac, sizeof(u32));
926a01dc
WHX
3778 roce_set_field(context->byte_52_udpspn_dmac, V2_QPC_BYTE_52_DMAC_M,
3779 V2_QPC_BYTE_52_DMAC_S, *((u16 *)(&dmac[4])));
3780 qpc_mask->dmac = 0;
3781 roce_set_field(qpc_mask->byte_52_udpspn_dmac, V2_QPC_BYTE_52_DMAC_M,
3782 V2_QPC_BYTE_52_DMAC_S, 0);
3783
2a3d923f 3784 /* mtu*(2^LP_PKTN_INI) should not bigger than 1 message length 64kb */
926a01dc
WHX
3785 roce_set_field(context->byte_56_dqpn_err, V2_QPC_BYTE_56_LP_PKTN_INI_M,
3786 V2_QPC_BYTE_56_LP_PKTN_INI_S, 4);
3787 roce_set_field(qpc_mask->byte_56_dqpn_err, V2_QPC_BYTE_56_LP_PKTN_INI_M,
3788 V2_QPC_BYTE_56_LP_PKTN_INI_S, 0);
3789
0fa95a9a 3790 if (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_UD)
3791 roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_MTU_M,
3792 V2_QPC_BYTE_24_MTU_S, IB_MTU_4096);
6852af86 3793 else if (attr_mask & IB_QP_PATH_MTU)
0fa95a9a 3794 roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_MTU_M,
3795 V2_QPC_BYTE_24_MTU_S, attr->path_mtu);
3796
926a01dc
WHX
3797 roce_set_field(qpc_mask->byte_24_mtu_tc, V2_QPC_BYTE_24_MTU_M,
3798 V2_QPC_BYTE_24_MTU_S, 0);
3799
926a01dc
WHX
3800 roce_set_field(context->byte_84_rq_ci_pi,
3801 V2_QPC_BYTE_84_RQ_PRODUCER_IDX_M,
3802 V2_QPC_BYTE_84_RQ_PRODUCER_IDX_S, hr_qp->rq.head);
3803 roce_set_field(qpc_mask->byte_84_rq_ci_pi,
3804 V2_QPC_BYTE_84_RQ_PRODUCER_IDX_M,
3805 V2_QPC_BYTE_84_RQ_PRODUCER_IDX_S, 0);
3806
3807 roce_set_field(qpc_mask->byte_84_rq_ci_pi,
3808 V2_QPC_BYTE_84_RQ_CONSUMER_IDX_M,
3809 V2_QPC_BYTE_84_RQ_CONSUMER_IDX_S, 0);
3810 roce_set_bit(qpc_mask->byte_108_rx_reqepsn,
3811 V2_QPC_BYTE_108_RX_REQ_PSN_ERR_S, 0);
3812 roce_set_field(qpc_mask->byte_96_rx_reqmsn, V2_QPC_BYTE_96_RX_REQ_MSN_M,
3813 V2_QPC_BYTE_96_RX_REQ_MSN_S, 0);
3814 roce_set_field(qpc_mask->byte_108_rx_reqepsn,
3815 V2_QPC_BYTE_108_RX_REQ_LAST_OPTYPE_M,
3816 V2_QPC_BYTE_108_RX_REQ_LAST_OPTYPE_S, 0);
3817
3818 context->rq_rnr_timer = 0;
3819 qpc_mask->rq_rnr_timer = 0;
3820
926a01dc
WHX
3821 roce_set_field(qpc_mask->byte_132_trrl, V2_QPC_BYTE_132_TRRL_HEAD_MAX_M,
3822 V2_QPC_BYTE_132_TRRL_HEAD_MAX_S, 0);
3823 roce_set_field(qpc_mask->byte_132_trrl, V2_QPC_BYTE_132_TRRL_TAIL_MAX_M,
3824 V2_QPC_BYTE_132_TRRL_TAIL_MAX_S, 0);
3825
2a3d923f 3826 /* rocee send 2^lp_sgen_ini segs every time */
926a01dc
WHX
3827 roce_set_field(context->byte_168_irrl_idx,
3828 V2_QPC_BYTE_168_LP_SGEN_INI_M,
3829 V2_QPC_BYTE_168_LP_SGEN_INI_S, 3);
3830 roce_set_field(qpc_mask->byte_168_irrl_idx,
3831 V2_QPC_BYTE_168_LP_SGEN_INI_M,
3832 V2_QPC_BYTE_168_LP_SGEN_INI_S, 0);
3833
926a01dc
WHX
3834 return 0;
3835}
3836
3837static int modify_qp_rtr_to_rts(struct ib_qp *ibqp,
3838 const struct ib_qp_attr *attr, int attr_mask,
3839 struct hns_roce_v2_qp_context *context,
3840 struct hns_roce_v2_qp_context *qpc_mask)
3841{
3842 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
3843 struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
3844 struct device *dev = hr_dev->dev;
8d18ad83
LO
3845 u64 sge_cur_blk = 0;
3846 u64 sq_cur_blk = 0;
befb63b4 3847 u32 page_size;
8d18ad83 3848 int count;
926a01dc
WHX
3849
3850 /* Search qp buf's mtts */
8d18ad83
LO
3851 count = hns_roce_mtr_find(hr_dev, &hr_qp->mtr, 0, &sq_cur_blk, 1, NULL);
3852 if (count < 1) {
3853 dev_err(dev, "qp(0x%lx) buf pa find failed\n", hr_qp->qpn);
926a01dc
WHX
3854 return -EINVAL;
3855 }
3856
8d18ad83
LO
3857 if (hr_qp->sge.offset) {
3858 page_size = 1 << (hr_dev->caps.mtt_buf_pg_sz + PAGE_SHIFT);
3859 count = hns_roce_mtr_find(hr_dev, &hr_qp->mtr,
3860 hr_qp->sge.offset / page_size,
3861 &sge_cur_blk, 1, NULL);
3862 if (count < 1) {
3863 dev_err(dev, "qp(0x%lx) sge pa find failed\n",
3864 hr_qp->qpn);
3865 return -EINVAL;
3866 }
3867 }
3868
734f3863 3869 /* Not support alternate path and path migration */
3870 if ((attr_mask & IB_QP_ALT_PATH) ||
3871 (attr_mask & IB_QP_PATH_MIG_STATE)) {
926a01dc
WHX
3872 dev_err(dev, "RTR2RTS attr_mask (0x%x)error\n", attr_mask);
3873 return -EINVAL;
3874 }
3875
3876 /*
3877 * In v2 engine, software pass context and context mask to hardware
3878 * when modifying qp. If software need modify some fields in context,
3879 * we should set all bits of the relevant fields in context mask to
3880 * 0 at the same time, else set them to 0x1.
3881 */
8d18ad83 3882 context->sq_cur_blk_addr = (u32)(sq_cur_blk >> PAGE_ADDR_SHIFT);
926a01dc
WHX
3883 roce_set_field(context->byte_168_irrl_idx,
3884 V2_QPC_BYTE_168_SQ_CUR_BLK_ADDR_M,
3885 V2_QPC_BYTE_168_SQ_CUR_BLK_ADDR_S,
8d18ad83 3886 sq_cur_blk >> (32 + PAGE_ADDR_SHIFT));
926a01dc
WHX
3887 qpc_mask->sq_cur_blk_addr = 0;
3888 roce_set_field(qpc_mask->byte_168_irrl_idx,
3889 V2_QPC_BYTE_168_SQ_CUR_BLK_ADDR_M,
3890 V2_QPC_BYTE_168_SQ_CUR_BLK_ADDR_S, 0);
3891
2a3d923f
LO
3892 context->sq_cur_sge_blk_addr = ((ibqp->qp_type == IB_QPT_GSI) ||
3893 hr_qp->sq.max_gs > HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE) ?
8d18ad83 3894 ((u32)(sge_cur_blk >>
2a3d923f 3895 PAGE_ADDR_SHIFT)) : 0;
befb63b4 3896 roce_set_field(context->byte_184_irrl_idx,
3897 V2_QPC_BYTE_184_SQ_CUR_SGE_BLK_ADDR_M,
3898 V2_QPC_BYTE_184_SQ_CUR_SGE_BLK_ADDR_S,
2a3d923f
LO
3899 ((ibqp->qp_type == IB_QPT_GSI) || hr_qp->sq.max_gs >
3900 HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE) ?
8d18ad83 3901 (sge_cur_blk >>
befb63b4 3902 (32 + PAGE_ADDR_SHIFT)) : 0);
3903 qpc_mask->sq_cur_sge_blk_addr = 0;
3904 roce_set_field(qpc_mask->byte_184_irrl_idx,
3905 V2_QPC_BYTE_184_SQ_CUR_SGE_BLK_ADDR_M,
3906 V2_QPC_BYTE_184_SQ_CUR_SGE_BLK_ADDR_S, 0);
3907
8d18ad83 3908 context->rx_sq_cur_blk_addr = (u32)(sq_cur_blk >> PAGE_ADDR_SHIFT);
926a01dc
WHX
3909 roce_set_field(context->byte_232_irrl_sge,
3910 V2_QPC_BYTE_232_RX_SQ_CUR_BLK_ADDR_M,
3911 V2_QPC_BYTE_232_RX_SQ_CUR_BLK_ADDR_S,
8d18ad83 3912 sq_cur_blk >> (32 + PAGE_ADDR_SHIFT));
926a01dc
WHX
3913 qpc_mask->rx_sq_cur_blk_addr = 0;
3914 roce_set_field(qpc_mask->byte_232_irrl_sge,
3915 V2_QPC_BYTE_232_RX_SQ_CUR_BLK_ADDR_M,
3916 V2_QPC_BYTE_232_RX_SQ_CUR_BLK_ADDR_S, 0);
3917
3918 /*
3919 * Set some fields in context to zero, Because the default values
3920 * of all fields in context are zero, we need not set them to 0 again.
3921 * but we should set the relevant fields of context mask to 0.
3922 */
3923 roce_set_field(qpc_mask->byte_232_irrl_sge,
3924 V2_QPC_BYTE_232_IRRL_SGE_IDX_M,
3925 V2_QPC_BYTE_232_IRRL_SGE_IDX_S, 0);
3926
3927 roce_set_field(qpc_mask->byte_240_irrl_tail,
3928 V2_QPC_BYTE_240_RX_ACK_MSN_M,
3929 V2_QPC_BYTE_240_RX_ACK_MSN_S, 0);
3930
926a01dc
WHX
3931 roce_set_field(qpc_mask->byte_248_ack_psn,
3932 V2_QPC_BYTE_248_ACK_LAST_OPTYPE_M,
3933 V2_QPC_BYTE_248_ACK_LAST_OPTYPE_S, 0);
3934 roce_set_bit(qpc_mask->byte_248_ack_psn,
3935 V2_QPC_BYTE_248_IRRL_PSN_VLD_S, 0);
3936 roce_set_field(qpc_mask->byte_248_ack_psn,
3937 V2_QPC_BYTE_248_IRRL_PSN_M,
3938 V2_QPC_BYTE_248_IRRL_PSN_S, 0);
3939
3940 roce_set_field(qpc_mask->byte_240_irrl_tail,
3941 V2_QPC_BYTE_240_IRRL_TAIL_REAL_M,
3942 V2_QPC_BYTE_240_IRRL_TAIL_REAL_S, 0);
3943
926a01dc
WHX
3944 roce_set_field(qpc_mask->byte_220_retry_psn_msn,
3945 V2_QPC_BYTE_220_RETRY_MSG_MSN_M,
3946 V2_QPC_BYTE_220_RETRY_MSG_MSN_S, 0);
3947
3948 roce_set_bit(qpc_mask->byte_248_ack_psn,
3949 V2_QPC_BYTE_248_RNR_RETRY_FLAG_S, 0);
3950
3951 roce_set_field(qpc_mask->byte_212_lsn, V2_QPC_BYTE_212_CHECK_FLG_M,
3952 V2_QPC_BYTE_212_CHECK_FLG_S, 0);
3953
926a01dc
WHX
3954 roce_set_field(context->byte_212_lsn, V2_QPC_BYTE_212_LSN_M,
3955 V2_QPC_BYTE_212_LSN_S, 0x100);
3956 roce_set_field(qpc_mask->byte_212_lsn, V2_QPC_BYTE_212_LSN_M,
3957 V2_QPC_BYTE_212_LSN_S, 0);
3958
926a01dc
WHX
3959 roce_set_field(qpc_mask->byte_196_sq_psn, V2_QPC_BYTE_196_IRRL_HEAD_M,
3960 V2_QPC_BYTE_196_IRRL_HEAD_S, 0);
926a01dc
WHX
3961
3962 return 0;
3963}
3964
233673e4
LO
3965static inline bool hns_roce_v2_check_qp_stat(enum ib_qp_state cur_state,
3966 enum ib_qp_state new_state)
3967{
3968
3969 if ((cur_state != IB_QPS_RESET &&
3970 (new_state == IB_QPS_ERR || new_state == IB_QPS_RESET)) ||
3971 ((cur_state == IB_QPS_RTS || cur_state == IB_QPS_SQD) &&
3972 (new_state == IB_QPS_RTS || new_state == IB_QPS_SQD)) ||
3973 (cur_state == IB_QPS_SQE && new_state == IB_QPS_RTS))
3974 return true;
3975
3976 return false;
3977
3978}
3979
926a01dc
WHX
/*
 * Drive a QP state transition for the HIP08 ("v2") engine.
 *
 * Builds a (context, mask) pair in one kcalloc'd buffer (the mask
 * immediately follows the context) and hands it to hardware via
 * hns_roce_v2_qp_modify(). Per the v2 mask convention explained below,
 * a mask bit of 0 means "hardware, please take this field from the
 * context"; a mask bit of 1 means "leave the field alone".
 *
 * Returns 0 on success or a negative errno (the transition-specific
 * helpers, the GID/VLAN lookups, and the mailbox command can all fail).
 */
static int hns_roce_v2_modify_qp(struct ib_qp *ibqp,
				 const struct ib_qp_attr *attr,
				 int attr_mask, enum ib_qp_state cur_state,
				 enum ib_qp_state new_state)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
	struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
	struct hns_roce_v2_qp_context *context;
	struct hns_roce_v2_qp_context *qpc_mask;
	struct device *dev = hr_dev->dev;
	int ret = -EINVAL;

	/* One allocation holds both the context and its mask. */
	context = kcalloc(2, sizeof(*context), GFP_ATOMIC);
	if (!context)
		return -ENOMEM;

	qpc_mask = context + 1;
	/*
	 * In v2 engine, software pass context and context mask to hardware
	 * when modifying qp. If software need modify some fields in context,
	 * we should set all bits of the relevant fields in context mask to
	 * 0 at the same time, else set them to 0x1.
	 */
	memset(qpc_mask, 0xff, sizeof(*qpc_mask));
	if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
		/* RESET->INIT rewrites the whole context: clear the mask. */
		memset(qpc_mask, 0, sizeof(*qpc_mask));
		modify_qp_reset_to_init(ibqp, attr, attr_mask, context,
					qpc_mask);
	} else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_INIT) {
		modify_qp_init_to_init(ibqp, attr, attr_mask, context,
				       qpc_mask);
	} else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR) {
		ret = modify_qp_init_to_rtr(ibqp, attr, attr_mask, context,
					    qpc_mask);
		if (ret)
			goto out;
	} else if (cur_state == IB_QPS_RTR && new_state == IB_QPS_RTS) {
		ret = modify_qp_rtr_to_rts(ibqp, attr, attr_mask, context,
					   qpc_mask);
		if (ret)
			goto out;
	} else if (hns_roce_v2_check_qp_stat(cur_state, new_state)) {
		/* Nothing */
		;
	} else {
		dev_err(dev, "Illegal state for QP!\n");
		ret = -EINVAL;
		goto out;
	}

	/* When QP state is err, SQ and RQ WQE should be flushed */
	if (new_state == IB_QPS_ERR) {
		roce_set_field(context->byte_160_sq_ci_pi,
			       V2_QPC_BYTE_160_SQ_PRODUCER_IDX_M,
			       V2_QPC_BYTE_160_SQ_PRODUCER_IDX_S,
			       hr_qp->sq.head);
		roce_set_field(qpc_mask->byte_160_sq_ci_pi,
			       V2_QPC_BYTE_160_SQ_PRODUCER_IDX_M,
			       V2_QPC_BYTE_160_SQ_PRODUCER_IDX_S, 0);

		/* An SRQ-attached QP has no RQ of its own to flush. */
		if (!ibqp->srq) {
			roce_set_field(context->byte_84_rq_ci_pi,
			       V2_QPC_BYTE_84_RQ_PRODUCER_IDX_M,
			       V2_QPC_BYTE_84_RQ_PRODUCER_IDX_S,
			       hr_qp->rq.head);
			roce_set_field(qpc_mask->byte_84_rq_ci_pi,
			       V2_QPC_BYTE_84_RQ_PRODUCER_IDX_M,
			       V2_QPC_BYTE_84_RQ_PRODUCER_IDX_S, 0);
		}
	}

	/* Address-vector attributes: VLAN, SGID, hop limit, TC, FL, SL. */
	if (attr_mask & IB_QP_AV) {
		const struct ib_global_route *grh =
			rdma_ah_read_grh(&attr->ah_attr);
		const struct ib_gid_attr *gid_attr = NULL;
		int is_roce_protocol;
		u16 vlan = 0xffff;	/* 0xffff == no VLAN tag */
		u8 ib_port;
		u8 hr_port;

		ib_port = (attr_mask & IB_QP_PORT) ? attr->port_num :
			   hr_qp->port + 1;
		hr_port = ib_port - 1;
		is_roce_protocol = rdma_cap_eth_ah(&hr_dev->ib_dev, ib_port) &&
			       rdma_ah_get_ah_flags(&attr->ah_attr) & IB_AH_GRH;

		if (is_roce_protocol) {
			gid_attr = attr->ah_attr.grh.sgid_attr;
			ret = rdma_read_gid_l2_fields(gid_attr, &vlan, NULL);
			if (ret)
				goto out;
		}

		/* Enable VLAN insertion only when a valid VLAN id exists. */
		if (vlan < VLAN_CFI_MASK) {
			roce_set_bit(context->byte_76_srqn_op_en,
				     V2_QPC_BYTE_76_RQ_VLAN_EN_S, 1);
			roce_set_bit(qpc_mask->byte_76_srqn_op_en,
				     V2_QPC_BYTE_76_RQ_VLAN_EN_S, 0);
			roce_set_bit(context->byte_168_irrl_idx,
				     V2_QPC_BYTE_168_SQ_VLAN_EN_S, 1);
			roce_set_bit(qpc_mask->byte_168_irrl_idx,
				     V2_QPC_BYTE_168_SQ_VLAN_EN_S, 0);
		}

		roce_set_field(context->byte_24_mtu_tc,
			       V2_QPC_BYTE_24_VLAN_ID_M,
			       V2_QPC_BYTE_24_VLAN_ID_S, vlan);
		roce_set_field(qpc_mask->byte_24_mtu_tc,
			       V2_QPC_BYTE_24_VLAN_ID_M,
			       V2_QPC_BYTE_24_VLAN_ID_S, 0);

		if (grh->sgid_index >= hr_dev->caps.gid_table_len[hr_port]) {
			dev_err(hr_dev->dev,
				"sgid_index(%u) too large. max is %d\n",
				grh->sgid_index,
				hr_dev->caps.gid_table_len[hr_port]);
			ret = -EINVAL;
			goto out;
		}

		if (attr->ah_attr.type != RDMA_AH_ATTR_TYPE_ROCE) {
			dev_err(hr_dev->dev, "ah attr is not RDMA roce type\n");
			ret = -EINVAL;
			goto out;
		}

		/* 0x12b7 is the RoCEv2 UDP source port used by this HW. */
		roce_set_field(context->byte_52_udpspn_dmac,
			       V2_QPC_BYTE_52_UDPSPN_M, V2_QPC_BYTE_52_UDPSPN_S,
			       (gid_attr->gid_type != IB_GID_TYPE_ROCE_UDP_ENCAP) ?
			       0 : 0x12b7);

		roce_set_field(qpc_mask->byte_52_udpspn_dmac,
			       V2_QPC_BYTE_52_UDPSPN_M,
			       V2_QPC_BYTE_52_UDPSPN_S, 0);

		roce_set_field(context->byte_20_smac_sgid_idx,
			       V2_QPC_BYTE_20_SGID_IDX_M,
			       V2_QPC_BYTE_20_SGID_IDX_S, grh->sgid_index);

		roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
			       V2_QPC_BYTE_20_SGID_IDX_M,
			       V2_QPC_BYTE_20_SGID_IDX_S, 0);

		roce_set_field(context->byte_24_mtu_tc,
			       V2_QPC_BYTE_24_HOP_LIMIT_M,
			       V2_QPC_BYTE_24_HOP_LIMIT_S, grh->hop_limit);
		roce_set_field(qpc_mask->byte_24_mtu_tc,
			       V2_QPC_BYTE_24_HOP_LIMIT_M,
			       V2_QPC_BYTE_24_HOP_LIMIT_S, 0);

		/*
		 * HIP08 rev 0x21 with RoCEv2 takes only the DSCP part of
		 * the traffic class (drop the 2 ECN bits).
		 */
		if (hr_dev->pci_dev->revision == 0x21 &&
		    gid_attr->gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP)
			roce_set_field(context->byte_24_mtu_tc,
				       V2_QPC_BYTE_24_TC_M, V2_QPC_BYTE_24_TC_S,
				       grh->traffic_class >> 2);
		else
			roce_set_field(context->byte_24_mtu_tc,
				       V2_QPC_BYTE_24_TC_M, V2_QPC_BYTE_24_TC_S,
				       grh->traffic_class);
		roce_set_field(qpc_mask->byte_24_mtu_tc, V2_QPC_BYTE_24_TC_M,
			       V2_QPC_BYTE_24_TC_S, 0);
		roce_set_field(context->byte_28_at_fl, V2_QPC_BYTE_28_FL_M,
			       V2_QPC_BYTE_28_FL_S, grh->flow_label);
		roce_set_field(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_FL_M,
			       V2_QPC_BYTE_28_FL_S, 0);
		memcpy(context->dgid, grh->dgid.raw, sizeof(grh->dgid.raw));
		memset(qpc_mask->dgid, 0, sizeof(grh->dgid.raw));
		roce_set_field(context->byte_28_at_fl, V2_QPC_BYTE_28_SL_M,
			       V2_QPC_BYTE_28_SL_S,
			       rdma_ah_get_sl(&attr->ah_attr));
		roce_set_field(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_SL_M,
			       V2_QPC_BYTE_28_SL_S, 0);
		hr_qp->sl = rdma_ah_get_sl(&attr->ah_attr);
	}

	/* HW supports 5-bit ACK timeout values 0..30 only. */
	if (attr_mask & IB_QP_TIMEOUT) {
		if (attr->timeout < 31) {
			roce_set_field(context->byte_28_at_fl,
				       V2_QPC_BYTE_28_AT_M, V2_QPC_BYTE_28_AT_S,
				       attr->timeout);
			roce_set_field(qpc_mask->byte_28_at_fl,
				       V2_QPC_BYTE_28_AT_M, V2_QPC_BYTE_28_AT_S,
				       0);
		} else {
			dev_warn(dev, "Local ACK timeout shall be 0 to 30.\n");
		}
	}

	if (attr_mask & IB_QP_RETRY_CNT) {
		roce_set_field(context->byte_212_lsn,
			       V2_QPC_BYTE_212_RETRY_NUM_INIT_M,
			       V2_QPC_BYTE_212_RETRY_NUM_INIT_S,
			       attr->retry_cnt);
		roce_set_field(qpc_mask->byte_212_lsn,
			       V2_QPC_BYTE_212_RETRY_NUM_INIT_M,
			       V2_QPC_BYTE_212_RETRY_NUM_INIT_S, 0);

		roce_set_field(context->byte_212_lsn,
			       V2_QPC_BYTE_212_RETRY_CNT_M,
			       V2_QPC_BYTE_212_RETRY_CNT_S,
			       attr->retry_cnt);
		roce_set_field(qpc_mask->byte_212_lsn,
			       V2_QPC_BYTE_212_RETRY_CNT_M,
			       V2_QPC_BYTE_212_RETRY_CNT_S, 0);
	}

	if (attr_mask & IB_QP_RNR_RETRY) {
		roce_set_field(context->byte_244_rnr_rxack,
			       V2_QPC_BYTE_244_RNR_NUM_INIT_M,
			       V2_QPC_BYTE_244_RNR_NUM_INIT_S, attr->rnr_retry);
		roce_set_field(qpc_mask->byte_244_rnr_rxack,
			       V2_QPC_BYTE_244_RNR_NUM_INIT_M,
			       V2_QPC_BYTE_244_RNR_NUM_INIT_S, 0);

		roce_set_field(context->byte_244_rnr_rxack,
			       V2_QPC_BYTE_244_RNR_CNT_M,
			       V2_QPC_BYTE_244_RNR_CNT_S, attr->rnr_retry);
		roce_set_field(qpc_mask->byte_244_rnr_rxack,
			       V2_QPC_BYTE_244_RNR_CNT_M,
			       V2_QPC_BYTE_244_RNR_CNT_S, 0);
	}

	/* Seed every PSN-tracking field from the requested send PSN. */
	if (attr_mask & IB_QP_SQ_PSN) {
		roce_set_field(context->byte_172_sq_psn,
			       V2_QPC_BYTE_172_SQ_CUR_PSN_M,
			       V2_QPC_BYTE_172_SQ_CUR_PSN_S, attr->sq_psn);
		roce_set_field(qpc_mask->byte_172_sq_psn,
			       V2_QPC_BYTE_172_SQ_CUR_PSN_M,
			       V2_QPC_BYTE_172_SQ_CUR_PSN_S, 0);

		roce_set_field(context->byte_196_sq_psn,
			       V2_QPC_BYTE_196_SQ_MAX_PSN_M,
			       V2_QPC_BYTE_196_SQ_MAX_PSN_S, attr->sq_psn);
		roce_set_field(qpc_mask->byte_196_sq_psn,
			       V2_QPC_BYTE_196_SQ_MAX_PSN_M,
			       V2_QPC_BYTE_196_SQ_MAX_PSN_S, 0);

		roce_set_field(context->byte_220_retry_psn_msn,
			       V2_QPC_BYTE_220_RETRY_MSG_PSN_M,
			       V2_QPC_BYTE_220_RETRY_MSG_PSN_S, attr->sq_psn);
		roce_set_field(qpc_mask->byte_220_retry_psn_msn,
			       V2_QPC_BYTE_220_RETRY_MSG_PSN_M,
			       V2_QPC_BYTE_220_RETRY_MSG_PSN_S, 0);

		roce_set_field(context->byte_224_retry_msg,
			       V2_QPC_BYTE_224_RETRY_MSG_PSN_M,
			       V2_QPC_BYTE_224_RETRY_MSG_PSN_S,
			       attr->sq_psn >> V2_QPC_BYTE_220_RETRY_MSG_PSN_S);
		roce_set_field(qpc_mask->byte_224_retry_msg,
			       V2_QPC_BYTE_224_RETRY_MSG_PSN_M,
			       V2_QPC_BYTE_224_RETRY_MSG_PSN_S, 0);

		roce_set_field(context->byte_224_retry_msg,
			       V2_QPC_BYTE_224_RETRY_MSG_FPKT_PSN_M,
			       V2_QPC_BYTE_224_RETRY_MSG_FPKT_PSN_S,
			       attr->sq_psn);
		roce_set_field(qpc_mask->byte_224_retry_msg,
			       V2_QPC_BYTE_224_RETRY_MSG_FPKT_PSN_M,
			       V2_QPC_BYTE_224_RETRY_MSG_FPKT_PSN_S, 0);

		roce_set_field(context->byte_244_rnr_rxack,
			       V2_QPC_BYTE_244_RX_ACK_EPSN_M,
			       V2_QPC_BYTE_244_RX_ACK_EPSN_S, attr->sq_psn);
		roce_set_field(qpc_mask->byte_244_rnr_rxack,
			       V2_QPC_BYTE_244_RX_ACK_EPSN_M,
			       V2_QPC_BYTE_244_RX_ACK_EPSN_S, 0);
	}

	/* RD-atomic depths are stored as log2 (fls of value - 1). */
	if ((attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) &&
	    attr->max_dest_rd_atomic) {
		roce_set_field(context->byte_140_raq, V2_QPC_BYTE_140_RR_MAX_M,
			       V2_QPC_BYTE_140_RR_MAX_S,
			       fls(attr->max_dest_rd_atomic - 1));
		roce_set_field(qpc_mask->byte_140_raq, V2_QPC_BYTE_140_RR_MAX_M,
			       V2_QPC_BYTE_140_RR_MAX_S, 0);
	}

	if ((attr_mask & IB_QP_MAX_QP_RD_ATOMIC) && attr->max_rd_atomic) {
		roce_set_field(context->byte_208_irrl, V2_QPC_BYTE_208_SR_MAX_M,
			       V2_QPC_BYTE_208_SR_MAX_S,
			       fls(attr->max_rd_atomic - 1));
		roce_set_field(qpc_mask->byte_208_irrl,
			       V2_QPC_BYTE_208_SR_MAX_M,
			       V2_QPC_BYTE_208_SR_MAX_S, 0);
	}

	if (attr_mask & (IB_QP_ACCESS_FLAGS | IB_QP_MAX_DEST_RD_ATOMIC))
		set_access_flags(hr_qp, context, qpc_mask, attr, attr_mask);

	if (attr_mask & IB_QP_MIN_RNR_TIMER) {
		roce_set_field(context->byte_80_rnr_rx_cqn,
			       V2_QPC_BYTE_80_MIN_RNR_TIME_M,
			       V2_QPC_BYTE_80_MIN_RNR_TIME_S,
			       attr->min_rnr_timer);
		roce_set_field(qpc_mask->byte_80_rnr_rx_cqn,
			       V2_QPC_BYTE_80_MIN_RNR_TIME_M,
			       V2_QPC_BYTE_80_MIN_RNR_TIME_S, 0);
	}

	/* RC&UC required attr */
	if (attr_mask & IB_QP_RQ_PSN) {
		roce_set_field(context->byte_108_rx_reqepsn,
			       V2_QPC_BYTE_108_RX_REQ_EPSN_M,
			       V2_QPC_BYTE_108_RX_REQ_EPSN_S, attr->rq_psn);
		roce_set_field(qpc_mask->byte_108_rx_reqepsn,
			       V2_QPC_BYTE_108_RX_REQ_EPSN_M,
			       V2_QPC_BYTE_108_RX_REQ_EPSN_S, 0);

		roce_set_field(context->byte_152_raq, V2_QPC_BYTE_152_RAQ_PSN_M,
			       V2_QPC_BYTE_152_RAQ_PSN_S, attr->rq_psn - 1);
		roce_set_field(qpc_mask->byte_152_raq,
			       V2_QPC_BYTE_152_RAQ_PSN_M,
			       V2_QPC_BYTE_152_RAQ_PSN_S, 0);
	}

	if (attr_mask & IB_QP_QKEY) {
		context->qkey_xrcd = attr->qkey;
		qpc_mask->qkey_xrcd = 0;
		hr_qp->qkey = attr->qkey;
	}

	roce_set_bit(context->byte_108_rx_reqepsn, V2_QPC_BYTE_108_INV_CREDIT_S,
		     ibqp->srq ? 1 : 0);
	roce_set_bit(qpc_mask->byte_108_rx_reqepsn,
		     V2_QPC_BYTE_108_INV_CREDIT_S, 0);

	/* Every status migrate must change state */
	roce_set_field(context->byte_60_qpst_tempid, V2_QPC_BYTE_60_QP_ST_M,
		       V2_QPC_BYTE_60_QP_ST_S, new_state);
	roce_set_field(qpc_mask->byte_60_qpst_tempid, V2_QPC_BYTE_60_QP_ST_M,
		       V2_QPC_BYTE_60_QP_ST_S, 0);

	/* SW pass context to HW */
	ret = hns_roce_v2_qp_modify(hr_dev, cur_state, new_state,
				    context, hr_qp);
	if (ret) {
		dev_err(dev, "hns_roce_qp_modify failed(%d)\n", ret);
		goto out;
	}

	hr_qp->state = new_state;

	/* Mirror the accepted attributes into the software QP state. */
	if (attr_mask & IB_QP_ACCESS_FLAGS)
		hr_qp->atomic_rd_en = attr->qp_access_flags;

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
		hr_qp->resp_depth = attr->max_dest_rd_atomic;
	if (attr_mask & IB_QP_PORT) {
		hr_qp->port = attr->port_num - 1;
		hr_qp->phy_port = hr_dev->iboe.phy_port[hr_qp->port];
	}

	/* Kernel QPs returning to RESET: purge CQEs and rewind queues. */
	if (new_state == IB_QPS_RESET && !ibqp->uobject) {
		hns_roce_v2_cq_clean(to_hr_cq(ibqp->recv_cq), hr_qp->qpn,
				     ibqp->srq ? to_hr_srq(ibqp->srq) : NULL);
		if (ibqp->send_cq != ibqp->recv_cq)
			hns_roce_v2_cq_clean(to_hr_cq(ibqp->send_cq),
					     hr_qp->qpn, NULL);

		hr_qp->rq.head = 0;
		hr_qp->rq.tail = 0;
		hr_qp->sq.head = 0;
		hr_qp->sq.tail = 0;
		hr_qp->sq_next_wqe = 0;
		hr_qp->next_sge = 0;
		if (hr_qp->rq.wqe_cnt)
			*hr_qp->rdb.db_record = 0;
	}

out:
	kfree(context);
	return ret;
}
4353
4354static inline enum ib_qp_state to_ib_qp_st(enum hns_roce_v2_qp_state state)
4355{
4356 switch (state) {
4357 case HNS_ROCE_QP_ST_RST: return IB_QPS_RESET;
4358 case HNS_ROCE_QP_ST_INIT: return IB_QPS_INIT;
4359 case HNS_ROCE_QP_ST_RTR: return IB_QPS_RTR;
4360 case HNS_ROCE_QP_ST_RTS: return IB_QPS_RTS;
4361 case HNS_ROCE_QP_ST_SQ_DRAINING:
4362 case HNS_ROCE_QP_ST_SQD: return IB_QPS_SQD;
4363 case HNS_ROCE_QP_ST_SQER: return IB_QPS_SQE;
4364 case HNS_ROCE_QP_ST_ERR: return IB_QPS_ERR;
4365 default: return -1;
4366 }
4367}
4368
4369static int hns_roce_v2_query_qpc(struct hns_roce_dev *hr_dev,
4370 struct hns_roce_qp *hr_qp,
4371 struct hns_roce_v2_qp_context *hr_context)
4372{
4373 struct hns_roce_cmd_mailbox *mailbox;
4374 int ret;
4375
4376 mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
4377 if (IS_ERR(mailbox))
4378 return PTR_ERR(mailbox);
4379
4380 ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, hr_qp->qpn, 0,
4381 HNS_ROCE_CMD_QUERY_QPC,
4382 HNS_ROCE_CMD_TIMEOUT_MSECS);
4383 if (ret) {
4384 dev_err(hr_dev->dev, "QUERY QP cmd process error\n");
4385 goto out;
4386 }
4387
4388 memcpy(hr_context, mailbox->buf, sizeof(*hr_context));
4389
4390out:
4391 hns_roce_free_cmd_mailbox(hr_dev, mailbox);
4392 return ret;
4393}
4394
/*
 * ib_device::query_qp for the v2 engine: fetch the hardware QP context
 * and decode it into @qp_attr / @qp_init_attr.
 *
 * A QP known to be in RESET skips the hardware query entirely. The
 * software copy of the state (hr_qp->state) is refreshed from hardware
 * as a side effect. Returns 0 on success or -EINVAL/-ENOMEM on failure.
 */
static int hns_roce_v2_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
				int qp_attr_mask,
				struct ib_qp_init_attr *qp_init_attr)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
	struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
	struct hns_roce_v2_qp_context *context;
	struct device *dev = hr_dev->dev;
	int tmp_qp_state;
	int state;
	int ret;

	context = kzalloc(sizeof(*context), GFP_KERNEL);
	if (!context)
		return -ENOMEM;

	memset(qp_attr, 0, sizeof(*qp_attr));
	memset(qp_init_attr, 0, sizeof(*qp_init_attr));

	/* Serialize against concurrent modify_qp on the same QP. */
	mutex_lock(&hr_qp->mutex);

	if (hr_qp->state == IB_QPS_RESET) {
		qp_attr->qp_state = IB_QPS_RESET;
		ret = 0;
		goto done;
	}

	ret = hns_roce_v2_query_qpc(hr_dev, hr_qp, context);
	if (ret) {
		dev_err(dev, "query qpc error\n");
		ret = -EINVAL;
		goto out;
	}

	state = roce_get_field(context->byte_60_qpst_tempid,
			       V2_QPC_BYTE_60_QP_ST_M, V2_QPC_BYTE_60_QP_ST_S);
	tmp_qp_state = to_ib_qp_st((enum hns_roce_v2_qp_state)state);
	if (tmp_qp_state == -1) {
		dev_err(dev, "Illegal ib_qp_state\n");
		ret = -EINVAL;
		goto out;
	}
	/* Refresh the cached software state from hardware. */
	hr_qp->state = (u8)tmp_qp_state;
	qp_attr->qp_state = (enum ib_qp_state)hr_qp->state;
	qp_attr->path_mtu = (enum ib_mtu)roce_get_field(context->byte_24_mtu_tc,
							V2_QPC_BYTE_24_MTU_M,
							V2_QPC_BYTE_24_MTU_S);
	qp_attr->path_mig_state = IB_MIG_ARMED;
	qp_attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE;
	if (hr_qp->ibqp.qp_type == IB_QPT_UD)
		qp_attr->qkey = V2_QKEY_VAL;

	qp_attr->rq_psn = roce_get_field(context->byte_108_rx_reqepsn,
					 V2_QPC_BYTE_108_RX_REQ_EPSN_M,
					 V2_QPC_BYTE_108_RX_REQ_EPSN_S);
	qp_attr->sq_psn = (u32)roce_get_field(context->byte_172_sq_psn,
					      V2_QPC_BYTE_172_SQ_CUR_PSN_M,
					      V2_QPC_BYTE_172_SQ_CUR_PSN_S);
	qp_attr->dest_qp_num = (u8)roce_get_field(context->byte_56_dqpn_err,
						  V2_QPC_BYTE_56_DQPN_M,
						  V2_QPC_BYTE_56_DQPN_S);
	/*
	 * NOTE(review): the RRE bit is shifted by V2_QP_RWE_S and the RWE
	 * bit by V2_QP_RRE_S — the read/write shifts look swapped; verify
	 * against set_access_flags() and the V2_QP_*_S definitions.
	 */
	qp_attr->qp_access_flags = ((roce_get_bit(context->byte_76_srqn_op_en,
				    V2_QPC_BYTE_76_RRE_S)) << V2_QP_RWE_S) |
				   ((roce_get_bit(context->byte_76_srqn_op_en,
				    V2_QPC_BYTE_76_RWE_S)) << V2_QP_RRE_S) |
				   ((roce_get_bit(context->byte_76_srqn_op_en,
				    V2_QPC_BYTE_76_ATE_S)) << V2_QP_ATE_S);

	if (hr_qp->ibqp.qp_type == IB_QPT_RC ||
	    hr_qp->ibqp.qp_type == IB_QPT_UC) {
		struct ib_global_route *grh =
			rdma_ah_retrieve_grh(&qp_attr->ah_attr);

		rdma_ah_set_sl(&qp_attr->ah_attr,
			       roce_get_field(context->byte_28_at_fl,
					      V2_QPC_BYTE_28_SL_M,
					      V2_QPC_BYTE_28_SL_S));
		grh->flow_label = roce_get_field(context->byte_28_at_fl,
						 V2_QPC_BYTE_28_FL_M,
						 V2_QPC_BYTE_28_FL_S);
		grh->sgid_index = roce_get_field(context->byte_20_smac_sgid_idx,
						 V2_QPC_BYTE_20_SGID_IDX_M,
						 V2_QPC_BYTE_20_SGID_IDX_S);
		grh->hop_limit = roce_get_field(context->byte_24_mtu_tc,
						V2_QPC_BYTE_24_HOP_LIMIT_M,
						V2_QPC_BYTE_24_HOP_LIMIT_S);
		grh->traffic_class = roce_get_field(context->byte_24_mtu_tc,
						    V2_QPC_BYTE_24_TC_M,
						    V2_QPC_BYTE_24_TC_S);

		memcpy(grh->dgid.raw, context->dgid, sizeof(grh->dgid.raw));
	}

	qp_attr->port_num = hr_qp->port + 1;
	qp_attr->sq_draining = 0;
	/* Depths are stored as log2 in the context; undo with 1 << x. */
	qp_attr->max_rd_atomic = 1 << roce_get_field(context->byte_208_irrl,
						     V2_QPC_BYTE_208_SR_MAX_M,
						     V2_QPC_BYTE_208_SR_MAX_S);
	qp_attr->max_dest_rd_atomic = 1 << roce_get_field(context->byte_140_raq,
						     V2_QPC_BYTE_140_RR_MAX_M,
						     V2_QPC_BYTE_140_RR_MAX_S);
	qp_attr->min_rnr_timer = (u8)roce_get_field(context->byte_80_rnr_rx_cqn,
						V2_QPC_BYTE_80_MIN_RNR_TIME_M,
						V2_QPC_BYTE_80_MIN_RNR_TIME_S);
	qp_attr->timeout = (u8)roce_get_field(context->byte_28_at_fl,
					      V2_QPC_BYTE_28_AT_M,
					      V2_QPC_BYTE_28_AT_S);
	qp_attr->retry_cnt = roce_get_field(context->byte_212_lsn,
					    V2_QPC_BYTE_212_RETRY_CNT_M,
					    V2_QPC_BYTE_212_RETRY_CNT_S);
	qp_attr->rnr_retry = context->rq_rnr_timer;

done:
	/* Capacity fields come from software state, even for RESET QPs. */
	qp_attr->cur_qp_state = qp_attr->qp_state;
	qp_attr->cap.max_recv_wr = hr_qp->rq.wqe_cnt;
	qp_attr->cap.max_recv_sge = hr_qp->rq.max_gs;

	if (!ibqp->uobject) {
		qp_attr->cap.max_send_wr = hr_qp->sq.wqe_cnt;
		qp_attr->cap.max_send_sge = hr_qp->sq.max_gs;
	} else {
		qp_attr->cap.max_send_wr = 0;
		qp_attr->cap.max_send_sge = 0;
	}

	qp_init_attr->cap = qp_attr->cap;

out:
	mutex_unlock(&hr_qp->mutex);
	kfree(context);
	return ret;
}
4527
4528static int hns_roce_v2_destroy_qp_common(struct hns_roce_dev *hr_dev,
4529 struct hns_roce_qp *hr_qp,
bdeacabd 4530 struct ib_udata *udata)
926a01dc
WHX
4531{
4532 struct hns_roce_cq *send_cq, *recv_cq;
4533 struct device *dev = hr_dev->dev;
4534 int ret;
4535
4536 if (hr_qp->ibqp.qp_type == IB_QPT_RC && hr_qp->state != IB_QPS_RESET) {
4537 /* Modify qp to reset before destroying qp */
4538 ret = hns_roce_v2_modify_qp(&hr_qp->ibqp, NULL, 0,
4539 hr_qp->state, IB_QPS_RESET);
4540 if (ret) {
4541 dev_err(dev, "modify QP %06lx to ERR failed.\n",
4542 hr_qp->qpn);
4543 return ret;
4544 }
4545 }
4546
4547 send_cq = to_hr_cq(hr_qp->ibqp.send_cq);
4548 recv_cq = to_hr_cq(hr_qp->ibqp.recv_cq);
4549
4550 hns_roce_lock_cqs(send_cq, recv_cq);
4551
bdeacabd 4552 if (!udata) {
926a01dc
WHX
4553 __hns_roce_v2_cq_clean(recv_cq, hr_qp->qpn, hr_qp->ibqp.srq ?
4554 to_hr_srq(hr_qp->ibqp.srq) : NULL);
4555 if (send_cq != recv_cq)
4556 __hns_roce_v2_cq_clean(send_cq, hr_qp->qpn, NULL);
4557 }
4558
4559 hns_roce_qp_remove(hr_dev, hr_qp);
4560
4561 hns_roce_unlock_cqs(send_cq, recv_cq);
4562
4563 hns_roce_qp_free(hr_dev, hr_qp);
4564
4565 /* Not special_QP, free their QPN */
4566 if ((hr_qp->ibqp.qp_type == IB_QPT_RC) ||
4567 (hr_qp->ibqp.qp_type == IB_QPT_UC) ||
4568 (hr_qp->ibqp.qp_type == IB_QPT_UD))
4569 hns_roce_release_range_qp(hr_dev, hr_qp->qpn, 1);
4570
4571 hns_roce_mtt_cleanup(hr_dev, &hr_qp->mtt);
4572
bdeacabd
SR
4573 if (udata) {
4574 struct hns_roce_ucontext *context =
4575 rdma_udata_to_drv_context(
4576 udata,
4577 struct hns_roce_ucontext,
4578 ibucontext);
4579
0425e3e6 4580 if (hr_qp->sq.wqe_cnt && (hr_qp->sdb_en == 1))
bdeacabd 4581 hns_roce_db_unmap_user(context, &hr_qp->sdb);
0425e3e6 4582
e088a685 4583 if (hr_qp->rq.wqe_cnt && (hr_qp->rdb_en == 1))
bdeacabd 4584 hns_roce_db_unmap_user(context, &hr_qp->rdb);
926a01dc
WHX
4585 } else {
4586 kfree(hr_qp->sq.wrid);
4587 kfree(hr_qp->rq.wrid);
4588 hns_roce_buf_free(hr_dev, hr_qp->buff_size, &hr_qp->hr_buf);
472bc0fb
YL
4589 if (hr_qp->rq.wqe_cnt)
4590 hns_roce_free_db(hr_dev, &hr_qp->rdb);
926a01dc 4591 }
836a0fbb 4592 ib_umem_release(hr_qp->umem);
926a01dc 4593
c7bcb134
LO
4594 if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) &&
4595 hr_qp->rq.wqe_cnt) {
0009c2db 4596 kfree(hr_qp->rq_inl_buf.wqe_list[0].sg_list);
4597 kfree(hr_qp->rq_inl_buf.wqe_list);
4598 }
4599
926a01dc
WHX
4600 return 0;
4601}
4602
c4367a26 4603static int hns_roce_v2_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
926a01dc
WHX
4604{
4605 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
4606 struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
4607 int ret;
4608
bdeacabd 4609 ret = hns_roce_v2_destroy_qp_common(hr_dev, hr_qp, udata);
926a01dc
WHX
4610 if (ret) {
4611 dev_err(hr_dev->dev, "Destroy qp failed(%d)\n", ret);
4612 return ret;
4613 }
4614
4615 if (hr_qp->ibqp.qp_type == IB_QPT_GSI)
4616 kfree(hr_to_hr_sqp(hr_qp));
4617 else
4618 kfree(hr_qp);
4619
4620 return 0;
4621}
4622
aa84fa18
YL
/* Initialize (clear) the flow-control SCC context of a QP through three
 * command-queue steps:
 *   1) RESET_SCCC resets the hardware "clear done" flag,
 *   2) CLR_SCCC asks hardware to wipe the SCC context of this QPN,
 *   3) QUERY_SCCC is polled until hardware reports the clear finished.
 * Serialized by scc_mutex since the commands operate on shared state.
 * Returns 0 on success, a cmq errno, or -ETIMEDOUT if polling expires.
 */
static int hns_roce_v2_qp_flow_control_init(struct hns_roce_dev *hr_dev,
					    struct hns_roce_qp *hr_qp)
{
	struct hns_roce_sccc_clr_done *resp;
	struct hns_roce_sccc_clr *clr;
	struct hns_roce_cmq_desc desc;
	int ret, i;

	mutex_lock(&hr_dev->qp_table.scc_mutex);

	/* set scc ctx clear done flag */
	hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_RESET_SCCC, false);
	ret = hns_roce_cmq_send(hr_dev, &desc, 1);
	if (ret) {
		dev_err(hr_dev->dev, "Reset SCC ctx failed(%d)\n", ret);
		goto out;
	}

	/* clear scc context */
	hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CLR_SCCC, false);
	clr = (struct hns_roce_sccc_clr *)desc.data;
	clr->qpn = cpu_to_le32(hr_qp->qpn);
	ret = hns_roce_cmq_send(hr_dev, &desc, 1);
	if (ret) {
		dev_err(hr_dev->dev, "Clear SCC ctx failed(%d)\n", ret);
		goto out;
	}

	/* query scc context clear is done or not */
	resp = (struct hns_roce_sccc_clr_done *)desc.data;
	for (i = 0; i <= HNS_ROCE_CMQ_SCC_CLR_DONE_CNT; i++) {
		hns_roce_cmq_setup_basic_desc(&desc,
					      HNS_ROCE_OPC_QUERY_SCCC, true);
		ret = hns_roce_cmq_send(hr_dev, &desc, 1);
		if (ret) {
			dev_err(hr_dev->dev, "Query clr cmq failed(%d)\n", ret);
			goto out;
		}

		if (resp->clr_done)
			goto out;

		/* give hardware some time before polling again */
		msleep(20);
	}

	dev_err(hr_dev->dev, "Query SCC clr done flag overtime.\n");
	ret = -ETIMEDOUT;

out:
	mutex_unlock(&hr_dev->qp_table.scc_mutex);
	return ret;
}
4675
b156269d 4676static int hns_roce_v2_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
4677{
4678 struct hns_roce_dev *hr_dev = to_hr_dev(cq->device);
4679 struct hns_roce_v2_cq_context *cq_context;
4680 struct hns_roce_cq *hr_cq = to_hr_cq(cq);
4681 struct hns_roce_v2_cq_context *cqc_mask;
4682 struct hns_roce_cmd_mailbox *mailbox;
4683 int ret;
4684
4685 mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
4686 if (IS_ERR(mailbox))
4687 return PTR_ERR(mailbox);
4688
4689 cq_context = mailbox->buf;
4690 cqc_mask = (struct hns_roce_v2_cq_context *)mailbox->buf + 1;
4691
4692 memset(cqc_mask, 0xff, sizeof(*cqc_mask));
4693
4694 roce_set_field(cq_context->byte_56_cqe_period_maxcnt,
4695 V2_CQC_BYTE_56_CQ_MAX_CNT_M, V2_CQC_BYTE_56_CQ_MAX_CNT_S,
4696 cq_count);
4697 roce_set_field(cqc_mask->byte_56_cqe_period_maxcnt,
4698 V2_CQC_BYTE_56_CQ_MAX_CNT_M, V2_CQC_BYTE_56_CQ_MAX_CNT_S,
4699 0);
4700 roce_set_field(cq_context->byte_56_cqe_period_maxcnt,
4701 V2_CQC_BYTE_56_CQ_PERIOD_M, V2_CQC_BYTE_56_CQ_PERIOD_S,
4702 cq_period);
4703 roce_set_field(cqc_mask->byte_56_cqe_period_maxcnt,
4704 V2_CQC_BYTE_56_CQ_PERIOD_M, V2_CQC_BYTE_56_CQ_PERIOD_S,
4705 0);
4706
4707 ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, hr_cq->cqn, 1,
4708 HNS_ROCE_CMD_MODIFY_CQC,
4709 HNS_ROCE_CMD_TIMEOUT_MSECS);
4710 hns_roce_free_cmd_mailbox(hr_dev, mailbox);
4711 if (ret)
4712 dev_err(hr_dev->dev, "MODIFY CQ Failed to cmd mailbox.\n");
4713
4714 return ret;
4715}
4716
0425e3e6
YL
/* Push one QP into the ERROR state so its outstanding WRs are flushed;
 * invoked from the abnormal-event IRQ work for WQ error events.
 * For user-space QPs the SQ/RQ producer heads are first read back from
 * the record-doorbell pages so the flush starts at the right position.
 */
static void hns_roce_set_qps_to_err(struct hns_roce_dev *hr_dev, u32 qpn)
{
	struct hns_roce_qp *hr_qp;
	struct ib_qp_attr attr;
	int attr_mask;
	int ret;

	hr_qp = __hns_roce_qp_lookup(hr_dev, qpn);
	if (!hr_qp) {
		dev_warn(hr_dev->dev, "no hr_qp can be found!\n");
		return;
	}

	if (hr_qp->ibqp.uobject) {
		if (hr_qp->sdb_en == 1) {
			/* record doorbells hold the user's current heads */
			hr_qp->sq.head = *(int *)(hr_qp->sdb.virt_addr);
			if (hr_qp->rdb_en == 1)
				hr_qp->rq.head = *(int *)(hr_qp->rdb.virt_addr);
		} else {
			/* without an SQ record doorbell we cannot flush */
			dev_warn(hr_dev->dev, "flush cqe is unsupported in userspace!\n");
			return;
		}
	}

	/* only qp_state is read under IB_QP_STATE; attr is otherwise unused */
	attr_mask = IB_QP_STATE;
	attr.qp_state = IB_QPS_ERR;
	ret = hns_roce_v2_modify_qp(&hr_qp->ibqp, &attr, attr_mask,
				    hr_qp->state, IB_QPS_ERR);
	if (ret)
		dev_err(hr_dev->dev, "failed to modify qp %d to err state.\n",
			qpn);
}
4749
/* Deferred (workqueue) handler for asynchronous events queued by
 * hns_roce_v2_init_irq_work(): logs each event and, for work-queue
 * error events, moves the affected QP to the ERROR state.  Runs in
 * process context, so it may sleep (unlike the IRQ handler itself).
 * Frees the hns_roce_work allocated by the IRQ path.
 */
static void hns_roce_irq_work_handle(struct work_struct *work)
{
	struct hns_roce_work *irq_work =
		container_of(work, struct hns_roce_work, work);
	struct device *dev = irq_work->hr_dev->dev;
	u32 qpn = irq_work->qpn;
	u32 cqn = irq_work->cqn;

	switch (irq_work->event_type) {
	case HNS_ROCE_EVENT_TYPE_PATH_MIG:
		dev_info(dev, "Path migrated succeeded.\n");
		break;
	case HNS_ROCE_EVENT_TYPE_PATH_MIG_FAILED:
		dev_warn(dev, "Path migration failed.\n");
		break;
	case HNS_ROCE_EVENT_TYPE_COMM_EST:
		/* communication established: nothing to report */
		break;
	case HNS_ROCE_EVENT_TYPE_SQ_DRAINED:
		dev_warn(dev, "Send queue drained.\n");
		break;
	/* the three WQ error types below also flush the QP to ERR */
	case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
		dev_err(dev, "Local work queue 0x%x catas error, sub_type:%d\n",
			qpn, irq_work->sub_type);
		hns_roce_set_qps_to_err(irq_work->hr_dev, qpn);
		break;
	case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
		dev_err(dev, "Invalid request local work queue 0x%x error.\n",
			qpn);
		hns_roce_set_qps_to_err(irq_work->hr_dev, qpn);
		break;
	case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
		dev_err(dev, "Local access violation work queue 0x%x error, sub_type:%d\n",
			qpn, irq_work->sub_type);
		hns_roce_set_qps_to_err(irq_work->hr_dev, qpn);
		break;
	case HNS_ROCE_EVENT_TYPE_SRQ_LIMIT_REACH:
		dev_warn(dev, "SRQ limit reach.\n");
		break;
	case HNS_ROCE_EVENT_TYPE_SRQ_LAST_WQE_REACH:
		dev_warn(dev, "SRQ last wqe reach.\n");
		break;
	case HNS_ROCE_EVENT_TYPE_SRQ_CATAS_ERROR:
		dev_err(dev, "SRQ catas error.\n");
		break;
	case HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR:
		dev_err(dev, "CQ 0x%x access err.\n", cqn);
		break;
	case HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW:
		dev_warn(dev, "CQ 0x%x overflow\n", cqn);
		break;
	case HNS_ROCE_EVENT_TYPE_DB_OVERFLOW:
		dev_warn(dev, "DB overflow.\n");
		break;
	case HNS_ROCE_EVENT_TYPE_FLR:
		dev_warn(dev, "Function level reset.\n");
		break;
	default:
		break;
	}

	kfree(irq_work);
}
4812
4813static void hns_roce_v2_init_irq_work(struct hns_roce_dev *hr_dev,
b00a92c8 4814 struct hns_roce_eq *eq,
4815 u32 qpn, u32 cqn)
0425e3e6
YL
4816{
4817 struct hns_roce_work *irq_work;
4818
4819 irq_work = kzalloc(sizeof(struct hns_roce_work), GFP_ATOMIC);
4820 if (!irq_work)
4821 return;
4822
4823 INIT_WORK(&(irq_work->work), hns_roce_irq_work_handle);
4824 irq_work->hr_dev = hr_dev;
4825 irq_work->qpn = qpn;
b00a92c8 4826 irq_work->cqn = cqn;
0425e3e6
YL
4827 irq_work->event_type = eq->event_type;
4828 irq_work->sub_type = eq->sub_type;
4829 queue_work(hr_dev->irq_workq, &(irq_work->work));
4830}
4831
a5073d60
YL
4832static void set_eq_cons_index_v2(struct hns_roce_eq *eq)
4833{
d3743fa9 4834 struct hns_roce_dev *hr_dev = eq->hr_dev;
a5073d60
YL
4835 u32 doorbell[2];
4836
4837 doorbell[0] = 0;
4838 doorbell[1] = 0;
4839
4840 if (eq->type_flag == HNS_ROCE_AEQ) {
4841 roce_set_field(doorbell[0], HNS_ROCE_V2_EQ_DB_CMD_M,
4842 HNS_ROCE_V2_EQ_DB_CMD_S,
4843 eq->arm_st == HNS_ROCE_V2_EQ_ALWAYS_ARMED ?
4844 HNS_ROCE_EQ_DB_CMD_AEQ :
4845 HNS_ROCE_EQ_DB_CMD_AEQ_ARMED);
4846 } else {
4847 roce_set_field(doorbell[0], HNS_ROCE_V2_EQ_DB_TAG_M,
4848 HNS_ROCE_V2_EQ_DB_TAG_S, eq->eqn);
4849
4850 roce_set_field(doorbell[0], HNS_ROCE_V2_EQ_DB_CMD_M,
4851 HNS_ROCE_V2_EQ_DB_CMD_S,
4852 eq->arm_st == HNS_ROCE_V2_EQ_ALWAYS_ARMED ?
4853 HNS_ROCE_EQ_DB_CMD_CEQ :
4854 HNS_ROCE_EQ_DB_CMD_CEQ_ARMED);
4855 }
4856
4857 roce_set_field(doorbell[1], HNS_ROCE_V2_EQ_DB_PARA_M,
4858 HNS_ROCE_V2_EQ_DB_PARA_S,
4859 (eq->cons_index & HNS_ROCE_V2_CONS_IDX_M));
4860
d3743fa9 4861 hns_roce_write64(hr_dev, doorbell, eq->doorbell);
a5073d60
YL
4862}
4863
a5073d60
YL
4864static struct hns_roce_aeqe *get_aeqe_v2(struct hns_roce_eq *eq, u32 entry)
4865{
4866 u32 buf_chk_sz;
4867 unsigned long off;
4868
4869 buf_chk_sz = 1 << (eq->eqe_buf_pg_sz + PAGE_SHIFT);
4870 off = (entry & (eq->entries - 1)) * HNS_ROCE_AEQ_ENTRY_SIZE;
4871
4872 return (struct hns_roce_aeqe *)((char *)(eq->buf_list->buf) +
4873 off % buf_chk_sz);
4874}
4875
4876static struct hns_roce_aeqe *mhop_get_aeqe(struct hns_roce_eq *eq, u32 entry)
4877{
4878 u32 buf_chk_sz;
4879 unsigned long off;
4880
4881 buf_chk_sz = 1 << (eq->eqe_buf_pg_sz + PAGE_SHIFT);
4882
4883 off = (entry & (eq->entries - 1)) * HNS_ROCE_AEQ_ENTRY_SIZE;
4884
4885 if (eq->hop_num == HNS_ROCE_HOP_NUM_0)
4886 return (struct hns_roce_aeqe *)((u8 *)(eq->bt_l0) +
4887 off % buf_chk_sz);
4888 else
4889 return (struct hns_roce_aeqe *)((u8 *)
4890 (eq->buf[off / buf_chk_sz]) + off % buf_chk_sz);
4891}
4892
/* Return the next unconsumed AEQE, or NULL when the AEQ is empty.
 * Validity is decided by the owner bit: the producer toggles it on each
 * wrap of the 2 * entries index space, so an entry is new exactly when
 * its owner bit differs from (XORs with) bit 'entries' of cons_index.
 */
static struct hns_roce_aeqe *next_aeqe_sw_v2(struct hns_roce_eq *eq)
{
	struct hns_roce_aeqe *aeqe;

	if (!eq->hop_num)
		aeqe = get_aeqe_v2(eq, eq->cons_index);
	else
		aeqe = mhop_get_aeqe(eq, eq->cons_index);

	return (roce_get_bit(aeqe->asyn, HNS_ROCE_V2_AEQ_AEQE_OWNER_S) ^
		!!(eq->cons_index & eq->entries)) ? aeqe : NULL;
}
4905
/* Drain the asynchronous event queue from interrupt context.
 * Each valid AEQE is decoded and dispatched to the QP/SRQ/CQ/command
 * event handlers; a work item is then queued for deferred reporting.
 * Finally the consumer index is written back to hardware.
 * Returns nonzero if at least one AEQE was processed (IRQ was ours).
 */
static int hns_roce_v2_aeq_int(struct hns_roce_dev *hr_dev,
			       struct hns_roce_eq *eq)
{
	struct device *dev = hr_dev->dev;
	struct hns_roce_aeqe *aeqe;
	int aeqe_found = 0;
	int event_type;
	int sub_type;
	u32 srqn;
	u32 qpn;
	u32 cqn;

	while ((aeqe = next_aeqe_sw_v2(eq))) {

		/* Make sure we read AEQ entry after we have checked the
		 * ownership bit
		 */
		dma_rmb();

		event_type = roce_get_field(aeqe->asyn,
					    HNS_ROCE_V2_AEQE_EVENT_TYPE_M,
					    HNS_ROCE_V2_AEQE_EVENT_TYPE_S);
		sub_type = roce_get_field(aeqe->asyn,
					  HNS_ROCE_V2_AEQE_SUB_TYPE_M,
					  HNS_ROCE_V2_AEQE_SUB_TYPE_S);
		/* qpn/cqn/srqn share one queue-number field; only the one
		 * matching the event type is meaningful.
		 */
		qpn = roce_get_field(aeqe->event.qp_event.qp,
				     HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_M,
				     HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_S);
		cqn = roce_get_field(aeqe->event.cq_event.cq,
				     HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_M,
				     HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_S);
		srqn = roce_get_field(aeqe->event.srq_event.srq,
				      HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_M,
				      HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_S);

		switch (event_type) {
		case HNS_ROCE_EVENT_TYPE_PATH_MIG:
		case HNS_ROCE_EVENT_TYPE_PATH_MIG_FAILED:
		case HNS_ROCE_EVENT_TYPE_COMM_EST:
		case HNS_ROCE_EVENT_TYPE_SQ_DRAINED:
		case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
		case HNS_ROCE_EVENT_TYPE_SRQ_LAST_WQE_REACH:
		case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
		case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
			hns_roce_qp_event(hr_dev, qpn, event_type);
			break;
		case HNS_ROCE_EVENT_TYPE_SRQ_LIMIT_REACH:
		case HNS_ROCE_EVENT_TYPE_SRQ_CATAS_ERROR:
			hns_roce_srq_event(hr_dev, srqn, event_type);
			break;
		case HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR:
		case HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW:
			hns_roce_cq_event(hr_dev, cqn, event_type);
			break;
		case HNS_ROCE_EVENT_TYPE_DB_OVERFLOW:
			break;
		case HNS_ROCE_EVENT_TYPE_MB:
			/* mailbox completion: wake the waiting command */
			hns_roce_cmd_event(hr_dev,
					le16_to_cpu(aeqe->event.cmd.token),
					aeqe->event.cmd.status,
					le64_to_cpu(aeqe->event.cmd.out_param));
			break;
		case HNS_ROCE_EVENT_TYPE_CEQ_OVERFLOW:
			break;
		case HNS_ROCE_EVENT_TYPE_FLR:
			break;
		default:
			dev_err(dev, "Unhandled event %d on EQ %d at idx %u.\n",
				event_type, eq->eqn, eq->cons_index);
			break;
		}

		/* stash event info for the deferred irq work */
		eq->event_type = event_type;
		eq->sub_type = sub_type;
		++eq->cons_index;
		aeqe_found = 1;

		/* cons_index runs over a 2 * entries space (owner bit) */
		if (eq->cons_index > (2 * eq->entries - 1)) {
			dev_warn(dev, "cons_index overflow, set back to 0.\n");
			eq->cons_index = 0;
		}
		hns_roce_v2_init_irq_work(hr_dev, eq, qpn, cqn);
	}

	set_eq_cons_index_v2(eq);
	return aeqe_found;
}
4993
4994static struct hns_roce_ceqe *get_ceqe_v2(struct hns_roce_eq *eq, u32 entry)
4995{
4996 u32 buf_chk_sz;
4997 unsigned long off;
4998
4999 buf_chk_sz = 1 << (eq->eqe_buf_pg_sz + PAGE_SHIFT);
5000 off = (entry & (eq->entries - 1)) * HNS_ROCE_CEQ_ENTRY_SIZE;
5001
5002 return (struct hns_roce_ceqe *)((char *)(eq->buf_list->buf) +
5003 off % buf_chk_sz);
5004}
5005
5006static struct hns_roce_ceqe *mhop_get_ceqe(struct hns_roce_eq *eq, u32 entry)
5007{
5008 u32 buf_chk_sz;
5009 unsigned long off;
5010
5011 buf_chk_sz = 1 << (eq->eqe_buf_pg_sz + PAGE_SHIFT);
5012
5013 off = (entry & (eq->entries - 1)) * HNS_ROCE_CEQ_ENTRY_SIZE;
5014
5015 if (eq->hop_num == HNS_ROCE_HOP_NUM_0)
5016 return (struct hns_roce_ceqe *)((u8 *)(eq->bt_l0) +
5017 off % buf_chk_sz);
5018 else
5019 return (struct hns_roce_ceqe *)((u8 *)(eq->buf[off /
5020 buf_chk_sz]) + off % buf_chk_sz);
5021}
5022
/* Return the next unconsumed CEQE, or NULL when the CEQ is empty.
 * Same owner-bit scheme as next_aeqe_sw_v2(): the entry is new exactly
 * when its owner bit XORed with bit 'entries' of cons_index is set.
 */
static struct hns_roce_ceqe *next_ceqe_sw_v2(struct hns_roce_eq *eq)
{
	struct hns_roce_ceqe *ceqe;

	if (!eq->hop_num)
		ceqe = get_ceqe_v2(eq, eq->cons_index);
	else
		ceqe = mhop_get_ceqe(eq, eq->cons_index);

	return (!!(roce_get_bit(ceqe->comp, HNS_ROCE_V2_CEQ_CEQE_OWNER_S))) ^
		(!!(eq->cons_index & eq->entries)) ? ceqe : NULL;
}
5035
5036static int hns_roce_v2_ceq_int(struct hns_roce_dev *hr_dev,
5037 struct hns_roce_eq *eq)
5038{
5039 struct device *dev = hr_dev->dev;
5040 struct hns_roce_ceqe *ceqe;
5041 int ceqe_found = 0;
5042 u32 cqn;
5043
5044 while ((ceqe = next_ceqe_sw_v2(eq))) {
5045
4044a3f4
YL
5046 /* Make sure we read CEQ entry after we have checked the
5047 * ownership bit
5048 */
5049 dma_rmb();
5050
a5073d60
YL
5051 cqn = roce_get_field(ceqe->comp,
5052 HNS_ROCE_V2_CEQE_COMP_CQN_M,
5053 HNS_ROCE_V2_CEQE_COMP_CQN_S);
5054
5055 hns_roce_cq_completion(hr_dev, cqn);
5056
5057 ++eq->cons_index;
5058 ceqe_found = 1;
5059
5060 if (eq->cons_index > (2 * eq->entries - 1)) {
5061 dev_warn(dev, "cons_index overflow, set back to 0.\n");
5062 eq->cons_index = 0;
5063 }
5064 }
5065
5066 set_eq_cons_index_v2(eq);
5067
5068 return ceqe_found;
5069}
5070
5071static irqreturn_t hns_roce_v2_msix_interrupt_eq(int irq, void *eq_ptr)
5072{
5073 struct hns_roce_eq *eq = eq_ptr;
5074 struct hns_roce_dev *hr_dev = eq->hr_dev;
5075 int int_work = 0;
5076
5077 if (eq->type_flag == HNS_ROCE_CEQ)
5078 /* Completion event interrupt */
5079 int_work = hns_roce_v2_ceq_int(hr_dev, eq);
5080 else
5081 /* Asychronous event interrupt */
5082 int_work = hns_roce_v2_aeq_int(hr_dev, eq);
5083
5084 return IRQ_RETVAL(int_work);
5085}
5086
/* MSI-X handler for abnormal interrupts (AEQ overflow, bus error,
 * other error).  Each cause is acknowledged by writing its status bit
 * back and re-enabling the abnormal interrupt; an AEQ overflow also
 * requests a function-level reset through the hnae3 framework.
 */
static irqreturn_t hns_roce_v2_msix_interrupt_abn(int irq, void *dev_id)
{
	struct hns_roce_dev *hr_dev = dev_id;
	struct device *dev = hr_dev->dev;
	int int_work = 0;
	u32 int_st;
	u32 int_en;

	/* Abnormal interrupt */
	int_st = roce_read(hr_dev, ROCEE_VF_ABN_INT_ST_REG);
	int_en = roce_read(hr_dev, ROCEE_VF_ABN_INT_EN_REG);

	if (roce_get_bit(int_st, HNS_ROCE_V2_VF_INT_ST_AEQ_OVERFLOW_S)) {
		struct pci_dev *pdev = hr_dev->pci_dev;
		struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
		const struct hnae3_ae_ops *ops = ae_dev->ops;

		dev_err(dev, "AEQ overflow!\n");

		/* write-1-to-clear the overflow status bit */
		roce_set_bit(int_st, HNS_ROCE_V2_VF_INT_ST_AEQ_OVERFLOW_S, 1);
		roce_write(hr_dev, ROCEE_VF_ABN_INT_ST_REG, int_st);

		/* Set reset level for reset_event() */
		if (ops->set_default_reset_request)
			ops->set_default_reset_request(ae_dev,
						       HNAE3_FUNC_RESET);
		if (ops->reset_event)
			ops->reset_event(pdev, NULL);

		/* re-arm the abnormal interrupt */
		roce_set_bit(int_en, HNS_ROCE_V2_VF_ABN_INT_EN_S, 1);
		roce_write(hr_dev, ROCEE_VF_ABN_INT_EN_REG, int_en);

		int_work = 1;
	} else if (roce_get_bit(int_st, HNS_ROCE_V2_VF_INT_ST_BUS_ERR_S)) {
		dev_err(dev, "BUS ERR!\n");

		roce_set_bit(int_st, HNS_ROCE_V2_VF_INT_ST_BUS_ERR_S, 1);
		roce_write(hr_dev, ROCEE_VF_ABN_INT_ST_REG, int_st);

		roce_set_bit(int_en, HNS_ROCE_V2_VF_ABN_INT_EN_S, 1);
		roce_write(hr_dev, ROCEE_VF_ABN_INT_EN_REG, int_en);

		int_work = 1;
	} else if (roce_get_bit(int_st, HNS_ROCE_V2_VF_INT_ST_OTHER_ERR_S)) {
		dev_err(dev, "OTHER ERR!\n");

		roce_set_bit(int_st, HNS_ROCE_V2_VF_INT_ST_OTHER_ERR_S, 1);
		roce_write(hr_dev, ROCEE_VF_ABN_INT_ST_REG, int_st);

		roce_set_bit(int_en, HNS_ROCE_V2_VF_ABN_INT_EN_S, 1);
		roce_write(hr_dev, ROCEE_VF_ABN_INT_EN_REG, int_en);

		int_work = 1;
	} else
		dev_err(dev, "There is no abnormal irq found!\n");

	return IRQ_RETVAL(int_work);
}
5145
5146static void hns_roce_v2_int_mask_enable(struct hns_roce_dev *hr_dev,
5147 int eq_num, int enable_flag)
5148{
5149 int i;
5150
5151 if (enable_flag == EQ_ENABLE) {
5152 for (i = 0; i < eq_num; i++)
5153 roce_write(hr_dev, ROCEE_VF_EVENT_INT_EN_REG +
5154 i * EQ_REG_OFFSET,
5155 HNS_ROCE_V2_VF_EVENT_INT_EN_M);
5156
5157 roce_write(hr_dev, ROCEE_VF_ABN_INT_EN_REG,
5158 HNS_ROCE_V2_VF_ABN_INT_EN_M);
5159 roce_write(hr_dev, ROCEE_VF_ABN_INT_CFG_REG,
5160 HNS_ROCE_V2_VF_ABN_INT_CFG_M);
5161 } else {
5162 for (i = 0; i < eq_num; i++)
5163 roce_write(hr_dev, ROCEE_VF_EVENT_INT_EN_REG +
5164 i * EQ_REG_OFFSET,
5165 HNS_ROCE_V2_VF_EVENT_INT_EN_M & 0x0);
5166
5167 roce_write(hr_dev, ROCEE_VF_ABN_INT_EN_REG,
5168 HNS_ROCE_V2_VF_ABN_INT_EN_M & 0x0);
5169 roce_write(hr_dev, ROCEE_VF_ABN_INT_CFG_REG,
5170 HNS_ROCE_V2_VF_ABN_INT_CFG_M & 0x0);
5171 }
5172}
5173
5174static void hns_roce_v2_destroy_eqc(struct hns_roce_dev *hr_dev, int eqn)
5175{
5176 struct device *dev = hr_dev->dev;
5177 int ret;
5178
5179 if (eqn < hr_dev->caps.num_comp_vectors)
5180 ret = hns_roce_cmd_mbox(hr_dev, 0, 0, eqn & HNS_ROCE_V2_EQN_M,
5181 0, HNS_ROCE_CMD_DESTROY_CEQC,
5182 HNS_ROCE_CMD_TIMEOUT_MSECS);
5183 else
5184 ret = hns_roce_cmd_mbox(hr_dev, 0, 0, eqn & HNS_ROCE_V2_EQN_M,
5185 0, HNS_ROCE_CMD_DESTROY_AEQC,
5186 HNS_ROCE_CMD_TIMEOUT_MSECS);
5187 if (ret)
5188 dev_err(dev, "[mailbox cmd] destroy eqc(%d) failed.\n", eqn);
5189}
5190
/* Free all DMA buffers and base-address tables of a multi-hop EQ,
 * mirroring the layout built by hns_roce_mhop_alloc_eq():
 *   hop 0 - entries live directly in one coherent buffer (bt_l0);
 *   hop 1 - an L0 BT pointing at buffer chunks;
 *   hop 2 - an L0 BT pointing at L1 BTs pointing at buffer chunks.
 * The last chunk may be shorter than buf_chk_sz, so its exact size is
 * recomputed from the number of entries already covered.
 */
static void hns_roce_mhop_free_eq(struct hns_roce_dev *hr_dev,
				  struct hns_roce_eq *eq)
{
	struct device *dev = hr_dev->dev;
	u64 idx;
	u64 size;
	u32 buf_chk_sz;
	u32 bt_chk_sz;
	u32 mhop_num;
	int eqe_alloc;
	int i = 0;
	int j = 0;

	mhop_num = hr_dev->caps.eqe_hop_num;
	buf_chk_sz = 1 << (hr_dev->caps.eqe_buf_pg_sz + PAGE_SHIFT);
	bt_chk_sz = 1 << (hr_dev->caps.eqe_ba_pg_sz + PAGE_SHIFT);

	/* hop_num = 0 */
	if (mhop_num == HNS_ROCE_HOP_NUM_0) {
		dma_free_coherent(dev, (unsigned int)(eq->entries *
				  eq->eqe_size), eq->bt_l0, eq->l0_dma);
		return;
	}

	/* hop_num = 1 or hop = 2 */
	dma_free_coherent(dev, bt_chk_sz, eq->bt_l0, eq->l0_dma);
	if (mhop_num == 1) {
		for (i = 0; i < eq->l0_last_num; i++) {
			if (i == eq->l0_last_num - 1) {
				/* final chunk: free only what was allocated */
				eqe_alloc = i * (buf_chk_sz / eq->eqe_size);
				size = (eq->entries - eqe_alloc) * eq->eqe_size;
				dma_free_coherent(dev, size, eq->buf[i],
						  eq->buf_dma[i]);
				break;
			}
			dma_free_coherent(dev, buf_chk_sz, eq->buf[i],
					  eq->buf_dma[i]);
		}
	} else if (mhop_num == 2) {
		for (i = 0; i < eq->l0_last_num; i++) {
			dma_free_coherent(dev, bt_chk_sz, eq->bt_l1[i],
					  eq->l1_dma[i]);

			for (j = 0; j < bt_chk_sz / BA_BYTE_LEN; j++) {
				/* flat chunk index across all L1 tables */
				idx = i * (bt_chk_sz / BA_BYTE_LEN) + j;
				if ((i == eq->l0_last_num - 1)
				     && j == eq->l1_last_num - 1) {
					eqe_alloc = (buf_chk_sz / eq->eqe_size)
						    * idx;
					size = (eq->entries - eqe_alloc)
						* eq->eqe_size;
					dma_free_coherent(dev, size,
							  eq->buf[idx],
							  eq->buf_dma[idx]);
					break;
				}
				dma_free_coherent(dev, buf_chk_sz, eq->buf[idx],
						  eq->buf_dma[idx]);
			}
		}
	}
	kfree(eq->buf_dma);
	kfree(eq->buf);
	kfree(eq->l1_dma);
	kfree(eq->bt_l1);
	eq->buf_dma = NULL;
	eq->buf = NULL;
	eq->l1_dma = NULL;
	eq->bt_l1 = NULL;
}
5261
5262static void hns_roce_v2_free_eq(struct hns_roce_dev *hr_dev,
5263 struct hns_roce_eq *eq)
5264{
5265 u32 buf_chk_sz;
5266
5267 buf_chk_sz = 1 << (eq->eqe_buf_pg_sz + PAGE_SHIFT);
5268
5269 if (hr_dev->caps.eqe_hop_num) {
5270 hns_roce_mhop_free_eq(hr_dev, eq);
5271 return;
5272 }
5273
5274 if (eq->buf_list)
5275 dma_free_coherent(hr_dev->dev, buf_chk_sz,
5276 eq->buf_list->buf, eq->buf_list->map);
5277}
5278
/* Fill the mailbox buffer with the EQ context that will be written to
 * hardware by the CREATE_AEQC/CREATE_CEQC mailbox command, and finish
 * initializing the software-side fields of @eq (doorbell address, arm
 * state, base address, shift, ...).  The base address programmed is the
 * flat buffer for hop_num 0 EQs or the L0 BT for multi-hop EQs.
 */
static void hns_roce_config_eqc(struct hns_roce_dev *hr_dev,
				struct hns_roce_eq *eq,
				void *mb_buf)
{
	struct hns_roce_eq_context *eqc;

	eqc = mb_buf;
	memset(eqc, 0, sizeof(struct hns_roce_eq_context));

	/* init eqc */
	eq->doorbell = hr_dev->reg_base + ROCEE_VF_EQ_DB_CFG0_REG;
	eq->hop_num = hr_dev->caps.eqe_hop_num;
	eq->cons_index = 0;
	eq->over_ignore = HNS_ROCE_V2_EQ_OVER_IGNORE_0;
	eq->coalesce = HNS_ROCE_V2_EQ_COALESCE_0;
	eq->arm_st = HNS_ROCE_V2_EQ_ALWAYS_ARMED;
	eq->eqe_ba_pg_sz = hr_dev->caps.eqe_ba_pg_sz;
	eq->eqe_buf_pg_sz = hr_dev->caps.eqe_buf_pg_sz;
	eq->shift = ilog2((unsigned int)eq->entries);

	/* base address: flat buffer (hop 0) or L0 base-address table */
	if (!eq->hop_num)
		eq->eqe_ba = eq->buf_list->map;
	else
		eq->eqe_ba = eq->l0_dma;

	/* set eqc state */
	roce_set_field(eqc->byte_4,
		       HNS_ROCE_EQC_EQ_ST_M,
		       HNS_ROCE_EQC_EQ_ST_S,
		       HNS_ROCE_V2_EQ_STATE_VALID);

	/* set eqe hop num */
	roce_set_field(eqc->byte_4,
		       HNS_ROCE_EQC_HOP_NUM_M,
		       HNS_ROCE_EQC_HOP_NUM_S, eq->hop_num);

	/* set eqc over_ignore */
	roce_set_field(eqc->byte_4,
		       HNS_ROCE_EQC_OVER_IGNORE_M,
		       HNS_ROCE_EQC_OVER_IGNORE_S, eq->over_ignore);

	/* set eqc coalesce */
	roce_set_field(eqc->byte_4,
		       HNS_ROCE_EQC_COALESCE_M,
		       HNS_ROCE_EQC_COALESCE_S, eq->coalesce);

	/* set eqc arm_state */
	roce_set_field(eqc->byte_4,
		       HNS_ROCE_EQC_ARM_ST_M,
		       HNS_ROCE_EQC_ARM_ST_S, eq->arm_st);

	/* set eqn */
	roce_set_field(eqc->byte_4,
		       HNS_ROCE_EQC_EQN_M,
		       HNS_ROCE_EQC_EQN_S, eq->eqn);

	/* set eqe_cnt */
	roce_set_field(eqc->byte_4,
		       HNS_ROCE_EQC_EQE_CNT_M,
		       HNS_ROCE_EQC_EQE_CNT_S,
		       HNS_ROCE_EQ_INIT_EQE_CNT);

	/* set eqe_ba_pg_sz */
	roce_set_field(eqc->byte_8,
		       HNS_ROCE_EQC_BA_PG_SZ_M,
		       HNS_ROCE_EQC_BA_PG_SZ_S,
		       eq->eqe_ba_pg_sz + PG_SHIFT_OFFSET);

	/* set eqe_buf_pg_sz */
	roce_set_field(eqc->byte_8,
		       HNS_ROCE_EQC_BUF_PG_SZ_M,
		       HNS_ROCE_EQC_BUF_PG_SZ_S,
		       eq->eqe_buf_pg_sz + PG_SHIFT_OFFSET);

	/* set eq_producer_idx */
	roce_set_field(eqc->byte_8,
		       HNS_ROCE_EQC_PROD_INDX_M,
		       HNS_ROCE_EQC_PROD_INDX_S,
		       HNS_ROCE_EQ_INIT_PROD_IDX);

	/* set eq_max_cnt */
	roce_set_field(eqc->byte_12,
		       HNS_ROCE_EQC_MAX_CNT_M,
		       HNS_ROCE_EQC_MAX_CNT_S, eq->eq_max_cnt);

	/* set eq_period */
	roce_set_field(eqc->byte_12,
		       HNS_ROCE_EQC_PERIOD_M,
		       HNS_ROCE_EQC_PERIOD_S, eq->eq_period);

	/* set eqe_report_timer */
	roce_set_field(eqc->eqe_report_timer,
		       HNS_ROCE_EQC_REPORT_TIMER_M,
		       HNS_ROCE_EQC_REPORT_TIMER_S,
		       HNS_ROCE_EQ_INIT_REPORT_TIMER);

	/* set eqe_ba [34:3] */
	roce_set_field(eqc->eqe_ba0,
		       HNS_ROCE_EQC_EQE_BA_L_M,
		       HNS_ROCE_EQC_EQE_BA_L_S, eq->eqe_ba >> 3);

	/* set eqe_ba [64:35] */
	roce_set_field(eqc->eqe_ba1,
		       HNS_ROCE_EQC_EQE_BA_H_M,
		       HNS_ROCE_EQC_EQE_BA_H_S, eq->eqe_ba >> 35);

	/* set eq shift */
	roce_set_field(eqc->byte_28,
		       HNS_ROCE_EQC_SHIFT_M,
		       HNS_ROCE_EQC_SHIFT_S, eq->shift);

	/* set eq MSI_IDX */
	roce_set_field(eqc->byte_28,
		       HNS_ROCE_EQC_MSI_INDX_M,
		       HNS_ROCE_EQC_MSI_INDX_S,
		       HNS_ROCE_EQ_INIT_MSI_IDX);

	/* set cur_eqe_ba [27:12] */
	roce_set_field(eqc->byte_28,
		       HNS_ROCE_EQC_CUR_EQE_BA_L_M,
		       HNS_ROCE_EQC_CUR_EQE_BA_L_S, eq->cur_eqe_ba >> 12);

	/* set cur_eqe_ba [59:28] */
	roce_set_field(eqc->byte_32,
		       HNS_ROCE_EQC_CUR_EQE_BA_M_M,
		       HNS_ROCE_EQC_CUR_EQE_BA_M_S, eq->cur_eqe_ba >> 28);

	/* set cur_eqe_ba [63:60] */
	roce_set_field(eqc->byte_36,
		       HNS_ROCE_EQC_CUR_EQE_BA_H_M,
		       HNS_ROCE_EQC_CUR_EQE_BA_H_S, eq->cur_eqe_ba >> 60);

	/* set eq consumer idx */
	roce_set_field(eqc->byte_36,
		       HNS_ROCE_EQC_CONS_INDX_M,
		       HNS_ROCE_EQC_CONS_INDX_S,
		       HNS_ROCE_EQ_INIT_CONS_IDX);

	/* set nex_eqe_ba[43:12] */
	roce_set_field(eqc->nxt_eqe_ba0,
		       HNS_ROCE_EQC_NXT_EQE_BA_L_M,
		       HNS_ROCE_EQC_NXT_EQE_BA_L_S, eq->nxt_eqe_ba >> 12);

	/* set nex_eqe_ba[63:44] */
	roce_set_field(eqc->nxt_eqe_ba1,
		       HNS_ROCE_EQC_NXT_EQE_BA_H_M,
		       HNS_ROCE_EQC_NXT_EQE_BA_H_S, eq->nxt_eqe_ba >> 44);
}
5427
5428static int hns_roce_mhop_alloc_eq(struct hns_roce_dev *hr_dev,
5429 struct hns_roce_eq *eq)
5430{
5431 struct device *dev = hr_dev->dev;
5432 int eq_alloc_done = 0;
5433 int eq_buf_cnt = 0;
5434 int eqe_alloc;
5435 u32 buf_chk_sz;
5436 u32 bt_chk_sz;
5437 u32 mhop_num;
5438 u64 size;
5439 u64 idx;
5440 int ba_num;
5441 int bt_num;
5442 int record_i;
5443 int record_j;
5444 int i = 0;
5445 int j = 0;
5446
5447 mhop_num = hr_dev->caps.eqe_hop_num;
5448 buf_chk_sz = 1 << (hr_dev->caps.eqe_buf_pg_sz + PAGE_SHIFT);
5449 bt_chk_sz = 1 << (hr_dev->caps.eqe_ba_pg_sz + PAGE_SHIFT);
5450
2a3d923f
LO
5451 ba_num = DIV_ROUND_UP(PAGE_ALIGN(eq->entries * eq->eqe_size),
5452 buf_chk_sz);
5453 bt_num = DIV_ROUND_UP(ba_num, bt_chk_sz / BA_BYTE_LEN);
a5073d60
YL
5454
5455 /* hop_num = 0 */
5456 if (mhop_num == HNS_ROCE_HOP_NUM_0) {
5457 if (eq->entries > buf_chk_sz / eq->eqe_size) {
5458 dev_err(dev, "eq entries %d is larger than buf_pg_sz!",
5459 eq->entries);
5460 return -EINVAL;
5461 }
5462 eq->bt_l0 = dma_alloc_coherent(dev, eq->entries * eq->eqe_size,
5463 &(eq->l0_dma), GFP_KERNEL);
5464 if (!eq->bt_l0)
5465 return -ENOMEM;
5466
5467 eq->cur_eqe_ba = eq->l0_dma;
5468 eq->nxt_eqe_ba = 0;
5469
5470 memset(eq->bt_l0, 0, eq->entries * eq->eqe_size);
5471
5472 return 0;
5473 }
5474
5475 eq->buf_dma = kcalloc(ba_num, sizeof(*eq->buf_dma), GFP_KERNEL);
5476 if (!eq->buf_dma)
5477 return -ENOMEM;
5478 eq->buf = kcalloc(ba_num, sizeof(*eq->buf), GFP_KERNEL);
5479 if (!eq->buf)
5480 goto err_kcalloc_buf;
5481
5482 if (mhop_num == 2) {
5483 eq->l1_dma = kcalloc(bt_num, sizeof(*eq->l1_dma), GFP_KERNEL);
5484 if (!eq->l1_dma)
5485 goto err_kcalloc_l1_dma;
5486
5487 eq->bt_l1 = kcalloc(bt_num, sizeof(*eq->bt_l1), GFP_KERNEL);
5488 if (!eq->bt_l1)
5489 goto err_kcalloc_bt_l1;
5490 }
5491
5492 /* alloc L0 BT */
5493 eq->bt_l0 = dma_alloc_coherent(dev, bt_chk_sz, &eq->l0_dma, GFP_KERNEL);
5494 if (!eq->bt_l0)
5495 goto err_dma_alloc_l0;
5496
5497 if (mhop_num == 1) {
2a3d923f 5498 if (ba_num > (bt_chk_sz / BA_BYTE_LEN))
a5073d60
YL
5499 dev_err(dev, "ba_num %d is too large for 1 hop\n",
5500 ba_num);
5501
5502 /* alloc buf */
2a3d923f 5503 for (i = 0; i < bt_chk_sz / BA_BYTE_LEN; i++) {
a5073d60
YL
5504 if (eq_buf_cnt + 1 < ba_num) {
5505 size = buf_chk_sz;
5506 } else {
5507 eqe_alloc = i * (buf_chk_sz / eq->eqe_size);
5508 size = (eq->entries - eqe_alloc) * eq->eqe_size;
5509 }
750afb08 5510 eq->buf[i] = dma_alloc_coherent(dev, size,
a5073d60
YL
5511 &(eq->buf_dma[i]),
5512 GFP_KERNEL);
5513 if (!eq->buf[i])
5514 goto err_dma_alloc_buf;
5515
a5073d60
YL
5516 *(eq->bt_l0 + i) = eq->buf_dma[i];
5517
5518 eq_buf_cnt++;
5519 if (eq_buf_cnt >= ba_num)
5520 break;
5521 }
5522 eq->cur_eqe_ba = eq->buf_dma[0];
5523 eq->nxt_eqe_ba = eq->buf_dma[1];
5524
5525 } else if (mhop_num == 2) {
5526 /* alloc L1 BT and buf */
2a3d923f 5527 for (i = 0; i < bt_chk_sz / BA_BYTE_LEN; i++) {
a5073d60
YL
5528 eq->bt_l1[i] = dma_alloc_coherent(dev, bt_chk_sz,
5529 &(eq->l1_dma[i]),
5530 GFP_KERNEL);
5531 if (!eq->bt_l1[i])
5532 goto err_dma_alloc_l1;
5533 *(eq->bt_l0 + i) = eq->l1_dma[i];
5534
2a3d923f
LO
5535 for (j = 0; j < bt_chk_sz / BA_BYTE_LEN; j++) {
5536 idx = i * bt_chk_sz / BA_BYTE_LEN + j;
a5073d60
YL
5537 if (eq_buf_cnt + 1 < ba_num) {
5538 size = buf_chk_sz;
5539 } else {
5540 eqe_alloc = (buf_chk_sz / eq->eqe_size)
5541 * idx;
5542 size = (eq->entries - eqe_alloc)
5543 * eq->eqe_size;
5544 }
750afb08
LC
5545 eq->buf[idx] = dma_alloc_coherent(dev, size,
5546 &(eq->buf_dma[idx]),
5547 GFP_KERNEL);
a5073d60
YL
5548 if (!eq->buf[idx])
5549 goto err_dma_alloc_buf;
5550
a5073d60
YL
5551 *(eq->bt_l1[i] + j) = eq->buf_dma[idx];
5552
5553 eq_buf_cnt++;
5554 if (eq_buf_cnt >= ba_num) {
5555 eq_alloc_done = 1;
5556 break;
5557 }
5558 }
5559
5560 if (eq_alloc_done)
5561 break;
5562 }
5563 eq->cur_eqe_ba = eq->buf_dma[0];
5564 eq->nxt_eqe_ba = eq->buf_dma[1];
5565 }
5566
5567 eq->l0_last_num = i + 1;
5568 if (mhop_num == 2)
5569 eq->l1_last_num = j + 1;
5570
5571 return 0;
5572
5573err_dma_alloc_l1:
5574 dma_free_coherent(dev, bt_chk_sz, eq->bt_l0, eq->l0_dma);
5575 eq->bt_l0 = NULL;
5576 eq->l0_dma = 0;
5577 for (i -= 1; i >= 0; i--) {
5578 dma_free_coherent(dev, bt_chk_sz, eq->bt_l1[i],
5579 eq->l1_dma[i]);
5580
2a3d923f
LO
5581 for (j = 0; j < bt_chk_sz / BA_BYTE_LEN; j++) {
5582 idx = i * bt_chk_sz / BA_BYTE_LEN + j;
a5073d60
YL
5583 dma_free_coherent(dev, buf_chk_sz, eq->buf[idx],
5584 eq->buf_dma[idx]);
5585 }
5586 }
5587 goto err_dma_alloc_l0;
5588
5589err_dma_alloc_buf:
5590 dma_free_coherent(dev, bt_chk_sz, eq->bt_l0, eq->l0_dma);
5591 eq->bt_l0 = NULL;
5592 eq->l0_dma = 0;
5593
5594 if (mhop_num == 1)
38759d61 5595 for (i -= 1; i >= 0; i--)
a5073d60
YL
5596 dma_free_coherent(dev, buf_chk_sz, eq->buf[i],
5597 eq->buf_dma[i]);
5598 else if (mhop_num == 2) {
5599 record_i = i;
5600 record_j = j;
5601 for (; i >= 0; i--) {
5602 dma_free_coherent(dev, bt_chk_sz, eq->bt_l1[i],
5603 eq->l1_dma[i]);
5604
2a3d923f 5605 for (j = 0; j < bt_chk_sz / BA_BYTE_LEN; j++) {
a5073d60
YL
5606 if (i == record_i && j >= record_j)
5607 break;
5608
2a3d923f 5609 idx = i * bt_chk_sz / BA_BYTE_LEN + j;
a5073d60
YL
5610 dma_free_coherent(dev, buf_chk_sz,
5611 eq->buf[idx],
5612 eq->buf_dma[idx]);
5613 }
5614 }
5615 }
5616
5617err_dma_alloc_l0:
5618 kfree(eq->bt_l1);
5619 eq->bt_l1 = NULL;
5620
5621err_kcalloc_bt_l1:
5622 kfree(eq->l1_dma);
5623 eq->l1_dma = NULL;
5624
5625err_kcalloc_l1_dma:
5626 kfree(eq->buf);
5627 eq->buf = NULL;
5628
5629err_kcalloc_buf:
5630 kfree(eq->buf_dma);
5631 eq->buf_dma = NULL;
5632
5633 return -ENOMEM;
5634}
5635
5636static int hns_roce_v2_create_eq(struct hns_roce_dev *hr_dev,
5637 struct hns_roce_eq *eq,
5638 unsigned int eq_cmd)
5639{
5640 struct device *dev = hr_dev->dev;
5641 struct hns_roce_cmd_mailbox *mailbox;
5642 u32 buf_chk_sz = 0;
5643 int ret;
5644
5645 /* Allocate mailbox memory */
5646 mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
5647 if (IS_ERR(mailbox))
5648 return PTR_ERR(mailbox);
5649
5650 if (!hr_dev->caps.eqe_hop_num) {
5651 buf_chk_sz = 1 << (hr_dev->caps.eqe_buf_pg_sz + PAGE_SHIFT);
5652
5653 eq->buf_list = kzalloc(sizeof(struct hns_roce_buf_list),
5654 GFP_KERNEL);
5655 if (!eq->buf_list) {
5656 ret = -ENOMEM;
5657 goto free_cmd_mbox;
5658 }
5659
750afb08 5660 eq->buf_list->buf = dma_alloc_coherent(dev, buf_chk_sz,
a5073d60
YL
5661 &(eq->buf_list->map),
5662 GFP_KERNEL);
5663 if (!eq->buf_list->buf) {
5664 ret = -ENOMEM;
5665 goto err_alloc_buf;
5666 }
5667
a5073d60
YL
5668 } else {
5669 ret = hns_roce_mhop_alloc_eq(hr_dev, eq);
5670 if (ret) {
5671 ret = -ENOMEM;
5672 goto free_cmd_mbox;
5673 }
5674 }
5675
5676 hns_roce_config_eqc(hr_dev, eq, mailbox->buf);
5677
5678 ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, eq->eqn, 0,
5679 eq_cmd, HNS_ROCE_CMD_TIMEOUT_MSECS);
5680 if (ret) {
ab178849 5681 dev_err(dev, "[mailbox cmd] create eqc failed.\n");
a5073d60
YL
5682 goto err_cmd_mbox;
5683 }
5684
5685 hns_roce_free_cmd_mailbox(hr_dev, mailbox);
5686
5687 return 0;
5688
5689err_cmd_mbox:
5690 if (!hr_dev->caps.eqe_hop_num)
5691 dma_free_coherent(dev, buf_chk_sz, eq->buf_list->buf,
5692 eq->buf_list->map);
5693 else {
5694 hns_roce_mhop_free_eq(hr_dev, eq);
5695 goto free_cmd_mbox;
5696 }
5697
5698err_alloc_buf:
5699 kfree(eq->buf_list);
5700
5701free_cmd_mbox:
5702 hns_roce_free_cmd_mailbox(hr_dev, mailbox);
5703
5704 return ret;
5705}
5706
/*
 * Allocate and create all event queues (CEQs then AEQs in eq_table->eq[]),
 * register every interrupt handler and start the irq work queue.
 *
 * irq vector layout is [abnormal][AEQ][CEQ] while eq_table->eq[] is
 * CEQ-first, AEQ-second — hence the index remapping in the request_irq
 * loop below.
 *
 * Returns 0 on success or a negative errno; on failure everything
 * allocated so far is unwound via the goto chain.
 */
static int hns_roce_v2_init_eq_table(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_eq_table *eq_table = &hr_dev->eq_table;
	struct device *dev = hr_dev->dev;
	struct hns_roce_eq *eq;
	unsigned int eq_cmd;
	int irq_num;
	int eq_num;
	int other_num;
	int comp_num;
	int aeq_num;
	int i, j, k;
	int ret;

	other_num = hr_dev->caps.num_other_vectors;
	comp_num = hr_dev->caps.num_comp_vectors;
	aeq_num = hr_dev->caps.num_aeq_vectors;

	eq_num = comp_num + aeq_num;
	irq_num = eq_num + other_num;

	eq_table->eq = kcalloc(eq_num, sizeof(*eq_table->eq), GFP_KERNEL);
	if (!eq_table->eq)
		return -ENOMEM;

	/* One name buffer per irq (abnormal + AEQ + CEQ). */
	for (i = 0; i < irq_num; i++) {
		hr_dev->irq_names[i] = kzalloc(HNS_ROCE_INT_NAME_LEN,
					       GFP_KERNEL);
		if (!hr_dev->irq_names[i]) {
			ret = -ENOMEM;
			goto err_failed_kzalloc;
		}
	}

	/* create eq */
	for (j = 0; j < eq_num; j++) {
		eq = &eq_table->eq[j];
		eq->hr_dev = hr_dev;
		eq->eqn = j;
		if (j < comp_num) {
			/* CEQ */
			eq_cmd = HNS_ROCE_CMD_CREATE_CEQC;
			eq->type_flag = HNS_ROCE_CEQ;
			eq->entries = hr_dev->caps.ceqe_depth;
			eq->eqe_size = HNS_ROCE_CEQ_ENTRY_SIZE;
			/* CEQ irq vectors come after abnormal + AEQ ones */
			eq->irq = hr_dev->irq[j + other_num + aeq_num];
			eq->eq_max_cnt = HNS_ROCE_CEQ_DEFAULT_BURST_NUM;
			eq->eq_period = HNS_ROCE_CEQ_DEFAULT_INTERVAL;
		} else {
			/* AEQ */
			eq_cmd = HNS_ROCE_CMD_CREATE_AEQC;
			eq->type_flag = HNS_ROCE_AEQ;
			eq->entries = hr_dev->caps.aeqe_depth;
			eq->eqe_size = HNS_ROCE_AEQ_ENTRY_SIZE;
			eq->irq = hr_dev->irq[j - comp_num + other_num];
			eq->eq_max_cnt = HNS_ROCE_AEQ_DEFAULT_BURST_NUM;
			eq->eq_period = HNS_ROCE_AEQ_DEFAULT_INTERVAL;
		}

		ret = hns_roce_v2_create_eq(hr_dev, eq, eq_cmd);
		if (ret) {
			dev_err(dev, "eq create failed.\n");
			goto err_create_eq_fail;
		}
	}

	/* enable irq */
	hns_roce_v2_int_mask_enable(hr_dev, eq_num, EQ_ENABLE);

	/* irq contains: abnormal + AEQ + CEQ*/
	for (k = 0; k < irq_num; k++)
		if (k < other_num)
			snprintf((char *)hr_dev->irq_names[k],
				 HNS_ROCE_INT_NAME_LEN, "hns-abn-%d", k);
		else if (k < (other_num + aeq_num))
			snprintf((char *)hr_dev->irq_names[k],
				 HNS_ROCE_INT_NAME_LEN, "hns-aeq-%d",
				 k - other_num);
		else
			snprintf((char *)hr_dev->irq_names[k],
				 HNS_ROCE_INT_NAME_LEN, "hns-ceq-%d",
				 k - other_num - aeq_num);

	/*
	 * irq_names[] above is ordered abn/aeq/ceq but eq_table->eq[] is
	 * ceq-first, so CEQ entries use names[k + aeq_num] and AEQ entries
	 * use names[k - comp_num].
	 */
	for (k = 0; k < irq_num; k++) {
		if (k < other_num)
			ret = request_irq(hr_dev->irq[k],
					  hns_roce_v2_msix_interrupt_abn,
					  0, hr_dev->irq_names[k], hr_dev);

		else if (k < (other_num + comp_num))
			ret = request_irq(eq_table->eq[k - other_num].irq,
					  hns_roce_v2_msix_interrupt_eq,
					  0, hr_dev->irq_names[k + aeq_num],
					  &eq_table->eq[k - other_num]);
		else
			ret = request_irq(eq_table->eq[k - other_num].irq,
					  hns_roce_v2_msix_interrupt_eq,
					  0, hr_dev->irq_names[k - comp_num],
					  &eq_table->eq[k - other_num]);
		if (ret) {
			dev_err(dev, "Request irq error!\n");
			goto err_request_irq_fail;
		}
	}

	hr_dev->irq_workq =
		create_singlethread_workqueue("hns_roce_irq_workqueue");
	if (!hr_dev->irq_workq) {
		dev_err(dev, "Create irq workqueue failed!\n");
		ret = -ENOMEM;
		goto err_request_irq_fail;
	}

	return 0;

err_request_irq_fail:
	/* k is the index of the request_irq that failed; free the rest */
	for (k -= 1; k >= 0; k--)
		if (k < other_num)
			free_irq(hr_dev->irq[k], hr_dev);
		else
			free_irq(eq_table->eq[k - other_num].irq,
				 &eq_table->eq[k - other_num]);

err_create_eq_fail:
	for (j -= 1; j >= 0; j--)
		hns_roce_v2_free_eq(hr_dev, &eq_table->eq[j]);

err_failed_kzalloc:
	for (i -= 1; i >= 0; i--)
		kfree(hr_dev->irq_names[i]);
	kfree(eq_table->eq);

	return ret;
}
5841
5842static void hns_roce_v2_cleanup_eq_table(struct hns_roce_dev *hr_dev)
5843{
5844 struct hns_roce_eq_table *eq_table = &hr_dev->eq_table;
5845 int irq_num;
5846 int eq_num;
5847 int i;
5848
5849 eq_num = hr_dev->caps.num_comp_vectors + hr_dev->caps.num_aeq_vectors;
5850 irq_num = eq_num + hr_dev->caps.num_other_vectors;
5851
5852 /* Disable irq */
5853 hns_roce_v2_int_mask_enable(hr_dev, eq_num, EQ_DISABLE);
5854
5855 for (i = 0; i < hr_dev->caps.num_other_vectors; i++)
5856 free_irq(hr_dev->irq[i], hr_dev);
5857
5858 for (i = 0; i < eq_num; i++) {
5859 hns_roce_v2_destroy_eqc(hr_dev, i);
5860
5861 free_irq(eq_table->eq[i].irq, &eq_table->eq[i]);
5862
5863 hns_roce_v2_free_eq(hr_dev, &eq_table->eq[i]);
5864 }
5865
5866 for (i = 0; i < irq_num; i++)
5867 kfree(hr_dev->irq_names[i]);
5868
5869 kfree(eq_table->eq);
0425e3e6
YL
5870
5871 flush_workqueue(hr_dev->irq_workq);
5872 destroy_workqueue(hr_dev->irq_workq);
a5073d60
YL
5873}
5874
c7bcb134
LO
/*
 * Fill the SRQ hardware context at mb_buf for the CREATE_SRQC mailbox
 * command: state, hop numbers, WQE/IDX base-address-table addresses,
 * page sizes, PD/XRCD/CQN and doorbell settings.
 *
 * BA addresses are stored as addr >> 3 (8-byte units): the low 32 bits
 * go in the dedicated word, the remaining high bits (addr >> 35) in the
 * companion bit-field.
 */
static void hns_roce_v2_write_srqc(struct hns_roce_dev *hr_dev,
				   struct hns_roce_srq *srq, u32 pdn, u16 xrcd,
				   u32 cqn, void *mb_buf, u64 *mtts_wqe,
				   u64 *mtts_idx, dma_addr_t dma_handle_wqe,
				   dma_addr_t dma_handle_idx)
{
	struct hns_roce_srq_context *srq_context;

	srq_context = mb_buf;
	memset(srq_context, 0, sizeof(*srq_context));

	/* SRQ state: 1 = valid */
	roce_set_field(srq_context->byte_4_srqn_srqst, SRQC_BYTE_4_SRQ_ST_M,
		       SRQC_BYTE_4_SRQ_ST_S, 1);

	/* HNS_ROCE_HOP_NUM_0 is encoded as 0 in the context */
	roce_set_field(srq_context->byte_4_srqn_srqst,
		       SRQC_BYTE_4_SRQ_WQE_HOP_NUM_M,
		       SRQC_BYTE_4_SRQ_WQE_HOP_NUM_S,
		       (hr_dev->caps.srqwqe_hop_num == HNS_ROCE_HOP_NUM_0 ? 0 :
		       hr_dev->caps.srqwqe_hop_num));
	/* queue depth is expressed as log2 of the WQE count */
	roce_set_field(srq_context->byte_4_srqn_srqst,
		       SRQC_BYTE_4_SRQ_SHIFT_M, SRQC_BYTE_4_SRQ_SHIFT_S,
		       ilog2(srq->max));

	roce_set_field(srq_context->byte_4_srqn_srqst, SRQC_BYTE_4_SRQN_M,
		       SRQC_BYTE_4_SRQN_S, srq->srqn);

	/* limit watermark starts at 0 (disabled); set via modify_srq */
	roce_set_field(srq_context->byte_8_limit_wl, SRQC_BYTE_8_SRQ_LIMIT_WL_M,
		       SRQC_BYTE_8_SRQ_LIMIT_WL_S, 0);

	roce_set_field(srq_context->byte_12_xrcd, SRQC_BYTE_12_SRQ_XRCD_M,
		       SRQC_BYTE_12_SRQ_XRCD_S, xrcd);

	srq_context->wqe_bt_ba = cpu_to_le32((u32)(dma_handle_wqe >> 3));

	/* NOTE(review): feeding cpu_to_le32() into roce_set_field looks
	 * endian-suspect on big-endian builds — confirm against the
	 * roce_set_field implementation.
	 */
	roce_set_field(srq_context->byte_24_wqe_bt_ba,
		       SRQC_BYTE_24_SRQ_WQE_BT_BA_M,
		       SRQC_BYTE_24_SRQ_WQE_BT_BA_S,
		       cpu_to_le32(dma_handle_wqe >> 35));

	roce_set_field(srq_context->byte_28_rqws_pd, SRQC_BYTE_28_PD_M,
		       SRQC_BYTE_28_PD_S, pdn);
	/* rqws = log2 of max SGEs per receive WQE */
	roce_set_field(srq_context->byte_28_rqws_pd, SRQC_BYTE_28_RQWS_M,
		       SRQC_BYTE_28_RQWS_S, srq->max_gs <= 0 ? 0 :
		       fls(srq->max_gs - 1));

	srq_context->idx_bt_ba = (u32)(dma_handle_idx >> 3);
	srq_context->idx_bt_ba = cpu_to_le32(srq_context->idx_bt_ba);
	roce_set_field(srq_context->rsv_idx_bt_ba,
		       SRQC_BYTE_36_SRQ_IDX_BT_BA_M,
		       SRQC_BYTE_36_SRQ_IDX_BT_BA_S,
		       cpu_to_le32(dma_handle_idx >> 35));

	/* current/next index-queue block addresses in page units */
	srq_context->idx_cur_blk_addr = (u32)(mtts_idx[0] >> PAGE_ADDR_SHIFT);
	srq_context->idx_cur_blk_addr =
		cpu_to_le32(srq_context->idx_cur_blk_addr);
	roce_set_field(srq_context->byte_44_idxbufpgsz_addr,
		       SRQC_BYTE_44_SRQ_IDX_CUR_BLK_ADDR_M,
		       SRQC_BYTE_44_SRQ_IDX_CUR_BLK_ADDR_S,
		       cpu_to_le32((mtts_idx[0]) >> (32 + PAGE_ADDR_SHIFT)));
	roce_set_field(srq_context->byte_44_idxbufpgsz_addr,
		       SRQC_BYTE_44_SRQ_IDX_HOP_NUM_M,
		       SRQC_BYTE_44_SRQ_IDX_HOP_NUM_S,
		       hr_dev->caps.idx_hop_num == HNS_ROCE_HOP_NUM_0 ? 0 :
		       hr_dev->caps.idx_hop_num);

	roce_set_field(srq_context->byte_44_idxbufpgsz_addr,
		       SRQC_BYTE_44_SRQ_IDX_BA_PG_SZ_M,
		       SRQC_BYTE_44_SRQ_IDX_BA_PG_SZ_S,
		       hr_dev->caps.idx_ba_pg_sz);
	roce_set_field(srq_context->byte_44_idxbufpgsz_addr,
		       SRQC_BYTE_44_SRQ_IDX_BUF_PG_SZ_M,
		       SRQC_BYTE_44_SRQ_IDX_BUF_PG_SZ_S,
		       hr_dev->caps.idx_buf_pg_sz);

	srq_context->idx_nxt_blk_addr = (u32)(mtts_idx[1] >> PAGE_ADDR_SHIFT);
	srq_context->idx_nxt_blk_addr =
		cpu_to_le32(srq_context->idx_nxt_blk_addr);
	roce_set_field(srq_context->rsv_idxnxtblkaddr,
		       SRQC_BYTE_52_SRQ_IDX_NXT_BLK_ADDR_M,
		       SRQC_BYTE_52_SRQ_IDX_NXT_BLK_ADDR_S,
		       cpu_to_le32((mtts_idx[1]) >> (32 + PAGE_ADDR_SHIFT)));
	roce_set_field(srq_context->byte_56_xrc_cqn,
		       SRQC_BYTE_56_SRQ_XRC_CQN_M, SRQC_BYTE_56_SRQ_XRC_CQN_S,
		       cqn);
	roce_set_field(srq_context->byte_56_xrc_cqn,
		       SRQC_BYTE_56_SRQ_WQE_BA_PG_SZ_M,
		       SRQC_BYTE_56_SRQ_WQE_BA_PG_SZ_S,
		       hr_dev->caps.srqwqe_ba_pg_sz + PG_SHIFT_OFFSET);
	roce_set_field(srq_context->byte_56_xrc_cqn,
		       SRQC_BYTE_56_SRQ_WQE_BUF_PG_SZ_M,
		       SRQC_BYTE_56_SRQ_WQE_BUF_PG_SZ_S,
		       hr_dev->caps.srqwqe_buf_pg_sz + PG_SHIFT_OFFSET);

	/* record doorbell disabled for this SRQ */
	roce_set_bit(srq_context->db_record_addr_record_en,
		     SRQC_BYTE_60_SRQ_RECORD_EN_S, 0);
}
5971
5972static int hns_roce_v2_modify_srq(struct ib_srq *ibsrq,
5973 struct ib_srq_attr *srq_attr,
5974 enum ib_srq_attr_mask srq_attr_mask,
5975 struct ib_udata *udata)
5976{
5977 struct hns_roce_dev *hr_dev = to_hr_dev(ibsrq->device);
5978 struct hns_roce_srq *srq = to_hr_srq(ibsrq);
5979 struct hns_roce_srq_context *srq_context;
5980 struct hns_roce_srq_context *srqc_mask;
5981 struct hns_roce_cmd_mailbox *mailbox;
5982 int ret;
5983
5984 if (srq_attr_mask & IB_SRQ_LIMIT) {
5985 if (srq_attr->srq_limit >= srq->max)
5986 return -EINVAL;
5987
5988 mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
5989 if (IS_ERR(mailbox))
5990 return PTR_ERR(mailbox);
5991
5992 srq_context = mailbox->buf;
5993 srqc_mask = (struct hns_roce_srq_context *)mailbox->buf + 1;
5994
5995 memset(srqc_mask, 0xff, sizeof(*srqc_mask));
5996
5997 roce_set_field(srq_context->byte_8_limit_wl,
5998 SRQC_BYTE_8_SRQ_LIMIT_WL_M,
5999 SRQC_BYTE_8_SRQ_LIMIT_WL_S, srq_attr->srq_limit);
6000 roce_set_field(srqc_mask->byte_8_limit_wl,
6001 SRQC_BYTE_8_SRQ_LIMIT_WL_M,
6002 SRQC_BYTE_8_SRQ_LIMIT_WL_S, 0);
6003
6004 ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, srq->srqn, 0,
6005 HNS_ROCE_CMD_MODIFY_SRQC,
6006 HNS_ROCE_CMD_TIMEOUT_MSECS);
6007 hns_roce_free_cmd_mailbox(hr_dev, mailbox);
6008 if (ret) {
6009 dev_err(hr_dev->dev,
6010 "MODIFY SRQ Failed to cmd mailbox.\n");
6011 return ret;
6012 }
6013 }
6014
6015 return 0;
6016}
6017
c3c668e7 6018static int hns_roce_v2_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr)
c7bcb134
LO
6019{
6020 struct hns_roce_dev *hr_dev = to_hr_dev(ibsrq->device);
6021 struct hns_roce_srq *srq = to_hr_srq(ibsrq);
6022 struct hns_roce_srq_context *srq_context;
6023 struct hns_roce_cmd_mailbox *mailbox;
6024 int limit_wl;
6025 int ret;
6026
6027 mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
6028 if (IS_ERR(mailbox))
6029 return PTR_ERR(mailbox);
6030
6031 srq_context = mailbox->buf;
6032 ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, srq->srqn, 0,
6033 HNS_ROCE_CMD_QUERY_SRQC,
6034 HNS_ROCE_CMD_TIMEOUT_MSECS);
6035 if (ret) {
6036 dev_err(hr_dev->dev, "QUERY SRQ cmd process error\n");
6037 goto out;
6038 }
6039
6040 limit_wl = roce_get_field(srq_context->byte_8_limit_wl,
6041 SRQC_BYTE_8_SRQ_LIMIT_WL_M,
6042 SRQC_BYTE_8_SRQ_LIMIT_WL_S);
6043
6044 attr->srq_limit = limit_wl;
6045 attr->max_wr = srq->max - 1;
6046 attr->max_sge = srq->max_gs;
6047
6048 memcpy(srq_context, mailbox->buf, sizeof(*srq_context));
6049
6050out:
6051 hns_roce_free_cmd_mailbox(hr_dev, mailbox);
6052 return ret;
6053}
6054
97545b10
LO
6055static int find_empty_entry(struct hns_roce_idx_que *idx_que,
6056 unsigned long size)
c7bcb134 6057{
97545b10 6058 int wqe_idx;
c7bcb134 6059
97545b10
LO
6060 if (unlikely(bitmap_full(idx_que->bitmap, size)))
6061 return -ENOSPC;
6062
6063 wqe_idx = find_first_zero_bit(idx_que->bitmap, size);
6064
6065 bitmap_set(idx_que->bitmap, wqe_idx, 1);
c7bcb134 6066
97545b10 6067 return wqe_idx;
c7bcb134
LO
6068}
6069
6070static void fill_idx_queue(struct hns_roce_idx_que *idx_que,
6071 int cur_idx, int wqe_idx)
6072{
6073 unsigned int *addr;
6074
6075 addr = (unsigned int *)hns_roce_buf_offset(&idx_que->idx_buf,
6076 cur_idx * idx_que->entry_sz);
6077 *addr = wqe_idx;
6078}
6079
/*
 * Post a chain of receive work requests to an SRQ.
 *
 * For each WR: claim an index-queue slot, write the WQE's scatter list,
 * and remember the wr_id.  After the loop, advance srq->head and ring
 * the SRQ doorbell once for the whole chain.  On any failure the
 * offending WR is reported through *bad_wr and processing stops;
 * already-posted WRs remain posted.
 *
 * Returns 0 on success or a negative errno.
 */
static int hns_roce_v2_post_srq_recv(struct ib_srq *ibsrq,
				     const struct ib_recv_wr *wr,
				     const struct ib_recv_wr **bad_wr)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibsrq->device);
	struct hns_roce_srq *srq = to_hr_srq(ibsrq);
	struct hns_roce_v2_wqe_data_seg *dseg;
	struct hns_roce_v2_db srq_db;
	unsigned long flags;
	int ret = 0;
	int wqe_idx;
	void *wqe;
	int nreq;
	int ind;
	int i;

	spin_lock_irqsave(&srq->lock, flags);

	/* srq->max is a power of two; mask gives the ring position */
	ind = srq->head & (srq->max - 1);

	for (nreq = 0; wr; ++nreq, wr = wr->next) {
		if (unlikely(wr->num_sge > srq->max_gs)) {
			ret = -EINVAL;
			*bad_wr = wr;
			break;
		}

		/* head == tail means the ring is full */
		if (unlikely(srq->head == srq->tail)) {
			ret = -ENOMEM;
			*bad_wr = wr;
			break;
		}

		wqe_idx = find_empty_entry(&srq->idx_que, srq->max);
		if (wqe_idx < 0) {
			ret = -ENOMEM;
			*bad_wr = wr;
			break;
		}

		fill_idx_queue(&srq->idx_que, ind, wqe_idx);
		wqe = get_srq_wqe(srq, wqe_idx);
		dseg = (struct hns_roce_v2_wqe_data_seg *)wqe;

		for (i = 0; i < wr->num_sge; ++i) {
			dseg[i].len = cpu_to_le32(wr->sg_list[i].length);
			dseg[i].lkey = cpu_to_le32(wr->sg_list[i].lkey);
			dseg[i].addr = cpu_to_le64(wr->sg_list[i].addr);
		}

		/* terminate a short SGE list with an invalid-lkey entry */
		if (i < srq->max_gs) {
			dseg[i].len = 0;
			dseg[i].lkey = cpu_to_le32(0x100);
			dseg[i].addr = 0;
		}

		srq->wrid[wqe_idx] = wr->wr_id;
		ind = (ind + 1) & (srq->max - 1);
	}

	if (likely(nreq)) {
		srq->head += nreq;

		/*
		 * Make sure that descriptors are written before
		 * doorbell record.
		 */
		wmb();

		srq_db.byte_4 = HNS_ROCE_V2_SRQ_DB << V2_DB_BYTE_4_CMD_S |
				(srq->srqn & V2_DB_BYTE_4_TAG_M);
		srq_db.parameter = srq->head;

		hns_roce_write64(hr_dev, (__le32 *)&srq_db, srq->db_reg_l);

	}

	spin_unlock_irqrestore(&srq->lock, flags);

	return ret;
}
6161
e1c9a0dc
LO
/* Diagnostics (DFX) hooks for hip08: currently only CQ context query. */
static const struct hns_roce_dfx_hw hns_roce_dfx_hw_v2 = {
	.query_cqc_info = hns_roce_v2_query_cqc_info,
};
6165
7f645a58
KH
/* ib_device verbs implemented by the hip08 hardware backend. */
static const struct ib_device_ops hns_roce_v2_dev_ops = {
	.destroy_qp = hns_roce_v2_destroy_qp,
	.modify_cq = hns_roce_v2_modify_cq,
	.poll_cq = hns_roce_v2_poll_cq,
	.post_recv = hns_roce_v2_post_recv,
	.post_send = hns_roce_v2_post_send,
	.query_qp = hns_roce_v2_query_qp,
	.req_notify_cq = hns_roce_v2_req_notify_cq,
};
6175
/* SRQ verbs, registered only when the device supports SRQs. */
static const struct ib_device_ops hns_roce_v2_dev_srq_ops = {
	.modify_srq = hns_roce_v2_modify_srq,
	.post_srq_recv = hns_roce_v2_post_srq_recv,
	.query_srq = hns_roce_v2_query_srq,
};
6181
a04ff739
WHX
/* hip08 implementation of the hns_roce hardware abstraction layer. */
static const struct hns_roce_hw hns_roce_hw_v2 = {
	/* command queue / mailbox plumbing */
	.cmq_init = hns_roce_v2_cmq_init,
	.cmq_exit = hns_roce_v2_cmq_exit,
	.hw_profile = hns_roce_v2_profile,
	.hw_init = hns_roce_v2_init,
	.hw_exit = hns_roce_v2_exit,
	.post_mbox = hns_roce_v2_post_mbox,
	.chk_mbox = hns_roce_v2_chk_mbox,
	.rst_prc_mbox = hns_roce_v2_rst_process_cmd,
	/* GID/MAC tables */
	.set_gid = hns_roce_v2_set_gid,
	.set_mac = hns_roce_v2_set_mac,
	/* memory region / window contexts */
	.write_mtpt = hns_roce_v2_write_mtpt,
	.rereg_write_mtpt = hns_roce_v2_rereg_write_mtpt,
	.frmr_write_mtpt = hns_roce_v2_frmr_write_mtpt,
	.mw_write_mtpt = hns_roce_v2_mw_write_mtpt,
	.write_cqc = hns_roce_v2_write_cqc,
	/* hardware entry memory management */
	.set_hem = hns_roce_v2_set_hem,
	.clear_hem = hns_roce_v2_clear_hem,
	/* QP/CQ data path */
	.modify_qp = hns_roce_v2_modify_qp,
	.query_qp = hns_roce_v2_query_qp,
	.destroy_qp = hns_roce_v2_destroy_qp,
	.qp_flow_control_init = hns_roce_v2_qp_flow_control_init,
	.modify_cq = hns_roce_v2_modify_cq,
	.post_send = hns_roce_v2_post_send,
	.post_recv = hns_roce_v2_post_recv,
	.req_notify_cq = hns_roce_v2_req_notify_cq,
	.poll_cq = hns_roce_v2_poll_cq,
	/* event queues */
	.init_eq = hns_roce_v2_init_eq_table,
	.cleanup_eq = hns_roce_v2_cleanup_eq_table,
	/* shared receive queues */
	.write_srqc = hns_roce_v2_write_srqc,
	.modify_srq = hns_roce_v2_modify_srq,
	.query_srq = hns_roce_v2_query_srq,
	.post_srq_recv = hns_roce_v2_post_srq_recv,
	.hns_roce_dev_ops = &hns_roce_v2_dev_ops,
	.hns_roce_dev_srq_ops = &hns_roce_v2_dev_srq_ops,
};
dd74282d
WHX
6218
/* PCI IDs of Hisilicon NICs whose hnae3 handles carry a RoCE function. */
static const struct pci_device_id hns_roce_hw_v2_pci_tbl[] = {
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
	/* required last entry */
	{0, }
};

MODULE_DEVICE_TABLE(pci, hns_roce_hw_v2_pci_tbl);
6230
dd74282d
WHX
6231static int hns_roce_hw_v2_get_cfg(struct hns_roce_dev *hr_dev,
6232 struct hnae3_handle *handle)
6233{
d061effc 6234 struct hns_roce_v2_priv *priv = hr_dev->priv;
a5073d60 6235 int i;
dd74282d 6236
dd74282d 6237 hr_dev->hw = &hns_roce_hw_v2;
e1c9a0dc 6238 hr_dev->dfx = &hns_roce_dfx_hw_v2;
2d407888
WHX
6239 hr_dev->sdb_offset = ROCEE_DB_SQ_L_0_REG;
6240 hr_dev->odb_offset = hr_dev->sdb_offset;
dd74282d
WHX
6241
6242 /* Get info from NIC driver. */
6243 hr_dev->reg_base = handle->rinfo.roce_io_base;
6244 hr_dev->caps.num_ports = 1;
6245 hr_dev->iboe.netdevs[0] = handle->rinfo.netdev;
6246 hr_dev->iboe.phy_port[0] = 0;
6247
d4994d2f 6248 addrconf_addr_eui48((u8 *)&hr_dev->ib_dev.node_guid,
6249 hr_dev->iboe.netdevs[0]->dev_addr);
6250
a5073d60
YL
6251 for (i = 0; i < HNS_ROCE_V2_MAX_IRQ_NUM; i++)
6252 hr_dev->irq[i] = pci_irq_vector(handle->pdev,
6253 i + handle->rinfo.base_vector);
6254
dd74282d 6255 /* cmd issue mode: 0 is poll, 1 is event */
a5073d60 6256 hr_dev->cmd_mod = 1;
dd74282d
WHX
6257 hr_dev->loop_idc = 0;
6258
d061effc
WHX
6259 hr_dev->reset_cnt = handle->ae_algo->ops->ae_dev_reset_cnt(handle);
6260 priv->handle = handle;
6261
dd74282d
WHX
6262 return 0;
6263}
6264
d061effc 6265static int __hns_roce_hw_v2_init_instance(struct hnae3_handle *handle)
dd74282d
WHX
6266{
6267 struct hns_roce_dev *hr_dev;
6268 int ret;
6269
459cc69f 6270 hr_dev = ib_alloc_device(hns_roce_dev, ib_dev);
dd74282d
WHX
6271 if (!hr_dev)
6272 return -ENOMEM;
6273
a04ff739
WHX
6274 hr_dev->priv = kzalloc(sizeof(struct hns_roce_v2_priv), GFP_KERNEL);
6275 if (!hr_dev->priv) {
6276 ret = -ENOMEM;
6277 goto error_failed_kzalloc;
6278 }
6279
dd74282d
WHX
6280 hr_dev->pci_dev = handle->pdev;
6281 hr_dev->dev = &handle->pdev->dev;
dd74282d
WHX
6282
6283 ret = hns_roce_hw_v2_get_cfg(hr_dev, handle);
6284 if (ret) {
6285 dev_err(hr_dev->dev, "Get Configuration failed!\n");
6286 goto error_failed_get_cfg;
6287 }
6288
6289 ret = hns_roce_init(hr_dev);
6290 if (ret) {
6291 dev_err(hr_dev->dev, "RoCE Engine init failed!\n");
6292 goto error_failed_get_cfg;
6293 }
6294
d061effc
WHX
6295 handle->priv = hr_dev;
6296
dd74282d
WHX
6297 return 0;
6298
6299error_failed_get_cfg:
a04ff739
WHX
6300 kfree(hr_dev->priv);
6301
6302error_failed_kzalloc:
dd74282d
WHX
6303 ib_dealloc_device(&hr_dev->ib_dev);
6304
6305 return ret;
6306}
6307
/*
 * Tear down the RoCE instance published in handle->priv and free its
 * memory.  Safe to call when the instance was never created.
 *
 * NOTE(review): the 'reset' parameter is unused in this body —
 * presumably kept for the hnae3 callback signature; confirm.
 */
static void __hns_roce_hw_v2_uninit_instance(struct hnae3_handle *handle,
					     bool reset)
{
	struct hns_roce_dev *hr_dev = (struct hns_roce_dev *)handle->priv;

	if (!hr_dev)
		return;

	/* unpublish before teardown so no one can look us up */
	handle->priv = NULL;
	hns_roce_exit(hr_dev);
	kfree(hr_dev->priv);
	ib_dealloc_device(&hr_dev->ib_dev);
}
6321
d061effc
WHX
/*
 * hnae3 client entry point: create a RoCE instance for this handle,
 * tracking progress in rinfo.instance_state so that concurrent resets
 * can be detected.  Handles whose PCI ID has no RoCE function are
 * skipped with success.
 *
 * Returns 0 on success, -EBUSY while the device is resetting, or the
 * error from instance creation.
 */
static int hns_roce_hw_v2_init_instance(struct hnae3_handle *handle)
{
	const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
	const struct pci_device_id *id;
	struct device *dev = &handle->pdev->dev;
	int ret;

	handle->rinfo.instance_state = HNS_ROCE_STATE_INIT;

	/* don't start init while a hardware reset is in flight */
	if (ops->ae_dev_resetting(handle) || ops->get_hw_reset_stat(handle)) {
		handle->rinfo.instance_state = HNS_ROCE_STATE_NON_INIT;
		goto reset_chk_err;
	}

	/* not a RoCE-capable function: nothing to do */
	id = pci_match_id(hns_roce_hw_v2_pci_tbl, handle->pdev);
	if (!id)
		return 0;

	ret = __hns_roce_hw_v2_init_instance(handle);
	if (ret) {
		handle->rinfo.instance_state = HNS_ROCE_STATE_NON_INIT;
		dev_err(dev, "RoCE instance init failed! ret = %d\n", ret);
		/* a reset racing with init explains the failure: retry later */
		if (ops->ae_dev_resetting(handle) ||
		    ops->get_hw_reset_stat(handle))
			goto reset_chk_err;
		else
			return ret;
	}

	handle->rinfo.instance_state = HNS_ROCE_STATE_INITED;


	return 0;

reset_chk_err:
	dev_err(dev, "Device is busy in resetting state.\n"
		     "please retry later.\n");

	return -EBUSY;
}
6362
/*
 * hnae3 client exit point: destroy the RoCE instance unless it never
 * finished initializing, updating instance_state around the teardown.
 */
static void hns_roce_hw_v2_uninit_instance(struct hnae3_handle *handle,
					   bool reset)
{
	if (handle->rinfo.instance_state != HNS_ROCE_STATE_INITED)
		return;

	handle->rinfo.instance_state = HNS_ROCE_STATE_UNINIT;

	__hns_roce_hw_v2_uninit_instance(handle, reset);

	handle->rinfo.instance_state = HNS_ROCE_STATE_NON_INIT;
}
cb7a94c9
WHX
/*
 * DOWN_CLIENT stage of an hnae3 reset: quiesce the device before the
 * hardware goes down — mark it inactive, disable doorbells and notify
 * ULPs with IB_EVENT_DEVICE_FATAL.  If the instance isn't fully up,
 * flag the reset for a direct return in the later stages.
 */
static int hns_roce_hw_v2_reset_notify_down(struct hnae3_handle *handle)
{
	struct hns_roce_dev *hr_dev;
	struct ib_event event;

	if (handle->rinfo.instance_state != HNS_ROCE_STATE_INITED) {
		/* tell the INIT/UNINIT stages to skip their work */
		set_bit(HNS_ROCE_RST_DIRECT_RETURN, &handle->rinfo.state);
		return 0;
	}

	handle->rinfo.reset_state = HNS_ROCE_STATE_RST_DOWN;
	clear_bit(HNS_ROCE_RST_DIRECT_RETURN, &handle->rinfo.state);

	hr_dev = (struct hns_roce_dev *)handle->priv;
	if (!hr_dev)
		return 0;

	hr_dev->active = false;
	hr_dev->dis_db = true;

	event.event = IB_EVENT_DEVICE_FATAL;
	event.device = &hr_dev->ib_dev;
	event.element.port_num = 1;
	ib_dispatch_event(&event);

	return 0;
}
6402
/*
 * INIT_CLIENT stage of an hnae3 reset: re-create the RoCE instance
 * after the hardware came back.  Skipped (with success) when the DOWN
 * stage marked the reset as a direct return.  On failure handle->priv
 * is cleared so the NIC driver sees the instance as gone.
 */
static int hns_roce_hw_v2_reset_notify_init(struct hnae3_handle *handle)
{
	struct device *dev = &handle->pdev->dev;
	int ret;

	if (test_and_clear_bit(HNS_ROCE_RST_DIRECT_RETURN,
			       &handle->rinfo.state)) {
		handle->rinfo.reset_state = HNS_ROCE_STATE_RST_INITED;
		return 0;
	}

	handle->rinfo.reset_state = HNS_ROCE_STATE_RST_INIT;

	dev_info(&handle->pdev->dev, "In reset process RoCE client reinit.\n");
	ret = __hns_roce_hw_v2_init_instance(handle);
	if (ret) {
		/* when reset notify type is HNAE3_INIT_CLIENT In reset notify
		 * callback function, RoCE Engine reinitialize. If RoCE reinit
		 * failed, we should inform NIC driver.
		 */
		handle->priv = NULL;
		dev_err(dev, "In reset process RoCE reinit failed %d.\n", ret);
	} else {
		handle->rinfo.reset_state = HNS_ROCE_STATE_RST_INITED;
		dev_info(dev, "Reset done, RoCE client reinit finished.\n");
	}

	return ret;
}
6432
/*
 * UNINIT_CLIENT stage of an hnae3 reset: destroy the old RoCE instance
 * before the hardware is reset.  Skipped when the DOWN stage flagged a
 * direct return.
 */
static int hns_roce_hw_v2_reset_notify_uninit(struct hnae3_handle *handle)
{
	if (test_bit(HNS_ROCE_RST_DIRECT_RETURN, &handle->rinfo.state))
		return 0;

	handle->rinfo.reset_state = HNS_ROCE_STATE_RST_UNINIT;
	dev_info(&handle->pdev->dev, "In reset process RoCE client uninit.\n");
	/* presumably gives in-flight work a chance to drain — TODO confirm */
	msleep(100);
	__hns_roce_hw_v2_uninit_instance(handle, false);

	return 0;
}
6445
6446static int hns_roce_hw_v2_reset_notify(struct hnae3_handle *handle,
6447 enum hnae3_reset_notify_type type)
6448{
6449 int ret = 0;
6450
6451 switch (type) {
6452 case HNAE3_DOWN_CLIENT:
6453 ret = hns_roce_hw_v2_reset_notify_down(handle);
6454 break;
6455 case HNAE3_INIT_CLIENT:
6456 ret = hns_roce_hw_v2_reset_notify_init(handle);
6457 break;
6458 case HNAE3_UNINIT_CLIENT:
6459 ret = hns_roce_hw_v2_reset_notify_uninit(handle);
6460 break;
6461 default:
6462 break;
6463 }
6464
6465 return ret;
6466}
6467
dd74282d
WHX
/* Callbacks the hns3 NIC driver invokes on its RoCE client. */
static const struct hnae3_client_ops hns_roce_hw_v2_ops = {
	.init_instance = hns_roce_hw_v2_init_instance,
	.uninit_instance = hns_roce_hw_v2_uninit_instance,
	.reset_notify = hns_roce_hw_v2_reset_notify,
};
6473
/* hnae3 client descriptor registered with the hns3 framework. */
static struct hnae3_client hns_roce_hw_v2_client = {
	.name = "hns_roce_hw_v2",
	.type = HNAE3_CLIENT_ROCE,
	.ops = &hns_roce_hw_v2_ops,
};
6479
/* Module entry: register as a RoCE client with the hnae3 framework. */
static int __init hns_roce_hw_v2_init(void)
{
	return hnae3_register_client(&hns_roce_hw_v2_client);
}
6484
/* Module exit: unregister the hnae3 RoCE client. */
static void __exit hns_roce_hw_v2_exit(void)
{
	hnae3_unregister_client(&hns_roce_hw_v2_client);
}
6489
module_init(hns_roce_hw_v2_init);
module_exit(hns_roce_hw_v2_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Wei Hu <xavier.huwei@huawei.com>");
MODULE_AUTHOR("Lijun Ou <oulijun@huawei.com>");
MODULE_AUTHOR("Shaobo Xu <xushaobo2@huawei.com>");
MODULE_DESCRIPTION("Hisilicon Hip08 Family RoCE Driver");