RDMA/hns: Support cq record doorbell for the user space
[linux-2.6-block.git] / drivers / infiniband / hw / hns / hns_roce_hw_v2.c
CommitLineData
dd74282d
WHX
1/*
2 * Copyright (c) 2016-2017 Hisilicon Limited.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#include <linux/acpi.h>
34#include <linux/etherdevice.h>
35#include <linux/interrupt.h>
36#include <linux/kernel.h>
d4994d2f 37#include <net/addrconf.h>
dd74282d
WHX
38#include <rdma/ib_umem.h>
39
40#include "hnae3.h"
41#include "hns_roce_common.h"
42#include "hns_roce_device.h"
43#include "hns_roce_cmd.h"
44#include "hns_roce_hem.h"
a04ff739 45#include "hns_roce_hw_v2.h"
dd74282d 46
2d407888
WHX
47static void set_data_seg_v2(struct hns_roce_v2_wqe_data_seg *dseg,
48 struct ib_sge *sg)
49{
50 dseg->lkey = cpu_to_le32(sg->lkey);
51 dseg->addr = cpu_to_le64(sg->addr);
52 dseg->len = cpu_to_le32(sg->length);
53}
54
7bdee415 55static int set_rwqe_data_seg(struct ib_qp *ibqp, struct ib_send_wr *wr,
56 struct hns_roce_v2_rc_send_wqe *rc_sq_wqe,
57 void *wqe, unsigned int *sge_ind,
58 struct ib_send_wr **bad_wr)
59{
60 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
61 struct hns_roce_v2_wqe_data_seg *dseg = wqe;
62 struct hns_roce_qp *qp = to_hr_qp(ibqp);
63 int i;
64
65 if (wr->send_flags & IB_SEND_INLINE && wr->num_sge) {
8b9b8d14 66 if (le32_to_cpu(rc_sq_wqe->msg_len) >
67 hr_dev->caps.max_sq_inline) {
7bdee415 68 *bad_wr = wr;
69 dev_err(hr_dev->dev, "inline len(1-%d)=%d, illegal",
70 rc_sq_wqe->msg_len, hr_dev->caps.max_sq_inline);
71 return -EINVAL;
72 }
73
74 for (i = 0; i < wr->num_sge; i++) {
75 memcpy(wqe, ((void *)wr->sg_list[i].addr),
76 wr->sg_list[i].length);
77 wqe += wr->sg_list[i].length;
78 }
79
80 roce_set_bit(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_INLINE_S,
81 1);
82 } else {
83 if (wr->num_sge <= 2) {
84 for (i = 0; i < wr->num_sge; i++) {
85 if (likely(wr->sg_list[i].length)) {
86 set_data_seg_v2(dseg, wr->sg_list + i);
87 dseg++;
88 }
89 }
90 } else {
91 roce_set_field(rc_sq_wqe->byte_20,
92 V2_RC_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_M,
93 V2_RC_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_S,
94 (*sge_ind) & (qp->sge.sge_cnt - 1));
95
96 for (i = 0; i < 2; i++) {
97 if (likely(wr->sg_list[i].length)) {
98 set_data_seg_v2(dseg, wr->sg_list + i);
99 dseg++;
100 }
101 }
102
103 dseg = get_send_extend_sge(qp,
104 (*sge_ind) & (qp->sge.sge_cnt - 1));
105
106 for (i = 0; i < wr->num_sge - 2; i++) {
107 if (likely(wr->sg_list[i + 2].length)) {
108 set_data_seg_v2(dseg,
109 wr->sg_list + 2 + i);
110 dseg++;
111 (*sge_ind)++;
112 }
113 }
114 }
115
116 roce_set_field(rc_sq_wqe->byte_16,
117 V2_RC_SEND_WQE_BYTE_16_SGE_NUM_M,
118 V2_RC_SEND_WQE_BYTE_16_SGE_NUM_S, wr->num_sge);
119 }
120
121 return 0;
122}
123
2d407888
WHX
124static int hns_roce_v2_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
125 struct ib_send_wr **bad_wr)
126{
127 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
7bdee415 128 struct hns_roce_ah *ah = to_hr_ah(ud_wr(wr)->ah);
129 struct hns_roce_v2_ud_send_wqe *ud_sq_wqe;
2d407888
WHX
130 struct hns_roce_v2_rc_send_wqe *rc_sq_wqe;
131 struct hns_roce_qp *qp = to_hr_qp(ibqp);
132 struct hns_roce_v2_wqe_data_seg *dseg;
133 struct device *dev = hr_dev->dev;
134 struct hns_roce_v2_db sq_db;
135 unsigned int sge_ind = 0;
e8d18533 136 unsigned int owner_bit;
2d407888
WHX
137 unsigned long flags;
138 unsigned int ind;
139 void *wqe = NULL;
8b9b8d14 140 u32 tmp_len = 0;
7bdee415 141 bool loopback;
2d407888 142 int ret = 0;
7bdee415 143 u8 *smac;
2d407888
WHX
144 int nreq;
145 int i;
146
7bdee415 147 if (unlikely(ibqp->qp_type != IB_QPT_RC &&
148 ibqp->qp_type != IB_QPT_GSI &&
149 ibqp->qp_type != IB_QPT_UD)) {
2d407888
WHX
150 dev_err(dev, "Not supported QP(0x%x)type!\n", ibqp->qp_type);
151 *bad_wr = NULL;
152 return -EOPNOTSUPP;
153 }
154
10bd2ade
YL
155 if (unlikely(qp->state == IB_QPS_RESET || qp->state == IB_QPS_INIT ||
156 qp->state == IB_QPS_RTR)) {
2d407888
WHX
157 dev_err(dev, "Post WQE fail, QP state %d err!\n", qp->state);
158 *bad_wr = wr;
159 return -EINVAL;
160 }
161
162 spin_lock_irqsave(&qp->sq.lock, flags);
163 ind = qp->sq_next_wqe;
164 sge_ind = qp->next_sge;
165
166 for (nreq = 0; wr; ++nreq, wr = wr->next) {
167 if (hns_roce_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) {
168 ret = -ENOMEM;
169 *bad_wr = wr;
170 goto out;
171 }
172
173 if (unlikely(wr->num_sge > qp->sq.max_gs)) {
174 dev_err(dev, "num_sge=%d > qp->sq.max_gs=%d\n",
175 wr->num_sge, qp->sq.max_gs);
176 ret = -EINVAL;
177 *bad_wr = wr;
178 goto out;
179 }
180
181 wqe = get_send_wqe(qp, ind & (qp->sq.wqe_cnt - 1));
182 qp->sq.wrid[(qp->sq.head + nreq) & (qp->sq.wqe_cnt - 1)] =
183 wr->wr_id;
184
e8d18533 185 owner_bit = ~(qp->sq.head >> ilog2(qp->sq.wqe_cnt)) & 0x1;
2d407888 186
7bdee415 187 /* Corresponding to the QP type, wqe process separately */
188 if (ibqp->qp_type == IB_QPT_GSI) {
189 ud_sq_wqe = wqe;
190 memset(ud_sq_wqe, 0, sizeof(*ud_sq_wqe));
191
192 roce_set_field(ud_sq_wqe->dmac, V2_UD_SEND_WQE_DMAC_0_M,
193 V2_UD_SEND_WQE_DMAC_0_S, ah->av.mac[0]);
194 roce_set_field(ud_sq_wqe->dmac, V2_UD_SEND_WQE_DMAC_1_M,
195 V2_UD_SEND_WQE_DMAC_1_S, ah->av.mac[1]);
196 roce_set_field(ud_sq_wqe->dmac, V2_UD_SEND_WQE_DMAC_2_M,
197 V2_UD_SEND_WQE_DMAC_2_S, ah->av.mac[2]);
198 roce_set_field(ud_sq_wqe->dmac, V2_UD_SEND_WQE_DMAC_3_M,
199 V2_UD_SEND_WQE_DMAC_3_S, ah->av.mac[3]);
200 roce_set_field(ud_sq_wqe->byte_48,
201 V2_UD_SEND_WQE_BYTE_48_DMAC_4_M,
202 V2_UD_SEND_WQE_BYTE_48_DMAC_4_S,
203 ah->av.mac[4]);
204 roce_set_field(ud_sq_wqe->byte_48,
205 V2_UD_SEND_WQE_BYTE_48_DMAC_5_M,
206 V2_UD_SEND_WQE_BYTE_48_DMAC_5_S,
207 ah->av.mac[5]);
208
209 /* MAC loopback */
210 smac = (u8 *)hr_dev->dev_addr[qp->port];
211 loopback = ether_addr_equal_unaligned(ah->av.mac,
212 smac) ? 1 : 0;
213
214 roce_set_bit(ud_sq_wqe->byte_40,
215 V2_UD_SEND_WQE_BYTE_40_LBI_S, loopback);
216
217 roce_set_field(ud_sq_wqe->byte_4,
218 V2_UD_SEND_WQE_BYTE_4_OPCODE_M,
219 V2_UD_SEND_WQE_BYTE_4_OPCODE_S,
220 HNS_ROCE_V2_WQE_OP_SEND);
2d407888 221
7bdee415 222 for (i = 0; i < wr->num_sge; i++)
8b9b8d14 223 tmp_len += wr->sg_list[i].length;
492b2bd0 224
8b9b8d14 225 ud_sq_wqe->msg_len =
226 cpu_to_le32(le32_to_cpu(ud_sq_wqe->msg_len) + tmp_len);
227
228 switch (wr->opcode) {
229 case IB_WR_SEND_WITH_IMM:
230 case IB_WR_RDMA_WRITE_WITH_IMM:
231 ud_sq_wqe->immtdata = wr->ex.imm_data;
232 break;
233 default:
234 ud_sq_wqe->immtdata = 0;
235 break;
236 }
651487c2 237
7bdee415 238 /* Set sig attr */
239 roce_set_bit(ud_sq_wqe->byte_4,
240 V2_UD_SEND_WQE_BYTE_4_CQE_S,
241 (wr->send_flags & IB_SEND_SIGNALED) ? 1 : 0);
a49d761f 242
7bdee415 243 /* Set se attr */
244 roce_set_bit(ud_sq_wqe->byte_4,
245 V2_UD_SEND_WQE_BYTE_4_SE_S,
246 (wr->send_flags & IB_SEND_SOLICITED) ? 1 : 0);
e8d18533 247
7bdee415 248 roce_set_bit(ud_sq_wqe->byte_4,
249 V2_UD_SEND_WQE_BYTE_4_OWNER_S, owner_bit);
250
251 roce_set_field(ud_sq_wqe->byte_16,
252 V2_UD_SEND_WQE_BYTE_16_PD_M,
253 V2_UD_SEND_WQE_BYTE_16_PD_S,
254 to_hr_pd(ibqp->pd)->pdn);
255
256 roce_set_field(ud_sq_wqe->byte_16,
257 V2_UD_SEND_WQE_BYTE_16_SGE_NUM_M,
258 V2_UD_SEND_WQE_BYTE_16_SGE_NUM_S,
259 wr->num_sge);
260
261 roce_set_field(ud_sq_wqe->byte_20,
262 V2_UD_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_M,
263 V2_UD_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_S,
264 sge_ind & (qp->sge.sge_cnt - 1));
265
266 roce_set_field(ud_sq_wqe->byte_24,
267 V2_UD_SEND_WQE_BYTE_24_UDPSPN_M,
268 V2_UD_SEND_WQE_BYTE_24_UDPSPN_S, 0);
269 ud_sq_wqe->qkey =
8b9b8d14 270 cpu_to_le32(ud_wr(wr)->remote_qkey & 0x80000000 ?
271 qp->qkey : ud_wr(wr)->remote_qkey);
7bdee415 272 roce_set_field(ud_sq_wqe->byte_32,
273 V2_UD_SEND_WQE_BYTE_32_DQPN_M,
274 V2_UD_SEND_WQE_BYTE_32_DQPN_S,
275 ud_wr(wr)->remote_qpn);
276
277 roce_set_field(ud_sq_wqe->byte_36,
278 V2_UD_SEND_WQE_BYTE_36_VLAN_M,
279 V2_UD_SEND_WQE_BYTE_36_VLAN_S,
8b9b8d14 280 le16_to_cpu(ah->av.vlan));
7bdee415 281 roce_set_field(ud_sq_wqe->byte_36,
282 V2_UD_SEND_WQE_BYTE_36_HOPLIMIT_M,
283 V2_UD_SEND_WQE_BYTE_36_HOPLIMIT_S,
284 ah->av.hop_limit);
285 roce_set_field(ud_sq_wqe->byte_36,
286 V2_UD_SEND_WQE_BYTE_36_TCLASS_M,
287 V2_UD_SEND_WQE_BYTE_36_TCLASS_S,
288 0);
289 roce_set_field(ud_sq_wqe->byte_36,
290 V2_UD_SEND_WQE_BYTE_36_TCLASS_M,
291 V2_UD_SEND_WQE_BYTE_36_TCLASS_S,
292 0);
293 roce_set_field(ud_sq_wqe->byte_40,
294 V2_UD_SEND_WQE_BYTE_40_FLOW_LABEL_M,
295 V2_UD_SEND_WQE_BYTE_40_FLOW_LABEL_S, 0);
296 roce_set_field(ud_sq_wqe->byte_40,
297 V2_UD_SEND_WQE_BYTE_40_SL_M,
298 V2_UD_SEND_WQE_BYTE_40_SL_S,
8b9b8d14 299 le32_to_cpu(ah->av.sl_tclass_flowlabel) >>
300 HNS_ROCE_SL_SHIFT);
7bdee415 301 roce_set_field(ud_sq_wqe->byte_40,
302 V2_UD_SEND_WQE_BYTE_40_PORTN_M,
303 V2_UD_SEND_WQE_BYTE_40_PORTN_S,
304 qp->port);
305
306 roce_set_field(ud_sq_wqe->byte_48,
307 V2_UD_SEND_WQE_BYTE_48_SGID_INDX_M,
308 V2_UD_SEND_WQE_BYTE_48_SGID_INDX_S,
309 hns_get_gid_index(hr_dev, qp->phy_port,
310 ah->av.gid_index));
311
312 memcpy(&ud_sq_wqe->dgid[0], &ah->av.dgid[0],
313 GID_LEN_V2);
314
315 dseg = get_send_extend_sge(qp,
316 sge_ind & (qp->sge.sge_cnt - 1));
317 for (i = 0; i < wr->num_sge; i++) {
318 set_data_seg_v2(dseg + i, wr->sg_list + i);
319 sge_ind++;
320 }
321
322 ind++;
323 } else if (ibqp->qp_type == IB_QPT_RC) {
324 rc_sq_wqe = wqe;
325 memset(rc_sq_wqe, 0, sizeof(*rc_sq_wqe));
326 for (i = 0; i < wr->num_sge; i++)
8b9b8d14 327 tmp_len += wr->sg_list[i].length;
328
329 rc_sq_wqe->msg_len =
330 cpu_to_le32(le32_to_cpu(rc_sq_wqe->msg_len) + tmp_len);
7bdee415 331
8b9b8d14 332 switch (wr->opcode) {
333 case IB_WR_SEND_WITH_IMM:
334 case IB_WR_RDMA_WRITE_WITH_IMM:
335 rc_sq_wqe->immtdata = wr->ex.imm_data;
336 break;
337 case IB_WR_SEND_WITH_INV:
338 rc_sq_wqe->inv_key =
339 cpu_to_le32(wr->ex.invalidate_rkey);
340 break;
341 default:
342 rc_sq_wqe->immtdata = 0;
343 break;
344 }
7bdee415 345
346 roce_set_bit(rc_sq_wqe->byte_4,
347 V2_RC_SEND_WQE_BYTE_4_FENCE_S,
348 (wr->send_flags & IB_SEND_FENCE) ? 1 : 0);
349
350 roce_set_bit(rc_sq_wqe->byte_4,
351 V2_RC_SEND_WQE_BYTE_4_SE_S,
352 (wr->send_flags & IB_SEND_SOLICITED) ? 1 : 0);
353
354 roce_set_bit(rc_sq_wqe->byte_4,
355 V2_RC_SEND_WQE_BYTE_4_CQE_S,
356 (wr->send_flags & IB_SEND_SIGNALED) ? 1 : 0);
357
358 roce_set_bit(rc_sq_wqe->byte_4,
359 V2_RC_SEND_WQE_BYTE_4_OWNER_S, owner_bit);
360
361 switch (wr->opcode) {
362 case IB_WR_RDMA_READ:
363 roce_set_field(rc_sq_wqe->byte_4,
364 V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
365 V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
366 HNS_ROCE_V2_WQE_OP_RDMA_READ);
367 rc_sq_wqe->rkey =
368 cpu_to_le32(rdma_wr(wr)->rkey);
369 rc_sq_wqe->va =
370 cpu_to_le64(rdma_wr(wr)->remote_addr);
371 break;
372 case IB_WR_RDMA_WRITE:
373 roce_set_field(rc_sq_wqe->byte_4,
374 V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
375 V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
376 HNS_ROCE_V2_WQE_OP_RDMA_WRITE);
377 rc_sq_wqe->rkey =
378 cpu_to_le32(rdma_wr(wr)->rkey);
379 rc_sq_wqe->va =
380 cpu_to_le64(rdma_wr(wr)->remote_addr);
381 break;
382 case IB_WR_RDMA_WRITE_WITH_IMM:
383 roce_set_field(rc_sq_wqe->byte_4,
2d407888
WHX
384 V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
385 V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
386 HNS_ROCE_V2_WQE_OP_RDMA_WRITE_WITH_IMM);
7bdee415 387 rc_sq_wqe->rkey =
388 cpu_to_le32(rdma_wr(wr)->rkey);
389 rc_sq_wqe->va =
390 cpu_to_le64(rdma_wr(wr)->remote_addr);
391 break;
392 case IB_WR_SEND:
393 roce_set_field(rc_sq_wqe->byte_4,
394 V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
395 V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
396 HNS_ROCE_V2_WQE_OP_SEND);
397 break;
398 case IB_WR_SEND_WITH_INV:
399 roce_set_field(rc_sq_wqe->byte_4,
2d407888
WHX
400 V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
401 V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
402 HNS_ROCE_V2_WQE_OP_SEND_WITH_INV);
7bdee415 403 break;
404 case IB_WR_SEND_WITH_IMM:
405 roce_set_field(rc_sq_wqe->byte_4,
406 V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
407 V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
408 HNS_ROCE_V2_WQE_OP_SEND_WITH_IMM);
409 break;
410 case IB_WR_LOCAL_INV:
411 roce_set_field(rc_sq_wqe->byte_4,
412 V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
413 V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
414 HNS_ROCE_V2_WQE_OP_LOCAL_INV);
415 break;
416 case IB_WR_ATOMIC_CMP_AND_SWP:
417 roce_set_field(rc_sq_wqe->byte_4,
418 V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
419 V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
420 HNS_ROCE_V2_WQE_OP_ATOM_CMP_AND_SWAP);
421 break;
422 case IB_WR_ATOMIC_FETCH_AND_ADD:
423 roce_set_field(rc_sq_wqe->byte_4,
424 V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
425 V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
426 HNS_ROCE_V2_WQE_OP_ATOM_FETCH_AND_ADD);
427 break;
428 case IB_WR_MASKED_ATOMIC_CMP_AND_SWP:
429 roce_set_field(rc_sq_wqe->byte_4,
2d407888
WHX
430 V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
431 V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
432 HNS_ROCE_V2_WQE_OP_ATOM_MSK_CMP_AND_SWAP);
7bdee415 433 break;
434 case IB_WR_MASKED_ATOMIC_FETCH_AND_ADD:
435 roce_set_field(rc_sq_wqe->byte_4,
2d407888
WHX
436 V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
437 V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
438 HNS_ROCE_V2_WQE_OP_ATOM_MSK_FETCH_AND_ADD);
7bdee415 439 break;
440 default:
441 roce_set_field(rc_sq_wqe->byte_4,
442 V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
443 V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
444 HNS_ROCE_V2_WQE_OP_MASK);
445 break;
2d407888
WHX
446 }
447
7bdee415 448 wqe += sizeof(struct hns_roce_v2_rc_send_wqe);
449 dseg = wqe;
2d407888 450
7bdee415 451 ret = set_rwqe_data_seg(ibqp, wr, rc_sq_wqe, wqe,
452 &sge_ind, bad_wr);
453 if (ret)
454 goto out;
455 ind++;
2d407888 456 } else {
7bdee415 457 dev_err(dev, "Illegal qp_type(0x%x)\n", ibqp->qp_type);
458 spin_unlock_irqrestore(&qp->sq.lock, flags);
459 return -EOPNOTSUPP;
2d407888 460 }
2d407888
WHX
461 }
462
463out:
464 if (likely(nreq)) {
465 qp->sq.head += nreq;
466 /* Memory barrier */
467 wmb();
468
469 sq_db.byte_4 = 0;
470 sq_db.parameter = 0;
471
472 roce_set_field(sq_db.byte_4, V2_DB_BYTE_4_TAG_M,
473 V2_DB_BYTE_4_TAG_S, qp->doorbell_qpn);
474 roce_set_field(sq_db.byte_4, V2_DB_BYTE_4_CMD_M,
475 V2_DB_BYTE_4_CMD_S, HNS_ROCE_V2_SQ_DB);
476 roce_set_field(sq_db.parameter, V2_DB_PARAMETER_CONS_IDX_M,
477 V2_DB_PARAMETER_CONS_IDX_S,
478 qp->sq.head & ((qp->sq.wqe_cnt << 1) - 1));
479 roce_set_field(sq_db.parameter, V2_DB_PARAMETER_SL_M,
480 V2_DB_PARAMETER_SL_S, qp->sl);
481
8b9b8d14 482 hns_roce_write64_k((__le32 *)&sq_db, qp->sq.db_reg_l);
2d407888
WHX
483
484 qp->sq_next_wqe = ind;
485 qp->next_sge = sge_ind;
486 }
487
488 spin_unlock_irqrestore(&qp->sq.lock, flags);
489
490 return ret;
491}
492
493static int hns_roce_v2_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
494 struct ib_recv_wr **bad_wr)
495{
496 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
497 struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
498 struct hns_roce_v2_wqe_data_seg *dseg;
0009c2db 499 struct hns_roce_rinl_sge *sge_list;
2d407888
WHX
500 struct device *dev = hr_dev->dev;
501 struct hns_roce_v2_db rq_db;
502 unsigned long flags;
503 void *wqe = NULL;
504 int ret = 0;
505 int nreq;
506 int ind;
507 int i;
508
509 spin_lock_irqsave(&hr_qp->rq.lock, flags);
510 ind = hr_qp->rq.head & (hr_qp->rq.wqe_cnt - 1);
511
ced07769 512 if (hr_qp->state == IB_QPS_RESET) {
2d407888
WHX
513 spin_unlock_irqrestore(&hr_qp->rq.lock, flags);
514 *bad_wr = wr;
515 return -EINVAL;
516 }
517
518 for (nreq = 0; wr; ++nreq, wr = wr->next) {
519 if (hns_roce_wq_overflow(&hr_qp->rq, nreq,
520 hr_qp->ibqp.recv_cq)) {
521 ret = -ENOMEM;
522 *bad_wr = wr;
523 goto out;
524 }
525
526 if (unlikely(wr->num_sge > hr_qp->rq.max_gs)) {
527 dev_err(dev, "rq:num_sge=%d > qp->sq.max_gs=%d\n",
528 wr->num_sge, hr_qp->rq.max_gs);
529 ret = -EINVAL;
530 *bad_wr = wr;
531 goto out;
532 }
533
534 wqe = get_recv_wqe(hr_qp, ind);
535 dseg = (struct hns_roce_v2_wqe_data_seg *)wqe;
536 for (i = 0; i < wr->num_sge; i++) {
537 if (!wr->sg_list[i].length)
538 continue;
539 set_data_seg_v2(dseg, wr->sg_list + i);
540 dseg++;
541 }
542
543 if (i < hr_qp->rq.max_gs) {
8b9b8d14 544 dseg[i].lkey = cpu_to_le32(HNS_ROCE_INVALID_LKEY);
2d407888
WHX
545 dseg[i].addr = 0;
546 }
547
0009c2db 548 /* rq support inline data */
549 sge_list = hr_qp->rq_inl_buf.wqe_list[ind].sg_list;
550 hr_qp->rq_inl_buf.wqe_list[ind].sge_cnt = (u32)wr->num_sge;
551 for (i = 0; i < wr->num_sge; i++) {
552 sge_list[i].addr = (void *)(u64)wr->sg_list[i].addr;
553 sge_list[i].len = wr->sg_list[i].length;
554 }
555
2d407888
WHX
556 hr_qp->rq.wrid[ind] = wr->wr_id;
557
558 ind = (ind + 1) & (hr_qp->rq.wqe_cnt - 1);
559 }
560
561out:
562 if (likely(nreq)) {
563 hr_qp->rq.head += nreq;
564 /* Memory barrier */
565 wmb();
566
567 rq_db.byte_4 = 0;
568 rq_db.parameter = 0;
569
570 roce_set_field(rq_db.byte_4, V2_DB_BYTE_4_TAG_M,
571 V2_DB_BYTE_4_TAG_S, hr_qp->qpn);
572 roce_set_field(rq_db.byte_4, V2_DB_BYTE_4_CMD_M,
573 V2_DB_BYTE_4_CMD_S, HNS_ROCE_V2_RQ_DB);
574 roce_set_field(rq_db.parameter, V2_DB_PARAMETER_CONS_IDX_M,
575 V2_DB_PARAMETER_CONS_IDX_S, hr_qp->rq.head);
576
8b9b8d14 577 hns_roce_write64_k((__le32 *)&rq_db, hr_qp->rq.db_reg_l);
2d407888
WHX
578 }
579 spin_unlock_irqrestore(&hr_qp->rq.lock, flags);
580
581 return ret;
582}
583
a04ff739
WHX
584static int hns_roce_cmq_space(struct hns_roce_v2_cmq_ring *ring)
585{
586 int ntu = ring->next_to_use;
587 int ntc = ring->next_to_clean;
588 int used = (ntu - ntc + ring->desc_num) % ring->desc_num;
589
590 return ring->desc_num - used - 1;
591}
592
593static int hns_roce_alloc_cmq_desc(struct hns_roce_dev *hr_dev,
594 struct hns_roce_v2_cmq_ring *ring)
595{
596 int size = ring->desc_num * sizeof(struct hns_roce_cmq_desc);
597
598 ring->desc = kzalloc(size, GFP_KERNEL);
599 if (!ring->desc)
600 return -ENOMEM;
601
602 ring->desc_dma_addr = dma_map_single(hr_dev->dev, ring->desc, size,
603 DMA_BIDIRECTIONAL);
604 if (dma_mapping_error(hr_dev->dev, ring->desc_dma_addr)) {
605 ring->desc_dma_addr = 0;
606 kfree(ring->desc);
607 ring->desc = NULL;
608 return -ENOMEM;
609 }
610
611 return 0;
612}
613
614static void hns_roce_free_cmq_desc(struct hns_roce_dev *hr_dev,
615 struct hns_roce_v2_cmq_ring *ring)
616{
617 dma_unmap_single(hr_dev->dev, ring->desc_dma_addr,
618 ring->desc_num * sizeof(struct hns_roce_cmq_desc),
619 DMA_BIDIRECTIONAL);
620 kfree(ring->desc);
621}
622
623static int hns_roce_init_cmq_ring(struct hns_roce_dev *hr_dev, bool ring_type)
624{
625 struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
626 struct hns_roce_v2_cmq_ring *ring = (ring_type == TYPE_CSQ) ?
627 &priv->cmq.csq : &priv->cmq.crq;
628
629 ring->flag = ring_type;
630 ring->next_to_clean = 0;
631 ring->next_to_use = 0;
632
633 return hns_roce_alloc_cmq_desc(hr_dev, ring);
634}
635
636static void hns_roce_cmq_init_regs(struct hns_roce_dev *hr_dev, bool ring_type)
637{
638 struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
639 struct hns_roce_v2_cmq_ring *ring = (ring_type == TYPE_CSQ) ?
640 &priv->cmq.csq : &priv->cmq.crq;
641 dma_addr_t dma = ring->desc_dma_addr;
642
643 if (ring_type == TYPE_CSQ) {
644 roce_write(hr_dev, ROCEE_TX_CMQ_BASEADDR_L_REG, (u32)dma);
645 roce_write(hr_dev, ROCEE_TX_CMQ_BASEADDR_H_REG,
646 upper_32_bits(dma));
647 roce_write(hr_dev, ROCEE_TX_CMQ_DEPTH_REG,
648 (ring->desc_num >> HNS_ROCE_CMQ_DESC_NUM_S) |
649 HNS_ROCE_CMQ_ENABLE);
650 roce_write(hr_dev, ROCEE_TX_CMQ_HEAD_REG, 0);
651 roce_write(hr_dev, ROCEE_TX_CMQ_TAIL_REG, 0);
652 } else {
653 roce_write(hr_dev, ROCEE_RX_CMQ_BASEADDR_L_REG, (u32)dma);
654 roce_write(hr_dev, ROCEE_RX_CMQ_BASEADDR_H_REG,
655 upper_32_bits(dma));
656 roce_write(hr_dev, ROCEE_RX_CMQ_DEPTH_REG,
657 (ring->desc_num >> HNS_ROCE_CMQ_DESC_NUM_S) |
658 HNS_ROCE_CMQ_ENABLE);
659 roce_write(hr_dev, ROCEE_RX_CMQ_HEAD_REG, 0);
660 roce_write(hr_dev, ROCEE_RX_CMQ_TAIL_REG, 0);
661 }
662}
663
664static int hns_roce_v2_cmq_init(struct hns_roce_dev *hr_dev)
665{
666 struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
667 int ret;
668
669 /* Setup the queue entries for command queue */
670 priv->cmq.csq.desc_num = 1024;
671 priv->cmq.crq.desc_num = 1024;
672
673 /* Setup the lock for command queue */
674 spin_lock_init(&priv->cmq.csq.lock);
675 spin_lock_init(&priv->cmq.crq.lock);
676
677 /* Setup Tx write back timeout */
678 priv->cmq.tx_timeout = HNS_ROCE_CMQ_TX_TIMEOUT;
679
680 /* Init CSQ */
681 ret = hns_roce_init_cmq_ring(hr_dev, TYPE_CSQ);
682 if (ret) {
683 dev_err(hr_dev->dev, "Init CSQ error, ret = %d.\n", ret);
684 return ret;
685 }
686
687 /* Init CRQ */
688 ret = hns_roce_init_cmq_ring(hr_dev, TYPE_CRQ);
689 if (ret) {
690 dev_err(hr_dev->dev, "Init CRQ error, ret = %d.\n", ret);
691 goto err_crq;
692 }
693
694 /* Init CSQ REG */
695 hns_roce_cmq_init_regs(hr_dev, TYPE_CSQ);
696
697 /* Init CRQ REG */
698 hns_roce_cmq_init_regs(hr_dev, TYPE_CRQ);
699
700 return 0;
701
702err_crq:
703 hns_roce_free_cmq_desc(hr_dev, &priv->cmq.csq);
704
705 return ret;
706}
707
708static void hns_roce_v2_cmq_exit(struct hns_roce_dev *hr_dev)
709{
710 struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
711
712 hns_roce_free_cmq_desc(hr_dev, &priv->cmq.csq);
713 hns_roce_free_cmq_desc(hr_dev, &priv->cmq.crq);
714}
715
281d0ccf
CIK
716static void hns_roce_cmq_setup_basic_desc(struct hns_roce_cmq_desc *desc,
717 enum hns_roce_opcode_type opcode,
718 bool is_read)
a04ff739
WHX
719{
720 memset((void *)desc, 0, sizeof(struct hns_roce_cmq_desc));
721 desc->opcode = cpu_to_le16(opcode);
722 desc->flag =
723 cpu_to_le16(HNS_ROCE_CMD_FLAG_NO_INTR | HNS_ROCE_CMD_FLAG_IN);
724 if (is_read)
725 desc->flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_WR);
726 else
727 desc->flag &= cpu_to_le16(~HNS_ROCE_CMD_FLAG_WR);
728}
729
730static int hns_roce_cmq_csq_done(struct hns_roce_dev *hr_dev)
731{
732 struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
733 u32 head = roce_read(hr_dev, ROCEE_TX_CMQ_HEAD_REG);
734
735 return head == priv->cmq.csq.next_to_use;
736}
737
738static int hns_roce_cmq_csq_clean(struct hns_roce_dev *hr_dev)
739{
740 struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
741 struct hns_roce_v2_cmq_ring *csq = &priv->cmq.csq;
742 struct hns_roce_cmq_desc *desc;
743 u16 ntc = csq->next_to_clean;
744 u32 head;
745 int clean = 0;
746
747 desc = &csq->desc[ntc];
748 head = roce_read(hr_dev, ROCEE_TX_CMQ_HEAD_REG);
749 while (head != ntc) {
750 memset(desc, 0, sizeof(*desc));
751 ntc++;
752 if (ntc == csq->desc_num)
753 ntc = 0;
754 desc = &csq->desc[ntc];
755 clean++;
756 }
757 csq->next_to_clean = ntc;
758
759 return clean;
760}
761
281d0ccf
CIK
762static int hns_roce_cmq_send(struct hns_roce_dev *hr_dev,
763 struct hns_roce_cmq_desc *desc, int num)
a04ff739
WHX
764{
765 struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
766 struct hns_roce_v2_cmq_ring *csq = &priv->cmq.csq;
767 struct hns_roce_cmq_desc *desc_to_use;
768 bool complete = false;
769 u32 timeout = 0;
770 int handle = 0;
771 u16 desc_ret;
772 int ret = 0;
773 int ntc;
774
775 spin_lock_bh(&csq->lock);
776
777 if (num > hns_roce_cmq_space(csq)) {
778 spin_unlock_bh(&csq->lock);
779 return -EBUSY;
780 }
781
782 /*
783 * Record the location of desc in the cmq for this time
784 * which will be use for hardware to write back
785 */
786 ntc = csq->next_to_use;
787
788 while (handle < num) {
789 desc_to_use = &csq->desc[csq->next_to_use];
790 *desc_to_use = desc[handle];
791 dev_dbg(hr_dev->dev, "set cmq desc:\n");
792 csq->next_to_use++;
793 if (csq->next_to_use == csq->desc_num)
794 csq->next_to_use = 0;
795 handle++;
796 }
797
798 /* Write to hardware */
799 roce_write(hr_dev, ROCEE_TX_CMQ_TAIL_REG, csq->next_to_use);
800
801 /*
802 * If the command is sync, wait for the firmware to write back,
803 * if multi descriptors to be sent, use the first one to check
804 */
805 if ((desc->flag) & HNS_ROCE_CMD_FLAG_NO_INTR) {
806 do {
807 if (hns_roce_cmq_csq_done(hr_dev))
808 break;
988e175b 809 udelay(1);
a04ff739
WHX
810 timeout++;
811 } while (timeout < priv->cmq.tx_timeout);
812 }
813
814 if (hns_roce_cmq_csq_done(hr_dev)) {
815 complete = true;
816 handle = 0;
817 while (handle < num) {
818 /* get the result of hardware write back */
819 desc_to_use = &csq->desc[ntc];
820 desc[handle] = *desc_to_use;
821 dev_dbg(hr_dev->dev, "Get cmq desc:\n");
822 desc_ret = desc[handle].retval;
823 if (desc_ret == CMD_EXEC_SUCCESS)
824 ret = 0;
825 else
826 ret = -EIO;
827 priv->cmq.last_status = desc_ret;
828 ntc++;
829 handle++;
830 if (ntc == csq->desc_num)
831 ntc = 0;
832 }
833 }
834
835 if (!complete)
836 ret = -EAGAIN;
837
838 /* clean the command send queue */
839 handle = hns_roce_cmq_csq_clean(hr_dev);
840 if (handle != num)
841 dev_warn(hr_dev->dev, "Cleaned %d, need to clean %d\n",
842 handle, num);
843
844 spin_unlock_bh(&csq->lock);
845
846 return ret;
847}
848
281d0ccf 849static int hns_roce_cmq_query_hw_info(struct hns_roce_dev *hr_dev)
cfc85f3e
WHX
850{
851 struct hns_roce_query_version *resp;
852 struct hns_roce_cmq_desc desc;
853 int ret;
854
855 hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_QUERY_HW_VER, true);
856 ret = hns_roce_cmq_send(hr_dev, &desc, 1);
857 if (ret)
858 return ret;
859
860 resp = (struct hns_roce_query_version *)desc.data;
861 hr_dev->hw_rev = le32_to_cpu(resp->rocee_hw_version);
862 hr_dev->vendor_id = le32_to_cpu(resp->rocee_vendor_id);
863
864 return 0;
865}
866
867static int hns_roce_config_global_param(struct hns_roce_dev *hr_dev)
868{
869 struct hns_roce_cfg_global_param *req;
870 struct hns_roce_cmq_desc desc;
871
872 hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_GLOBAL_PARAM,
873 false);
874
875 req = (struct hns_roce_cfg_global_param *)desc.data;
876 memset(req, 0, sizeof(*req));
877 roce_set_field(req->time_cfg_udp_port,
878 CFG_GLOBAL_PARAM_DATA_0_ROCEE_TIME_1US_CFG_M,
879 CFG_GLOBAL_PARAM_DATA_0_ROCEE_TIME_1US_CFG_S, 0x3e8);
880 roce_set_field(req->time_cfg_udp_port,
881 CFG_GLOBAL_PARAM_DATA_0_ROCEE_UDP_PORT_M,
882 CFG_GLOBAL_PARAM_DATA_0_ROCEE_UDP_PORT_S, 0x12b7);
883
884 return hns_roce_cmq_send(hr_dev, &desc, 1);
885}
886
887static int hns_roce_query_pf_resource(struct hns_roce_dev *hr_dev)
888{
889 struct hns_roce_cmq_desc desc[2];
890 struct hns_roce_pf_res *res;
891 int ret;
892 int i;
893
894 for (i = 0; i < 2; i++) {
895 hns_roce_cmq_setup_basic_desc(&desc[i],
896 HNS_ROCE_OPC_QUERY_PF_RES, true);
897
898 if (i == 0)
899 desc[i].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
900 else
901 desc[i].flag &= ~cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
902 }
903
904 ret = hns_roce_cmq_send(hr_dev, desc, 2);
905 if (ret)
906 return ret;
907
908 res = (struct hns_roce_pf_res *)desc[0].data;
909
910 hr_dev->caps.qpc_bt_num = roce_get_field(res->qpc_bt_idx_num,
911 PF_RES_DATA_1_PF_QPC_BT_NUM_M,
912 PF_RES_DATA_1_PF_QPC_BT_NUM_S);
913 hr_dev->caps.srqc_bt_num = roce_get_field(res->srqc_bt_idx_num,
914 PF_RES_DATA_2_PF_SRQC_BT_NUM_M,
915 PF_RES_DATA_2_PF_SRQC_BT_NUM_S);
916 hr_dev->caps.cqc_bt_num = roce_get_field(res->cqc_bt_idx_num,
917 PF_RES_DATA_3_PF_CQC_BT_NUM_M,
918 PF_RES_DATA_3_PF_CQC_BT_NUM_S);
919 hr_dev->caps.mpt_bt_num = roce_get_field(res->mpt_bt_idx_num,
920 PF_RES_DATA_4_PF_MPT_BT_NUM_M,
921 PF_RES_DATA_4_PF_MPT_BT_NUM_S);
922
923 return 0;
924}
925
926static int hns_roce_alloc_vf_resource(struct hns_roce_dev *hr_dev)
927{
928 struct hns_roce_cmq_desc desc[2];
929 struct hns_roce_vf_res_a *req_a;
930 struct hns_roce_vf_res_b *req_b;
931 int i;
932
933 req_a = (struct hns_roce_vf_res_a *)desc[0].data;
934 req_b = (struct hns_roce_vf_res_b *)desc[1].data;
935 memset(req_a, 0, sizeof(*req_a));
936 memset(req_b, 0, sizeof(*req_b));
937 for (i = 0; i < 2; i++) {
938 hns_roce_cmq_setup_basic_desc(&desc[i],
939 HNS_ROCE_OPC_ALLOC_VF_RES, false);
940
941 if (i == 0)
942 desc[i].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
943 else
944 desc[i].flag &= ~cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
945
946 if (i == 0) {
947 roce_set_field(req_a->vf_qpc_bt_idx_num,
948 VF_RES_A_DATA_1_VF_QPC_BT_IDX_M,
949 VF_RES_A_DATA_1_VF_QPC_BT_IDX_S, 0);
950 roce_set_field(req_a->vf_qpc_bt_idx_num,
951 VF_RES_A_DATA_1_VF_QPC_BT_NUM_M,
952 VF_RES_A_DATA_1_VF_QPC_BT_NUM_S,
953 HNS_ROCE_VF_QPC_BT_NUM);
954
955 roce_set_field(req_a->vf_srqc_bt_idx_num,
956 VF_RES_A_DATA_2_VF_SRQC_BT_IDX_M,
957 VF_RES_A_DATA_2_VF_SRQC_BT_IDX_S, 0);
958 roce_set_field(req_a->vf_srqc_bt_idx_num,
959 VF_RES_A_DATA_2_VF_SRQC_BT_NUM_M,
960 VF_RES_A_DATA_2_VF_SRQC_BT_NUM_S,
961 HNS_ROCE_VF_SRQC_BT_NUM);
962
963 roce_set_field(req_a->vf_cqc_bt_idx_num,
964 VF_RES_A_DATA_3_VF_CQC_BT_IDX_M,
965 VF_RES_A_DATA_3_VF_CQC_BT_IDX_S, 0);
966 roce_set_field(req_a->vf_cqc_bt_idx_num,
967 VF_RES_A_DATA_3_VF_CQC_BT_NUM_M,
968 VF_RES_A_DATA_3_VF_CQC_BT_NUM_S,
969 HNS_ROCE_VF_CQC_BT_NUM);
970
971 roce_set_field(req_a->vf_mpt_bt_idx_num,
972 VF_RES_A_DATA_4_VF_MPT_BT_IDX_M,
973 VF_RES_A_DATA_4_VF_MPT_BT_IDX_S, 0);
974 roce_set_field(req_a->vf_mpt_bt_idx_num,
975 VF_RES_A_DATA_4_VF_MPT_BT_NUM_M,
976 VF_RES_A_DATA_4_VF_MPT_BT_NUM_S,
977 HNS_ROCE_VF_MPT_BT_NUM);
978
979 roce_set_field(req_a->vf_eqc_bt_idx_num,
980 VF_RES_A_DATA_5_VF_EQC_IDX_M,
981 VF_RES_A_DATA_5_VF_EQC_IDX_S, 0);
982 roce_set_field(req_a->vf_eqc_bt_idx_num,
983 VF_RES_A_DATA_5_VF_EQC_NUM_M,
984 VF_RES_A_DATA_5_VF_EQC_NUM_S,
985 HNS_ROCE_VF_EQC_NUM);
986 } else {
987 roce_set_field(req_b->vf_smac_idx_num,
988 VF_RES_B_DATA_1_VF_SMAC_IDX_M,
989 VF_RES_B_DATA_1_VF_SMAC_IDX_S, 0);
990 roce_set_field(req_b->vf_smac_idx_num,
991 VF_RES_B_DATA_1_VF_SMAC_NUM_M,
992 VF_RES_B_DATA_1_VF_SMAC_NUM_S,
993 HNS_ROCE_VF_SMAC_NUM);
994
995 roce_set_field(req_b->vf_sgid_idx_num,
996 VF_RES_B_DATA_2_VF_SGID_IDX_M,
997 VF_RES_B_DATA_2_VF_SGID_IDX_S, 0);
998 roce_set_field(req_b->vf_sgid_idx_num,
999 VF_RES_B_DATA_2_VF_SGID_NUM_M,
1000 VF_RES_B_DATA_2_VF_SGID_NUM_S,
1001 HNS_ROCE_VF_SGID_NUM);
1002
1003 roce_set_field(req_b->vf_qid_idx_sl_num,
1004 VF_RES_B_DATA_3_VF_QID_IDX_M,
1005 VF_RES_B_DATA_3_VF_QID_IDX_S, 0);
1006 roce_set_field(req_b->vf_qid_idx_sl_num,
1007 VF_RES_B_DATA_3_VF_SL_NUM_M,
1008 VF_RES_B_DATA_3_VF_SL_NUM_S,
1009 HNS_ROCE_VF_SL_NUM);
1010 }
1011 }
1012
1013 return hns_roce_cmq_send(hr_dev, desc, 2);
1014}
1015
a81fba28
WHX
/*
 * Configure the base-address-table (BT) attributes for the QPC, SRQC, CQC
 * and MPT context tables via a single CFG_BT_ATTR command: for each table
 * the BA page size, buffer page size and hop number are programmed.
 *
 * Return: 0 on success, errno from hns_roce_cmq_send() on failure.
 */
static int hns_roce_v2_set_bt(struct hns_roce_dev *hr_dev)
{
	u8 srqc_hop_num = hr_dev->caps.srqc_hop_num;
	u8 qpc_hop_num = hr_dev->caps.qpc_hop_num;
	u8 cqc_hop_num = hr_dev->caps.cqc_hop_num;
	u8 mpt_hop_num = hr_dev->caps.mpt_hop_num;
	struct hns_roce_cfg_bt_attr *req;
	struct hns_roce_cmq_desc desc;

	hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_BT_ATTR, false);
	req = (struct hns_roce_cfg_bt_attr *)desc.data;
	memset(req, 0, sizeof(*req));

	/* QPC table: the HW encodes "no hop" (HOP_NUM_0) as 0 */
	roce_set_field(req->vf_qpc_cfg, CFG_BT_ATTR_DATA_0_VF_QPC_BA_PGSZ_M,
		       CFG_BT_ATTR_DATA_0_VF_QPC_BA_PGSZ_S,
		       hr_dev->caps.qpc_ba_pg_sz);
	roce_set_field(req->vf_qpc_cfg, CFG_BT_ATTR_DATA_0_VF_QPC_BUF_PGSZ_M,
		       CFG_BT_ATTR_DATA_0_VF_QPC_BUF_PGSZ_S,
		       hr_dev->caps.qpc_buf_pg_sz);
	roce_set_field(req->vf_qpc_cfg, CFG_BT_ATTR_DATA_0_VF_QPC_HOPNUM_M,
		       CFG_BT_ATTR_DATA_0_VF_QPC_HOPNUM_S,
		       qpc_hop_num == HNS_ROCE_HOP_NUM_0 ? 0 : qpc_hop_num);

	/* SRQC table */
	roce_set_field(req->vf_srqc_cfg, CFG_BT_ATTR_DATA_1_VF_SRQC_BA_PGSZ_M,
		       CFG_BT_ATTR_DATA_1_VF_SRQC_BA_PGSZ_S,
		       hr_dev->caps.srqc_ba_pg_sz);
	roce_set_field(req->vf_srqc_cfg, CFG_BT_ATTR_DATA_1_VF_SRQC_BUF_PGSZ_M,
		       CFG_BT_ATTR_DATA_1_VF_SRQC_BUF_PGSZ_S,
		       hr_dev->caps.srqc_buf_pg_sz);
	roce_set_field(req->vf_srqc_cfg, CFG_BT_ATTR_DATA_1_VF_SRQC_HOPNUM_M,
		       CFG_BT_ATTR_DATA_1_VF_SRQC_HOPNUM_S,
		       srqc_hop_num == HNS_ROCE_HOP_NUM_0 ? 0 : srqc_hop_num);

	/* CQC table */
	roce_set_field(req->vf_cqc_cfg, CFG_BT_ATTR_DATA_2_VF_CQC_BA_PGSZ_M,
		       CFG_BT_ATTR_DATA_2_VF_CQC_BA_PGSZ_S,
		       hr_dev->caps.cqc_ba_pg_sz);
	roce_set_field(req->vf_cqc_cfg, CFG_BT_ATTR_DATA_2_VF_CQC_BUF_PGSZ_M,
		       CFG_BT_ATTR_DATA_2_VF_CQC_BUF_PGSZ_S,
		       hr_dev->caps.cqc_buf_pg_sz);
	roce_set_field(req->vf_cqc_cfg, CFG_BT_ATTR_DATA_2_VF_CQC_HOPNUM_M,
		       CFG_BT_ATTR_DATA_2_VF_CQC_HOPNUM_S,
		       cqc_hop_num == HNS_ROCE_HOP_NUM_0 ? 0 : cqc_hop_num);

	/* MPT table */
	roce_set_field(req->vf_mpt_cfg, CFG_BT_ATTR_DATA_3_VF_MPT_BA_PGSZ_M,
		       CFG_BT_ATTR_DATA_3_VF_MPT_BA_PGSZ_S,
		       hr_dev->caps.mpt_ba_pg_sz);
	roce_set_field(req->vf_mpt_cfg, CFG_BT_ATTR_DATA_3_VF_MPT_BUF_PGSZ_M,
		       CFG_BT_ATTR_DATA_3_VF_MPT_BUF_PGSZ_S,
		       hr_dev->caps.mpt_buf_pg_sz);
	roce_set_field(req->vf_mpt_cfg, CFG_BT_ATTR_DATA_3_VF_MPT_HOPNUM_M,
		       CFG_BT_ATTR_DATA_3_VF_MPT_HOPNUM_S,
		       mpt_hop_num == HNS_ROCE_HOP_NUM_0 ? 0 : mpt_hop_num);

	return hns_roce_cmq_send(hr_dev, &desc, 1);
}
1071
cfc85f3e
WHX
1072static int hns_roce_v2_profile(struct hns_roce_dev *hr_dev)
1073{
1074 struct hns_roce_caps *caps = &hr_dev->caps;
1075 int ret;
1076
1077 ret = hns_roce_cmq_query_hw_info(hr_dev);
1078 if (ret) {
1079 dev_err(hr_dev->dev, "Query firmware version fail, ret = %d.\n",
1080 ret);
1081 return ret;
1082 }
1083
1084 ret = hns_roce_config_global_param(hr_dev);
1085 if (ret) {
1086 dev_err(hr_dev->dev, "Configure global param fail, ret = %d.\n",
1087 ret);
1088 }
1089
1090 /* Get pf resource owned by every pf */
1091 ret = hns_roce_query_pf_resource(hr_dev);
1092 if (ret) {
1093 dev_err(hr_dev->dev, "Query pf resource fail, ret = %d.\n",
1094 ret);
1095 return ret;
1096 }
1097
1098 ret = hns_roce_alloc_vf_resource(hr_dev);
1099 if (ret) {
1100 dev_err(hr_dev->dev, "Allocate vf resource fail, ret = %d.\n",
1101 ret);
1102 return ret;
1103 }
1104
1105 hr_dev->vendor_part_id = 0;
1106 hr_dev->sys_image_guid = 0;
1107
1108 caps->num_qps = HNS_ROCE_V2_MAX_QP_NUM;
1109 caps->max_wqes = HNS_ROCE_V2_MAX_WQE_NUM;
1110 caps->num_cqs = HNS_ROCE_V2_MAX_CQ_NUM;
1111 caps->max_cqes = HNS_ROCE_V2_MAX_CQE_NUM;
1112 caps->max_sq_sg = HNS_ROCE_V2_MAX_SQ_SGE_NUM;
1113 caps->max_rq_sg = HNS_ROCE_V2_MAX_RQ_SGE_NUM;
1114 caps->max_sq_inline = HNS_ROCE_V2_MAX_SQ_INLINE;
1115 caps->num_uars = HNS_ROCE_V2_UAR_NUM;
1116 caps->phy_num_uars = HNS_ROCE_V2_PHY_UAR_NUM;
a5073d60
YL
1117 caps->num_aeq_vectors = HNS_ROCE_V2_AEQE_VEC_NUM;
1118 caps->num_comp_vectors = HNS_ROCE_V2_COMP_VEC_NUM;
1119 caps->num_other_vectors = HNS_ROCE_V2_ABNORMAL_VEC_NUM;
cfc85f3e
WHX
1120 caps->num_mtpts = HNS_ROCE_V2_MAX_MTPT_NUM;
1121 caps->num_mtt_segs = HNS_ROCE_V2_MAX_MTT_SEGS;
1122 caps->num_cqe_segs = HNS_ROCE_V2_MAX_CQE_SEGS;
1123 caps->num_pds = HNS_ROCE_V2_MAX_PD_NUM;
1124 caps->max_qp_init_rdma = HNS_ROCE_V2_MAX_QP_INIT_RDMA;
1125 caps->max_qp_dest_rdma = HNS_ROCE_V2_MAX_QP_DEST_RDMA;
1126 caps->max_sq_desc_sz = HNS_ROCE_V2_MAX_SQ_DESC_SZ;
1127 caps->max_rq_desc_sz = HNS_ROCE_V2_MAX_RQ_DESC_SZ;
1128 caps->max_srq_desc_sz = HNS_ROCE_V2_MAX_SRQ_DESC_SZ;
1129 caps->qpc_entry_sz = HNS_ROCE_V2_QPC_ENTRY_SZ;
1130 caps->irrl_entry_sz = HNS_ROCE_V2_IRRL_ENTRY_SZ;
e92f2c18 1131 caps->trrl_entry_sz = HNS_ROCE_V2_TRRL_ENTRY_SZ;
cfc85f3e
WHX
1132 caps->cqc_entry_sz = HNS_ROCE_V2_CQC_ENTRY_SZ;
1133 caps->mtpt_entry_sz = HNS_ROCE_V2_MTPT_ENTRY_SZ;
1134 caps->mtt_entry_sz = HNS_ROCE_V2_MTT_ENTRY_SZ;
1135 caps->cq_entry_sz = HNS_ROCE_V2_CQE_ENTRY_SIZE;
1136 caps->page_size_cap = HNS_ROCE_V2_PAGE_SIZE_SUPPORTED;
1137 caps->reserved_lkey = 0;
1138 caps->reserved_pds = 0;
1139 caps->reserved_mrws = 1;
1140 caps->reserved_uars = 0;
1141 caps->reserved_cqs = 0;
1142
a25d13cb
SX
1143 caps->qpc_ba_pg_sz = 0;
1144 caps->qpc_buf_pg_sz = 0;
1145 caps->qpc_hop_num = HNS_ROCE_CONTEXT_HOP_NUM;
1146 caps->srqc_ba_pg_sz = 0;
1147 caps->srqc_buf_pg_sz = 0;
1148 caps->srqc_hop_num = HNS_ROCE_HOP_NUM_0;
1149 caps->cqc_ba_pg_sz = 0;
1150 caps->cqc_buf_pg_sz = 0;
1151 caps->cqc_hop_num = HNS_ROCE_CONTEXT_HOP_NUM;
1152 caps->mpt_ba_pg_sz = 0;
1153 caps->mpt_buf_pg_sz = 0;
1154 caps->mpt_hop_num = HNS_ROCE_CONTEXT_HOP_NUM;
ff795f71
WHX
1155 caps->pbl_ba_pg_sz = 0;
1156 caps->pbl_buf_pg_sz = 0;
1157 caps->pbl_hop_num = HNS_ROCE_PBL_HOP_NUM;
6a93c77a
SX
1158 caps->mtt_ba_pg_sz = 0;
1159 caps->mtt_buf_pg_sz = 0;
1160 caps->mtt_hop_num = HNS_ROCE_MTT_HOP_NUM;
1161 caps->cqe_ba_pg_sz = 0;
1162 caps->cqe_buf_pg_sz = 0;
1163 caps->cqe_hop_num = HNS_ROCE_CQE_HOP_NUM;
a5073d60
YL
1164 caps->eqe_ba_pg_sz = 0;
1165 caps->eqe_buf_pg_sz = 0;
1166 caps->eqe_hop_num = HNS_ROCE_EQE_HOP_NUM;
29a1fe5d 1167 caps->chunk_sz = HNS_ROCE_V2_TABLE_CHUNK_SIZE;
a25d13cb 1168
023c1477 1169 caps->flags = HNS_ROCE_CAP_FLAG_REREG_MR |
0009c2db 1170 HNS_ROCE_CAP_FLAG_ROCE_V1_V2 |
e088a685
YL
1171 HNS_ROCE_CAP_FLAG_RQ_INLINE |
1172 HNS_ROCE_CAP_FLAG_RECORD_DB;
cfc85f3e 1173 caps->pkey_table_len[0] = 1;
b5ff0f61 1174 caps->gid_table_len[0] = HNS_ROCE_V2_GID_INDEX_NUM;
a5073d60
YL
1175 caps->ceqe_depth = HNS_ROCE_V2_COMP_EQE_NUM;
1176 caps->aeqe_depth = HNS_ROCE_V2_ASYNC_EQE_NUM;
cfc85f3e
WHX
1177 caps->local_ca_ack_delay = 0;
1178 caps->max_mtu = IB_MTU_4096;
1179
a81fba28
WHX
1180 ret = hns_roce_v2_set_bt(hr_dev);
1181 if (ret)
1182 dev_err(hr_dev->dev, "Configure bt attribute fail, ret = %d.\n",
1183 ret);
1184
1185 return ret;
cfc85f3e
WHX
1186}
1187
a680f2f3
WHX
1188static int hns_roce_v2_cmd_pending(struct hns_roce_dev *hr_dev)
1189{
1190 u32 status = readl(hr_dev->reg_base + ROCEE_VF_MB_STATUS_REG);
1191
1192 return status >> HNS_ROCE_HW_RUN_BIT_SHIFT;
1193}
1194
1195static int hns_roce_v2_cmd_complete(struct hns_roce_dev *hr_dev)
1196{
1197 u32 status = readl(hr_dev->reg_base + ROCEE_VF_MB_STATUS_REG);
1198
1199 return status & HNS_ROCE_HW_MB_STATUS_MASK;
1200}
1201
/*
 * Post a mailbox command to the hardware through the VF mailbox registers.
 *
 * Waits (up to HNS_ROCE_V2_GO_BIT_TIMEOUT_MSECS) for any previous command
 * to finish, then writes the parameters and finally the command/token
 * words; the wmb() guarantees the parameter writes are visible to the
 * device before the doorbell-like command words trigger execution.
 *
 * NOTE(review): @op_modifier is accepted but not programmed into any
 * register here — presumably unused by this hardware revision; confirm.
 *
 * Return: 0 on success, -EAGAIN if the previous command never completed.
 */
static int hns_roce_v2_post_mbox(struct hns_roce_dev *hr_dev, u64 in_param,
				 u64 out_param, u32 in_modifier, u8 op_modifier,
				 u16 op, u16 token, int event)
{
	struct device *dev = hr_dev->dev;
	/* Mailbox register window: 64-bit in/out params then two 32-bit words */
	u32 __iomem *hcr = (u32 __iomem *)(hr_dev->reg_base +
					   ROCEE_VF_MB_CFG0_REG);
	unsigned long end;
	u32 val0 = 0;
	u32 val1 = 0;

	/* Busy-wait (with rescheduling) until the previous command is done */
	end = msecs_to_jiffies(HNS_ROCE_V2_GO_BIT_TIMEOUT_MSECS) + jiffies;
	while (hns_roce_v2_cmd_pending(hr_dev)) {
		if (time_after(jiffies, end)) {
			dev_dbg(dev, "jiffies=%d end=%d\n", (int)jiffies,
				(int)end);
			return -EAGAIN;
		}
		cond_resched();
	}

	/* val0: target tag and opcode; val1: event mode flag and token */
	roce_set_field(val0, HNS_ROCE_VF_MB4_TAG_MASK,
		       HNS_ROCE_VF_MB4_TAG_SHIFT, in_modifier);
	roce_set_field(val0, HNS_ROCE_VF_MB4_CMD_MASK,
		       HNS_ROCE_VF_MB4_CMD_SHIFT, op);
	roce_set_field(val1, HNS_ROCE_VF_MB5_EVENT_MASK,
		       HNS_ROCE_VF_MB5_EVENT_SHIFT, event);
	roce_set_field(val1, HNS_ROCE_VF_MB5_TOKEN_MASK,
		       HNS_ROCE_VF_MB5_TOKEN_SHIFT, token);

	writeq(in_param, hcr + 0);
	writeq(out_param, hcr + 2);

	/* Ensure the parameters land before the command words start the HW */
	wmb();

	writel(val0, hcr + 4);
	writel(val1, hcr + 5);

	/* Order the MMIO writes before any subsequent spinlock release */
	mmiowb();

	return 0;
}
1245
1246static int hns_roce_v2_chk_mbox(struct hns_roce_dev *hr_dev,
1247 unsigned long timeout)
1248{
1249 struct device *dev = hr_dev->dev;
1250 unsigned long end = 0;
1251 u32 status;
1252
1253 end = msecs_to_jiffies(timeout) + jiffies;
1254 while (hns_roce_v2_cmd_pending(hr_dev) && time_before(jiffies, end))
1255 cond_resched();
1256
1257 if (hns_roce_v2_cmd_pending(hr_dev)) {
1258 dev_err(dev, "[cmd_poll]hw run cmd TIMEDOUT!\n");
1259 return -ETIMEDOUT;
1260 }
1261
1262 status = hns_roce_v2_cmd_complete(hr_dev);
1263 if (status != 0x1) {
1264 dev_err(dev, "mailbox status 0x%x!\n", status);
1265 return -EBUSY;
1266 }
1267
1268 return 0;
1269}
1270
b5ff0f61
WHX
/*
 * Program one SGID table entry (@gid_index) into the per-VF SGID registers.
 *
 * The 16-byte GID is written as four 32-bit words to SGID_CFG0..3, and the
 * SGID type (RoCE v1, or RoCE v2 over IPv4/IPv6 depending on whether the
 * GID is an IPv4-mapped address) is programmed into SGID_CFG4.
 *
 * Return: 0 on success, -EINVAL if @gid or @attr is NULL.
 */
static int hns_roce_v2_set_gid(struct hns_roce_dev *hr_dev, u8 port,
			       int gid_index, union ib_gid *gid,
			       const struct ib_gid_attr *attr)
{
	enum hns_roce_sgid_type sgid_type = GID_TYPE_FLAG_ROCE_V1;
	u32 *p;
	u32 val;

	if (!gid || !attr)
		return -EINVAL;

	if (attr->gid_type == IB_GID_TYPE_ROCE)
		sgid_type = GID_TYPE_FLAG_ROCE_V1;

	if (attr->gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) {
		/* An IPv4-mapped GID selects the RoCEv2/IPv4 encoding */
		if (ipv6_addr_v4mapped((void *)gid))
			sgid_type = GID_TYPE_FLAG_ROCE_V2_IPV4;
		else
			sgid_type = GID_TYPE_FLAG_ROCE_V2_IPV6;
	}

	/* Each SGID entry occupies a 0x20-byte register stride */
	p = (u32 *)&gid->raw[0];
	roce_raw_write(*p, hr_dev->reg_base + ROCEE_VF_SGID_CFG0_REG +
		       0x20 * gid_index);

	p = (u32 *)&gid->raw[4];
	roce_raw_write(*p, hr_dev->reg_base + ROCEE_VF_SGID_CFG1_REG +
		       0x20 * gid_index);

	p = (u32 *)&gid->raw[8];
	roce_raw_write(*p, hr_dev->reg_base + ROCEE_VF_SGID_CFG2_REG +
		       0x20 * gid_index);

	p = (u32 *)&gid->raw[0xc];
	roce_raw_write(*p, hr_dev->reg_base + ROCEE_VF_SGID_CFG3_REG +
		       0x20 * gid_index);

	/* Read-modify-write the type field of the entry's CFG4 register */
	val = roce_read(hr_dev, ROCEE_VF_SGID_CFG4_REG + 0x20 * gid_index);
	roce_set_field(val, ROCEE_VF_SGID_CFG4_SGID_TYPE_M,
		       ROCEE_VF_SGID_CFG4_SGID_TYPE_S, sgid_type);

	roce_write(hr_dev, ROCEE_VF_SGID_CFG4_REG + 0x20 * gid_index, val);

	return 0;
}
1316
a74dc41d
WHX
/*
 * Program the source MAC address for @phy_port: the low 32 bits go to
 * SMAC_CFG0 and the high 16 bits are merged into SMAC_CFG1 via
 * read-modify-write. Each port uses a 0x08-byte register stride.
 *
 * Return: always 0.
 */
static int hns_roce_v2_set_mac(struct hns_roce_dev *hr_dev, u8 phy_port,
			       u8 *addr)
{
	u16 reg_smac_h;
	u32 reg_smac_l;
	u32 val;

	/* Low 4 bytes of the MAC address */
	reg_smac_l = *(u32 *)(&addr[0]);
	roce_raw_write(reg_smac_l, hr_dev->reg_base + ROCEE_VF_SMAC_CFG0_REG +
		       0x08 * phy_port);
	val = roce_read(hr_dev, ROCEE_VF_SMAC_CFG1_REG + 0x08 * phy_port);

	/* High 2 bytes, merged so other CFG1 fields are preserved */
	reg_smac_h = *(u16 *)(&addr[4]);
	roce_set_field(val, ROCEE_VF_SMAC_CFG1_VF_SMAC_H_M,
		       ROCEE_VF_SMAC_CFG1_VF_SMAC_H_S, reg_smac_h);
	roce_write(hr_dev, ROCEE_VF_SMAC_CFG1_REG + 0x08 * phy_port, val);

	return 0;
}
1336
3958cc56
WHX
1337static int hns_roce_v2_write_mtpt(void *mb_buf, struct hns_roce_mr *mr,
1338 unsigned long mtpt_idx)
1339{
1340 struct hns_roce_v2_mpt_entry *mpt_entry;
1341 struct scatterlist *sg;
db270c41 1342 u64 page_addr;
3958cc56 1343 u64 *pages;
db270c41
WHX
1344 int i, j;
1345 int len;
3958cc56 1346 int entry;
3958cc56
WHX
1347
1348 mpt_entry = mb_buf;
1349 memset(mpt_entry, 0, sizeof(*mpt_entry));
1350
1351 roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_MPT_ST_M,
1352 V2_MPT_BYTE_4_MPT_ST_S, V2_MPT_ST_VALID);
1353 roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PBL_HOP_NUM_M,
1354 V2_MPT_BYTE_4_PBL_HOP_NUM_S, mr->pbl_hop_num ==
1355 HNS_ROCE_HOP_NUM_0 ? 0 : mr->pbl_hop_num);
1356 roce_set_field(mpt_entry->byte_4_pd_hop_st,
1357 V2_MPT_BYTE_4_PBL_BA_PG_SZ_M,
1358 V2_MPT_BYTE_4_PBL_BA_PG_SZ_S, mr->pbl_ba_pg_sz);
1359 roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PD_M,
1360 V2_MPT_BYTE_4_PD_S, mr->pd);
1361 mpt_entry->byte_4_pd_hop_st = cpu_to_le32(mpt_entry->byte_4_pd_hop_st);
1362
1363 roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RA_EN_S, 0);
1364 roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_R_INV_EN_S, 1);
1365 roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_L_INV_EN_S, 0);
1366 roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_BIND_EN_S,
1367 (mr->access & IB_ACCESS_MW_BIND ? 1 : 0));
1368 roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_ATOMIC_EN_S, 0);
1369 roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RR_EN_S,
1370 (mr->access & IB_ACCESS_REMOTE_READ ? 1 : 0));
1371 roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RW_EN_S,
1372 (mr->access & IB_ACCESS_REMOTE_WRITE ? 1 : 0));
1373 roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_LW_EN_S,
1374 (mr->access & IB_ACCESS_LOCAL_WRITE ? 1 : 0));
1375 mpt_entry->byte_8_mw_cnt_en = cpu_to_le32(mpt_entry->byte_8_mw_cnt_en);
1376
1377 roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_PA_S,
1378 mr->type == MR_TYPE_MR ? 0 : 1);
1379 mpt_entry->byte_12_mw_pa = cpu_to_le32(mpt_entry->byte_12_mw_pa);
1380
1381 mpt_entry->len_l = cpu_to_le32(lower_32_bits(mr->size));
1382 mpt_entry->len_h = cpu_to_le32(upper_32_bits(mr->size));
1383 mpt_entry->lkey = cpu_to_le32(mr->key);
1384 mpt_entry->va_l = cpu_to_le32(lower_32_bits(mr->iova));
1385 mpt_entry->va_h = cpu_to_le32(upper_32_bits(mr->iova));
1386
1387 if (mr->type == MR_TYPE_DMA)
1388 return 0;
1389
1390 mpt_entry->pbl_size = cpu_to_le32(mr->pbl_size);
1391
1392 mpt_entry->pbl_ba_l = cpu_to_le32(lower_32_bits(mr->pbl_ba >> 3));
1393 roce_set_field(mpt_entry->byte_48_mode_ba, V2_MPT_BYTE_48_PBL_BA_H_M,
1394 V2_MPT_BYTE_48_PBL_BA_H_S,
1395 upper_32_bits(mr->pbl_ba >> 3));
1396 mpt_entry->byte_48_mode_ba = cpu_to_le32(mpt_entry->byte_48_mode_ba);
1397
1398 pages = (u64 *)__get_free_page(GFP_KERNEL);
1399 if (!pages)
1400 return -ENOMEM;
1401
1402 i = 0;
1403 for_each_sg(mr->umem->sg_head.sgl, sg, mr->umem->nmap, entry) {
db270c41
WHX
1404 len = sg_dma_len(sg) >> PAGE_SHIFT;
1405 for (j = 0; j < len; ++j) {
1406 page_addr = sg_dma_address(sg) +
1407 (j << mr->umem->page_shift);
1408 pages[i] = page_addr >> 6;
1409
1410 /* Record the first 2 entry directly to MTPT table */
1411 if (i >= HNS_ROCE_V2_MAX_INNER_MTPT_NUM - 1)
1412 goto found;
1413 i++;
1414 }
3958cc56
WHX
1415 }
1416
db270c41 1417found:
3958cc56
WHX
1418 mpt_entry->pa0_l = cpu_to_le32(lower_32_bits(pages[0]));
1419 roce_set_field(mpt_entry->byte_56_pa0_h, V2_MPT_BYTE_56_PA0_H_M,
1420 V2_MPT_BYTE_56_PA0_H_S,
1421 upper_32_bits(pages[0]));
1422 mpt_entry->byte_56_pa0_h = cpu_to_le32(mpt_entry->byte_56_pa0_h);
1423
1424 mpt_entry->pa1_l = cpu_to_le32(lower_32_bits(pages[1]));
1425 roce_set_field(mpt_entry->byte_64_buf_pa1, V2_MPT_BYTE_64_PA1_H_M,
1426 V2_MPT_BYTE_64_PA1_H_S, upper_32_bits(pages[1]));
1427
1428 free_page((unsigned long)pages);
1429
1430 roce_set_field(mpt_entry->byte_64_buf_pa1,
1431 V2_MPT_BYTE_64_PBL_BUF_PG_SZ_M,
1432 V2_MPT_BYTE_64_PBL_BUF_PG_SZ_S, mr->pbl_buf_pg_sz);
1433 mpt_entry->byte_64_buf_pa1 = cpu_to_le32(mpt_entry->byte_64_buf_pa1);
1434
1435 return 0;
1436}
1437
a2c80b7b
WHX
/*
 * Update an existing MPT entry for ib_rereg_user_mr(): depending on
 * @flags, rewrite the PD, the access-right bits, and/or the translation
 * (iova/size/PBL base). The in-memory @mr is updated to match what is
 * programmed so subsequent operations see consistent state.
 *
 * Return: always 0.
 */
static int hns_roce_v2_rereg_write_mtpt(struct hns_roce_dev *hr_dev,
					struct hns_roce_mr *mr, int flags,
					u32 pdn, int mr_access_flags, u64 iova,
					u64 size, void *mb_buf)
{
	struct hns_roce_v2_mpt_entry *mpt_entry = mb_buf;

	if (flags & IB_MR_REREG_PD) {
		roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PD_M,
			       V2_MPT_BYTE_4_PD_S, pdn);
		mr->pd = pdn;
	}

	if (flags & IB_MR_REREG_ACCESS) {
		roce_set_bit(mpt_entry->byte_8_mw_cnt_en,
			     V2_MPT_BYTE_8_BIND_EN_S,
			     (mr_access_flags & IB_ACCESS_MW_BIND ? 1 : 0));
		roce_set_bit(mpt_entry->byte_8_mw_cnt_en,
			     V2_MPT_BYTE_8_ATOMIC_EN_S,
			     (mr_access_flags & IB_ACCESS_REMOTE_ATOMIC ? 1 : 0));
		roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RR_EN_S,
			     (mr_access_flags & IB_ACCESS_REMOTE_READ ? 1 : 0));
		roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RW_EN_S,
			     (mr_access_flags & IB_ACCESS_REMOTE_WRITE ? 1 : 0));
		roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_LW_EN_S,
			     (mr_access_flags & IB_ACCESS_LOCAL_WRITE ? 1 : 0));
	}

	if (flags & IB_MR_REREG_TRANS) {
		mpt_entry->va_l = cpu_to_le32(lower_32_bits(iova));
		mpt_entry->va_h = cpu_to_le32(upper_32_bits(iova));
		mpt_entry->len_l = cpu_to_le32(lower_32_bits(size));
		mpt_entry->len_h = cpu_to_le32(upper_32_bits(size));

		/* PBL base address is stored >> 3 (8-byte aligned) */
		mpt_entry->pbl_size = cpu_to_le32(mr->pbl_size);
		mpt_entry->pbl_ba_l =
			cpu_to_le32(lower_32_bits(mr->pbl_ba >> 3));
		roce_set_field(mpt_entry->byte_48_mode_ba,
			       V2_MPT_BYTE_48_PBL_BA_H_M,
			       V2_MPT_BYTE_48_PBL_BA_H_S,
			       upper_32_bits(mr->pbl_ba >> 3));
		mpt_entry->byte_48_mode_ba =
			cpu_to_le32(mpt_entry->byte_48_mode_ba);

		mr->iova = iova;
		mr->size = size;
	}

	return 0;
}
1488
93aa2187
WHX
1489static void *get_cqe_v2(struct hns_roce_cq *hr_cq, int n)
1490{
1491 return hns_roce_buf_offset(&hr_cq->hr_buf.hr_buf,
1492 n * HNS_ROCE_V2_CQE_ENTRY_SIZE);
1493}
1494
1495static void *get_sw_cqe_v2(struct hns_roce_cq *hr_cq, int n)
1496{
1497 struct hns_roce_v2_cqe *cqe = get_cqe_v2(hr_cq, n & hr_cq->ib_cq.cqe);
1498
1499 /* Get cqe when Owner bit is Conversely with the MSB of cons_idx */
1500 return (roce_get_bit(cqe->byte_4, V2_CQE_BYTE_4_OWNER_S) ^
1501 !!(n & (hr_cq->ib_cq.cqe + 1))) ? cqe : NULL;
1502}
1503
1504static struct hns_roce_v2_cqe *next_cqe_sw_v2(struct hns_roce_cq *hr_cq)
1505{
1506 return get_sw_cqe_v2(hr_cq, hr_cq->cons_index);
1507}
1508
/*
 * Ring the CQ doorbell to publish a new consumer index to the hardware.
 * byte_4 carries the CQN and the "update pointer" command; the parameter
 * word carries the consumer index (masked to twice the CQ depth) and a
 * command sequence number of 1.
 */
static void hns_roce_v2_cq_set_ci(struct hns_roce_cq *hr_cq, u32 cons_index)
{
	struct hns_roce_v2_cq_db cq_db;

	cq_db.byte_4 = 0;
	cq_db.parameter = 0;

	roce_set_field(cq_db.byte_4, V2_CQ_DB_BYTE_4_TAG_M,
		       V2_CQ_DB_BYTE_4_TAG_S, hr_cq->cqn);
	roce_set_field(cq_db.byte_4, V2_CQ_DB_BYTE_4_CMD_M,
		       V2_CQ_DB_BYTE_4_CMD_S, HNS_ROCE_V2_CQ_DB_PTR);

	roce_set_field(cq_db.parameter, V2_CQ_DB_PARAMETER_CONS_IDX_M,
		       V2_CQ_DB_PARAMETER_CONS_IDX_S,
		       cons_index & ((hr_cq->cq_depth << 1) - 1));
	roce_set_field(cq_db.parameter, V2_CQ_DB_PARAMETER_CMD_SN_M,
		       V2_CQ_DB_PARAMETER_CMD_SN_S, 1);

	/* 64-bit doorbell write to the CQ's doorbell register */
	hns_roce_write64_k((__be32 *)&cq_db, hr_cq->cq_db_l);
}
1530
926a01dc
WHX
/*
 * Remove all CQEs belonging to QP @qpn from the CQ (caller holds the CQ
 * lock). CQEs of other QPs are compacted forward over the removed ones,
 * preserving each destination slot's owner bit, and the consumer index is
 * advanced past the freed entries.
 */
static void __hns_roce_v2_cq_clean(struct hns_roce_cq *hr_cq, u32 qpn,
				   struct hns_roce_srq *srq)
{
	struct hns_roce_v2_cqe *cqe, *dest;
	u32 prod_index;
	int nfreed = 0;
	u8 owner_bit;

	/* Find the first index past the last software-owned CQE */
	for (prod_index = hr_cq->cons_index; get_sw_cqe_v2(hr_cq, prod_index);
	     ++prod_index) {
		if (prod_index == hr_cq->cons_index + hr_cq->ib_cq.cqe)
			break;
	}

	/*
	 * Now backwards through the CQ, removing CQ entries
	 * that match our QP by overwriting them with next entries.
	 */
	while ((int) --prod_index - (int) hr_cq->cons_index >= 0) {
		cqe = get_cqe_v2(hr_cq, prod_index & hr_cq->ib_cq.cqe);
		if ((roce_get_field(cqe->byte_16, V2_CQE_BYTE_16_LCL_QPN_M,
				    V2_CQE_BYTE_16_LCL_QPN_S) &
				    HNS_ROCE_V2_CQE_QPN_MASK) == qpn) {
			/* In v1 engine, not support SRQ */
			++nfreed;
		} else if (nfreed) {
			/* Shift surviving CQE forward by nfreed slots */
			dest = get_cqe_v2(hr_cq, (prod_index + nfreed) &
					  hr_cq->ib_cq.cqe);
			/* Keep the destination slot's owner bit intact */
			owner_bit = roce_get_bit(dest->byte_4,
						 V2_CQE_BYTE_4_OWNER_S);
			memcpy(dest, cqe, sizeof(*cqe));
			roce_set_bit(dest->byte_4, V2_CQE_BYTE_4_OWNER_S,
				     owner_bit);
		}
	}

	if (nfreed) {
		hr_cq->cons_index += nfreed;
		/*
		 * Make sure update of buffer contents is done before
		 * updating consumer index.
		 */
		wmb();
		hns_roce_v2_cq_set_ci(hr_cq, hr_cq->cons_index);
	}
}
1577
1578static void hns_roce_v2_cq_clean(struct hns_roce_cq *hr_cq, u32 qpn,
1579 struct hns_roce_srq *srq)
1580{
1581 spin_lock_irq(&hr_cq->lock);
1582 __hns_roce_v2_cq_clean(hr_cq, qpn, srq);
1583 spin_unlock_irq(&hr_cq->lock);
1584}
1585
93aa2187
WHX
/*
 * Build the CQ context (CQC) in the mailbox buffer for a CQ of @nent
 * entries bound to completion vector @vector: state, depth, current/next
 * CQE block addresses from @mtts, the CQE base address @dma_handle, the
 * record-doorbell address (when user-space record DB is enabled), and the
 * default moderation parameters.
 */
static void hns_roce_v2_write_cqc(struct hns_roce_dev *hr_dev,
				  struct hns_roce_cq *hr_cq, void *mb_buf,
				  u64 *mtts, dma_addr_t dma_handle, int nent,
				  u32 vector)
{
	struct hns_roce_v2_cq_context *cq_context;

	cq_context = mb_buf;
	memset(cq_context, 0, sizeof(*cq_context));

	roce_set_field(cq_context->byte_4_pg_ceqn, V2_CQC_BYTE_4_CQ_ST_M,
		       V2_CQC_BYTE_4_CQ_ST_S, V2_CQ_STATE_VALID);
	roce_set_field(cq_context->byte_4_pg_ceqn, V2_CQC_BYTE_4_ARM_ST_M,
		       V2_CQC_BYTE_4_ARM_ST_S, REG_NXT_CEQE);
	/* CQ depth is programmed as log2(nent) */
	roce_set_field(cq_context->byte_4_pg_ceqn, V2_CQC_BYTE_4_SHIFT_M,
		       V2_CQC_BYTE_4_SHIFT_S, ilog2((unsigned int)nent));
	roce_set_field(cq_context->byte_4_pg_ceqn, V2_CQC_BYTE_4_CEQN_M,
		       V2_CQC_BYTE_4_CEQN_S, vector);
	cq_context->byte_4_pg_ceqn = cpu_to_le32(cq_context->byte_4_pg_ceqn);

	roce_set_field(cq_context->byte_8_cqn, V2_CQC_BYTE_8_CQN_M,
		       V2_CQC_BYTE_8_CQN_S, hr_cq->cqn);

	/* Current CQE block: low bits in their own word, high bits in byte_16 */
	cq_context->cqe_cur_blk_addr = (u32)(mtts[0] >> PAGE_ADDR_SHIFT);
	cq_context->cqe_cur_blk_addr =
				cpu_to_le32(cq_context->cqe_cur_blk_addr);

	roce_set_field(cq_context->byte_16_hop_addr,
		       V2_CQC_BYTE_16_CQE_CUR_BLK_ADDR_M,
		       V2_CQC_BYTE_16_CQE_CUR_BLK_ADDR_S,
		       cpu_to_le32((mtts[0]) >> (32 + PAGE_ADDR_SHIFT)));
	roce_set_field(cq_context->byte_16_hop_addr,
		       V2_CQC_BYTE_16_CQE_HOP_NUM_M,
		       V2_CQC_BYTE_16_CQE_HOP_NUM_S, hr_dev->caps.cqe_hop_num ==
		       HNS_ROCE_HOP_NUM_0 ? 0 : hr_dev->caps.cqe_hop_num);

	/* Next CQE block address plus BA/buffer page sizes */
	cq_context->cqe_nxt_blk_addr = (u32)(mtts[1] >> PAGE_ADDR_SHIFT);
	roce_set_field(cq_context->byte_24_pgsz_addr,
		       V2_CQC_BYTE_24_CQE_NXT_BLK_ADDR_M,
		       V2_CQC_BYTE_24_CQE_NXT_BLK_ADDR_S,
		       cpu_to_le32((mtts[1]) >> (32 + PAGE_ADDR_SHIFT)));
	roce_set_field(cq_context->byte_24_pgsz_addr,
		       V2_CQC_BYTE_24_CQE_BA_PG_SZ_M,
		       V2_CQC_BYTE_24_CQE_BA_PG_SZ_S,
		       hr_dev->caps.cqe_ba_pg_sz);
	roce_set_field(cq_context->byte_24_pgsz_addr,
		       V2_CQC_BYTE_24_CQE_BUF_PG_SZ_M,
		       V2_CQC_BYTE_24_CQE_BUF_PG_SZ_S,
		       hr_dev->caps.cqe_buf_pg_sz);

	/* CQE base address is stored >> 3 (8-byte aligned) */
	cq_context->cqe_ba = (u32)(dma_handle >> 3);

	roce_set_field(cq_context->byte_40_cqe_ba, V2_CQC_BYTE_40_CQE_BA_M,
		       V2_CQC_BYTE_40_CQE_BA_S, (dma_handle >> (32 + 3)));

	/* User-space record doorbell: address is stored >> 1 (2-byte aligned) */
	if (hr_cq->db_en)
		roce_set_bit(cq_context->byte_44_db_record,
			     V2_CQC_BYTE_44_DB_RECORD_EN_S, 1);

	roce_set_field(cq_context->byte_44_db_record,
		       V2_CQC_BYTE_44_DB_RECORD_ADDR_M,
		       V2_CQC_BYTE_44_DB_RECORD_ADDR_S,
		       ((u32)hr_cq->db.dma) >> 1);
	cq_context->db_record_addr = hr_cq->db.dma >> 32;

	/* Default interrupt moderation: burst count and period */
	roce_set_field(cq_context->byte_56_cqe_period_maxcnt,
		       V2_CQC_BYTE_56_CQ_MAX_CNT_M,
		       V2_CQC_BYTE_56_CQ_MAX_CNT_S,
		       HNS_ROCE_V2_CQ_DEFAULT_BURST_NUM);
	roce_set_field(cq_context->byte_56_cqe_period_maxcnt,
		       V2_CQC_BYTE_56_CQ_PERIOD_M,
		       V2_CQC_BYTE_56_CQ_PERIOD_S,
		       HNS_ROCE_V2_CQ_DEFAULT_INTERVAL);
}
1660
/*
 * Arm the CQ for the next completion event (ib_req_notify_cq): ring the
 * notify doorbell carrying the CQN, the current consumer index, the arm
 * sequence number, and whether only solicited completions should notify.
 *
 * Return: always 0.
 */
static int hns_roce_v2_req_notify_cq(struct ib_cq *ibcq,
				     enum ib_cq_notify_flags flags)
{
	struct hns_roce_cq *hr_cq = to_hr_cq(ibcq);
	u32 notification_flag;
	u32 doorbell[2];

	doorbell[0] = 0;
	doorbell[1] = 0;

	notification_flag = (flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED ?
			    V2_CQ_DB_REQ_NOT : V2_CQ_DB_REQ_NOT_SOL;
	/*
	 * flags = 0; Notification Flag = 1, next
	 * flags = 1; Notification Flag = 0, solicited
	 */
	roce_set_field(doorbell[0], V2_CQ_DB_BYTE_4_TAG_M, V2_DB_BYTE_4_TAG_S,
		       hr_cq->cqn);
	roce_set_field(doorbell[0], V2_CQ_DB_BYTE_4_CMD_M, V2_DB_BYTE_4_CMD_S,
		       HNS_ROCE_V2_CQ_DB_NTR);
	roce_set_field(doorbell[1], V2_CQ_DB_PARAMETER_CONS_IDX_M,
		       V2_CQ_DB_PARAMETER_CONS_IDX_S,
		       hr_cq->cons_index & ((hr_cq->cq_depth << 1) - 1));
	/* arm_sn distinguishes re-arms so stale events are ignored */
	roce_set_field(doorbell[1], V2_CQ_DB_PARAMETER_CMD_SN_M,
		       V2_CQ_DB_PARAMETER_CMD_SN_S, hr_cq->arm_sn & 0x3);
	roce_set_bit(doorbell[1], V2_CQ_DB_PARAMETER_NOTIFY_S,
		     notification_flag);

	hns_roce_write64_k(doorbell, hr_cq->cq_db_l);

	return 0;
}
1693
/*
 * Copy an inline-received payload out of the RQ WQE buffer into the
 * scatter list the application posted for that receive.
 *
 * The CQE's WQE index identifies which posted receive this completion
 * belongs to; the payload (wc->byte_len bytes) is spread across the
 * recorded SGEs in order.
 *
 * Return: 0 on success, -EAGAIN (with wc->status set to
 * IB_WC_LOC_LEN_ERR) if the SGEs are too small for the payload.
 */
static int hns_roce_handle_recv_inl_wqe(struct hns_roce_v2_cqe *cqe,
					struct hns_roce_qp **cur_qp,
					struct ib_wc *wc)
{
	struct hns_roce_rinl_sge *sge_list;
	u32 wr_num, wr_cnt, sge_num;
	u32 sge_cnt, data_len, size;
	void *wqe_buf;

	wr_num = roce_get_field(cqe->byte_4, V2_CQE_BYTE_4_WQE_INDX_M,
				V2_CQE_BYTE_4_WQE_INDX_S) & 0xffff;
	wr_cnt = wr_num & ((*cur_qp)->rq.wqe_cnt - 1);

	sge_list = (*cur_qp)->rq_inl_buf.wqe_list[wr_cnt].sg_list;
	sge_num = (*cur_qp)->rq_inl_buf.wqe_list[wr_cnt].sge_cnt;
	wqe_buf = get_recv_wqe(*cur_qp, wr_cnt);
	data_len = wc->byte_len;

	/* Fill each SGE in turn until the payload is exhausted */
	for (sge_cnt = 0; (sge_cnt < sge_num) && (data_len); sge_cnt++) {
		size = min(sge_list[sge_cnt].len, data_len);
		memcpy((void *)sge_list[sge_cnt].addr, wqe_buf, size);

		data_len -= size;
		wqe_buf += size;
	}

	/* Payload larger than the posted SGEs: local length error */
	if (data_len) {
		wc->status = IB_WC_LOC_LEN_ERR;
		return -EAGAIN;
	}

	return 0;
}
1727
93aa2187
WHX
/*
 * Consume one CQE from @hr_cq and translate it into @wc.
 *
 * @cur_qp caches the QP of the previous CQE; it is re-looked-up only when
 * the QPN changes. Advances the consumer index, decodes completion
 * status, then the SQ or RQ opcode, and for RC/UC inline receives copies
 * the payload into the application's SGEs.
 *
 * Return: 0 when a CQE was consumed (wc filled, possibly with an error
 * status), -EAGAIN when the CQ is empty or an inline copy failed,
 * -EINVAL when the CQE references an unknown QPN.
 */
static int hns_roce_v2_poll_one(struct hns_roce_cq *hr_cq,
				struct hns_roce_qp **cur_qp, struct ib_wc *wc)
{
	struct hns_roce_dev *hr_dev;
	struct hns_roce_v2_cqe *cqe;
	struct hns_roce_qp *hr_qp;
	struct hns_roce_wq *wq;
	int is_send;
	u16 wqe_ctr;
	u32 opcode;
	u32 status;
	int qpn;
	int ret;

	/* Find cqe according to consumer index */
	cqe = next_cqe_sw_v2(hr_cq);
	if (!cqe)
		return -EAGAIN;

	++hr_cq->cons_index;
	/* Ensure the CQE contents are read only after ownership is seen */
	rmb();

	/* 0->SQ, 1->RQ */
	is_send = !roce_get_bit(cqe->byte_4, V2_CQE_BYTE_4_S_R_S);

	qpn = roce_get_field(cqe->byte_16, V2_CQE_BYTE_16_LCL_QPN_M,
			     V2_CQE_BYTE_16_LCL_QPN_S);

	/* Re-resolve the QP only when the CQE belongs to a different QPN */
	if (!*cur_qp || (qpn & HNS_ROCE_V2_CQE_QPN_MASK) != (*cur_qp)->qpn) {
		hr_dev = to_hr_dev(hr_cq->ib_cq.device);
		hr_qp = __hns_roce_qp_lookup(hr_dev, qpn);
		if (unlikely(!hr_qp)) {
			dev_err(hr_dev->dev, "CQ %06lx with entry for unknown QPN %06x\n",
				hr_cq->cqn, (qpn & HNS_ROCE_V2_CQE_QPN_MASK));
			return -EINVAL;
		}
		*cur_qp = hr_qp;
	}

	wc->qp = &(*cur_qp)->ibqp;
	wc->vendor_err = 0;

	/* Map the hardware completion status to an ib_wc status */
	status = roce_get_field(cqe->byte_4, V2_CQE_BYTE_4_STATUS_M,
				V2_CQE_BYTE_4_STATUS_S);
	switch (status & HNS_ROCE_V2_CQE_STATUS_MASK) {
	case HNS_ROCE_CQE_V2_SUCCESS:
		wc->status = IB_WC_SUCCESS;
		break;
	case HNS_ROCE_CQE_V2_LOCAL_LENGTH_ERR:
		wc->status = IB_WC_LOC_LEN_ERR;
		break;
	case HNS_ROCE_CQE_V2_LOCAL_QP_OP_ERR:
		wc->status = IB_WC_LOC_QP_OP_ERR;
		break;
	case HNS_ROCE_CQE_V2_LOCAL_PROT_ERR:
		wc->status = IB_WC_LOC_PROT_ERR;
		break;
	case HNS_ROCE_CQE_V2_WR_FLUSH_ERR:
		wc->status = IB_WC_WR_FLUSH_ERR;
		break;
	case HNS_ROCE_CQE_V2_MW_BIND_ERR:
		wc->status = IB_WC_MW_BIND_ERR;
		break;
	case HNS_ROCE_CQE_V2_BAD_RESP_ERR:
		wc->status = IB_WC_BAD_RESP_ERR;
		break;
	case HNS_ROCE_CQE_V2_LOCAL_ACCESS_ERR:
		wc->status = IB_WC_LOC_ACCESS_ERR;
		break;
	case HNS_ROCE_CQE_V2_REMOTE_INVAL_REQ_ERR:
		wc->status = IB_WC_REM_INV_REQ_ERR;
		break;
	case HNS_ROCE_CQE_V2_REMOTE_ACCESS_ERR:
		wc->status = IB_WC_REM_ACCESS_ERR;
		break;
	case HNS_ROCE_CQE_V2_REMOTE_OP_ERR:
		wc->status = IB_WC_REM_OP_ERR;
		break;
	case HNS_ROCE_CQE_V2_TRANSPORT_RETRY_EXC_ERR:
		wc->status = IB_WC_RETRY_EXC_ERR;
		break;
	case HNS_ROCE_CQE_V2_RNR_RETRY_EXC_ERR:
		wc->status = IB_WC_RNR_RETRY_EXC_ERR;
		break;
	case HNS_ROCE_CQE_V2_REMOTE_ABORT_ERR:
		wc->status = IB_WC_REM_ABORT_ERR;
		break;
	default:
		wc->status = IB_WC_GENERAL_ERR;
		break;
	}

	/* CQE status error, directly return */
	if (wc->status != IB_WC_SUCCESS)
		return 0;

	if (is_send) {
		wc->wc_flags = 0;
		/* SQ corresponding to CQE */
		switch (roce_get_field(cqe->byte_4, V2_CQE_BYTE_4_OPCODE_M,
				       V2_CQE_BYTE_4_OPCODE_S) & 0x1f) {
		case HNS_ROCE_SQ_OPCODE_SEND:
			wc->opcode = IB_WC_SEND;
			break;
		case HNS_ROCE_SQ_OPCODE_SEND_WITH_INV:
			wc->opcode = IB_WC_SEND;
			break;
		case HNS_ROCE_SQ_OPCODE_SEND_WITH_IMM:
			wc->opcode = IB_WC_SEND;
			wc->wc_flags |= IB_WC_WITH_IMM;
			break;
		case HNS_ROCE_SQ_OPCODE_RDMA_READ:
			wc->opcode = IB_WC_RDMA_READ;
			wc->byte_len = le32_to_cpu(cqe->byte_cnt);
			break;
		case HNS_ROCE_SQ_OPCODE_RDMA_WRITE:
			wc->opcode = IB_WC_RDMA_WRITE;
			break;
		case HNS_ROCE_SQ_OPCODE_RDMA_WRITE_WITH_IMM:
			wc->opcode = IB_WC_RDMA_WRITE;
			wc->wc_flags |= IB_WC_WITH_IMM;
			break;
		case HNS_ROCE_SQ_OPCODE_LOCAL_INV:
			wc->opcode = IB_WC_LOCAL_INV;
			wc->wc_flags |= IB_WC_WITH_INVALIDATE;
			break;
		case HNS_ROCE_SQ_OPCODE_ATOMIC_COMP_AND_SWAP:
			wc->opcode = IB_WC_COMP_SWAP;
			wc->byte_len  = 8;
			break;
		case HNS_ROCE_SQ_OPCODE_ATOMIC_FETCH_AND_ADD:
			wc->opcode = IB_WC_FETCH_ADD;
			wc->byte_len  = 8;
			break;
		case HNS_ROCE_SQ_OPCODE_ATOMIC_MASK_COMP_AND_SWAP:
			wc->opcode = IB_WC_MASKED_COMP_SWAP;
			wc->byte_len  = 8;
			break;
		case HNS_ROCE_SQ_OPCODE_ATOMIC_MASK_FETCH_AND_ADD:
			wc->opcode = IB_WC_MASKED_FETCH_ADD;
			wc->byte_len  = 8;
			break;
		case HNS_ROCE_SQ_OPCODE_FAST_REG_WR:
			wc->opcode = IB_WC_REG_MR;
			break;
		case HNS_ROCE_SQ_OPCODE_BIND_MW:
			wc->opcode = IB_WC_REG_MR;
			break;
		default:
			wc->status = IB_WC_GENERAL_ERR;
			break;
		}

		wq = &(*cur_qp)->sq;
		if ((*cur_qp)->sq_signal_bits) {
			/*
			 * If sg_signal_bit is 1,
			 * firstly tail pointer updated to wqe
			 * which current cqe correspond to
			 */
			wqe_ctr = (u16)roce_get_field(cqe->byte_4,
						      V2_CQE_BYTE_4_WQE_INDX_M,
						      V2_CQE_BYTE_4_WQE_INDX_S);
			wq->tail += (wqe_ctr - (u16)wq->tail) &
				    (wq->wqe_cnt - 1);
		}

		wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
		++wq->tail;
	} else {
		/* RQ correspond to CQE */
		wc->byte_len = le32_to_cpu(cqe->byte_cnt);

		opcode = roce_get_field(cqe->byte_4, V2_CQE_BYTE_4_OPCODE_M,
					V2_CQE_BYTE_4_OPCODE_S);
		switch (opcode & 0x1f) {
		case HNS_ROCE_V2_OPCODE_RDMA_WRITE_IMM:
			wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
			wc->wc_flags = IB_WC_WITH_IMM;
			wc->ex.imm_data = cqe->immtdata;
			break;
		case HNS_ROCE_V2_OPCODE_SEND:
			wc->opcode = IB_WC_RECV;
			wc->wc_flags = 0;
			break;
		case HNS_ROCE_V2_OPCODE_SEND_WITH_IMM:
			wc->opcode = IB_WC_RECV;
			wc->wc_flags = IB_WC_WITH_IMM;
			wc->ex.imm_data = cqe->immtdata;
			break;
		case HNS_ROCE_V2_OPCODE_SEND_WITH_INV:
			wc->opcode = IB_WC_RECV;
			wc->wc_flags = IB_WC_WITH_INVALIDATE;
			wc->ex.invalidate_rkey = le32_to_cpu(cqe->rkey);
			break;
		default:
			wc->status = IB_WC_GENERAL_ERR;
			break;
		}

		/* RC/UC send-type completions may carry the payload inline */
		if ((wc->qp->qp_type == IB_QPT_RC ||
		     wc->qp->qp_type == IB_QPT_UC) &&
		    (opcode == HNS_ROCE_V2_OPCODE_SEND ||
		     opcode == HNS_ROCE_V2_OPCODE_SEND_WITH_IMM ||
		     opcode == HNS_ROCE_V2_OPCODE_SEND_WITH_INV) &&
		    (roce_get_bit(cqe->byte_4, V2_CQE_BYTE_4_RQ_INLINE_S))) {
			ret = hns_roce_handle_recv_inl_wqe(cqe, cur_qp, wc);
			if (ret)
				return -EAGAIN;
		}

		/* Update tail pointer, record wr_id */
		wq = &(*cur_qp)->rq;
		wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
		++wq->tail;

		wc->sl = (u8)roce_get_field(cqe->byte_32, V2_CQE_BYTE_32_SL_M,
					    V2_CQE_BYTE_32_SL_S);
		wc->src_qp = (u8)roce_get_field(cqe->byte_32,
						V2_CQE_BYTE_32_RMT_QPN_M,
						V2_CQE_BYTE_32_RMT_QPN_S);
		wc->wc_flags |= (roce_get_bit(cqe->byte_32,
					      V2_CQE_BYTE_32_GRH_S) ?
					      IB_WC_GRH : 0);
		wc->port_num = roce_get_field(cqe->byte_32,
				V2_CQE_BYTE_32_PORTN_M, V2_CQE_BYTE_32_PORTN_S);
		wc->pkey_index = 0;
		/* Source MAC: first 4 bytes from cqe->smac, last 2 from byte_28 */
		memcpy(wc->smac, cqe->smac, 4);
		wc->smac[4] = roce_get_field(cqe->byte_28,
					     V2_CQE_BYTE_28_SMAC_4_M,
					     V2_CQE_BYTE_28_SMAC_4_S);
		wc->smac[5] = roce_get_field(cqe->byte_28,
					     V2_CQE_BYTE_28_SMAC_5_M,
					     V2_CQE_BYTE_28_SMAC_5_S);
		wc->vlan_id = 0xffff;
		wc->wc_flags |= (IB_WC_WITH_VLAN | IB_WC_WITH_SMAC);
		wc->network_hdr_type = roce_get_field(cqe->byte_28,
						      V2_CQE_BYTE_28_PORT_TYPE_M,
						      V2_CQE_BYTE_28_PORT_TYPE_S);
	}

	return 0;
}
1972
1973static int hns_roce_v2_poll_cq(struct ib_cq *ibcq, int num_entries,
1974 struct ib_wc *wc)
1975{
1976 struct hns_roce_cq *hr_cq = to_hr_cq(ibcq);
1977 struct hns_roce_qp *cur_qp = NULL;
1978 unsigned long flags;
1979 int npolled;
1980
1981 spin_lock_irqsave(&hr_cq->lock, flags);
1982
1983 for (npolled = 0; npolled < num_entries; ++npolled) {
1984 if (hns_roce_v2_poll_one(hr_cq, &cur_qp, wc + npolled))
1985 break;
1986 }
1987
1988 if (npolled) {
1989 /* Memory barrier */
1990 wmb();
1991 hns_roce_v2_cq_set_ci(hr_cq, hr_cq->cons_index);
1992 }
1993
1994 spin_unlock_irqrestore(&hr_cq->lock, flags);
1995
1996 return npolled;
1997}
1998
a81fba28
WHX
1999static int hns_roce_v2_set_hem(struct hns_roce_dev *hr_dev,
2000 struct hns_roce_hem_table *table, int obj,
2001 int step_idx)
2002{
2003 struct device *dev = hr_dev->dev;
2004 struct hns_roce_cmd_mailbox *mailbox;
2005 struct hns_roce_hem_iter iter;
2006 struct hns_roce_hem_mhop mhop;
2007 struct hns_roce_hem *hem;
2008 unsigned long mhop_obj = obj;
2009 int i, j, k;
2010 int ret = 0;
2011 u64 hem_idx = 0;
2012 u64 l1_idx = 0;
2013 u64 bt_ba = 0;
2014 u32 chunk_ba_num;
2015 u32 hop_num;
2016 u16 op = 0xff;
2017
2018 if (!hns_roce_check_whether_mhop(hr_dev, table->type))
2019 return 0;
2020
2021 hns_roce_calc_hem_mhop(hr_dev, table, &mhop_obj, &mhop);
2022 i = mhop.l0_idx;
2023 j = mhop.l1_idx;
2024 k = mhop.l2_idx;
2025 hop_num = mhop.hop_num;
2026 chunk_ba_num = mhop.bt_chunk_size / 8;
2027
2028 if (hop_num == 2) {
2029 hem_idx = i * chunk_ba_num * chunk_ba_num + j * chunk_ba_num +
2030 k;
2031 l1_idx = i * chunk_ba_num + j;
2032 } else if (hop_num == 1) {
2033 hem_idx = i * chunk_ba_num + j;
2034 } else if (hop_num == HNS_ROCE_HOP_NUM_0) {
2035 hem_idx = i;
2036 }
2037
2038 switch (table->type) {
2039 case HEM_TYPE_QPC:
2040 op = HNS_ROCE_CMD_WRITE_QPC_BT0;
2041 break;
2042 case HEM_TYPE_MTPT:
2043 op = HNS_ROCE_CMD_WRITE_MPT_BT0;
2044 break;
2045 case HEM_TYPE_CQC:
2046 op = HNS_ROCE_CMD_WRITE_CQC_BT0;
2047 break;
2048 case HEM_TYPE_SRQC:
2049 op = HNS_ROCE_CMD_WRITE_SRQC_BT0;
2050 break;
2051 default:
2052 dev_warn(dev, "Table %d not to be written by mailbox!\n",
2053 table->type);
2054 return 0;
2055 }
2056 op += step_idx;
2057
2058 mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
2059 if (IS_ERR(mailbox))
2060 return PTR_ERR(mailbox);
2061
2062 if (check_whether_last_step(hop_num, step_idx)) {
2063 hem = table->hem[hem_idx];
2064 for (hns_roce_hem_first(hem, &iter);
2065 !hns_roce_hem_last(&iter); hns_roce_hem_next(&iter)) {
2066 bt_ba = hns_roce_hem_addr(&iter);
2067
2068 /* configure the ba, tag, and op */
2069 ret = hns_roce_cmd_mbox(hr_dev, bt_ba, mailbox->dma,
2070 obj, 0, op,
2071 HNS_ROCE_CMD_TIMEOUT_MSECS);
2072 }
2073 } else {
2074 if (step_idx == 0)
2075 bt_ba = table->bt_l0_dma_addr[i];
2076 else if (step_idx == 1 && hop_num == 2)
2077 bt_ba = table->bt_l1_dma_addr[l1_idx];
2078
2079 /* configure the ba, tag, and op */
2080 ret = hns_roce_cmd_mbox(hr_dev, bt_ba, mailbox->dma, obj,
2081 0, op, HNS_ROCE_CMD_TIMEOUT_MSECS);
2082 }
2083
2084 hns_roce_free_cmd_mailbox(hr_dev, mailbox);
2085 return ret;
2086}
2087
2088static int hns_roce_v2_clear_hem(struct hns_roce_dev *hr_dev,
2089 struct hns_roce_hem_table *table, int obj,
2090 int step_idx)
2091{
2092 struct device *dev = hr_dev->dev;
2093 struct hns_roce_cmd_mailbox *mailbox;
2094 int ret = 0;
2095 u16 op = 0xff;
2096
2097 if (!hns_roce_check_whether_mhop(hr_dev, table->type))
2098 return 0;
2099
2100 switch (table->type) {
2101 case HEM_TYPE_QPC:
2102 op = HNS_ROCE_CMD_DESTROY_QPC_BT0;
2103 break;
2104 case HEM_TYPE_MTPT:
2105 op = HNS_ROCE_CMD_DESTROY_MPT_BT0;
2106 break;
2107 case HEM_TYPE_CQC:
2108 op = HNS_ROCE_CMD_DESTROY_CQC_BT0;
2109 break;
2110 case HEM_TYPE_SRQC:
2111 op = HNS_ROCE_CMD_DESTROY_SRQC_BT0;
2112 break;
2113 default:
2114 dev_warn(dev, "Table %d not to be destroyed by mailbox!\n",
2115 table->type);
2116 return 0;
2117 }
2118 op += step_idx;
2119
2120 mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
2121 if (IS_ERR(mailbox))
2122 return PTR_ERR(mailbox);
2123
2124 /* configure the tag and op */
2125 ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, obj, 0, op,
2126 HNS_ROCE_CMD_TIMEOUT_MSECS);
2127
2128 hns_roce_free_cmd_mailbox(hr_dev, mailbox);
2129 return ret;
2130}
2131
926a01dc
WHX
2132static int hns_roce_v2_qp_modify(struct hns_roce_dev *hr_dev,
2133 struct hns_roce_mtt *mtt,
2134 enum ib_qp_state cur_state,
2135 enum ib_qp_state new_state,
2136 struct hns_roce_v2_qp_context *context,
2137 struct hns_roce_qp *hr_qp)
2138{
2139 struct hns_roce_cmd_mailbox *mailbox;
2140 int ret;
2141
2142 mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
2143 if (IS_ERR(mailbox))
2144 return PTR_ERR(mailbox);
2145
2146 memcpy(mailbox->buf, context, sizeof(*context) * 2);
2147
2148 ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, hr_qp->qpn, 0,
2149 HNS_ROCE_CMD_MODIFY_QPC,
2150 HNS_ROCE_CMD_TIMEOUT_MSECS);
2151
2152 hns_roce_free_cmd_mailbox(hr_dev, mailbox);
2153
2154 return ret;
2155}
2156
ace1c541 2157static void set_access_flags(struct hns_roce_qp *hr_qp,
2158 struct hns_roce_v2_qp_context *context,
2159 struct hns_roce_v2_qp_context *qpc_mask,
2160 const struct ib_qp_attr *attr, int attr_mask)
2161{
2162 u8 dest_rd_atomic;
2163 u32 access_flags;
2164
c2799119 2165 dest_rd_atomic = (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) ?
ace1c541 2166 attr->max_dest_rd_atomic : hr_qp->resp_depth;
2167
c2799119 2168 access_flags = (attr_mask & IB_QP_ACCESS_FLAGS) ?
ace1c541 2169 attr->qp_access_flags : hr_qp->atomic_rd_en;
2170
2171 if (!dest_rd_atomic)
2172 access_flags &= IB_ACCESS_REMOTE_WRITE;
2173
2174 roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RRE_S,
2175 !!(access_flags & IB_ACCESS_REMOTE_READ));
2176 roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RRE_S, 0);
2177
2178 roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RWE_S,
2179 !!(access_flags & IB_ACCESS_REMOTE_WRITE));
2180 roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RWE_S, 0);
2181
2182 roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_ATE_S,
2183 !!(access_flags & IB_ACCESS_REMOTE_ATOMIC));
2184 roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_ATE_S, 0);
2185}
2186
926a01dc
WHX
/*
 * modify_qp_reset_to_init - build the QP context and its mask for the
 * RESET -> INIT state transition.
 *
 * The hardware consumes a (context, qpc_mask) pair: a field takes effect
 * only when its bits in @qpc_mask are cleared to 0, so every field written
 * into @context has its mask bits zeroed immediately after.  The caller
 * posts the pair to the device with the MODIFY_QPC mailbox command.
 */
static void modify_qp_reset_to_init(struct ib_qp *ibqp,
				    const struct ib_qp_attr *attr,
				    int attr_mask,
				    struct hns_roce_v2_qp_context *context,
				    struct hns_roce_v2_qp_context *qpc_mask)
{
	struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);

	/*
	 * In v2 engine, software pass context and context mask to hardware
	 * when modifying qp. If software need modify some fields in context,
	 * we should set all bits of the relevant fields in context mask to
	 * 0 at the same time, else set them to 0x1.
	 */
	/* Transport service type of this QP */
	roce_set_field(context->byte_4_sqpn_tst, V2_QPC_BYTE_4_TST_M,
		       V2_QPC_BYTE_4_TST_S, to_hr_qp_type(hr_qp->ibqp.qp_type));
	roce_set_field(qpc_mask->byte_4_sqpn_tst, V2_QPC_BYTE_4_TST_M,
		       V2_QPC_BYTE_4_TST_S, 0);

	/* GSI QPs always use the extended SGE space; others only if max_gs > 2 */
	if (ibqp->qp_type == IB_QPT_GSI)
		roce_set_field(context->byte_4_sqpn_tst,
			       V2_QPC_BYTE_4_SGE_SHIFT_M,
			       V2_QPC_BYTE_4_SGE_SHIFT_S,
			       ilog2((unsigned int)hr_qp->sge.sge_cnt));
	else
		roce_set_field(context->byte_4_sqpn_tst,
			       V2_QPC_BYTE_4_SGE_SHIFT_M,
			       V2_QPC_BYTE_4_SGE_SHIFT_S,
			       hr_qp->sq.max_gs > 2 ?
			       ilog2((unsigned int)hr_qp->sge.sge_cnt) : 0);

	roce_set_field(qpc_mask->byte_4_sqpn_tst, V2_QPC_BYTE_4_SGE_SHIFT_M,
		       V2_QPC_BYTE_4_SGE_SHIFT_S, 0);

	roce_set_field(context->byte_4_sqpn_tst, V2_QPC_BYTE_4_SQPN_M,
		       V2_QPC_BYTE_4_SQPN_S, hr_qp->qpn);
	roce_set_field(qpc_mask->byte_4_sqpn_tst, V2_QPC_BYTE_4_SQPN_M,
		       V2_QPC_BYTE_4_SQPN_S, 0);

	roce_set_field(context->byte_16_buf_ba_pg_sz, V2_QPC_BYTE_16_PD_M,
		       V2_QPC_BYTE_16_PD_S, to_hr_pd(ibqp->pd)->pdn);
	roce_set_field(qpc_mask->byte_16_buf_ba_pg_sz, V2_QPC_BYTE_16_PD_M,
		       V2_QPC_BYTE_16_PD_S, 0);

	/* log2 of the RQ WQE size and of the SQ/RQ depths */
	roce_set_field(context->byte_20_smac_sgid_idx, V2_QPC_BYTE_20_RQWS_M,
		       V2_QPC_BYTE_20_RQWS_S, ilog2(hr_qp->rq.max_gs));
	roce_set_field(qpc_mask->byte_20_smac_sgid_idx, V2_QPC_BYTE_20_RQWS_M,
		       V2_QPC_BYTE_20_RQWS_S, 0);

	roce_set_field(context->byte_20_smac_sgid_idx,
		       V2_QPC_BYTE_20_SQ_SHIFT_M, V2_QPC_BYTE_20_SQ_SHIFT_S,
		       ilog2((unsigned int)hr_qp->sq.wqe_cnt));
	roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
		       V2_QPC_BYTE_20_SQ_SHIFT_M, V2_QPC_BYTE_20_SQ_SHIFT_S, 0);

	roce_set_field(context->byte_20_smac_sgid_idx,
		       V2_QPC_BYTE_20_RQ_SHIFT_M, V2_QPC_BYTE_20_RQ_SHIFT_S,
		       ilog2((unsigned int)hr_qp->rq.wqe_cnt));
	roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
		       V2_QPC_BYTE_20_RQ_SHIFT_M, V2_QPC_BYTE_20_RQ_SHIFT_S, 0);

	/* No VLAN need to set 0xFFF */
	roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_VLAN_IDX_M,
		       V2_QPC_BYTE_24_VLAN_IDX_S, 0xfff);
	roce_set_field(qpc_mask->byte_24_mtu_tc, V2_QPC_BYTE_24_VLAN_IDX_M,
		       V2_QPC_BYTE_24_VLAN_IDX_S, 0);

	/*
	 * Set some fields in context to zero, Because the default values
	 * of all fields in context are zero, we need not set them to 0 again.
	 * but we should set the relevant fields of context mask to 0.
	 */
	roce_set_bit(qpc_mask->byte_56_dqpn_err, V2_QPC_BYTE_56_SQ_TX_ERR_S, 0);
	roce_set_bit(qpc_mask->byte_56_dqpn_err, V2_QPC_BYTE_56_SQ_RX_ERR_S, 0);
	roce_set_bit(qpc_mask->byte_56_dqpn_err, V2_QPC_BYTE_56_RQ_TX_ERR_S, 0);
	roce_set_bit(qpc_mask->byte_56_dqpn_err, V2_QPC_BYTE_56_RQ_RX_ERR_S, 0);

	roce_set_field(qpc_mask->byte_60_qpst_mapid, V2_QPC_BYTE_60_MAPID_M,
		       V2_QPC_BYTE_60_MAPID_S, 0);

	roce_set_bit(qpc_mask->byte_60_qpst_mapid,
		     V2_QPC_BYTE_60_INNER_MAP_IND_S, 0);
	roce_set_bit(qpc_mask->byte_60_qpst_mapid, V2_QPC_BYTE_60_SQ_MAP_IND_S,
		     0);
	roce_set_bit(qpc_mask->byte_60_qpst_mapid, V2_QPC_BYTE_60_RQ_MAP_IND_S,
		     0);
	roce_set_bit(qpc_mask->byte_60_qpst_mapid, V2_QPC_BYTE_60_EXT_MAP_IND_S,
		     0);
	roce_set_bit(qpc_mask->byte_60_qpst_mapid, V2_QPC_BYTE_60_SQ_RLS_IND_S,
		     0);
	roce_set_bit(qpc_mask->byte_60_qpst_mapid, V2_QPC_BYTE_60_SQ_EXT_IND_S,
		     0);
	roce_set_bit(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_CNP_TX_FLAG_S, 0);
	roce_set_bit(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_CE_FLAG_S, 0);

	/* Cache the qkey when the caller supplies one */
	if (attr_mask & IB_QP_QKEY) {
		context->qkey_xrcd = attr->qkey;
		qpc_mask->qkey_xrcd = 0;
		hr_qp->qkey = attr->qkey;
	}

	/* Enable the RQ record doorbell when one was set up for this QP */
	if (hr_qp->rdb_en) {
		roce_set_bit(context->byte_68_rq_db,
			     V2_QPC_BYTE_68_RQ_RECORD_EN_S, 1);
		roce_set_bit(qpc_mask->byte_68_rq_db,
			     V2_QPC_BYTE_68_RQ_RECORD_EN_S, 0);
	}

	/* Low and high halves of the RQ record doorbell DMA address */
	roce_set_field(context->byte_68_rq_db,
		       V2_QPC_BYTE_68_RQ_DB_RECORD_ADDR_M,
		       V2_QPC_BYTE_68_RQ_DB_RECORD_ADDR_S,
		       ((u32)hr_qp->rdb.dma) >> 1);
	roce_set_field(qpc_mask->byte_68_rq_db,
		       V2_QPC_BYTE_68_RQ_DB_RECORD_ADDR_M,
		       V2_QPC_BYTE_68_RQ_DB_RECORD_ADDR_S, 0);
	context->rq_db_record_addr = hr_qp->rdb.dma >> 32;
	qpc_mask->rq_db_record_addr = 0;

	roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RQIE_S, 1);
	roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RQIE_S, 0);

	roce_set_field(context->byte_80_rnr_rx_cqn, V2_QPC_BYTE_80_RX_CQN_M,
		       V2_QPC_BYTE_80_RX_CQN_S, to_hr_cq(ibqp->recv_cq)->cqn);
	roce_set_field(qpc_mask->byte_80_rnr_rx_cqn, V2_QPC_BYTE_80_RX_CQN_M,
		       V2_QPC_BYTE_80_RX_CQN_S, 0);
	if (ibqp->srq) {
		roce_set_field(context->byte_76_srqn_op_en,
			       V2_QPC_BYTE_76_SRQN_M, V2_QPC_BYTE_76_SRQN_S,
			       to_hr_srq(ibqp->srq)->srqn);
		roce_set_field(qpc_mask->byte_76_srqn_op_en,
			       V2_QPC_BYTE_76_SRQN_M, V2_QPC_BYTE_76_SRQN_S, 0);
		roce_set_bit(context->byte_76_srqn_op_en,
			     V2_QPC_BYTE_76_SRQ_EN_S, 1);
		roce_set_bit(qpc_mask->byte_76_srqn_op_en,
			     V2_QPC_BYTE_76_SRQ_EN_S, 0);
	}

	/*
	 * The remaining mask-only writes clear the hardware-owned runtime
	 * state (queue indices, timers, retry/ack bookkeeping) so the QP
	 * starts from all-zero state.
	 */
	roce_set_field(qpc_mask->byte_84_rq_ci_pi,
		       V2_QPC_BYTE_84_RQ_PRODUCER_IDX_M,
		       V2_QPC_BYTE_84_RQ_PRODUCER_IDX_S, 0);
	roce_set_field(qpc_mask->byte_84_rq_ci_pi,
		       V2_QPC_BYTE_84_RQ_CONSUMER_IDX_M,
		       V2_QPC_BYTE_84_RQ_CONSUMER_IDX_S, 0);

	roce_set_field(qpc_mask->byte_92_srq_info, V2_QPC_BYTE_92_SRQ_INFO_M,
		       V2_QPC_BYTE_92_SRQ_INFO_S, 0);

	roce_set_field(qpc_mask->byte_96_rx_reqmsn, V2_QPC_BYTE_96_RX_REQ_MSN_M,
		       V2_QPC_BYTE_96_RX_REQ_MSN_S, 0);

	roce_set_field(qpc_mask->byte_104_rq_sge,
		       V2_QPC_BYTE_104_RQ_CUR_WQE_SGE_NUM_M,
		       V2_QPC_BYTE_104_RQ_CUR_WQE_SGE_NUM_S, 0);

	roce_set_bit(qpc_mask->byte_108_rx_reqepsn,
		     V2_QPC_BYTE_108_RX_REQ_PSN_ERR_S, 0);
	roce_set_field(qpc_mask->byte_108_rx_reqepsn,
		       V2_QPC_BYTE_108_RX_REQ_LAST_OPTYPE_M,
		       V2_QPC_BYTE_108_RX_REQ_LAST_OPTYPE_S, 0);
	roce_set_bit(qpc_mask->byte_108_rx_reqepsn,
		     V2_QPC_BYTE_108_RX_REQ_RNR_S, 0);

	qpc_mask->rq_rnr_timer = 0;
	qpc_mask->rx_msg_len = 0;
	qpc_mask->rx_rkey_pkt_info = 0;
	qpc_mask->rx_va = 0;

	roce_set_field(qpc_mask->byte_132_trrl, V2_QPC_BYTE_132_TRRL_HEAD_MAX_M,
		       V2_QPC_BYTE_132_TRRL_HEAD_MAX_S, 0);
	roce_set_field(qpc_mask->byte_132_trrl, V2_QPC_BYTE_132_TRRL_TAIL_MAX_M,
		       V2_QPC_BYTE_132_TRRL_TAIL_MAX_S, 0);

	roce_set_bit(qpc_mask->byte_140_raq, V2_QPC_BYTE_140_RSVD_RAQ_MAP_S, 0);
	roce_set_field(qpc_mask->byte_140_raq, V2_QPC_BYTE_140_RAQ_TRRL_HEAD_M,
		       V2_QPC_BYTE_140_RAQ_TRRL_HEAD_S, 0);
	roce_set_field(qpc_mask->byte_140_raq, V2_QPC_BYTE_140_RAQ_TRRL_TAIL_M,
		       V2_QPC_BYTE_140_RAQ_TRRL_TAIL_S, 0);

	roce_set_field(qpc_mask->byte_144_raq,
		       V2_QPC_BYTE_144_RAQ_RTY_INI_PSN_M,
		       V2_QPC_BYTE_144_RAQ_RTY_INI_PSN_S, 0);
	roce_set_bit(qpc_mask->byte_144_raq, V2_QPC_BYTE_144_RAQ_RTY_INI_IND_S,
		     0);
	roce_set_field(qpc_mask->byte_144_raq, V2_QPC_BYTE_144_RAQ_CREDIT_M,
		       V2_QPC_BYTE_144_RAQ_CREDIT_S, 0);
	roce_set_bit(qpc_mask->byte_144_raq, V2_QPC_BYTE_144_RESP_RTY_FLG_S, 0);

	roce_set_field(qpc_mask->byte_148_raq, V2_QPC_BYTE_148_RQ_MSN_M,
		       V2_QPC_BYTE_148_RQ_MSN_S, 0);
	roce_set_field(qpc_mask->byte_148_raq, V2_QPC_BYTE_148_RAQ_SYNDROME_M,
		       V2_QPC_BYTE_148_RAQ_SYNDROME_S, 0);

	roce_set_field(qpc_mask->byte_152_raq, V2_QPC_BYTE_152_RAQ_PSN_M,
		       V2_QPC_BYTE_152_RAQ_PSN_S, 0);
	roce_set_field(qpc_mask->byte_152_raq,
		       V2_QPC_BYTE_152_RAQ_TRRL_RTY_HEAD_M,
		       V2_QPC_BYTE_152_RAQ_TRRL_RTY_HEAD_S, 0);

	roce_set_field(qpc_mask->byte_156_raq, V2_QPC_BYTE_156_RAQ_USE_PKTN_M,
		       V2_QPC_BYTE_156_RAQ_USE_PKTN_S, 0);

	roce_set_field(qpc_mask->byte_160_sq_ci_pi,
		       V2_QPC_BYTE_160_SQ_PRODUCER_IDX_M,
		       V2_QPC_BYTE_160_SQ_PRODUCER_IDX_S, 0);
	roce_set_field(qpc_mask->byte_160_sq_ci_pi,
		       V2_QPC_BYTE_160_SQ_CONSUMER_IDX_M,
		       V2_QPC_BYTE_160_SQ_CONSUMER_IDX_S, 0);

	/* Backup copy of the SQ depth shift */
	roce_set_field(context->byte_168_irrl_idx,
		       V2_QPC_BYTE_168_SQ_SHIFT_BAK_M,
		       V2_QPC_BYTE_168_SQ_SHIFT_BAK_S,
		       ilog2((unsigned int)hr_qp->sq.wqe_cnt));
	roce_set_field(qpc_mask->byte_168_irrl_idx,
		       V2_QPC_BYTE_168_SQ_SHIFT_BAK_M,
		       V2_QPC_BYTE_168_SQ_SHIFT_BAK_S, 0);

	roce_set_bit(qpc_mask->byte_168_irrl_idx,
		     V2_QPC_BYTE_168_MSG_RTY_LP_FLG_S, 0);
	roce_set_bit(qpc_mask->byte_168_irrl_idx,
		     V2_QPC_BYTE_168_SQ_INVLD_FLG_S, 0);
	roce_set_field(qpc_mask->byte_168_irrl_idx,
		       V2_QPC_BYTE_168_IRRL_IDX_LSB_M,
		       V2_QPC_BYTE_168_IRRL_IDX_LSB_S, 0);

	/* Request an ack every 2^4 packets */
	roce_set_field(context->byte_172_sq_psn, V2_QPC_BYTE_172_ACK_REQ_FREQ_M,
		       V2_QPC_BYTE_172_ACK_REQ_FREQ_S, 4);
	roce_set_field(qpc_mask->byte_172_sq_psn,
		       V2_QPC_BYTE_172_ACK_REQ_FREQ_M,
		       V2_QPC_BYTE_172_ACK_REQ_FREQ_S, 0);

	roce_set_bit(qpc_mask->byte_172_sq_psn, V2_QPC_BYTE_172_MSG_RNR_FLG_S,
		     0);

	roce_set_field(qpc_mask->byte_176_msg_pktn,
		       V2_QPC_BYTE_176_MSG_USE_PKTN_M,
		       V2_QPC_BYTE_176_MSG_USE_PKTN_S, 0);
	roce_set_field(qpc_mask->byte_176_msg_pktn,
		       V2_QPC_BYTE_176_IRRL_HEAD_PRE_M,
		       V2_QPC_BYTE_176_IRRL_HEAD_PRE_S, 0);

	roce_set_field(qpc_mask->byte_184_irrl_idx,
		       V2_QPC_BYTE_184_IRRL_IDX_MSB_M,
		       V2_QPC_BYTE_184_IRRL_IDX_MSB_S, 0);

	qpc_mask->cur_sge_offset = 0;

	roce_set_field(qpc_mask->byte_192_ext_sge,
		       V2_QPC_BYTE_192_CUR_SGE_IDX_M,
		       V2_QPC_BYTE_192_CUR_SGE_IDX_S, 0);
	roce_set_field(qpc_mask->byte_192_ext_sge,
		       V2_QPC_BYTE_192_EXT_SGE_NUM_LEFT_M,
		       V2_QPC_BYTE_192_EXT_SGE_NUM_LEFT_S, 0);

	roce_set_field(qpc_mask->byte_196_sq_psn, V2_QPC_BYTE_196_IRRL_HEAD_M,
		       V2_QPC_BYTE_196_IRRL_HEAD_S, 0);

	roce_set_field(qpc_mask->byte_200_sq_max, V2_QPC_BYTE_200_SQ_MAX_IDX_M,
		       V2_QPC_BYTE_200_SQ_MAX_IDX_S, 0);
	roce_set_field(qpc_mask->byte_200_sq_max,
		       V2_QPC_BYTE_200_LCL_OPERATED_CNT_M,
		       V2_QPC_BYTE_200_LCL_OPERATED_CNT_S, 0);

	roce_set_bit(qpc_mask->byte_208_irrl, V2_QPC_BYTE_208_PKT_RNR_FLG_S, 0);
	roce_set_bit(qpc_mask->byte_208_irrl, V2_QPC_BYTE_208_PKT_RTY_FLG_S, 0);

	roce_set_field(qpc_mask->byte_212_lsn, V2_QPC_BYTE_212_CHECK_FLG_M,
		       V2_QPC_BYTE_212_CHECK_FLG_S, 0);

	qpc_mask->sq_timer = 0;

	roce_set_field(qpc_mask->byte_220_retry_psn_msn,
		       V2_QPC_BYTE_220_RETRY_MSG_MSN_M,
		       V2_QPC_BYTE_220_RETRY_MSG_MSN_S, 0);
	roce_set_field(qpc_mask->byte_232_irrl_sge,
		       V2_QPC_BYTE_232_IRRL_SGE_IDX_M,
		       V2_QPC_BYTE_232_IRRL_SGE_IDX_S, 0);

	qpc_mask->irrl_cur_sge_offset = 0;

	roce_set_field(qpc_mask->byte_240_irrl_tail,
		       V2_QPC_BYTE_240_IRRL_TAIL_REAL_M,
		       V2_QPC_BYTE_240_IRRL_TAIL_REAL_S, 0);
	roce_set_field(qpc_mask->byte_240_irrl_tail,
		       V2_QPC_BYTE_240_IRRL_TAIL_RD_M,
		       V2_QPC_BYTE_240_IRRL_TAIL_RD_S, 0);
	roce_set_field(qpc_mask->byte_240_irrl_tail,
		       V2_QPC_BYTE_240_RX_ACK_MSN_M,
		       V2_QPC_BYTE_240_RX_ACK_MSN_S, 0);

	roce_set_field(qpc_mask->byte_248_ack_psn, V2_QPC_BYTE_248_IRRL_PSN_M,
		       V2_QPC_BYTE_248_IRRL_PSN_S, 0);
	roce_set_bit(qpc_mask->byte_248_ack_psn, V2_QPC_BYTE_248_ACK_PSN_ERR_S,
		     0);
	roce_set_field(qpc_mask->byte_248_ack_psn,
		       V2_QPC_BYTE_248_ACK_LAST_OPTYPE_M,
		       V2_QPC_BYTE_248_ACK_LAST_OPTYPE_S, 0);
	roce_set_bit(qpc_mask->byte_248_ack_psn, V2_QPC_BYTE_248_IRRL_PSN_VLD_S,
		     0);
	roce_set_bit(qpc_mask->byte_248_ack_psn,
		     V2_QPC_BYTE_248_RNR_RETRY_FLAG_S, 0);
	roce_set_bit(qpc_mask->byte_248_ack_psn, V2_QPC_BYTE_248_CQ_ERR_IND_S,
		     0);

	/* Cache the requested access flags and pkey for later transitions */
	hr_qp->access_flags = attr->qp_access_flags;
	hr_qp->pkey_index = attr->pkey_index;
	roce_set_field(context->byte_252_err_txcqn, V2_QPC_BYTE_252_TX_CQN_M,
		       V2_QPC_BYTE_252_TX_CQN_S, to_hr_cq(ibqp->send_cq)->cqn);
	roce_set_field(qpc_mask->byte_252_err_txcqn, V2_QPC_BYTE_252_TX_CQN_M,
		       V2_QPC_BYTE_252_TX_CQN_S, 0);

	roce_set_field(qpc_mask->byte_252_err_txcqn, V2_QPC_BYTE_252_ERR_TYPE_M,
		       V2_QPC_BYTE_252_ERR_TYPE_S, 0);

	roce_set_field(qpc_mask->byte_256_sqflush_rqcqe,
		       V2_QPC_BYTE_256_RQ_CQE_IDX_M,
		       V2_QPC_BYTE_256_RQ_CQE_IDX_S, 0);
	roce_set_field(qpc_mask->byte_256_sqflush_rqcqe,
		       V2_QPC_BYTE_256_SQ_FLUSH_IDX_M,
		       V2_QPC_BYTE_256_SQ_FLUSH_IDX_S, 0);
}
2507
/*
 * modify_qp_init_to_init - rebuild the QP context pair for an INIT -> INIT
 * transition (the caller re-issues modify_qp with updated attributes
 * before moving on to RTR).
 *
 * Same mask convention as the other modify_qp_* helpers: a field takes
 * effect only when its bits in @qpc_mask are cleared to 0.
 */
static void modify_qp_init_to_init(struct ib_qp *ibqp,
				   const struct ib_qp_attr *attr, int attr_mask,
				   struct hns_roce_v2_qp_context *context,
				   struct hns_roce_v2_qp_context *qpc_mask)
{
	struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);

	/*
	 * In v2 engine, software pass context and context mask to hardware
	 * when modifying qp. If software need modify some fields in context,
	 * we should set all bits of the relevant fields in context mask to
	 * 0 at the same time, else set them to 0x1.
	 */
	roce_set_field(context->byte_4_sqpn_tst, V2_QPC_BYTE_4_TST_M,
		       V2_QPC_BYTE_4_TST_S, to_hr_qp_type(hr_qp->ibqp.qp_type));
	roce_set_field(qpc_mask->byte_4_sqpn_tst, V2_QPC_BYTE_4_TST_M,
		       V2_QPC_BYTE_4_TST_S, 0);

	/* GSI QPs always use the extended SGE space; others only if max_gs > 2 */
	if (ibqp->qp_type == IB_QPT_GSI)
		roce_set_field(context->byte_4_sqpn_tst,
			       V2_QPC_BYTE_4_SGE_SHIFT_M,
			       V2_QPC_BYTE_4_SGE_SHIFT_S,
			       ilog2((unsigned int)hr_qp->sge.sge_cnt));
	else
		roce_set_field(context->byte_4_sqpn_tst,
			       V2_QPC_BYTE_4_SGE_SHIFT_M,
			       V2_QPC_BYTE_4_SGE_SHIFT_S, hr_qp->sq.max_gs > 2 ?
			       ilog2((unsigned int)hr_qp->sge.sge_cnt) : 0);

	roce_set_field(qpc_mask->byte_4_sqpn_tst, V2_QPC_BYTE_4_SGE_SHIFT_M,
		       V2_QPC_BYTE_4_SGE_SHIFT_S, 0);

	/*
	 * Program RRE/RWE/ATE either from the freshly supplied access flags
	 * or from the flags cached on the QP at reset-to-init time.
	 */
	if (attr_mask & IB_QP_ACCESS_FLAGS) {
		roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RRE_S,
			     !!(attr->qp_access_flags & IB_ACCESS_REMOTE_READ));
		roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RRE_S,
			     0);

		roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RWE_S,
			     !!(attr->qp_access_flags &
			     IB_ACCESS_REMOTE_WRITE));
		roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RWE_S,
			     0);

		roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_ATE_S,
			     !!(attr->qp_access_flags &
			     IB_ACCESS_REMOTE_ATOMIC));
		roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_ATE_S,
			     0);
	} else {
		roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RRE_S,
			     !!(hr_qp->access_flags & IB_ACCESS_REMOTE_READ));
		roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RRE_S,
			     0);

		roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RWE_S,
			     !!(hr_qp->access_flags & IB_ACCESS_REMOTE_WRITE));
		roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RWE_S,
			     0);

		roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_ATE_S,
			     !!(hr_qp->access_flags & IB_ACCESS_REMOTE_ATOMIC));
		roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_ATE_S,
			     0);
	}

	/* log2 SQ/RQ depths, PD, and the receive/send CQ numbers */
	roce_set_field(context->byte_20_smac_sgid_idx,
		       V2_QPC_BYTE_20_SQ_SHIFT_M, V2_QPC_BYTE_20_SQ_SHIFT_S,
		       ilog2((unsigned int)hr_qp->sq.wqe_cnt));
	roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
		       V2_QPC_BYTE_20_SQ_SHIFT_M, V2_QPC_BYTE_20_SQ_SHIFT_S, 0);

	roce_set_field(context->byte_20_smac_sgid_idx,
		       V2_QPC_BYTE_20_RQ_SHIFT_M, V2_QPC_BYTE_20_RQ_SHIFT_S,
		       ilog2((unsigned int)hr_qp->rq.wqe_cnt));
	roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
		       V2_QPC_BYTE_20_RQ_SHIFT_M, V2_QPC_BYTE_20_RQ_SHIFT_S, 0);

	roce_set_field(context->byte_16_buf_ba_pg_sz, V2_QPC_BYTE_16_PD_M,
		       V2_QPC_BYTE_16_PD_S, to_hr_pd(ibqp->pd)->pdn);
	roce_set_field(qpc_mask->byte_16_buf_ba_pg_sz, V2_QPC_BYTE_16_PD_M,
		       V2_QPC_BYTE_16_PD_S, 0);

	roce_set_field(context->byte_80_rnr_rx_cqn, V2_QPC_BYTE_80_RX_CQN_M,
		       V2_QPC_BYTE_80_RX_CQN_S, to_hr_cq(ibqp->recv_cq)->cqn);
	roce_set_field(qpc_mask->byte_80_rnr_rx_cqn, V2_QPC_BYTE_80_RX_CQN_M,
		       V2_QPC_BYTE_80_RX_CQN_S, 0);

	roce_set_field(context->byte_252_err_txcqn, V2_QPC_BYTE_252_TX_CQN_M,
		       V2_QPC_BYTE_252_TX_CQN_S, to_hr_cq(ibqp->send_cq)->cqn);
	roce_set_field(qpc_mask->byte_252_err_txcqn, V2_QPC_BYTE_252_TX_CQN_M,
		       V2_QPC_BYTE_252_TX_CQN_S, 0);

	if (ibqp->srq) {
		roce_set_bit(context->byte_76_srqn_op_en,
			     V2_QPC_BYTE_76_SRQ_EN_S, 1);
		roce_set_bit(qpc_mask->byte_76_srqn_op_en,
			     V2_QPC_BYTE_76_SRQ_EN_S, 0);
		roce_set_field(context->byte_76_srqn_op_en,
			       V2_QPC_BYTE_76_SRQN_M, V2_QPC_BYTE_76_SRQN_S,
			       to_hr_srq(ibqp->srq)->srqn);
		roce_set_field(qpc_mask->byte_76_srqn_op_en,
			       V2_QPC_BYTE_76_SRQN_M, V2_QPC_BYTE_76_SRQN_S, 0);
	}

	if (attr_mask & IB_QP_QKEY) {
		context->qkey_xrcd = attr->qkey;
		qpc_mask->qkey_xrcd = 0;
	}

	roce_set_field(context->byte_4_sqpn_tst, V2_QPC_BYTE_4_SQPN_M,
		       V2_QPC_BYTE_4_SQPN_S, hr_qp->qpn);
	roce_set_field(qpc_mask->byte_4_sqpn_tst, V2_QPC_BYTE_4_SQPN_M,
		       V2_QPC_BYTE_4_SQPN_S, 0);

	/*
	 * NOTE(review): DQPN is programmed with the *local* qpn here; the
	 * real destination QPN is written in the INIT->RTR transition from
	 * attr->dest_qp_num - confirm this placeholder value is intentional.
	 */
	roce_set_field(context->byte_56_dqpn_err, V2_QPC_BYTE_56_DQPN_M,
		       V2_QPC_BYTE_56_DQPN_S, hr_qp->qpn);
	roce_set_field(qpc_mask->byte_56_dqpn_err, V2_QPC_BYTE_56_DQPN_M,
		       V2_QPC_BYTE_56_DQPN_S, 0);
	/* Backup copy of the SQ depth shift */
	roce_set_field(context->byte_168_irrl_idx,
		       V2_QPC_BYTE_168_SQ_SHIFT_BAK_M,
		       V2_QPC_BYTE_168_SQ_SHIFT_BAK_S,
		       ilog2((unsigned int)hr_qp->sq.wqe_cnt));
	roce_set_field(qpc_mask->byte_168_irrl_idx,
		       V2_QPC_BYTE_168_SQ_SHIFT_BAK_M,
		       V2_QPC_BYTE_168_SQ_SHIFT_BAK_S, 0);
}
2635
2636static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
2637 const struct ib_qp_attr *attr, int attr_mask,
2638 struct hns_roce_v2_qp_context *context,
2639 struct hns_roce_v2_qp_context *qpc_mask)
2640{
2641 const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr);
2642 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
2643 struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
2644 struct device *dev = hr_dev->dev;
e92f2c18 2645 dma_addr_t dma_handle_3;
926a01dc
WHX
2646 dma_addr_t dma_handle_2;
2647 dma_addr_t dma_handle;
2648 u32 page_size;
2649 u8 port_num;
e92f2c18 2650 u64 *mtts_3;
926a01dc
WHX
2651 u64 *mtts_2;
2652 u64 *mtts;
2653 u8 *dmac;
2654 u8 *smac;
2655 int port;
2656
2657 /* Search qp buf's mtts */
2658 mtts = hns_roce_table_find(hr_dev, &hr_dev->mr_table.mtt_table,
2659 hr_qp->mtt.first_seg, &dma_handle);
2660 if (!mtts) {
2661 dev_err(dev, "qp buf pa find failed\n");
2662 return -EINVAL;
2663 }
2664
2665 /* Search IRRL's mtts */
2666 mtts_2 = hns_roce_table_find(hr_dev, &hr_dev->qp_table.irrl_table,
2667 hr_qp->qpn, &dma_handle_2);
2668 if (!mtts_2) {
2669 dev_err(dev, "qp irrl_table find failed\n");
2670 return -EINVAL;
2671 }
2672
e92f2c18 2673 /* Search TRRL's mtts */
2674 mtts_3 = hns_roce_table_find(hr_dev, &hr_dev->qp_table.trrl_table,
2675 hr_qp->qpn, &dma_handle_3);
2676 if (!mtts_3) {
2677 dev_err(dev, "qp trrl_table find failed\n");
2678 return -EINVAL;
2679 }
2680
926a01dc
WHX
2681 if ((attr_mask & IB_QP_ALT_PATH) || (attr_mask & IB_QP_ACCESS_FLAGS) ||
2682 (attr_mask & IB_QP_PKEY_INDEX) || (attr_mask & IB_QP_QKEY)) {
2683 dev_err(dev, "INIT2RTR attr_mask (0x%x) error\n", attr_mask);
2684 return -EINVAL;
2685 }
2686
2687 dmac = (u8 *)attr->ah_attr.roce.dmac;
2688 context->wqe_sge_ba = (u32)(dma_handle >> 3);
2689 qpc_mask->wqe_sge_ba = 0;
2690
2691 /*
2692 * In v2 engine, software pass context and context mask to hardware
2693 * when modifying qp. If software need modify some fields in context,
2694 * we should set all bits of the relevant fields in context mask to
2695 * 0 at the same time, else set them to 0x1.
2696 */
2697 roce_set_field(context->byte_12_sq_hop, V2_QPC_BYTE_12_WQE_SGE_BA_M,
2698 V2_QPC_BYTE_12_WQE_SGE_BA_S, dma_handle >> (32 + 3));
2699 roce_set_field(qpc_mask->byte_12_sq_hop, V2_QPC_BYTE_12_WQE_SGE_BA_M,
2700 V2_QPC_BYTE_12_WQE_SGE_BA_S, 0);
2701
2702 roce_set_field(context->byte_12_sq_hop, V2_QPC_BYTE_12_SQ_HOP_NUM_M,
2703 V2_QPC_BYTE_12_SQ_HOP_NUM_S,
2704 hr_dev->caps.mtt_hop_num == HNS_ROCE_HOP_NUM_0 ?
2705 0 : hr_dev->caps.mtt_hop_num);
2706 roce_set_field(qpc_mask->byte_12_sq_hop, V2_QPC_BYTE_12_SQ_HOP_NUM_M,
2707 V2_QPC_BYTE_12_SQ_HOP_NUM_S, 0);
2708
2709 roce_set_field(context->byte_20_smac_sgid_idx,
2710 V2_QPC_BYTE_20_SGE_HOP_NUM_M,
2711 V2_QPC_BYTE_20_SGE_HOP_NUM_S,
0fa95a9a 2712 ((ibqp->qp_type == IB_QPT_GSI) || hr_qp->sq.max_gs > 2) ?
2713 hr_dev->caps.mtt_hop_num : 0);
926a01dc
WHX
2714 roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
2715 V2_QPC_BYTE_20_SGE_HOP_NUM_M,
2716 V2_QPC_BYTE_20_SGE_HOP_NUM_S, 0);
2717
2718 roce_set_field(context->byte_20_smac_sgid_idx,
2719 V2_QPC_BYTE_20_RQ_HOP_NUM_M,
2720 V2_QPC_BYTE_20_RQ_HOP_NUM_S,
2721 hr_dev->caps.mtt_hop_num == HNS_ROCE_HOP_NUM_0 ?
2722 0 : hr_dev->caps.mtt_hop_num);
2723 roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
2724 V2_QPC_BYTE_20_RQ_HOP_NUM_M,
2725 V2_QPC_BYTE_20_RQ_HOP_NUM_S, 0);
2726
2727 roce_set_field(context->byte_16_buf_ba_pg_sz,
2728 V2_QPC_BYTE_16_WQE_SGE_BA_PG_SZ_M,
2729 V2_QPC_BYTE_16_WQE_SGE_BA_PG_SZ_S,
2730 hr_dev->caps.mtt_ba_pg_sz);
2731 roce_set_field(qpc_mask->byte_16_buf_ba_pg_sz,
2732 V2_QPC_BYTE_16_WQE_SGE_BA_PG_SZ_M,
2733 V2_QPC_BYTE_16_WQE_SGE_BA_PG_SZ_S, 0);
2734
2735 roce_set_field(context->byte_16_buf_ba_pg_sz,
2736 V2_QPC_BYTE_16_WQE_SGE_BUF_PG_SZ_M,
2737 V2_QPC_BYTE_16_WQE_SGE_BUF_PG_SZ_S,
2738 hr_dev->caps.mtt_buf_pg_sz);
2739 roce_set_field(qpc_mask->byte_16_buf_ba_pg_sz,
2740 V2_QPC_BYTE_16_WQE_SGE_BUF_PG_SZ_M,
2741 V2_QPC_BYTE_16_WQE_SGE_BUF_PG_SZ_S, 0);
2742
2743 roce_set_field(context->byte_80_rnr_rx_cqn,
2744 V2_QPC_BYTE_80_MIN_RNR_TIME_M,
2745 V2_QPC_BYTE_80_MIN_RNR_TIME_S, attr->min_rnr_timer);
2746 roce_set_field(qpc_mask->byte_80_rnr_rx_cqn,
2747 V2_QPC_BYTE_80_MIN_RNR_TIME_M,
2748 V2_QPC_BYTE_80_MIN_RNR_TIME_S, 0);
2749
2750 page_size = 1 << (hr_dev->caps.mtt_buf_pg_sz + PAGE_SHIFT);
2751 context->rq_cur_blk_addr = (u32)(mtts[hr_qp->rq.offset / page_size]
2752 >> PAGE_ADDR_SHIFT);
2753 qpc_mask->rq_cur_blk_addr = 0;
2754
2755 roce_set_field(context->byte_92_srq_info,
2756 V2_QPC_BYTE_92_RQ_CUR_BLK_ADDR_M,
2757 V2_QPC_BYTE_92_RQ_CUR_BLK_ADDR_S,
2758 mtts[hr_qp->rq.offset / page_size]
2759 >> (32 + PAGE_ADDR_SHIFT));
2760 roce_set_field(qpc_mask->byte_92_srq_info,
2761 V2_QPC_BYTE_92_RQ_CUR_BLK_ADDR_M,
2762 V2_QPC_BYTE_92_RQ_CUR_BLK_ADDR_S, 0);
2763
2764 context->rq_nxt_blk_addr = (u32)(mtts[hr_qp->rq.offset / page_size + 1]
2765 >> PAGE_ADDR_SHIFT);
2766 qpc_mask->rq_nxt_blk_addr = 0;
2767
2768 roce_set_field(context->byte_104_rq_sge,
2769 V2_QPC_BYTE_104_RQ_NXT_BLK_ADDR_M,
2770 V2_QPC_BYTE_104_RQ_NXT_BLK_ADDR_S,
2771 mtts[hr_qp->rq.offset / page_size + 1]
2772 >> (32 + PAGE_ADDR_SHIFT));
2773 roce_set_field(qpc_mask->byte_104_rq_sge,
2774 V2_QPC_BYTE_104_RQ_NXT_BLK_ADDR_M,
2775 V2_QPC_BYTE_104_RQ_NXT_BLK_ADDR_S, 0);
2776
2777 roce_set_field(context->byte_108_rx_reqepsn,
2778 V2_QPC_BYTE_108_RX_REQ_EPSN_M,
2779 V2_QPC_BYTE_108_RX_REQ_EPSN_S, attr->rq_psn);
2780 roce_set_field(qpc_mask->byte_108_rx_reqepsn,
2781 V2_QPC_BYTE_108_RX_REQ_EPSN_M,
2782 V2_QPC_BYTE_108_RX_REQ_EPSN_S, 0);
2783
e92f2c18 2784 roce_set_field(context->byte_132_trrl, V2_QPC_BYTE_132_TRRL_BA_M,
2785 V2_QPC_BYTE_132_TRRL_BA_S, dma_handle_3 >> 4);
2786 roce_set_field(qpc_mask->byte_132_trrl, V2_QPC_BYTE_132_TRRL_BA_M,
2787 V2_QPC_BYTE_132_TRRL_BA_S, 0);
2788 context->trrl_ba = (u32)(dma_handle_3 >> (16 + 4));
2789 qpc_mask->trrl_ba = 0;
2790 roce_set_field(context->byte_140_raq, V2_QPC_BYTE_140_TRRL_BA_M,
2791 V2_QPC_BYTE_140_TRRL_BA_S,
2792 (u32)(dma_handle_3 >> (32 + 16 + 4)));
2793 roce_set_field(qpc_mask->byte_140_raq, V2_QPC_BYTE_140_TRRL_BA_M,
2794 V2_QPC_BYTE_140_TRRL_BA_S, 0);
2795
d5514246 2796 context->irrl_ba = (u32)(dma_handle_2 >> 6);
926a01dc
WHX
2797 qpc_mask->irrl_ba = 0;
2798 roce_set_field(context->byte_208_irrl, V2_QPC_BYTE_208_IRRL_BA_M,
2799 V2_QPC_BYTE_208_IRRL_BA_S,
d5514246 2800 dma_handle_2 >> (32 + 6));
926a01dc
WHX
2801 roce_set_field(qpc_mask->byte_208_irrl, V2_QPC_BYTE_208_IRRL_BA_M,
2802 V2_QPC_BYTE_208_IRRL_BA_S, 0);
2803
2804 roce_set_bit(context->byte_208_irrl, V2_QPC_BYTE_208_RMT_E2E_S, 1);
2805 roce_set_bit(qpc_mask->byte_208_irrl, V2_QPC_BYTE_208_RMT_E2E_S, 0);
2806
2807 roce_set_bit(context->byte_252_err_txcqn, V2_QPC_BYTE_252_SIG_TYPE_S,
2808 hr_qp->sq_signal_bits);
2809 roce_set_bit(qpc_mask->byte_252_err_txcqn, V2_QPC_BYTE_252_SIG_TYPE_S,
2810 0);
2811
2812 port = (attr_mask & IB_QP_PORT) ? (attr->port_num - 1) : hr_qp->port;
2813
2814 smac = (u8 *)hr_dev->dev_addr[port];
2815 /* when dmac equals smac or loop_idc is 1, it should loopback */
2816 if (ether_addr_equal_unaligned(dmac, smac) ||
2817 hr_dev->loop_idc == 0x1) {
2818 roce_set_bit(context->byte_28_at_fl, V2_QPC_BYTE_28_LBI_S, 1);
2819 roce_set_bit(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_LBI_S, 0);
2820 }
2821
4f3f7a70 2822 if ((attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) &&
2823 attr->max_dest_rd_atomic) {
2824 roce_set_field(context->byte_140_raq, V2_QPC_BYTE_140_RR_MAX_M,
2825 V2_QPC_BYTE_140_RR_MAX_S,
2826 fls(attr->max_dest_rd_atomic - 1));
2827 roce_set_field(qpc_mask->byte_140_raq, V2_QPC_BYTE_140_RR_MAX_M,
2828 V2_QPC_BYTE_140_RR_MAX_S, 0);
2829 }
926a01dc
WHX
2830
2831 roce_set_field(context->byte_56_dqpn_err, V2_QPC_BYTE_56_DQPN_M,
2832 V2_QPC_BYTE_56_DQPN_S, attr->dest_qp_num);
2833 roce_set_field(qpc_mask->byte_56_dqpn_err, V2_QPC_BYTE_56_DQPN_M,
2834 V2_QPC_BYTE_56_DQPN_S, 0);
2835
2836 /* Configure GID index */
2837 port_num = rdma_ah_get_port_num(&attr->ah_attr);
2838 roce_set_field(context->byte_20_smac_sgid_idx,
2839 V2_QPC_BYTE_20_SGID_IDX_M,
2840 V2_QPC_BYTE_20_SGID_IDX_S,
2841 hns_get_gid_index(hr_dev, port_num - 1,
2842 grh->sgid_index));
2843 roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
2844 V2_QPC_BYTE_20_SGID_IDX_M,
2845 V2_QPC_BYTE_20_SGID_IDX_S, 0);
2846 memcpy(&(context->dmac), dmac, 4);
2847 roce_set_field(context->byte_52_udpspn_dmac, V2_QPC_BYTE_52_DMAC_M,
2848 V2_QPC_BYTE_52_DMAC_S, *((u16 *)(&dmac[4])));
2849 qpc_mask->dmac = 0;
2850 roce_set_field(qpc_mask->byte_52_udpspn_dmac, V2_QPC_BYTE_52_DMAC_M,
2851 V2_QPC_BYTE_52_DMAC_S, 0);
2852
2853 roce_set_field(context->byte_56_dqpn_err, V2_QPC_BYTE_56_LP_PKTN_INI_M,
2854 V2_QPC_BYTE_56_LP_PKTN_INI_S, 4);
2855 roce_set_field(qpc_mask->byte_56_dqpn_err, V2_QPC_BYTE_56_LP_PKTN_INI_M,
2856 V2_QPC_BYTE_56_LP_PKTN_INI_S, 0);
2857
2858 roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_HOP_LIMIT_M,
2859 V2_QPC_BYTE_24_HOP_LIMIT_S, grh->hop_limit);
2860 roce_set_field(qpc_mask->byte_24_mtu_tc, V2_QPC_BYTE_24_HOP_LIMIT_M,
2861 V2_QPC_BYTE_24_HOP_LIMIT_S, 0);
2862
2863 roce_set_field(context->byte_28_at_fl, V2_QPC_BYTE_28_FL_M,
2864 V2_QPC_BYTE_28_FL_S, grh->flow_label);
2865 roce_set_field(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_FL_M,
2866 V2_QPC_BYTE_28_FL_S, 0);
2867
2868 roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_TC_M,
2869 V2_QPC_BYTE_24_TC_S, grh->traffic_class);
2870 roce_set_field(qpc_mask->byte_24_mtu_tc, V2_QPC_BYTE_24_TC_M,
2871 V2_QPC_BYTE_24_TC_S, 0);
2872
0fa95a9a 2873 if (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_UD)
2874 roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_MTU_M,
2875 V2_QPC_BYTE_24_MTU_S, IB_MTU_4096);
2876 else
2877 roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_MTU_M,
2878 V2_QPC_BYTE_24_MTU_S, attr->path_mtu);
2879
926a01dc
WHX
2880 roce_set_field(qpc_mask->byte_24_mtu_tc, V2_QPC_BYTE_24_MTU_M,
2881 V2_QPC_BYTE_24_MTU_S, 0);
2882
2883 memcpy(context->dgid, grh->dgid.raw, sizeof(grh->dgid.raw));
2884 memset(qpc_mask->dgid, 0, sizeof(grh->dgid.raw));
2885
2886 roce_set_field(context->byte_84_rq_ci_pi,
2887 V2_QPC_BYTE_84_RQ_PRODUCER_IDX_M,
2888 V2_QPC_BYTE_84_RQ_PRODUCER_IDX_S, hr_qp->rq.head);
2889 roce_set_field(qpc_mask->byte_84_rq_ci_pi,
2890 V2_QPC_BYTE_84_RQ_PRODUCER_IDX_M,
2891 V2_QPC_BYTE_84_RQ_PRODUCER_IDX_S, 0);
2892
2893 roce_set_field(qpc_mask->byte_84_rq_ci_pi,
2894 V2_QPC_BYTE_84_RQ_CONSUMER_IDX_M,
2895 V2_QPC_BYTE_84_RQ_CONSUMER_IDX_S, 0);
2896 roce_set_bit(qpc_mask->byte_108_rx_reqepsn,
2897 V2_QPC_BYTE_108_RX_REQ_PSN_ERR_S, 0);
2898 roce_set_field(qpc_mask->byte_96_rx_reqmsn, V2_QPC_BYTE_96_RX_REQ_MSN_M,
2899 V2_QPC_BYTE_96_RX_REQ_MSN_S, 0);
2900 roce_set_field(qpc_mask->byte_108_rx_reqepsn,
2901 V2_QPC_BYTE_108_RX_REQ_LAST_OPTYPE_M,
2902 V2_QPC_BYTE_108_RX_REQ_LAST_OPTYPE_S, 0);
2903
2904 context->rq_rnr_timer = 0;
2905 qpc_mask->rq_rnr_timer = 0;
2906
2907 roce_set_field(context->byte_152_raq, V2_QPC_BYTE_152_RAQ_PSN_M,
2908 V2_QPC_BYTE_152_RAQ_PSN_S, attr->rq_psn - 1);
2909 roce_set_field(qpc_mask->byte_152_raq, V2_QPC_BYTE_152_RAQ_PSN_M,
2910 V2_QPC_BYTE_152_RAQ_PSN_S, 0);
2911
2912 roce_set_field(qpc_mask->byte_132_trrl, V2_QPC_BYTE_132_TRRL_HEAD_MAX_M,
2913 V2_QPC_BYTE_132_TRRL_HEAD_MAX_S, 0);
2914 roce_set_field(qpc_mask->byte_132_trrl, V2_QPC_BYTE_132_TRRL_TAIL_MAX_M,
2915 V2_QPC_BYTE_132_TRRL_TAIL_MAX_S, 0);
2916
2917 roce_set_field(context->byte_168_irrl_idx,
2918 V2_QPC_BYTE_168_LP_SGEN_INI_M,
2919 V2_QPC_BYTE_168_LP_SGEN_INI_S, 3);
2920 roce_set_field(qpc_mask->byte_168_irrl_idx,
2921 V2_QPC_BYTE_168_LP_SGEN_INI_M,
2922 V2_QPC_BYTE_168_LP_SGEN_INI_S, 0);
2923
926a01dc
WHX
2924 roce_set_field(context->byte_28_at_fl, V2_QPC_BYTE_28_SL_M,
2925 V2_QPC_BYTE_28_SL_S, rdma_ah_get_sl(&attr->ah_attr));
2926 roce_set_field(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_SL_M,
2927 V2_QPC_BYTE_28_SL_S, 0);
2928 hr_qp->sl = rdma_ah_get_sl(&attr->ah_attr);
2929
2930 return 0;
2931}
2932
/*
 * Fill the QP context and context mask for the RTR -> RTS transition.
 *
 * In the v2 engine a modify-qp mailbox carries both a context and a
 * context mask: a context field takes effect only where the matching
 * mask bits are cleared to 0 (the caller pre-fills the mask with 0xff).
 *
 * Returns 0 on success, -EINVAL when the QP buffer MTTs cannot be found
 * or when a disallowed optional attribute is set for this transition.
 */
static int modify_qp_rtr_to_rts(struct ib_qp *ibqp,
				const struct ib_qp_attr *attr, int attr_mask,
				struct hns_roce_v2_qp_context *context,
				struct hns_roce_v2_qp_context *qpc_mask)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
	struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
	struct device *dev = hr_dev->dev;
	dma_addr_t dma_handle;
	u32 page_size;
	u64 *mtts;

	/* Search qp buf's mtts */
	mtts = hns_roce_table_find(hr_dev, &hr_dev->mr_table.mtt_table,
				   hr_qp->mtt.first_seg, &dma_handle);
	if (!mtts) {
		dev_err(dev, "qp buf pa find failed\n");
		return -EINVAL;
	}

	/* If exist optional param, return error */
	if ((attr_mask & IB_QP_ALT_PATH) || (attr_mask & IB_QP_ACCESS_FLAGS) ||
	    (attr_mask & IB_QP_QKEY) || (attr_mask & IB_QP_PATH_MIG_STATE) ||
	    (attr_mask & IB_QP_CUR_STATE) ||
	    (attr_mask & IB_QP_MIN_RNR_TIMER)) {
		dev_err(dev, "RTR2RTS attr_mask (0x%x)error\n", attr_mask);
		return -EINVAL;
	}

	/*
	 * In v2 engine, software pass context and context mask to hardware
	 * when modifying qp. If software need modify some fields in context,
	 * we should set all bits of the relevant fields in context mask to
	 * 0 at the same time, else set them to 0x1.
	 */
	roce_set_field(context->byte_60_qpst_mapid,
		       V2_QPC_BYTE_60_RTY_NUM_INI_BAK_M,
		       V2_QPC_BYTE_60_RTY_NUM_INI_BAK_S, attr->retry_cnt);
	roce_set_field(qpc_mask->byte_60_qpst_mapid,
		       V2_QPC_BYTE_60_RTY_NUM_INI_BAK_M,
		       V2_QPC_BYTE_60_RTY_NUM_INI_BAK_S, 0);

	/*
	 * SQ current block address: mtts[0] is the first page of the QP
	 * buffer; the low 32 bits go into sq_cur_blk_addr and the high
	 * bits into byte_168.
	 */
	context->sq_cur_blk_addr = (u32)(mtts[0] >> PAGE_ADDR_SHIFT);
	roce_set_field(context->byte_168_irrl_idx,
		       V2_QPC_BYTE_168_SQ_CUR_BLK_ADDR_M,
		       V2_QPC_BYTE_168_SQ_CUR_BLK_ADDR_S,
		       mtts[0] >> (32 + PAGE_ADDR_SHIFT));
	qpc_mask->sq_cur_blk_addr = 0;
	roce_set_field(qpc_mask->byte_168_irrl_idx,
		       V2_QPC_BYTE_168_SQ_CUR_BLK_ADDR_M,
		       V2_QPC_BYTE_168_SQ_CUR_BLK_ADDR_S, 0);

	/*
	 * Extended SGE region: only GSI QPs and QPs with more than 2 SGEs
	 * keep a separate extended-SGE buffer (hr_qp->sge.offset into the
	 * MTT pages); otherwise the current-SGE block address stays 0.
	 */
	page_size = 1 << (hr_dev->caps.mtt_buf_pg_sz + PAGE_SHIFT);
	context->sq_cur_sge_blk_addr =
		       ((ibqp->qp_type == IB_QPT_GSI) || hr_qp->sq.max_gs > 2) ?
				      ((u32)(mtts[hr_qp->sge.offset / page_size]
				      >> PAGE_ADDR_SHIFT)) : 0;
	roce_set_field(context->byte_184_irrl_idx,
		       V2_QPC_BYTE_184_SQ_CUR_SGE_BLK_ADDR_M,
		       V2_QPC_BYTE_184_SQ_CUR_SGE_BLK_ADDR_S,
		       ((ibqp->qp_type == IB_QPT_GSI) || hr_qp->sq.max_gs > 2) ?
		       (mtts[hr_qp->sge.offset / page_size] >>
		       (32 + PAGE_ADDR_SHIFT)) : 0);
	qpc_mask->sq_cur_sge_blk_addr = 0;
	roce_set_field(qpc_mask->byte_184_irrl_idx,
		       V2_QPC_BYTE_184_SQ_CUR_SGE_BLK_ADDR_M,
		       V2_QPC_BYTE_184_SQ_CUR_SGE_BLK_ADDR_S, 0);

	/* RX-side view of the SQ current block uses the same first page */
	context->rx_sq_cur_blk_addr = (u32)(mtts[0] >> PAGE_ADDR_SHIFT);
	roce_set_field(context->byte_232_irrl_sge,
		       V2_QPC_BYTE_232_RX_SQ_CUR_BLK_ADDR_M,
		       V2_QPC_BYTE_232_RX_SQ_CUR_BLK_ADDR_S,
		       mtts[0] >> (32 + PAGE_ADDR_SHIFT));
	qpc_mask->rx_sq_cur_blk_addr = 0;
	roce_set_field(qpc_mask->byte_232_irrl_sge,
		       V2_QPC_BYTE_232_RX_SQ_CUR_BLK_ADDR_M,
		       V2_QPC_BYTE_232_RX_SQ_CUR_BLK_ADDR_S, 0);

	/*
	 * Set some fields in context to zero, Because the default values
	 * of all fields in context are zero, we need not set them to 0 again.
	 * but we should set the relevant fields of context mask to 0.
	 */
	roce_set_field(qpc_mask->byte_232_irrl_sge,
		       V2_QPC_BYTE_232_IRRL_SGE_IDX_M,
		       V2_QPC_BYTE_232_IRRL_SGE_IDX_S, 0);

	roce_set_field(qpc_mask->byte_240_irrl_tail,
		       V2_QPC_BYTE_240_RX_ACK_MSN_M,
		       V2_QPC_BYTE_240_RX_ACK_MSN_S, 0);

	/* Expected ACK PSN starts at the requested send PSN */
	roce_set_field(context->byte_244_rnr_rxack,
		       V2_QPC_BYTE_244_RX_ACK_EPSN_M,
		       V2_QPC_BYTE_244_RX_ACK_EPSN_S, attr->sq_psn);
	roce_set_field(qpc_mask->byte_244_rnr_rxack,
		       V2_QPC_BYTE_244_RX_ACK_EPSN_M,
		       V2_QPC_BYTE_244_RX_ACK_EPSN_S, 0);

	roce_set_field(qpc_mask->byte_248_ack_psn,
		       V2_QPC_BYTE_248_ACK_LAST_OPTYPE_M,
		       V2_QPC_BYTE_248_ACK_LAST_OPTYPE_S, 0);
	roce_set_bit(qpc_mask->byte_248_ack_psn,
		     V2_QPC_BYTE_248_IRRL_PSN_VLD_S, 0);
	roce_set_field(qpc_mask->byte_248_ack_psn,
		       V2_QPC_BYTE_248_IRRL_PSN_M,
		       V2_QPC_BYTE_248_IRRL_PSN_S, 0);

	roce_set_field(qpc_mask->byte_240_irrl_tail,
		       V2_QPC_BYTE_240_IRRL_TAIL_REAL_M,
		       V2_QPC_BYTE_240_IRRL_TAIL_REAL_S, 0);

	/*
	 * All retry-related PSN fields are seeded with sq_psn; the 16-bit
	 * split across byte_220/byte_224 carries the low and high halves.
	 */
	roce_set_field(context->byte_220_retry_psn_msn,
		       V2_QPC_BYTE_220_RETRY_MSG_PSN_M,
		       V2_QPC_BYTE_220_RETRY_MSG_PSN_S, attr->sq_psn);
	roce_set_field(qpc_mask->byte_220_retry_psn_msn,
		       V2_QPC_BYTE_220_RETRY_MSG_PSN_M,
		       V2_QPC_BYTE_220_RETRY_MSG_PSN_S, 0);

	roce_set_field(context->byte_224_retry_msg,
		       V2_QPC_BYTE_224_RETRY_MSG_PSN_M,
		       V2_QPC_BYTE_224_RETRY_MSG_PSN_S, attr->sq_psn >> 16);
	roce_set_field(qpc_mask->byte_224_retry_msg,
		       V2_QPC_BYTE_224_RETRY_MSG_PSN_M,
		       V2_QPC_BYTE_224_RETRY_MSG_PSN_S, 0);

	roce_set_field(context->byte_224_retry_msg,
		       V2_QPC_BYTE_224_RETRY_MSG_FPKT_PSN_M,
		       V2_QPC_BYTE_224_RETRY_MSG_FPKT_PSN_S, attr->sq_psn);
	roce_set_field(qpc_mask->byte_224_retry_msg,
		       V2_QPC_BYTE_224_RETRY_MSG_FPKT_PSN_M,
		       V2_QPC_BYTE_224_RETRY_MSG_FPKT_PSN_S, 0);

	roce_set_field(qpc_mask->byte_220_retry_psn_msn,
		       V2_QPC_BYTE_220_RETRY_MSG_MSN_M,
		       V2_QPC_BYTE_220_RETRY_MSG_MSN_S, 0);

	roce_set_bit(qpc_mask->byte_248_ack_psn,
		     V2_QPC_BYTE_248_RNR_RETRY_FLAG_S, 0);

	roce_set_field(qpc_mask->byte_212_lsn, V2_QPC_BYTE_212_CHECK_FLG_M,
		       V2_QPC_BYTE_212_CHECK_FLG_S, 0);

	/* Retry and RNR-retry budgets come straight from the attributes */
	roce_set_field(context->byte_212_lsn, V2_QPC_BYTE_212_RETRY_CNT_M,
		       V2_QPC_BYTE_212_RETRY_CNT_S, attr->retry_cnt);
	roce_set_field(qpc_mask->byte_212_lsn, V2_QPC_BYTE_212_RETRY_CNT_M,
		       V2_QPC_BYTE_212_RETRY_CNT_S, 0);

	roce_set_field(context->byte_212_lsn, V2_QPC_BYTE_212_RETRY_NUM_INIT_M,
		       V2_QPC_BYTE_212_RETRY_NUM_INIT_S, attr->retry_cnt);
	roce_set_field(qpc_mask->byte_212_lsn, V2_QPC_BYTE_212_RETRY_NUM_INIT_M,
		       V2_QPC_BYTE_212_RETRY_NUM_INIT_S, 0);

	roce_set_field(context->byte_244_rnr_rxack,
		       V2_QPC_BYTE_244_RNR_NUM_INIT_M,
		       V2_QPC_BYTE_244_RNR_NUM_INIT_S, attr->rnr_retry);
	roce_set_field(qpc_mask->byte_244_rnr_rxack,
		       V2_QPC_BYTE_244_RNR_NUM_INIT_M,
		       V2_QPC_BYTE_244_RNR_NUM_INIT_S, 0);

	roce_set_field(context->byte_244_rnr_rxack, V2_QPC_BYTE_244_RNR_CNT_M,
		       V2_QPC_BYTE_244_RNR_CNT_S, attr->rnr_retry);
	roce_set_field(qpc_mask->byte_244_rnr_rxack, V2_QPC_BYTE_244_RNR_CNT_M,
		       V2_QPC_BYTE_244_RNR_CNT_S, 0);

	/*
	 * LSN seeded with 0x100 — vendor-chosen initial limit sequence
	 * number; presumably mandated by the hw spec, confirm if changed.
	 */
	roce_set_field(context->byte_212_lsn, V2_QPC_BYTE_212_LSN_M,
		       V2_QPC_BYTE_212_LSN_S, 0x100);
	roce_set_field(qpc_mask->byte_212_lsn, V2_QPC_BYTE_212_LSN_M,
		       V2_QPC_BYTE_212_LSN_S, 0);

	/* ACK timeout is optional for RTR -> RTS */
	if (attr_mask & IB_QP_TIMEOUT) {
		roce_set_field(context->byte_28_at_fl, V2_QPC_BYTE_28_AT_M,
			       V2_QPC_BYTE_28_AT_S, attr->timeout);
		roce_set_field(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_AT_M,
			       V2_QPC_BYTE_28_AT_S, 0);
	}

	roce_set_field(context->byte_28_at_fl, V2_QPC_BYTE_28_SL_M,
		       V2_QPC_BYTE_28_SL_S,
		       rdma_ah_get_sl(&attr->ah_attr));
	roce_set_field(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_SL_M,
		       V2_QPC_BYTE_28_SL_S, 0);
	hr_qp->sl = rdma_ah_get_sl(&attr->ah_attr);

	roce_set_field(context->byte_172_sq_psn, V2_QPC_BYTE_172_SQ_CUR_PSN_M,
		       V2_QPC_BYTE_172_SQ_CUR_PSN_S, attr->sq_psn);
	roce_set_field(qpc_mask->byte_172_sq_psn, V2_QPC_BYTE_172_SQ_CUR_PSN_M,
		       V2_QPC_BYTE_172_SQ_CUR_PSN_S, 0);

	roce_set_field(qpc_mask->byte_196_sq_psn, V2_QPC_BYTE_196_IRRL_HEAD_M,
		       V2_QPC_BYTE_196_IRRL_HEAD_S, 0);
	roce_set_field(context->byte_196_sq_psn, V2_QPC_BYTE_196_SQ_MAX_PSN_M,
		       V2_QPC_BYTE_196_SQ_MAX_PSN_S, attr->sq_psn);
	roce_set_field(qpc_mask->byte_196_sq_psn, V2_QPC_BYTE_196_SQ_MAX_PSN_M,
		       V2_QPC_BYTE_196_SQ_MAX_PSN_S, 0);

	/* Outstanding RDMA-read/atomic depth, encoded as log2 (fls) */
	if ((attr_mask & IB_QP_MAX_QP_RD_ATOMIC) && attr->max_rd_atomic) {
		roce_set_field(context->byte_208_irrl, V2_QPC_BYTE_208_SR_MAX_M,
			       V2_QPC_BYTE_208_SR_MAX_S,
			       fls(attr->max_rd_atomic - 1));
		roce_set_field(qpc_mask->byte_208_irrl,
			       V2_QPC_BYTE_208_SR_MAX_M,
			       V2_QPC_BYTE_208_SR_MAX_S, 0);
	}
	return 0;
}
3138
/*
 * Core modify-QP entry point for hw v2.
 *
 * Builds a context/mask pair for the requested state transition, mails
 * it to hardware, then updates the software QP bookkeeping. Transitions
 * with no extra context programming (e.g. any-state -> ERR/RESET) fall
 * through with only the QP-state field updated.
 *
 * Returns 0 on success or a negative errno; illegal transitions return
 * -EINVAL (the initial value of ret).
 */
static int hns_roce_v2_modify_qp(struct ib_qp *ibqp,
				 const struct ib_qp_attr *attr,
				 int attr_mask, enum ib_qp_state cur_state,
				 enum ib_qp_state new_state)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
	struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
	struct hns_roce_v2_qp_context *context;
	struct hns_roce_v2_qp_context *qpc_mask;
	struct device *dev = hr_dev->dev;
	int ret = -EINVAL;

	/* One allocation holds both the context and, right after it, the
	 * context mask (qpc_mask = context + 1).
	 */
	context = kzalloc(2 * sizeof(*context), GFP_KERNEL);
	if (!context)
		return -ENOMEM;

	qpc_mask = context + 1;
	/*
	 * In v2 engine, software pass context and context mask to hardware
	 * when modifying qp. If software need modify some fields in context,
	 * we should set all bits of the relevant fields in context mask to
	 * 0 at the same time, else set them to 0x1.
	 */
	memset(qpc_mask, 0xff, sizeof(*qpc_mask));
	if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
		modify_qp_reset_to_init(ibqp, attr, attr_mask, context,
					qpc_mask);
	} else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_INIT) {
		modify_qp_init_to_init(ibqp, attr, attr_mask, context,
				       qpc_mask);
	} else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR) {
		ret = modify_qp_init_to_rtr(ibqp, attr, attr_mask, context,
					    qpc_mask);
		if (ret)
			goto out;
	} else if (cur_state == IB_QPS_RTR && new_state == IB_QPS_RTS) {
		ret = modify_qp_rtr_to_rts(ibqp, attr, attr_mask, context,
					   qpc_mask);
		if (ret)
			goto out;
	} else if ((cur_state == IB_QPS_RTS && new_state == IB_QPS_RTS) ||
		   (cur_state == IB_QPS_SQE && new_state == IB_QPS_RTS) ||
		   (cur_state == IB_QPS_RTS && new_state == IB_QPS_SQD) ||
		   (cur_state == IB_QPS_SQD && new_state == IB_QPS_SQD) ||
		   (cur_state == IB_QPS_SQD && new_state == IB_QPS_RTS) ||
		   (cur_state == IB_QPS_INIT && new_state == IB_QPS_RESET) ||
		   (cur_state == IB_QPS_RTR && new_state == IB_QPS_RESET) ||
		   (cur_state == IB_QPS_RTS && new_state == IB_QPS_RESET) ||
		   (cur_state == IB_QPS_ERR && new_state == IB_QPS_RESET) ||
		   (cur_state == IB_QPS_INIT && new_state == IB_QPS_ERR) ||
		   (cur_state == IB_QPS_RTR && new_state == IB_QPS_ERR) ||
		   (cur_state == IB_QPS_RTS && new_state == IB_QPS_ERR) ||
		   (cur_state == IB_QPS_SQD && new_state == IB_QPS_ERR) ||
		   (cur_state == IB_QPS_SQE && new_state == IB_QPS_ERR)) {
		/* Nothing */
		;
	} else {
		dev_err(dev, "Illegal state for QP!\n");
		goto out;
	}

	if (attr_mask & (IB_QP_ACCESS_FLAGS | IB_QP_MAX_DEST_RD_ATOMIC))
		set_access_flags(hr_qp, context, qpc_mask, attr, attr_mask);

	/* Every status migrate must change state */
	roce_set_field(context->byte_60_qpst_mapid, V2_QPC_BYTE_60_QP_ST_M,
		       V2_QPC_BYTE_60_QP_ST_S, new_state);
	roce_set_field(qpc_mask->byte_60_qpst_mapid, V2_QPC_BYTE_60_QP_ST_M,
		       V2_QPC_BYTE_60_QP_ST_S, 0);

	/* SW pass context to HW */
	ret = hns_roce_v2_qp_modify(hr_dev, &hr_qp->mtt, cur_state, new_state,
				    context, hr_qp);
	if (ret) {
		dev_err(dev, "hns_roce_qp_modify failed(%d)\n", ret);
		goto out;
	}

	hr_qp->state = new_state;

	/* Mirror the accepted attributes into the software QP state */
	if (attr_mask & IB_QP_ACCESS_FLAGS)
		hr_qp->atomic_rd_en = attr->qp_access_flags;

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
		hr_qp->resp_depth = attr->max_dest_rd_atomic;
	if (attr_mask & IB_QP_PORT) {
		hr_qp->port = attr->port_num - 1;
		hr_qp->phy_port = hr_dev->iboe.phy_port[hr_qp->port];
	}

	/* On a kernel-owned QP going to RESET, flush its CQEs and rewind
	 * all software queue indices; also clear the RQ record doorbell.
	 */
	if (new_state == IB_QPS_RESET && !ibqp->uobject) {
		hns_roce_v2_cq_clean(to_hr_cq(ibqp->recv_cq), hr_qp->qpn,
				     ibqp->srq ? to_hr_srq(ibqp->srq) : NULL);
		if (ibqp->send_cq != ibqp->recv_cq)
			hns_roce_v2_cq_clean(to_hr_cq(ibqp->send_cq),
					     hr_qp->qpn, NULL);

		hr_qp->rq.head = 0;
		hr_qp->rq.tail = 0;
		hr_qp->sq.head = 0;
		hr_qp->sq.tail = 0;
		hr_qp->sq_next_wqe = 0;
		hr_qp->next_sge = 0;
		if (hr_qp->rq.wqe_cnt)
			*hr_qp->rdb.db_record = 0;
	}

out:
	kfree(context);
	return ret;
}
3250
3251static inline enum ib_qp_state to_ib_qp_st(enum hns_roce_v2_qp_state state)
3252{
3253 switch (state) {
3254 case HNS_ROCE_QP_ST_RST: return IB_QPS_RESET;
3255 case HNS_ROCE_QP_ST_INIT: return IB_QPS_INIT;
3256 case HNS_ROCE_QP_ST_RTR: return IB_QPS_RTR;
3257 case HNS_ROCE_QP_ST_RTS: return IB_QPS_RTS;
3258 case HNS_ROCE_QP_ST_SQ_DRAINING:
3259 case HNS_ROCE_QP_ST_SQD: return IB_QPS_SQD;
3260 case HNS_ROCE_QP_ST_SQER: return IB_QPS_SQE;
3261 case HNS_ROCE_QP_ST_ERR: return IB_QPS_ERR;
3262 default: return -1;
3263 }
3264}
3265
3266static int hns_roce_v2_query_qpc(struct hns_roce_dev *hr_dev,
3267 struct hns_roce_qp *hr_qp,
3268 struct hns_roce_v2_qp_context *hr_context)
3269{
3270 struct hns_roce_cmd_mailbox *mailbox;
3271 int ret;
3272
3273 mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
3274 if (IS_ERR(mailbox))
3275 return PTR_ERR(mailbox);
3276
3277 ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, hr_qp->qpn, 0,
3278 HNS_ROCE_CMD_QUERY_QPC,
3279 HNS_ROCE_CMD_TIMEOUT_MSECS);
3280 if (ret) {
3281 dev_err(hr_dev->dev, "QUERY QP cmd process error\n");
3282 goto out;
3283 }
3284
3285 memcpy(hr_context, mailbox->buf, sizeof(*hr_context));
3286
3287out:
3288 hns_roce_free_cmd_mailbox(hr_dev, mailbox);
3289 return ret;
3290}
3291
/*
 * Query a QP's attributes: fetch the hardware QP context and unpack it
 * into the generic ib_qp_attr / ib_qp_init_attr structures.
 *
 * A QP already in RESET is reported directly from software state without
 * touching hardware. Returns 0 on success or a negative errno.
 */
static int hns_roce_v2_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
				int qp_attr_mask,
				struct ib_qp_init_attr *qp_init_attr)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
	struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
	struct hns_roce_v2_qp_context *context;
	struct device *dev = hr_dev->dev;
	int tmp_qp_state;
	int state;
	int ret;

	context = kzalloc(sizeof(*context), GFP_KERNEL);
	if (!context)
		return -ENOMEM;

	memset(qp_attr, 0, sizeof(*qp_attr));
	memset(qp_init_attr, 0, sizeof(*qp_init_attr));

	/* Serialize against concurrent modify/query on this QP */
	mutex_lock(&hr_qp->mutex);

	if (hr_qp->state == IB_QPS_RESET) {
		qp_attr->qp_state = IB_QPS_RESET;
		ret = 0;
		goto done;
	}

	ret = hns_roce_v2_query_qpc(hr_dev, hr_qp, context);
	if (ret) {
		dev_err(dev, "query qpc error\n");
		ret = -EINVAL;
		goto out;
	}

	/* Hardware state -> generic IB state; cache it in software too */
	state = roce_get_field(context->byte_60_qpst_mapid,
			       V2_QPC_BYTE_60_QP_ST_M, V2_QPC_BYTE_60_QP_ST_S);
	tmp_qp_state = to_ib_qp_st((enum hns_roce_v2_qp_state)state);
	if (tmp_qp_state == -1) {
		dev_err(dev, "Illegal ib_qp_state\n");
		ret = -EINVAL;
		goto out;
	}
	hr_qp->state = (u8)tmp_qp_state;
	qp_attr->qp_state = (enum ib_qp_state)hr_qp->state;
	qp_attr->path_mtu = (enum ib_mtu)roce_get_field(context->byte_24_mtu_tc,
							V2_QPC_BYTE_24_MTU_M,
							V2_QPC_BYTE_24_MTU_S);
	qp_attr->path_mig_state = IB_MIG_ARMED;
	qp_attr->ah_attr.type   = RDMA_AH_ATTR_TYPE_ROCE;
	if (hr_qp->ibqp.qp_type == IB_QPT_UD)
		qp_attr->qkey = V2_QKEY_VAL;

	qp_attr->rq_psn = roce_get_field(context->byte_108_rx_reqepsn,
					 V2_QPC_BYTE_108_RX_REQ_EPSN_M,
					 V2_QPC_BYTE_108_RX_REQ_EPSN_S);
	qp_attr->sq_psn = (u32)roce_get_field(context->byte_172_sq_psn,
					      V2_QPC_BYTE_172_SQ_CUR_PSN_M,
					      V2_QPC_BYTE_172_SQ_CUR_PSN_S);
	/* NOTE(review): the (u8) cast truncates a QPN wider than 8 bits —
	 * looks wrong for 24-bit QPNs; confirm against the hw field width.
	 */
	qp_attr->dest_qp_num = (u8)roce_get_field(context->byte_56_dqpn_err,
						  V2_QPC_BYTE_56_DQPN_M,
						  V2_QPC_BYTE_56_DQPN_S);
	/* Reassemble IB access flags from the RRE/RWE/ATE context bits */
	qp_attr->qp_access_flags = ((roce_get_bit(context->byte_76_srqn_op_en,
						  V2_QPC_BYTE_76_RRE_S)) << 2) |
				   ((roce_get_bit(context->byte_76_srqn_op_en,
						  V2_QPC_BYTE_76_RWE_S)) << 1) |
				   ((roce_get_bit(context->byte_76_srqn_op_en,
						  V2_QPC_BYTE_76_ATE_S)) << 3);
	/* Connected QPs carry address-vector data in the context */
	if (hr_qp->ibqp.qp_type == IB_QPT_RC ||
	    hr_qp->ibqp.qp_type == IB_QPT_UC) {
		struct ib_global_route *grh =
			rdma_ah_retrieve_grh(&qp_attr->ah_attr);

		rdma_ah_set_sl(&qp_attr->ah_attr,
			       roce_get_field(context->byte_28_at_fl,
					      V2_QPC_BYTE_28_SL_M,
					      V2_QPC_BYTE_28_SL_S));
		grh->flow_label = roce_get_field(context->byte_28_at_fl,
						 V2_QPC_BYTE_28_FL_M,
						 V2_QPC_BYTE_28_FL_S);
		grh->sgid_index = roce_get_field(context->byte_20_smac_sgid_idx,
						 V2_QPC_BYTE_20_SGID_IDX_M,
						 V2_QPC_BYTE_20_SGID_IDX_S);
		grh->hop_limit = roce_get_field(context->byte_24_mtu_tc,
						V2_QPC_BYTE_24_HOP_LIMIT_M,
						V2_QPC_BYTE_24_HOP_LIMIT_S);
		grh->traffic_class = roce_get_field(context->byte_24_mtu_tc,
						    V2_QPC_BYTE_24_TC_M,
						    V2_QPC_BYTE_24_TC_S);

		memcpy(grh->dgid.raw, context->dgid, sizeof(grh->dgid.raw));
	}

	qp_attr->port_num = hr_qp->port + 1;
	qp_attr->sq_draining = 0;
	/* SR_MAX / RR_MAX are stored as log2 — decode with 1 << field */
	qp_attr->max_rd_atomic = 1 << roce_get_field(context->byte_208_irrl,
						     V2_QPC_BYTE_208_SR_MAX_M,
						     V2_QPC_BYTE_208_SR_MAX_S);
	qp_attr->max_dest_rd_atomic = 1 << roce_get_field(context->byte_140_raq,
						     V2_QPC_BYTE_140_RR_MAX_M,
						     V2_QPC_BYTE_140_RR_MAX_S);
	qp_attr->min_rnr_timer = (u8)roce_get_field(context->byte_80_rnr_rx_cqn,
						 V2_QPC_BYTE_80_MIN_RNR_TIME_M,
						 V2_QPC_BYTE_80_MIN_RNR_TIME_S);
	qp_attr->timeout = (u8)roce_get_field(context->byte_28_at_fl,
					      V2_QPC_BYTE_28_AT_M,
					      V2_QPC_BYTE_28_AT_S);
	qp_attr->retry_cnt = roce_get_field(context->byte_212_lsn,
					    V2_QPC_BYTE_212_RETRY_CNT_M,
					    V2_QPC_BYTE_212_RETRY_CNT_S);
	/* NOTE(review): rnr_retry is read from rq_rnr_timer — verify this
	 * is the field hardware stores the RNR retry count in.
	 */
	qp_attr->rnr_retry = context->rq_rnr_timer;

done:
	qp_attr->cur_qp_state = qp_attr->qp_state;
	qp_attr->cap.max_recv_wr = hr_qp->rq.wqe_cnt;
	qp_attr->cap.max_recv_sge = hr_qp->rq.max_gs;

	/* SQ capacities are only reported for kernel-owned QPs */
	if (!ibqp->uobject) {
		qp_attr->cap.max_send_wr = hr_qp->sq.wqe_cnt;
		qp_attr->cap.max_send_sge = hr_qp->sq.max_gs;
	} else {
		qp_attr->cap.max_send_wr = 0;
		qp_attr->cap.max_send_sge = 0;
	}

	qp_init_attr->cap = qp_attr->cap;

out:
	mutex_unlock(&hr_qp->mutex);
	kfree(context);
	return ret;
}
3423
3424static int hns_roce_v2_destroy_qp_common(struct hns_roce_dev *hr_dev,
3425 struct hns_roce_qp *hr_qp,
3426 int is_user)
3427{
3428 struct hns_roce_cq *send_cq, *recv_cq;
3429 struct device *dev = hr_dev->dev;
3430 int ret;
3431
3432 if (hr_qp->ibqp.qp_type == IB_QPT_RC && hr_qp->state != IB_QPS_RESET) {
3433 /* Modify qp to reset before destroying qp */
3434 ret = hns_roce_v2_modify_qp(&hr_qp->ibqp, NULL, 0,
3435 hr_qp->state, IB_QPS_RESET);
3436 if (ret) {
3437 dev_err(dev, "modify QP %06lx to ERR failed.\n",
3438 hr_qp->qpn);
3439 return ret;
3440 }
3441 }
3442
3443 send_cq = to_hr_cq(hr_qp->ibqp.send_cq);
3444 recv_cq = to_hr_cq(hr_qp->ibqp.recv_cq);
3445
3446 hns_roce_lock_cqs(send_cq, recv_cq);
3447
3448 if (!is_user) {
3449 __hns_roce_v2_cq_clean(recv_cq, hr_qp->qpn, hr_qp->ibqp.srq ?
3450 to_hr_srq(hr_qp->ibqp.srq) : NULL);
3451 if (send_cq != recv_cq)
3452 __hns_roce_v2_cq_clean(send_cq, hr_qp->qpn, NULL);
3453 }
3454
3455 hns_roce_qp_remove(hr_dev, hr_qp);
3456
3457 hns_roce_unlock_cqs(send_cq, recv_cq);
3458
3459 hns_roce_qp_free(hr_dev, hr_qp);
3460
3461 /* Not special_QP, free their QPN */
3462 if ((hr_qp->ibqp.qp_type == IB_QPT_RC) ||
3463 (hr_qp->ibqp.qp_type == IB_QPT_UC) ||
3464 (hr_qp->ibqp.qp_type == IB_QPT_UD))
3465 hns_roce_release_range_qp(hr_dev, hr_qp->qpn, 1);
3466
3467 hns_roce_mtt_cleanup(hr_dev, &hr_qp->mtt);
3468
3469 if (is_user) {
e088a685
YL
3470 if (hr_qp->rq.wqe_cnt && (hr_qp->rdb_en == 1))
3471 hns_roce_db_unmap_user(
3472 to_hr_ucontext(hr_qp->ibqp.uobject->context),
3473 &hr_qp->rdb);
926a01dc
WHX
3474 ib_umem_release(hr_qp->umem);
3475 } else {
3476 kfree(hr_qp->sq.wrid);
3477 kfree(hr_qp->rq.wrid);
3478 hns_roce_buf_free(hr_dev, hr_qp->buff_size, &hr_qp->hr_buf);
3479 }
3480
0009c2db 3481 if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) {
3482 kfree(hr_qp->rq_inl_buf.wqe_list[0].sg_list);
3483 kfree(hr_qp->rq_inl_buf.wqe_list);
3484 }
3485
926a01dc
WHX
3486 return 0;
3487}
3488
3489static int hns_roce_v2_destroy_qp(struct ib_qp *ibqp)
3490{
3491 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
3492 struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
3493 int ret;
3494
3495 ret = hns_roce_v2_destroy_qp_common(hr_dev, hr_qp, !!ibqp->pd->uobject);
3496 if (ret) {
3497 dev_err(hr_dev->dev, "Destroy qp failed(%d)\n", ret);
3498 return ret;
3499 }
3500
3501 if (hr_qp->ibqp.qp_type == IB_QPT_GSI)
3502 kfree(hr_to_hr_sqp(hr_qp));
3503 else
3504 kfree(hr_qp);
3505
3506 return 0;
3507}
3508
b156269d 3509static int hns_roce_v2_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
3510{
3511 struct hns_roce_dev *hr_dev = to_hr_dev(cq->device);
3512 struct hns_roce_v2_cq_context *cq_context;
3513 struct hns_roce_cq *hr_cq = to_hr_cq(cq);
3514 struct hns_roce_v2_cq_context *cqc_mask;
3515 struct hns_roce_cmd_mailbox *mailbox;
3516 int ret;
3517
3518 mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
3519 if (IS_ERR(mailbox))
3520 return PTR_ERR(mailbox);
3521
3522 cq_context = mailbox->buf;
3523 cqc_mask = (struct hns_roce_v2_cq_context *)mailbox->buf + 1;
3524
3525 memset(cqc_mask, 0xff, sizeof(*cqc_mask));
3526
3527 roce_set_field(cq_context->byte_56_cqe_period_maxcnt,
3528 V2_CQC_BYTE_56_CQ_MAX_CNT_M, V2_CQC_BYTE_56_CQ_MAX_CNT_S,
3529 cq_count);
3530 roce_set_field(cqc_mask->byte_56_cqe_period_maxcnt,
3531 V2_CQC_BYTE_56_CQ_MAX_CNT_M, V2_CQC_BYTE_56_CQ_MAX_CNT_S,
3532 0);
3533 roce_set_field(cq_context->byte_56_cqe_period_maxcnt,
3534 V2_CQC_BYTE_56_CQ_PERIOD_M, V2_CQC_BYTE_56_CQ_PERIOD_S,
3535 cq_period);
3536 roce_set_field(cqc_mask->byte_56_cqe_period_maxcnt,
3537 V2_CQC_BYTE_56_CQ_PERIOD_M, V2_CQC_BYTE_56_CQ_PERIOD_S,
3538 0);
3539
3540 ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, hr_cq->cqn, 1,
3541 HNS_ROCE_CMD_MODIFY_CQC,
3542 HNS_ROCE_CMD_TIMEOUT_MSECS);
3543 hns_roce_free_cmd_mailbox(hr_dev, mailbox);
3544 if (ret)
3545 dev_err(hr_dev->dev, "MODIFY CQ Failed to cmd mailbox.\n");
3546
3547 return ret;
3548}
3549
a5073d60
YL
3550static void set_eq_cons_index_v2(struct hns_roce_eq *eq)
3551{
3552 u32 doorbell[2];
3553
3554 doorbell[0] = 0;
3555 doorbell[1] = 0;
3556
3557 if (eq->type_flag == HNS_ROCE_AEQ) {
3558 roce_set_field(doorbell[0], HNS_ROCE_V2_EQ_DB_CMD_M,
3559 HNS_ROCE_V2_EQ_DB_CMD_S,
3560 eq->arm_st == HNS_ROCE_V2_EQ_ALWAYS_ARMED ?
3561 HNS_ROCE_EQ_DB_CMD_AEQ :
3562 HNS_ROCE_EQ_DB_CMD_AEQ_ARMED);
3563 } else {
3564 roce_set_field(doorbell[0], HNS_ROCE_V2_EQ_DB_TAG_M,
3565 HNS_ROCE_V2_EQ_DB_TAG_S, eq->eqn);
3566
3567 roce_set_field(doorbell[0], HNS_ROCE_V2_EQ_DB_CMD_M,
3568 HNS_ROCE_V2_EQ_DB_CMD_S,
3569 eq->arm_st == HNS_ROCE_V2_EQ_ALWAYS_ARMED ?
3570 HNS_ROCE_EQ_DB_CMD_CEQ :
3571 HNS_ROCE_EQ_DB_CMD_CEQ_ARMED);
3572 }
3573
3574 roce_set_field(doorbell[1], HNS_ROCE_V2_EQ_DB_PARA_M,
3575 HNS_ROCE_V2_EQ_DB_PARA_S,
3576 (eq->cons_index & HNS_ROCE_V2_CONS_IDX_M));
3577
3578 hns_roce_write64_k(doorbell, eq->doorbell);
a5073d60
YL
3579}
3580
3581static void hns_roce_v2_wq_catas_err_handle(struct hns_roce_dev *hr_dev,
3582 struct hns_roce_aeqe *aeqe,
3583 u32 qpn)
3584{
3585 struct device *dev = hr_dev->dev;
3586 int sub_type;
3587
3588 dev_warn(dev, "Local work queue catastrophic error.\n");
3589 sub_type = roce_get_field(aeqe->asyn, HNS_ROCE_V2_AEQE_SUB_TYPE_M,
3590 HNS_ROCE_V2_AEQE_SUB_TYPE_S);
3591 switch (sub_type) {
3592 case HNS_ROCE_LWQCE_QPC_ERROR:
3593 dev_warn(dev, "QP %d, QPC error.\n", qpn);
3594 break;
3595 case HNS_ROCE_LWQCE_MTU_ERROR:
3596 dev_warn(dev, "QP %d, MTU error.\n", qpn);
3597 break;
3598 case HNS_ROCE_LWQCE_WQE_BA_ADDR_ERROR:
3599 dev_warn(dev, "QP %d, WQE BA addr error.\n", qpn);
3600 break;
3601 case HNS_ROCE_LWQCE_WQE_ADDR_ERROR:
3602 dev_warn(dev, "QP %d, WQE addr error.\n", qpn);
3603 break;
3604 case HNS_ROCE_LWQCE_SQ_WQE_SHIFT_ERROR:
3605 dev_warn(dev, "QP %d, WQE shift error.\n", qpn);
3606 break;
3607 default:
3608 dev_err(dev, "Unhandled sub_event type %d.\n", sub_type);
3609 break;
3610 }
3611}
3612
3613static void hns_roce_v2_local_wq_access_err_handle(struct hns_roce_dev *hr_dev,
3614 struct hns_roce_aeqe *aeqe, u32 qpn)
3615{
3616 struct device *dev = hr_dev->dev;
3617 int sub_type;
3618
3619 dev_warn(dev, "Local access violation work queue error.\n");
3620 sub_type = roce_get_field(aeqe->asyn, HNS_ROCE_V2_AEQE_SUB_TYPE_M,
3621 HNS_ROCE_V2_AEQE_SUB_TYPE_S);
3622 switch (sub_type) {
3623 case HNS_ROCE_LAVWQE_R_KEY_VIOLATION:
3624 dev_warn(dev, "QP %d, R_key violation.\n", qpn);
3625 break;
3626 case HNS_ROCE_LAVWQE_LENGTH_ERROR:
3627 dev_warn(dev, "QP %d, length error.\n", qpn);
3628 break;
3629 case HNS_ROCE_LAVWQE_VA_ERROR:
3630 dev_warn(dev, "QP %d, VA error.\n", qpn);
3631 break;
3632 case HNS_ROCE_LAVWQE_PD_ERROR:
3633 dev_err(dev, "QP %d, PD error.\n", qpn);
3634 break;
3635 case HNS_ROCE_LAVWQE_RW_ACC_ERROR:
3636 dev_warn(dev, "QP %d, rw acc error.\n", qpn);
3637 break;
3638 case HNS_ROCE_LAVWQE_KEY_STATE_ERROR:
3639 dev_warn(dev, "QP %d, key state error.\n", qpn);
3640 break;
3641 case HNS_ROCE_LAVWQE_MR_OPERATION_ERROR:
3642 dev_warn(dev, "QP %d, MR operation error.\n", qpn);
3643 break;
3644 default:
3645 dev_err(dev, "Unhandled sub_event type %d.\n", sub_type);
3646 break;
3647 }
3648}
3649
3650static void hns_roce_v2_qp_err_handle(struct hns_roce_dev *hr_dev,
3651 struct hns_roce_aeqe *aeqe,
3652 int event_type)
3653{
3654 struct device *dev = hr_dev->dev;
3655 u32 qpn;
3656
3657 qpn = roce_get_field(aeqe->event.qp_event.qp,
3658 HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_M,
3659 HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_S);
3660
3661 switch (event_type) {
3662 case HNS_ROCE_EVENT_TYPE_COMM_EST:
3663 dev_warn(dev, "Communication established.\n");
3664 break;
3665 case HNS_ROCE_EVENT_TYPE_SQ_DRAINED:
3666 dev_warn(dev, "Send queue drained.\n");
3667 break;
3668 case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
3669 hns_roce_v2_wq_catas_err_handle(hr_dev, aeqe, qpn);
3670 break;
3671 case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
3672 dev_warn(dev, "Invalid request local work queue error.\n");
3673 break;
3674 case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
3675 hns_roce_v2_local_wq_access_err_handle(hr_dev, aeqe, qpn);
3676 break;
3677 default:
3678 break;
3679 }
3680
3681 hns_roce_qp_event(hr_dev, qpn, event_type);
3682}
3683
3684static void hns_roce_v2_cq_err_handle(struct hns_roce_dev *hr_dev,
3685 struct hns_roce_aeqe *aeqe,
3686 int event_type)
3687{
3688 struct device *dev = hr_dev->dev;
3689 u32 cqn;
3690
3691 cqn = roce_get_field(aeqe->event.cq_event.cq,
3692 HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_M,
3693 HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_S);
3694
3695 switch (event_type) {
3696 case HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR:
3697 dev_warn(dev, "CQ 0x%x access err.\n", cqn);
3698 break;
3699 case HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW:
3700 dev_warn(dev, "CQ 0x%x overflow\n", cqn);
3701 break;
3702 default:
3703 break;
3704 }
3705
3706 hns_roce_cq_event(hr_dev, cqn, event_type);
3707}
3708
3709static struct hns_roce_aeqe *get_aeqe_v2(struct hns_roce_eq *eq, u32 entry)
3710{
3711 u32 buf_chk_sz;
3712 unsigned long off;
3713
3714 buf_chk_sz = 1 << (eq->eqe_buf_pg_sz + PAGE_SHIFT);
3715 off = (entry & (eq->entries - 1)) * HNS_ROCE_AEQ_ENTRY_SIZE;
3716
3717 return (struct hns_roce_aeqe *)((char *)(eq->buf_list->buf) +
3718 off % buf_chk_sz);
3719}
3720
3721static struct hns_roce_aeqe *mhop_get_aeqe(struct hns_roce_eq *eq, u32 entry)
3722{
3723 u32 buf_chk_sz;
3724 unsigned long off;
3725
3726 buf_chk_sz = 1 << (eq->eqe_buf_pg_sz + PAGE_SHIFT);
3727
3728 off = (entry & (eq->entries - 1)) * HNS_ROCE_AEQ_ENTRY_SIZE;
3729
3730 if (eq->hop_num == HNS_ROCE_HOP_NUM_0)
3731 return (struct hns_roce_aeqe *)((u8 *)(eq->bt_l0) +
3732 off % buf_chk_sz);
3733 else
3734 return (struct hns_roce_aeqe *)((u8 *)
3735 (eq->buf[off / buf_chk_sz]) + off % buf_chk_sz);
3736}
3737
/* Return the AEQE at the current consumer index if hardware owns it for us,
 * or NULL when the queue is empty.
 *
 * The owner bit in each entry toggles every full pass of the queue; the
 * software pass parity is (cons_index & entries), so a mismatch (XOR true)
 * means the entry was freshly written by hardware and is ready to consume.
 */
static struct hns_roce_aeqe *next_aeqe_sw_v2(struct hns_roce_eq *eq)
{
	struct hns_roce_aeqe *aeqe;

	/* Addressing differs between the linear and the multi-hop layout. */
	if (!eq->hop_num)
		aeqe = get_aeqe_v2(eq, eq->cons_index);
	else
		aeqe = mhop_get_aeqe(eq, eq->cons_index);

	return (roce_get_bit(aeqe->asyn, HNS_ROCE_V2_AEQ_AEQE_OWNER_S) ^
		!!(eq->cons_index & eq->entries)) ? aeqe : NULL;
}
3750
3751static int hns_roce_v2_aeq_int(struct hns_roce_dev *hr_dev,
3752 struct hns_roce_eq *eq)
3753{
3754 struct device *dev = hr_dev->dev;
3755 struct hns_roce_aeqe *aeqe;
3756 int aeqe_found = 0;
3757 int event_type;
3758
3759 while ((aeqe = next_aeqe_sw_v2(eq))) {
4044a3f4
YL
3760
3761 /* Make sure we read AEQ entry after we have checked the
3762 * ownership bit
3763 */
3764 dma_rmb();
a5073d60
YL
3765
3766 event_type = roce_get_field(aeqe->asyn,
3767 HNS_ROCE_V2_AEQE_EVENT_TYPE_M,
3768 HNS_ROCE_V2_AEQE_EVENT_TYPE_S);
3769
3770 switch (event_type) {
3771 case HNS_ROCE_EVENT_TYPE_PATH_MIG:
3772 dev_warn(dev, "Path migrated succeeded.\n");
3773 break;
3774 case HNS_ROCE_EVENT_TYPE_PATH_MIG_FAILED:
3775 dev_warn(dev, "Path migration failed.\n");
3776 break;
3777 case HNS_ROCE_EVENT_TYPE_COMM_EST:
3778 case HNS_ROCE_EVENT_TYPE_SQ_DRAINED:
3779 case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
3780 case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
3781 case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
3782 hns_roce_v2_qp_err_handle(hr_dev, aeqe, event_type);
3783 break;
3784 case HNS_ROCE_EVENT_TYPE_SRQ_LIMIT_REACH:
3785 case HNS_ROCE_EVENT_TYPE_SRQ_LAST_WQE_REACH:
3786 case HNS_ROCE_EVENT_TYPE_SRQ_CATAS_ERROR:
3787 dev_warn(dev, "SRQ not support.\n");
3788 break;
3789 case HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR:
3790 case HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW:
3791 hns_roce_v2_cq_err_handle(hr_dev, aeqe, event_type);
3792 break;
3793 case HNS_ROCE_EVENT_TYPE_DB_OVERFLOW:
3794 dev_warn(dev, "DB overflow.\n");
3795 break;
3796 case HNS_ROCE_EVENT_TYPE_MB:
3797 hns_roce_cmd_event(hr_dev,
3798 le16_to_cpu(aeqe->event.cmd.token),
3799 aeqe->event.cmd.status,
3800 le64_to_cpu(aeqe->event.cmd.out_param));
3801 break;
3802 case HNS_ROCE_EVENT_TYPE_CEQ_OVERFLOW:
3803 dev_warn(dev, "CEQ overflow.\n");
3804 break;
3805 case HNS_ROCE_EVENT_TYPE_FLR:
3806 dev_warn(dev, "Function level reset.\n");
3807 break;
3808 default:
3809 dev_err(dev, "Unhandled event %d on EQ %d at idx %u.\n",
3810 event_type, eq->eqn, eq->cons_index);
3811 break;
3812 };
3813
3814 ++eq->cons_index;
3815 aeqe_found = 1;
3816
3817 if (eq->cons_index > (2 * eq->entries - 1)) {
3818 dev_warn(dev, "cons_index overflow, set back to 0.\n");
3819 eq->cons_index = 0;
3820 }
3821 }
3822
3823 set_eq_cons_index_v2(eq);
3824 return aeqe_found;
3825}
3826
3827static struct hns_roce_ceqe *get_ceqe_v2(struct hns_roce_eq *eq, u32 entry)
3828{
3829 u32 buf_chk_sz;
3830 unsigned long off;
3831
3832 buf_chk_sz = 1 << (eq->eqe_buf_pg_sz + PAGE_SHIFT);
3833 off = (entry & (eq->entries - 1)) * HNS_ROCE_CEQ_ENTRY_SIZE;
3834
3835 return (struct hns_roce_ceqe *)((char *)(eq->buf_list->buf) +
3836 off % buf_chk_sz);
3837}
3838
3839static struct hns_roce_ceqe *mhop_get_ceqe(struct hns_roce_eq *eq, u32 entry)
3840{
3841 u32 buf_chk_sz;
3842 unsigned long off;
3843
3844 buf_chk_sz = 1 << (eq->eqe_buf_pg_sz + PAGE_SHIFT);
3845
3846 off = (entry & (eq->entries - 1)) * HNS_ROCE_CEQ_ENTRY_SIZE;
3847
3848 if (eq->hop_num == HNS_ROCE_HOP_NUM_0)
3849 return (struct hns_roce_ceqe *)((u8 *)(eq->bt_l0) +
3850 off % buf_chk_sz);
3851 else
3852 return (struct hns_roce_ceqe *)((u8 *)(eq->buf[off /
3853 buf_chk_sz]) + off % buf_chk_sz);
3854}
3855
/* Return the CEQE at the current consumer index if hardware has published
 * it, or NULL when the queue is empty.
 *
 * As for the AEQ, the entry owner bit toggles each pass; comparing it with
 * the software pass parity (cons_index & entries) via XOR detects a fresh
 * entry.
 */
static struct hns_roce_ceqe *next_ceqe_sw_v2(struct hns_roce_eq *eq)
{
	struct hns_roce_ceqe *ceqe;

	/* Addressing differs between the linear and the multi-hop layout. */
	if (!eq->hop_num)
		ceqe = get_ceqe_v2(eq, eq->cons_index);
	else
		ceqe = mhop_get_ceqe(eq, eq->cons_index);

	return (!!(roce_get_bit(ceqe->comp, HNS_ROCE_V2_CEQ_CEQE_OWNER_S))) ^
		(!!(eq->cons_index & eq->entries)) ? ceqe : NULL;
}
3868
3869static int hns_roce_v2_ceq_int(struct hns_roce_dev *hr_dev,
3870 struct hns_roce_eq *eq)
3871{
3872 struct device *dev = hr_dev->dev;
3873 struct hns_roce_ceqe *ceqe;
3874 int ceqe_found = 0;
3875 u32 cqn;
3876
3877 while ((ceqe = next_ceqe_sw_v2(eq))) {
3878
4044a3f4
YL
3879 /* Make sure we read CEQ entry after we have checked the
3880 * ownership bit
3881 */
3882 dma_rmb();
3883
a5073d60
YL
3884 cqn = roce_get_field(ceqe->comp,
3885 HNS_ROCE_V2_CEQE_COMP_CQN_M,
3886 HNS_ROCE_V2_CEQE_COMP_CQN_S);
3887
3888 hns_roce_cq_completion(hr_dev, cqn);
3889
3890 ++eq->cons_index;
3891 ceqe_found = 1;
3892
3893 if (eq->cons_index > (2 * eq->entries - 1)) {
3894 dev_warn(dev, "cons_index overflow, set back to 0.\n");
3895 eq->cons_index = 0;
3896 }
3897 }
3898
3899 set_eq_cons_index_v2(eq);
3900
3901 return ceqe_found;
3902}
3903
3904static irqreturn_t hns_roce_v2_msix_interrupt_eq(int irq, void *eq_ptr)
3905{
3906 struct hns_roce_eq *eq = eq_ptr;
3907 struct hns_roce_dev *hr_dev = eq->hr_dev;
3908 int int_work = 0;
3909
3910 if (eq->type_flag == HNS_ROCE_CEQ)
3911 /* Completion event interrupt */
3912 int_work = hns_roce_v2_ceq_int(hr_dev, eq);
3913 else
3914 /* Asychronous event interrupt */
3915 int_work = hns_roce_v2_aeq_int(hr_dev, eq);
3916
3917 return IRQ_RETVAL(int_work);
3918}
3919
3920static irqreturn_t hns_roce_v2_msix_interrupt_abn(int irq, void *dev_id)
3921{
3922 struct hns_roce_dev *hr_dev = dev_id;
3923 struct device *dev = hr_dev->dev;
3924 int int_work = 0;
3925 u32 int_st;
3926 u32 int_en;
3927
3928 /* Abnormal interrupt */
3929 int_st = roce_read(hr_dev, ROCEE_VF_ABN_INT_ST_REG);
3930 int_en = roce_read(hr_dev, ROCEE_VF_ABN_INT_EN_REG);
3931
3932 if (roce_get_bit(int_st, HNS_ROCE_V2_VF_INT_ST_AEQ_OVERFLOW_S)) {
3933 dev_err(dev, "AEQ overflow!\n");
3934
3935 roce_set_bit(int_st, HNS_ROCE_V2_VF_INT_ST_AEQ_OVERFLOW_S, 1);
3936 roce_write(hr_dev, ROCEE_VF_ABN_INT_ST_REG, int_st);
3937
a5073d60
YL
3938 roce_set_bit(int_en, HNS_ROCE_V2_VF_ABN_INT_EN_S, 1);
3939 roce_write(hr_dev, ROCEE_VF_ABN_INT_EN_REG, int_en);
3940
3941 int_work = 1;
3942 } else if (roce_get_bit(int_st, HNS_ROCE_V2_VF_INT_ST_BUS_ERR_S)) {
3943 dev_err(dev, "BUS ERR!\n");
3944
3945 roce_set_bit(int_st, HNS_ROCE_V2_VF_INT_ST_BUS_ERR_S, 1);
3946 roce_write(hr_dev, ROCEE_VF_ABN_INT_ST_REG, int_st);
3947
a5073d60
YL
3948 roce_set_bit(int_en, HNS_ROCE_V2_VF_ABN_INT_EN_S, 1);
3949 roce_write(hr_dev, ROCEE_VF_ABN_INT_EN_REG, int_en);
3950
3951 int_work = 1;
3952 } else if (roce_get_bit(int_st, HNS_ROCE_V2_VF_INT_ST_OTHER_ERR_S)) {
3953 dev_err(dev, "OTHER ERR!\n");
3954
3955 roce_set_bit(int_st, HNS_ROCE_V2_VF_INT_ST_OTHER_ERR_S, 1);
3956 roce_write(hr_dev, ROCEE_VF_ABN_INT_ST_REG, int_st);
3957
a5073d60
YL
3958 roce_set_bit(int_en, HNS_ROCE_V2_VF_ABN_INT_EN_S, 1);
3959 roce_write(hr_dev, ROCEE_VF_ABN_INT_EN_REG, int_en);
3960
3961 int_work = 1;
3962 } else
3963 dev_err(dev, "There is no abnormal irq found!\n");
3964
3965 return IRQ_RETVAL(int_work);
3966}
3967
3968static void hns_roce_v2_int_mask_enable(struct hns_roce_dev *hr_dev,
3969 int eq_num, int enable_flag)
3970{
3971 int i;
3972
3973 if (enable_flag == EQ_ENABLE) {
3974 for (i = 0; i < eq_num; i++)
3975 roce_write(hr_dev, ROCEE_VF_EVENT_INT_EN_REG +
3976 i * EQ_REG_OFFSET,
3977 HNS_ROCE_V2_VF_EVENT_INT_EN_M);
3978
3979 roce_write(hr_dev, ROCEE_VF_ABN_INT_EN_REG,
3980 HNS_ROCE_V2_VF_ABN_INT_EN_M);
3981 roce_write(hr_dev, ROCEE_VF_ABN_INT_CFG_REG,
3982 HNS_ROCE_V2_VF_ABN_INT_CFG_M);
3983 } else {
3984 for (i = 0; i < eq_num; i++)
3985 roce_write(hr_dev, ROCEE_VF_EVENT_INT_EN_REG +
3986 i * EQ_REG_OFFSET,
3987 HNS_ROCE_V2_VF_EVENT_INT_EN_M & 0x0);
3988
3989 roce_write(hr_dev, ROCEE_VF_ABN_INT_EN_REG,
3990 HNS_ROCE_V2_VF_ABN_INT_EN_M & 0x0);
3991 roce_write(hr_dev, ROCEE_VF_ABN_INT_CFG_REG,
3992 HNS_ROCE_V2_VF_ABN_INT_CFG_M & 0x0);
3993 }
3994}
3995
3996static void hns_roce_v2_destroy_eqc(struct hns_roce_dev *hr_dev, int eqn)
3997{
3998 struct device *dev = hr_dev->dev;
3999 int ret;
4000
4001 if (eqn < hr_dev->caps.num_comp_vectors)
4002 ret = hns_roce_cmd_mbox(hr_dev, 0, 0, eqn & HNS_ROCE_V2_EQN_M,
4003 0, HNS_ROCE_CMD_DESTROY_CEQC,
4004 HNS_ROCE_CMD_TIMEOUT_MSECS);
4005 else
4006 ret = hns_roce_cmd_mbox(hr_dev, 0, 0, eqn & HNS_ROCE_V2_EQN_M,
4007 0, HNS_ROCE_CMD_DESTROY_AEQC,
4008 HNS_ROCE_CMD_TIMEOUT_MSECS);
4009 if (ret)
4010 dev_err(dev, "[mailbox cmd] destroy eqc(%d) failed.\n", eqn);
4011}
4012
/* Free all DMA memory of a multi-hop EQ: the L0 base-address table, the
 * L1 tables (hop 2 only) and every buffer chunk, then the bookkeeping
 * arrays.  Mirrors the layout built by hns_roce_mhop_alloc_eq().
 *
 * NOTE(review): in the hop-2 path the last partial chunk is freed with its
 * exact size while intermediate chunks use buf_chk_sz — presumed to match
 * the sizes passed to dma_alloc_coherent(); verify against the alloc path.
 */
static void hns_roce_mhop_free_eq(struct hns_roce_dev *hr_dev,
				  struct hns_roce_eq *eq)
{
	struct device *dev = hr_dev->dev;
	u64 idx;
	u64 size;
	u32 buf_chk_sz;
	u32 bt_chk_sz;
	u32 mhop_num;
	int eqe_alloc;
	int ba_num;
	int i = 0;
	int j = 0;

	mhop_num = hr_dev->caps.eqe_hop_num;
	buf_chk_sz = 1 << (hr_dev->caps.eqe_buf_pg_sz + PAGE_SHIFT);
	bt_chk_sz = 1 << (hr_dev->caps.eqe_ba_pg_sz + PAGE_SHIFT);
	/* Number of buffer chunks the queue occupies. */
	ba_num = (PAGE_ALIGN(eq->entries * eq->eqe_size) + buf_chk_sz - 1) /
		  buf_chk_sz;

	/* hop_num = 0: single contiguous buffer held in bt_l0 */
	if (mhop_num == HNS_ROCE_HOP_NUM_0) {
		dma_free_coherent(dev, (unsigned int)(eq->entries *
				  eq->eqe_size), eq->bt_l0, eq->l0_dma);
		return;
	}

	/* hop_num = 1 or hop = 2: L0 table first, then the chunks below it */
	dma_free_coherent(dev, bt_chk_sz, eq->bt_l0, eq->l0_dma);
	if (mhop_num == 1) {
		for (i = 0; i < eq->l0_last_num; i++) {
			/* The last chunk may be partial: free its real size. */
			if (i == eq->l0_last_num - 1) {
				eqe_alloc = i * (buf_chk_sz / eq->eqe_size);
				size = (eq->entries - eqe_alloc) * eq->eqe_size;
				dma_free_coherent(dev, size, eq->buf[i],
						  eq->buf_dma[i]);
				break;
			}
			dma_free_coherent(dev, buf_chk_sz, eq->buf[i],
					  eq->buf_dma[i]);
		}
	} else if (mhop_num == 2) {
		for (i = 0; i < eq->l0_last_num; i++) {
			dma_free_coherent(dev, bt_chk_sz, eq->bt_l1[i],
					  eq->l1_dma[i]);

			/* Each L1 table references bt_chk_sz/8 chunks. */
			for (j = 0; j < bt_chk_sz / 8; j++) {
				idx = i * (bt_chk_sz / 8) + j;
				/* Last chunk of the last L1: partial size. */
				if ((i == eq->l0_last_num - 1)
				     && j == eq->l1_last_num - 1) {
					eqe_alloc = (buf_chk_sz / eq->eqe_size)
						    * idx;
					size = (eq->entries - eqe_alloc)
						* eq->eqe_size;
					dma_free_coherent(dev, size,
							  eq->buf[idx],
							  eq->buf_dma[idx]);
					break;
				}
				dma_free_coherent(dev, buf_chk_sz, eq->buf[idx],
						  eq->buf_dma[idx]);
			}
		}
	}
	kfree(eq->buf_dma);
	kfree(eq->buf);
	kfree(eq->l1_dma);
	kfree(eq->bt_l1);
	eq->buf_dma = NULL;
	eq->buf = NULL;
	eq->l1_dma = NULL;
	eq->bt_l1 = NULL;
}
4086
4087static void hns_roce_v2_free_eq(struct hns_roce_dev *hr_dev,
4088 struct hns_roce_eq *eq)
4089{
4090 u32 buf_chk_sz;
4091
4092 buf_chk_sz = 1 << (eq->eqe_buf_pg_sz + PAGE_SHIFT);
4093
4094 if (hr_dev->caps.eqe_hop_num) {
4095 hns_roce_mhop_free_eq(hr_dev, eq);
4096 return;
4097 }
4098
4099 if (eq->buf_list)
4100 dma_free_coherent(hr_dev->dev, buf_chk_sz,
4101 eq->buf_list->buf, eq->buf_list->map);
4102}
4103
/* Fill the mailbox buffer @mb_buf with an EQ context (EQC) describing @eq
 * so firmware can create the hardware event queue.  Also finalizes the
 * software-side fields (doorbell address, hop number, shift, base address)
 * that the context fields are derived from.
 */
static void hns_roce_config_eqc(struct hns_roce_dev *hr_dev,
				struct hns_roce_eq *eq,
				void *mb_buf)
{
	struct hns_roce_eq_context *eqc;

	eqc = mb_buf;
	memset(eqc, 0, sizeof(struct hns_roce_eq_context));

	/* init eqc */
	eq->doorbell = hr_dev->reg_base + ROCEE_VF_EQ_DB_CFG0_REG;
	eq->hop_num = hr_dev->caps.eqe_hop_num;
	eq->cons_index = 0;
	eq->over_ignore = HNS_ROCE_V2_EQ_OVER_IGNORE_0;
	eq->coalesce = HNS_ROCE_V2_EQ_COALESCE_0;
	eq->arm_st = HNS_ROCE_V2_EQ_ALWAYS_ARMED;
	eq->eqe_ba_pg_sz = hr_dev->caps.eqe_ba_pg_sz;
	eq->eqe_buf_pg_sz = hr_dev->caps.eqe_buf_pg_sz;
	eq->shift = ilog2((unsigned int)eq->entries);

	/* No hop: base is the single linear buffer; otherwise the L0 table */
	if (!eq->hop_num)
		eq->eqe_ba = eq->buf_list->map;
	else
		eq->eqe_ba = eq->l0_dma;

	/* set eqc state */
	roce_set_field(eqc->byte_4,
		       HNS_ROCE_EQC_EQ_ST_M,
		       HNS_ROCE_EQC_EQ_ST_S,
		       HNS_ROCE_V2_EQ_STATE_VALID);

	/* set eqe hop num */
	roce_set_field(eqc->byte_4,
		       HNS_ROCE_EQC_HOP_NUM_M,
		       HNS_ROCE_EQC_HOP_NUM_S, eq->hop_num);

	/* set eqc over_ignore */
	roce_set_field(eqc->byte_4,
		       HNS_ROCE_EQC_OVER_IGNORE_M,
		       HNS_ROCE_EQC_OVER_IGNORE_S, eq->over_ignore);

	/* set eqc coalesce */
	roce_set_field(eqc->byte_4,
		       HNS_ROCE_EQC_COALESCE_M,
		       HNS_ROCE_EQC_COALESCE_S, eq->coalesce);

	/* set eqc arm_state */
	roce_set_field(eqc->byte_4,
		       HNS_ROCE_EQC_ARM_ST_M,
		       HNS_ROCE_EQC_ARM_ST_S, eq->arm_st);

	/* set eqn */
	roce_set_field(eqc->byte_4,
		       HNS_ROCE_EQC_EQN_M,
		       HNS_ROCE_EQC_EQN_S, eq->eqn);

	/* set eqe_cnt */
	roce_set_field(eqc->byte_4,
		       HNS_ROCE_EQC_EQE_CNT_M,
		       HNS_ROCE_EQC_EQE_CNT_S,
		       HNS_ROCE_EQ_INIT_EQE_CNT);

	/* set eqe_ba_pg_sz */
	roce_set_field(eqc->byte_8,
		       HNS_ROCE_EQC_BA_PG_SZ_M,
		       HNS_ROCE_EQC_BA_PG_SZ_S, eq->eqe_ba_pg_sz);

	/* set eqe_buf_pg_sz */
	roce_set_field(eqc->byte_8,
		       HNS_ROCE_EQC_BUF_PG_SZ_M,
		       HNS_ROCE_EQC_BUF_PG_SZ_S, eq->eqe_buf_pg_sz);

	/* set eq_producer_idx */
	roce_set_field(eqc->byte_8,
		       HNS_ROCE_EQC_PROD_INDX_M,
		       HNS_ROCE_EQC_PROD_INDX_S,
		       HNS_ROCE_EQ_INIT_PROD_IDX);

	/* set eq_max_cnt */
	roce_set_field(eqc->byte_12,
		       HNS_ROCE_EQC_MAX_CNT_M,
		       HNS_ROCE_EQC_MAX_CNT_S, eq->eq_max_cnt);

	/* set eq_period */
	roce_set_field(eqc->byte_12,
		       HNS_ROCE_EQC_PERIOD_M,
		       HNS_ROCE_EQC_PERIOD_S, eq->eq_period);

	/* set eqe_report_timer */
	roce_set_field(eqc->eqe_report_timer,
		       HNS_ROCE_EQC_REPORT_TIMER_M,
		       HNS_ROCE_EQC_REPORT_TIMER_S,
		       HNS_ROCE_EQ_INIT_REPORT_TIMER);

	/* set eqe_ba [34:3] */
	roce_set_field(eqc->eqe_ba0,
		       HNS_ROCE_EQC_EQE_BA_L_M,
		       HNS_ROCE_EQC_EQE_BA_L_S, eq->eqe_ba >> 3);

	/* set eqe_ba [63:35] */
	roce_set_field(eqc->eqe_ba1,
		       HNS_ROCE_EQC_EQE_BA_H_M,
		       HNS_ROCE_EQC_EQE_BA_H_S, eq->eqe_ba >> 35);

	/* set eq shift */
	roce_set_field(eqc->byte_28,
		       HNS_ROCE_EQC_SHIFT_M,
		       HNS_ROCE_EQC_SHIFT_S, eq->shift);

	/* set eq MSI_IDX */
	roce_set_field(eqc->byte_28,
		       HNS_ROCE_EQC_MSI_INDX_M,
		       HNS_ROCE_EQC_MSI_INDX_S,
		       HNS_ROCE_EQ_INIT_MSI_IDX);

	/* set cur_eqe_ba [27:12] */
	roce_set_field(eqc->byte_28,
		       HNS_ROCE_EQC_CUR_EQE_BA_L_M,
		       HNS_ROCE_EQC_CUR_EQE_BA_L_S, eq->cur_eqe_ba >> 12);

	/* set cur_eqe_ba [59:28] */
	roce_set_field(eqc->byte_32,
		       HNS_ROCE_EQC_CUR_EQE_BA_M_M,
		       HNS_ROCE_EQC_CUR_EQE_BA_M_S, eq->cur_eqe_ba >> 28);

	/* set cur_eqe_ba [63:60] */
	roce_set_field(eqc->byte_36,
		       HNS_ROCE_EQC_CUR_EQE_BA_H_M,
		       HNS_ROCE_EQC_CUR_EQE_BA_H_S, eq->cur_eqe_ba >> 60);

	/* set eq consumer idx */
	roce_set_field(eqc->byte_36,
		       HNS_ROCE_EQC_CONS_INDX_M,
		       HNS_ROCE_EQC_CONS_INDX_S,
		       HNS_ROCE_EQ_INIT_CONS_IDX);

	/* set nxt_eqe_ba[43:12] */
	roce_set_field(eqc->nxt_eqe_ba0,
		       HNS_ROCE_EQC_NXT_EQE_BA_L_M,
		       HNS_ROCE_EQC_NXT_EQE_BA_L_S, eq->nxt_eqe_ba >> 12);

	/* set nxt_eqe_ba[63:44] */
	roce_set_field(eqc->nxt_eqe_ba1,
		       HNS_ROCE_EQC_NXT_EQE_BA_H_M,
		       HNS_ROCE_EQC_NXT_EQE_BA_H_S, eq->nxt_eqe_ba >> 44);
}
4250
4251static int hns_roce_mhop_alloc_eq(struct hns_roce_dev *hr_dev,
4252 struct hns_roce_eq *eq)
4253{
4254 struct device *dev = hr_dev->dev;
4255 int eq_alloc_done = 0;
4256 int eq_buf_cnt = 0;
4257 int eqe_alloc;
4258 u32 buf_chk_sz;
4259 u32 bt_chk_sz;
4260 u32 mhop_num;
4261 u64 size;
4262 u64 idx;
4263 int ba_num;
4264 int bt_num;
4265 int record_i;
4266 int record_j;
4267 int i = 0;
4268 int j = 0;
4269
4270 mhop_num = hr_dev->caps.eqe_hop_num;
4271 buf_chk_sz = 1 << (hr_dev->caps.eqe_buf_pg_sz + PAGE_SHIFT);
4272 bt_chk_sz = 1 << (hr_dev->caps.eqe_ba_pg_sz + PAGE_SHIFT);
4273
4274 ba_num = (PAGE_ALIGN(eq->entries * eq->eqe_size) + buf_chk_sz - 1)
4275 / buf_chk_sz;
4276 bt_num = (ba_num + bt_chk_sz / 8 - 1) / (bt_chk_sz / 8);
4277
4278 /* hop_num = 0 */
4279 if (mhop_num == HNS_ROCE_HOP_NUM_0) {
4280 if (eq->entries > buf_chk_sz / eq->eqe_size) {
4281 dev_err(dev, "eq entries %d is larger than buf_pg_sz!",
4282 eq->entries);
4283 return -EINVAL;
4284 }
4285 eq->bt_l0 = dma_alloc_coherent(dev, eq->entries * eq->eqe_size,
4286 &(eq->l0_dma), GFP_KERNEL);
4287 if (!eq->bt_l0)
4288 return -ENOMEM;
4289
4290 eq->cur_eqe_ba = eq->l0_dma;
4291 eq->nxt_eqe_ba = 0;
4292
4293 memset(eq->bt_l0, 0, eq->entries * eq->eqe_size);
4294
4295 return 0;
4296 }
4297
4298 eq->buf_dma = kcalloc(ba_num, sizeof(*eq->buf_dma), GFP_KERNEL);
4299 if (!eq->buf_dma)
4300 return -ENOMEM;
4301 eq->buf = kcalloc(ba_num, sizeof(*eq->buf), GFP_KERNEL);
4302 if (!eq->buf)
4303 goto err_kcalloc_buf;
4304
4305 if (mhop_num == 2) {
4306 eq->l1_dma = kcalloc(bt_num, sizeof(*eq->l1_dma), GFP_KERNEL);
4307 if (!eq->l1_dma)
4308 goto err_kcalloc_l1_dma;
4309
4310 eq->bt_l1 = kcalloc(bt_num, sizeof(*eq->bt_l1), GFP_KERNEL);
4311 if (!eq->bt_l1)
4312 goto err_kcalloc_bt_l1;
4313 }
4314
4315 /* alloc L0 BT */
4316 eq->bt_l0 = dma_alloc_coherent(dev, bt_chk_sz, &eq->l0_dma, GFP_KERNEL);
4317 if (!eq->bt_l0)
4318 goto err_dma_alloc_l0;
4319
4320 if (mhop_num == 1) {
4321 if (ba_num > (bt_chk_sz / 8))
4322 dev_err(dev, "ba_num %d is too large for 1 hop\n",
4323 ba_num);
4324
4325 /* alloc buf */
4326 for (i = 0; i < bt_chk_sz / 8; i++) {
4327 if (eq_buf_cnt + 1 < ba_num) {
4328 size = buf_chk_sz;
4329 } else {
4330 eqe_alloc = i * (buf_chk_sz / eq->eqe_size);
4331 size = (eq->entries - eqe_alloc) * eq->eqe_size;
4332 }
4333 eq->buf[i] = dma_alloc_coherent(dev, size,
4334 &(eq->buf_dma[i]),
4335 GFP_KERNEL);
4336 if (!eq->buf[i])
4337 goto err_dma_alloc_buf;
4338
4339 memset(eq->buf[i], 0, size);
4340 *(eq->bt_l0 + i) = eq->buf_dma[i];
4341
4342 eq_buf_cnt++;
4343 if (eq_buf_cnt >= ba_num)
4344 break;
4345 }
4346 eq->cur_eqe_ba = eq->buf_dma[0];
4347 eq->nxt_eqe_ba = eq->buf_dma[1];
4348
4349 } else if (mhop_num == 2) {
4350 /* alloc L1 BT and buf */
4351 for (i = 0; i < bt_chk_sz / 8; i++) {
4352 eq->bt_l1[i] = dma_alloc_coherent(dev, bt_chk_sz,
4353 &(eq->l1_dma[i]),
4354 GFP_KERNEL);
4355 if (!eq->bt_l1[i])
4356 goto err_dma_alloc_l1;
4357 *(eq->bt_l0 + i) = eq->l1_dma[i];
4358
4359 for (j = 0; j < bt_chk_sz / 8; j++) {
4360 idx = i * bt_chk_sz / 8 + j;
4361 if (eq_buf_cnt + 1 < ba_num) {
4362 size = buf_chk_sz;
4363 } else {
4364 eqe_alloc = (buf_chk_sz / eq->eqe_size)
4365 * idx;
4366 size = (eq->entries - eqe_alloc)
4367 * eq->eqe_size;
4368 }
4369 eq->buf[idx] = dma_alloc_coherent(dev, size,
4370 &(eq->buf_dma[idx]),
4371 GFP_KERNEL);
4372 if (!eq->buf[idx])
4373 goto err_dma_alloc_buf;
4374
4375 memset(eq->buf[idx], 0, size);
4376 *(eq->bt_l1[i] + j) = eq->buf_dma[idx];
4377
4378 eq_buf_cnt++;
4379 if (eq_buf_cnt >= ba_num) {
4380 eq_alloc_done = 1;
4381 break;
4382 }
4383 }
4384
4385 if (eq_alloc_done)
4386 break;
4387 }
4388 eq->cur_eqe_ba = eq->buf_dma[0];
4389 eq->nxt_eqe_ba = eq->buf_dma[1];
4390 }
4391
4392 eq->l0_last_num = i + 1;
4393 if (mhop_num == 2)
4394 eq->l1_last_num = j + 1;
4395
4396 return 0;
4397
4398err_dma_alloc_l1:
4399 dma_free_coherent(dev, bt_chk_sz, eq->bt_l0, eq->l0_dma);
4400 eq->bt_l0 = NULL;
4401 eq->l0_dma = 0;
4402 for (i -= 1; i >= 0; i--) {
4403 dma_free_coherent(dev, bt_chk_sz, eq->bt_l1[i],
4404 eq->l1_dma[i]);
4405
4406 for (j = 0; j < bt_chk_sz / 8; j++) {
4407 idx = i * bt_chk_sz / 8 + j;
4408 dma_free_coherent(dev, buf_chk_sz, eq->buf[idx],
4409 eq->buf_dma[idx]);
4410 }
4411 }
4412 goto err_dma_alloc_l0;
4413
4414err_dma_alloc_buf:
4415 dma_free_coherent(dev, bt_chk_sz, eq->bt_l0, eq->l0_dma);
4416 eq->bt_l0 = NULL;
4417 eq->l0_dma = 0;
4418
4419 if (mhop_num == 1)
4420 for (i -= i; i >= 0; i--)
4421 dma_free_coherent(dev, buf_chk_sz, eq->buf[i],
4422 eq->buf_dma[i]);
4423 else if (mhop_num == 2) {
4424 record_i = i;
4425 record_j = j;
4426 for (; i >= 0; i--) {
4427 dma_free_coherent(dev, bt_chk_sz, eq->bt_l1[i],
4428 eq->l1_dma[i]);
4429
4430 for (j = 0; j < bt_chk_sz / 8; j++) {
4431 if (i == record_i && j >= record_j)
4432 break;
4433
4434 idx = i * bt_chk_sz / 8 + j;
4435 dma_free_coherent(dev, buf_chk_sz,
4436 eq->buf[idx],
4437 eq->buf_dma[idx]);
4438 }
4439 }
4440 }
4441
4442err_dma_alloc_l0:
4443 kfree(eq->bt_l1);
4444 eq->bt_l1 = NULL;
4445
4446err_kcalloc_bt_l1:
4447 kfree(eq->l1_dma);
4448 eq->l1_dma = NULL;
4449
4450err_kcalloc_l1_dma:
4451 kfree(eq->buf);
4452 eq->buf = NULL;
4453
4454err_kcalloc_buf:
4455 kfree(eq->buf_dma);
4456 eq->buf_dma = NULL;
4457
4458 return -ENOMEM;
4459}
4460
4461static int hns_roce_v2_create_eq(struct hns_roce_dev *hr_dev,
4462 struct hns_roce_eq *eq,
4463 unsigned int eq_cmd)
4464{
4465 struct device *dev = hr_dev->dev;
4466 struct hns_roce_cmd_mailbox *mailbox;
4467 u32 buf_chk_sz = 0;
4468 int ret;
4469
4470 /* Allocate mailbox memory */
4471 mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
4472 if (IS_ERR(mailbox))
4473 return PTR_ERR(mailbox);
4474
4475 if (!hr_dev->caps.eqe_hop_num) {
4476 buf_chk_sz = 1 << (hr_dev->caps.eqe_buf_pg_sz + PAGE_SHIFT);
4477
4478 eq->buf_list = kzalloc(sizeof(struct hns_roce_buf_list),
4479 GFP_KERNEL);
4480 if (!eq->buf_list) {
4481 ret = -ENOMEM;
4482 goto free_cmd_mbox;
4483 }
4484
4485 eq->buf_list->buf = dma_alloc_coherent(dev, buf_chk_sz,
4486 &(eq->buf_list->map),
4487 GFP_KERNEL);
4488 if (!eq->buf_list->buf) {
4489 ret = -ENOMEM;
4490 goto err_alloc_buf;
4491 }
4492
4493 memset(eq->buf_list->buf, 0, buf_chk_sz);
4494 } else {
4495 ret = hns_roce_mhop_alloc_eq(hr_dev, eq);
4496 if (ret) {
4497 ret = -ENOMEM;
4498 goto free_cmd_mbox;
4499 }
4500 }
4501
4502 hns_roce_config_eqc(hr_dev, eq, mailbox->buf);
4503
4504 ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, eq->eqn, 0,
4505 eq_cmd, HNS_ROCE_CMD_TIMEOUT_MSECS);
4506 if (ret) {
4507 dev_err(dev, "[mailbox cmd] creat eqc failed.\n");
4508 goto err_cmd_mbox;
4509 }
4510
4511 hns_roce_free_cmd_mailbox(hr_dev, mailbox);
4512
4513 return 0;
4514
4515err_cmd_mbox:
4516 if (!hr_dev->caps.eqe_hop_num)
4517 dma_free_coherent(dev, buf_chk_sz, eq->buf_list->buf,
4518 eq->buf_list->map);
4519 else {
4520 hns_roce_mhop_free_eq(hr_dev, eq);
4521 goto free_cmd_mbox;
4522 }
4523
4524err_alloc_buf:
4525 kfree(eq->buf_list);
4526
4527free_cmd_mbox:
4528 hns_roce_free_cmd_mailbox(hr_dev, mailbox);
4529
4530 return ret;
4531}
4532
/* Create every event queue (CEQs first, then AEQs), unmask the interrupt
 * sources and request all IRQ vectors.
 *
 * Vector layout (both for hr_dev->irq[] and irq_names[]):
 *   [0, other_num)                       abnormal vectors
 *   [other_num, other_num + aeq_num)     AEQ vectors
 *   [other_num + aeq_num, irq_num)       CEQ vectors
 * while eq_table->eq[] holds CEQs at [0, comp_num) and AEQs after them —
 * hence the index arithmetic when pairing irqs with names below.
 *
 * Returns 0 on success; on failure everything allocated so far is undone.
 */
static int hns_roce_v2_init_eq_table(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_eq_table *eq_table = &hr_dev->eq_table;
	struct device *dev = hr_dev->dev;
	struct hns_roce_eq *eq;
	unsigned int eq_cmd;
	int irq_num;
	int eq_num;
	int other_num;
	int comp_num;
	int aeq_num;
	int i, j, k;
	int ret;

	other_num = hr_dev->caps.num_other_vectors;
	comp_num = hr_dev->caps.num_comp_vectors;
	aeq_num = hr_dev->caps.num_aeq_vectors;

	eq_num = comp_num + aeq_num;
	irq_num = eq_num + other_num;

	eq_table->eq = kcalloc(eq_num, sizeof(*eq_table->eq), GFP_KERNEL);
	if (!eq_table->eq)
		return -ENOMEM;

	/* One name buffer per vector, filled in after the EQs exist. */
	for (i = 0; i < irq_num; i++) {
		hr_dev->irq_names[i] = kzalloc(HNS_ROCE_INT_NAME_LEN,
					       GFP_KERNEL);
		if (!hr_dev->irq_names[i]) {
			ret = -ENOMEM;
			goto err_failed_kzalloc;
		}
	}

	/* create eq */
	for (j = 0; j < eq_num; j++) {
		eq = &eq_table->eq[j];
		eq->hr_dev = hr_dev;
		eq->eqn = j;
		if (j < comp_num) {
			/* CEQ: irq vector sits after the abnormal and AEQ ones */
			eq_cmd = HNS_ROCE_CMD_CREATE_CEQC;
			eq->type_flag = HNS_ROCE_CEQ;
			eq->entries = hr_dev->caps.ceqe_depth;
			eq->eqe_size = HNS_ROCE_CEQ_ENTRY_SIZE;
			eq->irq = hr_dev->irq[j + other_num + aeq_num];
			eq->eq_max_cnt = HNS_ROCE_CEQ_DEFAULT_BURST_NUM;
			eq->eq_period = HNS_ROCE_CEQ_DEFAULT_INTERVAL;
		} else {
			/* AEQ: irq vector sits right after the abnormal ones */
			eq_cmd = HNS_ROCE_CMD_CREATE_AEQC;
			eq->type_flag = HNS_ROCE_AEQ;
			eq->entries = hr_dev->caps.aeqe_depth;
			eq->eqe_size = HNS_ROCE_AEQ_ENTRY_SIZE;
			eq->irq = hr_dev->irq[j - comp_num + other_num];
			eq->eq_max_cnt = HNS_ROCE_AEQ_DEFAULT_BURST_NUM;
			eq->eq_period = HNS_ROCE_AEQ_DEFAULT_INTERVAL;
		}

		ret = hns_roce_v2_create_eq(hr_dev, eq, eq_cmd);
		if (ret) {
			dev_err(dev, "eq create failed.\n");
			goto err_create_eq_fail;
		}
	}

	/* enable irq */
	hns_roce_v2_int_mask_enable(hr_dev, eq_num, EQ_ENABLE);

	/* irq contains: abnormal + AEQ + CEQ */
	for (k = 0; k < irq_num; k++)
		if (k < other_num)
			snprintf((char *)hr_dev->irq_names[k],
				 HNS_ROCE_INT_NAME_LEN, "hns-abn-%d", k);
		else if (k < (other_num + aeq_num))
			snprintf((char *)hr_dev->irq_names[k],
				 HNS_ROCE_INT_NAME_LEN, "hns-aeq-%d",
				 k - other_num);
		else
			snprintf((char *)hr_dev->irq_names[k],
				 HNS_ROCE_INT_NAME_LEN, "hns-ceq-%d",
				 k - other_num - aeq_num);

	/* Request order follows eq_table (CEQs first), so the name index
	 * is shifted: +aeq_num maps a CEQ to its "hns-ceq-*" name,
	 * -comp_num maps an AEQ back to its "hns-aeq-*" name.
	 */
	for (k = 0; k < irq_num; k++) {
		if (k < other_num)
			ret = request_irq(hr_dev->irq[k],
					  hns_roce_v2_msix_interrupt_abn,
					  0, hr_dev->irq_names[k], hr_dev);

		else if (k < (other_num + comp_num))
			ret = request_irq(eq_table->eq[k - other_num].irq,
					  hns_roce_v2_msix_interrupt_eq,
					  0, hr_dev->irq_names[k + aeq_num],
					  &eq_table->eq[k - other_num]);
		else
			ret = request_irq(eq_table->eq[k - other_num].irq,
					  hns_roce_v2_msix_interrupt_eq,
					  0, hr_dev->irq_names[k - comp_num],
					  &eq_table->eq[k - other_num]);
		if (ret) {
			dev_err(dev, "Request irq error!\n");
			goto err_request_irq_fail;
		}
	}

	return 0;

err_request_irq_fail:
	for (k -= 1; k >= 0; k--)
		if (k < other_num)
			free_irq(hr_dev->irq[k], hr_dev);
		else
			free_irq(eq_table->eq[k - other_num].irq,
				 &eq_table->eq[k - other_num]);

err_create_eq_fail:
	for (j -= 1; j >= 0; j--)
		hns_roce_v2_free_eq(hr_dev, &eq_table->eq[j]);

err_failed_kzalloc:
	for (i -= 1; i >= 0; i--)
		kfree(hr_dev->irq_names[i]);
	kfree(eq_table->eq);

	return ret;
}
4659
4660static void hns_roce_v2_cleanup_eq_table(struct hns_roce_dev *hr_dev)
4661{
4662 struct hns_roce_eq_table *eq_table = &hr_dev->eq_table;
4663 int irq_num;
4664 int eq_num;
4665 int i;
4666
4667 eq_num = hr_dev->caps.num_comp_vectors + hr_dev->caps.num_aeq_vectors;
4668 irq_num = eq_num + hr_dev->caps.num_other_vectors;
4669
4670 /* Disable irq */
4671 hns_roce_v2_int_mask_enable(hr_dev, eq_num, EQ_DISABLE);
4672
4673 for (i = 0; i < hr_dev->caps.num_other_vectors; i++)
4674 free_irq(hr_dev->irq[i], hr_dev);
4675
4676 for (i = 0; i < eq_num; i++) {
4677 hns_roce_v2_destroy_eqc(hr_dev, i);
4678
4679 free_irq(eq_table->eq[i].irq, &eq_table->eq[i]);
4680
4681 hns_roce_v2_free_eq(hr_dev, &eq_table->eq[i]);
4682 }
4683
4684 for (i = 0; i < irq_num; i++)
4685 kfree(hr_dev->irq_names[i]);
4686
4687 kfree(eq_table->eq);
4688}
4689
/* HIP08-specific hardware operation table handed to the common RoCE core;
 * each entry implements one step of the device/verbs lifecycle for v2 HW.
 */
static const struct hns_roce_hw hns_roce_hw_v2 = {
	.cmq_init = hns_roce_v2_cmq_init,
	.cmq_exit = hns_roce_v2_cmq_exit,
	.hw_profile = hns_roce_v2_profile,
	.post_mbox = hns_roce_v2_post_mbox,
	.chk_mbox = hns_roce_v2_chk_mbox,
	.set_gid = hns_roce_v2_set_gid,
	.set_mac = hns_roce_v2_set_mac,
	.write_mtpt = hns_roce_v2_write_mtpt,
	.rereg_write_mtpt = hns_roce_v2_rereg_write_mtpt,
	.write_cqc = hns_roce_v2_write_cqc,
	.set_hem = hns_roce_v2_set_hem,
	.clear_hem = hns_roce_v2_clear_hem,
	.modify_qp = hns_roce_v2_modify_qp,
	.query_qp = hns_roce_v2_query_qp,
	.destroy_qp = hns_roce_v2_destroy_qp,
	.modify_cq = hns_roce_v2_modify_cq,
	.post_send = hns_roce_v2_post_send,
	.post_recv = hns_roce_v2_post_recv,
	.req_notify_cq = hns_roce_v2_req_notify_cq,
	.poll_cq = hns_roce_v2_poll_cq,
	.init_eq = hns_roce_v2_init_eq_table,
	.cleanup_eq = hns_roce_v2_cleanup_eq_table,
};
dd74282d
WHX
4714
/* PCI IDs of the HNS3 NIC functions that expose HIP08 RoCE. */
static const struct pci_device_id hns_roce_hw_v2_pci_tbl[] = {
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
	/* required last entry */
	{0, }
};
4722
4723static int hns_roce_hw_v2_get_cfg(struct hns_roce_dev *hr_dev,
4724 struct hnae3_handle *handle)
4725{
4726 const struct pci_device_id *id;
a5073d60 4727 int i;
dd74282d
WHX
4728
4729 id = pci_match_id(hns_roce_hw_v2_pci_tbl, hr_dev->pci_dev);
4730 if (!id) {
4731 dev_err(hr_dev->dev, "device is not compatible!\n");
4732 return -ENXIO;
4733 }
4734
4735 hr_dev->hw = &hns_roce_hw_v2;
2d407888
WHX
4736 hr_dev->sdb_offset = ROCEE_DB_SQ_L_0_REG;
4737 hr_dev->odb_offset = hr_dev->sdb_offset;
dd74282d
WHX
4738
4739 /* Get info from NIC driver. */
4740 hr_dev->reg_base = handle->rinfo.roce_io_base;
4741 hr_dev->caps.num_ports = 1;
4742 hr_dev->iboe.netdevs[0] = handle->rinfo.netdev;
4743 hr_dev->iboe.phy_port[0] = 0;
4744
d4994d2f 4745 addrconf_addr_eui48((u8 *)&hr_dev->ib_dev.node_guid,
4746 hr_dev->iboe.netdevs[0]->dev_addr);
4747
a5073d60
YL
4748 for (i = 0; i < HNS_ROCE_V2_MAX_IRQ_NUM; i++)
4749 hr_dev->irq[i] = pci_irq_vector(handle->pdev,
4750 i + handle->rinfo.base_vector);
4751
dd74282d 4752 /* cmd issue mode: 0 is poll, 1 is event */
a5073d60 4753 hr_dev->cmd_mod = 1;
dd74282d
WHX
4754 hr_dev->loop_idc = 0;
4755
4756 return 0;
4757}
4758
4759static int hns_roce_hw_v2_init_instance(struct hnae3_handle *handle)
4760{
4761 struct hns_roce_dev *hr_dev;
4762 int ret;
4763
4764 hr_dev = (struct hns_roce_dev *)ib_alloc_device(sizeof(*hr_dev));
4765 if (!hr_dev)
4766 return -ENOMEM;
4767
a04ff739
WHX
4768 hr_dev->priv = kzalloc(sizeof(struct hns_roce_v2_priv), GFP_KERNEL);
4769 if (!hr_dev->priv) {
4770 ret = -ENOMEM;
4771 goto error_failed_kzalloc;
4772 }
4773
dd74282d
WHX
4774 hr_dev->pci_dev = handle->pdev;
4775 hr_dev->dev = &handle->pdev->dev;
4776 handle->priv = hr_dev;
4777
4778 ret = hns_roce_hw_v2_get_cfg(hr_dev, handle);
4779 if (ret) {
4780 dev_err(hr_dev->dev, "Get Configuration failed!\n");
4781 goto error_failed_get_cfg;
4782 }
4783
4784 ret = hns_roce_init(hr_dev);
4785 if (ret) {
4786 dev_err(hr_dev->dev, "RoCE Engine init failed!\n");
4787 goto error_failed_get_cfg;
4788 }
4789
4790 return 0;
4791
4792error_failed_get_cfg:
a04ff739
WHX
4793 kfree(hr_dev->priv);
4794
4795error_failed_kzalloc:
dd74282d
WHX
4796 ib_dealloc_device(&hr_dev->ib_dev);
4797
4798 return ret;
4799}
4800
4801static void hns_roce_hw_v2_uninit_instance(struct hnae3_handle *handle,
4802 bool reset)
4803{
4804 struct hns_roce_dev *hr_dev = (struct hns_roce_dev *)handle->priv;
4805
4806 hns_roce_exit(hr_dev);
a04ff739 4807 kfree(hr_dev->priv);
dd74282d
WHX
4808 ib_dealloc_device(&hr_dev->ib_dev);
4809}
4810
/* Lifecycle callbacks invoked by the HNS3 NIC framework for this client. */
static const struct hnae3_client_ops hns_roce_hw_v2_ops = {
	.init_instance = hns_roce_hw_v2_init_instance,
	.uninit_instance = hns_roce_hw_v2_uninit_instance,
};

/* RoCE client descriptor registered with the hnae3 framework. */
static struct hnae3_client hns_roce_hw_v2_client = {
	.name = "hns_roce_hw_v2",
	.type = HNAE3_CLIENT_ROCE,
	.ops = &hns_roce_hw_v2_ops,
};

/* Module entry: register as an hnae3 client; instances are created via
 * the init_instance callback as matching NIC handles appear.
 */
static int __init hns_roce_hw_v2_init(void)
{
	return hnae3_register_client(&hns_roce_hw_v2_client);
}

/* Module exit: unregister the client (tears down remaining instances). */
static void __exit hns_roce_hw_v2_exit(void)
{
	hnae3_unregister_client(&hns_roce_hw_v2_client);
}

module_init(hns_roce_hw_v2_init);
module_exit(hns_roce_hw_v2_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Wei Hu <xavier.huwei@huawei.com>");
MODULE_AUTHOR("Lijun Ou <oulijun@huawei.com>");
MODULE_AUTHOR("Shaobo Xu <xushaobo2@huawei.com>");
MODULE_DESCRIPTION("Hisilicon Hip08 Family RoCE Driver");