/*
 * Copyright (c) 2016 Hisilicon Limited.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/platform_device.h>
#include <linux/acpi.h>
#include <linux/etherdevice.h>
#include <linux/interrupt.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <rdma/ib_umem.h>
#include "hns_roce_common.h"
#include "hns_roce_device.h"
#include "hns_roce_cmd.h"
#include "hns_roce_hem.h"
#include "hns_roce_hw_v1.h"

static void set_data_seg(struct hns_roce_wqe_data_seg *dseg, struct ib_sge *sg)
{
	dseg->lkey = cpu_to_le32(sg->lkey);
	dseg->addr = cpu_to_le64(sg->addr);
	dseg->len = cpu_to_le32(sg->length);
}

static void set_raddr_seg(struct hns_roce_wqe_raddr_seg *rseg, u64 remote_addr,
			  u32 rkey)
{
	rseg->raddr = cpu_to_le64(remote_addr);
	rseg->rkey = cpu_to_le32(rkey);
	rseg->len = 0;
}

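/*
 * Post a chain of send work requests to the send queue. UD (GSI) and
 * RC WQEs use different layouts and are built separately below; the
 * SQ doorbell is rung once at the end for the whole chain.
 */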
static int hns_roce_v1_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
				 struct ib_send_wr **bad_wr)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
	struct hns_roce_ah *ah = to_hr_ah(ud_wr(wr)->ah);
	struct hns_roce_ud_send_wqe *ud_sq_wqe = NULL;
	struct hns_roce_wqe_ctrl_seg *ctrl = NULL;
	struct hns_roce_wqe_data_seg *dseg = NULL;
	struct hns_roce_qp *qp = to_hr_qp(ibqp);
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_sq_db sq_db;
	int ps_opcode = 0, i = 0;
	unsigned long flags = 0;
	void *wqe = NULL;
	u32 doorbell[2];
	int nreq = 0;
	u32 ind = 0;
	int ret = 0;
	u8 *smac;
	int loopback;

	if (unlikely(ibqp->qp_type != IB_QPT_GSI &&
		     ibqp->qp_type != IB_QPT_RC)) {
		dev_err(dev, "unsupported QP type\n");
		*bad_wr = NULL;
		return -EOPNOTSUPP;
	}

	spin_lock_irqsave(&qp->sq.lock, flags);
	ind = qp->sq_next_wqe;
	for (nreq = 0; wr; ++nreq, wr = wr->next) {
		if (hns_roce_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) {
			ret = -ENOMEM;
			*bad_wr = wr;
			goto out;
		}

		if (unlikely(wr->num_sge > qp->sq.max_gs)) {
			dev_err(dev, "num_sge=%d > qp->sq.max_gs=%d\n",
				wr->num_sge, qp->sq.max_gs);
			ret = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		wqe = get_send_wqe(qp, ind & (qp->sq.wqe_cnt - 1));
		qp->sq.wrid[(qp->sq.head + nreq) & (qp->sq.wqe_cnt - 1)] =
			wr->wr_id;

		/* UD (GSI) and RC WQE formats differ; build them separately */
		if (ibqp->qp_type == IB_QPT_GSI) {
			ud_sq_wqe = wqe;
			roce_set_field(ud_sq_wqe->dmac_h,
				       UD_SEND_WQE_U32_4_DMAC_0_M,
				       UD_SEND_WQE_U32_4_DMAC_0_S,
				       ah->av.mac[0]);
			roce_set_field(ud_sq_wqe->dmac_h,
				       UD_SEND_WQE_U32_4_DMAC_1_M,
				       UD_SEND_WQE_U32_4_DMAC_1_S,
				       ah->av.mac[1]);
			roce_set_field(ud_sq_wqe->dmac_h,
				       UD_SEND_WQE_U32_4_DMAC_2_M,
				       UD_SEND_WQE_U32_4_DMAC_2_S,
				       ah->av.mac[2]);
			roce_set_field(ud_sq_wqe->dmac_h,
				       UD_SEND_WQE_U32_4_DMAC_3_M,
				       UD_SEND_WQE_U32_4_DMAC_3_S,
				       ah->av.mac[3]);

			roce_set_field(ud_sq_wqe->u32_8,
				       UD_SEND_WQE_U32_8_DMAC_4_M,
				       UD_SEND_WQE_U32_8_DMAC_4_S,
				       ah->av.mac[4]);
			roce_set_field(ud_sq_wqe->u32_8,
				       UD_SEND_WQE_U32_8_DMAC_5_M,
				       UD_SEND_WQE_U32_8_DMAC_5_S,
				       ah->av.mac[5]);

			smac = (u8 *)hr_dev->dev_addr[qp->port];
			loopback = ether_addr_equal_unaligned(ah->av.mac,
							      smac) ? 1 : 0;
			roce_set_bit(ud_sq_wqe->u32_8,
				     UD_SEND_WQE_U32_8_LOOPBACK_INDICATOR_S,
				     loopback);

			roce_set_field(ud_sq_wqe->u32_8,
				       UD_SEND_WQE_U32_8_OPERATION_TYPE_M,
				       UD_SEND_WQE_U32_8_OPERATION_TYPE_S,
				       HNS_ROCE_WQE_OPCODE_SEND);
			roce_set_field(ud_sq_wqe->u32_8,
				       UD_SEND_WQE_U32_8_NUMBER_OF_DATA_SEG_M,
				       UD_SEND_WQE_U32_8_NUMBER_OF_DATA_SEG_S,
				       2);
			roce_set_bit(ud_sq_wqe->u32_8,
				     UD_SEND_WQE_U32_8_SEND_GL_ROUTING_HDR_FLAG_S,
				     1);

			ud_sq_wqe->u32_8 |= (wr->send_flags & IB_SEND_SIGNALED ?
				cpu_to_le32(HNS_ROCE_WQE_CQ_NOTIFY) : 0) |
				(wr->send_flags & IB_SEND_SOLICITED ?
				cpu_to_le32(HNS_ROCE_WQE_SE) : 0) |
				((wr->opcode == IB_WR_SEND_WITH_IMM) ?
				cpu_to_le32(HNS_ROCE_WQE_IMM) : 0);

			roce_set_field(ud_sq_wqe->u32_16,
				       UD_SEND_WQE_U32_16_DEST_QP_M,
				       UD_SEND_WQE_U32_16_DEST_QP_S,
				       ud_wr(wr)->remote_qpn);
			roce_set_field(ud_sq_wqe->u32_16,
				       UD_SEND_WQE_U32_16_MAX_STATIC_RATE_M,
				       UD_SEND_WQE_U32_16_MAX_STATIC_RATE_S,
				       ah->av.stat_rate);

			roce_set_field(ud_sq_wqe->u32_36,
				       UD_SEND_WQE_U32_36_FLOW_LABEL_M,
				       UD_SEND_WQE_U32_36_FLOW_LABEL_S, 0);
			roce_set_field(ud_sq_wqe->u32_36,
				       UD_SEND_WQE_U32_36_PRIORITY_M,
				       UD_SEND_WQE_U32_36_PRIORITY_S,
				       ah->av.sl_tclass_flowlabel >>
				       HNS_ROCE_SL_SHIFT);
			roce_set_field(ud_sq_wqe->u32_36,
				       UD_SEND_WQE_U32_36_SGID_INDEX_M,
				       UD_SEND_WQE_U32_36_SGID_INDEX_S,
				       hns_get_gid_index(hr_dev, qp->phy_port,
							 ah->av.gid_index));

			roce_set_field(ud_sq_wqe->u32_40,
				       UD_SEND_WQE_U32_40_HOP_LIMIT_M,
				       UD_SEND_WQE_U32_40_HOP_LIMIT_S,
				       ah->av.hop_limit);
			roce_set_field(ud_sq_wqe->u32_40,
				       UD_SEND_WQE_U32_40_TRAFFIC_CLASS_M,
				       UD_SEND_WQE_U32_40_TRAFFIC_CLASS_S, 0);

			memcpy(&ud_sq_wqe->dgid[0], &ah->av.dgid[0], GID_LEN);

			ud_sq_wqe->va0_l = (u32)wr->sg_list[0].addr;
			ud_sq_wqe->va0_h = (wr->sg_list[0].addr) >> 32;
			ud_sq_wqe->l_key0 = wr->sg_list[0].lkey;

			ud_sq_wqe->va1_l = (u32)wr->sg_list[1].addr;
			ud_sq_wqe->va1_h = (wr->sg_list[1].addr) >> 32;
			ud_sq_wqe->l_key1 = wr->sg_list[1].lkey;
			ind++;
		} else if (ibqp->qp_type == IB_QPT_RC) {
			ctrl = wqe;
			memset(ctrl, 0, sizeof(struct hns_roce_wqe_ctrl_seg));
			for (i = 0; i < wr->num_sge; i++)
				ctrl->msg_length += wr->sg_list[i].length;

			ctrl->sgl_pa_h = 0;
			ctrl->flag = 0;
			ctrl->imm_data = send_ieth(wr);

			/* Ctrl field, ctrl set type: sig, solic, imm, fence */
			/* SO wait for conforming application scenarios */
			ctrl->flag |= (wr->send_flags & IB_SEND_SIGNALED ?
				      cpu_to_le32(HNS_ROCE_WQE_CQ_NOTIFY) : 0) |
				      (wr->send_flags & IB_SEND_SOLICITED ?
				      cpu_to_le32(HNS_ROCE_WQE_SE) : 0) |
				      ((wr->opcode == IB_WR_SEND_WITH_IMM ||
				      wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM) ?
				      cpu_to_le32(HNS_ROCE_WQE_IMM) : 0) |
				      (wr->send_flags & IB_SEND_FENCE ?
				      (cpu_to_le32(HNS_ROCE_WQE_FENCE)) : 0);

			wqe += sizeof(struct hns_roce_wqe_ctrl_seg);

			switch (wr->opcode) {
			case IB_WR_RDMA_READ:
				ps_opcode = HNS_ROCE_WQE_OPCODE_RDMA_READ;
				set_raddr_seg(wqe, rdma_wr(wr)->remote_addr,
					      rdma_wr(wr)->rkey);
				break;
			case IB_WR_RDMA_WRITE:
			case IB_WR_RDMA_WRITE_WITH_IMM:
				ps_opcode = HNS_ROCE_WQE_OPCODE_RDMA_WRITE;
				set_raddr_seg(wqe, rdma_wr(wr)->remote_addr,
					      rdma_wr(wr)->rkey);
				break;
			case IB_WR_SEND:
			case IB_WR_SEND_WITH_INV:
			case IB_WR_SEND_WITH_IMM:
				ps_opcode = HNS_ROCE_WQE_OPCODE_SEND;
				break;
			case IB_WR_LOCAL_INV:
				break;
			case IB_WR_ATOMIC_CMP_AND_SWP:
			case IB_WR_ATOMIC_FETCH_AND_ADD:
			case IB_WR_LSO:
			default:
				ps_opcode = HNS_ROCE_WQE_OPCODE_MASK;
				break;
			}
			ctrl->flag |= cpu_to_le32(ps_opcode);
			wqe += sizeof(struct hns_roce_wqe_raddr_seg);

			dseg = wqe;
			if (wr->send_flags & IB_SEND_INLINE && wr->num_sge) {
				if (ctrl->msg_length >
				    hr_dev->caps.max_sq_inline) {
					ret = -EINVAL;
					*bad_wr = wr;
					dev_err(dev, "inline len(1-%d)=%d, illegal",
						hr_dev->caps.max_sq_inline,
						ctrl->msg_length);
					goto out;
				}
				for (i = 0; i < wr->num_sge; i++) {
					memcpy(wqe, ((void *) (uintptr_t)
					       wr->sg_list[i].addr),
					       wr->sg_list[i].length);
					wqe += wr->sg_list[i].length;
				}
				ctrl->flag |= HNS_ROCE_WQE_INLINE;
			} else {
				/* sqe num is two */
				for (i = 0; i < wr->num_sge; i++)
					set_data_seg(dseg + i, wr->sg_list + i);

				ctrl->flag |= cpu_to_le32(wr->num_sge <<
					      HNS_ROCE_WQE_SGE_NUM_BIT);
			}
			ind++;
		}
	}

out:
	/* Ring the SQ doorbell if any WQEs were queued */
	if (likely(nreq)) {
		qp->sq.head += nreq;
		/* Memory barrier */
		wmb();

		sq_db.u32_4 = 0;
		sq_db.u32_8 = 0;
		roce_set_field(sq_db.u32_4, SQ_DOORBELL_U32_4_SQ_HEAD_M,
			       SQ_DOORBELL_U32_4_SQ_HEAD_S,
			       (qp->sq.head & ((qp->sq.wqe_cnt << 1) - 1)));
		roce_set_field(sq_db.u32_4, SQ_DOORBELL_U32_4_SL_M,
			       SQ_DOORBELL_U32_4_SL_S, qp->sl);
		roce_set_field(sq_db.u32_4, SQ_DOORBELL_U32_4_PORT_M,
			       SQ_DOORBELL_U32_4_PORT_S, qp->phy_port);
		roce_set_field(sq_db.u32_8, SQ_DOORBELL_U32_8_QPN_M,
			       SQ_DOORBELL_U32_8_QPN_S, qp->doorbell_qpn);
		roce_set_bit(sq_db.u32_8, SQ_DOORBELL_HW_SYNC_S, 1);

		doorbell[0] = sq_db.u32_4;
		doorbell[1] = sq_db.u32_8;

		hns_roce_write64_k(doorbell, qp->sq.db_reg_l);
		qp->sq_next_wqe = ind;
	}

	spin_unlock_irqrestore(&qp->sq.lock, flags);

	return ret;
}

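/*
 * Post a chain of receive work requests to the receive queue. For the
 * GSI QP the RQ head is updated through the QP1C config register;
 * ordinary QPs ring the RQ doorbell instead.
 */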
static int hns_roce_v1_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
				 struct ib_recv_wr **bad_wr)
{
	int ret = 0;
	int nreq = 0;
	int ind = 0;
	int i = 0;
	u32 reg_val = 0;
	unsigned long flags = 0;
	struct hns_roce_rq_wqe_ctrl *ctrl = NULL;
	struct hns_roce_wqe_data_seg *scat = NULL;
	struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_rq_db rq_db;
	uint32_t doorbell[2] = {0};

	spin_lock_irqsave(&hr_qp->rq.lock, flags);
	ind = hr_qp->rq.head & (hr_qp->rq.wqe_cnt - 1);

	for (nreq = 0; wr; ++nreq, wr = wr->next) {
		if (hns_roce_wq_overflow(&hr_qp->rq, nreq,
					 hr_qp->ibqp.recv_cq)) {
			ret = -ENOMEM;
			*bad_wr = wr;
			goto out;
		}

		if (unlikely(wr->num_sge > hr_qp->rq.max_gs)) {
			dev_err(dev, "rq:num_sge=%d > qp->rq.max_gs=%d\n",
				wr->num_sge, hr_qp->rq.max_gs);
			ret = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		ctrl = get_recv_wqe(hr_qp, ind);

		roce_set_field(ctrl->rwqe_byte_12,
			       RQ_WQE_CTRL_RWQE_BYTE_12_RWQE_SGE_NUM_M,
			       RQ_WQE_CTRL_RWQE_BYTE_12_RWQE_SGE_NUM_S,
			       wr->num_sge);

		scat = (struct hns_roce_wqe_data_seg *)(ctrl + 1);

		for (i = 0; i < wr->num_sge; i++)
			set_data_seg(scat + i, wr->sg_list + i);

		hr_qp->rq.wrid[ind] = wr->wr_id;

		ind = (ind + 1) & (hr_qp->rq.wqe_cnt - 1);
	}

out:
	if (likely(nreq)) {
		hr_qp->rq.head += nreq;
		/* Memory barrier */
		wmb();

		if (ibqp->qp_type == IB_QPT_GSI) {
			/* SW updates the GSI RQ head via the QP1C register */
			reg_val = roce_read(to_hr_dev(ibqp->device),
					    ROCEE_QP1C_CFG3_0_REG +
					    QP1C_CFGN_OFFSET * hr_qp->phy_port);
			roce_set_field(reg_val,
				       ROCEE_QP1C_CFG3_0_ROCEE_QP1C_RQ_HEAD_M,
				       ROCEE_QP1C_CFG3_0_ROCEE_QP1C_RQ_HEAD_S,
				       hr_qp->rq.head);
			roce_write(to_hr_dev(ibqp->device),
				   ROCEE_QP1C_CFG3_0_REG +
				   QP1C_CFGN_OFFSET * hr_qp->phy_port, reg_val);
		} else {
			rq_db.u32_4 = 0;
			rq_db.u32_8 = 0;

			roce_set_field(rq_db.u32_4, RQ_DOORBELL_U32_4_RQ_HEAD_M,
				       RQ_DOORBELL_U32_4_RQ_HEAD_S,
				       hr_qp->rq.head);
			roce_set_field(rq_db.u32_8, RQ_DOORBELL_U32_8_QPN_M,
				       RQ_DOORBELL_U32_8_QPN_S, hr_qp->qpn);
			roce_set_field(rq_db.u32_8, RQ_DOORBELL_U32_8_CMD_M,
				       RQ_DOORBELL_U32_8_CMD_S, 1);
			roce_set_bit(rq_db.u32_8, RQ_DOORBELL_U32_8_HW_SYNC_S,
				     1);

			doorbell[0] = rq_db.u32_4;
			doorbell[1] = rq_db.u32_8;

			hns_roce_write64_k(doorbell, hr_qp->rq.db_reg_l);
		}
	}
	spin_unlock_irqrestore(&hr_qp->rq.lock, flags);

	return ret;
}

static void hns_roce_set_db_event_mode(struct hns_roce_dev *hr_dev,
				       int sdb_mode, int odb_mode)
{
	u32 val;

	val = roce_read(hr_dev, ROCEE_GLB_CFG_REG);
	roce_set_bit(val, ROCEE_GLB_CFG_ROCEE_DB_SQ_MODE_S, sdb_mode);
	roce_set_bit(val, ROCEE_GLB_CFG_ROCEE_DB_OTH_MODE_S, odb_mode);
	roce_write(hr_dev, ROCEE_GLB_CFG_REG, val);
}

static void hns_roce_set_db_ext_mode(struct hns_roce_dev *hr_dev, u32 sdb_mode,
				     u32 odb_mode)
{
	u32 val;

	/* Configure SDB/ODB extend mode */
	val = roce_read(hr_dev, ROCEE_GLB_CFG_REG);
	roce_set_bit(val, ROCEE_GLB_CFG_SQ_EXT_DB_MODE_S, sdb_mode);
	roce_set_bit(val, ROCEE_GLB_CFG_OTH_EXT_DB_MODE_S, odb_mode);
	roce_write(hr_dev, ROCEE_GLB_CFG_REG, val);
}

static void hns_roce_set_sdb(struct hns_roce_dev *hr_dev, u32 sdb_alept,
			     u32 sdb_alful)
{
	u32 val;

	/* Configure SDB */
	val = roce_read(hr_dev, ROCEE_DB_SQ_WL_REG);
	roce_set_field(val, ROCEE_DB_SQ_WL_ROCEE_DB_SQ_WL_M,
		       ROCEE_DB_SQ_WL_ROCEE_DB_SQ_WL_S, sdb_alful);
	roce_set_field(val, ROCEE_DB_SQ_WL_ROCEE_DB_SQ_WL_EMPTY_M,
		       ROCEE_DB_SQ_WL_ROCEE_DB_SQ_WL_EMPTY_S, sdb_alept);
	roce_write(hr_dev, ROCEE_DB_SQ_WL_REG, val);
}

static void hns_roce_set_odb(struct hns_roce_dev *hr_dev, u32 odb_alept,
			     u32 odb_alful)
{
	u32 val;

	/* Configure ODB */
	val = roce_read(hr_dev, ROCEE_DB_OTHERS_WL_REG);
	roce_set_field(val, ROCEE_DB_OTHERS_WL_ROCEE_DB_OTH_WL_M,
		       ROCEE_DB_OTHERS_WL_ROCEE_DB_OTH_WL_S, odb_alful);
	roce_set_field(val, ROCEE_DB_OTHERS_WL_ROCEE_DB_OTH_WL_EMPTY_M,
		       ROCEE_DB_OTHERS_WL_ROCEE_DB_OTH_WL_EMPTY_S, odb_alept);
	roce_write(hr_dev, ROCEE_DB_OTHERS_WL_REG, val);
}

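/*
 * Program the extended send doorbell (SDB): almost-empty/almost-full
 * watermarks, the buffer base address (4 KB aligned, split into low
 * and high register fields) and the queue depth.
 */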
static void hns_roce_set_sdb_ext(struct hns_roce_dev *hr_dev, u32 ext_sdb_alept,
				 u32 ext_sdb_alful)
{
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_v1_priv *priv;
	struct hns_roce_db_table *db;
	dma_addr_t sdb_dma_addr;
	u32 val;

	priv = (struct hns_roce_v1_priv *)hr_dev->priv;
	db = &priv->db_table;

	/* Configure extend SDB threshold */
	roce_write(hr_dev, ROCEE_EXT_DB_SQ_WL_EMPTY_REG, ext_sdb_alept);
	roce_write(hr_dev, ROCEE_EXT_DB_SQ_WL_REG, ext_sdb_alful);

	/* Configure extend SDB base addr */
	sdb_dma_addr = db->ext_db->sdb_buf_list->map;
	roce_write(hr_dev, ROCEE_EXT_DB_SQ_REG, (u32)(sdb_dma_addr >> 12));

	/* Configure extend SDB depth */
	val = roce_read(hr_dev, ROCEE_EXT_DB_SQ_H_REG);
	roce_set_field(val, ROCEE_EXT_DB_SQ_H_EXT_DB_SQ_SHIFT_M,
		       ROCEE_EXT_DB_SQ_H_EXT_DB_SQ_SHIFT_S,
		       db->ext_db->esdb_dep);
	/*
	 * 44 = 32 + 12. The address is shifted by 12 because the buffer
	 * is 4 KB aligned, and by a further 32 to obtain the high 32 bits
	 * that are written to hardware.
	 */
	roce_set_field(val, ROCEE_EXT_DB_SQ_H_EXT_DB_SQ_BA_H_M,
		       ROCEE_EXT_DB_SQ_H_EXT_DB_SQ_BA_H_S, sdb_dma_addr >> 44);
	roce_write(hr_dev, ROCEE_EXT_DB_SQ_H_REG, val);

	dev_dbg(dev, "ext SDB depth: 0x%x\n", db->ext_db->esdb_dep);
	dev_dbg(dev, "ext SDB threshold: empty: 0x%x, ful: 0x%x\n",
		ext_sdb_alept, ext_sdb_alful);
}

static void hns_roce_set_odb_ext(struct hns_roce_dev *hr_dev, u32 ext_odb_alept,
				 u32 ext_odb_alful)
{
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_v1_priv *priv;
	struct hns_roce_db_table *db;
	dma_addr_t odb_dma_addr;
	u32 val;

	priv = (struct hns_roce_v1_priv *)hr_dev->priv;
	db = &priv->db_table;

	/* Configure extend ODB threshold */
	roce_write(hr_dev, ROCEE_EXT_DB_OTHERS_WL_EMPTY_REG, ext_odb_alept);
	roce_write(hr_dev, ROCEE_EXT_DB_OTHERS_WL_REG, ext_odb_alful);

	/* Configure extend ODB base addr */
	odb_dma_addr = db->ext_db->odb_buf_list->map;
	roce_write(hr_dev, ROCEE_EXT_DB_OTH_REG, (u32)(odb_dma_addr >> 12));

	/* Configure extend ODB depth */
	val = roce_read(hr_dev, ROCEE_EXT_DB_OTH_H_REG);
	roce_set_field(val, ROCEE_EXT_DB_OTH_H_EXT_DB_OTH_SHIFT_M,
		       ROCEE_EXT_DB_OTH_H_EXT_DB_OTH_SHIFT_S,
		       db->ext_db->eodb_dep);
	roce_set_field(val, ROCEE_EXT_DB_SQ_H_EXT_DB_OTH_BA_H_M,
		       ROCEE_EXT_DB_SQ_H_EXT_DB_OTH_BA_H_S,
		       db->ext_db->eodb_dep);
	roce_write(hr_dev, ROCEE_EXT_DB_OTH_H_REG, val);

	dev_dbg(dev, "ext ODB depth: 0x%x\n", db->ext_db->eodb_dep);
	dev_dbg(dev, "ext ODB threshold: empty: 0x%x, ful: 0x%x\n",
		ext_odb_alept, ext_odb_alful);
}

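/*
 * Allocate the DMA-coherent buffers for the extended send/other
 * doorbells (when the respective extend mode is enabled) and program
 * their watermarks, falling back to the normal in-register doorbell
 * watermarks otherwise.
 */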
static int hns_roce_db_ext_init(struct hns_roce_dev *hr_dev, u32 sdb_ext_mod,
				u32 odb_ext_mod)
{
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_v1_priv *priv;
	struct hns_roce_db_table *db;
	dma_addr_t sdb_dma_addr;
	dma_addr_t odb_dma_addr;
	int ret = 0;

	priv = (struct hns_roce_v1_priv *)hr_dev->priv;
	db = &priv->db_table;

	db->ext_db = kmalloc(sizeof(*db->ext_db), GFP_KERNEL);
	if (!db->ext_db)
		return -ENOMEM;

	if (sdb_ext_mod) {
		db->ext_db->sdb_buf_list = kmalloc(
				sizeof(*db->ext_db->sdb_buf_list), GFP_KERNEL);
		if (!db->ext_db->sdb_buf_list) {
			ret = -ENOMEM;
			goto ext_sdb_buf_fail_out;
		}

		db->ext_db->sdb_buf_list->buf = dma_alloc_coherent(dev,
						HNS_ROCE_V1_EXT_SDB_SIZE,
						&sdb_dma_addr, GFP_KERNEL);
		if (!db->ext_db->sdb_buf_list->buf) {
			ret = -ENOMEM;
			goto alloc_sq_db_buf_fail;
		}
		db->ext_db->sdb_buf_list->map = sdb_dma_addr;

		db->ext_db->esdb_dep = ilog2(HNS_ROCE_V1_EXT_SDB_DEPTH);
		hns_roce_set_sdb_ext(hr_dev, HNS_ROCE_V1_EXT_SDB_ALEPT,
				     HNS_ROCE_V1_EXT_SDB_ALFUL);
	} else
		hns_roce_set_sdb(hr_dev, HNS_ROCE_V1_SDB_ALEPT,
				 HNS_ROCE_V1_SDB_ALFUL);

	if (odb_ext_mod) {
		db->ext_db->odb_buf_list = kmalloc(
				sizeof(*db->ext_db->odb_buf_list), GFP_KERNEL);
		if (!db->ext_db->odb_buf_list) {
			ret = -ENOMEM;
			goto ext_odb_buf_fail_out;
		}

		db->ext_db->odb_buf_list->buf = dma_alloc_coherent(dev,
						HNS_ROCE_V1_EXT_ODB_SIZE,
						&odb_dma_addr, GFP_KERNEL);
		if (!db->ext_db->odb_buf_list->buf) {
			ret = -ENOMEM;
			goto alloc_otr_db_buf_fail;
		}
		db->ext_db->odb_buf_list->map = odb_dma_addr;

		db->ext_db->eodb_dep = ilog2(HNS_ROCE_V1_EXT_ODB_DEPTH);
		hns_roce_set_odb_ext(hr_dev, HNS_ROCE_V1_EXT_ODB_ALEPT,
				     HNS_ROCE_V1_EXT_ODB_ALFUL);
	} else
		hns_roce_set_odb(hr_dev, HNS_ROCE_V1_ODB_ALEPT,
				 HNS_ROCE_V1_ODB_ALFUL);

	hns_roce_set_db_ext_mode(hr_dev, sdb_ext_mod, odb_ext_mod);

	return 0;

alloc_otr_db_buf_fail:
	kfree(db->ext_db->odb_buf_list);

ext_odb_buf_fail_out:
	if (sdb_ext_mod) {
		dma_free_coherent(dev, HNS_ROCE_V1_EXT_SDB_SIZE,
				  db->ext_db->sdb_buf_list->buf,
				  db->ext_db->sdb_buf_list->map);
	}

alloc_sq_db_buf_fail:
	if (sdb_ext_mod)
		kfree(db->ext_db->sdb_buf_list);

ext_sdb_buf_fail_out:
	kfree(db->ext_db);
	return ret;
}

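/*
 * Create one RC loopback QP on the given PD. These QPs are only used
 * internally to flush outstanding work when freeing an MR.
 */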
static struct hns_roce_qp *hns_roce_v1_create_lp_qp(struct hns_roce_dev *hr_dev,
						    struct ib_pd *pd)
{
	struct device *dev = &hr_dev->pdev->dev;
	struct ib_qp_init_attr init_attr;
	struct ib_qp *qp;

	memset(&init_attr, 0, sizeof(struct ib_qp_init_attr));
	init_attr.qp_type = IB_QPT_RC;
	init_attr.sq_sig_type = IB_SIGNAL_ALL_WR;
	init_attr.cap.max_recv_wr = HNS_ROCE_MIN_WQE_NUM;
	init_attr.cap.max_send_wr = HNS_ROCE_MIN_WQE_NUM;

	qp = hns_roce_create_qp(pd, &init_attr, NULL);
	if (IS_ERR(qp)) {
		dev_err(dev, "Create loop qp for mr free failed!");
		return NULL;
	}

	return to_hr_qp(qp);
}

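/*
 * Reserve the loopback QPs used by the MR-free flow: create a shared
 * CQ and PD, then one loopback QP per active physical port, and move
 * each QP through RESET -> INIT -> RTR -> RTS with a destination GID
 * built from the port's own MAC address.
 */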
static int hns_roce_v1_rsv_lp_qp(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_caps *caps = &hr_dev->caps;
	struct device *dev = &hr_dev->pdev->dev;
	struct ib_cq_init_attr cq_init_attr;
	struct hns_roce_free_mr *free_mr;
	struct ib_qp_attr attr = { 0 };
	struct hns_roce_v1_priv *priv;
	struct hns_roce_qp *hr_qp;
	struct ib_cq *cq;
	struct ib_pd *pd;
	union ib_gid dgid;
	u64 subnet_prefix;
	int attr_mask = 0;
	int i, j;
	int ret;
	u8 queue_en[HNS_ROCE_V1_RESV_QP] = { 0 };
	u8 phy_port;
	u8 port = 0;
	u8 sl;

	priv = (struct hns_roce_v1_priv *)hr_dev->priv;
	free_mr = &priv->free_mr;

	/* Reserve a cq for the loop qps */
	cq_init_attr.cqe = HNS_ROCE_MIN_WQE_NUM * 2;
	cq_init_attr.comp_vector = 0;
	cq = hns_roce_ib_create_cq(&hr_dev->ib_dev, &cq_init_attr, NULL, NULL);
	if (IS_ERR(cq)) {
		dev_err(dev, "Create cq for reserved loop qp failed!");
		return -ENOMEM;
	}
	free_mr->mr_free_cq = to_hr_cq(cq);
	free_mr->mr_free_cq->ib_cq.device = &hr_dev->ib_dev;
	free_mr->mr_free_cq->ib_cq.uobject = NULL;
	free_mr->mr_free_cq->ib_cq.comp_handler = NULL;
	free_mr->mr_free_cq->ib_cq.event_handler = NULL;
	free_mr->mr_free_cq->ib_cq.cq_context = NULL;
	atomic_set(&free_mr->mr_free_cq->ib_cq.usecnt, 0);

	pd = hns_roce_alloc_pd(&hr_dev->ib_dev, NULL, NULL);
	if (IS_ERR(pd)) {
		dev_err(dev, "Create pd for reserved loop qp failed!");
		ret = -ENOMEM;
		goto alloc_pd_failed;
	}
	free_mr->mr_free_pd = to_hr_pd(pd);
	free_mr->mr_free_pd->ibpd.device = &hr_dev->ib_dev;
	free_mr->mr_free_pd->ibpd.uobject = NULL;
	atomic_set(&free_mr->mr_free_pd->ibpd.usecnt, 0);

	attr.qp_access_flags = IB_ACCESS_REMOTE_WRITE;
	attr.pkey_index = 0;
	attr.min_rnr_timer = 0;
	/* Disable read ability */
	attr.max_dest_rd_atomic = 0;
	attr.max_rd_atomic = 0;
	/* Use arbitrary values as rq_psn and sq_psn */
	attr.rq_psn = 0x0808;
	attr.sq_psn = 0x0808;
	attr.retry_cnt = 7;
	attr.rnr_retry = 7;
	attr.timeout = 0x12;
	attr.path_mtu = IB_MTU_256;
	attr.ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE;
	rdma_ah_set_grh(&attr.ah_attr, NULL, 0, 0, 1, 0);
	rdma_ah_set_static_rate(&attr.ah_attr, 3);

	subnet_prefix = cpu_to_be64(0xfe80000000000000LL);
	for (i = 0; i < HNS_ROCE_V1_RESV_QP; i++) {
		phy_port = (i >= HNS_ROCE_MAX_PORTS) ? (i - 2) :
				(i % HNS_ROCE_MAX_PORTS);
		sl = i / HNS_ROCE_MAX_PORTS;

		for (j = 0; j < caps->num_ports; j++) {
			if (hr_dev->iboe.phy_port[j] == phy_port) {
				queue_en[i] = 1;
				port = j;
				break;
			}
		}

		if (!queue_en[i])
			continue;

		free_mr->mr_free_qp[i] = hns_roce_v1_create_lp_qp(hr_dev, pd);
		if (!free_mr->mr_free_qp[i]) {
			dev_err(dev, "Create loop qp failed!\n");
			goto create_lp_qp_failed;
		}
		hr_qp = free_mr->mr_free_qp[i];

		hr_qp->port = port;
		hr_qp->phy_port = phy_port;
		hr_qp->ibqp.qp_type = IB_QPT_RC;
		hr_qp->ibqp.device = &hr_dev->ib_dev;
		hr_qp->ibqp.uobject = NULL;
		atomic_set(&hr_qp->ibqp.usecnt, 0);
		hr_qp->ibqp.pd = pd;
		hr_qp->ibqp.recv_cq = cq;
		hr_qp->ibqp.send_cq = cq;

		rdma_ah_set_port_num(&attr.ah_attr, port + 1);
		rdma_ah_set_sl(&attr.ah_attr, sl);
		attr.port_num = port + 1;

		attr.dest_qp_num = hr_qp->qpn;
		memcpy(rdma_ah_retrieve_dmac(&attr.ah_attr),
		       hr_dev->dev_addr[port],
		       MAC_ADDR_OCTET_NUM);

		memcpy(&dgid.raw, &subnet_prefix, sizeof(u64));
		memcpy(&dgid.raw[8], hr_dev->dev_addr[port], 3);
		memcpy(&dgid.raw[13], hr_dev->dev_addr[port] + 3, 3);
		dgid.raw[11] = 0xff;
		dgid.raw[12] = 0xfe;
		dgid.raw[8] ^= 2;
		rdma_ah_set_dgid_raw(&attr.ah_attr, dgid.raw);

		ret = hr_dev->hw->modify_qp(&hr_qp->ibqp, &attr, attr_mask,
					    IB_QPS_RESET, IB_QPS_INIT);
		if (ret) {
			dev_err(dev, "modify qp failed(%d)!\n", ret);
			goto create_lp_qp_failed;
		}

		ret = hr_dev->hw->modify_qp(&hr_qp->ibqp, &attr, IB_QP_DEST_QPN,
					    IB_QPS_INIT, IB_QPS_RTR);
		if (ret) {
			dev_err(dev, "modify qp failed(%d)!\n", ret);
			goto create_lp_qp_failed;
		}

		ret = hr_dev->hw->modify_qp(&hr_qp->ibqp, &attr, attr_mask,
					    IB_QPS_RTR, IB_QPS_RTS);
		if (ret) {
			dev_err(dev, "modify qp failed(%d)!\n", ret);
			goto create_lp_qp_failed;
		}
	}

	return 0;

create_lp_qp_failed:
	for (i -= 1; i >= 0; i--) {
		hr_qp = free_mr->mr_free_qp[i];
		if (hns_roce_v1_destroy_qp(&hr_qp->ibqp))
			dev_err(dev, "Destroy qp %d for mr free failed!\n", i);
	}

	if (hns_roce_dealloc_pd(pd))
		dev_err(dev, "Destroy pd for create_lp_qp failed!\n");

alloc_pd_failed:
	if (hns_roce_ib_destroy_cq(cq))
		dev_err(dev, "Destroy cq for create_lp_qp failed!\n");

	return -EINVAL;
}

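/*
 * Tear down the reserved loopback QPs along with their shared CQ and
 * PD; the counterpart of hns_roce_v1_rsv_lp_qp().
 */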
static void hns_roce_v1_release_lp_qp(struct hns_roce_dev *hr_dev)
{
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_free_mr *free_mr;
	struct hns_roce_v1_priv *priv;
	struct hns_roce_qp *hr_qp;
	int ret;
	int i;

	priv = (struct hns_roce_v1_priv *)hr_dev->priv;
	free_mr = &priv->free_mr;

	for (i = 0; i < HNS_ROCE_V1_RESV_QP; i++) {
		hr_qp = free_mr->mr_free_qp[i];
		if (!hr_qp)
			continue;

		ret = hns_roce_v1_destroy_qp(&hr_qp->ibqp);
		if (ret)
			dev_err(dev, "Destroy qp %d for mr free failed(%d)!\n",
				i, ret);
	}

	ret = hns_roce_ib_destroy_cq(&free_mr->mr_free_cq->ib_cq);
	if (ret)
		dev_err(dev, "Destroy cq for mr_free failed(%d)!\n", ret);

	ret = hns_roce_dealloc_pd(&free_mr->mr_free_pd->ibpd);
	if (ret)
		dev_err(dev, "Destroy pd for mr_free failed(%d)!\n", ret);
}

static int hns_roce_db_init(struct hns_roce_dev *hr_dev)
{
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_v1_priv *priv;
	struct hns_roce_db_table *db;
	u32 sdb_ext_mod;
	u32 odb_ext_mod;
	u32 sdb_evt_mod;
	u32 odb_evt_mod;
	int ret = 0;

	priv = (struct hns_roce_v1_priv *)hr_dev->priv;
	db = &priv->db_table;

	memset(db, 0, sizeof(*db));

	/* Default DB mode */
	sdb_ext_mod = HNS_ROCE_SDB_EXTEND_MODE;
	odb_ext_mod = HNS_ROCE_ODB_EXTEND_MODE;
	sdb_evt_mod = HNS_ROCE_SDB_NORMAL_MODE;
	odb_evt_mod = HNS_ROCE_ODB_POLL_MODE;

	db->sdb_ext_mod = sdb_ext_mod;
	db->odb_ext_mod = odb_ext_mod;

	/* Init extend DB */
	ret = hns_roce_db_ext_init(hr_dev, sdb_ext_mod, odb_ext_mod);
	if (ret) {
		dev_err(dev, "Failed in extend DB configuration.\n");
		return ret;
	}

	hns_roce_set_db_event_mode(hr_dev, sdb_evt_mod, odb_evt_mod);

	return 0;
}

static void hns_roce_v1_recreate_lp_qp_work_fn(struct work_struct *work)
{
	struct hns_roce_recreate_lp_qp_work *lp_qp_work;
	struct hns_roce_dev *hr_dev;

	lp_qp_work = container_of(work, struct hns_roce_recreate_lp_qp_work,
				  work);
	hr_dev = to_hr_dev(lp_qp_work->ib_dev);

	hns_roce_v1_release_lp_qp(hr_dev);

	if (hns_roce_v1_rsv_lp_qp(hr_dev))
		dev_err(&hr_dev->pdev->dev, "create reserved qp failed\n");

	if (lp_qp_work->comp_flag)
		complete(lp_qp_work->comp);

	kfree(lp_qp_work);
}

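/*
 * Queue the recreate work on the free-mr workqueue and wait for it to
 * finish, polling the completion for up to
 * HNS_ROCE_V1_RECREATE_LP_QP_TIMEOUT_MSECS before giving up.
 */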
static int hns_roce_v1_recreate_lp_qp(struct hns_roce_dev *hr_dev)
{
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_recreate_lp_qp_work *lp_qp_work;
	struct hns_roce_free_mr *free_mr;
	struct hns_roce_v1_priv *priv;
	struct completion comp;
	unsigned long end =
	  msecs_to_jiffies(HNS_ROCE_V1_RECREATE_LP_QP_TIMEOUT_MSECS) + jiffies;

	priv = (struct hns_roce_v1_priv *)hr_dev->priv;
	free_mr = &priv->free_mr;

	lp_qp_work = kzalloc(sizeof(struct hns_roce_recreate_lp_qp_work),
			     GFP_KERNEL);
	if (!lp_qp_work)
		return -ENOMEM;

	INIT_WORK(&(lp_qp_work->work), hns_roce_v1_recreate_lp_qp_work_fn);

	lp_qp_work->ib_dev = &(hr_dev->ib_dev);
	lp_qp_work->comp = &comp;
	lp_qp_work->comp_flag = 1;

	init_completion(lp_qp_work->comp);

	queue_work(free_mr->free_mr_wq, &(lp_qp_work->work));

	while (time_before_eq(jiffies, end)) {
		if (try_wait_for_completion(&comp))
			return 0;
		msleep(HNS_ROCE_V1_RECREATE_LP_QP_WAIT_VALUE);
	}

	lp_qp_work->comp_flag = 0;
	if (try_wait_for_completion(&comp))
		return 0;

	dev_warn(dev, "recreate lp qp failed: 20s timeout expired!\n");
	return -ETIMEDOUT;
}

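/*
 * Post a zero-length RDMA write on a loopback QP; its completion is
 * used by the MR-free flow to know the hardware has processed the
 * previously posted work on that QP.
 */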
static int hns_roce_v1_send_lp_wqe(struct hns_roce_qp *hr_qp)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(hr_qp->ibqp.device);
	struct device *dev = &hr_dev->pdev->dev;
	struct ib_send_wr send_wr, *bad_wr;
	int ret;

	memset(&send_wr, 0, sizeof(send_wr));
	send_wr.next = NULL;
	send_wr.num_sge = 0;
	send_wr.send_flags = 0;
	send_wr.sg_list = NULL;
	send_wr.wr_id = (unsigned long long)&send_wr;
	send_wr.opcode = IB_WR_RDMA_WRITE;

	ret = hns_roce_v1_post_send(&hr_qp->ibqp, &send_wr, &bad_wr);
	if (ret) {
		dev_err(dev, "Post write wqe for mr free failed(%d)!", ret);
		return ret;
	}

	return 0;
}

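/*
 * Work function for freeing an MR: post one loopback WQE per active
 * reserved QP, then poll the shared CQ until every completion has
 * arrived or the timeout expires.
 */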
static void hns_roce_v1_mr_free_work_fn(struct work_struct *work)
{
	struct hns_roce_mr_free_work *mr_work;
	struct ib_wc wc[HNS_ROCE_V1_RESV_QP];
	struct hns_roce_free_mr *free_mr;
	struct hns_roce_cq *mr_free_cq;
	struct hns_roce_v1_priv *priv;
	struct hns_roce_dev *hr_dev;
	struct hns_roce_mr *hr_mr;
	struct hns_roce_qp *hr_qp;
	struct device *dev;
	unsigned long end =
		msecs_to_jiffies(HNS_ROCE_V1_FREE_MR_TIMEOUT_MSECS) + jiffies;
	int i;
	int ret;
	int ne = 0;

	mr_work = container_of(work, struct hns_roce_mr_free_work, work);
	hr_mr = (struct hns_roce_mr *)mr_work->mr;
	hr_dev = to_hr_dev(mr_work->ib_dev);
	dev = &hr_dev->pdev->dev;

	priv = (struct hns_roce_v1_priv *)hr_dev->priv;
	free_mr = &priv->free_mr;
	mr_free_cq = free_mr->mr_free_cq;

	for (i = 0; i < HNS_ROCE_V1_RESV_QP; i++) {
		hr_qp = free_mr->mr_free_qp[i];
		if (!hr_qp)
			continue;
		ne++;

		ret = hns_roce_v1_send_lp_wqe(hr_qp);
		if (ret) {
			dev_err(dev,
				"Send wqe (qp:0x%lx) for mr free failed(%d)!\n",
				hr_qp->qpn, ret);
			goto free_work;
		}
	}

	if (!ne) {
		dev_err(dev, "Reserved loop qp is absent!\n");
		goto free_work;
	}

	do {
		ret = hns_roce_v1_poll_cq(&mr_free_cq->ib_cq, ne, wc);
		if (ret < 0) {
			dev_err(dev,
				"(qp:0x%lx) starts, Poll cqe failed(%d) for mr 0x%x free! Remain %d cqe\n",
				hr_qp->qpn, ret, hr_mr->key, ne);
			goto free_work;
		}
		ne -= ret;
		usleep_range(HNS_ROCE_V1_FREE_MR_WAIT_VALUE * 1000,
			     (1 + HNS_ROCE_V1_FREE_MR_WAIT_VALUE) * 1000);
	} while (ne && time_before_eq(jiffies, end));

	if (ne != 0)
		dev_err(dev,
			"Poll cqe for mr 0x%x free timeout! Remain %d cqe\n",
			hr_mr->key, ne);

free_work:
	if (mr_work->comp_flag)
		complete(mr_work->comp);
	kfree(mr_work);
}

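/*
 * Deregister an MR: move the MPT back to software ownership, queue the
 * loopback flush work and wait for it, then release the PBL, the MTPT
 * bitmap entry, the umem and the MR itself.
 */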
static int hns_roce_v1_dereg_mr(struct hns_roce_dev *hr_dev,
				struct hns_roce_mr *mr)
{
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_mr_free_work *mr_work;
	struct hns_roce_free_mr *free_mr;
	struct hns_roce_v1_priv *priv;
	struct completion comp;
	unsigned long end =
		msecs_to_jiffies(HNS_ROCE_V1_FREE_MR_TIMEOUT_MSECS) + jiffies;
	unsigned long start = jiffies;
	int npages;
	int ret = 0;

	priv = (struct hns_roce_v1_priv *)hr_dev->priv;
	free_mr = &priv->free_mr;

	if (mr->enabled) {
		if (hns_roce_hw2sw_mpt(hr_dev, NULL, key_to_hw_index(mr->key)
				       & (hr_dev->caps.num_mtpts - 1)))
			dev_warn(dev, "HW2SW_MPT failed!\n");
	}

	mr_work = kzalloc(sizeof(*mr_work), GFP_KERNEL);
	if (!mr_work) {
		ret = -ENOMEM;
		goto free_mr;
	}

	INIT_WORK(&(mr_work->work), hns_roce_v1_mr_free_work_fn);

	mr_work->ib_dev = &(hr_dev->ib_dev);
	mr_work->comp = &comp;
	mr_work->comp_flag = 1;
	mr_work->mr = (void *)mr;
	init_completion(mr_work->comp);

	queue_work(free_mr->free_mr_wq, &(mr_work->work));

	while (time_before_eq(jiffies, end)) {
		if (try_wait_for_completion(&comp))
			goto free_mr;
		msleep(HNS_ROCE_V1_FREE_MR_WAIT_VALUE);
	}

	mr_work->comp_flag = 0;
	if (try_wait_for_completion(&comp))
		goto free_mr;

	dev_warn(dev, "Free mr work 0x%x over 50s and failed!\n", mr->key);
	ret = -ETIMEDOUT;

free_mr:
	dev_dbg(dev, "Free mr 0x%x use 0x%x us.\n",
		mr->key, jiffies_to_usecs(jiffies) - jiffies_to_usecs(start));

	if (mr->size != ~0ULL) {
		npages = ib_umem_page_count(mr->umem);
		dma_free_coherent(dev, npages * 8, mr->pbl_buf,
				  mr->pbl_dma_addr);
	}

	hns_roce_bitmap_free(&hr_dev->mr_table.mtpt_bitmap,
			     key_to_hw_index(mr->key), 0);

	if (mr->umem)
		ib_umem_release(mr->umem);

	kfree(mr);

	return ret;
}

static void hns_roce_db_free(struct hns_roce_dev *hr_dev)
{
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_v1_priv *priv;
	struct hns_roce_db_table *db;

	priv = (struct hns_roce_v1_priv *)hr_dev->priv;
	db = &priv->db_table;

	if (db->sdb_ext_mod) {
		dma_free_coherent(dev, HNS_ROCE_V1_EXT_SDB_SIZE,
				  db->ext_db->sdb_buf_list->buf,
				  db->ext_db->sdb_buf_list->map);
		kfree(db->ext_db->sdb_buf_list);
	}

	if (db->odb_ext_mod) {
		dma_free_coherent(dev, HNS_ROCE_V1_EXT_ODB_SIZE,
				  db->ext_db->odb_buf_list->buf,
				  db->ext_db->odb_buf_list->map);
		kfree(db->ext_db->odb_buf_list);
	}

	kfree(db->ext_db);
}

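/*
 * Allocate the extended receive asynchronous queue (RAQ) buffer and
 * program its base address, depth, watermark, poll interval and drop
 * policy into the ROCEE registers.
 */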
static int hns_roce_raq_init(struct hns_roce_dev *hr_dev)
{
	int ret;
	int raq_shift = 0;
	dma_addr_t addr;
	u32 val;
	struct hns_roce_v1_priv *priv;
	struct hns_roce_raq_table *raq;
	struct device *dev = &hr_dev->pdev->dev;

	priv = (struct hns_roce_v1_priv *)hr_dev->priv;
	raq = &priv->raq_table;

	raq->e_raq_buf = kzalloc(sizeof(*(raq->e_raq_buf)), GFP_KERNEL);
	if (!raq->e_raq_buf)
		return -ENOMEM;

	raq->e_raq_buf->buf = dma_alloc_coherent(dev, HNS_ROCE_V1_RAQ_SIZE,
						 &addr, GFP_KERNEL);
	if (!raq->e_raq_buf->buf) {
		ret = -ENOMEM;
		goto err_dma_alloc_raq;
	}
	raq->e_raq_buf->map = addr;

	/* Configure raq extended address. 48 bit, 4 KB aligned */
	roce_write(hr_dev, ROCEE_EXT_RAQ_REG, raq->e_raq_buf->map >> 12);

	/* Configure raq_shift */
	raq_shift = ilog2(HNS_ROCE_V1_RAQ_SIZE / HNS_ROCE_V1_RAQ_ENTRY);
	val = roce_read(hr_dev, ROCEE_EXT_RAQ_H_REG);
	roce_set_field(val, ROCEE_EXT_RAQ_H_EXT_RAQ_SHIFT_M,
		       ROCEE_EXT_RAQ_H_EXT_RAQ_SHIFT_S, raq_shift);
	/*
	 * 44 = 32 + 12. The address is shifted by 12 because the buffer
	 * is 4 KB aligned, and by a further 32 to obtain the high 32 bits
	 * that are written to hardware.
	 */
	roce_set_field(val, ROCEE_EXT_RAQ_H_EXT_RAQ_BA_H_M,
		       ROCEE_EXT_RAQ_H_EXT_RAQ_BA_H_S,
		       raq->e_raq_buf->map >> 44);
	roce_write(hr_dev, ROCEE_EXT_RAQ_H_REG, val);
	dev_dbg(dev, "Configure raq_shift 0x%x.\n", val);

	/* Configure raq threshold */
	val = roce_read(hr_dev, ROCEE_RAQ_WL_REG);
	roce_set_field(val, ROCEE_RAQ_WL_ROCEE_RAQ_WL_M,
		       ROCEE_RAQ_WL_ROCEE_RAQ_WL_S,
		       HNS_ROCE_V1_EXT_RAQ_WF);
	roce_write(hr_dev, ROCEE_RAQ_WL_REG, val);
	dev_dbg(dev, "Configure raq_wl 0x%x.\n", val);

	/* Enable extend raq */
	val = roce_read(hr_dev, ROCEE_WRMS_POL_TIME_INTERVAL_REG);
	roce_set_field(val,
		       ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_POL_TIME_INTERVAL_M,
		       ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_POL_TIME_INTERVAL_S,
		       POL_TIME_INTERVAL_VAL);
	roce_set_bit(val, ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_EXT_RAQ_MODE, 1);
	roce_set_field(val,
		       ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_RAQ_TIMEOUT_CHK_CFG_M,
		       ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_RAQ_TIMEOUT_CHK_CFG_S,
		       2);
	roce_set_bit(val,
		     ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_RAQ_TIMEOUT_CHK_EN_S, 1);
	roce_write(hr_dev, ROCEE_WRMS_POL_TIME_INTERVAL_REG, val);
	dev_dbg(dev, "Configure WrmsPolTimeInterval 0x%x.\n", val);

	/* Enable raq drop */
	val = roce_read(hr_dev, ROCEE_GLB_CFG_REG);
	roce_set_bit(val, ROCEE_GLB_CFG_TRP_RAQ_DROP_EN_S, 1);
	roce_write(hr_dev, ROCEE_GLB_CFG_REG, val);
	dev_dbg(dev, "Configure GlbCfg = 0x%x.\n", val);

	return 0;

err_dma_alloc_raq:
	kfree(raq->e_raq_buf);
	return ret;
}

static void hns_roce_raq_free(struct hns_roce_dev *hr_dev)
{
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_v1_priv *priv;
	struct hns_roce_raq_table *raq;

	priv = (struct hns_roce_v1_priv *)hr_dev->priv;
	raq = &priv->raq_table;

	dma_free_coherent(dev, HNS_ROCE_V1_RAQ_SIZE, raq->e_raq_buf->buf,
			  raq->e_raq_buf->map);
	kfree(raq->e_raq_buf);
}

static void hns_roce_port_enable(struct hns_roce_dev *hr_dev, int enable_flag)
{
	u32 val;

	if (enable_flag) {
		val = roce_read(hr_dev, ROCEE_GLB_CFG_REG);
		/* Open all ports */
		roce_set_field(val, ROCEE_GLB_CFG_ROCEE_PORT_ST_M,
			       ROCEE_GLB_CFG_ROCEE_PORT_ST_S,
			       ALL_PORT_VAL_OPEN);
		roce_write(hr_dev, ROCEE_GLB_CFG_REG, val);
	} else {
		val = roce_read(hr_dev, ROCEE_GLB_CFG_REG);
		/* Close all ports */
		roce_set_field(val, ROCEE_GLB_CFG_ROCEE_PORT_ST_M,
			       ROCEE_GLB_CFG_ROCEE_PORT_ST_S, 0x0);
		roce_write(hr_dev, ROCEE_GLB_CFG_REG, val);
	}
}

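/*
 * Allocate the reserved base-address-table (BT) buffers for the QP
 * context, MTPT and CQ context tables.
 */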
static int hns_roce_bt_init(struct hns_roce_dev *hr_dev)
{
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_v1_priv *priv;
	int ret;

	priv = (struct hns_roce_v1_priv *)hr_dev->priv;

	priv->bt_table.qpc_buf.buf = dma_alloc_coherent(dev,
		HNS_ROCE_BT_RSV_BUF_SIZE, &priv->bt_table.qpc_buf.map,
		GFP_KERNEL);
	if (!priv->bt_table.qpc_buf.buf)
		return -ENOMEM;

	priv->bt_table.mtpt_buf.buf = dma_alloc_coherent(dev,
		HNS_ROCE_BT_RSV_BUF_SIZE, &priv->bt_table.mtpt_buf.map,
		GFP_KERNEL);
	if (!priv->bt_table.mtpt_buf.buf) {
		ret = -ENOMEM;
		goto err_failed_alloc_mtpt_buf;
	}

	priv->bt_table.cqc_buf.buf = dma_alloc_coherent(dev,
		HNS_ROCE_BT_RSV_BUF_SIZE, &priv->bt_table.cqc_buf.map,
		GFP_KERNEL);
	if (!priv->bt_table.cqc_buf.buf) {
		ret = -ENOMEM;
		goto err_failed_alloc_cqc_buf;
	}

	return 0;

err_failed_alloc_cqc_buf:
	dma_free_coherent(dev, HNS_ROCE_BT_RSV_BUF_SIZE,
		priv->bt_table.mtpt_buf.buf, priv->bt_table.mtpt_buf.map);

err_failed_alloc_mtpt_buf:
	dma_free_coherent(dev, HNS_ROCE_BT_RSV_BUF_SIZE,
		priv->bt_table.qpc_buf.buf, priv->bt_table.qpc_buf.map);

	return ret;
}

static void hns_roce_bt_free(struct hns_roce_dev *hr_dev)
{
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_v1_priv *priv;

	priv = (struct hns_roce_v1_priv *)hr_dev->priv;

	dma_free_coherent(dev, HNS_ROCE_BT_RSV_BUF_SIZE,
		priv->bt_table.cqc_buf.buf, priv->bt_table.cqc_buf.map);

	dma_free_coherent(dev, HNS_ROCE_BT_RSV_BUF_SIZE,
		priv->bt_table.mtpt_buf.buf, priv->bt_table.mtpt_buf.map);

	dma_free_coherent(dev, HNS_ROCE_BT_RSV_BUF_SIZE,
		priv->bt_table.qpc_buf.buf, priv->bt_table.qpc_buf.map);
}

static int hns_roce_tptr_init(struct hns_roce_dev *hr_dev)
{
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_buf_list *tptr_buf;
	struct hns_roce_v1_priv *priv;

	priv = (struct hns_roce_v1_priv *)hr_dev->priv;
	tptr_buf = &priv->tptr_table.tptr_buf;

	/*
	 * This buffer will be used for the CQ's tptr (tail pointer),
	 * also named ci (consumer index). Every CQ uses 2 bytes to save
	 * its cqe ci in hip06. Hardware reads this area to get the new
	 * ci when the queue is almost full.
	 */
	tptr_buf->buf = dma_alloc_coherent(dev, HNS_ROCE_V1_TPTR_BUF_SIZE,
					   &tptr_buf->map, GFP_KERNEL);
	if (!tptr_buf->buf)
		return -ENOMEM;

	hr_dev->tptr_dma_addr = tptr_buf->map;
	hr_dev->tptr_size = HNS_ROCE_V1_TPTR_BUF_SIZE;

	return 0;
}

static void hns_roce_tptr_free(struct hns_roce_dev *hr_dev)
{
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_buf_list *tptr_buf;
	struct hns_roce_v1_priv *priv;

	priv = (struct hns_roce_v1_priv *)hr_dev->priv;
	tptr_buf = &priv->tptr_table.tptr_buf;

	dma_free_coherent(dev, HNS_ROCE_V1_TPTR_BUF_SIZE,
			  tptr_buf->buf, tptr_buf->map);
}

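/*
 * Set up the single-threaded workqueue used by the MR-free flow and
 * reserve its loopback QPs.
 */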
static int hns_roce_free_mr_init(struct hns_roce_dev *hr_dev)
{
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_free_mr *free_mr;
	struct hns_roce_v1_priv *priv;
	int ret = 0;

	priv = (struct hns_roce_v1_priv *)hr_dev->priv;
	free_mr = &priv->free_mr;

	free_mr->free_mr_wq = create_singlethread_workqueue("hns_roce_free_mr");
	if (!free_mr->free_mr_wq) {
		dev_err(dev, "Create free mr workqueue failed!\n");
		return -ENOMEM;
	}

	ret = hns_roce_v1_rsv_lp_qp(hr_dev);
	if (ret) {
		dev_err(dev, "Reserved loop qp failed(%d)!\n", ret);
		flush_workqueue(free_mr->free_mr_wq);
		destroy_workqueue(free_mr->free_mr_wq);
	}

	return ret;
}

static void hns_roce_free_mr_free(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_free_mr *free_mr;
	struct hns_roce_v1_priv *priv;

	priv = (struct hns_roce_v1_priv *)hr_dev->priv;
	free_mr = &priv->free_mr;

	flush_workqueue(free_mr->free_mr_wq);
	destroy_workqueue(free_mr->free_mr_wq);

	hns_roce_v1_release_lp_qp(hr_dev);
}

/**
 * hns_roce_v1_reset - reset RoCE
 * @hr_dev: RoCE device struct pointer
 * @dereset: true -- drop reset, false -- reset
 * return 0 - success, negative -- fail
 */
static int hns_roce_v1_reset(struct hns_roce_dev *hr_dev, bool dereset)
{
	struct device_node *dsaf_node;
	struct device *dev = &hr_dev->pdev->dev;
	struct device_node *np = dev->of_node;
	struct fwnode_handle *fwnode;
	int ret;

	/* check if this is DT/ACPI case */
	if (dev_of_node(dev)) {
		dsaf_node = of_parse_phandle(np, "dsaf-handle", 0);
		if (!dsaf_node) {
			dev_err(dev, "could not find dsaf-handle\n");
			return -EINVAL;
		}
		fwnode = &dsaf_node->fwnode;
	} else if (is_acpi_device_node(dev->fwnode)) {
		struct acpi_reference_args args;

		ret = acpi_node_get_property_reference(dev->fwnode,
						       "dsaf-handle", 0, &args);
		if (ret) {
			dev_err(dev, "could not find dsaf-handle\n");
			return ret;
		}
		fwnode = acpi_fwnode_handle(args.adev);
	} else {
		dev_err(dev, "cannot read data from DT or ACPI\n");
		return -ENXIO;
	}

	ret = hns_dsaf_roce_reset(fwnode, false);
	if (ret)
		return ret;

	if (dereset) {
		msleep(SLEEP_TIME_INTERVAL);
		ret = hns_dsaf_roce_reset(fwnode, true);
	}

	return ret;
}


static int hns_roce_des_qp_init(struct hns_roce_dev *hr_dev)
{
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_v1_priv *priv;
	struct hns_roce_des_qp *des_qp;

	priv = (struct hns_roce_v1_priv *)hr_dev->priv;
	des_qp = &priv->des_qp;

	des_qp->requeue_flag = 1;
	des_qp->qp_wq = create_singlethread_workqueue("hns_roce_destroy_qp");
	if (!des_qp->qp_wq) {
		dev_err(dev, "Create destroy qp workqueue failed!\n");
		return -ENOMEM;
	}

	return 0;
}

static void hns_roce_des_qp_free(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_v1_priv *priv;
	struct hns_roce_des_qp *des_qp;

	priv = (struct hns_roce_v1_priv *)hr_dev->priv;
	des_qp = &priv->des_qp;

	des_qp->requeue_flag = 0;
	flush_workqueue(des_qp->qp_wq);
	destroy_workqueue(des_qp->qp_wq);
}

static int hns_roce_v1_profile(struct hns_roce_dev *hr_dev)
{
	int i = 0;
	struct hns_roce_caps *caps = &hr_dev->caps;

	hr_dev->vendor_id = le32_to_cpu(roce_read(hr_dev, ROCEE_VENDOR_ID_REG));
	hr_dev->vendor_part_id = le32_to_cpu(roce_read(hr_dev,
					     ROCEE_VENDOR_PART_ID_REG));
	hr_dev->sys_image_guid = le32_to_cpu(roce_read(hr_dev,
					     ROCEE_SYS_IMAGE_GUID_L_REG)) |
				((u64)le32_to_cpu(roce_read(hr_dev,
					     ROCEE_SYS_IMAGE_GUID_H_REG)) << 32);
	hr_dev->hw_rev = HNS_ROCE_HW_VER1;

	caps->num_qps = HNS_ROCE_V1_MAX_QP_NUM;
	caps->max_wqes = HNS_ROCE_V1_MAX_WQE_NUM;
	caps->min_wqes = HNS_ROCE_MIN_WQE_NUM;
	caps->num_cqs = HNS_ROCE_V1_MAX_CQ_NUM;
	caps->min_cqes = HNS_ROCE_MIN_CQE_NUM;
	caps->max_cqes = HNS_ROCE_V1_MAX_CQE_NUM;
	caps->max_sq_sg = HNS_ROCE_V1_SG_NUM;
	caps->max_rq_sg = HNS_ROCE_V1_SG_NUM;
	caps->max_sq_inline = HNS_ROCE_V1_INLINE_SIZE;
	caps->num_uars = HNS_ROCE_V1_UAR_NUM;
	caps->phy_num_uars = HNS_ROCE_V1_PHY_UAR_NUM;
	caps->num_aeq_vectors = HNS_ROCE_V1_AEQE_VEC_NUM;
	caps->num_comp_vectors = HNS_ROCE_V1_COMP_VEC_NUM;
	caps->num_other_vectors = HNS_ROCE_V1_ABNORMAL_VEC_NUM;
	caps->num_mtpts = HNS_ROCE_V1_MAX_MTPT_NUM;
	caps->num_mtt_segs = HNS_ROCE_V1_MAX_MTT_SEGS;
	caps->num_pds = HNS_ROCE_V1_MAX_PD_NUM;
	caps->max_qp_init_rdma = HNS_ROCE_V1_MAX_QP_INIT_RDMA;
	caps->max_qp_dest_rdma = HNS_ROCE_V1_MAX_QP_DEST_RDMA;
	caps->max_sq_desc_sz = HNS_ROCE_V1_MAX_SQ_DESC_SZ;
	caps->max_rq_desc_sz = HNS_ROCE_V1_MAX_RQ_DESC_SZ;
	caps->qpc_entry_sz = HNS_ROCE_V1_QPC_ENTRY_SIZE;
	caps->irrl_entry_sz = HNS_ROCE_V1_IRRL_ENTRY_SIZE;
	caps->cqc_entry_sz = HNS_ROCE_V1_CQC_ENTRY_SIZE;
	caps->mtpt_entry_sz = HNS_ROCE_V1_MTPT_ENTRY_SIZE;
	caps->mtt_entry_sz = HNS_ROCE_V1_MTT_ENTRY_SIZE;
	caps->cq_entry_sz = HNS_ROCE_V1_CQE_ENTRY_SIZE;
	caps->page_size_cap = HNS_ROCE_V1_PAGE_SIZE_SUPPORT;
	caps->reserved_lkey = 0;
	caps->reserved_pds = 0;
	caps->reserved_mrws = 1;
	caps->reserved_uars = 0;
	caps->reserved_cqs = 0;
	caps->chunk_sz = HNS_ROCE_V1_TABLE_CHUNK_SIZE;

	for (i = 0; i < caps->num_ports; i++)
		caps->pkey_table_len[i] = 1;

	for (i = 0; i < caps->num_ports; i++) {
		/* Six ports share 16 GIDs in the v1 engine */
		if (i >= (HNS_ROCE_V1_GID_NUM % caps->num_ports))
			caps->gid_table_len[i] = HNS_ROCE_V1_GID_NUM /
						 caps->num_ports;
		else
			caps->gid_table_len[i] = HNS_ROCE_V1_GID_NUM /
						 caps->num_ports + 1;
	}

	caps->ceqe_depth = HNS_ROCE_V1_COMP_EQE_NUM;
	caps->aeqe_depth = HNS_ROCE_V1_ASYNC_EQE_NUM;
	caps->local_ca_ack_delay = le32_to_cpu(roce_read(hr_dev,
							 ROCEE_ACK_DELAY_REG));
	caps->max_mtu = IB_MTU_2048;

	return 0;
}

static int hns_roce_v1_init(struct hns_roce_dev *hr_dev)
{
	int ret;
	u32 val;
	struct device *dev = &hr_dev->pdev->dev;

	/* DMAE user config */
	val = roce_read(hr_dev, ROCEE_DMAE_USER_CFG1_REG);
	roce_set_field(val, ROCEE_DMAE_USER_CFG1_ROCEE_CACHE_TB_CFG_M,
		       ROCEE_DMAE_USER_CFG1_ROCEE_CACHE_TB_CFG_S, 0xf);
	roce_set_field(val, ROCEE_DMAE_USER_CFG1_ROCEE_STREAM_ID_TB_CFG_M,
		       ROCEE_DMAE_USER_CFG1_ROCEE_STREAM_ID_TB_CFG_S,
		       1 << PAGES_SHIFT_16);
	roce_write(hr_dev, ROCEE_DMAE_USER_CFG1_REG, val);

	val = roce_read(hr_dev, ROCEE_DMAE_USER_CFG2_REG);
	roce_set_field(val, ROCEE_DMAE_USER_CFG2_ROCEE_CACHE_PKT_CFG_M,
		       ROCEE_DMAE_USER_CFG2_ROCEE_CACHE_PKT_CFG_S, 0xf);
	roce_set_field(val, ROCEE_DMAE_USER_CFG2_ROCEE_STREAM_ID_PKT_CFG_M,
		       ROCEE_DMAE_USER_CFG2_ROCEE_STREAM_ID_PKT_CFG_S,
		       1 << PAGES_SHIFT_16);

	ret = hns_roce_db_init(hr_dev);
	if (ret) {
		dev_err(dev, "doorbell init failed!\n");
		return ret;
	}

	ret = hns_roce_raq_init(hr_dev);
	if (ret) {
		dev_err(dev, "raq init failed!\n");
		goto error_failed_raq_init;
	}

	ret = hns_roce_bt_init(hr_dev);
	if (ret) {
		dev_err(dev, "bt init failed!\n");
		goto error_failed_bt_init;
	}

	ret = hns_roce_tptr_init(hr_dev);
	if (ret) {
		dev_err(dev, "tptr init failed!\n");
		goto error_failed_tptr_init;
	}

	ret = hns_roce_des_qp_init(hr_dev);
	if (ret) {
		dev_err(dev, "des qp init failed!\n");
		goto error_failed_des_qp_init;
	}

	ret = hns_roce_free_mr_init(hr_dev);
	if (ret) {
		dev_err(dev, "free mr init failed!\n");
		goto error_failed_free_mr_init;
	}

	hns_roce_port_enable(hr_dev, HNS_ROCE_PORT_UP);

	return 0;

error_failed_free_mr_init:
	hns_roce_des_qp_free(hr_dev);

error_failed_des_qp_init:
	hns_roce_tptr_free(hr_dev);

error_failed_tptr_init:
	hns_roce_bt_free(hr_dev);

error_failed_bt_init:
	hns_roce_raq_free(hr_dev);

error_failed_raq_init:
	hns_roce_db_free(hr_dev);
	return ret;
}

static void hns_roce_v1_exit(struct hns_roce_dev *hr_dev)
{
	hns_roce_port_enable(hr_dev, HNS_ROCE_PORT_DOWN);
	hns_roce_free_mr_free(hr_dev);
	hns_roce_des_qp_free(hr_dev);
	hns_roce_tptr_free(hr_dev);
	hns_roce_bt_free(hr_dev);
	hns_roce_raq_free(hr_dev);
	hns_roce_db_free(hr_dev);
}

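/*
 * Mailbox command interface: hns_roce_v1_cmd_pending() tests the GO
 * bit, and hns_roce_v1_post_mbox() writes the mailbox parameters and
 * triggers execution once the previous command has completed.
 */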
static int hns_roce_v1_cmd_pending(struct hns_roce_dev *hr_dev)
{
	u32 status = readl(hr_dev->reg_base + ROCEE_MB6_REG);

	return (!!(status & (1 << HCR_GO_BIT)));
}

static int hns_roce_v1_post_mbox(struct hns_roce_dev *hr_dev, u64 in_param,
				 u64 out_param, u32 in_modifier, u8 op_modifier,
				 u16 op, u16 token, int event)
{
	u32 __iomem *hcr = (u32 __iomem *)(hr_dev->reg_base + ROCEE_MB1_REG);
	unsigned long end;
	u32 val = 0;

	end = msecs_to_jiffies(GO_BIT_TIMEOUT_MSECS) + jiffies;
	while (hns_roce_v1_cmd_pending(hr_dev)) {
		if (time_after(jiffies, end)) {
			dev_err(hr_dev->dev, "jiffies=%d end=%d\n",
				(int)jiffies, (int)end);
			return -EAGAIN;
		}
		cond_resched();
	}

	roce_set_field(val, ROCEE_MB6_ROCEE_MB_CMD_M, ROCEE_MB6_ROCEE_MB_CMD_S,
		       op);
	roce_set_field(val, ROCEE_MB6_ROCEE_MB_CMD_MDF_M,
		       ROCEE_MB6_ROCEE_MB_CMD_MDF_S, op_modifier);
	roce_set_bit(val, ROCEE_MB6_ROCEE_MB_EVENT_S, event);
	roce_set_bit(val, ROCEE_MB6_ROCEE_MB_HW_RUN_S, 1);
	roce_set_field(val, ROCEE_MB6_ROCEE_MB_TOKEN_M,
		       ROCEE_MB6_ROCEE_MB_TOKEN_S, token);

	__raw_writeq(cpu_to_le64(in_param), hcr + 0);
	__raw_writeq(cpu_to_le64(out_param), hcr + 2);
	__raw_writel(cpu_to_le32(in_modifier), hcr + 4);
	/* Memory barrier */
	wmb();

	__raw_writel(cpu_to_le32(val), hcr + 5);

	mmiowb();

	return 0;
}

static int hns_roce_v1_chk_mbox(struct hns_roce_dev *hr_dev,
				unsigned long timeout)
{
	u8 __iomem *hcr = hr_dev->reg_base + ROCEE_MB1_REG;
	unsigned long end = 0;
	u32 status = 0;

	end = msecs_to_jiffies(timeout) + jiffies;
	while (hns_roce_v1_cmd_pending(hr_dev) && time_before(jiffies, end))
		cond_resched();

	if (hns_roce_v1_cmd_pending(hr_dev)) {
		dev_err(hr_dev->dev, "[cmd_poll]hw run cmd TIMEDOUT!\n");
		return -ETIMEDOUT;
	}

	status = le32_to_cpu((__force __be32)
			     __raw_readl(hcr + HCR_STATUS_OFFSET));
	if ((status & STATUS_MASK) != 0x1) {
		dev_err(hr_dev->dev, "mailbox status 0x%x!\n", status);
		return -EBUSY;
	}

	return 0;
}

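/*
 * Write a 128-bit GID into the per-index GID registers, 32 bits at a
 * time (low, middle-low, middle-high and high words).
 */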
static int hns_roce_v1_set_gid(struct hns_roce_dev *hr_dev, u8 port,
			       int gid_index, union ib_gid *gid,
			       const struct ib_gid_attr *attr)
{
	u32 *p = NULL;
	u8 gid_idx = 0;

	gid_idx = hns_get_gid_index(hr_dev, port, gid_index);

	p = (u32 *)&gid->raw[0];
	roce_raw_write(*p, hr_dev->reg_base + ROCEE_PORT_GID_L_0_REG +
		       (HNS_ROCE_V1_GID_NUM * gid_idx));

	p = (u32 *)&gid->raw[4];
	roce_raw_write(*p, hr_dev->reg_base + ROCEE_PORT_GID_ML_0_REG +
		       (HNS_ROCE_V1_GID_NUM * gid_idx));

	p = (u32 *)&gid->raw[8];
	roce_raw_write(*p, hr_dev->reg_base + ROCEE_PORT_GID_MH_0_REG +
		       (HNS_ROCE_V1_GID_NUM * gid_idx));

	p = (u32 *)&gid->raw[0xc];
	roce_raw_write(*p, hr_dev->reg_base + ROCEE_PORT_GID_H_0_REG +
		       (HNS_ROCE_V1_GID_NUM * gid_idx));

	return 0;
}

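/*
 * Program the source MAC of a physical port. Changing the MAC breaks
 * the loopback QPs (smac would no longer match dmac), so they are
 * recreated first when the MR-free flow is in use.
 */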
static int hns_roce_v1_set_mac(struct hns_roce_dev *hr_dev, u8 phy_port,
			       u8 *addr)
{
	u32 reg_smac_l;
	u16 reg_smac_h;
	u16 *p_h;
	u32 *p;
	u32 val;

	/*
	 * When the mac changes, loopback may fail because smac no longer
	 * equals dmac. We need to release and create the reserved qps
	 * again.
	 */
	if (hr_dev->hw->dereg_mr) {
		int ret;

		ret = hns_roce_v1_recreate_lp_qp(hr_dev);
		if (ret && ret != -ETIMEDOUT)
			return ret;
	}

	p = (u32 *)(&addr[0]);
	reg_smac_l = *p;
	roce_raw_write(reg_smac_l, hr_dev->reg_base + ROCEE_SMAC_L_0_REG +
		       PHY_PORT_OFFSET * phy_port);

	val = roce_read(hr_dev,
			ROCEE_SMAC_H_0_REG + phy_port * PHY_PORT_OFFSET);
	p_h = (u16 *)(&addr[4]);
	reg_smac_h = *p_h;
	roce_set_field(val, ROCEE_SMAC_H_ROCEE_SMAC_H_M,
		       ROCEE_SMAC_H_ROCEE_SMAC_H_S, reg_smac_h);
	roce_write(hr_dev, ROCEE_SMAC_H_0_REG + phy_port * PHY_PORT_OFFSET,
		   val);

	return 0;
}

static void hns_roce_v1_set_mtu(struct hns_roce_dev *hr_dev, u8 phy_port,
				enum ib_mtu mtu)
{
	u32 val;

	val = roce_read(hr_dev,
			ROCEE_SMAC_H_0_REG + phy_port * PHY_PORT_OFFSET);
	roce_set_field(val, ROCEE_SMAC_H_ROCEE_PORT_MTU_M,
		       ROCEE_SMAC_H_ROCEE_PORT_MTU_S, mtu);
	roce_write(hr_dev, ROCEE_SMAC_H_0_REG + phy_port * PHY_PORT_OFFSET,
		   val);
}

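/*
 * Fill an MPT entry in the mailbox buffer for a memory region: key
 * state, access rights, IOVA/length, PD and L_KEY index. For non-DMA
 * MRs the first page addresses are recorded inline in the MTPT.
 */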
d61d6de0
BVA
1785static int hns_roce_v1_write_mtpt(void *mb_buf, struct hns_roce_mr *mr,
1786 unsigned long mtpt_idx)
9a443537 1787{
1788 struct hns_roce_v1_mpt_entry *mpt_entry;
1789 struct scatterlist *sg;
1790 u64 *pages;
1791 int entry;
1792 int i;
1793
1794 /* MPT filled into mailbox buf */
1795 mpt_entry = (struct hns_roce_v1_mpt_entry *)mb_buf;
1796 memset(mpt_entry, 0, sizeof(*mpt_entry));
1797
1798 roce_set_field(mpt_entry->mpt_byte_4, MPT_BYTE_4_KEY_STATE_M,
1799 MPT_BYTE_4_KEY_STATE_S, KEY_VALID);
1800 roce_set_field(mpt_entry->mpt_byte_4, MPT_BYTE_4_KEY_M,
1801 MPT_BYTE_4_KEY_S, mr->key);
1802 roce_set_field(mpt_entry->mpt_byte_4, MPT_BYTE_4_PAGE_SIZE_M,
1803 MPT_BYTE_4_PAGE_SIZE_S, MR_SIZE_4K);
1804 roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_MW_TYPE_S, 0);
1805 roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_MW_BIND_ENABLE_S,
1806 (mr->access & IB_ACCESS_MW_BIND ? 1 : 0));
1807 roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_OWN_S, 0);
1808 roce_set_field(mpt_entry->mpt_byte_4, MPT_BYTE_4_MEMORY_LOCATION_TYPE_M,
1809 MPT_BYTE_4_MEMORY_LOCATION_TYPE_S, mr->type);
1810 roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_REMOTE_ATOMIC_S, 0);
1811 roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_LOCAL_WRITE_S,
1812 (mr->access & IB_ACCESS_LOCAL_WRITE ? 1 : 0));
1813 roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_REMOTE_WRITE_S,
1814 (mr->access & IB_ACCESS_REMOTE_WRITE ? 1 : 0));
1815 roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_REMOTE_READ_S,
1816 (mr->access & IB_ACCESS_REMOTE_READ ? 1 : 0));
1817 roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_REMOTE_INVAL_ENABLE_S,
1818 0);
1819 roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_ADDRESS_TYPE_S, 0);
1820
1821 roce_set_field(mpt_entry->mpt_byte_12, MPT_BYTE_12_PBL_ADDR_H_M,
1822 MPT_BYTE_12_PBL_ADDR_H_S, 0);
1823 roce_set_field(mpt_entry->mpt_byte_12, MPT_BYTE_12_MW_BIND_COUNTER_M,
1824 MPT_BYTE_12_MW_BIND_COUNTER_S, 0);
1825
1826 mpt_entry->virt_addr_l = (u32)mr->iova;
1827 mpt_entry->virt_addr_h = (u32)(mr->iova >> 32);
1828 mpt_entry->length = (u32)mr->size;
1829
1830 roce_set_field(mpt_entry->mpt_byte_28, MPT_BYTE_28_PD_M,
1831 MPT_BYTE_28_PD_S, mr->pd);
1832 roce_set_field(mpt_entry->mpt_byte_28, MPT_BYTE_28_L_KEY_IDX_L_M,
1833 MPT_BYTE_28_L_KEY_IDX_L_S, mtpt_idx);
1834 roce_set_field(mpt_entry->mpt_byte_64, MPT_BYTE_64_L_KEY_IDX_H_M,
1835 MPT_BYTE_64_L_KEY_IDX_H_S, mtpt_idx >> MTPT_IDX_SHIFT);
1836
ad61dd30 1837 /* DMA memory register */
9a443537 1838 if (mr->type == MR_TYPE_DMA)
1839 return 0;
1840
1841 pages = (u64 *) __get_free_page(GFP_KERNEL);
1842 if (!pages)
1843 return -ENOMEM;
1844
1845 i = 0;
1846 for_each_sg(mr->umem->sg_head.sgl, sg, mr->umem->nmap, entry) {
1847 pages[i] = ((u64)sg_dma_address(sg)) >> 12;
1848
1849	/* Record the first 7 entries directly in the MTPT table */
1850 if (i >= HNS_ROCE_MAX_INNER_MTPT_NUM)
1851 break;
1852 i++;
1853 }
1854
1855 /* Register user mr */
1856 for (i = 0; i < HNS_ROCE_MAX_INNER_MTPT_NUM; i++) {
1857 switch (i) {
1858 case 0:
1859 mpt_entry->pa0_l = cpu_to_le32((u32)(pages[i]));
1860 roce_set_field(mpt_entry->mpt_byte_36,
1861 MPT_BYTE_36_PA0_H_M,
1862 MPT_BYTE_36_PA0_H_S,
1863 cpu_to_le32((u32)(pages[i] >> PAGES_SHIFT_32)));
1864 break;
1865 case 1:
1866 roce_set_field(mpt_entry->mpt_byte_36,
1867 MPT_BYTE_36_PA1_L_M,
1868 MPT_BYTE_36_PA1_L_S,
1869 cpu_to_le32((u32)(pages[i])));
1870 roce_set_field(mpt_entry->mpt_byte_40,
1871 MPT_BYTE_40_PA1_H_M,
1872 MPT_BYTE_40_PA1_H_S,
1873 cpu_to_le32((u32)(pages[i] >> PAGES_SHIFT_24)));
1874 break;
1875 case 2:
1876 roce_set_field(mpt_entry->mpt_byte_40,
1877 MPT_BYTE_40_PA2_L_M,
1878 MPT_BYTE_40_PA2_L_S,
1879 cpu_to_le32((u32)(pages[i])));
1880 roce_set_field(mpt_entry->mpt_byte_44,
1881 MPT_BYTE_44_PA2_H_M,
1882 MPT_BYTE_44_PA2_H_S,
1883 cpu_to_le32((u32)(pages[i] >> PAGES_SHIFT_16)));
1884 break;
1885 case 3:
1886 roce_set_field(mpt_entry->mpt_byte_44,
1887 MPT_BYTE_44_PA3_L_M,
1888 MPT_BYTE_44_PA3_L_S,
1889 cpu_to_le32((u32)(pages[i])));
1890 roce_set_field(mpt_entry->mpt_byte_48,
1891 MPT_BYTE_48_PA3_H_M,
1892 MPT_BYTE_48_PA3_H_S,
1893 cpu_to_le32((u32)(pages[i] >> PAGES_SHIFT_8)));
1894 break;
1895 case 4:
1896 mpt_entry->pa4_l = cpu_to_le32((u32)(pages[i]));
1897 roce_set_field(mpt_entry->mpt_byte_56,
1898 MPT_BYTE_56_PA4_H_M,
1899 MPT_BYTE_56_PA4_H_S,
1900 cpu_to_le32((u32)(pages[i] >> PAGES_SHIFT_32)));
1901 break;
1902 case 5:
1903 roce_set_field(mpt_entry->mpt_byte_56,
1904 MPT_BYTE_56_PA5_L_M,
1905 MPT_BYTE_56_PA5_L_S,
1906 cpu_to_le32((u32)(pages[i])));
1907 roce_set_field(mpt_entry->mpt_byte_60,
1908 MPT_BYTE_60_PA5_H_M,
1909 MPT_BYTE_60_PA5_H_S,
1910 cpu_to_le32((u32)(pages[i] >> PAGES_SHIFT_24)));
1911 break;
1912 case 6:
1913 roce_set_field(mpt_entry->mpt_byte_60,
1914 MPT_BYTE_60_PA6_L_M,
1915 MPT_BYTE_60_PA6_L_S,
1916 cpu_to_le32((u32)(pages[i])));
1917 roce_set_field(mpt_entry->mpt_byte_64,
1918 MPT_BYTE_64_PA6_H_M,
1919 MPT_BYTE_64_PA6_H_S,
1920 cpu_to_le32((u32)(pages[i] >> PAGES_SHIFT_16)));
1921 break;
1922 default:
1923 break;
1924 }
1925 }
1926
1927 free_page((unsigned long) pages);
1928
1929 mpt_entry->pbl_addr_l = (u32)(mr->pbl_dma_addr);
1930
1931 roce_set_field(mpt_entry->mpt_byte_12, MPT_BYTE_12_PBL_ADDR_H_M,
1932 MPT_BYTE_12_PBL_ADDR_H_S,
1933 ((u32)(mr->pbl_dma_addr >> 32)));
1934
1935 return 0;
1936}
1937
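/*
 * Owner-bit scheme: the ring holds (ib_cq.cqe + 1) entries, so
 * n & (ib_cq.cqe + 1) is the wrap bit of index n.  A CQE is
 * software-owned while its owner bit differs from that wrap bit;
 * e.g. with a 256-entry ring and cons_index 300 (wrap bit set), the
 * CQE at slot 300 & 255 = 44 is valid only while its owner bit is 0.
 */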
1938static void *get_cqe(struct hns_roce_cq *hr_cq, int n)
1939{
1940 return hns_roce_buf_offset(&hr_cq->hr_buf.hr_buf,
1941 n * HNS_ROCE_V1_CQE_ENTRY_SIZE);
1942}
1943
1944static void *get_sw_cqe(struct hns_roce_cq *hr_cq, int n)
1945{
1946 struct hns_roce_cqe *hr_cqe = get_cqe(hr_cq, n & hr_cq->ib_cq.cqe);
1947
1948	/* The CQE is software-owned when its owner bit is the inverse of the wrap bit of cons_index */
1949 return (roce_get_bit(hr_cqe->cqe_byte_4, CQE_BYTE_4_OWNER_S) ^
1950 !!(n & (hr_cq->ib_cq.cqe + 1))) ? hr_cqe : NULL;
1951}
1952
1953static struct hns_roce_cqe *next_cqe_sw(struct hns_roce_cq *hr_cq)
1954{
1955 return get_sw_cqe(hr_cq, hr_cq->cons_index);
1956}
1957
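/*
 * The consumer index reaches hardware through a 64-bit doorbell:
 * word 0 carries the CI masked to twice the CQ depth, while word 1
 * packs the hardware-sync flag, a command field (3 here) and the
 * CQN.
 */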
d61d6de0 1958static void hns_roce_v1_cq_set_ci(struct hns_roce_cq *hr_cq, u32 cons_index)
9a443537 1959{
1960 u32 doorbell[2];
1961
1962 doorbell[0] = cons_index & ((hr_cq->cq_depth << 1) - 1);
5b0ff9a0 1963 doorbell[1] = 0;
9a443537 1964 roce_set_bit(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_HW_SYNS_S, 1);
1965 roce_set_field(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_M,
1966 ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_S, 3);
1967 roce_set_field(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_MDF_M,
1968 ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_MDF_S, 0);
1969 roce_set_field(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_INP_H_M,
1970 ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_INP_H_S, hr_cq->cqn);
1971
1972 hns_roce_write64_k(doorbell, hr_cq->cq_db_l);
1973}
1974
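/*
 * __hns_roce_v1_cq_clean() compacts the CQ in place: entries that
 * belong to the destroyed QP are dropped, later entries are copied
 * back over them, and each destination slot keeps its own owner bit
 * so the ownership pattern of the ring is preserved.
 */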
1975static void __hns_roce_v1_cq_clean(struct hns_roce_cq *hr_cq, u32 qpn,
1976 struct hns_roce_srq *srq)
1977{
1978 struct hns_roce_cqe *cqe, *dest;
1979 u32 prod_index;
1980 int nfreed = 0;
1981 u8 owner_bit;
1982
1983 for (prod_index = hr_cq->cons_index; get_sw_cqe(hr_cq, prod_index);
1984 ++prod_index) {
1985 if (prod_index == hr_cq->cons_index + hr_cq->ib_cq.cqe)
1986 break;
1987 }
1988
1989	/*
1990	 * Now sweep backwards through the CQ, removing entries that
1991	 * match our QP by overwriting them with later entries.
1992	 */
9a443537 1993 while ((int) --prod_index - (int) hr_cq->cons_index >= 0) {
1994 cqe = get_cqe(hr_cq, prod_index & hr_cq->ib_cq.cqe);
1995 if ((roce_get_field(cqe->cqe_byte_16, CQE_BYTE_16_LOCAL_QPN_M,
1996 CQE_BYTE_16_LOCAL_QPN_S) &
1997 HNS_ROCE_CQE_QPN_MASK) == qpn) {
1998	/* The v1 engine does not support SRQ */
1999 ++nfreed;
2000 } else if (nfreed) {
2001 dest = get_cqe(hr_cq, (prod_index + nfreed) &
2002 hr_cq->ib_cq.cqe);
2003 owner_bit = roce_get_bit(dest->cqe_byte_4,
2004 CQE_BYTE_4_OWNER_S);
2005 memcpy(dest, cqe, sizeof(*cqe));
2006 roce_set_bit(dest->cqe_byte_4, CQE_BYTE_4_OWNER_S,
2007 owner_bit);
2008 }
2009 }
2010
2011 if (nfreed) {
2012 hr_cq->cons_index += nfreed;
2013 /*
2014 * Make sure update of buffer contents is done before
2015 * updating consumer index.
2016 */
9a443537 2017 wmb();
2018
a4be892e 2019 hns_roce_v1_cq_set_ci(hr_cq, hr_cq->cons_index);
9a443537 2020 }
2021}
2022
2023static void hns_roce_v1_cq_clean(struct hns_roce_cq *hr_cq, u32 qpn,
2024 struct hns_roce_srq *srq)
2025{
2026 spin_lock_irq(&hr_cq->lock);
2027 __hns_roce_v1_cq_clean(hr_cq, qpn, srq);
2028 spin_unlock_irq(&hr_cq->lock);
2029}
2030
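/*
 * Each CQ context points at a per-CQN slot in a shared "tptr" DMA
 * buffer (presumably a tail pointer that the hardware reads back);
 * hns_roce_v1_poll_cq() stores the software CI there before ringing
 * the doorbell.
 */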
2031static void hns_roce_v1_write_cqc(struct hns_roce_dev *hr_dev,
2032 struct hns_roce_cq *hr_cq, void *mb_buf,
2033 u64 *mtts, dma_addr_t dma_handle, int nent,
2034 u32 vector)
9a443537 2035{
2036 struct hns_roce_cq_context *cq_context = NULL;
2037 struct hns_roce_buf_list *tptr_buf;
2038 struct hns_roce_v1_priv *priv;
2039 dma_addr_t tptr_dma_addr;
2040 int offset;
2041
016a0059 2042 priv = (struct hns_roce_v1_priv *)hr_dev->priv;
8f3e9f3e 2043 tptr_buf = &priv->tptr_table.tptr_buf;
9a443537 2044
2045 cq_context = mb_buf;
2046 memset(cq_context, 0, sizeof(*cq_context));
2047
2048 /* Get the tptr for this CQ. */
2049 offset = hr_cq->cqn * HNS_ROCE_V1_TPTR_ENTRY_SIZE;
2050 tptr_dma_addr = tptr_buf->map + offset;
2051 hr_cq->tptr_addr = (u16 *)(tptr_buf->buf + offset);
9a443537 2052
2053 /* Register cq_context members */
2054 roce_set_field(cq_context->cqc_byte_4,
2055 CQ_CONTEXT_CQC_BYTE_4_CQC_STATE_M,
2056 CQ_CONTEXT_CQC_BYTE_4_CQC_STATE_S, CQ_STATE_VALID);
2057 roce_set_field(cq_context->cqc_byte_4, CQ_CONTEXT_CQC_BYTE_4_CQN_M,
2058 CQ_CONTEXT_CQC_BYTE_4_CQN_S, hr_cq->cqn);
2059 cq_context->cqc_byte_4 = cpu_to_le32(cq_context->cqc_byte_4);
2060
2061 cq_context->cq_bt_l = (u32)dma_handle;
2062 cq_context->cq_bt_l = cpu_to_le32(cq_context->cq_bt_l);
2063
2064 roce_set_field(cq_context->cqc_byte_12,
2065 CQ_CONTEXT_CQC_BYTE_12_CQ_BT_H_M,
2066 CQ_CONTEXT_CQC_BYTE_12_CQ_BT_H_S,
2067 ((u64)dma_handle >> 32));
2068 roce_set_field(cq_context->cqc_byte_12,
2069 CQ_CONTEXT_CQC_BYTE_12_CQ_CQE_SHIFT_M,
2070 CQ_CONTEXT_CQC_BYTE_12_CQ_CQE_SHIFT_S,
2071 ilog2((unsigned int)nent));
2072 roce_set_field(cq_context->cqc_byte_12, CQ_CONTEXT_CQC_BYTE_12_CEQN_M,
2073 CQ_CONTEXT_CQC_BYTE_12_CEQN_S, vector);
2074 cq_context->cqc_byte_12 = cpu_to_le32(cq_context->cqc_byte_12);
2075
2076 cq_context->cur_cqe_ba0_l = (u32)(mtts[0]);
2077 cq_context->cur_cqe_ba0_l = cpu_to_le32(cq_context->cur_cqe_ba0_l);
2078
2079 roce_set_field(cq_context->cqc_byte_20,
2080 CQ_CONTEXT_CQC_BYTE_20_CUR_CQE_BA0_H_M,
2081 CQ_CONTEXT_CQC_BYTE_20_CUR_CQE_BA0_H_S,
2082 cpu_to_le32((mtts[0]) >> 32));
2083 /* Dedicated hardware, directly set 0 */
2084 roce_set_field(cq_context->cqc_byte_20,
2085 CQ_CONTEXT_CQC_BYTE_20_CQ_CUR_INDEX_M,
2086 CQ_CONTEXT_CQC_BYTE_20_CQ_CUR_INDEX_S, 0);
2087	/*
2088	 * 44 = 32 + 12: shift by 12 because 4K pages are used, and by a
2089	 * further 32 to extract the high 32 bits of the address passed
2090	 * to the hardware.
2091	 */
2092 roce_set_field(cq_context->cqc_byte_20,
2093 CQ_CONTEXT_CQC_BYTE_20_CQE_TPTR_ADDR_H_M,
2094 CQ_CONTEXT_CQC_BYTE_20_CQE_TPTR_ADDR_H_S,
8f3e9f3e 2095 tptr_dma_addr >> 44);
9a443537 2096 cq_context->cqc_byte_20 = cpu_to_le32(cq_context->cqc_byte_20);
2097
8f3e9f3e 2098 cq_context->cqe_tptr_addr_l = (u32)(tptr_dma_addr >> 12);
9a443537 2099
2100 roce_set_field(cq_context->cqc_byte_32,
2101 CQ_CONTEXT_CQC_BYTE_32_CUR_CQE_BA1_H_M,
2102 CQ_CONTEXT_CQC_BYTE_32_CUR_CQE_BA1_H_S, 0);
2103 roce_set_bit(cq_context->cqc_byte_32,
2104 CQ_CONTEXT_CQC_BYTE_32_SE_FLAG_S, 0);
2105 roce_set_bit(cq_context->cqc_byte_32,
2106 CQ_CONTEXT_CQC_BYTE_32_CE_FLAG_S, 0);
2107 roce_set_bit(cq_context->cqc_byte_32,
2108 CQ_CONTEXT_CQC_BYTE_32_NOTIFICATION_FLAG_S, 0);
2109 roce_set_bit(cq_context->cqc_byte_32,
2110 CQ_CQNTEXT_CQC_BYTE_32_TYPE_OF_COMPLETION_NOTIFICATION_S,
2111 0);
e84e40be 2112 /* The initial value of cq's ci is 0 */
9a443537 2113 roce_set_field(cq_context->cqc_byte_32,
2114 CQ_CONTEXT_CQC_BYTE_32_CQ_CONS_IDX_M,
2115 CQ_CONTEXT_CQC_BYTE_32_CQ_CONS_IDX_S, 0);
2116 cq_context->cqc_byte_32 = cpu_to_le32(cq_context->cqc_byte_32);
2117}
2118
b156269d 2119static int hns_roce_v1_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
2120{
2121 return -EOPNOTSUPP;
2122}
2123
2124static int hns_roce_v1_req_notify_cq(struct ib_cq *ibcq,
2125 enum ib_cq_notify_flags flags)
9a443537 2126{
2127 struct hns_roce_cq *hr_cq = to_hr_cq(ibcq);
2128 u32 notification_flag;
2129 u32 doorbell[2];
9a443537 2130
2131 notification_flag = (flags & IB_CQ_SOLICITED_MASK) ==
2132 IB_CQ_SOLICITED ? CQ_DB_REQ_NOT : CQ_DB_REQ_NOT_SOL;
2133	/*
2134	 * flags = 0: notification flag = 1, next
2135	 * flags = 1: notification flag = 0, solicited
2136	 */
9a443537 2137 doorbell[0] = hr_cq->cons_index & ((hr_cq->cq_depth << 1) - 1);
2138 roce_set_bit(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_HW_SYNS_S, 1);
2139 roce_set_field(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_M,
2140 ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_S, 3);
2141 roce_set_field(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_MDF_M,
2142 ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_MDF_S, 1);
2143 roce_set_field(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_INP_H_M,
2144 ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_INP_H_S,
2145 hr_cq->cqn | notification_flag);
2146
2147 hns_roce_write64_k(doorbell, hr_cq->cq_db_l);
2148
87809f83 2149 return 0;
9a443537 2150}
2151
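/*
 * Per-CQE poll path: pick up a software-owned CQE, resolve the QP
 * (cached in *cur_qp across calls), translate the hardware status
 * and opcode into ib_wc fields, and advance the SQ or RQ tail to
 * retire the matching WQE.
 */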
2152static int hns_roce_v1_poll_one(struct hns_roce_cq *hr_cq,
2153 struct hns_roce_qp **cur_qp, struct ib_wc *wc)
2154{
2155 int qpn;
2156 int is_send;
2157 u16 wqe_ctr;
2158 u32 status;
2159 u32 opcode;
2160 struct hns_roce_cqe *cqe;
2161 struct hns_roce_qp *hr_qp;
2162 struct hns_roce_wq *wq;
2163 struct hns_roce_wqe_ctrl_seg *sq_wqe;
2164 struct hns_roce_dev *hr_dev = to_hr_dev(hr_cq->ib_cq.device);
2165 struct device *dev = &hr_dev->pdev->dev;
2166
2167	/* Find the CQE at the current consumer index */
2168 cqe = next_cqe_sw(hr_cq);
2169 if (!cqe)
2170 return -EAGAIN;
2171
2172 ++hr_cq->cons_index;
2173 /* Memory barrier */
2174 rmb();
2175 /* 0->SQ, 1->RQ */
2176 is_send = !(roce_get_bit(cqe->cqe_byte_4, CQE_BYTE_4_SQ_RQ_FLAG_S));
2177
2178	/* local_qpn in a UD CQE is at most 1, so the real QPN must be computed */
2179 if (roce_get_field(cqe->cqe_byte_16, CQE_BYTE_16_LOCAL_QPN_M,
2180 CQE_BYTE_16_LOCAL_QPN_S) <= 1) {
2181 qpn = roce_get_field(cqe->cqe_byte_20, CQE_BYTE_20_PORT_NUM_M,
2182 CQE_BYTE_20_PORT_NUM_S) +
2183 roce_get_field(cqe->cqe_byte_16, CQE_BYTE_16_LOCAL_QPN_M,
2184 CQE_BYTE_16_LOCAL_QPN_S) *
2185 HNS_ROCE_MAX_PORTS;
2186 } else {
2187 qpn = roce_get_field(cqe->cqe_byte_16, CQE_BYTE_16_LOCAL_QPN_M,
2188 CQE_BYTE_16_LOCAL_QPN_S);
2189 }
2190
2191 if (!*cur_qp || (qpn & HNS_ROCE_CQE_QPN_MASK) != (*cur_qp)->qpn) {
2192 hr_qp = __hns_roce_qp_lookup(hr_dev, qpn);
2193 if (unlikely(!hr_qp)) {
2194 dev_err(dev, "CQ %06lx with entry for unknown QPN %06x\n",
2195 hr_cq->cqn, (qpn & HNS_ROCE_CQE_QPN_MASK));
2196 return -EINVAL;
2197 }
2198
2199 *cur_qp = hr_qp;
2200 }
2201
2202 wc->qp = &(*cur_qp)->ibqp;
2203 wc->vendor_err = 0;
2204
2205 status = roce_get_field(cqe->cqe_byte_4,
2206 CQE_BYTE_4_STATUS_OF_THE_OPERATION_M,
2207 CQE_BYTE_4_STATUS_OF_THE_OPERATION_S) &
2208 HNS_ROCE_CQE_STATUS_MASK;
2209 switch (status) {
2210 case HNS_ROCE_CQE_SUCCESS:
2211 wc->status = IB_WC_SUCCESS;
2212 break;
2213 case HNS_ROCE_CQE_SYNDROME_LOCAL_LENGTH_ERR:
2214 wc->status = IB_WC_LOC_LEN_ERR;
2215 break;
2216 case HNS_ROCE_CQE_SYNDROME_LOCAL_QP_OP_ERR:
2217 wc->status = IB_WC_LOC_QP_OP_ERR;
2218 break;
2219 case HNS_ROCE_CQE_SYNDROME_LOCAL_PROT_ERR:
2220 wc->status = IB_WC_LOC_PROT_ERR;
2221 break;
2222 case HNS_ROCE_CQE_SYNDROME_WR_FLUSH_ERR:
2223 wc->status = IB_WC_WR_FLUSH_ERR;
2224 break;
2225 case HNS_ROCE_CQE_SYNDROME_MEM_MANAGE_OPERATE_ERR:
2226 wc->status = IB_WC_MW_BIND_ERR;
2227 break;
2228 case HNS_ROCE_CQE_SYNDROME_BAD_RESP_ERR:
2229 wc->status = IB_WC_BAD_RESP_ERR;
2230 break;
2231 case HNS_ROCE_CQE_SYNDROME_LOCAL_ACCESS_ERR:
2232 wc->status = IB_WC_LOC_ACCESS_ERR;
2233 break;
2234 case HNS_ROCE_CQE_SYNDROME_REMOTE_INVAL_REQ_ERR:
2235 wc->status = IB_WC_REM_INV_REQ_ERR;
2236 break;
2237 case HNS_ROCE_CQE_SYNDROME_REMOTE_ACCESS_ERR:
2238 wc->status = IB_WC_REM_ACCESS_ERR;
2239 break;
2240 case HNS_ROCE_CQE_SYNDROME_REMOTE_OP_ERR:
2241 wc->status = IB_WC_REM_OP_ERR;
2242 break;
2243 case HNS_ROCE_CQE_SYNDROME_TRANSPORT_RETRY_EXC_ERR:
2244 wc->status = IB_WC_RETRY_EXC_ERR;
2245 break;
2246 case HNS_ROCE_CQE_SYNDROME_RNR_RETRY_EXC_ERR:
2247 wc->status = IB_WC_RNR_RETRY_EXC_ERR;
2248 break;
2249 default:
2250 wc->status = IB_WC_GENERAL_ERR;
2251 break;
2252 }
2253
2254	/* On a CQE status error, return immediately */
2255 if (wc->status != IB_WC_SUCCESS)
2256 return 0;
2257
2258 if (is_send) {
2259	/* The CQE corresponds to the SQ */
2260	sq_wqe = get_send_wqe(*cur_qp, roce_get_field(cqe->cqe_byte_4,
2261	CQE_BYTE_4_WQE_INDEX_M,
2262	CQE_BYTE_4_WQE_INDEX_S) &
2263	((*cur_qp)->sq.wqe_cnt - 1));
9a443537 2264 switch (sq_wqe->flag & HNS_ROCE_WQE_OPCODE_MASK) {
2265 case HNS_ROCE_WQE_OPCODE_SEND:
2266 wc->opcode = IB_WC_SEND;
2267 break;
2268 case HNS_ROCE_WQE_OPCODE_RDMA_READ:
2269 wc->opcode = IB_WC_RDMA_READ;
2270 wc->byte_len = le32_to_cpu(cqe->byte_cnt);
2271 break;
2272 case HNS_ROCE_WQE_OPCODE_RDMA_WRITE:
2273 wc->opcode = IB_WC_RDMA_WRITE;
2274 break;
2275 case HNS_ROCE_WQE_OPCODE_LOCAL_INV:
2276 wc->opcode = IB_WC_LOCAL_INV;
2277 break;
2278 case HNS_ROCE_WQE_OPCODE_UD_SEND:
2279 wc->opcode = IB_WC_SEND;
2280 break;
2281 default:
2282 wc->status = IB_WC_GENERAL_ERR;
2283 break;
2284 }
2285 wc->wc_flags = (sq_wqe->flag & HNS_ROCE_WQE_IMM ?
2286 IB_WC_WITH_IMM : 0);
2287
2288 wq = &(*cur_qp)->sq;
2289 if ((*cur_qp)->sq_signal_bits) {
2290	/*
2291	 * If sq_signal_bits is set, first update the tail
2292	 * pointer to the WQE that the current CQE
2293	 * corresponds to.
2294	 */
9a443537 2295 wqe_ctr = (u16)roce_get_field(cqe->cqe_byte_4,
2296 CQE_BYTE_4_WQE_INDEX_M,
2297 CQE_BYTE_4_WQE_INDEX_S);
2298 wq->tail += (wqe_ctr - (u16)wq->tail) &
2299 (wq->wqe_cnt - 1);
2300 }
2301 wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
2302 ++wq->tail;
5f110ac4 2303 } else {
9a443537	2304	/* The CQE corresponds to the RQ */
2305 wc->byte_len = le32_to_cpu(cqe->byte_cnt);
2306 opcode = roce_get_field(cqe->cqe_byte_4,
2307 CQE_BYTE_4_OPERATION_TYPE_M,
2308 CQE_BYTE_4_OPERATION_TYPE_S) &
2309 HNS_ROCE_CQE_OPCODE_MASK;
2310 switch (opcode) {
2311 case HNS_ROCE_OPCODE_RDMA_WITH_IMM_RECEIVE:
2312 wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
2313 wc->wc_flags = IB_WC_WITH_IMM;
2314 wc->ex.imm_data =
2315 cpu_to_be32(le32_to_cpu(cqe->immediate_data));
9a443537 2316 break;
2317 case HNS_ROCE_OPCODE_SEND_DATA_RECEIVE:
2318 if (roce_get_bit(cqe->cqe_byte_4,
2319 CQE_BYTE_4_IMM_INDICATOR_S)) {
2320 wc->opcode = IB_WC_RECV;
2321 wc->wc_flags = IB_WC_WITH_IMM;
2322 wc->ex.imm_data = cpu_to_be32(
2323 le32_to_cpu(cqe->immediate_data));
9a443537 2324 } else {
2325 wc->opcode = IB_WC_RECV;
2326 wc->wc_flags = 0;
2327 }
2328 break;
2329 default:
2330 wc->status = IB_WC_GENERAL_ERR;
2331 break;
2332 }
2333
2334 /* Update tail pointer, record wr_id */
2335 wq = &(*cur_qp)->rq;
2336 wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
2337 ++wq->tail;
2338 wc->sl = (u8)roce_get_field(cqe->cqe_byte_20, CQE_BYTE_20_SL_M,
2339 CQE_BYTE_20_SL_S);
2340 wc->src_qp = (u8)roce_get_field(cqe->cqe_byte_20,
2341 CQE_BYTE_20_REMOTE_QPN_M,
2342 CQE_BYTE_20_REMOTE_QPN_S);
2343 wc->wc_flags |= (roce_get_bit(cqe->cqe_byte_20,
2344 CQE_BYTE_20_GRH_PRESENT_S) ?
2345 IB_WC_GRH : 0);
2346 wc->pkey_index = (u16)roce_get_field(cqe->cqe_byte_28,
2347 CQE_BYTE_28_P_KEY_IDX_M,
2348 CQE_BYTE_28_P_KEY_IDX_S);
2349 }
2350
2351 return 0;
2352}
2353
2354int hns_roce_v1_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
2355{
2356 struct hns_roce_cq *hr_cq = to_hr_cq(ibcq);
2357 struct hns_roce_qp *cur_qp = NULL;
2358 unsigned long flags;
2359 int npolled;
2360 int ret = 0;
2361
2362 spin_lock_irqsave(&hr_cq->lock, flags);
2363
2364 for (npolled = 0; npolled < num_entries; ++npolled) {
2365 ret = hns_roce_v1_poll_one(hr_cq, &cur_qp, wc + npolled);
2366 if (ret)
2367 break;
2368 }
2369
2370 if (npolled) {
2371 *hr_cq->tptr_addr = hr_cq->cons_index &
2372 ((hr_cq->cq_depth << 1) - 1);
2373
2374	/* Memory barrier */
2375 wmb();
a4be892e 2376 hns_roce_v1_cq_set_ci(hr_cq, hr_cq->cons_index);
8f3e9f3e 2377 }
9a443537 2378
2379 spin_unlock_irqrestore(&hr_cq->lock, flags);
2380
2381 if (ret == 0 || ret == -EAGAIN)
2382 return npolled;
2383 else
2384 return ret;
2385}
2386
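/*
 * Clearing a HEM region goes through the BT (base address table)
 * command registers: wait for the previous command's sync bit to
 * drop, then write the table base address and object index as a
 * single 64-bit BT_CMD.
 */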
2387static int hns_roce_v1_clear_hem(struct hns_roce_dev *hr_dev,
2388 struct hns_roce_hem_table *table, int obj,
2389 int step_idx)
2390{
2391 struct device *dev = &hr_dev->pdev->dev;
2392 struct hns_roce_v1_priv *priv;
2393 unsigned long end = 0, flags = 0;
2394 uint32_t bt_cmd_val[2] = {0};
2395 void __iomem *bt_cmd;
2396 u64 bt_ba = 0;
2397
016a0059 2398 priv = (struct hns_roce_v1_priv *)hr_dev->priv;
2399
2400 switch (table->type) {
2401 case HEM_TYPE_QPC:
2402 roce_set_field(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_M,
2403 ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S, HEM_TYPE_QPC);
2404 bt_ba = priv->bt_table.qpc_buf.map >> 12;
2405 break;
2406 case HEM_TYPE_MTPT:
2407 roce_set_field(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_M,
2408 ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S, HEM_TYPE_MTPT);
2409 bt_ba = priv->bt_table.mtpt_buf.map >> 12;
2410 break;
2411 case HEM_TYPE_CQC:
2412 roce_set_field(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_M,
2413 ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S, HEM_TYPE_CQC);
2414 bt_ba = priv->bt_table.cqc_buf.map >> 12;
2415 break;
2416 case HEM_TYPE_SRQC:
2417	dev_dbg(dev, "HEM_TYPE_SRQC not supported.\n");
2418 return -EINVAL;
2419 default:
2420 return 0;
2421 }
2422 roce_set_field(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_IN_MDF_M,
2423 ROCEE_BT_CMD_H_ROCEE_BT_CMD_IN_MDF_S, obj);
2424 roce_set_bit(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_S, 0);
2425 roce_set_bit(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_HW_SYNS_S, 1);
2426
2427 spin_lock_irqsave(&hr_dev->bt_cmd_lock, flags);
2428
2429 bt_cmd = hr_dev->reg_base + ROCEE_BT_CMD_H_REG;
2430
2431 end = msecs_to_jiffies(HW_SYNC_TIMEOUT_MSECS) + jiffies;
2432 while (1) {
2433 if (readl(bt_cmd) >> BT_CMD_SYNC_SHIFT) {
2434 if (!(time_before(jiffies, end))) {
2435	dev_err(dev, "Write bt_cmd err, hw_sync is not zero.\n");
2436 spin_unlock_irqrestore(&hr_dev->bt_cmd_lock,
2437 flags);
2438 return -EBUSY;
2439 }
2440 } else {
2441 break;
2442 }
2443 msleep(HW_SYNC_SLEEP_TIME_INTERVAL);
2444 }
2445
2446 bt_cmd_val[0] = (uint32_t)bt_ba;
2447 roce_set_field(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_BA_H_M,
2448 ROCEE_BT_CMD_H_ROCEE_BT_CMD_BA_H_S, bt_ba >> 32);
2449 hns_roce_write64_k(bt_cmd_val, hr_dev->reg_base + ROCEE_BT_CMD_L_REG);
2450
2451 spin_unlock_irqrestore(&hr_dev->bt_cmd_lock, flags);
2452
2453 return 0;
2454}
2455
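/*
 * QP state transitions are table driven: op[cur][new] holds the
 * mailbox command for every legal pair and a zero entry marks the
 * pair illegal; the 2RST/2ERR commands are issued without a context
 * buffer since they take no parameters.
 */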
9a443537 2456static int hns_roce_v1_qp_modify(struct hns_roce_dev *hr_dev,
2457 struct hns_roce_mtt *mtt,
2458 enum hns_roce_qp_state cur_state,
2459 enum hns_roce_qp_state new_state,
2460 struct hns_roce_qp_context *context,
2461 struct hns_roce_qp *hr_qp)
2462{
2463 static const u16
2464 op[HNS_ROCE_QP_NUM_STATE][HNS_ROCE_QP_NUM_STATE] = {
2465 [HNS_ROCE_QP_STATE_RST] = {
2466 [HNS_ROCE_QP_STATE_RST] = HNS_ROCE_CMD_2RST_QP,
2467 [HNS_ROCE_QP_STATE_ERR] = HNS_ROCE_CMD_2ERR_QP,
2468 [HNS_ROCE_QP_STATE_INIT] = HNS_ROCE_CMD_RST2INIT_QP,
2469 },
2470 [HNS_ROCE_QP_STATE_INIT] = {
2471 [HNS_ROCE_QP_STATE_RST] = HNS_ROCE_CMD_2RST_QP,
2472 [HNS_ROCE_QP_STATE_ERR] = HNS_ROCE_CMD_2ERR_QP,
2473	/* Note: The v1 engine HW doesn't support INIT2INIT,
2474	 * so the RST2INIT cmd is used in its place.
2475	 */
2476 [HNS_ROCE_QP_STATE_INIT] = HNS_ROCE_CMD_RST2INIT_QP,
2477 [HNS_ROCE_QP_STATE_RTR] = HNS_ROCE_CMD_INIT2RTR_QP,
2478 },
2479 [HNS_ROCE_QP_STATE_RTR] = {
2480 [HNS_ROCE_QP_STATE_RST] = HNS_ROCE_CMD_2RST_QP,
2481 [HNS_ROCE_QP_STATE_ERR] = HNS_ROCE_CMD_2ERR_QP,
2482 [HNS_ROCE_QP_STATE_RTS] = HNS_ROCE_CMD_RTR2RTS_QP,
2483 },
2484 [HNS_ROCE_QP_STATE_RTS] = {
2485 [HNS_ROCE_QP_STATE_RST] = HNS_ROCE_CMD_2RST_QP,
2486 [HNS_ROCE_QP_STATE_ERR] = HNS_ROCE_CMD_2ERR_QP,
2487 [HNS_ROCE_QP_STATE_RTS] = HNS_ROCE_CMD_RTS2RTS_QP,
2488 [HNS_ROCE_QP_STATE_SQD] = HNS_ROCE_CMD_RTS2SQD_QP,
2489 },
2490 [HNS_ROCE_QP_STATE_SQD] = {
2491 [HNS_ROCE_QP_STATE_RST] = HNS_ROCE_CMD_2RST_QP,
2492 [HNS_ROCE_QP_STATE_ERR] = HNS_ROCE_CMD_2ERR_QP,
2493 [HNS_ROCE_QP_STATE_RTS] = HNS_ROCE_CMD_SQD2RTS_QP,
2494 [HNS_ROCE_QP_STATE_SQD] = HNS_ROCE_CMD_SQD2SQD_QP,
2495 },
2496 [HNS_ROCE_QP_STATE_ERR] = {
2497 [HNS_ROCE_QP_STATE_RST] = HNS_ROCE_CMD_2RST_QP,
2498 [HNS_ROCE_QP_STATE_ERR] = HNS_ROCE_CMD_2ERR_QP,
2499 }
2500 };
2501
2502 struct hns_roce_cmd_mailbox *mailbox;
2503 struct device *dev = &hr_dev->pdev->dev;
2504 int ret = 0;
2505
2506 if (cur_state >= HNS_ROCE_QP_NUM_STATE ||
2507 new_state >= HNS_ROCE_QP_NUM_STATE ||
2508 !op[cur_state][new_state]) {
2509	dev_err(dev, "[modify_qp] unsupported transition from state %d to %d\n",
2510 cur_state, new_state);
2511 return -EINVAL;
2512 }
2513
2514 if (op[cur_state][new_state] == HNS_ROCE_CMD_2RST_QP)
2515 return hns_roce_cmd_mbox(hr_dev, 0, 0, hr_qp->qpn, 2,
2516 HNS_ROCE_CMD_2RST_QP,
6b877c32 2517 HNS_ROCE_CMD_TIMEOUT_MSECS);
9a443537 2518
2519 if (op[cur_state][new_state] == HNS_ROCE_CMD_2ERR_QP)
2520 return hns_roce_cmd_mbox(hr_dev, 0, 0, hr_qp->qpn, 2,
2521 HNS_ROCE_CMD_2ERR_QP,
6b877c32 2522 HNS_ROCE_CMD_TIMEOUT_MSECS);
9a443537 2523
2524 mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
2525 if (IS_ERR(mailbox))
2526 return PTR_ERR(mailbox);
2527
2528 memcpy(mailbox->buf, context, sizeof(*context));
2529
2530 ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, hr_qp->qpn, 0,
2531 op[cur_state][new_state],
6b877c32 2532 HNS_ROCE_CMD_TIMEOUT_MSECS);
9a443537 2533
2534 hns_roce_free_cmd_mailbox(hr_dev, mailbox);
2535 return ret;
2536}
2537
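/*
 * QP1 (the GSI QP) bypasses the mailbox: its QP1C context is built
 * in memory, copied word by word into the per-port
 * ROCEE_QP1C_CFG0_0_REG register window, and the state is changed by
 * rewriting the QP_ST field of that same window.
 */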
2538static int hns_roce_v1_m_sqp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
2539 int attr_mask, enum ib_qp_state cur_state,
2540 enum ib_qp_state new_state)
2541{
2542 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
2543 struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
2544 struct hns_roce_sqp_context *context;
2545 struct device *dev = &hr_dev->pdev->dev;
2546 dma_addr_t dma_handle = 0;
2547 int rq_pa_start;
2548 u32 reg_val;
2549 u64 *mtts;
cc4ed08b 2550 u32 __iomem *addr;
9a443537 2551
2552 context = kzalloc(sizeof(*context), GFP_KERNEL);
2553 if (!context)
2554 return -ENOMEM;
2555
2556 /* Search QP buf's MTTs */
6a93c77a 2557 mtts = hns_roce_table_find(hr_dev, &hr_dev->mr_table.mtt_table,
9a443537 2558 hr_qp->mtt.first_seg, &dma_handle);
2559 if (!mtts) {
2560	dev_err(dev, "qp buf pa lookup failed\n");
2561 goto out;
2562 }
2563
2564 if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
2565 roce_set_field(context->qp1c_bytes_4,
2566 QP1C_BYTES_4_SQ_WQE_SHIFT_M,
2567 QP1C_BYTES_4_SQ_WQE_SHIFT_S,
2568 ilog2((unsigned int)hr_qp->sq.wqe_cnt));
2569 roce_set_field(context->qp1c_bytes_4,
2570 QP1C_BYTES_4_RQ_WQE_SHIFT_M,
2571 QP1C_BYTES_4_RQ_WQE_SHIFT_S,
2572 ilog2((unsigned int)hr_qp->rq.wqe_cnt));
2573 roce_set_field(context->qp1c_bytes_4, QP1C_BYTES_4_PD_M,
2574 QP1C_BYTES_4_PD_S, to_hr_pd(ibqp->pd)->pdn);
2575
2576 context->sq_rq_bt_l = (u32)(dma_handle);
2577 roce_set_field(context->qp1c_bytes_12,
2578 QP1C_BYTES_12_SQ_RQ_BT_H_M,
2579 QP1C_BYTES_12_SQ_RQ_BT_H_S,
2580 ((u32)(dma_handle >> 32)));
2581
2582 roce_set_field(context->qp1c_bytes_16, QP1C_BYTES_16_RQ_HEAD_M,
2583 QP1C_BYTES_16_RQ_HEAD_S, hr_qp->rq.head);
2584 roce_set_field(context->qp1c_bytes_16, QP1C_BYTES_16_PORT_NUM_M,
7716809e 2585 QP1C_BYTES_16_PORT_NUM_S, hr_qp->phy_port);
9a443537 2586 roce_set_bit(context->qp1c_bytes_16,
2587 QP1C_BYTES_16_SIGNALING_TYPE_S,
2588 hr_qp->sq_signal_bits);
9a443537 2589 roce_set_bit(context->qp1c_bytes_16, QP1C_BYTES_16_RQ_BA_FLG_S,
2590 1);
2591 roce_set_bit(context->qp1c_bytes_16, QP1C_BYTES_16_SQ_BA_FLG_S,
2592 1);
2593 roce_set_bit(context->qp1c_bytes_16, QP1C_BYTES_16_QP1_ERR_S,
2594 0);
2595
2596 roce_set_field(context->qp1c_bytes_20, QP1C_BYTES_20_SQ_HEAD_M,
2597 QP1C_BYTES_20_SQ_HEAD_S, hr_qp->sq.head);
2598 roce_set_field(context->qp1c_bytes_20, QP1C_BYTES_20_PKEY_IDX_M,
2599 QP1C_BYTES_20_PKEY_IDX_S, attr->pkey_index);
2600
2601 rq_pa_start = (u32)hr_qp->rq.offset / PAGE_SIZE;
2602 context->cur_rq_wqe_ba_l = (u32)(mtts[rq_pa_start]);
2603
2604 roce_set_field(context->qp1c_bytes_28,
2605 QP1C_BYTES_28_CUR_RQ_WQE_BA_H_M,
2606 QP1C_BYTES_28_CUR_RQ_WQE_BA_H_S,
2607 (mtts[rq_pa_start]) >> 32);
2608 roce_set_field(context->qp1c_bytes_28,
2609 QP1C_BYTES_28_RQ_CUR_IDX_M,
2610 QP1C_BYTES_28_RQ_CUR_IDX_S, 0);
2611
2612 roce_set_field(context->qp1c_bytes_32,
2613 QP1C_BYTES_32_RX_CQ_NUM_M,
2614 QP1C_BYTES_32_RX_CQ_NUM_S,
2615 to_hr_cq(ibqp->recv_cq)->cqn);
2616 roce_set_field(context->qp1c_bytes_32,
2617 QP1C_BYTES_32_TX_CQ_NUM_M,
2618 QP1C_BYTES_32_TX_CQ_NUM_S,
2619 to_hr_cq(ibqp->send_cq)->cqn);
2620
2621 context->cur_sq_wqe_ba_l = (u32)mtts[0];
2622
2623 roce_set_field(context->qp1c_bytes_40,
2624 QP1C_BYTES_40_CUR_SQ_WQE_BA_H_M,
2625 QP1C_BYTES_40_CUR_SQ_WQE_BA_H_S,
2626 (mtts[0]) >> 32);
2627 roce_set_field(context->qp1c_bytes_40,
2628 QP1C_BYTES_40_SQ_CUR_IDX_M,
2629 QP1C_BYTES_40_SQ_CUR_IDX_S, 0);
2630
2631 /* Copy context to QP1C register */
2632 addr = (u32 __iomem *)(hr_dev->reg_base +
2633 ROCEE_QP1C_CFG0_0_REG +
2634 hr_qp->phy_port * sizeof(*context));
9a443537 2635
2636 writel(context->qp1c_bytes_4, addr);
2637 writel(context->sq_rq_bt_l, addr + 1);
2638 writel(context->qp1c_bytes_12, addr + 2);
2639 writel(context->qp1c_bytes_16, addr + 3);
2640 writel(context->qp1c_bytes_20, addr + 4);
2641 writel(context->cur_rq_wqe_ba_l, addr + 5);
2642 writel(context->qp1c_bytes_28, addr + 6);
2643 writel(context->qp1c_bytes_32, addr + 7);
2644 writel(context->cur_sq_wqe_ba_l, addr + 8);
c24bf895 2645 writel(context->qp1c_bytes_40, addr + 9);
9a443537 2646 }
2647
2648 /* Modify QP1C status */
2649 reg_val = roce_read(hr_dev, ROCEE_QP1C_CFG0_0_REG +
7716809e 2650 hr_qp->phy_port * sizeof(*context));
9a443537 2651 roce_set_field(reg_val, ROCEE_QP1C_CFG0_0_ROCEE_QP1C_QP_ST_M,
2652 ROCEE_QP1C_CFG0_0_ROCEE_QP1C_QP_ST_S, new_state);
2653 roce_write(hr_dev, ROCEE_QP1C_CFG0_0_REG +
7716809e 2654 hr_qp->phy_port * sizeof(*context), reg_val);
9a443537 2655
2656 hr_qp->state = new_state;
2657 if (new_state == IB_QPS_RESET) {
2658 hns_roce_v1_cq_clean(to_hr_cq(ibqp->recv_cq), hr_qp->qpn,
2659 ibqp->srq ? to_hr_srq(ibqp->srq) : NULL);
2660 if (ibqp->send_cq != ibqp->recv_cq)
2661 hns_roce_v1_cq_clean(to_hr_cq(ibqp->send_cq),
2662 hr_qp->qpn, NULL);
2663
2664 hr_qp->rq.head = 0;
2665 hr_qp->rq.tail = 0;
2666 hr_qp->sq.head = 0;
2667 hr_qp->sq.tail = 0;
2668 hr_qp->sq_next_wqe = 0;
2669 }
2670
2671 kfree(context);
2672 return 0;
2673
2674out:
2675 kfree(context);
2676 return -EINVAL;
2677}
2678
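/*
 * Regular QPs use the mailbox path: build a hns_roce_qp_context for
 * the requested transition (RST->INIT, INIT->INIT, INIT->RTR,
 * RTR->RTS or a move to RESET/ERR), hand it to hardware via
 * hns_roce_v1_qp_modify(), and afterwards ring the RQ doorbell for
 * the INIT->INIT case so the RQ head is reloaded.
 */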
2679static int hns_roce_v1_m_qp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
2680 int attr_mask, enum ib_qp_state cur_state,
2681 enum ib_qp_state new_state)
2682{
2683 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
2684 struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
2685 struct device *dev = &hr_dev->pdev->dev;
2686 struct hns_roce_qp_context *context;
d8966fcd 2687 const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr);
9a443537 2688 dma_addr_t dma_handle_2 = 0;
2689 dma_addr_t dma_handle = 0;
2690 uint32_t doorbell[2] = {0};
2691 int rq_pa_start = 0;
9a443537 2692 u64 *mtts_2 = NULL;
2693 int ret = -EINVAL;
2694 u64 *mtts = NULL;
2695 int port;
d8966fcd 2696 u8 port_num;
9a443537 2697 u8 *dmac;
2698 u8 *smac;
2699
2700 context = kzalloc(sizeof(*context), GFP_KERNEL);
2701 if (!context)
2702 return -ENOMEM;
2703
2704 /* Search qp buf's mtts */
6a93c77a 2705 mtts = hns_roce_table_find(hr_dev, &hr_dev->mr_table.mtt_table,
9a443537 2706 hr_qp->mtt.first_seg, &dma_handle);
2707 if (mtts == NULL) {
2708	dev_err(dev, "qp buf pa lookup failed\n");
2709 goto out;
2710 }
2711
2712 /* Search IRRL's mtts */
2713 mtts_2 = hns_roce_table_find(hr_dev, &hr_dev->qp_table.irrl_table,
2714 hr_qp->qpn, &dma_handle_2);
9a443537 2715 if (mtts_2 == NULL) {
2716	dev_err(dev, "qp irrl_table lookup failed\n");
2717 goto out;
2718 }
2719
2720 /*
2721 * Reset to init
2722 * Mandatory param:
2723 * IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_PORT | IB_QP_ACCESS_FLAGS
2724 * Optional param: NA
2725 */
9a443537 2726 if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
2727 roce_set_field(context->qpc_bytes_4,
2728 QP_CONTEXT_QPC_BYTES_4_TRANSPORT_SERVICE_TYPE_M,
2729 QP_CONTEXT_QPC_BYTES_4_TRANSPORT_SERVICE_TYPE_S,
2730 to_hr_qp_type(hr_qp->ibqp.qp_type));
2731
2732 roce_set_bit(context->qpc_bytes_4,
2733 QP_CONTEXT_QPC_BYTE_4_ENABLE_FPMR_S, 0);
2734 roce_set_bit(context->qpc_bytes_4,
2735 QP_CONTEXT_QPC_BYTE_4_RDMA_READ_ENABLE_S,
2736 !!(attr->qp_access_flags & IB_ACCESS_REMOTE_READ));
2737 roce_set_bit(context->qpc_bytes_4,
2738 QP_CONTEXT_QPC_BYTE_4_RDMA_WRITE_ENABLE_S,
2739 !!(attr->qp_access_flags & IB_ACCESS_REMOTE_WRITE)
2740 );
2741 roce_set_bit(context->qpc_bytes_4,
2742 QP_CONTEXT_QPC_BYTE_4_ATOMIC_OPERATION_ENABLE_S,
2743 !!(attr->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC)
2744 );
2745 roce_set_bit(context->qpc_bytes_4,
2746 QP_CONTEXT_QPC_BYTE_4_RDMAR_USE_S, 1);
2747 roce_set_field(context->qpc_bytes_4,
2748 QP_CONTEXT_QPC_BYTES_4_SQ_WQE_SHIFT_M,
2749 QP_CONTEXT_QPC_BYTES_4_SQ_WQE_SHIFT_S,
2750 ilog2((unsigned int)hr_qp->sq.wqe_cnt));
2751 roce_set_field(context->qpc_bytes_4,
2752 QP_CONTEXT_QPC_BYTES_4_RQ_WQE_SHIFT_M,
2753 QP_CONTEXT_QPC_BYTES_4_RQ_WQE_SHIFT_S,
2754 ilog2((unsigned int)hr_qp->rq.wqe_cnt));
2755 roce_set_field(context->qpc_bytes_4,
2756 QP_CONTEXT_QPC_BYTES_4_PD_M,
2757 QP_CONTEXT_QPC_BYTES_4_PD_S,
2758 to_hr_pd(ibqp->pd)->pdn);
2759 hr_qp->access_flags = attr->qp_access_flags;
2760 roce_set_field(context->qpc_bytes_8,
2761 QP_CONTEXT_QPC_BYTES_8_TX_COMPLETION_M,
2762 QP_CONTEXT_QPC_BYTES_8_TX_COMPLETION_S,
2763 to_hr_cq(ibqp->send_cq)->cqn);
2764 roce_set_field(context->qpc_bytes_8,
2765 QP_CONTEXT_QPC_BYTES_8_RX_COMPLETION_M,
2766 QP_CONTEXT_QPC_BYTES_8_RX_COMPLETION_S,
2767 to_hr_cq(ibqp->recv_cq)->cqn);
2768
2769 if (ibqp->srq)
2770 roce_set_field(context->qpc_bytes_12,
2771 QP_CONTEXT_QPC_BYTES_12_SRQ_NUMBER_M,
2772 QP_CONTEXT_QPC_BYTES_12_SRQ_NUMBER_S,
2773 to_hr_srq(ibqp->srq)->srqn);
2774
2775 roce_set_field(context->qpc_bytes_12,
2776 QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_M,
2777 QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_S,
2778 attr->pkey_index);
2779 hr_qp->pkey_index = attr->pkey_index;
2780 roce_set_field(context->qpc_bytes_16,
2781 QP_CONTEXT_QPC_BYTES_16_QP_NUM_M,
2782 QP_CONTEXT_QPC_BYTES_16_QP_NUM_S, hr_qp->qpn);
2783
2784 } else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_INIT) {
2785 roce_set_field(context->qpc_bytes_4,
2786 QP_CONTEXT_QPC_BYTES_4_TRANSPORT_SERVICE_TYPE_M,
2787 QP_CONTEXT_QPC_BYTES_4_TRANSPORT_SERVICE_TYPE_S,
2788 to_hr_qp_type(hr_qp->ibqp.qp_type));
2789 roce_set_bit(context->qpc_bytes_4,
2790 QP_CONTEXT_QPC_BYTE_4_ENABLE_FPMR_S, 0);
2791 if (attr_mask & IB_QP_ACCESS_FLAGS) {
2792 roce_set_bit(context->qpc_bytes_4,
2793 QP_CONTEXT_QPC_BYTE_4_RDMA_READ_ENABLE_S,
2794 !!(attr->qp_access_flags &
2795 IB_ACCESS_REMOTE_READ));
2796 roce_set_bit(context->qpc_bytes_4,
2797 QP_CONTEXT_QPC_BYTE_4_RDMA_WRITE_ENABLE_S,
2798 !!(attr->qp_access_flags &
2799 IB_ACCESS_REMOTE_WRITE));
2800 } else {
2801 roce_set_bit(context->qpc_bytes_4,
2802 QP_CONTEXT_QPC_BYTE_4_RDMA_READ_ENABLE_S,
2803 !!(hr_qp->access_flags &
2804 IB_ACCESS_REMOTE_READ));
2805 roce_set_bit(context->qpc_bytes_4,
2806 QP_CONTEXT_QPC_BYTE_4_RDMA_WRITE_ENABLE_S,
2807 !!(hr_qp->access_flags &
2808 IB_ACCESS_REMOTE_WRITE));
2809 }
2810
2811 roce_set_bit(context->qpc_bytes_4,
2812 QP_CONTEXT_QPC_BYTE_4_RDMAR_USE_S, 1);
2813 roce_set_field(context->qpc_bytes_4,
2814 QP_CONTEXT_QPC_BYTES_4_SQ_WQE_SHIFT_M,
2815 QP_CONTEXT_QPC_BYTES_4_SQ_WQE_SHIFT_S,
2816 ilog2((unsigned int)hr_qp->sq.wqe_cnt));
2817 roce_set_field(context->qpc_bytes_4,
2818 QP_CONTEXT_QPC_BYTES_4_RQ_WQE_SHIFT_M,
2819 QP_CONTEXT_QPC_BYTES_4_RQ_WQE_SHIFT_S,
2820 ilog2((unsigned int)hr_qp->rq.wqe_cnt));
2821 roce_set_field(context->qpc_bytes_4,
2822 QP_CONTEXT_QPC_BYTES_4_PD_M,
2823 QP_CONTEXT_QPC_BYTES_4_PD_S,
2824 to_hr_pd(ibqp->pd)->pdn);
2825
2826 roce_set_field(context->qpc_bytes_8,
2827 QP_CONTEXT_QPC_BYTES_8_TX_COMPLETION_M,
2828 QP_CONTEXT_QPC_BYTES_8_TX_COMPLETION_S,
2829 to_hr_cq(ibqp->send_cq)->cqn);
2830 roce_set_field(context->qpc_bytes_8,
2831 QP_CONTEXT_QPC_BYTES_8_RX_COMPLETION_M,
2832 QP_CONTEXT_QPC_BYTES_8_RX_COMPLETION_S,
2833 to_hr_cq(ibqp->recv_cq)->cqn);
2834
2835 if (ibqp->srq)
2836 roce_set_field(context->qpc_bytes_12,
2837 QP_CONTEXT_QPC_BYTES_12_SRQ_NUMBER_M,
2838 QP_CONTEXT_QPC_BYTES_12_SRQ_NUMBER_S,
2839 to_hr_srq(ibqp->srq)->srqn);
2840 if (attr_mask & IB_QP_PKEY_INDEX)
2841 roce_set_field(context->qpc_bytes_12,
2842 QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_M,
2843 QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_S,
2844 attr->pkey_index);
2845 else
2846 roce_set_field(context->qpc_bytes_12,
2847 QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_M,
2848 QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_S,
2849 hr_qp->pkey_index);
2850
2851 roce_set_field(context->qpc_bytes_16,
2852 QP_CONTEXT_QPC_BYTES_16_QP_NUM_M,
2853 QP_CONTEXT_QPC_BYTES_16_QP_NUM_S, hr_qp->qpn);
2854 } else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR) {
2855 if ((attr_mask & IB_QP_ALT_PATH) ||
2856 (attr_mask & IB_QP_ACCESS_FLAGS) ||
2857 (attr_mask & IB_QP_PKEY_INDEX) ||
2858 (attr_mask & IB_QP_QKEY)) {
2859 dev_err(dev, "INIT2RTR attr_mask error\n");
2860 goto out;
2861 }
2862
44c58487 2863 dmac = (u8 *)attr->ah_attr.roce.dmac;
9a443537 2864
2865 context->sq_rq_bt_l = (u32)(dma_handle);
2866 roce_set_field(context->qpc_bytes_24,
2867 QP_CONTEXT_QPC_BYTES_24_SQ_RQ_BT_H_M,
2868 QP_CONTEXT_QPC_BYTES_24_SQ_RQ_BT_H_S,
2869 ((u32)(dma_handle >> 32)));
2870 roce_set_bit(context->qpc_bytes_24,
2871 QP_CONTEXT_QPC_BYTE_24_REMOTE_ENABLE_E2E_CREDITS_S,
2872 1);
2873 roce_set_field(context->qpc_bytes_24,
2874 QP_CONTEXT_QPC_BYTES_24_MINIMUM_RNR_NAK_TIMER_M,
2875 QP_CONTEXT_QPC_BYTES_24_MINIMUM_RNR_NAK_TIMER_S,
2876 attr->min_rnr_timer);
2877 context->irrl_ba_l = (u32)(dma_handle_2);
2878 roce_set_field(context->qpc_bytes_32,
2879 QP_CONTEXT_QPC_BYTES_32_IRRL_BA_H_M,
2880 QP_CONTEXT_QPC_BYTES_32_IRRL_BA_H_S,
2881 ((u32)(dma_handle_2 >> 32)) &
2882 QP_CONTEXT_QPC_BYTES_32_IRRL_BA_H_M);
2883 roce_set_field(context->qpc_bytes_32,
2884 QP_CONTEXT_QPC_BYTES_32_MIG_STATE_M,
2885 QP_CONTEXT_QPC_BYTES_32_MIG_STATE_S, 0);
2886 roce_set_bit(context->qpc_bytes_32,
2887 QP_CONTEXT_QPC_BYTE_32_LOCAL_ENABLE_E2E_CREDITS_S,
2888 1);
2889 roce_set_bit(context->qpc_bytes_32,
2890 QP_CONTEXT_QPC_BYTE_32_SIGNALING_TYPE_S,
2891 hr_qp->sq_signal_bits);
2892
2893 port = (attr_mask & IB_QP_PORT) ? (attr->port_num - 1) :
2894 hr_qp->port;
2895 smac = (u8 *)hr_dev->dev_addr[port];
2896	/* When DMAC equals SMAC, or loop_idc is 1, loopback should be used */
2897 if (ether_addr_equal_unaligned(dmac, smac) ||
2898 hr_dev->loop_idc == 0x1)
9a443537 2899 roce_set_bit(context->qpc_bytes_32,
80596c67 2900 QP_CONTEXT_QPC_BYTE_32_LOOPBACK_INDICATOR_S, 1);
9a443537 2901
2902 roce_set_bit(context->qpc_bytes_32,
2903 QP_CONTEXT_QPC_BYTE_32_GLOBAL_HEADER_S,
d8966fcd 2904 rdma_ah_get_ah_flags(&attr->ah_attr));
9a443537 2905 roce_set_field(context->qpc_bytes_32,
2906 QP_CONTEXT_QPC_BYTES_32_RESPONDER_RESOURCES_M,
2907 QP_CONTEXT_QPC_BYTES_32_RESPONDER_RESOURCES_S,
2908 ilog2((unsigned int)attr->max_dest_rd_atomic));
2909
2910 if (attr_mask & IB_QP_DEST_QPN)
2911 roce_set_field(context->qpc_bytes_36,
2912 QP_CONTEXT_QPC_BYTES_36_DEST_QP_M,
2913 QP_CONTEXT_QPC_BYTES_36_DEST_QP_S,
2914 attr->dest_qp_num);
9a443537 2915
2916 /* Configure GID index */
d8966fcd 2917 port_num = rdma_ah_get_port_num(&attr->ah_attr);
9a443537 2918 roce_set_field(context->qpc_bytes_36,
2919 QP_CONTEXT_QPC_BYTES_36_SGID_INDEX_M,
2920 QP_CONTEXT_QPC_BYTES_36_SGID_INDEX_S,
2921 hns_get_gid_index(hr_dev,
2922 port_num - 1,
2923 grh->sgid_index));
9a443537 2924
2925 memcpy(&(context->dmac_l), dmac, 4);
2926
2927 roce_set_field(context->qpc_bytes_44,
2928 QP_CONTEXT_QPC_BYTES_44_DMAC_H_M,
2929 QP_CONTEXT_QPC_BYTES_44_DMAC_H_S,
2930 *((u16 *)(&dmac[4])));
2931 roce_set_field(context->qpc_bytes_44,
2932 QP_CONTEXT_QPC_BYTES_44_MAXIMUM_STATIC_RATE_M,
2933 QP_CONTEXT_QPC_BYTES_44_MAXIMUM_STATIC_RATE_S,
d8966fcd 2934 rdma_ah_get_static_rate(&attr->ah_attr));
9a443537 2935 roce_set_field(context->qpc_bytes_44,
2936 QP_CONTEXT_QPC_BYTES_44_HOPLMT_M,
2937 QP_CONTEXT_QPC_BYTES_44_HOPLMT_S,
d8966fcd 2938 grh->hop_limit);
9a443537 2939
2940 roce_set_field(context->qpc_bytes_48,
2941 QP_CONTEXT_QPC_BYTES_48_FLOWLABEL_M,
2942 QP_CONTEXT_QPC_BYTES_48_FLOWLABEL_S,
d8966fcd 2943 grh->flow_label);
9a443537 2944 roce_set_field(context->qpc_bytes_48,
2945 QP_CONTEXT_QPC_BYTES_48_TCLASS_M,
2946 QP_CONTEXT_QPC_BYTES_48_TCLASS_S,
d8966fcd 2947 grh->traffic_class);
9a443537 2948 roce_set_field(context->qpc_bytes_48,
2949 QP_CONTEXT_QPC_BYTES_48_MTU_M,
2950 QP_CONTEXT_QPC_BYTES_48_MTU_S, attr->path_mtu);
2951
2952 memcpy(context->dgid, grh->dgid.raw,
2953 sizeof(grh->dgid.raw));
9a443537 2954
2955 dev_dbg(dev, "dmac:%x :%lx\n", context->dmac_l,
2956 roce_get_field(context->qpc_bytes_44,
2957 QP_CONTEXT_QPC_BYTES_44_DMAC_H_M,
2958 QP_CONTEXT_QPC_BYTES_44_DMAC_H_S));
2959
2960 roce_set_field(context->qpc_bytes_68,
2961 QP_CONTEXT_QPC_BYTES_68_RQ_HEAD_M,
2962 QP_CONTEXT_QPC_BYTES_68_RQ_HEAD_S,
2963 hr_qp->rq.head);
9a443537 2964 roce_set_field(context->qpc_bytes_68,
2965 QP_CONTEXT_QPC_BYTES_68_RQ_CUR_INDEX_M,
2966 QP_CONTEXT_QPC_BYTES_68_RQ_CUR_INDEX_S, 0);
2967
2968 rq_pa_start = (u32)hr_qp->rq.offset / PAGE_SIZE;
2969 context->cur_rq_wqe_ba_l = (u32)(mtts[rq_pa_start]);
2970
2971 roce_set_field(context->qpc_bytes_76,
2972 QP_CONTEXT_QPC_BYTES_76_CUR_RQ_WQE_BA_H_M,
2973 QP_CONTEXT_QPC_BYTES_76_CUR_RQ_WQE_BA_H_S,
2974 mtts[rq_pa_start] >> 32);
2975 roce_set_field(context->qpc_bytes_76,
2976 QP_CONTEXT_QPC_BYTES_76_RX_REQ_MSN_M,
2977 QP_CONTEXT_QPC_BYTES_76_RX_REQ_MSN_S, 0);
2978
2979 context->rx_rnr_time = 0;
2980
2981 roce_set_field(context->qpc_bytes_84,
2982 QP_CONTEXT_QPC_BYTES_84_LAST_ACK_PSN_M,
2983 QP_CONTEXT_QPC_BYTES_84_LAST_ACK_PSN_S,
2984 attr->rq_psn - 1);
2985 roce_set_field(context->qpc_bytes_84,
2986 QP_CONTEXT_QPC_BYTES_84_TRRL_HEAD_M,
2987 QP_CONTEXT_QPC_BYTES_84_TRRL_HEAD_S, 0);
2988
2989 roce_set_field(context->qpc_bytes_88,
2990 QP_CONTEXT_QPC_BYTES_88_RX_REQ_EPSN_M,
2991 QP_CONTEXT_QPC_BYTES_88_RX_REQ_EPSN_S,
2992 attr->rq_psn);
2993 roce_set_bit(context->qpc_bytes_88,
2994 QP_CONTEXT_QPC_BYTES_88_RX_REQ_PSN_ERR_FLAG_S, 0);
2995 roce_set_bit(context->qpc_bytes_88,
2996 QP_CONTEXT_QPC_BYTES_88_RX_LAST_OPCODE_FLG_S, 0);
2997 roce_set_field(context->qpc_bytes_88,
2998 QP_CONTEXT_QPC_BYTES_88_RQ_REQ_LAST_OPERATION_TYPE_M,
2999 QP_CONTEXT_QPC_BYTES_88_RQ_REQ_LAST_OPERATION_TYPE_S,
3000 0);
3001 roce_set_field(context->qpc_bytes_88,
3002 QP_CONTEXT_QPC_BYTES_88_RQ_REQ_RDMA_WR_FLAG_M,
3003 QP_CONTEXT_QPC_BYTES_88_RQ_REQ_RDMA_WR_FLAG_S,
3004 0);
3005
3006 context->dma_length = 0;
3007 context->r_key = 0;
3008 context->va_l = 0;
3009 context->va_h = 0;
3010
3011 roce_set_field(context->qpc_bytes_108,
3012 QP_CONTEXT_QPC_BYTES_108_TRRL_SDB_PSN_M,
3013 QP_CONTEXT_QPC_BYTES_108_TRRL_SDB_PSN_S, 0);
3014 roce_set_bit(context->qpc_bytes_108,
3015 QP_CONTEXT_QPC_BYTES_108_TRRL_SDB_PSN_FLG_S, 0);
3016 roce_set_bit(context->qpc_bytes_108,
3017 QP_CONTEXT_QPC_BYTES_108_TRRL_TDB_PSN_FLG_S, 0);
3018
3019 roce_set_field(context->qpc_bytes_112,
3020 QP_CONTEXT_QPC_BYTES_112_TRRL_TDB_PSN_M,
3021 QP_CONTEXT_QPC_BYTES_112_TRRL_TDB_PSN_S, 0);
3022 roce_set_field(context->qpc_bytes_112,
3023 QP_CONTEXT_QPC_BYTES_112_TRRL_TAIL_M,
3024 QP_CONTEXT_QPC_BYTES_112_TRRL_TAIL_S, 0);
3025
3026 /* For chip resp ack */
3027 roce_set_field(context->qpc_bytes_156,
3028 QP_CONTEXT_QPC_BYTES_156_PORT_NUM_M,
3029 QP_CONTEXT_QPC_BYTES_156_PORT_NUM_S,
7716809e 3030 hr_qp->phy_port);
9a443537 3031 roce_set_field(context->qpc_bytes_156,
3032 QP_CONTEXT_QPC_BYTES_156_SL_M,
3033 QP_CONTEXT_QPC_BYTES_156_SL_S,
3034 rdma_ah_get_sl(&attr->ah_attr));
3035 hr_qp->sl = rdma_ah_get_sl(&attr->ah_attr);
9a443537 3036 } else if (cur_state == IB_QPS_RTR &&
3037 new_state == IB_QPS_RTS) {
3038	/* If any optional param is present, return an error */
3039 if ((attr_mask & IB_QP_ALT_PATH) ||
3040 (attr_mask & IB_QP_ACCESS_FLAGS) ||
3041 (attr_mask & IB_QP_QKEY) ||
3042 (attr_mask & IB_QP_PATH_MIG_STATE) ||
3043 (attr_mask & IB_QP_CUR_STATE) ||
3044 (attr_mask & IB_QP_MIN_RNR_TIMER)) {
3045 dev_err(dev, "RTR2RTS attr_mask error\n");
3046 goto out;
3047 }
3048
3049 context->rx_cur_sq_wqe_ba_l = (u32)(mtts[0]);
3050
3051 roce_set_field(context->qpc_bytes_120,
3052 QP_CONTEXT_QPC_BYTES_120_RX_CUR_SQ_WQE_BA_H_M,
3053 QP_CONTEXT_QPC_BYTES_120_RX_CUR_SQ_WQE_BA_H_S,
3054 (mtts[0]) >> 32);
3055
3056 roce_set_field(context->qpc_bytes_124,
3057 QP_CONTEXT_QPC_BYTES_124_RX_ACK_MSN_M,
3058 QP_CONTEXT_QPC_BYTES_124_RX_ACK_MSN_S, 0);
3059 roce_set_field(context->qpc_bytes_124,
3060 QP_CONTEXT_QPC_BYTES_124_IRRL_MSG_IDX_M,
3061 QP_CONTEXT_QPC_BYTES_124_IRRL_MSG_IDX_S, 0);
3062
3063 roce_set_field(context->qpc_bytes_128,
3064 QP_CONTEXT_QPC_BYTES_128_RX_ACK_EPSN_M,
3065 QP_CONTEXT_QPC_BYTES_128_RX_ACK_EPSN_S,
3066 attr->sq_psn);
3067 roce_set_bit(context->qpc_bytes_128,
3068 QP_CONTEXT_QPC_BYTES_128_RX_ACK_PSN_ERR_FLG_S, 0);
3069 roce_set_field(context->qpc_bytes_128,
3070 QP_CONTEXT_QPC_BYTES_128_ACK_LAST_OPERATION_TYPE_M,
3071 QP_CONTEXT_QPC_BYTES_128_ACK_LAST_OPERATION_TYPE_S,
3072 0);
3073 roce_set_bit(context->qpc_bytes_128,
3074 QP_CONTEXT_QPC_BYTES_128_IRRL_PSN_VLD_FLG_S, 0);
3075
3076 roce_set_field(context->qpc_bytes_132,
3077 QP_CONTEXT_QPC_BYTES_132_IRRL_PSN_M,
3078 QP_CONTEXT_QPC_BYTES_132_IRRL_PSN_S, 0);
3079 roce_set_field(context->qpc_bytes_132,
3080 QP_CONTEXT_QPC_BYTES_132_IRRL_TAIL_M,
3081 QP_CONTEXT_QPC_BYTES_132_IRRL_TAIL_S, 0);
3082
3083 roce_set_field(context->qpc_bytes_136,
3084 QP_CONTEXT_QPC_BYTES_136_RETRY_MSG_PSN_M,
3085 QP_CONTEXT_QPC_BYTES_136_RETRY_MSG_PSN_S,
3086 attr->sq_psn);
3087 roce_set_field(context->qpc_bytes_136,
3088 QP_CONTEXT_QPC_BYTES_136_RETRY_MSG_FPKT_PSN_L_M,
3089 QP_CONTEXT_QPC_BYTES_136_RETRY_MSG_FPKT_PSN_L_S,
3090 attr->sq_psn);
3091
3092 roce_set_field(context->qpc_bytes_140,
3093 QP_CONTEXT_QPC_BYTES_140_RETRY_MSG_FPKT_PSN_H_M,
3094 QP_CONTEXT_QPC_BYTES_140_RETRY_MSG_FPKT_PSN_H_S,
3095 (attr->sq_psn >> SQ_PSN_SHIFT));
3096 roce_set_field(context->qpc_bytes_140,
3097 QP_CONTEXT_QPC_BYTES_140_RETRY_MSG_MSN_M,
3098 QP_CONTEXT_QPC_BYTES_140_RETRY_MSG_MSN_S, 0);
3099 roce_set_bit(context->qpc_bytes_140,
3100 QP_CONTEXT_QPC_BYTES_140_RNR_RETRY_FLG_S, 0);
3101
9a443537 3102 roce_set_field(context->qpc_bytes_148,
3103 QP_CONTEXT_QPC_BYTES_148_CHECK_FLAG_M,
3104 QP_CONTEXT_QPC_BYTES_148_CHECK_FLAG_S, 0);
3105 roce_set_field(context->qpc_bytes_148,
3106 QP_CONTEXT_QPC_BYTES_148_RETRY_COUNT_M,
3107 QP_CONTEXT_QPC_BYTES_148_RETRY_COUNT_S,
3108 attr->retry_cnt);
9a443537 3109 roce_set_field(context->qpc_bytes_148,
3110 QP_CONTEXT_QPC_BYTES_148_RNR_RETRY_COUNT_M,
3111 QP_CONTEXT_QPC_BYTES_148_RNR_RETRY_COUNT_S,
3112 attr->rnr_retry);
9a443537 3113 roce_set_field(context->qpc_bytes_148,
3114 QP_CONTEXT_QPC_BYTES_148_LSN_M,
3115 QP_CONTEXT_QPC_BYTES_148_LSN_S, 0x100);
3116
3117 context->rnr_retry = 0;
3118
3119 roce_set_field(context->qpc_bytes_156,
3120 QP_CONTEXT_QPC_BYTES_156_RETRY_COUNT_INIT_M,
3121 QP_CONTEXT_QPC_BYTES_156_RETRY_COUNT_INIT_S,
3122 attr->retry_cnt);
3123 if (attr->timeout < 0x12) {
3124	dev_info(dev, "ack timeout value (0x%x) must be at least 0x12\n",
3125 attr->timeout);
3126 roce_set_field(context->qpc_bytes_156,
3127 QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_M,
3128 QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_S,
3129 0x12);
3130 } else {
3131 roce_set_field(context->qpc_bytes_156,
3132 QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_M,
3133 QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_S,
3134 attr->timeout);
3135 }
9a443537 3136 roce_set_field(context->qpc_bytes_156,
3137 QP_CONTEXT_QPC_BYTES_156_RNR_RETRY_COUNT_INIT_M,
3138 QP_CONTEXT_QPC_BYTES_156_RNR_RETRY_COUNT_INIT_S,
3139 attr->rnr_retry);
3140 roce_set_field(context->qpc_bytes_156,
3141 QP_CONTEXT_QPC_BYTES_156_PORT_NUM_M,
3142 QP_CONTEXT_QPC_BYTES_156_PORT_NUM_S,
7716809e 3143 hr_qp->phy_port);
9a443537 3144 roce_set_field(context->qpc_bytes_156,
3145 QP_CONTEXT_QPC_BYTES_156_SL_M,
3146 QP_CONTEXT_QPC_BYTES_156_SL_S,
3147 rdma_ah_get_sl(&attr->ah_attr));
3148 hr_qp->sl = rdma_ah_get_sl(&attr->ah_attr);
9a443537 3149 roce_set_field(context->qpc_bytes_156,
3150 QP_CONTEXT_QPC_BYTES_156_INITIATOR_DEPTH_M,
3151 QP_CONTEXT_QPC_BYTES_156_INITIATOR_DEPTH_S,
3152 ilog2((unsigned int)attr->max_rd_atomic));
3153 roce_set_field(context->qpc_bytes_156,
3154 QP_CONTEXT_QPC_BYTES_156_ACK_REQ_IND_M,
3155 QP_CONTEXT_QPC_BYTES_156_ACK_REQ_IND_S, 0);
3156 context->pkt_use_len = 0;
3157
3158 roce_set_field(context->qpc_bytes_164,
3159 QP_CONTEXT_QPC_BYTES_164_SQ_PSN_M,
3160 QP_CONTEXT_QPC_BYTES_164_SQ_PSN_S, attr->sq_psn);
3161 roce_set_field(context->qpc_bytes_164,
3162 QP_CONTEXT_QPC_BYTES_164_IRRL_HEAD_M,
3163 QP_CONTEXT_QPC_BYTES_164_IRRL_HEAD_S, 0);
3164
3165 roce_set_field(context->qpc_bytes_168,
3166 QP_CONTEXT_QPC_BYTES_168_RETRY_SQ_PSN_M,
3167 QP_CONTEXT_QPC_BYTES_168_RETRY_SQ_PSN_S,
3168 attr->sq_psn);
3169 roce_set_field(context->qpc_bytes_168,
3170 QP_CONTEXT_QPC_BYTES_168_SGE_USE_FLA_M,
3171 QP_CONTEXT_QPC_BYTES_168_SGE_USE_FLA_S, 0);
3172 roce_set_field(context->qpc_bytes_168,
3173 QP_CONTEXT_QPC_BYTES_168_DB_TYPE_M,
3174 QP_CONTEXT_QPC_BYTES_168_DB_TYPE_S, 0);
3175 roce_set_bit(context->qpc_bytes_168,
3176 QP_CONTEXT_QPC_BYTES_168_MSG_LP_IND_S, 0);
3177 roce_set_bit(context->qpc_bytes_168,
3178 QP_CONTEXT_QPC_BYTES_168_CSDB_LP_IND_S, 0);
3179 roce_set_bit(context->qpc_bytes_168,
3180 QP_CONTEXT_QPC_BYTES_168_QP_ERR_FLG_S, 0);
3181 context->sge_use_len = 0;
3182
3183 roce_set_field(context->qpc_bytes_176,
3184 QP_CONTEXT_QPC_BYTES_176_DB_CUR_INDEX_M,
3185 QP_CONTEXT_QPC_BYTES_176_DB_CUR_INDEX_S, 0);
3186 roce_set_field(context->qpc_bytes_176,
3187 QP_CONTEXT_QPC_BYTES_176_RETRY_DB_CUR_INDEX_M,
3188 QP_CONTEXT_QPC_BYTES_176_RETRY_DB_CUR_INDEX_S,
3189 0);
3190 roce_set_field(context->qpc_bytes_180,
3191 QP_CONTEXT_QPC_BYTES_180_SQ_CUR_INDEX_M,
3192 QP_CONTEXT_QPC_BYTES_180_SQ_CUR_INDEX_S, 0);
3193 roce_set_field(context->qpc_bytes_180,
3194 QP_CONTEXT_QPC_BYTES_180_SQ_HEAD_M,
3195 QP_CONTEXT_QPC_BYTES_180_SQ_HEAD_S, 0);
3196
3197 context->tx_cur_sq_wqe_ba_l = (u32)(mtts[0]);
3198
3199 roce_set_field(context->qpc_bytes_188,
3200 QP_CONTEXT_QPC_BYTES_188_TX_CUR_SQ_WQE_BA_H_M,
3201 QP_CONTEXT_QPC_BYTES_188_TX_CUR_SQ_WQE_BA_H_S,
3202 (mtts[0]) >> 32);
3203 roce_set_bit(context->qpc_bytes_188,
3204 QP_CONTEXT_QPC_BYTES_188_PKT_RETRY_FLG_S, 0);
3205 roce_set_field(context->qpc_bytes_188,
3206 QP_CONTEXT_QPC_BYTES_188_TX_RETRY_CUR_INDEX_M,
3207 QP_CONTEXT_QPC_BYTES_188_TX_RETRY_CUR_INDEX_S,
3208 0);
deb17f6f 3209 } else if (!((cur_state == IB_QPS_INIT && new_state == IB_QPS_RESET) ||
9a443537 3210 (cur_state == IB_QPS_INIT && new_state == IB_QPS_ERR) ||
3211 (cur_state == IB_QPS_RTR && new_state == IB_QPS_RESET) ||
3212 (cur_state == IB_QPS_RTR && new_state == IB_QPS_ERR) ||
3213 (cur_state == IB_QPS_RTS && new_state == IB_QPS_RESET) ||
3214 (cur_state == IB_QPS_RTS && new_state == IB_QPS_ERR) ||
3215 (cur_state == IB_QPS_ERR && new_state == IB_QPS_RESET) ||
3216 (cur_state == IB_QPS_ERR && new_state == IB_QPS_ERR))) {
3217	dev_err(dev, "unsupported QP state transition\n");
9a443537 3218 goto out;
3219 }
3220
3221	/* Every state migration must update the QP state field */
3222 roce_set_field(context->qpc_bytes_144,
3223 QP_CONTEXT_QPC_BYTES_144_QP_STATE_M,
1dec243a 3224 QP_CONTEXT_QPC_BYTES_144_QP_STATE_S, new_state);
9a443537 3225
3226 /* SW pass context to HW */
3227 ret = hns_roce_v1_qp_modify(hr_dev, &hr_qp->mtt,
3228 to_hns_roce_state(cur_state),
3229 to_hns_roce_state(new_state), context,
3230 hr_qp);
3231 if (ret) {
3232 dev_err(dev, "hns_roce_qp_modify failed\n");
3233 goto out;
3234 }
3235
3236	/*
3237	 * rst2init is used in place of init2init in the driver, so the
3238	 * hardware must refresh the RQ head via the doorbell again.
3239	 */
9a443537 3240 if (cur_state == IB_QPS_INIT && new_state == IB_QPS_INIT) {
3241 /* Memory barrier */
3242 wmb();
9a443537 3243
3244 roce_set_field(doorbell[0], RQ_DOORBELL_U32_4_RQ_HEAD_M,
3245 RQ_DOORBELL_U32_4_RQ_HEAD_S, hr_qp->rq.head);
3246 roce_set_field(doorbell[1], RQ_DOORBELL_U32_8_QPN_M,
3247 RQ_DOORBELL_U32_8_QPN_S, hr_qp->qpn);
3248 roce_set_field(doorbell[1], RQ_DOORBELL_U32_8_CMD_M,
3249 RQ_DOORBELL_U32_8_CMD_S, 1);
3250 roce_set_bit(doorbell[1], RQ_DOORBELL_U32_8_HW_SYNC_S, 1);
3251
3252 if (ibqp->uobject) {
3253 hr_qp->rq.db_reg_l = hr_dev->reg_base +
2d407888 3254 hr_dev->odb_offset +
509bf0c2 3255 DB_REG_OFFSET * hr_dev->priv_uar.index;
9a443537 3256 }
3257
3258 hns_roce_write64_k(doorbell, hr_qp->rq.db_reg_l);
9a443537 3259 }
3260
3261 hr_qp->state = new_state;
3262
3263 if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
3264 hr_qp->resp_depth = attr->max_dest_rd_atomic;
3265 if (attr_mask & IB_QP_PORT) {
3266 hr_qp->port = attr->port_num - 1;
3267 hr_qp->phy_port = hr_dev->iboe.phy_port[hr_qp->port];
3268 }
9a443537 3269
3270 if (new_state == IB_QPS_RESET && !ibqp->uobject) {
3271 hns_roce_v1_cq_clean(to_hr_cq(ibqp->recv_cq), hr_qp->qpn,
3272 ibqp->srq ? to_hr_srq(ibqp->srq) : NULL);
3273 if (ibqp->send_cq != ibqp->recv_cq)
3274 hns_roce_v1_cq_clean(to_hr_cq(ibqp->send_cq),
3275 hr_qp->qpn, NULL);
3276
3277 hr_qp->rq.head = 0;
3278 hr_qp->rq.tail = 0;
3279 hr_qp->sq.head = 0;
3280 hr_qp->sq.tail = 0;
3281 hr_qp->sq_next_wqe = 0;
3282 }
3283out:
3284 kfree(context);
3285 return ret;
3286}
3287
3288static int hns_roce_v1_modify_qp(struct ib_qp *ibqp,
3289 const struct ib_qp_attr *attr, int attr_mask,
3290 enum ib_qp_state cur_state,
3291 enum ib_qp_state new_state)
9a443537 3292{
3293
3294 if (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_SMI)
3295 return hns_roce_v1_m_sqp(ibqp, attr, attr_mask, cur_state,
3296 new_state);
3297 else
3298 return hns_roce_v1_m_qp(ibqp, attr, attr_mask, cur_state,
3299 new_state);
3300}
3301
3302static enum ib_qp_state to_ib_qp_state(enum hns_roce_qp_state state)
3303{
3304 switch (state) {
3305 case HNS_ROCE_QP_STATE_RST:
3306 return IB_QPS_RESET;
3307 case HNS_ROCE_QP_STATE_INIT:
3308 return IB_QPS_INIT;
3309 case HNS_ROCE_QP_STATE_RTR:
3310 return IB_QPS_RTR;
3311 case HNS_ROCE_QP_STATE_RTS:
3312 return IB_QPS_RTS;
3313 case HNS_ROCE_QP_STATE_SQD:
3314 return IB_QPS_SQD;
3315 case HNS_ROCE_QP_STATE_ERR:
3316 return IB_QPS_ERR;
3317 default:
3318 return IB_QPS_ERR;
3319 }
3320}
3321
3322static int hns_roce_v1_query_qpc(struct hns_roce_dev *hr_dev,
3323 struct hns_roce_qp *hr_qp,
3324 struct hns_roce_qp_context *hr_context)
3325{
3326 struct hns_roce_cmd_mailbox *mailbox;
3327 int ret;
3328
3329 mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
3330 if (IS_ERR(mailbox))
3331 return PTR_ERR(mailbox);
3332
3333 ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, hr_qp->qpn, 0,
3334 HNS_ROCE_CMD_QUERY_QP,
6b877c32 3335 HNS_ROCE_CMD_TIMEOUT_MSECS);
9a443537 3336 if (!ret)
3337 memcpy(hr_context, mailbox->buf, sizeof(*hr_context));
3338 else
3339 dev_err(&hr_dev->pdev->dev, "QUERY QP cmd process error\n");
3340
3341 hns_roce_free_cmd_mailbox(hr_dev, mailbox);
3342
3343 return ret;
3344}
3345
3346static int hns_roce_v1_q_sqp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
3347 int qp_attr_mask,
3348 struct ib_qp_init_attr *qp_init_attr)
3349{
3350 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
3351 struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
3352 struct hns_roce_sqp_context context;
3353 u32 addr;
3354
3355 mutex_lock(&hr_qp->mutex);
3356
3357 if (hr_qp->state == IB_QPS_RESET) {
3358 qp_attr->qp_state = IB_QPS_RESET;
3359 goto done;
3360 }
3361
3362 addr = ROCEE_QP1C_CFG0_0_REG +
3363 hr_qp->port * sizeof(struct hns_roce_sqp_context);
3364 context.qp1c_bytes_4 = roce_read(hr_dev, addr);
3365 context.sq_rq_bt_l = roce_read(hr_dev, addr + 1);
3366 context.qp1c_bytes_12 = roce_read(hr_dev, addr + 2);
3367 context.qp1c_bytes_16 = roce_read(hr_dev, addr + 3);
3368 context.qp1c_bytes_20 = roce_read(hr_dev, addr + 4);
3369 context.cur_rq_wqe_ba_l = roce_read(hr_dev, addr + 5);
3370 context.qp1c_bytes_28 = roce_read(hr_dev, addr + 6);
3371 context.qp1c_bytes_32 = roce_read(hr_dev, addr + 7);
3372 context.cur_sq_wqe_ba_l = roce_read(hr_dev, addr + 8);
3373 context.qp1c_bytes_40 = roce_read(hr_dev, addr + 9);
3374
3375 hr_qp->state = roce_get_field(context.qp1c_bytes_4,
3376 QP1C_BYTES_4_QP_STATE_M,
3377 QP1C_BYTES_4_QP_STATE_S);
3378 qp_attr->qp_state = hr_qp->state;
3379 qp_attr->path_mtu = IB_MTU_256;
3380 qp_attr->path_mig_state = IB_MIG_ARMED;
3381 qp_attr->qkey = QKEY_VAL;
2bf910d4 3382 qp_attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE;
3383 qp_attr->rq_psn = 0;
3384 qp_attr->sq_psn = 0;
3385 qp_attr->dest_qp_num = 1;
3386 qp_attr->qp_access_flags = 6;
3387
3388 qp_attr->pkey_index = roce_get_field(context.qp1c_bytes_20,
3389 QP1C_BYTES_20_PKEY_IDX_M,
3390 QP1C_BYTES_20_PKEY_IDX_S);
3391 qp_attr->port_num = hr_qp->port + 1;
3392 qp_attr->sq_draining = 0;
3393 qp_attr->max_rd_atomic = 0;
3394 qp_attr->max_dest_rd_atomic = 0;
3395 qp_attr->min_rnr_timer = 0;
3396 qp_attr->timeout = 0;
3397 qp_attr->retry_cnt = 0;
3398 qp_attr->rnr_retry = 0;
3399 qp_attr->alt_timeout = 0;
3400
3401done:
3402 qp_attr->cur_qp_state = qp_attr->qp_state;
3403 qp_attr->cap.max_recv_wr = hr_qp->rq.wqe_cnt;
3404 qp_attr->cap.max_recv_sge = hr_qp->rq.max_gs;
3405 qp_attr->cap.max_send_wr = hr_qp->sq.wqe_cnt;
3406 qp_attr->cap.max_send_sge = hr_qp->sq.max_gs;
3407 qp_attr->cap.max_inline_data = 0;
3408 qp_init_attr->cap = qp_attr->cap;
3409 qp_init_attr->create_flags = 0;
3410
3411 mutex_unlock(&hr_qp->mutex);
3412
3413 return 0;
3414}
3415
3416static int hns_roce_v1_q_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
3417 int qp_attr_mask,
3418 struct ib_qp_init_attr *qp_init_attr)
9a443537 3419{
3420 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
3421 struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
3422 struct device *dev = &hr_dev->pdev->dev;
3423 struct hns_roce_qp_context *context;
3424 int tmp_qp_state = 0;
3425 int ret = 0;
3426 int state;
3427
3428 context = kzalloc(sizeof(*context), GFP_KERNEL);
3429 if (!context)
3430 return -ENOMEM;
3431
3432 memset(qp_attr, 0, sizeof(*qp_attr));
3433 memset(qp_init_attr, 0, sizeof(*qp_init_attr));
3434
3435 mutex_lock(&hr_qp->mutex);
3436
3437 if (hr_qp->state == IB_QPS_RESET) {
3438 qp_attr->qp_state = IB_QPS_RESET;
3439 goto done;
3440 }
3441
3442 ret = hns_roce_v1_query_qpc(hr_dev, hr_qp, context);
3443 if (ret) {
3444 dev_err(dev, "query qpc error\n");
3445 ret = -EINVAL;
3446 goto out;
3447 }
3448
3449 state = roce_get_field(context->qpc_bytes_144,
3450 QP_CONTEXT_QPC_BYTES_144_QP_STATE_M,
3451 QP_CONTEXT_QPC_BYTES_144_QP_STATE_S);
3452 tmp_qp_state = (int)to_ib_qp_state((enum hns_roce_qp_state)state);
3453 if (tmp_qp_state == -1) {
3454 dev_err(dev, "to_ib_qp_state error\n");
3455 ret = -EINVAL;
3456 goto out;
3457 }
3458 hr_qp->state = (u8)tmp_qp_state;
3459 qp_attr->qp_state = (enum ib_qp_state)hr_qp->state;
3460 qp_attr->path_mtu = (enum ib_mtu)roce_get_field(context->qpc_bytes_48,
3461 QP_CONTEXT_QPC_BYTES_48_MTU_M,
3462 QP_CONTEXT_QPC_BYTES_48_MTU_S);
3463 qp_attr->path_mig_state = IB_MIG_ARMED;
2bf910d4 3464 qp_attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE;
9a443537 3465 if (hr_qp->ibqp.qp_type == IB_QPT_UD)
3466 qp_attr->qkey = QKEY_VAL;
3467
3468 qp_attr->rq_psn = roce_get_field(context->qpc_bytes_88,
3469 QP_CONTEXT_QPC_BYTES_88_RX_REQ_EPSN_M,
3470 QP_CONTEXT_QPC_BYTES_88_RX_REQ_EPSN_S);
3471 qp_attr->sq_psn = (u32)roce_get_field(context->qpc_bytes_164,
3472 QP_CONTEXT_QPC_BYTES_164_SQ_PSN_M,
3473 QP_CONTEXT_QPC_BYTES_164_SQ_PSN_S);
3474 qp_attr->dest_qp_num = (u8)roce_get_field(context->qpc_bytes_36,
3475 QP_CONTEXT_QPC_BYTES_36_DEST_QP_M,
3476 QP_CONTEXT_QPC_BYTES_36_DEST_QP_S);
3477 qp_attr->qp_access_flags = ((roce_get_bit(context->qpc_bytes_4,
3478 QP_CONTEXT_QPC_BYTE_4_RDMA_READ_ENABLE_S)) << 2) |
3479 ((roce_get_bit(context->qpc_bytes_4,
3480 QP_CONTEXT_QPC_BYTE_4_RDMA_WRITE_ENABLE_S)) << 1) |
3481 ((roce_get_bit(context->qpc_bytes_4,
3482 QP_CONTEXT_QPC_BYTE_4_ATOMIC_OPERATION_ENABLE_S)) << 3);
3483
3484 if (hr_qp->ibqp.qp_type == IB_QPT_RC ||
3485 hr_qp->ibqp.qp_type == IB_QPT_UC) {
3486 struct ib_global_route *grh =
3487 rdma_ah_retrieve_grh(&qp_attr->ah_attr);
3488
3489 rdma_ah_set_sl(&qp_attr->ah_attr,
3490 roce_get_field(context->qpc_bytes_156,
3491 QP_CONTEXT_QPC_BYTES_156_SL_M,
3492 QP_CONTEXT_QPC_BYTES_156_SL_S));
3493 rdma_ah_set_ah_flags(&qp_attr->ah_attr, IB_AH_GRH);
3494 grh->flow_label =
3495 roce_get_field(context->qpc_bytes_48,
3496 QP_CONTEXT_QPC_BYTES_48_FLOWLABEL_M,
3497 QP_CONTEXT_QPC_BYTES_48_FLOWLABEL_S);
3498 grh->sgid_index =
3499 roce_get_field(context->qpc_bytes_36,
3500 QP_CONTEXT_QPC_BYTES_36_SGID_INDEX_M,
3501 QP_CONTEXT_QPC_BYTES_36_SGID_INDEX_S);
3502 grh->hop_limit =
3503 roce_get_field(context->qpc_bytes_44,
3504 QP_CONTEXT_QPC_BYTES_44_HOPLMT_M,
3505 QP_CONTEXT_QPC_BYTES_44_HOPLMT_S);
3506 grh->traffic_class =
3507 roce_get_field(context->qpc_bytes_48,
3508 QP_CONTEXT_QPC_BYTES_48_TCLASS_M,
3509 QP_CONTEXT_QPC_BYTES_48_TCLASS_S);
3510
3511 memcpy(grh->dgid.raw, context->dgid,
3512 sizeof(grh->dgid.raw));
9a443537 3513 }
3514
3515 qp_attr->pkey_index = roce_get_field(context->qpc_bytes_12,
3516 QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_M,
3517 QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_S);
dd783a21 3518 qp_attr->port_num = hr_qp->port + 1;
9a443537 3519 qp_attr->sq_draining = 0;
be7acd9d 3520 qp_attr->max_rd_atomic = 1 << roce_get_field(context->qpc_bytes_156,
9a443537 3521 QP_CONTEXT_QPC_BYTES_156_INITIATOR_DEPTH_M,
3522 QP_CONTEXT_QPC_BYTES_156_INITIATOR_DEPTH_S);
be7acd9d 3523 qp_attr->max_dest_rd_atomic = 1 << roce_get_field(context->qpc_bytes_32,
9a443537 3524 QP_CONTEXT_QPC_BYTES_32_RESPONDER_RESOURCES_M,
3525 QP_CONTEXT_QPC_BYTES_32_RESPONDER_RESOURCES_S);
3526 qp_attr->min_rnr_timer = (u8)(roce_get_field(context->qpc_bytes_24,
3527 QP_CONTEXT_QPC_BYTES_24_MINIMUM_RNR_NAK_TIMER_M,
3528 QP_CONTEXT_QPC_BYTES_24_MINIMUM_RNR_NAK_TIMER_S));
3529 qp_attr->timeout = (u8)(roce_get_field(context->qpc_bytes_156,
3530 QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_M,
3531 QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_S));
3532 qp_attr->retry_cnt = roce_get_field(context->qpc_bytes_148,
3533 QP_CONTEXT_QPC_BYTES_148_RETRY_COUNT_M,
3534 QP_CONTEXT_QPC_BYTES_148_RETRY_COUNT_S);
3535 qp_attr->rnr_retry = context->rnr_retry;
3536
3537done:
3538 qp_attr->cur_qp_state = qp_attr->qp_state;
3539 qp_attr->cap.max_recv_wr = hr_qp->rq.wqe_cnt;
3540 qp_attr->cap.max_recv_sge = hr_qp->rq.max_gs;
3541
3542 if (!ibqp->uobject) {
3543 qp_attr->cap.max_send_wr = hr_qp->sq.wqe_cnt;
3544 qp_attr->cap.max_send_sge = hr_qp->sq.max_gs;
3545 } else {
3546 qp_attr->cap.max_send_wr = 0;
3547 qp_attr->cap.max_send_sge = 0;
3548 }
3549
3550 qp_init_attr->cap = qp_attr->cap;
3551
3552out:
3553 mutex_unlock(&hr_qp->mutex);
3554 kfree(context);
3555 return ret;
3556}
3557
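/*
 * doorbell_qpn 0 and 1 identify the register-backed special QPs,
 * which take the SQP query path.
 */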
3558static int hns_roce_v1_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
3559 int qp_attr_mask,
3560 struct ib_qp_init_attr *qp_init_attr)
3561{
3562 struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
3563
3564 return hr_qp->doorbell_qpn <= 1 ?
3565 hns_roce_v1_q_sqp(ibqp, qp_attr, qp_attr_mask, qp_init_attr) :
3566 hns_roce_v1_q_qp(ibqp, qp_attr, qp_attr_mask, qp_init_attr);
3567}
d838c481 3568
3569static void hns_roce_check_sdb_status(struct hns_roce_dev *hr_dev,
3570 u32 *old_send, u32 *old_retry,
3571 u32 *tsp_st, u32 *success_flags)
3572{
3573 u32 sdb_retry_cnt;
3574 u32 sdb_send_ptr;
3575 u32 cur_cnt, old_cnt;
3576 u32 send_ptr;
3577
3578 sdb_send_ptr = roce_read(hr_dev, ROCEE_SDB_SEND_PTR_REG);
3579 sdb_retry_cnt = roce_read(hr_dev, ROCEE_SDB_RETRY_CNT_REG);
3580 cur_cnt = roce_get_field(sdb_send_ptr,
3581 ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_M,
3582 ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_S) +
3583 roce_get_field(sdb_retry_cnt,
3584 ROCEE_SDB_RETRY_CNT_SDB_RETRY_CT_M,
3585 ROCEE_SDB_RETRY_CNT_SDB_RETRY_CT_S);
3586 if (!roce_get_bit(*tsp_st, ROCEE_CNT_CLR_CE_CNT_CLR_CE_S)) {
3587 old_cnt = roce_get_field(*old_send,
3588 ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_M,
3589 ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_S) +
3590 roce_get_field(*old_retry,
3591 ROCEE_SDB_RETRY_CNT_SDB_RETRY_CT_M,
3592 ROCEE_SDB_RETRY_CNT_SDB_RETRY_CT_S);
3593 if (cur_cnt - old_cnt > SDB_ST_CMP_VAL)
3594 *success_flags = 1;
3595 } else {
3596 old_cnt = roce_get_field(*old_send,
3597 ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_M,
3598 ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_S);
3599 if (cur_cnt - old_cnt > SDB_ST_CMP_VAL) {
3600 *success_flags = 1;
3601 } else {
3602 send_ptr = roce_get_field(*old_send,
3603 ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_M,
3604 ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_S) +
3605 roce_get_field(sdb_retry_cnt,
3606 ROCEE_SDB_RETRY_CNT_SDB_RETRY_CT_M,
3607 ROCEE_SDB_RETRY_CNT_SDB_RETRY_CT_S);
3608 roce_set_field(*old_send,
3609 ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_M,
3610 ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_S,
3611 send_ptr);
3612 }
3613 }
3614}
3615
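/*
 * Doorbells already issued for a QP must be drained by the hardware
 * before the QP can be torn down: stage 1 waits for the SDB send
 * pointer to catch up with the issue pointer, and stage 2 waits for
 * the SDB invalidate counter (ROCEE_SDB_INV_CNT_REG) to advance past
 * the value recorded at the end of stage 1.
 */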
3616static int check_qp_db_process_status(struct hns_roce_dev *hr_dev,
3617 struct hns_roce_qp *hr_qp,
3618 u32 sdb_issue_ptr,
3619 u32 *sdb_inv_cnt,
3620 u32 *wait_stage)
9a443537 3621{
9a443537 3622 struct device *dev = &hr_dev->pdev->dev;
3623 u32 sdb_send_ptr, old_send;
3624 u32 success_flags = 0;
d838c481 3625 unsigned long end;
f44c863b 3626 u32 old_retry;
3627 u32 inv_cnt;
3628 u32 tsp_st;
3629
3630 if (*wait_stage > HNS_ROCE_V1_DB_STAGE2 ||
3631 *wait_stage < HNS_ROCE_V1_DB_STAGE1) {
3632 dev_err(dev, "QP(0x%lx) db status wait stage(%d) error!\n",
3633 hr_qp->qpn, *wait_stage);
3634 return -EINVAL;
3635 }
9a443537 3636
3637 /* Calculate the total timeout for the entire verification process */
3638 end = msecs_to_jiffies(HNS_ROCE_V1_CHECK_DB_TIMEOUT_MSECS) + jiffies;
3639
3640 if (*wait_stage == HNS_ROCE_V1_DB_STAGE1) {
3641 /* Poll the db process status until the hw has finished processing */
3642 sdb_send_ptr = roce_read(hr_dev, ROCEE_SDB_SEND_PTR_REG);
3643 while (roce_hw_index_cmp_lt(sdb_send_ptr, sdb_issue_ptr,
3644 ROCEE_SDB_PTR_CMP_BITS)) {
3645 if (!time_before(jiffies, end)) {
3646 dev_dbg(dev, "QP(0x%lx) db process stage1 timeout. issue 0x%x send 0x%x.\n",
3647 hr_qp->qpn, sdb_issue_ptr,
3648 sdb_send_ptr);
3649 return 0;
3650 }
3651
3652 msleep(HNS_ROCE_V1_CHECK_DB_SLEEP_MSECS);
3653 sdb_send_ptr = roce_read(hr_dev,
9a443537 3654 ROCEE_SDB_SEND_PTR_REG);
d838c481 3655 }
9a443537 3656
3657 if (roce_get_field(sdb_issue_ptr,
3658 ROCEE_SDB_ISSUE_PTR_SDB_ISSUE_PTR_M,
3659 ROCEE_SDB_ISSUE_PTR_SDB_ISSUE_PTR_S) ==
3660 roce_get_field(sdb_send_ptr,
3661 ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_M,
3662 ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_S)) {
3663 old_send = roce_read(hr_dev, ROCEE_SDB_SEND_PTR_REG);
3664 old_retry = roce_read(hr_dev, ROCEE_SDB_RETRY_CNT_REG);
9a443537 3665
9a443537 3666 do {
3667 tsp_st = roce_read(hr_dev, ROCEE_TSP_BP_ST_REG);
3668 if (roce_get_bit(tsp_st,
3669 ROCEE_TSP_BP_ST_QH_FIFO_ENTRY_S) == 1) {
3670 *wait_stage = HNS_ROCE_V1_DB_WAIT_OK;
3671 return 0;
3672 }
3673
9a443537 3674 if (!time_before(jiffies, end)) {
3675 dev_dbg(dev, "QP(0x%lx) db process stage1 timeout when send ptr equals issue ptr.\n"
3676 "issue 0x%x send 0x%x.\n",
3677 hr_qp->qpn, sdb_issue_ptr,
3678 sdb_send_ptr);
3679 return 0;
9a443537 3680 }
3681
3682 msleep(HNS_ROCE_V1_CHECK_DB_SLEEP_MSECS);
3683
3684 hns_roce_check_sdb_status(hr_dev, &old_send,
3685 &old_retry, &tsp_st,
3686 &success_flags);
3687 } while (!success_flags);
3688 }
3689
3690 *wait_stage = HNS_ROCE_V1_DB_STAGE2;
3691
3692 /* Get list pointer */
3693 *sdb_inv_cnt = roce_read(hr_dev, ROCEE_SDB_INV_CNT_REG);
3694 dev_dbg(dev, "QP(0x%lx) db process stage2. inv cnt = 0x%x.\n",
3695 hr_qp->qpn, *sdb_inv_cnt);
3696 }
3697
3698 if (*wait_stage == HNS_ROCE_V1_DB_STAGE2) {
3699 /* Poll the db invalidate count until the hw has recycled the dbs */
3700 inv_cnt = roce_read(hr_dev, ROCEE_SDB_INV_CNT_REG);
3701 while (roce_hw_index_cmp_lt(inv_cnt,
3702 *sdb_inv_cnt + SDB_INV_CNT_OFFSET,
3703 ROCEE_SDB_CNT_CMP_BITS)) {
3704 if (!time_before(jiffies, end)) {
3705 dev_dbg(dev, "QP(0x%lx) db process stage2 timeout. inv cnt 0x%x.\n",
3706 hr_qp->qpn, inv_cnt);
3707 return 0;
3708 }
3709
3710 msleep(HNS_ROCE_V1_CHECK_DB_SLEEP_MSECS);
3711 inv_cnt = roce_read(hr_dev, ROCEE_SDB_INV_CNT_REG);
9a443537 3712 }
3713
3714 *wait_stage = HNS_ROCE_V1_DB_WAIT_OK;
3715 }
3716
3717 return 0;
3718}
3719
3720static int check_qp_reset_state(struct hns_roce_dev *hr_dev,
3721 struct hns_roce_qp *hr_qp,
3722 struct hns_roce_qp_work *qp_work_entry,
3723 int *is_timeout)
3724{
3725 struct device *dev = &hr_dev->pdev->dev;
3726 u32 sdb_issue_ptr;
3727 int ret;
3728
3729 if (hr_qp->state != IB_QPS_RESET) {
3730 /* Set qp to ERR, and wait for the hw to finish processing all dbs */
3731 ret = hns_roce_v1_modify_qp(&hr_qp->ibqp, NULL, 0, hr_qp->state,
3732 IB_QPS_ERR);
3733 if (ret) {
3734 dev_err(dev, "Modify QP(0x%lx) to ERR failed!\n",
3735 hr_qp->qpn);
3736 return ret;
3737 }
3738
3739 /* Record issued doorbell */
3740 sdb_issue_ptr = roce_read(hr_dev, ROCEE_SDB_ISSUE_PTR_REG);
3741 qp_work_entry->sdb_issue_ptr = sdb_issue_ptr;
3742 qp_work_entry->db_wait_stage = HNS_ROCE_V1_DB_STAGE1;
3743
3744 /* Poll the db process status until the hw has finished processing */
3745 ret = check_qp_db_process_status(hr_dev, hr_qp, sdb_issue_ptr,
3746 &qp_work_entry->sdb_inv_cnt,
3747 &qp_work_entry->db_wait_stage);
3748 if (ret) {
3749 dev_err(dev, "Check QP(0x%lx) db process status failed!\n",
3750 hr_qp->qpn);
3751 return ret;
3752 }
3753
3754 if (qp_work_entry->db_wait_stage != HNS_ROCE_V1_DB_WAIT_OK) {
3755 qp_work_entry->sche_cnt = 0;
3756 *is_timeout = 1;
3757 return 0;
3758 }
3759
3760 /* Modify qp to reset before destroying qp */
3761 ret = hns_roce_v1_modify_qp(&hr_qp->ibqp, NULL, 0, hr_qp->state,
3762 IB_QPS_RESET);
3763 if (ret) {
3764 dev_err(dev, "Modify QP(0x%lx) to RST failed!\n",
3765 hr_qp->qpn);
3766 return ret;
3767 }
3768 }
3769
3770 return 0;
3771}
3772
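/*
 * Deferred-destroy path: when the doorbell drain in
 * hns_roce_v1_destroy_qp() times out, this work item re-checks the
 * doorbell status and requeues itself until the hardware is done,
 * then resets and frees the QP.
 */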
3773static void hns_roce_v1_destroy_qp_work_fn(struct work_struct *work)
3774{
3775 struct hns_roce_qp_work *qp_work_entry;
3776 struct hns_roce_v1_priv *priv;
3777 struct hns_roce_dev *hr_dev;
3778 struct hns_roce_qp *hr_qp;
3779 struct device *dev;
58c4f0d8 3780 unsigned long qpn;
3781 int ret;
3782
3783 qp_work_entry = container_of(work, struct hns_roce_qp_work, work);
3784 hr_dev = to_hr_dev(qp_work_entry->ib_dev);
3785 dev = &hr_dev->pdev->dev;
016a0059 3786 priv = (struct hns_roce_v1_priv *)hr_dev->priv;
d838c481 3787 hr_qp = qp_work_entry->qp;
58c4f0d8 3788 qpn = hr_qp->qpn;
d838c481 3789
58c4f0d8 3790 dev_dbg(dev, "Schedule destroy QP(0x%lx) work.\n", qpn);
3791
3792 qp_work_entry->sche_cnt++;
3793
3794 /* Poll the db process status until the hw has finished processing */
3795 ret = check_qp_db_process_status(hr_dev, hr_qp,
3796 qp_work_entry->sdb_issue_ptr,
3797 &qp_work_entry->sdb_inv_cnt,
3798 &qp_work_entry->db_wait_stage);
3799 if (ret) {
3800 dev_err(dev, "Check QP(0x%lx) db process status failed!\n",
58c4f0d8 3801 qpn);
3802 return;
3803 }
3804
3805 if (qp_work_entry->db_wait_stage != HNS_ROCE_V1_DB_WAIT_OK &&
3806 priv->des_qp.requeue_flag) {
3807 queue_work(priv->des_qp.qp_wq, work);
3808 return;
3809 }
3810
3811 /* Modify qp to reset before destroying qp */
3812 ret = hns_roce_v1_modify_qp(&hr_qp->ibqp, NULL, 0, hr_qp->state,
3813 IB_QPS_RESET);
3814 if (ret) {
58c4f0d8 3815 dev_err(dev, "Modify QP(0x%lx) to RST failed!\n", qpn);
3816 return;
3817 }
3818
3819 hns_roce_qp_remove(hr_dev, hr_qp);
3820 hns_roce_qp_free(hr_dev, hr_qp);
3821
3822 if (hr_qp->ibqp.qp_type == IB_QPT_RC) {
3823 /* RC QP, release QPN */
58c4f0d8 3824 hns_roce_release_range_qp(hr_dev, qpn, 1);
3825 kfree(hr_qp);
3826 } else
3827 kfree(hr_to_hr_sqp(hr_qp));
3828
3829 kfree(qp_work_entry);
3830
58c4f0d8 3831 dev_dbg(dev, "Accomplished destroy QP(0x%lx) work.\n", qpn);
3832}
3833
3834int hns_roce_v1_destroy_qp(struct ib_qp *ibqp)
3835{
3836 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
3837 struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
3838 struct device *dev = &hr_dev->pdev->dev;
3839 struct hns_roce_qp_work qp_work_entry;
3840 struct hns_roce_qp_work *qp_work;
3841 struct hns_roce_v1_priv *priv;
3842 struct hns_roce_cq *send_cq, *recv_cq;
3843 int is_user = !!ibqp->pd->uobject;
3844 int is_timeout = 0;
3845 int ret;
3846
3847 ret = check_qp_reset_state(hr_dev, hr_qp, &qp_work_entry, &is_timeout);
3848 if (ret) {
3849 dev_err(dev, "QP reset state check failed(%d)!\n", ret);
3850 return ret;
9a443537 3851 }
3852
3853 send_cq = to_hr_cq(hr_qp->ibqp.send_cq);
3854 recv_cq = to_hr_cq(hr_qp->ibqp.recv_cq);
3855
3856 hns_roce_lock_cqs(send_cq, recv_cq);
9a443537 3857 if (!is_user) {
3858 __hns_roce_v1_cq_clean(recv_cq, hr_qp->qpn, hr_qp->ibqp.srq ?
3859 to_hr_srq(hr_qp->ibqp.srq) : NULL);
3860 if (send_cq != recv_cq)
3861 __hns_roce_v1_cq_clean(send_cq, hr_qp->qpn, NULL);
3862 }
9a443537 3863 hns_roce_unlock_cqs(send_cq, recv_cq);
3864
3865 if (!is_timeout) {
3866 hns_roce_qp_remove(hr_dev, hr_qp);
3867 hns_roce_qp_free(hr_dev, hr_qp);
9a443537 3868
3869 /* RC QP, release QPN */
3870 if (hr_qp->ibqp.qp_type == IB_QPT_RC)
3871 hns_roce_release_range_qp(hr_dev, hr_qp->qpn, 1);
3872 }
9a443537 3873
3874 hns_roce_mtt_cleanup(hr_dev, &hr_qp->mtt);
3875
d838c481 3876 if (is_user)
9a443537 3877 ib_umem_release(hr_qp->umem);
d838c481 3878 else {
9a443537 3879 kfree(hr_qp->sq.wrid);
3880 kfree(hr_qp->rq.wrid);
d838c481 3881
9a443537 3882 hns_roce_buf_free(hr_dev, hr_qp->buff_size, &hr_qp->hr_buf);
3883 }
9a443537 3884
3885 if (!is_timeout) {
3886 if (hr_qp->ibqp.qp_type == IB_QPT_RC)
3887 kfree(hr_qp);
3888 else
3889 kfree(hr_to_hr_sqp(hr_qp));
3890 } else {
3891 qp_work = kzalloc(sizeof(*qp_work), GFP_KERNEL);
3892 if (!qp_work)
3893 return -ENOMEM;
3894
3895 INIT_WORK(&qp_work->work, hns_roce_v1_destroy_qp_work_fn);
3896 qp_work->ib_dev = &hr_dev->ib_dev;
3897 qp_work->qp = hr_qp;
3898 qp_work->db_wait_stage = qp_work_entry.db_wait_stage;
3899 qp_work->sdb_issue_ptr = qp_work_entry.sdb_issue_ptr;
3900 qp_work->sdb_inv_cnt = qp_work_entry.sdb_inv_cnt;
3901 qp_work->sche_cnt = qp_work_entry.sche_cnt;
3902
016a0059 3903 priv = (struct hns_roce_v1_priv *)hr_dev->priv;
3904 queue_work(priv->des_qp.qp_wq, &qp_work->work);
3905 dev_dbg(dev, "Begin destroy QP(0x%lx) work.\n", hr_qp->qpn);
3906 }
9a443537 3907
3908 return 0;
3909}
3910
d61d6de0 3911static int hns_roce_v1_destroy_cq(struct ib_cq *ibcq)
3912{
3913 struct hns_roce_dev *hr_dev = to_hr_dev(ibcq->device);
3914 struct hns_roce_cq *hr_cq = to_hr_cq(ibcq);
3915 struct device *dev = &hr_dev->pdev->dev;
3916 u32 cqe_cnt_ori;
3917 u32 cqe_cnt_cur;
3918 u32 cq_buf_size;
3919 int wait_time = 0;
3920 int ret = 0;
3921
3922 hns_roce_free_cq(hr_dev, hr_cq);
3923
3924 /*
3925 * Before freeing the cq buffer, we need to ensure that all outstanding
3926 * CQEs have been written back, by checking the CQE counter.
3927 */
3928 cqe_cnt_ori = roce_read(hr_dev, ROCEE_SCAEP_WR_CQE_CNT);
3929 while (1) {
3930 if (roce_read(hr_dev, ROCEE_CAEP_CQE_WCMD_EMPTY) &
3931 HNS_ROCE_CQE_WCMD_EMPTY_BIT)
3932 break;
3933
3934 cqe_cnt_cur = roce_read(hr_dev, ROCEE_SCAEP_WR_CQE_CNT);
3935 if ((cqe_cnt_cur - cqe_cnt_ori) >= HNS_ROCE_MIN_CQE_CNT)
3936 break;
3937
3938 msleep(HNS_ROCE_EACH_FREE_CQ_WAIT_MSECS);
3939 if (wait_time > HNS_ROCE_MAX_FREE_CQ_WAIT_CNT) {
3940 dev_warn(dev, "Destroy cq 0x%lx timeout!\n",
3941 hr_cq->cqn);
3942 ret = -ETIMEDOUT;
3943 break;
3944 }
3945 wait_time++;
3946 }
3947
3948 hns_roce_mtt_cleanup(hr_dev, &hr_cq->hr_buf.hr_mtt);
3949
3950 if (ibcq->uobject)
3951 ib_umem_release(hr_cq->umem);
3952 else {
3953 /* Free the buff of stored cq */
3954 cq_buf_size = (ibcq->cqe + 1) * hr_dev->caps.cq_entry_sz;
3955 hns_roce_buf_free(hr_dev, cq_buf_size, &hr_cq->hr_buf.hr_buf);
3956 }
3957
3958 kfree(hr_cq);
3959
3960 return ret;
3961}
3962
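/*
 * Update the EQ doorbell: the low bits carry the consumer index and
 * the bit at position log_entries carries the req_not flag.
 */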
3963static void set_eq_cons_index_v1(struct hns_roce_eq *eq, int req_not)
3964{
3965 roce_raw_write((eq->cons_index & HNS_ROCE_V1_CONS_IDX_M) |
3966 (req_not << eq->log_entries), eq->doorbell);
3967}
3968
3969static void hns_roce_v1_wq_catas_err_handle(struct hns_roce_dev *hr_dev,
3970 struct hns_roce_aeqe *aeqe, int qpn)
3971{
3972 struct device *dev = &hr_dev->pdev->dev;
3973
3974 dev_warn(dev, "Local Work Queue Catastrophic Error.\n");
3975 switch (roce_get_field(aeqe->asyn, HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_M,
3976 HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_S)) {
3977 case HNS_ROCE_LWQCE_QPC_ERROR:
3978 dev_warn(dev, "QP %d, QPC error.\n", qpn);
3979 break;
3980 case HNS_ROCE_LWQCE_MTU_ERROR:
3981 dev_warn(dev, "QP %d, MTU error.\n", qpn);
3982 break;
3983 case HNS_ROCE_LWQCE_WQE_BA_ADDR_ERROR:
3984 dev_warn(dev, "QP %d, WQE BA addr error.\n", qpn);
3985 break;
3986 case HNS_ROCE_LWQCE_WQE_ADDR_ERROR:
3987 dev_warn(dev, "QP %d, WQE addr error.\n", qpn);
3988 break;
3989 case HNS_ROCE_LWQCE_SQ_WQE_SHIFT_ERROR:
3990 dev_warn(dev, "QP %d, WQE shift error\n", qpn);
3991 break;
3992 case HNS_ROCE_LWQCE_SL_ERROR:
3993 dev_warn(dev, "QP %d, SL error.\n", qpn);
3994 break;
3995 case HNS_ROCE_LWQCE_PORT_ERROR:
3996 dev_warn(dev, "QP %d, port error.\n", qpn);
3997 break;
3998 default:
3999 break;
4000 }
4001}
4002
4003static void hns_roce_v1_local_wq_access_err_handle(struct hns_roce_dev *hr_dev,
4004 struct hns_roce_aeqe *aeqe,
4005 int qpn)
4006{
4007 struct device *dev = &hr_dev->pdev->dev;
4008
4009 dev_warn(dev, "Local Access Violation Work Queue Error.\n");
4010 switch (roce_get_field(aeqe->asyn, HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_M,
4011 HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_S)) {
4012 case HNS_ROCE_LAVWQE_R_KEY_VIOLATION:
4013 dev_warn(dev, "QP %d, R_key violation.\n", qpn);
4014 break;
4015 case HNS_ROCE_LAVWQE_LENGTH_ERROR:
4016 dev_warn(dev, "QP %d, length error.\n", qpn);
4017 break;
4018 case HNS_ROCE_LAVWQE_VA_ERROR:
4019 dev_warn(dev, "QP %d, VA error.\n", qpn);
4020 break;
4021 case HNS_ROCE_LAVWQE_PD_ERROR:
4022 dev_err(dev, "QP %d, PD error.\n", qpn);
4023 break;
4024 case HNS_ROCE_LAVWQE_RW_ACC_ERROR:
4025 dev_warn(dev, "QP %d, rw acc error.\n", qpn);
4026 break;
4027 case HNS_ROCE_LAVWQE_KEY_STATE_ERROR:
4028 dev_warn(dev, "QP %d, key state error.\n", qpn);
4029 break;
4030 case HNS_ROCE_LAVWQE_MR_OPERATION_ERROR:
4031 dev_warn(dev, "QP %d, MR operation error.\n", qpn);
4032 break;
4033 default:
4034 break;
4035 }
4036}
4037
4038static void hns_roce_v1_qp_err_handle(struct hns_roce_dev *hr_dev,
4039 struct hns_roce_aeqe *aeqe,
4040 int event_type)
4041{
4042 struct device *dev = &hr_dev->pdev->dev;
4043 int phy_port;
4044 int qpn;
4045
4046 qpn = roce_get_field(aeqe->event.qp_event.qp,
4047 HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_M,
4048 HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_S);
4049 phy_port = roce_get_field(aeqe->event.qp_event.qp,
4050 HNS_ROCE_AEQE_EVENT_QP_EVENT_PORT_NUM_M,
4051 HNS_ROCE_AEQE_EVENT_QP_EVENT_PORT_NUM_S);
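/* QP0/QP1 are per physical port; fold the port into the qpn */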
4052 if (qpn <= 1)
4053 qpn = HNS_ROCE_MAX_PORTS * qpn + phy_port;
4054
4055 switch (event_type) {
4056 case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
4057 dev_warn(dev, "Invalid Req Local Work Queue Error.\n"
4058 "QP %d, phy_port %d.\n", qpn, phy_port);
4059 break;
4060 case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
4061 hns_roce_v1_wq_catas_err_handle(hr_dev, aeqe, qpn);
4062 break;
4063 case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
4064 hns_roce_v1_local_wq_access_err_handle(hr_dev, aeqe, qpn);
4065 break;
4066 default:
4067 break;
4068 }
4069
4070 hns_roce_qp_event(hr_dev, qpn, event_type);
4071}
4072
4073static void hns_roce_v1_cq_err_handle(struct hns_roce_dev *hr_dev,
4074 struct hns_roce_aeqe *aeqe,
4075 int event_type)
4076{
4077 struct device *dev = &hr_dev->pdev->dev;
4078 u32 cqn;
4079
4080 cqn = le32_to_cpu(roce_get_field(aeqe->event.cq_event.cq,
4081 HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_M,
4082 HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_S));
4083
4084 switch (event_type) {
4085 case HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR:
4086 dev_warn(dev, "CQ 0x%x access err.\n", cqn);
4087 break;
4088 case HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW:
4089 dev_warn(dev, "CQ 0x%x overflow\n", cqn);
4090 break;
4091 case HNS_ROCE_EVENT_TYPE_CQ_ID_INVALID:
4092 dev_warn(dev, "CQ 0x%x ID invalid.\n", cqn);
4093 break;
4094 default:
4095 break;
4096 }
4097
4098 hns_roce_cq_event(hr_dev, cqn, event_type);
4099}
4100
4101static void hns_roce_v1_db_overflow_handle(struct hns_roce_dev *hr_dev,
4102 struct hns_roce_aeqe *aeqe)
4103{
4104 struct device *dev = &hr_dev->pdev->dev;
4105
4106 switch (roce_get_field(aeqe->asyn, HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_M,
4107 HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_S)) {
4108 case HNS_ROCE_DB_SUBTYPE_SDB_OVF:
4109 dev_warn(dev, "SDB overflow.\n");
4110 break;
4111 case HNS_ROCE_DB_SUBTYPE_SDB_ALM_OVF:
4112 dev_warn(dev, "SDB almost overflow.\n");
4113 break;
4114 case HNS_ROCE_DB_SUBTYPE_SDB_ALM_EMP:
4115 dev_warn(dev, "SDB almost empty.\n");
4116 break;
4117 case HNS_ROCE_DB_SUBTYPE_ODB_OVF:
4118 dev_warn(dev, "ODB overflow.\n");
4119 break;
4120 case HNS_ROCE_DB_SUBTYPE_ODB_ALM_OVF:
4121 dev_warn(dev, "ODB almost overflow.\n");
4122 break;
4123 case HNS_ROCE_DB_SUBTYPE_ODB_ALM_EMP:
4124 dev_warn(dev, "SDB almost empty.\n");
4125 break;
4126 default:
4127 break;
4128 }
4129}
4130
4131static struct hns_roce_aeqe *get_aeqe_v1(struct hns_roce_eq *eq, u32 entry)
4132{
4133 unsigned long off = (entry & (eq->entries - 1)) *
4134 HNS_ROCE_AEQ_ENTRY_SIZE;
4135
4136 return (struct hns_roce_aeqe *)((u8 *)
4137 (eq->buf_list[off / HNS_ROCE_BA_SIZE].buf) +
4138 off % HNS_ROCE_BA_SIZE);
4139}
4140
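/*
 * An AEQE is owned by software when its owner bit differs from the
 * wrap parity of the consumer index (cons_index & entries); the
 * parity flips on every pass through the queue.
 */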
4141static struct hns_roce_aeqe *next_aeqe_sw_v1(struct hns_roce_eq *eq)
4142{
4143 struct hns_roce_aeqe *aeqe = get_aeqe_v1(eq, eq->cons_index);
4144
4145 return (roce_get_bit(aeqe->asyn, HNS_ROCE_AEQE_U32_4_OWNER_S) ^
4146 !!(eq->cons_index & eq->entries)) ? aeqe : NULL;
4147}
4148
4149static int hns_roce_v1_aeq_int(struct hns_roce_dev *hr_dev,
4150 struct hns_roce_eq *eq)
4151{
4152 struct device *dev = &hr_dev->pdev->dev;
4153 struct hns_roce_aeqe *aeqe;
4154 int aeqes_found = 0;
4155 int event_type;
4156
4157 while ((aeqe = next_aeqe_sw_v1(eq))) {
4158
4159 /* Make sure we read the AEQ entry after we have checked the
4160 * ownership bit
4161 */
4162 dma_rmb();
4163
4164 dev_dbg(dev, "aeqe = %p, aeqe->asyn.event_type = 0x%lx\n", aeqe,
4165 roce_get_field(aeqe->asyn,
4166 HNS_ROCE_AEQE_U32_4_EVENT_TYPE_M,
4167 HNS_ROCE_AEQE_U32_4_EVENT_TYPE_S));
4168 event_type = roce_get_field(aeqe->asyn,
4169 HNS_ROCE_AEQE_U32_4_EVENT_TYPE_M,
4170 HNS_ROCE_AEQE_U32_4_EVENT_TYPE_S);
4171 switch (event_type) {
4172 case HNS_ROCE_EVENT_TYPE_PATH_MIG:
4173 dev_warn(dev, "PATH MIG not supported\n");
4174 break;
4175 case HNS_ROCE_EVENT_TYPE_COMM_EST:
4176 dev_warn(dev, "COMMUNICATION established\n");
4177 break;
4178 case HNS_ROCE_EVENT_TYPE_SQ_DRAINED:
4179 dev_warn(dev, "SQ DRAINED not supported\n");
4180 break;
4181 case HNS_ROCE_EVENT_TYPE_PATH_MIG_FAILED:
4182 dev_warn(dev, "PATH MIG failed\n");
4183 break;
4184 case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
4185 case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
4186 case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
4187 hns_roce_v1_qp_err_handle(hr_dev, aeqe, event_type);
4188 break;
4189 case HNS_ROCE_EVENT_TYPE_SRQ_LIMIT_REACH:
4190 case HNS_ROCE_EVENT_TYPE_SRQ_CATAS_ERROR:
4191 case HNS_ROCE_EVENT_TYPE_SRQ_LAST_WQE_REACH:
4192 dev_warn(dev, "SRQ not support!\n");
4193 break;
4194 case HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR:
4195 case HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW:
4196 case HNS_ROCE_EVENT_TYPE_CQ_ID_INVALID:
4197 hns_roce_v1_cq_err_handle(hr_dev, aeqe, event_type);
4198 break;
4199 case HNS_ROCE_EVENT_TYPE_PORT_CHANGE:
4200 dev_warn(dev, "port change.\n");
4201 break;
4202 case HNS_ROCE_EVENT_TYPE_MB:
4203 hns_roce_cmd_event(hr_dev,
4204 le16_to_cpu(aeqe->event.cmd.token),
4205 aeqe->event.cmd.status,
4206 le64_to_cpu(aeqe->event.cmd.out_param
4207 ));
4208 break;
4209 case HNS_ROCE_EVENT_TYPE_DB_OVERFLOW:
4210 hns_roce_v1_db_overflow_handle(hr_dev, aeqe);
4211 break;
4212 case HNS_ROCE_EVENT_TYPE_CEQ_OVERFLOW:
4213 dev_warn(dev, "CEQ 0x%lx overflow.\n",
4214 roce_get_field(aeqe->event.ce_event.ceqe,
4215 HNS_ROCE_AEQE_EVENT_CE_EVENT_CEQE_CEQN_M,
4216 HNS_ROCE_AEQE_EVENT_CE_EVENT_CEQE_CEQN_S));
4217 break;
4218 default:
4219 dev_warn(dev, "Unhandled event %d on EQ %d at idx %u.\n",
4220 event_type, eq->eqn, eq->cons_index);
4221 break;
4222 }
4223
4224 eq->cons_index++;
4225 aeqes_found = 1;
4226
4227 if (eq->cons_index > 2 * hr_dev->caps.aeqe_depth - 1) {
4228 dev_warn(dev, "cons_index overflow, set back to 0.\n");
4229 eq->cons_index = 0;
4230 }
4231 }
4232
4233 set_eq_cons_index_v1(eq, 0);
4234
4235 return aeqes_found;
4236}
4237
4238static struct hns_roce_ceqe *get_ceqe_v1(struct hns_roce_eq *eq, u32 entry)
4239{
4240 unsigned long off = (entry & (eq->entries - 1)) *
4241 HNS_ROCE_CEQ_ENTRY_SIZE;
4242
4243 return (struct hns_roce_ceqe *)((u8 *)
4244 (eq->buf_list[off / HNS_ROCE_BA_SIZE].buf) +
4245 off % HNS_ROCE_BA_SIZE);
4246}
4247
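/* CEQEs follow the same owner-bit convention as the AEQEs above. */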
4248static struct hns_roce_ceqe *next_ceqe_sw_v1(struct hns_roce_eq *eq)
4249{
4250 struct hns_roce_ceqe *ceqe = get_ceqe_v1(eq, eq->cons_index);
4251
4252 return (!!(roce_get_bit(ceqe->comp,
4253 HNS_ROCE_CEQE_CEQE_COMP_OWNER_S))) ^
4254 (!!(eq->cons_index & eq->entries)) ? ceqe : NULL;
4255}
4256
4257static int hns_roce_v1_ceq_int(struct hns_roce_dev *hr_dev,
4258 struct hns_roce_eq *eq)
4259{
4260 struct hns_roce_ceqe *ceqe;
4261 int ceqes_found = 0;
4262 u32 cqn;
4263
4264 while ((ceqe = next_ceqe_sw_v1(eq))) {
4265
4266 /* Make sure we read CEQ entry after we have checked the
4267 * ownership bit
4268 */
4269 dma_rmb();
4270
4271 cqn = roce_get_field(ceqe->comp,
4272 HNS_ROCE_CEQE_CEQE_COMP_CQN_M,
4273 HNS_ROCE_CEQE_CEQE_COMP_CQN_S);
4274 hns_roce_cq_completion(hr_dev, cqn);
4275
4276 ++eq->cons_index;
4277 ceqes_found = 1;
4278
4279 if (eq->cons_index > 2 * hr_dev->caps.ceqe_depth - 1) {
4280 dev_warn(&eq->hr_dev->pdev->dev,
4281 "cons_index overflow, set back to 0.\n");
4282 eq->cons_index = 0;
4283 }
4284 }
4285
4286 set_eq_cons_index_v1(eq, 0);
4287
4288 return ceqes_found;
4289}
4290
4291static irqreturn_t hns_roce_v1_msix_interrupt_eq(int irq, void *eq_ptr)
4292{
4293 struct hns_roce_eq *eq = eq_ptr;
4294 struct hns_roce_dev *hr_dev = eq->hr_dev;
4295 int int_work = 0;
4296
4297 if (eq->type_flag == HNS_ROCE_CEQ)
4298 /* CEQ irq routine; the CEQ is a pulse irq and needs no clearing */
4299 int_work = hns_roce_v1_ceq_int(hr_dev, eq);
4300 else
4301 /* AEQ irq routine; the AEQ is a pulse irq and needs no clearing */
4302 int_work = hns_roce_v1_aeq_int(hr_dev, eq);
4303
4304 return IRQ_RETVAL(int_work);
4305}
4306
4307static irqreturn_t hns_roce_v1_msix_interrupt_abn(int irq, void *dev_id)
4308{
4309 struct hns_roce_dev *hr_dev = dev_id;
4310 struct device *dev = &hr_dev->pdev->dev;
4311 int int_work = 0;
4312 u32 caepaemask_val;
4313 u32 cealmovf_val;
4314 u32 caepaest_val;
4315 u32 aeshift_val;
4316 u32 ceshift_val;
4317 u32 cemask_val;
4318 int i;
4319
4320 /*
4321 * Abnormal interrupt:
4322 * AEQ overflow, ECC multi-bit error and CEQ overflow must clear the
4323 * interrupt: mask the irq, clear the irq state, then cancel the mask
4324 */
4325 aeshift_val = roce_read(hr_dev, ROCEE_CAEP_AEQC_AEQE_SHIFT_REG);
4326
4327 /* AEQE overflow */
4328 if (roce_get_bit(aeshift_val,
4329 ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQ_ALM_OVF_INT_ST_S) == 1) {
4330 dev_warn(dev, "AEQ overflow!\n");
4331
4332 /* Set mask */
4333 caepaemask_val = roce_read(hr_dev, ROCEE_CAEP_AE_MASK_REG);
4334 roce_set_bit(caepaemask_val,
4335 ROCEE_CAEP_AE_MASK_CAEP_AEQ_ALM_OVF_MASK_S,
4336 HNS_ROCE_INT_MASK_ENABLE);
4337 roce_write(hr_dev, ROCEE_CAEP_AE_MASK_REG, caepaemask_val);
4338
4339 /* Clear int state(INT_WC : write 1 clear) */
4340 caepaest_val = roce_read(hr_dev, ROCEE_CAEP_AE_ST_REG);
4341 roce_set_bit(caepaest_val,
4342 ROCEE_CAEP_AE_ST_CAEP_AEQ_ALM_OVF_S, 1);
4343 roce_write(hr_dev, ROCEE_CAEP_AE_ST_REG, caepaest_val);
4344
4345 /* Clear mask */
4346 caepaemask_val = roce_read(hr_dev, ROCEE_CAEP_AE_MASK_REG);
4347 roce_set_bit(caepaemask_val,
4348 ROCEE_CAEP_AE_MASK_CAEP_AEQ_ALM_OVF_MASK_S,
4349 HNS_ROCE_INT_MASK_DISABLE);
4350 roce_write(hr_dev, ROCEE_CAEP_AE_MASK_REG, caepaemask_val);
4351 }
4352
4353 /* CEQ almost overflow */
4354 for (i = 0; i < hr_dev->caps.num_comp_vectors; i++) {
4355 ceshift_val = roce_read(hr_dev, ROCEE_CAEP_CEQC_SHIFT_0_REG +
4356 i * CEQ_REG_OFFSET);
4357
4358 if (roce_get_bit(ceshift_val,
4359 ROCEE_CAEP_CEQC_SHIFT_CAEP_CEQ_ALM_OVF_INT_ST_S) == 1) {
4360 dev_warn(dev, "CEQ[%d] almost overflow!\n", i);
4361 int_work++;
4362
4363 /* Set mask */
4364 cemask_val = roce_read(hr_dev,
4365 ROCEE_CAEP_CE_IRQ_MASK_0_REG +
4366 i * CEQ_REG_OFFSET);
4367 roce_set_bit(cemask_val,
4368 ROCEE_CAEP_CE_IRQ_MASK_CAEP_CEQ_ALM_OVF_MASK_S,
4369 HNS_ROCE_INT_MASK_ENABLE);
4370 roce_write(hr_dev, ROCEE_CAEP_CE_IRQ_MASK_0_REG +
4371 i * CEQ_REG_OFFSET, cemask_val);
4372
4373 /* Clear int state(INT_WC : write 1 clear) */
4374 cealmovf_val = roce_read(hr_dev,
4375 ROCEE_CAEP_CEQ_ALM_OVF_0_REG +
4376 i * CEQ_REG_OFFSET);
4377 roce_set_bit(cealmovf_val,
4378 ROCEE_CAEP_CEQ_ALM_OVF_CAEP_CEQ_ALM_OVF_S,
4379 1);
4380 roce_write(hr_dev, ROCEE_CAEP_CEQ_ALM_OVF_0_REG +
4381 i * CEQ_REG_OFFSET, cealmovf_val);
4382
4383 /* Clear mask */
4384 cemask_val = roce_read(hr_dev,
4385 ROCEE_CAEP_CE_IRQ_MASK_0_REG +
4386 i * CEQ_REG_OFFSET);
4387 roce_set_bit(cemask_val,
4388 ROCEE_CAEP_CE_IRQ_MASK_CAEP_CEQ_ALM_OVF_MASK_S,
4389 HNS_ROCE_INT_MASK_DISABLE);
4390 roce_write(hr_dev, ROCEE_CAEP_CE_IRQ_MASK_0_REG +
4391 i * CEQ_REG_OFFSET, cemask_val);
4392 }
4393 }
4394
4395 /* ECC multi-bit error alarm */
4396 dev_warn(dev, "ECC UCERR ALARM: 0x%x, 0x%x, 0x%x\n",
4397 roce_read(hr_dev, ROCEE_ECC_UCERR_ALM0_REG),
4398 roce_read(hr_dev, ROCEE_ECC_UCERR_ALM1_REG),
4399 roce_read(hr_dev, ROCEE_ECC_UCERR_ALM2_REG));
4400
4401 dev_warn(dev, "ECC CERR ALARM: 0x%x, 0x%x, 0x%x\n",
4402 roce_read(hr_dev, ROCEE_ECC_CERR_ALM0_REG),
4403 roce_read(hr_dev, ROCEE_ECC_CERR_ALM1_REG),
4404 roce_read(hr_dev, ROCEE_ECC_CERR_ALM2_REG));
4405
4406 return IRQ_RETVAL(int_work);
4407}
4408
4409static void hns_roce_v1_int_mask_enable(struct hns_roce_dev *hr_dev)
4410{
4411 u32 aemask_val;
4412 int masken = 0;
4413 int i;
4414
4415 /* AEQ INT */
4416 aemask_val = roce_read(hr_dev, ROCEE_CAEP_AE_MASK_REG);
4417 roce_set_bit(aemask_val, ROCEE_CAEP_AE_MASK_CAEP_AEQ_ALM_OVF_MASK_S,
4418 masken);
4419 roce_set_bit(aemask_val, ROCEE_CAEP_AE_MASK_CAEP_AE_IRQ_MASK_S, masken);
4420 roce_write(hr_dev, ROCEE_CAEP_AE_MASK_REG, aemask_val);
4421
4422 /* CEQ INT */
4423 for (i = 0; i < hr_dev->caps.num_comp_vectors; i++) {
4424 /* IRQ mask */
4425 roce_write(hr_dev, ROCEE_CAEP_CE_IRQ_MASK_0_REG +
4426 i * CEQ_REG_OFFSET, masken);
4427 }
4428}
4429
4430static void hns_roce_v1_free_eq(struct hns_roce_dev *hr_dev,
4431 struct hns_roce_eq *eq)
4432{
4433 int npages = (PAGE_ALIGN(eq->eqe_size * eq->entries) +
4434 HNS_ROCE_BA_SIZE - 1) / HNS_ROCE_BA_SIZE;
4435 int i;
4436
4437 if (!eq->buf_list)
4438 return;
4439
4440 for (i = 0; i < npages; ++i)
4441 dma_free_coherent(&hr_dev->pdev->dev, HNS_ROCE_BA_SIZE,
4442 eq->buf_list[i].buf, eq->buf_list[i].map);
4443
4444 kfree(eq->buf_list);
4445}
4446
4447static void hns_roce_v1_enable_eq(struct hns_roce_dev *hr_dev, int eq_num,
4448 int enable_flag)
4449{
4450 void __iomem *eqc = hr_dev->eq_table.eqc_base[eq_num];
4451 u32 val;
4452
4453 val = readl(eqc);
4454
4455 if (enable_flag)
4456 roce_set_field(val,
4457 ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_M,
4458 ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_S,
4459 HNS_ROCE_EQ_STAT_VALID);
4460 else
4461 roce_set_field(val,
4462 ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_M,
4463 ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_S,
4464 HNS_ROCE_EQ_STAT_INVALID);
4465 writel(val, eqc);
4466}
4467
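/*
 * The EQ buffer is carved into BA-sized DMA-coherent chunks; the check
 * below additionally requires the whole EQ to fit within a single BA.
 */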
4468static int hns_roce_v1_create_eq(struct hns_roce_dev *hr_dev,
4469 struct hns_roce_eq *eq)
4470{
4471 void __iomem *eqc = hr_dev->eq_table.eqc_base[eq->eqn];
4472 struct device *dev = &hr_dev->pdev->dev;
4473 dma_addr_t tmp_dma_addr;
4474 u32 eqconsindx_val = 0;
4475 u32 eqcuridx_val = 0;
4476 u32 eqshift_val = 0;
4477 int num_bas;
4478 int ret;
4479 int i;
4480
4481 num_bas = (PAGE_ALIGN(eq->entries * eq->eqe_size) +
4482 HNS_ROCE_BA_SIZE - 1) / HNS_ROCE_BA_SIZE;
4483
4484 if ((eq->entries * eq->eqe_size) > HNS_ROCE_BA_SIZE) {
4485 dev_err(dev, "[error]eq buf %d gt ba size(%d) need bas=%d\n",
4486 (eq->entries * eq->eqe_size), HNS_ROCE_BA_SIZE,
4487 num_bas);
4488 return -EINVAL;
4489 }
4490
4491 eq->buf_list = kcalloc(num_bas, sizeof(*eq->buf_list), GFP_KERNEL);
4492 if (!eq->buf_list)
4493 return -ENOMEM;
4494
4495 for (i = 0; i < num_bas; ++i) {
4496 eq->buf_list[i].buf = dma_alloc_coherent(dev, HNS_ROCE_BA_SIZE,
4497 &tmp_dma_addr,
4498 GFP_KERNEL);
4499 if (!eq->buf_list[i].buf) {
4500 ret = -ENOMEM;
4501 goto err_out_free_pages;
4502 }
4503
4504 eq->buf_list[i].map = tmp_dma_addr;
4505 memset(eq->buf_list[i].buf, 0, HNS_ROCE_BA_SIZE);
4506 }
4507 eq->cons_index = 0;
4508 roce_set_field(eqshift_val,
4509 ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_M,
4510 ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_S,
4511 HNS_ROCE_EQ_STAT_INVALID);
4512 roce_set_field(eqshift_val,
4513 ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_AEQE_SHIFT_M,
4514 ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_AEQE_SHIFT_S,
4515 eq->log_entries);
4516 writel(eqshift_val, eqc);
4517
4518 /* Configure eq extended address bits 12~44 */
4519 writel((u32)(eq->buf_list[0].map >> 12), eqc + 4);
4520
4521 /*
4522 * Configure eq extended address bits 45~49.
4523 * 44 = 32 + 12: the address is shifted right by 12 because the
4524 * hardware uses 4K pages, and by a further 32 to extract the high
4525 * 32-bit value written to the hardware.
4526 */
4527 roce_set_field(eqcuridx_val, ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQ_BT_H_M,
4528 ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQ_BT_H_S,
4529 eq->buf_list[0].map >> 44);
4530 roce_set_field(eqcuridx_val,
4531 ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQE_CUR_IDX_M,
4532 ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQE_CUR_IDX_S, 0);
4533 writel(eqcuridx_val, eqc + 8);
4534
4535 /* Configure eq consumer index */
4536 roce_set_field(eqconsindx_val,
4537 ROCEE_CAEP_AEQE_CONS_IDX_CAEP_AEQE_CONS_IDX_M,
4538 ROCEE_CAEP_AEQE_CONS_IDX_CAEP_AEQE_CONS_IDX_S, 0);
4539 writel(eqconsindx_val, eqc + 0xc);
4540
4541 return 0;
4542
4543err_out_free_pages:
4544 for (i -= 1; i >= 0; i--)
4545 dma_free_coherent(dev, HNS_ROCE_BA_SIZE, eq->buf_list[i].buf,
4546 eq->buf_list[i].map);
4547
4548 kfree(eq->buf_list);
4549 return ret;
4550}
4551
4552static int hns_roce_v1_init_eq_table(struct hns_roce_dev *hr_dev)
4553{
4554 struct hns_roce_eq_table *eq_table = &hr_dev->eq_table;
4555 struct device *dev = &hr_dev->pdev->dev;
4556 struct hns_roce_eq *eq;
4557 int irq_num;
4558 int eq_num;
4559 int ret;
4560 int i, j;
4561
4562 eq_num = hr_dev->caps.num_comp_vectors + hr_dev->caps.num_aeq_vectors;
4563 irq_num = eq_num + hr_dev->caps.num_other_vectors;
4564
4565 eq_table->eq = kcalloc(eq_num, sizeof(*eq_table->eq), GFP_KERNEL);
4566 if (!eq_table->eq)
4567 return -ENOMEM;
4568
4569 eq_table->eqc_base = kcalloc(eq_num, sizeof(*eq_table->eqc_base),
4570 GFP_KERNEL);
4571 if (!eq_table->eqc_base) {
4572 ret = -ENOMEM;
4573 goto err_eqc_base_alloc_fail;
4574 }
4575
4576 for (i = 0; i < eq_num; i++) {
4577 eq = &eq_table->eq[i];
4578 eq->hr_dev = hr_dev;
4579 eq->eqn = i;
4580 eq->irq = hr_dev->irq[i];
4581 eq->log_page_size = PAGE_SHIFT;
4582
4583 if (i < hr_dev->caps.num_comp_vectors) {
4584 /* CEQ */
4585 eq_table->eqc_base[i] = hr_dev->reg_base +
4586 ROCEE_CAEP_CEQC_SHIFT_0_REG +
4587 CEQ_REG_OFFSET * i;
4588 eq->type_flag = HNS_ROCE_CEQ;
4589 eq->doorbell = hr_dev->reg_base +
4590 ROCEE_CAEP_CEQC_CONS_IDX_0_REG +
4591 CEQ_REG_OFFSET * i;
4592 eq->entries = hr_dev->caps.ceqe_depth;
4593 eq->log_entries = ilog2(eq->entries);
4594 eq->eqe_size = HNS_ROCE_CEQ_ENTRY_SIZE;
4595 } else {
4596 /* AEQ */
4597 eq_table->eqc_base[i] = hr_dev->reg_base +
4598 ROCEE_CAEP_AEQC_AEQE_SHIFT_REG;
4599 eq->type_flag = HNS_ROCE_AEQ;
4600 eq->doorbell = hr_dev->reg_base +
4601 ROCEE_CAEP_AEQE_CONS_IDX_REG;
4602 eq->entries = hr_dev->caps.aeqe_depth;
4603 eq->log_entries = ilog2(eq->entries);
4604 eq->eqe_size = HNS_ROCE_AEQ_ENTRY_SIZE;
4605 }
4606 }
4607
4608 /* Disable irq */
4609 hns_roce_v1_int_mask_enable(hr_dev);
4610
4611 /* Configure ce int interval */
4612 roce_write(hr_dev, ROCEE_CAEP_CE_INTERVAL_CFG_REG,
4613 HNS_ROCE_CEQ_DEFAULT_INTERVAL);
4614
4615 /* Configure ce int burst num */
4616 roce_write(hr_dev, ROCEE_CAEP_CE_BURST_NUM_CFG_REG,
4617 HNS_ROCE_CEQ_DEFAULT_BURST_NUM);
4618
4619 for (i = 0; i < eq_num; i++) {
4620 ret = hns_roce_v1_create_eq(hr_dev, &eq_table->eq[i]);
4621 if (ret) {
4622 dev_err(dev, "eq create failed\n");
4623 goto err_create_eq_fail;
4624 }
4625 }
4626
4627 for (j = 0; j < irq_num; j++) {
4628 if (j < eq_num)
4629 ret = request_irq(hr_dev->irq[j],
4630 hns_roce_v1_msix_interrupt_eq, 0,
4631 hr_dev->irq_names[j],
4632 &eq_table->eq[j]);
4633 else
4634 ret = request_irq(hr_dev->irq[j],
4635 hns_roce_v1_msix_interrupt_abn, 0,
4636 hr_dev->irq_names[j], hr_dev);
4637
4638 if (ret) {
4639 dev_err(dev, "request irq error!\n");
4640 goto err_request_irq_fail;
4641 }
4642 }
4643
4644 for (i = 0; i < eq_num; i++)
4645 hns_roce_v1_enable_eq(hr_dev, i, EQ_ENABLE);
4646
4647 return 0;
4648
4649err_request_irq_fail:
4650 for (j -= 1; j >= 0; j--)
4651 free_irq(hr_dev->irq[j], &eq_table->eq[j]);
4652
4653err_create_eq_fail:
4654 for (i -= 1; i >= 0; i--)
4655 hns_roce_v1_free_eq(hr_dev, &eq_table->eq[i]);
4656
4657 kfree(eq_table->eqc_base);
4658
4659err_eqc_base_alloc_fail:
4660 kfree(eq_table->eq);
4661
4662 return ret;
4663}
4664
4665static void hns_roce_v1_cleanup_eq_table(struct hns_roce_dev *hr_dev)
4666{
4667 struct hns_roce_eq_table *eq_table = &hr_dev->eq_table;
4668 int irq_num;
4669 int eq_num;
4670 int i;
4671
4672 eq_num = hr_dev->caps.num_comp_vectors + hr_dev->caps.num_aeq_vectors;
4673 irq_num = eq_num + hr_dev->caps.num_other_vectors;
4674 for (i = 0; i < eq_num; i++) {
4675 /* Disable EQ */
4676 hns_roce_v1_enable_eq(hr_dev, i, EQ_DISABLE);
4677
4678 free_irq(hr_dev->irq[i], &eq_table->eq[i]);
4679
4680 hns_roce_v1_free_eq(hr_dev, &eq_table->eq[i]);
4681 }
4682 for (i = eq_num; i < irq_num; i++)
4683 free_irq(hr_dev->irq[i], hr_dev);
4684
4685 kfree(eq_table->eqc_base);
4686 kfree(eq_table->eq);
4687}
4688
08805fdb 4689static const struct hns_roce_hw hns_roce_hw_v1 = {
9a443537 4690 .reset = hns_roce_v1_reset,
4691 .hw_profile = hns_roce_v1_profile,
4692 .hw_init = hns_roce_v1_init,
4693 .hw_exit = hns_roce_v1_exit,
4694 .post_mbox = hns_roce_v1_post_mbox,
4695 .chk_mbox = hns_roce_v1_chk_mbox,
9a443537 4696 .set_gid = hns_roce_v1_set_gid,
4697 .set_mac = hns_roce_v1_set_mac,
4698 .set_mtu = hns_roce_v1_set_mtu,
4699 .write_mtpt = hns_roce_v1_write_mtpt,
4700 .write_cqc = hns_roce_v1_write_cqc,
b156269d 4701 .modify_cq = hns_roce_v1_modify_cq,
97f0e39f 4702 .clear_hem = hns_roce_v1_clear_hem,
9a443537 4703 .modify_qp = hns_roce_v1_modify_qp,
4704 .query_qp = hns_roce_v1_query_qp,
4705 .destroy_qp = hns_roce_v1_destroy_qp,
4706 .post_send = hns_roce_v1_post_send,
4707 .post_recv = hns_roce_v1_post_recv,
4708 .req_notify_cq = hns_roce_v1_req_notify_cq,
4709 .poll_cq = hns_roce_v1_poll_cq,
bfcc681b 4710 .dereg_mr = hns_roce_v1_dereg_mr,
afb6b092 4711 .destroy_cq = hns_roce_v1_destroy_cq,
4712 .init_eq = hns_roce_v1_init_eq_table,
4713 .cleanup_eq = hns_roce_v1_cleanup_eq_table,
9a443537 4714};
4715
4716static const struct of_device_id hns_roce_of_match[] = {
4717 { .compatible = "hisilicon,hns-roce-v1", .data = &hns_roce_hw_v1, },
4718 {},
4719};
4720MODULE_DEVICE_TABLE(of, hns_roce_of_match);
4721
4722static const struct acpi_device_id hns_roce_acpi_match[] = {
4723 { "HISI00D1", (kernel_ulong_t)&hns_roce_hw_v1 },
4724 {},
4725};
4726MODULE_DEVICE_TABLE(acpi, hns_roce_acpi_match);
4727
4728static int hns_roce_node_match(struct device *dev, void *fwnode)
4729{
4730 return dev->fwnode == fwnode;
4731}
4732
4733static struct
4734platform_device *hns_roce_find_pdev(struct fwnode_handle *fwnode)
4735{
4736 struct device *dev;
4737
4738 /* get the 'device' corresponding to the matching 'fwnode' */
4739 dev = bus_find_device(&platform_bus_type, NULL,
4740 fwnode, hns_roce_node_match);
4741 /* get the platform device */
4742 return dev ? to_platform_device(dev) : NULL;
4743}
4744
4745static int hns_roce_get_cfg(struct hns_roce_dev *hr_dev)
4746{
4747 struct device *dev = &hr_dev->pdev->dev;
4748 struct platform_device *pdev = NULL;
4749 struct net_device *netdev = NULL;
4750 struct device_node *net_node;
4751 struct resource *res;
4752 int port_cnt = 0;
4753 u8 phy_port;
4754 int ret;
4755 int i;
4756
4757 /* check if we are compatible with the underlying SoC */
4758 if (dev_of_node(dev)) {
4759 const struct of_device_id *of_id;
4760
4761 of_id = of_match_node(hns_roce_of_match, dev->of_node);
4762 if (!of_id) {
4763 dev_err(dev, "device is not compatible!\n");
4764 return -ENXIO;
4765 }
4766 hr_dev->hw = (const struct hns_roce_hw *)of_id->data;
4767 if (!hr_dev->hw) {
4768 dev_err(dev, "couldn't get H/W specific DT data!\n");
4769 return -ENXIO;
4770 }
4771 } else if (is_acpi_device_node(dev->fwnode)) {
4772 const struct acpi_device_id *acpi_id;
4773
4774 acpi_id = acpi_match_device(hns_roce_acpi_match, dev);
4775 if (!acpi_id) {
4776 dev_err(dev, "device is not compatible!\n");
4777 return -ENXIO;
4778 }
4779 hr_dev->hw = (const struct hns_roce_hw *) acpi_id->driver_data;
4780 if (!hr_dev->hw) {
4781 dev_err(dev, "couldn't get H/W specific ACPI data!\n");
4782 return -ENXIO;
4783 }
4784 } else {
4785 dev_err(dev, "can't read compatibility data from DT or ACPI\n");
4786 return -ENXIO;
4787 }
4788
4789 /* get the mapped register base address */
4790 res = platform_get_resource(hr_dev->pdev, IORESOURCE_MEM, 0);
4791 hr_dev->reg_base = devm_ioremap_resource(dev, res);
4792 if (IS_ERR(hr_dev->reg_base))
4793 return PTR_ERR(hr_dev->reg_base);
4794
4795 /* read the node_guid of IB device from the DT or ACPI */
4796 ret = device_property_read_u8_array(dev, "node-guid",
4797 (u8 *)&hr_dev->ib_dev.node_guid,
4798 GUID_LEN);
4799 if (ret) {
4800 dev_err(dev, "couldn't get node_guid from DT or ACPI!\n");
4801 return ret;
4802 }
4803
4804 /* get the RoCE associated ethernet ports or netdevices */
4805 for (i = 0; i < HNS_ROCE_MAX_PORTS; i++) {
4806 if (dev_of_node(dev)) {
4807 net_node = of_parse_phandle(dev->of_node, "eth-handle",
4808 i);
4809 if (!net_node)
4810 continue;
4811 pdev = of_find_device_by_node(net_node);
4812 } else if (is_acpi_device_node(dev->fwnode)) {
4813 struct acpi_reference_args args;
4814 struct fwnode_handle *fwnode;
4815
4816 ret = acpi_node_get_property_reference(dev->fwnode,
4817 "eth-handle",
4818 i, &args);
4819 if (ret)
4820 continue;
4821 fwnode = acpi_fwnode_handle(args.adev);
4822 pdev = hns_roce_find_pdev(fwnode);
4823 } else {
4824 dev_err(dev, "cannot read data from DT or ACPI\n");
4825 return -ENXIO;
4826 }
4827
4828 if (pdev) {
4829 netdev = platform_get_drvdata(pdev);
4830 phy_port = (u8)i;
4831 if (netdev) {
4832 hr_dev->iboe.netdevs[port_cnt] = netdev;
4833 hr_dev->iboe.phy_port[port_cnt] = phy_port;
4834 } else {
4835 dev_err(dev, "no netdev found with pdev %s\n",
4836 pdev->name);
4837 return -ENODEV;
4838 }
4839 port_cnt++;
4840 }
4841 }
4842
4843 if (port_cnt == 0) {
4844 dev_err(dev, "unable to get eth-handle for available ports!\n");
4845 return -EINVAL;
4846 }
4847
4848 hr_dev->caps.num_ports = port_cnt;
4849
4850 /* cmd issue mode: 0 is poll, 1 is event */
4851 hr_dev->cmd_mod = 1;
4852 hr_dev->loop_idc = 0;
4853 hr_dev->sdb_offset = ROCEE_DB_SQ_L_0_REG;
4854 hr_dev->odb_offset = ROCEE_DB_OTHERS_L_0_REG;
4855
4856 /* read the interrupt names from the DT or ACPI */
4857 ret = device_property_read_string_array(dev, "interrupt-names",
4858 hr_dev->irq_names,
b16f8188 4859 HNS_ROCE_V1_MAX_IRQ_NUM);
4860 if (ret < 0) {
4861 dev_err(dev, "couldn't get interrupt names from DT or ACPI!\n");
4862 return ret;
4863 }
4864
4865 /* fetch the interrupt numbers */
b16f8188 4866 for (i = 0; i < HNS_ROCE_V1_MAX_IRQ_NUM; i++) {
4867 hr_dev->irq[i] = platform_get_irq(hr_dev->pdev, i);
4868 if (hr_dev->irq[i] <= 0) {
4869 dev_err(dev, "platform get of irq[=%d] failed!\n", i);
4870 return -EINVAL;
4871 }
4872 }
4873
4874 return 0;
4875}
4876
4877/**
4878 * hns_roce_probe - RoCE driver entry point
4879 * @pdev: pointer to the platform device
4880 *
4881 * Return: 0 on success, or a negative errno on failure.
4882 */
4883static int hns_roce_probe(struct platform_device *pdev)
4884{
4885 int ret;
4886 struct hns_roce_dev *hr_dev;
4887 struct device *dev = &pdev->dev;
4888
4889 hr_dev = (struct hns_roce_dev *)ib_alloc_device(sizeof(*hr_dev));
4890 if (!hr_dev)
4891 return -ENOMEM;
4892
4893 hr_dev->priv = kzalloc(sizeof(struct hns_roce_v1_priv), GFP_KERNEL);
4894 if (!hr_dev->priv) {
4895 ret = -ENOMEM;
4896 goto error_failed_kzalloc;
4897 }
4898
08805fdb 4899 hr_dev->pdev = pdev;
13ca970e 4900 hr_dev->dev = dev;
4901 platform_set_drvdata(pdev, hr_dev);
4902
4903 if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64ULL)) &&
4904 dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32ULL))) {
4905 dev_err(dev, "Not usable DMA addressing mode\n");
4906 ret = -EIO;
4907 goto error_failed_get_cfg;
4908 }
4909
4910 ret = hns_roce_get_cfg(hr_dev);
4911 if (ret) {
4912 dev_err(dev, "Get Configuration failed!\n");
4913 goto error_failed_get_cfg;
4914 }
4915
4916 ret = hns_roce_init(hr_dev);
4917 if (ret) {
4918 dev_err(dev, "RoCE engine init failed!\n");
4919 goto error_failed_get_cfg;
4920 }
4921
4922 return 0;
4923
4924error_failed_get_cfg:
4925 kfree(hr_dev->priv);
4926
4927error_failed_kzalloc:
4928 ib_dealloc_device(&hr_dev->ib_dev);
4929
4930 return ret;
4931}
4932
4933/**
4934 * hns_roce_remove - remove RoCE device
4935 * @pdev: pointer to platform device
4936 */
4937static int hns_roce_remove(struct platform_device *pdev)
4938{
4939 struct hns_roce_dev *hr_dev = platform_get_drvdata(pdev);
4940
4941 hns_roce_exit(hr_dev);
016a0059 4942 kfree(hr_dev->priv);
4943 ib_dealloc_device(&hr_dev->ib_dev);
4944
4945 return 0;
4946}
4947
4948static struct platform_driver hns_roce_driver = {
4949 .probe = hns_roce_probe,
4950 .remove = hns_roce_remove,
4951 .driver = {
4952 .name = DRV_NAME,
4953 .of_match_table = hns_roce_of_match,
4954 .acpi_match_table = ACPI_PTR(hns_roce_acpi_match),
4955 },
4956};
4957
4958module_platform_driver(hns_roce_driver);
4959
4960MODULE_LICENSE("Dual BSD/GPL");
4961MODULE_AUTHOR("Wei Hu <xavier.huwei@huawei.com>");
4962MODULE_AUTHOR("Nenglong Zhao <zhaonenglong@hisilicon.com>");
4963MODULE_AUTHOR("Lijun Ou <oulijun@huawei.com>");
4964MODULE_DESCRIPTION("Hisilicon Hip06 Family RoCE Driver");