/*
 * Copyright (c) 2016 Hisilicon Limited.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/platform_device.h>
#include <linux/acpi.h>
#include <linux/etherdevice.h>
#include <linux/interrupt.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <rdma/ib_umem.h>
#include "hns_roce_common.h"
#include "hns_roce_device.h"
#include "hns_roce_cmd.h"
#include "hns_roce_hem.h"
#include "hns_roce_hw_v1.h"
static void set_data_seg(struct hns_roce_wqe_data_seg *dseg, struct ib_sge *sg)
{
	dseg->lkey = cpu_to_le32(sg->lkey);
	dseg->addr = cpu_to_le64(sg->addr);
	dseg->len = cpu_to_le32(sg->length);
}
static void set_raddr_seg(struct hns_roce_wqe_raddr_seg *rseg, u64 remote_addr,
			  u32 rkey)
{
	rseg->raddr = cpu_to_le64(remote_addr);
	rseg->rkey = cpu_to_le32(rkey);
	rseg->len = 0;
}
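
/*
 * Note on the v1 send-queue WQE layout, as reflected by the code below: an
 * RC WQE begins with a ctrl segment (hns_roce_wqe_ctrl_seg), is followed by
 * a remote-address segment (hns_roce_wqe_raddr_seg) even for plain sends,
 * and ends with either inline payload or an array of data segments built by
 * set_data_seg().  A GSI (UD) WQE instead uses the fixed-size
 * hns_roce_ud_send_wqe, which carries the DMAC, SGID index, flow label and
 * at most two gather entries.
 */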
static int hns_roce_v1_post_send(struct ib_qp *ibqp,
				 const struct ib_send_wr *wr,
				 const struct ib_send_wr **bad_wr)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
	struct hns_roce_ah *ah = to_hr_ah(ud_wr(wr)->ah);
	struct hns_roce_ud_send_wqe *ud_sq_wqe = NULL;
	struct hns_roce_wqe_ctrl_seg *ctrl = NULL;
	struct hns_roce_wqe_data_seg *dseg = NULL;
	struct hns_roce_qp *qp = to_hr_qp(ibqp);
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_sq_db sq_db;
	int ps_opcode = 0, i = 0;
	unsigned long flags = 0;
	void *wqe = NULL;
	u32 doorbell[2];
	int nreq = 0;
	u32 ind = 0;
	int ret = 0;
	u8 *smac;
	int loopback;

	if (unlikely(ibqp->qp_type != IB_QPT_GSI &&
		     ibqp->qp_type != IB_QPT_RC)) {
		dev_err(dev, "unsupported QP type\n");
		*bad_wr = NULL;
		return -EOPNOTSUPP;
	}

	spin_lock_irqsave(&qp->sq.lock, flags);
	ind = qp->sq_next_wqe;
	for (nreq = 0; wr; ++nreq, wr = wr->next) {
		if (hns_roce_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) {
			ret = -ENOMEM;
			*bad_wr = wr;
			goto out;
		}

		if (unlikely(wr->num_sge > qp->sq.max_gs)) {
			dev_err(dev, "num_sge=%d > qp->sq.max_gs=%d\n",
				wr->num_sge, qp->sq.max_gs);
			ret = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		wqe = get_send_wqe(qp, ind & (qp->sq.wqe_cnt - 1));
		qp->sq.wrid[(qp->sq.head + nreq) & (qp->sq.wqe_cnt - 1)] =
								      wr->wr_id;

		/* Build GSI (UD) and RC WQEs separately */
		if (ibqp->qp_type == IB_QPT_GSI) {
			ud_sq_wqe = wqe;
			roce_set_field(ud_sq_wqe->dmac_h,
				       UD_SEND_WQE_U32_4_DMAC_0_M,
				       UD_SEND_WQE_U32_4_DMAC_0_S,
				       ah->av.mac[0]);
			roce_set_field(ud_sq_wqe->dmac_h,
				       UD_SEND_WQE_U32_4_DMAC_1_M,
				       UD_SEND_WQE_U32_4_DMAC_1_S,
				       ah->av.mac[1]);
			roce_set_field(ud_sq_wqe->dmac_h,
				       UD_SEND_WQE_U32_4_DMAC_2_M,
				       UD_SEND_WQE_U32_4_DMAC_2_S,
				       ah->av.mac[2]);
			roce_set_field(ud_sq_wqe->dmac_h,
				       UD_SEND_WQE_U32_4_DMAC_3_M,
				       UD_SEND_WQE_U32_4_DMAC_3_S,
				       ah->av.mac[3]);

			roce_set_field(ud_sq_wqe->u32_8,
				       UD_SEND_WQE_U32_8_DMAC_4_M,
				       UD_SEND_WQE_U32_8_DMAC_4_S,
				       ah->av.mac[4]);
			roce_set_field(ud_sq_wqe->u32_8,
				       UD_SEND_WQE_U32_8_DMAC_5_M,
				       UD_SEND_WQE_U32_8_DMAC_5_S,
				       ah->av.mac[5]);

			smac = (u8 *)hr_dev->dev_addr[qp->port];
			loopback = ether_addr_equal_unaligned(ah->av.mac,
							      smac) ? 1 : 0;
			roce_set_bit(ud_sq_wqe->u32_8,
				     UD_SEND_WQE_U32_8_LOOPBACK_INDICATOR_S,
				     loopback);

			roce_set_field(ud_sq_wqe->u32_8,
				       UD_SEND_WQE_U32_8_OPERATION_TYPE_M,
				       UD_SEND_WQE_U32_8_OPERATION_TYPE_S,
				       HNS_ROCE_WQE_OPCODE_SEND);
			roce_set_field(ud_sq_wqe->u32_8,
				       UD_SEND_WQE_U32_8_NUMBER_OF_DATA_SEG_M,
				       UD_SEND_WQE_U32_8_NUMBER_OF_DATA_SEG_S,
				       2);
			roce_set_bit(ud_sq_wqe->u32_8,
				UD_SEND_WQE_U32_8_SEND_GL_ROUTING_HDR_FLAG_S,
				1);

			ud_sq_wqe->u32_8 |= (wr->send_flags & IB_SEND_SIGNALED ?
				cpu_to_le32(HNS_ROCE_WQE_CQ_NOTIFY) : 0) |
				(wr->send_flags & IB_SEND_SOLICITED ?
				cpu_to_le32(HNS_ROCE_WQE_SE) : 0) |
				((wr->opcode == IB_WR_SEND_WITH_IMM) ?
				cpu_to_le32(HNS_ROCE_WQE_IMM) : 0);

			roce_set_field(ud_sq_wqe->u32_16,
				       UD_SEND_WQE_U32_16_DEST_QP_M,
				       UD_SEND_WQE_U32_16_DEST_QP_S,
				       ud_wr(wr)->remote_qpn);
			roce_set_field(ud_sq_wqe->u32_16,
				       UD_SEND_WQE_U32_16_MAX_STATIC_RATE_M,
				       UD_SEND_WQE_U32_16_MAX_STATIC_RATE_S,
				       ah->av.stat_rate);

			roce_set_field(ud_sq_wqe->u32_36,
				       UD_SEND_WQE_U32_36_FLOW_LABEL_M,
				       UD_SEND_WQE_U32_36_FLOW_LABEL_S,
				       ah->av.sl_tclass_flowlabel &
				       HNS_ROCE_FLOW_LABEL_MASK);
			roce_set_field(ud_sq_wqe->u32_36,
				       UD_SEND_WQE_U32_36_PRIORITY_M,
				       UD_SEND_WQE_U32_36_PRIORITY_S,
				       le32_to_cpu(ah->av.sl_tclass_flowlabel) >>
				       HNS_ROCE_SL_SHIFT);
			roce_set_field(ud_sq_wqe->u32_36,
				       UD_SEND_WQE_U32_36_SGID_INDEX_M,
				       UD_SEND_WQE_U32_36_SGID_INDEX_S,
				       hns_get_gid_index(hr_dev, qp->phy_port,
							 ah->av.gid_index));

			roce_set_field(ud_sq_wqe->u32_40,
				       UD_SEND_WQE_U32_40_HOP_LIMIT_M,
				       UD_SEND_WQE_U32_40_HOP_LIMIT_S,
				       ah->av.hop_limit);
			roce_set_field(ud_sq_wqe->u32_40,
				       UD_SEND_WQE_U32_40_TRAFFIC_CLASS_M,
				       UD_SEND_WQE_U32_40_TRAFFIC_CLASS_S,
				       ah->av.sl_tclass_flowlabel >>
				       HNS_ROCE_TCLASS_SHIFT);

			memcpy(&ud_sq_wqe->dgid[0], &ah->av.dgid[0], GID_LEN);

			ud_sq_wqe->va0_l =
				       cpu_to_le32((u32)wr->sg_list[0].addr);
			ud_sq_wqe->va0_h =
				       cpu_to_le32((wr->sg_list[0].addr) >> 32);
			ud_sq_wqe->l_key0 =
				       cpu_to_le32(wr->sg_list[0].lkey);

			ud_sq_wqe->va1_l =
				       cpu_to_le32((u32)wr->sg_list[1].addr);
			ud_sq_wqe->va1_h =
				       cpu_to_le32((wr->sg_list[1].addr) >> 32);
			ud_sq_wqe->l_key1 =
				       cpu_to_le32(wr->sg_list[1].lkey);
			ind++;
		} else if (ibqp->qp_type == IB_QPT_RC) {
			u32 tmp_len = 0;

			ctrl = wqe;
			memset(ctrl, 0, sizeof(struct hns_roce_wqe_ctrl_seg));
			for (i = 0; i < wr->num_sge; i++)
				tmp_len += wr->sg_list[i].length;

			ctrl->msg_length =
			  cpu_to_le32(le32_to_cpu(ctrl->msg_length) + tmp_len);

			ctrl->sgl_pa_h = 0;
			ctrl->flag = 0;

			switch (wr->opcode) {
			case IB_WR_SEND_WITH_IMM:
			case IB_WR_RDMA_WRITE_WITH_IMM:
				ctrl->imm_data = wr->ex.imm_data;
				break;
			case IB_WR_SEND_WITH_INV:
				ctrl->inv_key =
					cpu_to_le32(wr->ex.invalidate_rkey);
				break;
			default:
				ctrl->imm_data = 0;
				break;
			}

			/* Ctrl field: signaled, solicited, immediate and fence bits */
			/* The SO bit stays clear pending a conforming application scenario */
			ctrl->flag |= (wr->send_flags & IB_SEND_SIGNALED ?
				      cpu_to_le32(HNS_ROCE_WQE_CQ_NOTIFY) : 0) |
				      (wr->send_flags & IB_SEND_SOLICITED ?
				      cpu_to_le32(HNS_ROCE_WQE_SE) : 0) |
				      ((wr->opcode == IB_WR_SEND_WITH_IMM ||
				      wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM) ?
				      cpu_to_le32(HNS_ROCE_WQE_IMM) : 0) |
				      (wr->send_flags & IB_SEND_FENCE ?
				      (cpu_to_le32(HNS_ROCE_WQE_FENCE)) : 0);

			wqe += sizeof(struct hns_roce_wqe_ctrl_seg);

			switch (wr->opcode) {
			case IB_WR_RDMA_READ:
				ps_opcode = HNS_ROCE_WQE_OPCODE_RDMA_READ;
				set_raddr_seg(wqe, rdma_wr(wr)->remote_addr,
					      rdma_wr(wr)->rkey);
				break;
			case IB_WR_RDMA_WRITE:
			case IB_WR_RDMA_WRITE_WITH_IMM:
				ps_opcode = HNS_ROCE_WQE_OPCODE_RDMA_WRITE;
				set_raddr_seg(wqe, rdma_wr(wr)->remote_addr,
					      rdma_wr(wr)->rkey);
				break;
			case IB_WR_SEND:
			case IB_WR_SEND_WITH_INV:
			case IB_WR_SEND_WITH_IMM:
				ps_opcode = HNS_ROCE_WQE_OPCODE_SEND;
				break;
			case IB_WR_LOCAL_INV:
				break;
			case IB_WR_ATOMIC_CMP_AND_SWP:
			case IB_WR_ATOMIC_FETCH_AND_ADD:
			default:
				ps_opcode = HNS_ROCE_WQE_OPCODE_MASK;
				break;
			}
			ctrl->flag |= cpu_to_le32(ps_opcode);
			wqe += sizeof(struct hns_roce_wqe_raddr_seg);

			dseg = wqe;
			if (wr->send_flags & IB_SEND_INLINE && wr->num_sge) {
				if (le32_to_cpu(ctrl->msg_length) >
				    hr_dev->caps.max_sq_inline) {
					ret = -EINVAL;
					*bad_wr = wr;
					/* limit first, actual length second */
					dev_err(dev, "inline len(1-%d)=%d, illegal",
						hr_dev->caps.max_sq_inline,
						le32_to_cpu(ctrl->msg_length));
					goto out;
				}
				for (i = 0; i < wr->num_sge; i++) {
					memcpy(wqe, ((void *) (uintptr_t)
					       wr->sg_list[i].addr),
					       wr->sg_list[i].length);
					wqe += wr->sg_list[i].length;
				}
				ctrl->flag |= cpu_to_le32(HNS_ROCE_WQE_INLINE);
			} else {
				for (i = 0; i < wr->num_sge; i++)
					set_data_seg(dseg + i, wr->sg_list + i);

				ctrl->flag |= cpu_to_le32(wr->num_sge <<
					      HNS_ROCE_WQE_SGE_NUM_BIT);
			}
			ind++;
		}
	}

out:
	/* Ring the SQ doorbell if anything was posted */
	if (likely(nreq)) {
		qp->sq.head += nreq;
		/* Memory barrier */
		wmb();

		sq_db.u32_4 = 0;
		sq_db.u32_8 = 0;
		roce_set_field(sq_db.u32_4, SQ_DOORBELL_U32_4_SQ_HEAD_M,
			       SQ_DOORBELL_U32_4_SQ_HEAD_S,
			       (qp->sq.head & ((qp->sq.wqe_cnt << 1) - 1)));
		roce_set_field(sq_db.u32_4, SQ_DOORBELL_U32_4_SL_M,
			       SQ_DOORBELL_U32_4_SL_S, qp->sl);
		roce_set_field(sq_db.u32_4, SQ_DOORBELL_U32_4_PORT_M,
			       SQ_DOORBELL_U32_4_PORT_S, qp->phy_port);
		roce_set_field(sq_db.u32_8, SQ_DOORBELL_U32_8_QPN_M,
			       SQ_DOORBELL_U32_8_QPN_S, qp->doorbell_qpn);
		roce_set_bit(sq_db.u32_8, SQ_DOORBELL_HW_SYNC_S, 1);

		doorbell[0] = le32_to_cpu(sq_db.u32_4);
		doorbell[1] = le32_to_cpu(sq_db.u32_8);

		hns_roce_write64_k((__le32 *)doorbell, qp->sq.db_reg_l);
		qp->sq_next_wqe = ind;
	}

	spin_unlock_irqrestore(&qp->sq.lock, flags);

	return ret;
}
static int hns_roce_v1_post_recv(struct ib_qp *ibqp,
				 const struct ib_recv_wr *wr,
				 const struct ib_recv_wr **bad_wr)
{
	int ret = 0;
	int nreq = 0;
	int ind = 0;
	int i = 0;
	u32 reg_val;
	unsigned long flags = 0;
	struct hns_roce_rq_wqe_ctrl *ctrl = NULL;
	struct hns_roce_wqe_data_seg *scat = NULL;
	struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_rq_db rq_db;
	uint32_t doorbell[2] = {0};

	spin_lock_irqsave(&hr_qp->rq.lock, flags);
	ind = hr_qp->rq.head & (hr_qp->rq.wqe_cnt - 1);

	for (nreq = 0; wr; ++nreq, wr = wr->next) {
		if (hns_roce_wq_overflow(&hr_qp->rq, nreq,
					 hr_qp->ibqp.recv_cq)) {
			ret = -ENOMEM;
			*bad_wr = wr;
			goto out;
		}

		if (unlikely(wr->num_sge > hr_qp->rq.max_gs)) {
			dev_err(dev, "rq:num_sge=%d > qp->rq.max_gs=%d\n",
				wr->num_sge, hr_qp->rq.max_gs);
			ret = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		ctrl = get_recv_wqe(hr_qp, ind);

		roce_set_field(ctrl->rwqe_byte_12,
			       RQ_WQE_CTRL_RWQE_BYTE_12_RWQE_SGE_NUM_M,
			       RQ_WQE_CTRL_RWQE_BYTE_12_RWQE_SGE_NUM_S,
			       wr->num_sge);

		scat = (struct hns_roce_wqe_data_seg *)(ctrl + 1);

		for (i = 0; i < wr->num_sge; i++)
			set_data_seg(scat + i, wr->sg_list + i);

		hr_qp->rq.wrid[ind] = wr->wr_id;

		ind = (ind + 1) & (hr_qp->rq.wqe_cnt - 1);
	}

out:
	if (likely(nreq)) {
		hr_qp->rq.head += nreq;
		/* Memory barrier */
		wmb();

		if (ibqp->qp_type == IB_QPT_GSI) {
			__le32 tmp;

			/* SW update GSI rq header */
			reg_val = roce_read(to_hr_dev(ibqp->device),
					    ROCEE_QP1C_CFG3_0_REG +
					    QP1C_CFGN_OFFSET * hr_qp->phy_port);
			tmp = cpu_to_le32(reg_val);
			roce_set_field(tmp,
				       ROCEE_QP1C_CFG3_0_ROCEE_QP1C_RQ_HEAD_M,
				       ROCEE_QP1C_CFG3_0_ROCEE_QP1C_RQ_HEAD_S,
				       hr_qp->rq.head);
			reg_val = le32_to_cpu(tmp);
			roce_write(to_hr_dev(ibqp->device),
				   ROCEE_QP1C_CFG3_0_REG +
				   QP1C_CFGN_OFFSET * hr_qp->phy_port, reg_val);
		} else {
			rq_db.u32_4 = 0;
			rq_db.u32_8 = 0;

			roce_set_field(rq_db.u32_4, RQ_DOORBELL_U32_4_RQ_HEAD_M,
				       RQ_DOORBELL_U32_4_RQ_HEAD_S,
				       hr_qp->rq.head);
			roce_set_field(rq_db.u32_8, RQ_DOORBELL_U32_8_QPN_M,
				       RQ_DOORBELL_U32_8_QPN_S, hr_qp->qpn);
			roce_set_field(rq_db.u32_8, RQ_DOORBELL_U32_8_CMD_M,
				       RQ_DOORBELL_U32_8_CMD_S, 1);
			roce_set_bit(rq_db.u32_8, RQ_DOORBELL_U32_8_HW_SYNC_S,
				     1);

			doorbell[0] = le32_to_cpu(rq_db.u32_4);
			doorbell[1] = le32_to_cpu(rq_db.u32_8);

			hns_roce_write64_k((__le32 *)doorbell,
					   hr_qp->rq.db_reg_l);
		}
	}
	spin_unlock_irqrestore(&hr_qp->rq.lock, flags);

	return ret;
}
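
/*
 * The ROCEE registers are little-endian.  The helpers below follow a
 * common read-modify-write pattern: roce_read() the 32-bit register,
 * convert with cpu_to_le32() so the roce_set_field()/roce_set_bit()
 * accessors can mask in the new value, then convert back with
 * le32_to_cpu() and roce_write() the result.
 */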
static void hns_roce_set_db_event_mode(struct hns_roce_dev *hr_dev,
				       int sdb_mode, int odb_mode)
{
	__le32 tmp;
	u32 val;

	val = roce_read(hr_dev, ROCEE_GLB_CFG_REG);
	tmp = cpu_to_le32(val);
	roce_set_bit(tmp, ROCEE_GLB_CFG_ROCEE_DB_SQ_MODE_S, sdb_mode);
	roce_set_bit(tmp, ROCEE_GLB_CFG_ROCEE_DB_OTH_MODE_S, odb_mode);
	val = le32_to_cpu(tmp);
	roce_write(hr_dev, ROCEE_GLB_CFG_REG, val);
}

static void hns_roce_set_db_ext_mode(struct hns_roce_dev *hr_dev, u32 sdb_mode,
				     u32 odb_mode)
{
	__le32 tmp;
	u32 val;

	/* Configure SDB/ODB extend mode */
	val = roce_read(hr_dev, ROCEE_GLB_CFG_REG);
	tmp = cpu_to_le32(val);
	roce_set_bit(tmp, ROCEE_GLB_CFG_SQ_EXT_DB_MODE_S, sdb_mode);
	roce_set_bit(tmp, ROCEE_GLB_CFG_OTH_EXT_DB_MODE_S, odb_mode);
	val = le32_to_cpu(tmp);
	roce_write(hr_dev, ROCEE_GLB_CFG_REG, val);
}

static void hns_roce_set_sdb(struct hns_roce_dev *hr_dev, u32 sdb_alept,
			     u32 sdb_alful)
{
	__le32 tmp;
	u32 val;

	/* Configure SDB almost-empty and almost-full watermarks */
	val = roce_read(hr_dev, ROCEE_DB_SQ_WL_REG);
	tmp = cpu_to_le32(val);
	roce_set_field(tmp, ROCEE_DB_SQ_WL_ROCEE_DB_SQ_WL_M,
		       ROCEE_DB_SQ_WL_ROCEE_DB_SQ_WL_S, sdb_alful);
	roce_set_field(tmp, ROCEE_DB_SQ_WL_ROCEE_DB_SQ_WL_EMPTY_M,
		       ROCEE_DB_SQ_WL_ROCEE_DB_SQ_WL_EMPTY_S, sdb_alept);
	val = le32_to_cpu(tmp);
	roce_write(hr_dev, ROCEE_DB_SQ_WL_REG, val);
}

static void hns_roce_set_odb(struct hns_roce_dev *hr_dev, u32 odb_alept,
			     u32 odb_alful)
{
	__le32 tmp;
	u32 val;

	/* Configure ODB almost-empty and almost-full watermarks */
	val = roce_read(hr_dev, ROCEE_DB_OTHERS_WL_REG);
	tmp = cpu_to_le32(val);
	roce_set_field(tmp, ROCEE_DB_OTHERS_WL_ROCEE_DB_OTH_WL_M,
		       ROCEE_DB_OTHERS_WL_ROCEE_DB_OTH_WL_S, odb_alful);
	roce_set_field(tmp, ROCEE_DB_OTHERS_WL_ROCEE_DB_OTH_WL_EMPTY_M,
		       ROCEE_DB_OTHERS_WL_ROCEE_DB_OTH_WL_EMPTY_S, odb_alept);
	val = le32_to_cpu(tmp);
	roce_write(hr_dev, ROCEE_DB_OTHERS_WL_REG, val);
}
static void hns_roce_set_sdb_ext(struct hns_roce_dev *hr_dev, u32 ext_sdb_alept,
				 u32 ext_sdb_alful)
{
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_v1_priv *priv;
	struct hns_roce_db_table *db;
	dma_addr_t sdb_dma_addr;
	__le32 tmp;
	u32 val;

	priv = (struct hns_roce_v1_priv *)hr_dev->priv;
	db = &priv->db_table;

	/* Configure extend SDB threshold */
	roce_write(hr_dev, ROCEE_EXT_DB_SQ_WL_EMPTY_REG, ext_sdb_alept);
	roce_write(hr_dev, ROCEE_EXT_DB_SQ_WL_REG, ext_sdb_alful);

	/* Configure extend SDB base addr */
	sdb_dma_addr = db->ext_db->sdb_buf_list->map;
	roce_write(hr_dev, ROCEE_EXT_DB_SQ_REG, (u32)(sdb_dma_addr >> 12));

	/* Configure extend SDB depth */
	val = roce_read(hr_dev, ROCEE_EXT_DB_SQ_H_REG);
	tmp = cpu_to_le32(val);
	roce_set_field(tmp, ROCEE_EXT_DB_SQ_H_EXT_DB_SQ_SHIFT_M,
		       ROCEE_EXT_DB_SQ_H_EXT_DB_SQ_SHIFT_S,
		       db->ext_db->esdb_dep);
	/*
	 * 44 = 32 + 12.  The address handed to hardware is shifted right by
	 * 12 because 4K pages are used, and by another 32 to extract the
	 * high 32 bits of the value written to hardware.
	 */
	roce_set_field(tmp, ROCEE_EXT_DB_SQ_H_EXT_DB_SQ_BA_H_M,
		       ROCEE_EXT_DB_SQ_H_EXT_DB_SQ_BA_H_S, sdb_dma_addr >> 44);
	val = le32_to_cpu(tmp);
	roce_write(hr_dev, ROCEE_EXT_DB_SQ_H_REG, val);

	dev_dbg(dev, "ext SDB depth: 0x%x\n", db->ext_db->esdb_dep);
	dev_dbg(dev, "ext SDB threshold: empty: 0x%x, full: 0x%x\n",
		ext_sdb_alept, ext_sdb_alful);
}
static void hns_roce_set_odb_ext(struct hns_roce_dev *hr_dev, u32 ext_odb_alept,
				 u32 ext_odb_alful)
{
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_v1_priv *priv;
	struct hns_roce_db_table *db;
	dma_addr_t odb_dma_addr;
	__le32 tmp;
	u32 val;

	priv = (struct hns_roce_v1_priv *)hr_dev->priv;
	db = &priv->db_table;

	/* Configure extend ODB threshold */
	roce_write(hr_dev, ROCEE_EXT_DB_OTHERS_WL_EMPTY_REG, ext_odb_alept);
	roce_write(hr_dev, ROCEE_EXT_DB_OTHERS_WL_REG, ext_odb_alful);

	/* Configure extend ODB base addr */
	odb_dma_addr = db->ext_db->odb_buf_list->map;
	roce_write(hr_dev, ROCEE_EXT_DB_OTH_REG, (u32)(odb_dma_addr >> 12));

	/* Configure extend ODB depth */
	val = roce_read(hr_dev, ROCEE_EXT_DB_OTH_H_REG);
	tmp = cpu_to_le32(val);
	roce_set_field(tmp, ROCEE_EXT_DB_OTH_H_EXT_DB_OTH_SHIFT_M,
		       ROCEE_EXT_DB_OTH_H_EXT_DB_OTH_SHIFT_S,
		       db->ext_db->eodb_dep);
	/* High bits (>> 44) of the extended ODB base address, matching the
	 * SDB path above.
	 */
	roce_set_field(tmp, ROCEE_EXT_DB_SQ_H_EXT_DB_OTH_BA_H_M,
		       ROCEE_EXT_DB_SQ_H_EXT_DB_OTH_BA_H_S,
		       odb_dma_addr >> 44);
	val = le32_to_cpu(tmp);
	roce_write(hr_dev, ROCEE_EXT_DB_OTH_H_REG, val);

	dev_dbg(dev, "ext ODB depth: 0x%x\n", db->ext_db->eodb_dep);
	dev_dbg(dev, "ext ODB threshold: empty: 0x%x, full: 0x%x\n",
		ext_odb_alept, ext_odb_alful);
}
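
/*
 * Doorbells can run in normal or extended mode.  In extended mode the
 * doorbell ring lives in a driver-allocated DMA buffer whose base (4K
 * aligned, with its high bits programmed separately, as above) and depth
 * are handed to hardware along with almost-empty/almost-full watermarks;
 * in normal mode only the watermark registers are programmed.
 */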
static int hns_roce_db_ext_init(struct hns_roce_dev *hr_dev, u32 sdb_ext_mod,
				u32 odb_ext_mod)
{
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_v1_priv *priv;
	struct hns_roce_db_table *db;
	dma_addr_t sdb_dma_addr;
	dma_addr_t odb_dma_addr;
	int ret = 0;

	priv = (struct hns_roce_v1_priv *)hr_dev->priv;
	db = &priv->db_table;

	db->ext_db = kmalloc(sizeof(*db->ext_db), GFP_KERNEL);
	if (!db->ext_db)
		return -ENOMEM;

	if (sdb_ext_mod) {
		db->ext_db->sdb_buf_list = kmalloc(
				sizeof(*db->ext_db->sdb_buf_list), GFP_KERNEL);
		if (!db->ext_db->sdb_buf_list) {
			ret = -ENOMEM;
			goto ext_sdb_buf_fail_out;
		}

		db->ext_db->sdb_buf_list->buf = dma_alloc_coherent(dev,
						HNS_ROCE_V1_EXT_SDB_SIZE,
						&sdb_dma_addr, GFP_KERNEL);
		if (!db->ext_db->sdb_buf_list->buf) {
			ret = -ENOMEM;
			goto alloc_sq_db_buf_fail;
		}
		db->ext_db->sdb_buf_list->map = sdb_dma_addr;

		db->ext_db->esdb_dep = ilog2(HNS_ROCE_V1_EXT_SDB_DEPTH);
		hns_roce_set_sdb_ext(hr_dev, HNS_ROCE_V1_EXT_SDB_ALEPT,
				     HNS_ROCE_V1_EXT_SDB_ALFUL);
	} else
		hns_roce_set_sdb(hr_dev, HNS_ROCE_V1_SDB_ALEPT,
				 HNS_ROCE_V1_SDB_ALFUL);

	if (odb_ext_mod) {
		db->ext_db->odb_buf_list = kmalloc(
				sizeof(*db->ext_db->odb_buf_list), GFP_KERNEL);
		if (!db->ext_db->odb_buf_list) {
			ret = -ENOMEM;
			goto ext_odb_buf_fail_out;
		}

		db->ext_db->odb_buf_list->buf = dma_alloc_coherent(dev,
						HNS_ROCE_V1_EXT_ODB_SIZE,
						&odb_dma_addr, GFP_KERNEL);
		if (!db->ext_db->odb_buf_list->buf) {
			ret = -ENOMEM;
			goto alloc_otr_db_buf_fail;
		}
		db->ext_db->odb_buf_list->map = odb_dma_addr;

		db->ext_db->eodb_dep = ilog2(HNS_ROCE_V1_EXT_ODB_DEPTH);
		hns_roce_set_odb_ext(hr_dev, HNS_ROCE_V1_EXT_ODB_ALEPT,
				     HNS_ROCE_V1_EXT_ODB_ALFUL);
	} else
		hns_roce_set_odb(hr_dev, HNS_ROCE_V1_ODB_ALEPT,
				 HNS_ROCE_V1_ODB_ALFUL);

	hns_roce_set_db_ext_mode(hr_dev, sdb_ext_mod, odb_ext_mod);

	return 0;

alloc_otr_db_buf_fail:
	kfree(db->ext_db->odb_buf_list);

ext_odb_buf_fail_out:
	if (sdb_ext_mod) {
		dma_free_coherent(dev, HNS_ROCE_V1_EXT_SDB_SIZE,
				  db->ext_db->sdb_buf_list->buf,
				  db->ext_db->sdb_buf_list->map);
	}

alloc_sq_db_buf_fail:
	if (sdb_ext_mod)
		kfree(db->ext_db->sdb_buf_list);

ext_sdb_buf_fail_out:
	kfree(db->ext_db);
	return ret;
}
static struct hns_roce_qp *hns_roce_v1_create_lp_qp(struct hns_roce_dev *hr_dev,
						    struct ib_pd *pd)
{
	struct device *dev = &hr_dev->pdev->dev;
	struct ib_qp_init_attr init_attr;
	struct ib_qp *qp;

	memset(&init_attr, 0, sizeof(struct ib_qp_init_attr));
	init_attr.qp_type = IB_QPT_RC;
	init_attr.sq_sig_type = IB_SIGNAL_ALL_WR;
	init_attr.cap.max_recv_wr = HNS_ROCE_MIN_WQE_NUM;
	init_attr.cap.max_send_wr = HNS_ROCE_MIN_WQE_NUM;

	qp = hns_roce_create_qp(pd, &init_attr, NULL);
	if (IS_ERR(qp)) {
		dev_err(dev, "Create loop qp for mr free failed!");
		return NULL;
	}

	return to_hr_qp(qp);
}
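
/*
 * hip06 cannot safely free an MR while operations on it may still be in
 * flight.  The driver therefore reserves a set of loopback ("lp") RC QPs,
 * one per active port/SL combination, and later posts a zero-length RDMA
 * write on each and polls the reserved CQ; this flushes outstanding work
 * before the MPT entry is torn down (see hns_roce_v1_dereg_mr() below).
 */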
static int hns_roce_v1_rsv_lp_qp(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_caps *caps = &hr_dev->caps;
	struct device *dev = &hr_dev->pdev->dev;
	struct ib_cq_init_attr cq_init_attr;
	struct hns_roce_free_mr *free_mr;
	struct ib_qp_attr attr = { 0 };
	struct hns_roce_v1_priv *priv;
	struct hns_roce_qp *hr_qp;
	struct ib_device *ibdev;
	struct ib_cq *cq;
	struct ib_pd *pd;
	union ib_gid dgid;
	__be64 subnet_prefix;
	int attr_mask = 0;
	int ret = -ENOMEM;
	int i, j;
	u8 queue_en[HNS_ROCE_V1_RESV_QP] = { 0 };
	u8 phy_port;
	u8 port = 0;
	u8 sl;

	priv = (struct hns_roce_v1_priv *)hr_dev->priv;
	free_mr = &priv->free_mr;

	/* Reserved cq for loop qp */
	cq_init_attr.cqe = HNS_ROCE_MIN_WQE_NUM * 2;
	cq_init_attr.comp_vector = 0;

	ibdev = &hr_dev->ib_dev;
	cq = rdma_zalloc_drv_obj(ibdev, ib_cq);
	if (!cq)
		return -ENOMEM;

	ret = hns_roce_ib_create_cq(cq, &cq_init_attr, NULL);
	if (ret) {
		dev_err(dev, "Create cq for reserved loop qp failed!");
		goto alloc_cq_failed;
	}
	free_mr->mr_free_cq = to_hr_cq(cq);
	free_mr->mr_free_cq->ib_cq.device = &hr_dev->ib_dev;
	free_mr->mr_free_cq->ib_cq.uobject = NULL;
	free_mr->mr_free_cq->ib_cq.comp_handler = NULL;
	free_mr->mr_free_cq->ib_cq.event_handler = NULL;
	free_mr->mr_free_cq->ib_cq.cq_context = NULL;
	atomic_set(&free_mr->mr_free_cq->ib_cq.usecnt, 0);

	pd = rdma_zalloc_drv_obj(ibdev, ib_pd);
	if (!pd) {
		ret = -ENOMEM;
		goto alloc_mem_failed;
	}

	pd->device = ibdev;
	ret = hns_roce_alloc_pd(pd, NULL);
	if (ret)
		goto alloc_pd_failed;

	free_mr->mr_free_pd = to_hr_pd(pd);
	free_mr->mr_free_pd->ibpd.device = &hr_dev->ib_dev;
	free_mr->mr_free_pd->ibpd.uobject = NULL;
	free_mr->mr_free_pd->ibpd.__internal_mr = NULL;
	atomic_set(&free_mr->mr_free_pd->ibpd.usecnt, 0);

	attr.qp_access_flags = IB_ACCESS_REMOTE_WRITE;
	attr.pkey_index = 0;
	attr.min_rnr_timer = 0;
	/* Disable read ability */
	attr.max_dest_rd_atomic = 0;
	attr.max_rd_atomic = 0;
	/* Use arbitrary values as rq_psn and sq_psn */
	attr.rq_psn = 0x0808;
	attr.sq_psn = 0x0808;
	attr.retry_cnt = 7;
	attr.rnr_retry = 7;
	attr.timeout = 0x12;
	attr.path_mtu = IB_MTU_256;
	attr.ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE;
	rdma_ah_set_grh(&attr.ah_attr, NULL, 0, 0, 1, 0);
	rdma_ah_set_static_rate(&attr.ah_attr, 3);

	subnet_prefix = cpu_to_be64(0xfe80000000000000LL);
	for (i = 0; i < HNS_ROCE_V1_RESV_QP; i++) {
		phy_port = (i >= HNS_ROCE_MAX_PORTS) ? (i - 2) :
				(i % HNS_ROCE_MAX_PORTS);
		sl = i / HNS_ROCE_MAX_PORTS;

		for (j = 0; j < caps->num_ports; j++) {
			if (hr_dev->iboe.phy_port[j] == phy_port) {
				queue_en[i] = 1;
				port = j;
				break;
			}
		}

		if (!queue_en[i])
			continue;

		free_mr->mr_free_qp[i] = hns_roce_v1_create_lp_qp(hr_dev, pd);
		if (!free_mr->mr_free_qp[i]) {
			dev_err(dev, "Create loop qp failed!\n");
			ret = -ENOMEM;
			goto create_lp_qp_failed;
		}
		hr_qp = free_mr->mr_free_qp[i];

		hr_qp->port = port;
		hr_qp->phy_port = phy_port;
		hr_qp->ibqp.qp_type = IB_QPT_RC;
		hr_qp->ibqp.device = &hr_dev->ib_dev;
		hr_qp->ibqp.uobject = NULL;
		atomic_set(&hr_qp->ibqp.usecnt, 0);
		hr_qp->ibqp.pd = pd;
		hr_qp->ibqp.recv_cq = cq;
		hr_qp->ibqp.send_cq = cq;

		rdma_ah_set_port_num(&attr.ah_attr, port + 1);
		rdma_ah_set_sl(&attr.ah_attr, sl);
		attr.port_num = port + 1;

		attr.dest_qp_num = hr_qp->qpn;
		memcpy(rdma_ah_retrieve_dmac(&attr.ah_attr),
		       hr_dev->dev_addr[port],
		       ETH_ALEN);

		memcpy(&dgid.raw, &subnet_prefix, sizeof(u64));
		memcpy(&dgid.raw[8], hr_dev->dev_addr[port], 3);
		memcpy(&dgid.raw[13], hr_dev->dev_addr[port] + 3, 3);
		/* Modified EUI-64 interface ID: insert ff:fe and flip the
		 * universal/local bit.
		 */
		dgid.raw[11] = 0xff;
		dgid.raw[12] = 0xfe;
		dgid.raw[8] ^= 2;
		rdma_ah_set_dgid_raw(&attr.ah_attr, dgid.raw);

		ret = hr_dev->hw->modify_qp(&hr_qp->ibqp, &attr, attr_mask,
					    IB_QPS_RESET, IB_QPS_INIT);
		if (ret) {
			dev_err(dev, "modify qp failed(%d)!\n", ret);
			goto create_lp_qp_failed;
		}

		ret = hr_dev->hw->modify_qp(&hr_qp->ibqp, &attr, IB_QP_DEST_QPN,
					    IB_QPS_INIT, IB_QPS_RTR);
		if (ret) {
			dev_err(dev, "modify qp failed(%d)!\n", ret);
			goto create_lp_qp_failed;
		}

		ret = hr_dev->hw->modify_qp(&hr_qp->ibqp, &attr, attr_mask,
					    IB_QPS_RTR, IB_QPS_RTS);
		if (ret) {
			dev_err(dev, "modify qp failed(%d)!\n", ret);
			goto create_lp_qp_failed;
		}
	}

	return 0;

create_lp_qp_failed:
	for (i -= 1; i >= 0; i--) {
		hr_qp = free_mr->mr_free_qp[i];
		if (hns_roce_v1_destroy_qp(&hr_qp->ibqp, NULL))
			dev_err(dev, "Destroy qp %d for mr free failed!\n", i);
	}

	hns_roce_dealloc_pd(pd, NULL);

alloc_pd_failed:
	kfree(pd);

alloc_mem_failed:
	hns_roce_ib_destroy_cq(cq, NULL);
alloc_cq_failed:
	kfree(cq);
	return ret;
}
static void hns_roce_v1_release_lp_qp(struct hns_roce_dev *hr_dev)
{
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_free_mr *free_mr;
	struct hns_roce_v1_priv *priv;
	struct hns_roce_qp *hr_qp;
	int ret;
	int i;

	priv = (struct hns_roce_v1_priv *)hr_dev->priv;
	free_mr = &priv->free_mr;

	for (i = 0; i < HNS_ROCE_V1_RESV_QP; i++) {
		hr_qp = free_mr->mr_free_qp[i];
		if (!hr_qp)
			continue;

		ret = hns_roce_v1_destroy_qp(&hr_qp->ibqp, NULL);
		if (ret)
			dev_err(dev, "Destroy qp %d for mr free failed(%d)!\n",
				i, ret);
	}

	hns_roce_ib_destroy_cq(&free_mr->mr_free_cq->ib_cq, NULL);
	kfree(&free_mr->mr_free_cq->ib_cq);
	hns_roce_dealloc_pd(&free_mr->mr_free_pd->ibpd, NULL);
	kfree(&free_mr->mr_free_pd->ibpd);
}

static int hns_roce_db_init(struct hns_roce_dev *hr_dev)
{
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_v1_priv *priv;
	struct hns_roce_db_table *db;
	u32 sdb_ext_mod;
	u32 odb_ext_mod;
	u32 sdb_evt_mod;
	u32 odb_evt_mod;
	int ret = 0;

	priv = (struct hns_roce_v1_priv *)hr_dev->priv;
	db = &priv->db_table;

	memset(db, 0, sizeof(*db));

	/* Default DB mode */
	sdb_ext_mod = HNS_ROCE_SDB_EXTEND_MODE;
	odb_ext_mod = HNS_ROCE_ODB_EXTEND_MODE;
	sdb_evt_mod = HNS_ROCE_SDB_NORMAL_MODE;
	odb_evt_mod = HNS_ROCE_ODB_POLL_MODE;

	db->sdb_ext_mod = sdb_ext_mod;
	db->odb_ext_mod = odb_ext_mod;

	/* Init extend DB */
	ret = hns_roce_db_ext_init(hr_dev, sdb_ext_mod, odb_ext_mod);
	if (ret) {
		dev_err(dev, "Failed in extend DB configuration.\n");
		return ret;
	}

	hns_roce_set_db_event_mode(hr_dev, sdb_evt_mod, odb_evt_mod);

	return 0;
}
static void hns_roce_v1_recreate_lp_qp_work_fn(struct work_struct *work)
{
	struct hns_roce_recreate_lp_qp_work *lp_qp_work;
	struct hns_roce_dev *hr_dev;

	lp_qp_work = container_of(work, struct hns_roce_recreate_lp_qp_work,
				  work);
	hr_dev = to_hr_dev(lp_qp_work->ib_dev);

	hns_roce_v1_release_lp_qp(hr_dev);

	if (hns_roce_v1_rsv_lp_qp(hr_dev))
		dev_err(&hr_dev->pdev->dev, "create reserved qp failed\n");

	if (lp_qp_work->comp_flag)
		complete(lp_qp_work->comp);

	kfree(lp_qp_work);
}
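
/*
 * The comp/comp_flag pair implements a polled handshake with the work
 * item: the requester waits on an on-stack completion with a timeout,
 * and on timeout clears comp_flag so the work function will not signal
 * a completion object that has already gone out of scope.
 */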
static int hns_roce_v1_recreate_lp_qp(struct hns_roce_dev *hr_dev)
{
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_recreate_lp_qp_work *lp_qp_work;
	struct hns_roce_free_mr *free_mr;
	struct hns_roce_v1_priv *priv;
	struct completion comp;
	long end = HNS_ROCE_V1_RECREATE_LP_QP_TIMEOUT_MSECS;

	priv = (struct hns_roce_v1_priv *)hr_dev->priv;
	free_mr = &priv->free_mr;

	lp_qp_work = kzalloc(sizeof(struct hns_roce_recreate_lp_qp_work),
			     GFP_KERNEL);
	if (!lp_qp_work)
		return -ENOMEM;

	INIT_WORK(&(lp_qp_work->work), hns_roce_v1_recreate_lp_qp_work_fn);

	lp_qp_work->ib_dev = &(hr_dev->ib_dev);
	lp_qp_work->comp = &comp;
	lp_qp_work->comp_flag = 1;

	init_completion(lp_qp_work->comp);

	queue_work(free_mr->free_mr_wq, &(lp_qp_work->work));

	while (end > 0) {
		if (try_wait_for_completion(&comp))
			return 0;
		msleep(HNS_ROCE_V1_RECREATE_LP_QP_WAIT_VALUE);
		end -= HNS_ROCE_V1_RECREATE_LP_QP_WAIT_VALUE;
	}

	lp_qp_work->comp_flag = 0;
	if (try_wait_for_completion(&comp))
		return 0;

	dev_warn(dev, "recreate lp qp timed out (20s), returning failure!\n");
	return -ETIMEDOUT;
}
static int hns_roce_v1_send_lp_wqe(struct hns_roce_qp *hr_qp)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(hr_qp->ibqp.device);
	struct device *dev = &hr_dev->pdev->dev;
	struct ib_send_wr send_wr;
	const struct ib_send_wr *bad_wr;
	int ret;

	memset(&send_wr, 0, sizeof(send_wr));
	send_wr.next = NULL;
	send_wr.num_sge = 0;
	send_wr.send_flags = 0;
	send_wr.sg_list = NULL;
	send_wr.wr_id = (unsigned long long)&send_wr;
	send_wr.opcode = IB_WR_RDMA_WRITE;

	ret = hns_roce_v1_post_send(&hr_qp->ibqp, &send_wr, &bad_wr);
	if (ret) {
		dev_err(dev, "Post write wqe for mr free failed(%d)!", ret);
		return ret;
	}

	return 0;
}
static void hns_roce_v1_mr_free_work_fn(struct work_struct *work)
{
	struct hns_roce_mr_free_work *mr_work;
	struct ib_wc wc[HNS_ROCE_V1_RESV_QP];
	struct hns_roce_free_mr *free_mr;
	struct hns_roce_cq *mr_free_cq;
	struct hns_roce_v1_priv *priv;
	struct hns_roce_dev *hr_dev;
	struct hns_roce_mr *hr_mr;
	struct hns_roce_qp *hr_qp;
	struct device *dev;
	unsigned long end =
		msecs_to_jiffies(HNS_ROCE_V1_FREE_MR_TIMEOUT_MSECS) + jiffies;
	int i;
	int ret;
	int ne = 0;

	mr_work = container_of(work, struct hns_roce_mr_free_work, work);
	hr_mr = (struct hns_roce_mr *)mr_work->mr;
	hr_dev = to_hr_dev(mr_work->ib_dev);
	dev = &hr_dev->pdev->dev;

	priv = (struct hns_roce_v1_priv *)hr_dev->priv;
	free_mr = &priv->free_mr;
	mr_free_cq = free_mr->mr_free_cq;

	for (i = 0; i < HNS_ROCE_V1_RESV_QP; i++) {
		hr_qp = free_mr->mr_free_qp[i];
		if (!hr_qp)
			continue;
		ne++;

		ret = hns_roce_v1_send_lp_wqe(hr_qp);
		if (ret) {
			dev_err(dev,
				"Send wqe (qp:0x%lx) for mr free failed(%d)!\n",
				hr_qp->qpn, ret);
			goto free_work;
		}
	}

	if (!ne) {
		dev_err(dev, "Reserved loop qp is absent!\n");
		goto free_work;
	}

	do {
		ret = hns_roce_v1_poll_cq(&mr_free_cq->ib_cq, ne, wc);
		if (ret < 0 && hr_qp) {
			dev_err(dev,
			   "(qp:0x%lx) poll cqe failed(%d) for mr 0x%x free! Remain %d cqe\n",
			   hr_qp->qpn, ret, hr_mr->key, ne);
			goto free_work;
		}
		ne -= ret;
		usleep_range(HNS_ROCE_V1_FREE_MR_WAIT_VALUE * 1000,
			     (1 + HNS_ROCE_V1_FREE_MR_WAIT_VALUE) * 1000);
	} while (ne && time_before_eq(jiffies, end));

	if (ne != 0)
		dev_err(dev,
			"Poll cqe for mr 0x%x free timeout! Remain %d cqe\n",
			hr_mr->key, ne);

free_work:
	if (mr_work->comp_flag)
		complete(mr_work->comp);
	kfree(mr_work);
}
static int hns_roce_v1_dereg_mr(struct hns_roce_dev *hr_dev,
				struct hns_roce_mr *mr, struct ib_udata *udata)
{
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_mr_free_work *mr_work;
	struct hns_roce_free_mr *free_mr;
	struct hns_roce_v1_priv *priv;
	struct completion comp;
	long end = HNS_ROCE_V1_FREE_MR_TIMEOUT_MSECS;
	unsigned long start = jiffies;
	int npages;
	int ret = 0;

	priv = (struct hns_roce_v1_priv *)hr_dev->priv;
	free_mr = &priv->free_mr;

	if (mr->enabled) {
		if (hns_roce_hw2sw_mpt(hr_dev, NULL, key_to_hw_index(mr->key)
				       & (hr_dev->caps.num_mtpts - 1)))
			dev_warn(dev, "HW2SW_MPT failed!\n");
	}

	mr_work = kzalloc(sizeof(*mr_work), GFP_KERNEL);
	if (!mr_work) {
		ret = -ENOMEM;
		goto free_mr;
	}

	INIT_WORK(&(mr_work->work), hns_roce_v1_mr_free_work_fn);

	mr_work->ib_dev = &(hr_dev->ib_dev);
	mr_work->comp = &comp;
	mr_work->comp_flag = 1;
	mr_work->mr = (void *)mr;
	init_completion(mr_work->comp);

	queue_work(free_mr->free_mr_wq, &(mr_work->work));

	while (end > 0) {
		if (try_wait_for_completion(&comp))
			goto free_mr;
		msleep(HNS_ROCE_V1_FREE_MR_WAIT_VALUE);
		end -= HNS_ROCE_V1_FREE_MR_WAIT_VALUE;
	}

	mr_work->comp_flag = 0;
	if (try_wait_for_completion(&comp))
		goto free_mr;

	dev_warn(dev, "Free mr work 0x%x over 50s and failed!\n", mr->key);
	ret = -ETIMEDOUT;

free_mr:
	dev_dbg(dev, "Free mr 0x%x use 0x%x us.\n",
		mr->key, jiffies_to_usecs(jiffies) - jiffies_to_usecs(start));

	if (mr->size != ~0ULL) {
		npages = ib_umem_page_count(mr->umem);
		dma_free_coherent(dev, npages * 8, mr->pbl_buf,
				  mr->pbl_dma_addr);
	}

	hns_roce_bitmap_free(&hr_dev->mr_table.mtpt_bitmap,
			     key_to_hw_index(mr->key), 0);

	ib_umem_release(mr->umem);

	kfree(mr);

	return ret;
}
static void hns_roce_db_free(struct hns_roce_dev *hr_dev)
{
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_v1_priv *priv;
	struct hns_roce_db_table *db;

	priv = (struct hns_roce_v1_priv *)hr_dev->priv;
	db = &priv->db_table;

	if (db->sdb_ext_mod) {
		dma_free_coherent(dev, HNS_ROCE_V1_EXT_SDB_SIZE,
				  db->ext_db->sdb_buf_list->buf,
				  db->ext_db->sdb_buf_list->map);
		kfree(db->ext_db->sdb_buf_list);
	}

	if (db->odb_ext_mod) {
		dma_free_coherent(dev, HNS_ROCE_V1_EXT_ODB_SIZE,
				  db->ext_db->odb_buf_list->buf,
				  db->ext_db->odb_buf_list->map);
		kfree(db->ext_db->odb_buf_list);
	}

	kfree(db->ext_db);
}
static int hns_roce_raq_init(struct hns_roce_dev *hr_dev)
{
	int ret;
	u32 val;
	__le32 tmp;
	int raq_shift = 0;
	dma_addr_t addr;
	struct hns_roce_v1_priv *priv;
	struct hns_roce_raq_table *raq;
	struct device *dev = &hr_dev->pdev->dev;

	priv = (struct hns_roce_v1_priv *)hr_dev->priv;
	raq = &priv->raq_table;

	raq->e_raq_buf = kzalloc(sizeof(*(raq->e_raq_buf)), GFP_KERNEL);
	if (!raq->e_raq_buf)
		return -ENOMEM;

	raq->e_raq_buf->buf = dma_alloc_coherent(dev, HNS_ROCE_V1_RAQ_SIZE,
						 &addr, GFP_KERNEL);
	if (!raq->e_raq_buf->buf) {
		ret = -ENOMEM;
		goto err_dma_alloc_raq;
	}
	raq->e_raq_buf->map = addr;

	/* Configure raq extended address: 48-bit, 4K aligned */
	roce_write(hr_dev, ROCEE_EXT_RAQ_REG, raq->e_raq_buf->map >> 12);

	/* Configure raq_shift */
	raq_shift = ilog2(HNS_ROCE_V1_RAQ_SIZE / HNS_ROCE_V1_RAQ_ENTRY);
	val = roce_read(hr_dev, ROCEE_EXT_RAQ_H_REG);
	tmp = cpu_to_le32(val);
	roce_set_field(tmp, ROCEE_EXT_RAQ_H_EXT_RAQ_SHIFT_M,
		       ROCEE_EXT_RAQ_H_EXT_RAQ_SHIFT_S, raq_shift);
	/*
	 * 44 = 32 + 12.  The address handed to hardware is shifted right by
	 * 12 because 4K pages are used, and by another 32 to extract the
	 * high 32 bits of the value written to hardware.
	 */
	roce_set_field(tmp, ROCEE_EXT_RAQ_H_EXT_RAQ_BA_H_M,
		       ROCEE_EXT_RAQ_H_EXT_RAQ_BA_H_S,
		       raq->e_raq_buf->map >> 44);
	val = le32_to_cpu(tmp);
	roce_write(hr_dev, ROCEE_EXT_RAQ_H_REG, val);
	dev_dbg(dev, "Configure raq_shift 0x%x.\n", val);

	/* Configure raq threshold */
	val = roce_read(hr_dev, ROCEE_RAQ_WL_REG);
	tmp = cpu_to_le32(val);
	roce_set_field(tmp, ROCEE_RAQ_WL_ROCEE_RAQ_WL_M,
		       ROCEE_RAQ_WL_ROCEE_RAQ_WL_S,
		       HNS_ROCE_V1_EXT_RAQ_WF);
	val = le32_to_cpu(tmp);
	roce_write(hr_dev, ROCEE_RAQ_WL_REG, val);
	dev_dbg(dev, "Configure raq_wl 0x%x.\n", val);

	/* Enable extend raq */
	val = roce_read(hr_dev, ROCEE_WRMS_POL_TIME_INTERVAL_REG);
	tmp = cpu_to_le32(val);
	roce_set_field(tmp,
		       ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_POL_TIME_INTERVAL_M,
		       ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_POL_TIME_INTERVAL_S,
		       POL_TIME_INTERVAL_VAL);
	roce_set_bit(tmp, ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_EXT_RAQ_MODE, 1);
	roce_set_field(tmp,
		       ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_RAQ_TIMEOUT_CHK_CFG_M,
		       ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_RAQ_TIMEOUT_CHK_CFG_S,
		       2);
	roce_set_bit(tmp,
		     ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_RAQ_TIMEOUT_CHK_EN_S, 1);
	val = le32_to_cpu(tmp);
	roce_write(hr_dev, ROCEE_WRMS_POL_TIME_INTERVAL_REG, val);
	dev_dbg(dev, "Configure WrmsPolTimeInterval 0x%x.\n", val);

	/* Enable raq drop */
	val = roce_read(hr_dev, ROCEE_GLB_CFG_REG);
	tmp = cpu_to_le32(val);
	roce_set_bit(tmp, ROCEE_GLB_CFG_TRP_RAQ_DROP_EN_S, 1);
	val = le32_to_cpu(tmp);
	roce_write(hr_dev, ROCEE_GLB_CFG_REG, val);
	dev_dbg(dev, "Configure GlbCfg = 0x%x.\n", val);

	return 0;

err_dma_alloc_raq:
	kfree(raq->e_raq_buf);
	return ret;
}
static void hns_roce_raq_free(struct hns_roce_dev *hr_dev)
{
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_v1_priv *priv;
	struct hns_roce_raq_table *raq;

	priv = (struct hns_roce_v1_priv *)hr_dev->priv;
	raq = &priv->raq_table;

	dma_free_coherent(dev, HNS_ROCE_V1_RAQ_SIZE, raq->e_raq_buf->buf,
			  raq->e_raq_buf->map);
	kfree(raq->e_raq_buf);
}
static void hns_roce_port_enable(struct hns_roce_dev *hr_dev, int enable_flag)
{
	__le32 tmp;
	u32 val;

	if (enable_flag) {
		val = roce_read(hr_dev, ROCEE_GLB_CFG_REG);
		/* Open all ports */
		tmp = cpu_to_le32(val);
		roce_set_field(tmp, ROCEE_GLB_CFG_ROCEE_PORT_ST_M,
			       ROCEE_GLB_CFG_ROCEE_PORT_ST_S,
			       ALL_PORT_VAL_OPEN);
		val = le32_to_cpu(tmp);
		roce_write(hr_dev, ROCEE_GLB_CFG_REG, val);
	} else {
		val = roce_read(hr_dev, ROCEE_GLB_CFG_REG);
		/* Close all ports */
		tmp = cpu_to_le32(val);
		roce_set_field(tmp, ROCEE_GLB_CFG_ROCEE_PORT_ST_M,
			       ROCEE_GLB_CFG_ROCEE_PORT_ST_S, 0x0);
		val = le32_to_cpu(tmp);
		roce_write(hr_dev, ROCEE_GLB_CFG_REG, val);
	}
}
static int hns_roce_bt_init(struct hns_roce_dev *hr_dev)
{
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_v1_priv *priv;
	int ret;

	priv = (struct hns_roce_v1_priv *)hr_dev->priv;

	priv->bt_table.qpc_buf.buf = dma_alloc_coherent(dev,
		HNS_ROCE_BT_RSV_BUF_SIZE, &priv->bt_table.qpc_buf.map,
		GFP_KERNEL);
	if (!priv->bt_table.qpc_buf.buf)
		return -ENOMEM;

	priv->bt_table.mtpt_buf.buf = dma_alloc_coherent(dev,
		HNS_ROCE_BT_RSV_BUF_SIZE, &priv->bt_table.mtpt_buf.map,
		GFP_KERNEL);
	if (!priv->bt_table.mtpt_buf.buf) {
		ret = -ENOMEM;
		goto err_failed_alloc_mtpt_buf;
	}

	priv->bt_table.cqc_buf.buf = dma_alloc_coherent(dev,
		HNS_ROCE_BT_RSV_BUF_SIZE, &priv->bt_table.cqc_buf.map,
		GFP_KERNEL);
	if (!priv->bt_table.cqc_buf.buf) {
		ret = -ENOMEM;
		goto err_failed_alloc_cqc_buf;
	}

	return 0;

err_failed_alloc_cqc_buf:
	dma_free_coherent(dev, HNS_ROCE_BT_RSV_BUF_SIZE,
		priv->bt_table.mtpt_buf.buf, priv->bt_table.mtpt_buf.map);

err_failed_alloc_mtpt_buf:
	dma_free_coherent(dev, HNS_ROCE_BT_RSV_BUF_SIZE,
		priv->bt_table.qpc_buf.buf, priv->bt_table.qpc_buf.map);

	return ret;
}
static void hns_roce_bt_free(struct hns_roce_dev *hr_dev)
{
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_v1_priv *priv;

	priv = (struct hns_roce_v1_priv *)hr_dev->priv;

	dma_free_coherent(dev, HNS_ROCE_BT_RSV_BUF_SIZE,
		priv->bt_table.cqc_buf.buf, priv->bt_table.cqc_buf.map);

	dma_free_coherent(dev, HNS_ROCE_BT_RSV_BUF_SIZE,
		priv->bt_table.mtpt_buf.buf, priv->bt_table.mtpt_buf.map);

	dma_free_coherent(dev, HNS_ROCE_BT_RSV_BUF_SIZE,
		priv->bt_table.qpc_buf.buf, priv->bt_table.qpc_buf.map);
}
static int hns_roce_tptr_init(struct hns_roce_dev *hr_dev)
{
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_buf_list *tptr_buf;
	struct hns_roce_v1_priv *priv;

	priv = (struct hns_roce_v1_priv *)hr_dev->priv;
	tptr_buf = &priv->tptr_table.tptr_buf;

	/*
	 * This buffer will be used for the CQ's tptr (tail pointer), also
	 * called ci (consumer index).  Every CQ uses 2 bytes to save its
	 * cqe ci in hip06.  Hardware reads this area to get the new ci
	 * when the queue is almost full.
	 */
	tptr_buf->buf = dma_alloc_coherent(dev, HNS_ROCE_V1_TPTR_BUF_SIZE,
					   &tptr_buf->map, GFP_KERNEL);
	if (!tptr_buf->buf)
		return -ENOMEM;

	hr_dev->tptr_dma_addr = tptr_buf->map;
	hr_dev->tptr_size = HNS_ROCE_V1_TPTR_BUF_SIZE;

	return 0;
}
static void hns_roce_tptr_free(struct hns_roce_dev *hr_dev)
{
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_buf_list *tptr_buf;
	struct hns_roce_v1_priv *priv;

	priv = (struct hns_roce_v1_priv *)hr_dev->priv;
	tptr_buf = &priv->tptr_table.tptr_buf;

	dma_free_coherent(dev, HNS_ROCE_V1_TPTR_BUF_SIZE,
			  tptr_buf->buf, tptr_buf->map);
}
static int hns_roce_free_mr_init(struct hns_roce_dev *hr_dev)
{
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_free_mr *free_mr;
	struct hns_roce_v1_priv *priv;
	int ret = 0;

	priv = (struct hns_roce_v1_priv *)hr_dev->priv;
	free_mr = &priv->free_mr;

	free_mr->free_mr_wq = create_singlethread_workqueue("hns_roce_free_mr");
	if (!free_mr->free_mr_wq) {
		dev_err(dev, "Create free mr workqueue failed!\n");
		return -ENOMEM;
	}

	ret = hns_roce_v1_rsv_lp_qp(hr_dev);
	if (ret) {
		dev_err(dev, "Reserved loop qp failed(%d)!\n", ret);
		flush_workqueue(free_mr->free_mr_wq);
		destroy_workqueue(free_mr->free_mr_wq);
	}

	return ret;
}

static void hns_roce_free_mr_free(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_free_mr *free_mr;
	struct hns_roce_v1_priv *priv;

	priv = (struct hns_roce_v1_priv *)hr_dev->priv;
	free_mr = &priv->free_mr;

	flush_workqueue(free_mr->free_mr_wq);
	destroy_workqueue(free_mr->free_mr_wq);

	hns_roce_v1_release_lp_qp(hr_dev);
}
/**
 * hns_roce_v1_reset - reset RoCE
 * @hr_dev: RoCE device struct pointer
 * @dereset: true -- drop reset, false -- reset
 * return 0 - success, negative -- fail
 */
static int hns_roce_v1_reset(struct hns_roce_dev *hr_dev, bool dereset)
{
	struct device_node *dsaf_node;
	struct device *dev = &hr_dev->pdev->dev;
	struct device_node *np = dev->of_node;
	struct fwnode_handle *fwnode;
	int ret;

	/* check if this is DT/ACPI case */
	if (dev_of_node(dev)) {
		dsaf_node = of_parse_phandle(np, "dsaf-handle", 0);
		if (!dsaf_node) {
			dev_err(dev, "could not find dsaf-handle\n");
			return -EINVAL;
		}
		fwnode = &dsaf_node->fwnode;
	} else if (is_acpi_device_node(dev->fwnode)) {
		struct fwnode_reference_args args;

		ret = acpi_node_get_property_reference(dev->fwnode,
						       "dsaf-handle", 0, &args);
		if (ret) {
			dev_err(dev, "could not find dsaf-handle\n");
			return ret;
		}
		fwnode = args.fwnode;
	} else {
		dev_err(dev, "cannot read data from DT or ACPI\n");
		return -ENXIO;
	}

	ret = hns_dsaf_roce_reset(fwnode, false);
	if (ret)
		return ret;

	if (dereset) {
		msleep(SLEEP_TIME_INTERVAL);
		ret = hns_dsaf_roce_reset(fwnode, true);
	}

	return ret;
}
static int hns_roce_v1_profile(struct hns_roce_dev *hr_dev)
{
	int i = 0;
	struct hns_roce_caps *caps = &hr_dev->caps;

	hr_dev->vendor_id = roce_read(hr_dev, ROCEE_VENDOR_ID_REG);
	hr_dev->vendor_part_id = roce_read(hr_dev, ROCEE_VENDOR_PART_ID_REG);
	hr_dev->sys_image_guid = roce_read(hr_dev, ROCEE_SYS_IMAGE_GUID_L_REG) |
				((u64)roce_read(hr_dev,
					    ROCEE_SYS_IMAGE_GUID_H_REG) << 32);
	hr_dev->hw_rev = HNS_ROCE_HW_VER1;

	caps->num_qps = HNS_ROCE_V1_MAX_QP_NUM;
	caps->max_wqes = HNS_ROCE_V1_MAX_WQE_NUM;
	caps->min_wqes = HNS_ROCE_MIN_WQE_NUM;
	caps->num_cqs = HNS_ROCE_V1_MAX_CQ_NUM;
	caps->min_cqes = HNS_ROCE_MIN_CQE_NUM;
	caps->max_cqes = HNS_ROCE_V1_MAX_CQE_NUM;
	caps->max_sq_sg = HNS_ROCE_V1_SG_NUM;
	caps->max_rq_sg = HNS_ROCE_V1_SG_NUM;
	caps->max_sq_inline = HNS_ROCE_V1_INLINE_SIZE;
	caps->num_uars = HNS_ROCE_V1_UAR_NUM;
	caps->phy_num_uars = HNS_ROCE_V1_PHY_UAR_NUM;
	caps->num_aeq_vectors = HNS_ROCE_V1_AEQE_VEC_NUM;
	caps->num_comp_vectors = HNS_ROCE_V1_COMP_VEC_NUM;
	caps->num_other_vectors = HNS_ROCE_V1_ABNORMAL_VEC_NUM;
	caps->num_mtpts = HNS_ROCE_V1_MAX_MTPT_NUM;
	caps->num_mtt_segs = HNS_ROCE_V1_MAX_MTT_SEGS;
	caps->num_pds = HNS_ROCE_V1_MAX_PD_NUM;
	caps->max_qp_init_rdma = HNS_ROCE_V1_MAX_QP_INIT_RDMA;
	caps->max_qp_dest_rdma = HNS_ROCE_V1_MAX_QP_DEST_RDMA;
	caps->max_sq_desc_sz = HNS_ROCE_V1_MAX_SQ_DESC_SZ;
	caps->max_rq_desc_sz = HNS_ROCE_V1_MAX_RQ_DESC_SZ;
	caps->qpc_entry_sz = HNS_ROCE_V1_QPC_ENTRY_SIZE;
	caps->irrl_entry_sz = HNS_ROCE_V1_IRRL_ENTRY_SIZE;
	caps->cqc_entry_sz = HNS_ROCE_V1_CQC_ENTRY_SIZE;
	caps->mtpt_entry_sz = HNS_ROCE_V1_MTPT_ENTRY_SIZE;
	caps->mtt_entry_sz = HNS_ROCE_V1_MTT_ENTRY_SIZE;
	caps->cq_entry_sz = HNS_ROCE_V1_CQE_ENTRY_SIZE;
	caps->page_size_cap = HNS_ROCE_V1_PAGE_SIZE_SUPPORT;
	caps->reserved_lkey = 0;
	caps->reserved_pds = 0;
	caps->reserved_mrws = 1;
	caps->reserved_uars = 0;
	caps->reserved_cqs = 0;
	caps->reserved_qps = 12; /* 2 SQP per port, six ports total 12 */
	caps->chunk_sz = HNS_ROCE_V1_TABLE_CHUNK_SIZE;

	for (i = 0; i < caps->num_ports; i++)
		caps->pkey_table_len[i] = 1;
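
	/*
	 * The v1 engine provides HNS_ROCE_V1_GID_NUM (16) GID entries
	 * shared by up to six ports; the first 16 % num_ports ports get
	 * one extra entry.  E.g. with six ports, ports 0-3 get 3 entries
	 * and ports 4-5 get 2, for 4 * 3 + 2 * 2 = 16 in total.
	 */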
	for (i = 0; i < caps->num_ports; i++) {
		/* Six ports share 16 GID entries in the v1 engine */
		if (i >= (HNS_ROCE_V1_GID_NUM % caps->num_ports))
			caps->gid_table_len[i] = HNS_ROCE_V1_GID_NUM /
						 caps->num_ports;
		else
			caps->gid_table_len[i] = HNS_ROCE_V1_GID_NUM /
						 caps->num_ports + 1;
	}

	caps->ceqe_depth = HNS_ROCE_V1_COMP_EQE_NUM;
	caps->aeqe_depth = HNS_ROCE_V1_ASYNC_EQE_NUM;
	caps->local_ca_ack_delay = roce_read(hr_dev, ROCEE_ACK_DELAY_REG);
	caps->max_mtu = IB_MTU_2048;

	return 0;
}
static int hns_roce_v1_init(struct hns_roce_dev *hr_dev)
{
	int ret;
	u32 val;
	__le32 tmp;
	struct device *dev = &hr_dev->pdev->dev;

	/* DMAE user config */
	val = roce_read(hr_dev, ROCEE_DMAE_USER_CFG1_REG);
	tmp = cpu_to_le32(val);
	roce_set_field(tmp, ROCEE_DMAE_USER_CFG1_ROCEE_CACHE_TB_CFG_M,
		       ROCEE_DMAE_USER_CFG1_ROCEE_CACHE_TB_CFG_S, 0xf);
	roce_set_field(tmp, ROCEE_DMAE_USER_CFG1_ROCEE_STREAM_ID_TB_CFG_M,
		       ROCEE_DMAE_USER_CFG1_ROCEE_STREAM_ID_TB_CFG_S,
		       1 << PAGES_SHIFT_16);
	val = le32_to_cpu(tmp);
	roce_write(hr_dev, ROCEE_DMAE_USER_CFG1_REG, val);

	val = roce_read(hr_dev, ROCEE_DMAE_USER_CFG2_REG);
	tmp = cpu_to_le32(val);
	roce_set_field(tmp, ROCEE_DMAE_USER_CFG2_ROCEE_CACHE_PKT_CFG_M,
		       ROCEE_DMAE_USER_CFG2_ROCEE_CACHE_PKT_CFG_S, 0xf);
	roce_set_field(tmp, ROCEE_DMAE_USER_CFG2_ROCEE_STREAM_ID_PKT_CFG_M,
		       ROCEE_DMAE_USER_CFG2_ROCEE_STREAM_ID_PKT_CFG_S,
		       1 << PAGES_SHIFT_16);
	val = le32_to_cpu(tmp);
	roce_write(hr_dev, ROCEE_DMAE_USER_CFG2_REG, val);

	ret = hns_roce_db_init(hr_dev);
	if (ret) {
		dev_err(dev, "doorbell init failed!\n");
		return ret;
	}

	ret = hns_roce_raq_init(hr_dev);
	if (ret) {
		dev_err(dev, "raq init failed!\n");
		goto error_failed_raq_init;
	}

	ret = hns_roce_bt_init(hr_dev);
	if (ret) {
		dev_err(dev, "bt init failed!\n");
		goto error_failed_bt_init;
	}

	ret = hns_roce_tptr_init(hr_dev);
	if (ret) {
		dev_err(dev, "tptr init failed!\n");
		goto error_failed_tptr_init;
	}

	ret = hns_roce_free_mr_init(hr_dev);
	if (ret) {
		dev_err(dev, "free mr init failed!\n");
		goto error_failed_free_mr_init;
	}

	hns_roce_port_enable(hr_dev, HNS_ROCE_PORT_UP);

	return 0;

error_failed_free_mr_init:
	hns_roce_tptr_free(hr_dev);

error_failed_tptr_init:
	hns_roce_bt_free(hr_dev);

error_failed_bt_init:
	hns_roce_raq_free(hr_dev);

error_failed_raq_init:
	hns_roce_db_free(hr_dev);
	return ret;
}
static void hns_roce_v1_exit(struct hns_roce_dev *hr_dev)
{
	hns_roce_port_enable(hr_dev, HNS_ROCE_PORT_DOWN);
	hns_roce_free_mr_free(hr_dev);
	hns_roce_tptr_free(hr_dev);
	hns_roce_bt_free(hr_dev);
	hns_roce_raq_free(hr_dev);
	hns_roce_db_free(hr_dev);
}
static int hns_roce_v1_cmd_pending(struct hns_roce_dev *hr_dev)
{
	u32 status = readl(hr_dev->reg_base + ROCEE_MB6_REG);

	return (!!(status & (1 << HCR_GO_BIT)));
}
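
/*
 * Mailbox protocol, as used below: software fills the HCR registers
 * (in_param, out_param, in_modifier, then the command word) and sets the
 * HW_RUN ("GO") bit; hardware clears the GO bit and posts a status once
 * the command retires.  hns_roce_v1_cmd_pending() therefore reports
 * whether the single mailbox slot is still busy.
 */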
static int hns_roce_v1_post_mbox(struct hns_roce_dev *hr_dev, u64 in_param,
				 u64 out_param, u32 in_modifier, u8 op_modifier,
				 u16 op, u16 token, int event)
{
	u32 __iomem *hcr = (u32 __iomem *)(hr_dev->reg_base + ROCEE_MB1_REG);
	unsigned long end;
	u32 val = 0;
	__le32 tmp;

	end = msecs_to_jiffies(GO_BIT_TIMEOUT_MSECS) + jiffies;
	while (hns_roce_v1_cmd_pending(hr_dev)) {
		if (time_after(jiffies, end)) {
			dev_err(hr_dev->dev, "jiffies=%d end=%d\n",
				(int)jiffies, (int)end);
			return -EAGAIN;
		}
		cond_resched();
	}

	tmp = cpu_to_le32(val);
	roce_set_field(tmp, ROCEE_MB6_ROCEE_MB_CMD_M, ROCEE_MB6_ROCEE_MB_CMD_S,
		       op);
	roce_set_field(tmp, ROCEE_MB6_ROCEE_MB_CMD_MDF_M,
		       ROCEE_MB6_ROCEE_MB_CMD_MDF_S, op_modifier);
	roce_set_bit(tmp, ROCEE_MB6_ROCEE_MB_EVENT_S, event);
	roce_set_bit(tmp, ROCEE_MB6_ROCEE_MB_HW_RUN_S, 1);
	roce_set_field(tmp, ROCEE_MB6_ROCEE_MB_TOKEN_M,
		       ROCEE_MB6_ROCEE_MB_TOKEN_S, token);

	val = le32_to_cpu(tmp);
	writeq(in_param, hcr + 0);
	writeq(out_param, hcr + 2);
	writel(in_modifier, hcr + 4);
	/* Memory barrier */
	wmb();

	writel(val, hcr + 5);

	return 0;
}
static int hns_roce_v1_chk_mbox(struct hns_roce_dev *hr_dev,
				unsigned long timeout)
{
	u8 __iomem *hcr = hr_dev->reg_base + ROCEE_MB1_REG;
	unsigned long end = 0;
	u32 status = 0;

	end = msecs_to_jiffies(timeout) + jiffies;
	while (hns_roce_v1_cmd_pending(hr_dev) && time_before(jiffies, end))
		cond_resched();

	if (hns_roce_v1_cmd_pending(hr_dev)) {
		dev_err(hr_dev->dev, "[cmd_poll]hw run cmd TIMEDOUT!\n");
		return -ETIMEDOUT;
	}

	status = le32_to_cpu((__force __le32)
			      __raw_readl(hcr + HCR_STATUS_OFFSET));
	if ((status & STATUS_MASK) != 0x1) {
		dev_err(hr_dev->dev, "mailbox status 0x%x!\n", status);
		return -EBUSY;
	}

	return 0;
}
static int hns_roce_v1_set_gid(struct hns_roce_dev *hr_dev, u8 port,
			       int gid_index, const union ib_gid *gid,
			       const struct ib_gid_attr *attr)
{
	unsigned long flags;
	u32 *p = NULL;
	u8 gid_idx = 0;

	gid_idx = hns_get_gid_index(hr_dev, port, gid_index);

	spin_lock_irqsave(&hr_dev->iboe.lock, flags);

	p = (u32 *)&gid->raw[0];
	roce_raw_write(*p, hr_dev->reg_base + ROCEE_PORT_GID_L_0_REG +
		       (HNS_ROCE_V1_GID_NUM * gid_idx));

	p = (u32 *)&gid->raw[4];
	roce_raw_write(*p, hr_dev->reg_base + ROCEE_PORT_GID_ML_0_REG +
		       (HNS_ROCE_V1_GID_NUM * gid_idx));

	p = (u32 *)&gid->raw[8];
	roce_raw_write(*p, hr_dev->reg_base + ROCEE_PORT_GID_MH_0_REG +
		       (HNS_ROCE_V1_GID_NUM * gid_idx));

	p = (u32 *)&gid->raw[0xc];
	roce_raw_write(*p, hr_dev->reg_base + ROCEE_PORT_GID_H_0_REG +
		       (HNS_ROCE_V1_GID_NUM * gid_idx));

	spin_unlock_irqrestore(&hr_dev->iboe.lock, flags);

	return 0;
}
static int hns_roce_v1_set_mac(struct hns_roce_dev *hr_dev, u8 phy_port,
			       u8 *addr)
{
	u32 reg_smac_l;
	u16 reg_smac_h;
	__le32 tmp;
	u16 *p_h;
	u32 *p;
	u32 val;

	/*
	 * When the MAC changes, loopback may fail because the SMAC no
	 * longer equals the DMAC, so release and recreate the reserved
	 * QPs.
	 */
	if (hr_dev->hw->dereg_mr) {
		int ret;

		ret = hns_roce_v1_recreate_lp_qp(hr_dev);
		if (ret && ret != -ETIMEDOUT)
			return ret;
	}

	p = (u32 *)(&addr[0]);
	reg_smac_l = *p;
	roce_raw_write(reg_smac_l, hr_dev->reg_base + ROCEE_SMAC_L_0_REG +
		       PHY_PORT_OFFSET * phy_port);

	val = roce_read(hr_dev,
			ROCEE_SMAC_H_0_REG + phy_port * PHY_PORT_OFFSET);
	tmp = cpu_to_le32(val);
	p_h = (u16 *)(&addr[4]);
	reg_smac_h = *p_h;
	roce_set_field(tmp, ROCEE_SMAC_H_ROCEE_SMAC_H_M,
		       ROCEE_SMAC_H_ROCEE_SMAC_H_S, reg_smac_h);
	val = le32_to_cpu(tmp);
	roce_write(hr_dev, ROCEE_SMAC_H_0_REG + phy_port * PHY_PORT_OFFSET,
		   val);

	return 0;
}
static void hns_roce_v1_set_mtu(struct hns_roce_dev *hr_dev, u8 phy_port,
				enum ib_mtu mtu)
{
	__le32 tmp;
	u32 val;

	val = roce_read(hr_dev,
			ROCEE_SMAC_H_0_REG + phy_port * PHY_PORT_OFFSET);
	tmp = cpu_to_le32(val);
	roce_set_field(tmp, ROCEE_SMAC_H_ROCEE_PORT_MTU_M,
		       ROCEE_SMAC_H_ROCEE_PORT_MTU_S, mtu);
	val = le32_to_cpu(tmp);
	roce_write(hr_dev, ROCEE_SMAC_H_0_REG + phy_port * PHY_PORT_OFFSET,
		   val);
}
static int hns_roce_v1_write_mtpt(void *mb_buf, struct hns_roce_mr *mr,
				  unsigned long mtpt_idx)
{
	struct hns_roce_v1_mpt_entry *mpt_entry;
	struct sg_dma_page_iter sg_iter;
	u64 *pages;
	int i;

	/* MPT filled into mailbox buf */
	mpt_entry = (struct hns_roce_v1_mpt_entry *)mb_buf;
	memset(mpt_entry, 0, sizeof(*mpt_entry));

	roce_set_field(mpt_entry->mpt_byte_4, MPT_BYTE_4_KEY_STATE_M,
		       MPT_BYTE_4_KEY_STATE_S, KEY_VALID);
	roce_set_field(mpt_entry->mpt_byte_4, MPT_BYTE_4_KEY_M,
		       MPT_BYTE_4_KEY_S, mr->key);
	roce_set_field(mpt_entry->mpt_byte_4, MPT_BYTE_4_PAGE_SIZE_M,
		       MPT_BYTE_4_PAGE_SIZE_S, MR_SIZE_4K);
	roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_MW_TYPE_S, 0);
	roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_MW_BIND_ENABLE_S,
		     (mr->access & IB_ACCESS_MW_BIND ? 1 : 0));
	roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_OWN_S, 0);
	roce_set_field(mpt_entry->mpt_byte_4, MPT_BYTE_4_MEMORY_LOCATION_TYPE_M,
		       MPT_BYTE_4_MEMORY_LOCATION_TYPE_S, mr->type);
	roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_REMOTE_ATOMIC_S, 0);
	roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_LOCAL_WRITE_S,
		     (mr->access & IB_ACCESS_LOCAL_WRITE ? 1 : 0));
	roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_REMOTE_WRITE_S,
		     (mr->access & IB_ACCESS_REMOTE_WRITE ? 1 : 0));
	roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_REMOTE_READ_S,
		     (mr->access & IB_ACCESS_REMOTE_READ ? 1 : 0));
	roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_REMOTE_INVAL_ENABLE_S,
		     0);
	roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_ADDRESS_TYPE_S, 0);

	roce_set_field(mpt_entry->mpt_byte_12, MPT_BYTE_12_PBL_ADDR_H_M,
		       MPT_BYTE_12_PBL_ADDR_H_S, 0);
	roce_set_field(mpt_entry->mpt_byte_12, MPT_BYTE_12_MW_BIND_COUNTER_M,
		       MPT_BYTE_12_MW_BIND_COUNTER_S, 0);

	mpt_entry->virt_addr_l = cpu_to_le32((u32)mr->iova);
	mpt_entry->virt_addr_h = cpu_to_le32((u32)(mr->iova >> 32));
	mpt_entry->length = cpu_to_le32((u32)mr->size);

	roce_set_field(mpt_entry->mpt_byte_28, MPT_BYTE_28_PD_M,
		       MPT_BYTE_28_PD_S, mr->pd);
	roce_set_field(mpt_entry->mpt_byte_28, MPT_BYTE_28_L_KEY_IDX_L_M,
		       MPT_BYTE_28_L_KEY_IDX_L_S, mtpt_idx);
	roce_set_field(mpt_entry->mpt_byte_64, MPT_BYTE_64_L_KEY_IDX_H_M,
		       MPT_BYTE_64_L_KEY_IDX_H_S, mtpt_idx >> MTPT_IDX_SHIFT);

	/* DMA memory register */
	if (mr->type == MR_TYPE_DMA)
		return 0;

	pages = (u64 *) __get_free_page(GFP_KERNEL);
	if (!pages)
		return -ENOMEM;
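
	/*
	 * The first seven page addresses (PA0-PA6) are packed directly into
	 * the MTPT entry.  The fields straddle 32-bit words, so each PA is
	 * split into low/high parts at a different bit offset -- hence the
	 * per-index cases below.
	 */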
	i = 0;
	for_each_sg_dma_page(mr->umem->sg_head.sgl, &sg_iter, mr->umem->nmap, 0) {
		pages[i] = ((u64)sg_page_iter_dma_address(&sg_iter)) >> 12;

		/* Record only the first 7 pages in the MTPT entry itself */
		if (i >= HNS_ROCE_MAX_INNER_MTPT_NUM)
			break;
		i++;
	}

	/* Register user mr */
	for (i = 0; i < HNS_ROCE_MAX_INNER_MTPT_NUM; i++) {
		switch (i) {
		case 0:
			mpt_entry->pa0_l = cpu_to_le32((u32)(pages[i]));
			roce_set_field(mpt_entry->mpt_byte_36,
				       MPT_BYTE_36_PA0_H_M,
				       MPT_BYTE_36_PA0_H_S,
				       (u32)(pages[i] >> PAGES_SHIFT_32));
			break;
		case 1:
			roce_set_field(mpt_entry->mpt_byte_36,
				       MPT_BYTE_36_PA1_L_M,
				       MPT_BYTE_36_PA1_L_S, (u32)(pages[i]));
			roce_set_field(mpt_entry->mpt_byte_40,
				       MPT_BYTE_40_PA1_H_M,
				       MPT_BYTE_40_PA1_H_S,
				       (u32)(pages[i] >> PAGES_SHIFT_24));
			break;
		case 2:
			roce_set_field(mpt_entry->mpt_byte_40,
				       MPT_BYTE_40_PA2_L_M,
				       MPT_BYTE_40_PA2_L_S, (u32)(pages[i]));
			roce_set_field(mpt_entry->mpt_byte_44,
				       MPT_BYTE_44_PA2_H_M,
				       MPT_BYTE_44_PA2_H_S,
				       (u32)(pages[i] >> PAGES_SHIFT_16));
			break;
		case 3:
			roce_set_field(mpt_entry->mpt_byte_44,
				       MPT_BYTE_44_PA3_L_M,
				       MPT_BYTE_44_PA3_L_S, (u32)(pages[i]));
			roce_set_field(mpt_entry->mpt_byte_48,
				       MPT_BYTE_48_PA3_H_M,
				       MPT_BYTE_48_PA3_H_S,
				       (u32)(pages[i] >> PAGES_SHIFT_8));
			break;
		case 4:
			mpt_entry->pa4_l = cpu_to_le32((u32)(pages[i]));
			roce_set_field(mpt_entry->mpt_byte_56,
				       MPT_BYTE_56_PA4_H_M,
				       MPT_BYTE_56_PA4_H_S,
				       (u32)(pages[i] >> PAGES_SHIFT_32));
			break;
		case 5:
			roce_set_field(mpt_entry->mpt_byte_56,
				       MPT_BYTE_56_PA5_L_M,
				       MPT_BYTE_56_PA5_L_S, (u32)(pages[i]));
			roce_set_field(mpt_entry->mpt_byte_60,
				       MPT_BYTE_60_PA5_H_M,
				       MPT_BYTE_60_PA5_H_S,
				       (u32)(pages[i] >> PAGES_SHIFT_24));
			break;
		case 6:
			roce_set_field(mpt_entry->mpt_byte_60,
				       MPT_BYTE_60_PA6_L_M,
				       MPT_BYTE_60_PA6_L_S, (u32)(pages[i]));
			roce_set_field(mpt_entry->mpt_byte_64,
				       MPT_BYTE_64_PA6_H_M,
				       MPT_BYTE_64_PA6_H_S,
				       (u32)(pages[i] >> PAGES_SHIFT_16));
			break;
		default:
			break;
		}
	}

	free_page((unsigned long) pages);

	mpt_entry->pbl_addr_l = cpu_to_le32((u32)(mr->pbl_dma_addr));

	roce_set_field(mpt_entry->mpt_byte_12, MPT_BYTE_12_PBL_ADDR_H_M,
		       MPT_BYTE_12_PBL_ADDR_H_S,
		       ((u32)(mr->pbl_dma_addr >> 32)));

	return 0;
}
static void *get_cqe(struct hns_roce_cq *hr_cq, int n)
{
	return hns_roce_buf_offset(&hr_cq->hr_buf.hr_buf,
				   n * HNS_ROCE_V1_CQE_ENTRY_SIZE);
}
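
/*
 * CQE ownership: hardware flips the owner bit it writes on every pass
 * around the ring, so a slot holds a valid, software-owned CQE exactly
 * when its owner bit is the inverse of the wrap bit of the consumer
 * index (n & cqe_cnt), which is what the XOR in get_sw_cqe() tests.
 */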
static void *get_sw_cqe(struct hns_roce_cq *hr_cq, int n)
{
	struct hns_roce_cqe *hr_cqe = get_cqe(hr_cq, n & hr_cq->ib_cq.cqe);

	/* Return the CQE only while its owner bit is the inverse of the
	 * wrap bit of the consumer index.
	 */
	return (roce_get_bit(hr_cqe->cqe_byte_4, CQE_BYTE_4_OWNER_S) ^
		!!(n & (hr_cq->ib_cq.cqe + 1))) ? hr_cqe : NULL;
}

static struct hns_roce_cqe *next_cqe_sw(struct hns_roce_cq *hr_cq)
{
	return get_sw_cqe(hr_cq, hr_cq->cons_index);
}
static void hns_roce_v1_cq_set_ci(struct hns_roce_cq *hr_cq, u32 cons_index)
{
	__le32 doorbell[2];

	doorbell[0] = cpu_to_le32(cons_index & ((hr_cq->cq_depth << 1) - 1));
	doorbell[1] = 0;
	roce_set_bit(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_HW_SYNS_S, 1);
	roce_set_field(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_M,
		       ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_S, 3);
	roce_set_field(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_MDF_M,
		       ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_MDF_S, 0);
	roce_set_field(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_INP_H_M,
		       ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_INP_H_S, hr_cq->cqn);

	hns_roce_write64_k(doorbell, hr_cq->cq_db_l);
}
static void __hns_roce_v1_cq_clean(struct hns_roce_cq *hr_cq, u32 qpn,
				   struct hns_roce_srq *srq)
{
	struct hns_roce_cqe *cqe, *dest;
	u32 prod_index;
	int nfreed = 0;
	u8 owner_bit;

	for (prod_index = hr_cq->cons_index; get_sw_cqe(hr_cq, prod_index);
	     ++prod_index) {
		if (prod_index == hr_cq->cons_index + hr_cq->ib_cq.cqe)
			break;
	}

	/*
	 * Now backwards through the CQ, removing CQ entries
	 * that match our QP by overwriting them with next entries.
	 */
	while ((int) --prod_index - (int) hr_cq->cons_index >= 0) {
		cqe = get_cqe(hr_cq, prod_index & hr_cq->ib_cq.cqe);
		if ((roce_get_field(cqe->cqe_byte_16, CQE_BYTE_16_LOCAL_QPN_M,
				    CQE_BYTE_16_LOCAL_QPN_S) &
				    HNS_ROCE_CQE_QPN_MASK) == qpn) {
			/* The v1 engine does not support SRQ */
			++nfreed;
		} else if (nfreed) {
			dest = get_cqe(hr_cq, (prod_index + nfreed) &
				       hr_cq->ib_cq.cqe);
			owner_bit = roce_get_bit(dest->cqe_byte_4,
						 CQE_BYTE_4_OWNER_S);
			memcpy(dest, cqe, sizeof(*cqe));
			roce_set_bit(dest->cqe_byte_4, CQE_BYTE_4_OWNER_S,
				     owner_bit);
		}
	}

	if (nfreed) {
		hr_cq->cons_index += nfreed;
		/*
		 * Make sure update of buffer contents is done before
		 * updating consumer index.
		 */
		wmb();

		hns_roce_v1_cq_set_ci(hr_cq, hr_cq->cons_index);
	}
}

static void hns_roce_v1_cq_clean(struct hns_roce_cq *hr_cq, u32 qpn,
				 struct hns_roce_srq *srq)
{
	spin_lock_irq(&hr_cq->lock);
	__hns_roce_v1_cq_clean(hr_cq, qpn, srq);
	spin_unlock_irq(&hr_cq->lock);
}
static void hns_roce_v1_write_cqc(struct hns_roce_dev *hr_dev,
				  struct hns_roce_cq *hr_cq, void *mb_buf,
				  u64 *mtts, dma_addr_t dma_handle, int nent,
				  u32 vector)
{
	struct hns_roce_cq_context *cq_context = NULL;
	struct hns_roce_buf_list *tptr_buf;
	struct hns_roce_v1_priv *priv;
	dma_addr_t tptr_dma_addr;
	int offset;

	priv = (struct hns_roce_v1_priv *)hr_dev->priv;
	tptr_buf = &priv->tptr_table.tptr_buf;

	cq_context = mb_buf;
	memset(cq_context, 0, sizeof(*cq_context));

	/* Get the tptr for this CQ. */
	offset = hr_cq->cqn * HNS_ROCE_V1_TPTR_ENTRY_SIZE;
	tptr_dma_addr = tptr_buf->map + offset;
	hr_cq->tptr_addr = (u16 *)(tptr_buf->buf + offset);

	/* Register cq_context members */
	roce_set_field(cq_context->cqc_byte_4,
		       CQ_CONTEXT_CQC_BYTE_4_CQC_STATE_M,
		       CQ_CONTEXT_CQC_BYTE_4_CQC_STATE_S, CQ_STATE_VALID);
	roce_set_field(cq_context->cqc_byte_4, CQ_CONTEXT_CQC_BYTE_4_CQN_M,
		       CQ_CONTEXT_CQC_BYTE_4_CQN_S, hr_cq->cqn);

	cq_context->cq_bt_l = cpu_to_le32((u32)dma_handle);

	roce_set_field(cq_context->cqc_byte_12,
		       CQ_CONTEXT_CQC_BYTE_12_CQ_BT_H_M,
		       CQ_CONTEXT_CQC_BYTE_12_CQ_BT_H_S,
		       ((u64)dma_handle >> 32));
	roce_set_field(cq_context->cqc_byte_12,
		       CQ_CONTEXT_CQC_BYTE_12_CQ_CQE_SHIFT_M,
		       CQ_CONTEXT_CQC_BYTE_12_CQ_CQE_SHIFT_S,
		       ilog2((unsigned int)nent));
	roce_set_field(cq_context->cqc_byte_12, CQ_CONTEXT_CQC_BYTE_12_CEQN_M,
		       CQ_CONTEXT_CQC_BYTE_12_CEQN_S, vector);

	cq_context->cur_cqe_ba0_l = cpu_to_le32((u32)(mtts[0]));

	roce_set_field(cq_context->cqc_byte_20,
		       CQ_CONTEXT_CQC_BYTE_20_CUR_CQE_BA0_H_M,
		       CQ_CONTEXT_CQC_BYTE_20_CUR_CQE_BA0_H_S, (mtts[0]) >> 32);
	/* Dedicated hardware, directly set 0 */
	roce_set_field(cq_context->cqc_byte_20,
		       CQ_CONTEXT_CQC_BYTE_20_CQ_CUR_INDEX_M,
		       CQ_CONTEXT_CQC_BYTE_20_CQ_CUR_INDEX_S, 0);
	/*
	 * 44 = 32 + 12.  The address handed to hardware is shifted right by
	 * 12 because 4K pages are used, and by another 32 to extract the
	 * high 32 bits of the value written to hardware.
	 */
	roce_set_field(cq_context->cqc_byte_20,
		       CQ_CONTEXT_CQC_BYTE_20_CQE_TPTR_ADDR_H_M,
		       CQ_CONTEXT_CQC_BYTE_20_CQE_TPTR_ADDR_H_S,
		       tptr_dma_addr >> 44);

	cq_context->cqe_tptr_addr_l = cpu_to_le32((u32)(tptr_dma_addr >> 12));

	roce_set_field(cq_context->cqc_byte_32,
		       CQ_CONTEXT_CQC_BYTE_32_CUR_CQE_BA1_H_M,
		       CQ_CONTEXT_CQC_BYTE_32_CUR_CQE_BA1_H_S, 0);
	roce_set_bit(cq_context->cqc_byte_32,
		     CQ_CONTEXT_CQC_BYTE_32_SE_FLAG_S, 0);
	roce_set_bit(cq_context->cqc_byte_32,
		     CQ_CONTEXT_CQC_BYTE_32_CE_FLAG_S, 0);
	roce_set_bit(cq_context->cqc_byte_32,
		     CQ_CONTEXT_CQC_BYTE_32_NOTIFICATION_FLAG_S, 0);
	roce_set_bit(cq_context->cqc_byte_32,
		     CQ_CQNTEXT_CQC_BYTE_32_TYPE_OF_COMPLETION_NOTIFICATION_S,
		     0);
	/* The initial value of cq's ci is 0 */
	roce_set_field(cq_context->cqc_byte_32,
		       CQ_CONTEXT_CQC_BYTE_32_CQ_CONS_IDX_M,
		       CQ_CONTEXT_CQC_BYTE_32_CQ_CONS_IDX_S, 0);
}
2158 static int hns_roce_v1_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
2163 static int hns_roce_v1_req_notify_cq(struct ib_cq *ibcq,
2164 enum ib_cq_notify_flags flags)
2166 struct hns_roce_cq *hr_cq = to_hr_cq(ibcq);
2167 u32 notification_flag;
2170 notification_flag = (flags & IB_CQ_SOLICITED_MASK) ==
2171 IB_CQ_SOLICITED ? CQ_DB_REQ_NOT : CQ_DB_REQ_NOT_SOL;
	 * flags = 0; Notification Flag = 1, next
	 * flags = 1; Notification Flag = 0, solicited
2177 cpu_to_le32(hr_cq->cons_index & ((hr_cq->cq_depth << 1) - 1));
2178 roce_set_bit(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_HW_SYNS_S, 1);
2179 roce_set_field(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_M,
2180 ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_S, 3);
2181 roce_set_field(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_MDF_M,
2182 ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_MDF_S, 1);
2183 roce_set_field(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_INP_H_M,
2184 ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_INP_H_S,
2185 hr_cq->cqn | notification_flag);
2187 hns_roce_write64_k(doorbell, hr_cq->cq_db_l);
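	/*
	 * Editor's note on the doorbell just written (layout inferred
	 * from the field macros above, not an authoritative register
	 * description): doorbell[0] carries the consumer index masked
	 * to twice the CQ depth - the extra wrap bit lets hardware
	 * distinguish a full ring from an empty one - and doorbell[1]
	 * packs HW_SYNS = 1, CMD = 3, CMD_MDF = 1 and
	 * (cqn | notification_flag) into the INP_H field.
	 */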
2192 static int hns_roce_v1_poll_one(struct hns_roce_cq *hr_cq,
2193 struct hns_roce_qp **cur_qp, struct ib_wc *wc)
2200 struct hns_roce_cqe *cqe;
2201 struct hns_roce_qp *hr_qp;
2202 struct hns_roce_wq *wq;
2203 struct hns_roce_wqe_ctrl_seg *sq_wqe;
2204 struct hns_roce_dev *hr_dev = to_hr_dev(hr_cq->ib_cq.device);
2205 struct device *dev = &hr_dev->pdev->dev;
	/* Find the CQE according to the consumer index */
2208 cqe = next_cqe_sw(hr_cq);
2212 ++hr_cq->cons_index;
2213 /* Memory barrier */
2216 is_send = !(roce_get_bit(cqe->cqe_byte_4, CQE_BYTE_4_SQ_RQ_FLAG_S));
	/* The local_qpn in a UD CQE is always 1, so the real QPN must be recomputed */
2219 if (roce_get_field(cqe->cqe_byte_16, CQE_BYTE_16_LOCAL_QPN_M,
2220 CQE_BYTE_16_LOCAL_QPN_S) <= 1) {
2221 qpn = roce_get_field(cqe->cqe_byte_20, CQE_BYTE_20_PORT_NUM_M,
2222 CQE_BYTE_20_PORT_NUM_S) +
2223 roce_get_field(cqe->cqe_byte_16, CQE_BYTE_16_LOCAL_QPN_M,
2224 CQE_BYTE_16_LOCAL_QPN_S) *
2227 qpn = roce_get_field(cqe->cqe_byte_16, CQE_BYTE_16_LOCAL_QPN_M,
2228 CQE_BYTE_16_LOCAL_QPN_S);
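	/*
	 * Editor's worked example: the recovered QPN is
	 * port_num + local_qpn * HNS_ROCE_MAX_PORTS (the multiplier is
	 * assumed here; hns_roce_v1_qp_err_handle() below uses the same
	 * port-interleaved numbering).  With 6 ports, local_qpn = 1 and
	 * port_num = 2, the real QPN is 2 + 1 * 6 = 8.
	 */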
2231 if (!*cur_qp || (qpn & HNS_ROCE_CQE_QPN_MASK) != (*cur_qp)->qpn) {
2232 hr_qp = __hns_roce_qp_lookup(hr_dev, qpn);
2233 if (unlikely(!hr_qp)) {
2234 dev_err(dev, "CQ %06lx with entry for unknown QPN %06x\n",
2235 hr_cq->cqn, (qpn & HNS_ROCE_CQE_QPN_MASK));
2242 wc->qp = &(*cur_qp)->ibqp;
2245 status = roce_get_field(cqe->cqe_byte_4,
2246 CQE_BYTE_4_STATUS_OF_THE_OPERATION_M,
2247 CQE_BYTE_4_STATUS_OF_THE_OPERATION_S) &
2248 HNS_ROCE_CQE_STATUS_MASK;
2250 case HNS_ROCE_CQE_SUCCESS:
2251 wc->status = IB_WC_SUCCESS;
2253 case HNS_ROCE_CQE_SYNDROME_LOCAL_LENGTH_ERR:
2254 wc->status = IB_WC_LOC_LEN_ERR;
2256 case HNS_ROCE_CQE_SYNDROME_LOCAL_QP_OP_ERR:
2257 wc->status = IB_WC_LOC_QP_OP_ERR;
2259 case HNS_ROCE_CQE_SYNDROME_LOCAL_PROT_ERR:
2260 wc->status = IB_WC_LOC_PROT_ERR;
2262 case HNS_ROCE_CQE_SYNDROME_WR_FLUSH_ERR:
2263 wc->status = IB_WC_WR_FLUSH_ERR;
2265 case HNS_ROCE_CQE_SYNDROME_MEM_MANAGE_OPERATE_ERR:
2266 wc->status = IB_WC_MW_BIND_ERR;
2268 case HNS_ROCE_CQE_SYNDROME_BAD_RESP_ERR:
2269 wc->status = IB_WC_BAD_RESP_ERR;
2271 case HNS_ROCE_CQE_SYNDROME_LOCAL_ACCESS_ERR:
2272 wc->status = IB_WC_LOC_ACCESS_ERR;
2274 case HNS_ROCE_CQE_SYNDROME_REMOTE_INVAL_REQ_ERR:
2275 wc->status = IB_WC_REM_INV_REQ_ERR;
2277 case HNS_ROCE_CQE_SYNDROME_REMOTE_ACCESS_ERR:
2278 wc->status = IB_WC_REM_ACCESS_ERR;
2280 case HNS_ROCE_CQE_SYNDROME_REMOTE_OP_ERR:
2281 wc->status = IB_WC_REM_OP_ERR;
2283 case HNS_ROCE_CQE_SYNDROME_TRANSPORT_RETRY_EXC_ERR:
2284 wc->status = IB_WC_RETRY_EXC_ERR;
2286 case HNS_ROCE_CQE_SYNDROME_RNR_RETRY_EXC_ERR:
2287 wc->status = IB_WC_RNR_RETRY_EXC_ERR;
2290 wc->status = IB_WC_GENERAL_ERR;
	/* On CQE status error, return directly */
2295 if (wc->status != IB_WC_SUCCESS)
	/* SQ corresponds to this CQE */
2300 sq_wqe = get_send_wqe(*cur_qp, roce_get_field(cqe->cqe_byte_4,
2301 CQE_BYTE_4_WQE_INDEX_M,
2302 CQE_BYTE_4_WQE_INDEX_S)&
2303 ((*cur_qp)->sq.wqe_cnt-1));
2304 switch (le32_to_cpu(sq_wqe->flag) & HNS_ROCE_WQE_OPCODE_MASK) {
2305 case HNS_ROCE_WQE_OPCODE_SEND:
2306 wc->opcode = IB_WC_SEND;
2308 case HNS_ROCE_WQE_OPCODE_RDMA_READ:
2309 wc->opcode = IB_WC_RDMA_READ;
2310 wc->byte_len = le32_to_cpu(cqe->byte_cnt);
2312 case HNS_ROCE_WQE_OPCODE_RDMA_WRITE:
2313 wc->opcode = IB_WC_RDMA_WRITE;
2315 case HNS_ROCE_WQE_OPCODE_LOCAL_INV:
2316 wc->opcode = IB_WC_LOCAL_INV;
2318 case HNS_ROCE_WQE_OPCODE_UD_SEND:
2319 wc->opcode = IB_WC_SEND;
2322 wc->status = IB_WC_GENERAL_ERR;
2325 wc->wc_flags = (le32_to_cpu(sq_wqe->flag) & HNS_ROCE_WQE_IMM ?
2326 IB_WC_WITH_IMM : 0);
2328 wq = &(*cur_qp)->sq;
2329 if ((*cur_qp)->sq_signal_bits) {
		 * If sq_signal_bits is set, first update the tail
		 * pointer to the WQE that the current CQE corresponds
		 * to
2335 wqe_ctr = (u16)roce_get_field(cqe->cqe_byte_4,
2336 CQE_BYTE_4_WQE_INDEX_M,
2337 CQE_BYTE_4_WQE_INDEX_S);
2338 wq->tail += (wqe_ctr - (u16)wq->tail) &
2341 wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
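		/*
		 * Editor's worked example of the wrap-safe advance
		 * above: head/tail are free-running counters and
		 * wqe_cnt is a power of two, so with wqe_cnt = 64,
		 * wq->tail = 0xfffe and wqe_ctr = 2, the u16 difference
		 * (2 - 0xfffe) = 4, masked by 63 it stays 4, and tail
		 * becomes 0x10002 - whose low six bits again index
		 * slot 2.
		 */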
	/* RQ corresponds to this CQE */
2345 wc->byte_len = le32_to_cpu(cqe->byte_cnt);
2346 opcode = roce_get_field(cqe->cqe_byte_4,
2347 CQE_BYTE_4_OPERATION_TYPE_M,
2348 CQE_BYTE_4_OPERATION_TYPE_S) &
2349 HNS_ROCE_CQE_OPCODE_MASK;
2351 case HNS_ROCE_OPCODE_RDMA_WITH_IMM_RECEIVE:
2352 wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
2353 wc->wc_flags = IB_WC_WITH_IMM;
2355 cpu_to_be32(le32_to_cpu(cqe->immediate_data));
2357 case HNS_ROCE_OPCODE_SEND_DATA_RECEIVE:
2358 if (roce_get_bit(cqe->cqe_byte_4,
2359 CQE_BYTE_4_IMM_INDICATOR_S)) {
2360 wc->opcode = IB_WC_RECV;
2361 wc->wc_flags = IB_WC_WITH_IMM;
2362 wc->ex.imm_data = cpu_to_be32(
2363 le32_to_cpu(cqe->immediate_data));
2365 wc->opcode = IB_WC_RECV;
2370 wc->status = IB_WC_GENERAL_ERR;
2374 /* Update tail pointer, record wr_id */
2375 wq = &(*cur_qp)->rq;
2376 wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
2378 wc->sl = (u8)roce_get_field(cqe->cqe_byte_20, CQE_BYTE_20_SL_M,
2380 wc->src_qp = (u8)roce_get_field(cqe->cqe_byte_20,
2381 CQE_BYTE_20_REMOTE_QPN_M,
2382 CQE_BYTE_20_REMOTE_QPN_S);
2383 wc->wc_flags |= (roce_get_bit(cqe->cqe_byte_20,
2384 CQE_BYTE_20_GRH_PRESENT_S) ?
2386 wc->pkey_index = (u16)roce_get_field(cqe->cqe_byte_28,
2387 CQE_BYTE_28_P_KEY_IDX_M,
2388 CQE_BYTE_28_P_KEY_IDX_S);
2394 int hns_roce_v1_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
2396 struct hns_roce_cq *hr_cq = to_hr_cq(ibcq);
2397 struct hns_roce_qp *cur_qp = NULL;
2398 unsigned long flags;
2402 spin_lock_irqsave(&hr_cq->lock, flags);
2404 for (npolled = 0; npolled < num_entries; ++npolled) {
2405 ret = hns_roce_v1_poll_one(hr_cq, &cur_qp, wc + npolled);
2411 *hr_cq->tptr_addr = hr_cq->cons_index &
2412 ((hr_cq->cq_depth << 1) - 1);
	/* Memory barrier */
2416 hns_roce_v1_cq_set_ci(hr_cq, hr_cq->cons_index);
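	/*
	 * Editor's note: the store above refreshes the host-memory
	 * "tptr" shadow of the consumer index set up in
	 * hns_roce_v1_write_cqc(), which hardware is assumed to read
	 * via DMA.  The doubled-depth mask keeps one extra wrap bit:
	 * cq_depth = 256 gives a 9-bit index in [0, 511], so a full
	 * ring is distinguishable from an empty one.
	 */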
2419 spin_unlock_irqrestore(&hr_cq->lock, flags);
2421 if (ret == 0 || ret == -EAGAIN)
2427 static int hns_roce_v1_clear_hem(struct hns_roce_dev *hr_dev,
2428 struct hns_roce_hem_table *table, int obj,
2431 struct device *dev = &hr_dev->pdev->dev;
2432 struct hns_roce_v1_priv *priv;
2433 unsigned long flags = 0;
2434 long end = HW_SYNC_TIMEOUT_MSECS;
2435 __le32 bt_cmd_val[2] = {0};
2436 void __iomem *bt_cmd;
2439 priv = (struct hns_roce_v1_priv *)hr_dev->priv;
2441 switch (table->type) {
2443 roce_set_field(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_M,
2444 ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S, HEM_TYPE_QPC);
2445 bt_ba = priv->bt_table.qpc_buf.map >> 12;
2448 roce_set_field(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_M,
2449 ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S, HEM_TYPE_MTPT);
2450 bt_ba = priv->bt_table.mtpt_buf.map >> 12;
2453 roce_set_field(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_M,
2454 ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S, HEM_TYPE_CQC);
2455 bt_ba = priv->bt_table.cqc_buf.map >> 12;
		dev_dbg(dev, "HEM_TYPE_SRQC not supported.\n");
2463 roce_set_field(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_IN_MDF_M,
2464 ROCEE_BT_CMD_H_ROCEE_BT_CMD_IN_MDF_S, obj);
2465 roce_set_bit(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_S, 0);
2466 roce_set_bit(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_HW_SYNS_S, 1);
2468 spin_lock_irqsave(&hr_dev->bt_cmd_lock, flags);
2470 bt_cmd = hr_dev->reg_base + ROCEE_BT_CMD_H_REG;
2473 if (readl(bt_cmd) >> BT_CMD_SYNC_SHIFT) {
			dev_err(dev, "Write bt_cmd err, hw_sync is not zero.\n");
2476 spin_unlock_irqrestore(&hr_dev->bt_cmd_lock,
2483 mdelay(HW_SYNC_SLEEP_TIME_INTERVAL);
2484 end -= HW_SYNC_SLEEP_TIME_INTERVAL;
2487 bt_cmd_val[0] = (__le32)bt_ba;
2488 roce_set_field(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_BA_H_M,
2489 ROCEE_BT_CMD_H_ROCEE_BT_CMD_BA_H_S, bt_ba >> 32);
2490 hns_roce_write64_k(bt_cmd_val, hr_dev->reg_base + ROCEE_BT_CMD_L_REG);
2492 spin_unlock_irqrestore(&hr_dev->bt_cmd_lock, flags);
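	/*
	 * Editor's note: the sequence above is a poll-then-kick idiom -
	 * software spins on the BT_CMD sync bit
	 * (readl(bt_cmd) >> BT_CMD_SYNC_SHIFT) within an
	 * HW_SYNC_TIMEOUT_MSECS budget, and only then posts the base
	 * address through the 64-bit ROCEE_BT_CMD_L_REG write, all
	 * under bt_cmd_lock so concurrent HEM updates cannot interleave
	 * their doorbells.
	 */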
2497 static int hns_roce_v1_qp_modify(struct hns_roce_dev *hr_dev,
2498 struct hns_roce_mtt *mtt,
2499 enum hns_roce_qp_state cur_state,
2500 enum hns_roce_qp_state new_state,
2501 struct hns_roce_qp_context *context,
2502 struct hns_roce_qp *hr_qp)
2505 op[HNS_ROCE_QP_NUM_STATE][HNS_ROCE_QP_NUM_STATE] = {
2506 [HNS_ROCE_QP_STATE_RST] = {
2507 [HNS_ROCE_QP_STATE_RST] = HNS_ROCE_CMD_2RST_QP,
2508 [HNS_ROCE_QP_STATE_ERR] = HNS_ROCE_CMD_2ERR_QP,
2509 [HNS_ROCE_QP_STATE_INIT] = HNS_ROCE_CMD_RST2INIT_QP,
2511 [HNS_ROCE_QP_STATE_INIT] = {
2512 [HNS_ROCE_QP_STATE_RST] = HNS_ROCE_CMD_2RST_QP,
2513 [HNS_ROCE_QP_STATE_ERR] = HNS_ROCE_CMD_2ERR_QP,
		/* Note: In v1 engine, HW doesn't support INIT2INIT.
		 * We use the RST2INIT command instead.
2517 [HNS_ROCE_QP_STATE_INIT] = HNS_ROCE_CMD_RST2INIT_QP,
2518 [HNS_ROCE_QP_STATE_RTR] = HNS_ROCE_CMD_INIT2RTR_QP,
2520 [HNS_ROCE_QP_STATE_RTR] = {
2521 [HNS_ROCE_QP_STATE_RST] = HNS_ROCE_CMD_2RST_QP,
2522 [HNS_ROCE_QP_STATE_ERR] = HNS_ROCE_CMD_2ERR_QP,
2523 [HNS_ROCE_QP_STATE_RTS] = HNS_ROCE_CMD_RTR2RTS_QP,
2525 [HNS_ROCE_QP_STATE_RTS] = {
2526 [HNS_ROCE_QP_STATE_RST] = HNS_ROCE_CMD_2RST_QP,
2527 [HNS_ROCE_QP_STATE_ERR] = HNS_ROCE_CMD_2ERR_QP,
2528 [HNS_ROCE_QP_STATE_RTS] = HNS_ROCE_CMD_RTS2RTS_QP,
2529 [HNS_ROCE_QP_STATE_SQD] = HNS_ROCE_CMD_RTS2SQD_QP,
2531 [HNS_ROCE_QP_STATE_SQD] = {
2532 [HNS_ROCE_QP_STATE_RST] = HNS_ROCE_CMD_2RST_QP,
2533 [HNS_ROCE_QP_STATE_ERR] = HNS_ROCE_CMD_2ERR_QP,
2534 [HNS_ROCE_QP_STATE_RTS] = HNS_ROCE_CMD_SQD2RTS_QP,
2535 [HNS_ROCE_QP_STATE_SQD] = HNS_ROCE_CMD_SQD2SQD_QP,
2537 [HNS_ROCE_QP_STATE_ERR] = {
2538 [HNS_ROCE_QP_STATE_RST] = HNS_ROCE_CMD_2RST_QP,
2539 [HNS_ROCE_QP_STATE_ERR] = HNS_ROCE_CMD_2ERR_QP,
2543 struct hns_roce_cmd_mailbox *mailbox;
2544 struct device *dev = &hr_dev->pdev->dev;
2547 if (cur_state >= HNS_ROCE_QP_NUM_STATE ||
2548 new_state >= HNS_ROCE_QP_NUM_STATE ||
2549 !op[cur_state][new_state]) {
		dev_err(dev, "[modify_qp] unsupported state transition %d to %d\n",
2551 cur_state, new_state);
2555 if (op[cur_state][new_state] == HNS_ROCE_CMD_2RST_QP)
2556 return hns_roce_cmd_mbox(hr_dev, 0, 0, hr_qp->qpn, 2,
2557 HNS_ROCE_CMD_2RST_QP,
2558 HNS_ROCE_CMD_TIMEOUT_MSECS);
2560 if (op[cur_state][new_state] == HNS_ROCE_CMD_2ERR_QP)
2561 return hns_roce_cmd_mbox(hr_dev, 0, 0, hr_qp->qpn, 2,
2562 HNS_ROCE_CMD_2ERR_QP,
2563 HNS_ROCE_CMD_TIMEOUT_MSECS);
2565 mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
2566 if (IS_ERR(mailbox))
2567 return PTR_ERR(mailbox);
2569 memcpy(mailbox->buf, context, sizeof(*context));
2571 ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, hr_qp->qpn, 0,
2572 op[cur_state][new_state],
2573 HNS_ROCE_CMD_TIMEOUT_MSECS);
2575 hns_roce_free_cmd_mailbox(hr_dev, mailbox);
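/*
 * Editor's note: op[cur_state][new_state] above is a two-dimensional
 * transition table in which a zero entry marks an unsupported
 * transition.  For example, op[HNS_ROCE_QP_STATE_INIT]
 * [HNS_ROCE_QP_STATE_RTR] resolves to HNS_ROCE_CMD_INIT2RTR_QP, while
 * the 2RST/2ERR commands are posted without copying any context into
 * the mailbox.
 */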
2579 static int hns_roce_v1_m_sqp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
2580 int attr_mask, enum ib_qp_state cur_state,
2581 enum ib_qp_state new_state)
2583 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
2584 struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
2585 struct hns_roce_sqp_context *context;
2586 struct device *dev = &hr_dev->pdev->dev;
2587 dma_addr_t dma_handle = 0;
2594 context = kzalloc(sizeof(*context), GFP_KERNEL);
2598 /* Search QP buf's MTTs */
2599 mtts = hns_roce_table_find(hr_dev, &hr_dev->mr_table.mtt_table,
2600 hr_qp->mtt.first_seg, &dma_handle);
		dev_err(dev, "failed to find QP buf PA\n");
2606 if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
2607 roce_set_field(context->qp1c_bytes_4,
2608 QP1C_BYTES_4_SQ_WQE_SHIFT_M,
2609 QP1C_BYTES_4_SQ_WQE_SHIFT_S,
2610 ilog2((unsigned int)hr_qp->sq.wqe_cnt));
2611 roce_set_field(context->qp1c_bytes_4,
2612 QP1C_BYTES_4_RQ_WQE_SHIFT_M,
2613 QP1C_BYTES_4_RQ_WQE_SHIFT_S,
2614 ilog2((unsigned int)hr_qp->rq.wqe_cnt));
2615 roce_set_field(context->qp1c_bytes_4, QP1C_BYTES_4_PD_M,
2616 QP1C_BYTES_4_PD_S, to_hr_pd(ibqp->pd)->pdn);
2618 context->sq_rq_bt_l = cpu_to_le32((u32)(dma_handle));
2619 roce_set_field(context->qp1c_bytes_12,
2620 QP1C_BYTES_12_SQ_RQ_BT_H_M,
2621 QP1C_BYTES_12_SQ_RQ_BT_H_S,
2622 ((u32)(dma_handle >> 32)));
2624 roce_set_field(context->qp1c_bytes_16, QP1C_BYTES_16_RQ_HEAD_M,
2625 QP1C_BYTES_16_RQ_HEAD_S, hr_qp->rq.head);
2626 roce_set_field(context->qp1c_bytes_16, QP1C_BYTES_16_PORT_NUM_M,
2627 QP1C_BYTES_16_PORT_NUM_S, hr_qp->phy_port);
2628 roce_set_bit(context->qp1c_bytes_16,
2629 QP1C_BYTES_16_SIGNALING_TYPE_S,
2630 le32_to_cpu(hr_qp->sq_signal_bits));
2631 roce_set_bit(context->qp1c_bytes_16, QP1C_BYTES_16_RQ_BA_FLG_S,
2633 roce_set_bit(context->qp1c_bytes_16, QP1C_BYTES_16_SQ_BA_FLG_S,
2635 roce_set_bit(context->qp1c_bytes_16, QP1C_BYTES_16_QP1_ERR_S,
2638 roce_set_field(context->qp1c_bytes_20, QP1C_BYTES_20_SQ_HEAD_M,
2639 QP1C_BYTES_20_SQ_HEAD_S, hr_qp->sq.head);
2640 roce_set_field(context->qp1c_bytes_20, QP1C_BYTES_20_PKEY_IDX_M,
2641 QP1C_BYTES_20_PKEY_IDX_S, attr->pkey_index);
2643 rq_pa_start = (u32)hr_qp->rq.offset / PAGE_SIZE;
2644 context->cur_rq_wqe_ba_l =
2645 cpu_to_le32((u32)(mtts[rq_pa_start]));
2647 roce_set_field(context->qp1c_bytes_28,
2648 QP1C_BYTES_28_CUR_RQ_WQE_BA_H_M,
2649 QP1C_BYTES_28_CUR_RQ_WQE_BA_H_S,
2650 (mtts[rq_pa_start]) >> 32);
2651 roce_set_field(context->qp1c_bytes_28,
2652 QP1C_BYTES_28_RQ_CUR_IDX_M,
2653 QP1C_BYTES_28_RQ_CUR_IDX_S, 0);
2655 roce_set_field(context->qp1c_bytes_32,
2656 QP1C_BYTES_32_RX_CQ_NUM_M,
2657 QP1C_BYTES_32_RX_CQ_NUM_S,
2658 to_hr_cq(ibqp->recv_cq)->cqn);
2659 roce_set_field(context->qp1c_bytes_32,
2660 QP1C_BYTES_32_TX_CQ_NUM_M,
2661 QP1C_BYTES_32_TX_CQ_NUM_S,
2662 to_hr_cq(ibqp->send_cq)->cqn);
2664 context->cur_sq_wqe_ba_l = cpu_to_le32((u32)mtts[0]);
2666 roce_set_field(context->qp1c_bytes_40,
2667 QP1C_BYTES_40_CUR_SQ_WQE_BA_H_M,
2668 QP1C_BYTES_40_CUR_SQ_WQE_BA_H_S,
2670 roce_set_field(context->qp1c_bytes_40,
2671 QP1C_BYTES_40_SQ_CUR_IDX_M,
2672 QP1C_BYTES_40_SQ_CUR_IDX_S, 0);
2674 /* Copy context to QP1C register */
2675 addr = (u32 __iomem *)(hr_dev->reg_base +
2676 ROCEE_QP1C_CFG0_0_REG +
2677 hr_qp->phy_port * sizeof(*context));
2679 writel(le32_to_cpu(context->qp1c_bytes_4), addr);
2680 writel(le32_to_cpu(context->sq_rq_bt_l), addr + 1);
2681 writel(le32_to_cpu(context->qp1c_bytes_12), addr + 2);
2682 writel(le32_to_cpu(context->qp1c_bytes_16), addr + 3);
2683 writel(le32_to_cpu(context->qp1c_bytes_20), addr + 4);
2684 writel(le32_to_cpu(context->cur_rq_wqe_ba_l), addr + 5);
2685 writel(le32_to_cpu(context->qp1c_bytes_28), addr + 6);
2686 writel(le32_to_cpu(context->qp1c_bytes_32), addr + 7);
2687 writel(le32_to_cpu(context->cur_sq_wqe_ba_l), addr + 8);
2688 writel(le32_to_cpu(context->qp1c_bytes_40), addr + 9);
2691 /* Modify QP1C status */
2692 reg_val = roce_read(hr_dev, ROCEE_QP1C_CFG0_0_REG +
2693 hr_qp->phy_port * sizeof(*context));
2694 tmp = cpu_to_le32(reg_val);
2695 roce_set_field(tmp, ROCEE_QP1C_CFG0_0_ROCEE_QP1C_QP_ST_M,
2696 ROCEE_QP1C_CFG0_0_ROCEE_QP1C_QP_ST_S, new_state);
2697 reg_val = le32_to_cpu(tmp);
2698 roce_write(hr_dev, ROCEE_QP1C_CFG0_0_REG +
2699 hr_qp->phy_port * sizeof(*context), reg_val);
2701 hr_qp->state = new_state;
2702 if (new_state == IB_QPS_RESET) {
2703 hns_roce_v1_cq_clean(to_hr_cq(ibqp->recv_cq), hr_qp->qpn,
2704 ibqp->srq ? to_hr_srq(ibqp->srq) : NULL);
2705 if (ibqp->send_cq != ibqp->recv_cq)
2706 hns_roce_v1_cq_clean(to_hr_cq(ibqp->send_cq),
2713 hr_qp->sq_next_wqe = 0;
2724 static int hns_roce_v1_m_qp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
2725 int attr_mask, enum ib_qp_state cur_state,
2726 enum ib_qp_state new_state)
2728 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
2729 struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
2730 struct device *dev = &hr_dev->pdev->dev;
2731 struct hns_roce_qp_context *context;
2732 const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr);
2733 dma_addr_t dma_handle_2 = 0;
2734 dma_addr_t dma_handle = 0;
2735 __le32 doorbell[2] = {0};
2736 int rq_pa_start = 0;
2745 context = kzalloc(sizeof(*context), GFP_KERNEL);
2749 /* Search qp buf's mtts */
2750 mtts = hns_roce_table_find(hr_dev, &hr_dev->mr_table.mtt_table,
2751 hr_qp->mtt.first_seg, &dma_handle);
		dev_err(dev, "failed to find QP buf PA\n");
2757 /* Search IRRL's mtts */
2758 mtts_2 = hns_roce_table_find(hr_dev, &hr_dev->qp_table.irrl_table,
2759 hr_qp->qpn, &dma_handle_2);
2760 if (mtts_2 == NULL) {
		dev_err(dev, "failed to find QP IRRL table\n");
2768 * IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_PORT | IB_QP_ACCESS_FLAGS
2769 * Optional param: NA
2771 if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
2772 roce_set_field(context->qpc_bytes_4,
2773 QP_CONTEXT_QPC_BYTES_4_TRANSPORT_SERVICE_TYPE_M,
2774 QP_CONTEXT_QPC_BYTES_4_TRANSPORT_SERVICE_TYPE_S,
2775 to_hr_qp_type(hr_qp->ibqp.qp_type));
2777 roce_set_bit(context->qpc_bytes_4,
2778 QP_CONTEXT_QPC_BYTE_4_ENABLE_FPMR_S, 0);
2779 roce_set_bit(context->qpc_bytes_4,
2780 QP_CONTEXT_QPC_BYTE_4_RDMA_READ_ENABLE_S,
2781 !!(attr->qp_access_flags & IB_ACCESS_REMOTE_READ));
2782 roce_set_bit(context->qpc_bytes_4,
2783 QP_CONTEXT_QPC_BYTE_4_RDMA_WRITE_ENABLE_S,
2784 !!(attr->qp_access_flags & IB_ACCESS_REMOTE_WRITE)
2786 roce_set_bit(context->qpc_bytes_4,
2787 QP_CONTEXT_QPC_BYTE_4_ATOMIC_OPERATION_ENABLE_S,
2788 !!(attr->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC)
2790 roce_set_bit(context->qpc_bytes_4,
2791 QP_CONTEXT_QPC_BYTE_4_RDMAR_USE_S, 1);
2792 roce_set_field(context->qpc_bytes_4,
2793 QP_CONTEXT_QPC_BYTES_4_SQ_WQE_SHIFT_M,
2794 QP_CONTEXT_QPC_BYTES_4_SQ_WQE_SHIFT_S,
2795 ilog2((unsigned int)hr_qp->sq.wqe_cnt));
2796 roce_set_field(context->qpc_bytes_4,
2797 QP_CONTEXT_QPC_BYTES_4_RQ_WQE_SHIFT_M,
2798 QP_CONTEXT_QPC_BYTES_4_RQ_WQE_SHIFT_S,
2799 ilog2((unsigned int)hr_qp->rq.wqe_cnt));
2800 roce_set_field(context->qpc_bytes_4,
2801 QP_CONTEXT_QPC_BYTES_4_PD_M,
2802 QP_CONTEXT_QPC_BYTES_4_PD_S,
2803 to_hr_pd(ibqp->pd)->pdn);
2804 hr_qp->access_flags = attr->qp_access_flags;
2805 roce_set_field(context->qpc_bytes_8,
2806 QP_CONTEXT_QPC_BYTES_8_TX_COMPLETION_M,
2807 QP_CONTEXT_QPC_BYTES_8_TX_COMPLETION_S,
2808 to_hr_cq(ibqp->send_cq)->cqn);
2809 roce_set_field(context->qpc_bytes_8,
2810 QP_CONTEXT_QPC_BYTES_8_RX_COMPLETION_M,
2811 QP_CONTEXT_QPC_BYTES_8_RX_COMPLETION_S,
2812 to_hr_cq(ibqp->recv_cq)->cqn);
2815 roce_set_field(context->qpc_bytes_12,
2816 QP_CONTEXT_QPC_BYTES_12_SRQ_NUMBER_M,
2817 QP_CONTEXT_QPC_BYTES_12_SRQ_NUMBER_S,
2818 to_hr_srq(ibqp->srq)->srqn);
2820 roce_set_field(context->qpc_bytes_12,
2821 QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_M,
2822 QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_S,
2824 hr_qp->pkey_index = attr->pkey_index;
2825 roce_set_field(context->qpc_bytes_16,
2826 QP_CONTEXT_QPC_BYTES_16_QP_NUM_M,
2827 QP_CONTEXT_QPC_BYTES_16_QP_NUM_S, hr_qp->qpn);
2829 } else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_INIT) {
2830 roce_set_field(context->qpc_bytes_4,
2831 QP_CONTEXT_QPC_BYTES_4_TRANSPORT_SERVICE_TYPE_M,
2832 QP_CONTEXT_QPC_BYTES_4_TRANSPORT_SERVICE_TYPE_S,
2833 to_hr_qp_type(hr_qp->ibqp.qp_type));
2834 roce_set_bit(context->qpc_bytes_4,
2835 QP_CONTEXT_QPC_BYTE_4_ENABLE_FPMR_S, 0);
2836 if (attr_mask & IB_QP_ACCESS_FLAGS) {
2837 roce_set_bit(context->qpc_bytes_4,
2838 QP_CONTEXT_QPC_BYTE_4_RDMA_READ_ENABLE_S,
2839 !!(attr->qp_access_flags &
2840 IB_ACCESS_REMOTE_READ));
2841 roce_set_bit(context->qpc_bytes_4,
2842 QP_CONTEXT_QPC_BYTE_4_RDMA_WRITE_ENABLE_S,
2843 !!(attr->qp_access_flags &
2844 IB_ACCESS_REMOTE_WRITE));
2846 roce_set_bit(context->qpc_bytes_4,
2847 QP_CONTEXT_QPC_BYTE_4_RDMA_READ_ENABLE_S,
2848 !!(hr_qp->access_flags &
2849 IB_ACCESS_REMOTE_READ));
2850 roce_set_bit(context->qpc_bytes_4,
2851 QP_CONTEXT_QPC_BYTE_4_RDMA_WRITE_ENABLE_S,
2852 !!(hr_qp->access_flags &
2853 IB_ACCESS_REMOTE_WRITE));
2856 roce_set_bit(context->qpc_bytes_4,
2857 QP_CONTEXT_QPC_BYTE_4_RDMAR_USE_S, 1);
2858 roce_set_field(context->qpc_bytes_4,
2859 QP_CONTEXT_QPC_BYTES_4_SQ_WQE_SHIFT_M,
2860 QP_CONTEXT_QPC_BYTES_4_SQ_WQE_SHIFT_S,
2861 ilog2((unsigned int)hr_qp->sq.wqe_cnt));
2862 roce_set_field(context->qpc_bytes_4,
2863 QP_CONTEXT_QPC_BYTES_4_RQ_WQE_SHIFT_M,
2864 QP_CONTEXT_QPC_BYTES_4_RQ_WQE_SHIFT_S,
2865 ilog2((unsigned int)hr_qp->rq.wqe_cnt));
2866 roce_set_field(context->qpc_bytes_4,
2867 QP_CONTEXT_QPC_BYTES_4_PD_M,
2868 QP_CONTEXT_QPC_BYTES_4_PD_S,
2869 to_hr_pd(ibqp->pd)->pdn);
2871 roce_set_field(context->qpc_bytes_8,
2872 QP_CONTEXT_QPC_BYTES_8_TX_COMPLETION_M,
2873 QP_CONTEXT_QPC_BYTES_8_TX_COMPLETION_S,
2874 to_hr_cq(ibqp->send_cq)->cqn);
2875 roce_set_field(context->qpc_bytes_8,
2876 QP_CONTEXT_QPC_BYTES_8_RX_COMPLETION_M,
2877 QP_CONTEXT_QPC_BYTES_8_RX_COMPLETION_S,
2878 to_hr_cq(ibqp->recv_cq)->cqn);
2881 roce_set_field(context->qpc_bytes_12,
2882 QP_CONTEXT_QPC_BYTES_12_SRQ_NUMBER_M,
2883 QP_CONTEXT_QPC_BYTES_12_SRQ_NUMBER_S,
2884 to_hr_srq(ibqp->srq)->srqn);
2885 if (attr_mask & IB_QP_PKEY_INDEX)
2886 roce_set_field(context->qpc_bytes_12,
2887 QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_M,
2888 QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_S,
2891 roce_set_field(context->qpc_bytes_12,
2892 QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_M,
2893 QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_S,
2896 roce_set_field(context->qpc_bytes_16,
2897 QP_CONTEXT_QPC_BYTES_16_QP_NUM_M,
2898 QP_CONTEXT_QPC_BYTES_16_QP_NUM_S, hr_qp->qpn);
2899 } else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR) {
2900 if ((attr_mask & IB_QP_ALT_PATH) ||
2901 (attr_mask & IB_QP_ACCESS_FLAGS) ||
2902 (attr_mask & IB_QP_PKEY_INDEX) ||
2903 (attr_mask & IB_QP_QKEY)) {
2904 dev_err(dev, "INIT2RTR attr_mask error\n");
2908 dmac = (u8 *)attr->ah_attr.roce.dmac;
2910 context->sq_rq_bt_l = cpu_to_le32((u32)(dma_handle));
2911 roce_set_field(context->qpc_bytes_24,
2912 QP_CONTEXT_QPC_BYTES_24_SQ_RQ_BT_H_M,
2913 QP_CONTEXT_QPC_BYTES_24_SQ_RQ_BT_H_S,
2914 ((u32)(dma_handle >> 32)));
2915 roce_set_bit(context->qpc_bytes_24,
2916 QP_CONTEXT_QPC_BYTE_24_REMOTE_ENABLE_E2E_CREDITS_S,
2918 roce_set_field(context->qpc_bytes_24,
2919 QP_CONTEXT_QPC_BYTES_24_MINIMUM_RNR_NAK_TIMER_M,
2920 QP_CONTEXT_QPC_BYTES_24_MINIMUM_RNR_NAK_TIMER_S,
2921 attr->min_rnr_timer);
2922 context->irrl_ba_l = cpu_to_le32((u32)(dma_handle_2));
2923 roce_set_field(context->qpc_bytes_32,
2924 QP_CONTEXT_QPC_BYTES_32_IRRL_BA_H_M,
2925 QP_CONTEXT_QPC_BYTES_32_IRRL_BA_H_S,
2926 ((u32)(dma_handle_2 >> 32)) &
2927 QP_CONTEXT_QPC_BYTES_32_IRRL_BA_H_M);
2928 roce_set_field(context->qpc_bytes_32,
2929 QP_CONTEXT_QPC_BYTES_32_MIG_STATE_M,
2930 QP_CONTEXT_QPC_BYTES_32_MIG_STATE_S, 0);
2931 roce_set_bit(context->qpc_bytes_32,
2932 QP_CONTEXT_QPC_BYTE_32_LOCAL_ENABLE_E2E_CREDITS_S,
2934 roce_set_bit(context->qpc_bytes_32,
2935 QP_CONTEXT_QPC_BYTE_32_SIGNALING_TYPE_S,
2936 le32_to_cpu(hr_qp->sq_signal_bits));
2938 port = (attr_mask & IB_QP_PORT) ? (attr->port_num - 1) :
2940 smac = (u8 *)hr_dev->dev_addr[port];
		/* When dmac equals smac, or loop_idc is 1, loopback is needed */
2942 if (ether_addr_equal_unaligned(dmac, smac) ||
2943 hr_dev->loop_idc == 0x1)
2944 roce_set_bit(context->qpc_bytes_32,
2945 QP_CONTEXT_QPC_BYTE_32_LOOPBACK_INDICATOR_S, 1);
2947 roce_set_bit(context->qpc_bytes_32,
2948 QP_CONTEXT_QPC_BYTE_32_GLOBAL_HEADER_S,
2949 rdma_ah_get_ah_flags(&attr->ah_attr));
2950 roce_set_field(context->qpc_bytes_32,
2951 QP_CONTEXT_QPC_BYTES_32_RESPONDER_RESOURCES_M,
2952 QP_CONTEXT_QPC_BYTES_32_RESPONDER_RESOURCES_S,
2953 ilog2((unsigned int)attr->max_dest_rd_atomic));
2955 if (attr_mask & IB_QP_DEST_QPN)
2956 roce_set_field(context->qpc_bytes_36,
2957 QP_CONTEXT_QPC_BYTES_36_DEST_QP_M,
2958 QP_CONTEXT_QPC_BYTES_36_DEST_QP_S,
2961 /* Configure GID index */
2962 port_num = rdma_ah_get_port_num(&attr->ah_attr);
2963 roce_set_field(context->qpc_bytes_36,
2964 QP_CONTEXT_QPC_BYTES_36_SGID_INDEX_M,
2965 QP_CONTEXT_QPC_BYTES_36_SGID_INDEX_S,
2966 hns_get_gid_index(hr_dev,
2970 memcpy(&(context->dmac_l), dmac, 4);
2972 roce_set_field(context->qpc_bytes_44,
2973 QP_CONTEXT_QPC_BYTES_44_DMAC_H_M,
2974 QP_CONTEXT_QPC_BYTES_44_DMAC_H_S,
2975 *((u16 *)(&dmac[4])));
2976 roce_set_field(context->qpc_bytes_44,
2977 QP_CONTEXT_QPC_BYTES_44_MAXIMUM_STATIC_RATE_M,
2978 QP_CONTEXT_QPC_BYTES_44_MAXIMUM_STATIC_RATE_S,
2979 rdma_ah_get_static_rate(&attr->ah_attr));
2980 roce_set_field(context->qpc_bytes_44,
2981 QP_CONTEXT_QPC_BYTES_44_HOPLMT_M,
2982 QP_CONTEXT_QPC_BYTES_44_HOPLMT_S,
2985 roce_set_field(context->qpc_bytes_48,
2986 QP_CONTEXT_QPC_BYTES_48_FLOWLABEL_M,
2987 QP_CONTEXT_QPC_BYTES_48_FLOWLABEL_S,
2989 roce_set_field(context->qpc_bytes_48,
2990 QP_CONTEXT_QPC_BYTES_48_TCLASS_M,
2991 QP_CONTEXT_QPC_BYTES_48_TCLASS_S,
2992 grh->traffic_class);
2993 roce_set_field(context->qpc_bytes_48,
2994 QP_CONTEXT_QPC_BYTES_48_MTU_M,
2995 QP_CONTEXT_QPC_BYTES_48_MTU_S, attr->path_mtu);
2997 memcpy(context->dgid, grh->dgid.raw,
2998 sizeof(grh->dgid.raw));
3000 dev_dbg(dev, "dmac:%x :%lx\n", context->dmac_l,
3001 roce_get_field(context->qpc_bytes_44,
3002 QP_CONTEXT_QPC_BYTES_44_DMAC_H_M,
3003 QP_CONTEXT_QPC_BYTES_44_DMAC_H_S));
3005 roce_set_field(context->qpc_bytes_68,
3006 QP_CONTEXT_QPC_BYTES_68_RQ_HEAD_M,
3007 QP_CONTEXT_QPC_BYTES_68_RQ_HEAD_S,
3009 roce_set_field(context->qpc_bytes_68,
3010 QP_CONTEXT_QPC_BYTES_68_RQ_CUR_INDEX_M,
3011 QP_CONTEXT_QPC_BYTES_68_RQ_CUR_INDEX_S, 0);
3013 rq_pa_start = (u32)hr_qp->rq.offset / PAGE_SIZE;
3014 context->cur_rq_wqe_ba_l =
3015 cpu_to_le32((u32)(mtts[rq_pa_start]));
3017 roce_set_field(context->qpc_bytes_76,
3018 QP_CONTEXT_QPC_BYTES_76_CUR_RQ_WQE_BA_H_M,
3019 QP_CONTEXT_QPC_BYTES_76_CUR_RQ_WQE_BA_H_S,
3020 mtts[rq_pa_start] >> 32);
3021 roce_set_field(context->qpc_bytes_76,
3022 QP_CONTEXT_QPC_BYTES_76_RX_REQ_MSN_M,
3023 QP_CONTEXT_QPC_BYTES_76_RX_REQ_MSN_S, 0);
3025 context->rx_rnr_time = 0;
3027 roce_set_field(context->qpc_bytes_84,
3028 QP_CONTEXT_QPC_BYTES_84_LAST_ACK_PSN_M,
3029 QP_CONTEXT_QPC_BYTES_84_LAST_ACK_PSN_S,
3031 roce_set_field(context->qpc_bytes_84,
3032 QP_CONTEXT_QPC_BYTES_84_TRRL_HEAD_M,
3033 QP_CONTEXT_QPC_BYTES_84_TRRL_HEAD_S, 0);
3035 roce_set_field(context->qpc_bytes_88,
3036 QP_CONTEXT_QPC_BYTES_88_RX_REQ_EPSN_M,
3037 QP_CONTEXT_QPC_BYTES_88_RX_REQ_EPSN_S,
3039 roce_set_bit(context->qpc_bytes_88,
3040 QP_CONTEXT_QPC_BYTES_88_RX_REQ_PSN_ERR_FLAG_S, 0);
3041 roce_set_bit(context->qpc_bytes_88,
3042 QP_CONTEXT_QPC_BYTES_88_RX_LAST_OPCODE_FLG_S, 0);
3043 roce_set_field(context->qpc_bytes_88,
3044 QP_CONTEXT_QPC_BYTES_88_RQ_REQ_LAST_OPERATION_TYPE_M,
3045 QP_CONTEXT_QPC_BYTES_88_RQ_REQ_LAST_OPERATION_TYPE_S,
3047 roce_set_field(context->qpc_bytes_88,
3048 QP_CONTEXT_QPC_BYTES_88_RQ_REQ_RDMA_WR_FLAG_M,
3049 QP_CONTEXT_QPC_BYTES_88_RQ_REQ_RDMA_WR_FLAG_S,
3052 context->dma_length = 0;
3057 roce_set_field(context->qpc_bytes_108,
3058 QP_CONTEXT_QPC_BYTES_108_TRRL_SDB_PSN_M,
3059 QP_CONTEXT_QPC_BYTES_108_TRRL_SDB_PSN_S, 0);
3060 roce_set_bit(context->qpc_bytes_108,
3061 QP_CONTEXT_QPC_BYTES_108_TRRL_SDB_PSN_FLG_S, 0);
3062 roce_set_bit(context->qpc_bytes_108,
3063 QP_CONTEXT_QPC_BYTES_108_TRRL_TDB_PSN_FLG_S, 0);
3065 roce_set_field(context->qpc_bytes_112,
3066 QP_CONTEXT_QPC_BYTES_112_TRRL_TDB_PSN_M,
3067 QP_CONTEXT_QPC_BYTES_112_TRRL_TDB_PSN_S, 0);
3068 roce_set_field(context->qpc_bytes_112,
3069 QP_CONTEXT_QPC_BYTES_112_TRRL_TAIL_M,
3070 QP_CONTEXT_QPC_BYTES_112_TRRL_TAIL_S, 0);
		/* For the chip's response ack */
3073 roce_set_field(context->qpc_bytes_156,
3074 QP_CONTEXT_QPC_BYTES_156_PORT_NUM_M,
3075 QP_CONTEXT_QPC_BYTES_156_PORT_NUM_S,
3077 roce_set_field(context->qpc_bytes_156,
3078 QP_CONTEXT_QPC_BYTES_156_SL_M,
3079 QP_CONTEXT_QPC_BYTES_156_SL_S,
3080 rdma_ah_get_sl(&attr->ah_attr));
3081 hr_qp->sl = rdma_ah_get_sl(&attr->ah_attr);
3082 } else if (cur_state == IB_QPS_RTR &&
3083 new_state == IB_QPS_RTS) {
		/* If any optional parameter is present, return an error */
3085 if ((attr_mask & IB_QP_ALT_PATH) ||
3086 (attr_mask & IB_QP_ACCESS_FLAGS) ||
3087 (attr_mask & IB_QP_QKEY) ||
3088 (attr_mask & IB_QP_PATH_MIG_STATE) ||
3089 (attr_mask & IB_QP_CUR_STATE) ||
3090 (attr_mask & IB_QP_MIN_RNR_TIMER)) {
3091 dev_err(dev, "RTR2RTS attr_mask error\n");
3095 context->rx_cur_sq_wqe_ba_l = cpu_to_le32((u32)(mtts[0]));
3097 roce_set_field(context->qpc_bytes_120,
3098 QP_CONTEXT_QPC_BYTES_120_RX_CUR_SQ_WQE_BA_H_M,
3099 QP_CONTEXT_QPC_BYTES_120_RX_CUR_SQ_WQE_BA_H_S,
3102 roce_set_field(context->qpc_bytes_124,
3103 QP_CONTEXT_QPC_BYTES_124_RX_ACK_MSN_M,
3104 QP_CONTEXT_QPC_BYTES_124_RX_ACK_MSN_S, 0);
3105 roce_set_field(context->qpc_bytes_124,
3106 QP_CONTEXT_QPC_BYTES_124_IRRL_MSG_IDX_M,
3107 QP_CONTEXT_QPC_BYTES_124_IRRL_MSG_IDX_S, 0);
3109 roce_set_field(context->qpc_bytes_128,
3110 QP_CONTEXT_QPC_BYTES_128_RX_ACK_EPSN_M,
3111 QP_CONTEXT_QPC_BYTES_128_RX_ACK_EPSN_S,
3113 roce_set_bit(context->qpc_bytes_128,
3114 QP_CONTEXT_QPC_BYTES_128_RX_ACK_PSN_ERR_FLG_S, 0);
3115 roce_set_field(context->qpc_bytes_128,
3116 QP_CONTEXT_QPC_BYTES_128_ACK_LAST_OPERATION_TYPE_M,
3117 QP_CONTEXT_QPC_BYTES_128_ACK_LAST_OPERATION_TYPE_S,
3119 roce_set_bit(context->qpc_bytes_128,
3120 QP_CONTEXT_QPC_BYTES_128_IRRL_PSN_VLD_FLG_S, 0);
3122 roce_set_field(context->qpc_bytes_132,
3123 QP_CONTEXT_QPC_BYTES_132_IRRL_PSN_M,
3124 QP_CONTEXT_QPC_BYTES_132_IRRL_PSN_S, 0);
3125 roce_set_field(context->qpc_bytes_132,
3126 QP_CONTEXT_QPC_BYTES_132_IRRL_TAIL_M,
3127 QP_CONTEXT_QPC_BYTES_132_IRRL_TAIL_S, 0);
3129 roce_set_field(context->qpc_bytes_136,
3130 QP_CONTEXT_QPC_BYTES_136_RETRY_MSG_PSN_M,
3131 QP_CONTEXT_QPC_BYTES_136_RETRY_MSG_PSN_S,
3133 roce_set_field(context->qpc_bytes_136,
3134 QP_CONTEXT_QPC_BYTES_136_RETRY_MSG_FPKT_PSN_L_M,
3135 QP_CONTEXT_QPC_BYTES_136_RETRY_MSG_FPKT_PSN_L_S,
3138 roce_set_field(context->qpc_bytes_140,
3139 QP_CONTEXT_QPC_BYTES_140_RETRY_MSG_FPKT_PSN_H_M,
3140 QP_CONTEXT_QPC_BYTES_140_RETRY_MSG_FPKT_PSN_H_S,
3141 (attr->sq_psn >> SQ_PSN_SHIFT));
3142 roce_set_field(context->qpc_bytes_140,
3143 QP_CONTEXT_QPC_BYTES_140_RETRY_MSG_MSN_M,
3144 QP_CONTEXT_QPC_BYTES_140_RETRY_MSG_MSN_S, 0);
3145 roce_set_bit(context->qpc_bytes_140,
3146 QP_CONTEXT_QPC_BYTES_140_RNR_RETRY_FLG_S, 0);
3148 roce_set_field(context->qpc_bytes_148,
3149 QP_CONTEXT_QPC_BYTES_148_CHECK_FLAG_M,
3150 QP_CONTEXT_QPC_BYTES_148_CHECK_FLAG_S, 0);
3151 roce_set_field(context->qpc_bytes_148,
3152 QP_CONTEXT_QPC_BYTES_148_RETRY_COUNT_M,
3153 QP_CONTEXT_QPC_BYTES_148_RETRY_COUNT_S,
3155 roce_set_field(context->qpc_bytes_148,
3156 QP_CONTEXT_QPC_BYTES_148_RNR_RETRY_COUNT_M,
3157 QP_CONTEXT_QPC_BYTES_148_RNR_RETRY_COUNT_S,
3159 roce_set_field(context->qpc_bytes_148,
3160 QP_CONTEXT_QPC_BYTES_148_LSN_M,
3161 QP_CONTEXT_QPC_BYTES_148_LSN_S, 0x100);
3163 context->rnr_retry = 0;
3165 roce_set_field(context->qpc_bytes_156,
3166 QP_CONTEXT_QPC_BYTES_156_RETRY_COUNT_INIT_M,
3167 QP_CONTEXT_QPC_BYTES_156_RETRY_COUNT_INIT_S,
3169 if (attr->timeout < 0x12) {
			dev_info(dev, "ack timeout value (0x%x) must be at least 0x12.\n",
3172 roce_set_field(context->qpc_bytes_156,
3173 QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_M,
3174 QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_S,
3177 roce_set_field(context->qpc_bytes_156,
3178 QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_M,
3179 QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_S,
3182 roce_set_field(context->qpc_bytes_156,
3183 QP_CONTEXT_QPC_BYTES_156_RNR_RETRY_COUNT_INIT_M,
3184 QP_CONTEXT_QPC_BYTES_156_RNR_RETRY_COUNT_INIT_S,
3186 roce_set_field(context->qpc_bytes_156,
3187 QP_CONTEXT_QPC_BYTES_156_PORT_NUM_M,
3188 QP_CONTEXT_QPC_BYTES_156_PORT_NUM_S,
3190 roce_set_field(context->qpc_bytes_156,
3191 QP_CONTEXT_QPC_BYTES_156_SL_M,
3192 QP_CONTEXT_QPC_BYTES_156_SL_S,
3193 rdma_ah_get_sl(&attr->ah_attr));
3194 hr_qp->sl = rdma_ah_get_sl(&attr->ah_attr);
3195 roce_set_field(context->qpc_bytes_156,
3196 QP_CONTEXT_QPC_BYTES_156_INITIATOR_DEPTH_M,
3197 QP_CONTEXT_QPC_BYTES_156_INITIATOR_DEPTH_S,
3198 ilog2((unsigned int)attr->max_rd_atomic));
3199 roce_set_field(context->qpc_bytes_156,
3200 QP_CONTEXT_QPC_BYTES_156_ACK_REQ_IND_M,
3201 QP_CONTEXT_QPC_BYTES_156_ACK_REQ_IND_S, 0);
3202 context->pkt_use_len = 0;
3204 roce_set_field(context->qpc_bytes_164,
3205 QP_CONTEXT_QPC_BYTES_164_SQ_PSN_M,
3206 QP_CONTEXT_QPC_BYTES_164_SQ_PSN_S, attr->sq_psn);
3207 roce_set_field(context->qpc_bytes_164,
3208 QP_CONTEXT_QPC_BYTES_164_IRRL_HEAD_M,
3209 QP_CONTEXT_QPC_BYTES_164_IRRL_HEAD_S, 0);
3211 roce_set_field(context->qpc_bytes_168,
3212 QP_CONTEXT_QPC_BYTES_168_RETRY_SQ_PSN_M,
3213 QP_CONTEXT_QPC_BYTES_168_RETRY_SQ_PSN_S,
3215 roce_set_field(context->qpc_bytes_168,
3216 QP_CONTEXT_QPC_BYTES_168_SGE_USE_FLA_M,
3217 QP_CONTEXT_QPC_BYTES_168_SGE_USE_FLA_S, 0);
3218 roce_set_field(context->qpc_bytes_168,
3219 QP_CONTEXT_QPC_BYTES_168_DB_TYPE_M,
3220 QP_CONTEXT_QPC_BYTES_168_DB_TYPE_S, 0);
3221 roce_set_bit(context->qpc_bytes_168,
3222 QP_CONTEXT_QPC_BYTES_168_MSG_LP_IND_S, 0);
3223 roce_set_bit(context->qpc_bytes_168,
3224 QP_CONTEXT_QPC_BYTES_168_CSDB_LP_IND_S, 0);
3225 roce_set_bit(context->qpc_bytes_168,
3226 QP_CONTEXT_QPC_BYTES_168_QP_ERR_FLG_S, 0);
3227 context->sge_use_len = 0;
3229 roce_set_field(context->qpc_bytes_176,
3230 QP_CONTEXT_QPC_BYTES_176_DB_CUR_INDEX_M,
3231 QP_CONTEXT_QPC_BYTES_176_DB_CUR_INDEX_S, 0);
3232 roce_set_field(context->qpc_bytes_176,
3233 QP_CONTEXT_QPC_BYTES_176_RETRY_DB_CUR_INDEX_M,
3234 QP_CONTEXT_QPC_BYTES_176_RETRY_DB_CUR_INDEX_S,
3236 roce_set_field(context->qpc_bytes_180,
3237 QP_CONTEXT_QPC_BYTES_180_SQ_CUR_INDEX_M,
3238 QP_CONTEXT_QPC_BYTES_180_SQ_CUR_INDEX_S, 0);
3239 roce_set_field(context->qpc_bytes_180,
3240 QP_CONTEXT_QPC_BYTES_180_SQ_HEAD_M,
3241 QP_CONTEXT_QPC_BYTES_180_SQ_HEAD_S, 0);
3243 context->tx_cur_sq_wqe_ba_l = cpu_to_le32((u32)(mtts[0]));
3245 roce_set_field(context->qpc_bytes_188,
3246 QP_CONTEXT_QPC_BYTES_188_TX_CUR_SQ_WQE_BA_H_M,
3247 QP_CONTEXT_QPC_BYTES_188_TX_CUR_SQ_WQE_BA_H_S,
3249 roce_set_bit(context->qpc_bytes_188,
3250 QP_CONTEXT_QPC_BYTES_188_PKT_RETRY_FLG_S, 0);
3251 roce_set_field(context->qpc_bytes_188,
3252 QP_CONTEXT_QPC_BYTES_188_TX_RETRY_CUR_INDEX_M,
3253 QP_CONTEXT_QPC_BYTES_188_TX_RETRY_CUR_INDEX_S,
3255 } else if (!((cur_state == IB_QPS_INIT && new_state == IB_QPS_RESET) ||
3256 (cur_state == IB_QPS_INIT && new_state == IB_QPS_ERR) ||
3257 (cur_state == IB_QPS_RTR && new_state == IB_QPS_RESET) ||
3258 (cur_state == IB_QPS_RTR && new_state == IB_QPS_ERR) ||
3259 (cur_state == IB_QPS_RTS && new_state == IB_QPS_RESET) ||
3260 (cur_state == IB_QPS_RTS && new_state == IB_QPS_ERR) ||
3261 (cur_state == IB_QPS_ERR && new_state == IB_QPS_RESET) ||
3262 (cur_state == IB_QPS_ERR && new_state == IB_QPS_ERR))) {
		dev_err(dev, "unsupported QP state migration\n");
	/* Every state migration must update the QP state field */
3268 roce_set_field(context->qpc_bytes_144,
3269 QP_CONTEXT_QPC_BYTES_144_QP_STATE_M,
3270 QP_CONTEXT_QPC_BYTES_144_QP_STATE_S, new_state);
	/* SW passes the context to HW */
3273 ret = hns_roce_v1_qp_modify(hr_dev, &hr_qp->mtt,
3274 to_hns_roce_state(cur_state),
3275 to_hns_roce_state(new_state), context,
3278 dev_err(dev, "hns_roce_qp_modify failed\n");
	 * Since the driver uses rst2init instead of init2init, the
	 * hardware needs to flush the RQ head by doorbell again.
3286 if (cur_state == IB_QPS_INIT && new_state == IB_QPS_INIT) {
3287 /* Memory barrier */
3290 roce_set_field(doorbell[0], RQ_DOORBELL_U32_4_RQ_HEAD_M,
3291 RQ_DOORBELL_U32_4_RQ_HEAD_S, hr_qp->rq.head);
3292 roce_set_field(doorbell[1], RQ_DOORBELL_U32_8_QPN_M,
3293 RQ_DOORBELL_U32_8_QPN_S, hr_qp->qpn);
3294 roce_set_field(doorbell[1], RQ_DOORBELL_U32_8_CMD_M,
3295 RQ_DOORBELL_U32_8_CMD_S, 1);
3296 roce_set_bit(doorbell[1], RQ_DOORBELL_U32_8_HW_SYNC_S, 1);
3298 if (ibqp->uobject) {
3299 hr_qp->rq.db_reg_l = hr_dev->reg_base +
3300 hr_dev->odb_offset +
3301 DB_REG_OFFSET * hr_dev->priv_uar.index;
3304 hns_roce_write64_k(doorbell, hr_qp->rq.db_reg_l);
3307 hr_qp->state = new_state;
3309 if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
3310 hr_qp->resp_depth = attr->max_dest_rd_atomic;
3311 if (attr_mask & IB_QP_PORT) {
3312 hr_qp->port = attr->port_num - 1;
3313 hr_qp->phy_port = hr_dev->iboe.phy_port[hr_qp->port];
3316 if (new_state == IB_QPS_RESET && !ibqp->uobject) {
3317 hns_roce_v1_cq_clean(to_hr_cq(ibqp->recv_cq), hr_qp->qpn,
3318 ibqp->srq ? to_hr_srq(ibqp->srq) : NULL);
3319 if (ibqp->send_cq != ibqp->recv_cq)
3320 hns_roce_v1_cq_clean(to_hr_cq(ibqp->send_cq),
3327 hr_qp->sq_next_wqe = 0;
3334 static int hns_roce_v1_modify_qp(struct ib_qp *ibqp,
3335 const struct ib_qp_attr *attr, int attr_mask,
3336 enum ib_qp_state cur_state,
3337 enum ib_qp_state new_state)
3340 if (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_SMI)
3341 return hns_roce_v1_m_sqp(ibqp, attr, attr_mask, cur_state,
3344 return hns_roce_v1_m_qp(ibqp, attr, attr_mask, cur_state,
3348 static enum ib_qp_state to_ib_qp_state(enum hns_roce_qp_state state)
3351 case HNS_ROCE_QP_STATE_RST:
3352 return IB_QPS_RESET;
3353 case HNS_ROCE_QP_STATE_INIT:
3355 case HNS_ROCE_QP_STATE_RTR:
3357 case HNS_ROCE_QP_STATE_RTS:
3359 case HNS_ROCE_QP_STATE_SQD:
3361 case HNS_ROCE_QP_STATE_ERR:
3368 static int hns_roce_v1_query_qpc(struct hns_roce_dev *hr_dev,
3369 struct hns_roce_qp *hr_qp,
3370 struct hns_roce_qp_context *hr_context)
3372 struct hns_roce_cmd_mailbox *mailbox;
3375 mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
3376 if (IS_ERR(mailbox))
3377 return PTR_ERR(mailbox);
3379 ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, hr_qp->qpn, 0,
3380 HNS_ROCE_CMD_QUERY_QP,
3381 HNS_ROCE_CMD_TIMEOUT_MSECS);
3383 memcpy(hr_context, mailbox->buf, sizeof(*hr_context));
3385 dev_err(&hr_dev->pdev->dev, "QUERY QP cmd process error\n");
3387 hns_roce_free_cmd_mailbox(hr_dev, mailbox);
3392 static int hns_roce_v1_q_sqp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
3394 struct ib_qp_init_attr *qp_init_attr)
3396 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
3397 struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
3398 struct hns_roce_sqp_context context;
3401 mutex_lock(&hr_qp->mutex);
3403 if (hr_qp->state == IB_QPS_RESET) {
3404 qp_attr->qp_state = IB_QPS_RESET;
3408 addr = ROCEE_QP1C_CFG0_0_REG +
3409 hr_qp->port * sizeof(struct hns_roce_sqp_context);
3410 context.qp1c_bytes_4 = cpu_to_le32(roce_read(hr_dev, addr));
3411 context.sq_rq_bt_l = cpu_to_le32(roce_read(hr_dev, addr + 1));
3412 context.qp1c_bytes_12 = cpu_to_le32(roce_read(hr_dev, addr + 2));
3413 context.qp1c_bytes_16 = cpu_to_le32(roce_read(hr_dev, addr + 3));
3414 context.qp1c_bytes_20 = cpu_to_le32(roce_read(hr_dev, addr + 4));
3415 context.cur_rq_wqe_ba_l = cpu_to_le32(roce_read(hr_dev, addr + 5));
3416 context.qp1c_bytes_28 = cpu_to_le32(roce_read(hr_dev, addr + 6));
3417 context.qp1c_bytes_32 = cpu_to_le32(roce_read(hr_dev, addr + 7));
3418 context.cur_sq_wqe_ba_l = cpu_to_le32(roce_read(hr_dev, addr + 8));
3419 context.qp1c_bytes_40 = cpu_to_le32(roce_read(hr_dev, addr + 9));
3421 hr_qp->state = roce_get_field(context.qp1c_bytes_4,
3422 QP1C_BYTES_4_QP_STATE_M,
3423 QP1C_BYTES_4_QP_STATE_S);
3424 qp_attr->qp_state = hr_qp->state;
3425 qp_attr->path_mtu = IB_MTU_256;
3426 qp_attr->path_mig_state = IB_MIG_ARMED;
3427 qp_attr->qkey = QKEY_VAL;
3428 qp_attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE;
3429 qp_attr->rq_psn = 0;
3430 qp_attr->sq_psn = 0;
3431 qp_attr->dest_qp_num = 1;
3432 qp_attr->qp_access_flags = 6;
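	/* 6 == IB_ACCESS_REMOTE_WRITE | IB_ACCESS_REMOTE_READ (editor's
	 * reading of this magic number)
	 */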
3434 qp_attr->pkey_index = roce_get_field(context.qp1c_bytes_20,
3435 QP1C_BYTES_20_PKEY_IDX_M,
3436 QP1C_BYTES_20_PKEY_IDX_S);
3437 qp_attr->port_num = hr_qp->port + 1;
3438 qp_attr->sq_draining = 0;
3439 qp_attr->max_rd_atomic = 0;
3440 qp_attr->max_dest_rd_atomic = 0;
3441 qp_attr->min_rnr_timer = 0;
3442 qp_attr->timeout = 0;
3443 qp_attr->retry_cnt = 0;
3444 qp_attr->rnr_retry = 0;
3445 qp_attr->alt_timeout = 0;
3448 qp_attr->cur_qp_state = qp_attr->qp_state;
3449 qp_attr->cap.max_recv_wr = hr_qp->rq.wqe_cnt;
3450 qp_attr->cap.max_recv_sge = hr_qp->rq.max_gs;
3451 qp_attr->cap.max_send_wr = hr_qp->sq.wqe_cnt;
3452 qp_attr->cap.max_send_sge = hr_qp->sq.max_gs;
3453 qp_attr->cap.max_inline_data = 0;
3454 qp_init_attr->cap = qp_attr->cap;
3455 qp_init_attr->create_flags = 0;
3457 mutex_unlock(&hr_qp->mutex);
3462 static int hns_roce_v1_q_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
3464 struct ib_qp_init_attr *qp_init_attr)
3466 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
3467 struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
3468 struct device *dev = &hr_dev->pdev->dev;
3469 struct hns_roce_qp_context *context;
3470 int tmp_qp_state = 0;
3474 context = kzalloc(sizeof(*context), GFP_KERNEL);
3478 memset(qp_attr, 0, sizeof(*qp_attr));
3479 memset(qp_init_attr, 0, sizeof(*qp_init_attr));
3481 mutex_lock(&hr_qp->mutex);
3483 if (hr_qp->state == IB_QPS_RESET) {
3484 qp_attr->qp_state = IB_QPS_RESET;
3488 ret = hns_roce_v1_query_qpc(hr_dev, hr_qp, context);
3490 dev_err(dev, "query qpc error\n");
3495 state = roce_get_field(context->qpc_bytes_144,
3496 QP_CONTEXT_QPC_BYTES_144_QP_STATE_M,
3497 QP_CONTEXT_QPC_BYTES_144_QP_STATE_S);
3498 tmp_qp_state = (int)to_ib_qp_state((enum hns_roce_qp_state)state);
3499 if (tmp_qp_state == -1) {
3500 dev_err(dev, "to_ib_qp_state error\n");
3504 hr_qp->state = (u8)tmp_qp_state;
3505 qp_attr->qp_state = (enum ib_qp_state)hr_qp->state;
3506 qp_attr->path_mtu = (enum ib_mtu)roce_get_field(context->qpc_bytes_48,
3507 QP_CONTEXT_QPC_BYTES_48_MTU_M,
3508 QP_CONTEXT_QPC_BYTES_48_MTU_S);
3509 qp_attr->path_mig_state = IB_MIG_ARMED;
3510 qp_attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE;
3511 if (hr_qp->ibqp.qp_type == IB_QPT_UD)
3512 qp_attr->qkey = QKEY_VAL;
3514 qp_attr->rq_psn = roce_get_field(context->qpc_bytes_88,
3515 QP_CONTEXT_QPC_BYTES_88_RX_REQ_EPSN_M,
3516 QP_CONTEXT_QPC_BYTES_88_RX_REQ_EPSN_S);
3517 qp_attr->sq_psn = (u32)roce_get_field(context->qpc_bytes_164,
3518 QP_CONTEXT_QPC_BYTES_164_SQ_PSN_M,
3519 QP_CONTEXT_QPC_BYTES_164_SQ_PSN_S);
3520 qp_attr->dest_qp_num = (u8)roce_get_field(context->qpc_bytes_36,
3521 QP_CONTEXT_QPC_BYTES_36_DEST_QP_M,
3522 QP_CONTEXT_QPC_BYTES_36_DEST_QP_S);
3523 qp_attr->qp_access_flags = ((roce_get_bit(context->qpc_bytes_4,
3524 QP_CONTEXT_QPC_BYTE_4_RDMA_READ_ENABLE_S)) << 2) |
3525 ((roce_get_bit(context->qpc_bytes_4,
3526 QP_CONTEXT_QPC_BYTE_4_RDMA_WRITE_ENABLE_S)) << 1) |
3527 ((roce_get_bit(context->qpc_bytes_4,
3528 QP_CONTEXT_QPC_BYTE_4_ATOMIC_OPERATION_ENABLE_S)) << 3);
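	/* Editor's note: the shifts above rebuild the ib_access_flags
	 * bits - RDMA read enable maps to IB_ACCESS_REMOTE_READ
	 * (1 << 2), write enable to IB_ACCESS_REMOTE_WRITE (1 << 1) and
	 * atomic enable to IB_ACCESS_REMOTE_ATOMIC (1 << 3).
	 */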
3530 if (hr_qp->ibqp.qp_type == IB_QPT_RC ||
3531 hr_qp->ibqp.qp_type == IB_QPT_UC) {
3532 struct ib_global_route *grh =
3533 rdma_ah_retrieve_grh(&qp_attr->ah_attr);
3535 rdma_ah_set_sl(&qp_attr->ah_attr,
3536 roce_get_field(context->qpc_bytes_156,
3537 QP_CONTEXT_QPC_BYTES_156_SL_M,
3538 QP_CONTEXT_QPC_BYTES_156_SL_S));
3539 rdma_ah_set_ah_flags(&qp_attr->ah_attr, IB_AH_GRH);
3541 roce_get_field(context->qpc_bytes_48,
3542 QP_CONTEXT_QPC_BYTES_48_FLOWLABEL_M,
3543 QP_CONTEXT_QPC_BYTES_48_FLOWLABEL_S);
3545 roce_get_field(context->qpc_bytes_36,
3546 QP_CONTEXT_QPC_BYTES_36_SGID_INDEX_M,
3547 QP_CONTEXT_QPC_BYTES_36_SGID_INDEX_S);
3549 roce_get_field(context->qpc_bytes_44,
3550 QP_CONTEXT_QPC_BYTES_44_HOPLMT_M,
3551 QP_CONTEXT_QPC_BYTES_44_HOPLMT_S);
3552 grh->traffic_class =
3553 roce_get_field(context->qpc_bytes_48,
3554 QP_CONTEXT_QPC_BYTES_48_TCLASS_M,
3555 QP_CONTEXT_QPC_BYTES_48_TCLASS_S);
3557 memcpy(grh->dgid.raw, context->dgid,
3558 sizeof(grh->dgid.raw));
3561 qp_attr->pkey_index = roce_get_field(context->qpc_bytes_12,
3562 QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_M,
3563 QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_S);
3564 qp_attr->port_num = hr_qp->port + 1;
3565 qp_attr->sq_draining = 0;
3566 qp_attr->max_rd_atomic = 1 << roce_get_field(context->qpc_bytes_156,
3567 QP_CONTEXT_QPC_BYTES_156_INITIATOR_DEPTH_M,
3568 QP_CONTEXT_QPC_BYTES_156_INITIATOR_DEPTH_S);
3569 qp_attr->max_dest_rd_atomic = 1 << roce_get_field(context->qpc_bytes_32,
3570 QP_CONTEXT_QPC_BYTES_32_RESPONDER_RESOURCES_M,
3571 QP_CONTEXT_QPC_BYTES_32_RESPONDER_RESOURCES_S);
3572 qp_attr->min_rnr_timer = (u8)(roce_get_field(context->qpc_bytes_24,
3573 QP_CONTEXT_QPC_BYTES_24_MINIMUM_RNR_NAK_TIMER_M,
3574 QP_CONTEXT_QPC_BYTES_24_MINIMUM_RNR_NAK_TIMER_S));
3575 qp_attr->timeout = (u8)(roce_get_field(context->qpc_bytes_156,
3576 QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_M,
3577 QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_S));
3578 qp_attr->retry_cnt = roce_get_field(context->qpc_bytes_148,
3579 QP_CONTEXT_QPC_BYTES_148_RETRY_COUNT_M,
3580 QP_CONTEXT_QPC_BYTES_148_RETRY_COUNT_S);
3581 qp_attr->rnr_retry = (u8)context->rnr_retry;
3584 qp_attr->cur_qp_state = qp_attr->qp_state;
3585 qp_attr->cap.max_recv_wr = hr_qp->rq.wqe_cnt;
3586 qp_attr->cap.max_recv_sge = hr_qp->rq.max_gs;
3588 if (!ibqp->uobject) {
3589 qp_attr->cap.max_send_wr = hr_qp->sq.wqe_cnt;
3590 qp_attr->cap.max_send_sge = hr_qp->sq.max_gs;
3592 qp_attr->cap.max_send_wr = 0;
3593 qp_attr->cap.max_send_sge = 0;
3596 qp_init_attr->cap = qp_attr->cap;
3599 mutex_unlock(&hr_qp->mutex);
3604 static int hns_roce_v1_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
3606 struct ib_qp_init_attr *qp_init_attr)
3608 struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
3610 return hr_qp->doorbell_qpn <= 1 ?
3611 hns_roce_v1_q_sqp(ibqp, qp_attr, qp_attr_mask, qp_init_attr) :
3612 hns_roce_v1_q_qp(ibqp, qp_attr, qp_attr_mask, qp_init_attr);
3615 int hns_roce_v1_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
3617 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
3618 struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
3619 struct hns_roce_cq *send_cq, *recv_cq;
3622 ret = hns_roce_v1_modify_qp(ibqp, NULL, 0, hr_qp->state, IB_QPS_RESET);
3626 send_cq = to_hr_cq(hr_qp->ibqp.send_cq);
3627 recv_cq = to_hr_cq(hr_qp->ibqp.recv_cq);
3629 hns_roce_lock_cqs(send_cq, recv_cq);
3631 __hns_roce_v1_cq_clean(recv_cq, hr_qp->qpn, hr_qp->ibqp.srq ?
3632 to_hr_srq(hr_qp->ibqp.srq) : NULL);
3633 if (send_cq != recv_cq)
3634 __hns_roce_v1_cq_clean(send_cq, hr_qp->qpn, NULL);
3636 hns_roce_unlock_cqs(send_cq, recv_cq);
3638 hns_roce_qp_remove(hr_dev, hr_qp);
3639 hns_roce_qp_free(hr_dev, hr_qp);
3641 /* RC QP, release QPN */
3642 if (hr_qp->ibqp.qp_type == IB_QPT_RC)
3643 hns_roce_release_range_qp(hr_dev, hr_qp->qpn, 1);
3645 hns_roce_mtt_cleanup(hr_dev, &hr_qp->mtt);
3647 ib_umem_release(hr_qp->umem);
3649 kfree(hr_qp->sq.wrid);
3650 kfree(hr_qp->rq.wrid);
3652 hns_roce_buf_free(hr_dev, hr_qp->buff_size, &hr_qp->hr_buf);
3655 if (hr_qp->ibqp.qp_type == IB_QPT_RC)
3658 kfree(hr_to_hr_sqp(hr_qp));
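/*
 * Editor's note: the teardown order above matters - the QP is first
 * forced to RESET, then both CQs are locked (hns_roce_lock_cqs() is
 * assumed to take them in a fixed order to avoid AB-BA deadlock) before
 * the rings are scrubbed and, for RC QPs, the QPN is returned to the
 * allocator.
 */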
3662 static void hns_roce_v1_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
3664 struct hns_roce_dev *hr_dev = to_hr_dev(ibcq->device);
3665 struct hns_roce_cq *hr_cq = to_hr_cq(ibcq);
3666 struct device *dev = &hr_dev->pdev->dev;
3672 hns_roce_free_cq(hr_dev, hr_cq);
	 * Before freeing the CQ buffer, ensure that all outstanding
	 * CQEs have been written back by checking the CQE counter.
3678 cqe_cnt_ori = roce_read(hr_dev, ROCEE_SCAEP_WR_CQE_CNT);
3680 if (roce_read(hr_dev, ROCEE_CAEP_CQE_WCMD_EMPTY) &
3681 HNS_ROCE_CQE_WCMD_EMPTY_BIT)
3684 cqe_cnt_cur = roce_read(hr_dev, ROCEE_SCAEP_WR_CQE_CNT);
3685 if ((cqe_cnt_cur - cqe_cnt_ori) >= HNS_ROCE_MIN_CQE_CNT)
3688 msleep(HNS_ROCE_EACH_FREE_CQ_WAIT_MSECS);
3689 if (wait_time > HNS_ROCE_MAX_FREE_CQ_WAIT_CNT) {
3690 dev_warn(dev, "Destroy cq 0x%lx timeout!\n",
3697 hns_roce_mtt_cleanup(hr_dev, &hr_cq->hr_buf.hr_mtt);
3699 ib_umem_release(hr_cq->umem);
	/* Free the buffer of the CQ */
3702 cq_buf_size = (ibcq->cqe + 1) * hr_dev->caps.cq_entry_sz;
3703 hns_roce_buf_free(hr_dev, cq_buf_size, &hr_cq->hr_buf.hr_buf);
3707 static void set_eq_cons_index_v1(struct hns_roce_eq *eq, int req_not)
3709 roce_raw_write((eq->cons_index & HNS_ROCE_V1_CONS_IDX_M) |
3710 (req_not << eq->log_entries), eq->doorbell);
3713 static void hns_roce_v1_wq_catas_err_handle(struct hns_roce_dev *hr_dev,
3714 struct hns_roce_aeqe *aeqe, int qpn)
3716 struct device *dev = &hr_dev->pdev->dev;
3718 dev_warn(dev, "Local Work Queue Catastrophic Error.\n");
3719 switch (roce_get_field(aeqe->asyn, HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_M,
3720 HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_S)) {
3721 case HNS_ROCE_LWQCE_QPC_ERROR:
3722 dev_warn(dev, "QP %d, QPC error.\n", qpn);
3724 case HNS_ROCE_LWQCE_MTU_ERROR:
3725 dev_warn(dev, "QP %d, MTU error.\n", qpn);
3727 case HNS_ROCE_LWQCE_WQE_BA_ADDR_ERROR:
3728 dev_warn(dev, "QP %d, WQE BA addr error.\n", qpn);
3730 case HNS_ROCE_LWQCE_WQE_ADDR_ERROR:
3731 dev_warn(dev, "QP %d, WQE addr error.\n", qpn);
3733 case HNS_ROCE_LWQCE_SQ_WQE_SHIFT_ERROR:
		dev_warn(dev, "QP %d, WQE shift error.\n", qpn);
3736 case HNS_ROCE_LWQCE_SL_ERROR:
3737 dev_warn(dev, "QP %d, SL error.\n", qpn);
3739 case HNS_ROCE_LWQCE_PORT_ERROR:
3740 dev_warn(dev, "QP %d, port error.\n", qpn);
3747 static void hns_roce_v1_local_wq_access_err_handle(struct hns_roce_dev *hr_dev,
3748 struct hns_roce_aeqe *aeqe,
3751 struct device *dev = &hr_dev->pdev->dev;
3753 dev_warn(dev, "Local Access Violation Work Queue Error.\n");
3754 switch (roce_get_field(aeqe->asyn, HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_M,
3755 HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_S)) {
3756 case HNS_ROCE_LAVWQE_R_KEY_VIOLATION:
3757 dev_warn(dev, "QP %d, R_key violation.\n", qpn);
3759 case HNS_ROCE_LAVWQE_LENGTH_ERROR:
3760 dev_warn(dev, "QP %d, length error.\n", qpn);
3762 case HNS_ROCE_LAVWQE_VA_ERROR:
3763 dev_warn(dev, "QP %d, VA error.\n", qpn);
3765 case HNS_ROCE_LAVWQE_PD_ERROR:
3766 dev_err(dev, "QP %d, PD error.\n", qpn);
3768 case HNS_ROCE_LAVWQE_RW_ACC_ERROR:
3769 dev_warn(dev, "QP %d, rw acc error.\n", qpn);
3771 case HNS_ROCE_LAVWQE_KEY_STATE_ERROR:
3772 dev_warn(dev, "QP %d, key state error.\n", qpn);
3774 case HNS_ROCE_LAVWQE_MR_OPERATION_ERROR:
3775 dev_warn(dev, "QP %d, MR operation error.\n", qpn);
3782 static void hns_roce_v1_qp_err_handle(struct hns_roce_dev *hr_dev,
3783 struct hns_roce_aeqe *aeqe,
3786 struct device *dev = &hr_dev->pdev->dev;
3790 qpn = roce_get_field(aeqe->event.qp_event.qp,
3791 HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_M,
3792 HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_S);
3793 phy_port = roce_get_field(aeqe->event.qp_event.qp,
3794 HNS_ROCE_AEQE_EVENT_QP_EVENT_PORT_NUM_M,
3795 HNS_ROCE_AEQE_EVENT_QP_EVENT_PORT_NUM_S);
3797 qpn = HNS_ROCE_MAX_PORTS * qpn + phy_port;
3799 switch (event_type) {
3800 case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
3801 dev_warn(dev, "Invalid Req Local Work Queue Error.\n"
3802 "QP %d, phy_port %d.\n", qpn, phy_port);
3804 case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
3805 hns_roce_v1_wq_catas_err_handle(hr_dev, aeqe, qpn);
3807 case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
3808 hns_roce_v1_local_wq_access_err_handle(hr_dev, aeqe, qpn);
3814 hns_roce_qp_event(hr_dev, qpn, event_type);
3817 static void hns_roce_v1_cq_err_handle(struct hns_roce_dev *hr_dev,
3818 struct hns_roce_aeqe *aeqe,
3821 struct device *dev = &hr_dev->pdev->dev;
3824 cqn = roce_get_field(aeqe->event.cq_event.cq,
3825 HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_M,
3826 HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_S);
3828 switch (event_type) {
3829 case HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR:
3830 dev_warn(dev, "CQ 0x%x access err.\n", cqn);
3832 case HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW:
3833 dev_warn(dev, "CQ 0x%x overflow\n", cqn);
3835 case HNS_ROCE_EVENT_TYPE_CQ_ID_INVALID:
3836 dev_warn(dev, "CQ 0x%x ID invalid.\n", cqn);
3842 hns_roce_cq_event(hr_dev, cqn, event_type);
3845 static void hns_roce_v1_db_overflow_handle(struct hns_roce_dev *hr_dev,
3846 struct hns_roce_aeqe *aeqe)
3848 struct device *dev = &hr_dev->pdev->dev;
3850 switch (roce_get_field(aeqe->asyn, HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_M,
3851 HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_S)) {
3852 case HNS_ROCE_DB_SUBTYPE_SDB_OVF:
3853 dev_warn(dev, "SDB overflow.\n");
3855 case HNS_ROCE_DB_SUBTYPE_SDB_ALM_OVF:
3856 dev_warn(dev, "SDB almost overflow.\n");
3858 case HNS_ROCE_DB_SUBTYPE_SDB_ALM_EMP:
3859 dev_warn(dev, "SDB almost empty.\n");
3861 case HNS_ROCE_DB_SUBTYPE_ODB_OVF:
3862 dev_warn(dev, "ODB overflow.\n");
3864 case HNS_ROCE_DB_SUBTYPE_ODB_ALM_OVF:
3865 dev_warn(dev, "ODB almost overflow.\n");
3867 case HNS_ROCE_DB_SUBTYPE_ODB_ALM_EMP:
		dev_warn(dev, "ODB almost empty.\n");
3875 static struct hns_roce_aeqe *get_aeqe_v1(struct hns_roce_eq *eq, u32 entry)
3877 unsigned long off = (entry & (eq->entries - 1)) *
3878 HNS_ROCE_AEQ_ENTRY_SIZE;
3880 return (struct hns_roce_aeqe *)((u8 *)
3881 (eq->buf_list[off / HNS_ROCE_BA_SIZE].buf) +
3882 off % HNS_ROCE_BA_SIZE);
3885 static struct hns_roce_aeqe *next_aeqe_sw_v1(struct hns_roce_eq *eq)
3887 struct hns_roce_aeqe *aeqe = get_aeqe_v1(eq, eq->cons_index);
3889 return (roce_get_bit(aeqe->asyn, HNS_ROCE_AEQE_U32_4_OWNER_S) ^
3890 !!(eq->cons_index & eq->entries)) ? aeqe : NULL;
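/*
 * Editor's sketch (illustrative only, unused by the driver): the
 * ownership test above is a phase-bit ring.  The software phase flips
 * on every full lap, and an entry is fresh while its owner bit differs
 * from that phase.  Assuming a power-of-two 'entries':
 */
static inline bool __maybe_unused hns_roce_v1_eqe_is_fresh(u32 owner_bit,
							   u32 cons_index,
							   u32 entries)
{
	/* e.g. entries = 64: the phase is 0 for cons_index 0..63,
	 * 1 for 64..127, and so on.
	 */
	return owner_bit ^ !!(cons_index & entries);
}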
3893 static int hns_roce_v1_aeq_int(struct hns_roce_dev *hr_dev,
3894 struct hns_roce_eq *eq)
3896 struct device *dev = &hr_dev->pdev->dev;
3897 struct hns_roce_aeqe *aeqe;
3898 int aeqes_found = 0;
3901 while ((aeqe = next_aeqe_sw_v1(eq))) {
3903 /* Make sure we read the AEQ entry after we have checked the
3908 dev_dbg(dev, "aeqe = %pK, aeqe->asyn.event_type = 0x%lx\n",
3910 roce_get_field(aeqe->asyn,
3911 HNS_ROCE_AEQE_U32_4_EVENT_TYPE_M,
3912 HNS_ROCE_AEQE_U32_4_EVENT_TYPE_S));
3913 event_type = roce_get_field(aeqe->asyn,
3914 HNS_ROCE_AEQE_U32_4_EVENT_TYPE_M,
3915 HNS_ROCE_AEQE_U32_4_EVENT_TYPE_S);
3916 switch (event_type) {
3917 case HNS_ROCE_EVENT_TYPE_PATH_MIG:
3918 dev_warn(dev, "PATH MIG not supported\n");
3920 case HNS_ROCE_EVENT_TYPE_COMM_EST:
3921 dev_warn(dev, "COMMUNICATION established\n");
3923 case HNS_ROCE_EVENT_TYPE_SQ_DRAINED:
3924 dev_warn(dev, "SQ DRAINED not supported\n");
3926 case HNS_ROCE_EVENT_TYPE_PATH_MIG_FAILED:
3927 dev_warn(dev, "PATH MIG failed\n");
3929 case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
3930 case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
3931 case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
3932 hns_roce_v1_qp_err_handle(hr_dev, aeqe, event_type);
3934 case HNS_ROCE_EVENT_TYPE_SRQ_LIMIT_REACH:
3935 case HNS_ROCE_EVENT_TYPE_SRQ_CATAS_ERROR:
3936 case HNS_ROCE_EVENT_TYPE_SRQ_LAST_WQE_REACH:
			dev_warn(dev, "SRQ not supported!\n");
3939 case HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR:
3940 case HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW:
3941 case HNS_ROCE_EVENT_TYPE_CQ_ID_INVALID:
3942 hns_roce_v1_cq_err_handle(hr_dev, aeqe, event_type);
3944 case HNS_ROCE_EVENT_TYPE_PORT_CHANGE:
3945 dev_warn(dev, "port change.\n");
3947 case HNS_ROCE_EVENT_TYPE_MB:
3948 hns_roce_cmd_event(hr_dev,
3949 le16_to_cpu(aeqe->event.cmd.token),
3950 aeqe->event.cmd.status,
3951 le64_to_cpu(aeqe->event.cmd.out_param
3954 case HNS_ROCE_EVENT_TYPE_DB_OVERFLOW:
3955 hns_roce_v1_db_overflow_handle(hr_dev, aeqe);
3957 case HNS_ROCE_EVENT_TYPE_CEQ_OVERFLOW:
3958 dev_warn(dev, "CEQ 0x%lx overflow.\n",
3959 roce_get_field(aeqe->event.ce_event.ceqe,
3960 HNS_ROCE_AEQE_EVENT_CE_EVENT_CEQE_CEQN_M,
3961 HNS_ROCE_AEQE_EVENT_CE_EVENT_CEQE_CEQN_S));
3964 dev_warn(dev, "Unhandled event %d on EQ %d at idx %u.\n",
3965 event_type, eq->eqn, eq->cons_index);
3972 if (eq->cons_index > 2 * hr_dev->caps.aeqe_depth - 1) {
3973 dev_warn(dev, "cons_index overflow, set back to 0.\n");
3978 set_eq_cons_index_v1(eq, 0);
3983 static struct hns_roce_ceqe *get_ceqe_v1(struct hns_roce_eq *eq, u32 entry)
3985 unsigned long off = (entry & (eq->entries - 1)) *
3986 HNS_ROCE_CEQ_ENTRY_SIZE;
3988 return (struct hns_roce_ceqe *)((u8 *)
3989 (eq->buf_list[off / HNS_ROCE_BA_SIZE].buf) +
3990 off % HNS_ROCE_BA_SIZE);
3993 static struct hns_roce_ceqe *next_ceqe_sw_v1(struct hns_roce_eq *eq)
3995 struct hns_roce_ceqe *ceqe = get_ceqe_v1(eq, eq->cons_index);
3997 return (!!(roce_get_bit(ceqe->comp,
3998 HNS_ROCE_CEQE_CEQE_COMP_OWNER_S))) ^
3999 (!!(eq->cons_index & eq->entries)) ? ceqe : NULL;
4002 static int hns_roce_v1_ceq_int(struct hns_roce_dev *hr_dev,
4003 struct hns_roce_eq *eq)
4005 struct hns_roce_ceqe *ceqe;
4006 int ceqes_found = 0;
4009 while ((ceqe = next_ceqe_sw_v1(eq))) {
4011 /* Make sure we read CEQ entry after we have checked the
4016 cqn = roce_get_field(ceqe->comp,
4017 HNS_ROCE_CEQE_CEQE_COMP_CQN_M,
4018 HNS_ROCE_CEQE_CEQE_COMP_CQN_S);
4019 hns_roce_cq_completion(hr_dev, cqn);
4024 if (eq->cons_index > 2 * hr_dev->caps.ceqe_depth - 1) {
4025 dev_warn(&eq->hr_dev->pdev->dev,
4026 "cons_index overflow, set back to 0.\n");
4031 set_eq_cons_index_v1(eq, 0);
4036 static irqreturn_t hns_roce_v1_msix_interrupt_eq(int irq, void *eq_ptr)
4038 struct hns_roce_eq *eq = eq_ptr;
4039 struct hns_roce_dev *hr_dev = eq->hr_dev;
4042 if (eq->type_flag == HNS_ROCE_CEQ)
		/* CEQ IRQ routine; CEQ is a pulse IRQ, no clearing needed */
4044 int_work = hns_roce_v1_ceq_int(hr_dev, eq);
4046 /* AEQ irq routine, AEQ is pulse irq, not clear */
4047 int_work = hns_roce_v1_aeq_int(hr_dev, eq);
4049 return IRQ_RETVAL(int_work);
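/*
 * The abnormal interrupt handler below applies a mask -> clear -> unmask
 * sequence to each overflow source: the alarm is masked so it cannot
 * re-fire, its write-1-to-clear status bit is cleared, and the mask is then
 * lifted again. int_work counts the overflow conditions actually found, so
 * a spurious invocation ends up returning IRQ_NONE through
 * IRQ_RETVAL(int_work).
 */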
static irqreturn_t hns_roce_v1_msix_interrupt_abn(int irq, void *dev_id)
{
	struct hns_roce_dev *hr_dev = dev_id;
	struct device *dev = &hr_dev->pdev->dev;
	int int_work = 0;
	u32 caepaemask_val;
	u32 cealmovf_val;
	u32 caepaest_val;
	u32 aeshift_val;
	u32 ceshift_val;
	u32 cemask_val;
	__le32 tmp;
	int i;

	/*
	 * Abnormal interrupt:
	 * AEQ overflow, ECC multi-bit err, CEQ overflow must clear
	 * interrupt, mask irq, clear irq, cancel mask operation
	 */
	aeshift_val = roce_read(hr_dev, ROCEE_CAEP_AEQC_AEQE_SHIFT_REG);
	tmp = cpu_to_le32(aeshift_val);

	/* AEQE overflow */
	if (roce_get_bit(tmp,
		ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQ_ALM_OVF_INT_ST_S) == 1) {
		dev_warn(dev, "AEQ overflow!\n");
		int_work++;

		/* Set mask */
		caepaemask_val = roce_read(hr_dev, ROCEE_CAEP_AE_MASK_REG);
		tmp = cpu_to_le32(caepaemask_val);
		roce_set_bit(tmp, ROCEE_CAEP_AE_MASK_CAEP_AEQ_ALM_OVF_MASK_S,
			     HNS_ROCE_INT_MASK_ENABLE);
		caepaemask_val = le32_to_cpu(tmp);
		roce_write(hr_dev, ROCEE_CAEP_AE_MASK_REG, caepaemask_val);

		/* Clear int state(INT_WC : write 1 clear) */
		caepaest_val = roce_read(hr_dev, ROCEE_CAEP_AE_ST_REG);
		tmp = cpu_to_le32(caepaest_val);
		roce_set_bit(tmp, ROCEE_CAEP_AE_ST_CAEP_AEQ_ALM_OVF_S, 1);
		caepaest_val = le32_to_cpu(tmp);
		roce_write(hr_dev, ROCEE_CAEP_AE_ST_REG, caepaest_val);

		/* Clear mask */
		caepaemask_val = roce_read(hr_dev, ROCEE_CAEP_AE_MASK_REG);
		tmp = cpu_to_le32(caepaemask_val);
		roce_set_bit(tmp, ROCEE_CAEP_AE_MASK_CAEP_AEQ_ALM_OVF_MASK_S,
			     HNS_ROCE_INT_MASK_DISABLE);
		caepaemask_val = le32_to_cpu(tmp);
		roce_write(hr_dev, ROCEE_CAEP_AE_MASK_REG, caepaemask_val);
	}

	/* CEQ almost overflow */
	for (i = 0; i < hr_dev->caps.num_comp_vectors; i++) {
		ceshift_val = roce_read(hr_dev, ROCEE_CAEP_CEQC_SHIFT_0_REG +
					i * CEQ_REG_OFFSET);
		tmp = cpu_to_le32(ceshift_val);

		if (roce_get_bit(tmp,
			ROCEE_CAEP_CEQC_SHIFT_CAEP_CEQ_ALM_OVF_INT_ST_S) == 1) {
			dev_warn(dev, "CEQ[%d] almost overflow!\n", i);
			int_work++;

			/* Set mask */
			cemask_val = roce_read(hr_dev,
					       ROCEE_CAEP_CE_IRQ_MASK_0_REG +
					       i * CEQ_REG_OFFSET);
			tmp = cpu_to_le32(cemask_val);
			roce_set_bit(tmp,
				ROCEE_CAEP_CE_IRQ_MASK_CAEP_CEQ_ALM_OVF_MASK_S,
				HNS_ROCE_INT_MASK_ENABLE);
			cemask_val = le32_to_cpu(tmp);
			roce_write(hr_dev, ROCEE_CAEP_CE_IRQ_MASK_0_REG +
				   i * CEQ_REG_OFFSET, cemask_val);

			/* Clear int state(INT_WC : write 1 clear) */
			cealmovf_val = roce_read(hr_dev,
						 ROCEE_CAEP_CEQ_ALM_OVF_0_REG +
						 i * CEQ_REG_OFFSET);
			tmp = cpu_to_le32(cealmovf_val);
			roce_set_bit(tmp,
				     ROCEE_CAEP_CEQ_ALM_OVF_CAEP_CEQ_ALM_OVF_S,
				     1);
			cealmovf_val = le32_to_cpu(tmp);
			roce_write(hr_dev, ROCEE_CAEP_CEQ_ALM_OVF_0_REG +
				   i * CEQ_REG_OFFSET, cealmovf_val);

			/* Clear mask */
			cemask_val = roce_read(hr_dev,
					       ROCEE_CAEP_CE_IRQ_MASK_0_REG +
					       i * CEQ_REG_OFFSET);
			tmp = cpu_to_le32(cemask_val);
			roce_set_bit(tmp,
				ROCEE_CAEP_CE_IRQ_MASK_CAEP_CEQ_ALM_OVF_MASK_S,
				HNS_ROCE_INT_MASK_DISABLE);
			cemask_val = le32_to_cpu(tmp);
			roce_write(hr_dev, ROCEE_CAEP_CE_IRQ_MASK_0_REG +
				   i * CEQ_REG_OFFSET, cemask_val);
		}
	}

	/* ECC multi-bit error alarm */
	dev_warn(dev, "ECC UCERR ALARM: 0x%x, 0x%x, 0x%x\n",
		 roce_read(hr_dev, ROCEE_ECC_UCERR_ALM0_REG),
		 roce_read(hr_dev, ROCEE_ECC_UCERR_ALM1_REG),
		 roce_read(hr_dev, ROCEE_ECC_UCERR_ALM2_REG));

	dev_warn(dev, "ECC CERR ALARM: 0x%x, 0x%x, 0x%x\n",
		 roce_read(hr_dev, ROCEE_ECC_CERR_ALM0_REG),
		 roce_read(hr_dev, ROCEE_ECC_CERR_ALM1_REG),
		 roce_read(hr_dev, ROCEE_ECC_CERR_ALM2_REG));

	return IRQ_RETVAL(int_work);
}

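/*
 * hns_roce_v1_int_mask_enable() writes the same masken value (0) to the AEQ
 * mask bits and to every CEQ IRQ mask register. Whether 0 masks or unmasks
 * depends on the hardware definition of these bits; the caller in
 * hns_roce_v1_init_eq_table() uses it to quiesce the interrupts until the
 * EQs have been created and enabled.
 */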
static void hns_roce_v1_int_mask_enable(struct hns_roce_dev *hr_dev)
{
	u32 aemask_val;
	int masken = 0;
	__le32 tmp;
	int i;

	/* AEQ INT */
	aemask_val = roce_read(hr_dev, ROCEE_CAEP_AE_MASK_REG);
	tmp = cpu_to_le32(aemask_val);
	roce_set_bit(tmp, ROCEE_CAEP_AE_MASK_CAEP_AEQ_ALM_OVF_MASK_S,
		     masken);
	roce_set_bit(tmp, ROCEE_CAEP_AE_MASK_CAEP_AE_IRQ_MASK_S, masken);
	aemask_val = le32_to_cpu(tmp);
	roce_write(hr_dev, ROCEE_CAEP_AE_MASK_REG, aemask_val);

	/* CEQ INT */
	for (i = 0; i < hr_dev->caps.num_comp_vectors; i++) {
		/* IRQ mask */
		roce_write(hr_dev, ROCEE_CAEP_CE_IRQ_MASK_0_REG +
			   i * CEQ_REG_OFFSET, masken);
	}
}

static void hns_roce_v1_free_eq(struct hns_roce_dev *hr_dev,
				struct hns_roce_eq *eq)
{
	int npages = (PAGE_ALIGN(eq->eqe_size * eq->entries) +
		      HNS_ROCE_BA_SIZE - 1) / HNS_ROCE_BA_SIZE;
	int i;

	if (!eq->buf_list)
		return;

	for (i = 0; i < npages; ++i)
		dma_free_coherent(&hr_dev->pdev->dev, HNS_ROCE_BA_SIZE,
				  eq->buf_list[i].buf, eq->buf_list[i].map);

	kfree(eq->buf_list);
}

static void hns_roce_v1_enable_eq(struct hns_roce_dev *hr_dev, int eq_num,
				  int enable_flag)
{
	void __iomem *eqc = hr_dev->eq_table.eqc_base[eq_num];
	__le32 tmp;
	u32 val;

	val = readl(eqc);
	tmp = cpu_to_le32(val);

	if (enable_flag)
		roce_set_field(tmp,
			       ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_M,
			       ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_S,
			       HNS_ROCE_EQ_STAT_VALID);
	else
		roce_set_field(tmp,
			       ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_M,
			       ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_S,
			       HNS_ROCE_EQ_STAT_INVALID);

	val = le32_to_cpu(tmp);
	writel(val, eqc);
}

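/*
 * hns_roce_v1_create_eq() below programs the EQ context through four
 * consecutive 32-bit registers at the eqc base: +0x0 holds the state and
 * entry-count shift, +0x4 the buffer address bits above the 4K page offset,
 * +0x8 the remaining high address bits plus the current (producer) index,
 * and +0xc the consumer index.
 */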
static int hns_roce_v1_create_eq(struct hns_roce_dev *hr_dev,
				 struct hns_roce_eq *eq)
{
	void __iomem *eqc = hr_dev->eq_table.eqc_base[eq->eqn];
	struct device *dev = &hr_dev->pdev->dev;
	dma_addr_t tmp_dma_addr;
	u32 eqconsindx_val = 0;
	u32 eqcuridx_val = 0;
	u32 eqshift_val = 0;
	__le32 tmp2 = 0;
	__le32 tmp1 = 0;
	__le32 tmp = 0;
	int num_bas;
	int ret;
	int i;

	num_bas = (PAGE_ALIGN(eq->entries * eq->eqe_size) +
		   HNS_ROCE_BA_SIZE - 1) / HNS_ROCE_BA_SIZE;

	if ((eq->entries * eq->eqe_size) > HNS_ROCE_BA_SIZE) {
		dev_err(dev, "[error]eq buf %d gt ba size(%d) need bas=%d\n",
			(eq->entries * eq->eqe_size), HNS_ROCE_BA_SIZE,
			num_bas);
		return -EINVAL;
	}

	eq->buf_list = kcalloc(num_bas, sizeof(*eq->buf_list), GFP_KERNEL);
	if (!eq->buf_list)
		return -ENOMEM;

	for (i = 0; i < num_bas; ++i) {
		eq->buf_list[i].buf = dma_alloc_coherent(dev, HNS_ROCE_BA_SIZE,
							 &tmp_dma_addr,
							 GFP_KERNEL);
		if (!eq->buf_list[i].buf) {
			ret = -ENOMEM;
			goto err_out_free_pages;
		}

		eq->buf_list[i].map = tmp_dma_addr;
	}
	eq->cons_index = 0;
	roce_set_field(tmp, ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_M,
		       ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_S,
		       HNS_ROCE_EQ_STAT_INVALID);
	roce_set_field(tmp, ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_AEQE_SHIFT_M,
		       ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_AEQE_SHIFT_S,
		       eq->log_entries);
	eqshift_val = le32_to_cpu(tmp);
	writel(eqshift_val, eqc);

	/* Configure eq extended address 12~44bit */
	writel((u32)(eq->buf_list[0].map >> 12), eqc + 4);

	/*
	 * Configure eq extended address 45~49 bit.
	 * 44 = 32 + 12. When passing the address to hardware, shift by 12
	 * because a 4K page is used, and shift by a further 32 because the
	 * high 32-bit half of the value is being computed here.
	 */
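	/*
	 * Worked example with a made-up address: for map = 0x123456789000,
	 * map >> 12 = 0x123456789, whose low 32 bits (0x23456789) were
	 * written at eqc + 4 above, while map >> 44 = 0x1 supplies the
	 * remaining high bits through the BT_H field set just below.
	 */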
	roce_set_field(tmp1, ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQ_BT_H_M,
		       ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQ_BT_H_S,
		       eq->buf_list[0].map >> 44);
	roce_set_field(tmp1, ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQE_CUR_IDX_M,
		       ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQE_CUR_IDX_S, 0);
	eqcuridx_val = le32_to_cpu(tmp1);
	writel(eqcuridx_val, eqc + 8);

	/* Configure eq consumer index */
	roce_set_field(tmp2, ROCEE_CAEP_AEQE_CONS_IDX_CAEP_AEQE_CONS_IDX_M,
		       ROCEE_CAEP_AEQE_CONS_IDX_CAEP_AEQE_CONS_IDX_S, 0);
	eqconsindx_val = le32_to_cpu(tmp2);
	writel(eqconsindx_val, eqc + 0xc);

	return 0;

err_out_free_pages:
	for (i -= 1; i >= 0; i--)
		dma_free_coherent(dev, HNS_ROCE_BA_SIZE, eq->buf_list[i].buf,
				  eq->buf_list[i].map);

	kfree(eq->buf_list);
	return ret;
}

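/*
 * Vector layout assumed by hns_roce_v1_init_eq_table(): indices
 * 0 .. num_comp_vectors - 1 are CEQs (one per completion vector), the next
 * num_aeq_vectors are AEQs, and the remaining num_other_vectors IRQs are
 * routed to the abnormal interrupt handler instead of an event queue.
 */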
static int hns_roce_v1_init_eq_table(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_eq_table *eq_table = &hr_dev->eq_table;
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_eq *eq;
	int irq_num;
	int eq_num;
	int ret = 0;
	int i, j;

	eq_num = hr_dev->caps.num_comp_vectors + hr_dev->caps.num_aeq_vectors;
	irq_num = eq_num + hr_dev->caps.num_other_vectors;

	eq_table->eq = kcalloc(eq_num, sizeof(*eq_table->eq), GFP_KERNEL);
	if (!eq_table->eq)
		return -ENOMEM;

	eq_table->eqc_base = kcalloc(eq_num, sizeof(*eq_table->eqc_base),
				     GFP_KERNEL);
	if (!eq_table->eqc_base) {
		ret = -ENOMEM;
		goto err_eqc_base_alloc_fail;
	}

	for (i = 0; i < eq_num; i++) {
		eq = &eq_table->eq[i];
		eq->hr_dev = hr_dev;
		eq->eqn = i;
		eq->irq = hr_dev->irq[i];
		eq->log_page_size = PAGE_SHIFT;

		if (i < hr_dev->caps.num_comp_vectors) {
			/* CEQ */
			eq_table->eqc_base[i] = hr_dev->reg_base +
						ROCEE_CAEP_CEQC_SHIFT_0_REG +
						CEQ_REG_OFFSET * i;
			eq->type_flag = HNS_ROCE_CEQ;
			eq->doorbell = hr_dev->reg_base +
				       ROCEE_CAEP_CEQC_CONS_IDX_0_REG +
				       CEQ_REG_OFFSET * i;
			eq->entries = hr_dev->caps.ceqe_depth;
			eq->log_entries = ilog2(eq->entries);
			eq->eqe_size = HNS_ROCE_CEQ_ENTRY_SIZE;
		} else {
			/* AEQ */
			eq_table->eqc_base[i] = hr_dev->reg_base +
						ROCEE_CAEP_AEQC_AEQE_SHIFT_REG;
			eq->type_flag = HNS_ROCE_AEQ;
			eq->doorbell = hr_dev->reg_base +
				       ROCEE_CAEP_AEQE_CONS_IDX_REG;
			eq->entries = hr_dev->caps.aeqe_depth;
			eq->log_entries = ilog2(eq->entries);
			eq->eqe_size = HNS_ROCE_AEQ_ENTRY_SIZE;
		}
	}

	/* Disable irq */
	hns_roce_v1_int_mask_enable(hr_dev);

	/* Configure ce int interval */
	roce_write(hr_dev, ROCEE_CAEP_CE_INTERVAL_CFG_REG,
		   HNS_ROCE_CEQ_DEFAULT_INTERVAL);

	/* Configure ce int burst num */
	roce_write(hr_dev, ROCEE_CAEP_CE_BURST_NUM_CFG_REG,
		   HNS_ROCE_CEQ_DEFAULT_BURST_NUM);

	for (i = 0; i < eq_num; i++) {
		ret = hns_roce_v1_create_eq(hr_dev, &eq_table->eq[i]);
		if (ret) {
			dev_err(dev, "eq create failed\n");
			goto err_create_eq_fail;
		}
	}

	for (j = 0; j < irq_num; j++) {
		if (j < eq_num)
			ret = request_irq(hr_dev->irq[j],
					  hns_roce_v1_msix_interrupt_eq, 0,
					  hr_dev->irq_names[j],
					  &eq_table->eq[j]);
		else
			ret = request_irq(hr_dev->irq[j],
					  hns_roce_v1_msix_interrupt_abn, 0,
					  hr_dev->irq_names[j], hr_dev);
		if (ret) {
			dev_err(dev, "request irq error!\n");
			goto err_request_irq_fail;
		}
	}

	for (i = 0; i < eq_num; i++)
		hns_roce_v1_enable_eq(hr_dev, i, EQ_ENABLE);

	return 0;

err_request_irq_fail:
	for (j -= 1; j >= 0; j--)
		free_irq(hr_dev->irq[j], &eq_table->eq[j]);

err_create_eq_fail:
	for (i -= 1; i >= 0; i--)
		hns_roce_v1_free_eq(hr_dev, &eq_table->eq[i]);

	kfree(eq_table->eqc_base);

err_eqc_base_alloc_fail:
	kfree(eq_table->eq);

	return ret;
}

static void hns_roce_v1_cleanup_eq_table(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_eq_table *eq_table = &hr_dev->eq_table;
	int irq_num;
	int eq_num;
	int i;

	eq_num = hr_dev->caps.num_comp_vectors + hr_dev->caps.num_aeq_vectors;
	irq_num = eq_num + hr_dev->caps.num_other_vectors;
	for (i = 0; i < eq_num; i++) {
		/* Disable EQ */
		hns_roce_v1_enable_eq(hr_dev, i, EQ_DISABLE);

		free_irq(hr_dev->irq[i], &eq_table->eq[i]);

		hns_roce_v1_free_eq(hr_dev, &eq_table->eq[i]);
	}
	for (i = eq_num; i < irq_num; i++)
		free_irq(hr_dev->irq[i], hr_dev);

	kfree(eq_table->eqc_base);
	kfree(eq_table->eq);
}

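/*
 * Two dispatch tables tie the hip06 implementation into the stack:
 * hns_roce_v1_dev_ops carries the ib_device verbs that the common hns_roce
 * code installs on the ib_device (via ib_set_device_ops()), while
 * hns_roce_hw_v1 is the driver-internal hook table that the common code
 * calls directly.
 */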
static const struct ib_device_ops hns_roce_v1_dev_ops = {
	.destroy_qp = hns_roce_v1_destroy_qp,
	.modify_cq = hns_roce_v1_modify_cq,
	.poll_cq = hns_roce_v1_poll_cq,
	.post_recv = hns_roce_v1_post_recv,
	.post_send = hns_roce_v1_post_send,
	.query_qp = hns_roce_v1_query_qp,
	.req_notify_cq = hns_roce_v1_req_notify_cq,
};

static const struct hns_roce_hw hns_roce_hw_v1 = {
	.reset = hns_roce_v1_reset,
	.hw_profile = hns_roce_v1_profile,
	.hw_init = hns_roce_v1_init,
	.hw_exit = hns_roce_v1_exit,
	.post_mbox = hns_roce_v1_post_mbox,
	.chk_mbox = hns_roce_v1_chk_mbox,
	.set_gid = hns_roce_v1_set_gid,
	.set_mac = hns_roce_v1_set_mac,
	.set_mtu = hns_roce_v1_set_mtu,
	.write_mtpt = hns_roce_v1_write_mtpt,
	.write_cqc = hns_roce_v1_write_cqc,
	.modify_cq = hns_roce_v1_modify_cq,
	.clear_hem = hns_roce_v1_clear_hem,
	.modify_qp = hns_roce_v1_modify_qp,
	.query_qp = hns_roce_v1_query_qp,
	.destroy_qp = hns_roce_v1_destroy_qp,
	.post_send = hns_roce_v1_post_send,
	.post_recv = hns_roce_v1_post_recv,
	.req_notify_cq = hns_roce_v1_req_notify_cq,
	.poll_cq = hns_roce_v1_poll_cq,
	.dereg_mr = hns_roce_v1_dereg_mr,
	.destroy_cq = hns_roce_v1_destroy_cq,
	.init_eq = hns_roce_v1_init_eq_table,
	.cleanup_eq = hns_roce_v1_cleanup_eq_table,
	.hns_roce_dev_ops = &hns_roce_v1_dev_ops,
};

static const struct of_device_id hns_roce_of_match[] = {
	{ .compatible = "hisilicon,hns-roce-v1", .data = &hns_roce_hw_v1, },
	{},
};
MODULE_DEVICE_TABLE(of, hns_roce_of_match);

static const struct acpi_device_id hns_roce_acpi_match[] = {
	{ "HISI00D1", (kernel_ulong_t)&hns_roce_hw_v1 },
	{},
};
MODULE_DEVICE_TABLE(acpi, hns_roce_acpi_match);

static int hns_roce_node_match(struct device *dev, const void *fwnode)
{
	return dev->fwnode == fwnode;
}

static struct
platform_device *hns_roce_find_pdev(struct fwnode_handle *fwnode)
{
	struct device *dev;

	/* get the 'device' corresponding to the matching 'fwnode' */
	dev = bus_find_device(&platform_bus_type, NULL,
			      fwnode, hns_roce_node_match);
	/* get the platform device */
	return dev ? to_platform_device(dev) : NULL;
}

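/*
 * hns_roce_find_pdev() maps a firmware node back to its platform device by
 * walking the platform bus and comparing each device's fwnode pointer;
 * hns_roce_get_cfg() below relies on it to resolve ACPI "eth-handle"
 * references into the netdevs backing each RoCE port.
 */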
static int hns_roce_get_cfg(struct hns_roce_dev *hr_dev)
{
	struct device *dev = &hr_dev->pdev->dev;
	struct platform_device *pdev = NULL;
	struct net_device *netdev = NULL;
	struct device_node *net_node;
	struct resource *res;
	int port_cnt = 0;
	u8 phy_port;
	int ret;
	int i;

	/* check if we are compatible with the underlying SoC */
	if (dev_of_node(dev)) {
		const struct of_device_id *of_id;

		of_id = of_match_node(hns_roce_of_match, dev->of_node);
		if (!of_id) {
			dev_err(dev, "device is not compatible!\n");
			return -ENXIO;
		}
		hr_dev->hw = (const struct hns_roce_hw *)of_id->data;
		if (!hr_dev->hw) {
			dev_err(dev, "couldn't get H/W specific DT data!\n");
			return -ENXIO;
		}
	} else if (is_acpi_device_node(dev->fwnode)) {
		const struct acpi_device_id *acpi_id;

		acpi_id = acpi_match_device(hns_roce_acpi_match, dev);
		if (!acpi_id) {
			dev_err(dev, "device is not compatible!\n");
			return -ENXIO;
		}
		hr_dev->hw = (const struct hns_roce_hw *)acpi_id->driver_data;
		if (!hr_dev->hw) {
			dev_err(dev, "couldn't get H/W specific ACPI data!\n");
			return -ENXIO;
		}
	} else {
		dev_err(dev, "can't read compatibility data from DT or ACPI\n");
		return -ENXIO;
	}

	/* get the mapped register base address */
	res = platform_get_resource(hr_dev->pdev, IORESOURCE_MEM, 0);
	hr_dev->reg_base = devm_ioremap_resource(dev, res);
	if (IS_ERR(hr_dev->reg_base))
		return PTR_ERR(hr_dev->reg_base);

	/* read the node_guid of IB device from the DT or ACPI */
	ret = device_property_read_u8_array(dev, "node-guid",
					    (u8 *)&hr_dev->ib_dev.node_guid,
					    GUID_LEN);
	if (ret) {
		dev_err(dev, "couldn't get node_guid from DT or ACPI!\n");
		return ret;
	}

	/* get the RoCE associated ethernet ports or netdevices */
	for (i = 0; i < HNS_ROCE_MAX_PORTS; i++) {
		if (dev_of_node(dev)) {
			net_node = of_parse_phandle(dev->of_node, "eth-handle",
						    i);
			if (!net_node)
				continue;
			pdev = of_find_device_by_node(net_node);
		} else if (is_acpi_device_node(dev->fwnode)) {
			struct fwnode_reference_args args;

			ret = acpi_node_get_property_reference(dev->fwnode,
							       "eth-handle",
							       i, &args);
			if (ret)
				continue;
			pdev = hns_roce_find_pdev(args.fwnode);
		} else {
			dev_err(dev, "cannot read data from DT or ACPI\n");
			return -ENXIO;
		}

		if (pdev) {
			netdev = platform_get_drvdata(pdev);
			phy_port = (u8)i;
			if (netdev) {
				hr_dev->iboe.netdevs[port_cnt] = netdev;
				hr_dev->iboe.phy_port[port_cnt] = phy_port;
			} else {
				dev_err(dev, "no netdev found with pdev %s\n",
					pdev->name);
				return -ENODEV;
			}
			port_cnt++;
		}
	}

	if (port_cnt == 0) {
		dev_err(dev, "unable to get eth-handle for available ports!\n");
		return -EINVAL;
	}

	hr_dev->caps.num_ports = port_cnt;

	/* cmd issue mode: 0 is poll, 1 is event */
	hr_dev->cmd_mod = 1;
	hr_dev->loop_idc = 0;
	hr_dev->sdb_offset = ROCEE_DB_SQ_L_0_REG;
	hr_dev->odb_offset = ROCEE_DB_OTHERS_L_0_REG;

	/* read the interrupt names from the DT or ACPI */
	ret = device_property_read_string_array(dev, "interrupt-names",
						hr_dev->irq_names,
						HNS_ROCE_V1_MAX_IRQ_NUM);
	if (ret < 0) {
		dev_err(dev, "couldn't get interrupt names from DT or ACPI!\n");
		return ret;
	}

	/* fetch the interrupt numbers */
	for (i = 0; i < HNS_ROCE_V1_MAX_IRQ_NUM; i++) {
		hr_dev->irq[i] = platform_get_irq(hr_dev->pdev, i);
		if (hr_dev->irq[i] <= 0)
			return -EINVAL;
	}

	return 0;
}

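/*
 * Probe sequence: allocate the ib_device plus the v1 private data, set a
 * 64-bit DMA mask (falling back to 32-bit), read the platform
 * configuration, and finally hand off to the common hns_roce_init().
 */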
/**
 * hns_roce_probe - RoCE driver entrance
 * @pdev: pointer to platform device
 * Return: int
 */
static int hns_roce_probe(struct platform_device *pdev)
{
	int ret;
	struct hns_roce_dev *hr_dev;
	struct device *dev = &pdev->dev;

	hr_dev = ib_alloc_device(hns_roce_dev, ib_dev);
	if (!hr_dev)
		return -ENOMEM;

	hr_dev->priv = kzalloc(sizeof(struct hns_roce_v1_priv), GFP_KERNEL);
	if (!hr_dev->priv) {
		ret = -ENOMEM;
		goto error_failed_kzalloc;
	}

	hr_dev->pdev = pdev;
	hr_dev->dev = dev;
	platform_set_drvdata(pdev, hr_dev);

	if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64ULL)) &&
	    dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32ULL))) {
		dev_err(dev, "No usable DMA addressing mode\n");
		ret = -EIO;
		goto error_failed_get_cfg;
	}

	ret = hns_roce_get_cfg(hr_dev);
	if (ret) {
		dev_err(dev, "Get Configuration failed!\n");
		goto error_failed_get_cfg;
	}

	ret = hns_roce_init(hr_dev);
	if (ret) {
		dev_err(dev, "RoCE engine init failed!\n");
		goto error_failed_get_cfg;
	}

	return 0;

error_failed_get_cfg:
	kfree(hr_dev->priv);

error_failed_kzalloc:
	ib_dealloc_device(&hr_dev->ib_dev);

	return ret;
}

/**
 * hns_roce_remove - remove RoCE device
 * @pdev: pointer to platform device
 */
static int hns_roce_remove(struct platform_device *pdev)
{
	struct hns_roce_dev *hr_dev = platform_get_drvdata(pdev);

	hns_roce_exit(hr_dev);
	kfree(hr_dev->priv);
	ib_dealloc_device(&hr_dev->ib_dev);

	return 0;
}

static struct platform_driver hns_roce_driver = {
	.probe = hns_roce_probe,
	.remove = hns_roce_remove,
	.driver = {
		.name = DRV_NAME,
		.of_match_table = hns_roce_of_match,
		.acpi_match_table = ACPI_PTR(hns_roce_acpi_match),
	},
};

module_platform_driver(hns_roce_driver);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Wei Hu <xavier.huwei@huawei.com>");
MODULE_AUTHOR("Nenglong Zhao <zhaonenglong@hisilicon.com>");
MODULE_AUTHOR("Lijun Ou <oulijun@huawei.com>");
MODULE_DESCRIPTION("Hisilicon Hip06 Family RoCE Driver");