RDMA: Check attr_mask during modify_qp
[linux-2.6-block.git] drivers/infiniband/hw/hns/hns_roce_hw_v1.c
/*
 * Copyright (c) 2016 Hisilicon Limited.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/platform_device.h>
#include <linux/acpi.h>
#include <linux/etherdevice.h>
#include <linux/interrupt.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <rdma/ib_umem.h>
#include "hns_roce_common.h"
#include "hns_roce_device.h"
#include "hns_roce_cmd.h"
#include "hns_roce_hem.h"
#include "hns_roce_hw_v1.h"

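/*
 * Helpers that pack an ib_sge or a remote address/rkey pair into the
 * little-endian data and remote-address segments embedded in a WQE;
 * all fields are byte-swapped with cpu_to_le*() before being written
 * into the queue buffer.
 */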
static void set_data_seg(struct hns_roce_wqe_data_seg *dseg, struct ib_sge *sg)
{
        dseg->lkey = cpu_to_le32(sg->lkey);
        dseg->addr = cpu_to_le64(sg->addr);
        dseg->len  = cpu_to_le32(sg->length);
}

static void set_raddr_seg(struct hns_roce_wqe_raddr_seg *rseg, u64 remote_addr,
                          u32 rkey)
{
        rseg->raddr = cpu_to_le64(remote_addr);
        rseg->rkey  = cpu_to_le32(rkey);
        rseg->len   = 0;
}

static int hns_roce_v1_post_send(struct ib_qp *ibqp,
                                 const struct ib_send_wr *wr,
                                 const struct ib_send_wr **bad_wr)
{
        struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
        struct hns_roce_ah *ah = to_hr_ah(ud_wr(wr)->ah);
        struct hns_roce_ud_send_wqe *ud_sq_wqe = NULL;
        struct hns_roce_wqe_ctrl_seg *ctrl = NULL;
        struct hns_roce_wqe_data_seg *dseg = NULL;
        struct hns_roce_qp *qp = to_hr_qp(ibqp);
        struct device *dev = &hr_dev->pdev->dev;
        struct hns_roce_sq_db sq_db = {};
        int ps_opcode, i;
        unsigned long flags = 0;
        void *wqe = NULL;
        __le32 doorbell[2];
        int ret = 0;
        int loopback;
        u32 wqe_idx;
        int nreq;
        u8 *smac;

        if (unlikely(ibqp->qp_type != IB_QPT_GSI &&
                ibqp->qp_type != IB_QPT_RC)) {
                dev_err(dev, "unsupported QP type\n");
                *bad_wr = NULL;
                return -EOPNOTSUPP;
        }

        spin_lock_irqsave(&qp->sq.lock, flags);

        for (nreq = 0; wr; ++nreq, wr = wr->next) {
                if (hns_roce_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) {
                        ret = -ENOMEM;
                        *bad_wr = wr;
                        goto out;
                }

                wqe_idx = (qp->sq.head + nreq) & (qp->sq.wqe_cnt - 1);

                if (unlikely(wr->num_sge > qp->sq.max_gs)) {
                        dev_err(dev, "num_sge=%d > qp->sq.max_gs=%d\n",
                                wr->num_sge, qp->sq.max_gs);
                        ret = -EINVAL;
                        *bad_wr = wr;
                        goto out;
                }

                wqe = hns_roce_get_send_wqe(qp, wqe_idx);
                qp->sq.wrid[wqe_idx] = wr->wr_id;

                /* UD (GSI) and RC WQEs are built separately below */
                if (ibqp->qp_type == IB_QPT_GSI) {
                        ud_sq_wqe = wqe;
                        roce_set_field(ud_sq_wqe->dmac_h,
                                       UD_SEND_WQE_U32_4_DMAC_0_M,
                                       UD_SEND_WQE_U32_4_DMAC_0_S,
                                       ah->av.mac[0]);
                        roce_set_field(ud_sq_wqe->dmac_h,
                                       UD_SEND_WQE_U32_4_DMAC_1_M,
                                       UD_SEND_WQE_U32_4_DMAC_1_S,
                                       ah->av.mac[1]);
                        roce_set_field(ud_sq_wqe->dmac_h,
                                       UD_SEND_WQE_U32_4_DMAC_2_M,
                                       UD_SEND_WQE_U32_4_DMAC_2_S,
                                       ah->av.mac[2]);
                        roce_set_field(ud_sq_wqe->dmac_h,
                                       UD_SEND_WQE_U32_4_DMAC_3_M,
                                       UD_SEND_WQE_U32_4_DMAC_3_S,
                                       ah->av.mac[3]);

                        roce_set_field(ud_sq_wqe->u32_8,
                                       UD_SEND_WQE_U32_8_DMAC_4_M,
                                       UD_SEND_WQE_U32_8_DMAC_4_S,
                                       ah->av.mac[4]);
                        roce_set_field(ud_sq_wqe->u32_8,
                                       UD_SEND_WQE_U32_8_DMAC_5_M,
                                       UD_SEND_WQE_U32_8_DMAC_5_S,
                                       ah->av.mac[5]);

                        smac = (u8 *)hr_dev->dev_addr[qp->port];
                        loopback = ether_addr_equal_unaligned(ah->av.mac,
                                                              smac) ? 1 : 0;
                        roce_set_bit(ud_sq_wqe->u32_8,
                                     UD_SEND_WQE_U32_8_LOOPBACK_INDICATOR_S,
                                     loopback);

                        roce_set_field(ud_sq_wqe->u32_8,
                                       UD_SEND_WQE_U32_8_OPERATION_TYPE_M,
                                       UD_SEND_WQE_U32_8_OPERATION_TYPE_S,
                                       HNS_ROCE_WQE_OPCODE_SEND);
                        roce_set_field(ud_sq_wqe->u32_8,
                                       UD_SEND_WQE_U32_8_NUMBER_OF_DATA_SEG_M,
                                       UD_SEND_WQE_U32_8_NUMBER_OF_DATA_SEG_S,
                                       2);
                        roce_set_bit(ud_sq_wqe->u32_8,
                                UD_SEND_WQE_U32_8_SEND_GL_ROUTING_HDR_FLAG_S,
                                1);

                        ud_sq_wqe->u32_8 |= (wr->send_flags & IB_SEND_SIGNALED ?
                                cpu_to_le32(HNS_ROCE_WQE_CQ_NOTIFY) : 0) |
                                (wr->send_flags & IB_SEND_SOLICITED ?
                                cpu_to_le32(HNS_ROCE_WQE_SE) : 0) |
                                ((wr->opcode == IB_WR_SEND_WITH_IMM) ?
                                cpu_to_le32(HNS_ROCE_WQE_IMM) : 0);

                        roce_set_field(ud_sq_wqe->u32_16,
                                       UD_SEND_WQE_U32_16_DEST_QP_M,
                                       UD_SEND_WQE_U32_16_DEST_QP_S,
                                       ud_wr(wr)->remote_qpn);
                        roce_set_field(ud_sq_wqe->u32_16,
                                       UD_SEND_WQE_U32_16_MAX_STATIC_RATE_M,
                                       UD_SEND_WQE_U32_16_MAX_STATIC_RATE_S,
                                       ah->av.stat_rate);

                        roce_set_field(ud_sq_wqe->u32_36,
                                       UD_SEND_WQE_U32_36_FLOW_LABEL_M,
                                       UD_SEND_WQE_U32_36_FLOW_LABEL_S,
                                       ah->av.flowlabel);
                        roce_set_field(ud_sq_wqe->u32_36,
                                       UD_SEND_WQE_U32_36_PRIORITY_M,
                                       UD_SEND_WQE_U32_36_PRIORITY_S,
                                       ah->av.sl);
                        roce_set_field(ud_sq_wqe->u32_36,
                                       UD_SEND_WQE_U32_36_SGID_INDEX_M,
                                       UD_SEND_WQE_U32_36_SGID_INDEX_S,
                                       hns_get_gid_index(hr_dev, qp->phy_port,
                                                         ah->av.gid_index));

                        roce_set_field(ud_sq_wqe->u32_40,
                                       UD_SEND_WQE_U32_40_HOP_LIMIT_M,
                                       UD_SEND_WQE_U32_40_HOP_LIMIT_S,
                                       ah->av.hop_limit);
                        roce_set_field(ud_sq_wqe->u32_40,
                                       UD_SEND_WQE_U32_40_TRAFFIC_CLASS_M,
                                       UD_SEND_WQE_U32_40_TRAFFIC_CLASS_S,
                                       ah->av.tclass);

                        memcpy(&ud_sq_wqe->dgid[0], &ah->av.dgid[0], GID_LEN);

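                        /*
                         * The hip06 UD WQE carries exactly two SGE slots
                         * (va0/va1 below), which is why NUMBER_OF_DATA_SEG
                         * is hard-coded to 2 above.
                         */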
                        ud_sq_wqe->va0_l =
                                       cpu_to_le32((u32)wr->sg_list[0].addr);
                        ud_sq_wqe->va0_h =
                                       cpu_to_le32((wr->sg_list[0].addr) >> 32);
                        ud_sq_wqe->l_key0 =
                                       cpu_to_le32(wr->sg_list[0].lkey);

                        ud_sq_wqe->va1_l =
                                       cpu_to_le32((u32)wr->sg_list[1].addr);
                        ud_sq_wqe->va1_h =
                                       cpu_to_le32((wr->sg_list[1].addr) >> 32);
                        ud_sq_wqe->l_key1 =
                                       cpu_to_le32(wr->sg_list[1].lkey);
                } else if (ibqp->qp_type == IB_QPT_RC) {
                        u32 tmp_len = 0;

                        ctrl = wqe;
                        memset(ctrl, 0, sizeof(struct hns_roce_wqe_ctrl_seg));
                        for (i = 0; i < wr->num_sge; i++)
                                tmp_len += wr->sg_list[i].length;

                        ctrl->msg_length =
                          cpu_to_le32(le32_to_cpu(ctrl->msg_length) + tmp_len);

                        ctrl->sgl_pa_h = 0;
                        ctrl->flag = 0;

                        switch (wr->opcode) {
                        case IB_WR_SEND_WITH_IMM:
                        case IB_WR_RDMA_WRITE_WITH_IMM:
                                ctrl->imm_data = wr->ex.imm_data;
                                break;
                        case IB_WR_SEND_WITH_INV:
                                ctrl->inv_key =
                                        cpu_to_le32(wr->ex.invalidate_rkey);
                                break;
                        default:
                                ctrl->imm_data = 0;
                                break;
                        }

                        /* Ctrl field, ctrl set type: sig, solic, imm, fence */
                        /* SO wait for conforming application scenarios */
                        ctrl->flag |= (wr->send_flags & IB_SEND_SIGNALED ?
                                      cpu_to_le32(HNS_ROCE_WQE_CQ_NOTIFY) : 0) |
                                      (wr->send_flags & IB_SEND_SOLICITED ?
                                      cpu_to_le32(HNS_ROCE_WQE_SE) : 0) |
                                      ((wr->opcode == IB_WR_SEND_WITH_IMM ||
                                      wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM) ?
                                      cpu_to_le32(HNS_ROCE_WQE_IMM) : 0) |
                                      (wr->send_flags & IB_SEND_FENCE ?
                                      (cpu_to_le32(HNS_ROCE_WQE_FENCE)) : 0);

                        wqe += sizeof(struct hns_roce_wqe_ctrl_seg);

                        switch (wr->opcode) {
                        case IB_WR_RDMA_READ:
                                ps_opcode = HNS_ROCE_WQE_OPCODE_RDMA_READ;
                                set_raddr_seg(wqe, rdma_wr(wr)->remote_addr,
                                              rdma_wr(wr)->rkey);
                                break;
                        case IB_WR_RDMA_WRITE:
                        case IB_WR_RDMA_WRITE_WITH_IMM:
                                ps_opcode = HNS_ROCE_WQE_OPCODE_RDMA_WRITE;
                                set_raddr_seg(wqe, rdma_wr(wr)->remote_addr,
                                              rdma_wr(wr)->rkey);
                                break;
                        case IB_WR_SEND:
                        case IB_WR_SEND_WITH_INV:
                        case IB_WR_SEND_WITH_IMM:
                                ps_opcode = HNS_ROCE_WQE_OPCODE_SEND;
                                break;
                        case IB_WR_LOCAL_INV:
                        case IB_WR_ATOMIC_CMP_AND_SWP:
                        case IB_WR_ATOMIC_FETCH_AND_ADD:
                        case IB_WR_LSO:
                        default:
                                ps_opcode = HNS_ROCE_WQE_OPCODE_MASK;
                                break;
                        }
                        ctrl->flag |= cpu_to_le32(ps_opcode);
                        wqe += sizeof(struct hns_roce_wqe_raddr_seg);

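                        /*
                         * Data segments always start after the remote-address
                         * segment slot; for SEND opcodes that slot is unused
                         * but still skipped, so the WQE layout stays fixed.
                         */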
                        dseg = wqe;
                        if (wr->send_flags & IB_SEND_INLINE && wr->num_sge) {
                                if (le32_to_cpu(ctrl->msg_length) >
                                    hr_dev->caps.max_sq_inline) {
                                        ret = -EINVAL;
                                        *bad_wr = wr;
                                        dev_err(dev, "inline len=%d exceeds max_sq_inline=%d, illegal\n",
                                                le32_to_cpu(ctrl->msg_length),
                                                hr_dev->caps.max_sq_inline);
                                        goto out;
                                }
                                for (i = 0; i < wr->num_sge; i++) {
                                        memcpy(wqe, ((void *) (uintptr_t)
                                               wr->sg_list[i].addr),
                                               wr->sg_list[i].length);
                                        wqe += wr->sg_list[i].length;
                                }
                                ctrl->flag |= cpu_to_le32(HNS_ROCE_WQE_INLINE);
                        } else {
                                /* sqe num is two */
                                for (i = 0; i < wr->num_sge; i++)
                                        set_data_seg(dseg + i, wr->sg_list + i);

                                ctrl->flag |= cpu_to_le32(wr->num_sge <<
                                              HNS_ROCE_WQE_SGE_NUM_BIT);
                        }
                }
        }

out:
        /* Set DB return */
        if (likely(nreq)) {
                qp->sq.head += nreq;
                /* Make sure the WQE writes are visible before the doorbell */
                wmb();

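                /*
                 * The doorbell head is tracked modulo twice the ring size,
                 * presumably so the hardware can tell a full queue from an
                 * empty one; hence the (wqe_cnt << 1) - 1 mask below.
                 */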
                roce_set_field(sq_db.u32_4, SQ_DOORBELL_U32_4_SQ_HEAD_M,
                               SQ_DOORBELL_U32_4_SQ_HEAD_S,
                              (qp->sq.head & ((qp->sq.wqe_cnt << 1) - 1)));
                roce_set_field(sq_db.u32_4, SQ_DOORBELL_U32_4_SL_M,
                               SQ_DOORBELL_U32_4_SL_S, qp->sl);
                roce_set_field(sq_db.u32_4, SQ_DOORBELL_U32_4_PORT_M,
                               SQ_DOORBELL_U32_4_PORT_S, qp->phy_port);
                roce_set_field(sq_db.u32_8, SQ_DOORBELL_U32_8_QPN_M,
                               SQ_DOORBELL_U32_8_QPN_S, qp->doorbell_qpn);
                roce_set_bit(sq_db.u32_8, SQ_DOORBELL_HW_SYNC_S, 1);

                doorbell[0] = sq_db.u32_4;
                doorbell[1] = sq_db.u32_8;

                hns_roce_write64_k(doorbell, qp->sq.db_reg_l);
        }

        spin_unlock_irqrestore(&qp->sq.lock, flags);

        return ret;
}

static int hns_roce_v1_post_recv(struct ib_qp *ibqp,
                                 const struct ib_recv_wr *wr,
                                 const struct ib_recv_wr **bad_wr)
{
        struct hns_roce_rq_wqe_ctrl *ctrl = NULL;
        struct hns_roce_wqe_data_seg *scat = NULL;
        struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
        struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
        struct device *dev = &hr_dev->pdev->dev;
        struct hns_roce_rq_db rq_db = {};
        __le32 doorbell[2] = {0};
        unsigned long flags = 0;
        unsigned int wqe_idx;
        int ret = 0;
        int nreq = 0;
        int i = 0;
        u32 reg_val;

        spin_lock_irqsave(&hr_qp->rq.lock, flags);

        for (nreq = 0; wr; ++nreq, wr = wr->next) {
                if (hns_roce_wq_overflow(&hr_qp->rq, nreq,
                        hr_qp->ibqp.recv_cq)) {
                        ret = -ENOMEM;
                        *bad_wr = wr;
                        goto out;
                }

                wqe_idx = (hr_qp->rq.head + nreq) & (hr_qp->rq.wqe_cnt - 1);

                if (unlikely(wr->num_sge > hr_qp->rq.max_gs)) {
                        dev_err(dev, "rq:num_sge=%d > hr_qp->rq.max_gs=%d\n",
                                wr->num_sge, hr_qp->rq.max_gs);
                        ret = -EINVAL;
                        *bad_wr = wr;
                        goto out;
                }

                ctrl = hns_roce_get_recv_wqe(hr_qp, wqe_idx);

                roce_set_field(ctrl->rwqe_byte_12,
                               RQ_WQE_CTRL_RWQE_BYTE_12_RWQE_SGE_NUM_M,
                               RQ_WQE_CTRL_RWQE_BYTE_12_RWQE_SGE_NUM_S,
                               wr->num_sge);

                scat = (struct hns_roce_wqe_data_seg *)(ctrl + 1);

                for (i = 0; i < wr->num_sge; i++)
                        set_data_seg(scat + i, wr->sg_list + i);

                hr_qp->rq.wrid[wqe_idx] = wr->wr_id;
        }

out:
        if (likely(nreq)) {
                hr_qp->rq.head += nreq;
                /* Make sure the WQE writes are visible before updating head */
                wmb();

                if (ibqp->qp_type == IB_QPT_GSI) {
                        __le32 tmp;

                        /* SW update GSI rq header */
                        reg_val = roce_read(to_hr_dev(ibqp->device),
                                            ROCEE_QP1C_CFG3_0_REG +
                                            QP1C_CFGN_OFFSET * hr_qp->phy_port);
                        tmp = cpu_to_le32(reg_val);
                        roce_set_field(tmp,
                                       ROCEE_QP1C_CFG3_0_ROCEE_QP1C_RQ_HEAD_M,
                                       ROCEE_QP1C_CFG3_0_ROCEE_QP1C_RQ_HEAD_S,
                                       hr_qp->rq.head);
                        reg_val = le32_to_cpu(tmp);
                        roce_write(to_hr_dev(ibqp->device),
                                   ROCEE_QP1C_CFG3_0_REG +
                                   QP1C_CFGN_OFFSET * hr_qp->phy_port, reg_val);
                } else {
                        roce_set_field(rq_db.u32_4, RQ_DOORBELL_U32_4_RQ_HEAD_M,
                                       RQ_DOORBELL_U32_4_RQ_HEAD_S,
                                       hr_qp->rq.head);
                        roce_set_field(rq_db.u32_8, RQ_DOORBELL_U32_8_QPN_M,
                                       RQ_DOORBELL_U32_8_QPN_S, hr_qp->qpn);
                        roce_set_field(rq_db.u32_8, RQ_DOORBELL_U32_8_CMD_M,
                                       RQ_DOORBELL_U32_8_CMD_S, 1);
                        roce_set_bit(rq_db.u32_8, RQ_DOORBELL_U32_8_HW_SYNC_S,
                                     1);

                        doorbell[0] = rq_db.u32_4;
                        doorbell[1] = rq_db.u32_8;

                        hns_roce_write64_k(doorbell, hr_qp->rq.db_reg_l);
                }
        }
        spin_unlock_irqrestore(&hr_qp->rq.lock, flags);

        return ret;
}

static void hns_roce_set_db_event_mode(struct hns_roce_dev *hr_dev,
                                       int sdb_mode, int odb_mode)
{
        __le32 tmp;
        u32 val;

        val = roce_read(hr_dev, ROCEE_GLB_CFG_REG);
        tmp = cpu_to_le32(val);
        roce_set_bit(tmp, ROCEE_GLB_CFG_ROCEE_DB_SQ_MODE_S, sdb_mode);
        roce_set_bit(tmp, ROCEE_GLB_CFG_ROCEE_DB_OTH_MODE_S, odb_mode);
        val = le32_to_cpu(tmp);
        roce_write(hr_dev, ROCEE_GLB_CFG_REG, val);
}

static void hns_roce_set_db_ext_mode(struct hns_roce_dev *hr_dev, u32 sdb_mode,
                                     u32 odb_mode)
{
        __le32 tmp;
        u32 val;

        /* Configure SDB/ODB extend mode */
        val = roce_read(hr_dev, ROCEE_GLB_CFG_REG);
        tmp = cpu_to_le32(val);
        roce_set_bit(tmp, ROCEE_GLB_CFG_SQ_EXT_DB_MODE_S, sdb_mode);
        roce_set_bit(tmp, ROCEE_GLB_CFG_OTH_EXT_DB_MODE_S, odb_mode);
        val = le32_to_cpu(tmp);
        roce_write(hr_dev, ROCEE_GLB_CFG_REG, val);
}

static void hns_roce_set_sdb(struct hns_roce_dev *hr_dev, u32 sdb_alept,
                             u32 sdb_alful)
{
        __le32 tmp;
        u32 val;

        /* Configure SDB */
        val = roce_read(hr_dev, ROCEE_DB_SQ_WL_REG);
        tmp = cpu_to_le32(val);
        roce_set_field(tmp, ROCEE_DB_SQ_WL_ROCEE_DB_SQ_WL_M,
                       ROCEE_DB_SQ_WL_ROCEE_DB_SQ_WL_S, sdb_alful);
        roce_set_field(tmp, ROCEE_DB_SQ_WL_ROCEE_DB_SQ_WL_EMPTY_M,
                       ROCEE_DB_SQ_WL_ROCEE_DB_SQ_WL_EMPTY_S, sdb_alept);
        val = le32_to_cpu(tmp);
        roce_write(hr_dev, ROCEE_DB_SQ_WL_REG, val);
}

static void hns_roce_set_odb(struct hns_roce_dev *hr_dev, u32 odb_alept,
                             u32 odb_alful)
{
        __le32 tmp;
        u32 val;

        /* Configure ODB */
        val = roce_read(hr_dev, ROCEE_DB_OTHERS_WL_REG);
        tmp = cpu_to_le32(val);
        roce_set_field(tmp, ROCEE_DB_OTHERS_WL_ROCEE_DB_OTH_WL_M,
                       ROCEE_DB_OTHERS_WL_ROCEE_DB_OTH_WL_S, odb_alful);
        roce_set_field(tmp, ROCEE_DB_OTHERS_WL_ROCEE_DB_OTH_WL_EMPTY_M,
                       ROCEE_DB_OTHERS_WL_ROCEE_DB_OTH_WL_EMPTY_S, odb_alept);
        val = le32_to_cpu(tmp);
        roce_write(hr_dev, ROCEE_DB_OTHERS_WL_REG, val);
}

static void hns_roce_set_sdb_ext(struct hns_roce_dev *hr_dev, u32 ext_sdb_alept,
                                 u32 ext_sdb_alful)
{
        struct hns_roce_v1_priv *priv = hr_dev->priv;
        struct hns_roce_db_table *db = &priv->db_table;
        struct device *dev = &hr_dev->pdev->dev;
        dma_addr_t sdb_dma_addr;
        __le32 tmp;
        u32 val;

        /* Configure extend SDB threshold */
        roce_write(hr_dev, ROCEE_EXT_DB_SQ_WL_EMPTY_REG, ext_sdb_alept);
        roce_write(hr_dev, ROCEE_EXT_DB_SQ_WL_REG, ext_sdb_alful);

        /* Configure extend SDB base addr */
        sdb_dma_addr = db->ext_db->sdb_buf_list->map;
        roce_write(hr_dev, ROCEE_EXT_DB_SQ_REG, (u32)(sdb_dma_addr >> 12));

        /* Configure extend SDB depth */
        val = roce_read(hr_dev, ROCEE_EXT_DB_SQ_H_REG);
        tmp = cpu_to_le32(val);
        roce_set_field(tmp, ROCEE_EXT_DB_SQ_H_EXT_DB_SQ_SHIFT_M,
                       ROCEE_EXT_DB_SQ_H_EXT_DB_SQ_SHIFT_S,
                       db->ext_db->esdb_dep);
        /*
         * 44 = 32 + 12: the base address is written to hardware shifted
         * right by 12 (4K page alignment), and this field holds the high
         * 32 bits of that shifted value, hence the total shift of 44.
         */
        roce_set_field(tmp, ROCEE_EXT_DB_SQ_H_EXT_DB_SQ_BA_H_M,
                       ROCEE_EXT_DB_SQ_H_EXT_DB_SQ_BA_H_S, sdb_dma_addr >> 44);
        val = le32_to_cpu(tmp);
        roce_write(hr_dev, ROCEE_EXT_DB_SQ_H_REG, val);

        dev_dbg(dev, "ext SDB depth: 0x%x\n", db->ext_db->esdb_dep);
        dev_dbg(dev, "ext SDB threshold: empty: 0x%x, ful: 0x%x\n",
                ext_sdb_alept, ext_sdb_alful);
}

static void hns_roce_set_odb_ext(struct hns_roce_dev *hr_dev, u32 ext_odb_alept,
                                 u32 ext_odb_alful)
{
        struct hns_roce_v1_priv *priv = hr_dev->priv;
        struct hns_roce_db_table *db = &priv->db_table;
        struct device *dev = &hr_dev->pdev->dev;
        dma_addr_t odb_dma_addr;
        __le32 tmp;
        u32 val;

        /* Configure extend ODB threshold */
        roce_write(hr_dev, ROCEE_EXT_DB_OTHERS_WL_EMPTY_REG, ext_odb_alept);
        roce_write(hr_dev, ROCEE_EXT_DB_OTHERS_WL_REG, ext_odb_alful);

        /* Configure extend ODB base addr */
        odb_dma_addr = db->ext_db->odb_buf_list->map;
        roce_write(hr_dev, ROCEE_EXT_DB_OTH_REG, (u32)(odb_dma_addr >> 12));

        /* Configure extend ODB depth */
        val = roce_read(hr_dev, ROCEE_EXT_DB_OTH_H_REG);
        tmp = cpu_to_le32(val);
        roce_set_field(tmp, ROCEE_EXT_DB_OTH_H_EXT_DB_OTH_SHIFT_M,
                       ROCEE_EXT_DB_OTH_H_EXT_DB_OTH_SHIFT_S,
                       db->ext_db->eodb_dep);
        /*
         * High bits of the extended ODB base address (44 = 32 + 12,
         * as for the extended SDB above).
         */
        roce_set_field(tmp, ROCEE_EXT_DB_SQ_H_EXT_DB_OTH_BA_H_M,
                       ROCEE_EXT_DB_SQ_H_EXT_DB_OTH_BA_H_S,
                       odb_dma_addr >> 44);
        val = le32_to_cpu(tmp);
        roce_write(hr_dev, ROCEE_EXT_DB_OTH_H_REG, val);

        dev_dbg(dev, "ext ODB depth: 0x%x\n", db->ext_db->eodb_dep);
        dev_dbg(dev, "ext ODB threshold: empty: 0x%x, ful: 0x%x\n",
                ext_odb_alept, ext_odb_alful);
}

static int hns_roce_db_ext_init(struct hns_roce_dev *hr_dev, u32 sdb_ext_mod,
                                u32 odb_ext_mod)
{
        struct hns_roce_v1_priv *priv = hr_dev->priv;
        struct hns_roce_db_table *db = &priv->db_table;
        struct device *dev = &hr_dev->pdev->dev;
        dma_addr_t sdb_dma_addr;
        dma_addr_t odb_dma_addr;
        int ret = 0;

        db->ext_db = kmalloc(sizeof(*db->ext_db), GFP_KERNEL);
        if (!db->ext_db)
                return -ENOMEM;

        if (sdb_ext_mod) {
                db->ext_db->sdb_buf_list = kmalloc(
                                sizeof(*db->ext_db->sdb_buf_list), GFP_KERNEL);
                if (!db->ext_db->sdb_buf_list) {
                        ret = -ENOMEM;
                        goto ext_sdb_buf_fail_out;
                }

                db->ext_db->sdb_buf_list->buf = dma_alloc_coherent(dev,
                                                     HNS_ROCE_V1_EXT_SDB_SIZE,
                                                     &sdb_dma_addr, GFP_KERNEL);
                if (!db->ext_db->sdb_buf_list->buf) {
                        ret = -ENOMEM;
                        goto alloc_sq_db_buf_fail;
                }
                db->ext_db->sdb_buf_list->map = sdb_dma_addr;

                db->ext_db->esdb_dep = ilog2(HNS_ROCE_V1_EXT_SDB_DEPTH);
                hns_roce_set_sdb_ext(hr_dev, HNS_ROCE_V1_EXT_SDB_ALEPT,
                                     HNS_ROCE_V1_EXT_SDB_ALFUL);
        } else {
                hns_roce_set_sdb(hr_dev, HNS_ROCE_V1_SDB_ALEPT,
                                 HNS_ROCE_V1_SDB_ALFUL);
        }

        if (odb_ext_mod) {
                db->ext_db->odb_buf_list = kmalloc(
                                sizeof(*db->ext_db->odb_buf_list), GFP_KERNEL);
                if (!db->ext_db->odb_buf_list) {
                        ret = -ENOMEM;
                        goto ext_odb_buf_fail_out;
                }

                db->ext_db->odb_buf_list->buf = dma_alloc_coherent(dev,
                                                     HNS_ROCE_V1_EXT_ODB_SIZE,
                                                     &odb_dma_addr, GFP_KERNEL);
                if (!db->ext_db->odb_buf_list->buf) {
                        ret = -ENOMEM;
                        goto alloc_otr_db_buf_fail;
                }
                db->ext_db->odb_buf_list->map = odb_dma_addr;

                db->ext_db->eodb_dep = ilog2(HNS_ROCE_V1_EXT_ODB_DEPTH);
                hns_roce_set_odb_ext(hr_dev, HNS_ROCE_V1_EXT_ODB_ALEPT,
                                     HNS_ROCE_V1_EXT_ODB_ALFUL);
        } else {
                hns_roce_set_odb(hr_dev, HNS_ROCE_V1_ODB_ALEPT,
                                 HNS_ROCE_V1_ODB_ALFUL);
        }

        hns_roce_set_db_ext_mode(hr_dev, sdb_ext_mod, odb_ext_mod);

        return 0;

alloc_otr_db_buf_fail:
        kfree(db->ext_db->odb_buf_list);

ext_odb_buf_fail_out:
        if (sdb_ext_mod) {
                dma_free_coherent(dev, HNS_ROCE_V1_EXT_SDB_SIZE,
                                  db->ext_db->sdb_buf_list->buf,
                                  db->ext_db->sdb_buf_list->map);
        }

alloc_sq_db_buf_fail:
        if (sdb_ext_mod)
                kfree(db->ext_db->sdb_buf_list);

ext_sdb_buf_fail_out:
        kfree(db->ext_db);
        return ret;
}

static struct hns_roce_qp *hns_roce_v1_create_lp_qp(struct hns_roce_dev *hr_dev,
                                                    struct ib_pd *pd)
{
        struct device *dev = &hr_dev->pdev->dev;
        struct ib_qp_init_attr init_attr;
        struct ib_qp *qp;

        memset(&init_attr, 0, sizeof(struct ib_qp_init_attr));
        init_attr.qp_type               = IB_QPT_RC;
        init_attr.sq_sig_type           = IB_SIGNAL_ALL_WR;
        init_attr.cap.max_recv_wr       = HNS_ROCE_MIN_WQE_NUM;
        init_attr.cap.max_send_wr       = HNS_ROCE_MIN_WQE_NUM;

        qp = hns_roce_create_qp(pd, &init_attr, NULL);
        if (IS_ERR(qp)) {
                dev_err(dev, "Create loop qp for mr free failed!\n");
                return NULL;
        }

        return to_hr_qp(qp);
}

static int hns_roce_v1_rsv_lp_qp(struct hns_roce_dev *hr_dev)
{
        struct hns_roce_v1_priv *priv = hr_dev->priv;
        struct hns_roce_free_mr *free_mr = &priv->free_mr;
        struct hns_roce_caps *caps = &hr_dev->caps;
        struct ib_device *ibdev = &hr_dev->ib_dev;
        struct device *dev = &hr_dev->pdev->dev;
        struct ib_cq_init_attr cq_init_attr;
        struct ib_qp_attr attr = { 0 };
        struct hns_roce_qp *hr_qp;
        struct ib_cq *cq;
        struct ib_pd *pd;
        union ib_gid dgid;
        __be64 subnet_prefix;
        int attr_mask = 0;
        int ret;
        int i, j;
        u8 queue_en[HNS_ROCE_V1_RESV_QP] = { 0 };
        u8 phy_port;
        u8 port = 0;
        u8 sl;

        /* Reserved cq for loop qp */
        cq_init_attr.cqe                = HNS_ROCE_MIN_WQE_NUM * 2;
        cq_init_attr.comp_vector        = 0;

        cq = rdma_zalloc_drv_obj(ibdev, ib_cq);
        if (!cq)
                return -ENOMEM;

        ret = hns_roce_create_cq(cq, &cq_init_attr, NULL);
        if (ret) {
                dev_err(dev, "Create cq for reserved loop qp failed!\n");
                goto alloc_cq_failed;
        }
        free_mr->mr_free_cq = to_hr_cq(cq);
        free_mr->mr_free_cq->ib_cq.device               = &hr_dev->ib_dev;
        free_mr->mr_free_cq->ib_cq.uobject              = NULL;
        free_mr->mr_free_cq->ib_cq.comp_handler         = NULL;
        free_mr->mr_free_cq->ib_cq.event_handler        = NULL;
        free_mr->mr_free_cq->ib_cq.cq_context           = NULL;
        atomic_set(&free_mr->mr_free_cq->ib_cq.usecnt, 0);

        pd = rdma_zalloc_drv_obj(ibdev, ib_pd);
        if (!pd) {
                ret = -ENOMEM;
                goto alloc_mem_failed;
        }

        pd->device  = ibdev;
        ret = hns_roce_alloc_pd(pd, NULL);
        if (ret)
                goto alloc_pd_failed;

        free_mr->mr_free_pd = to_hr_pd(pd);
        free_mr->mr_free_pd->ibpd.device  = &hr_dev->ib_dev;
        free_mr->mr_free_pd->ibpd.uobject = NULL;
        free_mr->mr_free_pd->ibpd.__internal_mr = NULL;
        atomic_set(&free_mr->mr_free_pd->ibpd.usecnt, 0);

        attr.qp_access_flags    = IB_ACCESS_REMOTE_WRITE;
        attr.pkey_index         = 0;
        attr.min_rnr_timer      = 0;
        /* Disable read ability */
        attr.max_dest_rd_atomic = 0;
        attr.max_rd_atomic      = 0;
        /* Use arbitrary values as rq_psn and sq_psn */
        attr.rq_psn             = 0x0808;
        attr.sq_psn             = 0x0808;
        attr.retry_cnt          = 7;
        attr.rnr_retry          = 7;
        attr.timeout            = 0x12;
        attr.path_mtu           = IB_MTU_256;
        attr.ah_attr.type       = RDMA_AH_ATTR_TYPE_ROCE;
        rdma_ah_set_grh(&attr.ah_attr, NULL, 0, 0, 1, 0);
        rdma_ah_set_static_rate(&attr.ah_attr, 3);

        subnet_prefix = cpu_to_be64(0xfe80000000000000LL);
        for (i = 0; i < HNS_ROCE_V1_RESV_QP; i++) {
                phy_port = (i >= HNS_ROCE_MAX_PORTS) ? (i - 2) :
                                (i % HNS_ROCE_MAX_PORTS);
                sl = i / HNS_ROCE_MAX_PORTS;

                for (j = 0; j < caps->num_ports; j++) {
                        if (hr_dev->iboe.phy_port[j] == phy_port) {
                                queue_en[i] = 1;
                                port = j;
                                break;
                        }
                }

                if (!queue_en[i])
                        continue;

                free_mr->mr_free_qp[i] = hns_roce_v1_create_lp_qp(hr_dev, pd);
                if (!free_mr->mr_free_qp[i]) {
                        dev_err(dev, "Create loop qp failed!\n");
                        ret = -ENOMEM;
                        goto create_lp_qp_failed;
                }
                hr_qp = free_mr->mr_free_qp[i];

                hr_qp->port             = port;
                hr_qp->phy_port         = phy_port;
                hr_qp->ibqp.qp_type     = IB_QPT_RC;
                hr_qp->ibqp.device      = &hr_dev->ib_dev;
                hr_qp->ibqp.uobject     = NULL;
                atomic_set(&hr_qp->ibqp.usecnt, 0);
                hr_qp->ibqp.pd          = pd;
                hr_qp->ibqp.recv_cq     = cq;
                hr_qp->ibqp.send_cq     = cq;

                rdma_ah_set_port_num(&attr.ah_attr, port + 1);
                rdma_ah_set_sl(&attr.ah_attr, sl);
                attr.port_num           = port + 1;

                attr.dest_qp_num        = hr_qp->qpn;
                memcpy(rdma_ah_retrieve_dmac(&attr.ah_attr),
                       hr_dev->dev_addr[port],
                       ETH_ALEN);

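                /*
                 * Build a link-local destination GID from the port MAC:
                 * fe80::/64 prefix plus an EUI-64-style interface ID
                 * (ff:fe inserted in the middle, universal/local bit
                 * flipped in byte 8).
                 */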
                memcpy(&dgid.raw, &subnet_prefix, sizeof(u64));
                memcpy(&dgid.raw[8], hr_dev->dev_addr[port], 3);
                memcpy(&dgid.raw[13], hr_dev->dev_addr[port] + 3, 3);
                dgid.raw[11] = 0xff;
                dgid.raw[12] = 0xfe;
                dgid.raw[8] ^= 2;
                rdma_ah_set_dgid_raw(&attr.ah_attr, dgid.raw);

                ret = hr_dev->hw->modify_qp(&hr_qp->ibqp, &attr, attr_mask,
                                            IB_QPS_RESET, IB_QPS_INIT);
                if (ret) {
                        dev_err(dev, "modify qp failed(%d)!\n", ret);
                        goto create_lp_qp_failed;
                }

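                /*
                 * The INIT->RTR transition must pass IB_QP_DEST_QPN in
                 * attr_mask now that modify_qp() validates the mask (see
                 * the patch subject); the other transitions keep an
                 * empty mask.
                 */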
                ret = hr_dev->hw->modify_qp(&hr_qp->ibqp, &attr, IB_QP_DEST_QPN,
                                            IB_QPS_INIT, IB_QPS_RTR);
                if (ret) {
                        dev_err(dev, "modify qp failed(%d)!\n", ret);
                        goto create_lp_qp_failed;
                }

                ret = hr_dev->hw->modify_qp(&hr_qp->ibqp, &attr, attr_mask,
                                            IB_QPS_RTR, IB_QPS_RTS);
                if (ret) {
                        dev_err(dev, "modify qp failed(%d)!\n", ret);
                        goto create_lp_qp_failed;
                }
        }

        return 0;

create_lp_qp_failed:
        for (i -= 1; i >= 0; i--) {
                hr_qp = free_mr->mr_free_qp[i];
                if (hns_roce_v1_destroy_qp(&hr_qp->ibqp, NULL))
                        dev_err(dev, "Destroy qp %d for mr free failed!\n", i);
        }

        hns_roce_dealloc_pd(pd, NULL);

alloc_pd_failed:
        kfree(pd);

alloc_mem_failed:
        hns_roce_destroy_cq(cq, NULL);
alloc_cq_failed:
        kfree(cq);
        return ret;
}

static void hns_roce_v1_release_lp_qp(struct hns_roce_dev *hr_dev)
{
        struct hns_roce_v1_priv *priv = hr_dev->priv;
        struct hns_roce_free_mr *free_mr = &priv->free_mr;
        struct device *dev = &hr_dev->pdev->dev;
        struct hns_roce_qp *hr_qp;
        int ret;
        int i;

        for (i = 0; i < HNS_ROCE_V1_RESV_QP; i++) {
                hr_qp = free_mr->mr_free_qp[i];
                if (!hr_qp)
                        continue;

                ret = hns_roce_v1_destroy_qp(&hr_qp->ibqp, NULL);
                if (ret)
                        dev_err(dev, "Destroy qp %d for mr free failed(%d)!\n",
                                i, ret);
        }

        hns_roce_destroy_cq(&free_mr->mr_free_cq->ib_cq, NULL);
        kfree(&free_mr->mr_free_cq->ib_cq);
        hns_roce_dealloc_pd(&free_mr->mr_free_pd->ibpd, NULL);
        kfree(&free_mr->mr_free_pd->ibpd);
}

static int hns_roce_db_init(struct hns_roce_dev *hr_dev)
{
        struct hns_roce_v1_priv *priv = hr_dev->priv;
        struct hns_roce_db_table *db = &priv->db_table;
        struct device *dev = &hr_dev->pdev->dev;
        u32 sdb_ext_mod;
        u32 odb_ext_mod;
        u32 sdb_evt_mod;
        u32 odb_evt_mod;
        int ret;

        memset(db, 0, sizeof(*db));

        /* Default DB mode */
        sdb_ext_mod = HNS_ROCE_SDB_EXTEND_MODE;
        odb_ext_mod = HNS_ROCE_ODB_EXTEND_MODE;
        sdb_evt_mod = HNS_ROCE_SDB_NORMAL_MODE;
        odb_evt_mod = HNS_ROCE_ODB_POLL_MODE;

        db->sdb_ext_mod = sdb_ext_mod;
        db->odb_ext_mod = odb_ext_mod;

        /* Init extend DB */
        ret = hns_roce_db_ext_init(hr_dev, sdb_ext_mod, odb_ext_mod);
        if (ret) {
                dev_err(dev, "Failed in extend DB configuration.\n");
                return ret;
        }

        hns_roce_set_db_event_mode(hr_dev, sdb_evt_mod, odb_evt_mod);

        return 0;
}

static void hns_roce_v1_recreate_lp_qp_work_fn(struct work_struct *work)
{
        struct hns_roce_recreate_lp_qp_work *lp_qp_work;
        struct hns_roce_dev *hr_dev;

        lp_qp_work = container_of(work, struct hns_roce_recreate_lp_qp_work,
                                  work);
        hr_dev = to_hr_dev(lp_qp_work->ib_dev);

        hns_roce_v1_release_lp_qp(hr_dev);

        if (hns_roce_v1_rsv_lp_qp(hr_dev))
                dev_err(&hr_dev->pdev->dev, "create reserved qp failed\n");

        if (lp_qp_work->comp_flag)
                complete(lp_qp_work->comp);

        kfree(lp_qp_work);
}

static int hns_roce_v1_recreate_lp_qp(struct hns_roce_dev *hr_dev)
{
        long end = HNS_ROCE_V1_RECREATE_LP_QP_TIMEOUT_MSECS;
        struct hns_roce_v1_priv *priv = hr_dev->priv;
        struct hns_roce_free_mr *free_mr = &priv->free_mr;
        struct hns_roce_recreate_lp_qp_work *lp_qp_work;
        struct device *dev = &hr_dev->pdev->dev;
        struct completion comp;

        lp_qp_work = kzalloc(sizeof(struct hns_roce_recreate_lp_qp_work),
                             GFP_KERNEL);
        if (!lp_qp_work)
                return -ENOMEM;

        INIT_WORK(&(lp_qp_work->work), hns_roce_v1_recreate_lp_qp_work_fn);

        lp_qp_work->ib_dev = &(hr_dev->ib_dev);
        lp_qp_work->comp = &comp;
        lp_qp_work->comp_flag = 1;

        init_completion(lp_qp_work->comp);

        queue_work(free_mr->free_mr_wq, &(lp_qp_work->work));

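        /*
         * The completion lives on this stack frame, so poll for it with a
         * timeout; on timeout, clear comp_flag so the work item will not
         * call complete() on a stale pointer if it finishes later.
         */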
        while (end > 0) {
                if (try_wait_for_completion(&comp))
                        return 0;
                msleep(HNS_ROCE_V1_RECREATE_LP_QP_WAIT_VALUE);
                end -= HNS_ROCE_V1_RECREATE_LP_QP_WAIT_VALUE;
        }

        lp_qp_work->comp_flag = 0;
        if (try_wait_for_completion(&comp))
                return 0;

        dev_warn(dev, "recreate lp qp timed out (20s)!\n");
        return -ETIMEDOUT;
}

static int hns_roce_v1_send_lp_wqe(struct hns_roce_qp *hr_qp)
{
        struct hns_roce_dev *hr_dev = to_hr_dev(hr_qp->ibqp.device);
        struct device *dev = &hr_dev->pdev->dev;
        struct ib_send_wr send_wr;
        const struct ib_send_wr *bad_wr;
        int ret;

        memset(&send_wr, 0, sizeof(send_wr));
        send_wr.next    = NULL;
        send_wr.num_sge = 0;
        send_wr.send_flags = 0;
        send_wr.sg_list = NULL;
        send_wr.wr_id   = (unsigned long long)&send_wr;
        send_wr.opcode  = IB_WR_RDMA_WRITE;

        ret = hns_roce_v1_post_send(&hr_qp->ibqp, &send_wr, &bad_wr);
        if (ret) {
                dev_err(dev, "Post write wqe for mr free failed(%d)!\n", ret);
                return ret;
        }

        return 0;
}

static void hns_roce_v1_mr_free_work_fn(struct work_struct *work)
{
        unsigned long end =
                msecs_to_jiffies(HNS_ROCE_V1_FREE_MR_TIMEOUT_MSECS) + jiffies;
        struct hns_roce_mr_free_work *mr_work =
                container_of(work, struct hns_roce_mr_free_work, work);
        struct hns_roce_dev *hr_dev = to_hr_dev(mr_work->ib_dev);
        struct hns_roce_v1_priv *priv = hr_dev->priv;
        struct hns_roce_free_mr *free_mr = &priv->free_mr;
        struct hns_roce_cq *mr_free_cq = free_mr->mr_free_cq;
        struct hns_roce_mr *hr_mr = mr_work->mr;
        struct device *dev = &hr_dev->pdev->dev;
        struct ib_wc wc[HNS_ROCE_V1_RESV_QP];
        struct hns_roce_qp *hr_qp;
        int ne = 0;
        int ret;
        int i;

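        /*
         * Post one zero-length RDMA write on every reserved loopback QP
         * and reap the completions below; their arrival is taken as the
         * sign that hardware has drained any work still referencing the
         * MR, so it can be freed safely.
         */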
        for (i = 0; i < HNS_ROCE_V1_RESV_QP; i++) {
                hr_qp = free_mr->mr_free_qp[i];
                if (!hr_qp)
                        continue;
                ne++;

                ret = hns_roce_v1_send_lp_wqe(hr_qp);
                if (ret) {
                        dev_err(dev,
                             "Send wqe (qp:0x%lx) for mr free failed(%d)!\n",
                             hr_qp->qpn, ret);
                        goto free_work;
                }
        }

        if (!ne) {
                dev_err(dev, "Reserved loop qp is absent!\n");
                goto free_work;
        }

        do {
                ret = hns_roce_v1_poll_cq(&mr_free_cq->ib_cq, ne, wc);
                if (ret < 0 && hr_qp) {
                        dev_err(dev,
                           "(qp:0x%lx) starts, Poll cqe failed(%d) for mr 0x%x free! Remain %d cqe\n",
                           hr_qp->qpn, ret, hr_mr->key, ne);
                        goto free_work;
                }
                ne -= ret;
                usleep_range(HNS_ROCE_V1_FREE_MR_WAIT_VALUE * 1000,
                             (1 + HNS_ROCE_V1_FREE_MR_WAIT_VALUE) * 1000);
        } while (ne && time_before_eq(jiffies, end));

        if (ne != 0)
                dev_err(dev,
                        "Poll cqe for mr 0x%x free timeout! Remain %d cqe\n",
                        hr_mr->key, ne);

free_work:
        if (mr_work->comp_flag)
                complete(mr_work->comp);
        kfree(mr_work);
}

static int hns_roce_v1_dereg_mr(struct hns_roce_dev *hr_dev,
                                struct hns_roce_mr *mr, struct ib_udata *udata)
{
        struct hns_roce_v1_priv *priv = hr_dev->priv;
        struct hns_roce_free_mr *free_mr = &priv->free_mr;
        long end = HNS_ROCE_V1_FREE_MR_TIMEOUT_MSECS;
        struct device *dev = &hr_dev->pdev->dev;
        struct hns_roce_mr_free_work *mr_work;
        unsigned long start = jiffies;
        struct completion comp;
        int ret = 0;

        if (mr->enabled) {
                if (hns_roce_hw_destroy_mpt(hr_dev, NULL,
                                            key_to_hw_index(mr->key) &
                                            (hr_dev->caps.num_mtpts - 1)))
                        dev_warn(dev, "DESTROY_MPT failed!\n");
        }

        mr_work = kzalloc(sizeof(*mr_work), GFP_KERNEL);
        if (!mr_work) {
                ret = -ENOMEM;
                goto free_mr;
        }

        INIT_WORK(&(mr_work->work), hns_roce_v1_mr_free_work_fn);

        mr_work->ib_dev = &(hr_dev->ib_dev);
        mr_work->comp = &comp;
        mr_work->comp_flag = 1;
        mr_work->mr = (void *)mr;
        init_completion(mr_work->comp);

        queue_work(free_mr->free_mr_wq, &(mr_work->work));

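        /*
         * Same stack-completion pattern as hns_roce_v1_recreate_lp_qp():
         * poll with a timeout and clear comp_flag on timeout so the work
         * item never completes a completion that has gone out of scope.
         */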
        while (end > 0) {
                if (try_wait_for_completion(&comp))
                        goto free_mr;
                msleep(HNS_ROCE_V1_FREE_MR_WAIT_VALUE);
                end -= HNS_ROCE_V1_FREE_MR_WAIT_VALUE;
        }

        mr_work->comp_flag = 0;
        if (try_wait_for_completion(&comp))
                goto free_mr;

        dev_warn(dev, "Free mr work 0x%x timed out (50s)!\n", mr->key);
        ret = -ETIMEDOUT;

free_mr:
        dev_dbg(dev, "Free mr 0x%x use 0x%x us.\n",
                mr->key, jiffies_to_usecs(jiffies) - jiffies_to_usecs(start));

        hns_roce_bitmap_free(&hr_dev->mr_table.mtpt_bitmap,
                             key_to_hw_index(mr->key), 0);
        hns_roce_mtr_destroy(hr_dev, &mr->pbl_mtr);
        kfree(mr);

        return ret;
}

static void hns_roce_db_free(struct hns_roce_dev *hr_dev)
{
        struct hns_roce_v1_priv *priv = hr_dev->priv;
        struct hns_roce_db_table *db = &priv->db_table;
        struct device *dev = &hr_dev->pdev->dev;

        if (db->sdb_ext_mod) {
                dma_free_coherent(dev, HNS_ROCE_V1_EXT_SDB_SIZE,
                                  db->ext_db->sdb_buf_list->buf,
                                  db->ext_db->sdb_buf_list->map);
                kfree(db->ext_db->sdb_buf_list);
        }

        if (db->odb_ext_mod) {
                dma_free_coherent(dev, HNS_ROCE_V1_EXT_ODB_SIZE,
                                  db->ext_db->odb_buf_list->buf,
                                  db->ext_db->odb_buf_list->map);
                kfree(db->ext_db->odb_buf_list);
        }

        kfree(db->ext_db);
}

static int hns_roce_raq_init(struct hns_roce_dev *hr_dev)
{
        struct hns_roce_v1_priv *priv = hr_dev->priv;
        struct hns_roce_raq_table *raq = &priv->raq_table;
        struct device *dev = &hr_dev->pdev->dev;
        dma_addr_t addr;
        int raq_shift;
        __le32 tmp;
        u32 val;
        int ret;

        raq->e_raq_buf = kzalloc(sizeof(*(raq->e_raq_buf)), GFP_KERNEL);
        if (!raq->e_raq_buf)
                return -ENOMEM;

        raq->e_raq_buf->buf = dma_alloc_coherent(dev, HNS_ROCE_V1_RAQ_SIZE,
                                                 &addr, GFP_KERNEL);
        if (!raq->e_raq_buf->buf) {
                ret = -ENOMEM;
                goto err_dma_alloc_raq;
        }
        raq->e_raq_buf->map = addr;

        /* Configure raq extended address, 48-bit, 4K aligned */
        roce_write(hr_dev, ROCEE_EXT_RAQ_REG, raq->e_raq_buf->map >> 12);

        /* Configure raq_shift */
        raq_shift = ilog2(HNS_ROCE_V1_RAQ_SIZE / HNS_ROCE_V1_RAQ_ENTRY);
        val = roce_read(hr_dev, ROCEE_EXT_RAQ_H_REG);
        tmp = cpu_to_le32(val);
        roce_set_field(tmp, ROCEE_EXT_RAQ_H_EXT_RAQ_SHIFT_M,
                       ROCEE_EXT_RAQ_H_EXT_RAQ_SHIFT_S, raq_shift);
        /*
         * 44 = 32 + 12: the base address is written to hardware shifted
         * right by 12 (4K page alignment), and this field holds the high
         * 32 bits of that shifted value, hence the total shift of 44.
         */
        roce_set_field(tmp, ROCEE_EXT_RAQ_H_EXT_RAQ_BA_H_M,
                       ROCEE_EXT_RAQ_H_EXT_RAQ_BA_H_S,
                       raq->e_raq_buf->map >> 44);
        val = le32_to_cpu(tmp);
        roce_write(hr_dev, ROCEE_EXT_RAQ_H_REG, val);
        dev_dbg(dev, "Configure raq_shift 0x%x.\n", val);

        /* Configure raq threshold */
        val = roce_read(hr_dev, ROCEE_RAQ_WL_REG);
        tmp = cpu_to_le32(val);
        roce_set_field(tmp, ROCEE_RAQ_WL_ROCEE_RAQ_WL_M,
                       ROCEE_RAQ_WL_ROCEE_RAQ_WL_S,
                       HNS_ROCE_V1_EXT_RAQ_WF);
        val = le32_to_cpu(tmp);
        roce_write(hr_dev, ROCEE_RAQ_WL_REG, val);
        dev_dbg(dev, "Configure raq_wl 0x%x.\n", val);

        /* Enable extend raq */
        val = roce_read(hr_dev, ROCEE_WRMS_POL_TIME_INTERVAL_REG);
        tmp = cpu_to_le32(val);
        roce_set_field(tmp,
                       ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_POL_TIME_INTERVAL_M,
                       ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_POL_TIME_INTERVAL_S,
                       POL_TIME_INTERVAL_VAL);
        roce_set_bit(tmp, ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_EXT_RAQ_MODE, 1);
        roce_set_field(tmp,
                       ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_RAQ_TIMEOUT_CHK_CFG_M,
                       ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_RAQ_TIMEOUT_CHK_CFG_S,
                       2);
        roce_set_bit(tmp,
                     ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_RAQ_TIMEOUT_CHK_EN_S, 1);
        val = le32_to_cpu(tmp);
        roce_write(hr_dev, ROCEE_WRMS_POL_TIME_INTERVAL_REG, val);
        dev_dbg(dev, "Configure WrmsPolTimeInterval 0x%x.\n", val);

        /* Enable raq drop */
        val = roce_read(hr_dev, ROCEE_GLB_CFG_REG);
        tmp = cpu_to_le32(val);
        roce_set_bit(tmp, ROCEE_GLB_CFG_TRP_RAQ_DROP_EN_S, 1);
        val = le32_to_cpu(tmp);
        roce_write(hr_dev, ROCEE_GLB_CFG_REG, val);
        dev_dbg(dev, "Configure GlbCfg = 0x%x.\n", val);

        return 0;

err_dma_alloc_raq:
        kfree(raq->e_raq_buf);
        return ret;
}

static void hns_roce_raq_free(struct hns_roce_dev *hr_dev)
{
        struct hns_roce_v1_priv *priv = hr_dev->priv;
        struct hns_roce_raq_table *raq = &priv->raq_table;
        struct device *dev = &hr_dev->pdev->dev;

        dma_free_coherent(dev, HNS_ROCE_V1_RAQ_SIZE, raq->e_raq_buf->buf,
                          raq->e_raq_buf->map);
        kfree(raq->e_raq_buf);
}

static void hns_roce_port_enable(struct hns_roce_dev *hr_dev, int enable_flag)
{
        __le32 tmp;
        u32 val;

        if (enable_flag) {
                val = roce_read(hr_dev, ROCEE_GLB_CFG_REG);
                /* Open all ports */
                tmp = cpu_to_le32(val);
                roce_set_field(tmp, ROCEE_GLB_CFG_ROCEE_PORT_ST_M,
                               ROCEE_GLB_CFG_ROCEE_PORT_ST_S,
                               ALL_PORT_VAL_OPEN);
                val = le32_to_cpu(tmp);
                roce_write(hr_dev, ROCEE_GLB_CFG_REG, val);
        } else {
                val = roce_read(hr_dev, ROCEE_GLB_CFG_REG);
                /* Close all ports */
                tmp = cpu_to_le32(val);
                roce_set_field(tmp, ROCEE_GLB_CFG_ROCEE_PORT_ST_M,
                               ROCEE_GLB_CFG_ROCEE_PORT_ST_S, 0x0);
                val = le32_to_cpu(tmp);
                roce_write(hr_dev, ROCEE_GLB_CFG_REG, val);
        }
}

static int hns_roce_bt_init(struct hns_roce_dev *hr_dev)
{
        struct hns_roce_v1_priv *priv = hr_dev->priv;
        struct device *dev = &hr_dev->pdev->dev;
        int ret;

        priv->bt_table.qpc_buf.buf = dma_alloc_coherent(dev,
                HNS_ROCE_BT_RSV_BUF_SIZE, &priv->bt_table.qpc_buf.map,
                GFP_KERNEL);
        if (!priv->bt_table.qpc_buf.buf)
                return -ENOMEM;

        priv->bt_table.mtpt_buf.buf = dma_alloc_coherent(dev,
                HNS_ROCE_BT_RSV_BUF_SIZE, &priv->bt_table.mtpt_buf.map,
                GFP_KERNEL);
        if (!priv->bt_table.mtpt_buf.buf) {
                ret = -ENOMEM;
                goto err_failed_alloc_mtpt_buf;
        }

        priv->bt_table.cqc_buf.buf = dma_alloc_coherent(dev,
                HNS_ROCE_BT_RSV_BUF_SIZE, &priv->bt_table.cqc_buf.map,
                GFP_KERNEL);
        if (!priv->bt_table.cqc_buf.buf) {
                ret = -ENOMEM;
                goto err_failed_alloc_cqc_buf;
        }

        return 0;

err_failed_alloc_cqc_buf:
        dma_free_coherent(dev, HNS_ROCE_BT_RSV_BUF_SIZE,
                priv->bt_table.mtpt_buf.buf, priv->bt_table.mtpt_buf.map);

err_failed_alloc_mtpt_buf:
        dma_free_coherent(dev, HNS_ROCE_BT_RSV_BUF_SIZE,
                priv->bt_table.qpc_buf.buf, priv->bt_table.qpc_buf.map);

        return ret;
}

static void hns_roce_bt_free(struct hns_roce_dev *hr_dev)
{
        struct hns_roce_v1_priv *priv = hr_dev->priv;
        struct device *dev = &hr_dev->pdev->dev;

        dma_free_coherent(dev, HNS_ROCE_BT_RSV_BUF_SIZE,
                priv->bt_table.cqc_buf.buf, priv->bt_table.cqc_buf.map);

        dma_free_coherent(dev, HNS_ROCE_BT_RSV_BUF_SIZE,
                priv->bt_table.mtpt_buf.buf, priv->bt_table.mtpt_buf.map);

        dma_free_coherent(dev, HNS_ROCE_BT_RSV_BUF_SIZE,
                priv->bt_table.qpc_buf.buf, priv->bt_table.qpc_buf.map);
}

static int hns_roce_tptr_init(struct hns_roce_dev *hr_dev)
{
        struct hns_roce_v1_priv *priv = hr_dev->priv;
        struct hns_roce_buf_list *tptr_buf = &priv->tptr_table.tptr_buf;
        struct device *dev = &hr_dev->pdev->dev;

1330         /*
1331          * This buffer is used for the CQ's tptr (tail pointer), also
1332          * called the ci (consumer index). Each CQ uses 2 bytes to store
1333          * its CQE ci on hip06. Hardware reads this area to fetch the new
1334          * ci when the queue is almost full.
1335          */
1336         tptr_buf->buf = dma_alloc_coherent(dev, HNS_ROCE_V1_TPTR_BUF_SIZE,
1337                                            &tptr_buf->map, GFP_KERNEL);
1338         if (!tptr_buf->buf)
1339                 return -ENOMEM;
1340
1341         hr_dev->tptr_dma_addr = tptr_buf->map;
1342         hr_dev->tptr_size = HNS_ROCE_V1_TPTR_BUF_SIZE;
1343
1344         return 0;
1345 }
1346
1347 static void hns_roce_tptr_free(struct hns_roce_dev *hr_dev)
1348 {
1349         struct hns_roce_v1_priv *priv = hr_dev->priv;
1350         struct hns_roce_buf_list *tptr_buf = &priv->tptr_table.tptr_buf;
1351         struct device *dev = &hr_dev->pdev->dev;
1352
1353         dma_free_coherent(dev, HNS_ROCE_V1_TPTR_BUF_SIZE,
1354                           tptr_buf->buf, tptr_buf->map);
1355 }
1356
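/*
 * Set up the "free mr" machinery: a dedicated workqueue plus reserved
 * loopback QPs (see hns_roce_v1_rsv_lp_qp). The MR tear-down path
 * apparently drains pending work through these loopback QPs, which is
 * also why they are recreated when the MAC address changes.
 */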
1357 static int hns_roce_free_mr_init(struct hns_roce_dev *hr_dev)
1358 {
1359         struct hns_roce_v1_priv *priv = hr_dev->priv;
1360         struct hns_roce_free_mr *free_mr = &priv->free_mr;
1361         struct device *dev = &hr_dev->pdev->dev;
1362         int ret;
1363
1364         free_mr->free_mr_wq = create_singlethread_workqueue("hns_roce_free_mr");
1365         if (!free_mr->free_mr_wq) {
1366                 dev_err(dev, "Create free mr workqueue failed!\n");
1367                 return -ENOMEM;
1368         }
1369
1370         ret = hns_roce_v1_rsv_lp_qp(hr_dev);
1371         if (ret) {
1372                 dev_err(dev, "Reserve loopback QPs failed(%d)!\n", ret);
1373                 flush_workqueue(free_mr->free_mr_wq);
1374                 destroy_workqueue(free_mr->free_mr_wq);
1375         }
1376
1377         return ret;
1378 }
1379
1380 static void hns_roce_free_mr_free(struct hns_roce_dev *hr_dev)
1381 {
1382         struct hns_roce_v1_priv *priv = hr_dev->priv;
1383         struct hns_roce_free_mr *free_mr = &priv->free_mr;
1384
1385         flush_workqueue(free_mr->free_mr_wq);
1386         destroy_workqueue(free_mr->free_mr_wq);
1387
1388         hns_roce_v1_release_lp_qp(hr_dev);
1389 }
1390
1391 /**
1392  * hns_roce_v1_reset - reset RoCE
1393  * @hr_dev: RoCE device struct pointer
1394  * @dereset: true -- drop reset, false -- reset
1395  * Return: 0 on success, a negative value on failure
1396  */
1397 static int hns_roce_v1_reset(struct hns_roce_dev *hr_dev, bool dereset)
1398 {
1399         struct device_node *dsaf_node;
1400         struct device *dev = &hr_dev->pdev->dev;
1401         struct device_node *np = dev->of_node;
1402         struct fwnode_handle *fwnode;
1403         int ret;
1404
1405         /* check if this is DT/ACPI case */
1406         if (dev_of_node(dev)) {
1407                 dsaf_node = of_parse_phandle(np, "dsaf-handle", 0);
1408                 if (!dsaf_node) {
1409                         dev_err(dev, "could not find dsaf-handle\n");
1410                         return -EINVAL;
1411                 }
1412                 fwnode = &dsaf_node->fwnode;
1413         } else if (is_acpi_device_node(dev->fwnode)) {
1414                 struct fwnode_reference_args args;
1415
1416                 ret = acpi_node_get_property_reference(dev->fwnode,
1417                                                        "dsaf-handle", 0, &args);
1418                 if (ret) {
1419                         dev_err(dev, "could not find dsaf-handle\n");
1420                         return ret;
1421                 }
1422                 fwnode = args.fwnode;
1423         } else {
1424                 dev_err(dev, "cannot read data from DT or ACPI\n");
1425                 return -ENXIO;
1426         }
1427
1428         ret = hns_dsaf_roce_reset(fwnode, false);
1429         if (ret)
1430                 return ret;
1431
1432         if (dereset) {
1433                 msleep(SLEEP_TIME_INTERVAL);
1434                 ret = hns_dsaf_roce_reset(fwnode, true);
1435         }
1436
1437         return ret;
1438 }
1439
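/*
 * Report the capabilities of the v1 (hip06) engine. Almost everything is
 * a hard-coded HNS_ROCE_V1_* constant; only the vendor/part IDs, the
 * system image GUID and the local CA ack delay come from registers.
 */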
1440 static int hns_roce_v1_profile(struct hns_roce_dev *hr_dev)
1441 {
1442         struct hns_roce_caps *caps = &hr_dev->caps;
1443         int i;
1444
1445         hr_dev->vendor_id = roce_read(hr_dev, ROCEE_VENDOR_ID_REG);
1446         hr_dev->vendor_part_id = roce_read(hr_dev, ROCEE_VENDOR_PART_ID_REG);
1447         hr_dev->sys_image_guid = roce_read(hr_dev, ROCEE_SYS_IMAGE_GUID_L_REG) |
1448                                 ((u64)roce_read(hr_dev,
1449                                             ROCEE_SYS_IMAGE_GUID_H_REG) << 32);
1450         hr_dev->hw_rev          = HNS_ROCE_HW_VER1;
1451
1452         caps->num_qps           = HNS_ROCE_V1_MAX_QP_NUM;
1453         caps->max_wqes          = HNS_ROCE_V1_MAX_WQE_NUM;
1454         caps->min_wqes          = HNS_ROCE_MIN_WQE_NUM;
1455         caps->num_cqs           = HNS_ROCE_V1_MAX_CQ_NUM;
1456         caps->min_cqes          = HNS_ROCE_MIN_CQE_NUM;
1457         caps->max_cqes          = HNS_ROCE_V1_MAX_CQE_NUM;
1458         caps->max_sq_sg         = HNS_ROCE_V1_SG_NUM;
1459         caps->max_rq_sg         = HNS_ROCE_V1_SG_NUM;
1460         caps->max_sq_inline     = HNS_ROCE_V1_INLINE_SIZE;
1461         caps->num_uars          = HNS_ROCE_V1_UAR_NUM;
1462         caps->phy_num_uars      = HNS_ROCE_V1_PHY_UAR_NUM;
1463         caps->num_aeq_vectors   = HNS_ROCE_V1_AEQE_VEC_NUM;
1464         caps->num_comp_vectors  = HNS_ROCE_V1_COMP_VEC_NUM;
1465         caps->num_other_vectors = HNS_ROCE_V1_ABNORMAL_VEC_NUM;
1466         caps->num_mtpts         = HNS_ROCE_V1_MAX_MTPT_NUM;
1467         caps->num_mtt_segs      = HNS_ROCE_V1_MAX_MTT_SEGS;
1468         caps->num_pds           = HNS_ROCE_V1_MAX_PD_NUM;
1469         caps->max_qp_init_rdma  = HNS_ROCE_V1_MAX_QP_INIT_RDMA;
1470         caps->max_qp_dest_rdma  = HNS_ROCE_V1_MAX_QP_DEST_RDMA;
1471         caps->max_sq_desc_sz    = HNS_ROCE_V1_MAX_SQ_DESC_SZ;
1472         caps->max_rq_desc_sz    = HNS_ROCE_V1_MAX_RQ_DESC_SZ;
1473         caps->qpc_sz            = HNS_ROCE_V1_QPC_SIZE;
1474         caps->irrl_entry_sz     = HNS_ROCE_V1_IRRL_ENTRY_SIZE;
1475         caps->cqc_entry_sz      = HNS_ROCE_V1_CQC_ENTRY_SIZE;
1476         caps->mtpt_entry_sz     = HNS_ROCE_V1_MTPT_ENTRY_SIZE;
1477         caps->mtt_entry_sz      = HNS_ROCE_V1_MTT_ENTRY_SIZE;
1478         caps->cqe_sz            = HNS_ROCE_V1_CQE_SIZE;
1479         caps->page_size_cap     = HNS_ROCE_V1_PAGE_SIZE_SUPPORT;
1480         caps->reserved_lkey     = 0;
1481         caps->reserved_pds      = 0;
1482         caps->reserved_mrws     = 1;
1483         caps->reserved_uars     = 0;
1484         caps->reserved_cqs      = 0;
1485         caps->reserved_qps      = 12; /* 2 SQPs per port, 6 ports, 12 in total */
1486         caps->chunk_sz          = HNS_ROCE_V1_TABLE_CHUNK_SIZE;
1487
1488         for (i = 0; i < caps->num_ports; i++)
1489                 caps->pkey_table_len[i] = 1;
1490
1491         for (i = 0; i < caps->num_ports; i++) {
1492                 /* Six ports share 16 GIDs in the v1 engine */
1493                 if (i >= (HNS_ROCE_V1_GID_NUM % caps->num_ports))
1494                         caps->gid_table_len[i] = HNS_ROCE_V1_GID_NUM /
1495                                                  caps->num_ports;
1496                 else
1497                         caps->gid_table_len[i] = HNS_ROCE_V1_GID_NUM /
1498                                                  caps->num_ports + 1;
1499         }
1500
1501         caps->ceqe_depth = HNS_ROCE_V1_COMP_EQE_NUM;
1502         caps->aeqe_depth = HNS_ROCE_V1_ASYNC_EQE_NUM;
1503         caps->local_ca_ack_delay = roce_read(hr_dev, ROCEE_ACK_DELAY_REG);
1504         caps->max_mtu = IB_MTU_2048;
1505
1506         return 0;
1507 }
1508
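/*
 * Bring the v1 engine up: program the DMAE user config, then initialize
 * the doorbell, RAQ, BT, tptr and free-mr subsystems in order (unwinding
 * in reverse on failure), and finally open the ports.
 */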
1509 static int hns_roce_v1_init(struct hns_roce_dev *hr_dev)
1510 {
1511         int ret;
1512         u32 val;
1513         __le32 tmp;
1514         struct device *dev = &hr_dev->pdev->dev;
1515
1516         /* DMAE user config */
1517         val = roce_read(hr_dev, ROCEE_DMAE_USER_CFG1_REG);
1518         tmp = cpu_to_le32(val);
1519         roce_set_field(tmp, ROCEE_DMAE_USER_CFG1_ROCEE_CACHE_TB_CFG_M,
1520                        ROCEE_DMAE_USER_CFG1_ROCEE_CACHE_TB_CFG_S, 0xf);
1521         roce_set_field(tmp, ROCEE_DMAE_USER_CFG1_ROCEE_STREAM_ID_TB_CFG_M,
1522                        ROCEE_DMAE_USER_CFG1_ROCEE_STREAM_ID_TB_CFG_S,
1523                        1 << PAGES_SHIFT_16);
1524         val = le32_to_cpu(tmp);
1525         roce_write(hr_dev, ROCEE_DMAE_USER_CFG1_REG, val);
1526
1527         val = roce_read(hr_dev, ROCEE_DMAE_USER_CFG2_REG);
1528         tmp = cpu_to_le32(val);
1529         roce_set_field(tmp, ROCEE_DMAE_USER_CFG2_ROCEE_CACHE_PKT_CFG_M,
1530                        ROCEE_DMAE_USER_CFG2_ROCEE_CACHE_PKT_CFG_S, 0xf);
1531         roce_set_field(tmp, ROCEE_DMAE_USER_CFG2_ROCEE_STREAM_ID_PKT_CFG_M,
1532                        ROCEE_DMAE_USER_CFG2_ROCEE_STREAM_ID_PKT_CFG_S,
1533                        1 << PAGES_SHIFT_16);
        /* Write the modified value back; without this the CFG2
         * read-modify sequence above has no effect (the write-back was
         * missing here, unlike the CFG1 sequence above).
         */
        val = le32_to_cpu(tmp);
        roce_write(hr_dev, ROCEE_DMAE_USER_CFG2_REG, val);
1534
1535         ret = hns_roce_db_init(hr_dev);
1536         if (ret) {
1537                 dev_err(dev, "doorbell init failed!\n");
1538                 return ret;
1539         }
1540
1541         ret = hns_roce_raq_init(hr_dev);
1542         if (ret) {
1543                 dev_err(dev, "raq init failed!\n");
1544                 goto error_failed_raq_init;
1545         }
1546
1547         ret = hns_roce_bt_init(hr_dev);
1548         if (ret) {
1549                 dev_err(dev, "bt init failed!\n");
1550                 goto error_failed_bt_init;
1551         }
1552
1553         ret = hns_roce_tptr_init(hr_dev);
1554         if (ret) {
1555                 dev_err(dev, "tptr init failed!\n");
1556                 goto error_failed_tptr_init;
1557         }
1558
1559         ret = hns_roce_free_mr_init(hr_dev);
1560         if (ret) {
1561                 dev_err(dev, "free mr init failed!\n");
1562                 goto error_failed_free_mr_init;
1563         }
1564
1565         hns_roce_port_enable(hr_dev, HNS_ROCE_PORT_UP);
1566
1567         return 0;
1568
1569 error_failed_free_mr_init:
1570         hns_roce_tptr_free(hr_dev);
1571
1572 error_failed_tptr_init:
1573         hns_roce_bt_free(hr_dev);
1574
1575 error_failed_bt_init:
1576         hns_roce_raq_free(hr_dev);
1577
1578 error_failed_raq_init:
1579         hns_roce_db_free(hr_dev);
1580         return ret;
1581 }
1582
1583 static void hns_roce_v1_exit(struct hns_roce_dev *hr_dev)
1584 {
1585         hns_roce_port_enable(hr_dev, HNS_ROCE_PORT_DOWN);
1586         hns_roce_free_mr_free(hr_dev);
1587         hns_roce_tptr_free(hr_dev);
1588         hns_roce_bt_free(hr_dev);
1589         hns_roce_raq_free(hr_dev);
1590         hns_roce_db_free(hr_dev);
1591 }
1592
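/* Return nonzero while the GO bit is still set, i.e. while the hardware
 * has not yet consumed the previously posted mailbox command.
 */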
1593 static int hns_roce_v1_cmd_pending(struct hns_roce_dev *hr_dev)
1594 {
1595         u32 status = readl(hr_dev->reg_base + ROCEE_MB6_REG);
1596
1597         return (!!(status & (1 << HCR_GO_BIT)));
1598 }
1599
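/*
 * Post a mailbox command through the ROCEE_MB registers: the 64-bit
 * in/out parameters go first, then the modifier; writing the control
 * word last (after the barrier) is what actually kicks off the command.
 */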
1600 static int hns_roce_v1_post_mbox(struct hns_roce_dev *hr_dev, u64 in_param,
1601                                  u64 out_param, u32 in_modifier, u8 op_modifier,
1602                                  u16 op, u16 token, int event)
1603 {
1604         u32 __iomem *hcr = (u32 __iomem *)(hr_dev->reg_base + ROCEE_MB1_REG);
1605         unsigned long end;
1606         u32 val = 0;
1607         __le32 tmp;
1608
1609         end = msecs_to_jiffies(GO_BIT_TIMEOUT_MSECS) + jiffies;
1610         while (hns_roce_v1_cmd_pending(hr_dev)) {
1611                 if (time_after(jiffies, end)) {
1612                         dev_err(hr_dev->dev, "jiffies=%lu end=%lu\n",
1613                                 jiffies, end);
1614                         return -EAGAIN;
1615                 }
1616                 cond_resched();
1617         }
1618
1619         tmp = cpu_to_le32(val);
1620         roce_set_field(tmp, ROCEE_MB6_ROCEE_MB_CMD_M, ROCEE_MB6_ROCEE_MB_CMD_S,
1621                        op);
1622         roce_set_field(tmp, ROCEE_MB6_ROCEE_MB_CMD_MDF_M,
1623                        ROCEE_MB6_ROCEE_MB_CMD_MDF_S, op_modifier);
1624         roce_set_bit(tmp, ROCEE_MB6_ROCEE_MB_EVENT_S, event);
1625         roce_set_bit(tmp, ROCEE_MB6_ROCEE_MB_HW_RUN_S, 1);
1626         roce_set_field(tmp, ROCEE_MB6_ROCEE_MB_TOKEN_M,
1627                        ROCEE_MB6_ROCEE_MB_TOKEN_S, token);
1628
1629         val = le32_to_cpu(tmp);
1630         writeq(in_param, hcr + 0);
1631         writeq(out_param, hcr + 2);
1632         writel(in_modifier, hcr + 4);
1633         /* Memory barrier */
1634         wmb();
1635
1636         writel(val, hcr + 5);
1637
1638         return 0;
1639 }
1640
1641 static int hns_roce_v1_chk_mbox(struct hns_roce_dev *hr_dev,
1642                                 unsigned long timeout)
1643 {
1644         u8 __iomem *hcr = hr_dev->reg_base + ROCEE_MB1_REG;
1645         unsigned long end;
1646         u32 status = 0;
1647
1648         end = msecs_to_jiffies(timeout) + jiffies;
1649         while (hns_roce_v1_cmd_pending(hr_dev) && time_before(jiffies, end))
1650                 cond_resched();
1651
1652         if (hns_roce_v1_cmd_pending(hr_dev)) {
1653                 dev_err(hr_dev->dev, "[cmd_poll]hw run cmd TIMEDOUT!\n");
1654                 return -ETIMEDOUT;
1655         }
1656
1657         status = le32_to_cpu((__force __le32)
1658                               __raw_readl(hcr + HCR_STATUS_OFFSET));
1659         if ((status & STATUS_MASK) != 0x1) {
1660                 dev_err(hr_dev->dev, "mailbox status 0x%x!\n", status);
1661                 return -EBUSY;
1662         }
1663
1664         return 0;
1665 }
1666
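/*
 * Program one 128-bit GID as four 32-bit chunks into the L/ML/MH/H
 * banks of the GID registers, serialized by the iboe lock.
 */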
1667 static int hns_roce_v1_set_gid(struct hns_roce_dev *hr_dev, u8 port,
1668                                int gid_index, const union ib_gid *gid,
1669                                const struct ib_gid_attr *attr)
1670 {
1671         unsigned long flags;
1672         u32 *p = NULL;
1673         u8 gid_idx;
1674
1675         gid_idx = hns_get_gid_index(hr_dev, port, gid_index);
1676
1677         spin_lock_irqsave(&hr_dev->iboe.lock, flags);
1678
1679         p = (u32 *)&gid->raw[0];
1680         roce_raw_write(*p, hr_dev->reg_base + ROCEE_PORT_GID_L_0_REG +
1681                        (HNS_ROCE_V1_GID_NUM * gid_idx));
1682
1683         p = (u32 *)&gid->raw[4];
1684         roce_raw_write(*p, hr_dev->reg_base + ROCEE_PORT_GID_ML_0_REG +
1685                        (HNS_ROCE_V1_GID_NUM * gid_idx));
1686
1687         p = (u32 *)&gid->raw[8];
1688         roce_raw_write(*p, hr_dev->reg_base + ROCEE_PORT_GID_MH_0_REG +
1689                        (HNS_ROCE_V1_GID_NUM * gid_idx));
1690
1691         p = (u32 *)&gid->raw[0xc];
1692         roce_raw_write(*p, hr_dev->reg_base + ROCEE_PORT_GID_H_0_REG +
1693                        (HNS_ROCE_V1_GID_NUM * gid_idx));
1694
1695         spin_unlock_irqrestore(&hr_dev->iboe.lock, flags);
1696
1697         return 0;
1698 }
1699
1700 static int hns_roce_v1_set_mac(struct hns_roce_dev *hr_dev, u8 phy_port,
1701                                u8 *addr)
1702 {
1703         u32 reg_smac_l;
1704         u16 reg_smac_h;
1705         __le32 tmp;
1706         u16 *p_h;
1707         u32 *p;
1708         u32 val;
1709
1710         /*
1711          * When the MAC address changes, loopback may fail
1712          * because the smac no longer equals the dmac.
1713          * We need to release and recreate the reserved loopback QPs.
1714          */
1715         if (hr_dev->hw->dereg_mr) {
1716                 int ret;
1717
1718                 ret = hns_roce_v1_recreate_lp_qp(hr_dev);
1719                 if (ret && ret != -ETIMEDOUT)
1720                         return ret;
1721         }
1722
1723         p = (u32 *)(&addr[0]);
1724         reg_smac_l = *p;
1725         roce_raw_write(reg_smac_l, hr_dev->reg_base + ROCEE_SMAC_L_0_REG +
1726                        PHY_PORT_OFFSET * phy_port);
1727
1728         val = roce_read(hr_dev,
1729                         ROCEE_SMAC_H_0_REG + phy_port * PHY_PORT_OFFSET);
1730         tmp = cpu_to_le32(val);
1731         p_h = (u16 *)(&addr[4]);
1732         reg_smac_h  = *p_h;
1733         roce_set_field(tmp, ROCEE_SMAC_H_ROCEE_SMAC_H_M,
1734                        ROCEE_SMAC_H_ROCEE_SMAC_H_S, reg_smac_h);
1735         val = le32_to_cpu(tmp);
1736         roce_write(hr_dev, ROCEE_SMAC_H_0_REG + phy_port * PHY_PORT_OFFSET,
1737                    val);
1738
1739         return 0;
1740 }
1741
1742 static void hns_roce_v1_set_mtu(struct hns_roce_dev *hr_dev, u8 phy_port,
1743                                 enum ib_mtu mtu)
1744 {
1745         __le32 tmp;
1746         u32 val;
1747
1748         val = roce_read(hr_dev,
1749                         ROCEE_SMAC_H_0_REG + phy_port * PHY_PORT_OFFSET);
1750         tmp = cpu_to_le32(val);
1751         roce_set_field(tmp, ROCEE_SMAC_H_ROCEE_PORT_MTU_M,
1752                        ROCEE_SMAC_H_ROCEE_PORT_MTU_S, mtu);
1753         val = le32_to_cpu(tmp);
1754         roce_write(hr_dev, ROCEE_SMAC_H_0_REG + phy_port * PHY_PORT_OFFSET,
1755                    val);
1756 }
1757
1758 static int hns_roce_v1_write_mtpt(struct hns_roce_dev *hr_dev, void *mb_buf,
1759                                   struct hns_roce_mr *mr,
1760                                   unsigned long mtpt_idx)
1761 {
1762         u64 pages[HNS_ROCE_MAX_INNER_MTPT_NUM] = { 0 };
1763         struct ib_device *ibdev = &hr_dev->ib_dev;
1764         struct hns_roce_v1_mpt_entry *mpt_entry;
1765         dma_addr_t pbl_ba;
1766         int count;
1767         int i;
1768
1769         /* MPT filled into mailbox buf */
1770         mpt_entry = (struct hns_roce_v1_mpt_entry *)mb_buf;
1771         memset(mpt_entry, 0, sizeof(*mpt_entry));
1772
1773         roce_set_field(mpt_entry->mpt_byte_4, MPT_BYTE_4_KEY_STATE_M,
1774                        MPT_BYTE_4_KEY_STATE_S, KEY_VALID);
1775         roce_set_field(mpt_entry->mpt_byte_4, MPT_BYTE_4_KEY_M,
1776                        MPT_BYTE_4_KEY_S, mr->key);
1777         roce_set_field(mpt_entry->mpt_byte_4, MPT_BYTE_4_PAGE_SIZE_M,
1778                        MPT_BYTE_4_PAGE_SIZE_S, MR_SIZE_4K);
1779         roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_MW_TYPE_S, 0);
1780         roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_MW_BIND_ENABLE_S,
1781                      (mr->access & IB_ACCESS_MW_BIND ? 1 : 0));
1782         roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_OWN_S, 0);
1783         roce_set_field(mpt_entry->mpt_byte_4, MPT_BYTE_4_MEMORY_LOCATION_TYPE_M,
1784                        MPT_BYTE_4_MEMORY_LOCATION_TYPE_S, mr->type);
1785         roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_REMOTE_ATOMIC_S, 0);
1786         roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_LOCAL_WRITE_S,
1787                      (mr->access & IB_ACCESS_LOCAL_WRITE ? 1 : 0));
1788         roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_REMOTE_WRITE_S,
1789                      (mr->access & IB_ACCESS_REMOTE_WRITE ? 1 : 0));
1790         roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_REMOTE_READ_S,
1791                      (mr->access & IB_ACCESS_REMOTE_READ ? 1 : 0));
1792         roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_REMOTE_INVAL_ENABLE_S,
1793                      0);
1794         roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_ADDRESS_TYPE_S, 0);
1795
1796         roce_set_field(mpt_entry->mpt_byte_12, MPT_BYTE_12_PBL_ADDR_H_M,
1797                        MPT_BYTE_12_PBL_ADDR_H_S, 0);
1798         roce_set_field(mpt_entry->mpt_byte_12, MPT_BYTE_12_MW_BIND_COUNTER_M,
1799                        MPT_BYTE_12_MW_BIND_COUNTER_S, 0);
1800
1801         mpt_entry->virt_addr_l = cpu_to_le32((u32)mr->iova);
1802         mpt_entry->virt_addr_h = cpu_to_le32((u32)(mr->iova >> 32));
1803         mpt_entry->length = cpu_to_le32((u32)mr->size);
1804
1805         roce_set_field(mpt_entry->mpt_byte_28, MPT_BYTE_28_PD_M,
1806                        MPT_BYTE_28_PD_S, mr->pd);
1807         roce_set_field(mpt_entry->mpt_byte_28, MPT_BYTE_28_L_KEY_IDX_L_M,
1808                        MPT_BYTE_28_L_KEY_IDX_L_S, mtpt_idx);
1809         roce_set_field(mpt_entry->mpt_byte_64, MPT_BYTE_64_L_KEY_IDX_H_M,
1810                        MPT_BYTE_64_L_KEY_IDX_H_S, mtpt_idx >> MTPT_IDX_SHIFT);
1811
1812         /* A DMA MR has no PBL, so the entry is complete at this point */
1813         if (mr->type == MR_TYPE_DMA)
1814                 return 0;
1815
1816         count = hns_roce_mtr_find(hr_dev, &mr->pbl_mtr, 0, pages,
1817                                   ARRAY_SIZE(pages), &pbl_ba);
1818         if (count < 1) {
1819                 ibdev_err(ibdev, "failed to find PBL mtr, count = %d.", count);
1820                 return -ENOBUFS;
1821         }
1822
1823         /* Register user mr */
1824         for (i = 0; i < count; i++) {
1825                 switch (i) {
1826                 case 0:
1827                         mpt_entry->pa0_l = cpu_to_le32((u32)(pages[i]));
1828                         roce_set_field(mpt_entry->mpt_byte_36,
1829                                 MPT_BYTE_36_PA0_H_M,
1830                                 MPT_BYTE_36_PA0_H_S,
1831                                 (u32)(pages[i] >> PAGES_SHIFT_32));
1832                         break;
1833                 case 1:
1834                         roce_set_field(mpt_entry->mpt_byte_36,
1835                                        MPT_BYTE_36_PA1_L_M,
1836                                        MPT_BYTE_36_PA1_L_S, (u32)(pages[i]));
1837                         roce_set_field(mpt_entry->mpt_byte_40,
1838                                 MPT_BYTE_40_PA1_H_M,
1839                                 MPT_BYTE_40_PA1_H_S,
1840                                 (u32)(pages[i] >> PAGES_SHIFT_24));
1841                         break;
1842                 case 2:
1843                         roce_set_field(mpt_entry->mpt_byte_40,
1844                                        MPT_BYTE_40_PA2_L_M,
1845                                        MPT_BYTE_40_PA2_L_S, (u32)(pages[i]));
1846                         roce_set_field(mpt_entry->mpt_byte_44,
1847                                 MPT_BYTE_44_PA2_H_M,
1848                                 MPT_BYTE_44_PA2_H_S,
1849                                 (u32)(pages[i] >> PAGES_SHIFT_16));
1850                         break;
1851                 case 3:
1852                         roce_set_field(mpt_entry->mpt_byte_44,
1853                                        MPT_BYTE_44_PA3_L_M,
1854                                        MPT_BYTE_44_PA3_L_S, (u32)(pages[i]));
1855                         roce_set_field(mpt_entry->mpt_byte_48,
1856                                 MPT_BYTE_48_PA3_H_M,
1857                                 MPT_BYTE_48_PA3_H_S,
1858                                 (u32)(pages[i] >> PAGES_SHIFT_8));
1859                         break;
1860                 case 4:
1861                         mpt_entry->pa4_l = cpu_to_le32((u32)(pages[i]));
1862                         roce_set_field(mpt_entry->mpt_byte_56,
1863                                 MPT_BYTE_56_PA4_H_M,
1864                                 MPT_BYTE_56_PA4_H_S,
1865                                 (u32)(pages[i] >> PAGES_SHIFT_32));
1866                         break;
1867                 case 5:
1868                         roce_set_field(mpt_entry->mpt_byte_56,
1869                                        MPT_BYTE_56_PA5_L_M,
1870                                        MPT_BYTE_56_PA5_L_S, (u32)(pages[i]));
1871                         roce_set_field(mpt_entry->mpt_byte_60,
1872                                 MPT_BYTE_60_PA5_H_M,
1873                                 MPT_BYTE_60_PA5_H_S,
1874                                 (u32)(pages[i] >> PAGES_SHIFT_24));
1875                         break;
1876                 case 6:
1877                         roce_set_field(mpt_entry->mpt_byte_60,
1878                                        MPT_BYTE_60_PA6_L_M,
1879                                        MPT_BYTE_60_PA6_L_S, (u32)(pages[i]));
1880                         roce_set_field(mpt_entry->mpt_byte_64,
1881                                 MPT_BYTE_64_PA6_H_M,
1882                                 MPT_BYTE_64_PA6_H_S,
1883                                 (u32)(pages[i] >> PAGES_SHIFT_16));
1884                         break;
1885                 default:
1886                         break;
1887                 }
1888         }
1889
1890         mpt_entry->pbl_addr_l = cpu_to_le32(pbl_ba);
1891         roce_set_field(mpt_entry->mpt_byte_12, MPT_BYTE_12_PBL_ADDR_H_M,
1892                        MPT_BYTE_12_PBL_ADDR_H_S, upper_32_bits(pbl_ba));
1893
1894         return 0;
1895 }
1896
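/*
 * CQE ring helpers: get_cqe() indexes into the CQ buffer, while
 * get_sw_cqe() additionally applies the owner-bit test so that only
 * entries already handed back to software are returned.
 */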
1897 static void *get_cqe(struct hns_roce_cq *hr_cq, int n)
1898 {
1899         return hns_roce_buf_offset(hr_cq->mtr.kmem, n * HNS_ROCE_V1_CQE_SIZE);
1900 }
1901
1902 static void *get_sw_cqe(struct hns_roce_cq *hr_cq, int n)
1903 {
1904         struct hns_roce_cqe *hr_cqe = get_cqe(hr_cq, n & hr_cq->ib_cq.cqe);
1905
1906         /* A CQE is software-owned when its owner bit is the inverse of the MSB of cons_index */
1907         return (roce_get_bit(hr_cqe->cqe_byte_4, CQE_BYTE_4_OWNER_S) ^
1908                 !!(n & hr_cq->cq_depth)) ? hr_cqe : NULL;
1909 }
1910
1911 static struct hns_roce_cqe *next_cqe_sw(struct hns_roce_cq *hr_cq)
1912 {
1913         return get_sw_cqe(hr_cq, hr_cq->cons_index);
1914 }
1915
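/*
 * Ring the CQ doorbell to publish a new consumer index: word 0 carries
 * the index (modulo twice the CQ depth), word 1 the hardware sync flag,
 * the command type and the CQN.
 */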
1916 static void hns_roce_v1_cq_set_ci(struct hns_roce_cq *hr_cq, u32 cons_index)
1917 {
1918         __le32 doorbell[2];
1919
1920         doorbell[0] = cpu_to_le32(cons_index & ((hr_cq->cq_depth << 1) - 1));
1921         doorbell[1] = 0;
1922         roce_set_bit(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_HW_SYNS_S, 1);
1923         roce_set_field(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_M,
1924                        ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_S, 3);
1925         roce_set_field(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_MDF_M,
1926                        ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_MDF_S, 0);
1927         roce_set_field(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_INP_H_M,
1928                        ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_INP_H_S, hr_cq->cqn);
1929
1930         hns_roce_write64_k(doorbell, hr_cq->cq_db_l);
1931 }
1932
1933 static void __hns_roce_v1_cq_clean(struct hns_roce_cq *hr_cq, u32 qpn,
1934                                    struct hns_roce_srq *srq)
1935 {
1936         struct hns_roce_cqe *cqe, *dest;
1937         u32 prod_index;
1938         int nfreed = 0;
1939         u8 owner_bit;
1940
1941         for (prod_index = hr_cq->cons_index; get_sw_cqe(hr_cq, prod_index);
1942              ++prod_index) {
1943                 if (prod_index == hr_cq->cons_index + hr_cq->ib_cq.cqe)
1944                         break;
1945         }
1946
1947         /*
1948          * Now backwards through the CQ, removing CQ entries
1949          * that match our QP by overwriting them with next entries.
1950          */
1951         while ((int) --prod_index - (int) hr_cq->cons_index >= 0) {
1952                 cqe = get_cqe(hr_cq, prod_index & hr_cq->ib_cq.cqe);
1953                 if ((roce_get_field(cqe->cqe_byte_16, CQE_BYTE_16_LOCAL_QPN_M,
1954                                      CQE_BYTE_16_LOCAL_QPN_S) &
1955                                      HNS_ROCE_CQE_QPN_MASK) == qpn) {
1956                         /* The v1 engine does not support SRQ */
1957                         ++nfreed;
1958                 } else if (nfreed) {
1959                         dest = get_cqe(hr_cq, (prod_index + nfreed) &
1960                                        hr_cq->ib_cq.cqe);
1961                         owner_bit = roce_get_bit(dest->cqe_byte_4,
1962                                                  CQE_BYTE_4_OWNER_S);
1963                         memcpy(dest, cqe, sizeof(*cqe));
1964                         roce_set_bit(dest->cqe_byte_4, CQE_BYTE_4_OWNER_S,
1965                                      owner_bit);
1966                 }
1967         }
1968
1969         if (nfreed) {
1970                 hr_cq->cons_index += nfreed;
1971                 /*
1972                  * Make sure update of buffer contents is done before
1973                  * updating consumer index.
1974                  */
1975                 wmb();
1976
1977                 hns_roce_v1_cq_set_ci(hr_cq, hr_cq->cons_index);
1978         }
1979 }
1980
1981 static void hns_roce_v1_cq_clean(struct hns_roce_cq *hr_cq, u32 qpn,
1982                                  struct hns_roce_srq *srq)
1983 {
1984         spin_lock_irq(&hr_cq->lock);
1985         __hns_roce_v1_cq_clean(hr_cq, qpn, srq);
1986         spin_unlock_irq(&hr_cq->lock);
1987 }
1988
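/*
 * Build a CQ context in the mailbox buffer: the base table address, the
 * CQE shift and completion vector, the first CQE address, and the per-CQ
 * tptr slot that the hardware polls for the consumer index.
 */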
1989 static void hns_roce_v1_write_cqc(struct hns_roce_dev *hr_dev,
1990                                   struct hns_roce_cq *hr_cq, void *mb_buf,
1991                                   u64 *mtts, dma_addr_t dma_handle)
1992 {
1993         struct hns_roce_v1_priv *priv = hr_dev->priv;
1994         struct hns_roce_buf_list *tptr_buf = &priv->tptr_table.tptr_buf;
1995         struct hns_roce_cq_context *cq_context = mb_buf;
1996         dma_addr_t tptr_dma_addr;
1997         int offset;
1998
1999         memset(cq_context, 0, sizeof(*cq_context));
2000
2001         /* Get the tptr for this CQ. */
2002         offset = hr_cq->cqn * HNS_ROCE_V1_TPTR_ENTRY_SIZE;
2003         tptr_dma_addr = tptr_buf->map + offset;
2004         hr_cq->tptr_addr = (u16 *)(tptr_buf->buf + offset);
2005
2006         /* Register cq_context members */
2007         roce_set_field(cq_context->cqc_byte_4,
2008                        CQ_CONTEXT_CQC_BYTE_4_CQC_STATE_M,
2009                        CQ_CONTEXT_CQC_BYTE_4_CQC_STATE_S, CQ_STATE_VALID);
2010         roce_set_field(cq_context->cqc_byte_4, CQ_CONTEXT_CQC_BYTE_4_CQN_M,
2011                        CQ_CONTEXT_CQC_BYTE_4_CQN_S, hr_cq->cqn);
2012
2013         cq_context->cq_bt_l = cpu_to_le32((u32)dma_handle);
2014
2015         roce_set_field(cq_context->cqc_byte_12,
2016                        CQ_CONTEXT_CQC_BYTE_12_CQ_BT_H_M,
2017                        CQ_CONTEXT_CQC_BYTE_12_CQ_BT_H_S,
2018                        ((u64)dma_handle >> 32));
2019         roce_set_field(cq_context->cqc_byte_12,
2020                        CQ_CONTEXT_CQC_BYTE_12_CQ_CQE_SHIFT_M,
2021                        CQ_CONTEXT_CQC_BYTE_12_CQ_CQE_SHIFT_S,
2022                        ilog2(hr_cq->cq_depth));
2023         roce_set_field(cq_context->cqc_byte_12, CQ_CONTEXT_CQC_BYTE_12_CEQN_M,
2024                        CQ_CONTEXT_CQC_BYTE_12_CEQN_S, hr_cq->vector);
2025
2026         cq_context->cur_cqe_ba0_l = cpu_to_le32((u32)(mtts[0]));
2027
2028         roce_set_field(cq_context->cqc_byte_20,
2029                        CQ_CONTEXT_CQC_BYTE_20_CUR_CQE_BA0_H_M,
2030                        CQ_CONTEXT_CQC_BYTE_20_CUR_CQE_BA0_H_S, (mtts[0]) >> 32);
2031         /* Dedicated hardware: the current CQE index is simply set to 0 */
2032         roce_set_field(cq_context->cqc_byte_20,
2033                        CQ_CONTEXT_CQC_BYTE_20_CQ_CUR_INDEX_M,
2034                        CQ_CONTEXT_CQC_BYTE_20_CQ_CUR_INDEX_S, 0);
2035         /*
2036          * 44 = 32 + 12. The address passed to hardware is shifted right
2037          * by 12 because 4K pages are used, and by a further 32 to
2038          * extract the high 32 bits written to hardware.
2039          */
2040         roce_set_field(cq_context->cqc_byte_20,
2041                        CQ_CONTEXT_CQC_BYTE_20_CQE_TPTR_ADDR_H_M,
2042                        CQ_CONTEXT_CQC_BYTE_20_CQE_TPTR_ADDR_H_S,
2043                        tptr_dma_addr >> 44);
2044
2045         cq_context->cqe_tptr_addr_l = cpu_to_le32((u32)(tptr_dma_addr >> 12));
2046
2047         roce_set_field(cq_context->cqc_byte_32,
2048                        CQ_CONTEXT_CQC_BYTE_32_CUR_CQE_BA1_H_M,
2049                        CQ_CONTEXT_CQC_BYTE_32_CUR_CQE_BA1_H_S, 0);
2050         roce_set_bit(cq_context->cqc_byte_32,
2051                      CQ_CONTEXT_CQC_BYTE_32_SE_FLAG_S, 0);
2052         roce_set_bit(cq_context->cqc_byte_32,
2053                      CQ_CONTEXT_CQC_BYTE_32_CE_FLAG_S, 0);
2054         roce_set_bit(cq_context->cqc_byte_32,
2055                      CQ_CONTEXT_CQC_BYTE_32_NOTIFICATION_FLAG_S, 0);
2056         roce_set_bit(cq_context->cqc_byte_32,
2057                      CQ_CQNTEXT_CQC_BYTE_32_TYPE_OF_COMPLETION_NOTIFICATION_S,
2058                      0);
2059         /* The initial value of cq's ci is 0 */
2060         roce_set_field(cq_context->cqc_byte_32,
2061                        CQ_CONTEXT_CQC_BYTE_32_CQ_CONS_IDX_M,
2062                        CQ_CONTEXT_CQC_BYTE_32_CQ_CONS_IDX_S, 0);
2063 }
2064
2065 static int hns_roce_v1_req_notify_cq(struct ib_cq *ibcq,
2066                                      enum ib_cq_notify_flags flags)
2067 {
2068         struct hns_roce_cq *hr_cq = to_hr_cq(ibcq);
2069         u32 notification_flag;
2070         __le32 doorbell[2] = {};
2071
2072         notification_flag = (flags & IB_CQ_SOLICITED_MASK) ==
2073                             IB_CQ_SOLICITED ? CQ_DB_REQ_NOT : CQ_DB_REQ_NOT_SOL;
2074         /*
2075          * flags = 0: notification flag = 1, notify on next completion
2076          * flags = 1: notification flag = 0, notify on solicited only
2077          */
2078         doorbell[0] =
2079                 cpu_to_le32(hr_cq->cons_index & ((hr_cq->cq_depth << 1) - 1));
2080         roce_set_bit(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_HW_SYNS_S, 1);
2081         roce_set_field(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_M,
2082                        ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_S, 3);
2083         roce_set_field(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_MDF_M,
2084                        ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_MDF_S, 1);
2085         roce_set_field(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_INP_H_M,
2086                        ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_INP_H_S,
2087                        hr_cq->cqn | notification_flag);
2088
2089         hns_roce_write64_k(doorbell, hr_cq->cq_db_l);
2090
2091         return 0;
2092 }
2093
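/*
 * Consume a single CQE: resolve the owning QP (recomputing the QPN for
 * UD completions), translate the hardware status/opcode into ib_wc
 * fields, and advance the matching SQ or RQ tail.
 */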
2094 static int hns_roce_v1_poll_one(struct hns_roce_cq *hr_cq,
2095                                 struct hns_roce_qp **cur_qp, struct ib_wc *wc)
2096 {
2097         int qpn;
2098         int is_send;
2099         u16 wqe_ctr;
2100         u32 status;
2101         u32 opcode;
2102         struct hns_roce_cqe *cqe;
2103         struct hns_roce_qp *hr_qp;
2104         struct hns_roce_wq *wq;
2105         struct hns_roce_wqe_ctrl_seg *sq_wqe;
2106         struct hns_roce_dev *hr_dev = to_hr_dev(hr_cq->ib_cq.device);
2107         struct device *dev = &hr_dev->pdev->dev;
2108
2109         /* Find the CQE according to the consumer index */
2110         cqe = next_cqe_sw(hr_cq);
2111         if (!cqe)
2112                 return -EAGAIN;
2113
2114         ++hr_cq->cons_index;
2115         /* Memory barrier */
2116         rmb();
2117         /* 0->SQ, 1->RQ */
2118         is_send  = !(roce_get_bit(cqe->cqe_byte_4, CQE_BYTE_4_SQ_RQ_FLAG_S));
2119
2120         /* The local_qpn in a UD CQE is at most 1, so the real QPN must be computed */
2121         if (roce_get_field(cqe->cqe_byte_16, CQE_BYTE_16_LOCAL_QPN_M,
2122                            CQE_BYTE_16_LOCAL_QPN_S) <= 1) {
2123                 qpn = roce_get_field(cqe->cqe_byte_20, CQE_BYTE_20_PORT_NUM_M,
2124                                      CQE_BYTE_20_PORT_NUM_S) +
2125                       roce_get_field(cqe->cqe_byte_16, CQE_BYTE_16_LOCAL_QPN_M,
2126                                      CQE_BYTE_16_LOCAL_QPN_S) *
2127                                      HNS_ROCE_MAX_PORTS;
2128         } else {
2129                 qpn = roce_get_field(cqe->cqe_byte_16, CQE_BYTE_16_LOCAL_QPN_M,
2130                                      CQE_BYTE_16_LOCAL_QPN_S);
2131         }
2132
2133         if (!*cur_qp || (qpn & HNS_ROCE_CQE_QPN_MASK) != (*cur_qp)->qpn) {
2134                 hr_qp = __hns_roce_qp_lookup(hr_dev, qpn);
2135                 if (unlikely(!hr_qp)) {
2136                         dev_err(dev, "CQ %06lx with entry for unknown QPN %06x\n",
2137                                 hr_cq->cqn, (qpn & HNS_ROCE_CQE_QPN_MASK));
2138                         return -EINVAL;
2139                 }
2140
2141                 *cur_qp = hr_qp;
2142         }
2143
2144         wc->qp = &(*cur_qp)->ibqp;
2145         wc->vendor_err = 0;
2146
2147         status = roce_get_field(cqe->cqe_byte_4,
2148                                 CQE_BYTE_4_STATUS_OF_THE_OPERATION_M,
2149                                 CQE_BYTE_4_STATUS_OF_THE_OPERATION_S) &
2150                                 HNS_ROCE_CQE_STATUS_MASK;
2151         switch (status) {
2152         case HNS_ROCE_CQE_SUCCESS:
2153                 wc->status = IB_WC_SUCCESS;
2154                 break;
2155         case HNS_ROCE_CQE_SYNDROME_LOCAL_LENGTH_ERR:
2156                 wc->status = IB_WC_LOC_LEN_ERR;
2157                 break;
2158         case HNS_ROCE_CQE_SYNDROME_LOCAL_QP_OP_ERR:
2159                 wc->status = IB_WC_LOC_QP_OP_ERR;
2160                 break;
2161         case HNS_ROCE_CQE_SYNDROME_LOCAL_PROT_ERR:
2162                 wc->status = IB_WC_LOC_PROT_ERR;
2163                 break;
2164         case HNS_ROCE_CQE_SYNDROME_WR_FLUSH_ERR:
2165                 wc->status = IB_WC_WR_FLUSH_ERR;
2166                 break;
2167         case HNS_ROCE_CQE_SYNDROME_MEM_MANAGE_OPERATE_ERR:
2168                 wc->status = IB_WC_MW_BIND_ERR;
2169                 break;
2170         case HNS_ROCE_CQE_SYNDROME_BAD_RESP_ERR:
2171                 wc->status = IB_WC_BAD_RESP_ERR;
2172                 break;
2173         case HNS_ROCE_CQE_SYNDROME_LOCAL_ACCESS_ERR:
2174                 wc->status = IB_WC_LOC_ACCESS_ERR;
2175                 break;
2176         case HNS_ROCE_CQE_SYNDROME_REMOTE_INVAL_REQ_ERR:
2177                 wc->status = IB_WC_REM_INV_REQ_ERR;
2178                 break;
2179         case HNS_ROCE_CQE_SYNDROME_REMOTE_ACCESS_ERR:
2180                 wc->status = IB_WC_REM_ACCESS_ERR;
2181                 break;
2182         case HNS_ROCE_CQE_SYNDROME_REMOTE_OP_ERR:
2183                 wc->status = IB_WC_REM_OP_ERR;
2184                 break;
2185         case HNS_ROCE_CQE_SYNDROME_TRANSPORT_RETRY_EXC_ERR:
2186                 wc->status = IB_WC_RETRY_EXC_ERR;
2187                 break;
2188         case HNS_ROCE_CQE_SYNDROME_RNR_RETRY_EXC_ERR:
2189                 wc->status = IB_WC_RNR_RETRY_EXC_ERR;
2190                 break;
2191         default:
2192                 wc->status = IB_WC_GENERAL_ERR;
2193                 break;
2194         }
2195
2196         /* CQE status error, directly return */
2197         if (wc->status != IB_WC_SUCCESS)
2198                 return 0;
2199
2200         if (is_send) {
2201                 /* The CQE corresponds to an SQ WQE */
2202                 sq_wqe = hns_roce_get_send_wqe(*cur_qp,
2203                                                 roce_get_field(cqe->cqe_byte_4,
2204                                                 CQE_BYTE_4_WQE_INDEX_M,
2205                                                 CQE_BYTE_4_WQE_INDEX_S) &
2206                                                 ((*cur_qp)->sq.wqe_cnt - 1));
2207                 switch (le32_to_cpu(sq_wqe->flag) & HNS_ROCE_WQE_OPCODE_MASK) {
2208                 case HNS_ROCE_WQE_OPCODE_SEND:
2209                         wc->opcode = IB_WC_SEND;
2210                         break;
2211                 case HNS_ROCE_WQE_OPCODE_RDMA_READ:
2212                         wc->opcode = IB_WC_RDMA_READ;
2213                         wc->byte_len = le32_to_cpu(cqe->byte_cnt);
2214                         break;
2215                 case HNS_ROCE_WQE_OPCODE_RDMA_WRITE:
2216                         wc->opcode = IB_WC_RDMA_WRITE;
2217                         break;
2218                 case HNS_ROCE_WQE_OPCODE_LOCAL_INV:
2219                         wc->opcode = IB_WC_LOCAL_INV;
2220                         break;
2221                 case HNS_ROCE_WQE_OPCODE_UD_SEND:
2222                         wc->opcode = IB_WC_SEND;
2223                         break;
2224                 default:
2225                         wc->status = IB_WC_GENERAL_ERR;
2226                         break;
2227                 }
2228                 wc->wc_flags = (le32_to_cpu(sq_wqe->flag) & HNS_ROCE_WQE_IMM ?
2229                                 IB_WC_WITH_IMM : 0);
2230
2231                 wq = &(*cur_qp)->sq;
2232                 if ((*cur_qp)->sq_signal_bits) {
2233                         /*
2234                          * If sq_signal_bits is set, first move the
2235                          * tail pointer to the WQE that the current
2236                          * CQE corresponds to.
2237                          */
2238                         wqe_ctr = (u16)roce_get_field(cqe->cqe_byte_4,
2239                                                       CQE_BYTE_4_WQE_INDEX_M,
2240                                                       CQE_BYTE_4_WQE_INDEX_S);
2241                         wq->tail += (wqe_ctr - (u16)wq->tail) &
2242                                     (wq->wqe_cnt - 1);
2243                 }
2244                 wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
2245                 ++wq->tail;
2246         } else {
2247                 /* The CQE corresponds to an RQ WQE */
2248                 wc->byte_len = le32_to_cpu(cqe->byte_cnt);
2249                 opcode = roce_get_field(cqe->cqe_byte_4,
2250                                         CQE_BYTE_4_OPERATION_TYPE_M,
2251                                         CQE_BYTE_4_OPERATION_TYPE_S) &
2252                                         HNS_ROCE_CQE_OPCODE_MASK;
2253                 switch (opcode) {
2254                 case HNS_ROCE_OPCODE_RDMA_WITH_IMM_RECEIVE:
2255                         wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
2256                         wc->wc_flags = IB_WC_WITH_IMM;
2257                         wc->ex.imm_data =
2258                                 cpu_to_be32(le32_to_cpu(cqe->immediate_data));
2259                         break;
2260                 case HNS_ROCE_OPCODE_SEND_DATA_RECEIVE:
2261                         if (roce_get_bit(cqe->cqe_byte_4,
2262                                          CQE_BYTE_4_IMM_INDICATOR_S)) {
2263                                 wc->opcode = IB_WC_RECV;
2264                                 wc->wc_flags = IB_WC_WITH_IMM;
2265                                 wc->ex.imm_data = cpu_to_be32(
2266                                         le32_to_cpu(cqe->immediate_data));
2267                         } else {
2268                                 wc->opcode = IB_WC_RECV;
2269                                 wc->wc_flags = 0;
2270                         }
2271                         break;
2272                 default:
2273                         wc->status = IB_WC_GENERAL_ERR;
2274                         break;
2275                 }
2276
2277                 /* Update tail pointer, record wr_id */
2278                 wq = &(*cur_qp)->rq;
2279                 wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
2280                 ++wq->tail;
2281                 wc->sl = (u8)roce_get_field(cqe->cqe_byte_20, CQE_BYTE_20_SL_M,
2282                                             CQE_BYTE_20_SL_S);
2283                 wc->src_qp = (u8)roce_get_field(cqe->cqe_byte_20,
2284                                                 CQE_BYTE_20_REMOTE_QPN_M,
2285                                                 CQE_BYTE_20_REMOTE_QPN_S);
2286                 wc->wc_flags |= (roce_get_bit(cqe->cqe_byte_20,
2287                                               CQE_BYTE_20_GRH_PRESENT_S) ?
2288                                               IB_WC_GRH : 0);
2289                 wc->pkey_index = (u16)roce_get_field(cqe->cqe_byte_28,
2290                                                      CQE_BYTE_28_P_KEY_IDX_M,
2291                                                      CQE_BYTE_28_P_KEY_IDX_S);
2292         }
2293
2294         return 0;
2295 }
2296
2297 int hns_roce_v1_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
2298 {
2299         struct hns_roce_cq *hr_cq = to_hr_cq(ibcq);
2300         struct hns_roce_qp *cur_qp = NULL;
2301         unsigned long flags;
2302         int npolled;
2303         int ret = 0;
2304
2305         spin_lock_irqsave(&hr_cq->lock, flags);
2306
2307         for (npolled = 0; npolled < num_entries; ++npolled) {
2308                 ret = hns_roce_v1_poll_one(hr_cq, &cur_qp, wc + npolled);
2309                 if (ret)
2310                         break;
2311         }
2312
2313         if (npolled) {
2314                 *hr_cq->tptr_addr = hr_cq->cons_index &
2315                         ((hr_cq->cq_depth << 1) - 1);
2316
2317                 /* Memory barrier */
2318                 wmb();
2319                 hns_roce_v1_cq_set_ci(hr_cq, hr_cq->cons_index);
2320         }
2321
2322         spin_unlock_irqrestore(&hr_cq->lock, flags);
2323
2324         if (ret == 0 || ret == -EAGAIN)
2325                 return npolled;
2326         else
2327                 return ret;
2328 }
2329
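/*
 * Invalidate one HEM entry via the BT_CMD registers: select the reserved
 * BT base for the table type, busy-wait for the hardware sync bit to
 * clear, then issue the command under the bt_cmd lock.
 */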
2330 static int hns_roce_v1_clear_hem(struct hns_roce_dev *hr_dev,
2331                                  struct hns_roce_hem_table *table, int obj,
2332                                  int step_idx)
2333 {
2334         struct hns_roce_v1_priv *priv = hr_dev->priv;
2335         struct device *dev = &hr_dev->pdev->dev;
2336         long end = HW_SYNC_TIMEOUT_MSECS;
2337         __le32 bt_cmd_val[2] = {0};
2338         unsigned long flags = 0;
2339         void __iomem *bt_cmd;
2340         u64 bt_ba = 0;
2341
2342         switch (table->type) {
2343         case HEM_TYPE_QPC:
2344                 bt_ba = priv->bt_table.qpc_buf.map >> 12;
2345                 break;
2346         case HEM_TYPE_MTPT:
2347                 bt_ba = priv->bt_table.mtpt_buf.map >> 12;
2348                 break;
2349         case HEM_TYPE_CQC:
2350                 bt_ba = priv->bt_table.cqc_buf.map >> 12;
2351                 break;
2352         case HEM_TYPE_SRQC:
2353                 dev_dbg(dev, "HEM_TYPE_SRQC not supported.\n");
2354                 return -EINVAL;
2355         default:
2356                 return 0;
2357         }
2358         roce_set_field(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_M,
2359                         ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S, table->type);
2360         roce_set_field(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_IN_MDF_M,
2361                 ROCEE_BT_CMD_H_ROCEE_BT_CMD_IN_MDF_S, obj);
2362         roce_set_bit(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_S, 0);
2363         roce_set_bit(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_HW_SYNS_S, 1);
2364
2365         spin_lock_irqsave(&hr_dev->bt_cmd_lock, flags);
2366
2367         bt_cmd = hr_dev->reg_base + ROCEE_BT_CMD_H_REG;
2368
2369         while (1) {
2370                 if (readl(bt_cmd) >> BT_CMD_SYNC_SHIFT) {
2371                         if (!end) {
2372                                 dev_err(dev, "Write bt_cmd error, hw_sync is not zero.\n");
2373                                 spin_unlock_irqrestore(&hr_dev->bt_cmd_lock,
2374                                         flags);
2375                                 return -EBUSY;
2376                         }
2377                 } else {
2378                         break;
2379                 }
2380                 mdelay(HW_SYNC_SLEEP_TIME_INTERVAL);
2381                 end -= HW_SYNC_SLEEP_TIME_INTERVAL;
2382         }
2383
2384         bt_cmd_val[0] = cpu_to_le32(bt_ba);
2385         roce_set_field(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_BA_H_M,
2386                 ROCEE_BT_CMD_H_ROCEE_BT_CMD_BA_H_S, bt_ba >> 32);
2387         hns_roce_write64_k(bt_cmd_val, hr_dev->reg_base + ROCEE_BT_CMD_L_REG);
2388
2389         spin_unlock_irqrestore(&hr_dev->bt_cmd_lock, flags);
2390
2391         return 0;
2392 }
2393
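/*
 * Drive a QP state transition through the mailbox. The op[][] table maps
 * (current state, new state) pairs to firmware commands; 2RST and 2ERR
 * need no context, every other transition posts the prepared QP context.
 */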
2394 static int hns_roce_v1_qp_modify(struct hns_roce_dev *hr_dev,
2395                                  enum hns_roce_qp_state cur_state,
2396                                  enum hns_roce_qp_state new_state,
2397                                  struct hns_roce_qp_context *context,
2398                                  struct hns_roce_qp *hr_qp)
2399 {
2400         static const u16
2401         op[HNS_ROCE_QP_NUM_STATE][HNS_ROCE_QP_NUM_STATE] = {
2402                 [HNS_ROCE_QP_STATE_RST] = {
2403                 [HNS_ROCE_QP_STATE_RST] = HNS_ROCE_CMD_2RST_QP,
2404                 [HNS_ROCE_QP_STATE_ERR] = HNS_ROCE_CMD_2ERR_QP,
2405                 [HNS_ROCE_QP_STATE_INIT] = HNS_ROCE_CMD_RST2INIT_QP,
2406                 },
2407                 [HNS_ROCE_QP_STATE_INIT] = {
2408                 [HNS_ROCE_QP_STATE_RST] = HNS_ROCE_CMD_2RST_QP,
2409                 [HNS_ROCE_QP_STATE_ERR] = HNS_ROCE_CMD_2ERR_QP,
2410                 /* Note: In the v1 engine, HW does not support INIT2INIT,
2411                  * so the RST2INIT command is used instead.
2412                  */
2413                 [HNS_ROCE_QP_STATE_INIT] = HNS_ROCE_CMD_RST2INIT_QP,
2414                 [HNS_ROCE_QP_STATE_RTR] = HNS_ROCE_CMD_INIT2RTR_QP,
2415                 },
2416                 [HNS_ROCE_QP_STATE_RTR] = {
2417                 [HNS_ROCE_QP_STATE_RST] = HNS_ROCE_CMD_2RST_QP,
2418                 [HNS_ROCE_QP_STATE_ERR] = HNS_ROCE_CMD_2ERR_QP,
2419                 [HNS_ROCE_QP_STATE_RTS] = HNS_ROCE_CMD_RTR2RTS_QP,
2420                 },
2421                 [HNS_ROCE_QP_STATE_RTS] = {
2422                 [HNS_ROCE_QP_STATE_RST] = HNS_ROCE_CMD_2RST_QP,
2423                 [HNS_ROCE_QP_STATE_ERR] = HNS_ROCE_CMD_2ERR_QP,
2424                 [HNS_ROCE_QP_STATE_RTS] = HNS_ROCE_CMD_RTS2RTS_QP,
2425                 [HNS_ROCE_QP_STATE_SQD] = HNS_ROCE_CMD_RTS2SQD_QP,
2426                 },
2427                 [HNS_ROCE_QP_STATE_SQD] = {
2428                 [HNS_ROCE_QP_STATE_RST] = HNS_ROCE_CMD_2RST_QP,
2429                 [HNS_ROCE_QP_STATE_ERR] = HNS_ROCE_CMD_2ERR_QP,
2430                 [HNS_ROCE_QP_STATE_RTS] = HNS_ROCE_CMD_SQD2RTS_QP,
2431                 [HNS_ROCE_QP_STATE_SQD] = HNS_ROCE_CMD_SQD2SQD_QP,
2432                 },
2433                 [HNS_ROCE_QP_STATE_ERR] = {
2434                 [HNS_ROCE_QP_STATE_RST] = HNS_ROCE_CMD_2RST_QP,
2435                 [HNS_ROCE_QP_STATE_ERR] = HNS_ROCE_CMD_2ERR_QP,
2436                 }
2437         };
2438
2439         struct hns_roce_cmd_mailbox *mailbox;
2440         struct device *dev = &hr_dev->pdev->dev;
2441         int ret;
2442
2443         if (cur_state >= HNS_ROCE_QP_NUM_STATE ||
2444             new_state >= HNS_ROCE_QP_NUM_STATE ||
2445             !op[cur_state][new_state]) {
2446                 dev_err(dev, "[modify_qp] unsupported transition from state %d to %d\n",
2447                         cur_state, new_state);
2448                 return -EINVAL;
2449         }
2450
2451         if (op[cur_state][new_state] == HNS_ROCE_CMD_2RST_QP)
2452                 return hns_roce_cmd_mbox(hr_dev, 0, 0, hr_qp->qpn, 2,
2453                                          HNS_ROCE_CMD_2RST_QP,
2454                                          HNS_ROCE_CMD_TIMEOUT_MSECS);
2455
2456         if (op[cur_state][new_state] == HNS_ROCE_CMD_2ERR_QP)
2457                 return hns_roce_cmd_mbox(hr_dev, 0, 0, hr_qp->qpn, 2,
2458                                          HNS_ROCE_CMD_2ERR_QP,
2459                                          HNS_ROCE_CMD_TIMEOUT_MSECS);
2460
2461         mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
2462         if (IS_ERR(mailbox))
2463                 return PTR_ERR(mailbox);
2464
2465         memcpy(mailbox->buf, context, sizeof(*context));
2466
2467         ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, hr_qp->qpn, 0,
2468                                 op[cur_state][new_state],
2469                                 HNS_ROCE_CMD_TIMEOUT_MSECS);
2470
2471         hns_roce_free_cmd_mailbox(hr_dev, mailbox);
2472         return ret;
2473 }
2474
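/* Resolve the DMA addresses of the first SQ WQE, the first RQ WQE and the
 * MTT base from the QP's MTR, for insertion into the QP context.
 */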
2475 static int find_wqe_mtt(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
2476                         u64 *sq_ba, u64 *rq_ba, dma_addr_t *bt_ba)
2477 {
2478         struct ib_device *ibdev = &hr_dev->ib_dev;
2479         int count;
2480
2481         count = hns_roce_mtr_find(hr_dev, &hr_qp->mtr, 0, sq_ba, 1, bt_ba);
2482         if (count < 1) {
2483                 ibdev_err(ibdev, "Failed to find SQ ba\n");
2484                 return -ENOBUFS;
2485         }
2486
2487         count = hns_roce_mtr_find(hr_dev, &hr_qp->mtr, hr_qp->rq.offset, rq_ba,
2488                                   1, NULL);
2489         if (!count) {
2490                 ibdev_err(ibdev, "Failed to find RQ ba\n");
2491                 return -ENOBUFS;
2492         }
2493
2494         return 0;
2495 }
2496
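/*
 * Modify a special QP (QP0/QP1). Unlike regular QPs, the QP1C context is
 * not posted through the mailbox; it is written directly into the
 * per-port QP1C configuration registers.
 */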
2497 static int hns_roce_v1_m_sqp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
2498                              int attr_mask, enum ib_qp_state cur_state,
2499                              enum ib_qp_state new_state)
2500 {
2501         struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
2502         struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
2503         struct hns_roce_sqp_context *context;
2504         dma_addr_t dma_handle = 0;
2505         u32 __iomem *addr;
2506         u64 sq_ba = 0;
2507         u64 rq_ba = 0;
2508         __le32 tmp;
2509         u32 reg_val;
2510
2511         context = kzalloc(sizeof(*context), GFP_KERNEL);
2512         if (!context)
2513                 return -ENOMEM;
2514
2515         /* Search QP buf's MTTs */
2516         if (find_wqe_mtt(hr_dev, hr_qp, &sq_ba, &rq_ba, &dma_handle))
2517                 goto out;
2518
2519         if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
2520                 roce_set_field(context->qp1c_bytes_4,
2521                                QP1C_BYTES_4_SQ_WQE_SHIFT_M,
2522                                QP1C_BYTES_4_SQ_WQE_SHIFT_S,
2523                                ilog2((unsigned int)hr_qp->sq.wqe_cnt));
2524                 roce_set_field(context->qp1c_bytes_4,
2525                                QP1C_BYTES_4_RQ_WQE_SHIFT_M,
2526                                QP1C_BYTES_4_RQ_WQE_SHIFT_S,
2527                                ilog2((unsigned int)hr_qp->rq.wqe_cnt));
2528                 roce_set_field(context->qp1c_bytes_4, QP1C_BYTES_4_PD_M,
2529                                QP1C_BYTES_4_PD_S, to_hr_pd(ibqp->pd)->pdn);
2530
2531                 context->sq_rq_bt_l = cpu_to_le32(dma_handle);
2532                 roce_set_field(context->qp1c_bytes_12,
2533                                QP1C_BYTES_12_SQ_RQ_BT_H_M,
2534                                QP1C_BYTES_12_SQ_RQ_BT_H_S,
2535                                upper_32_bits(dma_handle));
2536
2537                 roce_set_field(context->qp1c_bytes_16, QP1C_BYTES_16_RQ_HEAD_M,
2538                                QP1C_BYTES_16_RQ_HEAD_S, hr_qp->rq.head);
2539                 roce_set_field(context->qp1c_bytes_16, QP1C_BYTES_16_PORT_NUM_M,
2540                                QP1C_BYTES_16_PORT_NUM_S, hr_qp->phy_port);
2541                 roce_set_bit(context->qp1c_bytes_16,
2542                              QP1C_BYTES_16_SIGNALING_TYPE_S,
2543                              hr_qp->sq_signal_bits);
2544                 roce_set_bit(context->qp1c_bytes_16, QP1C_BYTES_16_RQ_BA_FLG_S,
2545                              1);
2546                 roce_set_bit(context->qp1c_bytes_16, QP1C_BYTES_16_SQ_BA_FLG_S,
2547                              1);
2548                 roce_set_bit(context->qp1c_bytes_16, QP1C_BYTES_16_QP1_ERR_S,
2549                              0);
2550
2551                 roce_set_field(context->qp1c_bytes_20, QP1C_BYTES_20_SQ_HEAD_M,
2552                                QP1C_BYTES_20_SQ_HEAD_S, hr_qp->sq.head);
2553                 roce_set_field(context->qp1c_bytes_20, QP1C_BYTES_20_PKEY_IDX_M,
2554                                QP1C_BYTES_20_PKEY_IDX_S, attr->pkey_index);
2555
2556                 context->cur_rq_wqe_ba_l = cpu_to_le32(rq_ba);
2557
2558                 roce_set_field(context->qp1c_bytes_28,
2559                                QP1C_BYTES_28_CUR_RQ_WQE_BA_H_M,
2560                                QP1C_BYTES_28_CUR_RQ_WQE_BA_H_S,
2561                                upper_32_bits(rq_ba));
2562                 roce_set_field(context->qp1c_bytes_28,
2563                                QP1C_BYTES_28_RQ_CUR_IDX_M,
2564                                QP1C_BYTES_28_RQ_CUR_IDX_S, 0);
2565
2566                 roce_set_field(context->qp1c_bytes_32,
2567                                QP1C_BYTES_32_RX_CQ_NUM_M,
2568                                QP1C_BYTES_32_RX_CQ_NUM_S,
2569                                to_hr_cq(ibqp->recv_cq)->cqn);
2570                 roce_set_field(context->qp1c_bytes_32,
2571                                QP1C_BYTES_32_TX_CQ_NUM_M,
2572                                QP1C_BYTES_32_TX_CQ_NUM_S,
2573                                to_hr_cq(ibqp->send_cq)->cqn);
2574
2575                 context->cur_sq_wqe_ba_l = cpu_to_le32(sq_ba);
2576
2577                 roce_set_field(context->qp1c_bytes_40,
2578                                QP1C_BYTES_40_CUR_SQ_WQE_BA_H_M,
2579                                QP1C_BYTES_40_CUR_SQ_WQE_BA_H_S,
2580                                upper_32_bits(sq_ba));
2581                 roce_set_field(context->qp1c_bytes_40,
2582                                QP1C_BYTES_40_SQ_CUR_IDX_M,
2583                                QP1C_BYTES_40_SQ_CUR_IDX_S, 0);
2584
2585                 /* Copy context to QP1C register */
2586                 addr = (u32 __iomem *)(hr_dev->reg_base +
2587                                        ROCEE_QP1C_CFG0_0_REG +
2588                                        hr_qp->phy_port * sizeof(*context));
2589
2590                 writel(le32_to_cpu(context->qp1c_bytes_4), addr);
2591                 writel(le32_to_cpu(context->sq_rq_bt_l), addr + 1);
2592                 writel(le32_to_cpu(context->qp1c_bytes_12), addr + 2);
2593                 writel(le32_to_cpu(context->qp1c_bytes_16), addr + 3);
2594                 writel(le32_to_cpu(context->qp1c_bytes_20), addr + 4);
2595                 writel(le32_to_cpu(context->cur_rq_wqe_ba_l), addr + 5);
2596                 writel(le32_to_cpu(context->qp1c_bytes_28), addr + 6);
2597                 writel(le32_to_cpu(context->qp1c_bytes_32), addr + 7);
2598                 writel(le32_to_cpu(context->cur_sq_wqe_ba_l), addr + 8);
2599                 writel(le32_to_cpu(context->qp1c_bytes_40), addr + 9);
2600         }
2601
2602         /* Modify QP1C status */
2603         reg_val = roce_read(hr_dev, ROCEE_QP1C_CFG0_0_REG +
2604                             hr_qp->phy_port * sizeof(*context));
2605         tmp = cpu_to_le32(reg_val);
2606         roce_set_field(tmp, ROCEE_QP1C_CFG0_0_ROCEE_QP1C_QP_ST_M,
2607                        ROCEE_QP1C_CFG0_0_ROCEE_QP1C_QP_ST_S, new_state);
2608         reg_val = le32_to_cpu(tmp);
2609         roce_write(hr_dev, ROCEE_QP1C_CFG0_0_REG +
2610                     hr_qp->phy_port * sizeof(*context), reg_val);
2611
2612         hr_qp->state = new_state;
2613         if (new_state == IB_QPS_RESET) {
2614                 hns_roce_v1_cq_clean(to_hr_cq(ibqp->recv_cq), hr_qp->qpn,
2615                                      ibqp->srq ? to_hr_srq(ibqp->srq) : NULL);
2616                 if (ibqp->send_cq != ibqp->recv_cq)
2617                         hns_roce_v1_cq_clean(to_hr_cq(ibqp->send_cq),
2618                                              hr_qp->qpn, NULL);
2619
2620                 hr_qp->rq.head = 0;
2621                 hr_qp->rq.tail = 0;
2622                 hr_qp->sq.head = 0;
2623                 hr_qp->sq.tail = 0;
2624         }
2625
2626         kfree(context);
2627         return 0;
2628
2629 out:
2630         kfree(context);
2631         return -EINVAL;
2632 }
2633
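/*
 * Descriptive note (added): legal state transitions for hw v1, indexed as
 * sm[cur_state][new_state]. Transitions out of SQD/SQE and any entry not
 * marked true are rejected.
 */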
2634 static bool check_qp_state(enum ib_qp_state cur_state,
2635                            enum ib_qp_state new_state)
2636 {
2637         static const bool sm[][IB_QPS_ERR + 1] = {
2638                 [IB_QPS_RESET] = { [IB_QPS_RESET] = true,
2639                                    [IB_QPS_INIT] = true },
2640                 [IB_QPS_INIT] = { [IB_QPS_RESET] = true,
2641                                   [IB_QPS_INIT] = true,
2642                                   [IB_QPS_RTR] = true,
2643                                   [IB_QPS_ERR] = true },
2644                 [IB_QPS_RTR] = { [IB_QPS_RESET] = true,
2645                                  [IB_QPS_RTS] = true,
2646                                  [IB_QPS_ERR] = true },
2647                 [IB_QPS_RTS] = { [IB_QPS_RESET] = true, [IB_QPS_ERR] = true },
2648                 [IB_QPS_SQD] = {},
2649                 [IB_QPS_SQE] = {},
2650                 [IB_QPS_ERR] = { [IB_QPS_RESET] = true, [IB_QPS_ERR] = true }
2651         };
2652
2653         return sm[cur_state][new_state];
2654 }
2655
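/*
 * Descriptive note (added): modify a regular QP. The QP context for the
 * requested transition is built in host memory and handed to the hardware
 * through a mailbox command (hns_roce_v1_qp_modify); only the attributes
 * relevant to each transition are filled in.
 */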
2656 static int hns_roce_v1_m_qp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
2657                             int attr_mask, enum ib_qp_state cur_state,
2658                             enum ib_qp_state new_state)
2659 {
2660         struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
2661         struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
2662         struct device *dev = &hr_dev->pdev->dev;
2663         struct hns_roce_qp_context *context;
2664         const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr);
2665         dma_addr_t dma_handle_2 = 0;
2666         dma_addr_t dma_handle = 0;
2667         __le32 doorbell[2] = {0};
2668         u64 *mtts_2 = NULL;
2669         int ret = -EINVAL;
2670         u64 sq_ba = 0;
2671         u64 rq_ba = 0;
2672         int port;
2673         u8 port_num;
2674         u8 *dmac;
2675         u8 *smac;
2676
2677         if (!check_qp_state(cur_state, new_state)) {
2678                 ibdev_err(ibqp->device,
2679                           "unsupported QP(%u) state transition from %d to %d\n",
2680                           ibqp->qp_num, cur_state, new_state);
2681                 return -EINVAL;
2682         }
2683
2684         context = kzalloc(sizeof(*context), GFP_KERNEL);
2685         if (!context)
2686                 return -ENOMEM;
2687
2688         /* Search qp buf's mtts */
2689         if (find_wqe_mtt(hr_dev, hr_qp, &sq_ba, &rq_ba, &dma_handle))
2690                 goto out;
2691
2692         /* Search IRRL's mtts */
2693         mtts_2 = hns_roce_table_find(hr_dev, &hr_dev->qp_table.irrl_table,
2694                                      hr_qp->qpn, &dma_handle_2);
2695         if (mtts_2 == NULL) {
2696                 dev_err(dev, "qp irrl_table find failed\n");
2697                 goto out;
2698         }
2699
2700         /*
2701          * Reset to init
2702          *      Mandatory param:
2703          *      IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_PORT | IB_QP_ACCESS_FLAGS
2704          *      Optional param: NA
2705          */
2706         if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
2707                 roce_set_field(context->qpc_bytes_4,
2708                                QP_CONTEXT_QPC_BYTES_4_TRANSPORT_SERVICE_TYPE_M,
2709                                QP_CONTEXT_QPC_BYTES_4_TRANSPORT_SERVICE_TYPE_S,
2710                                to_hr_qp_type(hr_qp->ibqp.qp_type));
2711
2712                 roce_set_bit(context->qpc_bytes_4,
2713                              QP_CONTEXT_QPC_BYTE_4_ENABLE_FPMR_S, 0);
2714                 roce_set_bit(context->qpc_bytes_4,
2715                              QP_CONTEXT_QPC_BYTE_4_RDMA_READ_ENABLE_S,
2716                              !!(attr->qp_access_flags & IB_ACCESS_REMOTE_READ));
2717                 roce_set_bit(context->qpc_bytes_4,
2718                              QP_CONTEXT_QPC_BYTE_4_RDMA_WRITE_ENABLE_S,
2719                              !!(attr->qp_access_flags & IB_ACCESS_REMOTE_WRITE)
2720                              );
2721                 roce_set_bit(context->qpc_bytes_4,
2722                              QP_CONTEXT_QPC_BYTE_4_ATOMIC_OPERATION_ENABLE_S,
2723                              !!(attr->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC)
2724                              );
2725                 roce_set_bit(context->qpc_bytes_4,
2726                              QP_CONTEXT_QPC_BYTE_4_RDMAR_USE_S, 1);
2727                 roce_set_field(context->qpc_bytes_4,
2728                                QP_CONTEXT_QPC_BYTES_4_SQ_WQE_SHIFT_M,
2729                                QP_CONTEXT_QPC_BYTES_4_SQ_WQE_SHIFT_S,
2730                                ilog2((unsigned int)hr_qp->sq.wqe_cnt));
2731                 roce_set_field(context->qpc_bytes_4,
2732                                QP_CONTEXT_QPC_BYTES_4_RQ_WQE_SHIFT_M,
2733                                QP_CONTEXT_QPC_BYTES_4_RQ_WQE_SHIFT_S,
2734                                ilog2((unsigned int)hr_qp->rq.wqe_cnt));
2735                 roce_set_field(context->qpc_bytes_4,
2736                                QP_CONTEXT_QPC_BYTES_4_PD_M,
2737                                QP_CONTEXT_QPC_BYTES_4_PD_S,
2738                                to_hr_pd(ibqp->pd)->pdn);
2739                 hr_qp->access_flags = attr->qp_access_flags;
2740                 roce_set_field(context->qpc_bytes_8,
2741                                QP_CONTEXT_QPC_BYTES_8_TX_COMPLETION_M,
2742                                QP_CONTEXT_QPC_BYTES_8_TX_COMPLETION_S,
2743                                to_hr_cq(ibqp->send_cq)->cqn);
2744                 roce_set_field(context->qpc_bytes_8,
2745                                QP_CONTEXT_QPC_BYTES_8_RX_COMPLETION_M,
2746                                QP_CONTEXT_QPC_BYTES_8_RX_COMPLETION_S,
2747                                to_hr_cq(ibqp->recv_cq)->cqn);
2748
2749                 if (ibqp->srq)
2750                         roce_set_field(context->qpc_bytes_12,
2751                                        QP_CONTEXT_QPC_BYTES_12_SRQ_NUMBER_M,
2752                                        QP_CONTEXT_QPC_BYTES_12_SRQ_NUMBER_S,
2753                                        to_hr_srq(ibqp->srq)->srqn);
2754
2755                 roce_set_field(context->qpc_bytes_12,
2756                                QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_M,
2757                                QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_S,
2758                                attr->pkey_index);
2759                 hr_qp->pkey_index = attr->pkey_index;
2760                 roce_set_field(context->qpc_bytes_16,
2761                                QP_CONTEXT_QPC_BYTES_16_QP_NUM_M,
2762                                QP_CONTEXT_QPC_BYTES_16_QP_NUM_S, hr_qp->qpn);
2763
2764         } else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_INIT) {
2765                 roce_set_field(context->qpc_bytes_4,
2766                                QP_CONTEXT_QPC_BYTES_4_TRANSPORT_SERVICE_TYPE_M,
2767                                QP_CONTEXT_QPC_BYTES_4_TRANSPORT_SERVICE_TYPE_S,
2768                                to_hr_qp_type(hr_qp->ibqp.qp_type));
2769                 roce_set_bit(context->qpc_bytes_4,
2770                              QP_CONTEXT_QPC_BYTE_4_ENABLE_FPMR_S, 0);
2771                 if (attr_mask & IB_QP_ACCESS_FLAGS) {
2772                         roce_set_bit(context->qpc_bytes_4,
2773                                      QP_CONTEXT_QPC_BYTE_4_RDMA_READ_ENABLE_S,
2774                                      !!(attr->qp_access_flags &
2775                                      IB_ACCESS_REMOTE_READ));
2776                         roce_set_bit(context->qpc_bytes_4,
2777                                      QP_CONTEXT_QPC_BYTE_4_RDMA_WRITE_ENABLE_S,
2778                                      !!(attr->qp_access_flags &
2779                                      IB_ACCESS_REMOTE_WRITE));
2780                 } else {
2781                         roce_set_bit(context->qpc_bytes_4,
2782                                      QP_CONTEXT_QPC_BYTE_4_RDMA_READ_ENABLE_S,
2783                                      !!(hr_qp->access_flags &
2784                                      IB_ACCESS_REMOTE_READ));
2785                         roce_set_bit(context->qpc_bytes_4,
2786                                      QP_CONTEXT_QPC_BYTE_4_RDMA_WRITE_ENABLE_S,
2787                                      !!(hr_qp->access_flags &
2788                                      IB_ACCESS_REMOTE_WRITE));
2789                 }
2790
2791                 roce_set_bit(context->qpc_bytes_4,
2792                              QP_CONTEXT_QPC_BYTE_4_RDMAR_USE_S, 1);
2793                 roce_set_field(context->qpc_bytes_4,
2794                                QP_CONTEXT_QPC_BYTES_4_SQ_WQE_SHIFT_M,
2795                                QP_CONTEXT_QPC_BYTES_4_SQ_WQE_SHIFT_S,
2796                                ilog2((unsigned int)hr_qp->sq.wqe_cnt));
2797                 roce_set_field(context->qpc_bytes_4,
2798                                QP_CONTEXT_QPC_BYTES_4_RQ_WQE_SHIFT_M,
2799                                QP_CONTEXT_QPC_BYTES_4_RQ_WQE_SHIFT_S,
2800                                ilog2((unsigned int)hr_qp->rq.wqe_cnt));
2801                 roce_set_field(context->qpc_bytes_4,
2802                                QP_CONTEXT_QPC_BYTES_4_PD_M,
2803                                QP_CONTEXT_QPC_BYTES_4_PD_S,
2804                                to_hr_pd(ibqp->pd)->pdn);
2805
2806                 roce_set_field(context->qpc_bytes_8,
2807                                QP_CONTEXT_QPC_BYTES_8_TX_COMPLETION_M,
2808                                QP_CONTEXT_QPC_BYTES_8_TX_COMPLETION_S,
2809                                to_hr_cq(ibqp->send_cq)->cqn);
2810                 roce_set_field(context->qpc_bytes_8,
2811                                QP_CONTEXT_QPC_BYTES_8_RX_COMPLETION_M,
2812                                QP_CONTEXT_QPC_BYTES_8_RX_COMPLETION_S,
2813                                to_hr_cq(ibqp->recv_cq)->cqn);
2814
2815                 if (ibqp->srq)
2816                         roce_set_field(context->qpc_bytes_12,
2817                                        QP_CONTEXT_QPC_BYTES_12_SRQ_NUMBER_M,
2818                                        QP_CONTEXT_QPC_BYTES_12_SRQ_NUMBER_S,
2819                                        to_hr_srq(ibqp->srq)->srqn);
2820                 if (attr_mask & IB_QP_PKEY_INDEX)
2821                         roce_set_field(context->qpc_bytes_12,
2822                                        QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_M,
2823                                        QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_S,
2824                                        attr->pkey_index);
2825                 else
2826                         roce_set_field(context->qpc_bytes_12,
2827                                        QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_M,
2828                                        QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_S,
2829                                        hr_qp->pkey_index);
2830
2831                 roce_set_field(context->qpc_bytes_16,
2832                                QP_CONTEXT_QPC_BYTES_16_QP_NUM_M,
2833                                QP_CONTEXT_QPC_BYTES_16_QP_NUM_S, hr_qp->qpn);
2834         } else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR) {
2835                 if ((attr_mask & IB_QP_ALT_PATH) ||
2836                     (attr_mask & IB_QP_ACCESS_FLAGS) ||
2837                     (attr_mask & IB_QP_PKEY_INDEX) ||
2838                     (attr_mask & IB_QP_QKEY)) {
2839                         dev_err(dev, "INIT2RTR attr_mask error\n");
2840                         goto out;
2841                 }
2842
2843                 dmac = (u8 *)attr->ah_attr.roce.dmac;
2844
2845                 context->sq_rq_bt_l = cpu_to_le32(dma_handle);
2846                 roce_set_field(context->qpc_bytes_24,
2847                                QP_CONTEXT_QPC_BYTES_24_SQ_RQ_BT_H_M,
2848                                QP_CONTEXT_QPC_BYTES_24_SQ_RQ_BT_H_S,
2849                                upper_32_bits(dma_handle));
2850                 roce_set_bit(context->qpc_bytes_24,
2851                              QP_CONTEXT_QPC_BYTE_24_REMOTE_ENABLE_E2E_CREDITS_S,
2852                              1);
2853                 roce_set_field(context->qpc_bytes_24,
2854                                QP_CONTEXT_QPC_BYTES_24_MINIMUM_RNR_NAK_TIMER_M,
2855                                QP_CONTEXT_QPC_BYTES_24_MINIMUM_RNR_NAK_TIMER_S,
2856                                attr->min_rnr_timer);
2857                 context->irrl_ba_l = cpu_to_le32((u32)(dma_handle_2));
2858                 roce_set_field(context->qpc_bytes_32,
2859                                QP_CONTEXT_QPC_BYTES_32_IRRL_BA_H_M,
2860                                QP_CONTEXT_QPC_BYTES_32_IRRL_BA_H_S,
2861                                ((u32)(dma_handle_2 >> 32)) &
2862                                 QP_CONTEXT_QPC_BYTES_32_IRRL_BA_H_M);
2863                 roce_set_field(context->qpc_bytes_32,
2864                                QP_CONTEXT_QPC_BYTES_32_MIG_STATE_M,
2865                                QP_CONTEXT_QPC_BYTES_32_MIG_STATE_S, 0);
2866                 roce_set_bit(context->qpc_bytes_32,
2867                              QP_CONTEXT_QPC_BYTE_32_LOCAL_ENABLE_E2E_CREDITS_S,
2868                              1);
2869                 roce_set_bit(context->qpc_bytes_32,
2870                              QP_CONTEXT_QPC_BYTE_32_SIGNALING_TYPE_S,
2871                              hr_qp->sq_signal_bits);
2872
2873                 port = (attr_mask & IB_QP_PORT) ? (attr->port_num - 1) :
2874                         hr_qp->port;
2875                 smac = (u8 *)hr_dev->dev_addr[port];
2876                 /* Enable loopback when the dmac equals the smac or loop_idc is set */
2877                 if (ether_addr_equal_unaligned(dmac, smac) ||
2878                     hr_dev->loop_idc == 0x1)
2879                         roce_set_bit(context->qpc_bytes_32,
2880                               QP_CONTEXT_QPC_BYTE_32_LOOPBACK_INDICATOR_S, 1);
2881
2882                 roce_set_bit(context->qpc_bytes_32,
2883                              QP_CONTEXT_QPC_BYTE_32_GLOBAL_HEADER_S,
2884                              rdma_ah_get_ah_flags(&attr->ah_attr));
2885                 roce_set_field(context->qpc_bytes_32,
2886                                QP_CONTEXT_QPC_BYTES_32_RESPONDER_RESOURCES_M,
2887                                QP_CONTEXT_QPC_BYTES_32_RESPONDER_RESOURCES_S,
2888                                ilog2((unsigned int)attr->max_dest_rd_atomic));
2889
2890                 if (attr_mask & IB_QP_DEST_QPN)
2891                         roce_set_field(context->qpc_bytes_36,
2892                                        QP_CONTEXT_QPC_BYTES_36_DEST_QP_M,
2893                                        QP_CONTEXT_QPC_BYTES_36_DEST_QP_S,
2894                                        attr->dest_qp_num);
2895
2896                 /* Configure GID index */
2897                 port_num = rdma_ah_get_port_num(&attr->ah_attr);
2898                 roce_set_field(context->qpc_bytes_36,
2899                                QP_CONTEXT_QPC_BYTES_36_SGID_INDEX_M,
2900                                QP_CONTEXT_QPC_BYTES_36_SGID_INDEX_S,
2901                                 hns_get_gid_index(hr_dev,
2902                                                   port_num - 1,
2903                                                   grh->sgid_index));
2904
2905                 memcpy(&(context->dmac_l), dmac, 4);
2906
2907                 roce_set_field(context->qpc_bytes_44,
2908                                QP_CONTEXT_QPC_BYTES_44_DMAC_H_M,
2909                                QP_CONTEXT_QPC_BYTES_44_DMAC_H_S,
2910                                *((u16 *)(&dmac[4])));
2911                 roce_set_field(context->qpc_bytes_44,
2912                                QP_CONTEXT_QPC_BYTES_44_MAXIMUM_STATIC_RATE_M,
2913                                QP_CONTEXT_QPC_BYTES_44_MAXIMUM_STATIC_RATE_S,
2914                                rdma_ah_get_static_rate(&attr->ah_attr));
2915                 roce_set_field(context->qpc_bytes_44,
2916                                QP_CONTEXT_QPC_BYTES_44_HOPLMT_M,
2917                                QP_CONTEXT_QPC_BYTES_44_HOPLMT_S,
2918                                grh->hop_limit);
2919
2920                 roce_set_field(context->qpc_bytes_48,
2921                                QP_CONTEXT_QPC_BYTES_48_FLOWLABEL_M,
2922                                QP_CONTEXT_QPC_BYTES_48_FLOWLABEL_S,
2923                                grh->flow_label);
2924                 roce_set_field(context->qpc_bytes_48,
2925                                QP_CONTEXT_QPC_BYTES_48_TCLASS_M,
2926                                QP_CONTEXT_QPC_BYTES_48_TCLASS_S,
2927                                grh->traffic_class);
2928                 roce_set_field(context->qpc_bytes_48,
2929                                QP_CONTEXT_QPC_BYTES_48_MTU_M,
2930                                QP_CONTEXT_QPC_BYTES_48_MTU_S, attr->path_mtu);
2931
2932                 memcpy(context->dgid, grh->dgid.raw,
2933                        sizeof(grh->dgid.raw));
2934
2935                 dev_dbg(dev, "dmac_l:%x dmac_h:%lx\n", context->dmac_l,
2936                         roce_get_field(context->qpc_bytes_44,
2937                                        QP_CONTEXT_QPC_BYTES_44_DMAC_H_M,
2938                                        QP_CONTEXT_QPC_BYTES_44_DMAC_H_S));
2939
2940                 roce_set_field(context->qpc_bytes_68,
2941                                QP_CONTEXT_QPC_BYTES_68_RQ_HEAD_M,
2942                                QP_CONTEXT_QPC_BYTES_68_RQ_HEAD_S,
2943                                hr_qp->rq.head);
2944                 roce_set_field(context->qpc_bytes_68,
2945                                QP_CONTEXT_QPC_BYTES_68_RQ_CUR_INDEX_M,
2946                                QP_CONTEXT_QPC_BYTES_68_RQ_CUR_INDEX_S, 0);
2947
2948                 context->cur_rq_wqe_ba_l = cpu_to_le32(rq_ba);
2949
2950                 roce_set_field(context->qpc_bytes_76,
2951                         QP_CONTEXT_QPC_BYTES_76_CUR_RQ_WQE_BA_H_M,
2952                         QP_CONTEXT_QPC_BYTES_76_CUR_RQ_WQE_BA_H_S,
2953                         upper_32_bits(rq_ba));
2954                 roce_set_field(context->qpc_bytes_76,
2955                                QP_CONTEXT_QPC_BYTES_76_RX_REQ_MSN_M,
2956                                QP_CONTEXT_QPC_BYTES_76_RX_REQ_MSN_S, 0);
2957
2958                 context->rx_rnr_time = 0;
2959
2960                 roce_set_field(context->qpc_bytes_84,
2961                                QP_CONTEXT_QPC_BYTES_84_LAST_ACK_PSN_M,
2962                                QP_CONTEXT_QPC_BYTES_84_LAST_ACK_PSN_S,
2963                                attr->rq_psn - 1);
2964                 roce_set_field(context->qpc_bytes_84,
2965                                QP_CONTEXT_QPC_BYTES_84_TRRL_HEAD_M,
2966                                QP_CONTEXT_QPC_BYTES_84_TRRL_HEAD_S, 0);
2967
2968                 roce_set_field(context->qpc_bytes_88,
2969                                QP_CONTEXT_QPC_BYTES_88_RX_REQ_EPSN_M,
2970                                QP_CONTEXT_QPC_BYTES_88_RX_REQ_EPSN_S,
2971                                attr->rq_psn);
2972                 roce_set_bit(context->qpc_bytes_88,
2973                              QP_CONTEXT_QPC_BYTES_88_RX_REQ_PSN_ERR_FLAG_S, 0);
2974                 roce_set_bit(context->qpc_bytes_88,
2975                              QP_CONTEXT_QPC_BYTES_88_RX_LAST_OPCODE_FLG_S, 0);
2976                 roce_set_field(context->qpc_bytes_88,
2977                         QP_CONTEXT_QPC_BYTES_88_RQ_REQ_LAST_OPERATION_TYPE_M,
2978                         QP_CONTEXT_QPC_BYTES_88_RQ_REQ_LAST_OPERATION_TYPE_S,
2979                         0);
2980                 roce_set_field(context->qpc_bytes_88,
2981                                QP_CONTEXT_QPC_BYTES_88_RQ_REQ_RDMA_WR_FLAG_M,
2982                                QP_CONTEXT_QPC_BYTES_88_RQ_REQ_RDMA_WR_FLAG_S,
2983                                0);
2984
2985                 context->dma_length = 0;
2986                 context->r_key = 0;
2987                 context->va_l = 0;
2988                 context->va_h = 0;
2989
2990                 roce_set_field(context->qpc_bytes_108,
2991                                QP_CONTEXT_QPC_BYTES_108_TRRL_SDB_PSN_M,
2992                                QP_CONTEXT_QPC_BYTES_108_TRRL_SDB_PSN_S, 0);
2993                 roce_set_bit(context->qpc_bytes_108,
2994                              QP_CONTEXT_QPC_BYTES_108_TRRL_SDB_PSN_FLG_S, 0);
2995                 roce_set_bit(context->qpc_bytes_108,
2996                              QP_CONTEXT_QPC_BYTES_108_TRRL_TDB_PSN_FLG_S, 0);
2997
2998                 roce_set_field(context->qpc_bytes_112,
2999                                QP_CONTEXT_QPC_BYTES_112_TRRL_TDB_PSN_M,
3000                                QP_CONTEXT_QPC_BYTES_112_TRRL_TDB_PSN_S, 0);
3001                 roce_set_field(context->qpc_bytes_112,
3002                                QP_CONTEXT_QPC_BYTES_112_TRRL_TAIL_M,
3003                                QP_CONTEXT_QPC_BYTES_112_TRRL_TAIL_S, 0);
3004
3005                 /* For chip resp ack */
3006                 roce_set_field(context->qpc_bytes_156,
3007                                QP_CONTEXT_QPC_BYTES_156_PORT_NUM_M,
3008                                QP_CONTEXT_QPC_BYTES_156_PORT_NUM_S,
3009                                hr_qp->phy_port);
3010                 roce_set_field(context->qpc_bytes_156,
3011                                QP_CONTEXT_QPC_BYTES_156_SL_M,
3012                                QP_CONTEXT_QPC_BYTES_156_SL_S,
3013                                rdma_ah_get_sl(&attr->ah_attr));
3014                 hr_qp->sl = rdma_ah_get_sl(&attr->ah_attr);
3015         } else if (cur_state == IB_QPS_RTR && new_state == IB_QPS_RTS) {
3016                 /* Return an error if any optional param is present */
3017                 if ((attr_mask & IB_QP_ALT_PATH) ||
3018                     (attr_mask & IB_QP_ACCESS_FLAGS) ||
3019                     (attr_mask & IB_QP_QKEY) ||
3020                     (attr_mask & IB_QP_PATH_MIG_STATE) ||
3021                     (attr_mask & IB_QP_CUR_STATE) ||
3022                     (attr_mask & IB_QP_MIN_RNR_TIMER)) {
3023                         dev_err(dev, "RTR2RTS attr_mask error\n");
3024                         goto out;
3025                 }
3026
3027                 context->rx_cur_sq_wqe_ba_l = cpu_to_le32(sq_ba);
3028
3029                 roce_set_field(context->qpc_bytes_120,
3030                                QP_CONTEXT_QPC_BYTES_120_RX_CUR_SQ_WQE_BA_H_M,
3031                                QP_CONTEXT_QPC_BYTES_120_RX_CUR_SQ_WQE_BA_H_S,
3032                                upper_32_bits(sq_ba));
3033
3034                 roce_set_field(context->qpc_bytes_124,
3035                                QP_CONTEXT_QPC_BYTES_124_RX_ACK_MSN_M,
3036                                QP_CONTEXT_QPC_BYTES_124_RX_ACK_MSN_S, 0);
3037                 roce_set_field(context->qpc_bytes_124,
3038                                QP_CONTEXT_QPC_BYTES_124_IRRL_MSG_IDX_M,
3039                                QP_CONTEXT_QPC_BYTES_124_IRRL_MSG_IDX_S, 0);
3040
3041                 roce_set_field(context->qpc_bytes_128,
3042                                QP_CONTEXT_QPC_BYTES_128_RX_ACK_EPSN_M,
3043                                QP_CONTEXT_QPC_BYTES_128_RX_ACK_EPSN_S,
3044                                attr->sq_psn);
3045                 roce_set_bit(context->qpc_bytes_128,
3046                              QP_CONTEXT_QPC_BYTES_128_RX_ACK_PSN_ERR_FLG_S, 0);
3047                 roce_set_field(context->qpc_bytes_128,
3048                              QP_CONTEXT_QPC_BYTES_128_ACK_LAST_OPERATION_TYPE_M,
3049                              QP_CONTEXT_QPC_BYTES_128_ACK_LAST_OPERATION_TYPE_S,
3050                              0);
3051                 roce_set_bit(context->qpc_bytes_128,
3052                              QP_CONTEXT_QPC_BYTES_128_IRRL_PSN_VLD_FLG_S, 0);
3053
3054                 roce_set_field(context->qpc_bytes_132,
3055                                QP_CONTEXT_QPC_BYTES_132_IRRL_PSN_M,
3056                                QP_CONTEXT_QPC_BYTES_132_IRRL_PSN_S, 0);
3057                 roce_set_field(context->qpc_bytes_132,
3058                                QP_CONTEXT_QPC_BYTES_132_IRRL_TAIL_M,
3059                                QP_CONTEXT_QPC_BYTES_132_IRRL_TAIL_S, 0);
3060
3061                 roce_set_field(context->qpc_bytes_136,
3062                                QP_CONTEXT_QPC_BYTES_136_RETRY_MSG_PSN_M,
3063                                QP_CONTEXT_QPC_BYTES_136_RETRY_MSG_PSN_S,
3064                                attr->sq_psn);
3065                 roce_set_field(context->qpc_bytes_136,
3066                                QP_CONTEXT_QPC_BYTES_136_RETRY_MSG_FPKT_PSN_L_M,
3067                                QP_CONTEXT_QPC_BYTES_136_RETRY_MSG_FPKT_PSN_L_S,
3068                                attr->sq_psn);
3069
3070                 roce_set_field(context->qpc_bytes_140,
3071                                QP_CONTEXT_QPC_BYTES_140_RETRY_MSG_FPKT_PSN_H_M,
3072                                QP_CONTEXT_QPC_BYTES_140_RETRY_MSG_FPKT_PSN_H_S,
3073                                (attr->sq_psn >> SQ_PSN_SHIFT));
3074                 roce_set_field(context->qpc_bytes_140,
3075                                QP_CONTEXT_QPC_BYTES_140_RETRY_MSG_MSN_M,
3076                                QP_CONTEXT_QPC_BYTES_140_RETRY_MSG_MSN_S, 0);
3077                 roce_set_bit(context->qpc_bytes_140,
3078                              QP_CONTEXT_QPC_BYTES_140_RNR_RETRY_FLG_S, 0);
3079
3080                 roce_set_field(context->qpc_bytes_148,
3081                                QP_CONTEXT_QPC_BYTES_148_CHECK_FLAG_M,
3082                                QP_CONTEXT_QPC_BYTES_148_CHECK_FLAG_S, 0);
3083                 roce_set_field(context->qpc_bytes_148,
3084                                QP_CONTEXT_QPC_BYTES_148_RETRY_COUNT_M,
3085                                QP_CONTEXT_QPC_BYTES_148_RETRY_COUNT_S,
3086                                attr->retry_cnt);
3087                 roce_set_field(context->qpc_bytes_148,
3088                                QP_CONTEXT_QPC_BYTES_148_RNR_RETRY_COUNT_M,
3089                                QP_CONTEXT_QPC_BYTES_148_RNR_RETRY_COUNT_S,
3090                                attr->rnr_retry);
3091                 roce_set_field(context->qpc_bytes_148,
3092                                QP_CONTEXT_QPC_BYTES_148_LSN_M,
3093                                QP_CONTEXT_QPC_BYTES_148_LSN_S, 0x100);
3094
3095                 context->rnr_retry = 0;
3096
3097                 roce_set_field(context->qpc_bytes_156,
3098                                QP_CONTEXT_QPC_BYTES_156_RETRY_COUNT_INIT_M,
3099                                QP_CONTEXT_QPC_BYTES_156_RETRY_COUNT_INIT_S,
3100                                attr->retry_cnt);
3101                 if (attr->timeout < 0x12) {
3102                         dev_info(dev, "ack timeout value (0x%x) must be at least 0x12, using 0x12 instead\n",
3103                                  attr->timeout);
3104                         roce_set_field(context->qpc_bytes_156,
3105                                        QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_M,
3106                                        QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_S,
3107                                        0x12);
3108                 } else {
3109                         roce_set_field(context->qpc_bytes_156,
3110                                        QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_M,
3111                                        QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_S,
3112                                        attr->timeout);
3113                 }
3114                 roce_set_field(context->qpc_bytes_156,
3115                                QP_CONTEXT_QPC_BYTES_156_RNR_RETRY_COUNT_INIT_M,
3116                                QP_CONTEXT_QPC_BYTES_156_RNR_RETRY_COUNT_INIT_S,
3117                                attr->rnr_retry);
3118                 roce_set_field(context->qpc_bytes_156,
3119                                QP_CONTEXT_QPC_BYTES_156_PORT_NUM_M,
3120                                QP_CONTEXT_QPC_BYTES_156_PORT_NUM_S,
3121                                hr_qp->phy_port);
3122                 roce_set_field(context->qpc_bytes_156,
3123                                QP_CONTEXT_QPC_BYTES_156_SL_M,
3124                                QP_CONTEXT_QPC_BYTES_156_SL_S,
3125                                rdma_ah_get_sl(&attr->ah_attr));
3126                 hr_qp->sl = rdma_ah_get_sl(&attr->ah_attr);
3127                 roce_set_field(context->qpc_bytes_156,
3128                                QP_CONTEXT_QPC_BYTES_156_INITIATOR_DEPTH_M,
3129                                QP_CONTEXT_QPC_BYTES_156_INITIATOR_DEPTH_S,
3130                                ilog2((unsigned int)attr->max_rd_atomic));
3131                 roce_set_field(context->qpc_bytes_156,
3132                                QP_CONTEXT_QPC_BYTES_156_ACK_REQ_IND_M,
3133                                QP_CONTEXT_QPC_BYTES_156_ACK_REQ_IND_S, 0);
3134                 context->pkt_use_len = 0;
3135
3136                 roce_set_field(context->qpc_bytes_164,
3137                                QP_CONTEXT_QPC_BYTES_164_SQ_PSN_M,
3138                                QP_CONTEXT_QPC_BYTES_164_SQ_PSN_S, attr->sq_psn);
3139                 roce_set_field(context->qpc_bytes_164,
3140                                QP_CONTEXT_QPC_BYTES_164_IRRL_HEAD_M,
3141                                QP_CONTEXT_QPC_BYTES_164_IRRL_HEAD_S, 0);
3142
3143                 roce_set_field(context->qpc_bytes_168,
3144                                QP_CONTEXT_QPC_BYTES_168_RETRY_SQ_PSN_M,
3145                                QP_CONTEXT_QPC_BYTES_168_RETRY_SQ_PSN_S,
3146                                attr->sq_psn);
3147                 roce_set_field(context->qpc_bytes_168,
3148                                QP_CONTEXT_QPC_BYTES_168_SGE_USE_FLA_M,
3149                                QP_CONTEXT_QPC_BYTES_168_SGE_USE_FLA_S, 0);
3150                 roce_set_field(context->qpc_bytes_168,
3151                                QP_CONTEXT_QPC_BYTES_168_DB_TYPE_M,
3152                                QP_CONTEXT_QPC_BYTES_168_DB_TYPE_S, 0);
3153                 roce_set_bit(context->qpc_bytes_168,
3154                              QP_CONTEXT_QPC_BYTES_168_MSG_LP_IND_S, 0);
3155                 roce_set_bit(context->qpc_bytes_168,
3156                              QP_CONTEXT_QPC_BYTES_168_CSDB_LP_IND_S, 0);
3157                 roce_set_bit(context->qpc_bytes_168,
3158                              QP_CONTEXT_QPC_BYTES_168_QP_ERR_FLG_S, 0);
3159                 context->sge_use_len = 0;
3160
3161                 roce_set_field(context->qpc_bytes_176,
3162                                QP_CONTEXT_QPC_BYTES_176_DB_CUR_INDEX_M,
3163                                QP_CONTEXT_QPC_BYTES_176_DB_CUR_INDEX_S, 0);
3164                 roce_set_field(context->qpc_bytes_176,
3165                                QP_CONTEXT_QPC_BYTES_176_RETRY_DB_CUR_INDEX_M,
3166                                QP_CONTEXT_QPC_BYTES_176_RETRY_DB_CUR_INDEX_S,
3167                                0);
3168                 roce_set_field(context->qpc_bytes_180,
3169                                QP_CONTEXT_QPC_BYTES_180_SQ_CUR_INDEX_M,
3170                                QP_CONTEXT_QPC_BYTES_180_SQ_CUR_INDEX_S, 0);
3171                 roce_set_field(context->qpc_bytes_180,
3172                                QP_CONTEXT_QPC_BYTES_180_SQ_HEAD_M,
3173                                QP_CONTEXT_QPC_BYTES_180_SQ_HEAD_S, 0);
3174
3175                 context->tx_cur_sq_wqe_ba_l = cpu_to_le32(sq_ba);
3176
3177                 roce_set_field(context->qpc_bytes_188,
3178                                QP_CONTEXT_QPC_BYTES_188_TX_CUR_SQ_WQE_BA_H_M,
3179                                QP_CONTEXT_QPC_BYTES_188_TX_CUR_SQ_WQE_BA_H_S,
3180                                upper_32_bits(sq_ba));
3181                 roce_set_bit(context->qpc_bytes_188,
3182                              QP_CONTEXT_QPC_BYTES_188_PKT_RETRY_FLG_S, 0);
3183                 roce_set_field(context->qpc_bytes_188,
3184                                QP_CONTEXT_QPC_BYTES_188_TX_RETRY_CUR_INDEX_M,
3185                                QP_CONTEXT_QPC_BYTES_188_TX_RETRY_CUR_INDEX_S,
3186                                0);
3187         }
3188
3189         /* Every state transition must update the QP state field in the QPC */
3190         roce_set_field(context->qpc_bytes_144,
3191                        QP_CONTEXT_QPC_BYTES_144_QP_STATE_M,
3192                        QP_CONTEXT_QPC_BYTES_144_QP_STATE_S, new_state);
3193
3194         /* SW pass context to HW */
3195         ret = hns_roce_v1_qp_modify(hr_dev, to_hns_roce_state(cur_state),
3196                                     to_hns_roce_state(new_state), context,
3197                                     hr_qp);
3198         if (ret) {
3199                 dev_err(dev, "hns_roce_qp_modify failed\n");
3200                 goto out;
3201         }
3202
3203         /*
3204          * The driver uses rst2init instead of init2init, so the hardware
3205          * needs to refresh the RQ head via the doorbell again.
3206          */
3207         if (cur_state == IB_QPS_INIT && new_state == IB_QPS_INIT) {
3208                 /* Order the context update before ringing the doorbell */
3209                 wmb();
3210
3211                 roce_set_field(doorbell[0], RQ_DOORBELL_U32_4_RQ_HEAD_M,
3212                                RQ_DOORBELL_U32_4_RQ_HEAD_S, hr_qp->rq.head);
3213                 roce_set_field(doorbell[1], RQ_DOORBELL_U32_8_QPN_M,
3214                                RQ_DOORBELL_U32_8_QPN_S, hr_qp->qpn);
3215                 roce_set_field(doorbell[1], RQ_DOORBELL_U32_8_CMD_M,
3216                                RQ_DOORBELL_U32_8_CMD_S, 1);
3217                 roce_set_bit(doorbell[1], RQ_DOORBELL_U32_8_HW_SYNC_S, 1);
3218
3219                 if (ibqp->uobject) {
3220                         hr_qp->rq.db_reg_l = hr_dev->reg_base +
3221                                      hr_dev->odb_offset +
3222                                      DB_REG_OFFSET * hr_dev->priv_uar.index;
3223                 }
3224
3225                 hns_roce_write64_k(doorbell, hr_qp->rq.db_reg_l);
3226         }
3227
3228         hr_qp->state = new_state;
3229
3230         if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
3231                 hr_qp->resp_depth = attr->max_dest_rd_atomic;
3232         if (attr_mask & IB_QP_PORT) {
3233                 hr_qp->port = attr->port_num - 1;
3234                 hr_qp->phy_port = hr_dev->iboe.phy_port[hr_qp->port];
3235         }
3236
3237         if (new_state == IB_QPS_RESET && !ibqp->uobject) {
3238                 hns_roce_v1_cq_clean(to_hr_cq(ibqp->recv_cq), hr_qp->qpn,
3239                                      ibqp->srq ? to_hr_srq(ibqp->srq) : NULL);
3240                 if (ibqp->send_cq != ibqp->recv_cq)
3241                         hns_roce_v1_cq_clean(to_hr_cq(ibqp->send_cq),
3242                                              hr_qp->qpn, NULL);
3243
3244                 hr_qp->rq.head = 0;
3245                 hr_qp->rq.tail = 0;
3246                 hr_qp->sq.head = 0;
3247                 hr_qp->sq.tail = 0;
3248         }
3249 out:
3250         kfree(context);
3251         return ret;
3252 }
3253
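/*
 * Descriptive note (added): common modify_qp entry. Reject any attr_mask
 * bits beyond the standard set, then dispatch to the QP1 path for GSI/SMI
 * QPs or to the regular QPC path otherwise.
 */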
3254 static int hns_roce_v1_modify_qp(struct ib_qp *ibqp,
3255                                  const struct ib_qp_attr *attr, int attr_mask,
3256                                  enum ib_qp_state cur_state,
3257                                  enum ib_qp_state new_state)
3258 {
3259         if (attr_mask & ~IB_QP_ATTR_STANDARD_BITS)
3260                 return -EOPNOTSUPP;
3261
3262         if (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_SMI)
3263                 return hns_roce_v1_m_sqp(ibqp, attr, attr_mask, cur_state,
3264                                          new_state);
3265         else
3266                 return hns_roce_v1_m_qp(ibqp, attr, attr_mask, cur_state,
3267                                         new_state);
3268 }
3269
3270 static enum ib_qp_state to_ib_qp_state(enum hns_roce_qp_state state)
3271 {
3272         switch (state) {
3273         case HNS_ROCE_QP_STATE_RST:
3274                 return IB_QPS_RESET;
3275         case HNS_ROCE_QP_STATE_INIT:
3276                 return IB_QPS_INIT;
3277         case HNS_ROCE_QP_STATE_RTR:
3278                 return IB_QPS_RTR;
3279         case HNS_ROCE_QP_STATE_RTS:
3280                 return IB_QPS_RTS;
3281         case HNS_ROCE_QP_STATE_SQD:
3282                 return IB_QPS_SQD;
3283         case HNS_ROCE_QP_STATE_ERR:
3284                 return IB_QPS_ERR;
3285         default:
3286                 return IB_QPS_ERR;
3287         }
3288 }
3289
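/*
 * Descriptive note (added): fetch the QP context from hardware. The
 * QUERY_QP mailbox command writes the QPC into the mailbox buffer, which
 * is then copied out to the caller.
 */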
3290 static int hns_roce_v1_query_qpc(struct hns_roce_dev *hr_dev,
3291                                  struct hns_roce_qp *hr_qp,
3292                                  struct hns_roce_qp_context *hr_context)
3293 {
3294         struct hns_roce_cmd_mailbox *mailbox;
3295         int ret;
3296
3297         mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
3298         if (IS_ERR(mailbox))
3299                 return PTR_ERR(mailbox);
3300
3301         ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, hr_qp->qpn, 0,
3302                                 HNS_ROCE_CMD_QUERY_QP,
3303                                 HNS_ROCE_CMD_TIMEOUT_MSECS);
3304         if (!ret)
3305                 memcpy(hr_context, mailbox->buf, sizeof(*hr_context));
3306         else
3307                 dev_err(&hr_dev->pdev->dev, "QUERY QP cmd process error\n");
3308
3309         hns_roce_free_cmd_mailbox(hr_dev, mailbox);
3310
3311         return ret;
3312 }
3313
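/*
 * Descriptive note (added): query QP0/QP1. The QP1C context is read back
 * from the per-port registers and translated into ib_qp_attr; most
 * attributes are fixed for the GSI QP and reported as constants.
 */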
3314 static int hns_roce_v1_q_sqp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
3315                              int qp_attr_mask,
3316                              struct ib_qp_init_attr *qp_init_attr)
3317 {
3318         struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
3319         struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
3320         struct hns_roce_sqp_context context;
3321         u32 addr;
3322
3323         mutex_lock(&hr_qp->mutex);
3324
3325         if (hr_qp->state == IB_QPS_RESET) {
3326                 qp_attr->qp_state = IB_QPS_RESET;
3327                 goto done;
3328         }
3329
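        /*
         * Descriptive note (added): read the QP1C context back from the
         * same per-port registers that hns_roce_v1_m_sqp() writes: ten
         * consecutive 32-bit words. roce_read() takes a byte offset,
         * hence the 4-byte stride below.
         */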
3330         addr = ROCEE_QP1C_CFG0_0_REG +
3331                 hr_qp->phy_port * sizeof(struct hns_roce_sqp_context);
3332         context.qp1c_bytes_4 = cpu_to_le32(roce_read(hr_dev, addr));
3333         context.sq_rq_bt_l = cpu_to_le32(roce_read(hr_dev, addr + 4));
3334         context.qp1c_bytes_12 = cpu_to_le32(roce_read(hr_dev, addr + 8));
3335         context.qp1c_bytes_16 = cpu_to_le32(roce_read(hr_dev, addr + 12));
3336         context.qp1c_bytes_20 = cpu_to_le32(roce_read(hr_dev, addr + 16));
3337         context.cur_rq_wqe_ba_l = cpu_to_le32(roce_read(hr_dev, addr + 20));
3338         context.qp1c_bytes_28 = cpu_to_le32(roce_read(hr_dev, addr + 24));
3339         context.qp1c_bytes_32 = cpu_to_le32(roce_read(hr_dev, addr + 28));
3340         context.cur_sq_wqe_ba_l = cpu_to_le32(roce_read(hr_dev, addr + 32));
3341         context.qp1c_bytes_40 = cpu_to_le32(roce_read(hr_dev, addr + 36));
3342
3343         hr_qp->state = roce_get_field(context.qp1c_bytes_4,
3344                                       QP1C_BYTES_4_QP_STATE_M,
3345                                       QP1C_BYTES_4_QP_STATE_S);
3346         qp_attr->qp_state       = hr_qp->state;
3347         qp_attr->path_mtu       = IB_MTU_256;
3348         qp_attr->path_mig_state = IB_MIG_ARMED;
3349         qp_attr->qkey           = QKEY_VAL;
3350         qp_attr->ah_attr.type   = RDMA_AH_ATTR_TYPE_ROCE;
3351         qp_attr->rq_psn         = 0;
3352         qp_attr->sq_psn         = 0;
3353         qp_attr->dest_qp_num    = 1;
3354         qp_attr->qp_access_flags = IB_ACCESS_REMOTE_WRITE | IB_ACCESS_REMOTE_READ;
3355
3356         qp_attr->pkey_index = roce_get_field(context.qp1c_bytes_20,
3357                                              QP1C_BYTES_20_PKEY_IDX_M,
3358                                              QP1C_BYTES_20_PKEY_IDX_S);
3359         qp_attr->port_num = hr_qp->port + 1;
3360         qp_attr->sq_draining = 0;
3361         qp_attr->max_rd_atomic = 0;
3362         qp_attr->max_dest_rd_atomic = 0;
3363         qp_attr->min_rnr_timer = 0;
3364         qp_attr->timeout = 0;
3365         qp_attr->retry_cnt = 0;
3366         qp_attr->rnr_retry = 0;
3367         qp_attr->alt_timeout = 0;
3368
3369 done:
3370         qp_attr->cur_qp_state = qp_attr->qp_state;
3371         qp_attr->cap.max_recv_wr = hr_qp->rq.wqe_cnt;
3372         qp_attr->cap.max_recv_sge = hr_qp->rq.max_gs;
3373         qp_attr->cap.max_send_wr = hr_qp->sq.wqe_cnt;
3374         qp_attr->cap.max_send_sge = hr_qp->sq.max_gs;
3375         qp_attr->cap.max_inline_data = 0;
3376         qp_init_attr->cap = qp_attr->cap;
3377         qp_init_attr->create_flags = 0;
3378
3379         mutex_unlock(&hr_qp->mutex);
3380
3381         return 0;
3382 }
3383
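/*
 * Descriptive note (added): query a regular QP. The QPC is read via the
 * QUERY_QP mailbox command and its fields are translated back into
 * ib_qp_attr / ib_qp_init_attr.
 */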
3384 static int hns_roce_v1_q_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
3385                             int qp_attr_mask,
3386                             struct ib_qp_init_attr *qp_init_attr)
3387 {
3388         struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
3389         struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
3390         struct device *dev = &hr_dev->pdev->dev;
3391         struct hns_roce_qp_context *context;
3392         int tmp_qp_state;
3393         int ret = 0;
3394         int state;
3395
3396         context = kzalloc(sizeof(*context), GFP_KERNEL);
3397         if (!context)
3398                 return -ENOMEM;
3399
3400         memset(qp_attr, 0, sizeof(*qp_attr));
3401         memset(qp_init_attr, 0, sizeof(*qp_init_attr));
3402
3403         mutex_lock(&hr_qp->mutex);
3404
3405         if (hr_qp->state == IB_QPS_RESET) {
3406                 qp_attr->qp_state = IB_QPS_RESET;
3407                 goto done;
3408         }
3409
3410         ret = hns_roce_v1_query_qpc(hr_dev, hr_qp, context);
3411         if (ret) {
3412                 dev_err(dev, "query qpc error\n");
3413                 ret = -EINVAL;
3414                 goto out;
3415         }
3416
3417         state = roce_get_field(context->qpc_bytes_144,
3418                                QP_CONTEXT_QPC_BYTES_144_QP_STATE_M,
3419                                QP_CONTEXT_QPC_BYTES_144_QP_STATE_S);
3420         tmp_qp_state = (int)to_ib_qp_state((enum hns_roce_qp_state)state);
3421         if (tmp_qp_state == -1) {
3422                 dev_err(dev, "to_ib_qp_state error\n");
3423                 ret = -EINVAL;
3424                 goto out;
3425         }
3426         hr_qp->state = (u8)tmp_qp_state;
3427         qp_attr->qp_state = (enum ib_qp_state)hr_qp->state;
3428         qp_attr->path_mtu = (enum ib_mtu)roce_get_field(context->qpc_bytes_48,
3429                                                QP_CONTEXT_QPC_BYTES_48_MTU_M,
3430                                                QP_CONTEXT_QPC_BYTES_48_MTU_S);
3431         qp_attr->path_mig_state = IB_MIG_ARMED;
3432         qp_attr->ah_attr.type   = RDMA_AH_ATTR_TYPE_ROCE;
3433         if (hr_qp->ibqp.qp_type == IB_QPT_UD)
3434                 qp_attr->qkey = QKEY_VAL;
3435
3436         qp_attr->rq_psn = roce_get_field(context->qpc_bytes_88,
3437                                          QP_CONTEXT_QPC_BYTES_88_RX_REQ_EPSN_M,
3438                                          QP_CONTEXT_QPC_BYTES_88_RX_REQ_EPSN_S);
3439         qp_attr->sq_psn = (u32)roce_get_field(context->qpc_bytes_164,
3440                                              QP_CONTEXT_QPC_BYTES_164_SQ_PSN_M,
3441                                              QP_CONTEXT_QPC_BYTES_164_SQ_PSN_S);
3442         qp_attr->dest_qp_num = (u8)roce_get_field(context->qpc_bytes_36,
3443                                         QP_CONTEXT_QPC_BYTES_36_DEST_QP_M,
3444                                         QP_CONTEXT_QPC_BYTES_36_DEST_QP_S);
3445         qp_attr->qp_access_flags = ((roce_get_bit(context->qpc_bytes_4,
3446                         QP_CONTEXT_QPC_BYTE_4_RDMA_READ_ENABLE_S)) << 2) |
3447                                    ((roce_get_bit(context->qpc_bytes_4,
3448                         QP_CONTEXT_QPC_BYTE_4_RDMA_WRITE_ENABLE_S)) << 1) |
3449                                    ((roce_get_bit(context->qpc_bytes_4,
3450                         QP_CONTEXT_QPC_BYTE_4_ATOMIC_OPERATION_ENABLE_S)) << 3);
3451
3452         if (hr_qp->ibqp.qp_type == IB_QPT_RC ||
3453             hr_qp->ibqp.qp_type == IB_QPT_UC) {
3454                 struct ib_global_route *grh =
3455                         rdma_ah_retrieve_grh(&qp_attr->ah_attr);
3456
3457                 rdma_ah_set_sl(&qp_attr->ah_attr,
3458                                roce_get_field(context->qpc_bytes_156,
3459                                               QP_CONTEXT_QPC_BYTES_156_SL_M,
3460                                               QP_CONTEXT_QPC_BYTES_156_SL_S));
3461                 rdma_ah_set_ah_flags(&qp_attr->ah_attr, IB_AH_GRH);
3462                 grh->flow_label =
3463                         roce_get_field(context->qpc_bytes_48,
3464                                        QP_CONTEXT_QPC_BYTES_48_FLOWLABEL_M,
3465                                        QP_CONTEXT_QPC_BYTES_48_FLOWLABEL_S);
3466                 grh->sgid_index =
3467                         roce_get_field(context->qpc_bytes_36,
3468                                        QP_CONTEXT_QPC_BYTES_36_SGID_INDEX_M,
3469                                        QP_CONTEXT_QPC_BYTES_36_SGID_INDEX_S);
3470                 grh->hop_limit =
3471                         roce_get_field(context->qpc_bytes_44,
3472                                        QP_CONTEXT_QPC_BYTES_44_HOPLMT_M,
3473                                        QP_CONTEXT_QPC_BYTES_44_HOPLMT_S);
3474                 grh->traffic_class =
3475                         roce_get_field(context->qpc_bytes_48,
3476                                        QP_CONTEXT_QPC_BYTES_48_TCLASS_M,
3477                                        QP_CONTEXT_QPC_BYTES_48_TCLASS_S);
3478
3479                 memcpy(grh->dgid.raw, context->dgid,
3480                        sizeof(grh->dgid.raw));
3481         }
3482
3483         qp_attr->pkey_index = roce_get_field(context->qpc_bytes_12,
3484                               QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_M,
3485                               QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_S);
3486         qp_attr->port_num = hr_qp->port + 1;
3487         qp_attr->sq_draining = 0;
3488         qp_attr->max_rd_atomic = 1 << roce_get_field(context->qpc_bytes_156,
3489                                  QP_CONTEXT_QPC_BYTES_156_INITIATOR_DEPTH_M,
3490                                  QP_CONTEXT_QPC_BYTES_156_INITIATOR_DEPTH_S);
3491         qp_attr->max_dest_rd_atomic = 1 << roce_get_field(context->qpc_bytes_32,
3492                                  QP_CONTEXT_QPC_BYTES_32_RESPONDER_RESOURCES_M,
3493                                  QP_CONTEXT_QPC_BYTES_32_RESPONDER_RESOURCES_S);
3494         qp_attr->min_rnr_timer = (u8)(roce_get_field(context->qpc_bytes_24,
3495                         QP_CONTEXT_QPC_BYTES_24_MINIMUM_RNR_NAK_TIMER_M,
3496                         QP_CONTEXT_QPC_BYTES_24_MINIMUM_RNR_NAK_TIMER_S));
3497         qp_attr->timeout = (u8)(roce_get_field(context->qpc_bytes_156,
3498                             QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_M,
3499                             QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_S));
3500         qp_attr->retry_cnt = roce_get_field(context->qpc_bytes_148,
3501                              QP_CONTEXT_QPC_BYTES_148_RETRY_COUNT_M,
3502                              QP_CONTEXT_QPC_BYTES_148_RETRY_COUNT_S);
3503         qp_attr->rnr_retry = (u8)le32_to_cpu(context->rnr_retry);
3504
3505 done:
3506         qp_attr->cur_qp_state = qp_attr->qp_state;
3507         qp_attr->cap.max_recv_wr = hr_qp->rq.wqe_cnt;
3508         qp_attr->cap.max_recv_sge = hr_qp->rq.max_gs;
3509
3510         if (!ibqp->uobject) {
3511                 qp_attr->cap.max_send_wr = hr_qp->sq.wqe_cnt;
3512                 qp_attr->cap.max_send_sge = hr_qp->sq.max_gs;
3513         } else {
3514                 qp_attr->cap.max_send_wr = 0;
3515                 qp_attr->cap.max_send_sge = 0;
3516         }
3517
3518         qp_init_attr->cap = qp_attr->cap;
3519
3520 out:
3521         mutex_unlock(&hr_qp->mutex);
3522         kfree(context);
3523         return ret;
3524 }
3525
3526 static int hns_roce_v1_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
3527                                 int qp_attr_mask,
3528                                 struct ib_qp_init_attr *qp_init_attr)
3529 {
3530         struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
3531
3532         return hr_qp->doorbell_qpn <= 1 ?
3533                 hns_roce_v1_q_sqp(ibqp, qp_attr, qp_attr_mask, qp_init_attr) :
3534                 hns_roce_v1_q_qp(ibqp, qp_attr, qp_attr_mask, qp_init_attr);
3535 }
3536
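/*
 * Descriptive note (added): destroy a QP. Force it to RESET first, then
 * (for kernel QPs) purge its CQEs from the send/receive CQs before
 * unhooking the QP and freeing its resources.
 */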
3537 int hns_roce_v1_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
3538 {
3539         struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
3540         struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
3541         struct hns_roce_cq *send_cq, *recv_cq;
3542         int ret;
3543
3544         ret = hns_roce_v1_modify_qp(ibqp, NULL, 0, hr_qp->state, IB_QPS_RESET);
3545         if (ret)
3546                 return ret;
3547
3548         send_cq = hr_qp->ibqp.send_cq ? to_hr_cq(hr_qp->ibqp.send_cq) : NULL;
3549         recv_cq = hr_qp->ibqp.recv_cq ? to_hr_cq(hr_qp->ibqp.recv_cq) : NULL;
3550
3551         hns_roce_lock_cqs(send_cq, recv_cq);
3552         if (!udata) {
3553                 if (recv_cq)
3554                         __hns_roce_v1_cq_clean(recv_cq, hr_qp->qpn,
3555                                                (hr_qp->ibqp.srq ?
3556                                                 to_hr_srq(hr_qp->ibqp.srq) :
3557                                                 NULL));
3558
3559                 if (send_cq && send_cq != recv_cq)
3560                         __hns_roce_v1_cq_clean(send_cq, hr_qp->qpn, NULL);
3561         }
3562         hns_roce_qp_remove(hr_dev, hr_qp);
3563         hns_roce_unlock_cqs(send_cq, recv_cq);
3564
3565         hns_roce_qp_destroy(hr_dev, hr_qp, udata);
3566
3567         return 0;
3568 }
3569
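/*
 * Descriptive note (added): destroy a CQ. hw v1 may still be writing back
 * CQEs, so poll the CQE write counter until the hardware goes idle (or a
 * timeout is reached) before the CQ buffer is released.
 */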
3570 static int hns_roce_v1_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
3571 {
3572         struct hns_roce_dev *hr_dev = to_hr_dev(ibcq->device);
3573         struct hns_roce_cq *hr_cq = to_hr_cq(ibcq);
3574         struct device *dev = &hr_dev->pdev->dev;
3575         u32 cqe_cnt_ori;
3576         u32 cqe_cnt_cur;
3577         int wait_time = 0;
3578
3579         /*
3580          * Before freeing the cq buffer, we need to ensure that all
3581          * outstanding CQEs have been written by checking the CQE counter.
3582          */
3583         cqe_cnt_ori = roce_read(hr_dev, ROCEE_SCAEP_WR_CQE_CNT);
3584         while (1) {
3585                 if (roce_read(hr_dev, ROCEE_CAEP_CQE_WCMD_EMPTY) &
3586                     HNS_ROCE_CQE_WCMD_EMPTY_BIT)
3587                         break;
3588
3589                 cqe_cnt_cur = roce_read(hr_dev, ROCEE_SCAEP_WR_CQE_CNT);
3590                 if ((cqe_cnt_cur - cqe_cnt_ori) >= HNS_ROCE_MIN_CQE_CNT)
3591                         break;
3592
3593                 msleep(HNS_ROCE_EACH_FREE_CQ_WAIT_MSECS);
3594                 if (wait_time > HNS_ROCE_MAX_FREE_CQ_WAIT_CNT) {
3595                         dev_warn(dev, "Destroy cq 0x%lx timeout!\n",
3596                                 hr_cq->cqn);
3597                         break;
3598                 }
3599                 wait_time++;
3600         }
3601         return 0;
3602 }
3603
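/*
 * Descriptive note (added): update the EQ consumer index doorbell; the
 * req_not bit above the consumer index field re-arms the EQ to request
 * the next event notification.
 */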
3604 static void set_eq_cons_index_v1(struct hns_roce_eq *eq, int req_not)
3605 {
3606         roce_raw_write((eq->cons_index & HNS_ROCE_V1_CONS_IDX_M) |
3607                       (req_not << eq->log_entries), eq->doorbell);
3608 }
3609
3610 static void hns_roce_v1_wq_catas_err_handle(struct hns_roce_dev *hr_dev,
3611                                             struct hns_roce_aeqe *aeqe, int qpn)
3612 {
3613         struct device *dev = &hr_dev->pdev->dev;
3614
3615         dev_warn(dev, "Local Work Queue Catastrophic Error.\n");
3616         switch (roce_get_field(aeqe->asyn, HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_M,
3617                                HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_S)) {
3618         case HNS_ROCE_LWQCE_QPC_ERROR:
3619                 dev_warn(dev, "QP %d, QPC error.\n", qpn);
3620                 break;
3621         case HNS_ROCE_LWQCE_MTU_ERROR:
3622                 dev_warn(dev, "QP %d, MTU error.\n", qpn);
3623                 break;
3624         case HNS_ROCE_LWQCE_WQE_BA_ADDR_ERROR:
3625                 dev_warn(dev, "QP %d, WQE BA addr error.\n", qpn);
3626                 break;
3627         case HNS_ROCE_LWQCE_WQE_ADDR_ERROR:
3628                 dev_warn(dev, "QP %d, WQE addr error.\n", qpn);
3629                 break;
3630         case HNS_ROCE_LWQCE_SQ_WQE_SHIFT_ERROR:
3631                 dev_warn(dev, "QP %d, WQE shift error\n", qpn);
3632                 break;
3633         case HNS_ROCE_LWQCE_SL_ERROR:
3634                 dev_warn(dev, "QP %d, SL error.\n", qpn);
3635                 break;
3636         case HNS_ROCE_LWQCE_PORT_ERROR:
3637                 dev_warn(dev, "QP %d, port error.\n", qpn);
3638                 break;
3639         default:
3640                 break;
3641         }
3642 }
3643
3644 static void hns_roce_v1_local_wq_access_err_handle(struct hns_roce_dev *hr_dev,
3645                                                    struct hns_roce_aeqe *aeqe,
3646                                                    int qpn)
3647 {
3648         struct device *dev = &hr_dev->pdev->dev;
3649
3650         dev_warn(dev, "Local Access Violation Work Queue Error.\n");
3651         switch (roce_get_field(aeqe->asyn, HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_M,
3652                                HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_S)) {
3653         case HNS_ROCE_LAVWQE_R_KEY_VIOLATION:
3654                 dev_warn(dev, "QP %d, R_key violation.\n", qpn);
3655                 break;
3656         case HNS_ROCE_LAVWQE_LENGTH_ERROR:
3657                 dev_warn(dev, "QP %d, length error.\n", qpn);
3658                 break;
3659         case HNS_ROCE_LAVWQE_VA_ERROR:
3660                 dev_warn(dev, "QP %d, VA error.\n", qpn);
3661                 break;
3662         case HNS_ROCE_LAVWQE_PD_ERROR:
3663                 dev_err(dev, "QP %d, PD error.\n", qpn);
3664                 break;
3665         case HNS_ROCE_LAVWQE_RW_ACC_ERROR:
3666                 dev_warn(dev, "QP %d, rw acc error.\n", qpn);
3667                 break;
3668         case HNS_ROCE_LAVWQE_KEY_STATE_ERROR:
3669                 dev_warn(dev, "QP %d, key state error.\n", qpn);
3670                 break;
3671         case HNS_ROCE_LAVWQE_MR_OPERATION_ERROR:
3672                 dev_warn(dev, "QP %d, MR operation error.\n", qpn);
3673                 break;
3674         default:
3675                 break;
3676         }
3677 }
3678
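/*
 * Dispatch an asynchronous QP error event. Note the special case below:
 * QPNs 0 and 1 denote the per-port SMI/GSI queue pairs, so they are
 * first remapped to a unique software QPN as
 * 'HNS_ROCE_MAX_PORTS * qpn + phy_port'.
 */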
3679 static void hns_roce_v1_qp_err_handle(struct hns_roce_dev *hr_dev,
3680                                       struct hns_roce_aeqe *aeqe,
3681                                       int event_type)
3682 {
3683         struct device *dev = &hr_dev->pdev->dev;
3684         int phy_port;
3685         int qpn;
3686
3687         qpn = roce_get_field(aeqe->event.qp_event.qp,
3688                              HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_M,
3689                              HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_S);
3690         phy_port = roce_get_field(aeqe->event.qp_event.qp,
3691                                   HNS_ROCE_AEQE_EVENT_QP_EVENT_PORT_NUM_M,
3692                                   HNS_ROCE_AEQE_EVENT_QP_EVENT_PORT_NUM_S);
3693         if (qpn <= 1)
3694                 qpn = HNS_ROCE_MAX_PORTS * qpn + phy_port;
3695
3696         switch (event_type) {
3697         case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
3698                 dev_warn(dev, "Invalid Req Local Work Queue Error.\n"
3699                          "QP %d, phy_port %d.\n", qpn, phy_port);
3700                 break;
3701         case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
3702                 hns_roce_v1_wq_catas_err_handle(hr_dev, aeqe, qpn);
3703                 break;
3704         case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
3705                 hns_roce_v1_local_wq_access_err_handle(hr_dev, aeqe, qpn);
3706                 break;
3707         default:
3708                 break;
3709         }
3710
3711         hns_roce_qp_event(hr_dev, qpn, event_type);
3712 }
3713
3714 static void hns_roce_v1_cq_err_handle(struct hns_roce_dev *hr_dev,
3715                                       struct hns_roce_aeqe *aeqe,
3716                                       int event_type)
3717 {
3718         struct device *dev = &hr_dev->pdev->dev;
3719         u32 cqn;
3720
3721         cqn = roce_get_field(aeqe->event.cq_event.cq,
3722                           HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_M,
3723                           HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_S);
3724
3725         switch (event_type) {
3726         case HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR:
3727                 dev_warn(dev, "CQ 0x%x access err.\n", cqn);
3728                 break;
3729         case HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW:
3730                 dev_warn(dev, "CQ 0x%x overflow\n", cqn);
3731                 break;
3732         case HNS_ROCE_EVENT_TYPE_CQ_ID_INVALID:
3733                 dev_warn(dev, "CQ 0x%x ID invalid.\n", cqn);
3734                 break;
3735         default:
3736                 break;
3737         }
3738
3739         hns_roce_cq_event(hr_dev, cqn, event_type);
3740 }
3741
3742 static void hns_roce_v1_db_overflow_handle(struct hns_roce_dev *hr_dev,
3743                                            struct hns_roce_aeqe *aeqe)
3744 {
3745         struct device *dev = &hr_dev->pdev->dev;
3746
3747         switch (roce_get_field(aeqe->asyn, HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_M,
3748                                HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_S)) {
3749         case HNS_ROCE_DB_SUBTYPE_SDB_OVF:
3750                 dev_warn(dev, "SDB overflow.\n");
3751                 break;
3752         case HNS_ROCE_DB_SUBTYPE_SDB_ALM_OVF:
3753                 dev_warn(dev, "SDB almost overflow.\n");
3754                 break;
3755         case HNS_ROCE_DB_SUBTYPE_SDB_ALM_EMP:
3756                 dev_warn(dev, "SDB almost empty.\n");
3757                 break;
3758         case HNS_ROCE_DB_SUBTYPE_ODB_OVF:
3759                 dev_warn(dev, "ODB overflow.\n");
3760                 break;
3761         case HNS_ROCE_DB_SUBTYPE_ODB_ALM_OVF:
3762                 dev_warn(dev, "ODB almost overflow.\n");
3763                 break;
3764         case HNS_ROCE_DB_SUBTYPE_ODB_ALM_EMP:
3765                 dev_warn(dev, "ODB almost empty.\n");
3766                 break;
3767         default:
3768                 break;
3769         }
3770 }
3771
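/*
 * The AEQ buffer is allocated in HNS_ROCE_BA_SIZE chunks (see
 * hns_roce_v1_create_eq()), so locating an entry means picking chunk
 * 'off / HNS_ROCE_BA_SIZE' and indexing 'off % HNS_ROCE_BA_SIZE' into
 * it; 'entry' is masked with (entries - 1) because the queue is a
 * power-of-two ring.
 */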
3772 static struct hns_roce_aeqe *get_aeqe_v1(struct hns_roce_eq *eq, u32 entry)
3773 {
3774         unsigned long off = (entry & (eq->entries - 1)) * HNS_ROCE_AEQE_SIZE;
3775
3776         return (struct hns_roce_aeqe *)((u8 *)
3777                 (eq->buf_list[off / HNS_ROCE_BA_SIZE].buf) +
3778                 off % HNS_ROCE_BA_SIZE);
3779 }
3780
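/*
 * Ownership test for the ring: the hardware is expected to flip the
 * AEQE owner bit on every pass around the queue, while the consumer
 * derives the polarity it expects from bit 'log2(entries)' of
 * cons_index. With a 16-entry AEQ, for instance, an entry is new while
 * its owner bit differs from '!!(cons_index & 16)'; once the two match,
 * the entry has not been written yet and NULL is returned.
 */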
3781 static struct hns_roce_aeqe *next_aeqe_sw_v1(struct hns_roce_eq *eq)
3782 {
3783         struct hns_roce_aeqe *aeqe = get_aeqe_v1(eq, eq->cons_index);
3784
3785         return (roce_get_bit(aeqe->asyn, HNS_ROCE_AEQE_U32_4_OWNER_S) ^
3786                 !!(eq->cons_index & eq->entries)) ? aeqe : NULL;
3787 }
3788
3789 static int hns_roce_v1_aeq_int(struct hns_roce_dev *hr_dev,
3790                                struct hns_roce_eq *eq)
3791 {
3792         struct device *dev = &hr_dev->pdev->dev;
3793         struct hns_roce_aeqe *aeqe;
3794         int aeqes_found = 0;
3795         int event_type;
3796
3797         while ((aeqe = next_aeqe_sw_v1(eq))) {
3798
3799                 /* Make sure we read the AEQ entry after we have checked the
3800                  * ownership bit
3801                  */
3802                 dma_rmb();
3803
3804                 dev_dbg(dev, "aeqe = %pK, aeqe->asyn.event_type = 0x%lx\n",
3805                         aeqe,
3806                         roce_get_field(aeqe->asyn,
3807                                        HNS_ROCE_AEQE_U32_4_EVENT_TYPE_M,
3808                                        HNS_ROCE_AEQE_U32_4_EVENT_TYPE_S));
3809                 event_type = roce_get_field(aeqe->asyn,
3810                                             HNS_ROCE_AEQE_U32_4_EVENT_TYPE_M,
3811                                             HNS_ROCE_AEQE_U32_4_EVENT_TYPE_S);
3812                 switch (event_type) {
3813                 case HNS_ROCE_EVENT_TYPE_PATH_MIG:
3814                         dev_warn(dev, "PATH MIG not supported\n");
3815                         break;
3816                 case HNS_ROCE_EVENT_TYPE_COMM_EST:
3817                         dev_warn(dev, "COMMUNICATION established\n");
3818                         break;
3819                 case HNS_ROCE_EVENT_TYPE_SQ_DRAINED:
3820                         dev_warn(dev, "SQ DRAINED not supported\n");
3821                         break;
3822                 case HNS_ROCE_EVENT_TYPE_PATH_MIG_FAILED:
3823                         dev_warn(dev, "PATH MIG failed\n");
3824                         break;
3825                 case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
3826                 case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
3827                 case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
3828                         hns_roce_v1_qp_err_handle(hr_dev, aeqe, event_type);
3829                         break;
3830                 case HNS_ROCE_EVENT_TYPE_SRQ_LIMIT_REACH:
3831                 case HNS_ROCE_EVENT_TYPE_SRQ_CATAS_ERROR:
3832                 case HNS_ROCE_EVENT_TYPE_SRQ_LAST_WQE_REACH:
3833                         dev_warn(dev, "SRQ is not supported!\n");
3834                         break;
3835                 case HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR:
3836                 case HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW:
3837                 case HNS_ROCE_EVENT_TYPE_CQ_ID_INVALID:
3838                         hns_roce_v1_cq_err_handle(hr_dev, aeqe, event_type);
3839                         break;
3840                 case HNS_ROCE_EVENT_TYPE_PORT_CHANGE:
3841                         dev_warn(dev, "port change.\n");
3842                         break;
3843                 case HNS_ROCE_EVENT_TYPE_MB:
3844                         hns_roce_cmd_event(hr_dev,
3845                                            le16_to_cpu(aeqe->event.cmd.token),
3846                                            aeqe->event.cmd.status,
3847                                            le64_to_cpu(aeqe->event.cmd.out_param
3848                                            ));
3849                         break;
3850                 case HNS_ROCE_EVENT_TYPE_DB_OVERFLOW:
3851                         hns_roce_v1_db_overflow_handle(hr_dev, aeqe);
3852                         break;
3853                 case HNS_ROCE_EVENT_TYPE_CEQ_OVERFLOW:
3854                         dev_warn(dev, "CEQ 0x%lx overflow.\n",
3855                         roce_get_field(aeqe->event.ce_event.ceqe,
3856                                      HNS_ROCE_AEQE_EVENT_CE_EVENT_CEQE_CEQN_M,
3857                                      HNS_ROCE_AEQE_EVENT_CE_EVENT_CEQE_CEQN_S));
3858                         break;
3859                 default:
3860                         dev_warn(dev, "Unhandled event %d on EQ %d at idx %u.\n",
3861                                  event_type, eq->eqn, eq->cons_index);
3862                         break;
3863                 }
3864
3865                 eq->cons_index++;
3866                 aeqes_found = 1;
3867
3868                 if (eq->cons_index > EQ_DEPTH_COEFF * hr_dev->caps.aeqe_depth - 1)
3869                         eq->cons_index = 0;
3870         }
3871
3872         set_eq_cons_index_v1(eq, 0);
3873
3874         return aeqes_found;
3875 }
3876
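/*
 * CEQ entry lookup and ownership check: the same chunked-buffer and
 * owner-bit scheme as the AEQ helpers above, only with the smaller
 * HNS_ROCE_CEQE_SIZE entries.
 */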
3877 static struct hns_roce_ceqe *get_ceqe_v1(struct hns_roce_eq *eq, u32 entry)
3878 {
3879         unsigned long off = (entry & (eq->entries - 1)) * HNS_ROCE_CEQE_SIZE;
3880
3881         return (struct hns_roce_ceqe *)((u8 *)
3882                         (eq->buf_list[off / HNS_ROCE_BA_SIZE].buf) +
3883                         off % HNS_ROCE_BA_SIZE);
3884 }
3885
3886 static struct hns_roce_ceqe *next_ceqe_sw_v1(struct hns_roce_eq *eq)
3887 {
3888         struct hns_roce_ceqe *ceqe = get_ceqe_v1(eq, eq->cons_index);
3889
3890         return (roce_get_bit(ceqe->comp, HNS_ROCE_CEQE_CEQE_COMP_OWNER_S) ^
3891                 !!(eq->cons_index & eq->entries)) ? ceqe : NULL;
3893 }
3894
3895 static int hns_roce_v1_ceq_int(struct hns_roce_dev *hr_dev,
3896                                struct hns_roce_eq *eq)
3897 {
3898         struct hns_roce_ceqe *ceqe;
3899         int ceqes_found = 0;
3900         u32 cqn;
3901
3902         while ((ceqe = next_ceqe_sw_v1(eq))) {
3903
3904                 /* Make sure we read CEQ entry after we have checked the
3905                  * ownership bit
3906                  */
3907                 dma_rmb();
3908
3909                 cqn = roce_get_field(ceqe->comp,
3910                                      HNS_ROCE_CEQE_CEQE_COMP_CQN_M,
3911                                      HNS_ROCE_CEQE_CEQE_COMP_CQN_S);
3912                 hns_roce_cq_completion(hr_dev, cqn);
3913
3914                 ++eq->cons_index;
3915                 ceqes_found = 1;
3916
3917                 if (eq->cons_index >
3918                     EQ_DEPTH_COEFF * hr_dev->caps.ceqe_depth - 1)
3919                         eq->cons_index = 0;
3920         }
3921
3922         set_eq_cons_index_v1(eq, 0);
3923
3924         return ceqes_found;
3925 }
3926
3927 static irqreturn_t hns_roce_v1_msix_interrupt_eq(int irq, void *eq_ptr)
3928 {
3929         struct hns_roce_eq  *eq  = eq_ptr;
3930         struct hns_roce_dev *hr_dev = eq->hr_dev;
3931         int int_work;
3932
3933         if (eq->type_flag == HNS_ROCE_CEQ)
3934                 /* CEQ irq routine; a CEQ is a pulse irq, no clearing needed */
3935                 int_work = hns_roce_v1_ceq_int(hr_dev, eq);
3936         else
3937                 /* AEQ irq routine; an AEQ is a pulse irq, no clearing needed */
3938                 int_work = hns_roce_v1_aeq_int(hr_dev, eq);
3939
3940         return IRQ_RETVAL(int_work);
3941 }
3942
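/*
 * Abnormal-event IRQ handler. Every condition below is acknowledged
 * with the same three-step sequence: mask the source, clear its
 * write-1-to-clear status bit, then unmask it again so further events
 * can be raised.
 */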
3943 static irqreturn_t hns_roce_v1_msix_interrupt_abn(int irq, void *dev_id)
3944 {
3945         struct hns_roce_dev *hr_dev = dev_id;
3946         struct device *dev = &hr_dev->pdev->dev;
3947         int int_work = 0;
3948         u32 caepaemask_val;
3949         u32 cealmovf_val;
3950         u32 caepaest_val;
3951         u32 aeshift_val;
3952         u32 ceshift_val;
3953         u32 cemask_val;
3954         __le32 tmp;
3955         int i;
3956
3957         /*
3958          * Abnormal interrupts (AEQ overflow, ECC multi-bit error, CEQ
3959          * overflow) must be cleared explicitly: mask the irq, clear the
3960          * interrupt state, then cancel the mask.
3961          */
3962         aeshift_val = roce_read(hr_dev, ROCEE_CAEP_AEQC_AEQE_SHIFT_REG);
3963         tmp = cpu_to_le32(aeshift_val);
3964
3965         /* AEQE overflow */
3966         if (roce_get_bit(tmp,
3967                 ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQ_ALM_OVF_INT_ST_S) == 1) {
3968                 dev_warn(dev, "AEQ overflow!\n");
3969
3970                 /* Set mask */
3971                 caepaemask_val = roce_read(hr_dev, ROCEE_CAEP_AE_MASK_REG);
3972                 tmp = cpu_to_le32(caepaemask_val);
3973                 roce_set_bit(tmp, ROCEE_CAEP_AE_MASK_CAEP_AEQ_ALM_OVF_MASK_S,
3974                              HNS_ROCE_INT_MASK_ENABLE);
3975                 caepaemask_val = le32_to_cpu(tmp);
3976                 roce_write(hr_dev, ROCEE_CAEP_AE_MASK_REG, caepaemask_val);
3977
3978                 /* Clear int state(INT_WC : write 1 clear) */
3979                 caepaest_val = roce_read(hr_dev, ROCEE_CAEP_AE_ST_REG);
3980                 tmp = cpu_to_le32(caepaest_val);
3981                 roce_set_bit(tmp, ROCEE_CAEP_AE_ST_CAEP_AEQ_ALM_OVF_S, 1);
3982                 caepaest_val = le32_to_cpu(tmp);
3983                 roce_write(hr_dev, ROCEE_CAEP_AE_ST_REG, caepaest_val);
3984
3985                 /* Clear mask */
3986                 caepaemask_val = roce_read(hr_dev, ROCEE_CAEP_AE_MASK_REG);
3987                 tmp = cpu_to_le32(caepaemask_val);
3988                 roce_set_bit(tmp, ROCEE_CAEP_AE_MASK_CAEP_AEQ_ALM_OVF_MASK_S,
3989                              HNS_ROCE_INT_MASK_DISABLE);
3990                 caepaemask_val = le32_to_cpu(tmp);
3991                 roce_write(hr_dev, ROCEE_CAEP_AE_MASK_REG, caepaemask_val);
3992         }
3993
3994         /* CEQ almost overflow */
3995         for (i = 0; i < hr_dev->caps.num_comp_vectors; i++) {
3996                 ceshift_val = roce_read(hr_dev, ROCEE_CAEP_CEQC_SHIFT_0_REG +
3997                                         i * CEQ_REG_OFFSET);
3998                 tmp = cpu_to_le32(ceshift_val);
3999
4000                 if (roce_get_bit(tmp,
4001                         ROCEE_CAEP_CEQC_SHIFT_CAEP_CEQ_ALM_OVF_INT_ST_S) == 1) {
4002                         dev_warn(dev, "CEQ[%d] almost overflow!\n", i);
4003                         int_work++;
4004
4005                         /* Set mask */
4006                         cemask_val = roce_read(hr_dev,
4007                                                ROCEE_CAEP_CE_IRQ_MASK_0_REG +
4008                                                i * CEQ_REG_OFFSET);
4009                         tmp = cpu_to_le32(cemask_val);
4010                         roce_set_bit(tmp,
4011                                 ROCEE_CAEP_CE_IRQ_MASK_CAEP_CEQ_ALM_OVF_MASK_S,
4012                                 HNS_ROCE_INT_MASK_ENABLE);
4013                         cemask_val = le32_to_cpu(tmp);
4014                         roce_write(hr_dev, ROCEE_CAEP_CE_IRQ_MASK_0_REG +
4015                                    i * CEQ_REG_OFFSET, cemask_val);
4016
4017                         /* Clear int state(INT_WC : write 1 clear) */
4018                         cealmovf_val = roce_read(hr_dev,
4019                                        ROCEE_CAEP_CEQ_ALM_OVF_0_REG +
4020                                        i * CEQ_REG_OFFSET);
4021                         tmp = cpu_to_le32(cealmovf_val);
4022                         roce_set_bit(tmp,
4023                                      ROCEE_CAEP_CEQ_ALM_OVF_CAEP_CEQ_ALM_OVF_S,
4024                                      1);
4025                         cealmovf_val = le32_to_cpu(tmp);
4026                         roce_write(hr_dev, ROCEE_CAEP_CEQ_ALM_OVF_0_REG +
4027                                    i * CEQ_REG_OFFSET, cealmovf_val);
4028
4029                         /* Clear mask */
4030                         cemask_val = roce_read(hr_dev,
4031                                      ROCEE_CAEP_CE_IRQ_MASK_0_REG +
4032                                      i * CEQ_REG_OFFSET);
4033                         tmp = cpu_to_le32(cemask_val);
4034                         roce_set_bit(tmp,
4035                                ROCEE_CAEP_CE_IRQ_MASK_CAEP_CEQ_ALM_OVF_MASK_S,
4036                                HNS_ROCE_INT_MASK_DISABLE);
4037                         cemask_val = le32_to_cpu(tmp);
4038                         roce_write(hr_dev, ROCEE_CAEP_CE_IRQ_MASK_0_REG +
4039                                    i * CEQ_REG_OFFSET, cemask_val);
4040                 }
4041         }
4042
4043         /* ECC multi-bit error alarm */
4044         dev_warn(dev, "ECC UCERR ALARM: 0x%x, 0x%x, 0x%x\n",
4045                  roce_read(hr_dev, ROCEE_ECC_UCERR_ALM0_REG),
4046                  roce_read(hr_dev, ROCEE_ECC_UCERR_ALM1_REG),
4047                  roce_read(hr_dev, ROCEE_ECC_UCERR_ALM2_REG));
4048
4049         dev_warn(dev, "ECC CERR ALARM: 0x%x, 0x%x, 0x%x\n",
4050                  roce_read(hr_dev, ROCEE_ECC_CERR_ALM0_REG),
4051                  roce_read(hr_dev, ROCEE_ECC_CERR_ALM1_REG),
4052                  roce_read(hr_dev, ROCEE_ECC_CERR_ALM2_REG));
4053
4054         return IRQ_RETVAL(int_work);
4055 }
4056
4057 static void hns_roce_v1_int_mask_enable(struct hns_roce_dev *hr_dev)
4058 {
4059         u32 aemask_val;
4060         int masken = 0;
4061         __le32 tmp;
4062         int i;
4063
4064         /* AEQ INT */
4065         aemask_val = roce_read(hr_dev, ROCEE_CAEP_AE_MASK_REG);
4066         tmp = cpu_to_le32(aemask_val);
4067         roce_set_bit(tmp, ROCEE_CAEP_AE_MASK_CAEP_AEQ_ALM_OVF_MASK_S,
4068                      masken);
4069         roce_set_bit(tmp, ROCEE_CAEP_AE_MASK_CAEP_AE_IRQ_MASK_S, masken);
4070         aemask_val = le32_to_cpu(tmp);
4071         roce_write(hr_dev, ROCEE_CAEP_AE_MASK_REG, aemask_val);
4072
4073         /* CEQ INT */
4074         for (i = 0; i < hr_dev->caps.num_comp_vectors; i++) {
4075                 /* IRQ mask */
4076                 roce_write(hr_dev, ROCEE_CAEP_CE_IRQ_MASK_0_REG +
4077                            i * CEQ_REG_OFFSET, masken);
4078         }
4079 }
4080
4081 static void hns_roce_v1_free_eq(struct hns_roce_dev *hr_dev,
4082                                 struct hns_roce_eq *eq)
4083 {
4084         int npages = (PAGE_ALIGN(eq->eqe_size * eq->entries) +
4085                       HNS_ROCE_BA_SIZE - 1) / HNS_ROCE_BA_SIZE;
4086         int i;
4087
4088         if (!eq->buf_list)
4089                 return;
4090
4091         for (i = 0; i < npages; ++i)
4092                 dma_free_coherent(&hr_dev->pdev->dev, HNS_ROCE_BA_SIZE,
4093                                   eq->buf_list[i].buf, eq->buf_list[i].map);
4094
4095         kfree(eq->buf_list);
4096 }
4097
4098 static void hns_roce_v1_enable_eq(struct hns_roce_dev *hr_dev, int eq_num,
4099                                   int enable_flag)
4100 {
4101         void __iomem *eqc = hr_dev->eq_table.eqc_base[eq_num];
4102         __le32 tmp;
4103         u32 val;
4104
4105         val = readl(eqc);
4106         tmp = cpu_to_le32(val);
4107
4108         if (enable_flag)
4109                 roce_set_field(tmp,
4110                                ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_M,
4111                                ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_S,
4112                                HNS_ROCE_EQ_STAT_VALID);
4113         else
4114                 roce_set_field(tmp,
4115                                ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_M,
4116                                ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_S,
4117                                HNS_ROCE_EQ_STAT_INVALID);
4118
4119         val = le32_to_cpu(tmp);
4120         writel(val, eqc);
4121 }
4122
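/*
 * Program one event queue context. The EQC window consists of four
 * consecutive 32-bit registers: word 0 holds the queue state and log2
 * depth, word 1 the 4K-aligned buffer address shifted right by 12,
 * word 2 the remaining high address bits together with the producer
 * (current) index, and word 3 the consumer index.
 */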
4123 static int hns_roce_v1_create_eq(struct hns_roce_dev *hr_dev,
4124                                  struct hns_roce_eq *eq)
4125 {
4126         void __iomem *eqc = hr_dev->eq_table.eqc_base[eq->eqn];
4127         struct device *dev = &hr_dev->pdev->dev;
4128         dma_addr_t tmp_dma_addr;
4129         u32 eqcuridx_val = 0;
4130         u32 eqconsindx_val;
4131         u32 eqshift_val;
4132         __le32 tmp2 = 0;
4133         __le32 tmp1 = 0;
4134         __le32 tmp = 0;
4135         int num_bas;
4136         int ret;
4137         int i;
4138
4139         num_bas = (PAGE_ALIGN(eq->entries * eq->eqe_size) +
4140                    HNS_ROCE_BA_SIZE - 1) / HNS_ROCE_BA_SIZE;
4141
4142         if ((eq->entries * eq->eqe_size) > HNS_ROCE_BA_SIZE) {
4143                 dev_err(dev, "eq buf size %d exceeds ba size %d, need %d bas\n",
4144                         (eq->entries * eq->eqe_size), HNS_ROCE_BA_SIZE,
4145                         num_bas);
4146                 return -EINVAL;
4147         }
4148
4149         eq->buf_list = kcalloc(num_bas, sizeof(*eq->buf_list), GFP_KERNEL);
4150         if (!eq->buf_list)
4151                 return -ENOMEM;
4152
4153         for (i = 0; i < num_bas; ++i) {
4154                 eq->buf_list[i].buf = dma_alloc_coherent(dev, HNS_ROCE_BA_SIZE,
4155                                                          &tmp_dma_addr,
4156                                                          GFP_KERNEL);
4157                 if (!eq->buf_list[i].buf) {
4158                         ret = -ENOMEM;
4159                         goto err_out_free_pages;
4160                 }
4161
4162                 eq->buf_list[i].map = tmp_dma_addr;
4163         }
4164         eq->cons_index = 0;
4165         roce_set_field(tmp, ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_M,
4166                        ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_S,
4167                        HNS_ROCE_EQ_STAT_INVALID);
4168         roce_set_field(tmp, ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_AEQE_SHIFT_M,
4169                        ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_AEQE_SHIFT_S,
4170                        eq->log_entries);
4171         eqshift_val = le32_to_cpu(tmp);
4172         writel(eqshift_val, eqc);
4173
4174         /* Configure eq extended address bits 12~44 */
4175         writel((u32)(eq->buf_list[0].map >> 12), eqc + 4);
4176
4177         /*
4178          * Configure eq extended address bits 45~49.
4179          * 44 = 32 + 12: the address is shifted right by 12 because 4K
4180          * pages are used, and by a further 32 to extract the high 32-bit
4181          * part of the value written to hardware.
4182          */
4183         roce_set_field(tmp1, ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQ_BT_H_M,
4184                        ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQ_BT_H_S,
4185                        eq->buf_list[0].map >> 44);
4186         roce_set_field(tmp1, ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQE_CUR_IDX_M,
4187                        ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQE_CUR_IDX_S, 0);
4188         eqcuridx_val = le32_to_cpu(tmp1);
4189         writel(eqcuridx_val, eqc + 8);
4190
4191         /* Configure eq consumer index */
4192         roce_set_field(tmp2, ROCEE_CAEP_AEQE_CONS_IDX_CAEP_AEQE_CONS_IDX_M,
4193                        ROCEE_CAEP_AEQE_CONS_IDX_CAEP_AEQE_CONS_IDX_S, 0);
4194         eqconsindx_val = le32_to_cpu(tmp2);
4195         writel(eqconsindx_val, eqc + 0xc);
4196
4197         return 0;
4198
4199 err_out_free_pages:
4200         for (i -= 1; i >= 0; i--)
4201                 dma_free_coherent(dev, HNS_ROCE_BA_SIZE, eq->buf_list[i].buf,
4202                                   eq->buf_list[i].map);
4203
4204         kfree(eq->buf_list);
4205         return ret;
4206 }
4207
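/*
 * Bring up the whole EQ table: vectors [0, num_comp_vectors) serve the
 * completion EQs, the following num_aeq_vectors the asynchronous EQ,
 * and the remaining "other" vectors are routed to the abnormal
 * interrupt handler. The queues are only marked valid in hardware as
 * the final step, once every EQ buffer is created and every IRQ has
 * been requested.
 */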
4208 static int hns_roce_v1_init_eq_table(struct hns_roce_dev *hr_dev)
4209 {
4210         struct hns_roce_eq_table *eq_table = &hr_dev->eq_table;
4211         struct device *dev = &hr_dev->pdev->dev;
4212         struct hns_roce_eq *eq;
4213         int irq_num;
4214         int eq_num;
4215         int ret;
4216         int i, j;
4217
4218         eq_num = hr_dev->caps.num_comp_vectors + hr_dev->caps.num_aeq_vectors;
4219         irq_num = eq_num + hr_dev->caps.num_other_vectors;
4220
4221         eq_table->eq = kcalloc(eq_num, sizeof(*eq_table->eq), GFP_KERNEL);
4222         if (!eq_table->eq)
4223                 return -ENOMEM;
4224
4225         eq_table->eqc_base = kcalloc(eq_num, sizeof(*eq_table->eqc_base),
4226                                      GFP_KERNEL);
4227         if (!eq_table->eqc_base) {
4228                 ret = -ENOMEM;
4229                 goto err_eqc_base_alloc_fail;
4230         }
4231
4232         for (i = 0; i < eq_num; i++) {
4233                 eq = &eq_table->eq[i];
4234                 eq->hr_dev = hr_dev;
4235                 eq->eqn = i;
4236                 eq->irq = hr_dev->irq[i];
4237                 eq->log_page_size = PAGE_SHIFT;
4238
4239                 if (i < hr_dev->caps.num_comp_vectors) {
4240                         /* CEQ */
4241                         eq_table->eqc_base[i] = hr_dev->reg_base +
4242                                                 ROCEE_CAEP_CEQC_SHIFT_0_REG +
4243                                                 CEQ_REG_OFFSET * i;
4244                         eq->type_flag = HNS_ROCE_CEQ;
4245                         eq->doorbell = hr_dev->reg_base +
4246                                        ROCEE_CAEP_CEQC_CONS_IDX_0_REG +
4247                                        CEQ_REG_OFFSET * i;
4248                         eq->entries = hr_dev->caps.ceqe_depth;
4249                         eq->log_entries = ilog2(eq->entries);
4250                         eq->eqe_size = HNS_ROCE_CEQE_SIZE;
4251                 } else {
4252                         /* AEQ */
4253                         eq_table->eqc_base[i] = hr_dev->reg_base +
4254                                                 ROCEE_CAEP_AEQC_AEQE_SHIFT_REG;
4255                         eq->type_flag = HNS_ROCE_AEQ;
4256                         eq->doorbell = hr_dev->reg_base +
4257                                        ROCEE_CAEP_AEQE_CONS_IDX_REG;
4258                         eq->entries = hr_dev->caps.aeqe_depth;
4259                         eq->log_entries = ilog2(eq->entries);
4260                         eq->eqe_size = HNS_ROCE_AEQE_SIZE;
4261                 }
4262         }
4263
4264         /* Disable irq */
4265         hns_roce_v1_int_mask_enable(hr_dev);
4266
4267         /* Configure ce int interval */
4268         roce_write(hr_dev, ROCEE_CAEP_CE_INTERVAL_CFG_REG,
4269                    HNS_ROCE_CEQ_DEFAULT_INTERVAL);
4270
4271         /* Configure ce int burst num */
4272         roce_write(hr_dev, ROCEE_CAEP_CE_BURST_NUM_CFG_REG,
4273                    HNS_ROCE_CEQ_DEFAULT_BURST_NUM);
4274
4275         for (i = 0; i < eq_num; i++) {
4276                 ret = hns_roce_v1_create_eq(hr_dev, &eq_table->eq[i]);
4277                 if (ret) {
4278                         dev_err(dev, "eq create failed\n");
4279                         goto err_create_eq_fail;
4280                 }
4281         }
4282
4283         for (j = 0; j < irq_num; j++) {
4284                 if (j < eq_num)
4285                         ret = request_irq(hr_dev->irq[j],
4286                                           hns_roce_v1_msix_interrupt_eq, 0,
4287                                           hr_dev->irq_names[j],
4288                                           &eq_table->eq[j]);
4289                 else
4290                         ret = request_irq(hr_dev->irq[j],
4291                                           hns_roce_v1_msix_interrupt_abn, 0,
4292                                           hr_dev->irq_names[j], hr_dev);
4293
4294                 if (ret) {
4295                         dev_err(dev, "request irq error!\n");
4296                         goto err_request_irq_fail;
4297                 }
4298         }
4299
4300         for (i = 0; i < eq_num; i++)
4301                 hns_roce_v1_enable_eq(hr_dev, i, EQ_ENABLE);
4302
4303         return 0;
4304
4305 err_request_irq_fail:
4306         for (j -= 1; j >= 0; j--)
4307                 free_irq(hr_dev->irq[j], &eq_table->eq[j]);
4308
4309 err_create_eq_fail:
4310         for (i -= 1; i >= 0; i--)
4311                 hns_roce_v1_free_eq(hr_dev, &eq_table->eq[i]);
4312
4313         kfree(eq_table->eqc_base);
4314
4315 err_eqc_base_alloc_fail:
4316         kfree(eq_table->eq);
4317
4318         return ret;
4319 }
4320
4321 static void hns_roce_v1_cleanup_eq_table(struct hns_roce_dev *hr_dev)
4322 {
4323         struct hns_roce_eq_table *eq_table = &hr_dev->eq_table;
4324         int irq_num;
4325         int eq_num;
4326         int i;
4327
4328         eq_num = hr_dev->caps.num_comp_vectors + hr_dev->caps.num_aeq_vectors;
4329         irq_num = eq_num + hr_dev->caps.num_other_vectors;
4330         for (i = 0; i < eq_num; i++) {
4331                 /* Disable EQ */
4332                 hns_roce_v1_enable_eq(hr_dev, i, EQ_DISABLE);
4333
4334                 free_irq(hr_dev->irq[i], &eq_table->eq[i]);
4335
4336                 hns_roce_v1_free_eq(hr_dev, &eq_table->eq[i]);
4337         }
4338         for (i = eq_num; i < irq_num; i++)
4339                 free_irq(hr_dev->irq[i], hr_dev);
4340
4341         kfree(eq_table->eqc_base);
4342         kfree(eq_table->eq);
4343 }
4344
4345 static const struct ib_device_ops hns_roce_v1_dev_ops = {
4346         .destroy_qp = hns_roce_v1_destroy_qp,
4347         .poll_cq = hns_roce_v1_poll_cq,
4348         .post_recv = hns_roce_v1_post_recv,
4349         .post_send = hns_roce_v1_post_send,
4350         .query_qp = hns_roce_v1_query_qp,
4351         .req_notify_cq = hns_roce_v1_req_notify_cq,
4352 };
4353
4354 static const struct hns_roce_hw hns_roce_hw_v1 = {
4355         .reset = hns_roce_v1_reset,
4356         .hw_profile = hns_roce_v1_profile,
4357         .hw_init = hns_roce_v1_init,
4358         .hw_exit = hns_roce_v1_exit,
4359         .post_mbox = hns_roce_v1_post_mbox,
4360         .chk_mbox = hns_roce_v1_chk_mbox,
4361         .set_gid = hns_roce_v1_set_gid,
4362         .set_mac = hns_roce_v1_set_mac,
4363         .set_mtu = hns_roce_v1_set_mtu,
4364         .write_mtpt = hns_roce_v1_write_mtpt,
4365         .write_cqc = hns_roce_v1_write_cqc,
4366         .clear_hem = hns_roce_v1_clear_hem,
4367         .modify_qp = hns_roce_v1_modify_qp,
4368         .query_qp = hns_roce_v1_query_qp,
4369         .destroy_qp = hns_roce_v1_destroy_qp,
4370         .post_send = hns_roce_v1_post_send,
4371         .post_recv = hns_roce_v1_post_recv,
4372         .req_notify_cq = hns_roce_v1_req_notify_cq,
4373         .poll_cq = hns_roce_v1_poll_cq,
4374         .dereg_mr = hns_roce_v1_dereg_mr,
4375         .destroy_cq = hns_roce_v1_destroy_cq,
4376         .init_eq = hns_roce_v1_init_eq_table,
4377         .cleanup_eq = hns_roce_v1_cleanup_eq_table,
4378         .hns_roce_dev_ops = &hns_roce_v1_dev_ops,
4379 };
4380
4381 static const struct of_device_id hns_roce_of_match[] = {
4382         { .compatible = "hisilicon,hns-roce-v1", .data = &hns_roce_hw_v1, },
4383         {},
4384 };
4385 MODULE_DEVICE_TABLE(of, hns_roce_of_match);
4386
4387 static const struct acpi_device_id hns_roce_acpi_match[] = {
4388         { "HISI00D1", (kernel_ulong_t)&hns_roce_hw_v1 },
4389         {},
4390 };
4391 MODULE_DEVICE_TABLE(acpi, hns_roce_acpi_match);
4392
4393 static struct
4394 platform_device *hns_roce_find_pdev(struct fwnode_handle *fwnode)
4395 {
4396         struct device *dev;
4397
4398         /* get the 'device' corresponding to the matching 'fwnode' */
4399         dev = bus_find_device_by_fwnode(&platform_bus_type, fwnode);
4400         /* get the platform device */
4401         return dev ? to_platform_device(dev) : NULL;
4402 }
4403
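/*
 * Gather everything the driver needs from DT or ACPI: the hw ops
 * matching this SoC, the mapped register window, the node GUID, the
 * netdev behind each "eth-handle" port reference, and the names and
 * numbers of the MSI-X interrupts.
 */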
4404 static int hns_roce_get_cfg(struct hns_roce_dev *hr_dev)
4405 {
4406         struct device *dev = &hr_dev->pdev->dev;
4407         struct platform_device *pdev = NULL;
4408         struct net_device *netdev = NULL;
4409         struct device_node *net_node;
4410         int port_cnt = 0;
4411         u8 phy_port;
4412         int ret;
4413         int i;
4414
4415         /* check if we are compatible with the underlying SoC */
4416         if (dev_of_node(dev)) {
4417                 const struct of_device_id *of_id;
4418
4419                 of_id = of_match_node(hns_roce_of_match, dev->of_node);
4420                 if (!of_id) {
4421                         dev_err(dev, "device is not compatible!\n");
4422                         return -ENXIO;
4423                 }
4424                 hr_dev->hw = (const struct hns_roce_hw *)of_id->data;
4425                 if (!hr_dev->hw) {
4426                         dev_err(dev, "couldn't get H/W specific DT data!\n");
4427                         return -ENXIO;
4428                 }
4429         } else if (is_acpi_device_node(dev->fwnode)) {
4430                 const struct acpi_device_id *acpi_id;
4431
4432                 acpi_id = acpi_match_device(hns_roce_acpi_match, dev);
4433                 if (!acpi_id) {
4434                         dev_err(dev, "device is not compatible!\n");
4435                         return -ENXIO;
4436                 }
4437                 hr_dev->hw = (const struct hns_roce_hw *) acpi_id->driver_data;
4438                 if (!hr_dev->hw) {
4439                         dev_err(dev, "couldn't get H/W specific ACPI data!\n");
4440                         return -ENXIO;
4441                 }
4442         } else {
4443                 dev_err(dev, "can't read compatibility data from DT or ACPI\n");
4444                 return -ENXIO;
4445         }
4446
4447         /* get the mapped register base address */
4448         hr_dev->reg_base = devm_platform_ioremap_resource(hr_dev->pdev, 0);
4449         if (IS_ERR(hr_dev->reg_base))
4450                 return PTR_ERR(hr_dev->reg_base);
4451
4452         /* read the node_guid of IB device from the DT or ACPI */
4453         ret = device_property_read_u8_array(dev, "node-guid",
4454                                             (u8 *)&hr_dev->ib_dev.node_guid,
4455                                             GUID_LEN);
4456         if (ret) {
4457                 dev_err(dev, "couldn't get node_guid from DT or ACPI!\n");
4458                 return ret;
4459         }
4460
4461         /* get the RoCE associated ethernet ports or netdevices */
4462         for (i = 0; i < HNS_ROCE_MAX_PORTS; i++) {
4463                 if (dev_of_node(dev)) {
4464                         net_node = of_parse_phandle(dev->of_node, "eth-handle",
4465                                                     i);
4466                         if (!net_node)
4467                                 continue;
4468                         pdev = of_find_device_by_node(net_node);
4469                 } else if (is_acpi_device_node(dev->fwnode)) {
4470                         struct fwnode_reference_args args;
4471
4472                         ret = acpi_node_get_property_reference(dev->fwnode,
4473                                                                "eth-handle",
4474                                                                i, &args);
4475                         if (ret)
4476                                 continue;
4477                         pdev = hns_roce_find_pdev(args.fwnode);
4478                 } else {
4479                         dev_err(dev, "cannot read data from DT or ACPI\n");
4480                         return -ENXIO;
4481                 }
4482
4483                 if (pdev) {
4484                         netdev = platform_get_drvdata(pdev);
4485                         phy_port = (u8)i;
4486                         if (netdev) {
4487                                 hr_dev->iboe.netdevs[port_cnt] = netdev;
4488                                 hr_dev->iboe.phy_port[port_cnt] = phy_port;
4489                         } else {
4490                                 dev_err(dev, "no netdev found with pdev %s\n",
4491                                         pdev->name);
4492                                 return -ENODEV;
4493                         }
4494                         port_cnt++;
4495                 }
4496         }
4497
4498         if (port_cnt == 0) {
4499                 dev_err(dev, "unable to get eth-handle for available ports!\n");
4500                 return -EINVAL;
4501         }
4502
4503         hr_dev->caps.num_ports = port_cnt;
4504
4505         /* cmd issue mode: 0 is poll, 1 is event */
4506         hr_dev->cmd_mod = 1;
4507         hr_dev->loop_idc = 0;
4508         hr_dev->sdb_offset = ROCEE_DB_SQ_L_0_REG;
4509         hr_dev->odb_offset = ROCEE_DB_OTHERS_L_0_REG;
4510
4511         /* read the interrupt names from the DT or ACPI */
4512         ret = device_property_read_string_array(dev, "interrupt-names",
4513                                                 hr_dev->irq_names,
4514                                                 HNS_ROCE_V1_MAX_IRQ_NUM);
4515         if (ret < 0) {
4516                 dev_err(dev, "couldn't get interrupt names from DT or ACPI!\n");
4517                 return ret;
4518         }
4519
4520         /* fetch the interrupt numbers */
4521         for (i = 0; i < HNS_ROCE_V1_MAX_IRQ_NUM; i++) {
4522                 hr_dev->irq[i] = platform_get_irq(hr_dev->pdev, i);
4523                 if (hr_dev->irq[i] <= 0)
4524                         return -EINVAL;
4525         }
4526
4527         return 0;
4528 }
4529
4530 /**
4531  * hns_roce_probe - RoCE driver entry point
4532  * @pdev: pointer to platform device
4533  *
4534  * Return: 0 on success, a negative errno on failure.
4535  */
4536 static int hns_roce_probe(struct platform_device *pdev)
4537 {
4538         int ret;
4539         struct hns_roce_dev *hr_dev;
4540         struct device *dev = &pdev->dev;
4541
4542         hr_dev = ib_alloc_device(hns_roce_dev, ib_dev);
4543         if (!hr_dev)
4544                 return -ENOMEM;
4545
4546         hr_dev->priv = kzalloc(sizeof(struct hns_roce_v1_priv), GFP_KERNEL);
4547         if (!hr_dev->priv) {
4548                 ret = -ENOMEM;
4549                 goto error_failed_kzalloc;
4550         }
4551
4552         hr_dev->pdev = pdev;
4553         hr_dev->dev = dev;
4554         platform_set_drvdata(pdev, hr_dev);
4555
4556         if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64ULL)) &&
4557             dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32ULL))) {
4558                 dev_err(dev, "No usable DMA addressing mode\n");
4559                 ret = -EIO;
4560                 goto error_failed_get_cfg;
4561         }
4562
4563         ret = hns_roce_get_cfg(hr_dev);
4564         if (ret) {
4565                 dev_err(dev, "Get Configuration failed!\n");
4566                 goto error_failed_get_cfg;
4567         }
4568
4569         ret = hns_roce_init(hr_dev);
4570         if (ret) {
4571                 dev_err(dev, "RoCE engine init failed!\n");
4572                 goto error_failed_get_cfg;
4573         }
4574
4575         return 0;
4576
4577 error_failed_get_cfg:
4578         kfree(hr_dev->priv);
4579
4580 error_failed_kzalloc:
4581         ib_dealloc_device(&hr_dev->ib_dev);
4582
4583         return ret;
4584 }
4585
4586 /**
4587  * hns_roce_remove - remove RoCE device
4588  * @pdev: pointer to platform device
4589  */
4590 static int hns_roce_remove(struct platform_device *pdev)
4591 {
4592         struct hns_roce_dev *hr_dev = platform_get_drvdata(pdev);
4593
4594         hns_roce_exit(hr_dev);
4595         kfree(hr_dev->priv);
4596         ib_dealloc_device(&hr_dev->ib_dev);
4597
4598         return 0;
4599 }
4600
4601 static struct platform_driver hns_roce_driver = {
4602         .probe = hns_roce_probe,
4603         .remove = hns_roce_remove,
4604         .driver = {
4605                 .name = DRV_NAME,
4606                 .of_match_table = hns_roce_of_match,
4607                 .acpi_match_table = ACPI_PTR(hns_roce_acpi_match),
4608         },
4609 };
4610
4611 module_platform_driver(hns_roce_driver);
4612
4613 MODULE_LICENSE("Dual BSD/GPL");
4614 MODULE_AUTHOR("Wei Hu <xavier.huwei@huawei.com>");
4615 MODULE_AUTHOR("Nenglong Zhao <zhaonenglong@hisilicon.com>");
4616 MODULE_AUTHOR("Lijun Ou <oulijun@huawei.com>");
4617 MODULE_DESCRIPTION("Hisilicon Hip06 Family RoCE Driver");