Commit | Line | Data |
---|---|---|
9a443537 | 1 | /* |
2 | * Copyright (c) 2016 Hisilicon Limited. | |
3 | * | |
4 | * This software is available to you under a choice of one of two | |
5 | * licenses. You may choose to be licensed under the terms of the GNU | |
6 | * General Public License (GPL) Version 2, available from the file | |
7 | * COPYING in the main directory of this source tree, or the | |
8 | * OpenIB.org BSD license below: | |
9 | * | |
10 | * Redistribution and use in source and binary forms, with or | |
11 | * without modification, are permitted provided that the following | |
12 | * conditions are met: | |
13 | * | |
14 | * - Redistributions of source code must retain the above | |
15 | * copyright notice, this list of conditions and the following | |
16 | * disclaimer. | |
17 | * | |
18 | * - Redistributions in binary form must reproduce the above | |
19 | * copyright notice, this list of conditions and the following | |
20 | * disclaimer in the documentation and/or other materials | |
21 | * provided with the distribution. | |
22 | * | |
23 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | |
24 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | |
25 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | |
26 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | |
27 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | |
28 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | |
29 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | |
30 | * SOFTWARE. | |
31 | */ | |
32 | ||
33 | #include <linux/platform_device.h> | |
528f1deb | 34 | #include <linux/acpi.h> |
543bfe6c | 35 | #include <linux/etherdevice.h> |
b16f8188 | 36 | #include <linux/interrupt.h> |
cd6ce4a5 | 37 | #include <linux/of.h> |
08805fdb | 38 | #include <linux/of_platform.h> |
9a443537 | 39 | #include <rdma/ib_umem.h> |
40 | #include "hns_roce_common.h" | |
41 | #include "hns_roce_device.h" | |
42 | #include "hns_roce_cmd.h" | |
43 | #include "hns_roce_hem.h" | |
44 | #include "hns_roce_hw_v1.h" | |
45 | ||
46 | static void set_data_seg(struct hns_roce_wqe_data_seg *dseg, struct ib_sge *sg) | |
47 | { | |
48 | dseg->lkey = cpu_to_le32(sg->lkey); | |
49 | dseg->addr = cpu_to_le64(sg->addr); | |
50 | dseg->len = cpu_to_le32(sg->length); | |
51 | } | |
52 | ||
53 | static void set_raddr_seg(struct hns_roce_wqe_raddr_seg *rseg, u64 remote_addr, | |
54 | u32 rkey) | |
55 | { | |
56 | rseg->raddr = cpu_to_le64(remote_addr); | |
57 | rseg->rkey = cpu_to_le32(rkey); | |
58 | rseg->len = 0; | |
59 | } | |
60 | ||
/*
 * hns_roce_v1_post_send - post a chain of send work requests (hip06 hw).
 * @ibqp: the queue pair to post on; only GSI and RC types are supported
 * @wr: linked list of send work requests
 * @bad_wr: out: first WR that failed, or NULL when the QP type is rejected
 *
 * Builds one hardware WQE per request while holding qp->sq.lock: a UD-format
 * WQE for GSI QPs (DMAC, SGID index, priority taken from the AH) or a
 * ctrl + raddr + data WQE for RC QPs.  After all WQEs are written, a write
 * barrier orders them before the single SQ doorbell ring for the whole chain.
 *
 * Returns 0 on success or a negative errno, with *bad_wr pointing at the
 * offending request.
 */
static int hns_roce_v1_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
				 struct ib_send_wr **bad_wr)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
	/* NOTE(review): ud_wr(wr)->ah is only meaningful for GSI requests;
	 * it is computed unconditionally here but dereferenced only on the
	 * GSI path below. */
	struct hns_roce_ah *ah = to_hr_ah(ud_wr(wr)->ah);
	struct hns_roce_ud_send_wqe *ud_sq_wqe = NULL;
	struct hns_roce_wqe_ctrl_seg *ctrl = NULL;
	struct hns_roce_wqe_data_seg *dseg = NULL;
	struct hns_roce_qp *qp = to_hr_qp(ibqp);
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_sq_db sq_db;
	int ps_opcode = 0, i = 0;
	unsigned long flags = 0;
	void *wqe = NULL;
	u32 doorbell[2];
	int nreq = 0;
	u32 ind = 0;
	int ret = 0;
	u8 *smac;
	int loopback;

	if (unlikely(ibqp->qp_type != IB_QPT_GSI &&
		ibqp->qp_type != IB_QPT_RC)) {
		dev_err(dev, "un-supported QP type\n");
		*bad_wr = NULL;
		return -EOPNOTSUPP;
	}

	spin_lock_irqsave(&qp->sq.lock, flags);
	ind = qp->sq_next_wqe;
	for (nreq = 0; wr; ++nreq, wr = wr->next) {
		if (hns_roce_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) {
			ret = -ENOMEM;
			*bad_wr = wr;
			goto out;
		}

		if (unlikely(wr->num_sge > qp->sq.max_gs)) {
			dev_err(dev, "num_sge=%d > qp->sq.max_gs=%d\n",
				wr->num_sge, qp->sq.max_gs);
			ret = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		/* Slot for this WQE; the wrid is recorded for completion
		 * matching at the same ring position. */
		wqe = get_send_wqe(qp, ind & (qp->sq.wqe_cnt - 1));
		qp->sq.wrid[(qp->sq.head + nreq) & (qp->sq.wqe_cnt - 1)] =
			wr->wr_id;

		/* Corresponding to the RC and RD type wqe process separately */
		if (ibqp->qp_type == IB_QPT_GSI) {
			ud_sq_wqe = wqe;
			/* Destination MAC is scattered across two registers:
			 * bytes 0-3 into dmac_h, bytes 4-5 into u32_8. */
			roce_set_field(ud_sq_wqe->dmac_h,
				       UD_SEND_WQE_U32_4_DMAC_0_M,
				       UD_SEND_WQE_U32_4_DMAC_0_S,
				       ah->av.mac[0]);
			roce_set_field(ud_sq_wqe->dmac_h,
				       UD_SEND_WQE_U32_4_DMAC_1_M,
				       UD_SEND_WQE_U32_4_DMAC_1_S,
				       ah->av.mac[1]);
			roce_set_field(ud_sq_wqe->dmac_h,
				       UD_SEND_WQE_U32_4_DMAC_2_M,
				       UD_SEND_WQE_U32_4_DMAC_2_S,
				       ah->av.mac[2]);
			roce_set_field(ud_sq_wqe->dmac_h,
				       UD_SEND_WQE_U32_4_DMAC_3_M,
				       UD_SEND_WQE_U32_4_DMAC_3_S,
				       ah->av.mac[3]);

			roce_set_field(ud_sq_wqe->u32_8,
				       UD_SEND_WQE_U32_8_DMAC_4_M,
				       UD_SEND_WQE_U32_8_DMAC_4_S,
				       ah->av.mac[4]);
			roce_set_field(ud_sq_wqe->u32_8,
				       UD_SEND_WQE_U32_8_DMAC_5_M,
				       UD_SEND_WQE_U32_8_DMAC_5_S,
				       ah->av.mac[5]);

			/* If the destination MAC equals our own port MAC,
			 * tell the hardware to loop the packet back. */
			smac = (u8 *)hr_dev->dev_addr[qp->port];
			loopback = ether_addr_equal_unaligned(ah->av.mac,
							      smac) ? 1 : 0;
			roce_set_bit(ud_sq_wqe->u32_8,
				     UD_SEND_WQE_U32_8_LOOPBACK_INDICATOR_S,
				     loopback);

			roce_set_field(ud_sq_wqe->u32_8,
				       UD_SEND_WQE_U32_8_OPERATION_TYPE_M,
				       UD_SEND_WQE_U32_8_OPERATION_TYPE_S,
				       HNS_ROCE_WQE_OPCODE_SEND);
			/* UD WQEs always carry exactly two data segments
			 * (va0/l_key0 and va1/l_key1 below). */
			roce_set_field(ud_sq_wqe->u32_8,
				       UD_SEND_WQE_U32_8_NUMBER_OF_DATA_SEG_M,
				       UD_SEND_WQE_U32_8_NUMBER_OF_DATA_SEG_S,
				       2);
			roce_set_bit(ud_sq_wqe->u32_8,
				UD_SEND_WQE_U32_8_SEND_GL_ROUTING_HDR_FLAG_S,
				1);

			ud_sq_wqe->u32_8 |= (wr->send_flags & IB_SEND_SIGNALED ?
				cpu_to_le32(HNS_ROCE_WQE_CQ_NOTIFY) : 0) |
				(wr->send_flags & IB_SEND_SOLICITED ?
				cpu_to_le32(HNS_ROCE_WQE_SE) : 0) |
				((wr->opcode == IB_WR_SEND_WITH_IMM) ?
				cpu_to_le32(HNS_ROCE_WQE_IMM) : 0);

			roce_set_field(ud_sq_wqe->u32_16,
				       UD_SEND_WQE_U32_16_DEST_QP_M,
				       UD_SEND_WQE_U32_16_DEST_QP_S,
				       ud_wr(wr)->remote_qpn);
			roce_set_field(ud_sq_wqe->u32_16,
				       UD_SEND_WQE_U32_16_MAX_STATIC_RATE_M,
				       UD_SEND_WQE_U32_16_MAX_STATIC_RATE_S,
				       ah->av.stat_rate);

			roce_set_field(ud_sq_wqe->u32_36,
				       UD_SEND_WQE_U32_36_FLOW_LABEL_M,
				       UD_SEND_WQE_U32_36_FLOW_LABEL_S, 0);
			/* Priority comes from the SL bits of the AH's packed
			 * sl_tclass_flowlabel word. */
			roce_set_field(ud_sq_wqe->u32_36,
				       UD_SEND_WQE_U32_36_PRIORITY_M,
				       UD_SEND_WQE_U32_36_PRIORITY_S,
				       ah->av.sl_tclass_flowlabel >>
				       HNS_ROCE_SL_SHIFT);
			roce_set_field(ud_sq_wqe->u32_36,
				       UD_SEND_WQE_U32_36_SGID_INDEX_M,
				       UD_SEND_WQE_U32_36_SGID_INDEX_S,
				       hns_get_gid_index(hr_dev, qp->phy_port,
							 ah->av.gid_index));

			roce_set_field(ud_sq_wqe->u32_40,
				       UD_SEND_WQE_U32_40_HOP_LIMIT_M,
				       UD_SEND_WQE_U32_40_HOP_LIMIT_S,
				       ah->av.hop_limit);
			roce_set_field(ud_sq_wqe->u32_40,
				       UD_SEND_WQE_U32_40_TRAFFIC_CLASS_M,
				       UD_SEND_WQE_U32_40_TRAFFIC_CLASS_S, 0);

			memcpy(&ud_sq_wqe->dgid[0], &ah->av.dgid[0], GID_LEN);

			/* NOTE(review): sg_list[0] and sg_list[1] are read
			 * unconditionally; assumes GSI WRs always supply two
			 * SGEs — confirm against the GSI caller. */
			ud_sq_wqe->va0_l =
				       cpu_to_le32((u32)wr->sg_list[0].addr);
			ud_sq_wqe->va0_h =
				       cpu_to_le32((wr->sg_list[0].addr) >> 32);
			ud_sq_wqe->l_key0 =
				       cpu_to_le32(wr->sg_list[0].lkey);

			ud_sq_wqe->va1_l =
				       cpu_to_le32((u32)wr->sg_list[1].addr);
			ud_sq_wqe->va1_h =
				       cpu_to_le32((wr->sg_list[1].addr) >> 32);
			ud_sq_wqe->l_key1 =
				       cpu_to_le32(wr->sg_list[1].lkey);
			ind++;
		} else if (ibqp->qp_type == IB_QPT_RC) {
			u32 tmp_len = 0;

			ctrl = wqe;
			memset(ctrl, 0, sizeof(struct hns_roce_wqe_ctrl_seg));
			/* Total message length = sum of all SGE lengths. */
			for (i = 0; i < wr->num_sge; i++)
				tmp_len += wr->sg_list[i].length;

			ctrl->msg_length =
			  cpu_to_le32(le32_to_cpu(ctrl->msg_length) + tmp_len);

			ctrl->sgl_pa_h = 0;
			ctrl->flag = 0;

			switch (wr->opcode) {
			case IB_WR_SEND_WITH_IMM:
			case IB_WR_RDMA_WRITE_WITH_IMM:
				ctrl->imm_data = wr->ex.imm_data;
				break;
			case IB_WR_SEND_WITH_INV:
				ctrl->inv_key =
					cpu_to_le32(wr->ex.invalidate_rkey);
				break;
			default:
				ctrl->imm_data = 0;
				break;
			}

			/*Ctrl field, ctrl set type: sig, solic, imm, fence */
			/* SO wait for conforming application scenarios */
			ctrl->flag |= (wr->send_flags & IB_SEND_SIGNALED ?
				      cpu_to_le32(HNS_ROCE_WQE_CQ_NOTIFY) : 0) |
				      (wr->send_flags & IB_SEND_SOLICITED ?
				      cpu_to_le32(HNS_ROCE_WQE_SE) : 0) |
				      ((wr->opcode == IB_WR_SEND_WITH_IMM ||
				      wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM) ?
				      cpu_to_le32(HNS_ROCE_WQE_IMM) : 0) |
				      (wr->send_flags & IB_SEND_FENCE ?
				      (cpu_to_le32(HNS_ROCE_WQE_FENCE)) : 0);

			wqe += sizeof(struct hns_roce_wqe_ctrl_seg);

			switch (wr->opcode) {
			case IB_WR_RDMA_READ:
				ps_opcode = HNS_ROCE_WQE_OPCODE_RDMA_READ;
				set_raddr_seg(wqe,  rdma_wr(wr)->remote_addr,
					       rdma_wr(wr)->rkey);
				break;
			case IB_WR_RDMA_WRITE:
			case IB_WR_RDMA_WRITE_WITH_IMM:
				ps_opcode = HNS_ROCE_WQE_OPCODE_RDMA_WRITE;
				set_raddr_seg(wqe,  rdma_wr(wr)->remote_addr,
					      rdma_wr(wr)->rkey);
				break;
			case IB_WR_SEND:
			case IB_WR_SEND_WITH_INV:
			case IB_WR_SEND_WITH_IMM:
				ps_opcode = HNS_ROCE_WQE_OPCODE_SEND;
				break;
			case IB_WR_LOCAL_INV:
				break;
			case IB_WR_ATOMIC_CMP_AND_SWP:
			case IB_WR_ATOMIC_FETCH_AND_ADD:
			case IB_WR_LSO:
			default:
				/* Unsupported opcodes map to the mask value;
				 * no error is raised here. */
				ps_opcode = HNS_ROCE_WQE_OPCODE_MASK;
				break;
			}
			ctrl->flag |= cpu_to_le32(ps_opcode);
			/* The raddr segment slot is skipped even for SEND
			 * opcodes that did not fill it. */
			wqe += sizeof(struct hns_roce_wqe_raddr_seg);

			dseg = wqe;
			if (wr->send_flags & IB_SEND_INLINE && wr->num_sge) {
				/* Inline: payload bytes are copied directly
				 * into the WQE instead of SGEs. */
				if (le32_to_cpu(ctrl->msg_length) >
				    hr_dev->caps.max_sq_inline) {
					ret = -EINVAL;
					*bad_wr = wr;
					dev_err(dev, "inline len(1-%d)=%d, illegal",
						ctrl->msg_length,
						hr_dev->caps.max_sq_inline);
					goto out;
				}
				for (i = 0; i < wr->num_sge; i++) {
					memcpy(wqe, ((void *) (uintptr_t)
					       wr->sg_list[i].addr),
					       wr->sg_list[i].length);
					wqe += wr->sg_list[i].length;
				}
				ctrl->flag |= cpu_to_le32(HNS_ROCE_WQE_INLINE);
			} else {
				/*sqe num is two */
				for (i = 0; i < wr->num_sge; i++)
					set_data_seg(dseg + i, wr->sg_list + i);

				ctrl->flag |= cpu_to_le32(wr->num_sge <<
					      HNS_ROCE_WQE_SGE_NUM_BIT);
			}
			ind++;
		}
	}

out:
	/* Set DB return */
	if (likely(nreq)) {
		qp->sq.head += nreq;
		/* Memory barrier: WQE writes must be visible to the device
		 * before the doorbell write below. */
		wmb();

		sq_db.u32_4 = 0;
		sq_db.u32_8 = 0;
		roce_set_field(sq_db.u32_4, SQ_DOORBELL_U32_4_SQ_HEAD_M,
			       SQ_DOORBELL_U32_4_SQ_HEAD_S,
			      (qp->sq.head & ((qp->sq.wqe_cnt << 1) - 1)));
		roce_set_field(sq_db.u32_4, SQ_DOORBELL_U32_4_SL_M,
			       SQ_DOORBELL_U32_4_SL_S, qp->sl);
		roce_set_field(sq_db.u32_4, SQ_DOORBELL_U32_4_PORT_M,
			       SQ_DOORBELL_U32_4_PORT_S, qp->phy_port);
		roce_set_field(sq_db.u32_8, SQ_DOORBELL_U32_8_QPN_M,
			       SQ_DOORBELL_U32_8_QPN_S, qp->doorbell_qpn);
		roce_set_bit(sq_db.u32_8, SQ_DOORBELL_HW_SYNC_S, 1);

		doorbell[0] = le32_to_cpu(sq_db.u32_4);
		doorbell[1] = le32_to_cpu(sq_db.u32_8);

		hns_roce_write64_k(doorbell, qp->sq.db_reg_l);
		qp->sq_next_wqe = ind;
	}

	spin_unlock_irqrestore(&qp->sq.lock, flags);

	return ret;
}
344 | ||
/*
 * hns_roce_v1_post_recv - post a chain of receive work requests (hip06 hw).
 * @ibqp: the queue pair to post on
 * @wr: linked list of receive work requests
 * @bad_wr: out: first WR that failed
 *
 * Writes one RQ WQE (ctrl word + scatter list) per request under
 * hr_qp->rq.lock.  After a write barrier, GSI QPs have their RQ head
 * updated through the QP1C config register, while other QPs are notified
 * via the RQ doorbell.
 *
 * Returns 0 on success or a negative errno, with *bad_wr pointing at the
 * offending request.
 */
static int hns_roce_v1_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
				 struct ib_recv_wr **bad_wr)
{
	int ret = 0;
	int nreq = 0;
	int ind = 0;
	int i = 0;
	u32 reg_val = 0;
	unsigned long flags = 0;
	struct hns_roce_rq_wqe_ctrl *ctrl = NULL;
	struct hns_roce_wqe_data_seg *scat = NULL;
	struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_rq_db rq_db;
	uint32_t doorbell[2] = {0};

	spin_lock_irqsave(&hr_qp->rq.lock, flags);
	ind = hr_qp->rq.head & (hr_qp->rq.wqe_cnt - 1);

	for (nreq = 0; wr; ++nreq, wr = wr->next) {
		if (hns_roce_wq_overflow(&hr_qp->rq, nreq,
			hr_qp->ibqp.recv_cq)) {
			ret = -ENOMEM;
			*bad_wr = wr;
			goto out;
		}

		if (unlikely(wr->num_sge > hr_qp->rq.max_gs)) {
			dev_err(dev, "rq:num_sge=%d > qp->sq.max_gs=%d\n",
				wr->num_sge, hr_qp->rq.max_gs);
			ret = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		ctrl = get_recv_wqe(hr_qp, ind);

		roce_set_field(ctrl->rwqe_byte_12,
			       RQ_WQE_CTRL_RWQE_BYTE_12_RWQE_SGE_NUM_M,
			       RQ_WQE_CTRL_RWQE_BYTE_12_RWQE_SGE_NUM_S,
			       wr->num_sge);

		/* Scatter entries follow the ctrl word in the WQE. */
		scat = (struct hns_roce_wqe_data_seg *)(ctrl + 1);

		for (i = 0; i < wr->num_sge; i++)
			set_data_seg(scat + i, wr->sg_list + i);

		hr_qp->rq.wrid[ind] = wr->wr_id;

		ind = (ind + 1) & (hr_qp->rq.wqe_cnt - 1);
	}

out:
	if (likely(nreq)) {
		hr_qp->rq.head += nreq;
		/* Memory barrier: WQE writes must reach memory before the
		 * head update / doorbell below. */
		wmb();

		if (ibqp->qp_type == IB_QPT_GSI) {
			/* SW update GSI rq header */
			reg_val = roce_read(to_hr_dev(ibqp->device),
					    ROCEE_QP1C_CFG3_0_REG +
					    QP1C_CFGN_OFFSET * hr_qp->phy_port);
			roce_set_field(reg_val,
				       ROCEE_QP1C_CFG3_0_ROCEE_QP1C_RQ_HEAD_M,
				       ROCEE_QP1C_CFG3_0_ROCEE_QP1C_RQ_HEAD_S,
				       hr_qp->rq.head);
			roce_write(to_hr_dev(ibqp->device),
				   ROCEE_QP1C_CFG3_0_REG +
				   QP1C_CFGN_OFFSET * hr_qp->phy_port, reg_val);
		} else {
			rq_db.u32_4 = 0;
			rq_db.u32_8 = 0;

			roce_set_field(rq_db.u32_4, RQ_DOORBELL_U32_4_RQ_HEAD_M,
				       RQ_DOORBELL_U32_4_RQ_HEAD_S,
				       hr_qp->rq.head);
			roce_set_field(rq_db.u32_8, RQ_DOORBELL_U32_8_QPN_M,
				       RQ_DOORBELL_U32_8_QPN_S, hr_qp->qpn);
			roce_set_field(rq_db.u32_8, RQ_DOORBELL_U32_8_CMD_M,
				       RQ_DOORBELL_U32_8_CMD_S, 1);
			roce_set_bit(rq_db.u32_8, RQ_DOORBELL_U32_8_HW_SYNC_S,
				     1);

			doorbell[0] = le32_to_cpu(rq_db.u32_4);
			doorbell[1] = le32_to_cpu(rq_db.u32_8);

			hns_roce_write64_k(doorbell, hr_qp->rq.db_reg_l);
		}
	}
	spin_unlock_irqrestore(&hr_qp->rq.lock, flags);

	return ret;
}
440 | ||
441 | static void hns_roce_set_db_event_mode(struct hns_roce_dev *hr_dev, | |
442 | int sdb_mode, int odb_mode) | |
443 | { | |
444 | u32 val; | |
445 | ||
446 | val = roce_read(hr_dev, ROCEE_GLB_CFG_REG); | |
447 | roce_set_bit(val, ROCEE_GLB_CFG_ROCEE_DB_SQ_MODE_S, sdb_mode); | |
448 | roce_set_bit(val, ROCEE_GLB_CFG_ROCEE_DB_OTH_MODE_S, odb_mode); | |
449 | roce_write(hr_dev, ROCEE_GLB_CFG_REG, val); | |
450 | } | |
451 | ||
452 | static void hns_roce_set_db_ext_mode(struct hns_roce_dev *hr_dev, u32 sdb_mode, | |
453 | u32 odb_mode) | |
454 | { | |
455 | u32 val; | |
456 | ||
457 | /* Configure SDB/ODB extend mode */ | |
458 | val = roce_read(hr_dev, ROCEE_GLB_CFG_REG); | |
459 | roce_set_bit(val, ROCEE_GLB_CFG_SQ_EXT_DB_MODE_S, sdb_mode); | |
460 | roce_set_bit(val, ROCEE_GLB_CFG_OTH_EXT_DB_MODE_S, odb_mode); | |
461 | roce_write(hr_dev, ROCEE_GLB_CFG_REG, val); | |
462 | } | |
463 | ||
464 | static void hns_roce_set_sdb(struct hns_roce_dev *hr_dev, u32 sdb_alept, | |
465 | u32 sdb_alful) | |
466 | { | |
467 | u32 val; | |
468 | ||
469 | /* Configure SDB */ | |
470 | val = roce_read(hr_dev, ROCEE_DB_SQ_WL_REG); | |
471 | roce_set_field(val, ROCEE_DB_SQ_WL_ROCEE_DB_SQ_WL_M, | |
472 | ROCEE_DB_SQ_WL_ROCEE_DB_SQ_WL_S, sdb_alful); | |
473 | roce_set_field(val, ROCEE_DB_SQ_WL_ROCEE_DB_SQ_WL_EMPTY_M, | |
474 | ROCEE_DB_SQ_WL_ROCEE_DB_SQ_WL_EMPTY_S, sdb_alept); | |
475 | roce_write(hr_dev, ROCEE_DB_SQ_WL_REG, val); | |
476 | } | |
477 | ||
478 | static void hns_roce_set_odb(struct hns_roce_dev *hr_dev, u32 odb_alept, | |
479 | u32 odb_alful) | |
480 | { | |
481 | u32 val; | |
482 | ||
483 | /* Configure ODB */ | |
484 | val = roce_read(hr_dev, ROCEE_DB_OTHERS_WL_REG); | |
485 | roce_set_field(val, ROCEE_DB_OTHERS_WL_ROCEE_DB_OTH_WL_M, | |
486 | ROCEE_DB_OTHERS_WL_ROCEE_DB_OTH_WL_S, odb_alful); | |
487 | roce_set_field(val, ROCEE_DB_OTHERS_WL_ROCEE_DB_OTH_WL_EMPTY_M, | |
488 | ROCEE_DB_OTHERS_WL_ROCEE_DB_OTH_WL_EMPTY_S, odb_alept); | |
489 | roce_write(hr_dev, ROCEE_DB_OTHERS_WL_REG, val); | |
490 | } | |
491 | ||
/*
 * hns_roce_set_sdb_ext - program the extended SQ doorbell registers.
 * @hr_dev: the RoCE device
 * @ext_sdb_alept: almost-empty threshold for the extended SDB
 * @ext_sdb_alful: almost-full threshold for the extended SDB
 *
 * Writes the thresholds, the DMA base address (split into a low part
 * shifted by 12 and a high part shifted by 44) and the depth of the
 * extended send doorbell buffer previously allocated in
 * hns_roce_db_ext_init().
 */
static void hns_roce_set_sdb_ext(struct hns_roce_dev *hr_dev, u32 ext_sdb_alept,
				 u32 ext_sdb_alful)
{
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_v1_priv *priv;
	struct hns_roce_db_table *db;
	dma_addr_t sdb_dma_addr;
	u32 val;

	priv = (struct hns_roce_v1_priv *)hr_dev->priv;
	db = &priv->db_table;

	/* Configure extend SDB threshold */
	roce_write(hr_dev, ROCEE_EXT_DB_SQ_WL_EMPTY_REG, ext_sdb_alept);
	roce_write(hr_dev, ROCEE_EXT_DB_SQ_WL_REG, ext_sdb_alful);

	/* Configure extend SDB base addr (bits [43:12], 4K aligned) */
	sdb_dma_addr = db->ext_db->sdb_buf_list->map;
	roce_write(hr_dev, ROCEE_EXT_DB_SQ_REG, (u32)(sdb_dma_addr >> 12));

	/* Configure extend SDB depth */
	val = roce_read(hr_dev, ROCEE_EXT_DB_SQ_H_REG);
	roce_set_field(val, ROCEE_EXT_DB_SQ_H_EXT_DB_SQ_SHIFT_M,
		       ROCEE_EXT_DB_SQ_H_EXT_DB_SQ_SHIFT_S,
		       db->ext_db->esdb_dep);
	/*
	 * 44 = 32 + 12, When evaluating addr to hardware, shift 12 because of
	 * using 4K page, and shift more 32 because of
	 * caculating the high 32 bit value evaluated to hardware.
	 */
	roce_set_field(val, ROCEE_EXT_DB_SQ_H_EXT_DB_SQ_BA_H_M,
		       ROCEE_EXT_DB_SQ_H_EXT_DB_SQ_BA_H_S, sdb_dma_addr >> 44);
	roce_write(hr_dev, ROCEE_EXT_DB_SQ_H_REG, val);

	dev_dbg(dev, "ext SDB depth: 0x%x\n", db->ext_db->esdb_dep);
	dev_dbg(dev, "ext SDB threshold: epmty: 0x%x, ful: 0x%x\n",
		ext_sdb_alept, ext_sdb_alful);
}
530 | ||
531 | static void hns_roce_set_odb_ext(struct hns_roce_dev *hr_dev, u32 ext_odb_alept, | |
532 | u32 ext_odb_alful) | |
533 | { | |
534 | struct device *dev = &hr_dev->pdev->dev; | |
535 | struct hns_roce_v1_priv *priv; | |
536 | struct hns_roce_db_table *db; | |
537 | dma_addr_t odb_dma_addr; | |
538 | u32 val; | |
539 | ||
016a0059 | 540 | priv = (struct hns_roce_v1_priv *)hr_dev->priv; |
9a443537 | 541 | db = &priv->db_table; |
542 | ||
543 | /* Configure extend ODB threshold */ | |
544 | roce_write(hr_dev, ROCEE_EXT_DB_OTHERS_WL_EMPTY_REG, ext_odb_alept); | |
545 | roce_write(hr_dev, ROCEE_EXT_DB_OTHERS_WL_REG, ext_odb_alful); | |
546 | ||
547 | /* Configure extend ODB base addr */ | |
548 | odb_dma_addr = db->ext_db->odb_buf_list->map; | |
549 | roce_write(hr_dev, ROCEE_EXT_DB_OTH_REG, (u32)(odb_dma_addr >> 12)); | |
550 | ||
551 | /* Configure extend ODB depth */ | |
552 | val = roce_read(hr_dev, ROCEE_EXT_DB_OTH_H_REG); | |
553 | roce_set_field(val, ROCEE_EXT_DB_OTH_H_EXT_DB_OTH_SHIFT_M, | |
554 | ROCEE_EXT_DB_OTH_H_EXT_DB_OTH_SHIFT_S, | |
555 | db->ext_db->eodb_dep); | |
556 | roce_set_field(val, ROCEE_EXT_DB_SQ_H_EXT_DB_OTH_BA_H_M, | |
557 | ROCEE_EXT_DB_SQ_H_EXT_DB_OTH_BA_H_S, | |
558 | db->ext_db->eodb_dep); | |
559 | roce_write(hr_dev, ROCEE_EXT_DB_OTH_H_REG, val); | |
560 | ||
561 | dev_dbg(dev, "ext ODB depth: 0x%x\n", db->ext_db->eodb_dep); | |
562 | dev_dbg(dev, "ext ODB threshold: empty: 0x%x, ful: 0x%x\n", | |
563 | ext_odb_alept, ext_odb_alful); | |
564 | } | |
565 | ||
/*
 * hns_roce_db_ext_init - allocate and configure extended doorbell buffers.
 * @hr_dev: the RoCE device
 * @sdb_ext_mod: non-zero to use an extended (DMA-backed) SQ doorbell
 * @odb_ext_mod: non-zero to use an extended (DMA-backed) "others" doorbell
 *
 * For each doorbell type in extended mode, allocates a tracking struct and
 * a DMA-coherent buffer, then programs the hardware via the *_ext helpers;
 * otherwise only the normal watermarks are programmed.  Finally enables the
 * chosen extend modes in the global config register.
 *
 * Returns 0 on success or -ENOMEM; uses goto-based unwind so partially
 * acquired resources are released on failure.
 */
static int hns_roce_db_ext_init(struct hns_roce_dev *hr_dev, u32 sdb_ext_mod,
				u32 odb_ext_mod)
{
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_v1_priv *priv;
	struct hns_roce_db_table *db;
	dma_addr_t sdb_dma_addr;
	dma_addr_t odb_dma_addr;
	int ret = 0;

	priv = (struct hns_roce_v1_priv *)hr_dev->priv;
	db = &priv->db_table;

	db->ext_db = kmalloc(sizeof(*db->ext_db), GFP_KERNEL);
	if (!db->ext_db)
		return -ENOMEM;

	if (sdb_ext_mod) {
		db->ext_db->sdb_buf_list = kmalloc(
				sizeof(*db->ext_db->sdb_buf_list), GFP_KERNEL);
		if (!db->ext_db->sdb_buf_list) {
			ret = -ENOMEM;
			goto ext_sdb_buf_fail_out;
		}

		db->ext_db->sdb_buf_list->buf = dma_alloc_coherent(dev,
						     HNS_ROCE_V1_EXT_SDB_SIZE,
						     &sdb_dma_addr, GFP_KERNEL);
		if (!db->ext_db->sdb_buf_list->buf) {
			ret = -ENOMEM;
			goto alloc_sq_db_buf_fail;
		}
		db->ext_db->sdb_buf_list->map = sdb_dma_addr;

		/* Depth is stored as log2 for the hardware shift field. */
		db->ext_db->esdb_dep = ilog2(HNS_ROCE_V1_EXT_SDB_DEPTH);
		hns_roce_set_sdb_ext(hr_dev, HNS_ROCE_V1_EXT_SDB_ALEPT,
				     HNS_ROCE_V1_EXT_SDB_ALFUL);
	} else
		hns_roce_set_sdb(hr_dev, HNS_ROCE_V1_SDB_ALEPT,
				 HNS_ROCE_V1_SDB_ALFUL);

	if (odb_ext_mod) {
		db->ext_db->odb_buf_list = kmalloc(
				sizeof(*db->ext_db->odb_buf_list), GFP_KERNEL);
		if (!db->ext_db->odb_buf_list) {
			ret = -ENOMEM;
			goto ext_odb_buf_fail_out;
		}

		db->ext_db->odb_buf_list->buf = dma_alloc_coherent(dev,
						     HNS_ROCE_V1_EXT_ODB_SIZE,
						     &odb_dma_addr, GFP_KERNEL);
		if (!db->ext_db->odb_buf_list->buf) {
			ret = -ENOMEM;
			goto alloc_otr_db_buf_fail;
		}
		db->ext_db->odb_buf_list->map = odb_dma_addr;

		db->ext_db->eodb_dep = ilog2(HNS_ROCE_V1_EXT_ODB_DEPTH);
		hns_roce_set_odb_ext(hr_dev, HNS_ROCE_V1_EXT_ODB_ALEPT,
				     HNS_ROCE_V1_EXT_ODB_ALFUL);
	} else
		hns_roce_set_odb(hr_dev, HNS_ROCE_V1_ODB_ALEPT,
				 HNS_ROCE_V1_ODB_ALFUL);

	hns_roce_set_db_ext_mode(hr_dev, sdb_ext_mod, odb_ext_mod);

	return 0;

	/* Unwind: ODB failures must also release any SDB resources that
	 * were allocated earlier. */
alloc_otr_db_buf_fail:
	kfree(db->ext_db->odb_buf_list);

ext_odb_buf_fail_out:
	if (sdb_ext_mod) {
		dma_free_coherent(dev, HNS_ROCE_V1_EXT_SDB_SIZE,
				  db->ext_db->sdb_buf_list->buf,
				  db->ext_db->sdb_buf_list->map);
	}

alloc_sq_db_buf_fail:
	if (sdb_ext_mod)
		kfree(db->ext_db->sdb_buf_list);

ext_sdb_buf_fail_out:
	kfree(db->ext_db);
	return ret;
}
653 | ||
bfcc681b SX |
654 | static struct hns_roce_qp *hns_roce_v1_create_lp_qp(struct hns_roce_dev *hr_dev, |
655 | struct ib_pd *pd) | |
656 | { | |
657 | struct device *dev = &hr_dev->pdev->dev; | |
658 | struct ib_qp_init_attr init_attr; | |
659 | struct ib_qp *qp; | |
660 | ||
661 | memset(&init_attr, 0, sizeof(struct ib_qp_init_attr)); | |
662 | init_attr.qp_type = IB_QPT_RC; | |
663 | init_attr.sq_sig_type = IB_SIGNAL_ALL_WR; | |
664 | init_attr.cap.max_recv_wr = HNS_ROCE_MIN_WQE_NUM; | |
665 | init_attr.cap.max_send_wr = HNS_ROCE_MIN_WQE_NUM; | |
666 | ||
667 | qp = hns_roce_create_qp(pd, &init_attr, NULL); | |
668 | if (IS_ERR(qp)) { | |
669 | dev_err(dev, "Create loop qp for mr free failed!"); | |
670 | return NULL; | |
671 | } | |
672 | ||
673 | return to_hr_qp(qp); | |
674 | } | |
675 | ||
/*
 * hns_roce_v1_rsv_lp_qp - reserve the loopback QPs used by the hip06
 * MR-free workaround.
 *
 * Creates one shared CQ and PD, then up to HNS_ROCE_V1_RESV_QP loopback RC
 * QPs — one per (physical port, SL) combination that maps onto an enabled
 * port — and drives each through RESET->INIT->RTR->RTS with a destination
 * GID derived from the port's own MAC (modified-EUI-64), so traffic loops
 * back to the local port.
 *
 * Returns 0 on success; on failure destroys any QPs already created along
 * with the PD and CQ and returns -ENOMEM/-EINVAL.
 */
static int hns_roce_v1_rsv_lp_qp(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_caps *caps = &hr_dev->caps;
	struct device *dev = &hr_dev->pdev->dev;
	struct ib_cq_init_attr cq_init_attr;
	struct hns_roce_free_mr *free_mr;
	struct ib_qp_attr attr = { 0 };
	struct hns_roce_v1_priv *priv;
	struct hns_roce_qp *hr_qp;
	struct ib_cq *cq;
	struct ib_pd *pd;
	union ib_gid dgid;
	u64 subnet_prefix;
	int attr_mask = 0;
	int i, j;
	int ret;
	u8 queue_en[HNS_ROCE_V1_RESV_QP] = { 0 };
	u8 phy_port;
	u8 port = 0;
	u8 sl;

	priv = (struct hns_roce_v1_priv *)hr_dev->priv;
	free_mr = &priv->free_mr;

	/* Reserved cq for loop qp */
	/* NOTE(review): cq_init_attr.flags is left uninitialized before the
	 * create_cq call — confirm the v1 create path ignores it. */
	cq_init_attr.cqe		= HNS_ROCE_MIN_WQE_NUM * 2;
	cq_init_attr.comp_vector	= 0;
	cq = hns_roce_ib_create_cq(&hr_dev->ib_dev, &cq_init_attr, NULL, NULL);
	if (IS_ERR(cq)) {
		dev_err(dev, "Create cq for reseved loop qp failed!");
		return -ENOMEM;
	}
	free_mr->mr_free_cq = to_hr_cq(cq);
	/* The CQ is used internally (not via uverbs), so the ib_cq fields a
	 * normal creation path would set are filled in by hand here. */
	free_mr->mr_free_cq->ib_cq.device		= &hr_dev->ib_dev;
	free_mr->mr_free_cq->ib_cq.uobject		= NULL;
	free_mr->mr_free_cq->ib_cq.comp_handler		= NULL;
	free_mr->mr_free_cq->ib_cq.event_handler	= NULL;
	free_mr->mr_free_cq->ib_cq.cq_context		= NULL;
	atomic_set(&free_mr->mr_free_cq->ib_cq.usecnt, 0);

	pd = hns_roce_alloc_pd(&hr_dev->ib_dev, NULL, NULL);
	if (IS_ERR(pd)) {
		dev_err(dev, "Create pd for reseved loop qp failed!");
		ret = -ENOMEM;
		goto alloc_pd_failed;
	}
	free_mr->mr_free_pd = to_hr_pd(pd);
	free_mr->mr_free_pd->ibpd.device  = &hr_dev->ib_dev;
	free_mr->mr_free_pd->ibpd.uobject = NULL;
	atomic_set(&free_mr->mr_free_pd->ibpd.usecnt, 0);

	attr.qp_access_flags	= IB_ACCESS_REMOTE_WRITE;
	attr.pkey_index		= 0;
	attr.min_rnr_timer	= 0;
	/* Disable read ability */
	attr.max_dest_rd_atomic = 0;
	attr.max_rd_atomic	= 0;
	/* Use arbitrary values as rq_psn and sq_psn */
	attr.rq_psn		= 0x0808;
	attr.sq_psn		= 0x0808;
	attr.retry_cnt		= 7;
	attr.rnr_retry		= 7;
	attr.timeout		= 0x12;
	attr.path_mtu		= IB_MTU_256;
	attr.ah_attr.type	= RDMA_AH_ATTR_TYPE_ROCE;
	rdma_ah_set_grh(&attr.ah_attr, NULL, 0, 0, 1, 0);
	rdma_ah_set_static_rate(&attr.ah_attr, 3);

	subnet_prefix = cpu_to_be64(0xfe80000000000000LL);
	for (i = 0; i < HNS_ROCE_V1_RESV_QP; i++) {
		/* Map reserved-QP index i to a (phy_port, sl) pair. */
		phy_port = (i >= HNS_ROCE_MAX_PORTS) ? (i - 2) :
				(i % HNS_ROCE_MAX_PORTS);
		sl = i / HNS_ROCE_MAX_PORTS;

		/* Only create a QP if some enabled port uses this phy_port. */
		for (j = 0; j < caps->num_ports; j++) {
			if (hr_dev->iboe.phy_port[j] == phy_port) {
				queue_en[i] = 1;
				port = j;
				break;
			}
		}

		if (!queue_en[i])
			continue;

		free_mr->mr_free_qp[i] = hns_roce_v1_create_lp_qp(hr_dev, pd);
		if (!free_mr->mr_free_qp[i]) {
			dev_err(dev, "Create loop qp failed!\n");
			goto create_lp_qp_failed;
		}
		hr_qp = free_mr->mr_free_qp[i];

		hr_qp->port		= port;
		hr_qp->phy_port		= phy_port;
		hr_qp->ibqp.qp_type	= IB_QPT_RC;
		hr_qp->ibqp.device	= &hr_dev->ib_dev;
		hr_qp->ibqp.uobject	= NULL;
		atomic_set(&hr_qp->ibqp.usecnt, 0);
		hr_qp->ibqp.pd		= pd;
		hr_qp->ibqp.recv_cq	= cq;
		hr_qp->ibqp.send_cq	= cq;

		rdma_ah_set_port_num(&attr.ah_attr, port + 1);
		rdma_ah_set_sl(&attr.ah_attr, sl);
		attr.port_num		= port + 1;

		/* Loopback: the QP is its own destination. */
		attr.dest_qp_num	= hr_qp->qpn;
		memcpy(rdma_ah_retrieve_dmac(&attr.ah_attr),
		       hr_dev->dev_addr[port],
		       MAC_ADDR_OCTET_NUM);

		/* Build a link-local GID from the port MAC in modified
		 * EUI-64 form (insert ff:fe, flip the U/L bit). */
		memcpy(&dgid.raw, &subnet_prefix, sizeof(u64));
		memcpy(&dgid.raw[8], hr_dev->dev_addr[port], 3);
		memcpy(&dgid.raw[13], hr_dev->dev_addr[port] + 3, 3);
		dgid.raw[11] = 0xff;
		dgid.raw[12] = 0xfe;
		dgid.raw[8] ^= 2;
		rdma_ah_set_dgid_raw(&attr.ah_attr, dgid.raw);

		ret = hr_dev->hw->modify_qp(&hr_qp->ibqp, &attr, attr_mask,
					    IB_QPS_RESET, IB_QPS_INIT);
		if (ret) {
			dev_err(dev, "modify qp failed(%d)!\n", ret);
			goto create_lp_qp_failed;
		}

		ret = hr_dev->hw->modify_qp(&hr_qp->ibqp, &attr, IB_QP_DEST_QPN,
					    IB_QPS_INIT, IB_QPS_RTR);
		if (ret) {
			dev_err(dev, "modify qp failed(%d)!\n", ret);
			goto create_lp_qp_failed;
		}

		ret = hr_dev->hw->modify_qp(&hr_qp->ibqp, &attr, attr_mask,
					    IB_QPS_RTR, IB_QPS_RTS);
		if (ret) {
			dev_err(dev, "modify qp failed(%d)!\n", ret);
			goto create_lp_qp_failed;
		}
	}

	return 0;

create_lp_qp_failed:
	/* Destroy only the QPs created before the failure (indices < i). */
	for (i -= 1; i >= 0; i--) {
		hr_qp = free_mr->mr_free_qp[i];
		if (hns_roce_v1_destroy_qp(&hr_qp->ibqp))
			dev_err(dev, "Destroy qp %d for mr free failed!\n", i);
	}

	if (hns_roce_dealloc_pd(pd))
		dev_err(dev, "Destroy pd for create_lp_qp failed!\n");

alloc_pd_failed:
	if (hns_roce_ib_destroy_cq(cq))
		dev_err(dev, "Destroy cq for create_lp_qp failed!\n");

	return -EINVAL;
}
835 | ||
/*
 * Tear down the reserved loopback resources used for MR-free processing:
 * destroy every reserved QP first, then the CQ, then the PD (reverse of
 * the order they were created in hns_roce_v1_rsv_lp_qp()).  This is a
 * cleanup path, so failures are only logged, never propagated.
 */
static void hns_roce_v1_release_lp_qp(struct hns_roce_dev *hr_dev)
{
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_free_mr *free_mr;
	struct hns_roce_v1_priv *priv;
	struct hns_roce_qp *hr_qp;
	int ret;
	int i;

	priv = (struct hns_roce_v1_priv *)hr_dev->priv;
	free_mr = &priv->free_mr;

	for (i = 0; i < HNS_ROCE_V1_RESV_QP; i++) {
		hr_qp = free_mr->mr_free_qp[i];
		/* Slots may be empty if reservation only partially succeeded */
		if (!hr_qp)
			continue;

		ret = hns_roce_v1_destroy_qp(&hr_qp->ibqp);
		if (ret)
			dev_err(dev, "Destroy qp %d for mr free failed(%d)!\n",
				i, ret);
	}

	ret = hns_roce_ib_destroy_cq(&free_mr->mr_free_cq->ib_cq);
	if (ret)
		dev_err(dev, "Destroy cq for mr_free failed(%d)!\n", ret);

	ret = hns_roce_dealloc_pd(&free_mr->mr_free_pd->ibpd);
	if (ret)
		dev_err(dev, "Destroy pd for mr_free failed(%d)!\n", ret);
}
867 | ||
9a443537 | 868 | static int hns_roce_db_init(struct hns_roce_dev *hr_dev) |
869 | { | |
870 | struct device *dev = &hr_dev->pdev->dev; | |
871 | struct hns_roce_v1_priv *priv; | |
872 | struct hns_roce_db_table *db; | |
873 | u32 sdb_ext_mod; | |
874 | u32 odb_ext_mod; | |
875 | u32 sdb_evt_mod; | |
876 | u32 odb_evt_mod; | |
877 | int ret = 0; | |
878 | ||
016a0059 | 879 | priv = (struct hns_roce_v1_priv *)hr_dev->priv; |
9a443537 | 880 | db = &priv->db_table; |
881 | ||
882 | memset(db, 0, sizeof(*db)); | |
883 | ||
884 | /* Default DB mode */ | |
885 | sdb_ext_mod = HNS_ROCE_SDB_EXTEND_MODE; | |
886 | odb_ext_mod = HNS_ROCE_ODB_EXTEND_MODE; | |
887 | sdb_evt_mod = HNS_ROCE_SDB_NORMAL_MODE; | |
888 | odb_evt_mod = HNS_ROCE_ODB_POLL_MODE; | |
889 | ||
890 | db->sdb_ext_mod = sdb_ext_mod; | |
891 | db->odb_ext_mod = odb_ext_mod; | |
892 | ||
893 | /* Init extend DB */ | |
894 | ret = hns_roce_db_ext_init(hr_dev, sdb_ext_mod, odb_ext_mod); | |
895 | if (ret) { | |
896 | dev_err(dev, "Failed in extend DB configuration.\n"); | |
897 | return ret; | |
898 | } | |
899 | ||
900 | hns_roce_set_db_event_mode(hr_dev, sdb_evt_mod, odb_evt_mod); | |
901 | ||
902 | return 0; | |
903 | } | |
904 | ||
/*
 * Workqueue handler that recreates the reserved loopback QPs: release the
 * existing set, then reserve a fresh one.  Signals the waiter through
 * lp_qp_work->comp when it is still waiting (comp_flag set), and always
 * frees the work item itself.
 *
 * NOTE(review): the waiter clears comp_flag only after its timeout expires;
 * since this function kfree()s lp_qp_work unconditionally, the waiter's
 * late write to comp_flag can race with this free — confirm the intended
 * ownership handoff.
 */
static void hns_roce_v1_recreate_lp_qp_work_fn(struct work_struct *work)
{
	struct hns_roce_recreate_lp_qp_work *lp_qp_work;
	struct hns_roce_dev *hr_dev;

	lp_qp_work = container_of(work, struct hns_roce_recreate_lp_qp_work,
				  work);
	hr_dev = to_hr_dev(lp_qp_work->ib_dev);

	hns_roce_v1_release_lp_qp(hr_dev);

	if (hns_roce_v1_rsv_lp_qp(hr_dev))
		dev_err(&hr_dev->pdev->dev, "create reserver qp failed\n");

	/* Only complete() if the submitter is still waiting on the result */
	if (lp_qp_work->comp_flag)
		complete(lp_qp_work->comp);

	kfree(lp_qp_work);
}
924 | ||
/*
 * Queue a work item that recreates the reserved loopback QPs and wait up
 * to HNS_ROCE_V1_RECREATE_LP_QP_TIMEOUT_MSECS for it to finish, polling
 * the on-stack completion.
 *
 * Returns 0 when the work completed in time, -ENOMEM on allocation
 * failure, or -ETIMEDOUT if the work did not finish before the deadline.
 *
 * NOTE(review): on the timeout path this writes lp_qp_work->comp_flag
 * after the worker may already have kfree()d lp_qp_work (the worker frees
 * it unconditionally) — potential use-after-free; confirm.
 */
static int hns_roce_v1_recreate_lp_qp(struct hns_roce_dev *hr_dev)
{
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_recreate_lp_qp_work *lp_qp_work;
	struct hns_roce_free_mr *free_mr;
	struct hns_roce_v1_priv *priv;
	struct completion comp;
	unsigned long end =
	  msecs_to_jiffies(HNS_ROCE_V1_RECREATE_LP_QP_TIMEOUT_MSECS) + jiffies;

	priv = (struct hns_roce_v1_priv *)hr_dev->priv;
	free_mr = &priv->free_mr;

	lp_qp_work = kzalloc(sizeof(struct hns_roce_recreate_lp_qp_work),
			     GFP_KERNEL);
	if (!lp_qp_work)
		return -ENOMEM;

	INIT_WORK(&(lp_qp_work->work), hns_roce_v1_recreate_lp_qp_work_fn);

	lp_qp_work->ib_dev = &(hr_dev->ib_dev);
	/* Completion lives on our stack; comp_flag tells the worker we wait */
	lp_qp_work->comp = &comp;
	lp_qp_work->comp_flag = 1;

	init_completion(lp_qp_work->comp);

	queue_work(free_mr->free_mr_wq, &(lp_qp_work->work));

	/* Poll the completion until the deadline instead of blocking */
	while (time_before_eq(jiffies, end)) {
		if (try_wait_for_completion(&comp))
			return 0;
		msleep(HNS_ROCE_V1_RECREATE_LP_QP_WAIT_VALUE);
	}

	/* Timed out: tell the worker not to complete(), then check once more */
	lp_qp_work->comp_flag = 0;
	if (try_wait_for_completion(&comp))
		return 0;

	dev_warn(dev, "recreate lp qp failed 20s timeout and return failed!\n");
	return -ETIMEDOUT;
}
966 | ||
967 | static int hns_roce_v1_send_lp_wqe(struct hns_roce_qp *hr_qp) | |
968 | { | |
969 | struct hns_roce_dev *hr_dev = to_hr_dev(hr_qp->ibqp.device); | |
970 | struct device *dev = &hr_dev->pdev->dev; | |
971 | struct ib_send_wr send_wr, *bad_wr; | |
972 | int ret; | |
973 | ||
974 | memset(&send_wr, 0, sizeof(send_wr)); | |
975 | send_wr.next = NULL; | |
976 | send_wr.num_sge = 0; | |
977 | send_wr.send_flags = 0; | |
978 | send_wr.sg_list = NULL; | |
979 | send_wr.wr_id = (unsigned long long)&send_wr; | |
980 | send_wr.opcode = IB_WR_RDMA_WRITE; | |
981 | ||
982 | ret = hns_roce_v1_post_send(&hr_qp->ibqp, &send_wr, &bad_wr); | |
983 | if (ret) { | |
984 | dev_err(dev, "Post write wqe for mr free failed(%d)!", ret); | |
985 | return ret; | |
986 | } | |
987 | ||
988 | return 0; | |
989 | } | |
990 | ||
/*
 * Workqueue handler that drains outstanding hardware work before an MR is
 * freed: post one loopback WQE on every reserved QP, then poll the shared
 * free-MR CQ until a CQE has been reaped for each posted WQE (or the
 * HNS_ROCE_V1_FREE_MR_TIMEOUT_MSECS deadline passes).  Completes the
 * submitter's completion (if still waiting) and frees the work item.
 */
static void hns_roce_v1_mr_free_work_fn(struct work_struct *work)
{
	struct hns_roce_mr_free_work *mr_work;
	struct ib_wc wc[HNS_ROCE_V1_RESV_QP];
	struct hns_roce_free_mr *free_mr;
	struct hns_roce_cq *mr_free_cq;
	struct hns_roce_v1_priv *priv;
	struct hns_roce_dev *hr_dev;
	struct hns_roce_mr *hr_mr;
	struct hns_roce_qp *hr_qp;
	struct device *dev;
	unsigned long end =
		msecs_to_jiffies(HNS_ROCE_V1_FREE_MR_TIMEOUT_MSECS) + jiffies;
	int i;
	int ret;
	int ne = 0;	/* number of CQEs still expected */

	mr_work = container_of(work, struct hns_roce_mr_free_work, work);
	hr_mr = (struct hns_roce_mr *)mr_work->mr;
	hr_dev = to_hr_dev(mr_work->ib_dev);
	dev = &hr_dev->pdev->dev;

	priv = (struct hns_roce_v1_priv *)hr_dev->priv;
	free_mr = &priv->free_mr;
	mr_free_cq = free_mr->mr_free_cq;

	/* One loopback WQE per reserved QP; each will produce one CQE */
	for (i = 0; i < HNS_ROCE_V1_RESV_QP; i++) {
		hr_qp = free_mr->mr_free_qp[i];
		if (!hr_qp)
			continue;
		ne++;

		ret = hns_roce_v1_send_lp_wqe(hr_qp);
		if (ret) {
			dev_err(dev,
				"Send wqe (qp:0x%lx) for mr free failed(%d)!\n",
				hr_qp->qpn, ret);
			goto free_work;
		}
	}

	if (!ne) {
		dev_err(dev, "Reserved loop qp is absent!\n");
		goto free_work;
	}

	/* Reap CQEs until all expected completions arrive or we time out */
	do {
		ret = hns_roce_v1_poll_cq(&mr_free_cq->ib_cq, ne, wc);
		if (ret < 0) {
			dev_err(dev,
				"(qp:0x%lx) starts, Poll cqe failed(%d) for mr 0x%x free! Remain %d cqe\n",
				hr_qp->qpn, ret, hr_mr->key, ne);
			goto free_work;
		}
		ne -= ret;
		usleep_range(HNS_ROCE_V1_FREE_MR_WAIT_VALUE * 1000,
			     (1 + HNS_ROCE_V1_FREE_MR_WAIT_VALUE) * 1000);
	} while (ne && time_before_eq(jiffies, end));

	if (ne != 0)
		dev_err(dev,
			"Poll cqe for mr 0x%x free timeout! Remain %d cqe\n",
			hr_mr->key, ne);

free_work:
	/* Only complete() if the submitter is still waiting on the result */
	if (mr_work->comp_flag)
		complete(mr_work->comp);
	kfree(mr_work);
}
1060 | ||
/*
 * Deregister an MR on the hip06 engine.  The hardware requires in-flight
 * work to drain before the MPT can be reused, so this queues an
 * hns_roce_v1_mr_free_work_fn() work item and polls its completion for up
 * to HNS_ROCE_V1_FREE_MR_TIMEOUT_MSECS.  Regardless of the drain outcome,
 * the MR's PBL, bitmap slot, umem and struct are released.
 *
 * Returns 0 on success, -ENOMEM on allocation failure, or -ETIMEDOUT when
 * the drain work did not finish in time.
 *
 * NOTE(review): as with hns_roce_v1_recreate_lp_qp(), the timeout path
 * writes mr_work->comp_flag after the worker may have kfree()d mr_work —
 * confirm the ownership handoff.
 */
static int hns_roce_v1_dereg_mr(struct hns_roce_dev *hr_dev,
				struct hns_roce_mr *mr)
{
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_mr_free_work *mr_work;
	struct hns_roce_free_mr *free_mr;
	struct hns_roce_v1_priv *priv;
	struct completion comp;
	unsigned long end =
		msecs_to_jiffies(HNS_ROCE_V1_FREE_MR_TIMEOUT_MSECS) + jiffies;
	unsigned long start = jiffies;
	int npages;
	int ret = 0;

	priv = (struct hns_roce_v1_priv *)hr_dev->priv;
	free_mr = &priv->free_mr;

	/* Move the MPT back from HW to SW ownership before touching it */
	if (mr->enabled) {
		if (hns_roce_hw2sw_mpt(hr_dev, NULL, key_to_hw_index(mr->key)
				       & (hr_dev->caps.num_mtpts - 1)))
			dev_warn(dev, "HW2SW_MPT failed!\n");
	}

	mr_work = kzalloc(sizeof(*mr_work), GFP_KERNEL);
	if (!mr_work) {
		ret = -ENOMEM;
		goto free_mr;
	}

	INIT_WORK(&(mr_work->work), hns_roce_v1_mr_free_work_fn);

	mr_work->ib_dev = &(hr_dev->ib_dev);
	/* Completion lives on our stack; comp_flag tells the worker we wait */
	mr_work->comp = &comp;
	mr_work->comp_flag = 1;
	mr_work->mr = (void *)mr;
	init_completion(mr_work->comp);

	queue_work(free_mr->free_mr_wq, &(mr_work->work));

	/* Poll the completion until the deadline instead of blocking */
	while (time_before_eq(jiffies, end)) {
		if (try_wait_for_completion(&comp))
			goto free_mr;
		msleep(HNS_ROCE_V1_FREE_MR_WAIT_VALUE);
	}

	/* Timed out: tell the worker not to complete(), then check once more */
	mr_work->comp_flag = 0;
	if (try_wait_for_completion(&comp))
		goto free_mr;

	dev_warn(dev, "Free mr work 0x%x over 50s and failed!\n", mr->key);
	ret = -ETIMEDOUT;

free_mr:
	dev_dbg(dev, "Free mr 0x%x use 0x%x us.\n",
		mr->key, jiffies_to_usecs(jiffies) - jiffies_to_usecs(start));

	/* size == ~0ULL marks a full-address-space MR with no PBL to free;
	 * 8 bytes per PBL entry — presumably a 64-bit DMA address, confirm.
	 */
	if (mr->size != ~0ULL) {
		npages = ib_umem_page_count(mr->umem);
		dma_free_coherent(dev, npages * 8, mr->pbl_buf,
				  mr->pbl_dma_addr);
	}

	hns_roce_bitmap_free(&hr_dev->mr_table.mtpt_bitmap,
			     key_to_hw_index(mr->key), 0);

	if (mr->umem)
		ib_umem_release(mr->umem);

	kfree(mr);

	return ret;
}
1133 | ||
9a443537 | 1134 | static void hns_roce_db_free(struct hns_roce_dev *hr_dev) |
1135 | { | |
1136 | struct device *dev = &hr_dev->pdev->dev; | |
1137 | struct hns_roce_v1_priv *priv; | |
1138 | struct hns_roce_db_table *db; | |
1139 | ||
016a0059 | 1140 | priv = (struct hns_roce_v1_priv *)hr_dev->priv; |
9a443537 | 1141 | db = &priv->db_table; |
1142 | ||
1143 | if (db->sdb_ext_mod) { | |
1144 | dma_free_coherent(dev, HNS_ROCE_V1_EXT_SDB_SIZE, | |
1145 | db->ext_db->sdb_buf_list->buf, | |
1146 | db->ext_db->sdb_buf_list->map); | |
1147 | kfree(db->ext_db->sdb_buf_list); | |
1148 | } | |
1149 | ||
1150 | if (db->odb_ext_mod) { | |
1151 | dma_free_coherent(dev, HNS_ROCE_V1_EXT_ODB_SIZE, | |
1152 | db->ext_db->odb_buf_list->buf, | |
1153 | db->ext_db->odb_buf_list->map); | |
1154 | kfree(db->ext_db->odb_buf_list); | |
1155 | } | |
1156 | ||
1157 | kfree(db->ext_db); | |
1158 | } | |
1159 | ||
/*
 * Set up the RAQ (receive asynchronous queue): allocate its DMA buffer,
 * program the base address, shift, watermark and timeout registers, and
 * enable extended-RAQ mode plus RAQ drop.
 *
 * Returns 0 on success or -ENOMEM when an allocation fails.
 */
static int hns_roce_raq_init(struct hns_roce_dev *hr_dev)
{
	int ret;
	int raq_shift = 0;
	dma_addr_t addr;
	u32 val;
	struct hns_roce_v1_priv *priv;
	struct hns_roce_raq_table *raq;
	struct device *dev = &hr_dev->pdev->dev;

	priv = (struct hns_roce_v1_priv *)hr_dev->priv;
	raq = &priv->raq_table;

	raq->e_raq_buf = kzalloc(sizeof(*(raq->e_raq_buf)), GFP_KERNEL);
	if (!raq->e_raq_buf)
		return -ENOMEM;

	raq->e_raq_buf->buf = dma_alloc_coherent(dev, HNS_ROCE_V1_RAQ_SIZE,
						 &addr, GFP_KERNEL);
	if (!raq->e_raq_buf->buf) {
		ret = -ENOMEM;
		goto err_dma_alloc_raq;
	}
	raq->e_raq_buf->map = addr;

	/* Configure raq extended address. 48bit 4K align*/
	roce_write(hr_dev, ROCEE_EXT_RAQ_REG, raq->e_raq_buf->map >> 12);

	/* Configure raq_shift */
	raq_shift = ilog2(HNS_ROCE_V1_RAQ_SIZE / HNS_ROCE_V1_RAQ_ENTRY);
	val = roce_read(hr_dev, ROCEE_EXT_RAQ_H_REG);
	roce_set_field(val, ROCEE_EXT_RAQ_H_EXT_RAQ_SHIFT_M,
		       ROCEE_EXT_RAQ_H_EXT_RAQ_SHIFT_S, raq_shift);
	/*
	 * 44 = 32 + 12, When evaluating addr to hardware, shift 12 because of
	 * using 4K page, and shift more 32 because of
	 * caculating the high 32 bit value evaluated to hardware.
	 */
	roce_set_field(val, ROCEE_EXT_RAQ_H_EXT_RAQ_BA_H_M,
		       ROCEE_EXT_RAQ_H_EXT_RAQ_BA_H_S,
		       raq->e_raq_buf->map >> 44);
	roce_write(hr_dev, ROCEE_EXT_RAQ_H_REG, val);
	dev_dbg(dev, "Configure raq_shift 0x%x.\n", val);

	/* Configure raq threshold */
	val = roce_read(hr_dev, ROCEE_RAQ_WL_REG);
	roce_set_field(val, ROCEE_RAQ_WL_ROCEE_RAQ_WL_M,
		       ROCEE_RAQ_WL_ROCEE_RAQ_WL_S,
		       HNS_ROCE_V1_EXT_RAQ_WF);
	roce_write(hr_dev, ROCEE_RAQ_WL_REG, val);
	dev_dbg(dev, "Configure raq_wl 0x%x.\n", val);

	/* Enable extend raq */
	val = roce_read(hr_dev, ROCEE_WRMS_POL_TIME_INTERVAL_REG);
	roce_set_field(val,
		       ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_POL_TIME_INTERVAL_M,
		       ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_POL_TIME_INTERVAL_S,
		       POL_TIME_INTERVAL_VAL);
	roce_set_bit(val, ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_EXT_RAQ_MODE, 1);
	roce_set_field(val,
		       ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_RAQ_TIMEOUT_CHK_CFG_M,
		       ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_RAQ_TIMEOUT_CHK_CFG_S,
		       2);
	roce_set_bit(val,
		     ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_RAQ_TIMEOUT_CHK_EN_S, 1);
	roce_write(hr_dev, ROCEE_WRMS_POL_TIME_INTERVAL_REG, val);
	dev_dbg(dev, "Configure WrmsPolTimeInterval 0x%x.\n", val);

	/* Enable raq drop */
	val = roce_read(hr_dev, ROCEE_GLB_CFG_REG);
	roce_set_bit(val, ROCEE_GLB_CFG_TRP_RAQ_DROP_EN_S, 1);
	roce_write(hr_dev, ROCEE_GLB_CFG_REG, val);
	dev_dbg(dev, "Configure GlbCfg = 0x%x.\n", val);

	return 0;

err_dma_alloc_raq:
	kfree(raq->e_raq_buf);
	return ret;
}
1240 | ||
1241 | static void hns_roce_raq_free(struct hns_roce_dev *hr_dev) | |
1242 | { | |
1243 | struct device *dev = &hr_dev->pdev->dev; | |
1244 | struct hns_roce_v1_priv *priv; | |
1245 | struct hns_roce_raq_table *raq; | |
1246 | ||
016a0059 | 1247 | priv = (struct hns_roce_v1_priv *)hr_dev->priv; |
9a443537 | 1248 | raq = &priv->raq_table; |
1249 | ||
1250 | dma_free_coherent(dev, HNS_ROCE_V1_RAQ_SIZE, raq->e_raq_buf->buf, | |
1251 | raq->e_raq_buf->map); | |
1252 | kfree(raq->e_raq_buf); | |
1253 | } | |
1254 | ||
1255 | static void hns_roce_port_enable(struct hns_roce_dev *hr_dev, int enable_flag) | |
1256 | { | |
1257 | u32 val; | |
1258 | ||
1259 | if (enable_flag) { | |
1260 | val = roce_read(hr_dev, ROCEE_GLB_CFG_REG); | |
1261 | /* Open all ports */ | |
1262 | roce_set_field(val, ROCEE_GLB_CFG_ROCEE_PORT_ST_M, | |
1263 | ROCEE_GLB_CFG_ROCEE_PORT_ST_S, | |
1264 | ALL_PORT_VAL_OPEN); | |
1265 | roce_write(hr_dev, ROCEE_GLB_CFG_REG, val); | |
1266 | } else { | |
1267 | val = roce_read(hr_dev, ROCEE_GLB_CFG_REG); | |
1268 | /* Close all ports */ | |
1269 | roce_set_field(val, ROCEE_GLB_CFG_ROCEE_PORT_ST_M, | |
1270 | ROCEE_GLB_CFG_ROCEE_PORT_ST_S, 0x0); | |
1271 | roce_write(hr_dev, ROCEE_GLB_CFG_REG, val); | |
1272 | } | |
1273 | } | |
1274 | ||
/*
 * Allocate the three reserved base-address-table (BT) DMA buffers used by
 * the hip06 engine: QPC, MTPT and CQC.  Uses goto-based unwinding so that
 * earlier allocations are freed when a later one fails.
 *
 * Returns 0 on success or -ENOMEM.
 */
static int hns_roce_bt_init(struct hns_roce_dev *hr_dev)
{
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_v1_priv *priv;
	int ret;

	priv = (struct hns_roce_v1_priv *)hr_dev->priv;

	priv->bt_table.qpc_buf.buf = dma_alloc_coherent(dev,
		HNS_ROCE_BT_RSV_BUF_SIZE, &priv->bt_table.qpc_buf.map,
		GFP_KERNEL);
	if (!priv->bt_table.qpc_buf.buf)
		return -ENOMEM;

	priv->bt_table.mtpt_buf.buf = dma_alloc_coherent(dev,
		HNS_ROCE_BT_RSV_BUF_SIZE, &priv->bt_table.mtpt_buf.map,
		GFP_KERNEL);
	if (!priv->bt_table.mtpt_buf.buf) {
		ret = -ENOMEM;
		goto err_failed_alloc_mtpt_buf;
	}

	priv->bt_table.cqc_buf.buf = dma_alloc_coherent(dev,
		HNS_ROCE_BT_RSV_BUF_SIZE, &priv->bt_table.cqc_buf.map,
		GFP_KERNEL);
	if (!priv->bt_table.cqc_buf.buf) {
		ret = -ENOMEM;
		goto err_failed_alloc_cqc_buf;
	}

	return 0;

err_failed_alloc_cqc_buf:
	dma_free_coherent(dev, HNS_ROCE_BT_RSV_BUF_SIZE,
		priv->bt_table.mtpt_buf.buf, priv->bt_table.mtpt_buf.map);

err_failed_alloc_mtpt_buf:
	dma_free_coherent(dev, HNS_ROCE_BT_RSV_BUF_SIZE,
		priv->bt_table.qpc_buf.buf, priv->bt_table.qpc_buf.map);

	return ret;
}
1317 | ||
1318 | static void hns_roce_bt_free(struct hns_roce_dev *hr_dev) | |
1319 | { | |
1320 | struct device *dev = &hr_dev->pdev->dev; | |
1321 | struct hns_roce_v1_priv *priv; | |
1322 | ||
016a0059 | 1323 | priv = (struct hns_roce_v1_priv *)hr_dev->priv; |
97f0e39f WHX |
1324 | |
1325 | dma_free_coherent(dev, HNS_ROCE_BT_RSV_BUF_SIZE, | |
1326 | priv->bt_table.cqc_buf.buf, priv->bt_table.cqc_buf.map); | |
1327 | ||
1328 | dma_free_coherent(dev, HNS_ROCE_BT_RSV_BUF_SIZE, | |
1329 | priv->bt_table.mtpt_buf.buf, priv->bt_table.mtpt_buf.map); | |
1330 | ||
1331 | dma_free_coherent(dev, HNS_ROCE_BT_RSV_BUF_SIZE, | |
1332 | priv->bt_table.qpc_buf.buf, priv->bt_table.qpc_buf.map); | |
1333 | } | |
1334 | ||
/*
 * Allocate the CQ tail-pointer (tptr/ci) DMA buffer and publish its
 * address and size on hr_dev for the rest of the driver.
 *
 * Returns 0 on success or -ENOMEM.
 */
static int hns_roce_tptr_init(struct hns_roce_dev *hr_dev)
{
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_buf_list *tptr_buf;
	struct hns_roce_v1_priv *priv;

	priv = (struct hns_roce_v1_priv *)hr_dev->priv;
	tptr_buf = &priv->tptr_table.tptr_buf;

	/*
	 * This buffer will be used for CQ's tptr(tail pointer), also
	 * named ci(customer index). Every CQ will use 2 bytes to save
	 * cqe ci in hip06. Hardware will read this area to get new ci
	 * when the queue is almost full.
	 */
	tptr_buf->buf = dma_alloc_coherent(dev, HNS_ROCE_V1_TPTR_BUF_SIZE,
					   &tptr_buf->map, GFP_KERNEL);
	if (!tptr_buf->buf)
		return -ENOMEM;

	hr_dev->tptr_dma_addr = tptr_buf->map;
	hr_dev->tptr_size = HNS_ROCE_V1_TPTR_BUF_SIZE;

	return 0;
}
1360 | ||
1361 | static void hns_roce_tptr_free(struct hns_roce_dev *hr_dev) | |
1362 | { | |
1363 | struct device *dev = &hr_dev->pdev->dev; | |
1364 | struct hns_roce_buf_list *tptr_buf; | |
1365 | struct hns_roce_v1_priv *priv; | |
1366 | ||
016a0059 | 1367 | priv = (struct hns_roce_v1_priv *)hr_dev->priv; |
8f3e9f3e WHX |
1368 | tptr_buf = &priv->tptr_table.tptr_buf; |
1369 | ||
1370 | dma_free_coherent(dev, HNS_ROCE_V1_TPTR_BUF_SIZE, | |
1371 | tptr_buf->buf, tptr_buf->map); | |
1372 | } | |
1373 | ||
/*
 * Set up the free-MR machinery: create the single-threaded workqueue that
 * runs the MR-free drain work, then reserve the loopback QPs it uses.
 * The workqueue is destroyed again if QP reservation fails.
 *
 * Returns 0 on success, -ENOMEM if the workqueue cannot be created, or
 * the error from hns_roce_v1_rsv_lp_qp().
 */
static int hns_roce_free_mr_init(struct hns_roce_dev *hr_dev)
{
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_free_mr *free_mr;
	struct hns_roce_v1_priv *priv;
	int ret = 0;

	priv = (struct hns_roce_v1_priv *)hr_dev->priv;
	free_mr = &priv->free_mr;

	free_mr->free_mr_wq = create_singlethread_workqueue("hns_roce_free_mr");
	if (!free_mr->free_mr_wq) {
		dev_err(dev, "Create free mr workqueue failed!\n");
		return -ENOMEM;
	}

	ret = hns_roce_v1_rsv_lp_qp(hr_dev);
	if (ret) {
		dev_err(dev, "Reserved loop qp failed(%d)!\n", ret);
		flush_workqueue(free_mr->free_mr_wq);
		destroy_workqueue(free_mr->free_mr_wq);
	}

	return ret;
}
1399 | ||
/*
 * Undo hns_roce_free_mr_init(): drain and destroy the free-MR workqueue
 * first (so no work item can still use the loopback QPs), then release
 * the reserved loopback QP/CQ/PD set.
 */
static void hns_roce_free_mr_free(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_free_mr *free_mr;
	struct hns_roce_v1_priv *priv;

	priv = (struct hns_roce_v1_priv *)hr_dev->priv;
	free_mr = &priv->free_mr;

	flush_workqueue(free_mr->free_mr_wq);
	destroy_workqueue(free_mr->free_mr_wq);

	hns_roce_v1_release_lp_qp(hr_dev);
}
1413 | ||
/**
 * hns_roce_v1_reset - reset RoCE
 * @hr_dev: RoCE device struct pointer
 * @dereset: true -- drop reset, false -- reset
 * return 0 - success , negative --fail
 */
static int hns_roce_v1_reset(struct hns_roce_dev *hr_dev, bool dereset)
{
	struct device_node *dsaf_node;
	struct device *dev = &hr_dev->pdev->dev;
	struct device_node *np = dev->of_node;
	struct fwnode_handle *fwnode;
	int ret;

	/* check if this is DT/ACPI case */
	if (dev_of_node(dev)) {
		dsaf_node = of_parse_phandle(np, "dsaf-handle", 0);
		if (!dsaf_node) {
			dev_err(dev, "could not find dsaf-handle\n");
			return -EINVAL;
		}
		fwnode = &dsaf_node->fwnode;
	} else if (is_acpi_device_node(dev->fwnode)) {
		struct acpi_reference_args args;

		ret = acpi_node_get_property_reference(dev->fwnode,
						       "dsaf-handle", 0, &args);
		if (ret) {
			dev_err(dev, "could not find dsaf-handle\n");
			return ret;
		}
		fwnode = acpi_fwnode_handle(args.adev);
	} else {
		dev_err(dev, "cannot read data from DT or ACPI\n");
		return -ENXIO;
	}

	/* Assert reset via the DSAF; on dereset, wait then release it */
	ret = hns_dsaf_roce_reset(fwnode, false);
	if (ret)
		return ret;

	if (dereset) {
		msleep(SLEEP_TIME_INTERVAL);
		ret = hns_dsaf_roce_reset(fwnode, true);
	}

	return ret;
}
1462 | ||
/*
 * Set up the destroy-QP machinery: enable requeueing and create the
 * single-threaded workqueue that runs deferred QP destruction.
 *
 * Returns 0 on success or -ENOMEM if the workqueue cannot be created.
 */
static int hns_roce_des_qp_init(struct hns_roce_dev *hr_dev)
{
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_v1_priv *priv;
	struct hns_roce_des_qp *des_qp;

	priv = (struct hns_roce_v1_priv *)hr_dev->priv;
	des_qp = &priv->des_qp;

	des_qp->requeue_flag = 1;
	des_qp->qp_wq = create_singlethread_workqueue("hns_roce_destroy_qp");
	if (!des_qp->qp_wq) {
		dev_err(dev, "Create destroy qp workqueue failed!\n");
		return -ENOMEM;
	}

	return 0;
}
1481 | ||
1482 | static void hns_roce_des_qp_free(struct hns_roce_dev *hr_dev) | |
1483 | { | |
1484 | struct hns_roce_v1_priv *priv; | |
1485 | struct hns_roce_des_qp *des_qp; | |
1486 | ||
016a0059 | 1487 | priv = (struct hns_roce_v1_priv *)hr_dev->priv; |
d838c481 WHX |
1488 | des_qp = &priv->des_qp; |
1489 | ||
1490 | des_qp->requeue_flag = 0; | |
1491 | flush_workqueue(des_qp->qp_wq); | |
1492 | destroy_workqueue(des_qp->qp_wq); | |
1493 | } | |
1494 | ||
/*
 * Read device identity from hardware registers and fill in the static
 * capability table for the hip06 (v1) engine.  Always returns 0.
 *
 * NOTE(review): roce_read() results are wrapped in le32_to_cpu() — verify
 * that roce_read() really returns raw little-endian values, otherwise the
 * conversion is redundant (and wrong on big-endian).
 */
static int hns_roce_v1_profile(struct hns_roce_dev *hr_dev)
{
	int i = 0;
	struct hns_roce_caps *caps = &hr_dev->caps;

	/* Identity registers */
	hr_dev->vendor_id = le32_to_cpu(roce_read(hr_dev, ROCEE_VENDOR_ID_REG));
	hr_dev->vendor_part_id = le32_to_cpu(roce_read(hr_dev,
					     ROCEE_VENDOR_PART_ID_REG));
	hr_dev->sys_image_guid = le32_to_cpu(roce_read(hr_dev,
					     ROCEE_SYS_IMAGE_GUID_L_REG)) |
				((u64)le32_to_cpu(roce_read(hr_dev,
					    ROCEE_SYS_IMAGE_GUID_H_REG)) << 32);
	hr_dev->hw_rev = HNS_ROCE_HW_VER1;

	/* Fixed hip06 capability limits */
	caps->num_qps = HNS_ROCE_V1_MAX_QP_NUM;
	caps->max_wqes = HNS_ROCE_V1_MAX_WQE_NUM;
	caps->min_wqes = HNS_ROCE_MIN_WQE_NUM;
	caps->num_cqs = HNS_ROCE_V1_MAX_CQ_NUM;
	caps->min_cqes = HNS_ROCE_MIN_CQE_NUM;
	caps->max_cqes = HNS_ROCE_V1_MAX_CQE_NUM;
	caps->max_sq_sg = HNS_ROCE_V1_SG_NUM;
	caps->max_rq_sg = HNS_ROCE_V1_SG_NUM;
	caps->max_sq_inline = HNS_ROCE_V1_INLINE_SIZE;
	caps->num_uars = HNS_ROCE_V1_UAR_NUM;
	caps->phy_num_uars = HNS_ROCE_V1_PHY_UAR_NUM;
	caps->num_aeq_vectors = HNS_ROCE_V1_AEQE_VEC_NUM;
	caps->num_comp_vectors = HNS_ROCE_V1_COMP_VEC_NUM;
	caps->num_other_vectors = HNS_ROCE_V1_ABNORMAL_VEC_NUM;
	caps->num_mtpts = HNS_ROCE_V1_MAX_MTPT_NUM;
	caps->num_mtt_segs = HNS_ROCE_V1_MAX_MTT_SEGS;
	caps->num_pds = HNS_ROCE_V1_MAX_PD_NUM;
	caps->max_qp_init_rdma = HNS_ROCE_V1_MAX_QP_INIT_RDMA;
	caps->max_qp_dest_rdma = HNS_ROCE_V1_MAX_QP_DEST_RDMA;
	caps->max_sq_desc_sz = HNS_ROCE_V1_MAX_SQ_DESC_SZ;
	caps->max_rq_desc_sz = HNS_ROCE_V1_MAX_RQ_DESC_SZ;
	caps->qpc_entry_sz = HNS_ROCE_V1_QPC_ENTRY_SIZE;
	caps->irrl_entry_sz = HNS_ROCE_V1_IRRL_ENTRY_SIZE;
	caps->cqc_entry_sz = HNS_ROCE_V1_CQC_ENTRY_SIZE;
	caps->mtpt_entry_sz = HNS_ROCE_V1_MTPT_ENTRY_SIZE;
	caps->mtt_entry_sz = HNS_ROCE_V1_MTT_ENTRY_SIZE;
	caps->cq_entry_sz = HNS_ROCE_V1_CQE_ENTRY_SIZE;
	caps->page_size_cap = HNS_ROCE_V1_PAGE_SIZE_SUPPORT;
	caps->reserved_lkey = 0;
	caps->reserved_pds = 0;
	caps->reserved_mrws = 1;
	caps->reserved_uars = 0;
	caps->reserved_cqs = 0;
	caps->chunk_sz = HNS_ROCE_V1_TABLE_CHUNK_SIZE;

	for (i = 0; i < caps->num_ports; i++)
		caps->pkey_table_len[i] = 1;

	for (i = 0; i < caps->num_ports; i++) {
		/* Six ports shared 16 GID in v1 engine */
		if (i >= (HNS_ROCE_V1_GID_NUM % caps->num_ports))
			caps->gid_table_len[i] = HNS_ROCE_V1_GID_NUM /
						 caps->num_ports;
		else
			caps->gid_table_len[i] = HNS_ROCE_V1_GID_NUM /
						 caps->num_ports + 1;
	}

	caps->ceqe_depth = HNS_ROCE_V1_COMP_EQE_NUM;
	caps->aeqe_depth = HNS_ROCE_V1_ASYNC_EQE_NUM;
	caps->local_ca_ack_delay = le32_to_cpu(roce_read(hr_dev,
							 ROCEE_ACK_DELAY_REG));
	caps->max_mtu = IB_MTU_2048;

	return 0;
}
1565 | ||
/*
 * Bring up the hip06 (v1) engine: program the DMAE user configuration,
 * then initialize the doorbell, RAQ, BT, tptr, destroy-QP and free-MR
 * subsystems in order, and finally open the ports.  On any failure the
 * already-initialized subsystems are torn down in reverse order.
 *
 * Returns 0 on success or the first subsystem's error code.
 */
static int hns_roce_v1_init(struct hns_roce_dev *hr_dev)
{
	int ret;
	u32 val;
	struct device *dev = &hr_dev->pdev->dev;

	/* DMAE user config */
	val = roce_read(hr_dev, ROCEE_DMAE_USER_CFG1_REG);
	roce_set_field(val, ROCEE_DMAE_USER_CFG1_ROCEE_CACHE_TB_CFG_M,
		       ROCEE_DMAE_USER_CFG1_ROCEE_CACHE_TB_CFG_S, 0xf);
	roce_set_field(val, ROCEE_DMAE_USER_CFG1_ROCEE_STREAM_ID_TB_CFG_M,
		       ROCEE_DMAE_USER_CFG1_ROCEE_STREAM_ID_TB_CFG_S,
		       1 << PAGES_SHIFT_16);
	roce_write(hr_dev, ROCEE_DMAE_USER_CFG1_REG, val);

	/*
	 * NOTE(review): CFG2 is read and modified but never written back —
	 * confirm whether a roce_write(ROCEE_DMAE_USER_CFG2_REG) is missing
	 * or the read-modify without write is intentional.
	 */
	val = roce_read(hr_dev, ROCEE_DMAE_USER_CFG2_REG);
	roce_set_field(val, ROCEE_DMAE_USER_CFG2_ROCEE_CACHE_PKT_CFG_M,
		       ROCEE_DMAE_USER_CFG2_ROCEE_CACHE_PKT_CFG_S, 0xf);
	roce_set_field(val, ROCEE_DMAE_USER_CFG2_ROCEE_STREAM_ID_PKT_CFG_M,
		       ROCEE_DMAE_USER_CFG2_ROCEE_STREAM_ID_PKT_CFG_S,
		       1 << PAGES_SHIFT_16);

	ret = hns_roce_db_init(hr_dev);
	if (ret) {
		dev_err(dev, "doorbell init failed!\n");
		return ret;
	}

	ret = hns_roce_raq_init(hr_dev);
	if (ret) {
		dev_err(dev, "raq init failed!\n");
		goto error_failed_raq_init;
	}

	ret = hns_roce_bt_init(hr_dev);
	if (ret) {
		dev_err(dev, "bt init failed!\n");
		goto error_failed_bt_init;
	}

	ret = hns_roce_tptr_init(hr_dev);
	if (ret) {
		dev_err(dev, "tptr init failed!\n");
		goto error_failed_tptr_init;
	}

	ret = hns_roce_des_qp_init(hr_dev);
	if (ret) {
		dev_err(dev, "des qp init failed!\n");
		goto error_failed_des_qp_init;
	}

	ret = hns_roce_free_mr_init(hr_dev);
	if (ret) {
		dev_err(dev, "free mr init failed!\n");
		goto error_failed_free_mr_init;
	}

	hns_roce_port_enable(hr_dev, HNS_ROCE_PORT_UP);

	return 0;

error_failed_free_mr_init:
	hns_roce_des_qp_free(hr_dev);

error_failed_des_qp_init:
	hns_roce_tptr_free(hr_dev);

error_failed_tptr_init:
	hns_roce_bt_free(hr_dev);

error_failed_bt_init:
	hns_roce_raq_free(hr_dev);

error_failed_raq_init:
	hns_roce_db_free(hr_dev);
	return ret;
}
1644 | ||
/*
 * Shut down the hip06 engine: close the ports first, then tear down the
 * subsystems in exactly the reverse order of hns_roce_v1_init().
 */
static void hns_roce_v1_exit(struct hns_roce_dev *hr_dev)
{
	hns_roce_port_enable(hr_dev, HNS_ROCE_PORT_DOWN);
	hns_roce_free_mr_free(hr_dev);
	hns_roce_des_qp_free(hr_dev);
	hns_roce_tptr_free(hr_dev);
	hns_roce_bt_free(hr_dev);
	hns_roce_raq_free(hr_dev);
	hns_roce_db_free(hr_dev);
}
1655 | ||
a680f2f3 WHX |
1656 | static int hns_roce_v1_cmd_pending(struct hns_roce_dev *hr_dev) |
1657 | { | |
1658 | u32 status = readl(hr_dev->reg_base + ROCEE_MB6_REG); | |
1659 | ||
1660 | return (!!(status & (1 << HCR_GO_BIT))); | |
1661 | } | |
1662 | ||
/*
 * Post a command to the hardware mailbox (HCR at ROCEE_MB1_REG).  Waits
 * (with cond_resched) up to GO_BIT_TIMEOUT_MSECS for any previous command
 * to finish, writes the parameter words, then rings the command word last
 * behind a write barrier so hardware sees a complete request.
 *
 * Returns 0 on success or -EAGAIN if the mailbox stayed busy.
 */
static int hns_roce_v1_post_mbox(struct hns_roce_dev *hr_dev, u64 in_param,
				 u64 out_param, u32 in_modifier, u8 op_modifier,
				 u16 op, u16 token, int event)
{
	u32 __iomem *hcr = (u32 __iomem *)(hr_dev->reg_base + ROCEE_MB1_REG);
	unsigned long end;
	u32 val = 0;

	/* Wait for the GO bit to clear before claiming the mailbox */
	end = msecs_to_jiffies(GO_BIT_TIMEOUT_MSECS) + jiffies;
	while (hns_roce_v1_cmd_pending(hr_dev)) {
		if (time_after(jiffies, end)) {
			dev_err(hr_dev->dev, "jiffies=%d end=%d\n",
				(int)jiffies, (int)end);
			return -EAGAIN;
		}
		cond_resched();
	}

	/* Assemble the command word: op, modifier, event flag, run, token */
	roce_set_field(val, ROCEE_MB6_ROCEE_MB_CMD_M, ROCEE_MB6_ROCEE_MB_CMD_S,
		       op);
	roce_set_field(val, ROCEE_MB6_ROCEE_MB_CMD_MDF_M,
		       ROCEE_MB6_ROCEE_MB_CMD_MDF_S, op_modifier);
	roce_set_bit(val, ROCEE_MB6_ROCEE_MB_EVENT_S, event);
	roce_set_bit(val, ROCEE_MB6_ROCEE_MB_HW_RUN_S, 1);
	roce_set_field(val, ROCEE_MB6_ROCEE_MB_TOKEN_M,
		       ROCEE_MB6_ROCEE_MB_TOKEN_S, token);

	/* Parameters first; the command word write below kicks hardware */
	writeq(in_param, hcr + 0);
	writeq(out_param, hcr + 2);
	writel(in_modifier, hcr + 4);
	/* Memory barrier */
	wmb();

	writel(val, hcr + 5);

	mmiowb();

	return 0;
}
1702 | ||
1703 | static int hns_roce_v1_chk_mbox(struct hns_roce_dev *hr_dev, | |
1704 | unsigned long timeout) | |
1705 | { | |
1706 | u8 __iomem *hcr = hr_dev->reg_base + ROCEE_MB1_REG; | |
1707 | unsigned long end = 0; | |
1708 | u32 status = 0; | |
1709 | ||
1710 | end = msecs_to_jiffies(timeout) + jiffies; | |
1711 | while (hns_roce_v1_cmd_pending(hr_dev) && time_before(jiffies, end)) | |
1712 | cond_resched(); | |
1713 | ||
1714 | if (hns_roce_v1_cmd_pending(hr_dev)) { | |
1715 | dev_err(hr_dev->dev, "[cmd_poll]hw run cmd TIMEDOUT!\n"); | |
1716 | return -ETIMEDOUT; | |
1717 | } | |
1718 | ||
1719 | status = le32_to_cpu((__force __be32) | |
1720 | __raw_readl(hcr + HCR_STATUS_OFFSET)); | |
1721 | if ((status & STATUS_MASK) != 0x1) { | |
1722 | dev_err(hr_dev->dev, "mailbox status 0x%x!\n", status); | |
1723 | return -EBUSY; | |
1724 | } | |
1725 | ||
1726 | return 0; | |
1727 | } | |
1728 | ||
b5ff0f61 WHX |
1729 | static int hns_roce_v1_set_gid(struct hns_roce_dev *hr_dev, u8 port, |
1730 | int gid_index, union ib_gid *gid, | |
1731 | const struct ib_gid_attr *attr) | |
9a443537 | 1732 | { |
1733 | u32 *p = NULL; | |
1734 | u8 gid_idx = 0; | |
1735 | ||
1736 | gid_idx = hns_get_gid_index(hr_dev, port, gid_index); | |
1737 | ||
1738 | p = (u32 *)&gid->raw[0]; | |
1739 | roce_raw_write(*p, hr_dev->reg_base + ROCEE_PORT_GID_L_0_REG + | |
1740 | (HNS_ROCE_V1_GID_NUM * gid_idx)); | |
1741 | ||
1742 | p = (u32 *)&gid->raw[4]; | |
1743 | roce_raw_write(*p, hr_dev->reg_base + ROCEE_PORT_GID_ML_0_REG + | |
1744 | (HNS_ROCE_V1_GID_NUM * gid_idx)); | |
1745 | ||
1746 | p = (u32 *)&gid->raw[8]; | |
1747 | roce_raw_write(*p, hr_dev->reg_base + ROCEE_PORT_GID_MH_0_REG + | |
1748 | (HNS_ROCE_V1_GID_NUM * gid_idx)); | |
1749 | ||
1750 | p = (u32 *)&gid->raw[0xc]; | |
1751 | roce_raw_write(*p, hr_dev->reg_base + ROCEE_PORT_GID_H_0_REG + | |
1752 | (HNS_ROCE_V1_GID_NUM * gid_idx)); | |
b5ff0f61 WHX |
1753 | |
1754 | return 0; | |
9a443537 | 1755 | } |
1756 | ||
a74dc41d WHX |
/*
 * Program the source MAC for a physical port into the SMAC_L/SMAC_H
 * registers.  The low 32 bits go to SMAC_L; the high 16 bits are
 * merged into SMAC_H via read-modify-write (SMAC_H also carries the
 * port MTU field, which must be preserved).
 *
 * Returns 0 on success, or the error from recreating the loopback QPs.
 */
static int hns_roce_v1_set_mac(struct hns_roce_dev *hr_dev, u8 phy_port,
			       u8 *addr)
{
	u32 reg_smac_l;
	u16 reg_smac_h;
	u16 *p_h;
	u32 *p;
	u32 val;

	/*
	 * When mac changed, loopback may fail
	 * because of smac not equal to dmac.
	 * We Need to release and create reserved qp again.
	 */
	if (hr_dev->hw->dereg_mr) {
		int ret;

		ret = hns_roce_v1_recreate_lp_qp(hr_dev);
		/* -ETIMEDOUT is tolerated here; the MAC is still updated. */
		if (ret && ret != -ETIMEDOUT)
			return ret;
	}

	/* Low 32 bits of the MAC -> SMAC_L. */
	p = (u32 *)(&addr[0]);
	reg_smac_l = *p;
	roce_raw_write(reg_smac_l, hr_dev->reg_base + ROCEE_SMAC_L_0_REG +
		       PHY_PORT_OFFSET * phy_port);

	/* High 16 bits of the MAC merged into SMAC_H (RMW keeps MTU bits). */
	val = roce_read(hr_dev,
			ROCEE_SMAC_H_0_REG + phy_port * PHY_PORT_OFFSET);
	p_h = (u16 *)(&addr[4]);
	reg_smac_h = *p_h;
	roce_set_field(val, ROCEE_SMAC_H_ROCEE_SMAC_H_M,
		       ROCEE_SMAC_H_ROCEE_SMAC_H_S, reg_smac_h);
	roce_write(hr_dev, ROCEE_SMAC_H_0_REG + phy_port * PHY_PORT_OFFSET,
		   val);

	return 0;
}
1795 | ||
d61d6de0 BVA |
1796 | static void hns_roce_v1_set_mtu(struct hns_roce_dev *hr_dev, u8 phy_port, |
1797 | enum ib_mtu mtu) | |
9a443537 | 1798 | { |
1799 | u32 val; | |
1800 | ||
1801 | val = roce_read(hr_dev, | |
1802 | ROCEE_SMAC_H_0_REG + phy_port * PHY_PORT_OFFSET); | |
1803 | roce_set_field(val, ROCEE_SMAC_H_ROCEE_PORT_MTU_M, | |
1804 | ROCEE_SMAC_H_ROCEE_PORT_MTU_S, mtu); | |
1805 | roce_write(hr_dev, ROCEE_SMAC_H_0_REG + phy_port * PHY_PORT_OFFSET, | |
1806 | val); | |
1807 | } | |
1808 | ||
d61d6de0 BVA |
/*
 * Fill an MPT (memory protection table) entry into the mailbox buffer
 * for the given memory region.
 *
 * For MR_TYPE_DMA regions only the header fields are written.  For user
 * MRs, up to HNS_ROCE_MAX_INNER_MTPT_NUM page addresses (4K-aligned,
 * stored >> 12) are recorded directly in the entry; the rest are
 * referenced indirectly through the PBL at mr->pbl_dma_addr.
 *
 * Returns 0 on success or -ENOMEM if the temporary page list can't be
 * allocated.
 */
static int hns_roce_v1_write_mtpt(void *mb_buf, struct hns_roce_mr *mr,
				  unsigned long mtpt_idx)
{
	struct hns_roce_v1_mpt_entry *mpt_entry;
	struct scatterlist *sg;
	u64 *pages;
	int entry;
	int i;

	/* MPT filled into mailbox buf */
	mpt_entry = (struct hns_roce_v1_mpt_entry *)mb_buf;
	memset(mpt_entry, 0, sizeof(*mpt_entry));

	/* Key, page size, type and access flags. */
	roce_set_field(mpt_entry->mpt_byte_4, MPT_BYTE_4_KEY_STATE_M,
		       MPT_BYTE_4_KEY_STATE_S, KEY_VALID);
	roce_set_field(mpt_entry->mpt_byte_4, MPT_BYTE_4_KEY_M,
		       MPT_BYTE_4_KEY_S, mr->key);
	roce_set_field(mpt_entry->mpt_byte_4, MPT_BYTE_4_PAGE_SIZE_M,
		       MPT_BYTE_4_PAGE_SIZE_S, MR_SIZE_4K);
	roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_MW_TYPE_S, 0);
	roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_MW_BIND_ENABLE_S,
		     (mr->access & IB_ACCESS_MW_BIND ? 1 : 0));
	roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_OWN_S, 0);
	roce_set_field(mpt_entry->mpt_byte_4, MPT_BYTE_4_MEMORY_LOCATION_TYPE_M,
		       MPT_BYTE_4_MEMORY_LOCATION_TYPE_S, mr->type);
	roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_REMOTE_ATOMIC_S, 0);
	roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_LOCAL_WRITE_S,
		     (mr->access & IB_ACCESS_LOCAL_WRITE ? 1 : 0));
	roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_REMOTE_WRITE_S,
		     (mr->access & IB_ACCESS_REMOTE_WRITE ? 1 : 0));
	roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_REMOTE_READ_S,
		     (mr->access & IB_ACCESS_REMOTE_READ ? 1 : 0));
	roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_REMOTE_INVAL_ENABLE_S,
		     0);
	roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_ADDRESS_TYPE_S, 0);

	roce_set_field(mpt_entry->mpt_byte_12, MPT_BYTE_12_PBL_ADDR_H_M,
		       MPT_BYTE_12_PBL_ADDR_H_S, 0);
	roce_set_field(mpt_entry->mpt_byte_12, MPT_BYTE_12_MW_BIND_COUNTER_M,
		       MPT_BYTE_12_MW_BIND_COUNTER_S, 0);

	/* NOTE(review): mr->size is truncated to 32 bits here — verify
	 * regions larger than 4 GB are rejected earlier in the stack.
	 */
	mpt_entry->virt_addr_l = (u32)mr->iova;
	mpt_entry->virt_addr_h = (u32)(mr->iova >> 32);
	mpt_entry->length = (u32)mr->size;

	roce_set_field(mpt_entry->mpt_byte_28, MPT_BYTE_28_PD_M,
		       MPT_BYTE_28_PD_S, mr->pd);
	roce_set_field(mpt_entry->mpt_byte_28, MPT_BYTE_28_L_KEY_IDX_L_M,
		       MPT_BYTE_28_L_KEY_IDX_L_S, mtpt_idx);
	roce_set_field(mpt_entry->mpt_byte_64, MPT_BYTE_64_L_KEY_IDX_H_M,
		       MPT_BYTE_64_L_KEY_IDX_H_S, mtpt_idx >> MTPT_IDX_SHIFT);

	/* DMA memory register */
	if (mr->type == MR_TYPE_DMA)
		return 0;

	/* Scratch buffer for the page frame numbers gathered below. */
	pages = (u64 *) __get_free_page(GFP_KERNEL);
	if (!pages)
		return -ENOMEM;

	i = 0;
	for_each_sg(mr->umem->sg_head.sgl, sg, mr->umem->nmap, entry) {
		/* Store 4K page frame numbers (address >> 12). */
		pages[i] = ((u64)sg_dma_address(sg)) >> 12;

		/* Directly record to MTPT table firstly 7 entry */
		if (i >= HNS_ROCE_MAX_INNER_MTPT_NUM)
			break;
		i++;
	}

	/* Register user mr */
	for (i = 0; i < HNS_ROCE_MAX_INNER_MTPT_NUM; i++) {
		/* Each PA straddles two registers; the split varies per slot. */
		switch (i) {
		case 0:
			mpt_entry->pa0_l = cpu_to_le32((u32)(pages[i]));
			roce_set_field(mpt_entry->mpt_byte_36,
				MPT_BYTE_36_PA0_H_M,
				MPT_BYTE_36_PA0_H_S,
				cpu_to_le32((u32)(pages[i] >> PAGES_SHIFT_32)));
			break;
		case 1:
			roce_set_field(mpt_entry->mpt_byte_36,
				MPT_BYTE_36_PA1_L_M,
				MPT_BYTE_36_PA1_L_S,
				cpu_to_le32((u32)(pages[i])));
			roce_set_field(mpt_entry->mpt_byte_40,
				MPT_BYTE_40_PA1_H_M,
				MPT_BYTE_40_PA1_H_S,
				cpu_to_le32((u32)(pages[i] >> PAGES_SHIFT_24)));
			break;
		case 2:
			roce_set_field(mpt_entry->mpt_byte_40,
				MPT_BYTE_40_PA2_L_M,
				MPT_BYTE_40_PA2_L_S,
				cpu_to_le32((u32)(pages[i])));
			roce_set_field(mpt_entry->mpt_byte_44,
				MPT_BYTE_44_PA2_H_M,
				MPT_BYTE_44_PA2_H_S,
				cpu_to_le32((u32)(pages[i] >> PAGES_SHIFT_16)));
			break;
		case 3:
			roce_set_field(mpt_entry->mpt_byte_44,
				MPT_BYTE_44_PA3_L_M,
				MPT_BYTE_44_PA3_L_S,
				cpu_to_le32((u32)(pages[i])));
			roce_set_field(mpt_entry->mpt_byte_48,
				MPT_BYTE_48_PA3_H_M,
				MPT_BYTE_48_PA3_H_S,
				cpu_to_le32((u32)(pages[i] >> PAGES_SHIFT_8)));
			break;
		case 4:
			mpt_entry->pa4_l = cpu_to_le32((u32)(pages[i]));
			roce_set_field(mpt_entry->mpt_byte_56,
				MPT_BYTE_56_PA4_H_M,
				MPT_BYTE_56_PA4_H_S,
				cpu_to_le32((u32)(pages[i] >> PAGES_SHIFT_32)));
			break;
		case 5:
			roce_set_field(mpt_entry->mpt_byte_56,
				MPT_BYTE_56_PA5_L_M,
				MPT_BYTE_56_PA5_L_S,
				cpu_to_le32((u32)(pages[i])));
			roce_set_field(mpt_entry->mpt_byte_60,
				MPT_BYTE_60_PA5_H_M,
				MPT_BYTE_60_PA5_H_S,
				cpu_to_le32((u32)(pages[i] >> PAGES_SHIFT_24)));
			break;
		case 6:
			roce_set_field(mpt_entry->mpt_byte_60,
				MPT_BYTE_60_PA6_L_M,
				MPT_BYTE_60_PA6_L_S,
				cpu_to_le32((u32)(pages[i])));
			roce_set_field(mpt_entry->mpt_byte_64,
				MPT_BYTE_64_PA6_H_M,
				MPT_BYTE_64_PA6_H_S,
				cpu_to_le32((u32)(pages[i] >> PAGES_SHIFT_16)));
			break;
		default:
			break;
		}
	}

	free_page((unsigned long) pages);

	/* Remaining pages are reached through the PBL base address. */
	mpt_entry->pbl_addr_l = (u32)(mr->pbl_dma_addr);

	roce_set_field(mpt_entry->mpt_byte_12, MPT_BYTE_12_PBL_ADDR_H_M,
		       MPT_BYTE_12_PBL_ADDR_H_S,
		       ((u32)(mr->pbl_dma_addr >> 32)));

	return 0;
}
1961 | ||
1962 | static void *get_cqe(struct hns_roce_cq *hr_cq, int n) | |
1963 | { | |
1964 | return hns_roce_buf_offset(&hr_cq->hr_buf.hr_buf, | |
1965 | n * HNS_ROCE_V1_CQE_ENTRY_SIZE); | |
1966 | } | |
1967 | ||
1968 | static void *get_sw_cqe(struct hns_roce_cq *hr_cq, int n) | |
1969 | { | |
1970 | struct hns_roce_cqe *hr_cqe = get_cqe(hr_cq, n & hr_cq->ib_cq.cqe); | |
1971 | ||
1972 | /* Get cqe when Owner bit is Conversely with the MSB of cons_idx */ | |
1973 | return (roce_get_bit(hr_cqe->cqe_byte_4, CQE_BYTE_4_OWNER_S) ^ | |
1974 | !!(n & (hr_cq->ib_cq.cqe + 1))) ? hr_cqe : NULL; | |
1975 | } | |
1976 | ||
1977 | static struct hns_roce_cqe *next_cqe_sw(struct hns_roce_cq *hr_cq) | |
1978 | { | |
1979 | return get_sw_cqe(hr_cq, hr_cq->cons_index); | |
1980 | } | |
1981 | ||
d61d6de0 | 1982 | static void hns_roce_v1_cq_set_ci(struct hns_roce_cq *hr_cq, u32 cons_index) |
9a443537 | 1983 | { |
1984 | u32 doorbell[2]; | |
1985 | ||
1986 | doorbell[0] = cons_index & ((hr_cq->cq_depth << 1) - 1); | |
5b0ff9a0 | 1987 | doorbell[1] = 0; |
9a443537 | 1988 | roce_set_bit(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_HW_SYNS_S, 1); |
1989 | roce_set_field(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_M, | |
1990 | ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_S, 3); | |
1991 | roce_set_field(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_MDF_M, | |
1992 | ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_MDF_S, 0); | |
1993 | roce_set_field(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_INP_H_M, | |
1994 | ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_INP_H_S, hr_cq->cqn); | |
1995 | ||
1996 | hns_roce_write64_k(doorbell, hr_cq->cq_db_l); | |
1997 | } | |
1998 | ||
/*
 * Remove all CQEs belonging to QP @qpn from the CQ (lock must be held
 * by the caller).  Surviving CQEs are compacted backwards over the
 * removed ones, preserving each destination slot's owner bit so the
 * hardware/software ownership protocol is not disturbed.  @srq is
 * unused — the v1 engine has no SRQ support.
 */
static void __hns_roce_v1_cq_clean(struct hns_roce_cq *hr_cq, u32 qpn,
				   struct hns_roce_srq *srq)
{
	struct hns_roce_cqe *cqe, *dest;
	u32 prod_index;
	int nfreed = 0;
	u8 owner_bit;

	/*
	 * Find the effective producer index: the first index at or after
	 * cons_index with no software-owned CQE (bounded by one full ring).
	 */
	for (prod_index = hr_cq->cons_index; get_sw_cqe(hr_cq, prod_index);
	     ++prod_index) {
		if (prod_index == hr_cq->cons_index + hr_cq->ib_cq.cqe)
			break;
	}

	/*
	 * Now backwards through the CQ, removing CQ entries
	 * that match our QP by overwriting them with next entries.
	 */
	while ((int) --prod_index - (int) hr_cq->cons_index >= 0) {
		cqe = get_cqe(hr_cq, prod_index & hr_cq->ib_cq.cqe);
		if ((roce_get_field(cqe->cqe_byte_16, CQE_BYTE_16_LOCAL_QPN_M,
				    CQE_BYTE_16_LOCAL_QPN_S) &
				    HNS_ROCE_CQE_QPN_MASK) == qpn) {
			/* In v1 engine, not support SRQ */
			++nfreed;
		} else if (nfreed) {
			/* Shift this CQE up by nfreed slots, keeping the
			 * destination's original owner bit.
			 */
			dest = get_cqe(hr_cq, (prod_index + nfreed) &
				       hr_cq->ib_cq.cqe);
			owner_bit = roce_get_bit(dest->cqe_byte_4,
						 CQE_BYTE_4_OWNER_S);
			memcpy(dest, cqe, sizeof(*cqe));
			roce_set_bit(dest->cqe_byte_4, CQE_BYTE_4_OWNER_S,
				     owner_bit);
		}
	}

	if (nfreed) {
		hr_cq->cons_index += nfreed;
		/*
		 * Make sure update of buffer contents is done before
		 * updating consumer index.
		 */
		wmb();

		hns_roce_v1_cq_set_ci(hr_cq, hr_cq->cons_index);
	}
}
2046 | ||
/* Locked wrapper around __hns_roce_v1_cq_clean(). */
static void hns_roce_v1_cq_clean(struct hns_roce_cq *hr_cq, u32 qpn,
				 struct hns_roce_srq *srq)
{
	spin_lock_irq(&hr_cq->lock);
	__hns_roce_v1_cq_clean(hr_cq, qpn, srq);
	spin_unlock_irq(&hr_cq->lock);
}
2054 | ||
d61d6de0 BVA |
/*
 * Fill a CQ context entry into the mailbox buffer for the hardware.
 *
 * @mtts:       CQE buffer address list (mtts[0] is the first block).
 * @dma_handle: DMA address of the CQ's base-address table entry.
 * @nent:       number of CQEs (power of two; log2 is programmed).
 * @vector:     completion event queue number for this CQ.
 *
 * Also wires up the per-CQ "tptr" slot from the driver's tptr table;
 * this appears to be a tail-pointer mailbox the hardware reads
 * (updated in poll_cq) — see tptr_table setup elsewhere in this file.
 */
static void hns_roce_v1_write_cqc(struct hns_roce_dev *hr_dev,
				  struct hns_roce_cq *hr_cq, void *mb_buf,
				  u64 *mtts, dma_addr_t dma_handle, int nent,
				  u32 vector)
{
	struct hns_roce_cq_context *cq_context = NULL;
	struct hns_roce_buf_list *tptr_buf;
	struct hns_roce_v1_priv *priv;
	dma_addr_t tptr_dma_addr;
	int offset;

	priv = (struct hns_roce_v1_priv *)hr_dev->priv;
	tptr_buf = &priv->tptr_table.tptr_buf;

	cq_context = mb_buf;
	memset(cq_context, 0, sizeof(*cq_context));

	/* Get the tptr for this CQ. */
	offset = hr_cq->cqn * HNS_ROCE_V1_TPTR_ENTRY_SIZE;
	tptr_dma_addr = tptr_buf->map + offset;
	hr_cq->tptr_addr = (u16 *)(tptr_buf->buf + offset);

	/* Register cq_context members */
	roce_set_field(cq_context->cqc_byte_4,
		       CQ_CONTEXT_CQC_BYTE_4_CQC_STATE_M,
		       CQ_CONTEXT_CQC_BYTE_4_CQC_STATE_S, CQ_STATE_VALID);
	roce_set_field(cq_context->cqc_byte_4, CQ_CONTEXT_CQC_BYTE_4_CQN_M,
		       CQ_CONTEXT_CQC_BYTE_4_CQN_S, hr_cq->cqn);
	cq_context->cqc_byte_4 = cpu_to_le32(cq_context->cqc_byte_4);

	/* Base-address-table entry for this CQ, split low/high. */
	cq_context->cq_bt_l = (u32)dma_handle;
	cq_context->cq_bt_l = cpu_to_le32(cq_context->cq_bt_l);

	roce_set_field(cq_context->cqc_byte_12,
		       CQ_CONTEXT_CQC_BYTE_12_CQ_BT_H_M,
		       CQ_CONTEXT_CQC_BYTE_12_CQ_BT_H_S,
		       ((u64)dma_handle >> 32));
	roce_set_field(cq_context->cqc_byte_12,
		       CQ_CONTEXT_CQC_BYTE_12_CQ_CQE_SHIFT_M,
		       CQ_CONTEXT_CQC_BYTE_12_CQ_CQE_SHIFT_S,
		       ilog2((unsigned int)nent));
	roce_set_field(cq_context->cqc_byte_12, CQ_CONTEXT_CQC_BYTE_12_CEQN_M,
		       CQ_CONTEXT_CQC_BYTE_12_CEQN_S, vector);
	cq_context->cqc_byte_12 = cpu_to_le32(cq_context->cqc_byte_12);

	cq_context->cur_cqe_ba0_l = (u32)(mtts[0]);
	cq_context->cur_cqe_ba0_l = cpu_to_le32(cq_context->cur_cqe_ba0_l);

	roce_set_field(cq_context->cqc_byte_20,
		       CQ_CONTEXT_CQC_BYTE_20_CUR_CQE_BA0_H_M,
		       CQ_CONTEXT_CQC_BYTE_20_CUR_CQE_BA0_H_S,
		       cpu_to_le32((mtts[0]) >> 32));
	/* Dedicated hardware, directly set 0 */
	roce_set_field(cq_context->cqc_byte_20,
		       CQ_CONTEXT_CQC_BYTE_20_CQ_CUR_INDEX_M,
		       CQ_CONTEXT_CQC_BYTE_20_CQ_CUR_INDEX_S, 0);
	/*
	 * 44 = 32 + 12, When evaluating addr to hardware, shift 12 because of
	 * using 4K page, and shift more 32 because of
	 * caculating the high 32 bit value evaluated to hardware.
	 */
	roce_set_field(cq_context->cqc_byte_20,
		       CQ_CONTEXT_CQC_BYTE_20_CQE_TPTR_ADDR_H_M,
		       CQ_CONTEXT_CQC_BYTE_20_CQE_TPTR_ADDR_H_S,
		       tptr_dma_addr >> 44);
	cq_context->cqc_byte_20 = cpu_to_le32(cq_context->cqc_byte_20);

	cq_context->cqe_tptr_addr_l = (u32)(tptr_dma_addr >> 12);

	roce_set_field(cq_context->cqc_byte_32,
		       CQ_CONTEXT_CQC_BYTE_32_CUR_CQE_BA1_H_M,
		       CQ_CONTEXT_CQC_BYTE_32_CUR_CQE_BA1_H_S, 0);
	roce_set_bit(cq_context->cqc_byte_32,
		     CQ_CONTEXT_CQC_BYTE_32_SE_FLAG_S, 0);
	roce_set_bit(cq_context->cqc_byte_32,
		     CQ_CONTEXT_CQC_BYTE_32_CE_FLAG_S, 0);
	roce_set_bit(cq_context->cqc_byte_32,
		     CQ_CONTEXT_CQC_BYTE_32_NOTIFICATION_FLAG_S, 0);
	roce_set_bit(cq_context->cqc_byte_32,
		     CQ_CQNTEXT_CQC_BYTE_32_TYPE_OF_COMPLETION_NOTIFICATION_S,
		     0);
	/* The initial value of cq's ci is 0 */
	roce_set_field(cq_context->cqc_byte_32,
		       CQ_CONTEXT_CQC_BYTE_32_CQ_CONS_IDX_M,
		       CQ_CONTEXT_CQC_BYTE_32_CQ_CONS_IDX_S, 0);
	cq_context->cqc_byte_32 = cpu_to_le32(cq_context->cqc_byte_32);
}
2142 | ||
/* CQ moderation (count/period) is not supported by this hardware. */
static int hns_roce_v1_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
{
	return -EOPNOTSUPP;
}
2147 | ||
d61d6de0 BVA |
2148 | static int hns_roce_v1_req_notify_cq(struct ib_cq *ibcq, |
2149 | enum ib_cq_notify_flags flags) | |
9a443537 | 2150 | { |
2151 | struct hns_roce_cq *hr_cq = to_hr_cq(ibcq); | |
2152 | u32 notification_flag; | |
2153 | u32 doorbell[2]; | |
9a443537 | 2154 | |
2155 | notification_flag = (flags & IB_CQ_SOLICITED_MASK) == | |
2156 | IB_CQ_SOLICITED ? CQ_DB_REQ_NOT : CQ_DB_REQ_NOT_SOL; | |
2157 | /* | |
e84e40be S |
2158 | * flags = 0; Notification Flag = 1, next |
2159 | * flags = 1; Notification Flag = 0, solocited | |
2160 | */ | |
9a443537 | 2161 | doorbell[0] = hr_cq->cons_index & ((hr_cq->cq_depth << 1) - 1); |
2162 | roce_set_bit(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_HW_SYNS_S, 1); | |
2163 | roce_set_field(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_M, | |
2164 | ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_S, 3); | |
2165 | roce_set_field(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_MDF_M, | |
2166 | ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_MDF_S, 1); | |
2167 | roce_set_field(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_INP_H_M, | |
2168 | ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_INP_H_S, | |
2169 | hr_cq->cqn | notification_flag); | |
2170 | ||
2171 | hns_roce_write64_k(doorbell, hr_cq->cq_db_l); | |
2172 | ||
87809f83 | 2173 | return 0; |
9a443537 | 2174 | } |
2175 | ||
/*
 * Consume one CQE and translate it into an ib_wc work completion.
 *
 * @cur_qp caches the QP of the previous CQE across a poll batch; it is
 * looked up again only when the QPN changes.  Advances the CQ consumer
 * index and the matching SQ/RQ tail pointer.
 *
 * Returns 0 when a completion was produced, -EAGAIN when the CQ is
 * empty, -EINVAL when the CQE references an unknown QPN.
 */
static int hns_roce_v1_poll_one(struct hns_roce_cq *hr_cq,
				struct hns_roce_qp **cur_qp, struct ib_wc *wc)
{
	int qpn;
	int is_send;
	u16 wqe_ctr;
	u32 status;
	u32 opcode;
	struct hns_roce_cqe *cqe;
	struct hns_roce_qp *hr_qp;
	struct hns_roce_wq *wq;
	struct hns_roce_wqe_ctrl_seg *sq_wqe;
	struct hns_roce_dev *hr_dev = to_hr_dev(hr_cq->ib_cq.device);
	struct device *dev = &hr_dev->pdev->dev;

	/* Find cqe according consumer index */
	cqe = next_cqe_sw(hr_cq);
	if (!cqe)
		return -EAGAIN;

	++hr_cq->cons_index;
	/* Memory barrier: read the CQE contents only after ownership check. */
	rmb();
	/* 0->SQ, 1->RQ */
	is_send = !(roce_get_bit(cqe->cqe_byte_4, CQE_BYTE_4_SQ_RQ_FLAG_S));

	/* Local_qpn in UD cqe is always 1, so it needs to compute new qpn */
	if (roce_get_field(cqe->cqe_byte_16, CQE_BYTE_16_LOCAL_QPN_M,
			   CQE_BYTE_16_LOCAL_QPN_S) <= 1) {
		qpn = roce_get_field(cqe->cqe_byte_20, CQE_BYTE_20_PORT_NUM_M,
				     CQE_BYTE_20_PORT_NUM_S) +
		      roce_get_field(cqe->cqe_byte_16, CQE_BYTE_16_LOCAL_QPN_M,
				     CQE_BYTE_16_LOCAL_QPN_S) *
		      HNS_ROCE_MAX_PORTS;
	} else {
		qpn = roce_get_field(cqe->cqe_byte_16, CQE_BYTE_16_LOCAL_QPN_M,
				     CQE_BYTE_16_LOCAL_QPN_S);
	}

	/* Re-resolve the QP only when the QPN changed from the last CQE. */
	if (!*cur_qp || (qpn & HNS_ROCE_CQE_QPN_MASK) != (*cur_qp)->qpn) {
		hr_qp = __hns_roce_qp_lookup(hr_dev, qpn);
		if (unlikely(!hr_qp)) {
			dev_err(dev, "CQ %06lx with entry for unknown QPN %06x\n",
				hr_cq->cqn, (qpn & HNS_ROCE_CQE_QPN_MASK));
			return -EINVAL;
		}

		*cur_qp = hr_qp;
	}

	wc->qp = &(*cur_qp)->ibqp;
	wc->vendor_err = 0;

	/* Map the hardware status code onto the IB completion status. */
	status = roce_get_field(cqe->cqe_byte_4,
				CQE_BYTE_4_STATUS_OF_THE_OPERATION_M,
				CQE_BYTE_4_STATUS_OF_THE_OPERATION_S) &
				HNS_ROCE_CQE_STATUS_MASK;
	switch (status) {
	case HNS_ROCE_CQE_SUCCESS:
		wc->status = IB_WC_SUCCESS;
		break;
	case HNS_ROCE_CQE_SYNDROME_LOCAL_LENGTH_ERR:
		wc->status = IB_WC_LOC_LEN_ERR;
		break;
	case HNS_ROCE_CQE_SYNDROME_LOCAL_QP_OP_ERR:
		wc->status = IB_WC_LOC_QP_OP_ERR;
		break;
	case HNS_ROCE_CQE_SYNDROME_LOCAL_PROT_ERR:
		wc->status = IB_WC_LOC_PROT_ERR;
		break;
	case HNS_ROCE_CQE_SYNDROME_WR_FLUSH_ERR:
		wc->status = IB_WC_WR_FLUSH_ERR;
		break;
	case HNS_ROCE_CQE_SYNDROME_MEM_MANAGE_OPERATE_ERR:
		wc->status = IB_WC_MW_BIND_ERR;
		break;
	case HNS_ROCE_CQE_SYNDROME_BAD_RESP_ERR:
		wc->status = IB_WC_BAD_RESP_ERR;
		break;
	case HNS_ROCE_CQE_SYNDROME_LOCAL_ACCESS_ERR:
		wc->status = IB_WC_LOC_ACCESS_ERR;
		break;
	case HNS_ROCE_CQE_SYNDROME_REMOTE_INVAL_REQ_ERR:
		wc->status = IB_WC_REM_INV_REQ_ERR;
		break;
	case HNS_ROCE_CQE_SYNDROME_REMOTE_ACCESS_ERR:
		wc->status = IB_WC_REM_ACCESS_ERR;
		break;
	case HNS_ROCE_CQE_SYNDROME_REMOTE_OP_ERR:
		wc->status = IB_WC_REM_OP_ERR;
		break;
	case HNS_ROCE_CQE_SYNDROME_TRANSPORT_RETRY_EXC_ERR:
		wc->status = IB_WC_RETRY_EXC_ERR;
		break;
	case HNS_ROCE_CQE_SYNDROME_RNR_RETRY_EXC_ERR:
		wc->status = IB_WC_RNR_RETRY_EXC_ERR;
		break;
	default:
		wc->status = IB_WC_GENERAL_ERR;
		break;
	}

	/* CQE status error, directly return */
	if (wc->status != IB_WC_SUCCESS)
		return 0;

	if (is_send) {
		/* SQ conrespond to CQE */
		sq_wqe = get_send_wqe(*cur_qp, roce_get_field(cqe->cqe_byte_4,
						CQE_BYTE_4_WQE_INDEX_M,
						CQE_BYTE_4_WQE_INDEX_S)&
						((*cur_qp)->sq.wqe_cnt-1));
		switch (le32_to_cpu(sq_wqe->flag) & HNS_ROCE_WQE_OPCODE_MASK) {
		case HNS_ROCE_WQE_OPCODE_SEND:
			wc->opcode = IB_WC_SEND;
			break;
		case HNS_ROCE_WQE_OPCODE_RDMA_READ:
			wc->opcode = IB_WC_RDMA_READ;
			wc->byte_len = le32_to_cpu(cqe->byte_cnt);
			break;
		case HNS_ROCE_WQE_OPCODE_RDMA_WRITE:
			wc->opcode = IB_WC_RDMA_WRITE;
			break;
		case HNS_ROCE_WQE_OPCODE_LOCAL_INV:
			wc->opcode = IB_WC_LOCAL_INV;
			break;
		case HNS_ROCE_WQE_OPCODE_UD_SEND:
			wc->opcode = IB_WC_SEND;
			break;
		default:
			wc->status = IB_WC_GENERAL_ERR;
			break;
		}
		wc->wc_flags = (le32_to_cpu(sq_wqe->flag) & HNS_ROCE_WQE_IMM ?
				IB_WC_WITH_IMM : 0);

		wq = &(*cur_qp)->sq;
		if ((*cur_qp)->sq_signal_bits) {
			/*
			 * If sg_signal_bit is 1,
			 * firstly tail pointer updated to wqe
			 * which current cqe correspond to
			 */
			wqe_ctr = (u16)roce_get_field(cqe->cqe_byte_4,
						      CQE_BYTE_4_WQE_INDEX_M,
						      CQE_BYTE_4_WQE_INDEX_S);
			wq->tail += (wqe_ctr - (u16)wq->tail) &
				    (wq->wqe_cnt - 1);
		}
		wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
		++wq->tail;
	} else {
		/* RQ conrespond to CQE */
		wc->byte_len = le32_to_cpu(cqe->byte_cnt);
		opcode = roce_get_field(cqe->cqe_byte_4,
					CQE_BYTE_4_OPERATION_TYPE_M,
					CQE_BYTE_4_OPERATION_TYPE_S) &
					HNS_ROCE_CQE_OPCODE_MASK;
		switch (opcode) {
		case HNS_ROCE_OPCODE_RDMA_WITH_IMM_RECEIVE:
			wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
			wc->wc_flags = IB_WC_WITH_IMM;
			/* Immediate data is kept big-endian on the wire. */
			wc->ex.imm_data =
				cpu_to_be32(le32_to_cpu(cqe->immediate_data));
			break;
		case HNS_ROCE_OPCODE_SEND_DATA_RECEIVE:
			if (roce_get_bit(cqe->cqe_byte_4,
					 CQE_BYTE_4_IMM_INDICATOR_S)) {
				wc->opcode = IB_WC_RECV;
				wc->wc_flags = IB_WC_WITH_IMM;
				wc->ex.imm_data = cpu_to_be32(
					le32_to_cpu(cqe->immediate_data));
			} else {
				wc->opcode = IB_WC_RECV;
				wc->wc_flags = 0;
			}
			break;
		default:
			wc->status = IB_WC_GENERAL_ERR;
			break;
		}

		/* Update tail pointer, record wr_id */
		wq = &(*cur_qp)->rq;
		wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
		++wq->tail;
		wc->sl = (u8)roce_get_field(cqe->cqe_byte_20, CQE_BYTE_20_SL_M,
					    CQE_BYTE_20_SL_S);
		wc->src_qp = (u8)roce_get_field(cqe->cqe_byte_20,
						CQE_BYTE_20_REMOTE_QPN_M,
						CQE_BYTE_20_REMOTE_QPN_S);
		wc->wc_flags |= (roce_get_bit(cqe->cqe_byte_20,
					      CQE_BYTE_20_GRH_PRESENT_S) ?
				 IB_WC_GRH : 0);
		wc->pkey_index = (u16)roce_get_field(cqe->cqe_byte_28,
						     CQE_BYTE_28_P_KEY_IDX_M,
						     CQE_BYTE_28_P_KEY_IDX_S);
	}

	return 0;
}
2377 | ||
/*
 * Poll up to @num_entries completions from the CQ into @wc.
 *
 * After polling, the new consumer index is published both through the
 * in-memory tptr slot (read by hardware — see write_cqc) and the CQ
 * doorbell.  Returns the number of completions polled, or a negative
 * error from hns_roce_v1_poll_one() (-EAGAIN/empty is not an error).
 */
int hns_roce_v1_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
{
	struct hns_roce_cq *hr_cq = to_hr_cq(ibcq);
	struct hns_roce_qp *cur_qp = NULL;
	unsigned long flags;
	int npolled;
	int ret = 0;

	spin_lock_irqsave(&hr_cq->lock, flags);

	for (npolled = 0; npolled < num_entries; ++npolled) {
		ret = hns_roce_v1_poll_one(hr_cq, &cur_qp, wc + npolled);
		if (ret)
			break;
	}

	if (npolled) {
		*hr_cq->tptr_addr = hr_cq->cons_index &
			((hr_cq->cq_depth << 1) - 1);

		/*
		 * Memory barrier: the tptr update must be visible before
		 * the doorbell write below.
		 */
		wmb();
		hns_roce_v1_cq_set_ci(hr_cq, hr_cq->cons_index);
	}

	spin_unlock_irqrestore(&hr_cq->lock, flags);

	if (ret == 0 || ret == -EAGAIN)
		return npolled;
	else
		return ret;
}
2410 | ||
d61d6de0 BVA |
/*
 * Invalidate the HEM base-address-table entry for @obj of the given
 * table type (QPC/MTPT/CQC) via the BT command registers.
 *
 * The sequence is: build the high command word, wait for the previous
 * BT command's sync bit to clear (bounded by HW_SYNC_TIMEOUT_MSECS),
 * then write both command words in one 64-bit doorbell.
 *
 * Returns 0 on success, -EINVAL for unsupported SRQC, -EBUSY if the
 * hardware never acknowledged the previous command.  @step_idx is
 * unused on this hardware generation.
 */
static int hns_roce_v1_clear_hem(struct hns_roce_dev *hr_dev,
				 struct hns_roce_hem_table *table, int obj,
				 int step_idx)
{
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_v1_priv *priv;
	unsigned long end = 0, flags = 0;
	uint32_t bt_cmd_val[2] = {0};
	void __iomem *bt_cmd;
	u64 bt_ba = 0;

	priv = (struct hns_roce_v1_priv *)hr_dev->priv;

	/* Select the command modifier and BT base (as a 4K frame number). */
	switch (table->type) {
	case HEM_TYPE_QPC:
		roce_set_field(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_M,
			ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S, HEM_TYPE_QPC);
		bt_ba = priv->bt_table.qpc_buf.map >> 12;
		break;
	case HEM_TYPE_MTPT:
		roce_set_field(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_M,
			ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S, HEM_TYPE_MTPT);
		bt_ba = priv->bt_table.mtpt_buf.map >> 12;
		break;
	case HEM_TYPE_CQC:
		roce_set_field(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_M,
			ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S, HEM_TYPE_CQC);
		bt_ba = priv->bt_table.cqc_buf.map >> 12;
		break;
	case HEM_TYPE_SRQC:
		dev_dbg(dev, "HEM_TYPE_SRQC not support.\n");
		return -EINVAL;
	default:
		return 0;
	}
	roce_set_field(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_IN_MDF_M,
		ROCEE_BT_CMD_H_ROCEE_BT_CMD_IN_MDF_S, obj);
	roce_set_bit(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_S, 0);
	roce_set_bit(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_HW_SYNS_S, 1);

	spin_lock_irqsave(&hr_dev->bt_cmd_lock, flags);

	bt_cmd = hr_dev->reg_base + ROCEE_BT_CMD_H_REG;

	/* Wait for the hardware to finish the previous BT command. */
	end = msecs_to_jiffies(HW_SYNC_TIMEOUT_MSECS) + jiffies;
	while (1) {
		if (readl(bt_cmd) >> BT_CMD_SYNC_SHIFT) {
			if (!(time_before(jiffies, end))) {
				dev_err(dev, "Write bt_cmd err,hw_sync is not zero.\n");
				spin_unlock_irqrestore(&hr_dev->bt_cmd_lock,
					flags);
				return -EBUSY;
			}
		} else {
			break;
		}
		/* NOTE(review): msleep() while holding a spinlock with IRQs
		 * disabled is not allowed — this polling should use a delay
		 * primitive that does not sleep; confirm against upstream.
		 */
		msleep(HW_SYNC_SLEEP_TIME_INTERVAL);
	}

	bt_cmd_val[0] = (uint32_t)bt_ba;
	roce_set_field(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_BA_H_M,
		ROCEE_BT_CMD_H_ROCEE_BT_CMD_BA_H_S, bt_ba >> 32);
	hns_roce_write64_k(bt_cmd_val, hr_dev->reg_base + ROCEE_BT_CMD_L_REG);

	spin_unlock_irqrestore(&hr_dev->bt_cmd_lock, flags);

	return 0;
}
2479 | ||
9a443537 | 2480 | static int hns_roce_v1_qp_modify(struct hns_roce_dev *hr_dev, |
2481 | struct hns_roce_mtt *mtt, | |
2482 | enum hns_roce_qp_state cur_state, | |
2483 | enum hns_roce_qp_state new_state, | |
2484 | struct hns_roce_qp_context *context, | |
2485 | struct hns_roce_qp *hr_qp) | |
2486 | { | |
2487 | static const u16 | |
2488 | op[HNS_ROCE_QP_NUM_STATE][HNS_ROCE_QP_NUM_STATE] = { | |
2489 | [HNS_ROCE_QP_STATE_RST] = { | |
2490 | [HNS_ROCE_QP_STATE_RST] = HNS_ROCE_CMD_2RST_QP, | |
2491 | [HNS_ROCE_QP_STATE_ERR] = HNS_ROCE_CMD_2ERR_QP, | |
2492 | [HNS_ROCE_QP_STATE_INIT] = HNS_ROCE_CMD_RST2INIT_QP, | |
2493 | }, | |
2494 | [HNS_ROCE_QP_STATE_INIT] = { | |
2495 | [HNS_ROCE_QP_STATE_RST] = HNS_ROCE_CMD_2RST_QP, | |
2496 | [HNS_ROCE_QP_STATE_ERR] = HNS_ROCE_CMD_2ERR_QP, | |
2497 | /* Note: In v1 engine, HW doesn't support RST2INIT. | |
2498 | * We use RST2INIT cmd instead of INIT2INIT. | |
2499 | */ | |
2500 | [HNS_ROCE_QP_STATE_INIT] = HNS_ROCE_CMD_RST2INIT_QP, | |
2501 | [HNS_ROCE_QP_STATE_RTR] = HNS_ROCE_CMD_INIT2RTR_QP, | |
2502 | }, | |
2503 | [HNS_ROCE_QP_STATE_RTR] = { | |
2504 | [HNS_ROCE_QP_STATE_RST] = HNS_ROCE_CMD_2RST_QP, | |
2505 | [HNS_ROCE_QP_STATE_ERR] = HNS_ROCE_CMD_2ERR_QP, | |
2506 | [HNS_ROCE_QP_STATE_RTS] = HNS_ROCE_CMD_RTR2RTS_QP, | |
2507 | }, | |
2508 | [HNS_ROCE_QP_STATE_RTS] = { | |
2509 | [HNS_ROCE_QP_STATE_RST] = HNS_ROCE_CMD_2RST_QP, | |
2510 | [HNS_ROCE_QP_STATE_ERR] = HNS_ROCE_CMD_2ERR_QP, | |
2511 | [HNS_ROCE_QP_STATE_RTS] = HNS_ROCE_CMD_RTS2RTS_QP, | |
2512 | [HNS_ROCE_QP_STATE_SQD] = HNS_ROCE_CMD_RTS2SQD_QP, | |
2513 | }, | |
2514 | [HNS_ROCE_QP_STATE_SQD] = { | |
2515 | [HNS_ROCE_QP_STATE_RST] = HNS_ROCE_CMD_2RST_QP, | |
2516 | [HNS_ROCE_QP_STATE_ERR] = HNS_ROCE_CMD_2ERR_QP, | |
2517 | [HNS_ROCE_QP_STATE_RTS] = HNS_ROCE_CMD_SQD2RTS_QP, | |
2518 | [HNS_ROCE_QP_STATE_SQD] = HNS_ROCE_CMD_SQD2SQD_QP, | |
2519 | }, | |
2520 | [HNS_ROCE_QP_STATE_ERR] = { | |
2521 | [HNS_ROCE_QP_STATE_RST] = HNS_ROCE_CMD_2RST_QP, | |
2522 | [HNS_ROCE_QP_STATE_ERR] = HNS_ROCE_CMD_2ERR_QP, | |
2523 | } | |
2524 | }; | |
2525 | ||
2526 | struct hns_roce_cmd_mailbox *mailbox; | |
2527 | struct device *dev = &hr_dev->pdev->dev; | |
2528 | int ret = 0; | |
2529 | ||
2530 | if (cur_state >= HNS_ROCE_QP_NUM_STATE || | |
2531 | new_state >= HNS_ROCE_QP_NUM_STATE || | |
2532 | !op[cur_state][new_state]) { | |
2533 | dev_err(dev, "[modify_qp]not support state %d to %d\n", | |
2534 | cur_state, new_state); | |
2535 | return -EINVAL; | |
2536 | } | |
2537 | ||
2538 | if (op[cur_state][new_state] == HNS_ROCE_CMD_2RST_QP) | |
2539 | return hns_roce_cmd_mbox(hr_dev, 0, 0, hr_qp->qpn, 2, | |
2540 | HNS_ROCE_CMD_2RST_QP, | |
6b877c32 | 2541 | HNS_ROCE_CMD_TIMEOUT_MSECS); |
9a443537 | 2542 | |
2543 | if (op[cur_state][new_state] == HNS_ROCE_CMD_2ERR_QP) | |
2544 | return hns_roce_cmd_mbox(hr_dev, 0, 0, hr_qp->qpn, 2, | |
2545 | HNS_ROCE_CMD_2ERR_QP, | |
6b877c32 | 2546 | HNS_ROCE_CMD_TIMEOUT_MSECS); |
9a443537 | 2547 | |
2548 | mailbox = hns_roce_alloc_cmd_mailbox(hr_dev); | |
2549 | if (IS_ERR(mailbox)) | |
2550 | return PTR_ERR(mailbox); | |
2551 | ||
2552 | memcpy(mailbox->buf, context, sizeof(*context)); | |
2553 | ||
2554 | ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, hr_qp->qpn, 0, | |
2555 | op[cur_state][new_state], | |
6b877c32 | 2556 | HNS_ROCE_CMD_TIMEOUT_MSECS); |
9a443537 | 2557 | |
2558 | hns_roce_free_cmd_mailbox(hr_dev, mailbox); | |
2559 | return ret; | |
2560 | } | |
2561 | ||
/*
 * hns_roce_v1_m_sqp() - modify the state of a special QP on the v1 engine.
 * @ibqp: the special QP being modified.
 * @attr: requested attributes; pkey_index is consumed on RESET->INIT.
 * @attr_mask: mask of valid @attr fields (not inspected in this path).
 * @cur_state: current IB QP state.
 * @new_state: requested IB QP state.
 *
 * Unlike regular QPs, the special-QP context is not sent through the
 * mailbox: on RESET->INIT the context words are written one by one into
 * the per-port ROCEE_QP1C_CFG* registers, and every transition then
 * updates the QP state field in CFG0 directly.
 *
 * Returns 0 on success, -ENOMEM if the scratch context cannot be
 * allocated, or -EINVAL when the QP buffer's MTT entries are not found.
 */
static int hns_roce_v1_m_sqp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
			     int attr_mask, enum ib_qp_state cur_state,
			     enum ib_qp_state new_state)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
	struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
	struct hns_roce_sqp_context *context;
	struct device *dev = &hr_dev->pdev->dev;
	dma_addr_t dma_handle = 0;
	int rq_pa_start;
	u32 reg_val;
	u64 *mtts;
	u32 __iomem *addr;

	/* Scratch copy of the QP1C context, assembled then copied to HW. */
	context = kzalloc(sizeof(*context), GFP_KERNEL);
	if (!context)
		return -ENOMEM;

	/* Search QP buf's MTTs */
	mtts = hns_roce_table_find(hr_dev, &hr_dev->mr_table.mtt_table,
				   hr_qp->mtt.first_seg, &dma_handle);
	if (!mtts) {
		dev_err(dev, "qp buf pa find failed\n");
		goto out;
	}

	/*
	 * Only RESET->INIT programs the full context; all other
	 * transitions just flip the state field further below.
	 */
	if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
		/* log2 SQ/RQ depths and the protection domain number */
		roce_set_field(context->qp1c_bytes_4,
			       QP1C_BYTES_4_SQ_WQE_SHIFT_M,
			       QP1C_BYTES_4_SQ_WQE_SHIFT_S,
			       ilog2((unsigned int)hr_qp->sq.wqe_cnt));
		roce_set_field(context->qp1c_bytes_4,
			       QP1C_BYTES_4_RQ_WQE_SHIFT_M,
			       QP1C_BYTES_4_RQ_WQE_SHIFT_S,
			       ilog2((unsigned int)hr_qp->rq.wqe_cnt));
		roce_set_field(context->qp1c_bytes_4, QP1C_BYTES_4_PD_M,
			       QP1C_BYTES_4_PD_S, to_hr_pd(ibqp->pd)->pdn);

		/* DMA address of the SQ/RQ base-address table, split 32/32 */
		context->sq_rq_bt_l = (u32)(dma_handle);
		roce_set_field(context->qp1c_bytes_12,
			       QP1C_BYTES_12_SQ_RQ_BT_H_M,
			       QP1C_BYTES_12_SQ_RQ_BT_H_S,
			       ((u32)(dma_handle >> 32)));

		roce_set_field(context->qp1c_bytes_16, QP1C_BYTES_16_RQ_HEAD_M,
			       QP1C_BYTES_16_RQ_HEAD_S, hr_qp->rq.head);
		/* phy_port, not the IB port index, addresses the HW block */
		roce_set_field(context->qp1c_bytes_16, QP1C_BYTES_16_PORT_NUM_M,
			       QP1C_BYTES_16_PORT_NUM_S, hr_qp->phy_port);
		roce_set_bit(context->qp1c_bytes_16,
			     QP1C_BYTES_16_SIGNALING_TYPE_S,
			     hr_qp->sq_signal_bits);
		/* base-address-valid flags set, error flag cleared */
		roce_set_bit(context->qp1c_bytes_16, QP1C_BYTES_16_RQ_BA_FLG_S,
			     1);
		roce_set_bit(context->qp1c_bytes_16, QP1C_BYTES_16_SQ_BA_FLG_S,
			     1);
		roce_set_bit(context->qp1c_bytes_16, QP1C_BYTES_16_QP1_ERR_S,
			     0);

		roce_set_field(context->qp1c_bytes_20, QP1C_BYTES_20_SQ_HEAD_M,
			       QP1C_BYTES_20_SQ_HEAD_S, hr_qp->sq.head);
		roce_set_field(context->qp1c_bytes_20, QP1C_BYTES_20_PKEY_IDX_M,
			       QP1C_BYTES_20_PKEY_IDX_S, attr->pkey_index);

		/*
		 * The RQ starts rq.offset bytes into the QP buffer; index
		 * the MTT by page to get its first physical address.
		 */
		rq_pa_start = (u32)hr_qp->rq.offset / PAGE_SIZE;
		context->cur_rq_wqe_ba_l = (u32)(mtts[rq_pa_start]);

		roce_set_field(context->qp1c_bytes_28,
			       QP1C_BYTES_28_CUR_RQ_WQE_BA_H_M,
			       QP1C_BYTES_28_CUR_RQ_WQE_BA_H_S,
			       (mtts[rq_pa_start]) >> 32);
		roce_set_field(context->qp1c_bytes_28,
			       QP1C_BYTES_28_RQ_CUR_IDX_M,
			       QP1C_BYTES_28_RQ_CUR_IDX_S, 0);

		roce_set_field(context->qp1c_bytes_32,
			       QP1C_BYTES_32_RX_CQ_NUM_M,
			       QP1C_BYTES_32_RX_CQ_NUM_S,
			       to_hr_cq(ibqp->recv_cq)->cqn);
		roce_set_field(context->qp1c_bytes_32,
			       QP1C_BYTES_32_TX_CQ_NUM_M,
			       QP1C_BYTES_32_TX_CQ_NUM_S,
			       to_hr_cq(ibqp->send_cq)->cqn);

		/* SQ lives at the start of the QP buffer (mtts[0]) */
		context->cur_sq_wqe_ba_l = (u32)mtts[0];

		roce_set_field(context->qp1c_bytes_40,
			       QP1C_BYTES_40_CUR_SQ_WQE_BA_H_M,
			       QP1C_BYTES_40_CUR_SQ_WQE_BA_H_S,
			       (mtts[0]) >> 32);
		roce_set_field(context->qp1c_bytes_40,
			       QP1C_BYTES_40_SQ_CUR_IDX_M,
			       QP1C_BYTES_40_SQ_CUR_IDX_S, 0);

		/* Copy context to QP1C register */
		addr = (u32 __iomem *)(hr_dev->reg_base +
				       ROCEE_QP1C_CFG0_0_REG +
				       hr_qp->phy_port * sizeof(*context));

		/*
		 * One MMIO write per 32-bit context word, in register
		 * order. NOTE(review): word order presumably mirrors the
		 * struct hns_roce_sqp_context layout — confirm against
		 * the register map before reordering.
		 */
		writel(context->qp1c_bytes_4, addr);
		writel(context->sq_rq_bt_l, addr + 1);
		writel(context->qp1c_bytes_12, addr + 2);
		writel(context->qp1c_bytes_16, addr + 3);
		writel(context->qp1c_bytes_20, addr + 4);
		writel(context->cur_rq_wqe_ba_l, addr + 5);
		writel(context->qp1c_bytes_28, addr + 6);
		writel(context->qp1c_bytes_32, addr + 7);
		writel(context->cur_sq_wqe_ba_l, addr + 8);
		writel(context->qp1c_bytes_40, addr + 9);
	}

	/* Modify QP1C status: read-modify-write the state field in CFG0 */
	reg_val = roce_read(hr_dev, ROCEE_QP1C_CFG0_0_REG +
			    hr_qp->phy_port * sizeof(*context));
	roce_set_field(reg_val, ROCEE_QP1C_CFG0_0_ROCEE_QP1C_QP_ST_M,
		       ROCEE_QP1C_CFG0_0_ROCEE_QP1C_QP_ST_S, new_state);
	roce_write(hr_dev, ROCEE_QP1C_CFG0_0_REG +
		   hr_qp->phy_port * sizeof(*context), reg_val);

	hr_qp->state = new_state;
	if (new_state == IB_QPS_RESET) {
		/*
		 * Back to RESET: flush this QP's leftover CQEs (send CQ
		 * too if it differs) and rewind all SW ring indices.
		 */
		hns_roce_v1_cq_clean(to_hr_cq(ibqp->recv_cq), hr_qp->qpn,
				     ibqp->srq ? to_hr_srq(ibqp->srq) : NULL);
		if (ibqp->send_cq != ibqp->recv_cq)
			hns_roce_v1_cq_clean(to_hr_cq(ibqp->send_cq),
					     hr_qp->qpn, NULL);

		hr_qp->rq.head = 0;
		hr_qp->rq.tail = 0;
		hr_qp->sq.head = 0;
		hr_qp->sq.tail = 0;
		hr_qp->sq_next_wqe = 0;
	}

	kfree(context);
	return 0;

out:
	kfree(context);
	return -EINVAL;
}
2702 | ||
2703 | static int hns_roce_v1_m_qp(struct ib_qp *ibqp, const struct ib_qp_attr *attr, | |
2704 | int attr_mask, enum ib_qp_state cur_state, | |
2705 | enum ib_qp_state new_state) | |
2706 | { | |
2707 | struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device); | |
2708 | struct hns_roce_qp *hr_qp = to_hr_qp(ibqp); | |
2709 | struct device *dev = &hr_dev->pdev->dev; | |
2710 | struct hns_roce_qp_context *context; | |
d8966fcd | 2711 | const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr); |
9a443537 | 2712 | dma_addr_t dma_handle_2 = 0; |
2713 | dma_addr_t dma_handle = 0; | |
2714 | uint32_t doorbell[2] = {0}; | |
2715 | int rq_pa_start = 0; | |
9a443537 | 2716 | u64 *mtts_2 = NULL; |
2717 | int ret = -EINVAL; | |
2718 | u64 *mtts = NULL; | |
2719 | int port; | |
d8966fcd | 2720 | u8 port_num; |
9a443537 | 2721 | u8 *dmac; |
2722 | u8 *smac; | |
2723 | ||
2724 | context = kzalloc(sizeof(*context), GFP_KERNEL); | |
2725 | if (!context) | |
2726 | return -ENOMEM; | |
2727 | ||
2728 | /* Search qp buf's mtts */ | |
6a93c77a | 2729 | mtts = hns_roce_table_find(hr_dev, &hr_dev->mr_table.mtt_table, |
9a443537 | 2730 | hr_qp->mtt.first_seg, &dma_handle); |
2731 | if (mtts == NULL) { | |
2732 | dev_err(dev, "qp buf pa find failed\n"); | |
2733 | goto out; | |
2734 | } | |
2735 | ||
2736 | /* Search IRRL's mtts */ | |
6a93c77a SX |
2737 | mtts_2 = hns_roce_table_find(hr_dev, &hr_dev->qp_table.irrl_table, |
2738 | hr_qp->qpn, &dma_handle_2); | |
9a443537 | 2739 | if (mtts_2 == NULL) { |
2740 | dev_err(dev, "qp irrl_table find failed\n"); | |
2741 | goto out; | |
2742 | } | |
2743 | ||
2744 | /* | |
e84e40be S |
2745 | * Reset to init |
2746 | * Mandatory param: | |
2747 | * IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_PORT | IB_QP_ACCESS_FLAGS | |
2748 | * Optional param: NA | |
2749 | */ | |
9a443537 | 2750 | if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) { |
2751 | roce_set_field(context->qpc_bytes_4, | |
2752 | QP_CONTEXT_QPC_BYTES_4_TRANSPORT_SERVICE_TYPE_M, | |
2753 | QP_CONTEXT_QPC_BYTES_4_TRANSPORT_SERVICE_TYPE_S, | |
2754 | to_hr_qp_type(hr_qp->ibqp.qp_type)); | |
2755 | ||
2756 | roce_set_bit(context->qpc_bytes_4, | |
2757 | QP_CONTEXT_QPC_BYTE_4_ENABLE_FPMR_S, 0); | |
2758 | roce_set_bit(context->qpc_bytes_4, | |
2759 | QP_CONTEXT_QPC_BYTE_4_RDMA_READ_ENABLE_S, | |
2760 | !!(attr->qp_access_flags & IB_ACCESS_REMOTE_READ)); | |
2761 | roce_set_bit(context->qpc_bytes_4, | |
2762 | QP_CONTEXT_QPC_BYTE_4_RDMA_WRITE_ENABLE_S, | |
2763 | !!(attr->qp_access_flags & IB_ACCESS_REMOTE_WRITE) | |
2764 | ); | |
2765 | roce_set_bit(context->qpc_bytes_4, | |
2766 | QP_CONTEXT_QPC_BYTE_4_ATOMIC_OPERATION_ENABLE_S, | |
2767 | !!(attr->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC) | |
2768 | ); | |
2769 | roce_set_bit(context->qpc_bytes_4, | |
2770 | QP_CONTEXT_QPC_BYTE_4_RDMAR_USE_S, 1); | |
2771 | roce_set_field(context->qpc_bytes_4, | |
2772 | QP_CONTEXT_QPC_BYTES_4_SQ_WQE_SHIFT_M, | |
2773 | QP_CONTEXT_QPC_BYTES_4_SQ_WQE_SHIFT_S, | |
2774 | ilog2((unsigned int)hr_qp->sq.wqe_cnt)); | |
2775 | roce_set_field(context->qpc_bytes_4, | |
2776 | QP_CONTEXT_QPC_BYTES_4_RQ_WQE_SHIFT_M, | |
2777 | QP_CONTEXT_QPC_BYTES_4_RQ_WQE_SHIFT_S, | |
2778 | ilog2((unsigned int)hr_qp->rq.wqe_cnt)); | |
2779 | roce_set_field(context->qpc_bytes_4, | |
2780 | QP_CONTEXT_QPC_BYTES_4_PD_M, | |
2781 | QP_CONTEXT_QPC_BYTES_4_PD_S, | |
2782 | to_hr_pd(ibqp->pd)->pdn); | |
2783 | hr_qp->access_flags = attr->qp_access_flags; | |
2784 | roce_set_field(context->qpc_bytes_8, | |
2785 | QP_CONTEXT_QPC_BYTES_8_TX_COMPLETION_M, | |
2786 | QP_CONTEXT_QPC_BYTES_8_TX_COMPLETION_S, | |
2787 | to_hr_cq(ibqp->send_cq)->cqn); | |
2788 | roce_set_field(context->qpc_bytes_8, | |
2789 | QP_CONTEXT_QPC_BYTES_8_RX_COMPLETION_M, | |
2790 | QP_CONTEXT_QPC_BYTES_8_RX_COMPLETION_S, | |
2791 | to_hr_cq(ibqp->recv_cq)->cqn); | |
2792 | ||
2793 | if (ibqp->srq) | |
2794 | roce_set_field(context->qpc_bytes_12, | |
2795 | QP_CONTEXT_QPC_BYTES_12_SRQ_NUMBER_M, | |
2796 | QP_CONTEXT_QPC_BYTES_12_SRQ_NUMBER_S, | |
2797 | to_hr_srq(ibqp->srq)->srqn); | |
2798 | ||
2799 | roce_set_field(context->qpc_bytes_12, | |
2800 | QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_M, | |
2801 | QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_S, | |
2802 | attr->pkey_index); | |
2803 | hr_qp->pkey_index = attr->pkey_index; | |
2804 | roce_set_field(context->qpc_bytes_16, | |
2805 | QP_CONTEXT_QPC_BYTES_16_QP_NUM_M, | |
2806 | QP_CONTEXT_QPC_BYTES_16_QP_NUM_S, hr_qp->qpn); | |
2807 | ||
2808 | } else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_INIT) { | |
2809 | roce_set_field(context->qpc_bytes_4, | |
2810 | QP_CONTEXT_QPC_BYTES_4_TRANSPORT_SERVICE_TYPE_M, | |
2811 | QP_CONTEXT_QPC_BYTES_4_TRANSPORT_SERVICE_TYPE_S, | |
2812 | to_hr_qp_type(hr_qp->ibqp.qp_type)); | |
2813 | roce_set_bit(context->qpc_bytes_4, | |
2814 | QP_CONTEXT_QPC_BYTE_4_ENABLE_FPMR_S, 0); | |
2815 | if (attr_mask & IB_QP_ACCESS_FLAGS) { | |
2816 | roce_set_bit(context->qpc_bytes_4, | |
2817 | QP_CONTEXT_QPC_BYTE_4_RDMA_READ_ENABLE_S, | |
2818 | !!(attr->qp_access_flags & | |
2819 | IB_ACCESS_REMOTE_READ)); | |
2820 | roce_set_bit(context->qpc_bytes_4, | |
2821 | QP_CONTEXT_QPC_BYTE_4_RDMA_WRITE_ENABLE_S, | |
2822 | !!(attr->qp_access_flags & | |
2823 | IB_ACCESS_REMOTE_WRITE)); | |
2824 | } else { | |
2825 | roce_set_bit(context->qpc_bytes_4, | |
2826 | QP_CONTEXT_QPC_BYTE_4_RDMA_READ_ENABLE_S, | |
2827 | !!(hr_qp->access_flags & | |
2828 | IB_ACCESS_REMOTE_READ)); | |
2829 | roce_set_bit(context->qpc_bytes_4, | |
2830 | QP_CONTEXT_QPC_BYTE_4_RDMA_WRITE_ENABLE_S, | |
2831 | !!(hr_qp->access_flags & | |
2832 | IB_ACCESS_REMOTE_WRITE)); | |
2833 | } | |
2834 | ||
2835 | roce_set_bit(context->qpc_bytes_4, | |
2836 | QP_CONTEXT_QPC_BYTE_4_RDMAR_USE_S, 1); | |
2837 | roce_set_field(context->qpc_bytes_4, | |
2838 | QP_CONTEXT_QPC_BYTES_4_SQ_WQE_SHIFT_M, | |
2839 | QP_CONTEXT_QPC_BYTES_4_SQ_WQE_SHIFT_S, | |
2840 | ilog2((unsigned int)hr_qp->sq.wqe_cnt)); | |
2841 | roce_set_field(context->qpc_bytes_4, | |
2842 | QP_CONTEXT_QPC_BYTES_4_RQ_WQE_SHIFT_M, | |
2843 | QP_CONTEXT_QPC_BYTES_4_RQ_WQE_SHIFT_S, | |
2844 | ilog2((unsigned int)hr_qp->rq.wqe_cnt)); | |
2845 | roce_set_field(context->qpc_bytes_4, | |
2846 | QP_CONTEXT_QPC_BYTES_4_PD_M, | |
2847 | QP_CONTEXT_QPC_BYTES_4_PD_S, | |
2848 | to_hr_pd(ibqp->pd)->pdn); | |
2849 | ||
2850 | roce_set_field(context->qpc_bytes_8, | |
2851 | QP_CONTEXT_QPC_BYTES_8_TX_COMPLETION_M, | |
2852 | QP_CONTEXT_QPC_BYTES_8_TX_COMPLETION_S, | |
2853 | to_hr_cq(ibqp->send_cq)->cqn); | |
2854 | roce_set_field(context->qpc_bytes_8, | |
2855 | QP_CONTEXT_QPC_BYTES_8_RX_COMPLETION_M, | |
2856 | QP_CONTEXT_QPC_BYTES_8_RX_COMPLETION_S, | |
2857 | to_hr_cq(ibqp->recv_cq)->cqn); | |
2858 | ||
2859 | if (ibqp->srq) | |
2860 | roce_set_field(context->qpc_bytes_12, | |
2861 | QP_CONTEXT_QPC_BYTES_12_SRQ_NUMBER_M, | |
2862 | QP_CONTEXT_QPC_BYTES_12_SRQ_NUMBER_S, | |
2863 | to_hr_srq(ibqp->srq)->srqn); | |
2864 | if (attr_mask & IB_QP_PKEY_INDEX) | |
2865 | roce_set_field(context->qpc_bytes_12, | |
2866 | QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_M, | |
2867 | QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_S, | |
2868 | attr->pkey_index); | |
2869 | else | |
2870 | roce_set_field(context->qpc_bytes_12, | |
2871 | QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_M, | |
2872 | QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_S, | |
2873 | hr_qp->pkey_index); | |
2874 | ||
2875 | roce_set_field(context->qpc_bytes_16, | |
2876 | QP_CONTEXT_QPC_BYTES_16_QP_NUM_M, | |
2877 | QP_CONTEXT_QPC_BYTES_16_QP_NUM_S, hr_qp->qpn); | |
2878 | } else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR) { | |
2879 | if ((attr_mask & IB_QP_ALT_PATH) || | |
2880 | (attr_mask & IB_QP_ACCESS_FLAGS) || | |
2881 | (attr_mask & IB_QP_PKEY_INDEX) || | |
2882 | (attr_mask & IB_QP_QKEY)) { | |
2883 | dev_err(dev, "INIT2RTR attr_mask error\n"); | |
2884 | goto out; | |
2885 | } | |
2886 | ||
44c58487 | 2887 | dmac = (u8 *)attr->ah_attr.roce.dmac; |
9a443537 | 2888 | |
2889 | context->sq_rq_bt_l = (u32)(dma_handle); | |
2890 | roce_set_field(context->qpc_bytes_24, | |
2891 | QP_CONTEXT_QPC_BYTES_24_SQ_RQ_BT_H_M, | |
2892 | QP_CONTEXT_QPC_BYTES_24_SQ_RQ_BT_H_S, | |
2893 | ((u32)(dma_handle >> 32))); | |
2894 | roce_set_bit(context->qpc_bytes_24, | |
2895 | QP_CONTEXT_QPC_BYTE_24_REMOTE_ENABLE_E2E_CREDITS_S, | |
2896 | 1); | |
2897 | roce_set_field(context->qpc_bytes_24, | |
2898 | QP_CONTEXT_QPC_BYTES_24_MINIMUM_RNR_NAK_TIMER_M, | |
2899 | QP_CONTEXT_QPC_BYTES_24_MINIMUM_RNR_NAK_TIMER_S, | |
2900 | attr->min_rnr_timer); | |
2901 | context->irrl_ba_l = (u32)(dma_handle_2); | |
2902 | roce_set_field(context->qpc_bytes_32, | |
2903 | QP_CONTEXT_QPC_BYTES_32_IRRL_BA_H_M, | |
2904 | QP_CONTEXT_QPC_BYTES_32_IRRL_BA_H_S, | |
2905 | ((u32)(dma_handle_2 >> 32)) & | |
2906 | QP_CONTEXT_QPC_BYTES_32_IRRL_BA_H_M); | |
2907 | roce_set_field(context->qpc_bytes_32, | |
2908 | QP_CONTEXT_QPC_BYTES_32_MIG_STATE_M, | |
2909 | QP_CONTEXT_QPC_BYTES_32_MIG_STATE_S, 0); | |
2910 | roce_set_bit(context->qpc_bytes_32, | |
2911 | QP_CONTEXT_QPC_BYTE_32_LOCAL_ENABLE_E2E_CREDITS_S, | |
2912 | 1); | |
2913 | roce_set_bit(context->qpc_bytes_32, | |
2914 | QP_CONTEXT_QPC_BYTE_32_SIGNALING_TYPE_S, | |
2915 | hr_qp->sq_signal_bits); | |
2916 | ||
80596c67 LO |
2917 | port = (attr_mask & IB_QP_PORT) ? (attr->port_num - 1) : |
2918 | hr_qp->port; | |
2919 | smac = (u8 *)hr_dev->dev_addr[port]; | |
2920 | /* when dmac equals smac or loop_idc is 1, it should loopback */ | |
2921 | if (ether_addr_equal_unaligned(dmac, smac) || | |
2922 | hr_dev->loop_idc == 0x1) | |
9a443537 | 2923 | roce_set_bit(context->qpc_bytes_32, |
80596c67 | 2924 | QP_CONTEXT_QPC_BYTE_32_LOOPBACK_INDICATOR_S, 1); |
9a443537 | 2925 | |
2926 | roce_set_bit(context->qpc_bytes_32, | |
2927 | QP_CONTEXT_QPC_BYTE_32_GLOBAL_HEADER_S, | |
d8966fcd | 2928 | rdma_ah_get_ah_flags(&attr->ah_attr)); |
9a443537 | 2929 | roce_set_field(context->qpc_bytes_32, |
2930 | QP_CONTEXT_QPC_BYTES_32_RESPONDER_RESOURCES_M, | |
2931 | QP_CONTEXT_QPC_BYTES_32_RESPONDER_RESOURCES_S, | |
2932 | ilog2((unsigned int)attr->max_dest_rd_atomic)); | |
2933 | ||
512f4f16 LO |
2934 | if (attr_mask & IB_QP_DEST_QPN) |
2935 | roce_set_field(context->qpc_bytes_36, | |
2936 | QP_CONTEXT_QPC_BYTES_36_DEST_QP_M, | |
2937 | QP_CONTEXT_QPC_BYTES_36_DEST_QP_S, | |
2938 | attr->dest_qp_num); | |
9a443537 | 2939 | |
2940 | /* Configure GID index */ | |
d8966fcd | 2941 | port_num = rdma_ah_get_port_num(&attr->ah_attr); |
9a443537 | 2942 | roce_set_field(context->qpc_bytes_36, |
2943 | QP_CONTEXT_QPC_BYTES_36_SGID_INDEX_M, | |
2944 | QP_CONTEXT_QPC_BYTES_36_SGID_INDEX_S, | |
d8966fcd DC |
2945 | hns_get_gid_index(hr_dev, |
2946 | port_num - 1, | |
2947 | grh->sgid_index)); | |
9a443537 | 2948 | |
2949 | memcpy(&(context->dmac_l), dmac, 4); | |
2950 | ||
2951 | roce_set_field(context->qpc_bytes_44, | |
2952 | QP_CONTEXT_QPC_BYTES_44_DMAC_H_M, | |
2953 | QP_CONTEXT_QPC_BYTES_44_DMAC_H_S, | |
2954 | *((u16 *)(&dmac[4]))); | |
2955 | roce_set_field(context->qpc_bytes_44, | |
2956 | QP_CONTEXT_QPC_BYTES_44_MAXIMUM_STATIC_RATE_M, | |
2957 | QP_CONTEXT_QPC_BYTES_44_MAXIMUM_STATIC_RATE_S, | |
d8966fcd | 2958 | rdma_ah_get_static_rate(&attr->ah_attr)); |
9a443537 | 2959 | roce_set_field(context->qpc_bytes_44, |
2960 | QP_CONTEXT_QPC_BYTES_44_HOPLMT_M, | |
2961 | QP_CONTEXT_QPC_BYTES_44_HOPLMT_S, | |
d8966fcd | 2962 | grh->hop_limit); |
9a443537 | 2963 | |
2964 | roce_set_field(context->qpc_bytes_48, | |
2965 | QP_CONTEXT_QPC_BYTES_48_FLOWLABEL_M, | |
2966 | QP_CONTEXT_QPC_BYTES_48_FLOWLABEL_S, | |
d8966fcd | 2967 | grh->flow_label); |
9a443537 | 2968 | roce_set_field(context->qpc_bytes_48, |
2969 | QP_CONTEXT_QPC_BYTES_48_TCLASS_M, | |
2970 | QP_CONTEXT_QPC_BYTES_48_TCLASS_S, | |
d8966fcd | 2971 | grh->traffic_class); |
9a443537 | 2972 | roce_set_field(context->qpc_bytes_48, |
2973 | QP_CONTEXT_QPC_BYTES_48_MTU_M, | |
2974 | QP_CONTEXT_QPC_BYTES_48_MTU_S, attr->path_mtu); | |
2975 | ||
d8966fcd DC |
2976 | memcpy(context->dgid, grh->dgid.raw, |
2977 | sizeof(grh->dgid.raw)); | |
9a443537 | 2978 | |
2979 | dev_dbg(dev, "dmac:%x :%lx\n", context->dmac_l, | |
2980 | roce_get_field(context->qpc_bytes_44, | |
2981 | QP_CONTEXT_QPC_BYTES_44_DMAC_H_M, | |
2982 | QP_CONTEXT_QPC_BYTES_44_DMAC_H_S)); | |
2983 | ||
2984 | roce_set_field(context->qpc_bytes_68, | |
2985 | QP_CONTEXT_QPC_BYTES_68_RQ_HEAD_M, | |
1fad5fab LO |
2986 | QP_CONTEXT_QPC_BYTES_68_RQ_HEAD_S, |
2987 | hr_qp->rq.head); | |
9a443537 | 2988 | roce_set_field(context->qpc_bytes_68, |
2989 | QP_CONTEXT_QPC_BYTES_68_RQ_CUR_INDEX_M, | |
2990 | QP_CONTEXT_QPC_BYTES_68_RQ_CUR_INDEX_S, 0); | |
2991 | ||
2992 | rq_pa_start = (u32)hr_qp->rq.offset / PAGE_SIZE; | |
2993 | context->cur_rq_wqe_ba_l = (u32)(mtts[rq_pa_start]); | |
2994 | ||
2995 | roce_set_field(context->qpc_bytes_76, | |
2996 | QP_CONTEXT_QPC_BYTES_76_CUR_RQ_WQE_BA_H_M, | |
2997 | QP_CONTEXT_QPC_BYTES_76_CUR_RQ_WQE_BA_H_S, | |
2998 | mtts[rq_pa_start] >> 32); | |
2999 | roce_set_field(context->qpc_bytes_76, | |
3000 | QP_CONTEXT_QPC_BYTES_76_RX_REQ_MSN_M, | |
3001 | QP_CONTEXT_QPC_BYTES_76_RX_REQ_MSN_S, 0); | |
3002 | ||
3003 | context->rx_rnr_time = 0; | |
3004 | ||
3005 | roce_set_field(context->qpc_bytes_84, | |
3006 | QP_CONTEXT_QPC_BYTES_84_LAST_ACK_PSN_M, | |
3007 | QP_CONTEXT_QPC_BYTES_84_LAST_ACK_PSN_S, | |
3008 | attr->rq_psn - 1); | |
3009 | roce_set_field(context->qpc_bytes_84, | |
3010 | QP_CONTEXT_QPC_BYTES_84_TRRL_HEAD_M, | |
3011 | QP_CONTEXT_QPC_BYTES_84_TRRL_HEAD_S, 0); | |
3012 | ||
3013 | roce_set_field(context->qpc_bytes_88, | |
3014 | QP_CONTEXT_QPC_BYTES_88_RX_REQ_EPSN_M, | |
3015 | QP_CONTEXT_QPC_BYTES_88_RX_REQ_EPSN_S, | |
3016 | attr->rq_psn); | |
3017 | roce_set_bit(context->qpc_bytes_88, | |
3018 | QP_CONTEXT_QPC_BYTES_88_RX_REQ_PSN_ERR_FLAG_S, 0); | |
3019 | roce_set_bit(context->qpc_bytes_88, | |
3020 | QP_CONTEXT_QPC_BYTES_88_RX_LAST_OPCODE_FLG_S, 0); | |
3021 | roce_set_field(context->qpc_bytes_88, | |
3022 | QP_CONTEXT_QPC_BYTES_88_RQ_REQ_LAST_OPERATION_TYPE_M, | |
3023 | QP_CONTEXT_QPC_BYTES_88_RQ_REQ_LAST_OPERATION_TYPE_S, | |
3024 | 0); | |
3025 | roce_set_field(context->qpc_bytes_88, | |
3026 | QP_CONTEXT_QPC_BYTES_88_RQ_REQ_RDMA_WR_FLAG_M, | |
3027 | QP_CONTEXT_QPC_BYTES_88_RQ_REQ_RDMA_WR_FLAG_S, | |
3028 | 0); | |
3029 | ||
3030 | context->dma_length = 0; | |
3031 | context->r_key = 0; | |
3032 | context->va_l = 0; | |
3033 | context->va_h = 0; | |
3034 | ||
3035 | roce_set_field(context->qpc_bytes_108, | |
3036 | QP_CONTEXT_QPC_BYTES_108_TRRL_SDB_PSN_M, | |
3037 | QP_CONTEXT_QPC_BYTES_108_TRRL_SDB_PSN_S, 0); | |
3038 | roce_set_bit(context->qpc_bytes_108, | |
3039 | QP_CONTEXT_QPC_BYTES_108_TRRL_SDB_PSN_FLG_S, 0); | |
3040 | roce_set_bit(context->qpc_bytes_108, | |
3041 | QP_CONTEXT_QPC_BYTES_108_TRRL_TDB_PSN_FLG_S, 0); | |
3042 | ||
3043 | roce_set_field(context->qpc_bytes_112, | |
3044 | QP_CONTEXT_QPC_BYTES_112_TRRL_TDB_PSN_M, | |
3045 | QP_CONTEXT_QPC_BYTES_112_TRRL_TDB_PSN_S, 0); | |
3046 | roce_set_field(context->qpc_bytes_112, | |
3047 | QP_CONTEXT_QPC_BYTES_112_TRRL_TAIL_M, | |
3048 | QP_CONTEXT_QPC_BYTES_112_TRRL_TAIL_S, 0); | |
3049 | ||
3050 | /* For chip resp ack */ | |
3051 | roce_set_field(context->qpc_bytes_156, | |
3052 | QP_CONTEXT_QPC_BYTES_156_PORT_NUM_M, | |
3053 | QP_CONTEXT_QPC_BYTES_156_PORT_NUM_S, | |
7716809e | 3054 | hr_qp->phy_port); |
9a443537 | 3055 | roce_set_field(context->qpc_bytes_156, |
3056 | QP_CONTEXT_QPC_BYTES_156_SL_M, | |
d8966fcd DC |
3057 | QP_CONTEXT_QPC_BYTES_156_SL_S, |
3058 | rdma_ah_get_sl(&attr->ah_attr)); | |
3059 | hr_qp->sl = rdma_ah_get_sl(&attr->ah_attr); | |
9a443537 | 3060 | } else if (cur_state == IB_QPS_RTR && |
3061 | new_state == IB_QPS_RTS) { | |
3062 | /* If exist optional param, return error */ | |
3063 | if ((attr_mask & IB_QP_ALT_PATH) || | |
3064 | (attr_mask & IB_QP_ACCESS_FLAGS) || | |
3065 | (attr_mask & IB_QP_QKEY) || | |
3066 | (attr_mask & IB_QP_PATH_MIG_STATE) || | |
3067 | (attr_mask & IB_QP_CUR_STATE) || | |
3068 | (attr_mask & IB_QP_MIN_RNR_TIMER)) { | |
3069 | dev_err(dev, "RTR2RTS attr_mask error\n"); | |
3070 | goto out; | |
3071 | } | |
3072 | ||
3073 | context->rx_cur_sq_wqe_ba_l = (u32)(mtts[0]); | |
3074 | ||
3075 | roce_set_field(context->qpc_bytes_120, | |
3076 | QP_CONTEXT_QPC_BYTES_120_RX_CUR_SQ_WQE_BA_H_M, | |
3077 | QP_CONTEXT_QPC_BYTES_120_RX_CUR_SQ_WQE_BA_H_S, | |
3078 | (mtts[0]) >> 32); | |
3079 | ||
3080 | roce_set_field(context->qpc_bytes_124, | |
3081 | QP_CONTEXT_QPC_BYTES_124_RX_ACK_MSN_M, | |
3082 | QP_CONTEXT_QPC_BYTES_124_RX_ACK_MSN_S, 0); | |
3083 | roce_set_field(context->qpc_bytes_124, | |
3084 | QP_CONTEXT_QPC_BYTES_124_IRRL_MSG_IDX_M, | |
3085 | QP_CONTEXT_QPC_BYTES_124_IRRL_MSG_IDX_S, 0); | |
3086 | ||
3087 | roce_set_field(context->qpc_bytes_128, | |
3088 | QP_CONTEXT_QPC_BYTES_128_RX_ACK_EPSN_M, | |
3089 | QP_CONTEXT_QPC_BYTES_128_RX_ACK_EPSN_S, | |
3090 | attr->sq_psn); | |
3091 | roce_set_bit(context->qpc_bytes_128, | |
3092 | QP_CONTEXT_QPC_BYTES_128_RX_ACK_PSN_ERR_FLG_S, 0); | |
3093 | roce_set_field(context->qpc_bytes_128, | |
3094 | QP_CONTEXT_QPC_BYTES_128_ACK_LAST_OPERATION_TYPE_M, | |
3095 | QP_CONTEXT_QPC_BYTES_128_ACK_LAST_OPERATION_TYPE_S, | |
3096 | 0); | |
3097 | roce_set_bit(context->qpc_bytes_128, | |
3098 | QP_CONTEXT_QPC_BYTES_128_IRRL_PSN_VLD_FLG_S, 0); | |
3099 | ||
3100 | roce_set_field(context->qpc_bytes_132, | |
3101 | QP_CONTEXT_QPC_BYTES_132_IRRL_PSN_M, | |
3102 | QP_CONTEXT_QPC_BYTES_132_IRRL_PSN_S, 0); | |
3103 | roce_set_field(context->qpc_bytes_132, | |
3104 | QP_CONTEXT_QPC_BYTES_132_IRRL_TAIL_M, | |
3105 | QP_CONTEXT_QPC_BYTES_132_IRRL_TAIL_S, 0); | |
3106 | ||
3107 | roce_set_field(context->qpc_bytes_136, | |
3108 | QP_CONTEXT_QPC_BYTES_136_RETRY_MSG_PSN_M, | |
3109 | QP_CONTEXT_QPC_BYTES_136_RETRY_MSG_PSN_S, | |
3110 | attr->sq_psn); | |
3111 | roce_set_field(context->qpc_bytes_136, | |
3112 | QP_CONTEXT_QPC_BYTES_136_RETRY_MSG_FPKT_PSN_L_M, | |
3113 | QP_CONTEXT_QPC_BYTES_136_RETRY_MSG_FPKT_PSN_L_S, | |
3114 | attr->sq_psn); | |
3115 | ||
3116 | roce_set_field(context->qpc_bytes_140, | |
3117 | QP_CONTEXT_QPC_BYTES_140_RETRY_MSG_FPKT_PSN_H_M, | |
3118 | QP_CONTEXT_QPC_BYTES_140_RETRY_MSG_FPKT_PSN_H_S, | |
3119 | (attr->sq_psn >> SQ_PSN_SHIFT)); | |
3120 | roce_set_field(context->qpc_bytes_140, | |
3121 | QP_CONTEXT_QPC_BYTES_140_RETRY_MSG_MSN_M, | |
3122 | QP_CONTEXT_QPC_BYTES_140_RETRY_MSG_MSN_S, 0); | |
3123 | roce_set_bit(context->qpc_bytes_140, | |
3124 | QP_CONTEXT_QPC_BYTES_140_RNR_RETRY_FLG_S, 0); | |
3125 | ||
9a443537 | 3126 | roce_set_field(context->qpc_bytes_148, |
3127 | QP_CONTEXT_QPC_BYTES_148_CHECK_FLAG_M, | |
3128 | QP_CONTEXT_QPC_BYTES_148_CHECK_FLAG_S, 0); | |
3129 | roce_set_field(context->qpc_bytes_148, | |
3130 | QP_CONTEXT_QPC_BYTES_148_RETRY_COUNT_M, | |
7c7a4ea1 LO |
3131 | QP_CONTEXT_QPC_BYTES_148_RETRY_COUNT_S, |
3132 | attr->retry_cnt); | |
9a443537 | 3133 | roce_set_field(context->qpc_bytes_148, |
3134 | QP_CONTEXT_QPC_BYTES_148_RNR_RETRY_COUNT_M, | |
7c7a4ea1 LO |
3135 | QP_CONTEXT_QPC_BYTES_148_RNR_RETRY_COUNT_S, |
3136 | attr->rnr_retry); | |
9a443537 | 3137 | roce_set_field(context->qpc_bytes_148, |
3138 | QP_CONTEXT_QPC_BYTES_148_LSN_M, | |
3139 | QP_CONTEXT_QPC_BYTES_148_LSN_S, 0x100); | |
3140 | ||
3141 | context->rnr_retry = 0; | |
3142 | ||
3143 | roce_set_field(context->qpc_bytes_156, | |
3144 | QP_CONTEXT_QPC_BYTES_156_RETRY_COUNT_INIT_M, | |
3145 | QP_CONTEXT_QPC_BYTES_156_RETRY_COUNT_INIT_S, | |
3146 | attr->retry_cnt); | |
c6c3bfea LO |
3147 | if (attr->timeout < 0x12) { |
3148 | dev_info(dev, "ack timeout value(0x%x) must bigger than 0x12.\n", | |
3149 | attr->timeout); | |
3150 | roce_set_field(context->qpc_bytes_156, | |
3151 | QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_M, | |
3152 | QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_S, | |
3153 | 0x12); | |
3154 | } else { | |
3155 | roce_set_field(context->qpc_bytes_156, | |
3156 | QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_M, | |
3157 | QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_S, | |
3158 | attr->timeout); | |
3159 | } | |
9a443537 | 3160 | roce_set_field(context->qpc_bytes_156, |
3161 | QP_CONTEXT_QPC_BYTES_156_RNR_RETRY_COUNT_INIT_M, | |
3162 | QP_CONTEXT_QPC_BYTES_156_RNR_RETRY_COUNT_INIT_S, | |
3163 | attr->rnr_retry); | |
3164 | roce_set_field(context->qpc_bytes_156, | |
3165 | QP_CONTEXT_QPC_BYTES_156_PORT_NUM_M, | |
3166 | QP_CONTEXT_QPC_BYTES_156_PORT_NUM_S, | |
7716809e | 3167 | hr_qp->phy_port); |
9a443537 | 3168 | roce_set_field(context->qpc_bytes_156, |
3169 | QP_CONTEXT_QPC_BYTES_156_SL_M, | |
d8966fcd DC |
3170 | QP_CONTEXT_QPC_BYTES_156_SL_S, |
3171 | rdma_ah_get_sl(&attr->ah_attr)); | |
3172 | hr_qp->sl = rdma_ah_get_sl(&attr->ah_attr); | |
9a443537 | 3173 | roce_set_field(context->qpc_bytes_156, |
3174 | QP_CONTEXT_QPC_BYTES_156_INITIATOR_DEPTH_M, | |
3175 | QP_CONTEXT_QPC_BYTES_156_INITIATOR_DEPTH_S, | |
3176 | ilog2((unsigned int)attr->max_rd_atomic)); | |
3177 | roce_set_field(context->qpc_bytes_156, | |
3178 | QP_CONTEXT_QPC_BYTES_156_ACK_REQ_IND_M, | |
3179 | QP_CONTEXT_QPC_BYTES_156_ACK_REQ_IND_S, 0); | |
3180 | context->pkt_use_len = 0; | |
3181 | ||
3182 | roce_set_field(context->qpc_bytes_164, | |
3183 | QP_CONTEXT_QPC_BYTES_164_SQ_PSN_M, | |
3184 | QP_CONTEXT_QPC_BYTES_164_SQ_PSN_S, attr->sq_psn); | |
3185 | roce_set_field(context->qpc_bytes_164, | |
3186 | QP_CONTEXT_QPC_BYTES_164_IRRL_HEAD_M, | |
3187 | QP_CONTEXT_QPC_BYTES_164_IRRL_HEAD_S, 0); | |
3188 | ||
3189 | roce_set_field(context->qpc_bytes_168, | |
3190 | QP_CONTEXT_QPC_BYTES_168_RETRY_SQ_PSN_M, | |
3191 | QP_CONTEXT_QPC_BYTES_168_RETRY_SQ_PSN_S, | |
3192 | attr->sq_psn); | |
3193 | roce_set_field(context->qpc_bytes_168, | |
3194 | QP_CONTEXT_QPC_BYTES_168_SGE_USE_FLA_M, | |
3195 | QP_CONTEXT_QPC_BYTES_168_SGE_USE_FLA_S, 0); | |
3196 | roce_set_field(context->qpc_bytes_168, | |
3197 | QP_CONTEXT_QPC_BYTES_168_DB_TYPE_M, | |
3198 | QP_CONTEXT_QPC_BYTES_168_DB_TYPE_S, 0); | |
3199 | roce_set_bit(context->qpc_bytes_168, | |
3200 | QP_CONTEXT_QPC_BYTES_168_MSG_LP_IND_S, 0); | |
3201 | roce_set_bit(context->qpc_bytes_168, | |
3202 | QP_CONTEXT_QPC_BYTES_168_CSDB_LP_IND_S, 0); | |
3203 | roce_set_bit(context->qpc_bytes_168, | |
3204 | QP_CONTEXT_QPC_BYTES_168_QP_ERR_FLG_S, 0); | |
3205 | context->sge_use_len = 0; | |
3206 | ||
3207 | roce_set_field(context->qpc_bytes_176, | |
3208 | QP_CONTEXT_QPC_BYTES_176_DB_CUR_INDEX_M, | |
3209 | QP_CONTEXT_QPC_BYTES_176_DB_CUR_INDEX_S, 0); | |
3210 | roce_set_field(context->qpc_bytes_176, | |
3211 | QP_CONTEXT_QPC_BYTES_176_RETRY_DB_CUR_INDEX_M, | |
3212 | QP_CONTEXT_QPC_BYTES_176_RETRY_DB_CUR_INDEX_S, | |
3213 | 0); | |
3214 | roce_set_field(context->qpc_bytes_180, | |
3215 | QP_CONTEXT_QPC_BYTES_180_SQ_CUR_INDEX_M, | |
3216 | QP_CONTEXT_QPC_BYTES_180_SQ_CUR_INDEX_S, 0); | |
3217 | roce_set_field(context->qpc_bytes_180, | |
3218 | QP_CONTEXT_QPC_BYTES_180_SQ_HEAD_M, | |
3219 | QP_CONTEXT_QPC_BYTES_180_SQ_HEAD_S, 0); | |
3220 | ||
3221 | context->tx_cur_sq_wqe_ba_l = (u32)(mtts[0]); | |
3222 | ||
3223 | roce_set_field(context->qpc_bytes_188, | |
3224 | QP_CONTEXT_QPC_BYTES_188_TX_CUR_SQ_WQE_BA_H_M, | |
3225 | QP_CONTEXT_QPC_BYTES_188_TX_CUR_SQ_WQE_BA_H_S, | |
3226 | (mtts[0]) >> 32); | |
3227 | roce_set_bit(context->qpc_bytes_188, | |
3228 | QP_CONTEXT_QPC_BYTES_188_PKT_RETRY_FLG_S, 0); | |
3229 | roce_set_field(context->qpc_bytes_188, | |
3230 | QP_CONTEXT_QPC_BYTES_188_TX_RETRY_CUR_INDEX_M, | |
3231 | QP_CONTEXT_QPC_BYTES_188_TX_RETRY_CUR_INDEX_S, | |
3232 | 0); | |
deb17f6f | 3233 | } else if (!((cur_state == IB_QPS_INIT && new_state == IB_QPS_RESET) || |
9a443537 | 3234 | (cur_state == IB_QPS_INIT && new_state == IB_QPS_ERR) || |
3235 | (cur_state == IB_QPS_RTR && new_state == IB_QPS_RESET) || | |
3236 | (cur_state == IB_QPS_RTR && new_state == IB_QPS_ERR) || | |
3237 | (cur_state == IB_QPS_RTS && new_state == IB_QPS_RESET) || | |
3238 | (cur_state == IB_QPS_RTS && new_state == IB_QPS_ERR) || | |
3239 | (cur_state == IB_QPS_ERR && new_state == IB_QPS_RESET) || | |
deb17f6f LO |
3240 | (cur_state == IB_QPS_ERR && new_state == IB_QPS_ERR))) { |
3241 | dev_err(dev, "not support this status migration\n"); | |
9a443537 | 3242 | goto out; |
3243 | } | |
3244 | ||
3245 | /* Every status migrate must change state */ | |
3246 | roce_set_field(context->qpc_bytes_144, | |
3247 | QP_CONTEXT_QPC_BYTES_144_QP_STATE_M, | |
1dec243a | 3248 | QP_CONTEXT_QPC_BYTES_144_QP_STATE_S, new_state); |
9a443537 | 3249 | |
3250 | /* SW pass context to HW */ | |
3251 | ret = hns_roce_v1_qp_modify(hr_dev, &hr_qp->mtt, | |
3252 | to_hns_roce_state(cur_state), | |
3253 | to_hns_roce_state(new_state), context, | |
3254 | hr_qp); | |
3255 | if (ret) { | |
3256 | dev_err(dev, "hns_roce_qp_modify failed\n"); | |
3257 | goto out; | |
3258 | } | |
3259 | ||
3260 | /* | |
e84e40be S |
3261 | * Use rst2init to instead of init2init with drv, |
3262 | * need to hw to flash RQ HEAD by DB again | |
3263 | */ | |
9a443537 | 3264 | if (cur_state == IB_QPS_INIT && new_state == IB_QPS_INIT) { |
3265 | /* Memory barrier */ | |
3266 | wmb(); | |
9a443537 | 3267 | |
509bf0c2 LO |
3268 | roce_set_field(doorbell[0], RQ_DOORBELL_U32_4_RQ_HEAD_M, |
3269 | RQ_DOORBELL_U32_4_RQ_HEAD_S, hr_qp->rq.head); | |
3270 | roce_set_field(doorbell[1], RQ_DOORBELL_U32_8_QPN_M, | |
3271 | RQ_DOORBELL_U32_8_QPN_S, hr_qp->qpn); | |
3272 | roce_set_field(doorbell[1], RQ_DOORBELL_U32_8_CMD_M, | |
3273 | RQ_DOORBELL_U32_8_CMD_S, 1); | |
3274 | roce_set_bit(doorbell[1], RQ_DOORBELL_U32_8_HW_SYNC_S, 1); | |
3275 | ||
3276 | if (ibqp->uobject) { | |
3277 | hr_qp->rq.db_reg_l = hr_dev->reg_base + | |
2d407888 | 3278 | hr_dev->odb_offset + |
509bf0c2 | 3279 | DB_REG_OFFSET * hr_dev->priv_uar.index; |
9a443537 | 3280 | } |
509bf0c2 LO |
3281 | |
3282 | hns_roce_write64_k(doorbell, hr_qp->rq.db_reg_l); | |
9a443537 | 3283 | } |
3284 | ||
3285 | hr_qp->state = new_state; | |
3286 | ||
3287 | if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) | |
3288 | hr_qp->resp_depth = attr->max_dest_rd_atomic; | |
7716809e LO |
3289 | if (attr_mask & IB_QP_PORT) { |
3290 | hr_qp->port = attr->port_num - 1; | |
3291 | hr_qp->phy_port = hr_dev->iboe.phy_port[hr_qp->port]; | |
3292 | } | |
9a443537 | 3293 | |
3294 | if (new_state == IB_QPS_RESET && !ibqp->uobject) { | |
3295 | hns_roce_v1_cq_clean(to_hr_cq(ibqp->recv_cq), hr_qp->qpn, | |
3296 | ibqp->srq ? to_hr_srq(ibqp->srq) : NULL); | |
3297 | if (ibqp->send_cq != ibqp->recv_cq) | |
3298 | hns_roce_v1_cq_clean(to_hr_cq(ibqp->send_cq), | |
3299 | hr_qp->qpn, NULL); | |
3300 | ||
3301 | hr_qp->rq.head = 0; | |
3302 | hr_qp->rq.tail = 0; | |
3303 | hr_qp->sq.head = 0; | |
3304 | hr_qp->sq.tail = 0; | |
3305 | hr_qp->sq_next_wqe = 0; | |
3306 | } | |
3307 | out: | |
3308 | kfree(context); | |
3309 | return ret; | |
3310 | } | |
3311 | ||
d61d6de0 BVA |
3312 | static int hns_roce_v1_modify_qp(struct ib_qp *ibqp, |
3313 | const struct ib_qp_attr *attr, int attr_mask, | |
3314 | enum ib_qp_state cur_state, | |
3315 | enum ib_qp_state new_state) | |
9a443537 | 3316 | { |
3317 | ||
3318 | if (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_SMI) | |
3319 | return hns_roce_v1_m_sqp(ibqp, attr, attr_mask, cur_state, | |
3320 | new_state); | |
3321 | else | |
3322 | return hns_roce_v1_m_qp(ibqp, attr, attr_mask, cur_state, | |
3323 | new_state); | |
3324 | } | |
3325 | ||
3326 | static enum ib_qp_state to_ib_qp_state(enum hns_roce_qp_state state) | |
3327 | { | |
3328 | switch (state) { | |
3329 | case HNS_ROCE_QP_STATE_RST: | |
3330 | return IB_QPS_RESET; | |
3331 | case HNS_ROCE_QP_STATE_INIT: | |
3332 | return IB_QPS_INIT; | |
3333 | case HNS_ROCE_QP_STATE_RTR: | |
3334 | return IB_QPS_RTR; | |
3335 | case HNS_ROCE_QP_STATE_RTS: | |
3336 | return IB_QPS_RTS; | |
3337 | case HNS_ROCE_QP_STATE_SQD: | |
3338 | return IB_QPS_SQD; | |
3339 | case HNS_ROCE_QP_STATE_ERR: | |
3340 | return IB_QPS_ERR; | |
3341 | default: | |
3342 | return IB_QPS_ERR; | |
3343 | } | |
3344 | } | |
3345 | ||
3346 | static int hns_roce_v1_query_qpc(struct hns_roce_dev *hr_dev, | |
3347 | struct hns_roce_qp *hr_qp, | |
3348 | struct hns_roce_qp_context *hr_context) | |
3349 | { | |
3350 | struct hns_roce_cmd_mailbox *mailbox; | |
3351 | int ret; | |
3352 | ||
3353 | mailbox = hns_roce_alloc_cmd_mailbox(hr_dev); | |
3354 | if (IS_ERR(mailbox)) | |
3355 | return PTR_ERR(mailbox); | |
3356 | ||
3357 | ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, hr_qp->qpn, 0, | |
3358 | HNS_ROCE_CMD_QUERY_QP, | |
6b877c32 | 3359 | HNS_ROCE_CMD_TIMEOUT_MSECS); |
9a443537 | 3360 | if (!ret) |
3361 | memcpy(hr_context, mailbox->buf, sizeof(*hr_context)); | |
3362 | else | |
3363 | dev_err(&hr_dev->pdev->dev, "QUERY QP cmd process error\n"); | |
3364 | ||
3365 | hns_roce_free_cmd_mailbox(hr_dev, mailbox); | |
3366 | ||
3367 | return ret; | |
3368 | } | |
3369 | ||
/*
 * Query attributes of a special QP (QP0/QP1).
 *
 * Unlike regular QPs, the QP1C context lives in per-port register space,
 * so it is read directly with roce_read() instead of a mailbox command.
 * Fixed values (path_mtu, qkey, dest_qp_num, access flags, ...) reflect
 * the hardware's hard-wired QP1 behavior.
 */
static int hns_roce_v1_q_sqp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
			     int qp_attr_mask,
			     struct ib_qp_init_attr *qp_init_attr)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
	struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
	struct hns_roce_sqp_context context;
	u32 addr;

	mutex_lock(&hr_qp->mutex);

	/* A reset QP has no valid context in hardware; report RESET only */
	if (hr_qp->state == IB_QPS_RESET) {
		qp_attr->qp_state = IB_QPS_RESET;
		goto done;
	}

	/*
	 * Base of this port's QP1C register block.
	 * NOTE(review): the successive reads use addr + 1, + 2, ... —
	 * this assumes roce_read() offsets are in register (4-byte)
	 * units, not bytes; confirm against roce_read()'s definition.
	 */
	addr = ROCEE_QP1C_CFG0_0_REG +
	       hr_qp->port * sizeof(struct hns_roce_sqp_context);
	context.qp1c_bytes_4 = roce_read(hr_dev, addr);
	context.sq_rq_bt_l = roce_read(hr_dev, addr + 1);
	context.qp1c_bytes_12 = roce_read(hr_dev, addr + 2);
	context.qp1c_bytes_16 = roce_read(hr_dev, addr + 3);
	context.qp1c_bytes_20 = roce_read(hr_dev, addr + 4);
	context.cur_rq_wqe_ba_l = roce_read(hr_dev, addr + 5);
	context.qp1c_bytes_28 = roce_read(hr_dev, addr + 6);
	context.qp1c_bytes_32 = roce_read(hr_dev, addr + 7);
	context.cur_sq_wqe_ba_l = roce_read(hr_dev, addr + 8);
	context.qp1c_bytes_40 = roce_read(hr_dev, addr + 9);

	/* Refresh the cached software state from the hardware context */
	hr_qp->state = roce_get_field(context.qp1c_bytes_4,
				      QP1C_BYTES_4_QP_STATE_M,
				      QP1C_BYTES_4_QP_STATE_S);
	qp_attr->qp_state = hr_qp->state;
	qp_attr->path_mtu = IB_MTU_256;
	qp_attr->path_mig_state = IB_MIG_ARMED;
	qp_attr->qkey = QKEY_VAL;
	qp_attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE;
	qp_attr->rq_psn = 0;
	qp_attr->sq_psn = 0;
	qp_attr->dest_qp_num = 1;
	/* 6 == remote write | remote read enabled for QP1 */
	qp_attr->qp_access_flags = 6;

	qp_attr->pkey_index = roce_get_field(context.qp1c_bytes_20,
					     QP1C_BYTES_20_PKEY_IDX_M,
					     QP1C_BYTES_20_PKEY_IDX_S);
	qp_attr->port_num = hr_qp->port + 1;
	qp_attr->sq_draining = 0;
	qp_attr->max_rd_atomic = 0;
	qp_attr->max_dest_rd_atomic = 0;
	qp_attr->min_rnr_timer = 0;
	qp_attr->timeout = 0;
	qp_attr->retry_cnt = 0;
	qp_attr->rnr_retry = 0;
	qp_attr->alt_timeout = 0;

done:
	/* Capacity attributes come from the software queues, valid always */
	qp_attr->cur_qp_state = qp_attr->qp_state;
	qp_attr->cap.max_recv_wr = hr_qp->rq.wqe_cnt;
	qp_attr->cap.max_recv_sge = hr_qp->rq.max_gs;
	qp_attr->cap.max_send_wr = hr_qp->sq.wqe_cnt;
	qp_attr->cap.max_send_sge = hr_qp->sq.max_gs;
	qp_attr->cap.max_inline_data = 0;
	qp_init_attr->cap = qp_attr->cap;
	qp_init_attr->create_flags = 0;

	mutex_unlock(&hr_qp->mutex);

	return 0;
}
3439 | ||
3440 | static int hns_roce_v1_q_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, | |
3441 | int qp_attr_mask, | |
3442 | struct ib_qp_init_attr *qp_init_attr) | |
9a443537 | 3443 | { |
3444 | struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device); | |
3445 | struct hns_roce_qp *hr_qp = to_hr_qp(ibqp); | |
3446 | struct device *dev = &hr_dev->pdev->dev; | |
3447 | struct hns_roce_qp_context *context; | |
3448 | int tmp_qp_state = 0; | |
3449 | int ret = 0; | |
3450 | int state; | |
3451 | ||
3452 | context = kzalloc(sizeof(*context), GFP_KERNEL); | |
3453 | if (!context) | |
3454 | return -ENOMEM; | |
3455 | ||
3456 | memset(qp_attr, 0, sizeof(*qp_attr)); | |
3457 | memset(qp_init_attr, 0, sizeof(*qp_init_attr)); | |
3458 | ||
3459 | mutex_lock(&hr_qp->mutex); | |
3460 | ||
3461 | if (hr_qp->state == IB_QPS_RESET) { | |
3462 | qp_attr->qp_state = IB_QPS_RESET; | |
3463 | goto done; | |
3464 | } | |
3465 | ||
3466 | ret = hns_roce_v1_query_qpc(hr_dev, hr_qp, context); | |
3467 | if (ret) { | |
3468 | dev_err(dev, "query qpc error\n"); | |
3469 | ret = -EINVAL; | |
3470 | goto out; | |
3471 | } | |
3472 | ||
3473 | state = roce_get_field(context->qpc_bytes_144, | |
3474 | QP_CONTEXT_QPC_BYTES_144_QP_STATE_M, | |
3475 | QP_CONTEXT_QPC_BYTES_144_QP_STATE_S); | |
3476 | tmp_qp_state = (int)to_ib_qp_state((enum hns_roce_qp_state)state); | |
3477 | if (tmp_qp_state == -1) { | |
3478 | dev_err(dev, "to_ib_qp_state error\n"); | |
3479 | ret = -EINVAL; | |
3480 | goto out; | |
3481 | } | |
3482 | hr_qp->state = (u8)tmp_qp_state; | |
3483 | qp_attr->qp_state = (enum ib_qp_state)hr_qp->state; | |
3484 | qp_attr->path_mtu = (enum ib_mtu)roce_get_field(context->qpc_bytes_48, | |
3485 | QP_CONTEXT_QPC_BYTES_48_MTU_M, | |
3486 | QP_CONTEXT_QPC_BYTES_48_MTU_S); | |
3487 | qp_attr->path_mig_state = IB_MIG_ARMED; | |
2bf910d4 | 3488 | qp_attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE; |
9a443537 | 3489 | if (hr_qp->ibqp.qp_type == IB_QPT_UD) |
3490 | qp_attr->qkey = QKEY_VAL; | |
3491 | ||
3492 | qp_attr->rq_psn = roce_get_field(context->qpc_bytes_88, | |
3493 | QP_CONTEXT_QPC_BYTES_88_RX_REQ_EPSN_M, | |
3494 | QP_CONTEXT_QPC_BYTES_88_RX_REQ_EPSN_S); | |
3495 | qp_attr->sq_psn = (u32)roce_get_field(context->qpc_bytes_164, | |
3496 | QP_CONTEXT_QPC_BYTES_164_SQ_PSN_M, | |
3497 | QP_CONTEXT_QPC_BYTES_164_SQ_PSN_S); | |
3498 | qp_attr->dest_qp_num = (u8)roce_get_field(context->qpc_bytes_36, | |
3499 | QP_CONTEXT_QPC_BYTES_36_DEST_QP_M, | |
3500 | QP_CONTEXT_QPC_BYTES_36_DEST_QP_S); | |
3501 | qp_attr->qp_access_flags = ((roce_get_bit(context->qpc_bytes_4, | |
3502 | QP_CONTEXT_QPC_BYTE_4_RDMA_READ_ENABLE_S)) << 2) | | |
3503 | ((roce_get_bit(context->qpc_bytes_4, | |
3504 | QP_CONTEXT_QPC_BYTE_4_RDMA_WRITE_ENABLE_S)) << 1) | | |
3505 | ((roce_get_bit(context->qpc_bytes_4, | |
3506 | QP_CONTEXT_QPC_BYTE_4_ATOMIC_OPERATION_ENABLE_S)) << 3); | |
3507 | ||
3508 | if (hr_qp->ibqp.qp_type == IB_QPT_RC || | |
3509 | hr_qp->ibqp.qp_type == IB_QPT_UC) { | |
d8966fcd DC |
3510 | struct ib_global_route *grh = |
3511 | rdma_ah_retrieve_grh(&qp_attr->ah_attr); | |
3512 | ||
3513 | rdma_ah_set_sl(&qp_attr->ah_attr, | |
3514 | roce_get_field(context->qpc_bytes_156, | |
3515 | QP_CONTEXT_QPC_BYTES_156_SL_M, | |
3516 | QP_CONTEXT_QPC_BYTES_156_SL_S)); | |
3517 | rdma_ah_set_ah_flags(&qp_attr->ah_attr, IB_AH_GRH); | |
3518 | grh->flow_label = | |
3519 | roce_get_field(context->qpc_bytes_48, | |
3520 | QP_CONTEXT_QPC_BYTES_48_FLOWLABEL_M, | |
3521 | QP_CONTEXT_QPC_BYTES_48_FLOWLABEL_S); | |
3522 | grh->sgid_index = | |
3523 | roce_get_field(context->qpc_bytes_36, | |
3524 | QP_CONTEXT_QPC_BYTES_36_SGID_INDEX_M, | |
3525 | QP_CONTEXT_QPC_BYTES_36_SGID_INDEX_S); | |
3526 | grh->hop_limit = | |
3527 | roce_get_field(context->qpc_bytes_44, | |
3528 | QP_CONTEXT_QPC_BYTES_44_HOPLMT_M, | |
3529 | QP_CONTEXT_QPC_BYTES_44_HOPLMT_S); | |
3530 | grh->traffic_class = | |
3531 | roce_get_field(context->qpc_bytes_48, | |
3532 | QP_CONTEXT_QPC_BYTES_48_TCLASS_M, | |
3533 | QP_CONTEXT_QPC_BYTES_48_TCLASS_S); | |
3534 | ||
3535 | memcpy(grh->dgid.raw, context->dgid, | |
3536 | sizeof(grh->dgid.raw)); | |
9a443537 | 3537 | } |
3538 | ||
3539 | qp_attr->pkey_index = roce_get_field(context->qpc_bytes_12, | |
3540 | QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_M, | |
3541 | QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_S); | |
dd783a21 | 3542 | qp_attr->port_num = hr_qp->port + 1; |
9a443537 | 3543 | qp_attr->sq_draining = 0; |
be7acd9d | 3544 | qp_attr->max_rd_atomic = 1 << roce_get_field(context->qpc_bytes_156, |
9a443537 | 3545 | QP_CONTEXT_QPC_BYTES_156_INITIATOR_DEPTH_M, |
3546 | QP_CONTEXT_QPC_BYTES_156_INITIATOR_DEPTH_S); | |
be7acd9d | 3547 | qp_attr->max_dest_rd_atomic = 1 << roce_get_field(context->qpc_bytes_32, |
9a443537 | 3548 | QP_CONTEXT_QPC_BYTES_32_RESPONDER_RESOURCES_M, |
3549 | QP_CONTEXT_QPC_BYTES_32_RESPONDER_RESOURCES_S); | |
3550 | qp_attr->min_rnr_timer = (u8)(roce_get_field(context->qpc_bytes_24, | |
3551 | QP_CONTEXT_QPC_BYTES_24_MINIMUM_RNR_NAK_TIMER_M, | |
3552 | QP_CONTEXT_QPC_BYTES_24_MINIMUM_RNR_NAK_TIMER_S)); | |
3553 | qp_attr->timeout = (u8)(roce_get_field(context->qpc_bytes_156, | |
3554 | QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_M, | |
3555 | QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_S)); | |
3556 | qp_attr->retry_cnt = roce_get_field(context->qpc_bytes_148, | |
3557 | QP_CONTEXT_QPC_BYTES_148_RETRY_COUNT_M, | |
3558 | QP_CONTEXT_QPC_BYTES_148_RETRY_COUNT_S); | |
3559 | qp_attr->rnr_retry = context->rnr_retry; | |
3560 | ||
3561 | done: | |
3562 | qp_attr->cur_qp_state = qp_attr->qp_state; | |
3563 | qp_attr->cap.max_recv_wr = hr_qp->rq.wqe_cnt; | |
3564 | qp_attr->cap.max_recv_sge = hr_qp->rq.max_gs; | |
3565 | ||
3566 | if (!ibqp->uobject) { | |
3567 | qp_attr->cap.max_send_wr = hr_qp->sq.wqe_cnt; | |
3568 | qp_attr->cap.max_send_sge = hr_qp->sq.max_gs; | |
3569 | } else { | |
3570 | qp_attr->cap.max_send_wr = 0; | |
3571 | qp_attr->cap.max_send_sge = 0; | |
3572 | } | |
3573 | ||
3574 | qp_init_attr->cap = qp_attr->cap; | |
3575 | ||
3576 | out: | |
3577 | mutex_unlock(&hr_qp->mutex); | |
3578 | kfree(context); | |
3579 | return ret; | |
3580 | } | |
3581 | ||
d61d6de0 BVA |
3582 | static int hns_roce_v1_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, |
3583 | int qp_attr_mask, | |
3584 | struct ib_qp_init_attr *qp_init_attr) | |
9eefa953 LO |
3585 | { |
3586 | struct hns_roce_qp *hr_qp = to_hr_qp(ibqp); | |
3587 | ||
3588 | return hr_qp->doorbell_qpn <= 1 ? | |
3589 | hns_roce_v1_q_sqp(ibqp, qp_attr, qp_attr_mask, qp_init_attr) : | |
3590 | hns_roce_v1_q_qp(ibqp, qp_attr, qp_attr_mask, qp_init_attr); | |
3591 | } | |
d838c481 | 3592 | |
/*
 * Check whether the hardware's send-doorbell (SDB) engine has made
 * progress since the caller's snapshot (*old_send / *old_retry).
 *
 * Sets *success_flags to 1 once the combined send+retry counter has
 * advanced by more than SDB_ST_CMP_VAL; otherwise updates the snapshot
 * so the caller can keep polling.  *tsp_st selects which comparison
 * applies (whether the retry counter was cleared).
 */
static void hns_roce_check_sdb_status(struct hns_roce_dev *hr_dev,
				      u32 *old_send, u32 *old_retry,
				      u32 *tsp_st, u32 *success_flags)
{
	u32 sdb_retry_cnt;
	u32 sdb_send_ptr;
	u32 cur_cnt, old_cnt;
	u32 send_ptr;

	/* Current hardware view: send pointer plus retry count */
	sdb_send_ptr = roce_read(hr_dev, ROCEE_SDB_SEND_PTR_REG);
	sdb_retry_cnt = roce_read(hr_dev, ROCEE_SDB_RETRY_CNT_REG);
	cur_cnt = roce_get_field(sdb_send_ptr,
				 ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_M,
				 ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_S) +
		  roce_get_field(sdb_retry_cnt,
				 ROCEE_SDB_RETRY_CNT_SDB_RETRY_CT_M,
				 ROCEE_SDB_RETRY_CNT_SDB_RETRY_CT_S);
	if (!roce_get_bit(*tsp_st, ROCEE_CNT_CLR_CE_CNT_CLR_CE_S)) {
		/* Retry counter not cleared: compare send+retry totals */
		old_cnt = roce_get_field(*old_send,
					 ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_M,
					 ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_S) +
			  roce_get_field(*old_retry,
					 ROCEE_SDB_RETRY_CNT_SDB_RETRY_CT_M,
					 ROCEE_SDB_RETRY_CNT_SDB_RETRY_CT_S);
		if (cur_cnt - old_cnt > SDB_ST_CMP_VAL)
			*success_flags = 1;
	} else {
		/* Retry counter was cleared: compare send pointers only */
		old_cnt = roce_get_field(*old_send,
					 ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_M,
					 ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_S);
		if (cur_cnt - old_cnt > SDB_ST_CMP_VAL) {
			*success_flags = 1;
		} else {
			/*
			 * No progress yet: fold the current retry count
			 * into the cached send pointer so the next poll
			 * compares against an up-to-date baseline.
			 */
			send_ptr = roce_get_field(*old_send,
					    ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_M,
					    ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_S) +
				   roce_get_field(sdb_retry_cnt,
					    ROCEE_SDB_RETRY_CNT_SDB_RETRY_CT_M,
					    ROCEE_SDB_RETRY_CNT_SDB_RETRY_CT_S);
			roce_set_field(*old_send,
				       ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_M,
				       ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_S,
				       send_ptr);
		}
	}
}
3639 | ||
/*
 * Poll hardware until all doorbells issued for @hr_qp before it went to
 * ERR have been fully processed, in two stages:
 *
 *   STAGE1: wait for the SDB send pointer to catch up with the issue
 *           pointer snapshot (@sdb_issue_ptr) taken by the caller.
 *   STAGE2: wait for the SDB invalidate counter to advance past the
 *           value recorded at the end of stage 1 (*sdb_inv_cnt).
 *
 * *wait_stage is both input (resume point) and output (progress); it is
 * set to HNS_ROCE_V1_DB_WAIT_OK on completion.  On timeout the function
 * returns 0 with *wait_stage left short of WAIT_OK, so the caller can
 * reschedule and resume later.  Returns -EINVAL for a bogus stage.
 */
static int check_qp_db_process_status(struct hns_roce_dev *hr_dev,
				      struct hns_roce_qp *hr_qp,
				      u32 sdb_issue_ptr,
				      u32 *sdb_inv_cnt,
				      u32 *wait_stage)
{
	struct device *dev = &hr_dev->pdev->dev;
	u32 sdb_send_ptr, old_send;
	u32 success_flags = 0;
	unsigned long end;
	u32 old_retry;
	u32 inv_cnt;
	u32 tsp_st;

	if (*wait_stage > HNS_ROCE_V1_DB_STAGE2 ||
	    *wait_stage < HNS_ROCE_V1_DB_STAGE1) {
		dev_err(dev, "QP(0x%lx) db status wait stage(%d) error!\n",
			hr_qp->qpn, *wait_stage);
		return -EINVAL;
	}

	/* Calculate the total timeout for the entire verification process */
	end = msecs_to_jiffies(HNS_ROCE_V1_CHECK_DB_TIMEOUT_MSECS) + jiffies;

	if (*wait_stage == HNS_ROCE_V1_DB_STAGE1) {
		/* Query db process status, until hw process completely */
		sdb_send_ptr = roce_read(hr_dev, ROCEE_SDB_SEND_PTR_REG);
		while (roce_hw_index_cmp_lt(sdb_send_ptr, sdb_issue_ptr,
					    ROCEE_SDB_PTR_CMP_BITS)) {
			if (!time_before(jiffies, end)) {
				dev_dbg(dev, "QP(0x%lx) db process stage1 timeout. issue 0x%x send 0x%x.\n",
					hr_qp->qpn, sdb_issue_ptr,
					sdb_send_ptr);
				/* timeout: caller resumes from STAGE1 */
				return 0;
			}

			msleep(HNS_ROCE_V1_CHECK_DB_SLEEP_MSECS);
			sdb_send_ptr = roce_read(hr_dev,
						 ROCEE_SDB_SEND_PTR_REG);
		}

		/*
		 * Send ptr caught up exactly with the issue ptr: the last
		 * doorbell may still be in flight, so additionally wait
		 * for counter progress (or an empty queue-head FIFO).
		 */
		if (roce_get_field(sdb_issue_ptr,
				   ROCEE_SDB_ISSUE_PTR_SDB_ISSUE_PTR_M,
				   ROCEE_SDB_ISSUE_PTR_SDB_ISSUE_PTR_S) ==
		    roce_get_field(sdb_send_ptr,
				   ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_M,
				   ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_S)) {
			old_send = roce_read(hr_dev, ROCEE_SDB_SEND_PTR_REG);
			old_retry = roce_read(hr_dev, ROCEE_SDB_RETRY_CNT_REG);

			do {
				tsp_st = roce_read(hr_dev, ROCEE_TSP_BP_ST_REG);
				if (roce_get_bit(tsp_st,
					ROCEE_TSP_BP_ST_QH_FIFO_ENTRY_S) == 1) {
					/* FIFO empty: nothing left to drain */
					*wait_stage = HNS_ROCE_V1_DB_WAIT_OK;
					return 0;
				}

				if (!time_before(jiffies, end)) {
					dev_dbg(dev, "QP(0x%lx) db process stage1 timeout when send ptr equals issue ptr.\n"
						"issue 0x%x send 0x%x.\n",
						hr_qp->qpn, sdb_issue_ptr,
						sdb_send_ptr);
					return 0;
				}

				msleep(HNS_ROCE_V1_CHECK_DB_SLEEP_MSECS);

				hns_roce_check_sdb_status(hr_dev, &old_send,
							  &old_retry, &tsp_st,
							  &success_flags);
			} while (!success_flags);
		}

		*wait_stage = HNS_ROCE_V1_DB_STAGE2;

		/* Get list pointer */
		*sdb_inv_cnt = roce_read(hr_dev, ROCEE_SDB_INV_CNT_REG);
		dev_dbg(dev, "QP(0x%lx) db process stage2. inv cnt = 0x%x.\n",
			hr_qp->qpn, *sdb_inv_cnt);
	}

	if (*wait_stage == HNS_ROCE_V1_DB_STAGE2) {
		/* Query db's list status, until hw reversal */
		inv_cnt = roce_read(hr_dev, ROCEE_SDB_INV_CNT_REG);
		while (roce_hw_index_cmp_lt(inv_cnt,
					    *sdb_inv_cnt + SDB_INV_CNT_OFFSET,
					    ROCEE_SDB_CNT_CMP_BITS)) {
			if (!time_before(jiffies, end)) {
				dev_dbg(dev, "QP(0x%lx) db process stage2 timeout. inv cnt 0x%x.\n",
					hr_qp->qpn, inv_cnt);
				/* timeout: caller resumes from STAGE2 */
				return 0;
			}

			msleep(HNS_ROCE_V1_CHECK_DB_SLEEP_MSECS);
			inv_cnt = roce_read(hr_dev, ROCEE_SDB_INV_CNT_REG);
		}

		*wait_stage = HNS_ROCE_V1_DB_WAIT_OK;
	}

	return 0;
}
3743 | ||
/*
 * Drive @hr_qp toward RESET before destruction.
 *
 * If the QP is not already in RESET: move it to ERR, record the issued
 * send-doorbell pointer, then wait for hardware to finish processing
 * outstanding doorbells.  If that wait times out, *is_timeout is set
 * and @qp_work_entry is primed so the caller can finish the teardown
 * asynchronously from a workqueue; otherwise the QP is moved to RESET
 * here.  Returns 0 on success/timeout, negative errno on failure.
 */
static int check_qp_reset_state(struct hns_roce_dev *hr_dev,
				struct hns_roce_qp *hr_qp,
				struct hns_roce_qp_work *qp_work_entry,
				int *is_timeout)
{
	struct device *dev = &hr_dev->pdev->dev;
	u32 sdb_issue_ptr;
	int ret;

	if (hr_qp->state != IB_QPS_RESET) {
		/* Set qp to ERR, waiting for hw complete processing all dbs */
		ret = hns_roce_v1_modify_qp(&hr_qp->ibqp, NULL, 0, hr_qp->state,
					    IB_QPS_ERR);
		if (ret) {
			dev_err(dev, "Modify QP(0x%lx) to ERR failed!\n",
				hr_qp->qpn);
			return ret;
		}

		/* Record issued doorbell */
		sdb_issue_ptr = roce_read(hr_dev, ROCEE_SDB_ISSUE_PTR_REG);
		qp_work_entry->sdb_issue_ptr = sdb_issue_ptr;
		qp_work_entry->db_wait_stage = HNS_ROCE_V1_DB_STAGE1;

		/* Query db process status, until hw process completely */
		ret = check_qp_db_process_status(hr_dev, hr_qp, sdb_issue_ptr,
						 &qp_work_entry->sdb_inv_cnt,
						 &qp_work_entry->db_wait_stage);
		if (ret) {
			dev_err(dev, "Check QP(0x%lx) db process status failed!\n",
				hr_qp->qpn);
			return ret;
		}

		/* Not drained in time: defer the rest to the work handler */
		if (qp_work_entry->db_wait_stage != HNS_ROCE_V1_DB_WAIT_OK) {
			qp_work_entry->sche_cnt = 0;
			*is_timeout = 1;
			return 0;
		}

		/* Modify qp to reset before destroying qp */
		ret = hns_roce_v1_modify_qp(&hr_qp->ibqp, NULL, 0, hr_qp->state,
					    IB_QPS_RESET);
		if (ret) {
			dev_err(dev, "Modify QP(0x%lx) to RST failed!\n",
				hr_qp->qpn);
			return ret;
		}
	}

	return 0;
}
3796 | ||
/*
 * Deferred QP-destroy work handler.
 *
 * Runs when hns_roce_v1_destroy_qp() timed out waiting for hardware to
 * drain the QP's doorbells.  Re-checks doorbell progress; if still not
 * drained (and requeueing is allowed), requeues itself.  Once drained,
 * resets the QP and releases the remaining resources (QPC slot, QPN for
 * RC QPs, and the hr_qp/sqp structure itself).
 */
static void hns_roce_v1_destroy_qp_work_fn(struct work_struct *work)
{
	struct hns_roce_qp_work *qp_work_entry;
	struct hns_roce_v1_priv *priv;
	struct hns_roce_dev *hr_dev;
	struct hns_roce_qp *hr_qp;
	struct device *dev;
	unsigned long qpn;
	int ret;

	qp_work_entry = container_of(work, struct hns_roce_qp_work, work);
	hr_dev = to_hr_dev(qp_work_entry->ib_dev);
	dev = &hr_dev->pdev->dev;
	priv = (struct hns_roce_v1_priv *)hr_dev->priv;
	hr_qp = qp_work_entry->qp;
	/* Cache qpn: hr_qp is freed below but we still log the number */
	qpn = hr_qp->qpn;

	dev_dbg(dev, "Schedule destroy QP(0x%lx) work.\n", qpn);

	qp_work_entry->sche_cnt++;

	/* Query db process status, until hw process completely */
	ret = check_qp_db_process_status(hr_dev, hr_qp,
					 qp_work_entry->sdb_issue_ptr,
					 &qp_work_entry->sdb_inv_cnt,
					 &qp_work_entry->db_wait_stage);
	if (ret) {
		dev_err(dev, "Check QP(0x%lx) db process status failed!\n",
			qpn);
		return;
	}

	/* Still draining: try again later unless the driver is unloading */
	if (qp_work_entry->db_wait_stage != HNS_ROCE_V1_DB_WAIT_OK &&
	    priv->des_qp.requeue_flag) {
		queue_work(priv->des_qp.qp_wq, work);
		return;
	}

	/* Modify qp to reset before destroying qp */
	ret = hns_roce_v1_modify_qp(&hr_qp->ibqp, NULL, 0, hr_qp->state,
				    IB_QPS_RESET);
	if (ret) {
		dev_err(dev, "Modify QP(0x%lx) to RST failed!\n", qpn);
		return;
	}

	hns_roce_qp_remove(hr_dev, hr_qp);
	hns_roce_qp_free(hr_dev, hr_qp);

	if (hr_qp->ibqp.qp_type == IB_QPT_RC) {
		/* RC QP, release QPN */
		hns_roce_release_range_qp(hr_dev, qpn, 1);
		kfree(hr_qp);
	} else
		/* Special QPs are embedded in a larger sqp structure */
		kfree(hr_to_hr_sqp(hr_qp));

	kfree(qp_work_entry);

	dev_dbg(dev, "Accomplished destroy QP(0x%lx) work.\n", qpn);
}
3857 | ||
/*
 * Destroy a QP (hip06).
 *
 * First tries to move the QP to RESET synchronously; if the hardware
 * doorbell drain times out (is_timeout), the final teardown of the QPC
 * slot, QPN and hr_qp structure is handed off to a workqueue item
 * (hns_roce_v1_destroy_qp_work_fn).  CQ entries, MTT and queue buffers
 * are always released here.
 */
int hns_roce_v1_destroy_qp(struct ib_qp *ibqp)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
	struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_qp_work qp_work_entry;
	struct hns_roce_qp_work *qp_work;
	struct hns_roce_v1_priv *priv;
	struct hns_roce_cq *send_cq, *recv_cq;
	int is_user = !!ibqp->pd->uobject;
	int is_timeout = 0;
	int ret;

	ret = check_qp_reset_state(hr_dev, hr_qp, &qp_work_entry, &is_timeout);
	if (ret) {
		dev_err(dev, "QP reset state check failed(%d)!\n", ret);
		return ret;
	}

	send_cq = to_hr_cq(hr_qp->ibqp.send_cq);
	recv_cq = to_hr_cq(hr_qp->ibqp.recv_cq);

	/* Purge this QP's CQEs (kernel QPs only; user CQs are user-polled) */
	hns_roce_lock_cqs(send_cq, recv_cq);
	if (!is_user) {
		__hns_roce_v1_cq_clean(recv_cq, hr_qp->qpn, hr_qp->ibqp.srq ?
				       to_hr_srq(hr_qp->ibqp.srq) : NULL);
		if (send_cq != recv_cq)
			__hns_roce_v1_cq_clean(send_cq, hr_qp->qpn, NULL);
	}
	hns_roce_unlock_cqs(send_cq, recv_cq);

	/* On timeout these steps are deferred to the destroy work item */
	if (!is_timeout) {
		hns_roce_qp_remove(hr_dev, hr_qp);
		hns_roce_qp_free(hr_dev, hr_qp);

		/* RC QP, release QPN */
		if (hr_qp->ibqp.qp_type == IB_QPT_RC)
			hns_roce_release_range_qp(hr_dev, hr_qp->qpn, 1);
	}

	hns_roce_mtt_cleanup(hr_dev, &hr_qp->mtt);

	if (is_user)
		ib_umem_release(hr_qp->umem);
	else {
		kfree(hr_qp->sq.wrid);
		kfree(hr_qp->rq.wrid);

		hns_roce_buf_free(hr_dev, hr_qp->buff_size, &hr_qp->hr_buf);
	}

	if (!is_timeout) {
		if (hr_qp->ibqp.qp_type == IB_QPT_RC)
			kfree(hr_qp);
		else
			/* Special QPs are embedded in a larger sqp struct */
			kfree(hr_to_hr_sqp(hr_qp));
	} else {
		/* Hand the rest of the teardown to the destroy-QP worker */
		qp_work = kzalloc(sizeof(*qp_work), GFP_KERNEL);
		if (!qp_work)
			return -ENOMEM;

		INIT_WORK(&qp_work->work, hns_roce_v1_destroy_qp_work_fn);
		qp_work->ib_dev	= &hr_dev->ib_dev;
		qp_work->qp		= hr_qp;
		qp_work->db_wait_stage	= qp_work_entry.db_wait_stage;
		qp_work->sdb_issue_ptr	= qp_work_entry.sdb_issue_ptr;
		qp_work->sdb_inv_cnt	= qp_work_entry.sdb_inv_cnt;
		qp_work->sche_cnt	= qp_work_entry.sche_cnt;

		priv = (struct hns_roce_v1_priv *)hr_dev->priv;
		queue_work(priv->des_qp.qp_wq, &qp_work->work);
		dev_dbg(dev, "Begin destroy QP(0x%lx) work.\n", hr_qp->qpn);
	}

	return 0;
}
3934 | ||
/*
 * Destroy a CQ (hip06).
 *
 * After unregistering the CQ, waits for hardware to finish writing any
 * outstanding CQEs (tracked via the SCAEP write-CQE counter) before the
 * CQ buffer is released, then frees the MTT, the buffer/umem and the CQ
 * structure.  Returns -ETIMEDOUT if the drain never completes (the
 * resources are still released in that case).
 */
static int hns_roce_v1_destroy_cq(struct ib_cq *ibcq)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibcq->device);
	struct hns_roce_cq *hr_cq = to_hr_cq(ibcq);
	struct device *dev = &hr_dev->pdev->dev;
	u32 cqe_cnt_ori;
	u32 cqe_cnt_cur;
	u32 cq_buf_size;
	int wait_time = 0;
	int ret = 0;

	hns_roce_free_cq(hr_dev, hr_cq);

	/*
	 * Before freeing cq buffer, we need to ensure that the outstanding CQE
	 * have been written by checking the CQE counter.
	 */
	cqe_cnt_ori = roce_read(hr_dev, ROCEE_SCAEP_WR_CQE_CNT);
	while (1) {
		/* No CQE write commands pending at all: safe to proceed */
		if (roce_read(hr_dev, ROCEE_CAEP_CQE_WCMD_EMPTY) &
		    HNS_ROCE_CQE_WCMD_EMPTY_BIT)
			break;

		/* Counter advanced far enough: our CQEs must be flushed */
		cqe_cnt_cur = roce_read(hr_dev, ROCEE_SCAEP_WR_CQE_CNT);
		if ((cqe_cnt_cur - cqe_cnt_ori) >= HNS_ROCE_MIN_CQE_CNT)
			break;

		msleep(HNS_ROCE_EACH_FREE_CQ_WAIT_MSECS);
		if (wait_time > HNS_ROCE_MAX_FREE_CQ_WAIT_CNT) {
			dev_warn(dev, "Destroy cq 0x%lx timeout!\n",
				hr_cq->cqn);
			ret = -ETIMEDOUT;
			break;
		}
		wait_time++;
	}

	hns_roce_mtt_cleanup(hr_dev, &hr_cq->hr_buf.hr_mtt);

	if (ibcq->uobject)
		ib_umem_release(hr_cq->umem);
	else {
		/* Free the buff of stored cq */
		cq_buf_size = (ibcq->cqe + 1) * hr_dev->caps.cq_entry_sz;
		hns_roce_buf_free(hr_dev, cq_buf_size, &hr_cq->hr_buf.hr_buf);
	}

	kfree(hr_cq);

	return ret;
}
3986 | ||
b16f8188 YL |
3987 | static void set_eq_cons_index_v1(struct hns_roce_eq *eq, int req_not) |
3988 | { | |
3989 | roce_raw_write((eq->cons_index & HNS_ROCE_V1_CONS_IDX_M) | | |
3990 | (req_not << eq->log_entries), eq->doorbell); | |
b16f8188 YL |
3991 | } |
3992 | ||
3993 | static void hns_roce_v1_wq_catas_err_handle(struct hns_roce_dev *hr_dev, | |
3994 | struct hns_roce_aeqe *aeqe, int qpn) | |
3995 | { | |
3996 | struct device *dev = &hr_dev->pdev->dev; | |
3997 | ||
3998 | dev_warn(dev, "Local Work Queue Catastrophic Error.\n"); | |
3999 | switch (roce_get_field(aeqe->asyn, HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_M, | |
4000 | HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_S)) { | |
4001 | case HNS_ROCE_LWQCE_QPC_ERROR: | |
4002 | dev_warn(dev, "QP %d, QPC error.\n", qpn); | |
4003 | break; | |
4004 | case HNS_ROCE_LWQCE_MTU_ERROR: | |
4005 | dev_warn(dev, "QP %d, MTU error.\n", qpn); | |
4006 | break; | |
4007 | case HNS_ROCE_LWQCE_WQE_BA_ADDR_ERROR: | |
4008 | dev_warn(dev, "QP %d, WQE BA addr error.\n", qpn); | |
4009 | break; | |
4010 | case HNS_ROCE_LWQCE_WQE_ADDR_ERROR: | |
4011 | dev_warn(dev, "QP %d, WQE addr error.\n", qpn); | |
4012 | break; | |
4013 | case HNS_ROCE_LWQCE_SQ_WQE_SHIFT_ERROR: | |
4014 | dev_warn(dev, "QP %d, WQE shift error\n", qpn); | |
4015 | break; | |
4016 | case HNS_ROCE_LWQCE_SL_ERROR: | |
4017 | dev_warn(dev, "QP %d, SL error.\n", qpn); | |
4018 | break; | |
4019 | case HNS_ROCE_LWQCE_PORT_ERROR: | |
4020 | dev_warn(dev, "QP %d, port error.\n", qpn); | |
4021 | break; | |
4022 | default: | |
4023 | break; | |
4024 | } | |
4025 | } | |
4026 | ||
4027 | static void hns_roce_v1_local_wq_access_err_handle(struct hns_roce_dev *hr_dev, | |
4028 | struct hns_roce_aeqe *aeqe, | |
4029 | int qpn) | |
4030 | { | |
4031 | struct device *dev = &hr_dev->pdev->dev; | |
4032 | ||
4033 | dev_warn(dev, "Local Access Violation Work Queue Error.\n"); | |
4034 | switch (roce_get_field(aeqe->asyn, HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_M, | |
4035 | HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_S)) { | |
4036 | case HNS_ROCE_LAVWQE_R_KEY_VIOLATION: | |
4037 | dev_warn(dev, "QP %d, R_key violation.\n", qpn); | |
4038 | break; | |
4039 | case HNS_ROCE_LAVWQE_LENGTH_ERROR: | |
4040 | dev_warn(dev, "QP %d, length error.\n", qpn); | |
4041 | break; | |
4042 | case HNS_ROCE_LAVWQE_VA_ERROR: | |
4043 | dev_warn(dev, "QP %d, VA error.\n", qpn); | |
4044 | break; | |
4045 | case HNS_ROCE_LAVWQE_PD_ERROR: | |
4046 | dev_err(dev, "QP %d, PD error.\n", qpn); | |
4047 | break; | |
4048 | case HNS_ROCE_LAVWQE_RW_ACC_ERROR: | |
4049 | dev_warn(dev, "QP %d, rw acc error.\n", qpn); | |
4050 | break; | |
4051 | case HNS_ROCE_LAVWQE_KEY_STATE_ERROR: | |
4052 | dev_warn(dev, "QP %d, key state error.\n", qpn); | |
4053 | break; | |
4054 | case HNS_ROCE_LAVWQE_MR_OPERATION_ERROR: | |
4055 | dev_warn(dev, "QP %d, MR operation error.\n", qpn); | |
4056 | break; | |
4057 | default: | |
4058 | break; | |
4059 | } | |
4060 | } | |
4061 | ||
4062 | static void hns_roce_v1_qp_err_handle(struct hns_roce_dev *hr_dev, | |
4063 | struct hns_roce_aeqe *aeqe, | |
4064 | int event_type) | |
4065 | { | |
4066 | struct device *dev = &hr_dev->pdev->dev; | |
4067 | int phy_port; | |
4068 | int qpn; | |
4069 | ||
4070 | qpn = roce_get_field(aeqe->event.qp_event.qp, | |
4071 | HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_M, | |
4072 | HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_S); | |
4073 | phy_port = roce_get_field(aeqe->event.qp_event.qp, | |
4074 | HNS_ROCE_AEQE_EVENT_QP_EVENT_PORT_NUM_M, | |
4075 | HNS_ROCE_AEQE_EVENT_QP_EVENT_PORT_NUM_S); | |
4076 | if (qpn <= 1) | |
4077 | qpn = HNS_ROCE_MAX_PORTS * qpn + phy_port; | |
4078 | ||
4079 | switch (event_type) { | |
4080 | case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR: | |
4081 | dev_warn(dev, "Invalid Req Local Work Queue Error.\n" | |
4082 | "QP %d, phy_port %d.\n", qpn, phy_port); | |
4083 | break; | |
4084 | case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR: | |
4085 | hns_roce_v1_wq_catas_err_handle(hr_dev, aeqe, qpn); | |
4086 | break; | |
4087 | case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR: | |
4088 | hns_roce_v1_local_wq_access_err_handle(hr_dev, aeqe, qpn); | |
4089 | break; | |
4090 | default: | |
4091 | break; | |
4092 | } | |
4093 | ||
4094 | hns_roce_qp_event(hr_dev, qpn, event_type); | |
4095 | } | |
4096 | ||
/*
 * Handle a CQ-related asynchronous event: log the failure and forward the
 * event to the core CQ event handler.
 */
static void hns_roce_v1_cq_err_handle(struct hns_roce_dev *hr_dev,
				      struct hns_roce_aeqe *aeqe,
				      int event_type)
{
	struct device *dev = &hr_dev->pdev->dev;
	u32 cqn;

	/*
	 * NOTE(review): le32_to_cpu() is applied to the already-extracted
	 * field value rather than to the raw descriptor word — confirm the
	 * intended byte-order handling of aeqe->event.cq_event.cq.
	 */
	cqn = le32_to_cpu(roce_get_field(aeqe->event.cq_event.cq,
					 HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_M,
					 HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_S));

	switch (event_type) {
	case HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR:
		dev_warn(dev, "CQ 0x%x access err.\n", cqn);
		break;
	case HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW:
		dev_warn(dev, "CQ 0x%x overflow\n", cqn);
		break;
	case HNS_ROCE_EVENT_TYPE_CQ_ID_INVALID:
		dev_warn(dev, "CQ 0x%x ID invalid.\n", cqn);
		break;
	default:
		break;
	}

	hns_roce_cq_event(hr_dev, cqn, event_type);
}
4124 | ||
4125 | static void hns_roce_v1_db_overflow_handle(struct hns_roce_dev *hr_dev, | |
4126 | struct hns_roce_aeqe *aeqe) | |
4127 | { | |
4128 | struct device *dev = &hr_dev->pdev->dev; | |
4129 | ||
4130 | switch (roce_get_field(aeqe->asyn, HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_M, | |
4131 | HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_S)) { | |
4132 | case HNS_ROCE_DB_SUBTYPE_SDB_OVF: | |
4133 | dev_warn(dev, "SDB overflow.\n"); | |
4134 | break; | |
4135 | case HNS_ROCE_DB_SUBTYPE_SDB_ALM_OVF: | |
4136 | dev_warn(dev, "SDB almost overflow.\n"); | |
4137 | break; | |
4138 | case HNS_ROCE_DB_SUBTYPE_SDB_ALM_EMP: | |
4139 | dev_warn(dev, "SDB almost empty.\n"); | |
4140 | break; | |
4141 | case HNS_ROCE_DB_SUBTYPE_ODB_OVF: | |
4142 | dev_warn(dev, "ODB overflow.\n"); | |
4143 | break; | |
4144 | case HNS_ROCE_DB_SUBTYPE_ODB_ALM_OVF: | |
4145 | dev_warn(dev, "ODB almost overflow.\n"); | |
4146 | break; | |
4147 | case HNS_ROCE_DB_SUBTYPE_ODB_ALM_EMP: | |
4148 | dev_warn(dev, "SDB almost empty.\n"); | |
4149 | break; | |
4150 | default: | |
4151 | break; | |
4152 | } | |
4153 | } | |
4154 | ||
4155 | static struct hns_roce_aeqe *get_aeqe_v1(struct hns_roce_eq *eq, u32 entry) | |
4156 | { | |
4157 | unsigned long off = (entry & (eq->entries - 1)) * | |
4158 | HNS_ROCE_AEQ_ENTRY_SIZE; | |
4159 | ||
4160 | return (struct hns_roce_aeqe *)((u8 *) | |
4161 | (eq->buf_list[off / HNS_ROCE_BA_SIZE].buf) + | |
4162 | off % HNS_ROCE_BA_SIZE); | |
4163 | } | |
4164 | ||
/*
 * Return the AEQE at the current consumer index if it is new (owned by
 * software), or NULL if the queue is empty.  (cons_index & entries) tracks
 * the software wrap phase; comparing it with the entry's owner bit tells
 * whether the hardware has written this slot since software last consumed it.
 */
static struct hns_roce_aeqe *next_aeqe_sw_v1(struct hns_roce_eq *eq)
{
	struct hns_roce_aeqe *aeqe = get_aeqe_v1(eq, eq->cons_index);

	return (roce_get_bit(aeqe->asyn, HNS_ROCE_AEQE_U32_4_OWNER_S) ^
		!!(eq->cons_index & eq->entries)) ? aeqe : NULL;
}
4172 | ||
/*
 * Drain the asynchronous event queue: dispatch each new AEQE to the
 * appropriate handler, then write the consumer index back to hardware.
 *
 * Returns 1 if at least one AEQE was consumed, 0 otherwise (used by the
 * MSI-X handler to decide between IRQ_HANDLED and IRQ_NONE).
 */
static int hns_roce_v1_aeq_int(struct hns_roce_dev *hr_dev,
			       struct hns_roce_eq *eq)
{
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_aeqe *aeqe;
	int aeqes_found = 0;
	int event_type;

	while ((aeqe = next_aeqe_sw_v1(eq))) {

		/* Make sure we read the AEQ entry after we have checked the
		 * ownership bit
		 */
		dma_rmb();

		dev_dbg(dev, "aeqe = %p, aeqe->asyn.event_type = 0x%lx\n", aeqe,
			roce_get_field(aeqe->asyn,
				       HNS_ROCE_AEQE_U32_4_EVENT_TYPE_M,
				       HNS_ROCE_AEQE_U32_4_EVENT_TYPE_S));
		event_type = roce_get_field(aeqe->asyn,
					    HNS_ROCE_AEQE_U32_4_EVENT_TYPE_M,
					    HNS_ROCE_AEQE_U32_4_EVENT_TYPE_S);
		switch (event_type) {
		case HNS_ROCE_EVENT_TYPE_PATH_MIG:
			dev_warn(dev, "PATH MIG not supported\n");
			break;
		case HNS_ROCE_EVENT_TYPE_COMM_EST:
			dev_warn(dev, "COMMUNICATION established\n");
			break;
		case HNS_ROCE_EVENT_TYPE_SQ_DRAINED:
			dev_warn(dev, "SQ DRAINED not supported\n");
			break;
		case HNS_ROCE_EVENT_TYPE_PATH_MIG_FAILED:
			dev_warn(dev, "PATH MIG failed\n");
			break;
		case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
		case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
		case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
			hns_roce_v1_qp_err_handle(hr_dev, aeqe, event_type);
			break;
		case HNS_ROCE_EVENT_TYPE_SRQ_LIMIT_REACH:
		case HNS_ROCE_EVENT_TYPE_SRQ_CATAS_ERROR:
		case HNS_ROCE_EVENT_TYPE_SRQ_LAST_WQE_REACH:
			dev_warn(dev, "SRQ not support!\n");
			break;
		case HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR:
		case HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW:
		case HNS_ROCE_EVENT_TYPE_CQ_ID_INVALID:
			hns_roce_v1_cq_err_handle(hr_dev, aeqe, event_type);
			break;
		case HNS_ROCE_EVENT_TYPE_PORT_CHANGE:
			dev_warn(dev, "port change.\n");
			break;
		case HNS_ROCE_EVENT_TYPE_MB:
			/* Mailbox completion: wake the waiting command. */
			hns_roce_cmd_event(hr_dev,
					   le16_to_cpu(aeqe->event.cmd.token),
					   aeqe->event.cmd.status,
					   le64_to_cpu(aeqe->event.cmd.out_param
					   ));
			break;
		case HNS_ROCE_EVENT_TYPE_DB_OVERFLOW:
			hns_roce_v1_db_overflow_handle(hr_dev, aeqe);
			break;
		case HNS_ROCE_EVENT_TYPE_CEQ_OVERFLOW:
			dev_warn(dev, "CEQ 0x%lx overflow.\n",
			roce_get_field(aeqe->event.ce_event.ceqe,
				     HNS_ROCE_AEQE_EVENT_CE_EVENT_CEQE_CEQN_M,
				     HNS_ROCE_AEQE_EVENT_CE_EVENT_CEQE_CEQN_S));
			break;
		default:
			dev_warn(dev, "Unhandled event %d on EQ %d at idx %u.\n",
				 event_type, eq->eqn, eq->cons_index);
			break;
		}

		eq->cons_index++;
		aeqes_found = 1;

		/* cons_index runs to 2 * depth before wrapping, matching the
		 * owner-bit phase tracking in next_aeqe_sw_v1().
		 */
		if (eq->cons_index > 2 * hr_dev->caps.aeqe_depth - 1) {
			dev_warn(dev, "cons_index overflow, set back to 0.\n");
			eq->cons_index = 0;
		}
	}

	set_eq_cons_index_v1(eq, 0);

	return aeqes_found;
}
4261 | ||
4262 | static struct hns_roce_ceqe *get_ceqe_v1(struct hns_roce_eq *eq, u32 entry) | |
4263 | { | |
4264 | unsigned long off = (entry & (eq->entries - 1)) * | |
4265 | HNS_ROCE_CEQ_ENTRY_SIZE; | |
4266 | ||
4267 | return (struct hns_roce_ceqe *)((u8 *) | |
4268 | (eq->buf_list[off / HNS_ROCE_BA_SIZE].buf) + | |
4269 | off % HNS_ROCE_BA_SIZE); | |
4270 | } | |
4271 | ||
/*
 * Return the CEQE at the current consumer index if it is new (owned by
 * software), or NULL if the queue is empty.  Same owner-bit/phase scheme
 * as next_aeqe_sw_v1().
 */
static struct hns_roce_ceqe *next_ceqe_sw_v1(struct hns_roce_eq *eq)
{
	struct hns_roce_ceqe *ceqe = get_ceqe_v1(eq, eq->cons_index);

	return (!!(roce_get_bit(ceqe->comp,
		HNS_ROCE_CEQE_CEQE_COMP_OWNER_S))) ^
		(!!(eq->cons_index & eq->entries)) ? ceqe : NULL;
}
4280 | ||
/*
 * Drain the completion event queue: trigger a CQ completion notification
 * for each new CEQE, then write the consumer index back to hardware.
 * Returns 1 if at least one CEQE was consumed, 0 otherwise.
 */
static int hns_roce_v1_ceq_int(struct hns_roce_dev *hr_dev,
			       struct hns_roce_eq *eq)
{
	struct hns_roce_ceqe *ceqe;
	int ceqes_found = 0;
	u32 cqn;

	while ((ceqe = next_ceqe_sw_v1(eq))) {

		/* Make sure we read CEQ entry after we have checked the
		 * ownership bit
		 */
		dma_rmb();

		cqn = roce_get_field(ceqe->comp,
				     HNS_ROCE_CEQE_CEQE_COMP_CQN_M,
				     HNS_ROCE_CEQE_CEQE_COMP_CQN_S);
		hns_roce_cq_completion(hr_dev, cqn);

		++eq->cons_index;
		ceqes_found = 1;

		/* cons_index runs to 2 * depth before wrapping, matching the
		 * owner-bit phase tracking in next_ceqe_sw_v1().
		 */
		if (eq->cons_index > 2 * hr_dev->caps.ceqe_depth - 1) {
			dev_warn(&eq->hr_dev->pdev->dev,
				 "cons_index overflow, set back to 0.\n");
			eq->cons_index = 0;
		}
	}

	set_eq_cons_index_v1(eq, 0);

	return ceqes_found;
}
4314 | ||
4315 | static irqreturn_t hns_roce_v1_msix_interrupt_eq(int irq, void *eq_ptr) | |
4316 | { | |
4317 | struct hns_roce_eq *eq = eq_ptr; | |
4318 | struct hns_roce_dev *hr_dev = eq->hr_dev; | |
4319 | int int_work = 0; | |
4320 | ||
4321 | if (eq->type_flag == HNS_ROCE_CEQ) | |
4322 | /* CEQ irq routine, CEQ is pulse irq, not clear */ | |
4323 | int_work = hns_roce_v1_ceq_int(hr_dev, eq); | |
4324 | else | |
4325 | /* AEQ irq routine, AEQ is pulse irq, not clear */ | |
4326 | int_work = hns_roce_v1_aeq_int(hr_dev, eq); | |
4327 | ||
4328 | return IRQ_RETVAL(int_work); | |
4329 | } | |
4330 | ||
/*
 * Abnormal-event MSI-X handler: services AEQ overflow, per-CEQ almost-
 * overflow, and dumps the ECC alarm registers.  Each condition follows the
 * same mask -> write-1-clear -> unmask sequence required by the hardware.
 */
static irqreturn_t hns_roce_v1_msix_interrupt_abn(int irq, void *dev_id)
{
	struct hns_roce_dev *hr_dev = dev_id;
	struct device *dev = &hr_dev->pdev->dev;
	int int_work = 0;
	u32 caepaemask_val;
	u32 cealmovf_val;
	u32 caepaest_val;
	u32 aeshift_val;
	u32 ceshift_val;
	u32 cemask_val;
	int i;

	/*
	 * Abnormal interrupt:
	 * AEQ overflow, ECC multi-bit err, CEQ overflow must clear
	 * interrupt, mask irq, clear irq, cancel mask operation
	 */
	aeshift_val = roce_read(hr_dev, ROCEE_CAEP_AEQC_AEQE_SHIFT_REG);

	/* AEQE overflow */
	if (roce_get_bit(aeshift_val,
		ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQ_ALM_OVF_INT_ST_S) == 1) {
		dev_warn(dev, "AEQ overflow!\n");

		/* Set mask */
		caepaemask_val = roce_read(hr_dev, ROCEE_CAEP_AE_MASK_REG);
		roce_set_bit(caepaemask_val,
			     ROCEE_CAEP_AE_MASK_CAEP_AEQ_ALM_OVF_MASK_S,
			     HNS_ROCE_INT_MASK_ENABLE);
		roce_write(hr_dev, ROCEE_CAEP_AE_MASK_REG, caepaemask_val);

		/* Clear int state(INT_WC : write 1 clear) */
		caepaest_val = roce_read(hr_dev, ROCEE_CAEP_AE_ST_REG);
		roce_set_bit(caepaest_val,
			     ROCEE_CAEP_AE_ST_CAEP_AEQ_ALM_OVF_S, 1);
		roce_write(hr_dev, ROCEE_CAEP_AE_ST_REG, caepaest_val);

		/* Clear mask */
		caepaemask_val = roce_read(hr_dev, ROCEE_CAEP_AE_MASK_REG);
		roce_set_bit(caepaemask_val,
			     ROCEE_CAEP_AE_MASK_CAEP_AEQ_ALM_OVF_MASK_S,
			     HNS_ROCE_INT_MASK_DISABLE);
		roce_write(hr_dev, ROCEE_CAEP_AE_MASK_REG, caepaemask_val);
	}

	/* CEQ almost overflow */
	for (i = 0; i < hr_dev->caps.num_comp_vectors; i++) {
		ceshift_val = roce_read(hr_dev, ROCEE_CAEP_CEQC_SHIFT_0_REG +
					i * CEQ_REG_OFFSET);

		if (roce_get_bit(ceshift_val,
			ROCEE_CAEP_CEQC_SHIFT_CAEP_CEQ_ALM_OVF_INT_ST_S) == 1) {
			dev_warn(dev, "CEQ[%d] almost overflow!\n", i);
			int_work++;

			/* Set mask */
			cemask_val = roce_read(hr_dev,
					       ROCEE_CAEP_CE_IRQ_MASK_0_REG +
					       i * CEQ_REG_OFFSET);
			roce_set_bit(cemask_val,
				ROCEE_CAEP_CE_IRQ_MASK_CAEP_CEQ_ALM_OVF_MASK_S,
				HNS_ROCE_INT_MASK_ENABLE);
			roce_write(hr_dev, ROCEE_CAEP_CE_IRQ_MASK_0_REG +
				   i * CEQ_REG_OFFSET, cemask_val);

			/* Clear int state(INT_WC : write 1 clear) */
			cealmovf_val = roce_read(hr_dev,
						 ROCEE_CAEP_CEQ_ALM_OVF_0_REG +
						 i * CEQ_REG_OFFSET);
			roce_set_bit(cealmovf_val,
				     ROCEE_CAEP_CEQ_ALM_OVF_CAEP_CEQ_ALM_OVF_S,
				     1);
			roce_write(hr_dev, ROCEE_CAEP_CEQ_ALM_OVF_0_REG +
				   i * CEQ_REG_OFFSET, cealmovf_val);

			/* Clear mask */
			cemask_val = roce_read(hr_dev,
					       ROCEE_CAEP_CE_IRQ_MASK_0_REG +
					       i * CEQ_REG_OFFSET);
			roce_set_bit(cemask_val,
				ROCEE_CAEP_CE_IRQ_MASK_CAEP_CEQ_ALM_OVF_MASK_S,
				HNS_ROCE_INT_MASK_DISABLE);
			roce_write(hr_dev, ROCEE_CAEP_CE_IRQ_MASK_0_REG +
				   i * CEQ_REG_OFFSET, cemask_val);
		}
	}

	/* ECC multi-bit error alarm */
	dev_warn(dev, "ECC UCERR ALARM: 0x%x, 0x%x, 0x%x\n",
		 roce_read(hr_dev, ROCEE_ECC_UCERR_ALM0_REG),
		 roce_read(hr_dev, ROCEE_ECC_UCERR_ALM1_REG),
		 roce_read(hr_dev, ROCEE_ECC_UCERR_ALM2_REG));

	dev_warn(dev, "ECC CERR ALARM: 0x%x, 0x%x, 0x%x\n",
		 roce_read(hr_dev, ROCEE_ECC_CERR_ALM0_REG),
		 roce_read(hr_dev, ROCEE_ECC_CERR_ALM1_REG),
		 roce_read(hr_dev, ROCEE_ECC_CERR_ALM2_REG));

	return IRQ_RETVAL(int_work);
}
4432 | ||
/*
 * Clear the mask bits for the AEQ and all CEQ interrupts.
 * NOTE(review): despite the name, masken == 0 writes zero into the mask
 * registers, i.e. this routine *unmasks* (enables) EQ interrupt delivery.
 */
static void hns_roce_v1_int_mask_enable(struct hns_roce_dev *hr_dev)
{
	u32 aemask_val;
	int masken = 0;
	int i;

	/* AEQ INT */
	aemask_val = roce_read(hr_dev, ROCEE_CAEP_AE_MASK_REG);
	roce_set_bit(aemask_val, ROCEE_CAEP_AE_MASK_CAEP_AEQ_ALM_OVF_MASK_S,
		     masken);
	roce_set_bit(aemask_val, ROCEE_CAEP_AE_MASK_CAEP_AE_IRQ_MASK_S, masken);
	roce_write(hr_dev, ROCEE_CAEP_AE_MASK_REG, aemask_val);

	/* CEQ INT */
	for (i = 0; i < hr_dev->caps.num_comp_vectors; i++) {
		/* IRQ mask */
		roce_write(hr_dev, ROCEE_CAEP_CE_IRQ_MASK_0_REG +
			   i * CEQ_REG_OFFSET, masken);
	}
}
4453 | ||
4454 | static void hns_roce_v1_free_eq(struct hns_roce_dev *hr_dev, | |
4455 | struct hns_roce_eq *eq) | |
4456 | { | |
4457 | int npages = (PAGE_ALIGN(eq->eqe_size * eq->entries) + | |
4458 | HNS_ROCE_BA_SIZE - 1) / HNS_ROCE_BA_SIZE; | |
4459 | int i; | |
4460 | ||
4461 | if (!eq->buf_list) | |
4462 | return; | |
4463 | ||
4464 | for (i = 0; i < npages; ++i) | |
4465 | dma_free_coherent(&hr_dev->pdev->dev, HNS_ROCE_BA_SIZE, | |
4466 | eq->buf_list[i].buf, eq->buf_list[i].map); | |
4467 | ||
4468 | kfree(eq->buf_list); | |
4469 | } | |
4470 | ||
4471 | static void hns_roce_v1_enable_eq(struct hns_roce_dev *hr_dev, int eq_num, | |
4472 | int enable_flag) | |
4473 | { | |
4474 | void __iomem *eqc = hr_dev->eq_table.eqc_base[eq_num]; | |
4475 | u32 val; | |
4476 | ||
4477 | val = readl(eqc); | |
4478 | ||
4479 | if (enable_flag) | |
4480 | roce_set_field(val, | |
4481 | ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_M, | |
4482 | ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_S, | |
4483 | HNS_ROCE_EQ_STAT_VALID); | |
4484 | else | |
4485 | roce_set_field(val, | |
4486 | ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_M, | |
4487 | ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_S, | |
4488 | HNS_ROCE_EQ_STAT_INVALID); | |
4489 | writel(val, eqc); | |
4490 | } | |
4491 | ||
/*
 * Allocate the DMA ring for one event queue and program its context
 * registers (state/shift, base address, producer and consumer indices).
 * Returns 0 on success or a negative errno; on failure all chunks
 * allocated so far are freed.
 */
static int hns_roce_v1_create_eq(struct hns_roce_dev *hr_dev,
				 struct hns_roce_eq *eq)
{
	void __iomem *eqc = hr_dev->eq_table.eqc_base[eq->eqn];
	struct device *dev = &hr_dev->pdev->dev;
	dma_addr_t tmp_dma_addr;
	u32 eqconsindx_val = 0;
	u32 eqcuridx_val = 0;
	u32 eqshift_val = 0;
	int num_bas;
	int ret;
	int i;

	num_bas = (PAGE_ALIGN(eq->entries * eq->eqe_size) +
		   HNS_ROCE_BA_SIZE - 1) / HNS_ROCE_BA_SIZE;

	/* Only a single-chunk ring is supported by this context layout. */
	if ((eq->entries * eq->eqe_size) > HNS_ROCE_BA_SIZE) {
		dev_err(dev, "[error]eq buf %d gt ba size(%d) need bas=%d\n",
			(eq->entries * eq->eqe_size), HNS_ROCE_BA_SIZE,
			num_bas);
		return -EINVAL;
	}

	eq->buf_list = kcalloc(num_bas, sizeof(*eq->buf_list), GFP_KERNEL);
	if (!eq->buf_list)
		return -ENOMEM;

	for (i = 0; i < num_bas; ++i) {
		eq->buf_list[i].buf = dma_alloc_coherent(dev, HNS_ROCE_BA_SIZE,
							 &tmp_dma_addr,
							 GFP_KERNEL);
		if (!eq->buf_list[i].buf) {
			ret = -ENOMEM;
			goto err_out_free_pages;
		}

		eq->buf_list[i].map = tmp_dma_addr;
		memset(eq->buf_list[i].buf, 0, HNS_ROCE_BA_SIZE);
	}
	eq->cons_index = 0;
	/* Program state (invalid until enabled) and log2 of the queue depth. */
	roce_set_field(eqshift_val,
		       ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_M,
		       ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_S,
		       HNS_ROCE_EQ_STAT_INVALID);
	roce_set_field(eqshift_val,
		       ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_AEQE_SHIFT_M,
		       ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_AEQE_SHIFT_S,
		       eq->log_entries);
	writel(eqshift_val, eqc);

	/* Configure eq extended address 12~44bit */
	writel((u32)(eq->buf_list[0].map >> 12), eqc + 4);

	/*
	 * Configure eq extended address 45~49 bit.
	 * 44 = 32 + 12, When evaluating addr to hardware, shift 12 because of
	 * using 4K page, and shift more 32 because of
	 * caculating the high 32 bit value evaluated to hardware.
	 */
	roce_set_field(eqcuridx_val, ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQ_BT_H_M,
		       ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQ_BT_H_S,
		       eq->buf_list[0].map >> 44);
	roce_set_field(eqcuridx_val,
		       ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQE_CUR_IDX_M,
		       ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQE_CUR_IDX_S, 0);
	writel(eqcuridx_val, eqc + 8);

	/* Configure eq consumer index */
	roce_set_field(eqconsindx_val,
		       ROCEE_CAEP_AEQE_CONS_IDX_CAEP_AEQE_CONS_IDX_M,
		       ROCEE_CAEP_AEQE_CONS_IDX_CAEP_AEQE_CONS_IDX_S, 0);
	writel(eqconsindx_val, eqc + 0xc);

	return 0;

err_out_free_pages:
	/* Unwind only the chunks successfully allocated before the failure. */
	for (i -= 1; i >= 0; i--)
		dma_free_coherent(dev, HNS_ROCE_BA_SIZE, eq->buf_list[i].buf,
				  eq->buf_list[i].map);

	kfree(eq->buf_list);
	return ret;
}
4575 | ||
/*
 * Build the EQ table: describe each CEQ/AEQ, create their rings, request
 * the MSI-X vectors (including the abnormal-event vector), and enable all
 * queues.  On failure the partially constructed state is unwound in
 * reverse order via the goto labels.
 */
static int hns_roce_v1_init_eq_table(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_eq_table *eq_table = &hr_dev->eq_table;
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_eq *eq;
	int irq_num;
	int eq_num;
	int ret;
	int i, j;

	eq_num = hr_dev->caps.num_comp_vectors + hr_dev->caps.num_aeq_vectors;
	irq_num = eq_num + hr_dev->caps.num_other_vectors;

	eq_table->eq = kcalloc(eq_num, sizeof(*eq_table->eq), GFP_KERNEL);
	if (!eq_table->eq)
		return -ENOMEM;

	eq_table->eqc_base = kcalloc(eq_num, sizeof(*eq_table->eqc_base),
				     GFP_KERNEL);
	if (!eq_table->eqc_base) {
		ret = -ENOMEM;
		goto err_eqc_base_alloc_fail;
	}

	/* CEQs occupy indices [0, num_comp_vectors); AEQ(s) follow. */
	for (i = 0; i < eq_num; i++) {
		eq = &eq_table->eq[i];
		eq->hr_dev = hr_dev;
		eq->eqn = i;
		eq->irq = hr_dev->irq[i];
		eq->log_page_size = PAGE_SHIFT;

		if (i < hr_dev->caps.num_comp_vectors) {
			/* CEQ */
			eq_table->eqc_base[i] = hr_dev->reg_base +
						ROCEE_CAEP_CEQC_SHIFT_0_REG +
						CEQ_REG_OFFSET * i;
			eq->type_flag = HNS_ROCE_CEQ;
			eq->doorbell = hr_dev->reg_base +
				       ROCEE_CAEP_CEQC_CONS_IDX_0_REG +
				       CEQ_REG_OFFSET * i;
			eq->entries = hr_dev->caps.ceqe_depth;
			eq->log_entries = ilog2(eq->entries);
			eq->eqe_size = HNS_ROCE_CEQ_ENTRY_SIZE;
		} else {
			/* AEQ */
			eq_table->eqc_base[i] = hr_dev->reg_base +
						ROCEE_CAEP_AEQC_AEQE_SHIFT_REG;
			eq->type_flag = HNS_ROCE_AEQ;
			eq->doorbell = hr_dev->reg_base +
				       ROCEE_CAEP_AEQE_CONS_IDX_REG;
			eq->entries = hr_dev->caps.aeqe_depth;
			eq->log_entries = ilog2(eq->entries);
			eq->eqe_size = HNS_ROCE_AEQ_ENTRY_SIZE;
		}
	}

	/* Disable irq */
	hns_roce_v1_int_mask_enable(hr_dev);

	/* Configure ce int interval */
	roce_write(hr_dev, ROCEE_CAEP_CE_INTERVAL_CFG_REG,
		   HNS_ROCE_CEQ_DEFAULT_INTERVAL);

	/* Configure ce int burst num */
	roce_write(hr_dev, ROCEE_CAEP_CE_BURST_NUM_CFG_REG,
		   HNS_ROCE_CEQ_DEFAULT_BURST_NUM);

	for (i = 0; i < eq_num; i++) {
		ret = hns_roce_v1_create_eq(hr_dev, &eq_table->eq[i]);
		if (ret) {
			dev_err(dev, "eq create failed\n");
			goto err_create_eq_fail;
		}
	}

	/* Vectors beyond eq_num service the abnormal-event interrupt. */
	for (j = 0; j < irq_num; j++) {
		if (j < eq_num)
			ret = request_irq(hr_dev->irq[j],
					  hns_roce_v1_msix_interrupt_eq, 0,
					  hr_dev->irq_names[j],
					  &eq_table->eq[j]);
		else
			ret = request_irq(hr_dev->irq[j],
					  hns_roce_v1_msix_interrupt_abn, 0,
					  hr_dev->irq_names[j], hr_dev);

		if (ret) {
			dev_err(dev, "request irq error!\n");
			goto err_request_irq_fail;
		}
	}

	for (i = 0; i < eq_num; i++)
		hns_roce_v1_enable_eq(hr_dev, i, EQ_ENABLE);

	return 0;

err_request_irq_fail:
	for (j -= 1; j >= 0; j--)
		free_irq(hr_dev->irq[j], &eq_table->eq[j]);

err_create_eq_fail:
	for (i -= 1; i >= 0; i--)
		hns_roce_v1_free_eq(hr_dev, &eq_table->eq[i]);

	kfree(eq_table->eqc_base);

err_eqc_base_alloc_fail:
	kfree(eq_table->eq);

	return ret;
}
4688 | ||
/*
 * Tear down the EQ table: disable each EQ, release its IRQ and ring
 * memory, free the abnormal-event IRQ(s), then free the table arrays.
 * Mirrors hns_roce_v1_init_eq_table() in reverse.
 */
static void hns_roce_v1_cleanup_eq_table(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_eq_table *eq_table = &hr_dev->eq_table;
	int irq_num;
	int eq_num;
	int i;

	eq_num = hr_dev->caps.num_comp_vectors + hr_dev->caps.num_aeq_vectors;
	irq_num = eq_num + hr_dev->caps.num_other_vectors;
	for (i = 0; i < eq_num; i++) {
		/* Disable EQ */
		hns_roce_v1_enable_eq(hr_dev, i, EQ_DISABLE);

		free_irq(hr_dev->irq[i], &eq_table->eq[i]);

		hns_roce_v1_free_eq(hr_dev, &eq_table->eq[i]);
	}
	/* The remaining vectors were registered with hr_dev as cookie. */
	for (i = eq_num; i < irq_num; i++)
		free_irq(hr_dev->irq[i], hr_dev);

	kfree(eq_table->eqc_base);
	kfree(eq_table->eq);
}
4712 | ||
08805fdb | 4713 | static const struct hns_roce_hw hns_roce_hw_v1 = { |
9a443537 | 4714 | .reset = hns_roce_v1_reset, |
4715 | .hw_profile = hns_roce_v1_profile, | |
4716 | .hw_init = hns_roce_v1_init, | |
4717 | .hw_exit = hns_roce_v1_exit, | |
a680f2f3 WHX |
4718 | .post_mbox = hns_roce_v1_post_mbox, |
4719 | .chk_mbox = hns_roce_v1_chk_mbox, | |
9a443537 | 4720 | .set_gid = hns_roce_v1_set_gid, |
4721 | .set_mac = hns_roce_v1_set_mac, | |
4722 | .set_mtu = hns_roce_v1_set_mtu, | |
4723 | .write_mtpt = hns_roce_v1_write_mtpt, | |
4724 | .write_cqc = hns_roce_v1_write_cqc, | |
b156269d | 4725 | .modify_cq = hns_roce_v1_modify_cq, |
97f0e39f | 4726 | .clear_hem = hns_roce_v1_clear_hem, |
9a443537 | 4727 | .modify_qp = hns_roce_v1_modify_qp, |
4728 | .query_qp = hns_roce_v1_query_qp, | |
4729 | .destroy_qp = hns_roce_v1_destroy_qp, | |
4730 | .post_send = hns_roce_v1_post_send, | |
4731 | .post_recv = hns_roce_v1_post_recv, | |
4732 | .req_notify_cq = hns_roce_v1_req_notify_cq, | |
4733 | .poll_cq = hns_roce_v1_poll_cq, | |
bfcc681b | 4734 | .dereg_mr = hns_roce_v1_dereg_mr, |
afb6b092 | 4735 | .destroy_cq = hns_roce_v1_destroy_cq, |
b16f8188 YL |
4736 | .init_eq = hns_roce_v1_init_eq_table, |
4737 | .cleanup_eq = hns_roce_v1_cleanup_eq_table, | |
9a443537 | 4738 | }; |
08805fdb WHX |
4739 | |
4740 | static const struct of_device_id hns_roce_of_match[] = { | |
4741 | { .compatible = "hisilicon,hns-roce-v1", .data = &hns_roce_hw_v1, }, | |
4742 | {}, | |
4743 | }; | |
4744 | MODULE_DEVICE_TABLE(of, hns_roce_of_match); | |
4745 | ||
4746 | static const struct acpi_device_id hns_roce_acpi_match[] = { | |
4747 | { "HISI00D1", (kernel_ulong_t)&hns_roce_hw_v1 }, | |
4748 | {}, | |
4749 | }; | |
4750 | MODULE_DEVICE_TABLE(acpi, hns_roce_acpi_match); | |
4751 | ||
/* bus_find_device() callback: match a device by its firmware node handle. */
static int hns_roce_node_match(struct device *dev, void *fwnode)
{
	return dev->fwnode == fwnode;
}
4756 | ||
4757 | static struct | |
4758 | platform_device *hns_roce_find_pdev(struct fwnode_handle *fwnode) | |
4759 | { | |
4760 | struct device *dev; | |
4761 | ||
4762 | /* get the 'device' corresponding to the matching 'fwnode' */ | |
4763 | dev = bus_find_device(&platform_bus_type, NULL, | |
4764 | fwnode, hns_roce_node_match); | |
4765 | /* get the platform device */ | |
4766 | return dev ? to_platform_device(dev) : NULL; | |
4767 | } | |
4768 | ||
/*
 * Read the device configuration from firmware (DT or ACPI): select the HW
 * ops table, map the register BAR, read the node GUID, bind the associated
 * ethernet netdevs/ports, and fetch the MSI-X vector numbers and names.
 * Returns 0 on success or a negative errno.
 */
static int hns_roce_get_cfg(struct hns_roce_dev *hr_dev)
{
	struct device *dev = &hr_dev->pdev->dev;
	struct platform_device *pdev = NULL;
	struct net_device *netdev = NULL;
	struct device_node *net_node;
	struct resource *res;
	int port_cnt = 0;
	u8 phy_port;
	int ret;
	int i;

	/* check if we are compatible with the underlying SoC */
	if (dev_of_node(dev)) {
		const struct of_device_id *of_id;

		of_id = of_match_node(hns_roce_of_match, dev->of_node);
		if (!of_id) {
			dev_err(dev, "device is not compatible!\n");
			return -ENXIO;
		}
		hr_dev->hw = (const struct hns_roce_hw *)of_id->data;
		if (!hr_dev->hw) {
			dev_err(dev, "couldn't get H/W specific DT data!\n");
			return -ENXIO;
		}
	} else if (is_acpi_device_node(dev->fwnode)) {
		const struct acpi_device_id *acpi_id;

		acpi_id = acpi_match_device(hns_roce_acpi_match, dev);
		if (!acpi_id) {
			dev_err(dev, "device is not compatible!\n");
			return -ENXIO;
		}
		hr_dev->hw = (const struct hns_roce_hw *) acpi_id->driver_data;
		if (!hr_dev->hw) {
			dev_err(dev, "couldn't get H/W specific ACPI data!\n");
			return -ENXIO;
		}
	} else {
		dev_err(dev, "can't read compatibility data from DT or ACPI\n");
		return -ENXIO;
	}

	/* get the mapped register base address */
	res = platform_get_resource(hr_dev->pdev, IORESOURCE_MEM, 0);
	hr_dev->reg_base = devm_ioremap_resource(dev, res);
	if (IS_ERR(hr_dev->reg_base))
		return PTR_ERR(hr_dev->reg_base);

	/* read the node_guid of IB device from the DT or ACPI */
	ret = device_property_read_u8_array(dev, "node-guid",
					    (u8 *)&hr_dev->ib_dev.node_guid,
					    GUID_LEN);
	if (ret) {
		dev_err(dev, "couldn't get node_guid from DT or ACPI!\n");
		return ret;
	}

	/* get the RoCE associated ethernet ports or netdevices */
	for (i = 0; i < HNS_ROCE_MAX_PORTS; i++) {
		if (dev_of_node(dev)) {
			net_node = of_parse_phandle(dev->of_node, "eth-handle",
						    i);
			if (!net_node)
				continue;
			pdev = of_find_device_by_node(net_node);
		} else if (is_acpi_device_node(dev->fwnode)) {
			struct acpi_reference_args args;
			struct fwnode_handle *fwnode;

			ret = acpi_node_get_property_reference(dev->fwnode,
							       "eth-handle",
							       i, &args);
			if (ret)
				continue;
			fwnode = acpi_fwnode_handle(args.adev);
			pdev = hns_roce_find_pdev(fwnode);
		} else {
			dev_err(dev, "cannot read data from DT or ACPI\n");
			return -ENXIO;
		}

		if (pdev) {
			netdev = platform_get_drvdata(pdev);
			phy_port = (u8)i;
			if (netdev) {
				hr_dev->iboe.netdevs[port_cnt] = netdev;
				hr_dev->iboe.phy_port[port_cnt] = phy_port;
			} else {
				dev_err(dev, "no netdev found with pdev %s\n",
					pdev->name);
				return -ENODEV;
			}
			port_cnt++;
		}
	}

	if (port_cnt == 0) {
		dev_err(dev, "unable to get eth-handle for available ports!\n");
		return -EINVAL;
	}

	hr_dev->caps.num_ports = port_cnt;

	/* cmd issue mode: 0 is poll, 1 is event */
	hr_dev->cmd_mod = 1;
	hr_dev->loop_idc = 0;
	hr_dev->sdb_offset = ROCEE_DB_SQ_L_0_REG;
	hr_dev->odb_offset = ROCEE_DB_OTHERS_L_0_REG;

	/* read the interrupt names from the DT or ACPI */
	ret = device_property_read_string_array(dev, "interrupt-names",
						hr_dev->irq_names,
						HNS_ROCE_V1_MAX_IRQ_NUM);
	if (ret < 0) {
		dev_err(dev, "couldn't get interrupt names from DT or ACPI!\n");
		return ret;
	}

	/* fetch the interrupt numbers */
	for (i = 0; i < HNS_ROCE_V1_MAX_IRQ_NUM; i++) {
		hr_dev->irq[i] = platform_get_irq(hr_dev->pdev, i);
		if (hr_dev->irq[i] <= 0) {
			dev_err(dev, "platform get of irq[=%d] failed!\n", i);
			return -EINVAL;
		}
	}

	return 0;
}
4900 | ||
/**
 * hns_roce_probe - RoCE driver entrance
 * @pdev: pointer to platform device
 * Return : int, 0 on success or a negative errno
 *
 * Allocates the IB device and v1 private data, configures DMA masks,
 * reads the firmware configuration, and initializes the RoCE engine.
 */
static int hns_roce_probe(struct platform_device *pdev)
{
	int ret;
	struct hns_roce_dev *hr_dev;
	struct device *dev = &pdev->dev;

	hr_dev = (struct hns_roce_dev *)ib_alloc_device(sizeof(*hr_dev));
	if (!hr_dev)
		return -ENOMEM;

	hr_dev->priv = kzalloc(sizeof(struct hns_roce_v1_priv), GFP_KERNEL);
	if (!hr_dev->priv) {
		ret = -ENOMEM;
		goto error_failed_kzalloc;
	}

	hr_dev->pdev = pdev;
	hr_dev->dev = dev;
	platform_set_drvdata(pdev, hr_dev);

	/* Prefer 64-bit DMA, fall back to 32-bit; fail if neither works. */
	if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64ULL)) &&
	    dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32ULL))) {
		dev_err(dev, "Not usable DMA addressing mode\n");
		ret = -EIO;
		goto error_failed_get_cfg;
	}

	ret = hns_roce_get_cfg(hr_dev);
	if (ret) {
		dev_err(dev, "Get Configuration failed!\n");
		goto error_failed_get_cfg;
	}

	ret = hns_roce_init(hr_dev);
	if (ret) {
		dev_err(dev, "RoCE engine init failed!\n");
		goto error_failed_get_cfg;
	}

	return 0;

error_failed_get_cfg:
	kfree(hr_dev->priv);

error_failed_kzalloc:
	ib_dealloc_device(&hr_dev->ib_dev);

	return ret;
}
4956 | ||
4957 | /** | |
4958 | * hns_roce_remove - remove RoCE device | |
4959 | * @pdev: pointer to platform device | |
4960 | */ | |
4961 | static int hns_roce_remove(struct platform_device *pdev) | |
4962 | { | |
4963 | struct hns_roce_dev *hr_dev = platform_get_drvdata(pdev); | |
4964 | ||
4965 | hns_roce_exit(hr_dev); | |
016a0059 | 4966 | kfree(hr_dev->priv); |
08805fdb WHX |
4967 | ib_dealloc_device(&hr_dev->ib_dev); |
4968 | ||
4969 | return 0; | |
4970 | } | |
4971 | ||
/* Platform glue: binds via either DT (of_match_table) or ACPI matching. */
static struct platform_driver hns_roce_driver = {
	.probe = hns_roce_probe,
	.remove = hns_roce_remove,
	.driver = {
		.name = DRV_NAME,
		.of_match_table = hns_roce_of_match,
		/* ACPI_PTR() evaluates to NULL when CONFIG_ACPI is disabled. */
		.acpi_match_table = ACPI_PTR(hns_roce_acpi_match),
	},
};

/* Expands to the module init/exit pair that (un)registers the driver. */
module_platform_driver(hns_roce_driver);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Wei Hu <xavier.huwei@huawei.com>");
MODULE_AUTHOR("Nenglong Zhao <zhaonenglong@hisilicon.com>");
MODULE_AUTHOR("Lijun Ou <oulijun@huawei.com>");
MODULE_DESCRIPTION("Hisilicon Hip06 Family RoCE Driver");