RDMA/hns: Support 0 hop addressing for WQE buffer
drivers/infiniband/hw/hns/hns_roce_qp.c
9a443537 1/*
2 * Copyright (c) 2016 Hisilicon Limited.
3 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
4 *
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
10 *
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
13 * conditions are met:
14 *
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer.
18 *
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 */
33
05ad5482 34#include <linux/pci.h>
9a443537 35#include <linux/platform_device.h>
cb814642 36#include <rdma/ib_addr.h>
9a443537 37#include <rdma/ib_umem.h>
89944450 38#include <rdma/uverbs_ioctl.h>
9a443537 39#include "hns_roce_common.h"
40#include "hns_roce_device.h"
41#include "hns_roce_hem.h"
4d409958 42#include <rdma/hns-abi.h>
9a443537 43
1ca5b253 44#define SQP_NUM (2 * HNS_ROCE_MAX_PORTS)
9a443537 45
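/*
 * CQE flushing is deferred to a workqueue: callers set HNS_ROCE_FLUSH_FLAG
 * and queue flush_work via init_flush_work(), and the handler below moves
 * the QP to the error state. The reference taken in init_flush_work()
 * keeps the QP alive until the handler has finished.
 */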
46static void flush_work_handle(struct work_struct *work)
47{
48 struct hns_roce_work *flush_work = container_of(work,
49 struct hns_roce_work, work);
50 struct hns_roce_qp *hr_qp = container_of(flush_work,
51 struct hns_roce_qp, flush_work);
52 struct device *dev = flush_work->hr_dev->dev;
53 struct ib_qp_attr attr;
54 int attr_mask;
55 int ret;
56
57 attr_mask = IB_QP_STATE;
58 attr.qp_state = IB_QPS_ERR;
59
60 if (test_and_clear_bit(HNS_ROCE_FLUSH_FLAG, &hr_qp->flush_flag)) {
61 ret = hns_roce_modify_qp(&hr_qp->ibqp, &attr, attr_mask, NULL);
62 if (ret)
63 dev_err(dev, "Modify QP to error state failed(%d) during CQE flush\n",
64 ret);
65 }
66
67 /*
68 * make sure we signal QP destroy leg that flush QP was completed
69 * so that it can safely proceed ahead now and destroy QP
70 */
71 if (atomic_dec_and_test(&hr_qp->refcount))
72 complete(&hr_qp->free);
73}
74
75void init_flush_work(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
76{
77 struct hns_roce_work *flush_work = &hr_qp->flush_work;
78
79 flush_work->hr_dev = hr_dev;
80 INIT_WORK(&flush_work->work, flush_work_handle);
81 atomic_inc(&hr_qp->refcount);
82 queue_work(hr_dev->irq_workq, &flush_work->work);
83}
84
9a443537 85void hns_roce_qp_event(struct hns_roce_dev *hr_dev, u32 qpn, int event_type)
86{
13ca970e 87 struct device *dev = hr_dev->dev;
9a443537 88 struct hns_roce_qp *qp;
89
736b5a70 90 xa_lock(&hr_dev->qp_table_xa);
9a443537 91 qp = __hns_roce_qp_lookup(hr_dev, qpn);
92 if (qp)
93 atomic_inc(&qp->refcount);
736b5a70 94 xa_unlock(&hr_dev->qp_table_xa);
9a443537 95
96 if (!qp) {
97 dev_warn(dev, "Async event for bogus QP %08x\n", qpn);
98 return;
99 }
100
101 if (hr_dev->hw_rev != HNS_ROCE_HW_VER1 &&
102 (event_type == HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR ||
103 event_type == HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR ||
104 event_type == HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR)) {
105 qp->state = IB_QPS_ERR;
106 if (!test_and_set_bit(HNS_ROCE_FLUSH_FLAG, &qp->flush_flag))
107 init_flush_work(hr_dev, qp);
108 }
109
9a443537 110 qp->event(qp, (enum hns_roce_event)event_type);
111
112 if (atomic_dec_and_test(&qp->refcount))
113 complete(&qp->free);
114}
115
116static void hns_roce_ib_qp_event(struct hns_roce_qp *hr_qp,
117 enum hns_roce_event type)
118{
119 struct ib_event event;
120 struct ib_qp *ibqp = &hr_qp->ibqp;
121
122 if (ibqp->event_handler) {
123 event.device = ibqp->device;
124 event.element.qp = ibqp;
125 switch (type) {
126 case HNS_ROCE_EVENT_TYPE_PATH_MIG:
127 event.event = IB_EVENT_PATH_MIG;
128 break;
129 case HNS_ROCE_EVENT_TYPE_COMM_EST:
130 event.event = IB_EVENT_COMM_EST;
131 break;
132 case HNS_ROCE_EVENT_TYPE_SQ_DRAINED:
133 event.event = IB_EVENT_SQ_DRAINED;
134 break;
135 case HNS_ROCE_EVENT_TYPE_SRQ_LAST_WQE_REACH:
136 event.event = IB_EVENT_QP_LAST_WQE_REACHED;
137 break;
138 case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
139 event.event = IB_EVENT_QP_FATAL;
140 break;
141 case HNS_ROCE_EVENT_TYPE_PATH_MIG_FAILED:
142 event.event = IB_EVENT_PATH_MIG_ERR;
143 break;
144 case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
145 event.event = IB_EVENT_QP_REQ_ERR;
146 break;
147 case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
148 event.event = IB_EVENT_QP_ACCESS_ERR;
149 break;
150 default:
fecd02eb 151 dev_dbg(ibqp->device->dev.parent, "roce_ib: Unexpected event type %d on QP %06lx\n",
9a443537 152 type, hr_qp->qpn);
153 return;
154 }
155 ibqp->event_handler(&event, ibqp->qp_context);
156 }
157}
158
df83a66e 159static int alloc_qpn(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
9a443537 160{
161 unsigned long num = 0;
162 int ret;
163
164 if (hr_qp->ibqp.qp_type == IB_QPT_GSI) {
165 /* when hw version is v1, the sqpn is allocated */
166 if (hr_dev->hw_rev == HNS_ROCE_HW_VER1)
167 num = HNS_ROCE_MAX_PORTS +
168 hr_dev->iboe.phy_port[hr_qp->port];
169 else
170 num = 1;
171
172 hr_qp->doorbell_qpn = 1;
173 } else {
174 ret = hns_roce_bitmap_alloc_range(&hr_dev->qp_table.bitmap,
175 1, 1, &num);
176 if (ret) {
177 ibdev_err(&hr_dev->ib_dev, "Failed to alloc bitmap\n");
178 return -ENOMEM;
179 }
180
181 hr_qp->doorbell_qpn = (u32)num;
182 }
183
184 hr_qp->qpn = num;
9a443537 185
df83a66e 186 return 0;
9a443537 187}
188
189enum hns_roce_qp_state to_hns_roce_state(enum ib_qp_state state)
190{
191 switch (state) {
192 case IB_QPS_RESET:
193 return HNS_ROCE_QP_STATE_RST;
194 case IB_QPS_INIT:
195 return HNS_ROCE_QP_STATE_INIT;
196 case IB_QPS_RTR:
197 return HNS_ROCE_QP_STATE_RTR;
198 case IB_QPS_RTS:
199 return HNS_ROCE_QP_STATE_RTS;
200 case IB_QPS_SQD:
201 return HNS_ROCE_QP_STATE_SQD;
202 case IB_QPS_ERR:
203 return HNS_ROCE_QP_STATE_ERR;
204 default:
205 return HNS_ROCE_QP_NUM_STATE;
206 }
207}
208
209static void add_qp_to_list(struct hns_roce_dev *hr_dev,
210 struct hns_roce_qp *hr_qp,
211 struct ib_cq *send_cq, struct ib_cq *recv_cq)
212{
213 struct hns_roce_cq *hr_send_cq, *hr_recv_cq;
214 unsigned long flags;
215
216 hr_send_cq = send_cq ? to_hr_cq(send_cq) : NULL;
217 hr_recv_cq = recv_cq ? to_hr_cq(recv_cq) : NULL;
218
219 spin_lock_irqsave(&hr_dev->qp_list_lock, flags);
220 hns_roce_lock_cqs(hr_send_cq, hr_recv_cq);
221
222 list_add_tail(&hr_qp->node, &hr_dev->qp_list);
223 if (hr_send_cq)
224 list_add_tail(&hr_qp->sq_node, &hr_send_cq->sq_list);
225 if (hr_recv_cq)
226 list_add_tail(&hr_qp->rq_node, &hr_recv_cq->rq_list);
227
228 hns_roce_unlock_cqs(hr_send_cq, hr_recv_cq);
229 spin_unlock_irqrestore(&hr_dev->qp_list_lock, flags);
230}
231
232static int hns_roce_qp_store(struct hns_roce_dev *hr_dev,
233 struct hns_roce_qp *hr_qp,
234 struct ib_qp_init_attr *init_attr)
9a443537 235{
736b5a70 236 struct xarray *xa = &hr_dev->qp_table_xa;
9a443537 237 int ret;
238
b71961d1 239 if (!hr_qp->qpn)
9a443537 240 return -EINVAL;
241
b71961d1 242 ret = xa_err(xa_store_irq(xa, hr_qp->qpn, hr_qp, GFP_KERNEL));
736b5a70 243 if (ret)
244 dev_err(hr_dev->dev, "Failed to xa store for QPC\n");
245 else
246 /* add QP to device's QP list for softwc */
247 add_qp_to_list(hr_dev, hr_qp, init_attr->send_cq,
248 init_attr->recv_cq);
9a443537 249
250 return ret;
251}
252
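/*
 * QPC, IRRL, TRRL and SCC context entries live in HEM tables;
 * hns_roce_table_get() maps (allocating on demand) the HEM chunk that
 * holds this QPN's entry for each of these tables.
 */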
b71961d1 253static int alloc_qpc(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
9a443537 254{
255 struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
13ca970e 256 struct device *dev = hr_dev->dev;
9a443537 257 int ret;
258
b71961d1 259 if (!hr_qp->qpn)
9a443537 260 return -EINVAL;
261
262 /* In v1 engine, GSI QP context is saved in the RoCE hw's register */
263 if (hr_qp->ibqp.qp_type == IB_QPT_GSI &&
264 hr_dev->hw_rev == HNS_ROCE_HW_VER1)
265 return 0;
9a443537 266
267 /* Alloc memory for QPC */
268 ret = hns_roce_table_get(hr_dev, &qp_table->qp_table, hr_qp->qpn);
269 if (ret) {
b71961d1 270 dev_err(dev, "Failed to get QPC table\n");
9a443537 271 goto err_out;
272 }
273
274 /* Alloc memory for IRRL */
275 ret = hns_roce_table_get(hr_dev, &qp_table->irrl_table, hr_qp->qpn);
276 if (ret) {
b71961d1 277 dev_err(dev, "Failed to get IRRL table\n");
9a443537 278 goto err_put_qp;
279 }
280
e92f2c18 281 if (hr_dev->caps.trrl_entry_sz) {
282 /* Alloc memory for TRRL */
283 ret = hns_roce_table_get(hr_dev, &qp_table->trrl_table,
284 hr_qp->qpn);
285 if (ret) {
b71961d1 286 dev_err(dev, "Failed to get TRRL table\n");
e92f2c18 287 goto err_put_irrl;
288 }
289 }
290
291 if (hr_dev->caps.sccc_entry_sz) {
292 /* Alloc memory for SCC CTX */
293 ret = hns_roce_table_get(hr_dev, &qp_table->sccc_table,
294 hr_qp->qpn);
295 if (ret) {
b71961d1 296 dev_err(dev, "Failed to get SCC CTX table\n");
297 goto err_put_trrl;
298 }
299 }
300
9a443537 301 return 0;
302
e92f2c18 303err_put_trrl:
304 if (hr_dev->caps.trrl_entry_sz)
305 hns_roce_table_put(hr_dev, &qp_table->trrl_table, hr_qp->qpn);
306
9a443537 307err_put_irrl:
308 hns_roce_table_put(hr_dev, &qp_table->irrl_table, hr_qp->qpn);
309
310err_put_qp:
311 hns_roce_table_put(hr_dev, &qp_table->qp_table, hr_qp->qpn);
312
313err_out:
314 return ret;
315}
316
317void hns_roce_qp_remove(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
318{
736b5a70 319 struct xarray *xa = &hr_dev->qp_table_xa;
9a443537 320 unsigned long flags;
321
322 list_del(&hr_qp->node);
323 list_del(&hr_qp->sq_node);
324 list_del(&hr_qp->rq_node);
325
326 xa_lock_irqsave(xa, flags);
327 __xa_erase(xa, hr_qp->qpn & (hr_dev->caps.num_qps - 1));
328 xa_unlock_irqrestore(xa, flags);
9a443537 329}
330
b71961d1 331static void free_qpc(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
9a443537 332{
333 struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
334
335 /* In v1 engine, GSI QP context is saved in the RoCE hw's register */
336 if (hr_qp->ibqp.qp_type == IB_QPT_GSI &&
337 hr_dev->hw_rev == HNS_ROCE_HW_VER1)
338 return;
9a443537 339
340 if (hr_dev->caps.trrl_entry_sz)
341 hns_roce_table_put(hr_dev, &qp_table->trrl_table, hr_qp->qpn);
342 hns_roce_table_put(hr_dev, &qp_table->irrl_table, hr_qp->qpn);
9a443537 343}
344
df83a66e 345static void free_qpn(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
9a443537 346{
347 struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
348
349 if (hr_qp->ibqp.qp_type == IB_QPT_GSI)
350 return;
351
352 if (hr_qp->qpn < hr_dev->caps.reserved_qps)
9a443537 353 return;
354
df83a66e 355 hns_roce_bitmap_free_range(&qp_table->bitmap, hr_qp->qpn, 1, BITMAP_RR);
9a443537 356}
357
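/*
 * RQ sizing: the requested depth is rounded up to a power of two (and up
 * to caps.min_wqes), the WQE stride is derived from max_rq_desc_sz and
 * the SGE count, and the rounded values are reported back through @cap.
 */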
ae85bf92 358static int set_rq_size(struct hns_roce_dev *hr_dev,
e00b64f7 359 struct ib_qp_cap *cap, bool is_user, int has_rq,
9a443537 360 struct hns_roce_qp *hr_qp)
361{
362 u32 max_cnt;
9a443537 363
	/* If the QP has no RQ (e.g. it uses an SRQ), clear all RQ attributes */
365 if (!has_rq) {
366 hr_qp->rq.wqe_cnt = 0;
367 hr_qp->rq.max_gs = 0;
368 cap->max_recv_wr = 0;
369 cap->max_recv_sge = 0;
9a443537 370
371 return 0;
372 }
926a01dc 373
	/* Check that the requested RQ size is within the device limits */
375 if (!cap->max_recv_wr || cap->max_recv_wr > hr_dev->caps.max_wqes ||
376 cap->max_recv_sge > hr_dev->caps.max_rq_sg) {
377 ibdev_err(&hr_dev->ib_dev, "RQ config error, depth=%u, sge=%d\n",
378 cap->max_recv_wr, cap->max_recv_sge);
379 return -EINVAL;
380 }
9a443537 381
026ded37 382 max_cnt = max(cap->max_recv_wr, hr_dev->caps.min_wqes);
9a443537 383
384 hr_qp->rq.wqe_cnt = roundup_pow_of_two(max_cnt);
385 if ((u32)hr_qp->rq.wqe_cnt > hr_dev->caps.max_wqes) {
386 ibdev_err(&hr_dev->ib_dev, "rq depth %u too large\n",
387 cap->max_recv_wr);
388 return -EINVAL;
9a443537 389 }
390
391 max_cnt = max(1U, cap->max_recv_sge);
392 hr_qp->rq.max_gs = roundup_pow_of_two(max_cnt);
393
394 if (hr_dev->caps.max_rq_sg <= HNS_ROCE_SGE_IN_WQE)
395 hr_qp->rq.wqe_shift = ilog2(hr_dev->caps.max_rq_desc_sz);
396 else
397 hr_qp->rq.wqe_shift = ilog2(hr_dev->caps.max_rq_desc_sz *
398 hr_qp->rq.max_gs);
399
ec6adad0 400 cap->max_recv_wr = hr_qp->rq.wqe_cnt;
9a443537 401 cap->max_recv_sge = hr_qp->rq.max_gs;
402
403 return 0;
404}
405
406static int check_sq_size_with_integrity(struct hns_roce_dev *hr_dev,
407 struct ib_qp_cap *cap,
408 struct hns_roce_ib_create_qp *ucmd)
9a443537 409{
410 u32 roundup_sq_stride = roundup_pow_of_two(hr_dev->caps.max_sq_desc_sz);
411 u8 max_sq_stride = ilog2(roundup_sq_stride);
412
413 /* Sanity check SQ size before proceeding */
414 if (ucmd->log_sq_stride > max_sq_stride ||
415 ucmd->log_sq_stride < HNS_ROCE_IB_MIN_SQ_STRIDE) {
ae85bf92 416 ibdev_err(&hr_dev->ib_dev, "Failed to check SQ stride size\n");
9a443537 417 return -EINVAL;
418 }
419
926a01dc 420 if (cap->max_send_sge > hr_dev->caps.max_sq_sg) {
ae85bf92 421 ibdev_err(&hr_dev->ib_dev, "Failed to check SQ SGE size %d\n",
db50077b 422 cap->max_send_sge);
423 return -EINVAL;
424 }
425
426 return 0;
427}
428
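/*
 * User SQ sizing: wqe_cnt and the WQE stride come from the ucmd after
 * validation; on HIP08 the buffer is then laid out as SQ WQEs, extended
 * SGE space (if any) and RQ WQEs, each chunk aligned to the MTT page size.
 */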
429static int set_user_sq_size(struct hns_roce_dev *hr_dev,
430 struct ib_qp_cap *cap, struct hns_roce_qp *hr_qp,
431 struct hns_roce_ib_create_qp *ucmd)
432{
433 u32 ex_sge_num;
434 u32 page_size;
435 u32 max_cnt;
436 int ret;
437
438 if (check_shl_overflow(1, ucmd->log_sq_bb_count, &hr_qp->sq.wqe_cnt) ||
439 hr_qp->sq.wqe_cnt > hr_dev->caps.max_wqes)
440 return -EINVAL;
441
442 ret = check_sq_size_with_integrity(hr_dev, cap, ucmd);
443 if (ret) {
ae85bf92 444 ibdev_err(&hr_dev->ib_dev, "Failed to check user SQ size limit\n");
445 return ret;
446 }
447
9a443537 448 hr_qp->sq.wqe_shift = ucmd->log_sq_stride;
449
926a01dc 450 max_cnt = max(1U, cap->max_send_sge);
b14c95be 451 if (hr_dev->hw_rev == HNS_ROCE_HW_VER1)
452 hr_qp->sq.max_gs = roundup_pow_of_two(max_cnt);
453 else
454 hr_qp->sq.max_gs = max_cnt;
455
b14c95be 456 if (hr_qp->sq.max_gs > HNS_ROCE_SGE_IN_WQE)
457 hr_qp->sge.sge_cnt = roundup_pow_of_two(hr_qp->sq.wqe_cnt *
458 (hr_qp->sq.max_gs - 2));
05ad5482 459
460 if (hr_qp->sq.max_gs > HNS_ROCE_SGE_IN_WQE &&
461 hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08_A) {
05ad5482 462 if (hr_qp->sge.sge_cnt > hr_dev->caps.max_extend_sg) {
463 ibdev_err(&hr_dev->ib_dev,
464 "Failed to check extended SGE size limit %d\n",
465 hr_qp->sge.sge_cnt);
466 return -EINVAL;
467 }
468 }
469
926a01dc 470 hr_qp->sge.sge_shift = 4;
b28ca7cc 471 ex_sge_num = hr_qp->sge.sge_cnt;
926a01dc 472
9a443537 473 /* Get buf size, SQ and RQ are aligned to page_szie */
b14c95be 474 if (hr_dev->hw_rev == HNS_ROCE_HW_VER1) {
d800c93b 475 hr_qp->buff_size = round_up((hr_qp->rq.wqe_cnt <<
9a443537 476 hr_qp->rq.wqe_shift), PAGE_SIZE) +
d800c93b 477 round_up((hr_qp->sq.wqe_cnt <<
9a443537 478 hr_qp->sq.wqe_shift), PAGE_SIZE);
479
926a01dc 480 hr_qp->sq.offset = 0;
d800c93b 481 hr_qp->rq.offset = round_up((hr_qp->sq.wqe_cnt <<
9a443537 482 hr_qp->sq.wqe_shift), PAGE_SIZE);
926a01dc 483 } else {
9a8982dc 484 page_size = 1 << (hr_dev->caps.mtt_buf_pg_sz + PAGE_SHIFT);
485 hr_qp->sge.sge_cnt = ex_sge_num ?
486 max(page_size / (1 << hr_qp->sge.sge_shift), ex_sge_num) : 0;
d800c93b 487 hr_qp->buff_size = round_up((hr_qp->rq.wqe_cnt <<
9a8982dc 488 hr_qp->rq.wqe_shift), page_size) +
d800c93b 489 round_up((hr_qp->sge.sge_cnt <<
9a8982dc 490 hr_qp->sge.sge_shift), page_size) +
d800c93b 491 round_up((hr_qp->sq.wqe_cnt <<
9a8982dc 492 hr_qp->sq.wqe_shift), page_size);
493
494 hr_qp->sq.offset = 0;
b28ca7cc 495 if (ex_sge_num) {
496 hr_qp->sge.offset = round_up((hr_qp->sq.wqe_cnt <<
497 hr_qp->sq.wqe_shift),
498 page_size);
926a01dc 499 hr_qp->rq.offset = hr_qp->sge.offset +
500 round_up((hr_qp->sge.sge_cnt <<
501 hr_qp->sge.sge_shift),
502 page_size);
926a01dc 503 } else {
504 hr_qp->rq.offset = round_up((hr_qp->sq.wqe_cnt <<
505 hr_qp->sq.wqe_shift),
506 page_size);
507 }
508 }
9a443537 509
510 return 0;
511}
512
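/*
 * Describe the WQE buffer as up to three MTR regions (SQ WQE, extended
 * SGE, RQ WQE), each tagged with its hop number from the device caps.
 * The hop number is the depth of the base-address table used to address
 * the region; 0 hop addressing, which this change enables for WQE
 * buffers, means the buffer is referenced directly without any
 * intermediate base-address table.
 */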
513static int split_wqe_buf_region(struct hns_roce_dev *hr_dev,
514 struct hns_roce_qp *hr_qp,
d563099e 515 struct hns_roce_buf_attr *buf_attr)
8d18ad83 516{
8d18ad83 517 bool is_extend_sge;
8d18ad83 518 int buf_size;
d563099e 519 int idx = 0;
8d18ad83 520
521 if (hr_qp->buff_size < 1)
522 return -EINVAL;
523
524 buf_attr->page_shift = PAGE_ADDR_SHIFT + hr_dev->caps.mtt_buf_pg_sz;
525 buf_attr->fixed_page = true;
526 buf_attr->region_count = 0;
527
528 if (hr_qp->sge.sge_cnt > 0)
529 is_extend_sge = true;
530 else
531 is_extend_sge = false;
532
d563099e 533 /* SQ WQE */
534 if (is_extend_sge)
535 buf_size = hr_qp->sge.offset - hr_qp->sq.offset;
536 else
537 buf_size = hr_qp->rq.offset - hr_qp->sq.offset;
538
539 if (buf_size > 0 && idx < ARRAY_SIZE(buf_attr->region)) {
540 buf_attr->region[idx].size = buf_size;
541 buf_attr->region[idx].hopnum = hr_dev->caps.wqe_sq_hop_num;
542 idx++;
543 }
544
545 /* extend SGE in SQ WQE */
546 buf_size = hr_qp->rq.offset - hr_qp->sge.offset;
547 if (buf_size > 0 && is_extend_sge &&
548 idx < ARRAY_SIZE(buf_attr->region)) {
549 buf_attr->region[idx].size = buf_size;
550 buf_attr->region[idx].hopnum =
551 hr_dev->caps.wqe_sge_hop_num;
552 idx++;
553 }
554
d563099e 555 /* RQ WQE */
8d18ad83 556 buf_size = hr_qp->buff_size - hr_qp->rq.offset;
557 if (buf_size > 0 && idx < ARRAY_SIZE(buf_attr->region)) {
558 buf_attr->region[idx].size = buf_size;
559 buf_attr->region[idx].hopnum = hr_dev->caps.wqe_rq_hop_num;
560 idx++;
561 }
562
563 buf_attr->region_count = idx;
564
565 return 0;
566}
567
568static int set_extend_sge_param(struct hns_roce_dev *hr_dev,
569 struct hns_roce_qp *hr_qp)
570{
571 struct device *dev = hr_dev->dev;
572
573 if (hr_qp->sq.max_gs > 2) {
574 hr_qp->sge.sge_cnt = roundup_pow_of_two(hr_qp->sq.wqe_cnt *
575 (hr_qp->sq.max_gs - 2));
576 hr_qp->sge.sge_shift = 4;
577 }
578
	/* UD SQ WQEs carry all of their SGEs in the extended SGE space */
580 if (hr_dev->hw_rev != HNS_ROCE_HW_VER1 &&
581 hr_qp->ibqp.qp_type == IB_QPT_GSI) {
582 hr_qp->sge.sge_cnt = roundup_pow_of_two(hr_qp->sq.wqe_cnt *
583 hr_qp->sq.max_gs);
584 hr_qp->sge.sge_shift = 4;
585 }
586
587 if (hr_qp->sq.max_gs > 2 &&
588 hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08_A) {
589 if (hr_qp->sge.sge_cnt > hr_dev->caps.max_extend_sg) {
			dev_err(dev, "invalid extended SGE count, sge_cnt = %d\n",
				hr_qp->sge.sge_cnt);
592 return -EINVAL;
593 }
594 }
595
596 return 0;
597}
598
599static int set_kernel_sq_size(struct hns_roce_dev *hr_dev,
600 struct ib_qp_cap *cap, struct hns_roce_qp *hr_qp)
9a443537 601{
9a8982dc 602 u32 page_size;
9a443537 603 u32 max_cnt;
926a01dc 604 int size;
947441ea 605 int ret;
9a443537 606
026ded37 607 if (!cap->max_send_wr || cap->max_send_wr > hr_dev->caps.max_wqes ||
9a443537 608 cap->max_send_sge > hr_dev->caps.max_sq_sg ||
609 cap->max_inline_data > hr_dev->caps.max_sq_inline) {
		ibdev_err(&hr_dev->ib_dev,
			  "invalid SQ WR, SGE or inline data parameters\n");
9a443537 612 return -EINVAL;
613 }
614
615 hr_qp->sq.wqe_shift = ilog2(hr_dev->caps.max_sq_desc_sz);
9a443537 616
026ded37 617 max_cnt = max(cap->max_send_wr, hr_dev->caps.min_wqes);
926a01dc 618
9a443537 619 hr_qp->sq.wqe_cnt = roundup_pow_of_two(max_cnt);
620 if ((u32)hr_qp->sq.wqe_cnt > hr_dev->caps.max_wqes) {
		ibdev_err(&hr_dev->ib_dev,
			  "SQ WQE count is too large while setting kernel SQ size\n");
9a443537 623 return -EINVAL;
624 }
625
626 /* Get data_seg numbers */
627 max_cnt = max(1U, cap->max_send_sge);
b14c95be 628 if (hr_dev->hw_rev == HNS_ROCE_HW_VER1)
629 hr_qp->sq.max_gs = roundup_pow_of_two(max_cnt);
630 else
631 hr_qp->sq.max_gs = max_cnt;
9a443537 632
633 ret = set_extend_sge_param(hr_dev, hr_qp);
634 if (ret) {
026ded37 635 ibdev_err(&hr_dev->ib_dev, "set extend sge parameters fail\n");
947441ea 636 return ret;
637 }
638
926a01dc 639 /* Get buf size, SQ and RQ are aligned to PAGE_SIZE */
9a8982dc 640 page_size = 1 << (hr_dev->caps.mtt_buf_pg_sz + PAGE_SHIFT);
9a443537 641 hr_qp->sq.offset = 0;
d800c93b 642 size = round_up(hr_qp->sq.wqe_cnt << hr_qp->sq.wqe_shift, page_size);
926a01dc 643
b14c95be 644 if (hr_dev->hw_rev != HNS_ROCE_HW_VER1 && hr_qp->sge.sge_cnt) {
b28ca7cc 645 hr_qp->sge.sge_cnt = max(page_size/(1 << hr_qp->sge.sge_shift),
d800c93b 646 (u32)hr_qp->sge.sge_cnt);
926a01dc 647 hr_qp->sge.offset = size;
648 size += round_up(hr_qp->sge.sge_cnt << hr_qp->sge.sge_shift,
649 page_size);
650 }
651
652 hr_qp->rq.offset = size;
d800c93b 653 size += round_up((hr_qp->rq.wqe_cnt << hr_qp->rq.wqe_shift), page_size);
926a01dc 654 hr_qp->buff_size = size;
9a443537 655
	/* Report the actual SQ WR and SGE counts back to the caller */
ec6adad0 657 cap->max_send_wr = hr_qp->sq.wqe_cnt;
9a443537 658 cap->max_send_sge = hr_qp->sq.max_gs;
659
660 /* We don't support inline sends for kernel QPs (yet) */
661 cap->max_inline_data = 0;
662
663 return 0;
664}
665
666static int hns_roce_qp_has_sq(struct ib_qp_init_attr *attr)
667{
2557fabd 668 if (attr->qp_type == IB_QPT_XRC_TGT || !attr->cap.max_send_wr)
669 return 0;
670
671 return 1;
672}
673
674static int hns_roce_qp_has_rq(struct ib_qp_init_attr *attr)
675{
676 if (attr->qp_type == IB_QPT_XRC_INI ||
677 attr->qp_type == IB_QPT_XRC_TGT || attr->srq ||
678 !attr->cap.max_recv_wr)
679 return 0;
680
681 return 1;
682}
683
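/*
 * RQ inline receive: one hns_roce_rinl_wqe per RQ WQE, all sharing a
 * single contiguous sg_list array, so each WQE's SGE slots are simply
 * offsets into wqe_list[0].sg_list.
 */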
684static int alloc_rq_inline_buf(struct hns_roce_qp *hr_qp,
685 struct ib_qp_init_attr *init_attr)
686{
687 u32 max_recv_sge = init_attr->cap.max_recv_sge;
688 struct hns_roce_rinl_wqe *wqe_list;
689 u32 wqe_cnt = hr_qp->rq.wqe_cnt;
690 int i;
691
692 /* allocate recv inline buf */
693 wqe_list = kcalloc(wqe_cnt, sizeof(struct hns_roce_rinl_wqe),
694 GFP_KERNEL);
695
696 if (!wqe_list)
697 goto err;
698
	/* Allocate a contiguous buffer for all the inline sges we need */
700 wqe_list[0].sg_list = kcalloc(wqe_cnt, (max_recv_sge *
701 sizeof(struct hns_roce_rinl_sge)),
702 GFP_KERNEL);
703 if (!wqe_list[0].sg_list)
704 goto err_wqe_list;
705
706 /* Assign buffers of sg_list to each inline wqe */
707 for (i = 1; i < wqe_cnt; i++)
708 wqe_list[i].sg_list = &wqe_list[0].sg_list[i * max_recv_sge];
709
710 hr_qp->rq_inl_buf.wqe_list = wqe_list;
711 hr_qp->rq_inl_buf.wqe_cnt = wqe_cnt;
712
713 return 0;
714
715err_wqe_list:
716 kfree(wqe_list);
717
718err:
719 return -ENOMEM;
720}
721
722static void free_rq_inline_buf(struct hns_roce_qp *hr_qp)
723{
724 kfree(hr_qp->rq_inl_buf.wqe_list[0].sg_list);
725 kfree(hr_qp->rq_inl_buf.wqe_list);
726}
727
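/*
 * The WQE buffer is managed through the generic MTR interface:
 * split_wqe_buf_region() fills a hns_roce_buf_attr and
 * hns_roce_mtr_create() allocates (or, for user QPs, pins) the buffer and
 * sets up its address translation according to the per-region hop numbers.
 */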
728static int alloc_qp_buf(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
729 struct ib_qp_init_attr *init_attr,
730 struct ib_udata *udata, unsigned long addr)
731{
24c22112 732 struct ib_device *ibdev = &hr_dev->ib_dev;
d563099e 733 struct hns_roce_buf_attr buf_attr = {};
734 bool is_rq_buf_inline;
735 int ret;
736
737 is_rq_buf_inline = (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) &&
738 hns_roce_qp_has_rq(init_attr);
739 if (is_rq_buf_inline) {
740 ret = alloc_rq_inline_buf(hr_qp, init_attr);
741 if (ret) {
742 ibdev_err(ibdev, "Failed to alloc inline RQ buffer\n");
743 return ret;
744 }
745 }
746
747 ret = split_wqe_buf_region(hr_dev, hr_qp, &buf_attr);
748 if (ret) {
749 ibdev_err(ibdev, "Failed to split WQE buf, ret %d\n", ret);
750 goto err_inline;
751 }
752 ret = hns_roce_mtr_create(hr_dev, &hr_qp->mtr, &buf_attr,
753 PAGE_ADDR_SHIFT + hr_dev->caps.mtt_ba_pg_sz,
754 udata, addr);
755 if (ret) {
756 ibdev_err(ibdev, "Failed to create WQE mtr, ret %d\n", ret);
757 goto err_inline;
24c22112 758 }
759
760 return 0;
761err_inline:
762 if (is_rq_buf_inline)
763 free_rq_inline_buf(hr_qp);
764
765 return ret;
766}
767
768static void free_qp_buf(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
769{
d563099e 770 hns_roce_mtr_destroy(hr_dev, &hr_qp->mtr);
771 if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) &&
772 hr_qp->rq.wqe_cnt)
773 free_rq_inline_buf(hr_qp);
774}
ae85bf92 775
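/*
 * Record doorbells: for user QPs the SQ/RQ doorbell records live in user
 * memory and are mapped with hns_roce_db_map_user(); kernel QPs only need
 * an RQ doorbell record, which comes from hns_roce_alloc_db().
 */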
776static inline bool user_qp_has_sdb(struct hns_roce_dev *hr_dev,
777 struct ib_qp_init_attr *init_attr,
778 struct ib_udata *udata,
779 struct hns_roce_ib_create_qp_resp *resp,
780 struct hns_roce_ib_create_qp *ucmd)
781{
782 return ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SQ_RECORD_DB) &&
783 udata->outlen >= offsetofend(typeof(*resp), cap_flags) &&
784 hns_roce_qp_has_sq(init_attr) &&
785 udata->inlen >= offsetofend(typeof(*ucmd), sdb_addr));
786}
787
788static inline bool user_qp_has_rdb(struct hns_roce_dev *hr_dev,
789 struct ib_qp_init_attr *init_attr,
790 struct ib_udata *udata,
791 struct hns_roce_ib_create_qp_resp *resp)
792{
793 return ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) &&
794 udata->outlen >= offsetofend(typeof(*resp), cap_flags) &&
795 hns_roce_qp_has_rq(init_attr));
796}
797
798static inline bool kernel_qp_has_rdb(struct hns_roce_dev *hr_dev,
799 struct ib_qp_init_attr *init_attr)
800{
801 return ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) &&
802 hns_roce_qp_has_rq(init_attr));
803}
804
805static int alloc_qp_db(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
806 struct ib_qp_init_attr *init_attr,
807 struct ib_udata *udata,
808 struct hns_roce_ib_create_qp *ucmd,
809 struct hns_roce_ib_create_qp_resp *resp)
810{
811 struct hns_roce_ucontext *uctx = rdma_udata_to_drv_context(
812 udata, struct hns_roce_ucontext, ibucontext);
813 struct ib_device *ibdev = &hr_dev->ib_dev;
814 int ret;
815
816 if (udata) {
817 if (user_qp_has_sdb(hr_dev, init_attr, udata, resp, ucmd)) {
818 ret = hns_roce_db_map_user(uctx, udata, ucmd->sdb_addr,
819 &hr_qp->sdb);
820 if (ret) {
821 ibdev_err(ibdev,
822 "Failed to map user SQ doorbell\n");
823 goto err_out;
824 }
825 hr_qp->sdb_en = 1;
826 resp->cap_flags |= HNS_ROCE_SUPPORT_SQ_RECORD_DB;
827 }
828
829 if (user_qp_has_rdb(hr_dev, init_attr, udata, resp)) {
830 ret = hns_roce_db_map_user(uctx, udata, ucmd->db_addr,
831 &hr_qp->rdb);
832 if (ret) {
833 ibdev_err(ibdev,
834 "Failed to map user RQ doorbell\n");
835 goto err_sdb;
836 }
837 hr_qp->rdb_en = 1;
838 resp->cap_flags |= HNS_ROCE_SUPPORT_RQ_RECORD_DB;
839 }
840 } else {
841 /* QP doorbell register address */
842 hr_qp->sq.db_reg_l = hr_dev->reg_base + hr_dev->sdb_offset +
843 DB_REG_OFFSET * hr_dev->priv_uar.index;
844 hr_qp->rq.db_reg_l = hr_dev->reg_base + hr_dev->odb_offset +
845 DB_REG_OFFSET * hr_dev->priv_uar.index;
846
847 if (kernel_qp_has_rdb(hr_dev, init_attr)) {
848 ret = hns_roce_alloc_db(hr_dev, &hr_qp->rdb, 0);
849 if (ret) {
850 ibdev_err(ibdev,
851 "Failed to alloc kernel RQ doorbell\n");
852 goto err_out;
853 }
854 *hr_qp->rdb.db_record = 0;
855 hr_qp->rdb_en = 1;
856 }
857 }
858
859 return 0;
860err_sdb:
861 if (udata && hr_qp->sdb_en)
862 hns_roce_db_unmap_user(uctx, &hr_qp->sdb);
863err_out:
864 return ret;
865}
866
867static void free_qp_db(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
868 struct ib_udata *udata)
869{
870 struct hns_roce_ucontext *uctx = rdma_udata_to_drv_context(
871 udata, struct hns_roce_ucontext, ibucontext);
872
873 if (udata) {
874 if (hr_qp->rdb_en)
875 hns_roce_db_unmap_user(uctx, &hr_qp->rdb);
876 if (hr_qp->sdb_en)
877 hns_roce_db_unmap_user(uctx, &hr_qp->sdb);
878 } else {
879 if (hr_qp->rdb_en)
880 hns_roce_free_db(hr_dev, &hr_qp->rdb);
881 }
882}
883
884static int alloc_kernel_wrid(struct hns_roce_dev *hr_dev,
885 struct hns_roce_qp *hr_qp)
886{
887 struct ib_device *ibdev = &hr_dev->ib_dev;
888 u64 *sq_wrid = NULL;
889 u64 *rq_wrid = NULL;
890 int ret;
891
892 sq_wrid = kcalloc(hr_qp->sq.wqe_cnt, sizeof(u64), GFP_KERNEL);
893 if (ZERO_OR_NULL_PTR(sq_wrid)) {
894 ibdev_err(ibdev, "Failed to alloc SQ wrid\n");
895 return -ENOMEM;
896 }
897
898 if (hr_qp->rq.wqe_cnt) {
899 rq_wrid = kcalloc(hr_qp->rq.wqe_cnt, sizeof(u64), GFP_KERNEL);
900 if (ZERO_OR_NULL_PTR(rq_wrid)) {
901 ibdev_err(ibdev, "Failed to alloc RQ wrid\n");
902 ret = -ENOMEM;
903 goto err_sq;
904 }
905 }
906
907 hr_qp->sq.wrid = sq_wrid;
908 hr_qp->rq.wrid = rq_wrid;
909 return 0;
910err_sq:
911 kfree(sq_wrid);
912
913 return ret;
914}
915
916static void free_kernel_wrid(struct hns_roce_dev *hr_dev,
917 struct hns_roce_qp *hr_qp)
918{
919 kfree(hr_qp->rq.wrid);
920 kfree(hr_qp->sq.wrid);
921}
922
923static int set_qp_param(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
924 struct ib_qp_init_attr *init_attr,
925 struct ib_udata *udata,
926 struct hns_roce_ib_create_qp *ucmd)
927{
928 struct ib_device *ibdev = &hr_dev->ib_dev;
929 int ret;
930
931 hr_qp->ibqp.qp_type = init_attr->qp_type;
932
933 if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR)
934 hr_qp->sq_signal_bits = IB_SIGNAL_ALL_WR;
935 else
936 hr_qp->sq_signal_bits = IB_SIGNAL_REQ_WR;
937
938 ret = set_rq_size(hr_dev, &init_attr->cap, udata,
939 hns_roce_qp_has_rq(init_attr), hr_qp);
940 if (ret) {
		ibdev_err(ibdev, "Failed to set RQ size\n");
942 return ret;
943 }
944
945 if (udata) {
946 if (ib_copy_from_udata(ucmd, udata, sizeof(*ucmd))) {
947 ibdev_err(ibdev, "Failed to copy QP ucmd\n");
948 return -EFAULT;
949 }
950
951 ret = set_user_sq_size(hr_dev, &init_attr->cap, hr_qp, ucmd);
952 if (ret)
953 ibdev_err(ibdev, "Failed to set user SQ size\n");
954 } else {
955 if (init_attr->create_flags &
956 IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK) {
			ibdev_err(ibdev, "block multicast loopback is not supported\n");
958 return -EINVAL;
959 }
960
961 if (init_attr->create_flags & IB_QP_CREATE_IPOIB_UD_LSO) {
			ibdev_err(ibdev, "IPoIB UD LSO is not supported\n");
963 return -EINVAL;
964 }
965
966 ret = set_kernel_sq_size(hr_dev, &init_attr->cap, hr_qp);
967 if (ret)
968 ibdev_err(ibdev, "Failed to set kernel SQ size\n");
969 }
970
971 return ret;
972}
973
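/*
 * QP creation is a sequence of helpers, each undone in reverse order on
 * failure: set_qp_param -> alloc_kernel_wrid (kernel QPs only) ->
 * alloc_qp_db -> alloc_qp_buf -> alloc_qpn -> alloc_qpc ->
 * hns_roce_qp_store.
 */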
9a443537 974static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
975 struct ib_pd *ib_pd,
976 struct ib_qp_init_attr *init_attr,
df83a66e 977 struct ib_udata *udata,
9a443537 978 struct hns_roce_qp *hr_qp)
979{
7b48221c 980 struct hns_roce_ib_create_qp_resp resp = {};
981 struct ib_device *ibdev = &hr_dev->ib_dev;
982 struct hns_roce_ib_create_qp ucmd;
8d18ad83 983 int ret;
9a443537 984
985 mutex_init(&hr_qp->mutex);
986 spin_lock_init(&hr_qp->sq.lock);
987 spin_lock_init(&hr_qp->rq.lock);
988
989 hr_qp->state = IB_QPS_RESET;
b5374286 990 hr_qp->flush_flag = 0;
9a443537 991
ae85bf92 992 ret = set_qp_param(hr_dev, hr_qp, init_attr, udata, &ucmd);
9a443537 993 if (ret) {
cfec045b 994 ibdev_err(ibdev, "Failed to set QP param\n");
ae85bf92 995 return ret;
9a443537 996 }
997
cfec045b 998 if (!udata) {
999 ret = alloc_kernel_wrid(hr_dev, hr_qp);
1000 if (ret) {
1001 ibdev_err(ibdev, "Failed to alloc wrid\n");
1002 return ret;
76827087 1003 }
9a443537 1004 }
1005
1006 ret = alloc_qp_db(hr_dev, hr_qp, init_attr, udata, &ucmd, &resp);
1007 if (ret) {
1008 ibdev_err(ibdev, "Failed to alloc QP doorbell\n");
1009 goto err_wrid;
1010 }
1011
24c22112 1012 ret = alloc_qp_buf(hr_dev, hr_qp, init_attr, udata, ucmd.buf_addr);
8d18ad83 1013 if (ret) {
cfec045b 1014 ibdev_err(ibdev, "Failed to alloc QP buffer\n");
24c22112 1015 goto err_db;
1016 }
1017
1018 ret = alloc_qpn(hr_dev, hr_qp);
1019 if (ret) {
cfec045b 1020 ibdev_err(ibdev, "Failed to alloc QPN\n");
24c22112 1021 goto err_buf;
1022 }
1023
1024 ret = alloc_qpc(hr_dev, hr_qp);
1025 if (ret) {
cfec045b 1026 ibdev_err(ibdev, "Failed to alloc QP context\n");
1027 goto err_qpn;
1028 }
1029
1030 ret = hns_roce_qp_store(hr_dev, hr_qp, init_attr);
1031 if (ret) {
cfec045b 1032 ibdev_err(ibdev, "Failed to store QP\n");
b71961d1 1033 goto err_qpc;
9a443537 1034 }
1035
1036 if (udata) {
1037 ret = ib_copy_to_udata(udata, &resp,
1038 min(udata->outlen, sizeof(resp)));
1039 if (ret) {
1040 ibdev_err(ibdev, "copy qp resp failed!\n");
b71961d1 1041 goto err_store;
cfec045b 1042 }
e088a685 1043 }
1044
1045 if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL) {
1046 ret = hr_dev->hw->qp_flow_control_init(hr_dev, hr_qp);
1047 if (ret)
b71961d1 1048 goto err_store;
1049 }
1050
df83a66e 1051 hr_qp->ibqp.qp_num = hr_qp->qpn;
9a443537 1052 hr_qp->event = hns_roce_ib_qp_event;
1053 atomic_set(&hr_qp->refcount, 1);
1054 init_completion(&hr_qp->free);
626903e9 1055
9a443537 1056 return 0;
1057
1058err_store:
1059 hns_roce_qp_remove(hr_dev, hr_qp);
1060err_qpc:
1061 free_qpc(hr_dev, hr_qp);
9a443537 1062err_qpn:
df83a66e 1063 free_qpn(hr_dev, hr_qp);
1064err_buf:
1065 free_qp_buf(hr_dev, hr_qp);
472bc0fb 1066err_db:
1067 free_qp_db(hr_dev, hr_qp, udata);
1068err_wrid:
1069 free_kernel_wrid(hr_dev, hr_qp);
9a443537 1070 return ret;
1071}
1072
1073void hns_roce_qp_destroy(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
1074 struct ib_udata *udata)
1075{
1076 if (atomic_dec_and_test(&hr_qp->refcount))
1077 complete(&hr_qp->free);
1078 wait_for_completion(&hr_qp->free);
1079
1080 free_qpc(hr_dev, hr_qp);
df83a66e 1081 free_qpn(hr_dev, hr_qp);
24c22112 1082 free_qp_buf(hr_dev, hr_qp);
b37c4139 1083 free_kernel_wrid(hr_dev, hr_qp);
cfec045b 1084 free_qp_db(hr_dev, hr_qp, udata);
1085
1086 kfree(hr_qp);
1087}
1088
9a443537 1089struct ib_qp *hns_roce_create_qp(struct ib_pd *pd,
1090 struct ib_qp_init_attr *init_attr,
1091 struct ib_udata *udata)
1092{
1093 struct hns_roce_dev *hr_dev = to_hr_dev(pd->device);
db50077b 1094 struct ib_device *ibdev = &hr_dev->ib_dev;
9a443537 1095 struct hns_roce_qp *hr_qp;
1096 int ret;
1097
1098 switch (init_attr->qp_type) {
1099 case IB_QPT_RC: {
1100 hr_qp = kzalloc(sizeof(*hr_qp), GFP_KERNEL);
1101 if (!hr_qp)
1102 return ERR_PTR(-ENOMEM);
1103
df83a66e 1104 ret = hns_roce_create_qp_common(hr_dev, pd, init_attr, udata,
9a443537 1105 hr_qp);
1106 if (ret) {
d11769fd 1107 ibdev_err(ibdev, "Create QP 0x%06lx failed(%d)\n",
db50077b 1108 hr_qp->qpn, ret);
9a443537 1109 kfree(hr_qp);
1110 return ERR_PTR(ret);
1111 }
1112
9a443537 1113 break;
1114 }
1115 case IB_QPT_GSI: {
1116 /* Userspace is not allowed to create special QPs: */
e00b64f7 1117 if (udata) {
			ibdev_err(ibdev, "userspace GSI QP is not supported\n");
9a443537 1119 return ERR_PTR(-EINVAL);
1120 }
1121
1122 hr_qp = kzalloc(sizeof(*hr_qp), GFP_KERNEL);
1123 if (!hr_qp)
9a443537 1124 return ERR_PTR(-ENOMEM);
1125
1126 hr_qp->port = init_attr->port_num - 1;
1127 hr_qp->phy_port = hr_dev->iboe.phy_port[hr_qp->port];
b66efc93 1128
9a443537 1129 ret = hns_roce_create_qp_common(hr_dev, pd, init_attr, udata,
df83a66e 1130 hr_qp);
9a443537 1131 if (ret) {
db50077b 1132 ibdev_err(ibdev, "Create GSI QP failed!\n");
16a11e0b 1133 kfree(hr_qp);
9a443537 1134 return ERR_PTR(ret);
1135 }
1136
9a443537 1137 break;
1138 }
1139 default:{
		ibdev_err(ibdev, "QP type %d is not supported\n",
			  init_attr->qp_type);
bb8865f4 1142 return ERR_PTR(-EOPNOTSUPP);
9a443537 1143 }
1144 }
1145
1146 return &hr_qp->ibqp;
1147}
1148
1149int to_hr_qp_type(int qp_type)
1150{
1151 int transport_type;
1152
1153 if (qp_type == IB_QPT_RC)
1154 transport_type = SERV_TYPE_RC;
1155 else if (qp_type == IB_QPT_UC)
1156 transport_type = SERV_TYPE_UC;
1157 else if (qp_type == IB_QPT_UD)
1158 transport_type = SERV_TYPE_UD;
1159 else if (qp_type == IB_QPT_GSI)
1160 transport_type = SERV_TYPE_UD;
1161 else
1162 transport_type = -1;
1163
1164 return transport_type;
1165}
1166
1167static int check_mtu_validate(struct hns_roce_dev *hr_dev,
1168 struct hns_roce_qp *hr_qp,
1169 struct ib_qp_attr *attr, int attr_mask)
9a443537 1170{
cb814642 1171 enum ib_mtu active_mtu;
8ea417ff 1172 int p;
9a443537 1173
8ea417ff 1174 p = attr_mask & IB_QP_PORT ? (attr->port_num - 1) : hr_qp->port;
a7325af7 1175 active_mtu = iboe_get_mtu(hr_dev->iboe.netdevs[p]->mtu);
de77503a 1176
1177 if ((hr_dev->caps.max_mtu >= IB_MTU_2048 &&
1178 attr->path_mtu > hr_dev->caps.max_mtu) ||
1179 attr->path_mtu < IB_MTU_256 || attr->path_mtu > active_mtu) {
		ibdev_err(&hr_dev->ib_dev,
			  "invalid attr path_mtu (%d) while modifying QP\n",
			  attr->path_mtu);
1183 return -EINVAL;
1184 }
1185
1186 return 0;
1187}
1188
1189static int hns_roce_check_qp_attr(struct ib_qp *ibqp, struct ib_qp_attr *attr,
1190 int attr_mask)
1191{
1192 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
1193 struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
8ea417ff 1194 int p;
9a443537 1195
1196 if ((attr_mask & IB_QP_PORT) &&
1197 (attr->port_num == 0 || attr->port_num > hr_dev->caps.num_ports)) {
		ibdev_err(&hr_dev->ib_dev,
			  "invalid attr port_num, attr->port_num = %d\n",
			  attr->port_num);
8ea417ff 1201 return -EINVAL;
9a443537 1202 }
1203
1204 if (attr_mask & IB_QP_PKEY_INDEX) {
1205 p = attr_mask & IB_QP_PORT ? (attr->port_num - 1) : hr_qp->port;
1206 if (attr->pkey_index >= hr_dev->caps.pkey_table_len[p]) {
			ibdev_err(&hr_dev->ib_dev,
				  "invalid attr pkey_index, attr->pkey_index = %d\n",
				  attr->pkey_index);
8ea417ff 1210 return -EINVAL;
1211 }
1212 }
1213
9a443537 1214 if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC &&
1215 attr->max_rd_atomic > hr_dev->caps.max_qp_init_rdma) {
		ibdev_err(&hr_dev->ib_dev,
			  "invalid attr max_rd_atomic, attr->max_rd_atomic = %d\n",
			  attr->max_rd_atomic);
8ea417ff 1219 return -EINVAL;
9a443537 1220 }
1221
1222 if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC &&
1223 attr->max_dest_rd_atomic > hr_dev->caps.max_qp_dest_rdma) {
		ibdev_err(&hr_dev->ib_dev,
			  "invalid attr max_dest_rd_atomic, attr->max_dest_rd_atomic = %d\n",
			  attr->max_dest_rd_atomic);
1227 return -EINVAL;
1228 }
1229
1230 if (attr_mask & IB_QP_PATH_MTU)
1231 return check_mtu_validate(hr_dev, hr_qp, attr, attr_mask);
1232
1233 return 0;
1234}
1235
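/*
 * hns_roce_modify_qp() validates the requested transition with
 * ib_modify_qp_is_ok() and the driver checks above, then hands the actual
 * state change to the hardware-specific hw->modify_qp() hook.
 */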
1236int hns_roce_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
1237 int attr_mask, struct ib_udata *udata)
1238{
1239 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
1240 struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
1241 enum ib_qp_state cur_state, new_state;
1242 int ret = -EINVAL;
1243
1244 mutex_lock(&hr_qp->mutex);
1245
1246 cur_state = attr_mask & IB_QP_CUR_STATE ?
1247 attr->cur_qp_state : (enum ib_qp_state)hr_qp->state;
1248 new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;
1249
1250 if (ibqp->uobject &&
1251 (attr_mask & IB_QP_STATE) && new_state == IB_QPS_ERR) {
1252 if (hr_qp->sdb_en == 1) {
1253 hr_qp->sq.head = *(int *)(hr_qp->sdb.virt_addr);
1254
1255 if (hr_qp->rdb_en == 1)
1256 hr_qp->rq.head = *(int *)(hr_qp->rdb.virt_addr);
1257 } else {
1258 ibdev_warn(&hr_dev->ib_dev,
1259 "flush cqe is not supported in userspace!\n");
1260 goto out;
1261 }
1262 }
1263
1264 if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
1265 attr_mask)) {
db50077b 1266 ibdev_err(&hr_dev->ib_dev, "ib_modify_qp_is_ok failed\n");
9a443537 1267 goto out;
1268 }
1269
1270 ret = hns_roce_check_qp_attr(ibqp, attr, attr_mask);
1271 if (ret)
1272 goto out;
1273
9a443537 1274 if (cur_state == new_state && cur_state == IB_QPS_RESET) {
026ded37 1275 if (hr_dev->hw_rev == HNS_ROCE_HW_VER1) {
391bd5fc 1276 ret = -EPERM;
db50077b 1277 ibdev_err(&hr_dev->ib_dev,
026ded37 1278 "RST2RST state is not supported\n");
391bd5fc 1279 } else {
1280 ret = 0;
1281 }
1282
9a443537 1283 goto out;
1284 }
1285
1286 ret = hr_dev->hw->modify_qp(ibqp, attr, attr_mask, cur_state,
1287 new_state);
1288
1289out:
1290 mutex_unlock(&hr_qp->mutex);
1291
1292 return ret;
1293}
1294
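/*
 * Lock both CQs of a QP. When the two CQs differ they are always taken in
 * ascending CQN order so concurrent callers cannot deadlock; the
 * __acquire()/__release() annotations keep sparse happy in the NULL and
 * same-CQ cases.
 */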
1295void hns_roce_lock_cqs(struct hns_roce_cq *send_cq, struct hns_roce_cq *recv_cq)
1296 __acquires(&send_cq->lock) __acquires(&recv_cq->lock)
1297{
1298 if (unlikely(send_cq == NULL && recv_cq == NULL)) {
1299 __acquire(&send_cq->lock);
1300 __acquire(&recv_cq->lock);
1301 } else if (unlikely(send_cq != NULL && recv_cq == NULL)) {
1302 spin_lock_irq(&send_cq->lock);
1303 __acquire(&recv_cq->lock);
1304 } else if (unlikely(send_cq == NULL && recv_cq != NULL)) {
1305 spin_lock_irq(&recv_cq->lock);
1306 __acquire(&send_cq->lock);
1307 } else if (send_cq == recv_cq) {
9a443537 1308 spin_lock_irq(&send_cq->lock);
1309 __acquire(&recv_cq->lock);
1310 } else if (send_cq->cqn < recv_cq->cqn) {
1311 spin_lock_irq(&send_cq->lock);
1312 spin_lock_nested(&recv_cq->lock, SINGLE_DEPTH_NESTING);
1313 } else {
1314 spin_lock_irq(&recv_cq->lock);
1315 spin_lock_nested(&send_cq->lock, SINGLE_DEPTH_NESTING);
1316 }
1317}
1318
1319void hns_roce_unlock_cqs(struct hns_roce_cq *send_cq,
1320 struct hns_roce_cq *recv_cq) __releases(&send_cq->lock)
1321 __releases(&recv_cq->lock)
1322{
1323 if (unlikely(send_cq == NULL && recv_cq == NULL)) {
1324 __release(&recv_cq->lock);
1325 __release(&send_cq->lock);
1326 } else if (unlikely(send_cq != NULL && recv_cq == NULL)) {
1327 __release(&recv_cq->lock);
1328 spin_unlock(&send_cq->lock);
1329 } else if (unlikely(send_cq == NULL && recv_cq != NULL)) {
1330 __release(&send_cq->lock);
1331 spin_unlock(&recv_cq->lock);
1332 } else if (send_cq == recv_cq) {
9a443537 1333 __release(&recv_cq->lock);
1334 spin_unlock_irq(&send_cq->lock);
1335 } else if (send_cq->cqn < recv_cq->cqn) {
1336 spin_unlock(&recv_cq->lock);
1337 spin_unlock_irq(&send_cq->lock);
1338 } else {
1339 spin_unlock(&send_cq->lock);
1340 spin_unlock_irq(&recv_cq->lock);
1341 }
1342}
1343
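/*
 * WQE lookup is a plain offset into the kernel mapping of the MTR buffer;
 * sq.offset, rq.offset and sge.offset were fixed when the buffer layout
 * was computed.
 */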
d563099e 1344static inline void *get_wqe(struct hns_roce_qp *hr_qp, int offset)
9a443537 1345{
d563099e 1346 return hns_roce_buf_offset(hr_qp->mtr.kmem, offset);
9a443537 1347}
1348
6c6e3921 1349void *hns_roce_get_recv_wqe(struct hns_roce_qp *hr_qp, int n)
9a443537 1350{
9a443537 1351 return get_wqe(hr_qp, hr_qp->rq.offset + (n << hr_qp->rq.wqe_shift));
1352}
1353
6c6e3921 1354void *hns_roce_get_send_wqe(struct hns_roce_qp *hr_qp, int n)
9a443537 1355{
9a443537 1356 return get_wqe(hr_qp, hr_qp->sq.offset + (n << hr_qp->sq.wqe_shift));
1357}
1358
6c6e3921 1359void *hns_roce_get_extend_sge(struct hns_roce_qp *hr_qp, int n)
926a01dc 1360{
d563099e 1361 return get_wqe(hr_qp, hr_qp->sge.offset + (n << hr_qp->sge.sge_shift));
926a01dc 1362}
926a01dc 1363
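/*
 * head and tail are free-running counters, so "head - tail" is the number
 * of outstanding WQEs even after wrap-around. The CQ lock is only taken
 * for a second, serialized check once the lockless test suggests the
 * queue may be full.
 */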
9a443537 1364bool hns_roce_wq_overflow(struct hns_roce_wq *hr_wq, int nreq,
1365 struct ib_cq *ib_cq)
1366{
1367 struct hns_roce_cq *hr_cq;
1368 u32 cur;
1369
1370 cur = hr_wq->head - hr_wq->tail;
ec6adad0 1371 if (likely(cur + nreq < hr_wq->wqe_cnt))
3756c7f5 1372 return false;
9a443537 1373
1374 hr_cq = to_hr_cq(ib_cq);
1375 spin_lock(&hr_cq->lock);
1376 cur = hr_wq->head - hr_wq->tail;
1377 spin_unlock(&hr_cq->lock);
1378
ec6adad0 1379 return cur + nreq >= hr_wq->wqe_cnt;
9a443537 1380}
1381
1382int hns_roce_init_qp_table(struct hns_roce_dev *hr_dev)
1383{
1384 struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
1385 int reserved_from_top = 0;
06ef0ee4 1386 int reserved_from_bot;
9a443537 1387 int ret;
1388
aa84fa18 1389 mutex_init(&qp_table->scc_mutex);
736b5a70 1390 xa_init(&hr_dev->qp_table_xa);
9a443537 1391
21b97f53 1392 reserved_from_bot = hr_dev->caps.reserved_qps;
06ef0ee4 1393
9a443537 1394 ret = hns_roce_bitmap_init(&qp_table->bitmap, hr_dev->caps.num_qps,
06ef0ee4 1395 hr_dev->caps.num_qps - 1, reserved_from_bot,
9a443537 1396 reserved_from_top);
1397 if (ret) {
		dev_err(hr_dev->dev, "QP bitmap init failed, error = %d\n",
9a443537 1399 ret);
1400 return ret;
1401 }
1402
1403 return 0;
1404}
1405
1406void hns_roce_cleanup_qp_table(struct hns_roce_dev *hr_dev)
1407{
1408 hns_roce_bitmap_cleanup(&hr_dev->qp_table.bitmap);
1409}