Commit | Line | Data |
---|---|---|
9a443537 | 1 | /* |
2 | * Copyright (c) 2016 Hisilicon Limited. | |
3 | * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved. | |
4 | * | |
5 | * This software is available to you under a choice of one of two | |
6 | * licenses. You may choose to be licensed under the terms of the GNU | |
7 | * General Public License (GPL) Version 2, available from the file | |
8 | * COPYING in the main directory of this source tree, or the | |
9 | * OpenIB.org BSD license below: | |
10 | * | |
11 | * Redistribution and use in source and binary forms, with or | |
12 | * without modification, are permitted provided that the following | |
13 | * conditions are met: | |
14 | * | |
15 | * - Redistributions of source code must retain the above | |
16 | * copyright notice, this list of conditions and the following | |
17 | * disclaimer. | |
18 | * | |
19 | * - Redistributions in binary form must reproduce the above | |
20 | * copyright notice, this list of conditions and the following | |
21 | * disclaimer in the documentation and/or other materials | |
22 | * provided with the distribution. | |
23 | * | |
24 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | |
25 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | |
26 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | |
27 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | |
28 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | |
29 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | |
30 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | |
31 | * SOFTWARE. | |
32 | */ | |
33 | ||
05ad5482 | 34 | #include <linux/pci.h> |
9a443537 | 35 | #include <linux/platform_device.h> |
cb814642 | 36 | #include <rdma/ib_addr.h> |
9a443537 | 37 | #include <rdma/ib_umem.h> |
89944450 | 38 | #include <rdma/uverbs_ioctl.h> |
9a443537 | 39 | #include "hns_roce_common.h" |
40 | #include "hns_roce_device.h" | |
41 | #include "hns_roce_hem.h" | |
4d409958 | 42 | #include <rdma/hns-abi.h> |
9a443537 | 43 | |
1ca5b253 | 44 | #define SQP_NUM (2 * HNS_ROCE_MAX_PORTS) |
9a443537 | 45 | |
ffd541d4 YL |
46 | static void flush_work_handle(struct work_struct *work) |
47 | { | |
48 | struct hns_roce_work *flush_work = container_of(work, | |
49 | struct hns_roce_work, work); | |
50 | struct hns_roce_qp *hr_qp = container_of(flush_work, | |
51 | struct hns_roce_qp, flush_work); | |
52 | struct device *dev = flush_work->hr_dev->dev; | |
53 | struct ib_qp_attr attr; | |
54 | int attr_mask; | |
55 | int ret; | |
56 | ||
57 | attr_mask = IB_QP_STATE; | |
58 | attr.qp_state = IB_QPS_ERR; | |
59 | ||
b5374286 YL |
60 | if (test_and_clear_bit(HNS_ROCE_FLUSH_FLAG, &hr_qp->flush_flag)) { |
61 | ret = hns_roce_modify_qp(&hr_qp->ibqp, &attr, attr_mask, NULL); | |
62 | if (ret) | |
63 | dev_err(dev, "Modify QP to error state failed (%d) during CQE flush\n", | |
64 | ret); | |
65 | } | |
ffd541d4 YL |
66 | |
67 | /* | |
68 | * Make sure we signal the QP-destroy path that the flush has | |
69 | * completed, so that it can now safely proceed and destroy the QP. | |
70 | */ | |
71 | if (atomic_dec_and_test(&hr_qp->refcount)) | |
72 | complete(&hr_qp->free); | |
73 | } | |
74 | ||
75 | void init_flush_work(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp) | |
76 | { | |
77 | struct hns_roce_work *flush_work = &hr_qp->flush_work; | |
78 | ||
79 | flush_work->hr_dev = hr_dev; | |
80 | INIT_WORK(&flush_work->work, flush_work_handle); | |
81 | atomic_inc(&hr_qp->refcount); | |
82 | queue_work(hr_dev->irq_workq, &flush_work->work); | |
83 | } | |
84 | ||
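The flush machinery above is reference counted: init_flush_work() takes a QP reference before queueing the work, and flush_work_handle() drops it after moving the QP to IB_QPS_ERR, so hns_roce_qp_destroy() can wait for any in-flight flush. A minimal sketch of the intended trigger pattern, mirroring the guard used later in hns_roce_qp_event(); the helper name report_qp_error() is hypothetical:

```c
/* Hedged sketch: request a CQE flush for a QP that has hit an error.
 * The HNS_ROCE_FLUSH_FLAG test-and-set ensures only one flush work item
 * is queued until flush_work_handle() clears the flag again.
 */
static void report_qp_error(struct hns_roce_dev *hr_dev,
			    struct hns_roce_qp *hr_qp)
{
	hr_qp->state = IB_QPS_ERR;

	if (!test_and_set_bit(HNS_ROCE_FLUSH_FLAG, &hr_qp->flush_flag))
		init_flush_work(hr_dev, hr_qp);
}
```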
9a443537 | 85 | void hns_roce_qp_event(struct hns_roce_dev *hr_dev, u32 qpn, int event_type) |
86 | { | |
13ca970e | 87 | struct device *dev = hr_dev->dev; |
9a443537 | 88 | struct hns_roce_qp *qp; |
89 | ||
736b5a70 | 90 | xa_lock(&hr_dev->qp_table_xa); |
9a443537 | 91 | qp = __hns_roce_qp_lookup(hr_dev, qpn); |
92 | if (qp) | |
93 | atomic_inc(&qp->refcount); | |
736b5a70 | 94 | xa_unlock(&hr_dev->qp_table_xa); |
9a443537 | 95 | |
96 | if (!qp) { | |
97 | dev_warn(dev, "Async event for bogus QP %08x\n", qpn); | |
98 | return; | |
99 | } | |
100 | ||
0fc99566 YL |
101 | if (hr_dev->hw_rev != HNS_ROCE_HW_VER1 && |
102 | (event_type == HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR || | |
103 | event_type == HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR || | |
104 | event_type == HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR)) { | |
105 | qp->state = IB_QPS_ERR; | |
106 | if (!test_and_set_bit(HNS_ROCE_FLUSH_FLAG, &qp->flush_flag)) | |
107 | init_flush_work(hr_dev, qp); | |
108 | } | |
109 | ||
9a443537 | 110 | qp->event(qp, (enum hns_roce_event)event_type); |
111 | ||
112 | if (atomic_dec_and_test(&qp->refcount)) | |
113 | complete(&qp->free); | |
114 | } | |
115 | ||
116 | static void hns_roce_ib_qp_event(struct hns_roce_qp *hr_qp, | |
117 | enum hns_roce_event type) | |
118 | { | |
119 | struct ib_event event; | |
120 | struct ib_qp *ibqp = &hr_qp->ibqp; | |
121 | ||
122 | if (ibqp->event_handler) { | |
123 | event.device = ibqp->device; | |
124 | event.element.qp = ibqp; | |
125 | switch (type) { | |
126 | case HNS_ROCE_EVENT_TYPE_PATH_MIG: | |
127 | event.event = IB_EVENT_PATH_MIG; | |
128 | break; | |
129 | case HNS_ROCE_EVENT_TYPE_COMM_EST: | |
130 | event.event = IB_EVENT_COMM_EST; | |
131 | break; | |
132 | case HNS_ROCE_EVENT_TYPE_SQ_DRAINED: | |
133 | event.event = IB_EVENT_SQ_DRAINED; | |
134 | break; | |
135 | case HNS_ROCE_EVENT_TYPE_SRQ_LAST_WQE_REACH: | |
136 | event.event = IB_EVENT_QP_LAST_WQE_REACHED; | |
137 | break; | |
138 | case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR: | |
139 | event.event = IB_EVENT_QP_FATAL; | |
140 | break; | |
141 | case HNS_ROCE_EVENT_TYPE_PATH_MIG_FAILED: | |
142 | event.event = IB_EVENT_PATH_MIG_ERR; | |
143 | break; | |
144 | case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR: | |
145 | event.event = IB_EVENT_QP_REQ_ERR; | |
146 | break; | |
147 | case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR: | |
148 | event.event = IB_EVENT_QP_ACCESS_ERR; | |
149 | break; | |
150 | default: | |
fecd02eb | 151 | dev_dbg(ibqp->device->dev.parent, "roce_ib: Unexpected event type %d on QP %06lx\n", |
9a443537 | 152 | type, hr_qp->qpn); |
153 | return; | |
154 | } | |
155 | ibqp->event_handler(&event, ibqp->qp_context); | |
156 | } | |
157 | } | |
158 | ||
df83a66e | 159 | static int alloc_qpn(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp) |
9a443537 | 160 | { |
df83a66e XW |
161 | unsigned long num = 0; |
162 | int ret; | |
163 | ||
164 | if (hr_qp->ibqp.qp_type == IB_QPT_GSI) { | |
165 | /* In hw v1, the GSI QPN is a reserved value derived from the physical port */ | |
166 | if (hr_dev->hw_rev == HNS_ROCE_HW_VER1) | |
167 | num = HNS_ROCE_MAX_PORTS + | |
168 | hr_dev->iboe.phy_port[hr_qp->port]; | |
169 | else | |
170 | num = 1; | |
171 | ||
172 | hr_qp->doorbell_qpn = 1; | |
173 | } else { | |
174 | ret = hns_roce_bitmap_alloc_range(&hr_dev->qp_table.bitmap, | |
175 | 1, 1, &num); | |
176 | if (ret) { | |
177 | ibdev_err(&hr_dev->ib_dev, "Failed to alloc bitmap\n"); | |
178 | return -ENOMEM; | |
179 | } | |
180 | ||
181 | hr_qp->doorbell_qpn = (u32)num; | |
182 | } | |
183 | ||
184 | hr_qp->qpn = num; | |
9a443537 | 185 | |
df83a66e | 186 | return 0; |
9a443537 | 187 | } |
188 | ||
189 | enum hns_roce_qp_state to_hns_roce_state(enum ib_qp_state state) | |
190 | { | |
191 | switch (state) { | |
192 | case IB_QPS_RESET: | |
193 | return HNS_ROCE_QP_STATE_RST; | |
194 | case IB_QPS_INIT: | |
195 | return HNS_ROCE_QP_STATE_INIT; | |
196 | case IB_QPS_RTR: | |
197 | return HNS_ROCE_QP_STATE_RTR; | |
198 | case IB_QPS_RTS: | |
199 | return HNS_ROCE_QP_STATE_RTS; | |
200 | case IB_QPS_SQD: | |
201 | return HNS_ROCE_QP_STATE_SQD; | |
202 | case IB_QPS_ERR: | |
203 | return HNS_ROCE_QP_STATE_ERR; | |
204 | default: | |
205 | return HNS_ROCE_QP_NUM_STATE; | |
206 | } | |
207 | } | |
208 | ||
b71961d1 XW |
209 | static void add_qp_to_list(struct hns_roce_dev *hr_dev, |
210 | struct hns_roce_qp *hr_qp, | |
211 | struct ib_cq *send_cq, struct ib_cq *recv_cq) | |
212 | { | |
213 | struct hns_roce_cq *hr_send_cq, *hr_recv_cq; | |
214 | unsigned long flags; | |
215 | ||
216 | hr_send_cq = send_cq ? to_hr_cq(send_cq) : NULL; | |
217 | hr_recv_cq = recv_cq ? to_hr_cq(recv_cq) : NULL; | |
218 | ||
219 | spin_lock_irqsave(&hr_dev->qp_list_lock, flags); | |
220 | hns_roce_lock_cqs(hr_send_cq, hr_recv_cq); | |
221 | ||
222 | list_add_tail(&hr_qp->node, &hr_dev->qp_list); | |
223 | if (hr_send_cq) | |
224 | list_add_tail(&hr_qp->sq_node, &hr_send_cq->sq_list); | |
225 | if (hr_recv_cq) | |
226 | list_add_tail(&hr_qp->rq_node, &hr_recv_cq->rq_list); | |
227 | ||
228 | hns_roce_unlock_cqs(hr_send_cq, hr_recv_cq); | |
229 | spin_unlock_irqrestore(&hr_dev->qp_list_lock, flags); | |
230 | } | |
231 | ||
232 | static int hns_roce_qp_store(struct hns_roce_dev *hr_dev, | |
233 | struct hns_roce_qp *hr_qp, | |
234 | struct ib_qp_init_attr *init_attr) | |
9a443537 | 235 | { |
736b5a70 | 236 | struct xarray *xa = &hr_dev->qp_table_xa; |
9a443537 | 237 | int ret; |
238 | ||
b71961d1 | 239 | if (!hr_qp->qpn) |
9a443537 | 240 | return -EINVAL; |
241 | ||
b71961d1 | 242 | ret = xa_err(xa_store_irq(xa, hr_qp->qpn, hr_qp, GFP_KERNEL)); |
736b5a70 | 243 | if (ret) |
b71961d1 XW |
244 | dev_err(hr_dev->dev, "Failed to xa store for QPC\n"); |
245 | else | |
246 | /* add QP to the device's QP list for software CQE generation */ | |
247 | add_qp_to_list(hr_dev, hr_qp, init_attr->send_cq, | |
248 | init_attr->recv_cq); | |
9a443537 | 249 | |
250 | return ret; | |
251 | } | |
252 | ||
b71961d1 | 253 | static int alloc_qpc(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp) |
9a443537 | 254 | { |
255 | struct hns_roce_qp_table *qp_table = &hr_dev->qp_table; | |
13ca970e | 256 | struct device *dev = hr_dev->dev; |
9a443537 | 257 | int ret; |
258 | ||
b71961d1 | 259 | if (!hr_qp->qpn) |
9a443537 | 260 | return -EINVAL; |
261 | ||
b71961d1 XW |
262 | /* In v1 engine, GSI QP context is saved in the RoCE hw's register */ |
263 | if (hr_qp->ibqp.qp_type == IB_QPT_GSI && | |
264 | hr_dev->hw_rev == HNS_ROCE_HW_VER1) | |
265 | return 0; | |
9a443537 | 266 | |
267 | /* Alloc memory for QPC */ | |
268 | ret = hns_roce_table_get(hr_dev, &qp_table->qp_table, hr_qp->qpn); | |
269 | if (ret) { | |
b71961d1 | 270 | dev_err(dev, "Failed to get QPC table\n"); |
9a443537 | 271 | goto err_out; |
272 | } | |
273 | ||
274 | /* Alloc memory for IRRL */ | |
275 | ret = hns_roce_table_get(hr_dev, &qp_table->irrl_table, hr_qp->qpn); | |
276 | if (ret) { | |
b71961d1 | 277 | dev_err(dev, "Failed to get IRRL table\n"); |
9a443537 | 278 | goto err_put_qp; |
279 | } | |
280 | ||
e92f2c18 | 281 | if (hr_dev->caps.trrl_entry_sz) { |
282 | /* Alloc memory for TRRL */ | |
283 | ret = hns_roce_table_get(hr_dev, &qp_table->trrl_table, | |
284 | hr_qp->qpn); | |
285 | if (ret) { | |
b71961d1 | 286 | dev_err(dev, "Failed to get TRRL table\n"); |
e92f2c18 | 287 | goto err_put_irrl; |
288 | } | |
289 | } | |
290 | ||
6a157f7d YL |
291 | if (hr_dev->caps.sccc_entry_sz) { |
292 | /* Alloc memory for SCC CTX */ | |
293 | ret = hns_roce_table_get(hr_dev, &qp_table->sccc_table, | |
294 | hr_qp->qpn); | |
295 | if (ret) { | |
b71961d1 | 296 | dev_err(dev, "Failed to get SCC CTX table\n"); |
6a157f7d YL |
297 | goto err_put_trrl; |
298 | } | |
299 | } | |
300 | ||
9a443537 | 301 | return 0; |
302 | ||
e92f2c18 | 303 | err_put_trrl: |
304 | if (hr_dev->caps.trrl_entry_sz) | |
305 | hns_roce_table_put(hr_dev, &qp_table->trrl_table, hr_qp->qpn); | |
306 | ||
9a443537 | 307 | err_put_irrl: |
308 | hns_roce_table_put(hr_dev, &qp_table->irrl_table, hr_qp->qpn); | |
309 | ||
310 | err_put_qp: | |
311 | hns_roce_table_put(hr_dev, &qp_table->qp_table, hr_qp->qpn); | |
312 | ||
313 | err_out: | |
314 | return ret; | |
315 | } | |
316 | ||
317 | void hns_roce_qp_remove(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp) | |
318 | { | |
736b5a70 | 319 | struct xarray *xa = &hr_dev->qp_table_xa; |
9a443537 | 320 | unsigned long flags; |
321 | ||
b71961d1 XW |
322 | list_del(&hr_qp->node); |
323 | list_del(&hr_qp->sq_node); | |
324 | list_del(&hr_qp->rq_node); | |
325 | ||
736b5a70 MW |
326 | xa_lock_irqsave(xa, flags); |
327 | __xa_erase(xa, hr_qp->qpn & (hr_dev->caps.num_qps - 1)); | |
328 | xa_unlock_irqrestore(xa, flags); | |
9a443537 | 329 | } |
330 | ||
b71961d1 | 331 | static void free_qpc(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp) |
9a443537 | 332 | { |
333 | struct hns_roce_qp_table *qp_table = &hr_dev->qp_table; | |
334 | ||
b71961d1 XW |
335 | /* In v1 engine, GSI QP context is saved in the RoCE hw's register */ |
336 | if (hr_qp->ibqp.qp_type == IB_QPT_GSI && | |
337 | hr_dev->hw_rev == HNS_ROCE_HW_VER1) | |
338 | return; | |
9a443537 | 339 | |
b71961d1 XW |
340 | if (hr_dev->caps.trrl_entry_sz) |
341 | hns_roce_table_put(hr_dev, &qp_table->trrl_table, hr_qp->qpn); | |
342 | hns_roce_table_put(hr_dev, &qp_table->irrl_table, hr_qp->qpn); | |
9a443537 | 343 | } |
344 | ||
df83a66e | 345 | static void free_qpn(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp) |
9a443537 | 346 | { |
347 | struct hns_roce_qp_table *qp_table = &hr_dev->qp_table; | |
348 | ||
df83a66e XW |
349 | if (hr_qp->ibqp.qp_type == IB_QPT_GSI) |
350 | return; | |
351 | ||
352 | if (hr_qp->qpn < hr_dev->caps.reserved_qps) | |
9a443537 | 353 | return; |
354 | ||
df83a66e | 355 | hns_roce_bitmap_free_range(&qp_table->bitmap, hr_qp->qpn, 1, BITMAP_RR); |
9a443537 | 356 | } |
357 | ||
54d66387 XW |
358 | static int set_rq_size(struct hns_roce_dev *hr_dev, struct ib_qp_cap *cap, |
359 | struct hns_roce_qp *hr_qp, int has_rq) | |
9a443537 | 360 | { |
54d66387 | 361 | u32 cnt; |
9a443537 | 362 | |
c7bcb134 LO |
363 | /* If an SRQ exists, the QP has no RQ of its own, so zero the RQ fields */ | |
364 | if (!has_rq) { | |
365 | hr_qp->rq.wqe_cnt = 0; | |
366 | hr_qp->rq.max_gs = 0; | |
54d66387 | 367 | hr_qp->rq_inl_buf.wqe_cnt = 0; |
c7bcb134 LO |
368 | cap->max_recv_wr = 0; |
369 | cap->max_recv_sge = 0; | |
9a443537 | 370 | |
026ded37 LC |
371 | return 0; |
372 | } | |
926a01dc | 373 | |
026ded37 LC |
374 | /* Check the validity of QP support capacity */ |
375 | if (!cap->max_recv_wr || cap->max_recv_wr > hr_dev->caps.max_wqes || | |
376 | cap->max_recv_sge > hr_dev->caps.max_rq_sg) { | |
377 | ibdev_err(&hr_dev->ib_dev, "RQ config error, depth=%u, sge=%d\n", | |
378 | cap->max_recv_wr, cap->max_recv_sge); | |
379 | return -EINVAL; | |
380 | } | |
9a443537 | 381 | |
54d66387 XW |
382 | cnt = roundup_pow_of_two(max(cap->max_recv_wr, hr_dev->caps.min_wqes)); |
383 | if (cnt > hr_dev->caps.max_wqes) { | |
026ded37 LC |
384 | ibdev_err(&hr_dev->ib_dev, "rq depth %u too large\n", |
385 | cap->max_recv_wr); | |
386 | return -EINVAL; | |
9a443537 | 387 | } |
388 | ||
6da06c62 | 389 | hr_qp->rq.max_gs = roundup_pow_of_two(max(1U, cap->max_recv_sge)); |
026ded37 LC |
390 | |
391 | if (hr_dev->caps.max_rq_sg <= HNS_ROCE_SGE_IN_WQE) | |
392 | hr_qp->rq.wqe_shift = ilog2(hr_dev->caps.max_rq_desc_sz); | |
393 | else | |
394 | hr_qp->rq.wqe_shift = ilog2(hr_dev->caps.max_rq_desc_sz * | |
395 | hr_qp->rq.max_gs); | |
396 | ||
54d66387 XW |
397 | hr_qp->rq.wqe_cnt = cnt; |
398 | if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) | |
399 | hr_qp->rq_inl_buf.wqe_cnt = cnt; | |
400 | else | |
401 | hr_qp->rq_inl_buf.wqe_cnt = 0; | |
402 | ||
403 | cap->max_recv_wr = cnt; | |
6da06c62 | 404 | cap->max_recv_sge = hr_qp->rq.max_gs; |
9a443537 | 405 | |
406 | return 0; | |
407 | } | |
408 | ||
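As a concrete illustration of the sizing above (the capability values are assumptions, not any particular hardware's): with hr_dev->caps.min_wqes = 64, a request of max_recv_wr = 100 yields cnt = roundup_pow_of_two(max(100, 64)) = 128, and a request of max_recv_sge = 3 yields rq.max_gs = roundup_pow_of_two(3) = 4; on hardware where max_rq_sg exceeds HNS_ROCE_SGE_IN_WQE, the RQ WQE stride then becomes ilog2(max_rq_desc_sz * 4). Note that the rounded-up values, not the requested ones, are written back into cap->max_recv_wr and cap->max_recv_sge.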
54d66387 XW |
409 | static int set_extend_sge_param(struct hns_roce_dev *hr_dev, u32 sq_wqe_cnt, |
410 | struct hns_roce_qp *hr_qp, | |
411 | struct ib_qp_cap *cap) | |
412 | { | |
54d66387 XW |
413 | u32 cnt; |
414 | ||
415 | cnt = max(1U, cap->max_send_sge); | |
416 | if (hr_dev->hw_rev == HNS_ROCE_HW_VER1) { | |
417 | hr_qp->sq.max_gs = roundup_pow_of_two(cnt); | |
418 | hr_qp->sge.sge_cnt = 0; | |
419 | ||
420 | return 0; | |
421 | } | |
422 | ||
423 | hr_qp->sq.max_gs = cnt; | |
424 | ||
425 | /* SGEs of UD SQ WQEs are placed entirely in the extended SGE space */ | |
426 | if (hr_qp->ibqp.qp_type == IB_QPT_GSI || | |
427 | hr_qp->ibqp.qp_type == IB_QPT_UD) { | |
428 | cnt = roundup_pow_of_two(sq_wqe_cnt * hr_qp->sq.max_gs); | |
429 | } else if (hr_qp->sq.max_gs > HNS_ROCE_SGE_IN_WQE) { | |
430 | cnt = roundup_pow_of_two(sq_wqe_cnt * | |
431 | (hr_qp->sq.max_gs - HNS_ROCE_SGE_IN_WQE)); | |
54d66387 XW |
432 | } else { |
433 | cnt = 0; | |
434 | } | |
435 | ||
436 | hr_qp->sge.sge_shift = HNS_ROCE_SGE_SHIFT; | |
437 | hr_qp->sge.sge_cnt = cnt; | |
438 | ||
439 | return 0; | |
440 | } | |
441 | ||
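Illustrative numbers for the extended-SGE sizing above: on hw v2, an RC QP with sq_wqe_cnt = 128 and sq.max_gs = 4 needs cnt = roundup_pow_of_two(128 * (4 - HNS_ROCE_SGE_IN_WQE)) extended SGEs (256 if HNS_ROCE_SGE_IN_WQE is 2, which is an assumption here), whereas a UD or GSI QP keeps all of its SGEs in the extended area, giving roundup_pow_of_two(128 * 4) = 512. On hw v1 the extended SGE space is not used at all (sge_cnt = 0).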
cc95b23c LO |
442 | static int check_sq_size_with_integrity(struct hns_roce_dev *hr_dev, |
443 | struct ib_qp_cap *cap, | |
444 | struct hns_roce_ib_create_qp *ucmd) | |
9a443537 | 445 | { |
446 | u32 roundup_sq_stride = roundup_pow_of_two(hr_dev->caps.max_sq_desc_sz); | |
447 | u8 max_sq_stride = ilog2(roundup_sq_stride); | |
448 | ||
449 | /* Sanity check SQ size before proceeding */ | |
515f6000 JG |
450 | if (ucmd->log_sq_stride > max_sq_stride || |
451 | ucmd->log_sq_stride < HNS_ROCE_IB_MIN_SQ_STRIDE) { | |
ae85bf92 | 452 | ibdev_err(&hr_dev->ib_dev, "Failed to check SQ stride size\n"); |
9a443537 | 453 | return -EINVAL; |
454 | } | |
455 | ||
926a01dc | 456 | if (cap->max_send_sge > hr_dev->caps.max_sq_sg) { |
ae85bf92 | 457 | ibdev_err(&hr_dev->ib_dev, "Failed to check SQ SGE size %d\n", |
db50077b | 458 | cap->max_send_sge); |
926a01dc WHX |
459 | return -EINVAL; |
460 | } | |
461 | ||
cc95b23c LO |
462 | return 0; |
463 | } | |
464 | ||
ae85bf92 XW |
465 | static int set_user_sq_size(struct hns_roce_dev *hr_dev, |
466 | struct ib_qp_cap *cap, struct hns_roce_qp *hr_qp, | |
467 | struct hns_roce_ib_create_qp *ucmd) | |
cc95b23c | 468 | { |
54d66387 XW |
469 | struct ib_device *ibdev = &hr_dev->ib_dev; |
470 | u32 cnt = 0; | |
cc95b23c LO |
471 | int ret; |
472 | ||
54d66387 XW |
473 | if (check_shl_overflow(1, ucmd->log_sq_bb_count, &cnt) || |
474 | cnt > hr_dev->caps.max_wqes) | |
515f6000 JG |
475 | return -EINVAL; |
476 | ||
cc95b23c LO |
477 | ret = check_sq_size_with_integrity(hr_dev, cap, ucmd); |
478 | if (ret) { | |
54d66387 XW |
479 | ibdev_err(ibdev, "failed to check user SQ size, ret = %d.\n", |
480 | ret); | |
cc95b23c LO |
481 | return ret; |
482 | } | |
483 | ||
54d66387 XW |
484 | ret = set_extend_sge_param(hr_dev, cnt, hr_qp, cap); |
485 | if (ret) | |
486 | return ret; | |
926a01dc | 487 | |
54d66387 XW |
488 | hr_qp->sq.wqe_shift = ucmd->log_sq_stride; |
489 | hr_qp->sq.wqe_cnt = cnt; | |
9a443537 | 490 | |
491 | return 0; | |
492 | } | |
493 | ||
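For example (values purely illustrative): a ucmd with log_sq_bb_count = 7 produces cnt = 1 << 7 = 128 via check_shl_overflow(); the request is rejected if that shift overflows, if cnt exceeds caps.max_wqes, or if the stride/SGE checks in check_sq_size_with_integrity() fail. ucmd->log_sq_stride must lie between HNS_ROCE_IB_MIN_SQ_STRIDE and ilog2(roundup_pow_of_two(caps.max_sq_desc_sz)), and is then used directly as the SQ WQE stride.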
e9f2cd28 XW |
494 | static int set_wqe_buf_attr(struct hns_roce_dev *hr_dev, |
495 | struct hns_roce_qp *hr_qp, | |
496 | struct hns_roce_buf_attr *buf_attr) | |
8d18ad83 | 497 | { |
8d18ad83 | 498 | int buf_size; |
d563099e | 499 | int idx = 0; |
8d18ad83 | 500 | |
54d66387 | 501 | hr_qp->buff_size = 0; |
8d18ad83 | 502 | |
d563099e | 503 | /* SQ WQE */ |
54d66387 XW |
504 | hr_qp->sq.offset = 0; |
505 | buf_size = to_hr_hem_entries_size(hr_qp->sq.wqe_cnt, | |
506 | hr_qp->sq.wqe_shift); | |
d563099e XW |
507 | if (buf_size > 0 && idx < ARRAY_SIZE(buf_attr->region)) { |
508 | buf_attr->region[idx].size = buf_size; | |
509 | buf_attr->region[idx].hopnum = hr_dev->caps.wqe_sq_hop_num; | |
510 | idx++; | |
54d66387 | 511 | hr_qp->buff_size += buf_size; |
8d18ad83 LO |
512 | } |
513 | ||
54d66387 XW |
514 | /* extended SGE space in the SQ */ | |
515 | hr_qp->sge.offset = hr_qp->buff_size; | |
516 | buf_size = to_hr_hem_entries_size(hr_qp->sge.sge_cnt, | |
517 | hr_qp->sge.sge_shift); | |
518 | if (buf_size > 0 && idx < ARRAY_SIZE(buf_attr->region)) { | |
d563099e | 519 | buf_attr->region[idx].size = buf_size; |
54d66387 | 520 | buf_attr->region[idx].hopnum = hr_dev->caps.wqe_sge_hop_num; |
d563099e | 521 | idx++; |
54d66387 | 522 | hr_qp->buff_size += buf_size; |
8d18ad83 LO |
523 | } |
524 | ||
d563099e | 525 | /* RQ WQE */ |
54d66387 XW |
526 | hr_qp->rq.offset = hr_qp->buff_size; |
527 | buf_size = to_hr_hem_entries_size(hr_qp->rq.wqe_cnt, | |
528 | hr_qp->rq.wqe_shift); | |
d563099e XW |
529 | if (buf_size > 0 && idx < ARRAY_SIZE(buf_attr->region)) { |
530 | buf_attr->region[idx].size = buf_size; | |
531 | buf_attr->region[idx].hopnum = hr_dev->caps.wqe_rq_hop_num; | |
532 | idx++; | |
54d66387 | 533 | hr_qp->buff_size += buf_size; |
8d18ad83 LO |
534 | } |
535 | ||
54d66387 XW |
536 | if (hr_qp->buff_size < 1) |
537 | return -EINVAL; | |
947441ea | 538 | |
9581a356 | 539 | buf_attr->page_shift = HNS_HW_PAGE_SHIFT + hr_dev->caps.mtt_buf_pg_sz; |
54d66387 XW |
540 | buf_attr->fixed_page = true; |
541 | buf_attr->region_count = idx; | |
947441ea LO |
542 | |
543 | return 0; | |
544 | } | |
545 | ||
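For orientation, the WQE buffer described by set_wqe_buf_attr() is split into up to three consecutive regions; the sketch below is illustrative only, since each region's size depends on the negotiated queue depths and is rounded to hardware pages by to_hr_hem_entries_size():

```c
/*
 * Illustrative layout of the QP work-queue buffer:
 *
 *   offset 0           SQ WQEs        (sq.wqe_cnt  << sq.wqe_shift)
 *   hr_qp->sge.offset  extended SGEs  (sge.sge_cnt << sge.sge_shift)
 *   hr_qp->rq.offset   RQ WQEs        (rq.wqe_cnt  << rq.wqe_shift)
 *
 * Zero-sized regions are skipped, so region_count ranges from 1 to 3,
 * and each region carries its own HEM hop number for the MTR mapping.
 */
```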
ae85bf92 XW |
546 | static int set_kernel_sq_size(struct hns_roce_dev *hr_dev, |
547 | struct ib_qp_cap *cap, struct hns_roce_qp *hr_qp) | |
9a443537 | 548 | { |
54d66387 XW |
549 | struct ib_device *ibdev = &hr_dev->ib_dev; |
550 | u32 cnt; | |
947441ea | 551 | int ret; |
9a443537 | 552 | |
026ded37 | 553 | if (!cap->max_send_wr || cap->max_send_wr > hr_dev->caps.max_wqes || |
9a443537 | 554 | cap->max_send_sge > hr_dev->caps.max_sq_sg || |
555 | cap->max_inline_data > hr_dev->caps.max_sq_inline) { | |
54d66387 XW |
556 | ibdev_err(ibdev, |
557 | "failed to check SQ WR, SGE or inline num, ret = %d.\n", | |
558 | -EINVAL); | |
9a443537 | 559 | return -EINVAL; |
560 | } | |
561 | ||
54d66387 XW |
562 | cnt = roundup_pow_of_two(max(cap->max_send_wr, hr_dev->caps.min_wqes)); |
563 | if (cnt > hr_dev->caps.max_wqes) { | |
564 | ibdev_err(ibdev, "failed to check WQE num, WQE num = %d.\n", | |
565 | cnt); | |
9a443537 | 566 | return -EINVAL; |
567 | } | |
568 | ||
54d66387 XW |
569 | hr_qp->sq.wqe_shift = ilog2(hr_dev->caps.max_sq_desc_sz); |
570 | hr_qp->sq.wqe_cnt = cnt; | |
9a443537 | 571 | |
54d66387 XW |
572 | ret = set_extend_sge_param(hr_dev, cnt, hr_qp, cap); |
573 | if (ret) | |
947441ea | 574 | return ret; |
05ad5482 | 575 | |
54d66387 XW |
576 | /* sync the parameters of kernel QP to user's configuration */ |
577 | cap->max_send_wr = cnt; | |
9a443537 | 578 | cap->max_send_sge = hr_qp->sq.max_gs; |
579 | ||
580 | /* We don't support inline sends for kernel QPs (yet) */ | |
581 | cap->max_inline_data = 0; | |
582 | ||
583 | return 0; | |
584 | } | |
585 | ||
0425e3e6 YL |
586 | static int hns_roce_qp_has_sq(struct ib_qp_init_attr *attr) |
587 | { | |
2557fabd | 588 | if (attr->qp_type == IB_QPT_XRC_TGT || !attr->cap.max_send_wr) |
0425e3e6 YL |
589 | return 0; |
590 | ||
591 | return 1; | |
592 | } | |
593 | ||
e088a685 YL |
594 | static int hns_roce_qp_has_rq(struct ib_qp_init_attr *attr) |
595 | { | |
596 | if (attr->qp_type == IB_QPT_XRC_INI || | |
4d103905 LO |
597 | attr->qp_type == IB_QPT_XRC_TGT || attr->srq || |
598 | !attr->cap.max_recv_wr) | |
e088a685 YL |
599 | return 0; |
600 | ||
601 | return 1; | |
602 | } | |
603 | ||
395b59a1 LO |
604 | static int alloc_rq_inline_buf(struct hns_roce_qp *hr_qp, |
605 | struct ib_qp_init_attr *init_attr) | |
606 | { | |
607 | u32 max_recv_sge = init_attr->cap.max_recv_sge; | |
54d66387 | 608 | u32 wqe_cnt = hr_qp->rq_inl_buf.wqe_cnt; |
395b59a1 | 609 | struct hns_roce_rinl_wqe *wqe_list; |
395b59a1 LO |
610 | int i; |
611 | ||
612 | /* allocate recv inline buf */ | |
613 | wqe_list = kcalloc(wqe_cnt, sizeof(struct hns_roce_rinl_wqe), | |
614 | GFP_KERNEL); | |
615 | ||
616 | if (!wqe_list) | |
617 | goto err; | |
618 | ||
619 | /* Allocate one contiguous buffer for all the inline SGEs we need */ | |
620 | wqe_list[0].sg_list = kcalloc(wqe_cnt, (max_recv_sge * | |
621 | sizeof(struct hns_roce_rinl_sge)), | |
622 | GFP_KERNEL); | |
623 | if (!wqe_list[0].sg_list) | |
624 | goto err_wqe_list; | |
625 | ||
626 | /* Assign buffers of sg_list to each inline wqe */ | |
627 | for (i = 1; i < wqe_cnt; i++) | |
628 | wqe_list[i].sg_list = &wqe_list[0].sg_list[i * max_recv_sge]; | |
629 | ||
630 | hr_qp->rq_inl_buf.wqe_list = wqe_list; | |
395b59a1 LO |
631 | |
632 | return 0; | |
633 | ||
634 | err_wqe_list: | |
635 | kfree(wqe_list); | |
636 | ||
637 | err: | |
638 | return -ENOMEM; | |
639 | } | |
640 | ||
641 | static void free_rq_inline_buf(struct hns_roce_qp *hr_qp) | |
642 | { | |
54d66387 XW |
643 | if (hr_qp->rq_inl_buf.wqe_list) |
644 | kfree(hr_qp->rq_inl_buf.wqe_list[0].sg_list); | |
395b59a1 LO |
645 | kfree(hr_qp->rq_inl_buf.wqe_list); |
646 | } | |
647 | ||
24c22112 XW |
648 | static int alloc_qp_buf(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp, |
649 | struct ib_qp_init_attr *init_attr, | |
650 | struct ib_udata *udata, unsigned long addr) | |
651 | { | |
24c22112 | 652 | struct ib_device *ibdev = &hr_dev->ib_dev; |
d563099e | 653 | struct hns_roce_buf_attr buf_attr = {}; |
24c22112 XW |
654 | int ret; |
655 | ||
54d66387 | 656 | if (!udata && hr_qp->rq_inl_buf.wqe_cnt) { |
24c22112 XW |
657 | ret = alloc_rq_inline_buf(hr_qp, init_attr); |
658 | if (ret) { | |
54d66387 XW |
659 | ibdev_err(ibdev, |
660 | "failed to alloc inline buf, ret = %d.\n", | |
661 | ret); | |
24c22112 XW |
662 | return ret; |
663 | } | |
54d66387 XW |
664 | } else { |
665 | hr_qp->rq_inl_buf.wqe_list = NULL; | |
24c22112 XW |
666 | } |
667 | ||
e9f2cd28 | 668 | ret = set_wqe_buf_attr(hr_dev, hr_qp, &buf_attr); |
d563099e | 669 | if (ret) { |
54d66387 | 670 | ibdev_err(ibdev, "failed to split WQE buf, ret = %d.\n", ret); |
d563099e XW |
671 | goto err_inline; |
672 | } | |
673 | ret = hns_roce_mtr_create(hr_dev, &hr_qp->mtr, &buf_attr, | |
9581a356 | 674 | HNS_HW_PAGE_SHIFT + hr_dev->caps.mtt_ba_pg_sz, |
d563099e XW |
675 | udata, addr); |
676 | if (ret) { | |
54d66387 | 677 | ibdev_err(ibdev, "failed to create WQE mtr, ret = %d.\n", ret); |
d563099e | 678 | goto err_inline; |
24c22112 | 679 | } |
24c22112 XW |
680 | |
681 | return 0; | |
24c22112 | 682 | err_inline: |
54d66387 | 683 | free_rq_inline_buf(hr_qp); |
24c22112 | 684 | |
24c22112 XW |
685 | return ret; |
686 | } | |
687 | ||
688 | static void free_qp_buf(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp) | |
689 | { | |
d563099e | 690 | hns_roce_mtr_destroy(hr_dev, &hr_qp->mtr); |
54d66387 | 691 | free_rq_inline_buf(hr_qp); |
24c22112 | 692 | } |
ae85bf92 | 693 | |
cfec045b XW |
694 | static inline bool user_qp_has_sdb(struct hns_roce_dev *hr_dev, |
695 | struct ib_qp_init_attr *init_attr, | |
696 | struct ib_udata *udata, | |
697 | struct hns_roce_ib_create_qp_resp *resp, | |
698 | struct hns_roce_ib_create_qp *ucmd) | |
699 | { | |
700 | return ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SQ_RECORD_DB) && | |
701 | udata->outlen >= offsetofend(typeof(*resp), cap_flags) && | |
702 | hns_roce_qp_has_sq(init_attr) && | |
703 | udata->inlen >= offsetofend(typeof(*ucmd), sdb_addr)); | |
704 | } | |
705 | ||
706 | static inline bool user_qp_has_rdb(struct hns_roce_dev *hr_dev, | |
707 | struct ib_qp_init_attr *init_attr, | |
708 | struct ib_udata *udata, | |
709 | struct hns_roce_ib_create_qp_resp *resp) | |
710 | { | |
711 | return ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) && | |
712 | udata->outlen >= offsetofend(typeof(*resp), cap_flags) && | |
713 | hns_roce_qp_has_rq(init_attr)); | |
714 | } | |
715 | ||
716 | static inline bool kernel_qp_has_rdb(struct hns_roce_dev *hr_dev, | |
717 | struct ib_qp_init_attr *init_attr) | |
718 | { | |
719 | return ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) && | |
720 | hns_roce_qp_has_rq(init_attr)); | |
721 | } | |
722 | ||
723 | static int alloc_qp_db(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp, | |
724 | struct ib_qp_init_attr *init_attr, | |
725 | struct ib_udata *udata, | |
726 | struct hns_roce_ib_create_qp *ucmd, | |
727 | struct hns_roce_ib_create_qp_resp *resp) | |
728 | { | |
729 | struct hns_roce_ucontext *uctx = rdma_udata_to_drv_context( | |
730 | udata, struct hns_roce_ucontext, ibucontext); | |
731 | struct ib_device *ibdev = &hr_dev->ib_dev; | |
732 | int ret; | |
733 | ||
734 | if (udata) { | |
735 | if (user_qp_has_sdb(hr_dev, init_attr, udata, resp, ucmd)) { | |
736 | ret = hns_roce_db_map_user(uctx, udata, ucmd->sdb_addr, | |
737 | &hr_qp->sdb); | |
738 | if (ret) { | |
739 | ibdev_err(ibdev, | |
740 | "Failed to map user SQ doorbell\n"); | |
741 | goto err_out; | |
742 | } | |
90ae0b57 LC |
743 | hr_qp->en_flags |= HNS_ROCE_QP_CAP_SQ_RECORD_DB; |
744 | resp->cap_flags |= HNS_ROCE_QP_CAP_SQ_RECORD_DB; | |
cfec045b XW |
745 | } |
746 | ||
747 | if (user_qp_has_rdb(hr_dev, init_attr, udata, resp)) { | |
748 | ret = hns_roce_db_map_user(uctx, udata, ucmd->db_addr, | |
749 | &hr_qp->rdb); | |
750 | if (ret) { | |
751 | ibdev_err(ibdev, | |
752 | "Failed to map user RQ doorbell\n"); | |
753 | goto err_sdb; | |
754 | } | |
90ae0b57 LC |
755 | hr_qp->en_flags |= HNS_ROCE_QP_CAP_RQ_RECORD_DB; |
756 | resp->cap_flags |= HNS_ROCE_QP_CAP_RQ_RECORD_DB; | |
cfec045b XW |
757 | } |
758 | } else { | |
759 | /* QP doorbell register address */ | |
760 | hr_qp->sq.db_reg_l = hr_dev->reg_base + hr_dev->sdb_offset + | |
761 | DB_REG_OFFSET * hr_dev->priv_uar.index; | |
762 | hr_qp->rq.db_reg_l = hr_dev->reg_base + hr_dev->odb_offset + | |
763 | DB_REG_OFFSET * hr_dev->priv_uar.index; | |
764 | ||
765 | if (kernel_qp_has_rdb(hr_dev, init_attr)) { | |
766 | ret = hns_roce_alloc_db(hr_dev, &hr_qp->rdb, 0); | |
767 | if (ret) { | |
768 | ibdev_err(ibdev, | |
769 | "Failed to alloc kernel RQ doorbell\n"); | |
770 | goto err_out; | |
771 | } | |
772 | *hr_qp->rdb.db_record = 0; | |
90ae0b57 | 773 | hr_qp->en_flags |= HNS_ROCE_QP_CAP_RQ_RECORD_DB; |
cfec045b XW |
774 | } |
775 | } | |
776 | ||
777 | return 0; | |
778 | err_sdb: | |
90ae0b57 | 779 | if (udata && hr_qp->en_flags & HNS_ROCE_QP_CAP_SQ_RECORD_DB) |
cfec045b XW |
780 | hns_roce_db_unmap_user(uctx, &hr_qp->sdb); |
781 | err_out: | |
782 | return ret; | |
783 | } | |
784 | ||
785 | static void free_qp_db(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp, | |
786 | struct ib_udata *udata) | |
787 | { | |
788 | struct hns_roce_ucontext *uctx = rdma_udata_to_drv_context( | |
789 | udata, struct hns_roce_ucontext, ibucontext); | |
790 | ||
791 | if (udata) { | |
90ae0b57 | 792 | if (hr_qp->en_flags & HNS_ROCE_QP_CAP_RQ_RECORD_DB) |
cfec045b | 793 | hns_roce_db_unmap_user(uctx, &hr_qp->rdb); |
90ae0b57 | 794 | if (hr_qp->en_flags & HNS_ROCE_QP_CAP_SQ_RECORD_DB) |
cfec045b XW |
795 | hns_roce_db_unmap_user(uctx, &hr_qp->sdb); |
796 | } else { | |
90ae0b57 | 797 | if (hr_qp->en_flags & HNS_ROCE_QP_CAP_RQ_RECORD_DB) |
cfec045b XW |
798 | hns_roce_free_db(hr_dev, &hr_qp->rdb); |
799 | } | |
800 | } | |
801 | ||
b37c4139 XW |
802 | static int alloc_kernel_wrid(struct hns_roce_dev *hr_dev, |
803 | struct hns_roce_qp *hr_qp) | |
804 | { | |
805 | struct ib_device *ibdev = &hr_dev->ib_dev; | |
806 | u64 *sq_wrid = NULL; | |
807 | u64 *rq_wrid = NULL; | |
808 | int ret; | |
809 | ||
810 | sq_wrid = kcalloc(hr_qp->sq.wqe_cnt, sizeof(u64), GFP_KERNEL); | |
811 | if (ZERO_OR_NULL_PTR(sq_wrid)) { | |
812 | ibdev_err(ibdev, "Failed to alloc SQ wrid\n"); | |
813 | return -ENOMEM; | |
814 | } | |
815 | ||
816 | if (hr_qp->rq.wqe_cnt) { | |
817 | rq_wrid = kcalloc(hr_qp->rq.wqe_cnt, sizeof(u64), GFP_KERNEL); | |
818 | if (ZERO_OR_NULL_PTR(rq_wrid)) { | |
819 | ibdev_err(ibdev, "Failed to alloc RQ wrid\n"); | |
820 | ret = -ENOMEM; | |
821 | goto err_sq; | |
822 | } | |
823 | } | |
824 | ||
825 | hr_qp->sq.wrid = sq_wrid; | |
826 | hr_qp->rq.wrid = rq_wrid; | |
827 | return 0; | |
828 | err_sq: | |
829 | kfree(sq_wrid); | |
830 | ||
831 | return ret; | |
832 | } | |
833 | ||
f226f676 | 834 | static void free_kernel_wrid(struct hns_roce_qp *hr_qp) |
b37c4139 XW |
835 | { |
836 | kfree(hr_qp->rq.wrid); | |
837 | kfree(hr_qp->sq.wrid); | |
838 | } | |
839 | ||
ae85bf92 XW |
840 | static int set_qp_param(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp, |
841 | struct ib_qp_init_attr *init_attr, | |
842 | struct ib_udata *udata, | |
843 | struct hns_roce_ib_create_qp *ucmd) | |
844 | { | |
845 | struct ib_device *ibdev = &hr_dev->ib_dev; | |
846 | int ret; | |
847 | ||
848 | hr_qp->ibqp.qp_type = init_attr->qp_type; | |
849 | ||
850 | if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) | |
851 | hr_qp->sq_signal_bits = IB_SIGNAL_ALL_WR; | |
852 | else | |
853 | hr_qp->sq_signal_bits = IB_SIGNAL_REQ_WR; | |
854 | ||
54d66387 XW |
855 | ret = set_rq_size(hr_dev, &init_attr->cap, hr_qp, |
856 | hns_roce_qp_has_rq(init_attr)); | |
ae85bf92 | 857 | if (ret) { |
54d66387 XW |
858 | ibdev_err(ibdev, "failed to set user RQ size, ret = %d.\n", |
859 | ret); | |
ae85bf92 XW |
860 | return ret; |
861 | } | |
862 | ||
863 | if (udata) { | |
864 | if (ib_copy_from_udata(ucmd, udata, sizeof(*ucmd))) { | |
865 | ibdev_err(ibdev, "Failed to copy QP ucmd\n"); | |
866 | return -EFAULT; | |
867 | } | |
868 | ||
869 | ret = set_user_sq_size(hr_dev, &init_attr->cap, hr_qp, ucmd); | |
870 | if (ret) | |
871 | ibdev_err(ibdev, "Failed to set user SQ size\n"); | |
872 | } else { | |
873 | if (init_attr->create_flags & | |
874 | IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK) { | |
875 | ibdev_err(ibdev, "Failed to check multicast loopback\n"); | |
876 | return -EINVAL; | |
877 | } | |
878 | ||
879 | if (init_attr->create_flags & IB_QP_CREATE_IPOIB_UD_LSO) { | |
880 | ibdev_err(ibdev, "Failed to check ipoib ud lso\n"); | |
881 | return -EINVAL; | |
882 | } | |
883 | ||
884 | ret = set_kernel_sq_size(hr_dev, &init_attr->cap, hr_qp); | |
885 | if (ret) | |
886 | ibdev_err(ibdev, "Failed to set kernel SQ size\n"); | |
887 | } | |
888 | ||
889 | return ret; | |
890 | } | |
891 | ||
9a443537 | 892 | static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev, |
893 | struct ib_pd *ib_pd, | |
894 | struct ib_qp_init_attr *init_attr, | |
df83a66e | 895 | struct ib_udata *udata, |
9a443537 | 896 | struct hns_roce_qp *hr_qp) |
897 | { | |
7b48221c | 898 | struct hns_roce_ib_create_qp_resp resp = {}; |
cfec045b XW |
899 | struct ib_device *ibdev = &hr_dev->ib_dev; |
900 | struct hns_roce_ib_create_qp ucmd; | |
8d18ad83 | 901 | int ret; |
9a443537 | 902 | |
903 | mutex_init(&hr_qp->mutex); | |
904 | spin_lock_init(&hr_qp->sq.lock); | |
905 | spin_lock_init(&hr_qp->rq.lock); | |
906 | ||
907 | hr_qp->state = IB_QPS_RESET; | |
b5374286 | 908 | hr_qp->flush_flag = 0; |
9a443537 | 909 | |
ae85bf92 | 910 | ret = set_qp_param(hr_dev, hr_qp, init_attr, udata, &ucmd); |
9a443537 | 911 | if (ret) { |
cfec045b | 912 | ibdev_err(ibdev, "Failed to set QP param\n"); |
ae85bf92 | 913 | return ret; |
9a443537 | 914 | } |
915 | ||
cfec045b | 916 | if (!udata) { |
b37c4139 XW |
917 | ret = alloc_kernel_wrid(hr_dev, hr_qp); |
918 | if (ret) { | |
cfec045b XW |
919 | ibdev_err(ibdev, "Failed to alloc wrid\n"); |
920 | return ret; | |
76827087 | 921 | } |
9a443537 | 922 | } |
923 | ||
cfec045b XW |
924 | ret = alloc_qp_db(hr_dev, hr_qp, init_attr, udata, &ucmd, &resp); |
925 | if (ret) { | |
926 | ibdev_err(ibdev, "Failed to alloc QP doorbell\n"); | |
927 | goto err_wrid; | |
928 | } | |
929 | ||
24c22112 | 930 | ret = alloc_qp_buf(hr_dev, hr_qp, init_attr, udata, ucmd.buf_addr); |
8d18ad83 | 931 | if (ret) { |
cfec045b | 932 | ibdev_err(ibdev, "Failed to alloc QP buffer\n"); |
24c22112 | 933 | goto err_db; |
df83a66e XW |
934 | } |
935 | ||
936 | ret = alloc_qpn(hr_dev, hr_qp); | |
937 | if (ret) { | |
cfec045b | 938 | ibdev_err(ibdev, "Failed to alloc QPN\n"); |
24c22112 | 939 | goto err_buf; |
8d18ad83 LO |
940 | } |
941 | ||
b71961d1 XW |
942 | ret = alloc_qpc(hr_dev, hr_qp); |
943 | if (ret) { | |
cfec045b | 944 | ibdev_err(ibdev, "Failed to alloc QP context\n"); |
b71961d1 XW |
945 | goto err_qpn; |
946 | } | |
947 | ||
948 | ret = hns_roce_qp_store(hr_dev, hr_qp, init_attr); | |
949 | if (ret) { | |
cfec045b | 950 | ibdev_err(ibdev, "Failed to store QP\n"); |
b71961d1 | 951 | goto err_qpc; |
9a443537 | 952 | } |
953 | ||
de77503a LO |
954 | if (udata) { |
955 | ret = ib_copy_to_udata(udata, &resp, | |
956 | min(udata->outlen, sizeof(resp))); | |
cfec045b XW |
957 | if (ret) { |
958 | ibdev_err(ibdev, "copy qp resp failed!\n"); | |
b71961d1 | 959 | goto err_store; |
cfec045b | 960 | } |
e088a685 | 961 | } |
aa84fa18 YL |
962 | |
963 | if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL) { | |
964 | ret = hr_dev->hw->qp_flow_control_init(hr_dev, hr_qp); | |
965 | if (ret) | |
b71961d1 | 966 | goto err_store; |
aa84fa18 YL |
967 | } |
968 | ||
df83a66e | 969 | hr_qp->ibqp.qp_num = hr_qp->qpn; |
9a443537 | 970 | hr_qp->event = hns_roce_ib_qp_event; |
b71961d1 XW |
971 | atomic_set(&hr_qp->refcount, 1); |
972 | init_completion(&hr_qp->free); | |
626903e9 | 973 | |
9a443537 | 974 | return 0; |
975 | ||
b71961d1 XW |
976 | err_store: |
977 | hns_roce_qp_remove(hr_dev, hr_qp); | |
b71961d1 XW |
978 | err_qpc: |
979 | free_qpc(hr_dev, hr_qp); | |
9a443537 | 980 | err_qpn: |
df83a66e | 981 | free_qpn(hr_dev, hr_qp); |
24c22112 XW |
982 | err_buf: |
983 | free_qp_buf(hr_dev, hr_qp); | |
472bc0fb | 984 | err_db: |
cfec045b XW |
985 | free_qp_db(hr_dev, hr_qp, udata); |
986 | err_wrid: | |
f226f676 | 987 | free_kernel_wrid(hr_qp); |
9a443537 | 988 | return ret; |
989 | } | |
990 | ||
e365b26c XW |
991 | void hns_roce_qp_destroy(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp, |
992 | struct ib_udata *udata) | |
993 | { | |
b71961d1 XW |
994 | if (atomic_dec_and_test(&hr_qp->refcount)) |
995 | complete(&hr_qp->free); | |
996 | wait_for_completion(&hr_qp->free); | |
997 | ||
998 | free_qpc(hr_dev, hr_qp); | |
df83a66e | 999 | free_qpn(hr_dev, hr_qp); |
24c22112 | 1000 | free_qp_buf(hr_dev, hr_qp); |
f226f676 | 1001 | free_kernel_wrid(hr_qp); |
cfec045b | 1002 | free_qp_db(hr_dev, hr_qp, udata); |
e365b26c XW |
1003 | |
1004 | kfree(hr_qp); | |
1005 | } | |
1006 | ||
9a443537 | 1007 | struct ib_qp *hns_roce_create_qp(struct ib_pd *pd, |
1008 | struct ib_qp_init_attr *init_attr, | |
1009 | struct ib_udata *udata) | |
1010 | { | |
1011 | struct hns_roce_dev *hr_dev = to_hr_dev(pd->device); | |
db50077b | 1012 | struct ib_device *ibdev = &hr_dev->ib_dev; |
9a443537 | 1013 | struct hns_roce_qp *hr_qp; |
1014 | int ret; | |
1015 | ||
1016 | switch (init_attr->qp_type) { | |
1017 | case IB_QPT_RC: { | |
1018 | hr_qp = kzalloc(sizeof(*hr_qp), GFP_KERNEL); | |
1019 | if (!hr_qp) | |
1020 | return ERR_PTR(-ENOMEM); | |
1021 | ||
df83a66e | 1022 | ret = hns_roce_create_qp_common(hr_dev, pd, init_attr, udata, |
9a443537 | 1023 | hr_qp); |
1024 | if (ret) { | |
d11769fd | 1025 | ibdev_err(ibdev, "Create QP 0x%06lx failed (%d)\n", |
db50077b | 1026 | hr_qp->qpn, ret); |
9a443537 | 1027 | kfree(hr_qp); |
1028 | return ERR_PTR(ret); | |
1029 | } | |
1030 | ||
9a443537 | 1031 | break; |
1032 | } | |
1033 | case IB_QPT_GSI: { | |
1034 | /* Userspace is not allowed to create special QPs: */ | |
e00b64f7 | 1035 | if (udata) { |
db50077b | 1036 | ibdev_err(ibdev, "userspace GSI QP is not supported\n"); |
9a443537 | 1037 | return ERR_PTR(-EINVAL); |
1038 | } | |
1039 | ||
16a11e0b LC |
1040 | hr_qp = kzalloc(sizeof(*hr_qp), GFP_KERNEL); |
1041 | if (!hr_qp) | |
9a443537 | 1042 | return ERR_PTR(-ENOMEM); |
1043 | ||
7716809e LO |
1044 | hr_qp->port = init_attr->port_num - 1; |
1045 | hr_qp->phy_port = hr_dev->iboe.phy_port[hr_qp->port]; | |
b66efc93 | 1046 | |
9a443537 | 1047 | ret = hns_roce_create_qp_common(hr_dev, pd, init_attr, udata, |
df83a66e | 1048 | hr_qp); |
9a443537 | 1049 | if (ret) { |
db50077b | 1050 | ibdev_err(ibdev, "Create GSI QP failed!\n"); |
16a11e0b | 1051 | kfree(hr_qp); |
9a443537 | 1052 | return ERR_PTR(ret); |
1053 | } | |
1054 | ||
9a443537 | 1055 | break; |
1056 | } | |
1057 | default:{ | |
db50077b LO |
1058 | ibdev_err(ibdev, "QP type %d is not supported\n", | |
1059 | init_attr->qp_type); | |
bb8865f4 | 1060 | return ERR_PTR(-EOPNOTSUPP); |
9a443537 | 1061 | } |
1062 | } | |
1063 | ||
1064 | return &hr_qp->ibqp; | |
1065 | } | |
1066 | ||
1067 | int to_hr_qp_type(int qp_type) | |
1068 | { | |
1069 | int transport_type; | |
1070 | ||
1071 | if (qp_type == IB_QPT_RC) | |
1072 | transport_type = SERV_TYPE_RC; | |
1073 | else if (qp_type == IB_QPT_UC) | |
1074 | transport_type = SERV_TYPE_UC; | |
1075 | else if (qp_type == IB_QPT_UD) | |
1076 | transport_type = SERV_TYPE_UD; | |
1077 | else if (qp_type == IB_QPT_GSI) | |
1078 | transport_type = SERV_TYPE_UD; | |
1079 | else | |
1080 | transport_type = -1; | |
1081 | ||
1082 | return transport_type; | |
1083 | } | |
1084 | ||
8ea417ff LO |
1085 | static int check_mtu_validate(struct hns_roce_dev *hr_dev, |
1086 | struct hns_roce_qp *hr_qp, | |
1087 | struct ib_qp_attr *attr, int attr_mask) | |
9a443537 | 1088 | { |
cb814642 | 1089 | enum ib_mtu active_mtu; |
8ea417ff | 1090 | int p; |
9a443537 | 1091 | |
8ea417ff | 1092 | p = attr_mask & IB_QP_PORT ? (attr->port_num - 1) : hr_qp->port; |
a7325af7 | 1093 | active_mtu = iboe_get_mtu(hr_dev->iboe.netdevs[p]->mtu); |
de77503a | 1094 | |
8ea417ff LO |
1095 | if ((hr_dev->caps.max_mtu >= IB_MTU_2048 && |
1096 | attr->path_mtu > hr_dev->caps.max_mtu) || | |
1097 | attr->path_mtu < IB_MTU_256 || attr->path_mtu > active_mtu) { | |
db50077b LO |
1098 | ibdev_err(&hr_dev->ib_dev, |
1099 | "attr path_mtu(%d)invalid while modify qp", | |
8ea417ff LO |
1100 | attr->path_mtu); |
1101 | return -EINVAL; | |
0425e3e6 YL |
1102 | } |
1103 | ||
8ea417ff LO |
1104 | return 0; |
1105 | } | |
1106 | ||
1107 | static int hns_roce_check_qp_attr(struct ib_qp *ibqp, struct ib_qp_attr *attr, | |
1108 | int attr_mask) | |
1109 | { | |
1110 | struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device); | |
1111 | struct hns_roce_qp *hr_qp = to_hr_qp(ibqp); | |
8ea417ff | 1112 | int p; |
9a443537 | 1113 | |
1114 | if ((attr_mask & IB_QP_PORT) && | |
1115 | (attr->port_num == 0 || attr->port_num > hr_dev->caps.num_ports)) { | |
db50077b LO |
1116 | ibdev_err(&hr_dev->ib_dev, |
1117 | "attr port_num invalid.attr->port_num=%d\n", | |
9a443537 | 1118 | attr->port_num); |
8ea417ff | 1119 | return -EINVAL; |
9a443537 | 1120 | } |
1121 | ||
1122 | if (attr_mask & IB_QP_PKEY_INDEX) { | |
1123 | p = attr_mask & IB_QP_PORT ? (attr->port_num - 1) : hr_qp->port; | |
1124 | if (attr->pkey_index >= hr_dev->caps.pkey_table_len[p]) { | |
db50077b LO |
1125 | ibdev_err(&hr_dev->ib_dev, |
1126 | "attr pkey_index invalid.attr->pkey_index=%d\n", | |
9a443537 | 1127 | attr->pkey_index); |
8ea417ff | 1128 | return -EINVAL; |
cb814642 LO |
1129 | } |
1130 | } | |
1131 | ||
9a443537 | 1132 | if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC && |
1133 | attr->max_rd_atomic > hr_dev->caps.max_qp_init_rdma) { | |
db50077b LO |
1134 | ibdev_err(&hr_dev->ib_dev, |
1135 | "attr max_rd_atomic invalid.attr->max_rd_atomic=%d\n", | |
9a443537 | 1136 | attr->max_rd_atomic); |
8ea417ff | 1137 | return -EINVAL; |
9a443537 | 1138 | } |
1139 | ||
1140 | if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC && | |
1141 | attr->max_dest_rd_atomic > hr_dev->caps.max_qp_dest_rdma) { | |
db50077b LO |
1142 | ibdev_err(&hr_dev->ib_dev, |
1143 | "attr max_dest_rd_atomic invalid.attr->max_dest_rd_atomic=%d\n", | |
9a443537 | 1144 | attr->max_dest_rd_atomic); |
8ea417ff LO |
1145 | return -EINVAL; |
1146 | } | |
1147 | ||
1148 | if (attr_mask & IB_QP_PATH_MTU) | |
1149 | return check_mtu_validate(hr_dev, hr_qp, attr, attr_mask); | |
1150 | ||
1151 | return 0; | |
1152 | } | |
1153 | ||
1154 | int hns_roce_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, | |
1155 | int attr_mask, struct ib_udata *udata) | |
1156 | { | |
1157 | struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device); | |
1158 | struct hns_roce_qp *hr_qp = to_hr_qp(ibqp); | |
1159 | enum ib_qp_state cur_state, new_state; | |
8ea417ff LO |
1160 | int ret = -EINVAL; |
1161 | ||
1162 | mutex_lock(&hr_qp->mutex); | |
1163 | ||
1164 | cur_state = attr_mask & IB_QP_CUR_STATE ? | |
1165 | attr->cur_qp_state : (enum ib_qp_state)hr_qp->state; | |
1166 | new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state; | |
1167 | ||
1168 | if (ibqp->uobject && | |
1169 | (attr_mask & IB_QP_STATE) && new_state == IB_QPS_ERR) { | |
90ae0b57 | 1170 | if (hr_qp->en_flags & HNS_ROCE_QP_CAP_SQ_RECORD_DB) { |
8ea417ff LO |
1171 | hr_qp->sq.head = *(int *)(hr_qp->sdb.virt_addr); |
1172 | ||
90ae0b57 | 1173 | if (hr_qp->en_flags & HNS_ROCE_QP_CAP_RQ_RECORD_DB) |
8ea417ff LO |
1174 | hr_qp->rq.head = *(int *)(hr_qp->rdb.virt_addr); |
1175 | } else { | |
db50077b LO |
1176 | ibdev_warn(&hr_dev->ib_dev, |
1177 | "flush cqe is not supported in userspace!\n"); | |
8ea417ff LO |
1178 | goto out; |
1179 | } | |
1180 | } | |
1181 | ||
1182 | if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type, | |
1183 | attr_mask)) { | |
db50077b | 1184 | ibdev_err(&hr_dev->ib_dev, "ib_modify_qp_is_ok failed\n"); |
9a443537 | 1185 | goto out; |
1186 | } | |
1187 | ||
8ea417ff LO |
1188 | ret = hns_roce_check_qp_attr(ibqp, attr, attr_mask); |
1189 | if (ret) | |
1190 | goto out; | |
1191 | ||
9a443537 | 1192 | if (cur_state == new_state && cur_state == IB_QPS_RESET) { |
026ded37 | 1193 | if (hr_dev->hw_rev == HNS_ROCE_HW_VER1) { |
391bd5fc | 1194 | ret = -EPERM; |
db50077b | 1195 | ibdev_err(&hr_dev->ib_dev, |
026ded37 | 1196 | "RST2RST state is not supported\n"); |
391bd5fc | 1197 | } else { |
1198 | ret = 0; | |
1199 | } | |
1200 | ||
9a443537 | 1201 | goto out; |
1202 | } | |
1203 | ||
1204 | ret = hr_dev->hw->modify_qp(ibqp, attr, attr_mask, cur_state, | |
1205 | new_state); | |
1206 | ||
1207 | out: | |
1208 | mutex_unlock(&hr_qp->mutex); | |
1209 | ||
1210 | return ret; | |
1211 | } | |
1212 | ||
1213 | void hns_roce_lock_cqs(struct hns_roce_cq *send_cq, struct hns_roce_cq *recv_cq) | |
1214 | __acquires(&send_cq->lock) __acquires(&recv_cq->lock) | |
1215 | { | |
626903e9 XW |
1216 | if (unlikely(send_cq == NULL && recv_cq == NULL)) { |
1217 | __acquire(&send_cq->lock); | |
1218 | __acquire(&recv_cq->lock); | |
1219 | } else if (unlikely(send_cq != NULL && recv_cq == NULL)) { | |
1220 | spin_lock_irq(&send_cq->lock); | |
1221 | __acquire(&recv_cq->lock); | |
1222 | } else if (unlikely(send_cq == NULL && recv_cq != NULL)) { | |
1223 | spin_lock_irq(&recv_cq->lock); | |
1224 | __acquire(&send_cq->lock); | |
1225 | } else if (send_cq == recv_cq) { | |
9a443537 | 1226 | spin_lock_irq(&send_cq->lock); |
1227 | __acquire(&recv_cq->lock); | |
1228 | } else if (send_cq->cqn < recv_cq->cqn) { | |
1229 | spin_lock_irq(&send_cq->lock); | |
1230 | spin_lock_nested(&recv_cq->lock, SINGLE_DEPTH_NESTING); | |
1231 | } else { | |
1232 | spin_lock_irq(&recv_cq->lock); | |
1233 | spin_lock_nested(&send_cq->lock, SINGLE_DEPTH_NESTING); | |
1234 | } | |
1235 | } | |
1236 | ||
1237 | void hns_roce_unlock_cqs(struct hns_roce_cq *send_cq, | |
1238 | struct hns_roce_cq *recv_cq) __releases(&send_cq->lock) | |
1239 | __releases(&recv_cq->lock) | |
1240 | { | |
626903e9 XW |
1241 | if (unlikely(send_cq == NULL && recv_cq == NULL)) { |
1242 | __release(&recv_cq->lock); | |
1243 | __release(&send_cq->lock); | |
1244 | } else if (unlikely(send_cq != NULL && recv_cq == NULL)) { | |
1245 | __release(&recv_cq->lock); | |
1246 | spin_unlock(&send_cq->lock); | |
1247 | } else if (unlikely(send_cq == NULL && recv_cq != NULL)) { | |
1248 | __release(&send_cq->lock); | |
1249 | spin_unlock(&recv_cq->lock); | |
1250 | } else if (send_cq == recv_cq) { | |
9a443537 | 1251 | __release(&recv_cq->lock); |
1252 | spin_unlock_irq(&send_cq->lock); | |
1253 | } else if (send_cq->cqn < recv_cq->cqn) { | |
1254 | spin_unlock(&recv_cq->lock); | |
1255 | spin_unlock_irq(&send_cq->lock); | |
1256 | } else { | |
1257 | spin_unlock(&send_cq->lock); | |
1258 | spin_unlock_irq(&recv_cq->lock); | |
1259 | } | |
1260 | } | |
1261 | ||
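The two helpers above always take the lower-numbered CQ's lock first and release in the reverse order, so two code paths locking the same send/receive CQ pair cannot deadlock; the __acquire()/__release() calls are sparse annotations for the lock that is not actually taken. A minimal usage sketch mirroring add_qp_to_list() earlier in this file (the list operation in the middle is illustrative):

```c
hns_roce_lock_cqs(hr_send_cq, hr_recv_cq);
/* ... work that must see a consistent view of both CQs ... */
list_add_tail(&hr_qp->node, &hr_dev->qp_list);
hns_roce_unlock_cqs(hr_send_cq, hr_recv_cq);
```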
d563099e | 1262 | static inline void *get_wqe(struct hns_roce_qp *hr_qp, int offset) |
9a443537 | 1263 | { |
d563099e | 1264 | return hns_roce_buf_offset(hr_qp->mtr.kmem, offset); |
9a443537 | 1265 | } |
1266 | ||
6c6e3921 | 1267 | void *hns_roce_get_recv_wqe(struct hns_roce_qp *hr_qp, int n) |
9a443537 | 1268 | { |
9a443537 | 1269 | return get_wqe(hr_qp, hr_qp->rq.offset + (n << hr_qp->rq.wqe_shift)); |
1270 | } | |
1271 | ||
6c6e3921 | 1272 | void *hns_roce_get_send_wqe(struct hns_roce_qp *hr_qp, int n) |
9a443537 | 1273 | { |
9a443537 | 1274 | return get_wqe(hr_qp, hr_qp->sq.offset + (n << hr_qp->sq.wqe_shift)); |
1275 | } | |
1276 | ||
6c6e3921 | 1277 | void *hns_roce_get_extend_sge(struct hns_roce_qp *hr_qp, int n) |
926a01dc | 1278 | { |
d563099e | 1279 | return get_wqe(hr_qp, hr_qp->sge.offset + (n << hr_qp->sge.sge_shift)); |
926a01dc | 1280 | } |
926a01dc | 1281 | |
9a443537 | 1282 | bool hns_roce_wq_overflow(struct hns_roce_wq *hr_wq, int nreq, |
1283 | struct ib_cq *ib_cq) | |
1284 | { | |
1285 | struct hns_roce_cq *hr_cq; | |
1286 | u32 cur; | |
1287 | ||
1288 | cur = hr_wq->head - hr_wq->tail; | |
ec6adad0 | 1289 | if (likely(cur + nreq < hr_wq->wqe_cnt)) |
3756c7f5 | 1290 | return false; |
9a443537 | 1291 | |
1292 | hr_cq = to_hr_cq(ib_cq); | |
1293 | spin_lock(&hr_cq->lock); | |
1294 | cur = hr_wq->head - hr_wq->tail; | |
1295 | spin_unlock(&hr_cq->lock); | |
1296 | ||
ec6adad0 | 1297 | return cur + nreq >= hr_wq->wqe_cnt; |
9a443537 | 1298 | } |
1299 | ||
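hns_roce_wq_overflow() is meant to be called once per posted work request, with nreq counting the WQEs already accepted in the current post call; it re-reads head/tail under the CQ lock only when the queue looks full. A hedged sketch of the expected call pattern (the function below is illustrative, not the driver's real verbs hook):

```c
static int example_post_send(struct hns_roce_qp *hr_qp,
			     const struct ib_send_wr *wr,
			     const struct ib_send_wr **bad_wr)
{
	int nreq;

	for (nreq = 0; wr; ++nreq, wr = wr->next) {
		if (hns_roce_wq_overflow(&hr_qp->sq, nreq,
					 hr_qp->ibqp.send_cq)) {
			*bad_wr = wr;
			return -ENOMEM;
		}
		/* ... build and post the WQE for this work request ... */
	}

	return 0;
}
```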
1300 | int hns_roce_init_qp_table(struct hns_roce_dev *hr_dev) | |
1301 | { | |
1302 | struct hns_roce_qp_table *qp_table = &hr_dev->qp_table; | |
1303 | int reserved_from_top = 0; | |
06ef0ee4 | 1304 | int reserved_from_bot; |
9a443537 | 1305 | int ret; |
1306 | ||
aa84fa18 | 1307 | mutex_init(&qp_table->scc_mutex); |
736b5a70 | 1308 | xa_init(&hr_dev->qp_table_xa); |
9a443537 | 1309 | |
21b97f53 | 1310 | reserved_from_bot = hr_dev->caps.reserved_qps; |
06ef0ee4 | 1311 | |
9a443537 | 1312 | ret = hns_roce_bitmap_init(&qp_table->bitmap, hr_dev->caps.num_qps, |
06ef0ee4 | 1313 | hr_dev->caps.num_qps - 1, reserved_from_bot, |
9a443537 | 1314 | reserved_from_top); |
1315 | if (ret) { | |
13ca970e | 1316 | dev_err(hr_dev->dev, "qp bitmap init failed, error = %d\n", |
9a443537 | 1317 | ret); |
1318 | return ret; | |
1319 | } | |
1320 | ||
1321 | return 0; | |
1322 | } | |
1323 | ||
1324 | void hns_roce_cleanup_qp_table(struct hns_roce_dev *hr_dev) | |
1325 | { | |
1326 | hns_roce_bitmap_cleanup(&hr_dev->qp_table.bitmap); | |
1327 | } |