Commit | Line | Data |
---|---|---|
9a443537 | 1 | /* |
2 | * Copyright (c) 2016 Hisilicon Limited. | |
3 | * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved. | |
4 | * | |
5 | * This software is available to you under a choice of one of two | |
6 | * licenses. You may choose to be licensed under the terms of the GNU | |
7 | * General Public License (GPL) Version 2, available from the file | |
8 | * COPYING in the main directory of this source tree, or the | |
9 | * OpenIB.org BSD license below: | |
10 | * | |
11 | * Redistribution and use in source and binary forms, with or | |
12 | * without modification, are permitted provided that the following | |
13 | * conditions are met: | |
14 | * | |
15 | * - Redistributions of source code must retain the above | |
16 | * copyright notice, this list of conditions and the following | |
17 | * disclaimer. | |
18 | * | |
19 | * - Redistributions in binary form must reproduce the above | |
20 | * copyright notice, this list of conditions and the following | |
21 | * disclaimer in the documentation and/or other materials | |
22 | * provided with the distribution. | |
23 | * | |
24 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | |
25 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | |
26 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | |
27 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | |
28 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | |
29 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | |
30 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | |
31 | * SOFTWARE. | |
32 | */ | |
33 | ||
05ad5482 | 34 | #include <linux/pci.h> |
cb814642 | 35 | #include <rdma/ib_addr.h> |
9a443537 | 36 | #include <rdma/ib_umem.h> |
89944450 | 37 | #include <rdma/uverbs_ioctl.h> |
9a443537 | 38 | #include "hns_roce_common.h" |
39 | #include "hns_roce_device.h" | |
40 | #include "hns_roce_hem.h" | |
9a443537 | 41 | |
ffd541d4 YL |
42 | static void flush_work_handle(struct work_struct *work) |
43 | { | |
44 | struct hns_roce_work *flush_work = container_of(work, | |
45 | struct hns_roce_work, work); | |
46 | struct hns_roce_qp *hr_qp = container_of(flush_work, | |
47 | struct hns_roce_qp, flush_work); | |
48 | struct device *dev = flush_work->hr_dev->dev; | |
49 | struct ib_qp_attr attr; | |
50 | int attr_mask; | |
51 | int ret; | |
52 | ||
53 | attr_mask = IB_QP_STATE; | |
54 | attr.qp_state = IB_QPS_ERR; | |
55 | ||
b5374286 YL |
56 | if (test_and_clear_bit(HNS_ROCE_FLUSH_FLAG, &hr_qp->flush_flag)) { |
57 | ret = hns_roce_modify_qp(&hr_qp->ibqp, &attr, attr_mask, NULL); | |
58 | if (ret) | |
f0588567 | 59 | dev_err(dev, "modify QP to error state failed(%d) during CQE flush\n", |
b5374286 YL |
60 | ret); |
61 | } | |
ffd541d4 YL |
62 | |
63 | /* | |
64 | * Make sure we signal the QP destroy leg that the QP flush has |
65 | * completed, so that it can now safely proceed and destroy the QP. |
66 | */ | |
8f9513d8 | 67 | if (refcount_dec_and_test(&hr_qp->refcount)) |
ffd541d4 YL |
68 | complete(&hr_qp->free); |
69 | } | |
70 | ||
71 | void init_flush_work(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp) | |
72 | { | |
73 | struct hns_roce_work *flush_work = &hr_qp->flush_work; | |
74 | ||
75 | flush_work->hr_dev = hr_dev; | |
76 | INIT_WORK(&flush_work->work, flush_work_handle); | |
8f9513d8 | 77 | refcount_inc(&hr_qp->refcount); |
ffd541d4 YL |
78 | queue_work(hr_dev->irq_workq, &flush_work->work); |
79 | } | |
80 | ||
c462a024 WL |
81 | void flush_cqe(struct hns_roce_dev *dev, struct hns_roce_qp *qp) |
82 | { | |
83 | /* | |
84 | * Hip08 hardware cannot flush the WQEs in SQ/RQ if the QP state | |
85 | * enters the error state. Hence, as a workaround to this |
86 | * hardware limitation, the driver needs to assist in flushing. |
87 | * But the flushing operation uses a mailbox to convey the QP |
88 | * state to the hardware, and that mailbox call can sleep because |
89 | * of the mutex protection around it. Hence, use the deferred |
90 | * flush for now. |
91 | */ | |
92 | if (!test_and_set_bit(HNS_ROCE_FLUSH_FLAG, &qp->flush_flag)) | |
93 | init_flush_work(dev, qp); | |
94 | } | |
95 | ||
9a443537 | 96 | void hns_roce_qp_event(struct hns_roce_dev *hr_dev, u32 qpn, int event_type) |
97 | { | |
13ca970e | 98 | struct device *dev = hr_dev->dev; |
9a443537 | 99 | struct hns_roce_qp *qp; |
100 | ||
736b5a70 | 101 | xa_lock(&hr_dev->qp_table_xa); |
9a443537 | 102 | qp = __hns_roce_qp_lookup(hr_dev, qpn); |
103 | if (qp) | |
8f9513d8 | 104 | refcount_inc(&qp->refcount); |
736b5a70 | 105 | xa_unlock(&hr_dev->qp_table_xa); |
9a443537 | 106 | |
107 | if (!qp) { | |
f0588567 | 108 | dev_warn(dev, "async event for bogus QP %08x\n", qpn); |
9a443537 | 109 | return; |
110 | } | |
111 | ||
38d22088 CT |
112 | if (event_type == HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR || |
113 | event_type == HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR || | |
114 | event_type == HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR || | |
115 | event_type == HNS_ROCE_EVENT_TYPE_XRCD_VIOLATION || | |
116 | event_type == HNS_ROCE_EVENT_TYPE_INVALID_XRCETH) { | |
0fc99566 | 117 | qp->state = IB_QPS_ERR; |
c462a024 WL |
118 | |
119 | flush_cqe(hr_dev, qp); | |
0fc99566 YL |
120 | } |
121 | ||
9a443537 | 122 | qp->event(qp, (enum hns_roce_event)event_type); |
123 | ||
8f9513d8 | 124 | if (refcount_dec_and_test(&qp->refcount)) |
9a443537 | 125 | complete(&qp->free); |
126 | } | |
127 | ||
128 | static void hns_roce_ib_qp_event(struct hns_roce_qp *hr_qp, | |
129 | enum hns_roce_event type) | |
130 | { | |
9a443537 | 131 | struct ib_qp *ibqp = &hr_qp->ibqp; |
dc93a0d9 | 132 | struct ib_event event; |
9a443537 | 133 | |
134 | if (ibqp->event_handler) { | |
135 | event.device = ibqp->device; | |
136 | event.element.qp = ibqp; | |
137 | switch (type) { | |
138 | case HNS_ROCE_EVENT_TYPE_PATH_MIG: | |
139 | event.event = IB_EVENT_PATH_MIG; | |
140 | break; | |
141 | case HNS_ROCE_EVENT_TYPE_COMM_EST: | |
142 | event.event = IB_EVENT_COMM_EST; | |
143 | break; | |
144 | case HNS_ROCE_EVENT_TYPE_SQ_DRAINED: | |
145 | event.event = IB_EVENT_SQ_DRAINED; | |
146 | break; | |
147 | case HNS_ROCE_EVENT_TYPE_SRQ_LAST_WQE_REACH: | |
148 | event.event = IB_EVENT_QP_LAST_WQE_REACHED; | |
149 | break; | |
150 | case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR: | |
151 | event.event = IB_EVENT_QP_FATAL; | |
152 | break; | |
153 | case HNS_ROCE_EVENT_TYPE_PATH_MIG_FAILED: | |
154 | event.event = IB_EVENT_PATH_MIG_ERR; | |
155 | break; | |
156 | case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR: | |
157 | event.event = IB_EVENT_QP_REQ_ERR; | |
158 | break; | |
159 | case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR: | |
32548870 WL |
160 | case HNS_ROCE_EVENT_TYPE_XRCD_VIOLATION: |
161 | case HNS_ROCE_EVENT_TYPE_INVALID_XRCETH: | |
9a443537 | 162 | event.event = IB_EVENT_QP_ACCESS_ERR; |
163 | break; | |
164 | default: | |
fecd02eb | 165 | dev_dbg(ibqp->device->dev.parent, "roce_ib: Unexpected event type %d on QP %06lx\n", |
9a443537 | 166 | type, hr_qp->qpn); |
167 | return; | |
168 | } | |
169 | ibqp->event_handler(&event, ibqp->qp_context); | |
170 | } | |
171 | } | |
172 | ||
9e03dbea | 173 | static u8 get_affinity_cq_bank(u8 qp_bank) |
71586dd2 | 174 | { |
9e03dbea CT |
175 | return (qp_bank >> 1) & CQ_BANKID_MASK; |
176 | } | |
177 | ||
178 | static u8 get_least_load_bankid_for_qp(struct ib_qp_init_attr *init_attr, | |
179 | struct hns_roce_bank *bank) | |
180 | { | |
181 | #define INVALID_LOAD_QPNUM 0xFFFFFFFF | |
182 | struct ib_cq *scq = init_attr->send_cq; | |
183 | u32 least_load = INVALID_LOAD_QPNUM; | |
184 | unsigned long cqn = 0; | |
71586dd2 YL |
185 | u8 bankid = 0; |
186 | u32 bankcnt; | |
187 | u8 i; | |
188 | ||
9e03dbea CT |
189 | if (scq) |
190 | cqn = to_hr_cq(scq)->cqn; | |
191 | ||
192 | for (i = 0; i < HNS_ROCE_QP_BANK_NUM; i++) { | |
193 | if (scq && (get_affinity_cq_bank(i) != (cqn & CQ_BANKID_MASK))) | |
194 | continue; | |
195 | ||
71586dd2 YL |
196 | bankcnt = bank[i].inuse; |
197 | if (bankcnt < least_load) { | |
198 | least_load = bankcnt; | |
199 | bankid = i; | |
200 | } | |
201 | } | |
202 | ||
203 | return bankid; | |
204 | } | |
205 | ||
206 | static int alloc_qpn_with_bankid(struct hns_roce_bank *bank, u8 bankid, | |
207 | unsigned long *qpn) | |
208 | { | |
209 | int id; | |
210 | ||
211 | id = ida_alloc_range(&bank->ida, bank->next, bank->max, GFP_KERNEL); | |
212 | if (id < 0) { | |
213 | id = ida_alloc_range(&bank->ida, bank->min, bank->max, | |
214 | GFP_KERNEL); | |
215 | if (id < 0) | |
216 | return id; | |
217 | } | |
218 | ||
219 | /* the QPN should keep increasing until the max value is reached. */ | |
220 | bank->next = (id + 1) > bank->max ? bank->min : id + 1; | |
221 | ||
222 | /* the lower 3 bits hold the bankid */ |
223 | *qpn = (id << 3) | bankid; | |
224 | ||
225 | return 0; | |
226 | } | |
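
For reference, a minimal standalone sketch of the QPN/bank arithmetic used by alloc_qpn_with_bankid(), get_qp_bankid() and free_qpn() above; it assumes HNS_ROCE_QP_BANK_NUM is 8, which is consistent with the bankid living in the lower 3 bits of the QPN:

```c
/* Illustrative only: the QPN <-> (bankid, ida id) mapping used above,
 * assuming HNS_ROCE_QP_BANK_NUM == 8 (bankid in the lower 3 bits).
 */
#include <assert.h>
#include <stdio.h>

#define QP_BANK_NUM 8u

int main(void)
{
	unsigned long id = 5, qpn;
	unsigned int bankid = 3;

	qpn = (id << 3) | bankid;            /* as in alloc_qpn_with_bankid() */
	assert((qpn & 0x7) == bankid);       /* as in get_qp_bankid()         */
	assert(qpn / QP_BANK_NUM == id);     /* as in the ida_free() index    */
	printf("qpn=%lu bankid=%lu id=%lu\n", qpn, qpn & 0x7, qpn / QP_BANK_NUM);
	return 0;
}
```
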
9e03dbea CT |
227 | static int alloc_qpn(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp, |
228 | struct ib_qp_init_attr *init_attr) | |
9a443537 | 229 | { |
71586dd2 | 230 | struct hns_roce_qp_table *qp_table = &hr_dev->qp_table; |
df83a66e | 231 | unsigned long num = 0; |
71586dd2 | 232 | u8 bankid; |
df83a66e XW |
233 | int ret; |
234 | ||
235 | if (hr_qp->ibqp.qp_type == IB_QPT_GSI) { | |
38d22088 | 236 | num = 1; |
df83a66e | 237 | } else { |
9293d3fc | 238 | mutex_lock(&qp_table->bank_mutex); |
9e03dbea | 239 | bankid = get_least_load_bankid_for_qp(init_attr, qp_table->bank); |
71586dd2 YL |
240 | |
241 | ret = alloc_qpn_with_bankid(&qp_table->bank[bankid], bankid, | |
242 | &num); | |
df83a66e | 243 | if (ret) { |
71586dd2 YL |
244 | ibdev_err(&hr_dev->ib_dev, |
245 | "failed to alloc QPN, ret = %d\n", ret); | |
9293d3fc | 246 | mutex_unlock(&qp_table->bank_mutex); |
71586dd2 | 247 | return ret; |
df83a66e XW |
248 | } |
249 | ||
71586dd2 | 250 | qp_table->bank[bankid].inuse++; |
9293d3fc | 251 | mutex_unlock(&qp_table->bank_mutex); |
df83a66e XW |
252 | } |
253 | ||
254 | hr_qp->qpn = num; | |
9a443537 | 255 | |
df83a66e | 256 | return 0; |
9a443537 | 257 | } |
258 | ||
b71961d1 XW |
259 | static void add_qp_to_list(struct hns_roce_dev *hr_dev, |
260 | struct hns_roce_qp *hr_qp, | |
261 | struct ib_cq *send_cq, struct ib_cq *recv_cq) | |
262 | { | |
263 | struct hns_roce_cq *hr_send_cq, *hr_recv_cq; | |
264 | unsigned long flags; | |
265 | ||
266 | hr_send_cq = send_cq ? to_hr_cq(send_cq) : NULL; | |
267 | hr_recv_cq = recv_cq ? to_hr_cq(recv_cq) : NULL; | |
268 | ||
269 | spin_lock_irqsave(&hr_dev->qp_list_lock, flags); | |
270 | hns_roce_lock_cqs(hr_send_cq, hr_recv_cq); | |
271 | ||
272 | list_add_tail(&hr_qp->node, &hr_dev->qp_list); | |
273 | if (hr_send_cq) | |
274 | list_add_tail(&hr_qp->sq_node, &hr_send_cq->sq_list); | |
275 | if (hr_recv_cq) | |
276 | list_add_tail(&hr_qp->rq_node, &hr_recv_cq->rq_list); | |
277 | ||
278 | hns_roce_unlock_cqs(hr_send_cq, hr_recv_cq); | |
279 | spin_unlock_irqrestore(&hr_dev->qp_list_lock, flags); | |
280 | } | |
281 | ||
282 | static int hns_roce_qp_store(struct hns_roce_dev *hr_dev, | |
283 | struct hns_roce_qp *hr_qp, | |
284 | struct ib_qp_init_attr *init_attr) | |
9a443537 | 285 | { |
736b5a70 | 286 | struct xarray *xa = &hr_dev->qp_table_xa; |
9a443537 | 287 | int ret; |
288 | ||
b71961d1 | 289 | if (!hr_qp->qpn) |
9a443537 | 290 | return -EINVAL; |
291 | ||
b71961d1 | 292 | ret = xa_err(xa_store_irq(xa, hr_qp->qpn, hr_qp, GFP_KERNEL)); |
736b5a70 | 293 | if (ret) |
f0588567 | 294 | dev_err(hr_dev->dev, "failed to xa store for QPC\n"); |
b71961d1 XW |
295 | else |
296 | /* add QP to device's QP list for softwc */ | |
297 | add_qp_to_list(hr_dev, hr_qp, init_attr->send_cq, | |
298 | init_attr->recv_cq); | |
9a443537 | 299 | |
300 | return ret; | |
301 | } | |
302 | ||
b71961d1 | 303 | static int alloc_qpc(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp) |
9a443537 | 304 | { |
305 | struct hns_roce_qp_table *qp_table = &hr_dev->qp_table; | |
13ca970e | 306 | struct device *dev = hr_dev->dev; |
9a443537 | 307 | int ret; |
308 | ||
b71961d1 | 309 | if (!hr_qp->qpn) |
9a443537 | 310 | return -EINVAL; |
311 | ||
9a443537 | 312 | /* Alloc memory for QPC */ |
313 | ret = hns_roce_table_get(hr_dev, &qp_table->qp_table, hr_qp->qpn); | |
314 | if (ret) { | |
f0588567 | 315 | dev_err(dev, "failed to get QPC table\n"); |
9a443537 | 316 | goto err_out; |
317 | } | |
318 | ||
319 | /* Alloc memory for IRRL */ | |
320 | ret = hns_roce_table_get(hr_dev, &qp_table->irrl_table, hr_qp->qpn); | |
321 | if (ret) { | |
f0588567 | 322 | dev_err(dev, "failed to get IRRL table\n"); |
9a443537 | 323 | goto err_put_qp; |
324 | } | |
325 | ||
e92f2c18 | 326 | if (hr_dev->caps.trrl_entry_sz) { |
327 | /* Alloc memory for TRRL */ | |
328 | ret = hns_roce_table_get(hr_dev, &qp_table->trrl_table, | |
329 | hr_qp->qpn); | |
330 | if (ret) { | |
f0588567 | 331 | dev_err(dev, "failed to get TRRL table\n"); |
e92f2c18 | 332 | goto err_put_irrl; |
333 | } | |
334 | } | |
335 | ||
4ddeacf6 | 336 | if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL) { |
6a157f7d YL |
337 | /* Alloc memory for SCC CTX */ |
338 | ret = hns_roce_table_get(hr_dev, &qp_table->sccc_table, | |
339 | hr_qp->qpn); | |
340 | if (ret) { | |
f0588567 | 341 | dev_err(dev, "failed to get SCC CTX table\n"); |
6a157f7d YL |
342 | goto err_put_trrl; |
343 | } | |
344 | } | |
345 | ||
9a443537 | 346 | return 0; |
347 | ||
e92f2c18 | 348 | err_put_trrl: |
349 | if (hr_dev->caps.trrl_entry_sz) | |
350 | hns_roce_table_put(hr_dev, &qp_table->trrl_table, hr_qp->qpn); | |
351 | ||
9a443537 | 352 | err_put_irrl: |
353 | hns_roce_table_put(hr_dev, &qp_table->irrl_table, hr_qp->qpn); | |
354 | ||
355 | err_put_qp: | |
356 | hns_roce_table_put(hr_dev, &qp_table->qp_table, hr_qp->qpn); | |
357 | ||
358 | err_out: | |
359 | return ret; | |
360 | } | |
361 | ||
0045e0d3 YL |
362 | static void qp_user_mmap_entry_remove(struct hns_roce_qp *hr_qp) |
363 | { | |
364 | rdma_user_mmap_entry_remove(&hr_qp->dwqe_mmap_entry->rdma_entry); | |
365 | } | |
366 | ||
9a443537 | 367 | void hns_roce_qp_remove(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp) |
368 | { | |
736b5a70 | 369 | struct xarray *xa = &hr_dev->qp_table_xa; |
9a443537 | 370 | unsigned long flags; |
371 | ||
b71961d1 | 372 | list_del(&hr_qp->node); |
32548870 WL |
373 | |
374 | if (hr_qp->ibqp.qp_type != IB_QPT_XRC_TGT) | |
375 | list_del(&hr_qp->sq_node); | |
376 | ||
377 | if (hr_qp->ibqp.qp_type != IB_QPT_XRC_INI && | |
378 | hr_qp->ibqp.qp_type != IB_QPT_XRC_TGT) | |
379 | list_del(&hr_qp->rq_node); | |
b71961d1 | 380 | |
736b5a70 | 381 | xa_lock_irqsave(xa, flags); |
61b460d1 | 382 | __xa_erase(xa, hr_qp->qpn); |
736b5a70 | 383 | xa_unlock_irqrestore(xa, flags); |
9a443537 | 384 | } |
385 | ||
b71961d1 | 386 | static void free_qpc(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp) |
9a443537 | 387 | { |
388 | struct hns_roce_qp_table *qp_table = &hr_dev->qp_table; | |
389 | ||
b71961d1 XW |
390 | if (hr_dev->caps.trrl_entry_sz) |
391 | hns_roce_table_put(hr_dev, &qp_table->trrl_table, hr_qp->qpn); | |
392 | hns_roce_table_put(hr_dev, &qp_table->irrl_table, hr_qp->qpn); | |
9a443537 | 393 | } |
394 | ||
71586dd2 YL |
395 | static inline u8 get_qp_bankid(unsigned long qpn) |
396 | { | |
397 | /* The lower 3 bits of QPN are used to hash to different banks */ | |
398 | return (u8)(qpn & GENMASK(2, 0)); | |
399 | } | |
400 | ||
df83a66e | 401 | static void free_qpn(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp) |
9a443537 | 402 | { |
71586dd2 | 403 | u8 bankid; |
9a443537 | 404 | |
df83a66e XW |
405 | if (hr_qp->ibqp.qp_type == IB_QPT_GSI) |
406 | return; | |
407 | ||
408 | if (hr_qp->qpn < hr_dev->caps.reserved_qps) | |
9a443537 | 409 | return; |
410 | ||
71586dd2 YL |
411 | bankid = get_qp_bankid(hr_qp->qpn); |
412 | ||
bfb6be40 YL |
413 | ida_free(&hr_dev->qp_table.bank[bankid].ida, |
414 | hr_qp->qpn / HNS_ROCE_QP_BANK_NUM); | |
71586dd2 | 415 | |
9293d3fc | 416 | mutex_lock(&hr_dev->qp_table.bank_mutex); |
71586dd2 | 417 | hr_dev->qp_table.bank[bankid].inuse--; |
9293d3fc | 418 | mutex_unlock(&hr_dev->qp_table.bank_mutex); |
9a443537 | 419 | } |
420 | ||
9dd05247 LC |
421 | static u32 proc_rq_sge(struct hns_roce_dev *dev, struct hns_roce_qp *hr_qp, |
422 | bool user) | |
423 | { | |
424 | u32 max_sge = dev->caps.max_rq_sg; | |
425 | ||
426 | if (dev->pci_dev->revision >= PCI_REVISION_ID_HIP09) | |
427 | return max_sge; | |
428 | ||
429 | /* Reserve SGEs only for HIP08 in kernel space; the userspace driver |
430 | * accounts for the reserved SGEs in max_sge when allocating the WQE |
431 | * buffer, so there is no need to do this again in the kernel. But the |
432 | * number may exceed the SGE capacity recorded in the firmware, so the |
433 | * kernel driver should adapt the value accordingly. |
434 | */ | |
435 | if (user) | |
436 | max_sge = roundup_pow_of_two(max_sge + 1); | |
437 | else | |
438 | hr_qp->rq.rsv_sge = 1; | |
439 | ||
440 | return max_sge; | |
441 | } | |
442 | ||
54d66387 | 443 | static int set_rq_size(struct hns_roce_dev *hr_dev, struct ib_qp_cap *cap, |
9dd05247 | 444 | struct hns_roce_qp *hr_qp, int has_rq, bool user) |
9a443537 | 445 | { |
9dd05247 | 446 | u32 max_sge = proc_rq_sge(hr_dev, hr_qp, user); |
54d66387 | 447 | u32 cnt; |
9a443537 | 448 | |
c7bcb134 LO |
449 | /* If an SRQ exists, zero all the RQ-related parameters */ |
450 | if (!has_rq) { | |
451 | hr_qp->rq.wqe_cnt = 0; | |
452 | hr_qp->rq.max_gs = 0; | |
453 | cap->max_recv_wr = 0; | |
454 | cap->max_recv_sge = 0; | |
9a443537 | 455 | |
026ded37 LC |
456 | return 0; |
457 | } | |
926a01dc | 458 | |
026ded37 LC |
459 | /* Check the validity of the requested QP capabilities */ |
460 | if (!cap->max_recv_wr || cap->max_recv_wr > hr_dev->caps.max_wqes || | |
9dd05247 LC |
461 | cap->max_recv_sge > max_sge) { |
462 | ibdev_err(&hr_dev->ib_dev, | |
463 | "RQ config error, depth = %u, sge = %u\n", | |
026ded37 LC |
464 | cap->max_recv_wr, cap->max_recv_sge); |
465 | return -EINVAL; | |
466 | } | |
9a443537 | 467 | |
54d66387 XW |
468 | cnt = roundup_pow_of_two(max(cap->max_recv_wr, hr_dev->caps.min_wqes)); |
469 | if (cnt > hr_dev->caps.max_wqes) { | |
026ded37 LC |
470 | ibdev_err(&hr_dev->ib_dev, "rq depth %u too large\n", |
471 | cap->max_recv_wr); | |
472 | return -EINVAL; | |
9a443537 | 473 | } |
474 | ||
9dd05247 LC |
475 | hr_qp->rq.max_gs = roundup_pow_of_two(max(1U, cap->max_recv_sge) + |
476 | hr_qp->rq.rsv_sge); | |
026ded37 | 477 | |
0c8b5d62 WL |
478 | hr_qp->rq.wqe_shift = ilog2(hr_dev->caps.max_rq_desc_sz * |
479 | hr_qp->rq.max_gs); | |
026ded37 | 480 | |
54d66387 | 481 | hr_qp->rq.wqe_cnt = cnt; |
54d66387 XW |
482 | |
483 | cap->max_recv_wr = cnt; | |
9dd05247 | 484 | cap->max_recv_sge = hr_qp->rq.max_gs - hr_qp->rq.rsv_sge; |
9a443537 | 485 | |
486 | return 0; | |
487 | } | |
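
As a worked example of the RQ sizing above (the capability values are made up, not taken from the driver): with max_recv_wr = 100, an assumed device minimum of 64 WQEs, max_recv_sge = 3 and one reserved SGE (the kernel HIP08 case), the arithmetic yields wqe_cnt = 128, max_gs = 4 and wqe_shift = ilog2(max_rq_desc_sz * max_gs):

```c
/* Illustrative only: the RQ sizing arithmetic from set_rq_size() with
 * made-up capability values; roundup_pow_of_two()/ilog2() are reimplemented
 * here for a standalone build.
 */
#include <stdio.h>

static unsigned int roundup_pow_of_two_u(unsigned int v)
{
	unsigned int r = 1;

	while (r < v)
		r <<= 1;
	return r;
}

static unsigned int ilog2_u(unsigned int v)
{
	unsigned int l = 0;

	while (v >>= 1)
		l++;
	return l;
}

int main(void)
{
	unsigned int max_recv_wr = 100, min_wqes = 64;  /* assumed device minimum */
	unsigned int max_recv_sge = 3, rsv_sge = 1;     /* kernel HIP08 reserves 1 SGE */
	unsigned int max_rq_desc_sz = 16;               /* assumed descriptor size */
	unsigned int wqe_cnt, max_gs, wqe_shift;

	wqe_cnt = roundup_pow_of_two_u(max_recv_wr > min_wqes ? max_recv_wr : min_wqes);
	max_gs = roundup_pow_of_two_u((max_recv_sge ? max_recv_sge : 1) + rsv_sge);
	wqe_shift = ilog2_u(max_rq_desc_sz * max_gs);

	/* prints: wqe_cnt=128 max_gs=4 wqe_shift=6 reported_sge=3 */
	printf("wqe_cnt=%u max_gs=%u wqe_shift=%u reported_sge=%u\n",
	       wqe_cnt, max_gs, wqe_shift, max_gs - rsv_sge);
	return 0;
}
```
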
488 | ||
0c5e259b L |
489 | static u32 get_max_inline_data(struct hns_roce_dev *hr_dev, |
490 | struct ib_qp_cap *cap) | |
54d66387 | 491 | { |
0c5e259b L |
492 | if (cap->max_inline_data) { |
493 | cap->max_inline_data = roundup_pow_of_two(cap->max_inline_data); | |
494 | return min(cap->max_inline_data, | |
495 | hr_dev->caps.max_sq_inline); | |
496 | } | |
54d66387 | 497 | |
05201e01 WL |
498 | return 0; |
499 | } | |
54d66387 | 500 | |
0c5e259b L |
501 | static void update_inline_data(struct hns_roce_qp *hr_qp, |
502 | struct ib_qp_cap *cap) | |
503 | { | |
504 | u32 sge_num = hr_qp->sq.ext_sge_cnt; | |
505 | ||
506 | if (hr_qp->config & HNS_ROCE_EXSGE_FLAGS) { | |
507 | if (!(hr_qp->ibqp.qp_type == IB_QPT_GSI || | |
508 | hr_qp->ibqp.qp_type == IB_QPT_UD)) | |
509 | sge_num = max((u32)HNS_ROCE_SGE_IN_WQE, sge_num); | |
510 | ||
511 | cap->max_inline_data = max(cap->max_inline_data, | |
512 | sge_num * HNS_ROCE_SGE_SIZE); | |
513 | } | |
514 | ||
515 | hr_qp->max_inline_data = cap->max_inline_data; | |
516 | } | |
517 | ||
518 | static u32 get_sge_num_from_max_send_sge(bool is_ud_or_gsi, | |
519 | u32 max_send_sge) | |
520 | { | |
521 | unsigned int std_sge_num; | |
522 | unsigned int min_sge; | |
523 | ||
524 | std_sge_num = is_ud_or_gsi ? 0 : HNS_ROCE_SGE_IN_WQE; | |
525 | min_sge = is_ud_or_gsi ? 1 : 0; | |
526 | return max_send_sge > std_sge_num ? (max_send_sge - std_sge_num) : | |
527 | min_sge; | |
528 | } | |
529 | ||
530 | static unsigned int get_sge_num_from_max_inl_data(bool is_ud_or_gsi, | |
531 | u32 max_inline_data) | |
532 | { | |
533 | unsigned int inline_sge; | |
534 | ||
24c62913 CT |
535 | if (!max_inline_data) |
536 | return 0; | |
0c5e259b L |
537 | |
538 | /* | |
539 | * If max_inline_data is less than |
540 | * HNS_ROCE_SGE_IN_WQE * HNS_ROCE_SGE_SIZE, there is no need to |
541 | * extend the SGEs, except for UD-type QPs. |
542 | */ | |
24c62913 | 543 | inline_sge = roundup_pow_of_two(max_inline_data) / HNS_ROCE_SGE_SIZE; |
0c5e259b L |
544 | if (!is_ud_or_gsi && inline_sge <= HNS_ROCE_SGE_IN_WQE) |
545 | inline_sge = 0; | |
546 | ||
547 | return inline_sge; | |
548 | } | |
549 | ||
05201e01 WL |
550 | static void set_ext_sge_param(struct hns_roce_dev *hr_dev, u32 sq_wqe_cnt, |
551 | struct hns_roce_qp *hr_qp, struct ib_qp_cap *cap) | |
552 | { | |
0c5e259b L |
553 | bool is_ud_or_gsi = (hr_qp->ibqp.qp_type == IB_QPT_GSI || |
554 | hr_qp->ibqp.qp_type == IB_QPT_UD); | |
555 | unsigned int std_sge_num; | |
556 | u32 inline_ext_sge = 0; | |
557 | u32 ext_wqe_sge_cnt; | |
05201e01 | 558 | u32 total_sge_cnt; |
0c5e259b L |
559 | |
560 | cap->max_inline_data = get_max_inline_data(hr_dev, cap); | |
54d66387 | 561 | |
05201e01 | 562 | hr_qp->sge.sge_shift = HNS_ROCE_SGE_SHIFT; |
0c5e259b L |
563 | std_sge_num = is_ud_or_gsi ? 0 : HNS_ROCE_SGE_IN_WQE; |
564 | ext_wqe_sge_cnt = get_sge_num_from_max_send_sge(is_ud_or_gsi, | |
565 | cap->max_send_sge); | |
05201e01 | 566 | |
0c5e259b L |
567 | if (hr_qp->config & HNS_ROCE_EXSGE_FLAGS) { |
568 | inline_ext_sge = max(ext_wqe_sge_cnt, | |
569 | get_sge_num_from_max_inl_data(is_ud_or_gsi, | |
570 | cap->max_inline_data)); | |
571 | hr_qp->sq.ext_sge_cnt = inline_ext_sge ? | |
572 | roundup_pow_of_two(inline_ext_sge) : 0; | |
05201e01 | 573 | |
0c5e259b L |
574 | hr_qp->sq.max_gs = max(1U, (hr_qp->sq.ext_sge_cnt + std_sge_num)); |
575 | hr_qp->sq.max_gs = min(hr_qp->sq.max_gs, hr_dev->caps.max_sq_sg); | |
576 | ||
577 | ext_wqe_sge_cnt = hr_qp->sq.ext_sge_cnt; | |
578 | } else { | |
579 | hr_qp->sq.max_gs = max(1U, cap->max_send_sge); | |
580 | hr_qp->sq.max_gs = min(hr_qp->sq.max_gs, hr_dev->caps.max_sq_sg); | |
581 | hr_qp->sq.ext_sge_cnt = hr_qp->sq.max_gs; | |
582 | } | |
d34895c3 YL |
583 | |
584 | /* If the number of extended SGEs is not zero, they MUST occupy at |
585 | * least HNS_HW_PAGE_SIZE of space. |
586 | */ | |
0c5e259b L |
587 | if (ext_wqe_sge_cnt) { |
588 | total_sge_cnt = roundup_pow_of_two(sq_wqe_cnt * ext_wqe_sge_cnt); | |
05201e01 WL |
589 | hr_qp->sge.sge_cnt = max(total_sge_cnt, |
590 | (u32)HNS_HW_PAGE_SIZE / HNS_ROCE_SGE_SIZE); | |
591 | } | |
0c5e259b L |
592 | |
593 | update_inline_data(hr_qp, cap); | |
54d66387 XW |
594 | } |
595 | ||
cc95b23c LO |
596 | static int check_sq_size_with_integrity(struct hns_roce_dev *hr_dev, |
597 | struct ib_qp_cap *cap, | |
598 | struct hns_roce_ib_create_qp *ucmd) | |
9a443537 | 599 | { |
600 | u32 roundup_sq_stride = roundup_pow_of_two(hr_dev->caps.max_sq_desc_sz); | |
601 | u8 max_sq_stride = ilog2(roundup_sq_stride); | |
602 | ||
603 | /* Sanity check SQ size before proceeding */ | |
515f6000 JG |
604 | if (ucmd->log_sq_stride > max_sq_stride || |
605 | ucmd->log_sq_stride < HNS_ROCE_IB_MIN_SQ_STRIDE) { | |
61918e9b | 606 | ibdev_err(&hr_dev->ib_dev, "failed to check SQ stride size.\n"); |
9a443537 | 607 | return -EINVAL; |
608 | } | |
609 | ||
926a01dc | 610 | if (cap->max_send_sge > hr_dev->caps.max_sq_sg) { |
61918e9b | 611 | ibdev_err(&hr_dev->ib_dev, "failed to check SQ SGE size %u.\n", |
db50077b | 612 | cap->max_send_sge); |
926a01dc WHX |
613 | return -EINVAL; |
614 | } | |
615 | ||
cc95b23c LO |
616 | return 0; |
617 | } | |
618 | ||
ae85bf92 XW |
619 | static int set_user_sq_size(struct hns_roce_dev *hr_dev, |
620 | struct ib_qp_cap *cap, struct hns_roce_qp *hr_qp, | |
621 | struct hns_roce_ib_create_qp *ucmd) | |
cc95b23c | 622 | { |
54d66387 XW |
623 | struct ib_device *ibdev = &hr_dev->ib_dev; |
624 | u32 cnt = 0; | |
cc95b23c LO |
625 | int ret; |
626 | ||
54d66387 XW |
627 | if (check_shl_overflow(1, ucmd->log_sq_bb_count, &cnt) || |
628 | cnt > hr_dev->caps.max_wqes) | |
515f6000 JG |
629 | return -EINVAL; |
630 | ||
cc95b23c LO |
631 | ret = check_sq_size_with_integrity(hr_dev, cap, ucmd); |
632 | if (ret) { | |
54d66387 XW |
633 | ibdev_err(ibdev, "failed to check user SQ size, ret = %d.\n", |
634 | ret); | |
cc95b23c LO |
635 | return ret; |
636 | } | |
637 | ||
05201e01 | 638 | set_ext_sge_param(hr_dev, cnt, hr_qp, cap); |
926a01dc | 639 | |
54d66387 XW |
640 | hr_qp->sq.wqe_shift = ucmd->log_sq_stride; |
641 | hr_qp->sq.wqe_cnt = cnt; | |
0c5e259b | 642 | cap->max_send_sge = hr_qp->sq.max_gs; |
9a443537 | 643 | |
644 | return 0; | |
645 | } | |
646 | ||
e9f2cd28 XW |
647 | static int set_wqe_buf_attr(struct hns_roce_dev *hr_dev, |
648 | struct hns_roce_qp *hr_qp, | |
649 | struct hns_roce_buf_attr *buf_attr) | |
8d18ad83 | 650 | { |
8d18ad83 | 651 | int buf_size; |
d563099e | 652 | int idx = 0; |
8d18ad83 | 653 | |
54d66387 | 654 | hr_qp->buff_size = 0; |
8d18ad83 | 655 | |
d563099e | 656 | /* SQ WQE */ |
54d66387 XW |
657 | hr_qp->sq.offset = 0; |
658 | buf_size = to_hr_hem_entries_size(hr_qp->sq.wqe_cnt, | |
659 | hr_qp->sq.wqe_shift); | |
d563099e XW |
660 | if (buf_size > 0 && idx < ARRAY_SIZE(buf_attr->region)) { |
661 | buf_attr->region[idx].size = buf_size; | |
662 | buf_attr->region[idx].hopnum = hr_dev->caps.wqe_sq_hop_num; | |
663 | idx++; | |
54d66387 | 664 | hr_qp->buff_size += buf_size; |
8d18ad83 LO |
665 | } |
666 | ||
54d66387 XW |
667 | /* extended SGE space in SQ */ |
668 | hr_qp->sge.offset = hr_qp->buff_size; | |
669 | buf_size = to_hr_hem_entries_size(hr_qp->sge.sge_cnt, | |
670 | hr_qp->sge.sge_shift); | |
671 | if (buf_size > 0 && idx < ARRAY_SIZE(buf_attr->region)) { | |
d563099e | 672 | buf_attr->region[idx].size = buf_size; |
54d66387 | 673 | buf_attr->region[idx].hopnum = hr_dev->caps.wqe_sge_hop_num; |
d563099e | 674 | idx++; |
54d66387 | 675 | hr_qp->buff_size += buf_size; |
8d18ad83 LO |
676 | } |
677 | ||
d563099e | 678 | /* RQ WQE */ |
54d66387 XW |
679 | hr_qp->rq.offset = hr_qp->buff_size; |
680 | buf_size = to_hr_hem_entries_size(hr_qp->rq.wqe_cnt, | |
681 | hr_qp->rq.wqe_shift); | |
d563099e XW |
682 | if (buf_size > 0 && idx < ARRAY_SIZE(buf_attr->region)) { |
683 | buf_attr->region[idx].size = buf_size; | |
684 | buf_attr->region[idx].hopnum = hr_dev->caps.wqe_rq_hop_num; | |
685 | idx++; | |
54d66387 | 686 | hr_qp->buff_size += buf_size; |
8d18ad83 LO |
687 | } |
688 | ||
54d66387 XW |
689 | if (hr_qp->buff_size < 1) |
690 | return -EINVAL; | |
947441ea | 691 | |
9581a356 | 692 | buf_attr->page_shift = HNS_HW_PAGE_SHIFT + hr_dev->caps.mtt_buf_pg_sz; |
54d66387 | 693 | buf_attr->region_count = idx; |
947441ea LO |
694 | |
695 | return 0; | |
696 | } | |
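
A sketch of the resulting WQE buffer layout: set_wqe_buf_attr() places the SQ WQE region first, then the extended-SGE region, then the RQ WQE region, accumulating the offsets into buff_size. The region_size() helper below merely stands in for to_hr_hem_entries_size() and is assumed to page-align count << shift; all counts and shifts are made up:

```c
/* Illustrative only: SQ / extended-SGE / RQ region layout as built by
 * set_wqe_buf_attr(). region_size() stands in for to_hr_hem_entries_size()
 * and is assumed to page-align (cnt << shift); the sizes are made up.
 */
#include <stdio.h>

#define HW_PAGE_SIZE 4096u /* assumed HNS_HW_PAGE_SIZE */

static unsigned int region_size(unsigned int cnt, unsigned int shift)
{
	unsigned int bytes = cnt << shift;

	return (bytes + HW_PAGE_SIZE - 1) & ~(HW_PAGE_SIZE - 1);
}

int main(void)
{
	unsigned int buff_size = 0, sq_off, sge_off, rq_off;

	sq_off = buff_size;
	buff_size += region_size(128, 7);  /* 128 SQ WQEs of 128 B  */
	sge_off = buff_size;
	buff_size += region_size(256, 4);  /* 256 ext SGEs of 16 B  */
	rq_off = buff_size;
	buff_size += region_size(128, 6);  /* 128 RQ WQEs of 64 B   */

	printf("sq@%u sge@%u rq@%u total=%u\n", sq_off, sge_off, rq_off, buff_size);
	return 0;
}
```
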
697 | ||
ae85bf92 XW |
698 | static int set_kernel_sq_size(struct hns_roce_dev *hr_dev, |
699 | struct ib_qp_cap *cap, struct hns_roce_qp *hr_qp) | |
9a443537 | 700 | { |
54d66387 XW |
701 | struct ib_device *ibdev = &hr_dev->ib_dev; |
702 | u32 cnt; | |
9a443537 | 703 | |
026ded37 | 704 | if (!cap->max_send_wr || cap->max_send_wr > hr_dev->caps.max_wqes || |
30b70788 | 705 | cap->max_send_sge > hr_dev->caps.max_sq_sg) { |
125073e7 | 706 | ibdev_err(ibdev, "failed to check SQ WR or SGE num.\n"); |
9a443537 | 707 | return -EINVAL; |
708 | } | |
709 | ||
54d66387 XW |
710 | cnt = roundup_pow_of_two(max(cap->max_send_wr, hr_dev->caps.min_wqes)); |
711 | if (cnt > hr_dev->caps.max_wqes) { | |
61918e9b | 712 | ibdev_err(ibdev, "failed to check WQE num, WQE num = %u.\n", |
54d66387 | 713 | cnt); |
9a443537 | 714 | return -EINVAL; |
715 | } | |
716 | ||
54d66387 XW |
717 | hr_qp->sq.wqe_shift = ilog2(hr_dev->caps.max_sq_desc_sz); |
718 | hr_qp->sq.wqe_cnt = cnt; | |
9a443537 | 719 | |
05201e01 | 720 | set_ext_sge_param(hr_dev, cnt, hr_qp, cap); |
05ad5482 | 721 | |
54d66387 XW |
722 | /* sync the parameters of kernel QP to user's configuration */ |
723 | cap->max_send_wr = cnt; | |
9a443537 | 724 | cap->max_send_sge = hr_qp->sq.max_gs; |
725 | ||
9a443537 | 726 | return 0; |
727 | } | |
728 | ||
0425e3e6 YL |
729 | static int hns_roce_qp_has_sq(struct ib_qp_init_attr *attr) |
730 | { | |
2557fabd | 731 | if (attr->qp_type == IB_QPT_XRC_TGT || !attr->cap.max_send_wr) |
0425e3e6 YL |
732 | return 0; |
733 | ||
734 | return 1; | |
735 | } | |
736 | ||
e088a685 YL |
737 | static int hns_roce_qp_has_rq(struct ib_qp_init_attr *attr) |
738 | { | |
739 | if (attr->qp_type == IB_QPT_XRC_INI || | |
4d103905 LO |
740 | attr->qp_type == IB_QPT_XRC_TGT || attr->srq || |
741 | !attr->cap.max_recv_wr) | |
e088a685 YL |
742 | return 0; |
743 | ||
744 | return 1; | |
745 | } | |
746 | ||
24c22112 XW |
747 | static int alloc_qp_buf(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp, |
748 | struct ib_qp_init_attr *init_attr, | |
749 | struct ib_udata *udata, unsigned long addr) | |
750 | { | |
24c22112 | 751 | struct ib_device *ibdev = &hr_dev->ib_dev; |
d563099e | 752 | struct hns_roce_buf_attr buf_attr = {}; |
24c22112 XW |
753 | int ret; |
754 | ||
e9f2cd28 | 755 | ret = set_wqe_buf_attr(hr_dev, hr_qp, &buf_attr); |
d563099e | 756 | if (ret) { |
54d66387 | 757 | ibdev_err(ibdev, "failed to split WQE buf, ret = %d.\n", ret); |
d563099e XW |
758 | goto err_inline; |
759 | } | |
760 | ret = hns_roce_mtr_create(hr_dev, &hr_qp->mtr, &buf_attr, | |
7b0006db | 761 | PAGE_SHIFT + hr_dev->caps.mtt_ba_pg_sz, |
d563099e XW |
762 | udata, addr); |
763 | if (ret) { | |
54d66387 | 764 | ibdev_err(ibdev, "failed to create WQE mtr, ret = %d.\n", ret); |
d563099e | 765 | goto err_inline; |
24c22112 | 766 | } |
24c22112 | 767 | |
0045e0d3 YL |
768 | if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_DIRECT_WQE) |
769 | hr_qp->en_flags |= HNS_ROCE_QP_CAP_DIRECT_WQE; | |
770 | ||
24c22112 | 771 | return 0; |
0045e0d3 | 772 | |
24c22112 | 773 | err_inline: |
24c22112 | 774 | |
24c22112 XW |
775 | return ret; |
776 | } | |
777 | ||
778 | static void free_qp_buf(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp) | |
779 | { | |
d563099e | 780 | hns_roce_mtr_destroy(hr_dev, &hr_qp->mtr); |
24c22112 | 781 | } |
ae85bf92 | 782 | |
cfec045b XW |
783 | static inline bool user_qp_has_sdb(struct hns_roce_dev *hr_dev, |
784 | struct ib_qp_init_attr *init_attr, | |
785 | struct ib_udata *udata, | |
786 | struct hns_roce_ib_create_qp_resp *resp, | |
787 | struct hns_roce_ib_create_qp *ucmd) | |
788 | { | |
cf8cd4cc | 789 | return ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_RECORD_DB) && |
cfec045b XW |
790 | udata->outlen >= offsetofend(typeof(*resp), cap_flags) && |
791 | hns_roce_qp_has_sq(init_attr) && | |
792 | udata->inlen >= offsetofend(typeof(*ucmd), sdb_addr)); | |
793 | } | |
794 | ||
795 | static inline bool user_qp_has_rdb(struct hns_roce_dev *hr_dev, | |
796 | struct ib_qp_init_attr *init_attr, | |
797 | struct ib_udata *udata, | |
798 | struct hns_roce_ib_create_qp_resp *resp) | |
799 | { | |
cf8cd4cc | 800 | return ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_RECORD_DB) && |
cfec045b XW |
801 | udata->outlen >= offsetofend(typeof(*resp), cap_flags) && |
802 | hns_roce_qp_has_rq(init_attr)); | |
803 | } | |
804 | ||
805 | static inline bool kernel_qp_has_rdb(struct hns_roce_dev *hr_dev, | |
806 | struct ib_qp_init_attr *init_attr) | |
807 | { | |
cf8cd4cc | 808 | return ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_RECORD_DB) && |
cfec045b XW |
809 | hns_roce_qp_has_rq(init_attr)); |
810 | } | |
811 | ||
0045e0d3 YL |
812 | static int qp_mmap_entry(struct hns_roce_qp *hr_qp, |
813 | struct hns_roce_dev *hr_dev, | |
814 | struct ib_udata *udata, | |
815 | struct hns_roce_ib_create_qp_resp *resp) | |
816 | { | |
817 | struct hns_roce_ucontext *uctx = | |
818 | rdma_udata_to_drv_context(udata, | |
819 | struct hns_roce_ucontext, ibucontext); | |
820 | struct rdma_user_mmap_entry *rdma_entry; | |
821 | u64 address; | |
822 | ||
823 | address = hr_dev->dwqe_page + hr_qp->qpn * HNS_ROCE_DWQE_SIZE; | |
824 | ||
825 | hr_qp->dwqe_mmap_entry = | |
826 | hns_roce_user_mmap_entry_insert(&uctx->ibucontext, address, | |
827 | HNS_ROCE_DWQE_SIZE, | |
828 | HNS_ROCE_MMAP_TYPE_DWQE); | |
829 | ||
830 | if (!hr_qp->dwqe_mmap_entry) { | |
831 | ibdev_err(&hr_dev->ib_dev, "failed to get dwqe mmap entry.\n"); | |
832 | return -ENOMEM; | |
833 | } | |
834 | ||
835 | rdma_entry = &hr_qp->dwqe_mmap_entry->rdma_entry; | |
836 | resp->dwqe_mmap_key = rdma_user_mmap_get_offset(rdma_entry); | |
837 | ||
838 | return 0; | |
839 | } | |
840 | ||
ae2854c5 YL |
841 | static int alloc_user_qp_db(struct hns_roce_dev *hr_dev, |
842 | struct hns_roce_qp *hr_qp, | |
843 | struct ib_qp_init_attr *init_attr, | |
844 | struct ib_udata *udata, | |
845 | struct hns_roce_ib_create_qp *ucmd, | |
846 | struct hns_roce_ib_create_qp_resp *resp) | |
847 | { | |
848 | struct hns_roce_ucontext *uctx = rdma_udata_to_drv_context(udata, | |
849 | struct hns_roce_ucontext, ibucontext); | |
850 | struct ib_device *ibdev = &hr_dev->ib_dev; | |
851 | int ret; | |
852 | ||
853 | if (user_qp_has_sdb(hr_dev, init_attr, udata, resp, ucmd)) { | |
854 | ret = hns_roce_db_map_user(uctx, ucmd->sdb_addr, &hr_qp->sdb); | |
855 | if (ret) { | |
856 | ibdev_err(ibdev, | |
857 | "failed to map user SQ doorbell, ret = %d.\n", | |
858 | ret); | |
859 | goto err_out; | |
860 | } | |
861 | hr_qp->en_flags |= HNS_ROCE_QP_CAP_SQ_RECORD_DB; | |
862 | } | |
863 | ||
864 | if (user_qp_has_rdb(hr_dev, init_attr, udata, resp)) { | |
865 | ret = hns_roce_db_map_user(uctx, ucmd->db_addr, &hr_qp->rdb); | |
866 | if (ret) { | |
867 | ibdev_err(ibdev, | |
868 | "failed to map user RQ doorbell, ret = %d.\n", | |
869 | ret); | |
870 | goto err_sdb; | |
871 | } | |
872 | hr_qp->en_flags |= HNS_ROCE_QP_CAP_RQ_RECORD_DB; | |
873 | } | |
874 | ||
875 | return 0; | |
876 | ||
877 | err_sdb: | |
878 | if (hr_qp->en_flags & HNS_ROCE_QP_CAP_SQ_RECORD_DB) | |
879 | hns_roce_db_unmap_user(uctx, &hr_qp->sdb); | |
880 | err_out: | |
881 | return ret; | |
882 | } | |
883 | ||
884 | static int alloc_kernel_qp_db(struct hns_roce_dev *hr_dev, | |
885 | struct hns_roce_qp *hr_qp, | |
886 | struct ib_qp_init_attr *init_attr) | |
887 | { | |
888 | struct ib_device *ibdev = &hr_dev->ib_dev; | |
889 | int ret; | |
890 | ||
891 | if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09) | |
892 | hr_qp->sq.db_reg = hr_dev->mem_base + | |
893 | HNS_ROCE_DWQE_SIZE * hr_qp->qpn; | |
894 | else | |
895 | hr_qp->sq.db_reg = hr_dev->reg_base + hr_dev->sdb_offset + | |
896 | DB_REG_OFFSET * hr_dev->priv_uar.index; | |
897 | ||
898 | hr_qp->rq.db_reg = hr_dev->reg_base + hr_dev->odb_offset + | |
899 | DB_REG_OFFSET * hr_dev->priv_uar.index; | |
900 | ||
901 | if (kernel_qp_has_rdb(hr_dev, init_attr)) { | |
902 | ret = hns_roce_alloc_db(hr_dev, &hr_qp->rdb, 0); | |
903 | if (ret) { | |
904 | ibdev_err(ibdev, | |
905 | "failed to alloc kernel RQ doorbell, ret = %d.\n", | |
906 | ret); | |
907 | return ret; | |
908 | } | |
909 | *hr_qp->rdb.db_record = 0; | |
910 | hr_qp->en_flags |= HNS_ROCE_QP_CAP_RQ_RECORD_DB; | |
911 | } | |
912 | ||
913 | return 0; | |
914 | } | |
915 | ||
cfec045b XW |
916 | static int alloc_qp_db(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp, |
917 | struct ib_qp_init_attr *init_attr, | |
918 | struct ib_udata *udata, | |
919 | struct hns_roce_ib_create_qp *ucmd, | |
920 | struct hns_roce_ib_create_qp_resp *resp) | |
921 | { | |
cfec045b XW |
922 | int ret; |
923 | ||
aba457ca LC |
924 | if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SDI_MODE) |
925 | hr_qp->en_flags |= HNS_ROCE_QP_CAP_OWNER_DB; | |
926 | ||
cfec045b | 927 | if (udata) { |
0045e0d3 YL |
928 | if (hr_qp->en_flags & HNS_ROCE_QP_CAP_DIRECT_WQE) { |
929 | ret = qp_mmap_entry(hr_qp, hr_dev, udata, resp); | |
930 | if (ret) | |
931 | return ret; | |
932 | } | |
933 | ||
ae2854c5 YL |
934 | ret = alloc_user_qp_db(hr_dev, hr_qp, init_attr, udata, ucmd, |
935 | resp); | |
936 | if (ret) | |
0045e0d3 | 937 | goto err_remove_qp; |
cfec045b | 938 | } else { |
ae2854c5 YL |
939 | ret = alloc_kernel_qp_db(hr_dev, hr_qp, init_attr); |
940 | if (ret) | |
941 | return ret; | |
cfec045b XW |
942 | } |
943 | ||
944 | return 0; | |
0045e0d3 YL |
945 | |
946 | err_remove_qp: | |
947 | if (hr_qp->en_flags & HNS_ROCE_QP_CAP_DIRECT_WQE) | |
948 | qp_user_mmap_entry_remove(hr_qp); | |
949 | ||
950 | return ret; | |
cfec045b XW |
951 | } |
952 | ||
953 | static void free_qp_db(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp, | |
954 | struct ib_udata *udata) | |
955 | { | |
956 | struct hns_roce_ucontext *uctx = rdma_udata_to_drv_context( | |
957 | udata, struct hns_roce_ucontext, ibucontext); | |
958 | ||
959 | if (udata) { | |
90ae0b57 | 960 | if (hr_qp->en_flags & HNS_ROCE_QP_CAP_RQ_RECORD_DB) |
cfec045b | 961 | hns_roce_db_unmap_user(uctx, &hr_qp->rdb); |
90ae0b57 | 962 | if (hr_qp->en_flags & HNS_ROCE_QP_CAP_SQ_RECORD_DB) |
cfec045b | 963 | hns_roce_db_unmap_user(uctx, &hr_qp->sdb); |
0045e0d3 YL |
964 | if (hr_qp->en_flags & HNS_ROCE_QP_CAP_DIRECT_WQE) |
965 | qp_user_mmap_entry_remove(hr_qp); | |
cfec045b | 966 | } else { |
90ae0b57 | 967 | if (hr_qp->en_flags & HNS_ROCE_QP_CAP_RQ_RECORD_DB) |
cfec045b XW |
968 | hns_roce_free_db(hr_dev, &hr_qp->rdb); |
969 | } | |
970 | } | |
971 | ||
b37c4139 XW |
972 | static int alloc_kernel_wrid(struct hns_roce_dev *hr_dev, |
973 | struct hns_roce_qp *hr_qp) | |
974 | { | |
975 | struct ib_device *ibdev = &hr_dev->ib_dev; | |
976 | u64 *sq_wrid = NULL; | |
977 | u64 *rq_wrid = NULL; | |
978 | int ret; | |
979 | ||
980 | sq_wrid = kcalloc(hr_qp->sq.wqe_cnt, sizeof(u64), GFP_KERNEL); | |
981 | if (ZERO_OR_NULL_PTR(sq_wrid)) { | |
61918e9b | 982 | ibdev_err(ibdev, "failed to alloc SQ wrid.\n"); |
b37c4139 XW |
983 | return -ENOMEM; |
984 | } | |
985 | ||
986 | if (hr_qp->rq.wqe_cnt) { | |
987 | rq_wrid = kcalloc(hr_qp->rq.wqe_cnt, sizeof(u64), GFP_KERNEL); | |
988 | if (ZERO_OR_NULL_PTR(rq_wrid)) { | |
61918e9b | 989 | ibdev_err(ibdev, "failed to alloc RQ wrid.\n"); |
b37c4139 XW |
990 | ret = -ENOMEM; |
991 | goto err_sq; | |
992 | } | |
993 | } | |
994 | ||
995 | hr_qp->sq.wrid = sq_wrid; | |
996 | hr_qp->rq.wrid = rq_wrid; | |
997 | return 0; | |
998 | err_sq: | |
999 | kfree(sq_wrid); | |
1000 | ||
1001 | return ret; | |
1002 | } | |
1003 | ||
f226f676 | 1004 | static void free_kernel_wrid(struct hns_roce_qp *hr_qp) |
b37c4139 XW |
1005 | { |
1006 | kfree(hr_qp->rq.wrid); | |
1007 | kfree(hr_qp->sq.wrid); | |
1008 | } | |
1009 | ||
6ec429d5 JH |
1010 | static void default_congest_type(struct hns_roce_dev *hr_dev, |
1011 | struct hns_roce_qp *hr_qp) | |
1012 | { | |
1013 | if (hr_qp->ibqp.qp_type == IB_QPT_UD || | |
1014 | hr_qp->ibqp.qp_type == IB_QPT_GSI) | |
1015 | hr_qp->cong_type = CONG_TYPE_DCQCN; | |
1016 | else | |
1017 | hr_qp->cong_type = hr_dev->caps.default_cong_type; | |
1018 | } | |
1019 | ||
1020 | static int set_congest_type(struct hns_roce_qp *hr_qp, | |
1021 | struct hns_roce_ib_create_qp *ucmd) | |
1022 | { | |
1023 | struct hns_roce_dev *hr_dev = to_hr_dev(hr_qp->ibqp.device); | |
1024 | ||
1025 | switch (ucmd->cong_type_flags) { | |
1026 | case HNS_ROCE_CREATE_QP_FLAGS_DCQCN: | |
1027 | hr_qp->cong_type = CONG_TYPE_DCQCN; | |
1028 | break; | |
1029 | case HNS_ROCE_CREATE_QP_FLAGS_LDCP: | |
1030 | hr_qp->cong_type = CONG_TYPE_LDCP; | |
1031 | break; | |
1032 | case HNS_ROCE_CREATE_QP_FLAGS_HC3: | |
1033 | hr_qp->cong_type = CONG_TYPE_HC3; | |
1034 | break; | |
1035 | case HNS_ROCE_CREATE_QP_FLAGS_DIP: | |
1036 | hr_qp->cong_type = CONG_TYPE_DIP; | |
1037 | break; | |
1038 | default: | |
1039 | return -EINVAL; | |
1040 | } | |
1041 | ||
1042 | if (!test_bit(hr_qp->cong_type, (unsigned long *)&hr_dev->caps.cong_cap)) | |
1043 | return -EOPNOTSUPP; | |
1044 | ||
1045 | if (hr_qp->ibqp.qp_type == IB_QPT_UD && | |
1046 | hr_qp->cong_type != CONG_TYPE_DCQCN) | |
1047 | return -EOPNOTSUPP; | |
1048 | ||
1049 | return 0; | |
1050 | } | |
1051 | ||
1052 | static int set_congest_param(struct hns_roce_dev *hr_dev, | |
1053 | struct hns_roce_qp *hr_qp, | |
1054 | struct hns_roce_ib_create_qp *ucmd) | |
1055 | { | |
1056 | if (ucmd->comp_mask & HNS_ROCE_CREATE_QP_MASK_CONGEST_TYPE) | |
1057 | return set_congest_type(hr_qp, ucmd); | |
1058 | ||
1059 | default_congest_type(hr_dev, hr_qp); | |
1060 | ||
1061 | return 0; | |
1062 | } | |
1063 | ||
ae85bf92 XW |
1064 | static int set_qp_param(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp, |
1065 | struct ib_qp_init_attr *init_attr, | |
1066 | struct ib_udata *udata, | |
1067 | struct hns_roce_ib_create_qp *ucmd) | |
1068 | { | |
1069 | struct ib_device *ibdev = &hr_dev->ib_dev; | |
0c5e259b | 1070 | struct hns_roce_ucontext *uctx; |
ae85bf92 XW |
1071 | int ret; |
1072 | ||
ae85bf92 XW |
1073 | if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) |
1074 | hr_qp->sq_signal_bits = IB_SIGNAL_ALL_WR; | |
1075 | else | |
1076 | hr_qp->sq_signal_bits = IB_SIGNAL_REQ_WR; | |
1077 | ||
54d66387 | 1078 | ret = set_rq_size(hr_dev, &init_attr->cap, hr_qp, |
9dd05247 | 1079 | hns_roce_qp_has_rq(init_attr), !!udata); |
ae85bf92 | 1080 | if (ret) { |
54d66387 XW |
1081 | ibdev_err(ibdev, "failed to set user RQ size, ret = %d.\n", |
1082 | ret); | |
ae85bf92 XW |
1083 | return ret; |
1084 | } | |
1085 | ||
1086 | if (udata) { | |
1c0ca9cd WL |
1087 | ret = ib_copy_from_udata(ucmd, udata, |
1088 | min(udata->inlen, sizeof(*ucmd))); | |
1089 | if (ret) { | |
1090 | ibdev_err(ibdev, | |
1091 | "failed to copy QP ucmd, ret = %d\n", ret); | |
1092 | return ret; | |
ae85bf92 XW |
1093 | } |
1094 | ||
0c5e259b L |
1095 | uctx = rdma_udata_to_drv_context(udata, struct hns_roce_ucontext, |
1096 | ibucontext); | |
1097 | hr_qp->config = uctx->config; | |
ae85bf92 XW |
1098 | ret = set_user_sq_size(hr_dev, &init_attr->cap, hr_qp, ucmd); |
1099 | if (ret) | |
61918e9b YL |
1100 | ibdev_err(ibdev, |
1101 | "failed to set user SQ size, ret = %d.\n", | |
1102 | ret); | |
6ec429d5 JH |
1103 | |
1104 | ret = set_congest_param(hr_dev, hr_qp, ucmd); | |
1105 | if (ret) | |
1106 | return ret; | |
ae85bf92 | 1107 | } else { |
0c5e259b L |
1108 | if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09) |
1109 | hr_qp->config = HNS_ROCE_EXSGE_FLAGS; | |
ae85bf92 XW |
1110 | ret = set_kernel_sq_size(hr_dev, &init_attr->cap, hr_qp); |
1111 | if (ret) | |
61918e9b YL |
1112 | ibdev_err(ibdev, |
1113 | "failed to set kernel SQ size, ret = %d.\n", | |
1114 | ret); | |
6ec429d5 JH |
1115 | |
1116 | default_congest_type(hr_dev, hr_qp); | |
ae85bf92 XW |
1117 | } |
1118 | ||
1119 | return ret; | |
1120 | } | |
1121 | ||
9a443537 | 1122 | static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev, |
9a443537 | 1123 | struct ib_qp_init_attr *init_attr, |
df83a66e | 1124 | struct ib_udata *udata, |
9a443537 | 1125 | struct hns_roce_qp *hr_qp) |
1126 | { | |
7b48221c | 1127 | struct hns_roce_ib_create_qp_resp resp = {}; |
cfec045b | 1128 | struct ib_device *ibdev = &hr_dev->ib_dev; |
c64e9710 | 1129 | struct hns_roce_ib_create_qp ucmd = {}; |
8d18ad83 | 1130 | int ret; |
9a443537 | 1131 | |
1132 | mutex_init(&hr_qp->mutex); | |
1133 | spin_lock_init(&hr_qp->sq.lock); | |
1134 | spin_lock_init(&hr_qp->rq.lock); | |
1135 | ||
1136 | hr_qp->state = IB_QPS_RESET; | |
b5374286 | 1137 | hr_qp->flush_flag = 0; |
9a443537 | 1138 | |
1f11a761 JG |
1139 | if (init_attr->create_flags) |
1140 | return -EOPNOTSUPP; | |
1141 | ||
ae85bf92 | 1142 | ret = set_qp_param(hr_dev, hr_qp, init_attr, udata, &ucmd); |
9a443537 | 1143 | if (ret) { |
61918e9b | 1144 | ibdev_err(ibdev, "failed to set QP param, ret = %d.\n", ret); |
9a84848d | 1145 | goto err_out; |
9a443537 | 1146 | } |
1147 | ||
cfec045b | 1148 | if (!udata) { |
b37c4139 XW |
1149 | ret = alloc_kernel_wrid(hr_dev, hr_qp); |
1150 | if (ret) { | |
61918e9b YL |
1151 | ibdev_err(ibdev, "failed to alloc wrid, ret = %d.\n", |
1152 | ret); | |
9a84848d | 1153 | goto err_out; |
76827087 | 1154 | } |
9a443537 | 1155 | } |
1156 | ||
24c22112 | 1157 | ret = alloc_qp_buf(hr_dev, hr_qp, init_attr, udata, ucmd.buf_addr); |
8d18ad83 | 1158 | if (ret) { |
61918e9b | 1159 | ibdev_err(ibdev, "failed to alloc QP buffer, ret = %d.\n", ret); |
0f00571f | 1160 | goto err_buf; |
df83a66e XW |
1161 | } |
1162 | ||
9e03dbea | 1163 | ret = alloc_qpn(hr_dev, hr_qp, init_attr); |
df83a66e | 1164 | if (ret) { |
61918e9b | 1165 | ibdev_err(ibdev, "failed to alloc QPN, ret = %d.\n", ret); |
0f00571f LC |
1166 | goto err_qpn; |
1167 | } | |
1168 | ||
1169 | ret = alloc_qp_db(hr_dev, hr_qp, init_attr, udata, &ucmd, &resp); | |
1170 | if (ret) { | |
1171 | ibdev_err(ibdev, "failed to alloc QP doorbell, ret = %d.\n", | |
1172 | ret); | |
1173 | goto err_db; | |
8d18ad83 LO |
1174 | } |
1175 | ||
b71961d1 XW |
1176 | ret = alloc_qpc(hr_dev, hr_qp); |
1177 | if (ret) { | |
61918e9b YL |
1178 | ibdev_err(ibdev, "failed to alloc QP context, ret = %d.\n", |
1179 | ret); | |
0f00571f | 1180 | goto err_qpc; |
b71961d1 XW |
1181 | } |
1182 | ||
1183 | ret = hns_roce_qp_store(hr_dev, hr_qp, init_attr); | |
1184 | if (ret) { | |
61918e9b | 1185 | ibdev_err(ibdev, "failed to store QP, ret = %d.\n", ret); |
0f00571f | 1186 | goto err_store; |
9a443537 | 1187 | } |
1188 | ||
de77503a | 1189 | if (udata) { |
d2e0ccff | 1190 | resp.cap_flags = hr_qp->en_flags; |
de77503a LO |
1191 | ret = ib_copy_to_udata(udata, &resp, |
1192 | min(udata->outlen, sizeof(resp))); | |
cfec045b XW |
1193 | if (ret) { |
1194 | ibdev_err(ibdev, "copy qp resp failed!\n"); | |
b71961d1 | 1195 | goto err_store; |
cfec045b | 1196 | } |
e088a685 | 1197 | } |
aa84fa18 YL |
1198 | |
1199 | if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL) { | |
1200 | ret = hr_dev->hw->qp_flow_control_init(hr_dev, hr_qp); | |
1201 | if (ret) | |
0f00571f | 1202 | goto err_flow_ctrl; |
aa84fa18 YL |
1203 | } |
1204 | ||
df83a66e | 1205 | hr_qp->ibqp.qp_num = hr_qp->qpn; |
9a443537 | 1206 | hr_qp->event = hns_roce_ib_qp_event; |
8f9513d8 | 1207 | refcount_set(&hr_qp->refcount, 1); |
b71961d1 | 1208 | init_completion(&hr_qp->free); |
626903e9 | 1209 | |
9a443537 | 1210 | return 0; |
1211 | ||
0f00571f | 1212 | err_flow_ctrl: |
b71961d1 | 1213 | hns_roce_qp_remove(hr_dev, hr_qp); |
0f00571f | 1214 | err_store: |
b71961d1 | 1215 | free_qpc(hr_dev, hr_qp); |
0f00571f LC |
1216 | err_qpc: |
1217 | free_qp_db(hr_dev, hr_qp, udata); | |
1218 | err_db: | |
df83a66e | 1219 | free_qpn(hr_dev, hr_qp); |
0f00571f | 1220 | err_qpn: |
24c22112 | 1221 | free_qp_buf(hr_dev, hr_qp); |
0f00571f | 1222 | err_buf: |
f226f676 | 1223 | free_kernel_wrid(hr_qp); |
9a84848d | 1224 | err_out: |
1225 | mutex_destroy(&hr_qp->mutex); | |
9a443537 | 1226 | return ret; |
1227 | } | |
1228 | ||
e365b26c XW |
1229 | void hns_roce_qp_destroy(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp, |
1230 | struct ib_udata *udata) | |
1231 | { | |
8f9513d8 | 1232 | if (refcount_dec_and_test(&hr_qp->refcount)) |
b71961d1 XW |
1233 | complete(&hr_qp->free); |
1234 | wait_for_completion(&hr_qp->free); | |
1235 | ||
1236 | free_qpc(hr_dev, hr_qp); | |
df83a66e | 1237 | free_qpn(hr_dev, hr_qp); |
24c22112 | 1238 | free_qp_buf(hr_dev, hr_qp); |
f226f676 | 1239 | free_kernel_wrid(hr_qp); |
cfec045b | 1240 | free_qp_db(hr_dev, hr_qp, udata); |
9a84848d | 1241 | mutex_destroy(&hr_qp->mutex); |
e365b26c XW |
1242 | } |
1243 | ||
66d86e52 WL |
1244 | static int check_qp_type(struct hns_roce_dev *hr_dev, enum ib_qp_type type, |
1245 | bool is_user) | |
1246 | { | |
1247 | switch (type) { | |
32548870 WL |
1248 | case IB_QPT_XRC_INI: |
1249 | case IB_QPT_XRC_TGT: | |
1250 | if (!(hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_XRC)) | |
1251 | goto out; | |
1252 | break; | |
66d86e52 | 1253 | case IB_QPT_UD: |
38d22088 | 1254 | if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08 && |
66d86e52 WL |
1255 | is_user) |
1256 | goto out; | |
32548870 | 1257 | break; |
66d86e52 WL |
1258 | case IB_QPT_RC: |
1259 | case IB_QPT_GSI: | |
1260 | break; | |
1261 | default: | |
1262 | goto out; | |
1263 | } | |
1264 | ||
1265 | return 0; | |
1266 | ||
1267 | out: | |
1268 | ibdev_err(&hr_dev->ib_dev, "unsupported QP type %d\n", type); |
1269 | ||
1270 | return -EOPNOTSUPP; | |
1271 | } | |
1272 | ||
514aee66 LR |
1273 | int hns_roce_create_qp(struct ib_qp *qp, struct ib_qp_init_attr *init_attr, |
1274 | struct ib_udata *udata) | |
9a443537 | 1275 | { |
514aee66 | 1276 | struct ib_device *ibdev = qp->device; |
32548870 | 1277 | struct hns_roce_dev *hr_dev = to_hr_dev(ibdev); |
514aee66 | 1278 | struct hns_roce_qp *hr_qp = to_hr_qp(qp); |
9a443537 | 1279 | int ret; |
1280 | ||
66d86e52 WL |
1281 | ret = check_qp_type(hr_dev, init_attr->qp_type, !!udata); |
1282 | if (ret) | |
eb7854d6 | 1283 | goto err_out; |
9a443537 | 1284 | |
e66e4959 | 1285 | if (init_attr->qp_type == IB_QPT_XRC_TGT) |
32548870 | 1286 | hr_qp->xrcdn = to_hr_xrcd(init_attr->xrcd)->xrcdn; |
32548870 | 1287 | |
b925c555 | 1288 | if (init_attr->qp_type == IB_QPT_GSI) { |
7716809e LO |
1289 | hr_qp->port = init_attr->port_num - 1; |
1290 | hr_qp->phy_port = hr_dev->iboe.phy_port[hr_qp->port]; | |
9a443537 | 1291 | } |
1292 | ||
f4caa864 | 1293 | ret = hns_roce_create_qp_common(hr_dev, init_attr, udata, hr_qp); |
514aee66 | 1294 | if (ret) |
f0588567 | 1295 | ibdev_err(ibdev, "create QP type 0x%x failed(%d)\n", |
b925c555 | 1296 | init_attr->qp_type, ret); |
66d86e52 | 1297 | |
eb7854d6 JH |
1298 | err_out: |
1299 | if (ret) | |
1300 | atomic64_inc(&hr_dev->dfx_cnt[HNS_ROCE_DFX_QP_CREATE_ERR_CNT]); | |
1301 | ||
514aee66 | 1302 | return ret; |
9a443537 | 1303 | } |
1304 | ||
1305 | int to_hr_qp_type(int qp_type) | |
1306 | { | |
32548870 WL |
1307 | switch (qp_type) { |
1308 | case IB_QPT_RC: | |
1309 | return SERV_TYPE_RC; | |
32548870 WL |
1310 | case IB_QPT_UD: |
1311 | case IB_QPT_GSI: | |
1312 | return SERV_TYPE_UD; | |
1313 | case IB_QPT_XRC_INI: | |
1314 | case IB_QPT_XRC_TGT: | |
1315 | return SERV_TYPE_XRC; | |
1316 | default: | |
1317 | return -1; | |
1318 | } | |
9a443537 | 1319 | } |
1320 | ||
8ea417ff LO |
1321 | static int check_mtu_validate(struct hns_roce_dev *hr_dev, |
1322 | struct hns_roce_qp *hr_qp, | |
1323 | struct ib_qp_attr *attr, int attr_mask) | |
9a443537 | 1324 | { |
cb814642 | 1325 | enum ib_mtu active_mtu; |
8ea417ff | 1326 | int p; |
9a443537 | 1327 | |
8ea417ff | 1328 | p = attr_mask & IB_QP_PORT ? (attr->port_num - 1) : hr_qp->port; |
a7325af7 | 1329 | active_mtu = iboe_get_mtu(hr_dev->iboe.netdevs[p]->mtu); |
de77503a | 1330 | |
8ea417ff LO |
1331 | if ((hr_dev->caps.max_mtu >= IB_MTU_2048 && |
1332 | attr->path_mtu > hr_dev->caps.max_mtu) || | |
1333 | attr->path_mtu < IB_MTU_256 || attr->path_mtu > active_mtu) { | |
db50077b LO |
1334 | ibdev_err(&hr_dev->ib_dev, |
1335 | "attr path_mtu(%d)invalid while modify qp", | |
8ea417ff LO |
1336 | attr->path_mtu); |
1337 | return -EINVAL; | |
0425e3e6 YL |
1338 | } |
1339 | ||
8ea417ff LO |
1340 | return 0; |
1341 | } | |
1342 | ||
1343 | static int hns_roce_check_qp_attr(struct ib_qp *ibqp, struct ib_qp_attr *attr, | |
1344 | int attr_mask) | |
1345 | { | |
1346 | struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device); | |
1347 | struct hns_roce_qp *hr_qp = to_hr_qp(ibqp); | |
8ea417ff | 1348 | int p; |
9a443537 | 1349 | |
1350 | if ((attr_mask & IB_QP_PORT) && | |
1351 | (attr->port_num == 0 || attr->port_num > hr_dev->caps.num_ports)) { | |
61918e9b YL |
1352 | ibdev_err(&hr_dev->ib_dev, "invalid attr, port_num = %u.\n", |
1353 | attr->port_num); | |
8ea417ff | 1354 | return -EINVAL; |
9a443537 | 1355 | } |
1356 | ||
1357 | if (attr_mask & IB_QP_PKEY_INDEX) { | |
1358 | p = attr_mask & IB_QP_PORT ? (attr->port_num - 1) : hr_qp->port; | |
1359 | if (attr->pkey_index >= hr_dev->caps.pkey_table_len[p]) { | |
db50077b | 1360 | ibdev_err(&hr_dev->ib_dev, |
61918e9b YL |
1361 | "invalid attr, pkey_index = %u.\n", |
1362 | attr->pkey_index); | |
8ea417ff | 1363 | return -EINVAL; |
cb814642 LO |
1364 | } |
1365 | } | |
1366 | ||
9a443537 | 1367 | if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC && |
1368 | attr->max_rd_atomic > hr_dev->caps.max_qp_init_rdma) { | |
db50077b | 1369 | ibdev_err(&hr_dev->ib_dev, |
61918e9b YL |
1370 | "invalid attr, max_rd_atomic = %u.\n", |
1371 | attr->max_rd_atomic); | |
8ea417ff | 1372 | return -EINVAL; |
9a443537 | 1373 | } |
1374 | ||
1375 | if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC && | |
1376 | attr->max_dest_rd_atomic > hr_dev->caps.max_qp_dest_rdma) { | |
db50077b | 1377 | ibdev_err(&hr_dev->ib_dev, |
61918e9b YL |
1378 | "invalid attr, max_dest_rd_atomic = %u.\n", |
1379 | attr->max_dest_rd_atomic); | |
8ea417ff LO |
1380 | return -EINVAL; |
1381 | } | |
1382 | ||
1383 | if (attr_mask & IB_QP_PATH_MTU) | |
1384 | return check_mtu_validate(hr_dev, hr_qp, attr, attr_mask); | |
1385 | ||
1386 | return 0; | |
1387 | } | |
1388 | ||
1389 | int hns_roce_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, | |
1390 | int attr_mask, struct ib_udata *udata) | |
1391 | { | |
1392 | struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device); | |
ee20cc17 | 1393 | struct hns_roce_ib_modify_qp_resp resp = {}; |
8ea417ff LO |
1394 | struct hns_roce_qp *hr_qp = to_hr_qp(ibqp); |
1395 | enum ib_qp_state cur_state, new_state; | |
8ea417ff LO |
1396 | int ret = -EINVAL; |
1397 | ||
1398 | mutex_lock(&hr_qp->mutex); | |
1399 | ||
e0ef0f68 LC |
1400 | if (attr_mask & IB_QP_CUR_STATE && attr->cur_qp_state != hr_qp->state) |
1401 | goto out; | |
1402 | ||
1403 | cur_state = hr_qp->state; | |
8ea417ff LO |
1404 | new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state; |
1405 | ||
1406 | if (ibqp->uobject && | |
1407 | (attr_mask & IB_QP_STATE) && new_state == IB_QPS_ERR) { | |
90ae0b57 | 1408 | if (hr_qp->en_flags & HNS_ROCE_QP_CAP_SQ_RECORD_DB) { |
8ea417ff LO |
1409 | hr_qp->sq.head = *(int *)(hr_qp->sdb.virt_addr); |
1410 | ||
90ae0b57 | 1411 | if (hr_qp->en_flags & HNS_ROCE_QP_CAP_RQ_RECORD_DB) |
8ea417ff LO |
1412 | hr_qp->rq.head = *(int *)(hr_qp->rdb.virt_addr); |
1413 | } else { | |
db50077b LO |
1414 | ibdev_warn(&hr_dev->ib_dev, |
1415 | "flush cqe is not supported in userspace!\n"); | |
8ea417ff LO |
1416 | goto out; |
1417 | } | |
1418 | } | |
1419 | ||
1420 | if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type, | |
1421 | attr_mask)) { | |
db50077b | 1422 | ibdev_err(&hr_dev->ib_dev, "ib_modify_qp_is_ok failed\n"); |
9a443537 | 1423 | goto out; |
1424 | } | |
1425 | ||
8ea417ff LO |
1426 | ret = hns_roce_check_qp_attr(ibqp, attr, attr_mask); |
1427 | if (ret) | |
1428 | goto out; | |
1429 | ||
ab5cbb9d | 1430 | if (cur_state == new_state && cur_state == IB_QPS_RESET) |
9a443537 | 1431 | goto out; |
9a443537 | 1432 | |
1433 | ret = hr_dev->hw->modify_qp(ibqp, attr, attr_mask, cur_state, | |
2bb185c6 | 1434 | new_state, udata); |
ee20cc17 JH |
1435 | if (ret) |
1436 | goto out; | |
1437 | ||
1438 | if (udata && udata->outlen) { | |
1439 | resp.tc_mode = hr_qp->tc_mode; | |
1440 | resp.priority = hr_qp->sl; | |
1441 | ret = ib_copy_to_udata(udata, &resp, | |
1442 | min(udata->outlen, sizeof(resp))); | |
1443 | if (ret) | |
1444 | ibdev_err_ratelimited(&hr_dev->ib_dev, | |
1445 | "failed to copy modify qp resp.\n"); | |
1446 | } | |
9a443537 | 1447 | |
1448 | out: | |
1449 | mutex_unlock(&hr_qp->mutex); | |
eb7854d6 JH |
1450 | if (ret) |
1451 | atomic64_inc(&hr_dev->dfx_cnt[HNS_ROCE_DFX_QP_MODIFY_ERR_CNT]); | |
9a443537 | 1452 | |
1453 | return ret; | |
1454 | } | |
1455 | ||
1456 | void hns_roce_lock_cqs(struct hns_roce_cq *send_cq, struct hns_roce_cq *recv_cq) | |
1457 | __acquires(&send_cq->lock) __acquires(&recv_cq->lock) | |
1458 | { | |
626903e9 XW |
1459 | if (unlikely(send_cq == NULL && recv_cq == NULL)) { |
1460 | __acquire(&send_cq->lock); | |
1461 | __acquire(&recv_cq->lock); | |
1462 | } else if (unlikely(send_cq != NULL && recv_cq == NULL)) { | |
1463 | spin_lock_irq(&send_cq->lock); | |
1464 | __acquire(&recv_cq->lock); | |
1465 | } else if (unlikely(send_cq == NULL && recv_cq != NULL)) { | |
1466 | spin_lock_irq(&recv_cq->lock); | |
1467 | __acquire(&send_cq->lock); | |
1468 | } else if (send_cq == recv_cq) { | |
9a443537 | 1469 | spin_lock_irq(&send_cq->lock); |
1470 | __acquire(&recv_cq->lock); | |
1471 | } else if (send_cq->cqn < recv_cq->cqn) { | |
1472 | spin_lock_irq(&send_cq->lock); | |
1473 | spin_lock_nested(&recv_cq->lock, SINGLE_DEPTH_NESTING); | |
1474 | } else { | |
1475 | spin_lock_irq(&recv_cq->lock); | |
1476 | spin_lock_nested(&send_cq->lock, SINGLE_DEPTH_NESTING); | |
1477 | } | |
1478 | } | |
1479 | ||
1480 | void hns_roce_unlock_cqs(struct hns_roce_cq *send_cq, | |
1481 | struct hns_roce_cq *recv_cq) __releases(&send_cq->lock) | |
1482 | __releases(&recv_cq->lock) | |
1483 | { | |
626903e9 XW |
1484 | if (unlikely(send_cq == NULL && recv_cq == NULL)) { |
1485 | __release(&recv_cq->lock); | |
1486 | __release(&send_cq->lock); | |
1487 | } else if (unlikely(send_cq != NULL && recv_cq == NULL)) { | |
1488 | __release(&recv_cq->lock); | |
1489 | spin_unlock(&send_cq->lock); | |
1490 | } else if (unlikely(send_cq == NULL && recv_cq != NULL)) { | |
1491 | __release(&send_cq->lock); | |
1492 | spin_unlock(&recv_cq->lock); | |
1493 | } else if (send_cq == recv_cq) { | |
9a443537 | 1494 | __release(&recv_cq->lock); |
1495 | spin_unlock_irq(&send_cq->lock); | |
1496 | } else if (send_cq->cqn < recv_cq->cqn) { | |
1497 | spin_unlock(&recv_cq->lock); | |
1498 | spin_unlock_irq(&send_cq->lock); | |
1499 | } else { | |
1500 | spin_unlock(&send_cq->lock); | |
1501 | spin_unlock_irq(&recv_cq->lock); | |
1502 | } | |
1503 | } | |
1504 | ||
d147583e | 1505 | static inline void *get_wqe(struct hns_roce_qp *hr_qp, u32 offset) |
9a443537 | 1506 | { |
d563099e | 1507 | return hns_roce_buf_offset(hr_qp->mtr.kmem, offset); |
9a443537 | 1508 | } |
1509 | ||
dcdc366a | 1510 | void *hns_roce_get_recv_wqe(struct hns_roce_qp *hr_qp, unsigned int n) |
9a443537 | 1511 | { |
9a443537 | 1512 | return get_wqe(hr_qp, hr_qp->rq.offset + (n << hr_qp->rq.wqe_shift)); |
1513 | } | |
1514 | ||
dcdc366a | 1515 | void *hns_roce_get_send_wqe(struct hns_roce_qp *hr_qp, unsigned int n) |
9a443537 | 1516 | { |
9a443537 | 1517 | return get_wqe(hr_qp, hr_qp->sq.offset + (n << hr_qp->sq.wqe_shift)); |
1518 | } | |
1519 | ||
dcdc366a | 1520 | void *hns_roce_get_extend_sge(struct hns_roce_qp *hr_qp, unsigned int n) |
926a01dc | 1521 | { |
d563099e | 1522 | return get_wqe(hr_qp, hr_qp->sge.offset + (n << hr_qp->sge.sge_shift)); |
926a01dc | 1523 | } |
926a01dc | 1524 | |
dcdc366a | 1525 | bool hns_roce_wq_overflow(struct hns_roce_wq *hr_wq, u32 nreq, |
9a443537 | 1526 | struct ib_cq *ib_cq) |
1527 | { | |
1528 | struct hns_roce_cq *hr_cq; | |
1529 | u32 cur; | |
1530 | ||
1531 | cur = hr_wq->head - hr_wq->tail; | |
ec6adad0 | 1532 | if (likely(cur + nreq < hr_wq->wqe_cnt)) |
3756c7f5 | 1533 | return false; |
9a443537 | 1534 | |
1535 | hr_cq = to_hr_cq(ib_cq); | |
1536 | spin_lock(&hr_cq->lock); | |
1537 | cur = hr_wq->head - hr_wq->tail; | |
1538 | spin_unlock(&hr_cq->lock); | |
1539 | ||
ec6adad0 | 1540 | return cur + nreq >= hr_wq->wqe_cnt; |
9a443537 | 1541 | } |
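
The overflow test above computes cur = head - tail, which relies on head and tail behaving as free-running unsigned counters, so the subtraction still gives the number of outstanding WQEs after the 32-bit values wrap. A minimal standalone check of that property (illustrative only):

```c
/* Illustrative only: the wrap-safe producer/consumer check used by
 * hns_roce_wq_overflow(); head/tail are treated as free-running u32 counters.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static int wq_overflow(uint32_t head, uint32_t tail, uint32_t nreq,
		       uint32_t wqe_cnt)
{
	uint32_t cur = head - tail; /* outstanding WQEs, correct across wrap */

	return cur + nreq >= wqe_cnt;
}

int main(void)
{
	/* 3 entries outstanding across the 2^32 wrap, queue depth 8 */
	assert(!wq_overflow(1u, 0xfffffffeu, 4, 8));
	assert(wq_overflow(1u, 0xfffffffeu, 5, 8));
	printf("wrap-safe overflow check ok\n");
	return 0;
}
```
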
1542 | ||
eb653eda | 1543 | int hns_roce_init_qp_table(struct hns_roce_dev *hr_dev) |
9a443537 | 1544 | { |
1545 | struct hns_roce_qp_table *qp_table = &hr_dev->qp_table; | |
71586dd2 YL |
1546 | unsigned int reserved_from_bot; |
1547 | unsigned int i; | |
9a443537 | 1548 | |
eb653eda JH |
1549 | qp_table->idx_table.spare_idx = kcalloc(hr_dev->caps.num_qps, |
1550 | sizeof(u32), GFP_KERNEL); | |
1551 | if (!qp_table->idx_table.spare_idx) | |
1552 | return -ENOMEM; | |
1553 | ||
aa84fa18 | 1554 | mutex_init(&qp_table->scc_mutex); |
9293d3fc | 1555 | mutex_init(&qp_table->bank_mutex); |
736b5a70 | 1556 | xa_init(&hr_dev->qp_table_xa); |
9a443537 | 1557 | |
21b97f53 | 1558 | reserved_from_bot = hr_dev->caps.reserved_qps; |
06ef0ee4 | 1559 | |
71586dd2 YL |
1560 | for (i = 0; i < reserved_from_bot; i++) { |
1561 | hr_dev->qp_table.bank[get_qp_bankid(i)].inuse++; | |
1562 | hr_dev->qp_table.bank[get_qp_bankid(i)].min++; | |
1563 | } | |
1564 | ||
1565 | for (i = 0; i < HNS_ROCE_QP_BANK_NUM; i++) { | |
1566 | ida_init(&hr_dev->qp_table.bank[i].ida); | |
1567 | hr_dev->qp_table.bank[i].max = hr_dev->caps.num_qps / | |
1568 | HNS_ROCE_QP_BANK_NUM - 1; | |
1569 | hr_dev->qp_table.bank[i].next = hr_dev->qp_table.bank[i].min; | |
9a443537 | 1570 | } |
eb653eda JH |
1571 | |
1572 | return 0; | |
9a443537 | 1573 | } |
1574 | ||
1575 | void hns_roce_cleanup_qp_table(struct hns_roce_dev *hr_dev) | |
1576 | { | |
71586dd2 YL |
1577 | int i; |
1578 | ||
1579 | for (i = 0; i < HNS_ROCE_QP_BANK_NUM; i++) | |
1580 | ida_destroy(&hr_dev->qp_table.bank[i].ida); | |
9a84848d | 1581 | mutex_destroy(&hr_dev->qp_table.bank_mutex); |
1582 | mutex_destroy(&hr_dev->qp_table.scc_mutex); | |
eb653eda | 1583 | kfree(hr_dev->qp_table.idx_table.spare_idx); |
9a443537 | 1584 | } |