Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma
[linux-2.6-block.git] / drivers / infiniband / hw / mana / qp.c
CommitLineData
0266a177
LL
1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * Copyright (c) 2022, Microsoft Corporation. All rights reserved.
4 */
5
6#include "mana_ib.h"
7
8static int mana_ib_cfg_vport_steering(struct mana_ib_dev *dev,
9 struct net_device *ndev,
10 mana_handle_t default_rxobj,
11 mana_handle_t ind_table[],
12 u32 log_ind_tbl_size, u32 rx_hash_key_len,
13 u8 *rx_hash_key)
14{
15 struct mana_port_context *mpc = netdev_priv(ndev);
21453285 16 struct mana_cfg_rx_steer_req_v2 *req;
0266a177 17 struct mana_cfg_rx_steer_resp resp = {};
0266a177 18 struct gdma_context *gc;
0266a177
LL
19 u32 req_buf_size;
20 int i, err;
21
71c8cbfc 22 gc = mdev_to_gc(dev);
0266a177 23
29b8e13a 24 req_buf_size = struct_size(req, indir_tab, MANA_INDIRECT_TABLE_SIZE);
0266a177
LL
25 req = kzalloc(req_buf_size, GFP_KERNEL);
26 if (!req)
27 return -ENOMEM;
28
29 mana_gd_init_req_hdr(&req->hdr, MANA_CONFIG_VPORT_RX, req_buf_size,
30 sizeof(resp));
31
21453285
LL
32 req->hdr.req.msg_version = GDMA_MESSAGE_V2;
33
0266a177
LL
34 req->vport = mpc->port_handle;
35 req->rx_enable = 1;
36 req->update_default_rxobj = 1;
37 req->default_rxobj = default_rxobj;
71c8cbfc 38 req->hdr.dev_id = gc->mana.dev_id;
0266a177
LL
39
40 /* If there are more than 1 entries in indirection table, enable RSS */
41 if (log_ind_tbl_size)
42 req->rss_enable = true;
43
44 req->num_indir_entries = MANA_INDIRECT_TABLE_SIZE;
29b8e13a
EA
45 req->indir_tab_offset = offsetof(struct mana_cfg_rx_steer_req_v2,
46 indir_tab);
0266a177 47 req->update_indir_tab = true;
21453285 48 req->cqe_coalescing_enable = 1;
0266a177 49
0266a177
LL
50 /* The ind table passed to the hardware must have
51 * MANA_INDIRECT_TABLE_SIZE entries. Adjust the verb
52 * ind_table to MANA_INDIRECT_TABLE_SIZE if required
53 */
54 ibdev_dbg(&dev->ib_dev, "ind table size %u\n", 1 << log_ind_tbl_size);
55 for (i = 0; i < MANA_INDIRECT_TABLE_SIZE; i++) {
29b8e13a 56 req->indir_tab[i] = ind_table[i % (1 << log_ind_tbl_size)];
0266a177 57 ibdev_dbg(&dev->ib_dev, "index %u handle 0x%llx\n", i,
29b8e13a 58 req->indir_tab[i]);
0266a177
LL
59 }
60
61 req->update_hashkey = true;
62 if (rx_hash_key_len)
63 memcpy(req->hashkey, rx_hash_key, rx_hash_key_len);
64 else
65 netdev_rss_key_fill(req->hashkey, MANA_HASH_KEY_SIZE);
66
67 ibdev_dbg(&dev->ib_dev, "vport handle %llu default_rxobj 0x%llx\n",
68 req->vport, default_rxobj);
69
70 err = mana_gd_send_request(gc, req_buf_size, req, sizeof(resp), &resp);
71 if (err) {
72 netdev_err(ndev, "Failed to configure vPort RX: %d\n", err);
73 goto out;
74 }
75
76 if (resp.hdr.status) {
77 netdev_err(ndev, "vPort RX configuration failed: 0x%x\n",
78 resp.hdr.status);
79 err = -EPROTO;
80 goto out;
81 }
82
83 netdev_info(ndev, "Configured steering vPort %llu log_entries %u\n",
84 mpc->port_handle, log_ind_tbl_size);
85
86out:
87 kfree(req);
88 return err;
89}
90
91static int mana_ib_create_qp_rss(struct ib_qp *ibqp, struct ib_pd *pd,
92 struct ib_qp_init_attr *attr,
93 struct ib_udata *udata)
94{
95 struct mana_ib_qp *qp = container_of(ibqp, struct mana_ib_qp, ibqp);
96 struct mana_ib_dev *mdev =
97 container_of(pd->device, struct mana_ib_dev, ib_dev);
98 struct ib_rwq_ind_table *ind_tbl = attr->rwq_ind_tbl;
99 struct mana_ib_create_qp_rss_resp resp = {};
100 struct mana_ib_create_qp_rss ucmd = {};
0266a177
LL
101 mana_handle_t *mana_ind_table;
102 struct mana_port_context *mpc;
a7f0636d 103 unsigned int ind_tbl_size;
0266a177
LL
104 struct net_device *ndev;
105 struct mana_ib_cq *cq;
106 struct mana_ib_wq *wq;
c15d7802 107 struct mana_eq *eq;
0266a177
LL
108 struct ib_cq *ibcq;
109 struct ib_wq *ibwq;
110 int i = 0;
111 u32 port;
112 int ret;
113
0266a177
LL
114 if (!udata || udata->inlen < sizeof(ucmd))
115 return -EINVAL;
116
117 ret = ib_copy_from_udata(&ucmd, udata, min(sizeof(ucmd), udata->inlen));
118 if (ret) {
119 ibdev_dbg(&mdev->ib_dev,
120 "Failed copy from udata for create rss-qp, err %d\n",
121 ret);
122 return ret;
123 }
124
2c20e20b 125 if (attr->cap.max_recv_wr > mdev->adapter_caps.max_qp_wr) {
0266a177
LL
126 ibdev_dbg(&mdev->ib_dev,
127 "Requested max_recv_wr %d exceeding limit\n",
128 attr->cap.max_recv_wr);
129 return -EINVAL;
130 }
131
132 if (attr->cap.max_recv_sge > MAX_RX_WQE_SGL_ENTRIES) {
133 ibdev_dbg(&mdev->ib_dev,
134 "Requested max_recv_sge %d exceeding limit\n",
135 attr->cap.max_recv_sge);
136 return -EINVAL;
137 }
138
139 ind_tbl_size = 1 << ind_tbl->log_ind_tbl_size;
140 if (ind_tbl_size > MANA_INDIRECT_TABLE_SIZE) {
141 ibdev_dbg(&mdev->ib_dev,
142 "Indirect table size %d exceeding limit\n",
143 ind_tbl_size);
144 return -EINVAL;
145 }
146
147 if (ucmd.rx_hash_function != MANA_IB_RX_HASH_FUNC_TOEPLITZ) {
148 ibdev_dbg(&mdev->ib_dev,
149 "RX Hash function is not supported, %d\n",
150 ucmd.rx_hash_function);
151 return -EINVAL;
152 }
153
154 /* IB ports start with 1, MANA start with 0 */
155 port = ucmd.port;
3b73eb3a
KT
156 ndev = mana_ib_get_netdev(pd->device, port);
157 if (!ndev) {
0266a177
LL
158 ibdev_dbg(&mdev->ib_dev, "Invalid port %u in creating qp\n",
159 port);
160 return -EINVAL;
161 }
0266a177
LL
162 mpc = netdev_priv(ndev);
163
164 ibdev_dbg(&mdev->ib_dev, "rx_hash_function %d port %d\n",
165 ucmd.rx_hash_function, port);
166
167 mana_ind_table = kcalloc(ind_tbl_size, sizeof(mana_handle_t),
168 GFP_KERNEL);
169 if (!mana_ind_table) {
170 ret = -ENOMEM;
171 goto fail;
172 }
173
174 qp->port = port;
175
176 for (i = 0; i < ind_tbl_size; i++) {
177 struct mana_obj_spec wq_spec = {};
178 struct mana_obj_spec cq_spec = {};
179
180 ibwq = ind_tbl->ind_tbl[i];
181 wq = container_of(ibwq, struct mana_ib_wq, ibwq);
182
183 ibcq = ibwq->cq;
184 cq = container_of(ibcq, struct mana_ib_cq, ibcq);
185
688bac28 186 wq_spec.gdma_region = wq->queue.gdma_region;
0266a177
LL
187 wq_spec.queue_size = wq->wq_buf_size;
188
60a7ac0b 189 cq_spec.gdma_region = cq->queue.gdma_region;
0266a177
LL
190 cq_spec.queue_size = cq->cqe * COMP_ENTRY_SIZE;
191 cq_spec.modr_ctx_id = 0;
23f59f4e 192 eq = &mpc->ac->eqs[cq->comp_vector];
c15d7802 193 cq_spec.attached_eq = eq->eq->id;
0266a177
LL
194
195 ret = mana_create_wq_obj(mpc, mpc->port_handle, GDMA_RQ,
196 &wq_spec, &cq_spec, &wq->rx_object);
c15d7802
LL
197 if (ret) {
198 /* Do cleanup starting with index i-1 */
199 i--;
0266a177 200 goto fail;
c15d7802 201 }
0266a177
LL
202
203 /* The GDMA regions are now owned by the WQ object */
688bac28 204 wq->queue.gdma_region = GDMA_INVALID_DMA_REGION;
60a7ac0b 205 cq->queue.gdma_region = GDMA_INVALID_DMA_REGION;
0266a177 206
688bac28 207 wq->queue.id = wq_spec.queue_index;
60a7ac0b 208 cq->queue.id = cq_spec.queue_index;
0266a177
LL
209
210 ibdev_dbg(&mdev->ib_dev,
c8fc935f
KT
211 "rx_object 0x%llx wq id %llu cq id %llu\n",
212 wq->rx_object, wq->queue.id, cq->queue.id);
0266a177 213
60a7ac0b 214 resp.entries[i].cqid = cq->queue.id;
688bac28 215 resp.entries[i].wqid = wq->queue.id;
0266a177
LL
216
217 mana_ind_table[i] = wq->rx_object;
c15d7802
LL
218
219 /* Create CQ table entry */
2a31c5a7
KT
220 ret = mana_ib_install_cq_cb(mdev, cq);
221 if (ret)
c15d7802 222 goto fail;
0266a177
LL
223 }
224 resp.num_entries = i;
225
226 ret = mana_ib_cfg_vport_steering(mdev, ndev, wq->rx_object,
227 mana_ind_table,
228 ind_tbl->log_ind_tbl_size,
229 ucmd.rx_hash_key_len,
230 ucmd.rx_hash_key);
231 if (ret)
232 goto fail;
233
234 ret = ib_copy_to_udata(udata, &resp, sizeof(resp));
235 if (ret) {
236 ibdev_dbg(&mdev->ib_dev,
237 "Failed to copy to udata create rss-qp, %d\n",
238 ret);
239 goto fail;
240 }
241
242 kfree(mana_ind_table);
243
244 return 0;
245
246fail:
247 while (i-- > 0) {
248 ibwq = ind_tbl->ind_tbl[i];
c15d7802 249 ibcq = ibwq->cq;
0266a177 250 wq = container_of(ibwq, struct mana_ib_wq, ibwq);
c15d7802
LL
251 cq = container_of(ibcq, struct mana_ib_cq, ibcq);
252
3e411052 253 mana_ib_remove_cq_cb(mdev, cq);
0266a177
LL
254 mana_destroy_wq_obj(mpc, GDMA_RQ, wq->rx_object);
255 }
256
257 kfree(mana_ind_table);
258
259 return ret;
260}
261
/*
 * Create a raw-packet send QP: configure the vPort for this PD/doorbell,
 * build the SQ from user memory, create the hardware WQ object (GDMA_SQ)
 * attached to the send CQ's EQ, install the CQ callback, and report the
 * resulting queue ids back to userspace.
 *
 * Resources are acquired in order and unwound in reverse on failure via the
 * err_* labels. Returns 0 on success or a negative errno.
 */
static int mana_ib_create_qp_raw(struct ib_qp *ibqp, struct ib_pd *ibpd,
				 struct ib_qp_init_attr *attr,
				 struct ib_udata *udata)
{
	struct mana_ib_pd *pd = container_of(ibpd, struct mana_ib_pd, ibpd);
	struct mana_ib_qp *qp = container_of(ibqp, struct mana_ib_qp, ibqp);
	struct mana_ib_dev *mdev =
		container_of(ibpd->device, struct mana_ib_dev, ib_dev);
	struct mana_ib_cq *send_cq =
		container_of(attr->send_cq, struct mana_ib_cq, ibcq);
	struct mana_ib_ucontext *mana_ucontext =
		rdma_udata_to_drv_context(udata, struct mana_ib_ucontext,
					  ibucontext);
	struct mana_ib_create_qp_resp resp = {};
	struct mana_ib_create_qp ucmd = {};
	struct mana_obj_spec wq_spec = {};
	struct mana_obj_spec cq_spec = {};
	struct mana_port_context *mpc;
	struct net_device *ndev;
	struct mana_eq *eq;
	int eq_vec;
	u32 port;
	int err;

	/* Raw QPs require a user context and a full command structure */
	if (!mana_ucontext || udata->inlen < sizeof(ucmd))
		return -EINVAL;

	err = ib_copy_from_udata(&ucmd, udata, min(sizeof(ucmd), udata->inlen));
	if (err) {
		ibdev_dbg(&mdev->ib_dev,
			  "Failed to copy from udata create qp-raw, %d\n", err);
		return err;
	}

	/* Validate requested capabilities against adapter limits */
	if (attr->cap.max_send_wr > mdev->adapter_caps.max_qp_wr) {
		ibdev_dbg(&mdev->ib_dev,
			  "Requested max_send_wr %d exceeding limit\n",
			  attr->cap.max_send_wr);
		return -EINVAL;
	}

	if (attr->cap.max_send_sge > MAX_TX_WQE_SGL_ENTRIES) {
		ibdev_dbg(&mdev->ib_dev,
			  "Requested max_send_sge %d exceeding limit\n",
			  attr->cap.max_send_sge);
		return -EINVAL;
	}

	port = ucmd.port;
	ndev = mana_ib_get_netdev(ibpd->device, port);
	if (!ndev) {
		ibdev_dbg(&mdev->ib_dev, "Invalid port %u in creating qp\n",
			  port);
		return -EINVAL;
	}
	mpc = netdev_priv(ndev);
	ibdev_dbg(&mdev->ib_dev, "port %u ndev %p mpc %p\n", port, ndev, mpc);

	err = mana_ib_cfg_vport(mdev, port, pd, mana_ucontext->doorbell);
	/* NOTE(review): the original error code is deliberately discarded
	 * here and replaced by -ENODEV — confirm callers rely on this.
	 */
	if (err)
		return -ENODEV;

	qp->port = port;

	ibdev_dbg(&mdev->ib_dev, "ucmd sq_buf_addr 0x%llx port %u\n",
		  ucmd.sq_buf_addr, ucmd.port);

	/* Pin the user-supplied SQ buffer and build the driver queue for it */
	err = mana_ib_create_queue(mdev, ucmd.sq_buf_addr, ucmd.sq_buf_size, &qp->raw_sq);
	if (err) {
		ibdev_dbg(&mdev->ib_dev,
			  "Failed to create queue for create qp-raw, err %d\n", err);
		goto err_free_vport;
	}

	/* Create a WQ on the same port handle used by the Ethernet */
	wq_spec.gdma_region = qp->raw_sq.gdma_region;
	wq_spec.queue_size = ucmd.sq_buf_size;

	cq_spec.gdma_region = send_cq->queue.gdma_region;
	cq_spec.queue_size = send_cq->cqe * COMP_ENTRY_SIZE;
	cq_spec.modr_ctx_id = 0;
	eq_vec = send_cq->comp_vector;
	eq = &mpc->ac->eqs[eq_vec];
	cq_spec.attached_eq = eq->eq->id;

	err = mana_create_wq_obj(mpc, mpc->port_handle, GDMA_SQ, &wq_spec,
				 &cq_spec, &qp->qp_handle);
	if (err) {
		ibdev_dbg(&mdev->ib_dev,
			  "Failed to create wq for create raw-qp, err %d\n",
			  err);
		goto err_destroy_queue;
	}

	/* The GDMA regions are now owned by the WQ object */
	qp->raw_sq.gdma_region = GDMA_INVALID_DMA_REGION;
	send_cq->queue.gdma_region = GDMA_INVALID_DMA_REGION;

	qp->raw_sq.id = wq_spec.queue_index;
	send_cq->queue.id = cq_spec.queue_index;

	/* Create CQ table entry */
	err = mana_ib_install_cq_cb(mdev, send_cq);
	if (err)
		goto err_destroy_wq_obj;

	ibdev_dbg(&mdev->ib_dev,
		  "qp->qp_handle 0x%llx sq id %llu cq id %llu\n",
		  qp->qp_handle, qp->raw_sq.id, send_cq->queue.id);

	/* Report the hardware queue ids and TX vPort offset to userspace */
	resp.sqid = qp->raw_sq.id;
	resp.cqid = send_cq->queue.id;
	resp.tx_vp_offset = pd->tx_vp_offset;

	err = ib_copy_to_udata(udata, &resp, sizeof(resp));
	if (err) {
		ibdev_dbg(&mdev->ib_dev,
			  "Failed copy udata for create qp-raw, %d\n",
			  err);
		goto err_remove_cq_cb;
	}

	return 0;

/* Unwind in reverse order of acquisition */
err_remove_cq_cb:
	mana_ib_remove_cq_cb(mdev, send_cq);

err_destroy_wq_obj:
	mana_destroy_wq_obj(mpc, GDMA_SQ, qp->qp_handle);

err_destroy_queue:
	mana_ib_destroy_queue(mdev, &qp->raw_sq);

err_free_vport:
	mana_ib_uncfg_vport(mdev, pd, port);

	return err;
}
400
401int mana_ib_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *attr,
402 struct ib_udata *udata)
403{
404 switch (attr->qp_type) {
405 case IB_QPT_RAW_PACKET:
406 /* When rwq_ind_tbl is used, it's for creating WQs for RSS */
407 if (attr->rwq_ind_tbl)
408 return mana_ib_create_qp_rss(ibqp, ibqp->pd, attr,
409 udata);
410
411 return mana_ib_create_qp_raw(ibqp, ibqp->pd, attr, udata);
412 default:
413 /* Creating QP other than IB_QPT_RAW_PACKET is not supported */
414 ibdev_dbg(ibqp->device, "Creating QP type %u not supported\n",
415 attr->qp_type);
416 }
417
418 return -EINVAL;
419}
420
421int mana_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
422 int attr_mask, struct ib_udata *udata)
423{
424 /* modify_qp is not supported by this version of the driver */
425 return -EOPNOTSUPP;
426}
427
428static int mana_ib_destroy_qp_rss(struct mana_ib_qp *qp,
429 struct ib_rwq_ind_table *ind_tbl,
430 struct ib_udata *udata)
431{
432 struct mana_ib_dev *mdev =
433 container_of(qp->ibqp.device, struct mana_ib_dev, ib_dev);
0266a177 434 struct mana_port_context *mpc;
0266a177
LL
435 struct net_device *ndev;
436 struct mana_ib_wq *wq;
437 struct ib_wq *ibwq;
438 int i;
439
3b73eb3a 440 ndev = mana_ib_get_netdev(qp->ibqp.device, qp->port);
0266a177
LL
441 mpc = netdev_priv(ndev);
442
443 for (i = 0; i < (1 << ind_tbl->log_ind_tbl_size); i++) {
444 ibwq = ind_tbl->ind_tbl[i];
445 wq = container_of(ibwq, struct mana_ib_wq, ibwq);
446 ibdev_dbg(&mdev->ib_dev, "destroying wq->rx_object %llu\n",
447 wq->rx_object);
448 mana_destroy_wq_obj(mpc, GDMA_RQ, wq->rx_object);
449 }
450
451 return 0;
452}
453
454static int mana_ib_destroy_qp_raw(struct mana_ib_qp *qp, struct ib_udata *udata)
455{
456 struct mana_ib_dev *mdev =
457 container_of(qp->ibqp.device, struct mana_ib_dev, ib_dev);
0266a177
LL
458 struct ib_pd *ibpd = qp->ibqp.pd;
459 struct mana_port_context *mpc;
0266a177
LL
460 struct net_device *ndev;
461 struct mana_ib_pd *pd;
462
3b73eb3a 463 ndev = mana_ib_get_netdev(qp->ibqp.device, qp->port);
0266a177
LL
464 mpc = netdev_priv(ndev);
465 pd = container_of(ibpd, struct mana_ib_pd, ibpd);
466
f10242b3 467 mana_destroy_wq_obj(mpc, GDMA_SQ, qp->qp_handle);
0266a177 468
f10242b3 469 mana_ib_destroy_queue(mdev, &qp->raw_sq);
0266a177 470
3b73eb3a 471 mana_ib_uncfg_vport(mdev, pd, qp->port);
0266a177
LL
472
473 return 0;
474}
475
476int mana_ib_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
477{
478 struct mana_ib_qp *qp = container_of(ibqp, struct mana_ib_qp, ibqp);
479
480 switch (ibqp->qp_type) {
481 case IB_QPT_RAW_PACKET:
482 if (ibqp->rwq_ind_tbl)
483 return mana_ib_destroy_qp_rss(qp, ibqp->rwq_ind_tbl,
484 udata);
485
486 return mana_ib_destroy_qp_raw(qp, udata);
487
488 default:
489 ibdev_dbg(ibqp->device, "Unexpected QP type %u\n",
490 ibqp->qp_type);
491 }
492
493 return -ENOENT;
494}