/* drivers/infiniband/hw/usnic/usnic_ib_verbs.c */
/*
 * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/errno.h>

#include <rdma/ib_user_verbs.h>
#include <rdma/ib_addr.h>

#include "usnic_abi.h"
#include "usnic_ib.h"
#include "usnic_common_util.h"
#include "usnic_ib_qp_grp.h"
#include "usnic_fwd.h"
#include "usnic_log.h"
#include "usnic_uiom.h"
#include "usnic_transport.h"
#include "usnic_ib_verbs.h"

#define USNIC_DEFAULT_TRANSPORT USNIC_TRANSPORT_ROCE_CUSTOM

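/*
 * usnic reports its firmware version by copying the first eight bytes
 * of the ethtool firmware-version string verbatim into the u64 fw_ver
 * device attribute; the string is reinterpreted as raw bytes, not
 * parsed numerically.
 */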
static void usnic_ib_fw_string_to_u64(char *fw_ver_str, u64 *fw_ver)
{
        *fw_ver = *((u64 *)fw_ver_str);
}

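/*
 * Build the create_qp response that userspace needs to drive the device
 * directly: the VF index, the BAR0 bus address and length for mmap(),
 * the vnic indices of the RQ/WQ/CQ resources backing this QP group, and
 * the transport type of the group's default flow.
 */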
static int usnic_ib_fill_create_qp_resp(struct usnic_ib_qp_grp *qp_grp,
                                        struct ib_udata *udata)
{
        struct usnic_ib_dev *us_ibdev;
        struct usnic_ib_create_qp_resp resp;
        struct pci_dev *pdev;
        struct vnic_dev_bar *bar;
        struct usnic_vnic_res_chunk *chunk;
        struct usnic_ib_qp_grp_flow *default_flow;
        int i, err;

        memset(&resp, 0, sizeof(resp));

        us_ibdev = qp_grp->vf->pf;
        pdev = usnic_vnic_get_pdev(qp_grp->vf->vnic);
        if (!pdev) {
                usnic_err("Failed to get pdev of qp_grp %d\n",
                                qp_grp->grp_id);
                return -EFAULT;
        }

        bar = usnic_vnic_get_bar(qp_grp->vf->vnic, 0);
        if (!bar) {
                usnic_err("Failed to get bar0 of qp_grp %d vf %s",
                                qp_grp->grp_id, pci_name(pdev));
                return -EFAULT;
        }

        resp.vfid = usnic_vnic_get_index(qp_grp->vf->vnic);
        resp.bar_bus_addr = bar->bus_addr;
        resp.bar_len = bar->len;

        chunk = usnic_ib_qp_grp_get_chunk(qp_grp, USNIC_VNIC_RES_TYPE_RQ);
        if (IS_ERR(chunk)) {
                usnic_err("Failed to get chunk %s for qp_grp %d with err %ld\n",
                        usnic_vnic_res_type_to_str(USNIC_VNIC_RES_TYPE_RQ),
                        qp_grp->grp_id,
                        PTR_ERR(chunk));
                return PTR_ERR(chunk);
        }

        WARN_ON(chunk->type != USNIC_VNIC_RES_TYPE_RQ);
        resp.rq_cnt = chunk->cnt;
        for (i = 0; i < chunk->cnt; i++)
                resp.rq_idx[i] = chunk->res[i]->vnic_idx;

        chunk = usnic_ib_qp_grp_get_chunk(qp_grp, USNIC_VNIC_RES_TYPE_WQ);
        if (IS_ERR(chunk)) {
                usnic_err("Failed to get chunk %s for qp_grp %d with err %ld\n",
                        usnic_vnic_res_type_to_str(USNIC_VNIC_RES_TYPE_WQ),
                        qp_grp->grp_id,
                        PTR_ERR(chunk));
                return PTR_ERR(chunk);
        }

        WARN_ON(chunk->type != USNIC_VNIC_RES_TYPE_WQ);
        resp.wq_cnt = chunk->cnt;
        for (i = 0; i < chunk->cnt; i++)
                resp.wq_idx[i] = chunk->res[i]->vnic_idx;

        chunk = usnic_ib_qp_grp_get_chunk(qp_grp, USNIC_VNIC_RES_TYPE_CQ);
        if (IS_ERR(chunk)) {
                usnic_err("Failed to get chunk %s for qp_grp %d with err %ld\n",
                        usnic_vnic_res_type_to_str(USNIC_VNIC_RES_TYPE_CQ),
                        qp_grp->grp_id,
                        PTR_ERR(chunk));
                return PTR_ERR(chunk);
        }

        WARN_ON(chunk->type != USNIC_VNIC_RES_TYPE_CQ);
        resp.cq_cnt = chunk->cnt;
        for (i = 0; i < chunk->cnt; i++)
                resp.cq_idx[i] = chunk->res[i]->vnic_idx;

        default_flow = list_first_entry(&qp_grp->flows_lst,
                                        struct usnic_ib_qp_grp_flow, link);
        resp.transport = default_flow->trans_type;

        err = ib_copy_to_udata(udata, &resp, sizeof(resp));
        if (err) {
                usnic_err("Failed to copy udata for %s", us_ibdev->ib_dev.name);
                return err;
        }

        return 0;
}

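/*
 * VF selection policy: if usnic_ib_share_vf is set, first try to pack
 * the new QP group onto a VF that already belongs to this PD and still
 * has room for the requested resource spec; otherwise (or if that
 * fails) fall back to a completely unused VF.
 */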
static struct usnic_ib_qp_grp*
find_free_vf_and_create_qp_grp(struct usnic_ib_dev *us_ibdev,
                                struct usnic_ib_pd *pd,
                                struct usnic_transport_spec *trans_spec,
                                struct usnic_vnic_res_spec *res_spec)
{
        struct usnic_ib_vf *vf;
        struct usnic_vnic *vnic;
        struct usnic_ib_qp_grp *qp_grp;
        struct device *dev, **dev_list;
        int i;

        BUG_ON(!mutex_is_locked(&us_ibdev->usdev_lock));

        if (list_empty(&us_ibdev->vf_dev_list)) {
                usnic_info("No vfs to allocate\n");
                return NULL;
        }

        if (usnic_ib_share_vf) {
                /* Try to find resources on a used vf which is in pd */
                dev_list = usnic_uiom_get_dev_list(pd->umem_pd);
                for (i = 0; dev_list[i]; i++) {
                        dev = dev_list[i];
                        vf = pci_get_drvdata(to_pci_dev(dev));
                        spin_lock(&vf->lock);
                        vnic = vf->vnic;
                        if (!usnic_vnic_check_room(vnic, res_spec)) {
                                usnic_dbg("Found used vnic %s from %s\n",
                                                us_ibdev->ib_dev.name,
                                                pci_name(usnic_vnic_get_pdev(
                                                                        vnic)));
                                qp_grp = usnic_ib_qp_grp_create(us_ibdev->ufdev,
                                                                vf, pd,
                                                                res_spec,
                                                                trans_spec);

                                spin_unlock(&vf->lock);
                                /* Don't leak the dev list on this path */
                                usnic_uiom_free_dev_list(dev_list);
                                goto qp_grp_check;
                        }
                        spin_unlock(&vf->lock);

                }
                usnic_uiom_free_dev_list(dev_list);
        }

        /* Try to find resources on an unused vf */
        list_for_each_entry(vf, &us_ibdev->vf_dev_list, link) {
                spin_lock(&vf->lock);
                vnic = vf->vnic;
                if (vf->qp_grp_ref_cnt == 0 &&
                                usnic_vnic_check_room(vnic, res_spec) == 0) {
                        qp_grp = usnic_ib_qp_grp_create(us_ibdev->ufdev, vf,
                                                        pd, res_spec,
                                                        trans_spec);

                        spin_unlock(&vf->lock);
                        goto qp_grp_check;
                }
                spin_unlock(&vf->lock);
        }

        usnic_info("No free qp grp found on %s\n", us_ibdev->ib_dev.name);
        return ERR_PTR(-ENOMEM);

qp_grp_check:
        if (IS_ERR_OR_NULL(qp_grp)) {
                usnic_err("Failed to allocate qp_grp\n");
                return ERR_PTR(qp_grp ? PTR_ERR(qp_grp) : -ENOMEM);
        }
        return qp_grp;
}

static void qp_grp_destroy(struct usnic_ib_qp_grp *qp_grp)
{
        struct usnic_ib_vf *vf = qp_grp->vf;

        WARN_ON(qp_grp->state != IB_QPS_RESET);

        spin_lock(&vf->lock);
        usnic_ib_qp_grp_destroy(qp_grp);
        spin_unlock(&vf->lock);
}

static int create_qp_validate_user_data(struct usnic_ib_create_qp_cmd cmd)
{
        if (cmd.spec.trans_type <= USNIC_TRANSPORT_UNKNOWN ||
                        cmd.spec.trans_type >= USNIC_TRANSPORT_MAX)
                return -EINVAL;

        return 0;
}

/* Start of ib callback functions */

enum rdma_link_layer usnic_ib_port_link_layer(struct ib_device *device,
                                                u8 port_num)
{
        return IB_LINK_LAYER_ETHERNET;
}

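/*
 * Device limits are derived from the per-VF vnic resource counts: a VF
 * can host as many QPs as it has WQs (or RQs, whichever is larger), so
 * max_qp scales with the number of VFs bound to this PF.
 */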
int usnic_ib_query_device(struct ib_device *ibdev,
                          struct ib_device_attr *props,
                          struct ib_udata *uhw)
{
        struct usnic_ib_dev *us_ibdev = to_usdev(ibdev);
        union ib_gid gid;
        struct ethtool_drvinfo info;
        int qp_per_vf;

        usnic_dbg("\n");
        if (uhw->inlen || uhw->outlen)
                return -EINVAL;

        mutex_lock(&us_ibdev->usdev_lock);
        us_ibdev->netdev->ethtool_ops->get_drvinfo(us_ibdev->netdev, &info);
        memset(props, 0, sizeof(*props));
        usnic_mac_ip_to_gid(us_ibdev->ufdev->mac, us_ibdev->ufdev->inaddr,
                        &gid.raw[0]);
        memcpy(&props->sys_image_guid, &gid.global.interface_id,
                sizeof(gid.global.interface_id));
        usnic_ib_fw_string_to_u64(&info.fw_version[0], &props->fw_ver);
        props->max_mr_size = USNIC_UIOM_MAX_MR_SIZE;
        props->page_size_cap = USNIC_UIOM_PAGE_SIZE;
        props->vendor_id = PCI_VENDOR_ID_CISCO;
        props->vendor_part_id = PCI_DEVICE_ID_CISCO_VIC_USPACE_NIC;
        props->hw_ver = us_ibdev->pdev->subsystem_device;
        qp_per_vf = max(us_ibdev->vf_res_cnt[USNIC_VNIC_RES_TYPE_WQ],
                        us_ibdev->vf_res_cnt[USNIC_VNIC_RES_TYPE_RQ]);
        props->max_qp = qp_per_vf *
                kref_read(&us_ibdev->vf_cnt);
        props->device_cap_flags = IB_DEVICE_PORT_ACTIVE_EVENT |
                IB_DEVICE_SYS_IMAGE_GUID | IB_DEVICE_BLOCK_MULTICAST_LOOPBACK;
        props->max_cq = us_ibdev->vf_res_cnt[USNIC_VNIC_RES_TYPE_CQ] *
                kref_read(&us_ibdev->vf_cnt);
        props->max_pd = USNIC_UIOM_MAX_PD_CNT;
        props->max_mr = USNIC_UIOM_MAX_MR_CNT;
        props->local_ca_ack_delay = 0;
        props->max_pkeys = 0;
        props->atomic_cap = IB_ATOMIC_NONE;
        props->masked_atomic_cap = props->atomic_cap;
        props->max_qp_rd_atom = 0;
        props->max_qp_init_rd_atom = 0;
        props->max_res_rd_atom = 0;
        props->max_srq = 0;
        props->max_srq_wr = 0;
        props->max_srq_sge = 0;
        props->max_fast_reg_page_list_len = 0;
        props->max_mcast_grp = 0;
        props->max_mcast_qp_attach = 0;
        props->max_total_mcast_qp_attach = 0;
        props->max_map_per_fmr = 0;
        /* Owned by Userspace
         * max_qp_wr, max_sge, max_sge_rd, max_cqe */
        mutex_unlock(&us_ibdev->usdev_lock);

        return 0;
}

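/*
 * Port state mapping: link down -> IB_PORT_DOWN; link up but no IP
 * address on the interface -> IB_PORT_INIT; link up with an IP address
 * -> IB_PORT_ACTIVE. The IP matters because usnic GIDs encode the
 * netdev's MAC/IP pair (see usnic_mac_ip_to_gid()).
 */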
int usnic_ib_query_port(struct ib_device *ibdev, u8 port,
                        struct ib_port_attr *props)
{
        struct usnic_ib_dev *us_ibdev = to_usdev(ibdev);

        usnic_dbg("\n");

        mutex_lock(&us_ibdev->usdev_lock);
        if (ib_get_eth_speed(ibdev, port, &props->active_speed,
                             &props->active_width)) {
                mutex_unlock(&us_ibdev->usdev_lock);
                return -EINVAL;
        }

        /* props being zeroed by the caller, avoid zeroing it here */

        props->lid = 0;
        props->lmc = 1;
        props->sm_lid = 0;
        props->sm_sl = 0;

        if (!us_ibdev->ufdev->link_up) {
                props->state = IB_PORT_DOWN;
                props->phys_state = 3;
        } else if (!us_ibdev->ufdev->inaddr) {
                props->state = IB_PORT_INIT;
                props->phys_state = 4;
        } else {
                props->state = IB_PORT_ACTIVE;
                props->phys_state = 5;
        }

        props->port_cap_flags = 0;
        props->gid_tbl_len = 1;
        props->pkey_tbl_len = 1;
        props->bad_pkey_cntr = 0;
        props->qkey_viol_cntr = 0;
        props->max_mtu = IB_MTU_4096;
        props->active_mtu = iboe_get_mtu(us_ibdev->ufdev->mtu);
        /* Userspace will adjust for hdrs */
        props->max_msg_sz = us_ibdev->ufdev->mtu;
        props->max_vl_num = 1;
        mutex_unlock(&us_ibdev->usdev_lock);

        return 0;
}

int usnic_ib_query_qp(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
                                int qp_attr_mask,
                                struct ib_qp_init_attr *qp_init_attr)
{
        struct usnic_ib_qp_grp *qp_grp;
        struct usnic_ib_vf *vf;
        int err;

        usnic_dbg("\n");

        memset(qp_attr, 0, sizeof(*qp_attr));
        memset(qp_init_attr, 0, sizeof(*qp_init_attr));

        qp_grp = to_uqp_grp(qp);
        vf = qp_grp->vf;
        mutex_lock(&vf->pf->usdev_lock);
        usnic_dbg("\n");
        qp_attr->qp_state = qp_grp->state;
        qp_attr->cur_qp_state = qp_grp->state;

        switch (qp_grp->ibqp.qp_type) {
        case IB_QPT_UD:
                qp_attr->qkey = 0;
                break;
        default:
                usnic_err("Unexpected qp_type %d\n", qp_grp->ibqp.qp_type);
                err = -EINVAL;
                goto err_out;
        }

        mutex_unlock(&vf->pf->usdev_lock);
        return 0;

err_out:
        mutex_unlock(&vf->pf->usdev_lock);
        return err;
}

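/*
 * usnic exposes a single GID per port (gid_tbl_len is 1 above),
 * synthesized from the underlying netdev's MAC and IPv4 address rather
 * than read from a GID table in hardware.
 */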
int usnic_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
                                union ib_gid *gid)
{
        struct usnic_ib_dev *us_ibdev = to_usdev(ibdev);

        usnic_dbg("\n");

        if (index > 1)
                return -EINVAL;

        mutex_lock(&us_ibdev->usdev_lock);
        memset(&(gid->raw[0]), 0, sizeof(gid->raw));
        usnic_mac_ip_to_gid(us_ibdev->ufdev->mac, us_ibdev->ufdev->inaddr,
                        &gid->raw[0]);
        mutex_unlock(&us_ibdev->usdev_lock);

        return 0;
}

struct net_device *usnic_get_netdev(struct ib_device *device, u8 port_num)
{
        struct usnic_ib_dev *us_ibdev = to_usdev(device);

        if (us_ibdev->netdev)
                dev_hold(us_ibdev->netdev);

        return us_ibdev->netdev;
}

int usnic_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
                                u16 *pkey)
{
        if (index > 1)
                return -EINVAL;

        *pkey = 0xffff;
        return 0;
}

struct ib_pd *usnic_ib_alloc_pd(struct ib_device *ibdev,
                                struct ib_ucontext *context,
                                struct ib_udata *udata)
{
        struct usnic_ib_pd *pd;
        void *umem_pd;

        usnic_dbg("\n");

        pd = kzalloc(sizeof(*pd), GFP_KERNEL);
        if (!pd)
                return ERR_PTR(-ENOMEM);

        umem_pd = pd->umem_pd = usnic_uiom_alloc_pd();
        if (IS_ERR_OR_NULL(umem_pd)) {
                kfree(pd);
                return ERR_PTR(umem_pd ? PTR_ERR(umem_pd) : -ENOMEM);
        }

        usnic_info("domain 0x%p allocated for context 0x%p and device %s\n",
                        pd, context, ibdev->name);
        return &pd->ibpd;
}

int usnic_ib_dealloc_pd(struct ib_pd *pd)
{
        usnic_info("freeing domain 0x%p\n", pd);

        usnic_uiom_dealloc_pd((to_upd(pd))->umem_pd);
        kfree(pd);
        return 0;
}

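/*
 * QP creation flow: copy and validate the userspace command (which
 * carries the requested transport spec), insist on IB_QPT_UD (the only
 * QP type usnic supports), size the resource spec from the transport's
 * minimum plus one or two CQs depending on whether send and recv share
 * a CQ, grab a VF with room, and hand the resulting resource layout
 * back to userspace via the create_qp response.
 */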
struct ib_qp *usnic_ib_create_qp(struct ib_pd *pd,
                                        struct ib_qp_init_attr *init_attr,
                                        struct ib_udata *udata)
{
        int err;
        struct usnic_ib_dev *us_ibdev;
        struct usnic_ib_qp_grp *qp_grp;
        struct usnic_ib_ucontext *ucontext;
        int cq_cnt;
        struct usnic_vnic_res_spec res_spec;
        struct usnic_ib_create_qp_cmd cmd;
        struct usnic_transport_spec trans_spec;

        usnic_dbg("\n");

        ucontext = to_uucontext(pd->uobject->context);
        us_ibdev = to_usdev(pd->device);

        if (init_attr->create_flags)
                return ERR_PTR(-EINVAL);

        err = ib_copy_from_udata(&cmd, udata, sizeof(cmd));
        if (err) {
                usnic_err("%s: cannot copy udata for create_qp\n",
                                us_ibdev->ib_dev.name);
                return ERR_PTR(-EINVAL);
        }

        err = create_qp_validate_user_data(cmd);
        if (err) {
                usnic_err("%s: Failed to validate user data\n",
                                us_ibdev->ib_dev.name);
                return ERR_PTR(-EINVAL);
        }

        if (init_attr->qp_type != IB_QPT_UD) {
                usnic_err("%s asked to make a non-UD QP: %d\n",
                                us_ibdev->ib_dev.name, init_attr->qp_type);
                return ERR_PTR(-EINVAL);
        }

        trans_spec = cmd.spec;
        mutex_lock(&us_ibdev->usdev_lock);
        cq_cnt = (init_attr->send_cq == init_attr->recv_cq) ? 1 : 2;
        res_spec = min_transport_spec[trans_spec.trans_type];
        usnic_vnic_res_spec_update(&res_spec, USNIC_VNIC_RES_TYPE_CQ, cq_cnt);
        qp_grp = find_free_vf_and_create_qp_grp(us_ibdev, to_upd(pd),
                                                &trans_spec,
                                                &res_spec);
        if (IS_ERR_OR_NULL(qp_grp)) {
                err = qp_grp ? PTR_ERR(qp_grp) : -ENOMEM;
                goto out_release_mutex;
        }

        err = usnic_ib_fill_create_qp_resp(qp_grp, udata);
        if (err) {
                err = -EBUSY;
                goto out_release_qp_grp;
        }

        qp_grp->ctx = ucontext;
        list_add_tail(&qp_grp->link, &ucontext->qp_grp_list);
        usnic_ib_log_vf(qp_grp->vf);
        mutex_unlock(&us_ibdev->usdev_lock);
        return &qp_grp->ibqp;

out_release_qp_grp:
        qp_grp_destroy(qp_grp);
out_release_mutex:
        mutex_unlock(&us_ibdev->usdev_lock);
        return ERR_PTR(err);
}

int usnic_ib_destroy_qp(struct ib_qp *qp)
{
        struct usnic_ib_qp_grp *qp_grp;
        struct usnic_ib_vf *vf;

        usnic_dbg("\n");

        qp_grp = to_uqp_grp(qp);
        vf = qp_grp->vf;
        mutex_lock(&vf->pf->usdev_lock);
        if (usnic_ib_qp_grp_modify(qp_grp, IB_QPS_RESET, NULL)) {
                usnic_err("Failed to move qp grp %u to reset\n",
                                qp_grp->grp_id);
        }

        list_del(&qp_grp->link);
        qp_grp_destroy(qp_grp);
        mutex_unlock(&vf->pf->usdev_lock);

        return 0;
}

int usnic_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
                                int attr_mask, struct ib_udata *udata)
{
        struct usnic_ib_qp_grp *qp_grp;
        int status;

        usnic_dbg("\n");

        qp_grp = to_uqp_grp(ibqp);

        mutex_lock(&qp_grp->vf->pf->usdev_lock);
        if ((attr_mask & IB_QP_PORT) && attr->port_num != 1) {
                /* usnic devices only have one port */
                status = -EINVAL;
                goto out_unlock;
        }
        if (attr_mask & IB_QP_STATE) {
                status = usnic_ib_qp_grp_modify(qp_grp, attr->qp_state, NULL);
        } else {
                usnic_err("Unhandled request, attr_mask=0x%x\n", attr_mask);
                status = -EINVAL;
        }

out_unlock:
        mutex_unlock(&qp_grp->vf->pf->usdev_lock);
        return status;
}

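/*
 * usnic does all completion handling in userspace, so the kernel "CQ"
 * is just a placeholder object: nothing is armed, and the poll/notify
 * callbacks below are stubs that fail.
 */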
struct ib_cq *usnic_ib_create_cq(struct ib_device *ibdev,
                                 const struct ib_cq_init_attr *attr,
                                 struct ib_ucontext *context,
                                 struct ib_udata *udata)
{
        struct ib_cq *cq;

        usnic_dbg("\n");
        if (attr->flags)
                return ERR_PTR(-EINVAL);

        cq = kzalloc(sizeof(*cq), GFP_KERNEL);
        if (!cq)
                return ERR_PTR(-EBUSY);

        return cq;
}

int usnic_ib_destroy_cq(struct ib_cq *cq)
{
        usnic_dbg("\n");
        kfree(cq);
        return 0;
}

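/*
 * Memory registration pins the user buffer through usnic's own uiom
 * layer (a usnic-specific cousin of ib_umem) so the VIC hardware can
 * DMA to it; lkey/rkey are left at zero because the data path is
 * driven entirely from userspace.
 */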
struct ib_mr *usnic_ib_reg_mr(struct ib_pd *pd, u64 start, u64 length,
                                        u64 virt_addr, int access_flags,
                                        struct ib_udata *udata)
{
        struct usnic_ib_mr *mr;
        int err;

        usnic_dbg("start 0x%llx va 0x%llx length 0x%llx\n", start,
                        virt_addr, length);

        mr = kzalloc(sizeof(*mr), GFP_KERNEL);
        if (!mr)
                return ERR_PTR(-ENOMEM);

        mr->umem = usnic_uiom_reg_get(to_upd(pd)->umem_pd, start, length,
                                        access_flags, 0);
        if (IS_ERR_OR_NULL(mr->umem)) {
                err = mr->umem ? PTR_ERR(mr->umem) : -EFAULT;
                goto err_free;
        }

        mr->ibmr.lkey = mr->ibmr.rkey = 0;
        return &mr->ibmr;

err_free:
        kfree(mr);
        return ERR_PTR(err);
}

int usnic_ib_dereg_mr(struct ib_mr *ibmr)
{
        struct usnic_ib_mr *mr = to_umr(ibmr);

        usnic_dbg("va 0x%lx length 0x%zx\n", mr->umem->va, mr->umem->length);

        usnic_uiom_reg_release(mr->umem, ibmr->pd->uobject->context->closing);
        kfree(mr);
        return 0;
}

struct ib_ucontext *usnic_ib_alloc_ucontext(struct ib_device *ibdev,
                                                struct ib_udata *udata)
{
        struct usnic_ib_ucontext *context;
        struct usnic_ib_dev *us_ibdev = to_usdev(ibdev);

        usnic_dbg("\n");

        context = kmalloc(sizeof(*context), GFP_KERNEL);
        if (!context)
                return ERR_PTR(-ENOMEM);

        INIT_LIST_HEAD(&context->qp_grp_list);
        mutex_lock(&us_ibdev->usdev_lock);
        list_add_tail(&context->link, &us_ibdev->ctx_list);
        mutex_unlock(&us_ibdev->usdev_lock);

        return &context->ibucontext;
}

int usnic_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
{
        struct usnic_ib_ucontext *context = to_uucontext(ibcontext);
        struct usnic_ib_dev *us_ibdev = to_usdev(ibcontext->device);

        usnic_dbg("\n");

        mutex_lock(&us_ibdev->usdev_lock);
        BUG_ON(!list_empty(&context->qp_grp_list));
        list_del(&context->link);
        mutex_unlock(&us_ibdev->usdev_lock);
        kfree(context);
        return 0;
}

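/*
 * mmap() is how userspace gets at a VF's BAR0 registers: the page
 * offset of the mapping request names the VF id, so a user library
 * would do something like
 *
 *      mmap(NULL, bar_len, PROT_READ | PROT_WRITE, MAP_SHARED,
 *           uverbs_fd, vfid * sysconf(_SC_PAGESIZE));
 *
 * (illustrative sketch only; bar_len and vfid come from the create_qp
 * response). The request is honored only if the caller owns a QP group
 * on that VF and maps the BAR in its entirety.
 */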
int usnic_ib_mmap(struct ib_ucontext *context,
                        struct vm_area_struct *vma)
{
        struct usnic_ib_ucontext *uctx = to_ucontext(context);
        struct usnic_ib_dev *us_ibdev;
        struct usnic_ib_qp_grp *qp_grp;
        struct usnic_ib_vf *vf;
        struct vnic_dev_bar *bar;
        dma_addr_t bus_addr;
        unsigned int len;
        unsigned int vfid;

        usnic_dbg("\n");

        us_ibdev = to_usdev(context->device);
        vma->vm_flags |= VM_IO;
        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
        vfid = vma->vm_pgoff;
        usnic_dbg("Page Offset %lu PAGE_SHIFT %u VFID %u\n",
                        vma->vm_pgoff, PAGE_SHIFT, vfid);

        mutex_lock(&us_ibdev->usdev_lock);
        list_for_each_entry(qp_grp, &uctx->qp_grp_list, link) {
                vf = qp_grp->vf;
                if (usnic_vnic_get_index(vf->vnic) == vfid) {
                        bar = usnic_vnic_get_bar(vf->vnic, 0);
                        if ((vma->vm_end - vma->vm_start) != bar->len) {
                                usnic_err("Bar0 Len %lu - Request map %lu\n",
                                                bar->len,
                                                vma->vm_end - vma->vm_start);
                                mutex_unlock(&us_ibdev->usdev_lock);
                                return -EINVAL;
                        }
                        bus_addr = bar->bus_addr;
                        len = bar->len;
                        usnic_dbg("bus: %pa vaddr: %p size: %ld\n",
                                        &bus_addr, bar->vaddr, bar->len);
                        mutex_unlock(&us_ibdev->usdev_lock);

                        return remap_pfn_range(vma,
                                                vma->vm_start,
                                                bus_addr >> PAGE_SHIFT,
                                                len, vma->vm_page_prot);
                }
        }

        mutex_unlock(&us_ibdev->usdev_lock);
        usnic_err("No VF %u found\n", vfid);
        return -EINVAL;
}

/* In ib callbacks section - Start of stub funcs */
struct ib_ah *usnic_ib_create_ah(struct ib_pd *pd,
                                 struct rdma_ah_attr *ah_attr,
                                 struct ib_udata *udata)
{
        usnic_dbg("\n");
        return ERR_PTR(-EPERM);
}

int usnic_ib_destroy_ah(struct ib_ah *ah)
{
        usnic_dbg("\n");
        return -EINVAL;
}

int usnic_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
                        struct ib_send_wr **bad_wr)
{
        usnic_dbg("\n");
        return -EINVAL;
}

int usnic_ib_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
                        struct ib_recv_wr **bad_wr)
{
        usnic_dbg("\n");
        return -EINVAL;
}

int usnic_ib_poll_cq(struct ib_cq *ibcq, int num_entries,
                        struct ib_wc *wc)
{
        usnic_dbg("\n");
        return -EINVAL;
}

int usnic_ib_req_notify_cq(struct ib_cq *cq,
                                enum ib_cq_notify_flags flags)
{
        usnic_dbg("\n");
        return -EINVAL;
}

struct ib_mr *usnic_ib_get_dma_mr(struct ib_pd *pd, int acc)
{
        usnic_dbg("\n");
        return ERR_PTR(-ENOMEM);
}

/* In ib callbacks section - End of stub funcs */
/* End of ib callbacks section */