/*******************************************************************
 * This file is part of the Emulex RoCE Device Driver for          *
 * RoCE (RDMA over Converged Ethernet) adapters.                   *
 * Copyright (C) 2008-2012 Emulex. All rights reserved.            *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.emulex.com                                                  *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID. See the GNU General Public License for   *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 *******************************************************************/

#include <linux/dma-mapping.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/iw_cm.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_addr.h>

#include "ocrdma.h"
#include "ocrdma_hw.h"
#include "ocrdma_verbs.h"
#include "ocrdma_abi.h"
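
/* These adapters carry no real pkey table; the single default pkey
 * 0xffff is reported for every valid index.
 */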
int ocrdma_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey)
{
	if (index > 1)
		return -EINVAL;

	*pkey = 0xffff;
	return 0;
}

int ocrdma_query_gid(struct ib_device *ibdev, u8 port,
		     int index, union ib_gid *sgid)
{
	struct ocrdma_dev *dev;

	dev = get_ocrdma_dev(ibdev);
	memset(sgid, 0, sizeof(*sgid));
	if (index > OCRDMA_MAX_SGID)
		return -EINVAL;

	memcpy(sgid, &dev->sgid_tbl[index], sizeof(*sgid));

	return 0;
}

int ocrdma_query_device(struct ib_device *ibdev, struct ib_device_attr *attr)
{
	struct ocrdma_dev *dev = get_ocrdma_dev(ibdev);

	memset(attr, 0, sizeof *attr);
	memcpy(&attr->fw_ver, &dev->attr.fw_ver[0],
	       min(sizeof(dev->attr.fw_ver), sizeof(attr->fw_ver)));
	ocrdma_get_guid(dev, (u8 *)&attr->sys_image_guid);
	attr->max_mr_size = dev->attr.max_mr_size;
	attr->page_size_cap = 0xffff000;
	attr->vendor_id = dev->nic_info.pdev->vendor;
	attr->vendor_part_id = dev->nic_info.pdev->device;
	attr->hw_ver = 0;
	attr->max_qp = dev->attr.max_qp;
	attr->max_ah = OCRDMA_MAX_AH;
	attr->max_qp_wr = dev->attr.max_wqe;

	attr->device_cap_flags = IB_DEVICE_CURR_QP_STATE_MOD |
					IB_DEVICE_RC_RNR_NAK_GEN |
					IB_DEVICE_SHUTDOWN_PORT |
					IB_DEVICE_SYS_IMAGE_GUID |
					IB_DEVICE_LOCAL_DMA_LKEY |
					IB_DEVICE_MEM_MGT_EXTENSIONS;
	attr->max_sge = min(dev->attr.max_send_sge, dev->attr.max_srq_sge);
	attr->max_sge_rd = 0;
	attr->max_cq = dev->attr.max_cq;
	attr->max_cqe = dev->attr.max_cqe;
	attr->max_mr = dev->attr.max_mr;
	attr->max_mw = dev->attr.max_mw;
	attr->max_pd = dev->attr.max_pd;
	attr->atomic_cap = 0;
	attr->max_fmr = 0;
	attr->max_map_per_fmr = 0;
	attr->max_qp_rd_atom =
	    min(dev->attr.max_ord_per_qp, dev->attr.max_ird_per_qp);
	attr->max_qp_init_rd_atom = dev->attr.max_ord_per_qp;
	attr->max_srq = dev->attr.max_srq;
	attr->max_srq_sge = dev->attr.max_srq_sge;
	attr->max_srq_wr = dev->attr.max_rqe;
	attr->local_ca_ack_delay = dev->attr.local_ca_ack_delay;
	attr->max_fast_reg_page_list_len = 0;
	attr->max_pkeys = 1;
	return 0;
}

static inline void get_link_speed_and_width(struct ocrdma_dev *dev,
					    u8 *ib_speed, u8 *ib_width)
{
	int status;
	u8 speed;

	status = ocrdma_mbx_get_link_speed(dev, &speed);
	if (status)
		speed = OCRDMA_PHYS_LINK_SPEED_ZERO;

	switch (speed) {
	case OCRDMA_PHYS_LINK_SPEED_1GBPS:
		*ib_speed = IB_SPEED_SDR;
		*ib_width = IB_WIDTH_1X;
		break;

	case OCRDMA_PHYS_LINK_SPEED_10GBPS:
		*ib_speed = IB_SPEED_QDR;
		*ib_width = IB_WIDTH_1X;
		break;

	case OCRDMA_PHYS_LINK_SPEED_20GBPS:
		*ib_speed = IB_SPEED_DDR;
		*ib_width = IB_WIDTH_4X;
		break;

	case OCRDMA_PHYS_LINK_SPEED_40GBPS:
		*ib_speed = IB_SPEED_QDR;
		*ib_width = IB_WIDTH_4X;
		break;

	default:
		/* Unsupported */
		*ib_speed = IB_SPEED_SDR;
		*ib_width = IB_WIDTH_1X;
	}
}

int ocrdma_query_port(struct ib_device *ibdev,
		      u8 port, struct ib_port_attr *props)
{
	enum ib_port_state port_state;
	struct ocrdma_dev *dev;
	struct net_device *netdev;

	dev = get_ocrdma_dev(ibdev);
	if (port > 1) {
		pr_err("%s(%d) invalid_port=0x%x\n", __func__,
		       dev->id, port);
		return -EINVAL;
	}
	netdev = dev->nic_info.netdev;
	if (netif_running(netdev) && netif_oper_up(netdev)) {
		port_state = IB_PORT_ACTIVE;
		props->phys_state = 5;
	} else {
		port_state = IB_PORT_DOWN;
		props->phys_state = 3;
	}
	props->max_mtu = IB_MTU_4096;
	props->active_mtu = iboe_get_mtu(netdev->mtu);
	props->lid = 0;
	props->lmc = 0;
	props->sm_lid = 0;
	props->sm_sl = 0;
	props->state = port_state;
	props->port_cap_flags =
	    IB_PORT_CM_SUP |
	    IB_PORT_REINIT_SUP |
	    IB_PORT_DEVICE_MGMT_SUP | IB_PORT_VENDOR_CLASS_SUP |
	    IB_PORT_IP_BASED_GIDS;
	props->gid_tbl_len = OCRDMA_MAX_SGID;
	props->pkey_tbl_len = 1;
	props->bad_pkey_cntr = 0;
	props->qkey_viol_cntr = 0;
	get_link_speed_and_width(dev, &props->active_speed,
				 &props->active_width);
	props->max_msg_sz = 0x80000000;
	props->max_vl_num = 4;
	return 0;
}

int ocrdma_modify_port(struct ib_device *ibdev, u8 port, int mask,
		       struct ib_port_modify *props)
{
	struct ocrdma_dev *dev;

	dev = get_ocrdma_dev(ibdev);
	if (port > 1) {
		pr_err("%s(%d) invalid_port=0x%x\n", __func__, dev->id, port);
		return -EINVAL;
	}
	return 0;
}
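
/* Regions handed to userspace (doorbell pages, DPP pages, CQ/QP queue
 * memory) are tracked as (phy_addr, len) keys on a per-ucontext list.
 * ocrdma_mmap() honours only requests whose offset and length match a
 * key previously advertised to the application, so this list acts as a
 * whitelist of mappable regions.
 */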
static int ocrdma_add_mmap(struct ocrdma_ucontext *uctx, u64 phy_addr,
			   unsigned long len)
{
	struct ocrdma_mm *mm;

	mm = kzalloc(sizeof(*mm), GFP_KERNEL);
	if (mm == NULL)
		return -ENOMEM;
	mm->key.phy_addr = phy_addr;
	mm->key.len = len;
	INIT_LIST_HEAD(&mm->entry);

	mutex_lock(&uctx->mm_list_lock);
	list_add_tail(&mm->entry, &uctx->mm_head);
	mutex_unlock(&uctx->mm_list_lock);
	return 0;
}

static void ocrdma_del_mmap(struct ocrdma_ucontext *uctx, u64 phy_addr,
			    unsigned long len)
{
	struct ocrdma_mm *mm, *tmp;

	mutex_lock(&uctx->mm_list_lock);
	list_for_each_entry_safe(mm, tmp, &uctx->mm_head, entry) {
		if (len != mm->key.len && phy_addr != mm->key.phy_addr)
			continue;

		list_del(&mm->entry);
		kfree(mm);
		break;
	}
	mutex_unlock(&uctx->mm_list_lock);
}

static bool ocrdma_search_mmap(struct ocrdma_ucontext *uctx, u64 phy_addr,
			       unsigned long len)
{
	bool found = false;
	struct ocrdma_mm *mm;

	mutex_lock(&uctx->mm_list_lock);
	list_for_each_entry(mm, &uctx->mm_head, entry) {
		if (len != mm->key.len && phy_addr != mm->key.phy_addr)
			continue;

		found = true;
		break;
	}
	mutex_unlock(&uctx->mm_list_lock);
	return found;
}
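
/* Allocate a PD in firmware. For a userspace PD on SkyHawk-R
 * (OCRDMA_ASIC_GEN_SKH_R) hardware, DPP is enabled and num_dpp_qp is
 * sized from the doorbell page size and WQE size; if firmware rejects
 * the DPP allocation, the request is retried once as a plain PD.
 */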
static struct ocrdma_pd *_ocrdma_alloc_pd(struct ocrdma_dev *dev,
					  struct ocrdma_ucontext *uctx,
					  struct ib_udata *udata)
{
	struct ocrdma_pd *pd = NULL;
	int status = 0;

	pd = kzalloc(sizeof(*pd), GFP_KERNEL);
	if (!pd)
		return ERR_PTR(-ENOMEM);

	if (udata && uctx) {
		pd->dpp_enabled =
			ocrdma_get_asic_type(dev) == OCRDMA_ASIC_GEN_SKH_R;
		pd->num_dpp_qp =
			pd->dpp_enabled ? (dev->nic_info.db_page_size /
					   dev->attr.wqe_size) : 0;
	}

retry:
	status = ocrdma_mbx_alloc_pd(dev, pd);
	if (status) {
		if (pd->dpp_enabled) {
			pd->dpp_enabled = false;
			pd->num_dpp_qp = 0;
			goto retry;
		} else {
			kfree(pd);
			return ERR_PTR(status);
		}
	}

	return pd;
}

static inline int is_ucontext_pd(struct ocrdma_ucontext *uctx,
				 struct ocrdma_pd *pd)
{
	return (uctx->cntxt_pd == pd ? true : false);
}

static int _ocrdma_dealloc_pd(struct ocrdma_dev *dev,
			      struct ocrdma_pd *pd)
{
	int status = 0;

	status = ocrdma_mbx_dealloc_pd(dev, pd);
	kfree(pd);
	return status;
}

static int ocrdma_alloc_ucontext_pd(struct ocrdma_dev *dev,
				    struct ocrdma_ucontext *uctx,
				    struct ib_udata *udata)
{
	int status = 0;

	uctx->cntxt_pd = _ocrdma_alloc_pd(dev, uctx, udata);
	if (IS_ERR(uctx->cntxt_pd)) {
		status = PTR_ERR(uctx->cntxt_pd);
		uctx->cntxt_pd = NULL;
		goto err;
	}

	uctx->cntxt_pd->uctx = uctx;
	uctx->cntxt_pd->ibpd.device = &dev->ibdev;
err:
	return status;
}

static int ocrdma_dealloc_ucontext_pd(struct ocrdma_ucontext *uctx)
{
	int status = 0;
	struct ocrdma_pd *pd = uctx->cntxt_pd;
	struct ocrdma_dev *dev = get_ocrdma_dev(pd->ibpd.device);

	if (uctx->pd_in_use) {
		pr_err("%s(%d) Freeing in use pdid=0x%x.\n",
		       __func__, dev->id, pd->id);
	}
	uctx->cntxt_pd = NULL;
	status = _ocrdma_dealloc_pd(dev, pd);
	return status;
}

static struct ocrdma_pd *ocrdma_get_ucontext_pd(struct ocrdma_ucontext *uctx)
{
	struct ocrdma_pd *pd = NULL;

	mutex_lock(&uctx->mm_list_lock);
	if (!uctx->pd_in_use) {
		uctx->pd_in_use = true;
		pd = uctx->cntxt_pd;
	}
	mutex_unlock(&uctx->mm_list_lock);

	return pd;
}

static void ocrdma_release_ucontext_pd(struct ocrdma_ucontext *uctx)
{
	mutex_lock(&uctx->mm_list_lock);
	uctx->pd_in_use = false;
	mutex_unlock(&uctx->mm_list_lock);
}
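
/* Each ucontext owns a DMA-coherent address-handle (AH) table and one
 * pre-allocated "context PD" that ocrdma_alloc_pd() can lend to the
 * first PD request from the same process (ocrdma_get_ucontext_pd()
 * above). The table address and geometry reach the user library via
 * struct ocrdma_alloc_ucontext_resp.
 */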
struct ib_ucontext *ocrdma_alloc_ucontext(struct ib_device *ibdev,
					  struct ib_udata *udata)
{
	int status;
	struct ocrdma_ucontext *ctx;
	struct ocrdma_alloc_ucontext_resp resp;
	struct ocrdma_dev *dev = get_ocrdma_dev(ibdev);
	struct pci_dev *pdev = dev->nic_info.pdev;
	u32 map_len = roundup(sizeof(u32) * 2048, PAGE_SIZE);

	if (!udata)
		return ERR_PTR(-EFAULT);
	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return ERR_PTR(-ENOMEM);
	INIT_LIST_HEAD(&ctx->mm_head);
	mutex_init(&ctx->mm_list_lock);

	ctx->ah_tbl.va = dma_alloc_coherent(&pdev->dev, map_len,
					    &ctx->ah_tbl.pa, GFP_KERNEL);
	if (!ctx->ah_tbl.va) {
		kfree(ctx);
		return ERR_PTR(-ENOMEM);
	}
	memset(ctx->ah_tbl.va, 0, map_len);
	ctx->ah_tbl.len = map_len;

	memset(&resp, 0, sizeof(resp));
	resp.ah_tbl_len = ctx->ah_tbl.len;
	resp.ah_tbl_page = ctx->ah_tbl.pa;

	status = ocrdma_add_mmap(ctx, resp.ah_tbl_page, resp.ah_tbl_len);
	if (status)
		goto map_err;

	status = ocrdma_alloc_ucontext_pd(dev, ctx, udata);
	if (status)
		goto pd_err;

	resp.dev_id = dev->id;
	resp.max_inline_data = dev->attr.max_inline_data;
	resp.wqe_size = dev->attr.wqe_size;
	resp.rqe_size = dev->attr.rqe_size;
	resp.dpp_wqe_size = dev->attr.wqe_size;

	memcpy(resp.fw_ver, dev->attr.fw_ver, sizeof(resp.fw_ver));
	status = ib_copy_to_udata(udata, &resp, sizeof(resp));
	if (status)
		goto cpy_err;
	return &ctx->ibucontext;

cpy_err:
pd_err:
	ocrdma_del_mmap(ctx, ctx->ah_tbl.pa, ctx->ah_tbl.len);
map_err:
	dma_free_coherent(&pdev->dev, ctx->ah_tbl.len, ctx->ah_tbl.va,
			  ctx->ah_tbl.pa);
	kfree(ctx);
	return ERR_PTR(status);
}

int ocrdma_dealloc_ucontext(struct ib_ucontext *ibctx)
{
	int status = 0;
	struct ocrdma_mm *mm, *tmp;
	struct ocrdma_ucontext *uctx = get_ocrdma_ucontext(ibctx);
	struct ocrdma_dev *dev = get_ocrdma_dev(ibctx->device);
	struct pci_dev *pdev = dev->nic_info.pdev;

	status = ocrdma_dealloc_ucontext_pd(uctx);

	ocrdma_del_mmap(uctx, uctx->ah_tbl.pa, uctx->ah_tbl.len);
	dma_free_coherent(&pdev->dev, uctx->ah_tbl.len, uctx->ah_tbl.va,
			  uctx->ah_tbl.pa);

	list_for_each_entry_safe(mm, tmp, &uctx->mm_head, entry) {
		list_del(&mm->entry);
		kfree(mm);
	}
	kfree(uctx);
	return status;
}
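
/* mmap() handler for the regions whitelisted above. Doorbell space is
 * mapped uncached and DPP space write-combined; both are refused when
 * mapped readable. Anything else that passed the whitelist check
 * (queue memory) is mapped as ordinary cached RAM.
 */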
int ocrdma_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
{
	struct ocrdma_ucontext *ucontext = get_ocrdma_ucontext(context);
	struct ocrdma_dev *dev = get_ocrdma_dev(context->device);
	unsigned long vm_page = vma->vm_pgoff << PAGE_SHIFT;
	u64 unmapped_db = (u64) dev->nic_info.unmapped_db;
	unsigned long len = (vma->vm_end - vma->vm_start);
	int status = 0;
	bool found;

	if (vma->vm_start & (PAGE_SIZE - 1))
		return -EINVAL;
	found = ocrdma_search_mmap(ucontext, vma->vm_pgoff << PAGE_SHIFT, len);
	if (!found)
		return -EINVAL;

	if ((vm_page >= unmapped_db) && (vm_page <= (unmapped_db +
		dev->nic_info.db_total_size)) &&
	    (len <= dev->nic_info.db_page_size)) {
		if (vma->vm_flags & VM_READ)
			return -EPERM;

		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
		status = io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
					    len, vma->vm_page_prot);
	} else if (dev->nic_info.dpp_unmapped_len &&
		(vm_page >= (u64) dev->nic_info.dpp_unmapped_addr) &&
		(vm_page <= (u64) (dev->nic_info.dpp_unmapped_addr +
			dev->nic_info.dpp_unmapped_len)) &&
		(len <= dev->nic_info.dpp_unmapped_len)) {
		if (vma->vm_flags & VM_READ)
			return -EPERM;

		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
		status = io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
					    len, vma->vm_page_prot);
	} else {
		status = remap_pfn_range(vma, vma->vm_start,
					 vma->vm_pgoff, len,
					 vma->vm_page_prot);
	}
	return status;
}
static int ocrdma_copy_pd_uresp(struct ocrdma_dev *dev, struct ocrdma_pd *pd,
				struct ib_ucontext *ib_ctx,
				struct ib_udata *udata)
{
	int status;
	u64 db_page_addr;
	u64 dpp_page_addr = 0;
	u32 db_page_size;
	struct ocrdma_alloc_pd_uresp rsp;
	struct ocrdma_ucontext *uctx = get_ocrdma_ucontext(ib_ctx);

	memset(&rsp, 0, sizeof(rsp));
	rsp.id = pd->id;
	rsp.dpp_enabled = pd->dpp_enabled;
	db_page_addr = ocrdma_get_db_addr(dev, pd->id);
	db_page_size = dev->nic_info.db_page_size;

	status = ocrdma_add_mmap(uctx, db_page_addr, db_page_size);
	if (status)
		return status;

	if (pd->dpp_enabled) {
		dpp_page_addr = dev->nic_info.dpp_unmapped_addr +
				(pd->id * PAGE_SIZE);
		status = ocrdma_add_mmap(uctx, dpp_page_addr,
					 PAGE_SIZE);
		if (status)
			goto dpp_map_err;
		rsp.dpp_page_addr_hi = upper_32_bits(dpp_page_addr);
		rsp.dpp_page_addr_lo = dpp_page_addr;
	}

	status = ib_copy_to_udata(udata, &rsp, sizeof(rsp));
	if (status)
		goto ucopy_err;

	pd->uctx = uctx;
	return 0;

ucopy_err:
	if (pd->dpp_enabled)
		ocrdma_del_mmap(pd->uctx, dpp_page_addr, PAGE_SIZE);
dpp_map_err:
	ocrdma_del_mmap(pd->uctx, db_page_addr, db_page_size);
	return status;
}

struct ib_pd *ocrdma_alloc_pd(struct ib_device *ibdev,
			      struct ib_ucontext *context,
			      struct ib_udata *udata)
{
	struct ocrdma_dev *dev = get_ocrdma_dev(ibdev);
	struct ocrdma_pd *pd;
	struct ocrdma_ucontext *uctx = NULL;
	int status;
	u8 is_uctx_pd = false;

	if (udata && context) {
		uctx = get_ocrdma_ucontext(context);
		pd = ocrdma_get_ucontext_pd(uctx);
		if (pd) {
			is_uctx_pd = true;
			goto pd_mapping;
		}
	}

	pd = _ocrdma_alloc_pd(dev, uctx, udata);
	if (IS_ERR(pd)) {
		status = PTR_ERR(pd);
		goto exit;
	}

pd_mapping:
	if (udata && context) {
		status = ocrdma_copy_pd_uresp(dev, pd, context, udata);
		if (status)
			goto err;
	}
	return &pd->ibpd;

err:
	if (is_uctx_pd) {
		ocrdma_release_ucontext_pd(uctx);
	} else {
		status = ocrdma_mbx_dealloc_pd(dev, pd);
		kfree(pd);
	}
exit:
	return ERR_PTR(status);
}

int ocrdma_dealloc_pd(struct ib_pd *ibpd)
{
	struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
	struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
	struct ocrdma_ucontext *uctx = NULL;
	int status = 0;
	u64 usr_db;

	uctx = pd->uctx;
	if (uctx) {
		u64 dpp_db = dev->nic_info.dpp_unmapped_addr +
			(pd->id * PAGE_SIZE);
		if (pd->dpp_enabled)
			ocrdma_del_mmap(pd->uctx, dpp_db, PAGE_SIZE);
		usr_db = ocrdma_get_db_addr(dev, pd->id);
		ocrdma_del_mmap(pd->uctx, usr_db, dev->nic_info.db_page_size);

		if (is_ucontext_pd(uctx, pd)) {
			ocrdma_release_ucontext_pd(uctx);
			return status;
		}
	}
	status = _ocrdma_dealloc_pd(dev, pd);
	return status;
}
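
/* MR helpers: IB access flags are translated into the per-permission
 * bits of the hardware MR descriptor before an lkey is requested from
 * firmware. Local read is always granted, and the rkey simply mirrors
 * the lkey whenever any remote permission is set.
 */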
static int ocrdma_alloc_lkey(struct ocrdma_dev *dev, struct ocrdma_mr *mr,
			     u32 pdid, int acc, u32 num_pbls, u32 addr_check)
{
	int status;

	mr->hwmr.fr_mr = 0;
	mr->hwmr.local_rd = 1;
	mr->hwmr.remote_rd = (acc & IB_ACCESS_REMOTE_READ) ? 1 : 0;
	mr->hwmr.remote_wr = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
	mr->hwmr.local_wr = (acc & IB_ACCESS_LOCAL_WRITE) ? 1 : 0;
	mr->hwmr.mw_bind = (acc & IB_ACCESS_MW_BIND) ? 1 : 0;
	mr->hwmr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0;
	mr->hwmr.num_pbls = num_pbls;

	status = ocrdma_mbx_alloc_lkey(dev, &mr->hwmr, pdid, addr_check);
	if (status)
		return status;

	mr->ibmr.lkey = mr->hwmr.lkey;
	if (mr->hwmr.remote_wr || mr->hwmr.remote_rd)
		mr->ibmr.rkey = mr->hwmr.lkey;
	return 0;
}

struct ib_mr *ocrdma_get_dma_mr(struct ib_pd *ibpd, int acc)
{
	int status;
	struct ocrdma_mr *mr;
	struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
	struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);

	if (acc & IB_ACCESS_REMOTE_WRITE && !(acc & IB_ACCESS_LOCAL_WRITE)) {
		pr_err("%s err, invalid access rights\n", __func__);
		return ERR_PTR(-EINVAL);
	}

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	status = ocrdma_alloc_lkey(dev, mr, pd->id, acc, 0,
				   OCRDMA_ADDR_CHECK_DISABLE);
	if (status) {
		kfree(mr);
		return ERR_PTR(status);
	}

	return &mr->ibmr;
}

static void ocrdma_free_mr_pbl_tbl(struct ocrdma_dev *dev,
				   struct ocrdma_hw_mr *mr)
{
	struct pci_dev *pdev = dev->nic_info.pdev;
	int i = 0;

	if (mr->pbl_table) {
		for (i = 0; i < mr->num_pbls; i++) {
			if (!mr->pbl_table[i].va)
				continue;
			dma_free_coherent(&pdev->dev, mr->pbl_size,
					  mr->pbl_table[i].va,
					  mr->pbl_table[i].pa);
		}
		kfree(mr->pbl_table);
		mr->pbl_table = NULL;
	}
}
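
/* Choose the smallest PBL (physical buffer list) size, a power-of-two
 * multiple of OCRDMA_MIN_HPAGE_SIZE, that keeps the PBL count below
 * dev->attr.max_num_mr_pbl. Rough worked example with hypothetical
 * values: with a 4K minimum PBL and 8-byte PBEs, one PBL holds 512
 * entries, so 1280 user pages need roundup(1280, 512) / 512 = 3 PBLs;
 * the PBL size doubles only if 3 still exceeded the device limit.
 */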
static int ocrdma_get_pbl_info(struct ocrdma_dev *dev, struct ocrdma_mr *mr,
			       u32 num_pbes)
{
	u32 num_pbls = 0;
	u32 idx = 0;
	int status = 0;
	u32 pbl_size;

	do {
		pbl_size = OCRDMA_MIN_HPAGE_SIZE * (1 << idx);
		if (pbl_size > MAX_OCRDMA_PBL_SIZE) {
			status = -EFAULT;
			break;
		}
		num_pbls = roundup(num_pbes, (pbl_size / sizeof(u64)));
		num_pbls = num_pbls / (pbl_size / sizeof(u64));
		idx++;
	} while (num_pbls >= dev->attr.max_num_mr_pbl);

	mr->hwmr.num_pbes = num_pbes;
	mr->hwmr.num_pbls = num_pbls;
	mr->hwmr.pbl_size = pbl_size;
	return status;
}

static int ocrdma_build_pbl_tbl(struct ocrdma_dev *dev, struct ocrdma_hw_mr *mr)
{
	int status = 0;
	int i;
	u32 dma_len = mr->pbl_size;
	struct pci_dev *pdev = dev->nic_info.pdev;
	void *va;
	dma_addr_t pa;

	mr->pbl_table = kzalloc(sizeof(struct ocrdma_pbl) *
				mr->num_pbls, GFP_KERNEL);

	if (!mr->pbl_table)
		return -ENOMEM;

	for (i = 0; i < mr->num_pbls; i++) {
		va = dma_alloc_coherent(&pdev->dev, dma_len, &pa, GFP_KERNEL);
		if (!va) {
			ocrdma_free_mr_pbl_tbl(dev, mr);
			status = -ENOMEM;
			break;
		}
		memset(va, 0, dma_len);
		mr->pbl_table[i].va = va;
		mr->pbl_table[i].pa = pa;
	}
	return status;
}
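
/* Walk the pinned umem scatter-gather list and emit one little-endian
 * PBE (split into pa_lo/pa_hi) per user page into the PBL table built
 * above, moving to the next PBL whenever the current one fills.
 */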
static void build_user_pbes(struct ocrdma_dev *dev, struct ocrdma_mr *mr,
			    u32 num_pbes)
{
	struct ocrdma_pbe *pbe;
	struct scatterlist *sg;
	struct ocrdma_pbl *pbl_tbl = mr->hwmr.pbl_table;
	struct ib_umem *umem = mr->umem;
	int shift, pg_cnt, pages, pbe_cnt, entry, total_num_pbes = 0;

	if (!mr->hwmr.num_pbes)
		return;

	pbe = (struct ocrdma_pbe *)pbl_tbl->va;
	pbe_cnt = 0;

	shift = ilog2(umem->page_size);

	for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
		pages = sg_dma_len(sg) >> shift;
		for (pg_cnt = 0; pg_cnt < pages; pg_cnt++) {
			/* store the page address in pbe */
			pbe->pa_lo =
			    cpu_to_le32(sg_dma_address(sg) +
					(umem->page_size * pg_cnt));
			pbe->pa_hi =
			    cpu_to_le32(upper_32_bits(sg_dma_address(sg) +
					umem->page_size * pg_cnt));
			pbe_cnt += 1;
			total_num_pbes += 1;
			pbe++;

			/* if done building pbes, return; the caller
			 * issues the reg_mr mbx cmd.
			 */
			if (total_num_pbes == num_pbes)
				return;

			/* if the given pbl is full storing the pbes,
			 * move to next pbl.
			 */
			if (pbe_cnt ==
				(mr->hwmr.pbl_size / sizeof(u64))) {
				pbl_tbl++;
				pbe = (struct ocrdma_pbe *)pbl_tbl->va;
				pbe_cnt = 0;
			}

		}
	}
}

struct ib_mr *ocrdma_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len,
				 u64 usr_addr, int acc, struct ib_udata *udata)
{
	int status = -ENOMEM;
	struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
	struct ocrdma_mr *mr;
	struct ocrdma_pd *pd;
	u32 num_pbes;

	pd = get_ocrdma_pd(ibpd);

	if (acc & IB_ACCESS_REMOTE_WRITE && !(acc & IB_ACCESS_LOCAL_WRITE))
		return ERR_PTR(-EINVAL);

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(status);
	mr->umem = ib_umem_get(ibpd->uobject->context, start, len, acc, 0);
	if (IS_ERR(mr->umem)) {
		status = -EFAULT;
		goto umem_err;
	}
	num_pbes = ib_umem_page_count(mr->umem);
	status = ocrdma_get_pbl_info(dev, mr, num_pbes);
	if (status)
		goto umem_err;

	mr->hwmr.pbe_size = mr->umem->page_size;
	mr->hwmr.fbo = mr->umem->offset;
	mr->hwmr.va = usr_addr;
	mr->hwmr.len = len;
	mr->hwmr.remote_wr = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
	mr->hwmr.remote_rd = (acc & IB_ACCESS_REMOTE_READ) ? 1 : 0;
	mr->hwmr.local_wr = (acc & IB_ACCESS_LOCAL_WRITE) ? 1 : 0;
	mr->hwmr.local_rd = 1;
	mr->hwmr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0;
	status = ocrdma_build_pbl_tbl(dev, &mr->hwmr);
	if (status)
		goto umem_err;
	build_user_pbes(dev, mr, num_pbes);
	status = ocrdma_reg_mr(dev, &mr->hwmr, pd->id, acc);
	if (status)
		goto mbx_err;
	mr->ibmr.lkey = mr->hwmr.lkey;
	if (mr->hwmr.remote_wr || mr->hwmr.remote_rd)
		mr->ibmr.rkey = mr->hwmr.lkey;

	return &mr->ibmr;

mbx_err:
	ocrdma_free_mr_pbl_tbl(dev, &mr->hwmr);
umem_err:
	kfree(mr);
	return ERR_PTR(status);
}

int ocrdma_dereg_mr(struct ib_mr *ib_mr)
{
	struct ocrdma_mr *mr = get_ocrdma_mr(ib_mr);
	struct ocrdma_dev *dev = get_ocrdma_dev(ib_mr->device);
	int status;

	status = ocrdma_mbx_dealloc_lkey(dev, mr->hwmr.fr_mr, mr->hwmr.lkey);

	ocrdma_free_mr_pbl_tbl(dev, &mr->hwmr);

	/* it could be user registered memory. */
	if (mr->umem)
		ib_umem_release(mr->umem);
	kfree(mr);

	/* Don't stop cleanup, in case FW is unresponsive */
	if (dev->mqe_ctx.fw_error_state) {
		status = 0;
		pr_err("%s(%d) fw not responding.\n",
		       __func__, dev->id);
	}
	return status;
}

static int ocrdma_copy_cq_uresp(struct ocrdma_dev *dev, struct ocrdma_cq *cq,
				struct ib_udata *udata,
				struct ib_ucontext *ib_ctx)
{
	int status;
	struct ocrdma_ucontext *uctx = get_ocrdma_ucontext(ib_ctx);
	struct ocrdma_create_cq_uresp uresp;

	memset(&uresp, 0, sizeof(uresp));
	uresp.cq_id = cq->id;
	uresp.page_size = PAGE_ALIGN(cq->len);
	uresp.num_pages = 1;
	uresp.max_hw_cqe = cq->max_hw_cqe;
	uresp.page_addr[0] = cq->pa;
	uresp.db_page_addr = ocrdma_get_db_addr(dev, uctx->cntxt_pd->id);
	uresp.db_page_size = dev->nic_info.db_page_size;
	uresp.phase_change = cq->phase_change ? 1 : 0;
	status = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
	if (status) {
		pr_err("%s(%d) copy error cqid=0x%x.\n",
		       __func__, dev->id, cq->id);
		goto err;
	}
	status = ocrdma_add_mmap(uctx, uresp.db_page_addr, uresp.db_page_size);
	if (status)
		goto err;
	status = ocrdma_add_mmap(uctx, uresp.page_addr[0], uresp.page_size);
	if (status) {
		ocrdma_del_mmap(uctx, uresp.db_page_addr, uresp.db_page_size);
		goto err;
	}
	cq->ucontext = uctx;
err:
	return status;
}

struct ib_cq *ocrdma_create_cq(struct ib_device *ibdev, int entries, int vector,
			       struct ib_ucontext *ib_ctx,
			       struct ib_udata *udata)
{
	struct ocrdma_cq *cq;
	struct ocrdma_dev *dev = get_ocrdma_dev(ibdev);
	struct ocrdma_ucontext *uctx = NULL;
	u16 pd_id = 0;
	int status;
	struct ocrdma_create_cq_ureq ureq;

	if (udata) {
		if (ib_copy_from_udata(&ureq, udata, sizeof(ureq)))
			return ERR_PTR(-EFAULT);
	} else
		ureq.dpp_cq = 0;
	cq = kzalloc(sizeof(*cq), GFP_KERNEL);
	if (!cq)
		return ERR_PTR(-ENOMEM);

	spin_lock_init(&cq->cq_lock);
	spin_lock_init(&cq->comp_handler_lock);
	INIT_LIST_HEAD(&cq->sq_head);
	INIT_LIST_HEAD(&cq->rq_head);
	cq->first_arm = true;

	if (ib_ctx) {
		uctx = get_ocrdma_ucontext(ib_ctx);
		pd_id = uctx->cntxt_pd->id;
	}

	status = ocrdma_mbx_create_cq(dev, cq, entries, ureq.dpp_cq, pd_id);
	if (status) {
		kfree(cq);
		return ERR_PTR(status);
	}
	if (ib_ctx) {
		status = ocrdma_copy_cq_uresp(dev, cq, udata, ib_ctx);
		if (status)
			goto ctx_err;
	}
	cq->phase = OCRDMA_CQE_VALID;
	dev->cq_tbl[cq->id] = cq;
	return &cq->ibcq;

ctx_err:
	ocrdma_mbx_destroy_cq(dev, cq);
	kfree(cq);
	return ERR_PTR(status);
}

int ocrdma_resize_cq(struct ib_cq *ibcq, int new_cnt,
		     struct ib_udata *udata)
{
	int status = 0;
	struct ocrdma_cq *cq = get_ocrdma_cq(ibcq);

	if (new_cnt < 1 || new_cnt > cq->max_hw_cqe) {
		status = -EINVAL;
		return status;
	}
	ibcq->cqe = new_cnt;
	return status;
}
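
/* Before a CQ is destroyed, acknowledge every CQE still marked valid
 * with a single doorbell write, without re-arming the CQ. Presumably
 * this settles the hardware's CQE accounting; cq_lock serializes the
 * sweep against a polling thread the last interrupt may have scheduled.
 */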
static void ocrdma_flush_cq(struct ocrdma_cq *cq)
{
	int cqe_cnt;
	int valid_count = 0;
	unsigned long flags;

	struct ocrdma_dev *dev = get_ocrdma_dev(cq->ibcq.device);
	struct ocrdma_cqe *cqe = NULL;

	cqe = cq->va;
	cqe_cnt = cq->cqe_cnt;

	/* The last irq might have scheduled a polling thread;
	 * sync up with it before hard flushing.
	 */
	spin_lock_irqsave(&cq->cq_lock, flags);
	while (cqe_cnt) {
		if (is_cqe_valid(cq, cqe))
			valid_count++;
		cqe++;
		cqe_cnt--;
	}
	ocrdma_ring_cq_db(dev, cq->id, false, false, valid_count);
	spin_unlock_irqrestore(&cq->cq_lock, flags);
}

int ocrdma_destroy_cq(struct ib_cq *ibcq)
{
	int status;
	struct ocrdma_cq *cq = get_ocrdma_cq(ibcq);
	struct ocrdma_eq *eq = NULL;
	struct ocrdma_dev *dev = get_ocrdma_dev(ibcq->device);
	int pdid = 0;
	u32 irq, indx;

	dev->cq_tbl[cq->id] = NULL;
	indx = ocrdma_get_eq_table_index(dev, cq->eqn);
	if (indx == -EINVAL)
		BUG();

	eq = &dev->eq_tbl[indx];
	irq = ocrdma_get_irq(dev, eq);
	synchronize_irq(irq);
	ocrdma_flush_cq(cq);

	status = ocrdma_mbx_destroy_cq(dev, cq);
	if (cq->ucontext) {
		pdid = cq->ucontext->cntxt_pd->id;
		ocrdma_del_mmap(cq->ucontext, (u64) cq->pa,
				PAGE_ALIGN(cq->len));
		ocrdma_del_mmap(cq->ucontext,
				ocrdma_get_db_addr(dev, pdid),
				dev->nic_info.db_page_size);
	}

	kfree(cq);
	return status;
}

static int ocrdma_add_qpn_map(struct ocrdma_dev *dev, struct ocrdma_qp *qp)
{
	int status = -EINVAL;

	if (qp->id < OCRDMA_MAX_QP && dev->qp_tbl[qp->id] == NULL) {
		dev->qp_tbl[qp->id] = qp;
		status = 0;
	}
	return status;
}

static void ocrdma_del_qpn_map(struct ocrdma_dev *dev, struct ocrdma_qp *qp)
{
	dev->qp_tbl[qp->id] = NULL;
}
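
/* Validate ib_qp_init_attr against device limits before creating a QP.
 * Only GSI, RC, UC and UD QPs are supported, and the send-depth check
 * is skipped for QP1 so that a CM QP of depth 128 can always be made.
 */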
static int ocrdma_check_qp_params(struct ib_pd *ibpd, struct ocrdma_dev *dev,
				  struct ib_qp_init_attr *attrs)
{
	if ((attrs->qp_type != IB_QPT_GSI) &&
	    (attrs->qp_type != IB_QPT_RC) &&
	    (attrs->qp_type != IB_QPT_UC) &&
	    (attrs->qp_type != IB_QPT_UD)) {
		pr_err("%s(%d) unsupported qp type=0x%x requested\n",
		       __func__, dev->id, attrs->qp_type);
		return -EINVAL;
	}
	/* Skip the check for QP1 to support CM size of 128 */
	if ((attrs->qp_type != IB_QPT_GSI) &&
	    (attrs->cap.max_send_wr > dev->attr.max_wqe)) {
		pr_err("%s(%d) unsupported send_wr=0x%x requested\n",
		       __func__, dev->id, attrs->cap.max_send_wr);
		pr_err("%s(%d) supported send_wr=0x%x\n",
		       __func__, dev->id, dev->attr.max_wqe);
		return -EINVAL;
	}
	if (!attrs->srq && (attrs->cap.max_recv_wr > dev->attr.max_rqe)) {
		pr_err("%s(%d) unsupported recv_wr=0x%x requested\n",
		       __func__, dev->id, attrs->cap.max_recv_wr);
		pr_err("%s(%d) supported recv_wr=0x%x\n",
		       __func__, dev->id, dev->attr.max_rqe);
		return -EINVAL;
	}
	if (attrs->cap.max_inline_data > dev->attr.max_inline_data) {
		pr_err("%s(%d) unsupported inline data size=0x%x requested\n",
		       __func__, dev->id, attrs->cap.max_inline_data);
		pr_err("%s(%d) supported inline data size=0x%x\n",
		       __func__, dev->id, dev->attr.max_inline_data);
		return -EINVAL;
	}
	if (attrs->cap.max_send_sge > dev->attr.max_send_sge) {
		pr_err("%s(%d) unsupported send_sge=0x%x requested\n",
		       __func__, dev->id, attrs->cap.max_send_sge);
		pr_err("%s(%d) supported send_sge=0x%x\n",
		       __func__, dev->id, dev->attr.max_send_sge);
		return -EINVAL;
	}
	if (attrs->cap.max_recv_sge > dev->attr.max_recv_sge) {
		pr_err("%s(%d) unsupported recv_sge=0x%x requested\n",
		       __func__, dev->id, attrs->cap.max_recv_sge);
		pr_err("%s(%d) supported recv_sge=0x%x\n",
		       __func__, dev->id, dev->attr.max_recv_sge);
		return -EINVAL;
	}
	/* unprivileged user space cannot create special QP */
	if (ibpd->uobject && attrs->qp_type == IB_QPT_GSI) {
		pr_err
		    ("%s(%d) Userspace can't create special QPs of type=0x%x\n",
		     __func__, dev->id, attrs->qp_type);
		return -EINVAL;
	}
	/* allow creating only one GSI type of QP */
	if (attrs->qp_type == IB_QPT_GSI && dev->gsi_qp_created) {
		pr_err("%s(%d) GSI special QPs already created.\n",
		       __func__, dev->id);
		return -EINVAL;
	}
	/* verify consumer QPs are not trying to use GSI QP's CQ */
	if ((attrs->qp_type != IB_QPT_GSI) && (dev->gsi_qp_created)) {
		if ((dev->gsi_sqcq == get_ocrdma_cq(attrs->send_cq)) ||
		    (dev->gsi_rqcq == get_ocrdma_cq(attrs->recv_cq))) {
			pr_err("%s(%d) Consumer QP cannot use GSI CQs.\n",
			       __func__, dev->id);
			return -EINVAL;
		}
	}
	return 0;
}

static int ocrdma_copy_qp_uresp(struct ocrdma_qp *qp,
				struct ib_udata *udata, int dpp_offset,
				int dpp_credit_lmt, int srq)
{
	int status = 0;
	u64 usr_db;
	struct ocrdma_create_qp_uresp uresp;
	struct ocrdma_dev *dev = qp->dev;
	struct ocrdma_pd *pd = qp->pd;

	memset(&uresp, 0, sizeof(uresp));
	usr_db = dev->nic_info.unmapped_db +
			(pd->id * dev->nic_info.db_page_size);
	uresp.qp_id = qp->id;
	uresp.sq_dbid = qp->sq.dbid;
	uresp.num_sq_pages = 1;
	uresp.sq_page_size = PAGE_ALIGN(qp->sq.len);
	uresp.sq_page_addr[0] = qp->sq.pa;
	uresp.num_wqe_allocated = qp->sq.max_cnt;
	if (!srq) {
		uresp.rq_dbid = qp->rq.dbid;
		uresp.num_rq_pages = 1;
		uresp.rq_page_size = PAGE_ALIGN(qp->rq.len);
		uresp.rq_page_addr[0] = qp->rq.pa;
		uresp.num_rqe_allocated = qp->rq.max_cnt;
	}
	uresp.db_page_addr = usr_db;
	uresp.db_page_size = dev->nic_info.db_page_size;
	uresp.db_sq_offset = OCRDMA_DB_GEN2_SQ_OFFSET;
	uresp.db_rq_offset = OCRDMA_DB_GEN2_RQ_OFFSET;
	uresp.db_shift = OCRDMA_DB_RQ_SHIFT;

	if (qp->dpp_enabled) {
		uresp.dpp_credit = dpp_credit_lmt;
		uresp.dpp_offset = dpp_offset;
	}
	status = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
	if (status) {
		pr_err("%s(%d) user copy error.\n", __func__, dev->id);
		goto err;
	}
	status = ocrdma_add_mmap(pd->uctx, uresp.sq_page_addr[0],
				 uresp.sq_page_size);
	if (status)
		goto err;

	if (!srq) {
		status = ocrdma_add_mmap(pd->uctx, uresp.rq_page_addr[0],
					 uresp.rq_page_size);
		if (status)
			goto rq_map_err;
	}
	return status;
rq_map_err:
	ocrdma_del_mmap(pd->uctx, uresp.sq_page_addr[0], uresp.sq_page_size);
err:
	return status;
}
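
/* Kernel QPs ring doorbells through the kernel mapping of the doorbell
 * BAR. Each PD owns one doorbell page; the SQ/RQ register offsets
 * inside that page differ between the original ASIC generation and
 * SkyHawk-R.
 */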
static void ocrdma_set_qp_db(struct ocrdma_dev *dev, struct ocrdma_qp *qp,
			     struct ocrdma_pd *pd)
{
	if (ocrdma_get_asic_type(dev) == OCRDMA_ASIC_GEN_SKH_R) {
		qp->sq_db = dev->nic_info.db +
			(pd->id * dev->nic_info.db_page_size) +
			OCRDMA_DB_GEN2_SQ_OFFSET;
		qp->rq_db = dev->nic_info.db +
			(pd->id * dev->nic_info.db_page_size) +
			OCRDMA_DB_GEN2_RQ_OFFSET;
	} else {
		qp->sq_db = dev->nic_info.db +
			(pd->id * dev->nic_info.db_page_size) +
			OCRDMA_DB_SQ_OFFSET;
		qp->rq_db = dev->nic_info.db +
			(pd->id * dev->nic_info.db_page_size) +
			OCRDMA_DB_RQ_OFFSET;
	}
}

static int ocrdma_alloc_wr_id_tbl(struct ocrdma_qp *qp)
{
	qp->wqe_wr_id_tbl =
	    kzalloc(sizeof(*(qp->wqe_wr_id_tbl)) * qp->sq.max_cnt,
		    GFP_KERNEL);
	if (qp->wqe_wr_id_tbl == NULL)
		return -ENOMEM;
	qp->rqe_wr_id_tbl =
	    kzalloc(sizeof(u64) * qp->rq.max_cnt, GFP_KERNEL);
	if (qp->rqe_wr_id_tbl == NULL)
		return -ENOMEM;

	return 0;
}

static void ocrdma_set_qp_init_params(struct ocrdma_qp *qp,
				      struct ocrdma_pd *pd,
				      struct ib_qp_init_attr *attrs)
{
	qp->pd = pd;
	spin_lock_init(&qp->q_lock);
	INIT_LIST_HEAD(&qp->sq_entry);
	INIT_LIST_HEAD(&qp->rq_entry);

	qp->qp_type = attrs->qp_type;
	qp->cap_flags = OCRDMA_QP_INB_RD | OCRDMA_QP_INB_WR;
	qp->max_inline_data = attrs->cap.max_inline_data;
	qp->sq.max_sges = attrs->cap.max_send_sge;
	qp->rq.max_sges = attrs->cap.max_recv_sge;
	qp->state = OCRDMA_QPS_RST;
	qp->signaled = (attrs->sq_sig_type == IB_SIGNAL_ALL_WR) ? true : false;
}

static void ocrdma_store_gsi_qp_cq(struct ocrdma_dev *dev,
				   struct ib_qp_init_attr *attrs)
{
	if (attrs->qp_type == IB_QPT_GSI) {
		dev->gsi_qp_created = 1;
		dev->gsi_sqcq = get_ocrdma_cq(attrs->send_cq);
		dev->gsi_rqcq = get_ocrdma_cq(attrs->recv_cq);
	}
}

struct ib_qp *ocrdma_create_qp(struct ib_pd *ibpd,
			       struct ib_qp_init_attr *attrs,
			       struct ib_udata *udata)
{
	int status;
	struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
	struct ocrdma_qp *qp;
	struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
	struct ocrdma_create_qp_ureq ureq;
	u16 dpp_credit_lmt, dpp_offset;

	status = ocrdma_check_qp_params(ibpd, dev, attrs);
	if (status)
		goto gen_err;

	memset(&ureq, 0, sizeof(ureq));
	if (udata) {
		if (ib_copy_from_udata(&ureq, udata, sizeof(ureq)))
			return ERR_PTR(-EFAULT);
	}
	qp = kzalloc(sizeof(*qp), GFP_KERNEL);
	if (!qp) {
		status = -ENOMEM;
		goto gen_err;
	}
	qp->dev = dev;
	ocrdma_set_qp_init_params(qp, pd, attrs);
	if (udata == NULL)
		qp->cap_flags |= (OCRDMA_QP_MW_BIND | OCRDMA_QP_LKEY0 |
					OCRDMA_QP_FAST_REG);

	mutex_lock(&dev->dev_lock);
	status = ocrdma_mbx_create_qp(qp, attrs, ureq.enable_dpp_cq,
					ureq.dpp_cq_id,
					&dpp_offset, &dpp_credit_lmt);
	if (status)
		goto mbx_err;

	/* user space QP's wr_id table are managed in library */
	if (udata == NULL) {
		status = ocrdma_alloc_wr_id_tbl(qp);
		if (status)
			goto map_err;
	}

	status = ocrdma_add_qpn_map(dev, qp);
	if (status)
		goto map_err;
	ocrdma_set_qp_db(dev, qp, pd);
	if (udata) {
		status = ocrdma_copy_qp_uresp(qp, udata, dpp_offset,
					      dpp_credit_lmt,
					      (attrs->srq != NULL));
		if (status)
			goto cpy_err;
	}
	ocrdma_store_gsi_qp_cq(dev, attrs);
	qp->ibqp.qp_num = qp->id;
	mutex_unlock(&dev->dev_lock);
	return &qp->ibqp;

cpy_err:
	ocrdma_del_qpn_map(dev, qp);
map_err:
	ocrdma_mbx_destroy_qp(dev, qp);
mbx_err:
	mutex_unlock(&dev->dev_lock);
	kfree(qp->wqe_wr_id_tbl);
	kfree(qp->rqe_wr_id_tbl);
	kfree(qp);
	pr_err("%s(%d) error=%d\n", __func__, dev->id, status);
gen_err:
	return ERR_PTR(status);
}

int _ocrdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		      int attr_mask)
{
	int status = 0;
	struct ocrdma_qp *qp;
	struct ocrdma_dev *dev;
	enum ib_qp_state old_qps;

	qp = get_ocrdma_qp(ibqp);
	dev = qp->dev;
	if (attr_mask & IB_QP_STATE)
		status = ocrdma_qp_state_change(qp, attr->qp_state, &old_qps);
	/* if the new and previous states are the same, hw doesn't need
	 * to know about it.
	 */
	if (status < 0)
		return status;
	status = ocrdma_mbx_modify_qp(dev, qp, attr, attr_mask);

	return status;
}

int ocrdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		     int attr_mask, struct ib_udata *udata)
{
	unsigned long flags;
	int status = -EINVAL;
	struct ocrdma_qp *qp;
	struct ocrdma_dev *dev;
	enum ib_qp_state old_qps, new_qps;

	qp = get_ocrdma_qp(ibqp);
	dev = qp->dev;

	/* synchronize with multiple contexts trying to change or
	 * retrieve qps
	 */
	mutex_lock(&dev->dev_lock);
	/* synchronize with wqe, rqe posting and cqe processing contexts */
	spin_lock_irqsave(&qp->q_lock, flags);
	old_qps = get_ibqp_state(qp->state);
	if (attr_mask & IB_QP_STATE)
		new_qps = attr->qp_state;
	else
		new_qps = old_qps;
	spin_unlock_irqrestore(&qp->q_lock, flags);

	if (!ib_modify_qp_is_ok(old_qps, new_qps, ibqp->qp_type, attr_mask,
				IB_LINK_LAYER_ETHERNET)) {
		pr_err("%s(%d) invalid attribute mask=0x%x specified for\n"
		       "qpn=0x%x of type=0x%x old_qps=0x%x, new_qps=0x%x\n",
		       __func__, dev->id, attr_mask, qp->id, ibqp->qp_type,
		       old_qps, new_qps);
		goto param_err;
	}

	status = _ocrdma_modify_qp(ibqp, attr, attr_mask);
	if (status > 0)
		status = 0;
param_err:
	mutex_unlock(&dev->dev_lock);
	return status;
}

static enum ib_mtu ocrdma_mtu_int_to_enum(u16 mtu)
{
	switch (mtu) {
	case 256:
		return IB_MTU_256;
	case 512:
		return IB_MTU_512;
	case 1024:
		return IB_MTU_1024;
	case 2048:
		return IB_MTU_2048;
	case 4096:
		return IB_MTU_4096;
	default:
		return IB_MTU_1024;
	}
}

static int ocrdma_to_ib_qp_acc_flags(int qp_cap_flags)
{
	int ib_qp_acc_flags = 0;

	if (qp_cap_flags & OCRDMA_QP_INB_WR)
		ib_qp_acc_flags |= IB_ACCESS_REMOTE_WRITE;
	if (qp_cap_flags & OCRDMA_QP_INB_RD)
		ib_qp_acc_flags |= IB_ACCESS_LOCAL_WRITE;
	return ib_qp_acc_flags;
}

int ocrdma_query_qp(struct ib_qp *ibqp,
		    struct ib_qp_attr *qp_attr,
		    int attr_mask, struct ib_qp_init_attr *qp_init_attr)
{
	int status;
	u32 qp_state;
	struct ocrdma_qp_params params;
	struct ocrdma_qp *qp = get_ocrdma_qp(ibqp);
	struct ocrdma_dev *dev = qp->dev;

	memset(&params, 0, sizeof(params));
	mutex_lock(&dev->dev_lock);
	status = ocrdma_mbx_query_qp(dev, qp, &params);
	mutex_unlock(&dev->dev_lock);
	if (status)
		goto mbx_err;
	qp_attr->qp_state = get_ibqp_state(IB_QPS_INIT);
	qp_attr->cur_qp_state = get_ibqp_state(IB_QPS_INIT);
	qp_attr->path_mtu =
		ocrdma_mtu_int_to_enum(params.path_mtu_pkey_indx &
				       OCRDMA_QP_PARAMS_PATH_MTU_MASK) >>
				       OCRDMA_QP_PARAMS_PATH_MTU_SHIFT;
	qp_attr->path_mig_state = IB_MIG_MIGRATED;
	qp_attr->rq_psn = params.hop_lmt_rq_psn & OCRDMA_QP_PARAMS_RQ_PSN_MASK;
	qp_attr->sq_psn = params.tclass_sq_psn & OCRDMA_QP_PARAMS_SQ_PSN_MASK;
	qp_attr->dest_qp_num =
	    params.ack_to_rnr_rtc_dest_qpn & OCRDMA_QP_PARAMS_DEST_QPN_MASK;

	qp_attr->qp_access_flags = ocrdma_to_ib_qp_acc_flags(qp->cap_flags);
	qp_attr->cap.max_send_wr = qp->sq.max_cnt - 1;
	qp_attr->cap.max_recv_wr = qp->rq.max_cnt - 1;
	qp_attr->cap.max_send_sge = qp->sq.max_sges;
	qp_attr->cap.max_recv_sge = qp->rq.max_sges;
	qp_attr->cap.max_inline_data = qp->max_inline_data;
	qp_init_attr->cap = qp_attr->cap;
	memcpy(&qp_attr->ah_attr.grh.dgid, &params.dgid[0],
	       sizeof(params.dgid));
	qp_attr->ah_attr.grh.flow_label = params.rnt_rc_sl_fl &
	    OCRDMA_QP_PARAMS_FLOW_LABEL_MASK;
	qp_attr->ah_attr.grh.sgid_index = qp->sgid_idx;
	qp_attr->ah_attr.grh.hop_limit = (params.hop_lmt_rq_psn &
					  OCRDMA_QP_PARAMS_HOP_LMT_MASK) >>
					  OCRDMA_QP_PARAMS_HOP_LMT_SHIFT;
	qp_attr->ah_attr.grh.traffic_class = (params.tclass_sq_psn &
					      OCRDMA_QP_PARAMS_TCLASS_MASK) >>
					      OCRDMA_QP_PARAMS_TCLASS_SHIFT;

	qp_attr->ah_attr.ah_flags = IB_AH_GRH;
	qp_attr->ah_attr.port_num = 1;
	qp_attr->ah_attr.sl = (params.rnt_rc_sl_fl &
			       OCRDMA_QP_PARAMS_SL_MASK) >>
			       OCRDMA_QP_PARAMS_SL_SHIFT;
	qp_attr->timeout = (params.ack_to_rnr_rtc_dest_qpn &
			    OCRDMA_QP_PARAMS_ACK_TIMEOUT_MASK) >>
			    OCRDMA_QP_PARAMS_ACK_TIMEOUT_SHIFT;
	qp_attr->rnr_retry = (params.ack_to_rnr_rtc_dest_qpn &
			      OCRDMA_QP_PARAMS_RNR_RETRY_CNT_MASK) >>
			      OCRDMA_QP_PARAMS_RNR_RETRY_CNT_SHIFT;
	qp_attr->retry_cnt =
	    (params.rnt_rc_sl_fl & OCRDMA_QP_PARAMS_RETRY_CNT_MASK) >>
	    OCRDMA_QP_PARAMS_RETRY_CNT_SHIFT;
	qp_attr->min_rnr_timer = 0;
	qp_attr->pkey_index = 0;
	qp_attr->port_num = 1;
	qp_attr->ah_attr.src_path_bits = 0;
	qp_attr->ah_attr.static_rate = 0;
	qp_attr->alt_pkey_index = 0;
	qp_attr->alt_port_num = 0;
	qp_attr->alt_timeout = 0;
	memset(&qp_attr->alt_ah_attr, 0, sizeof(qp_attr->alt_ah_attr));
	qp_state = (params.max_sge_recv_flags & OCRDMA_QP_PARAMS_STATE_MASK) >>
		    OCRDMA_QP_PARAMS_STATE_SHIFT;
	qp_attr->sq_draining = (qp_state == OCRDMA_QPS_SQ_DRAINING) ? 1 : 0;
	qp_attr->max_dest_rd_atomic =
	    params.max_ord_ird >> OCRDMA_QP_PARAMS_MAX_ORD_SHIFT;
	qp_attr->max_rd_atomic =
	    params.max_ord_ird & OCRDMA_QP_PARAMS_MAX_IRD_MASK;
	qp_attr->en_sqd_async_notify = (params.max_sge_recv_flags &
				OCRDMA_QP_PARAMS_FLAGS_SQD_ASYNC) ? 1 : 0;
mbx_err:
	return status;
}

static void ocrdma_srq_toggle_bit(struct ocrdma_srq *srq, int idx)
{
	int i = idx / 32;
	unsigned int mask = (1 << (idx % 32));

	if (srq->idx_bit_fields[i] & mask)
		srq->idx_bit_fields[i] &= ~mask;
	else
		srq->idx_bit_fields[i] |= mask;
}
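
/* Circular work-queue index helpers: head is the producer index, tail
 * the consumer index and max_wqe_idx the wrap mask. Worked example
 * with hypothetical values: assuming max_cnt = 16 (so max_wqe_idx = 15),
 * head = 2 and tail = 14, the free count is
 * ((15 - 2) + 14) % 16 = 11 entries.
 */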
static int ocrdma_hwq_free_cnt(struct ocrdma_qp_hwq_info *q)
{
	return ((q->max_wqe_idx - q->head) + q->tail) % q->max_cnt;
}

static int is_hw_sq_empty(struct ocrdma_qp *qp)
{
	return (qp->sq.tail == qp->sq.head);
}

static int is_hw_rq_empty(struct ocrdma_qp *qp)
{
	return (qp->rq.tail == qp->rq.head);
}

static void *ocrdma_hwq_head(struct ocrdma_qp_hwq_info *q)
{
	return q->va + (q->head * q->entry_size);
}

static void *ocrdma_hwq_head_from_idx(struct ocrdma_qp_hwq_info *q,
				      u32 idx)
{
	return q->va + (idx * q->entry_size);
}

static void ocrdma_hwq_inc_head(struct ocrdma_qp_hwq_info *q)
{
	q->head = (q->head + 1) & q->max_wqe_idx;
}

static void ocrdma_hwq_inc_tail(struct ocrdma_qp_hwq_info *q)
{
	q->tail = (q->tail + 1) & q->max_wqe_idx;
}
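
/* A QP being torn down may still own entries in its CQs. Rather than
 * completing them, the destroy path clears the qpn of every matching
 * CQE so that ocrdma_poll_cq() skips them, and advances the SQ and RQ
 * (or SRQ) tail past the corresponding work requests.
 */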
1526 | ||
/* Discard the CQEs for a given QP. */
static void ocrdma_discard_cqes(struct ocrdma_qp *qp, struct ocrdma_cq *cq)
{
	unsigned long cq_flags;
	unsigned long flags;
	int discard_cnt = 0;
	u32 cur_getp, stop_getp;
	struct ocrdma_cqe *cqe;
	u32 qpn = 0, wqe_idx = 0;

	spin_lock_irqsave(&cq->cq_lock, cq_flags);

	/* Traverse the CQEs in the hardware CQ, find those matching the
	 * given QP, and mark each match discarded by clearing its qpn.
	 * The doorbell is rung later in poll_cq(), since CQEs are never
	 * completed out of order.
	 */

	cur_getp = cq->getp;
	/* Find up to where we reap the CQ. */
	stop_getp = cur_getp;
	do {
		if (is_hw_sq_empty(qp) && (!qp->srq && is_hw_rq_empty(qp)))
			break;

		cqe = cq->va + cur_getp;
		/* Exit when either (a) the whole hardware CQ has been
		 * reaped, or (b) the QP's work queues become empty.
		 */
		qpn = cqe->cmn.qpn & OCRDMA_CQE_QPN_MASK;
		/* A previously discarded CQE has qpn == 0; skip it,
		 * along with CQEs belonging to other QPs.
		 */
		if (qpn == 0 || qpn != qp->id)
			goto skip_cqe;

		if (is_cqe_for_sq(cqe)) {
			ocrdma_hwq_inc_tail(&qp->sq);
		} else {
			if (qp->srq) {
				wqe_idx = (le32_to_cpu(cqe->rq.buftag_qpn) >>
					OCRDMA_CQE_BUFTAG_SHIFT) &
					qp->srq->rq.max_wqe_idx;
				if (wqe_idx < 1)
					BUG();
				spin_lock_irqsave(&qp->srq->q_lock, flags);
				ocrdma_hwq_inc_tail(&qp->srq->rq);
				ocrdma_srq_toggle_bit(qp->srq, wqe_idx - 1);
				spin_unlock_irqrestore(&qp->srq->q_lock, flags);

			} else {
				ocrdma_hwq_inc_tail(&qp->rq);
			}
		}
		/* Mark the CQE discarded so that it is not picked up
		 * later in poll_cq().
		 */
		discard_cnt += 1;
		cqe->cmn.qpn = 0;
skip_cqe:
		cur_getp = (cur_getp + 1) % cq->max_hw_cqe;
	} while (cur_getp != stop_getp);
	spin_unlock_irqrestore(&cq->cq_lock, cq_flags);
}

void ocrdma_del_flush_qp(struct ocrdma_qp *qp)
{
	int found = false;
	unsigned long flags;
	struct ocrdma_dev *dev = qp->dev;
	/* sync with any active CQ poll */

	spin_lock_irqsave(&dev->flush_q_lock, flags);
	found = ocrdma_is_qp_in_sq_flushlist(qp->sq_cq, qp);
	if (found)
		list_del(&qp->sq_entry);
	if (!qp->srq) {
		found = ocrdma_is_qp_in_rq_flushlist(qp->rq_cq, qp);
		if (found)
			list_del(&qp->rq_entry);
	}
	spin_unlock_irqrestore(&dev->flush_q_lock, flags);
}

int ocrdma_destroy_qp(struct ib_qp *ibqp)
{
	int status;
	struct ocrdma_pd *pd;
	struct ocrdma_qp *qp;
	struct ocrdma_dev *dev;
	struct ib_qp_attr attrs;
	int attr_mask = IB_QP_STATE;
	unsigned long flags;

	qp = get_ocrdma_qp(ibqp);
	dev = qp->dev;

	attrs.qp_state = IB_QPS_ERR;
	pd = qp->pd;

	/* change the QP state to ERROR */
	_ocrdma_modify_qp(ibqp, &attrs, attr_mask);

	/* Ensure that CQEs for a newly created QP (whose id may be the
	 * same as that of the QP just destroyed) don't get discarded
	 * until the old QP's CQEs are discarded.
	 */
	mutex_lock(&dev->dev_lock);
	status = ocrdma_mbx_destroy_qp(dev, qp);

	/*
	 * Acquire the CQ lock while destroy is in progress, in order to
	 * protect against processing in-flight CQEs for this QP.
	 */
	spin_lock_irqsave(&qp->sq_cq->cq_lock, flags);
	if (qp->rq_cq && (qp->rq_cq != qp->sq_cq))
		spin_lock(&qp->rq_cq->cq_lock);

	ocrdma_del_qpn_map(dev, qp);

	if (qp->rq_cq && (qp->rq_cq != qp->sq_cq))
		spin_unlock(&qp->rq_cq->cq_lock);
	spin_unlock_irqrestore(&qp->sq_cq->cq_lock, flags);

	if (!pd->uctx) {
		ocrdma_discard_cqes(qp, qp->sq_cq);
		ocrdma_discard_cqes(qp, qp->rq_cq);
	}
	mutex_unlock(&dev->dev_lock);

	if (pd->uctx) {
		ocrdma_del_mmap(pd->uctx, (u64) qp->sq.pa,
				PAGE_ALIGN(qp->sq.len));
		if (!qp->srq)
			ocrdma_del_mmap(pd->uctx, (u64) qp->rq.pa,
					PAGE_ALIGN(qp->rq.len));
	}

	ocrdma_del_flush_qp(qp);

	kfree(qp->wqe_wr_id_tbl);
	kfree(qp->rqe_wr_id_tbl);
	kfree(qp);
	return status;
}

static int ocrdma_copy_srq_uresp(struct ocrdma_dev *dev, struct ocrdma_srq *srq,
				 struct ib_udata *udata)
{
	int status;
	struct ocrdma_create_srq_uresp uresp;

	memset(&uresp, 0, sizeof(uresp));
	uresp.rq_dbid = srq->rq.dbid;
	uresp.num_rq_pages = 1;
	uresp.rq_page_addr[0] = srq->rq.pa;
	uresp.rq_page_size = srq->rq.len;
	uresp.db_page_addr = dev->nic_info.unmapped_db +
	    (srq->pd->id * dev->nic_info.db_page_size);
	uresp.db_page_size = dev->nic_info.db_page_size;
	uresp.num_rqe_allocated = srq->rq.max_cnt;
	if (ocrdma_get_asic_type(dev) == OCRDMA_ASIC_GEN_SKH_R) {
		uresp.db_rq_offset = OCRDMA_DB_GEN2_RQ_OFFSET;
		uresp.db_shift = 24;
	} else {
		uresp.db_rq_offset = OCRDMA_DB_RQ_OFFSET;
		uresp.db_shift = 16;
	}

	status = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
	if (status)
		return status;
	status = ocrdma_add_mmap(srq->pd->uctx, uresp.rq_page_addr[0],
				 uresp.rq_page_size);
	return status;
}

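/* Note (added commentary): for kernel consumers the SRQ keeps a shadow
 * wr_id table plus the idx_bit_fields free-slot bitmap allocated below.
 * The bitmap starts as all-ones (every slot free); get_idx() and CQE
 * consumption toggle individual bits as slots are loaned out and
 * returned. User-space SRQs manage their own queue, so neither table is
 * allocated when udata is supplied.
 */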
struct ib_srq *ocrdma_create_srq(struct ib_pd *ibpd,
				 struct ib_srq_init_attr *init_attr,
				 struct ib_udata *udata)
{
	int status = -ENOMEM;
	struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
	struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
	struct ocrdma_srq *srq;

	if (init_attr->attr.max_sge > dev->attr.max_recv_sge)
		return ERR_PTR(-EINVAL);
	if (init_attr->attr.max_wr > dev->attr.max_rqe)
		return ERR_PTR(-EINVAL);

	srq = kzalloc(sizeof(*srq), GFP_KERNEL);
	if (!srq)
		return ERR_PTR(status);

	spin_lock_init(&srq->q_lock);
	srq->pd = pd;
	srq->db = dev->nic_info.db + (pd->id * dev->nic_info.db_page_size);
	status = ocrdma_mbx_create_srq(dev, srq, init_attr, pd);
	if (status)
		goto err;

	if (udata == NULL) {
		srq->rqe_wr_id_tbl = kzalloc(sizeof(u64) * srq->rq.max_cnt,
					     GFP_KERNEL);
		if (srq->rqe_wr_id_tbl == NULL)
			goto arm_err;

		srq->bit_fields_len = (srq->rq.max_cnt / 32) +
		    (srq->rq.max_cnt % 32 ? 1 : 0);
		srq->idx_bit_fields =
		    kmalloc(srq->bit_fields_len * sizeof(u32), GFP_KERNEL);
		if (srq->idx_bit_fields == NULL)
			goto arm_err;
		memset(srq->idx_bit_fields, 0xff,
		       srq->bit_fields_len * sizeof(u32));
	}

	if (init_attr->attr.srq_limit) {
		status = ocrdma_mbx_modify_srq(srq, &init_attr->attr);
		if (status)
			goto arm_err;
	}

	if (udata) {
		status = ocrdma_copy_srq_uresp(dev, srq, udata);
		if (status)
			goto arm_err;
	}

	return &srq->ibsrq;

arm_err:
	ocrdma_mbx_destroy_srq(dev, srq);
err:
	kfree(srq->rqe_wr_id_tbl);
	kfree(srq->idx_bit_fields);
	kfree(srq);
	return ERR_PTR(status);
}

int ocrdma_modify_srq(struct ib_srq *ibsrq,
		      struct ib_srq_attr *srq_attr,
		      enum ib_srq_attr_mask srq_attr_mask,
		      struct ib_udata *udata)
{
	int status = 0;
	struct ocrdma_srq *srq;

	srq = get_ocrdma_srq(ibsrq);
	if (srq_attr_mask & IB_SRQ_MAX_WR)
		status = -EINVAL;
	else
		status = ocrdma_mbx_modify_srq(srq, srq_attr);
	return status;
}

int ocrdma_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr)
{
	int status;
	struct ocrdma_srq *srq;

	srq = get_ocrdma_srq(ibsrq);
	status = ocrdma_mbx_query_srq(srq, srq_attr);
	return status;
}

int ocrdma_destroy_srq(struct ib_srq *ibsrq)
{
	int status;
	struct ocrdma_srq *srq;
	struct ocrdma_dev *dev = get_ocrdma_dev(ibsrq->device);

	srq = get_ocrdma_srq(ibsrq);

	status = ocrdma_mbx_destroy_srq(dev, srq);

	if (srq->pd->uctx)
		ocrdma_del_mmap(srq->pd->uctx, (u64) srq->rq.pa,
				PAGE_ALIGN(srq->rq.len));

	kfree(srq->idx_bit_fields);
	kfree(srq->rqe_wr_id_tbl);
	kfree(srq);
	return status;
}

/* unprivileged verbs and their support functions. */
static void ocrdma_build_ud_hdr(struct ocrdma_qp *qp,
				struct ocrdma_hdr_wqe *hdr,
				struct ib_send_wr *wr)
{
	struct ocrdma_ewqe_ud_hdr *ud_hdr =
		(struct ocrdma_ewqe_ud_hdr *)(hdr + 1);
	struct ocrdma_ah *ah = get_ocrdma_ah(wr->wr.ud.ah);

	ud_hdr->rsvd_dest_qpn = wr->wr.ud.remote_qpn;
	if (qp->qp_type == IB_QPT_GSI)
		ud_hdr->qkey = qp->qkey;
	else
		ud_hdr->qkey = wr->wr.ud.remote_qkey;
	ud_hdr->rsvd_ahid = ah->id;
}

static void ocrdma_build_sges(struct ocrdma_hdr_wqe *hdr,
			      struct ocrdma_sge *sge, int num_sge,
			      struct ib_sge *sg_list)
{
	int i;

	for (i = 0; i < num_sge; i++) {
		sge[i].lrkey = sg_list[i].lkey;
		sge[i].addr_lo = sg_list[i].addr;
		sge[i].addr_hi = upper_32_bits(sg_list[i].addr);
		sge[i].len = sg_list[i].length;
		hdr->total_len += sg_list[i].length;
	}
	if (num_sge == 0)
		memset(sge, 0, sizeof(*sge));
}

static inline uint32_t ocrdma_sglist_len(struct ib_sge *sg_list, int num_sge)
{
	uint32_t total_len = 0, i;

	for (i = 0; i < num_sge; i++)
		total_len += sg_list[i].length;
	return total_len;
}

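/* Note (added commentary): for IB_SEND_INLINE on a non-UD QP, the
 * payload bytes are memcpy'd directly into the WQE, so no lkey/DMA
 * lookup happens at send time; the post fails with -EINVAL when the
 * scatter list exceeds the max_inline_data negotiated at QP creation.
 * Otherwise a normal SGE list referencing registered memory is built.
 */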
static int ocrdma_build_inline_sges(struct ocrdma_qp *qp,
				    struct ocrdma_hdr_wqe *hdr,
				    struct ocrdma_sge *sge,
				    struct ib_send_wr *wr, u32 wqe_size)
{
	int i;
	char *dpp_addr;

	if (wr->send_flags & IB_SEND_INLINE && qp->qp_type != IB_QPT_UD) {
		hdr->total_len = ocrdma_sglist_len(wr->sg_list, wr->num_sge);
		if (unlikely(hdr->total_len > qp->max_inline_data)) {
			pr_err("%s() supported_len=0x%x,\n"
			       " unsupported len req=0x%x\n", __func__,
			       qp->max_inline_data, hdr->total_len);
			return -EINVAL;
		}
		dpp_addr = (char *)sge;
		for (i = 0; i < wr->num_sge; i++) {
			memcpy(dpp_addr,
			       (void *)(unsigned long)wr->sg_list[i].addr,
			       wr->sg_list[i].length);
			dpp_addr += wr->sg_list[i].length;
		}

		wqe_size += roundup(hdr->total_len, OCRDMA_WQE_ALIGN_BYTES);
		if (hdr->total_len == 0)
			wqe_size += sizeof(struct ocrdma_sge);
		hdr->cw |= (OCRDMA_TYPE_INLINE << OCRDMA_WQE_TYPE_SHIFT);
	} else {
		ocrdma_build_sges(hdr, sge, wr->num_sge, wr->sg_list);
		if (wr->num_sge)
			wqe_size += (wr->num_sge * sizeof(struct ocrdma_sge));
		else
			wqe_size += sizeof(struct ocrdma_sge);
		hdr->cw |= (OCRDMA_TYPE_LKEY << OCRDMA_WQE_TYPE_SHIFT);
	}
	hdr->cw |= ((wqe_size / OCRDMA_WQE_STRIDE) << OCRDMA_WQE_SIZE_SHIFT);
	return 0;
}

static int ocrdma_build_send(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr,
			     struct ib_send_wr *wr)
{
	int status;
	struct ocrdma_sge *sge;
	u32 wqe_size = sizeof(*hdr);

	if (qp->qp_type == IB_QPT_UD || qp->qp_type == IB_QPT_GSI) {
		ocrdma_build_ud_hdr(qp, hdr, wr);
		sge = (struct ocrdma_sge *)(hdr + 2);
		wqe_size += sizeof(struct ocrdma_ewqe_ud_hdr);
	} else {
		sge = (struct ocrdma_sge *)(hdr + 1);
	}

	status = ocrdma_build_inline_sges(qp, hdr, sge, wr, wqe_size);
	return status;
}

static int ocrdma_build_write(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr,
			      struct ib_send_wr *wr)
{
	int status;
	struct ocrdma_sge *ext_rw = (struct ocrdma_sge *)(hdr + 1);
	struct ocrdma_sge *sge = ext_rw + 1;
	u32 wqe_size = sizeof(*hdr) + sizeof(*ext_rw);

	status = ocrdma_build_inline_sges(qp, hdr, sge, wr, wqe_size);
	if (status)
		return status;
	ext_rw->addr_lo = wr->wr.rdma.remote_addr;
	ext_rw->addr_hi = upper_32_bits(wr->wr.rdma.remote_addr);
	ext_rw->lrkey = wr->wr.rdma.rkey;
	ext_rw->len = hdr->total_len;
	return 0;
}

static void ocrdma_build_read(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr,
			      struct ib_send_wr *wr)
{
	struct ocrdma_sge *ext_rw = (struct ocrdma_sge *)(hdr + 1);
	struct ocrdma_sge *sge = ext_rw + 1;
	u32 wqe_size = ((wr->num_sge + 1) * sizeof(struct ocrdma_sge)) +
	    sizeof(struct ocrdma_hdr_wqe);

	ocrdma_build_sges(hdr, sge, wr->num_sge, wr->sg_list);
	hdr->cw |= ((wqe_size / OCRDMA_WQE_STRIDE) << OCRDMA_WQE_SIZE_SHIFT);
	hdr->cw |= (OCRDMA_READ << OCRDMA_WQE_OPCODE_SHIFT);
	hdr->cw |= (OCRDMA_TYPE_LKEY << OCRDMA_WQE_TYPE_SHIFT);

	ext_rw->addr_lo = wr->wr.rdma.remote_addr;
	ext_rw->addr_hi = upper_32_bits(wr->wr.rdma.remote_addr);
	ext_rw->lrkey = wr->wr.rdma.rkey;
	ext_rw->len = hdr->total_len;
}

static void build_frmr_pbes(struct ib_send_wr *wr, struct ocrdma_pbl *pbl_tbl,
			    struct ocrdma_hw_mr *hwmr)
{
	int i;
	u64 buf_addr = 0;
	int num_pbes;
	struct ocrdma_pbe *pbe;

	pbe = (struct ocrdma_pbe *)pbl_tbl->va;
	num_pbes = 0;

	/* Go through the page list and fill hw PBE entries into the
	 * PBLs; each fast-reg page-list entry maps to one PBE.
	 */
	for (i = 0; i < wr->wr.fast_reg.page_list_len; i++) {
		buf_addr = wr->wr.fast_reg.page_list->page_list[i];
		pbe->pa_lo = cpu_to_le32((u32) (buf_addr & PAGE_MASK));
		pbe->pa_hi = cpu_to_le32((u32) upper_32_bits(buf_addr));
		num_pbes += 1;
		pbe++;

		/* if the pbl is full storing the pbes,
		 * move to next pbl.
		 */
		if (num_pbes == (hwmr->pbl_size/sizeof(u64))) {
			pbl_tbl++;
			pbe = (struct ocrdma_pbe *)pbl_tbl->va;
			/* Reset the per-PBL counter so subsequent PBL
			 * rollovers are detected too (kept consistent
			 * with build_kernel_pbes() below).
			 */
			num_pbes = 0;
		}
	}
	return;
}

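/* Note (added commentary): the adapter encodes the FRMR page size as a
 * power-of-two exponent relative to 4K, i.e. pg_sz == 4096 << encoded.
 * For example 4096 -> 0, 8192 -> 1, 65536 -> 4, up to 256M -> 16; an
 * unsupported size falls out of the loop and returns 17.
 */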
static int get_encoded_page_size(int pg_sz)
{
	/* Max size is 256M: 4096 << 16 */
	int i = 0;

	for (; i < 17; i++)
		if (pg_sz == (4096 << i))
			break;
	return i;
}

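/* Note (added commentary): fbo (first byte offset) below is the offset
 * of iova_start within the first page of the page list; the hardware
 * combines the page-aligned PBEs with this offset to locate the first
 * valid byte of the registered region.
 */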
static int ocrdma_build_fr(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr,
			   struct ib_send_wr *wr)
{
	u64 fbo;
	struct ocrdma_ewqe_fr *fast_reg = (struct ocrdma_ewqe_fr *)(hdr + 1);
	struct ocrdma_mr *mr;
	u32 wqe_size = sizeof(*fast_reg) + sizeof(*hdr);

	wqe_size = roundup(wqe_size, OCRDMA_WQE_ALIGN_BYTES);

	if (wr->wr.fast_reg.page_list_len > qp->dev->attr.max_pages_per_frmr)
		return -EINVAL;

	hdr->cw |= (OCRDMA_FR_MR << OCRDMA_WQE_OPCODE_SHIFT);
	hdr->cw |= ((wqe_size / OCRDMA_WQE_STRIDE) << OCRDMA_WQE_SIZE_SHIFT);

	if (wr->wr.fast_reg.page_list_len == 0)
		BUG();
	if (wr->wr.fast_reg.access_flags & IB_ACCESS_LOCAL_WRITE)
		hdr->rsvd_lkey_flags |= OCRDMA_LKEY_FLAG_LOCAL_WR;
	if (wr->wr.fast_reg.access_flags & IB_ACCESS_REMOTE_WRITE)
		hdr->rsvd_lkey_flags |= OCRDMA_LKEY_FLAG_REMOTE_WR;
	if (wr->wr.fast_reg.access_flags & IB_ACCESS_REMOTE_READ)
		hdr->rsvd_lkey_flags |= OCRDMA_LKEY_FLAG_REMOTE_RD;
	hdr->lkey = wr->wr.fast_reg.rkey;
	hdr->total_len = wr->wr.fast_reg.length;

	fbo = wr->wr.fast_reg.iova_start -
	    (wr->wr.fast_reg.page_list->page_list[0] & PAGE_MASK);

	fast_reg->va_hi = upper_32_bits(wr->wr.fast_reg.iova_start);
	fast_reg->va_lo = (u32) (wr->wr.fast_reg.iova_start & 0xffffffff);
	fast_reg->fbo_hi = upper_32_bits(fbo);
	fast_reg->fbo_lo = (u32) fbo & 0xffffffff;
	fast_reg->num_sges = wr->wr.fast_reg.page_list_len;
	fast_reg->size_sge =
		get_encoded_page_size(1 << wr->wr.fast_reg.page_shift);
	mr = (struct ocrdma_mr *) (unsigned long)
		qp->dev->stag_arr[(hdr->lkey >> 8) & (OCRDMA_MAX_STAG - 1)];
	build_frmr_pbes(wr, mr->hwmr.pbl_table, &mr->hwmr);
	return 0;
}

static void ocrdma_ring_sq_db(struct ocrdma_qp *qp)
{
	u32 val = qp->sq.dbid | (1 << OCRDMA_DB_SQ_SHIFT);

	iowrite32(val, qp->sq_db);
}

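/* Note (added commentary): the post-send path below follows the usual
 * producer pattern: build the WQE in place at the SQ head, record the
 * wr_id and signaling choice in the shadow table, convert the WQE to
 * little endian, issue a write barrier so the adapter observes a fully
 * written WQE, ring the SQ doorbell, then advance the head.
 */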
int ocrdma_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
		     struct ib_send_wr **bad_wr)
{
	int status = 0;
	struct ocrdma_qp *qp = get_ocrdma_qp(ibqp);
	struct ocrdma_hdr_wqe *hdr;
	unsigned long flags;

	spin_lock_irqsave(&qp->q_lock, flags);
	if (qp->state != OCRDMA_QPS_RTS && qp->state != OCRDMA_QPS_SQD) {
		spin_unlock_irqrestore(&qp->q_lock, flags);
		*bad_wr = wr;
		return -EINVAL;
	}

	while (wr) {
		if (qp->qp_type == IB_QPT_UD &&
		    (wr->opcode != IB_WR_SEND &&
		     wr->opcode != IB_WR_SEND_WITH_IMM)) {
			*bad_wr = wr;
			status = -EINVAL;
			break;
		}
		if (ocrdma_hwq_free_cnt(&qp->sq) == 0 ||
		    wr->num_sge > qp->sq.max_sges) {
			*bad_wr = wr;
			status = -ENOMEM;
			break;
		}
		hdr = ocrdma_hwq_head(&qp->sq);
		hdr->cw = 0;
		if (wr->send_flags & IB_SEND_SIGNALED || qp->signaled)
			hdr->cw |= (OCRDMA_FLAG_SIG << OCRDMA_WQE_FLAGS_SHIFT);
		if (wr->send_flags & IB_SEND_FENCE)
			hdr->cw |=
			    (OCRDMA_FLAG_FENCE_L << OCRDMA_WQE_FLAGS_SHIFT);
		if (wr->send_flags & IB_SEND_SOLICITED)
			hdr->cw |=
			    (OCRDMA_FLAG_SOLICIT << OCRDMA_WQE_FLAGS_SHIFT);
		hdr->total_len = 0;
		switch (wr->opcode) {
		case IB_WR_SEND_WITH_IMM:
			hdr->cw |= (OCRDMA_FLAG_IMM << OCRDMA_WQE_FLAGS_SHIFT);
			hdr->immdt = ntohl(wr->ex.imm_data);
			/* fall through */
		case IB_WR_SEND:
			hdr->cw |= (OCRDMA_SEND << OCRDMA_WQE_OPCODE_SHIFT);
			status = ocrdma_build_send(qp, hdr, wr);
			break;
		case IB_WR_SEND_WITH_INV:
			hdr->cw |= (OCRDMA_FLAG_INV << OCRDMA_WQE_FLAGS_SHIFT);
			hdr->cw |= (OCRDMA_SEND << OCRDMA_WQE_OPCODE_SHIFT);
			hdr->lkey = wr->ex.invalidate_rkey;
			status = ocrdma_build_send(qp, hdr, wr);
			break;
		case IB_WR_RDMA_WRITE_WITH_IMM:
			hdr->cw |= (OCRDMA_FLAG_IMM << OCRDMA_WQE_FLAGS_SHIFT);
			hdr->immdt = ntohl(wr->ex.imm_data);
			/* fall through */
		case IB_WR_RDMA_WRITE:
			hdr->cw |= (OCRDMA_WRITE << OCRDMA_WQE_OPCODE_SHIFT);
			status = ocrdma_build_write(qp, hdr, wr);
			break;
		case IB_WR_RDMA_READ_WITH_INV:
			hdr->cw |= (OCRDMA_FLAG_INV << OCRDMA_WQE_FLAGS_SHIFT);
			/* fall through */
		case IB_WR_RDMA_READ:
			ocrdma_build_read(qp, hdr, wr);
			break;
		case IB_WR_LOCAL_INV:
			hdr->cw |=
			    (OCRDMA_LKEY_INV << OCRDMA_WQE_OPCODE_SHIFT);
			hdr->cw |= ((sizeof(struct ocrdma_hdr_wqe) +
				     sizeof(struct ocrdma_sge)) /
				    OCRDMA_WQE_STRIDE) << OCRDMA_WQE_SIZE_SHIFT;
			hdr->lkey = wr->ex.invalidate_rkey;
			break;
		case IB_WR_FAST_REG_MR:
			status = ocrdma_build_fr(qp, hdr, wr);
			break;
		default:
			status = -EINVAL;
			break;
		}
		if (status) {
			*bad_wr = wr;
			break;
		}
		if (wr->send_flags & IB_SEND_SIGNALED || qp->signaled)
			qp->wqe_wr_id_tbl[qp->sq.head].signaled = 1;
		else
			qp->wqe_wr_id_tbl[qp->sq.head].signaled = 0;
		qp->wqe_wr_id_tbl[qp->sq.head].wrid = wr->wr_id;
		ocrdma_cpu_to_le32(hdr, ((hdr->cw >> OCRDMA_WQE_SIZE_SHIFT) &
					 OCRDMA_WQE_SIZE_MASK) *
				   OCRDMA_WQE_STRIDE);
		/* make sure wqe is written before adapter can access it */
		wmb();
		/* inform hw to start processing it */
		ocrdma_ring_sq_db(qp);

		/* update pointer, counter for next wr */
		ocrdma_hwq_inc_head(&qp->sq);
		wr = wr->next;
	}
	spin_unlock_irqrestore(&qp->q_lock, flags);
	return status;
}

static void ocrdma_ring_rq_db(struct ocrdma_qp *qp)
{
	u32 val = qp->rq.dbid | (1 << OCRDMA_DB_RQ_SHIFT);

	iowrite32(val, qp->rq_db);
}

static void ocrdma_build_rqe(struct ocrdma_hdr_wqe *rqe, struct ib_recv_wr *wr,
			     u16 tag)
{
	u32 wqe_size = 0;
	struct ocrdma_sge *sge;

	if (wr->num_sge)
		wqe_size = (wr->num_sge * sizeof(*sge)) + sizeof(*rqe);
	else
		wqe_size = sizeof(*sge) + sizeof(*rqe);

	rqe->cw = ((wqe_size / OCRDMA_WQE_STRIDE) <<
		   OCRDMA_WQE_SIZE_SHIFT);
	rqe->cw |= (OCRDMA_FLAG_SIG << OCRDMA_WQE_FLAGS_SHIFT);
	rqe->cw |= (OCRDMA_TYPE_LKEY << OCRDMA_WQE_TYPE_SHIFT);
	rqe->total_len = 0;
	rqe->rsvd_tag = tag;
	sge = (struct ocrdma_sge *)(rqe + 1);
	ocrdma_build_sges(rqe, sge, wr->num_sge, wr->sg_list);
	ocrdma_cpu_to_le32(rqe, wqe_size);
}

int ocrdma_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
		     struct ib_recv_wr **bad_wr)
{
	int status = 0;
	unsigned long flags;
	struct ocrdma_qp *qp = get_ocrdma_qp(ibqp);
	struct ocrdma_hdr_wqe *rqe;

	spin_lock_irqsave(&qp->q_lock, flags);
	if (qp->state == OCRDMA_QPS_RST || qp->state == OCRDMA_QPS_ERR) {
		spin_unlock_irqrestore(&qp->q_lock, flags);
		*bad_wr = wr;
		return -EINVAL;
	}
	while (wr) {
		if (ocrdma_hwq_free_cnt(&qp->rq) == 0 ||
		    wr->num_sge > qp->rq.max_sges) {
			*bad_wr = wr;
			status = -ENOMEM;
			break;
		}
		rqe = ocrdma_hwq_head(&qp->rq);
		ocrdma_build_rqe(rqe, wr, 0);

		qp->rqe_wr_id_tbl[qp->rq.head] = wr->wr_id;
		/* make sure rqe is written before adapter can access it */
		wmb();

		/* inform hw to start processing it */
		ocrdma_ring_rq_db(qp);

		/* update pointer, counter for next wr */
		ocrdma_hwq_inc_head(&qp->rq);
		wr = wr->next;
	}
	spin_unlock_irqrestore(&qp->q_lock, flags);
	return status;
}

/* CQEs for an SRQ's RQEs can arrive out of order. The index gives the
 * entry in the shadow table where the wr_id is stored; the tag/index
 * is returned in the CQE so a completion can be mapped back to its RQE.
 */
static int ocrdma_srq_get_idx(struct ocrdma_srq *srq)
{
	int row = 0;
	int indx = 0;

	for (row = 0; row < srq->bit_fields_len; row++) {
		if (srq->idx_bit_fields[row]) {
			indx = ffs(srq->idx_bit_fields[row]);
			indx = (row * 32) + (indx - 1);
			if (indx >= srq->rq.max_cnt)
				BUG();
			ocrdma_srq_toggle_bit(srq, indx);
			break;
		}
	}

	if (row == srq->bit_fields_len)
		BUG();
	return indx + 1; /* Use from index 1 */
}

static void ocrdma_ring_srq_db(struct ocrdma_srq *srq)
{
	u32 val = srq->rq.dbid | (1 << 16);

	iowrite32(val, srq->db + OCRDMA_DB_GEN2_SRQ_OFFSET);
}

int ocrdma_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
			 struct ib_recv_wr **bad_wr)
{
	int status = 0;
	unsigned long flags;
	struct ocrdma_srq *srq;
	struct ocrdma_hdr_wqe *rqe;
	u16 tag;

	srq = get_ocrdma_srq(ibsrq);

	spin_lock_irqsave(&srq->q_lock, flags);
	while (wr) {
		if (ocrdma_hwq_free_cnt(&srq->rq) == 0 ||
		    wr->num_sge > srq->rq.max_sges) {
			status = -ENOMEM;
			*bad_wr = wr;
			break;
		}
		tag = ocrdma_srq_get_idx(srq);
		rqe = ocrdma_hwq_head(&srq->rq);
		ocrdma_build_rqe(rqe, wr, tag);

		srq->rqe_wr_id_tbl[tag] = wr->wr_id;
		/* make sure rqe is written before adapter can perform DMA */
		wmb();
		/* inform hw to start processing it */
		ocrdma_ring_srq_db(srq);
		/* update pointer, counter for next wr */
		ocrdma_hwq_inc_head(&srq->rq);
		wr = wr->next;
	}
	spin_unlock_irqrestore(&srq->q_lock, flags);
	return status;
}

static enum ib_wc_status ocrdma_to_ibwc_err(u16 status)
{
	enum ib_wc_status ibwc_status;

	switch (status) {
	case OCRDMA_CQE_GENERAL_ERR:
		ibwc_status = IB_WC_GENERAL_ERR;
		break;
	case OCRDMA_CQE_LOC_LEN_ERR:
		ibwc_status = IB_WC_LOC_LEN_ERR;
		break;
	case OCRDMA_CQE_LOC_QP_OP_ERR:
		ibwc_status = IB_WC_LOC_QP_OP_ERR;
		break;
	case OCRDMA_CQE_LOC_EEC_OP_ERR:
		ibwc_status = IB_WC_LOC_EEC_OP_ERR;
		break;
	case OCRDMA_CQE_LOC_PROT_ERR:
		ibwc_status = IB_WC_LOC_PROT_ERR;
		break;
	case OCRDMA_CQE_WR_FLUSH_ERR:
		ibwc_status = IB_WC_WR_FLUSH_ERR;
		break;
	case OCRDMA_CQE_MW_BIND_ERR:
		ibwc_status = IB_WC_MW_BIND_ERR;
		break;
	case OCRDMA_CQE_BAD_RESP_ERR:
		ibwc_status = IB_WC_BAD_RESP_ERR;
		break;
	case OCRDMA_CQE_LOC_ACCESS_ERR:
		ibwc_status = IB_WC_LOC_ACCESS_ERR;
		break;
	case OCRDMA_CQE_REM_INV_REQ_ERR:
		ibwc_status = IB_WC_REM_INV_REQ_ERR;
		break;
	case OCRDMA_CQE_REM_ACCESS_ERR:
		ibwc_status = IB_WC_REM_ACCESS_ERR;
		break;
	case OCRDMA_CQE_REM_OP_ERR:
		ibwc_status = IB_WC_REM_OP_ERR;
		break;
	case OCRDMA_CQE_RETRY_EXC_ERR:
		ibwc_status = IB_WC_RETRY_EXC_ERR;
		break;
	case OCRDMA_CQE_RNR_RETRY_EXC_ERR:
		ibwc_status = IB_WC_RNR_RETRY_EXC_ERR;
		break;
	case OCRDMA_CQE_LOC_RDD_VIOL_ERR:
		ibwc_status = IB_WC_LOC_RDD_VIOL_ERR;
		break;
	case OCRDMA_CQE_REM_INV_RD_REQ_ERR:
		ibwc_status = IB_WC_REM_INV_RD_REQ_ERR;
		break;
	case OCRDMA_CQE_REM_ABORT_ERR:
		ibwc_status = IB_WC_REM_ABORT_ERR;
		break;
	case OCRDMA_CQE_INV_EECN_ERR:
		ibwc_status = IB_WC_INV_EECN_ERR;
		break;
	case OCRDMA_CQE_INV_EEC_STATE_ERR:
		ibwc_status = IB_WC_INV_EEC_STATE_ERR;
		break;
	case OCRDMA_CQE_FATAL_ERR:
		ibwc_status = IB_WC_FATAL_ERR;
		break;
	case OCRDMA_CQE_RESP_TIMEOUT_ERR:
		ibwc_status = IB_WC_RESP_TIMEOUT_ERR;
		break;
	default:
		ibwc_status = IB_WC_GENERAL_ERR;
		break;
	}
	return ibwc_status;
}

static void ocrdma_update_wc(struct ocrdma_qp *qp, struct ib_wc *ibwc,
			     u32 wqe_idx)
{
	struct ocrdma_hdr_wqe *hdr;
	struct ocrdma_sge *rw;
	int opcode;

	hdr = ocrdma_hwq_head_from_idx(&qp->sq, wqe_idx);

	ibwc->wr_id = qp->wqe_wr_id_tbl[wqe_idx].wrid;
	/* Undo the hdr->cw swap */
	opcode = le32_to_cpu(hdr->cw) & OCRDMA_WQE_OPCODE_MASK;
	switch (opcode) {
	case OCRDMA_WRITE:
		ibwc->opcode = IB_WC_RDMA_WRITE;
		break;
	case OCRDMA_READ:
		rw = (struct ocrdma_sge *)(hdr + 1);
		ibwc->opcode = IB_WC_RDMA_READ;
		ibwc->byte_len = rw->len;
		break;
	case OCRDMA_SEND:
		ibwc->opcode = IB_WC_SEND;
		break;
	case OCRDMA_FR_MR:
		ibwc->opcode = IB_WC_FAST_REG_MR;
		break;
	case OCRDMA_LKEY_INV:
		ibwc->opcode = IB_WC_LOCAL_INV;
		break;
	default:
		ibwc->status = IB_WC_GENERAL_ERR;
		pr_err("%s() invalid opcode received = 0x%x\n",
		       __func__, hdr->cw & OCRDMA_WQE_OPCODE_MASK);
		break;
	}
}

static void ocrdma_set_cqe_status_flushed(struct ocrdma_qp *qp,
					  struct ocrdma_cqe *cqe)
{
	if (is_cqe_for_sq(cqe)) {
		cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu(
				cqe->flags_status_srcqpn) &
				~OCRDMA_CQE_STATUS_MASK);
		cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu(
				cqe->flags_status_srcqpn) |
				(OCRDMA_CQE_WR_FLUSH_ERR <<
					OCRDMA_CQE_STATUS_SHIFT));
	} else {
		if (qp->qp_type == IB_QPT_UD || qp->qp_type == IB_QPT_GSI) {
			cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu(
					cqe->flags_status_srcqpn) &
					~OCRDMA_CQE_UD_STATUS_MASK);
			cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu(
					cqe->flags_status_srcqpn) |
					(OCRDMA_CQE_WR_FLUSH_ERR <<
						OCRDMA_CQE_UD_STATUS_SHIFT));
		} else {
			cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu(
					cqe->flags_status_srcqpn) &
					~OCRDMA_CQE_STATUS_MASK);
			cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu(
					cqe->flags_status_srcqpn) |
					(OCRDMA_CQE_WR_FLUSH_ERR <<
						OCRDMA_CQE_STATUS_SHIFT));
		}
	}
}

static bool ocrdma_update_err_cqe(struct ib_wc *ibwc, struct ocrdma_cqe *cqe,
				  struct ocrdma_qp *qp, int status)
{
	bool expand = false;

	ibwc->byte_len = 0;
	ibwc->qp = &qp->ibqp;
	ibwc->status = ocrdma_to_ibwc_err(status);

	ocrdma_flush_qp(qp);
	ocrdma_qp_state_change(qp, IB_QPS_ERR, NULL);

	/* If WQEs/RQEs are still pending for which CQEs need to be
	 * returned, trigger inflating this CQE into flush CQEs.
	 */
	if (!is_hw_rq_empty(qp) || !is_hw_sq_empty(qp)) {
		expand = true;
		ocrdma_set_cqe_status_flushed(qp, cqe);
	}
	return expand;
}

static int ocrdma_update_err_rcqe(struct ib_wc *ibwc, struct ocrdma_cqe *cqe,
				  struct ocrdma_qp *qp, int status)
{
	ibwc->opcode = IB_WC_RECV;
	ibwc->wr_id = qp->rqe_wr_id_tbl[qp->rq.tail];
	ocrdma_hwq_inc_tail(&qp->rq);

	return ocrdma_update_err_cqe(ibwc, cqe, qp, status);
}

static int ocrdma_update_err_scqe(struct ib_wc *ibwc, struct ocrdma_cqe *cqe,
				  struct ocrdma_qp *qp, int status)
{
	ocrdma_update_wc(qp, ibwc, qp->sq.tail);
	ocrdma_hwq_inc_tail(&qp->sq);

	return ocrdma_update_err_cqe(ibwc, cqe, qp, status);
}

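/* Note (added commentary): the poll helpers below communicate through
 * three flags: "polled" (a work completion was filled in and may be
 * returned to the caller), "expand" (the same hardware CQE must be
 * revisited so it can be inflated into flush completions for remaining
 * pending WQEs/RQEs), and "stop" (polling must halt because the CQE is
 * retained to re-trigger the buddy CQ's event).
 */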
static bool ocrdma_poll_err_scqe(struct ocrdma_qp *qp,
				 struct ocrdma_cqe *cqe, struct ib_wc *ibwc,
				 bool *polled, bool *stop)
{
	bool expand;
	int status = (le32_to_cpu(cqe->flags_status_srcqpn) &
		OCRDMA_CQE_STATUS_MASK) >> OCRDMA_CQE_STATUS_SHIFT;

	/* When the hardware SQ is empty but the RQ is not, keep the
	 * CQE in order to get the CQ event again.
	 */
	if (is_hw_sq_empty(qp) && !is_hw_rq_empty(qp)) {
		/* When the RQ and SQ share one CQ, it is safe to return
		 * flush CQEs for the pending RQEs.
		 */
		if (!qp->srq && (qp->sq_cq == qp->rq_cq)) {
			*polled = true;
			status = OCRDMA_CQE_WR_FLUSH_ERR;
			expand = ocrdma_update_err_rcqe(ibwc, cqe, qp, status);
		} else {
			/* Stop processing further CQEs, as this CQE is
			 * used for triggering the CQ event on the buddy
			 * CQ of the RQ. When the QP is destroyed, this
			 * CQE is removed from the CQ's hardware queue.
			 */
			*polled = false;
			*stop = true;
			expand = false;
		}
	} else if (is_hw_sq_empty(qp)) {
		/* Do nothing */
		expand = false;
		*polled = false;
		*stop = false;
	} else {
		*polled = true;
		expand = ocrdma_update_err_scqe(ibwc, cqe, qp, status);
	}
	return expand;
}

static bool ocrdma_poll_success_scqe(struct ocrdma_qp *qp,
				     struct ocrdma_cqe *cqe,
				     struct ib_wc *ibwc, bool *polled)
{
	bool expand = false;
	int tail = qp->sq.tail;
	u32 wqe_idx;

	if (!qp->wqe_wr_id_tbl[tail].signaled) {
		*polled = false;    /* WC cannot be consumed yet */
	} else {
		ibwc->status = IB_WC_SUCCESS;
		ibwc->wc_flags = 0;
		ibwc->qp = &qp->ibqp;
		ocrdma_update_wc(qp, ibwc, tail);
		*polled = true;
	}
	wqe_idx = (le32_to_cpu(cqe->wq.wqeidx) &
		   OCRDMA_CQE_WQEIDX_MASK) & qp->sq.max_wqe_idx;
	if (tail != wqe_idx)
		expand = true; /* Coalesced CQE can't be consumed yet */

	ocrdma_hwq_inc_tail(&qp->sq);
	return expand;
}

static bool ocrdma_poll_scqe(struct ocrdma_qp *qp, struct ocrdma_cqe *cqe,
			     struct ib_wc *ibwc, bool *polled, bool *stop)
{
	int status;
	bool expand;

	status = (le32_to_cpu(cqe->flags_status_srcqpn) &
		OCRDMA_CQE_STATUS_MASK) >> OCRDMA_CQE_STATUS_SHIFT;

	if (status == OCRDMA_CQE_SUCCESS)
		expand = ocrdma_poll_success_scqe(qp, cqe, ibwc, polled);
	else
		expand = ocrdma_poll_err_scqe(qp, cqe, ibwc, polled, stop);
	return expand;
}

static int ocrdma_update_ud_rcqe(struct ib_wc *ibwc, struct ocrdma_cqe *cqe)
{
	int status;

	status = (le32_to_cpu(cqe->flags_status_srcqpn) &
		OCRDMA_CQE_UD_STATUS_MASK) >> OCRDMA_CQE_UD_STATUS_SHIFT;
	ibwc->src_qp = le32_to_cpu(cqe->flags_status_srcqpn) &
		OCRDMA_CQE_SRCQP_MASK;
	ibwc->pkey_index = le32_to_cpu(cqe->ud.rxlen_pkey) &
		OCRDMA_CQE_PKEY_MASK;
	ibwc->wc_flags = IB_WC_GRH;
	ibwc->byte_len = (le32_to_cpu(cqe->ud.rxlen_pkey) >>
			  OCRDMA_CQE_UD_XFER_LEN_SHIFT);
	return status;
}

static void ocrdma_update_free_srq_cqe(struct ib_wc *ibwc,
				       struct ocrdma_cqe *cqe,
				       struct ocrdma_qp *qp)
{
	unsigned long flags;
	struct ocrdma_srq *srq;
	u32 wqe_idx;

	srq = get_ocrdma_srq(qp->ibqp.srq);
	wqe_idx = (le32_to_cpu(cqe->rq.buftag_qpn) >>
		OCRDMA_CQE_BUFTAG_SHIFT) & srq->rq.max_wqe_idx;
	if (wqe_idx < 1)
		BUG();

	ibwc->wr_id = srq->rqe_wr_id_tbl[wqe_idx];
	spin_lock_irqsave(&srq->q_lock, flags);
	ocrdma_srq_toggle_bit(srq, wqe_idx - 1);
	spin_unlock_irqrestore(&srq->q_lock, flags);
	ocrdma_hwq_inc_tail(&srq->rq);
}

static bool ocrdma_poll_err_rcqe(struct ocrdma_qp *qp, struct ocrdma_cqe *cqe,
				 struct ib_wc *ibwc, bool *polled, bool *stop,
				 int status)
{
	bool expand;

	/* When the hardware RQ is empty but the SQ is not, keep the
	 * CQE to get the CQ event again.
	 */
	if (is_hw_rq_empty(qp) && !is_hw_sq_empty(qp)) {
		if (!qp->srq && (qp->sq_cq == qp->rq_cq)) {
			*polled = true;
			status = OCRDMA_CQE_WR_FLUSH_ERR;
			expand = ocrdma_update_err_scqe(ibwc, cqe, qp, status);
		} else {
			*polled = false;
			*stop = true;
			expand = false;
		}
	} else if (is_hw_rq_empty(qp)) {
		/* Do nothing */
		expand = false;
		*polled = false;
		*stop = false;
	} else {
		*polled = true;
		expand = ocrdma_update_err_rcqe(ibwc, cqe, qp, status);
	}
	return expand;
}

static void ocrdma_poll_success_rcqe(struct ocrdma_qp *qp,
				     struct ocrdma_cqe *cqe, struct ib_wc *ibwc)
{
	ibwc->opcode = IB_WC_RECV;
	ibwc->qp = &qp->ibqp;
	ibwc->status = IB_WC_SUCCESS;

	if (qp->qp_type == IB_QPT_UD || qp->qp_type == IB_QPT_GSI)
		ocrdma_update_ud_rcqe(ibwc, cqe);
	else
		ibwc->byte_len = le32_to_cpu(cqe->rq.rxlen);

	if (is_cqe_imm(cqe)) {
		ibwc->ex.imm_data = htonl(le32_to_cpu(cqe->rq.lkey_immdt));
		ibwc->wc_flags |= IB_WC_WITH_IMM;
	} else if (is_cqe_wr_imm(cqe)) {
		ibwc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
		ibwc->ex.imm_data = htonl(le32_to_cpu(cqe->rq.lkey_immdt));
		ibwc->wc_flags |= IB_WC_WITH_IMM;
	} else if (is_cqe_invalidated(cqe)) {
		ibwc->ex.invalidate_rkey = le32_to_cpu(cqe->rq.lkey_immdt);
		ibwc->wc_flags |= IB_WC_WITH_INVALIDATE;
	}
	if (qp->ibqp.srq) {
		ocrdma_update_free_srq_cqe(ibwc, cqe, qp);
	} else {
		ibwc->wr_id = qp->rqe_wr_id_tbl[qp->rq.tail];
		ocrdma_hwq_inc_tail(&qp->rq);
	}
}

static bool ocrdma_poll_rcqe(struct ocrdma_qp *qp, struct ocrdma_cqe *cqe,
			     struct ib_wc *ibwc, bool *polled, bool *stop)
{
	int status;
	bool expand = false;

	ibwc->wc_flags = 0;
	if (qp->qp_type == IB_QPT_UD || qp->qp_type == IB_QPT_GSI) {
		status = (le32_to_cpu(cqe->flags_status_srcqpn) &
			  OCRDMA_CQE_UD_STATUS_MASK) >>
			  OCRDMA_CQE_UD_STATUS_SHIFT;
	} else {
		status = (le32_to_cpu(cqe->flags_status_srcqpn) &
			  OCRDMA_CQE_STATUS_MASK) >> OCRDMA_CQE_STATUS_SHIFT;
	}

	if (status == OCRDMA_CQE_SUCCESS) {
		*polled = true;
		ocrdma_poll_success_rcqe(qp, cqe, ibwc);
	} else {
		expand = ocrdma_poll_err_rcqe(qp, cqe, ibwc, polled, stop,
					      status);
	}
	return expand;
}

2687 | ||
2688 | static void ocrdma_change_cq_phase(struct ocrdma_cq *cq, struct ocrdma_cqe *cqe, | |
2689 | u16 cur_getp) | |
2690 | { | |
2691 | if (cq->phase_change) { | |
2692 | if (cur_getp == 0) | |
2693 | cq->phase = (~cq->phase & OCRDMA_CQE_VALID); | |
f99b1649 | 2694 | } else { |
fe2caefc PP |
2695 | /* clear valid bit */ |
2696 | cqe->flags_status_srcqpn = 0; | |
f99b1649 | 2697 | } |
fe2caefc PP |
2698 | } |
2699 | ||
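/* Note (added commentary): ocrdma_poll_hwcq() walks the hardware CQ
 * from cq->getp, dispatching each valid CQE to the send or receive
 * poll helper. An "expand" result re-runs the same CQE without
 * advancing the get pointer, a "stop" result ends the walk, and on
 * exit the CQ doorbell is rung with the number of CQEs popped,
 * re-arming only if an arm request was deferred by ocrdma_arm_cq().
 */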
static int ocrdma_poll_hwcq(struct ocrdma_cq *cq, int num_entries,
			    struct ib_wc *ibwc)
{
	u16 qpn = 0;
	int i = 0;
	bool expand = false;
	int polled_hw_cqes = 0;
	struct ocrdma_qp *qp = NULL;
	struct ocrdma_dev *dev = get_ocrdma_dev(cq->ibcq.device);
	struct ocrdma_cqe *cqe;
	u16 cur_getp;
	bool polled = false;
	bool stop = false;

	cur_getp = cq->getp;
	while (num_entries) {
		cqe = cq->va + cur_getp;
		/* check whether valid cqe or not */
		if (!is_cqe_valid(cq, cqe))
			break;
		qpn = (le32_to_cpu(cqe->cmn.qpn) & OCRDMA_CQE_QPN_MASK);
		/* ignore discarded cqe */
		if (qpn == 0)
			goto skip_cqe;
		qp = dev->qp_tbl[qpn];
		BUG_ON(qp == NULL);

		if (is_cqe_for_sq(cqe)) {
			expand = ocrdma_poll_scqe(qp, cqe, ibwc, &polled,
						  &stop);
		} else {
			expand = ocrdma_poll_rcqe(qp, cqe, ibwc, &polled,
						  &stop);
		}
		if (expand)
			goto expand_cqe;
		if (stop)
			goto stop_cqe;
		/* clear qpn to avoid duplicate processing by discard_cqe() */
		cqe->cmn.qpn = 0;
skip_cqe:
		polled_hw_cqes += 1;
		cur_getp = (cur_getp + 1) % cq->max_hw_cqe;
		ocrdma_change_cq_phase(cq, cqe, cur_getp);
expand_cqe:
		if (polled) {
			num_entries -= 1;
			i += 1;
			ibwc = ibwc + 1;
			polled = false;
		}
	}
stop_cqe:
	cq->getp = cur_getp;
	if (cq->deferred_arm) {
		ocrdma_ring_cq_db(dev, cq->id, true, cq->deferred_sol,
				  polled_hw_cqes);
		cq->deferred_arm = false;
		cq->deferred_sol = false;
	} else {
		/* We need to pop the CQE. No need to arm */
		ocrdma_ring_cq_db(dev, cq->id, false, cq->deferred_sol,
				  polled_hw_cqes);
		cq->deferred_sol = false;
	}

	return i;
}

/* insert error cqe if the QP's SQ or RQ's CQ matches the CQ under poll. */
static int ocrdma_add_err_cqe(struct ocrdma_cq *cq, int num_entries,
			      struct ocrdma_qp *qp, struct ib_wc *ibwc)
{
	int err_cqes = 0;

	while (num_entries) {
		if (is_hw_sq_empty(qp) && is_hw_rq_empty(qp))
			break;
		if (!is_hw_sq_empty(qp) && qp->sq_cq == cq) {
			ocrdma_update_wc(qp, ibwc, qp->sq.tail);
			ocrdma_hwq_inc_tail(&qp->sq);
		} else if (!is_hw_rq_empty(qp) && qp->rq_cq == cq) {
			ibwc->wr_id = qp->rqe_wr_id_tbl[qp->rq.tail];
			ocrdma_hwq_inc_tail(&qp->rq);
		} else {
			return err_cqes;
		}
		ibwc->byte_len = 0;
		ibwc->status = IB_WC_WR_FLUSH_ERR;
		ibwc = ibwc + 1;
		err_cqes += 1;
		num_entries -= 1;
	}
	return err_cqes;
}

int ocrdma_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
{
	int cqes_to_poll = num_entries;
	struct ocrdma_cq *cq = get_ocrdma_cq(ibcq);
	struct ocrdma_dev *dev = get_ocrdma_dev(ibcq->device);
	int num_os_cqe = 0, err_cqes = 0;
	struct ocrdma_qp *qp;
	unsigned long flags;

	/* poll cqes from adapter CQ */
	spin_lock_irqsave(&cq->cq_lock, flags);
	num_os_cqe = ocrdma_poll_hwcq(cq, cqes_to_poll, wc);
	spin_unlock_irqrestore(&cq->cq_lock, flags);
	cqes_to_poll -= num_os_cqe;

	if (cqes_to_poll) {
		wc = wc + num_os_cqe;
		/* The adapter returns a single error CQE when a QP moves
		 * to the error state. So, for each QP using this CQ,
		 * insert error CQEs with wc_status FLUSHED for the WQEs
		 * and RQEs still pending on its SQ and RQ.
		 */
		spin_lock_irqsave(&dev->flush_q_lock, flags);
		list_for_each_entry(qp, &cq->sq_head, sq_entry) {
			if (cqes_to_poll == 0)
				break;
			err_cqes = ocrdma_add_err_cqe(cq, cqes_to_poll, qp, wc);
			cqes_to_poll -= err_cqes;
			num_os_cqe += err_cqes;
			wc = wc + err_cqes;
		}
		spin_unlock_irqrestore(&dev->flush_q_lock, flags);
	}
	return num_os_cqe;
}

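/* Note (added commentary): arming rings the CQ doorbell so the adapter
 * raises an event on the next completion. Only the very first arm is
 * issued immediately; subsequent requests just set deferred_arm, and
 * the doorbell write is folded into the next ocrdma_poll_hwcq() run so
 * that popping consumed CQEs and re-arming happen in a single update.
 */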
int ocrdma_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags cq_flags)
{
	struct ocrdma_cq *cq = get_ocrdma_cq(ibcq);
	struct ocrdma_dev *dev = get_ocrdma_dev(ibcq->device);
	u16 cq_id;
	unsigned long flags;
	bool arm_needed = false, sol_needed = false;

	cq_id = cq->id;

	spin_lock_irqsave(&cq->cq_lock, flags);
	if (cq_flags & IB_CQ_NEXT_COMP || cq_flags & IB_CQ_SOLICITED)
		arm_needed = true;
	if (cq_flags & IB_CQ_SOLICITED)
		sol_needed = true;

	if (cq->first_arm) {
		ocrdma_ring_cq_db(dev, cq_id, arm_needed, sol_needed, 0);
		cq->first_arm = false;
		goto skip_defer;
	}
	cq->deferred_arm = true;

skip_defer:
	cq->deferred_sol = sol_needed;
	spin_unlock_irqrestore(&cq->cq_lock, flags);

	return 0;
}

struct ib_mr *ocrdma_alloc_frmr(struct ib_pd *ibpd, int max_page_list_len)
{
	int status;
	struct ocrdma_mr *mr;
	struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
	struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);

	if (max_page_list_len > dev->attr.max_pages_per_frmr)
		return ERR_PTR(-EINVAL);

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	status = ocrdma_get_pbl_info(dev, mr, max_page_list_len);
	if (status)
		goto pbl_err;
	mr->hwmr.fr_mr = 1;
	mr->hwmr.remote_rd = 0;
	mr->hwmr.remote_wr = 0;
	mr->hwmr.local_rd = 0;
	mr->hwmr.local_wr = 0;
	mr->hwmr.mw_bind = 0;
	status = ocrdma_build_pbl_tbl(dev, &mr->hwmr);
	if (status)
		goto pbl_err;
	status = ocrdma_reg_mr(dev, &mr->hwmr, pd->id, 0);
	if (status)
		goto mbx_err;
	mr->ibmr.rkey = mr->hwmr.lkey;
	mr->ibmr.lkey = mr->hwmr.lkey;
	dev->stag_arr[(mr->hwmr.lkey >> 8) & (OCRDMA_MAX_STAG - 1)] =
		(unsigned long) mr;
	return &mr->ibmr;
mbx_err:
	ocrdma_free_mr_pbl_tbl(dev, &mr->hwmr);
pbl_err:
	kfree(mr);
	return ERR_PTR(-ENOMEM);
}

struct ib_fast_reg_page_list *ocrdma_alloc_frmr_page_list(struct ib_device
							  *ibdev,
							  int page_list_len)
{
	struct ib_fast_reg_page_list *frmr_list;
	int size;

	size = sizeof(*frmr_list) + (page_list_len * sizeof(u64));
	frmr_list = kzalloc(size, GFP_KERNEL);
	if (!frmr_list)
		return ERR_PTR(-ENOMEM);
	frmr_list->page_list = (u64 *)(frmr_list + 1);
	return frmr_list;
}

void ocrdma_free_frmr_page_list(struct ib_fast_reg_page_list *page_list)
{
	kfree(page_list);
}

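/* Note (added commentary): count_kernel_pbes() picks one PBE size for
 * the whole physical buffer list: it starts from the first buffer's
 * size rounded up to a page-aligned power of two, shrinks it whenever
 * a later buffer needs a smaller granule, and caps it at 64K. E.g.
 * for page-aligned buffers of 16K and 8K, the chosen pbe_size is 8K
 * and the function returns (16K + 8K) / 8K = 3 PBEs.
 */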
#define MAX_KERNEL_PBE_SIZE 65536
static inline int count_kernel_pbes(struct ib_phys_buf *buf_list,
				    int buf_cnt, u32 *pbe_size)
{
	u64 total_size = 0;
	u64 buf_size = 0;
	int i;

	*pbe_size = roundup(buf_list[0].size, PAGE_SIZE);
	*pbe_size = roundup_pow_of_two(*pbe_size);

	/* find the smallest PBE size that we can have */
	for (i = 0; i < buf_cnt; i++) {
		/* first addr may not be page aligned, so ignore checking */
		if ((i != 0) && ((buf_list[i].addr & ~PAGE_MASK) ||
				 (buf_list[i].size & ~PAGE_MASK))) {
			return 0;
		}

		/* if the configured PBE size is greater than the chosen
		 * one, reduce the PBE size.
		 */
		buf_size = roundup(buf_list[i].size, PAGE_SIZE);
		/* pbe_size has to be a power-of-two multiple of 4K:
		 * 1, 2, 4, 8, ...
		 */
		buf_size = roundup_pow_of_two(buf_size);
		if (*pbe_size > buf_size)
			*pbe_size = buf_size;

		total_size += buf_size;
	}
	*pbe_size = *pbe_size > MAX_KERNEL_PBE_SIZE ?
	    (MAX_KERNEL_PBE_SIZE) : (*pbe_size);

	/* num_pbes = total_size / (*pbe_size); this is implemented below. */
	return total_size >> ilog2(*pbe_size);
}

static void build_kernel_pbes(struct ib_phys_buf *buf_list, int ib_buf_cnt,
			      u32 pbe_size, struct ocrdma_pbl *pbl_tbl,
			      struct ocrdma_hw_mr *hwmr)
{
	int i;
	int idx;
	int pbes_per_buf = 0;
	u64 buf_addr = 0;
	int num_pbes;
	struct ocrdma_pbe *pbe;
	int total_num_pbes = 0;

	if (!hwmr->num_pbes)
		return;

	pbe = (struct ocrdma_pbe *)pbl_tbl->va;
	num_pbes = 0;

	/* go through the OS phy regions & fill hw pbe entries into pbls. */
	for (i = 0; i < ib_buf_cnt; i++) {
		buf_addr = buf_list[i].addr;
		pbes_per_buf =
		    roundup_pow_of_two(roundup(buf_list[i].size, PAGE_SIZE)) /
		    pbe_size;
		hwmr->len += buf_list[i].size;
		/* One OS buffer can need more than one PBE when buffers
		 * are of different sizes; split each ib_buf into one or
		 * more PBEs.
		 */
		for (idx = 0; idx < pbes_per_buf; idx++) {
			/* we always program page-aligned addresses;
			 * the first unaligned address is taken care
			 * of by fbo.
			 */
			if (i == 0) {
				/* for a non-zero fbo, assign the
				 * start of the page.
				 */
				pbe->pa_lo =
				    cpu_to_le32((u32) (buf_addr & PAGE_MASK));
				pbe->pa_hi =
				    cpu_to_le32((u32) upper_32_bits(buf_addr));
			} else {
				pbe->pa_lo =
				    cpu_to_le32((u32) (buf_addr & 0xffffffff));
				pbe->pa_hi =
				    cpu_to_le32((u32) upper_32_bits(buf_addr));
			}
			buf_addr += pbe_size;
			num_pbes += 1;
			total_num_pbes += 1;
			pbe++;

			if (total_num_pbes == hwmr->num_pbes)
				goto mr_tbl_done;
			/* if the pbl is full storing the pbes,
			 * move to next pbl.
			 */
			if (num_pbes == (hwmr->pbl_size/sizeof(u64))) {
				pbl_tbl++;
				pbe = (struct ocrdma_pbe *)pbl_tbl->va;
				num_pbes = 0;
			}
		}
	}
mr_tbl_done:
	return;
}

struct ib_mr *ocrdma_reg_kernel_mr(struct ib_pd *ibpd,
				   struct ib_phys_buf *buf_list,
				   int buf_cnt, int acc, u64 *iova_start)
{
	int status = -ENOMEM;
	struct ocrdma_mr *mr;
	struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
	struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
	u32 num_pbes;
	u32 pbe_size = 0;

	if ((acc & IB_ACCESS_REMOTE_WRITE) && !(acc & IB_ACCESS_LOCAL_WRITE))
		return ERR_PTR(-EINVAL);

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(status);

	num_pbes = count_kernel_pbes(buf_list, buf_cnt, &pbe_size);
	if (num_pbes == 0) {
		status = -EINVAL;
		goto pbl_err;
	}
	status = ocrdma_get_pbl_info(dev, mr, num_pbes);
	if (status)
		goto pbl_err;

	mr->hwmr.pbe_size = pbe_size;
	mr->hwmr.fbo = *iova_start - (buf_list[0].addr & PAGE_MASK);
	mr->hwmr.va = *iova_start;
	mr->hwmr.local_rd = 1;
	mr->hwmr.remote_wr = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
	mr->hwmr.remote_rd = (acc & IB_ACCESS_REMOTE_READ) ? 1 : 0;
	mr->hwmr.local_wr = (acc & IB_ACCESS_LOCAL_WRITE) ? 1 : 0;
	mr->hwmr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0;
	mr->hwmr.mw_bind = (acc & IB_ACCESS_MW_BIND) ? 1 : 0;

	status = ocrdma_build_pbl_tbl(dev, &mr->hwmr);
	if (status)
		goto pbl_err;
	build_kernel_pbes(buf_list, buf_cnt, pbe_size, mr->hwmr.pbl_table,
			  &mr->hwmr);
	status = ocrdma_reg_mr(dev, &mr->hwmr, pd->id, acc);
	if (status)
		goto mbx_err;

	mr->ibmr.lkey = mr->hwmr.lkey;
	if (mr->hwmr.remote_wr || mr->hwmr.remote_rd)
		mr->ibmr.rkey = mr->hwmr.lkey;
	return &mr->ibmr;

mbx_err:
	ocrdma_free_mr_pbl_tbl(dev, &mr->hwmr);
pbl_err:
	kfree(mr);
	return ERR_PTR(status);
}