// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2022, Microsoft Corporation. All rights reserved.
 */

#include "mana_ib.h"

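/*
 * Drop one reference to the vport configured for this PD. The hardware
 * vport is unconfigured only when the last user releases it; the use
 * count is guarded by pd->vport_mutex.
 */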
void mana_ib_uncfg_vport(struct mana_ib_dev *dev, struct mana_ib_pd *pd,
			 u32 port)
{
	struct gdma_dev *gd = &dev->gdma_dev->gdma_context->mana;
	struct mana_port_context *mpc;
	struct net_device *ndev;
	struct mana_context *mc;

	mc = gd->driver_data;
	ndev = mc->ports[port];
	mpc = netdev_priv(ndev);

	mutex_lock(&pd->vport_mutex);

	pd->vport_use_count--;
	WARN_ON(pd->vport_use_count < 0);

	if (!pd->vport_use_count)
		mana_uncfg_vport(mpc);

	mutex_unlock(&pd->vport_mutex);
}

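/*
 * Configure the hardware vport for this PD, or take an extra reference
 * if the PD has already configured one. The use count is rolled back if
 * mana_cfg_vport() fails, so a failed call leaves the PD unchanged.
 */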
int mana_ib_cfg_vport(struct mana_ib_dev *dev, u32 port, struct mana_ib_pd *pd,
		      u32 doorbell_id)
{
	struct gdma_dev *mdev = &dev->gdma_dev->gdma_context->mana;
	struct mana_port_context *mpc;
	struct mana_context *mc;
	struct net_device *ndev;
	int err;

	mc = mdev->driver_data;
	ndev = mc->ports[port];
	mpc = netdev_priv(ndev);

	mutex_lock(&pd->vport_mutex);

	pd->vport_use_count++;
	if (pd->vport_use_count > 1) {
		ibdev_dbg(&dev->ib_dev,
			  "Skip as this PD is already configured for a vport\n");
		mutex_unlock(&pd->vport_mutex);
		return 0;
	}

	err = mana_cfg_vport(mpc, pd->pdn, doorbell_id);
	if (err) {
		pd->vport_use_count--;
		mutex_unlock(&pd->vport_mutex);

		ibdev_dbg(&dev->ib_dev, "Failed to configure vPort %d\n", err);
		return err;
	}

	mutex_unlock(&pd->vport_mutex);

	pd->tx_shortform_allowed = mpc->tx_shortform_allowed;
	pd->tx_vp_offset = mpc->tx_vp_offset;

	ibdev_dbg(&dev->ib_dev, "vport handle %llx pdid %x doorbell_id %x\n",
		  mpc->port_handle, pd->pdn, doorbell_id);

	return 0;
}

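/*
 * Allocate a protection domain by issuing a GDMA_CREATE_PD request to
 * the hardware. A non-zero firmware status on an otherwise successful
 * send is mapped to -EPROTO.
 */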
int mana_ib_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
{
	struct mana_ib_pd *pd = container_of(ibpd, struct mana_ib_pd, ibpd);
	struct ib_device *ibdev = ibpd->device;
	struct gdma_create_pd_resp resp = {};
	struct gdma_create_pd_req req = {};
	enum gdma_pd_flags flags = 0;
	struct mana_ib_dev *dev;
	struct gdma_dev *mdev;
	int err;

	dev = container_of(ibdev, struct mana_ib_dev, ib_dev);
	mdev = dev->gdma_dev;

	mana_gd_init_req_hdr(&req.hdr, GDMA_CREATE_PD, sizeof(req),
			     sizeof(resp));

	req.flags = flags;
	err = mana_gd_send_request(mdev->gdma_context, sizeof(req), &req,
				   sizeof(resp), &resp);

	if (err || resp.hdr.status) {
		ibdev_dbg(&dev->ib_dev,
			  "Failed to get pd_id err %d status %u\n", err,
			  resp.hdr.status);
		if (!err)
			err = -EPROTO;

		return err;
	}

	pd->pd_handle = resp.pd_handle;
	pd->pdn = resp.pd_id;
	ibdev_dbg(&dev->ib_dev, "pd_handle 0x%llx pd_id %d\n",
		  pd->pd_handle, pd->pdn);

	mutex_init(&pd->vport_mutex);
	pd->vport_use_count = 0;
	return 0;
}

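/*
 * Destroy the protection domain via GDMA_DESTROY_PD. The misspelled
 * response struct name (gdma_destory_pd_resp) matches its definition in
 * the GDMA protocol header, so it is kept as-is here.
 */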
int mana_ib_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
{
	struct mana_ib_pd *pd = container_of(ibpd, struct mana_ib_pd, ibpd);
	struct ib_device *ibdev = ibpd->device;
	struct gdma_destory_pd_resp resp = {};
	struct gdma_destroy_pd_req req = {};
	struct mana_ib_dev *dev;
	struct gdma_dev *mdev;
	int err;

	dev = container_of(ibdev, struct mana_ib_dev, ib_dev);
	mdev = dev->gdma_dev;

	mana_gd_init_req_hdr(&req.hdr, GDMA_DESTROY_PD, sizeof(req),
			     sizeof(resp));

	req.pd_handle = pd->pd_handle;
	err = mana_gd_send_request(mdev->gdma_context, sizeof(req), &req,
				   sizeof(resp), &resp);

	if (err || resp.hdr.status) {
		ibdev_dbg(&dev->ib_dev,
			  "Failed to destroy pd_handle 0x%llx err %d status %u\n",
			  pd->pd_handle, err, resp.hdr.status);
		if (!err)
			err = -EPROTO;
	}

	return err;
}

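/*
 * Release a doorbell page index back to the hardware by destroying a
 * one-entry GDMA_RESOURCE_DOORBELL_PAGE resource range.
 */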
static int mana_gd_destroy_doorbell_page(struct gdma_context *gc,
					 int doorbell_page)
{
	struct gdma_destroy_resource_range_req req = {};
	struct gdma_resp_hdr resp = {};
	int err;

	mana_gd_init_req_hdr(&req.hdr, GDMA_DESTROY_RESOURCE_RANGE,
			     sizeof(req), sizeof(resp));

	req.resource_type = GDMA_RESOURCE_DOORBELL_PAGE;
	req.num_resources = 1;
	req.allocated_resources = doorbell_page;

	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
	if (err || resp.status) {
		dev_err(gc->dev,
			"Failed to destroy doorbell page: ret %d, 0x%x\n",
			err, resp.status);
		return err ?: -EPROTO;
	}

	return 0;
}

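/*
 * Allocate a single doorbell page index from the hardware. The request
 * asks for one GDMA_RESOURCE_DOORBELL_PAGE resource with no alignment
 * constraint, letting GDMA pick any free index starting from 0.
 */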
static int mana_gd_allocate_doorbell_page(struct gdma_context *gc,
					  int *doorbell_page)
{
	struct gdma_allocate_resource_range_req req = {};
	struct gdma_allocate_resource_range_resp resp = {};
	int err;

	mana_gd_init_req_hdr(&req.hdr, GDMA_ALLOCATE_RESOURCE_RANGE,
			     sizeof(req), sizeof(resp));

	req.resource_type = GDMA_RESOURCE_DOORBELL_PAGE;
	req.num_resources = 1;
	req.alignment = 1;

	/* Have GDMA start searching from 0 */
	req.allocated_resources = 0;

	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
	if (err || resp.hdr.status) {
		dev_err(gc->dev,
			"Failed to allocate doorbell page: ret %d, 0x%x\n",
			err, resp.hdr.status);
		return err ?: -EPROTO;
	}

	*doorbell_page = resp.allocated_resources;

	return 0;
}

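/*
 * Set up a user context: each ucontext owns one doorbell page index,
 * which user space later maps through mana_ib_mmap().
 */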
int mana_ib_alloc_ucontext(struct ib_ucontext *ibcontext,
			   struct ib_udata *udata)
{
	struct mana_ib_ucontext *ucontext =
		container_of(ibcontext, struct mana_ib_ucontext, ibucontext);
	struct ib_device *ibdev = ibcontext->device;
	struct mana_ib_dev *mdev;
	struct gdma_context *gc;
	struct gdma_dev *dev;
	int doorbell_page;
	int ret;

	mdev = container_of(ibdev, struct mana_ib_dev, ib_dev);
	dev = mdev->gdma_dev;
	gc = dev->gdma_context;

	/* Allocate a doorbell page index */
	ret = mana_gd_allocate_doorbell_page(gc, &doorbell_page);
	if (ret) {
		ibdev_dbg(ibdev, "Failed to allocate doorbell page %d\n", ret);
		return ret;
	}

	ibdev_dbg(ibdev, "Doorbell page allocated %d\n", doorbell_page);

	ucontext->doorbell = doorbell_page;

	return 0;
}

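/*
 * Tear down a user context by returning its doorbell page to the
 * hardware. Failure is only logged; ucontext teardown cannot be aborted.
 */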
void mana_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
{
	struct mana_ib_ucontext *mana_ucontext =
		container_of(ibcontext, struct mana_ib_ucontext, ibucontext);
	struct ib_device *ibdev = ibcontext->device;
	struct mana_ib_dev *mdev;
	struct gdma_context *gc;
	int ret;

	mdev = container_of(ibdev, struct mana_ib_dev, ib_dev);
	gc = mdev->gdma_dev->gdma_context;

	ret = mana_gd_destroy_doorbell_page(gc, mana_ucontext->doorbell);
	if (ret)
		ibdev_dbg(ibdev, "Failed to destroy doorbell page %d\n", ret);
}

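/*
 * Send the initial GDMA_CREATE_DMA_REGION message carrying the first
 * batch of page addresses. When more batches follow, the caller passes
 * GDMA_STATUS_MORE_ENTRIES as the expected status; any other firmware
 * status is treated as a protocol error.
 */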
static int
mana_ib_gd_first_dma_region(struct mana_ib_dev *dev,
			    struct gdma_context *gc,
			    struct gdma_create_dma_region_req *create_req,
			    size_t num_pages, mana_handle_t *gdma_region,
			    u32 expected_status)
{
	struct gdma_create_dma_region_resp create_resp = {};
	unsigned int create_req_msg_size;
	int err;

	create_req_msg_size =
		struct_size(create_req, page_addr_list, num_pages);
	create_req->page_addr_list_len = num_pages;

	err = mana_gd_send_request(gc, create_req_msg_size, create_req,
				   sizeof(create_resp), &create_resp);
	if (err || create_resp.hdr.status != expected_status) {
		ibdev_dbg(&dev->ib_dev,
			  "Failed to create DMA region: %d, 0x%x\n",
			  err, create_resp.hdr.status);
		if (!err)
			err = -EPROTO;

		return err;
	}

	*gdma_region = create_resp.dma_region_handle;
	ibdev_dbg(&dev->ib_dev, "Created DMA region handle 0x%llx\n",
		  *gdma_region);

	return 0;
}

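/*
 * Append another batch of page addresses to an existing DMA region with
 * a GDMA_DMA_REGION_ADD_PAGES message, again matching the firmware
 * status against the caller's expectation.
 */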
static int
mana_ib_gd_add_dma_region(struct mana_ib_dev *dev, struct gdma_context *gc,
			  struct gdma_dma_region_add_pages_req *add_req,
			  unsigned int num_pages, u32 expected_status)
{
	unsigned int add_req_msg_size =
		struct_size(add_req, page_addr_list, num_pages);
	struct gdma_general_resp add_resp = {};
	int err;

	mana_gd_init_req_hdr(&add_req->hdr, GDMA_DMA_REGION_ADD_PAGES,
			     add_req_msg_size, sizeof(add_resp));
	add_req->page_addr_list_len = num_pages;

	err = mana_gd_send_request(gc, add_req_msg_size, add_req,
				   sizeof(add_resp), &add_resp);
	if (err || add_resp.hdr.status != expected_status) {
		ibdev_dbg(&dev->ib_dev,
			  "Failed to add pages to DMA region: %d, 0x%x\n",
			  err, add_resp.hdr.status);

		if (!err)
			err = -EPROTO;

		return err;
	}

	return 0;
}

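/*
 * Register a userspace memory region with the hardware. Because an HWC
 * request is bounded by hwc->max_req_msg_size, the page list is split
 * into one create message followed by as many add-pages messages as
 * needed; a single request buffer is reused for every chunk.
 */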
int mana_ib_gd_create_dma_region(struct mana_ib_dev *dev, struct ib_umem *umem,
				 mana_handle_t *gdma_region)
{
	struct gdma_dma_region_add_pages_req *add_req = NULL;
	size_t num_pages_processed = 0, num_pages_to_handle;
	struct gdma_create_dma_region_req *create_req;
	unsigned int create_req_msg_size;
	struct hw_channel_context *hwc;
	struct ib_block_iter biter;
	size_t max_pgs_add_cmd = 0;
	size_t max_pgs_create_cmd;
	struct gdma_context *gc;
	size_t num_pages_total;
	struct gdma_dev *mdev;
	unsigned long page_sz;
	unsigned int tail = 0;
	u64 *page_addr_list;
	void *request_buf;
	int err;

	mdev = dev->gdma_dev;
	gc = mdev->gdma_context;
	hwc = gc->hwc.driver_data;

	/* Hardware requires dma region to align to chosen page size */
	page_sz = ib_umem_find_best_pgsz(umem, PAGE_SZ_BM, 0);
	if (!page_sz) {
		ibdev_dbg(&dev->ib_dev, "Failed to find page size.\n");
		return -ENOMEM;
	}
	num_pages_total = ib_umem_num_dma_blocks(umem, page_sz);

	max_pgs_create_cmd =
		(hwc->max_req_msg_size - sizeof(*create_req)) / sizeof(u64);
	num_pages_to_handle =
		min_t(size_t, num_pages_total, max_pgs_create_cmd);
	create_req_msg_size =
		struct_size(create_req, page_addr_list, num_pages_to_handle);

	request_buf = kzalloc(hwc->max_req_msg_size, GFP_KERNEL);
	if (!request_buf)
		return -ENOMEM;

	create_req = request_buf;
	mana_gd_init_req_hdr(&create_req->hdr, GDMA_CREATE_DMA_REGION,
			     create_req_msg_size,
			     sizeof(struct gdma_create_dma_region_resp));

	create_req->length = umem->length;
	create_req->offset_in_page = umem->address & (page_sz - 1);
	create_req->gdma_page_type = order_base_2(page_sz) - PAGE_SHIFT;
	create_req->page_count = num_pages_total;

	ibdev_dbg(&dev->ib_dev, "size_dma_region %lu num_pages_total %lu\n",
		  umem->length, num_pages_total);

	ibdev_dbg(&dev->ib_dev, "page_sz %lu offset_in_page %u\n",
		  page_sz, create_req->offset_in_page);

	ibdev_dbg(&dev->ib_dev, "num_pages_to_handle %lu, gdma_page_type %u",
		  num_pages_to_handle, create_req->gdma_page_type);

	page_addr_list = create_req->page_addr_list;
	rdma_umem_for_each_dma_block(umem, &biter, page_sz) {
		u32 expected_status = 0;

		page_addr_list[tail++] = rdma_block_iter_dma_address(&biter);
		if (tail < num_pages_to_handle)
			continue;

		if (num_pages_processed + num_pages_to_handle <
		    num_pages_total)
			expected_status = GDMA_STATUS_MORE_ENTRIES;

		if (!num_pages_processed) {
			/* First create message */
			err = mana_ib_gd_first_dma_region(dev, gc, create_req,
							  tail, gdma_region,
							  expected_status);
			if (err)
				goto out;

			max_pgs_add_cmd = (hwc->max_req_msg_size -
				sizeof(*add_req)) / sizeof(u64);

			add_req = request_buf;
			add_req->dma_region_handle = *gdma_region;
			add_req->reserved3 = 0;
			page_addr_list = add_req->page_addr_list;
		} else {
			/* Subsequent add-pages messages */
			err = mana_ib_gd_add_dma_region(dev, gc, add_req, tail,
							expected_status);
			if (err)
				break;
		}

		num_pages_processed += tail;
		tail = 0;

		/* The remaining pages to create */
		num_pages_to_handle =
			min_t(size_t,
			      num_pages_total - num_pages_processed,
			      max_pgs_add_cmd);
	}

	if (err)
		mana_ib_gd_destroy_dma_region(dev, *gdma_region);

out:
	kfree(request_buf);
	return err;
}

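/* Unregister a DMA region handle previously created above. */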
int mana_ib_gd_destroy_dma_region(struct mana_ib_dev *dev, u64 gdma_region)
{
	struct gdma_dev *mdev = dev->gdma_dev;
	struct gdma_context *gc;

	gc = mdev->gdma_context;
	ibdev_dbg(&dev->ib_dev, "destroy dma region 0x%llx\n", gdma_region);

	return mana_gd_destroy_dma_region(gc, gdma_region);
}

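/*
 * Map the ucontext's doorbell page into user space. Only offset 0 is
 * accepted, and the page is mapped with write-combining.
 */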
int mana_ib_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vma)
{
	struct mana_ib_ucontext *mana_ucontext =
		container_of(ibcontext, struct mana_ib_ucontext, ibucontext);
	struct ib_device *ibdev = ibcontext->device;
	struct mana_ib_dev *mdev;
	struct gdma_context *gc;
	phys_addr_t pfn;
	pgprot_t prot;
	int ret;

	mdev = container_of(ibdev, struct mana_ib_dev, ib_dev);
	gc = mdev->gdma_dev->gdma_context;

	if (vma->vm_pgoff != 0) {
		ibdev_dbg(ibdev, "Unexpected vm_pgoff %lu\n", vma->vm_pgoff);
		return -EINVAL;
	}

	/* Map to the page indexed by ucontext->doorbell */
	pfn = (gc->phys_db_page_base +
	       gc->db_page_size * mana_ucontext->doorbell) >>
	      PAGE_SHIFT;
	prot = pgprot_writecombine(vma->vm_page_prot);

	ret = rdma_user_mmap_io(ibcontext, vma, pfn, gc->db_page_size, prot,
				NULL);
	if (ret)
		ibdev_dbg(ibdev, "can't rdma_user_mmap_io ret %d\n", ret);
	else
		ibdev_dbg(ibdev, "mapped I/O pfn 0x%llx page_size %u, ret %d\n",
			  pfn, gc->db_page_size, ret);

	return ret;
}

int mana_ib_get_port_immutable(struct ib_device *ibdev, u32 port_num,
			       struct ib_port_immutable *immutable)
{
	/*
	 * This version only supports RAW_PACKET;
	 * other fields need to be filled for other port types
	 */
	immutable->core_cap_flags = RDMA_CORE_PORT_RAW_PACKET;

	return 0;
}

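/* Report device limits from the cached adapter capabilities. */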
int mana_ib_query_device(struct ib_device *ibdev, struct ib_device_attr *props,
			 struct ib_udata *uhw)
{
	struct mana_ib_dev *dev = container_of(ibdev,
			struct mana_ib_dev, ib_dev);

	props->max_qp = dev->adapter_caps.max_qp_count;
	props->max_qp_wr = dev->adapter_caps.max_qp_wr;
	props->max_cq = dev->adapter_caps.max_cq_count;
	props->max_cqe = dev->adapter_caps.max_qp_wr;
	props->max_mr = dev->adapter_caps.max_mr_count;
	props->max_mr_size = MANA_IB_MAX_MR_SIZE;
	props->max_send_sge = dev->adapter_caps.max_send_sge_count;
	props->max_recv_sge = dev->adapter_caps.max_recv_sge_count;

	return 0;
}

int mana_ib_query_port(struct ib_device *ibdev, u32 port,
		       struct ib_port_attr *props)
{
	/* This version doesn't return port properties */
	return 0;
}

int mana_ib_query_gid(struct ib_device *ibdev, u32 port, int index,
		      union ib_gid *gid)
{
	/* This version doesn't return GID properties */
	return 0;
}

void mana_ib_disassociate_ucontext(struct ib_ucontext *ibcontext)
{
}

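/*
 * Query the adapter's RDMA capabilities once and cache them in
 * dev->adapter_caps. max_qp_wr is derived from the smaller of the SQ
 * and RQ sizes expressed in hardware queue-entry units.
 */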
int mana_ib_gd_query_adapter_caps(struct mana_ib_dev *dev)
{
	struct mana_ib_adapter_caps *caps = &dev->adapter_caps;
	struct mana_ib_query_adapter_caps_resp resp = {};
	struct mana_ib_query_adapter_caps_req req = {};
	int err;

	mana_gd_init_req_hdr(&req.hdr, MANA_IB_GET_ADAPTER_CAP, sizeof(req),
			     sizeof(resp));
	req.hdr.resp.msg_version = GDMA_MESSAGE_V3;
	req.hdr.dev_id = dev->gdma_dev->dev_id;

	err = mana_gd_send_request(dev->gdma_dev->gdma_context, sizeof(req),
				   &req, sizeof(resp), &resp);

	if (err) {
		ibdev_err(&dev->ib_dev,
			  "Failed to query adapter caps err %d\n", err);
		return err;
	}

	caps->max_sq_id = resp.max_sq_id;
	caps->max_rq_id = resp.max_rq_id;
	caps->max_cq_id = resp.max_cq_id;
	caps->max_qp_count = resp.max_qp_count;
	caps->max_cq_count = resp.max_cq_count;
	caps->max_mr_count = resp.max_mr_count;
	caps->max_pd_count = resp.max_pd_count;
	caps->max_inbound_read_limit = resp.max_inbound_read_limit;
	caps->max_outbound_read_limit = resp.max_outbound_read_limit;
	caps->mw_count = resp.mw_count;
	caps->max_srq_count = resp.max_srq_count;
	caps->max_qp_wr = min_t(u32,
				resp.max_requester_sq_size / GDMA_MAX_SQE_SIZE,
				resp.max_requester_rq_size / GDMA_MAX_RQE_SIZE);
	caps->max_inline_data_size = resp.max_inline_data_size;
	caps->max_send_sge_count = resp.max_send_sge_count;
	caps->max_recv_sge_count = resp.max_recv_sge_count;

	return 0;
}