Commit | Line | Data |
---|---|---|
1ac5a404 SX |
1 | /* |
2 | * Broadcom NetXtreme-E RoCE driver. | |
3 | * | |
4 | * Copyright (c) 2016 - 2017, Broadcom. All rights reserved. The term | |
5 | * Broadcom refers to Broadcom Limited and/or its subsidiaries. | |
6 | * | |
7 | * This software is available to you under a choice of one of two | |
8 | * licenses. You may choose to be licensed under the terms of the GNU | |
9 | * General Public License (GPL) Version 2, available from the file | |
10 | * COPYING in the main directory of this source tree, or the | |
11 | * BSD license below: | |
12 | * | |
13 | * Redistribution and use in source and binary forms, with or without | |
14 | * modification, are permitted provided that the following conditions | |
15 | * are met: | |
16 | * | |
17 | * 1. Redistributions of source code must retain the above copyright | |
18 | * notice, this list of conditions and the following disclaimer. | |
19 | * 2. Redistributions in binary form must reproduce the above copyright | |
20 | * notice, this list of conditions and the following disclaimer in | |
21 | * the documentation and/or other materials provided with the | |
22 | * distribution. | |
23 | * | |
24 | * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' | |
25 | * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, | |
26 | * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR | |
27 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS | |
28 | * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR | |
29 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF | |
30 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR | |
31 | * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, | |
32 | * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE | |
33 | * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN | |
34 | * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | |
35 | * | |
36 | * Description: QPLib resource manager | |
37 | */ | |
38 | ||
08920b8f JP |
39 | #define dev_fmt(fmt) "QPLIB: " fmt |
40 | ||
1ac5a404 SX |
41 | #include <linux/spinlock.h> |
42 | #include <linux/pci.h> | |
43 | #include <linux/interrupt.h> | |
44 | #include <linux/inetdevice.h> | |
45 | #include <linux/dma-mapping.h> | |
46 | #include <linux/if_vlan.h> | |
65a16620 | 47 | #include <linux/vmalloc.h> |
6ef999f5 JG |
48 | #include <rdma/ib_verbs.h> |
49 | #include <rdma/ib_umem.h> | |
50 | ||
1ac5a404 SX |
51 | #include "roce_hsi.h" |
52 | #include "qplib_res.h" | |
53 | #include "qplib_sp.h" | |
54 | #include "qplib_rcfw.h" | |
55 | ||
56 | static void bnxt_qplib_free_stats_ctx(struct pci_dev *pdev, | |
57 | struct bnxt_qplib_stats *stats); | |
58 | static int bnxt_qplib_alloc_stats_ctx(struct pci_dev *pdev, | |
0c23af52 | 59 | struct bnxt_qplib_chip_ctx *cctx, |
1ac5a404 SX |
60 | struct bnxt_qplib_stats *stats); |
61 | ||
62 | /* PBL */ | |
0c4dcd60 | 63 | static void __free_pbl(struct bnxt_qplib_res *res, struct bnxt_qplib_pbl *pbl, |
1ac5a404 SX |
64 | bool is_umem) |
65 | { | |
0c4dcd60 | 66 | struct pci_dev *pdev = res->pdev; |
1ac5a404 SX |
67 | int i; |
68 | ||
69 | if (!is_umem) { | |
70 | for (i = 0; i < pbl->pg_count; i++) { | |
71 | if (pbl->pg_arr[i]) | |
72 | dma_free_coherent(&pdev->dev, pbl->pg_size, | |
73 | (void *)((unsigned long) | |
74 | pbl->pg_arr[i] & | |
75 | PAGE_MASK), | |
76 | pbl->pg_map_arr[i]); | |
77 | else | |
78 | dev_warn(&pdev->dev, | |
08920b8f | 79 | "PBL free pg_arr[%d] empty?!\n", i); |
1ac5a404 SX |
80 | pbl->pg_arr[i] = NULL; |
81 | } | |
82 | } | |
0c4dcd60 | 83 | vfree(pbl->pg_arr); |
1ac5a404 | 84 | pbl->pg_arr = NULL; |
0c4dcd60 | 85 | vfree(pbl->pg_map_arr); |
1ac5a404 SX |
86 | pbl->pg_map_arr = NULL; |
87 | pbl->pg_count = 0; | |
88 | pbl->pg_size = 0; | |
89 | } | |
90 | ||
0c4dcd60 DS |
91 | static void bnxt_qplib_fill_user_dma_pages(struct bnxt_qplib_pbl *pbl, |
92 | struct bnxt_qplib_sg_info *sginfo) | |
1ac5a404 | 93 | { |
6ef999f5 | 94 | struct ib_block_iter biter; |
0c4dcd60 DS |
95 | int i = 0; |
96 | ||
6ef999f5 JG |
97 | rdma_umem_for_each_dma_block(sginfo->umem, &biter, sginfo->pgsize) { |
98 | pbl->pg_map_arr[i] = rdma_block_iter_dma_address(&biter); | |
0c4dcd60 DS |
99 | pbl->pg_arr[i] = NULL; |
100 | pbl->pg_count++; | |
101 | i++; | |
102 | } | |
103 | } | |
104 | ||
/* Allocate one level of a page-buffer list (PBL).
 *
 * For kernel queues (no umem) this allocates sginfo->npages coherent DMA
 * pages; for user queues it only records the DMA addresses of the caller's
 * umem blocks.  Returns 0 on success or -ENOMEM; on failure everything
 * allocated so far for this PBL is released.
 */
static int __alloc_pbl(struct bnxt_qplib_res *res,
		       struct bnxt_qplib_pbl *pbl,
		       struct bnxt_qplib_sg_info *sginfo)
{
	struct pci_dev *pdev = res->pdev;
	bool is_umem = false;
	u32 pages;
	int i;

	/* Caller does not want PTE pages at this level */
	if (sginfo->nopte)
		return 0;
	/* Page count comes from the umem for user memory, otherwise from
	 * the precomputed npages.
	 */
	if (sginfo->umem)
		pages = ib_umem_num_dma_blocks(sginfo->umem, sginfo->pgsize);
	else
		pages = sginfo->npages;
	/* page ptr arrays */
	pbl->pg_arr = vmalloc(pages * sizeof(void *));
	if (!pbl->pg_arr)
		return -ENOMEM;

	pbl->pg_map_arr = vmalloc(pages * sizeof(dma_addr_t));
	if (!pbl->pg_map_arr) {
		vfree(pbl->pg_arr);
		pbl->pg_arr = NULL;
		return -ENOMEM;
	}
	pbl->pg_count = 0;
	pbl->pg_size = sginfo->pgsize;

	if (!sginfo->umem) {
		/* Kernel memory: allocate each page; pg_count tracks how
		 * many succeeded so the fail path frees exactly those.
		 */
		for (i = 0; i < pages; i++) {
			pbl->pg_arr[i] = dma_alloc_coherent(&pdev->dev,
							    pbl->pg_size,
							    &pbl->pg_map_arr[i],
							    GFP_KERNEL);
			if (!pbl->pg_arr[i])
				goto fail;
			pbl->pg_count++;
		}
	} else {
		is_umem = true;
		bnxt_qplib_fill_user_dma_pages(pbl, sginfo);
	}

	return 0;
fail:
	__free_pbl(res, pbl, is_umem);
	return -ENOMEM;
}
154 | ||
155 | /* HWQ */ | |
0c4dcd60 DS |
156 | void bnxt_qplib_free_hwq(struct bnxt_qplib_res *res, |
157 | struct bnxt_qplib_hwq *hwq) | |
1ac5a404 SX |
158 | { |
159 | int i; | |
160 | ||
161 | if (!hwq->max_elements) | |
162 | return; | |
163 | if (hwq->level >= PBL_LVL_MAX) | |
164 | return; | |
165 | ||
166 | for (i = 0; i < hwq->level + 1; i++) { | |
167 | if (i == hwq->level) | |
0c4dcd60 | 168 | __free_pbl(res, &hwq->pbl[i], hwq->is_user); |
1ac5a404 | 169 | else |
0c4dcd60 | 170 | __free_pbl(res, &hwq->pbl[i], false); |
1ac5a404 SX |
171 | } |
172 | ||
173 | hwq->level = PBL_LVL_MAX; | |
174 | hwq->max_elements = 0; | |
175 | hwq->element_size = 0; | |
176 | hwq->prod = 0; | |
177 | hwq->cons = 0; | |
178 | hwq->cp_bit = 0; | |
179 | } | |
180 | ||
181 | /* All HWQs are power of 2 in size */ | |
0c4dcd60 DS |
182 | |
183 | int bnxt_qplib_alloc_init_hwq(struct bnxt_qplib_hwq *hwq, | |
184 | struct bnxt_qplib_hwq_attr *hwq_attr) | |
1ac5a404 | 185 | { |
0c4dcd60 DS |
186 | u32 npages, aux_slots, pg_size, aux_pages = 0, aux_size = 0; |
187 | struct bnxt_qplib_sg_info sginfo = {}; | |
188 | u32 depth, stride, npbl, npde; | |
1ac5a404 | 189 | dma_addr_t *src_phys_ptr, **dst_virt_ptr; |
0c4dcd60 DS |
190 | struct bnxt_qplib_res *res; |
191 | struct pci_dev *pdev; | |
192 | int i, rc, lvl; | |
193 | ||
194 | res = hwq_attr->res; | |
195 | pdev = res->pdev; | |
0c4dcd60 | 196 | pg_size = hwq_attr->sginfo->pgsize; |
1ac5a404 SX |
197 | hwq->level = PBL_LVL_MAX; |
198 | ||
0c4dcd60 DS |
199 | depth = roundup_pow_of_two(hwq_attr->depth); |
200 | stride = roundup_pow_of_two(hwq_attr->stride); | |
201 | if (hwq_attr->aux_depth) { | |
202 | aux_slots = hwq_attr->aux_depth; | |
203 | aux_size = roundup_pow_of_two(hwq_attr->aux_stride); | |
204 | aux_pages = (aux_slots * aux_size) / pg_size; | |
205 | if ((aux_slots * aux_size) % pg_size) | |
1ac5a404 SX |
206 | aux_pages++; |
207 | } | |
5aa84840 | 208 | |
6ef999f5 | 209 | if (!hwq_attr->sginfo->umem) { |
1ac5a404 | 210 | hwq->is_user = false; |
0c4dcd60 DS |
211 | npages = (depth * stride) / pg_size + aux_pages; |
212 | if ((depth * stride) % pg_size) | |
213 | npages++; | |
214 | if (!npages) | |
1ac5a404 | 215 | return -EINVAL; |
0c4dcd60 | 216 | hwq_attr->sginfo->npages = npages; |
1ac5a404 | 217 | } else { |
08c7f093 SX |
218 | npages = ib_umem_num_dma_blocks(hwq_attr->sginfo->umem, |
219 | hwq_attr->sginfo->pgsize); | |
1ac5a404 | 220 | hwq->is_user = true; |
1ac5a404 SX |
221 | } |
222 | ||
2b4ccce6 | 223 | if (npages == MAX_PBL_LVL_0_PGS && !hwq_attr->sginfo->nopte) { |
0c4dcd60 DS |
224 | /* This request is Level 0, map PTE */ |
225 | rc = __alloc_pbl(res, &hwq->pbl[PBL_LVL_0], hwq_attr->sginfo); | |
226 | if (rc) | |
227 | goto fail; | |
228 | hwq->level = PBL_LVL_0; | |
2b4ccce6 | 229 | goto done; |
0c4dcd60 | 230 | } |
1ac5a404 | 231 | |
2b4ccce6 | 232 | if (npages >= MAX_PBL_LVL_0_PGS) { |
0c4dcd60 DS |
233 | if (npages > MAX_PBL_LVL_1_PGS) { |
234 | u32 flag = (hwq_attr->type == HWQ_TYPE_L2_CMPL) ? | |
235 | 0 : PTU_PTE_VALID; | |
1ac5a404 | 236 | /* 2 levels of indirection */ |
0c4dcd60 DS |
237 | npbl = npages >> MAX_PBL_LVL_1_PGS_SHIFT; |
238 | if (npages % BIT(MAX_PBL_LVL_1_PGS_SHIFT)) | |
239 | npbl++; | |
240 | npde = npbl >> MAX_PDL_LVL_SHIFT; | |
241 | if (npbl % BIT(MAX_PDL_LVL_SHIFT)) | |
242 | npde++; | |
243 | /* Alloc PDE pages */ | |
244 | sginfo.pgsize = npde * pg_size; | |
245 | sginfo.npages = 1; | |
246 | rc = __alloc_pbl(res, &hwq->pbl[PBL_LVL_0], &sginfo); | |
247 | ||
248 | /* Alloc PBL pages */ | |
249 | sginfo.npages = npbl; | |
250 | sginfo.pgsize = PAGE_SIZE; | |
251 | rc = __alloc_pbl(res, &hwq->pbl[PBL_LVL_1], &sginfo); | |
1ac5a404 SX |
252 | if (rc) |
253 | goto fail; | |
0c4dcd60 | 254 | /* Fill PDL with PBL page pointers */ |
1ac5a404 SX |
255 | dst_virt_ptr = |
256 | (dma_addr_t **)hwq->pbl[PBL_LVL_0].pg_arr; | |
257 | src_phys_ptr = hwq->pbl[PBL_LVL_1].pg_map_arr; | |
0c4dcd60 DS |
258 | if (hwq_attr->type == HWQ_TYPE_MR) { |
259 | /* For MR it is expected that we supply only 1 contigous | |
260 | * page i.e only 1 entry in the PDL that will contain | |
261 | * all the PBLs for the user supplied memory region | |
262 | */ | |
263 | for (i = 0; i < hwq->pbl[PBL_LVL_1].pg_count; | |
264 | i++) | |
265 | dst_virt_ptr[0][i] = src_phys_ptr[i] | | |
266 | flag; | |
267 | } else { | |
268 | for (i = 0; i < hwq->pbl[PBL_LVL_1].pg_count; | |
269 | i++) | |
270 | dst_virt_ptr[PTR_PG(i)][PTR_IDX(i)] = | |
271 | src_phys_ptr[i] | | |
272 | PTU_PDE_VALID; | |
273 | } | |
274 | /* Alloc or init PTEs */ | |
275 | rc = __alloc_pbl(res, &hwq->pbl[PBL_LVL_2], | |
276 | hwq_attr->sginfo); | |
1ac5a404 SX |
277 | if (rc) |
278 | goto fail; | |
0c4dcd60 DS |
279 | hwq->level = PBL_LVL_2; |
280 | if (hwq_attr->sginfo->nopte) | |
281 | goto done; | |
282 | /* Fill PBLs with PTE pointers */ | |
1ac5a404 SX |
283 | dst_virt_ptr = |
284 | (dma_addr_t **)hwq->pbl[PBL_LVL_1].pg_arr; | |
285 | src_phys_ptr = hwq->pbl[PBL_LVL_2].pg_map_arr; | |
286 | for (i = 0; i < hwq->pbl[PBL_LVL_2].pg_count; i++) { | |
287 | dst_virt_ptr[PTR_PG(i)][PTR_IDX(i)] = | |
288 | src_phys_ptr[i] | PTU_PTE_VALID; | |
289 | } | |
0c4dcd60 | 290 | if (hwq_attr->type == HWQ_TYPE_QUEUE) { |
1ac5a404 SX |
291 | /* Find the last pg of the size */ |
292 | i = hwq->pbl[PBL_LVL_2].pg_count; | |
293 | dst_virt_ptr[PTR_PG(i - 1)][PTR_IDX(i - 1)] |= | |
294 | PTU_PTE_LAST; | |
295 | if (i > 1) | |
296 | dst_virt_ptr[PTR_PG(i - 2)] | |
297 | [PTR_IDX(i - 2)] |= | |
298 | PTU_PTE_NEXT_TO_LAST; | |
299 | } | |
0c4dcd60 DS |
300 | } else { /* pages < 512 npbl = 1, npde = 0 */ |
301 | u32 flag = (hwq_attr->type == HWQ_TYPE_L2_CMPL) ? | |
302 | 0 : PTU_PTE_VALID; | |
1ac5a404 SX |
303 | |
304 | /* 1 level of indirection */ | |
0c4dcd60 DS |
305 | npbl = npages >> MAX_PBL_LVL_1_PGS_SHIFT; |
306 | if (npages % BIT(MAX_PBL_LVL_1_PGS_SHIFT)) | |
307 | npbl++; | |
308 | sginfo.npages = npbl; | |
309 | sginfo.pgsize = PAGE_SIZE; | |
310 | /* Alloc PBL page */ | |
311 | rc = __alloc_pbl(res, &hwq->pbl[PBL_LVL_0], &sginfo); | |
1ac5a404 SX |
312 | if (rc) |
313 | goto fail; | |
0c4dcd60 DS |
314 | /* Alloc or init PTEs */ |
315 | rc = __alloc_pbl(res, &hwq->pbl[PBL_LVL_1], | |
316 | hwq_attr->sginfo); | |
317 | if (rc) | |
318 | goto fail; | |
319 | hwq->level = PBL_LVL_1; | |
320 | if (hwq_attr->sginfo->nopte) | |
321 | goto done; | |
322 | /* Fill PBL with PTE pointers */ | |
1ac5a404 SX |
323 | dst_virt_ptr = |
324 | (dma_addr_t **)hwq->pbl[PBL_LVL_0].pg_arr; | |
325 | src_phys_ptr = hwq->pbl[PBL_LVL_1].pg_map_arr; | |
0c4dcd60 | 326 | for (i = 0; i < hwq->pbl[PBL_LVL_1].pg_count; i++) |
1ac5a404 SX |
327 | dst_virt_ptr[PTR_PG(i)][PTR_IDX(i)] = |
328 | src_phys_ptr[i] | flag; | |
0c4dcd60 | 329 | if (hwq_attr->type == HWQ_TYPE_QUEUE) { |
1ac5a404 SX |
330 | /* Find the last pg of the size */ |
331 | i = hwq->pbl[PBL_LVL_1].pg_count; | |
332 | dst_virt_ptr[PTR_PG(i - 1)][PTR_IDX(i - 1)] |= | |
333 | PTU_PTE_LAST; | |
334 | if (i > 1) | |
335 | dst_virt_ptr[PTR_PG(i - 2)] | |
336 | [PTR_IDX(i - 2)] |= | |
337 | PTU_PTE_NEXT_TO_LAST; | |
338 | } | |
1ac5a404 SX |
339 | } |
340 | } | |
0c4dcd60 | 341 | done: |
1ac5a404 SX |
342 | hwq->prod = 0; |
343 | hwq->cons = 0; | |
0c4dcd60 DS |
344 | hwq->pdev = pdev; |
345 | hwq->depth = hwq_attr->depth; | |
346 | hwq->max_elements = depth; | |
347 | hwq->element_size = stride; | |
fddcbbb0 | 348 | hwq->qe_ppg = pg_size / stride; |
1ac5a404 | 349 | /* For direct access to the elements */ |
0c4dcd60 DS |
350 | lvl = hwq->level; |
351 | if (hwq_attr->sginfo->nopte && hwq->level) | |
352 | lvl = hwq->level - 1; | |
353 | hwq->pbl_ptr = hwq->pbl[lvl].pg_arr; | |
354 | hwq->pbl_dma_ptr = hwq->pbl[lvl].pg_map_arr; | |
355 | spin_lock_init(&hwq->lock); | |
1ac5a404 SX |
356 | |
357 | return 0; | |
1ac5a404 | 358 | fail: |
0c4dcd60 | 359 | bnxt_qplib_free_hwq(res, hwq); |
1ac5a404 SX |
360 | return -ENOMEM; |
361 | } | |
362 | ||
363 | /* Context Tables */ | |
0c4dcd60 | 364 | void bnxt_qplib_free_ctx(struct bnxt_qplib_res *res, |
1ac5a404 SX |
365 | struct bnxt_qplib_ctx *ctx) |
366 | { | |
367 | int i; | |
368 | ||
0c4dcd60 DS |
369 | bnxt_qplib_free_hwq(res, &ctx->qpc_tbl); |
370 | bnxt_qplib_free_hwq(res, &ctx->mrw_tbl); | |
371 | bnxt_qplib_free_hwq(res, &ctx->srqc_tbl); | |
372 | bnxt_qplib_free_hwq(res, &ctx->cq_tbl); | |
373 | bnxt_qplib_free_hwq(res, &ctx->tim_tbl); | |
1ac5a404 | 374 | for (i = 0; i < MAX_TQM_ALLOC_REQ; i++) |
0c4dcd60 DS |
375 | bnxt_qplib_free_hwq(res, &ctx->tqm_ctx.qtbl[i]); |
376 | /* restore original pde level before destroy */ | |
377 | ctx->tqm_ctx.pde.level = ctx->tqm_ctx.pde_level; | |
378 | bnxt_qplib_free_hwq(res, &ctx->tqm_ctx.pde); | |
379 | bnxt_qplib_free_stats_ctx(res->pdev, &ctx->stats); | |
380 | } | |
381 | ||
382 | static int bnxt_qplib_alloc_tqm_rings(struct bnxt_qplib_res *res, | |
383 | struct bnxt_qplib_ctx *ctx) | |
384 | { | |
385 | struct bnxt_qplib_hwq_attr hwq_attr = {}; | |
386 | struct bnxt_qplib_sg_info sginfo = {}; | |
387 | struct bnxt_qplib_tqm_ctx *tqmctx; | |
388 | int rc = 0; | |
389 | int i; | |
390 | ||
391 | tqmctx = &ctx->tqm_ctx; | |
392 | ||
393 | sginfo.pgsize = PAGE_SIZE; | |
394 | sginfo.pgshft = PAGE_SHIFT; | |
395 | hwq_attr.sginfo = &sginfo; | |
396 | hwq_attr.res = res; | |
397 | hwq_attr.type = HWQ_TYPE_CTX; | |
398 | hwq_attr.depth = 512; | |
399 | hwq_attr.stride = sizeof(u64); | |
400 | /* Alloc pdl buffer */ | |
401 | rc = bnxt_qplib_alloc_init_hwq(&tqmctx->pde, &hwq_attr); | |
402 | if (rc) | |
403 | goto out; | |
404 | /* Save original pdl level */ | |
405 | tqmctx->pde_level = tqmctx->pde.level; | |
406 | ||
407 | hwq_attr.stride = 1; | |
408 | for (i = 0; i < MAX_TQM_ALLOC_REQ; i++) { | |
409 | if (!tqmctx->qcount[i]) | |
410 | continue; | |
411 | hwq_attr.depth = ctx->qpc_count * tqmctx->qcount[i]; | |
412 | rc = bnxt_qplib_alloc_init_hwq(&tqmctx->qtbl[i], &hwq_attr); | |
413 | if (rc) | |
414 | goto out; | |
415 | } | |
416 | out: | |
417 | return rc; | |
418 | } | |
419 | ||
/* Program the TQM PDE page with pointers into each allocated queue
 * table.  Each TQM request gets a fixed window of MAX_TQM_ALLOC_BLK_SIZE
 * slots in the PDE; 2-level tables contribute one entry per PBL page,
 * shallower tables a single entry.  Finally pde.level is adjusted to
 * reflect the extra indirection introduced by this mapping.
 */
static void bnxt_qplib_map_tqm_pgtbl(struct bnxt_qplib_tqm_ctx *ctx)
{
	struct bnxt_qplib_hwq *tbl;
	dma_addr_t *dma_ptr;
	__le64 **pbl_ptr, *ptr;
	int i, j, k;
	int fnz_idx = -1;
	int pg_count;

	pbl_ptr = (__le64 **)ctx->pde.pbl_ptr;

	/* i walks the requests, j is the slot offset in the PDE */
	for (i = 0, j = 0; i < MAX_TQM_ALLOC_REQ;
	     i++, j += MAX_TQM_ALLOC_BLK_SIZE) {
		tbl = &ctx->qtbl[i];
		if (!tbl->max_elements)
			continue;
		if (fnz_idx == -1)
			fnz_idx = i; /* first non-zero index */
		switch (tbl->level) {
		case PBL_LVL_2:
			/* One PDE entry per PBL page of the table */
			pg_count = tbl->pbl[PBL_LVL_1].pg_count;
			for (k = 0; k < pg_count; k++) {
				ptr = &pbl_ptr[PTR_PG(j + k)][PTR_IDX(j + k)];
				dma_ptr = &tbl->pbl[PBL_LVL_1].pg_map_arr[k];
				*ptr = cpu_to_le64(*dma_ptr | PTU_PTE_VALID);
			}
			break;
		case PBL_LVL_1:
		case PBL_LVL_0:
		default:
			/* Single entry pointing at the table's first page */
			ptr = &pbl_ptr[PTR_PG(j)][PTR_IDX(j)];
			*ptr = cpu_to_le64(tbl->pbl[PBL_LVL_0].pg_map_arr[0] |
					   PTU_PTE_VALID);
			break;
		}
	}
	if (fnz_idx == -1)
		fnz_idx = 0;
	/* update pde level as per page table programming */
	ctx->pde.level = (ctx->qtbl[fnz_idx].level == PBL_LVL_2) ? PBL_LVL_2 :
			  ctx->qtbl[fnz_idx].level + 1;
}
462 | ||
463 | static int bnxt_qplib_setup_tqm_rings(struct bnxt_qplib_res *res, | |
464 | struct bnxt_qplib_ctx *ctx) | |
465 | { | |
466 | int rc = 0; | |
467 | ||
468 | rc = bnxt_qplib_alloc_tqm_rings(res, ctx); | |
469 | if (rc) | |
470 | goto fail; | |
471 | ||
472 | bnxt_qplib_map_tqm_pgtbl(&ctx->tqm_ctx); | |
473 | fail: | |
474 | return rc; | |
1ac5a404 SX |
475 | } |
476 | ||
477 | /* | |
478 | * Routine: bnxt_qplib_alloc_ctx | |
479 | * Description: | |
480 | * Context tables are memories which are used by the chip fw. | |
481 | * The 6 tables defined are: | |
482 | * QPC ctx - holds QP states | |
483 | * MRW ctx - holds memory region and window | |
484 | * SRQ ctx - holds shared RQ states | |
485 | * CQ ctx - holds completion queue states | |
486 | * TQM ctx - holds Tx Queue Manager context | |
487 | * TIM ctx - holds timer context | |
488 | * Depending on the size of the tbl requested, either a 1 Page Buffer List | |
489 | * or a 1-to-2-stage indirection Page Directory List + 1 PBL is used | |
490 | * instead. | |
491 | * Table might be employed as follows: | |
492 | * For 0 < ctx size <= 1 PAGE, 0 level of ind is used | |
493 | * For 1 PAGE < ctx size <= 512 entries size, 1 level of ind is used | |
494 | * For 512 < ctx size <= MAX, 2 levels of ind is used | |
495 | * Returns: | |
496 | * 0 if success, else -ERRORS | |
497 | */ | |
int bnxt_qplib_alloc_ctx(struct bnxt_qplib_res *res,
			 struct bnxt_qplib_ctx *ctx,
			 bool virt_fn, bool is_p5)
{
	struct bnxt_qplib_hwq_attr hwq_attr = {};
	struct bnxt_qplib_sg_info sginfo = {};
	int rc = 0;

	/* VFs and P5 chips do not host backing-store context tables;
	 * only the stats buffer is needed.
	 */
	if (virt_fn || is_p5)
		goto stats_alloc;

	/* QPC Tables */
	sginfo.pgsize = PAGE_SIZE;
	sginfo.pgshft = PAGE_SHIFT;
	hwq_attr.sginfo = &sginfo;

	/* hwq_attr is reused for every table below; only depth/stride
	 * change between calls.
	 */
	hwq_attr.res = res;
	hwq_attr.depth = ctx->qpc_count;
	hwq_attr.stride = BNXT_QPLIB_MAX_QP_CTX_ENTRY_SIZE;
	hwq_attr.type = HWQ_TYPE_CTX;
	rc = bnxt_qplib_alloc_init_hwq(&ctx->qpc_tbl, &hwq_attr);
	if (rc)
		goto fail;

	/* MRW Tables */
	hwq_attr.depth = ctx->mrw_count;
	hwq_attr.stride = BNXT_QPLIB_MAX_MRW_CTX_ENTRY_SIZE;
	rc = bnxt_qplib_alloc_init_hwq(&ctx->mrw_tbl, &hwq_attr);
	if (rc)
		goto fail;

	/* SRQ Tables */
	hwq_attr.depth = ctx->srqc_count;
	hwq_attr.stride = BNXT_QPLIB_MAX_SRQ_CTX_ENTRY_SIZE;
	rc = bnxt_qplib_alloc_init_hwq(&ctx->srqc_tbl, &hwq_attr);
	if (rc)
		goto fail;

	/* CQ Tables */
	hwq_attr.depth = ctx->cq_count;
	hwq_attr.stride = BNXT_QPLIB_MAX_CQ_CTX_ENTRY_SIZE;
	rc = bnxt_qplib_alloc_init_hwq(&ctx->cq_tbl, &hwq_attr);
	if (rc)
		goto fail;

	/* TQM Buffer */
	rc = bnxt_qplib_setup_tqm_rings(res, ctx);
	if (rc)
		goto fail;
	/* TIM Buffer: 16 bytes of timer context per QP */
	ctx->tim_tbl.max_elements = ctx->qpc_count * 16;
	hwq_attr.depth = ctx->qpc_count * 16;
	hwq_attr.stride = 1;
	rc = bnxt_qplib_alloc_init_hwq(&ctx->tim_tbl, &hwq_attr);
	if (rc)
		goto fail;
stats_alloc:
	/* Stats */
	rc = bnxt_qplib_alloc_stats_ctx(res->pdev, res->cctx, &ctx->stats);
	if (rc)
		goto fail;

	return 0;

fail:
	bnxt_qplib_free_ctx(res, ctx);
	return rc;
}
566 | ||
1ac5a404 SX |
567 | static void bnxt_qplib_free_sgid_tbl(struct bnxt_qplib_res *res, |
568 | struct bnxt_qplib_sgid_tbl *sgid_tbl) | |
569 | { | |
570 | kfree(sgid_tbl->tbl); | |
571 | kfree(sgid_tbl->hw_id); | |
572 | kfree(sgid_tbl->ctx); | |
5fac5b1b | 573 | kfree(sgid_tbl->vlan); |
1ac5a404 SX |
574 | sgid_tbl->tbl = NULL; |
575 | sgid_tbl->hw_id = NULL; | |
576 | sgid_tbl->ctx = NULL; | |
5fac5b1b | 577 | sgid_tbl->vlan = NULL; |
1ac5a404 SX |
578 | sgid_tbl->max = 0; |
579 | sgid_tbl->active = 0; | |
580 | } | |
581 | ||
582 | static int bnxt_qplib_alloc_sgid_tbl(struct bnxt_qplib_res *res, | |
583 | struct bnxt_qplib_sgid_tbl *sgid_tbl, | |
584 | u16 max) | |
585 | { | |
c56b593d | 586 | sgid_tbl->tbl = kcalloc(max, sizeof(*sgid_tbl->tbl), GFP_KERNEL); |
1ac5a404 SX |
587 | if (!sgid_tbl->tbl) |
588 | return -ENOMEM; | |
589 | ||
590 | sgid_tbl->hw_id = kcalloc(max, sizeof(u16), GFP_KERNEL); | |
591 | if (!sgid_tbl->hw_id) | |
592 | goto out_free1; | |
593 | ||
594 | sgid_tbl->ctx = kcalloc(max, sizeof(void *), GFP_KERNEL); | |
595 | if (!sgid_tbl->ctx) | |
596 | goto out_free2; | |
597 | ||
5fac5b1b KA |
598 | sgid_tbl->vlan = kcalloc(max, sizeof(u8), GFP_KERNEL); |
599 | if (!sgid_tbl->vlan) | |
600 | goto out_free3; | |
601 | ||
1ac5a404 SX |
602 | sgid_tbl->max = max; |
603 | return 0; | |
5fac5b1b KA |
604 | out_free3: |
605 | kfree(sgid_tbl->ctx); | |
606 | sgid_tbl->ctx = NULL; | |
1ac5a404 SX |
607 | out_free2: |
608 | kfree(sgid_tbl->hw_id); | |
609 | sgid_tbl->hw_id = NULL; | |
610 | out_free1: | |
611 | kfree(sgid_tbl->tbl); | |
612 | sgid_tbl->tbl = NULL; | |
613 | return -ENOMEM; | |
614 | }; | |
615 | ||
616 | static void bnxt_qplib_cleanup_sgid_tbl(struct bnxt_qplib_res *res, | |
617 | struct bnxt_qplib_sgid_tbl *sgid_tbl) | |
618 | { | |
619 | int i; | |
620 | ||
621 | for (i = 0; i < sgid_tbl->max; i++) { | |
622 | if (memcmp(&sgid_tbl->tbl[i], &bnxt_qplib_gid_zero, | |
623 | sizeof(bnxt_qplib_gid_zero))) | |
c56b593d SX |
624 | bnxt_qplib_del_sgid(sgid_tbl, &sgid_tbl->tbl[i].gid, |
625 | sgid_tbl->tbl[i].vlan_id, true); | |
1ac5a404 | 626 | } |
c56b593d | 627 | memset(sgid_tbl->tbl, 0, sizeof(*sgid_tbl->tbl) * sgid_tbl->max); |
1ac5a404 | 628 | memset(sgid_tbl->hw_id, -1, sizeof(u16) * sgid_tbl->max); |
5fac5b1b | 629 | memset(sgid_tbl->vlan, 0, sizeof(u8) * sgid_tbl->max); |
1ac5a404 SX |
630 | sgid_tbl->active = 0; |
631 | } | |
632 | ||
633 | static void bnxt_qplib_init_sgid_tbl(struct bnxt_qplib_sgid_tbl *sgid_tbl, | |
634 | struct net_device *netdev) | |
635 | { | |
c56b593d SX |
636 | u32 i; |
637 | ||
638 | for (i = 0; i < sgid_tbl->max; i++) | |
639 | sgid_tbl->tbl[i].vlan_id = 0xffff; | |
640 | ||
1ac5a404 SX |
641 | memset(sgid_tbl->hw_id, -1, sizeof(u16) * sgid_tbl->max); |
642 | } | |
643 | ||
1ac5a404 SX |
644 | /* PDs */ |
645 | int bnxt_qplib_alloc_pd(struct bnxt_qplib_pd_tbl *pdt, struct bnxt_qplib_pd *pd) | |
646 | { | |
647 | u32 bit_num; | |
648 | ||
649 | bit_num = find_first_bit(pdt->tbl, pdt->max); | |
650 | if (bit_num == pdt->max) | |
651 | return -ENOMEM; | |
652 | ||
653 | /* Found unused PD */ | |
654 | clear_bit(bit_num, pdt->tbl); | |
655 | pd->id = bit_num; | |
656 | return 0; | |
657 | } | |
658 | ||
659 | int bnxt_qplib_dealloc_pd(struct bnxt_qplib_res *res, | |
660 | struct bnxt_qplib_pd_tbl *pdt, | |
661 | struct bnxt_qplib_pd *pd) | |
662 | { | |
663 | if (test_and_set_bit(pd->id, pdt->tbl)) { | |
08920b8f | 664 | dev_warn(&res->pdev->dev, "Freeing an unused PD? pdn = %d\n", |
1ac5a404 SX |
665 | pd->id); |
666 | return -EINVAL; | |
667 | } | |
668 | pd->id = 0; | |
669 | return 0; | |
670 | } | |
671 | ||
672 | static void bnxt_qplib_free_pd_tbl(struct bnxt_qplib_pd_tbl *pdt) | |
673 | { | |
674 | kfree(pdt->tbl); | |
675 | pdt->tbl = NULL; | |
676 | pdt->max = 0; | |
677 | } | |
678 | ||
679 | static int bnxt_qplib_alloc_pd_tbl(struct bnxt_qplib_res *res, | |
680 | struct bnxt_qplib_pd_tbl *pdt, | |
681 | u32 max) | |
682 | { | |
683 | u32 bytes; | |
684 | ||
685 | bytes = max >> 3; | |
686 | if (!bytes) | |
687 | bytes = 1; | |
688 | pdt->tbl = kmalloc(bytes, GFP_KERNEL); | |
689 | if (!pdt->tbl) | |
690 | return -ENOMEM; | |
691 | ||
692 | pdt->max = max; | |
693 | memset((u8 *)pdt->tbl, 0xFF, bytes); | |
694 | ||
695 | return 0; | |
696 | } | |
697 | ||
698 | /* DPIs */ | |
0ac20faf SX |
699 | int bnxt_qplib_alloc_dpi(struct bnxt_qplib_res *res, |
700 | struct bnxt_qplib_dpi *dpi, | |
701 | void *app, u8 type) | |
1ac5a404 | 702 | { |
0ac20faf SX |
703 | struct bnxt_qplib_dpi_tbl *dpit = &res->dpi_tbl; |
704 | struct bnxt_qplib_reg_desc *reg; | |
1ac5a404 | 705 | u32 bit_num; |
0ac20faf SX |
706 | u64 umaddr; |
707 | ||
708 | reg = &dpit->wcreg; | |
709 | mutex_lock(&res->dpi_tbl_lock); | |
1ac5a404 SX |
710 | |
711 | bit_num = find_first_bit(dpit->tbl, dpit->max); | |
0ac20faf SX |
712 | if (bit_num == dpit->max) { |
713 | mutex_unlock(&res->dpi_tbl_lock); | |
1ac5a404 | 714 | return -ENOMEM; |
0ac20faf | 715 | } |
1ac5a404 SX |
716 | |
717 | /* Found unused DPI */ | |
718 | clear_bit(bit_num, dpit->tbl); | |
719 | dpit->app_tbl[bit_num] = app; | |
720 | ||
0ac20faf SX |
721 | dpi->bit = bit_num; |
722 | dpi->dpi = bit_num + (reg->offset - dpit->ucreg.offset) / PAGE_SIZE; | |
723 | ||
724 | umaddr = reg->bar_base + reg->offset + bit_num * PAGE_SIZE; | |
725 | dpi->umdbr = umaddr; | |
726 | ||
727 | switch (type) { | |
728 | case BNXT_QPLIB_DPI_TYPE_KERNEL: | |
d1d7fc3b | 729 | /* privileged dbr was already mapped just initialize it. */ |
0ac20faf SX |
730 | dpi->umdbr = dpit->ucreg.bar_base + |
731 | dpit->ucreg.offset + bit_num * PAGE_SIZE; | |
732 | dpi->dbr = dpit->priv_db; | |
733 | dpi->dpi = dpi->bit; | |
734 | break; | |
360da60d SX |
735 | case BNXT_QPLIB_DPI_TYPE_WC: |
736 | dpi->dbr = ioremap_wc(umaddr, PAGE_SIZE); | |
737 | break; | |
0ac20faf SX |
738 | default: |
739 | dpi->dbr = ioremap(umaddr, PAGE_SIZE); | |
740 | break; | |
741 | } | |
1ac5a404 | 742 | |
0ac20faf SX |
743 | dpi->type = type; |
744 | mutex_unlock(&res->dpi_tbl_lock); | |
1ac5a404 | 745 | return 0; |
0ac20faf | 746 | |
1ac5a404 SX |
747 | } |
748 | ||
749 | int bnxt_qplib_dealloc_dpi(struct bnxt_qplib_res *res, | |
0ac20faf | 750 | struct bnxt_qplib_dpi *dpi) |
1ac5a404 | 751 | { |
0ac20faf SX |
752 | struct bnxt_qplib_dpi_tbl *dpit = &res->dpi_tbl; |
753 | ||
754 | mutex_lock(&res->dpi_tbl_lock); | |
755 | if (dpi->dpi && dpi->type != BNXT_QPLIB_DPI_TYPE_KERNEL) | |
756 | pci_iounmap(res->pdev, dpi->dbr); | |
757 | ||
758 | if (test_and_set_bit(dpi->bit, dpit->tbl)) { | |
759 | dev_warn(&res->pdev->dev, | |
760 | "Freeing an unused DPI? dpi = %d, bit = %d\n", | |
761 | dpi->dpi, dpi->bit); | |
762 | mutex_unlock(&res->dpi_tbl_lock); | |
1ac5a404 SX |
763 | return -EINVAL; |
764 | } | |
765 | if (dpit->app_tbl) | |
0ac20faf | 766 | dpit->app_tbl[dpi->bit] = NULL; |
1ac5a404 | 767 | memset(dpi, 0, sizeof(*dpi)); |
0ac20faf | 768 | mutex_unlock(&res->dpi_tbl_lock); |
1ac5a404 SX |
769 | return 0; |
770 | } | |
771 | ||
772 | static void bnxt_qplib_free_dpi_tbl(struct bnxt_qplib_res *res, | |
773 | struct bnxt_qplib_dpi_tbl *dpit) | |
774 | { | |
775 | kfree(dpit->tbl); | |
776 | kfree(dpit->app_tbl); | |
0ac20faf SX |
777 | dpit->tbl = NULL; |
778 | dpit->app_tbl = NULL; | |
779 | dpit->max = 0; | |
1ac5a404 SX |
780 | } |
781 | ||
0ac20faf SX |
782 | static int bnxt_qplib_alloc_dpi_tbl(struct bnxt_qplib_res *res, |
783 | struct bnxt_qplib_dev_attr *dev_attr) | |
1ac5a404 | 784 | { |
0ac20faf SX |
785 | struct bnxt_qplib_dpi_tbl *dpit; |
786 | struct bnxt_qplib_reg_desc *reg; | |
787 | unsigned long bar_len; | |
788 | u32 dbr_offset; | |
789 | u32 bytes; | |
1ac5a404 | 790 | |
0ac20faf SX |
791 | dpit = &res->dpi_tbl; |
792 | reg = &dpit->wcreg; | |
1ac5a404 | 793 | |
0ac20faf SX |
794 | if (!bnxt_qplib_is_chip_gen_p5(res->cctx)) { |
795 | /* Offest should come from L2 driver */ | |
796 | dbr_offset = dev_attr->l2_db_size; | |
797 | dpit->ucreg.offset = dbr_offset; | |
798 | dpit->wcreg.offset = dbr_offset; | |
1ac5a404 SX |
799 | } |
800 | ||
0ac20faf SX |
801 | bar_len = pci_resource_len(res->pdev, reg->bar_id); |
802 | dpit->max = (bar_len - reg->offset) / PAGE_SIZE; | |
803 | if (dev_attr->max_dpi) | |
804 | dpit->max = min_t(u32, dpit->max, dev_attr->max_dpi); | |
1ac5a404 | 805 | |
0ac20faf | 806 | dpit->app_tbl = kcalloc(dpit->max, sizeof(void *), GFP_KERNEL); |
e5b89843 | 807 | if (!dpit->app_tbl) |
0ac20faf | 808 | return -ENOMEM; |
1ac5a404 SX |
809 | |
810 | bytes = dpit->max >> 3; | |
811 | if (!bytes) | |
812 | bytes = 1; | |
813 | ||
814 | dpit->tbl = kmalloc(bytes, GFP_KERNEL); | |
815 | if (!dpit->tbl) { | |
1ac5a404 SX |
816 | kfree(dpit->app_tbl); |
817 | dpit->app_tbl = NULL; | |
0ac20faf | 818 | return -ENOMEM; |
1ac5a404 SX |
819 | } |
820 | ||
821 | memset((u8 *)dpit->tbl, 0xFF, bytes); | |
64b63265 | 822 | mutex_init(&res->dpi_tbl_lock); |
0ac20faf | 823 | dpit->priv_db = dpit->ucreg.bar_reg + dpit->ucreg.offset; |
1ac5a404 SX |
824 | |
825 | return 0; | |
e5b89843 | 826 | |
1ac5a404 SX |
827 | } |
828 | ||
1ac5a404 SX |
829 | /* Stats */ |
830 | static void bnxt_qplib_free_stats_ctx(struct pci_dev *pdev, | |
831 | struct bnxt_qplib_stats *stats) | |
832 | { | |
833 | if (stats->dma) { | |
834 | dma_free_coherent(&pdev->dev, stats->size, | |
835 | stats->dma, stats->dma_map); | |
836 | } | |
837 | memset(stats, 0, sizeof(*stats)); | |
838 | stats->fw_id = -1; | |
839 | } | |
840 | ||
841 | static int bnxt_qplib_alloc_stats_ctx(struct pci_dev *pdev, | |
0c23af52 | 842 | struct bnxt_qplib_chip_ctx *cctx, |
1ac5a404 SX |
843 | struct bnxt_qplib_stats *stats) |
844 | { | |
845 | memset(stats, 0, sizeof(*stats)); | |
846 | stats->fw_id = -1; | |
0c23af52 | 847 | stats->size = cctx->hw_stats_size; |
1ac5a404 SX |
848 | stats->dma = dma_alloc_coherent(&pdev->dev, stats->size, |
849 | &stats->dma_map, GFP_KERNEL); | |
850 | if (!stats->dma) { | |
08920b8f | 851 | dev_err(&pdev->dev, "Stats DMA allocation failed\n"); |
1ac5a404 SX |
852 | return -ENOMEM; |
853 | } | |
854 | return 0; | |
855 | } | |
856 | ||
857 | void bnxt_qplib_cleanup_res(struct bnxt_qplib_res *res) | |
858 | { | |
1ac5a404 SX |
859 | bnxt_qplib_cleanup_sgid_tbl(res, &res->sgid_tbl); |
860 | } | |
861 | ||
862 | int bnxt_qplib_init_res(struct bnxt_qplib_res *res) | |
863 | { | |
864 | bnxt_qplib_init_sgid_tbl(&res->sgid_tbl, res->netdev); | |
1ac5a404 SX |
865 | |
866 | return 0; | |
867 | } | |
868 | ||
869 | void bnxt_qplib_free_res(struct bnxt_qplib_res *res) | |
870 | { | |
1ac5a404 SX |
871 | bnxt_qplib_free_sgid_tbl(res, &res->sgid_tbl); |
872 | bnxt_qplib_free_pd_tbl(&res->pd_tbl); | |
873 | bnxt_qplib_free_dpi_tbl(res, &res->dpi_tbl); | |
1ac5a404 SX |
874 | } |
875 | ||
876 | int bnxt_qplib_alloc_res(struct bnxt_qplib_res *res, struct pci_dev *pdev, | |
877 | struct net_device *netdev, | |
878 | struct bnxt_qplib_dev_attr *dev_attr) | |
879 | { | |
880 | int rc = 0; | |
881 | ||
882 | res->pdev = pdev; | |
883 | res->netdev = netdev; | |
884 | ||
885 | rc = bnxt_qplib_alloc_sgid_tbl(res, &res->sgid_tbl, dev_attr->max_sgid); | |
886 | if (rc) | |
887 | goto fail; | |
888 | ||
1ac5a404 SX |
889 | rc = bnxt_qplib_alloc_pd_tbl(res, &res->pd_tbl, dev_attr->max_pd); |
890 | if (rc) | |
891 | goto fail; | |
892 | ||
0ac20faf | 893 | rc = bnxt_qplib_alloc_dpi_tbl(res, dev_attr); |
1ac5a404 SX |
894 | if (rc) |
895 | goto fail; | |
896 | ||
897 | return 0; | |
898 | fail: | |
899 | bnxt_qplib_free_res(res); | |
900 | return rc; | |
901 | } | |
35f5ace5 | 902 | |
0ac20faf SX |
903 | void bnxt_qplib_unmap_db_bar(struct bnxt_qplib_res *res) |
904 | { | |
905 | struct bnxt_qplib_reg_desc *reg; | |
906 | ||
907 | reg = &res->dpi_tbl.ucreg; | |
908 | if (reg->bar_reg) | |
909 | pci_iounmap(res->pdev, reg->bar_reg); | |
910 | reg->bar_reg = NULL; | |
911 | reg->bar_base = 0; | |
912 | reg->len = 0; | |
913 | reg->bar_id = 0; | |
914 | } | |
915 | ||
916 | int bnxt_qplib_map_db_bar(struct bnxt_qplib_res *res) | |
917 | { | |
918 | struct bnxt_qplib_reg_desc *ucreg; | |
919 | struct bnxt_qplib_reg_desc *wcreg; | |
920 | ||
921 | wcreg = &res->dpi_tbl.wcreg; | |
922 | wcreg->bar_id = RCFW_DBR_PCI_BAR_REGION; | |
923 | wcreg->bar_base = pci_resource_start(res->pdev, wcreg->bar_id); | |
924 | ||
925 | ucreg = &res->dpi_tbl.ucreg; | |
926 | ucreg->bar_id = RCFW_DBR_PCI_BAR_REGION; | |
927 | ucreg->bar_base = pci_resource_start(res->pdev, ucreg->bar_id); | |
928 | ucreg->len = ucreg->offset + PAGE_SIZE; | |
929 | if (!ucreg->len || ((ucreg->len & (PAGE_SIZE - 1)) != 0)) { | |
930 | dev_err(&res->pdev->dev, "QPLIB: invalid dbr length %d", | |
931 | (int)ucreg->len); | |
932 | return -EINVAL; | |
933 | } | |
934 | ucreg->bar_reg = ioremap(ucreg->bar_base, ucreg->len); | |
935 | if (!ucreg->bar_reg) { | |
d1d7fc3b | 936 | dev_err(&res->pdev->dev, "privileged dpi map failed!"); |
0ac20faf SX |
937 | return -ENOMEM; |
938 | } | |
939 | ||
940 | return 0; | |
941 | } | |
942 | ||
35f5ace5 DS |
943 | int bnxt_qplib_determine_atomics(struct pci_dev *dev) |
944 | { | |
945 | int comp; | |
946 | u16 ctl2; | |
947 | ||
948 | comp = pci_enable_atomic_ops_to_root(dev, | |
949 | PCI_EXP_DEVCAP2_ATOMIC_COMP32); | |
950 | if (comp) | |
951 | return -EOPNOTSUPP; | |
952 | comp = pci_enable_atomic_ops_to_root(dev, | |
953 | PCI_EXP_DEVCAP2_ATOMIC_COMP64); | |
954 | if (comp) | |
955 | return -EOPNOTSUPP; | |
956 | pcie_capability_read_word(dev, PCI_EXP_DEVCTL2, &ctl2); | |
957 | return !(ctl2 & PCI_EXP_DEVCTL2_ATOMIC_REQ); | |
958 | } |