1 // SPDX-License-Identifier: GPL-2.0-only
/*
 * Huawei HiNIC PCI Express Linux driver
 * Copyright(c) 2017 Huawei Technologies Co., Ltd
 */
7 #include <linux/kernel.h>
8 #include <linux/types.h>
10 #include <linux/device.h>
11 #include <linux/errno.h>
12 #include <linux/slab.h>
13 #include <linux/semaphore.h>
14 #include <linux/dma-mapping.h>
16 #include <linux/err.h>
18 #include "hinic_hw_if.h"
19 #include "hinic_hw_eqs.h"
20 #include "hinic_hw_wqe.h"
21 #include "hinic_hw_wq.h"
22 #include "hinic_hw_cmdq.h"
23 #include "hinic_hw_qp_ctxt.h"
24 #include "hinic_hw_qp.h"
25 #include "hinic_hw_io.h"
/* size of one completion-index (CI) entry in the CI table */
#define CI_Q_ADDR_SIZE			sizeof(u32)

/* address of the CI entry for queue q_id inside the CI table */
#define CI_ADDR(base_addr, q_id)	((base_addr) + \
					 (q_id) * CI_Q_ADDR_SIZE)

/* total size of the CI table for num_qps queue pairs */
#define CI_TABLE_SIZE(num_qps)		((num_qps) * CI_Q_ADDR_SIZE)

/* doorbell page index of db relative to the start of the DB area */
#define DB_IDX(db, db_base)		\
	(((unsigned long)(db) - (unsigned long)(db_base)) / HINIC_DB_PAGE_SIZE)
38 IO_CMD_MODIFY_QUEUE_CTXT = 0,
41 static void init_db_area_idx(struct hinic_free_db_area *free_db_area)
45 for (i = 0; i < HINIC_DB_MAX_AREAS; i++)
46 free_db_area->db_idx[i] = i;
48 free_db_area->alloc_pos = 0;
49 free_db_area->return_pos = HINIC_DB_MAX_AREAS;
51 free_db_area->num_free = HINIC_DB_MAX_AREAS;
53 sema_init(&free_db_area->idx_lock, 1);
56 static void __iomem *get_db_area(struct hinic_func_to_io *func_to_io)
58 struct hinic_free_db_area *free_db_area = &func_to_io->free_db_area;
61 down(&free_db_area->idx_lock);
63 free_db_area->num_free--;
65 if (free_db_area->num_free < 0) {
66 free_db_area->num_free++;
67 up(&free_db_area->idx_lock);
68 return ERR_PTR(-ENOMEM);
71 pos = free_db_area->alloc_pos++;
72 pos &= HINIC_DB_MAX_AREAS - 1;
74 idx = free_db_area->db_idx[pos];
76 free_db_area->db_idx[pos] = -1;
78 up(&free_db_area->idx_lock);
80 return func_to_io->db_base + idx * HINIC_DB_PAGE_SIZE;
83 static void return_db_area(struct hinic_func_to_io *func_to_io,
84 void __iomem *db_base)
86 struct hinic_free_db_area *free_db_area = &func_to_io->free_db_area;
87 int pos, idx = DB_IDX(db_base, func_to_io->db_base);
89 down(&free_db_area->idx_lock);
91 pos = free_db_area->return_pos++;
92 pos &= HINIC_DB_MAX_AREAS - 1;
94 free_db_area->db_idx[pos] = idx;
96 free_db_area->num_free++;
98 up(&free_db_area->idx_lock);
101 static int write_sq_ctxts(struct hinic_func_to_io *func_to_io, u16 base_qpn,
104 struct hinic_hwif *hwif = func_to_io->hwif;
105 struct hinic_sq_ctxt_block *sq_ctxt_block;
106 struct pci_dev *pdev = hwif->pdev;
107 struct hinic_cmdq_buf cmdq_buf;
108 struct hinic_sq_ctxt *sq_ctxt;
113 err = hinic_alloc_cmdq_buf(&func_to_io->cmdqs, &cmdq_buf);
115 dev_err(&pdev->dev, "Failed to allocate cmdq buf\n");
119 sq_ctxt_block = cmdq_buf.buf;
120 sq_ctxt = sq_ctxt_block->sq_ctxt;
122 hinic_qp_prepare_header(&sq_ctxt_block->hdr, HINIC_QP_CTXT_TYPE_SQ,
123 num_sqs, func_to_io->max_qps);
124 for (i = 0; i < num_sqs; i++) {
125 qp = &func_to_io->qps[i];
127 hinic_sq_prepare_ctxt(&sq_ctxt[i], &qp->sq,
128 base_qpn + qp->q_id);
131 cmdq_buf.size = HINIC_SQ_CTXT_SIZE(num_sqs);
133 err = hinic_cmdq_direct_resp(&func_to_io->cmdqs, HINIC_MOD_L2NIC,
134 IO_CMD_MODIFY_QUEUE_CTXT, &cmdq_buf,
136 if ((err) || (out_param != 0)) {
137 dev_err(&pdev->dev, "Failed to set SQ ctxts\n");
141 hinic_free_cmdq_buf(&func_to_io->cmdqs, &cmdq_buf);
145 static int write_rq_ctxts(struct hinic_func_to_io *func_to_io, u16 base_qpn,
148 struct hinic_hwif *hwif = func_to_io->hwif;
149 struct hinic_rq_ctxt_block *rq_ctxt_block;
150 struct pci_dev *pdev = hwif->pdev;
151 struct hinic_cmdq_buf cmdq_buf;
152 struct hinic_rq_ctxt *rq_ctxt;
157 err = hinic_alloc_cmdq_buf(&func_to_io->cmdqs, &cmdq_buf);
159 dev_err(&pdev->dev, "Failed to allocate cmdq buf\n");
163 rq_ctxt_block = cmdq_buf.buf;
164 rq_ctxt = rq_ctxt_block->rq_ctxt;
166 hinic_qp_prepare_header(&rq_ctxt_block->hdr, HINIC_QP_CTXT_TYPE_RQ,
167 num_rqs, func_to_io->max_qps);
168 for (i = 0; i < num_rqs; i++) {
169 qp = &func_to_io->qps[i];
171 hinic_rq_prepare_ctxt(&rq_ctxt[i], &qp->rq,
172 base_qpn + qp->q_id);
175 cmdq_buf.size = HINIC_RQ_CTXT_SIZE(num_rqs);
177 err = hinic_cmdq_direct_resp(&func_to_io->cmdqs, HINIC_MOD_L2NIC,
178 IO_CMD_MODIFY_QUEUE_CTXT, &cmdq_buf,
180 if ((err) || (out_param != 0)) {
181 dev_err(&pdev->dev, "Failed to set RQ ctxts\n");
185 hinic_free_cmdq_buf(&func_to_io->cmdqs, &cmdq_buf);
190 * write_qp_ctxts - write the qp ctxt to HW
191 * @func_to_io: func to io channel that holds the IO components
192 * @base_qpn: first qp number
193 * @num_qps: number of qps to write
195 * Return 0 - Success, negative - Failure
197 static int write_qp_ctxts(struct hinic_func_to_io *func_to_io, u16 base_qpn,
200 return (write_sq_ctxts(func_to_io, base_qpn, num_qps) ||
201 write_rq_ctxts(func_to_io, base_qpn, num_qps));
205 * init_qp - Initialize a Queue Pair
206 * @func_to_io: func to io channel that holds the IO components
207 * @qp: pointer to the qp to initialize
208 * @q_id: the id of the qp
209 * @sq_msix_entry: msix entry for sq
210 * @rq_msix_entry: msix entry for rq
212 * Return 0 - Success, negative - Failure
214 static int init_qp(struct hinic_func_to_io *func_to_io,
215 struct hinic_qp *qp, int q_id,
216 struct msix_entry *sq_msix_entry,
217 struct msix_entry *rq_msix_entry)
219 struct hinic_hwif *hwif = func_to_io->hwif;
220 struct pci_dev *pdev = hwif->pdev;
221 void __iomem *db_base;
226 err = hinic_wq_allocate(&func_to_io->wqs, &func_to_io->sq_wq[q_id],
227 HINIC_SQ_WQEBB_SIZE, HINIC_SQ_PAGE_SIZE,
228 HINIC_SQ_DEPTH, HINIC_SQ_WQE_MAX_SIZE);
230 dev_err(&pdev->dev, "Failed to allocate WQ for SQ\n");
234 err = hinic_wq_allocate(&func_to_io->wqs, &func_to_io->rq_wq[q_id],
235 HINIC_RQ_WQEBB_SIZE, HINIC_RQ_PAGE_SIZE,
236 HINIC_RQ_DEPTH, HINIC_RQ_WQE_SIZE);
238 dev_err(&pdev->dev, "Failed to allocate WQ for RQ\n");
242 db_base = get_db_area(func_to_io);
243 if (IS_ERR(db_base)) {
244 dev_err(&pdev->dev, "Failed to get DB area for SQ\n");
245 err = PTR_ERR(db_base);
249 func_to_io->sq_db[q_id] = db_base;
251 err = hinic_init_sq(&qp->sq, hwif, &func_to_io->sq_wq[q_id],
253 CI_ADDR(func_to_io->ci_addr_base, q_id),
254 CI_ADDR(func_to_io->ci_dma_base, q_id), db_base);
256 dev_err(&pdev->dev, "Failed to init SQ\n");
260 err = hinic_init_rq(&qp->rq, hwif, &func_to_io->rq_wq[q_id],
263 dev_err(&pdev->dev, "Failed to init RQ\n");
270 hinic_clean_sq(&qp->sq);
273 return_db_area(func_to_io, db_base);
276 hinic_wq_free(&func_to_io->wqs, &func_to_io->rq_wq[q_id]);
279 hinic_wq_free(&func_to_io->wqs, &func_to_io->sq_wq[q_id]);
284 * destroy_qp - Clean the resources of a Queue Pair
285 * @func_to_io: func to io channel that holds the IO components
286 * @qp: pointer to the qp to clean
288 static void destroy_qp(struct hinic_func_to_io *func_to_io,
293 hinic_clean_rq(&qp->rq);
294 hinic_clean_sq(&qp->sq);
296 return_db_area(func_to_io, func_to_io->sq_db[q_id]);
298 hinic_wq_free(&func_to_io->wqs, &func_to_io->rq_wq[q_id]);
299 hinic_wq_free(&func_to_io->wqs, &func_to_io->sq_wq[q_id]);
303 * hinic_io_create_qps - Create Queue Pairs
304 * @func_to_io: func to io channel that holds the IO components
305 * @base_qpn: base qp number
306 * @num_qps: number queue pairs to create
307 * @sq_msix_entry: msix entries for sq
308 * @rq_msix_entry: msix entries for rq
310 * Return 0 - Success, negative - Failure
312 int hinic_io_create_qps(struct hinic_func_to_io *func_to_io,
313 u16 base_qpn, int num_qps,
314 struct msix_entry *sq_msix_entries,
315 struct msix_entry *rq_msix_entries)
317 struct hinic_hwif *hwif = func_to_io->hwif;
318 struct pci_dev *pdev = hwif->pdev;
319 size_t qps_size, wq_size, db_size;
323 qps_size = num_qps * sizeof(*func_to_io->qps);
324 func_to_io->qps = devm_kzalloc(&pdev->dev, qps_size, GFP_KERNEL);
325 if (!func_to_io->qps)
328 wq_size = num_qps * sizeof(*func_to_io->sq_wq);
329 func_to_io->sq_wq = devm_kzalloc(&pdev->dev, wq_size, GFP_KERNEL);
330 if (!func_to_io->sq_wq) {
335 wq_size = num_qps * sizeof(*func_to_io->rq_wq);
336 func_to_io->rq_wq = devm_kzalloc(&pdev->dev, wq_size, GFP_KERNEL);
337 if (!func_to_io->rq_wq) {
342 db_size = num_qps * sizeof(*func_to_io->sq_db);
343 func_to_io->sq_db = devm_kzalloc(&pdev->dev, db_size, GFP_KERNEL);
344 if (!func_to_io->sq_db) {
349 ci_addr_base = dma_alloc_coherent(&pdev->dev, CI_TABLE_SIZE(num_qps),
350 &func_to_io->ci_dma_base,
353 dev_err(&pdev->dev, "Failed to allocate CI area\n");
358 func_to_io->ci_addr_base = ci_addr_base;
360 for (i = 0; i < num_qps; i++) {
361 err = init_qp(func_to_io, &func_to_io->qps[i], i,
362 &sq_msix_entries[i], &rq_msix_entries[i]);
364 dev_err(&pdev->dev, "Failed to create QP %d\n", i);
369 err = write_qp_ctxts(func_to_io, base_qpn, num_qps);
371 dev_err(&pdev->dev, "Failed to init QP ctxts\n");
372 goto err_write_qp_ctxts;
379 for (j = 0; j < i; j++)
380 destroy_qp(func_to_io, &func_to_io->qps[j]);
382 dma_free_coherent(&pdev->dev, CI_TABLE_SIZE(num_qps),
383 func_to_io->ci_addr_base, func_to_io->ci_dma_base);
386 devm_kfree(&pdev->dev, func_to_io->sq_db);
389 devm_kfree(&pdev->dev, func_to_io->rq_wq);
392 devm_kfree(&pdev->dev, func_to_io->sq_wq);
395 devm_kfree(&pdev->dev, func_to_io->qps);
400 * hinic_io_destroy_qps - Destroy the IO Queue Pairs
401 * @func_to_io: func to io channel that holds the IO components
402 * @num_qps: number queue pairs to destroy
404 void hinic_io_destroy_qps(struct hinic_func_to_io *func_to_io, int num_qps)
406 struct hinic_hwif *hwif = func_to_io->hwif;
407 struct pci_dev *pdev = hwif->pdev;
408 size_t ci_table_size;
411 ci_table_size = CI_TABLE_SIZE(num_qps);
413 for (i = 0; i < num_qps; i++)
414 destroy_qp(func_to_io, &func_to_io->qps[i]);
416 dma_free_coherent(&pdev->dev, ci_table_size, func_to_io->ci_addr_base,
417 func_to_io->ci_dma_base);
419 devm_kfree(&pdev->dev, func_to_io->sq_db);
421 devm_kfree(&pdev->dev, func_to_io->rq_wq);
422 devm_kfree(&pdev->dev, func_to_io->sq_wq);
424 devm_kfree(&pdev->dev, func_to_io->qps);
428 * hinic_io_init - Initialize the IO components
429 * @func_to_io: func to io channel that holds the IO components
430 * @hwif: HW interface for accessing IO
431 * @max_qps: maximum QPs in HW
432 * @num_ceqs: number completion event queues
433 * @ceq_msix_entries: msix entries for ceqs
435 * Return 0 - Success, negative - Failure
437 int hinic_io_init(struct hinic_func_to_io *func_to_io,
438 struct hinic_hwif *hwif, u16 max_qps, int num_ceqs,
439 struct msix_entry *ceq_msix_entries)
441 struct pci_dev *pdev = hwif->pdev;
442 enum hinic_cmdq_type cmdq, type;
443 void __iomem *db_area;
446 func_to_io->hwif = hwif;
447 func_to_io->qps = NULL;
448 func_to_io->max_qps = max_qps;
450 err = hinic_ceqs_init(&func_to_io->ceqs, hwif, num_ceqs,
451 HINIC_DEFAULT_CEQ_LEN, HINIC_EQ_PAGE_SIZE,
454 dev_err(&pdev->dev, "Failed to init CEQs\n");
458 err = hinic_wqs_alloc(&func_to_io->wqs, 2 * max_qps, hwif);
460 dev_err(&pdev->dev, "Failed to allocate WQS for IO\n");
464 func_to_io->db_base = pci_ioremap_bar(pdev, HINIC_PCI_DB_BAR);
465 if (!func_to_io->db_base) {
466 dev_err(&pdev->dev, "Failed to remap IO DB area\n");
471 init_db_area_idx(&func_to_io->free_db_area);
473 for (cmdq = HINIC_CMDQ_SYNC; cmdq < HINIC_MAX_CMDQ_TYPES; cmdq++) {
474 db_area = get_db_area(func_to_io);
475 if (IS_ERR(db_area)) {
476 dev_err(&pdev->dev, "Failed to get cmdq db area\n");
477 err = PTR_ERR(db_area);
481 func_to_io->cmdq_db_area[cmdq] = db_area;
484 err = hinic_init_cmdqs(&func_to_io->cmdqs, hwif,
485 func_to_io->cmdq_db_area);
487 dev_err(&pdev->dev, "Failed to initialize cmdqs\n");
495 for (type = HINIC_CMDQ_SYNC; type < cmdq; type++)
496 return_db_area(func_to_io, func_to_io->cmdq_db_area[type]);
498 iounmap(func_to_io->db_base);
501 hinic_wqs_free(&func_to_io->wqs);
504 hinic_ceqs_free(&func_to_io->ceqs);
509 * hinic_io_free - Free the IO components
510 * @func_to_io: func to io channel that holds the IO components
512 void hinic_io_free(struct hinic_func_to_io *func_to_io)
514 enum hinic_cmdq_type cmdq;
516 hinic_free_cmdqs(&func_to_io->cmdqs);
518 for (cmdq = HINIC_CMDQ_SYNC; cmdq < HINIC_MAX_CMDQ_TYPES; cmdq++)
519 return_db_area(func_to_io, func_to_io->cmdq_db_area[cmdq]);
521 iounmap(func_to_io->db_base);
522 hinic_wqs_free(&func_to_io->wqs);
523 hinic_ceqs_free(&func_to_io->ceqs);