// SPDX-License-Identifier: GPL-2.0
/* Marvell OcteonTx2 RVU Admin Function driver
 *
 * Copyright (C) 2018 Marvell International Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/pci.h>

#include "rvu_struct.h"
#include "rvu_reg.h"
#include "rvu.h"
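
/* Copy one instruction into the AQ ring at the current head, ring the
 * doorbell and poll the result memory until hardware posts a completion
 * code.
 */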
static int npa_aq_enqueue_wait(struct rvu *rvu, struct rvu_block *block,
			       struct npa_aq_inst_s *inst)
{
	struct admin_queue *aq = block->aq;
	struct npa_aq_res_s *result;
	int timeout = 1000;
	u64 reg, head;

	result = (struct npa_aq_res_s *)aq->res->base;

	/* Get current head pointer where to append this instruction */
	reg = rvu_read64(rvu, block->addr, NPA_AF_AQ_STATUS);
	head = (reg >> 4) & AQ_PTR_MASK;

	memcpy((void *)(aq->inst->base + (head * aq->inst->entry_sz)),
	       (void *)inst, aq->inst->entry_sz);
	memset(result, 0, sizeof(*result));
	/* sync into memory */
	wmb();

	/* Ring the doorbell and wait for result */
	rvu_write64(rvu, block->addr, NPA_AF_AQ_DOOR, 1);
	while (result->compcode == NPA_AQ_COMP_NOTDONE) {
		cpu_relax();
		udelay(1);
		timeout--;
		if (!timeout)
			return -EBUSY;
	}

	if (result->compcode != NPA_AQ_COMP_GOOD)
		/* TODO: Replace this with some error code */
		return -EBUSY;

	return 0;
}
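
/* Validate and service one NPA AQ enqueue request from a PF/VF: build
 * the instruction, submit it under the AQ lock and track aura/pool
 * enable state in the per-function bitmaps.
 */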
static int rvu_npa_aq_enq_inst(struct rvu *rvu, struct npa_aq_enq_req *req,
			       struct npa_aq_enq_rsp *rsp)
{
	struct rvu_hwinfo *hw = rvu->hw;
	u16 pcifunc = req->hdr.pcifunc;
	int blkaddr, npalf, rc = 0;
	struct npa_aq_inst_s inst;
	struct rvu_block *block;
	struct admin_queue *aq;
	struct rvu_pfvf *pfvf;
	void *ctx, *mask;
	bool ena;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	if (!pfvf->aura_ctx || req->aura_id >= pfvf->aura_ctx->qsize)
		return NPA_AF_ERR_AQ_ENQUEUE;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, pcifunc);
	if (!pfvf->npalf || blkaddr < 0)
		return NPA_AF_ERR_AF_LF_INVALID;

	block = &hw->block[blkaddr];
	aq = block->aq;
	if (!aq) {
		dev_warn(rvu->dev, "%s: NPA AQ not initialized\n", __func__);
		return NPA_AF_ERR_AQ_ENQUEUE;
	}

	npalf = rvu_get_lf(rvu, block, pcifunc, 0);
	if (npalf < 0)
		return NPA_AF_ERR_AF_LF_INVALID;

	memset(&inst, 0, sizeof(struct npa_aq_inst_s));
	inst.cindex = req->aura_id;
	inst.lf = npalf;
	inst.ctype = req->ctype;
	inst.op = req->op;
	/* Currently we are not supporting enqueuing multiple instructions,
	 * so always choose first entry in result memory.
	 */
	inst.res_addr = (u64)aq->res->iova;

	/* Clean result + context memory */
	memset(aq->res->base, 0, aq->res->entry_sz);
	/* Context needs to be written at RES_ADDR + 128 */
	ctx = aq->res->base + 128;
	/* Mask needs to be written at RES_ADDR + 256 */
	mask = aq->res->base + 256;

	switch (req->op) {
	case NPA_AQ_INSTOP_WRITE:
		/* Copy context and write mask */
		if (req->ctype == NPA_AQ_CTYPE_AURA) {
			memcpy(mask, &req->aura_mask,
			       sizeof(struct npa_aura_s));
			memcpy(ctx, &req->aura, sizeof(struct npa_aura_s));
		} else {
			memcpy(mask, &req->pool_mask,
			       sizeof(struct npa_pool_s));
			memcpy(ctx, &req->pool, sizeof(struct npa_pool_s));
		}
		break;
	case NPA_AQ_INSTOP_INIT:
		if (req->ctype == NPA_AQ_CTYPE_AURA) {
			if (req->aura.pool_addr >= pfvf->pool_ctx->qsize) {
				rc = NPA_AF_ERR_AQ_FULL;
				break;
			}
			/* Set pool's context address */
			req->aura.pool_addr = pfvf->pool_ctx->iova +
			(req->aura.pool_addr * pfvf->pool_ctx->entry_sz);
			memcpy(ctx, &req->aura, sizeof(struct npa_aura_s));
		} else { /* POOL's context */
			memcpy(ctx, &req->pool, sizeof(struct npa_pool_s));
		}
		break;
	case NPA_AQ_INSTOP_NOP:
	case NPA_AQ_INSTOP_READ:
	case NPA_AQ_INSTOP_LOCK:
	case NPA_AQ_INSTOP_UNLOCK:
		break;
	default:
		rc = NPA_AF_ERR_AQ_FULL;
		break;
	}

	if (rc)
		return rc;

	spin_lock(&aq->lock);

	/* Submit the instruction to AQ */
	rc = npa_aq_enqueue_wait(rvu, block, &inst);
	if (rc) {
		spin_unlock(&aq->lock);
		return rc;
	}

	/* Set aura bitmap if aura hw context is enabled */
	if (req->ctype == NPA_AQ_CTYPE_AURA) {
		if (req->op == NPA_AQ_INSTOP_INIT && req->aura.ena)
			__set_bit(req->aura_id, pfvf->aura_bmap);
		if (req->op == NPA_AQ_INSTOP_WRITE) {
			ena = (req->aura.ena & req->aura_mask.ena) |
				(test_bit(req->aura_id, pfvf->aura_bmap) &
				~req->aura_mask.ena);
			if (ena)
				__set_bit(req->aura_id, pfvf->aura_bmap);
			else
				__clear_bit(req->aura_id, pfvf->aura_bmap);
		}
	}

	/* Set pool bitmap if pool hw context is enabled */
	if (req->ctype == NPA_AQ_CTYPE_POOL) {
		if (req->op == NPA_AQ_INSTOP_INIT && req->pool.ena)
			__set_bit(req->aura_id, pfvf->pool_bmap);
		if (req->op == NPA_AQ_INSTOP_WRITE) {
			ena = (req->pool.ena & req->pool_mask.ena) |
				(test_bit(req->aura_id, pfvf->pool_bmap) &
				~req->pool_mask.ena);
			if (ena)
				__set_bit(req->aura_id, pfvf->pool_bmap);
			else
				__clear_bit(req->aura_id, pfvf->pool_bmap);
		}
	}

	spin_unlock(&aq->lock);

	if (rsp) {
		/* Copy read context into mailbox */
		if (req->op == NPA_AQ_INSTOP_READ) {
			if (req->ctype == NPA_AQ_CTYPE_AURA)
				memcpy(&rsp->aura, ctx,
				       sizeof(struct npa_aura_s));
			else
				memcpy(&rsp->pool, ctx,
				       sizeof(struct npa_pool_s));
		}
	}

	return 0;
}
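
/* Walk the aura or pool bitmap of an LF and issue a masked WRITE that
 * clears the 'ena' bit of every context currently enabled.
 */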
static int npa_lf_hwctx_disable(struct rvu *rvu, struct hwctx_disable_req *req)
{
	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
	struct npa_aq_enq_req aq_req;
	unsigned long *bmap;
	int id, cnt = 0;
	int err = 0, rc;

	if (!pfvf->pool_ctx || !pfvf->aura_ctx)
		return NPA_AF_ERR_AQ_ENQUEUE;

	memset(&aq_req, 0, sizeof(struct npa_aq_enq_req));
	aq_req.hdr.pcifunc = req->hdr.pcifunc;

	if (req->ctype == NPA_AQ_CTYPE_POOL) {
		aq_req.pool.ena = 0;
		aq_req.pool_mask.ena = 1;
		cnt = pfvf->pool_ctx->qsize;
		bmap = pfvf->pool_bmap;
	} else if (req->ctype == NPA_AQ_CTYPE_AURA) {
		aq_req.aura.ena = 0;
		aq_req.aura_mask.ena = 1;
		cnt = pfvf->aura_ctx->qsize;
		bmap = pfvf->aura_bmap;
	}

	aq_req.ctype = req->ctype;
	aq_req.op = NPA_AQ_INSTOP_WRITE;

	for (id = 0; id < cnt; id++) {
		if (!test_bit(id, bmap))
			continue;
		aq_req.aura_id = id;
		rc = rvu_npa_aq_enq_inst(rvu, &aq_req, NULL);
		if (rc) {
			err = rc;
			dev_err(rvu->dev, "Failed to disable %s:%d context\n",
				(req->ctype == NPA_AQ_CTYPE_AURA) ?
				"Aura" : "Pool", id);
		}
	}

	return err;
}
int rvu_mbox_handler_NPA_AQ_ENQ(struct rvu *rvu,
				struct npa_aq_enq_req *req,
				struct npa_aq_enq_rsp *rsp)
{
	return rvu_npa_aq_enq_inst(rvu, req, rsp);
}

int rvu_mbox_handler_NPA_HWCTX_DISABLE(struct rvu *rvu,
				       struct hwctx_disable_req *req,
				       struct msg_rsp *rsp)
{
	return npa_lf_hwctx_disable(rvu, req);
}
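
/* Free the aura/pool bitmaps and the qmem backing the aura, pool and
 * queue-interrupt HW contexts of an NPA LF.
 */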
static void npa_ctx_free(struct rvu *rvu, struct rvu_pfvf *pfvf)
{
	kfree(pfvf->aura_bmap);
	pfvf->aura_bmap = NULL;

	qmem_free(rvu->dev, pfvf->aura_ctx);
	pfvf->aura_ctx = NULL;

	kfree(pfvf->pool_bmap);
	pfvf->pool_bmap = NULL;

	qmem_free(rvu->dev, pfvf->pool_ctx);
	pfvf->pool_ctx = NULL;

	qmem_free(rvu->dev, pfvf->npa_qints_ctx);
	pfvf->npa_qints_ctx = NULL;
}
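
/* NPA LF allocation: reset the LF, allocate aura/pool/qint context
 * memory sized from NPA_AF_CONST1, then program the context bases and
 * context caching for this LF.
 */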
int rvu_mbox_handler_NPA_LF_ALLOC(struct rvu *rvu,
				  struct npa_lf_alloc_req *req,
				  struct npa_lf_alloc_rsp *rsp)
{
	int npalf, qints, hwctx_size, err, rc = 0;
	struct rvu_hwinfo *hw = rvu->hw;
	u16 pcifunc = req->hdr.pcifunc;
	struct rvu_block *block;
	struct rvu_pfvf *pfvf;
	u64 cfg, ctx_cfg;
	int blkaddr;

	if (req->aura_sz > NPA_AURA_SZ_MAX ||
	    req->aura_sz == NPA_AURA_SZ_0 || !req->nr_pools)
		return NPA_AF_ERR_PARAM;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, pcifunc);
	if (!pfvf->npalf || blkaddr < 0)
		return NPA_AF_ERR_AF_LF_INVALID;

	block = &hw->block[blkaddr];
	npalf = rvu_get_lf(rvu, block, pcifunc, 0);
	if (npalf < 0)
		return NPA_AF_ERR_AF_LF_INVALID;

	/* Reset this NPA LF */
	err = rvu_lf_reset(rvu, block, npalf);
	if (err) {
		dev_err(rvu->dev, "Failed to reset NPALF%d\n", npalf);
		return NPA_AF_ERR_LF_RESET;
	}

	ctx_cfg = rvu_read64(rvu, blkaddr, NPA_AF_CONST1);

	/* Alloc memory for aura HW contexts */
	hwctx_size = 1UL << (ctx_cfg & 0xF);
	err = qmem_alloc(rvu->dev, &pfvf->aura_ctx,
			 NPA_AURA_COUNT(req->aura_sz), hwctx_size);
	if (err)
		goto free_mem;

	pfvf->aura_bmap = kcalloc(NPA_AURA_COUNT(req->aura_sz), sizeof(long),
				  GFP_KERNEL);
	if (!pfvf->aura_bmap)
		goto free_mem;

	/* Alloc memory for pool HW contexts */
	hwctx_size = 1UL << ((ctx_cfg >> 4) & 0xF);
	err = qmem_alloc(rvu->dev, &pfvf->pool_ctx, req->nr_pools, hwctx_size);
	if (err)
		goto free_mem;

	pfvf->pool_bmap = kcalloc(NPA_AURA_COUNT(req->aura_sz), sizeof(long),
				  GFP_KERNEL);
	if (!pfvf->pool_bmap)
		goto free_mem;

	/* Get no of queue interrupts supported */
	cfg = rvu_read64(rvu, blkaddr, NPA_AF_CONST);
	qints = (cfg >> 28) & 0xFFF;

	/* Alloc memory for Qints HW contexts */
	hwctx_size = 1UL << ((ctx_cfg >> 8) & 0xF);
	err = qmem_alloc(rvu->dev, &pfvf->npa_qints_ctx, qints, hwctx_size);
	if (err)
		goto free_mem;

	cfg = rvu_read64(rvu, blkaddr, NPA_AF_LFX_AURAS_CFG(npalf));
	/* Clear way partition mask and set aura offset to '0' */
	cfg &= ~(BIT_ULL(34) - 1);
	/* Set aura size & enable caching of contexts */
	cfg |= (req->aura_sz << 16) | BIT_ULL(34);
	rvu_write64(rvu, blkaddr, NPA_AF_LFX_AURAS_CFG(npalf), cfg);

	/* Configure aura HW context's base */
	rvu_write64(rvu, blkaddr, NPA_AF_LFX_LOC_AURAS_BASE(npalf),
		    (u64)pfvf->aura_ctx->iova);

	/* Enable caching of qints hw context */
	rvu_write64(rvu, blkaddr, NPA_AF_LFX_QINTS_CFG(npalf), BIT_ULL(36));
	rvu_write64(rvu, blkaddr, NPA_AF_LFX_QINTS_BASE(npalf),
		    (u64)pfvf->npa_qints_ctx->iova);

	goto exit;

free_mem:
	npa_ctx_free(rvu, pfvf);
	rc = -ENOMEM;

exit:
	/* set stack page info */
	cfg = rvu_read64(rvu, blkaddr, NPA_AF_CONST);
	rsp->stack_pg_ptrs = (cfg >> 8) & 0xFF;
	rsp->stack_pg_bytes = cfg & 0xFF;
	rsp->qints = (cfg >> 28) & 0xFFF;
	return rc;
}
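
/* NPA LF teardown: reset the LF and release all of its context memory. */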
int rvu_mbox_handler_NPA_LF_FREE(struct rvu *rvu, struct msg_req *req,
				 struct msg_rsp *rsp)
{
	struct rvu_hwinfo *hw = rvu->hw;
	u16 pcifunc = req->hdr.pcifunc;
	struct rvu_block *block;
	struct rvu_pfvf *pfvf;
	int npalf, blkaddr, err;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, pcifunc);
	if (!pfvf->npalf || blkaddr < 0)
		return NPA_AF_ERR_AF_LF_INVALID;

	block = &hw->block[blkaddr];
	npalf = rvu_get_lf(rvu, block, pcifunc, 0);
	if (npalf < 0)
		return NPA_AF_ERR_AF_LF_INVALID;

	/* Reset this NPA LF */
	err = rvu_lf_reset(rvu, block, npalf);
	if (err) {
		dev_err(rvu->dev, "Failed to reset NPALF%d\n", npalf);
		return NPA_AF_ERR_LF_RESET;
	}

	npa_ctx_free(rvu, pfvf);

	return 0;
}
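
/* One-time NPA admin queue setup: configure endianness and NDC caching,
 * allocate instruction/result memory and program the AQ base and size.
 */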
static int npa_aq_init(struct rvu *rvu, struct rvu_block *block)
{
	u64 cfg;
	int err;

	/* Set admin queue endianness */
	cfg = rvu_read64(rvu, block->addr, NPA_AF_GEN_CFG);
#ifdef __BIG_ENDIAN
	cfg |= BIT_ULL(1);
	rvu_write64(rvu, block->addr, NPA_AF_GEN_CFG, cfg);
#else
	cfg &= ~BIT_ULL(1);
	rvu_write64(rvu, block->addr, NPA_AF_GEN_CFG, cfg);
#endif

	/* Do not bypass NDC cache */
	cfg = rvu_read64(rvu, block->addr, NPA_AF_NDC_CFG);
	cfg &= ~0x03DULL;
	rvu_write64(rvu, block->addr, NPA_AF_NDC_CFG, cfg);

	/* Result structure can be followed by Aura/Pool context at
	 * RES + 128bytes and a write mask at RES + 256 bytes, depending on
	 * operation type. Alloc sufficient result memory for all operations.
	 */
	err = rvu_aq_alloc(rvu, &block->aq,
			   Q_COUNT(AQ_SIZE), sizeof(struct npa_aq_inst_s),
			   ALIGN(sizeof(struct npa_aq_res_s), 128) + 256);
	if (err)
		return err;

	rvu_write64(rvu, block->addr, NPA_AF_AQ_CFG, AQ_SIZE);
	rvu_write64(rvu, block->addr,
		    NPA_AF_AQ_BASE, (u64)block->aq->inst->iova);
	return 0;
}
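
/* Called during AF driver initialization, if an NPA block is present. */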
int rvu_npa_init(struct rvu *rvu)
{
	struct rvu_hwinfo *hw = rvu->hw;
	struct rvu_block *block;
	int blkaddr, err;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
	if (blkaddr < 0)
		return 0;
	block = &hw->block[blkaddr];

	/* Initialize admin queue */
	err = npa_aq_init(rvu, block);
	return err;
}
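
/* Release the admin queue memory when the AF driver is torn down. */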
void rvu_npa_freemem(struct rvu *rvu)
{
	struct rvu_hwinfo *hw = rvu->hw;
	struct rvu_block *block;
	int blkaddr;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
	if (blkaddr < 0)
		return;

	block = &hw->block[blkaddr];
	rvu_aq_free(rvu, block->aq);
}