1 // SPDX-License-Identifier: GPL-2.0
2 /* Marvell RVU Admin Function driver
4 * Copyright (C) 2022 Marvell.
8 #include <linux/bitfield.h>
9 #include <linux/module.h>
10 #include <linux/pci.h>
11 #include <linux/firmware.h>
12 #include <linux/stddef.h>
13 #include <linux/debugfs.h>
15 #include "rvu_struct.h"
20 #include "rvu_npc_fs.h"
21 #include "rvu_npc_hash.h"
23 static u64 rvu_npc_wide_extract(const u64 input[], size_t start_bit,
26 const u64 mask = ~(u64)((~(__uint128_t)0) << width_bits);
27 const size_t msb = start_bit + width_bits - 1;
28 const size_t lword = start_bit >> 6;
29 const size_t uword = msb >> 6;
34 return (input[lword] >> (start_bit & 63)) & mask;
36 lbits = 64 - (start_bit & 63);
38 lo = (input[lword] >> (start_bit & 63));
39 return ((hi << lbits) | lo) & mask;
42 static void rvu_npc_lshift_key(u64 *key, size_t key_bit_len)
44 u64 prev_orig_word = 0;
45 u64 cur_orig_word = 0;
46 size_t extra = key_bit_len % 64;
47 size_t max_idx = key_bit_len / 64;
53 for (i = 0; i < max_idx; i++) {
54 cur_orig_word = key[i];
56 key[i] |= ((prev_orig_word >> 63) & 0x1);
57 prev_orig_word = cur_orig_word;
61 static u32 rvu_npc_toeplitz_hash(const u64 *data, u64 *key, size_t data_bit_len,
68 for (i = data_bit_len - 1; i >= 0; i--) {
69 temp_data = (data[i / 64]);
70 temp_data = temp_data >> (i % 64);
73 hash_out ^= (u32)(rvu_npc_wide_extract(key, key_bit_len - 32, 32));
75 rvu_npc_lshift_key(key, key_bit_len);
81 u32 npc_field_hash_calc(u64 *ldata, struct npc_get_field_hash_info_rsp rsp,
88 hash_key[0] = rsp.secret_key[1] << 31;
89 hash_key[0] |= rsp.secret_key[2];
90 hash_key[1] = rsp.secret_key[1] >> 33;
91 hash_key[1] |= rsp.secret_key[0] << 31;
92 hash_key[2] = rsp.secret_key[0] >> 33;
94 data_padded[0] = rsp.hash_mask[intf][hash_idx][0] & ldata[0];
95 data_padded[1] = rsp.hash_mask[intf][hash_idx][1] & ldata[1];
96 field_hash = rvu_npc_toeplitz_hash(data_padded, hash_key, 128, 159);
98 field_hash &= FIELD_GET(GENMASK(63, 32), rsp.hash_ctrl[intf][hash_idx]);
99 field_hash += FIELD_GET(GENMASK(31, 0), rsp.hash_ctrl[intf][hash_idx]);
/* Read the KEX LD_CFG word for (intf, lid, lt, ld) and rewrite it so the
 * extractor uses the 32-bit hash result instead of raw header bytes:
 * the original header/key offsets are preserved while USE_HASH is set
 * and BYTESM1 becomes 0x3 (i.e. 4 bytes — the hash width).
 */
static u64 npc_update_use_hash(struct rvu *rvu, int blkaddr,
			       u8 intf, int lid, int lt, int ld)
{
	u64 cfg, hdr, key;

	cfg = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_LIDX_LTX_LDX_CFG(intf, lid, lt, ld));
	hdr = FIELD_GET(NPC_HDR_OFFSET, cfg);
	key = FIELD_GET(NPC_KEY_OFFSET, cfg);

	/* Update use_hash(bit-20) to 'true' and
	 * bytesm1(bit-16:19) to '0x3' in KEX_LD_CFG
	 */
	cfg = KEX_LD_CFG_USE_HASH(0x1, 0x03,
				  hdr, 0x1, 0x0,
				  key);

	return cfg;
}
/* Program RX-interface KEX and hash registers for every (lid, lt, ld)
 * combination that has hash extraction enabled in the mkex profile.
 * Hardware provides only NPC_MAX_HASH hash slots, so programming stops
 * once that many have been configured.
 */
static void npc_program_mkex_hash_rx(struct rvu *rvu, int blkaddr,
				     u8 intf)
{
	struct npc_mcam_kex_hash *mkex_hash = rvu->kpu.mkex_hash;
	int lid, lt, ld, hash_cnt = 0;

	/* RX-only helper: nothing to do for a TX interface */
	if (is_npc_intf_tx(intf))
		return;

	/* Program HASH_CFG */
	for (lid = 0; lid < NPC_MAX_LID; lid++) {
		for (lt = 0; lt < NPC_MAX_LT; lt++) {
			for (ld = 0; ld < NPC_MAX_LD; ld++) {
				if (mkex_hash->lid_lt_ld_hash_en[intf][lid][lt][ld]) {
					u64 cfg;

					/* All hardware hash slots consumed */
					if (hash_cnt == NPC_MAX_HASH)
						return;

					cfg = npc_update_use_hash(rvu, blkaddr,
								  intf, lid, lt, ld);
					/* Set updated KEX configuration */
					SET_KEX_LD(intf, lid, lt, ld, cfg);
					/* Set HASH configuration */
					SET_KEX_LD_HASH(intf, ld,
							mkex_hash->hash[intf][ld]);
					SET_KEX_LD_HASH_MASK(intf, ld, 0,
							     mkex_hash->hash_mask[intf][ld][0]);
					SET_KEX_LD_HASH_MASK(intf, ld, 1,
							     mkex_hash->hash_mask[intf][ld][1]);
					SET_KEX_LD_HASH_CTRL(intf, ld,
							     mkex_hash->hash_ctrl[intf][ld]);
					hash_cnt++;
				}
			}
		}
	}
}
/* TX counterpart of npc_program_mkex_hash_rx(): program TX-interface
 * KEX and hash registers for every enabled (lid, lt, ld) combination,
 * limited to NPC_MAX_HASH hardware hash slots.
 */
static void npc_program_mkex_hash_tx(struct rvu *rvu, int blkaddr,
				     u8 intf)
{
	struct npc_mcam_kex_hash *mkex_hash = rvu->kpu.mkex_hash;
	int lid, lt, ld, hash_cnt = 0;

	/* TX-only helper: nothing to do for an RX interface */
	if (is_npc_intf_rx(intf))
		return;

	/* Program HASH_CFG */
	for (lid = 0; lid < NPC_MAX_LID; lid++) {
		for (lt = 0; lt < NPC_MAX_LT; lt++) {
			for (ld = 0; ld < NPC_MAX_LD; ld++)
				if (mkex_hash->lid_lt_ld_hash_en[intf][lid][lt][ld]) {
					u64 cfg;

					/* All hardware hash slots consumed */
					if (hash_cnt == NPC_MAX_HASH)
						return;

					cfg = npc_update_use_hash(rvu, blkaddr,
								  intf, lid, lt, ld);
					/* Set updated KEX configuration */
					SET_KEX_LD(intf, lid, lt, ld, cfg);
					/* Set HASH configuration */
					SET_KEX_LD_HASH(intf, ld,
							mkex_hash->hash[intf][ld]);
					SET_KEX_LD_HASH_MASK(intf, ld, 0,
							     mkex_hash->hash_mask[intf][ld][0]);
					SET_KEX_LD_HASH_MASK(intf, ld, 1,
							     mkex_hash->hash_mask[intf][ld][1]);
					SET_KEX_LD_HASH_CTRL(intf, ld,
							     mkex_hash->hash_ctrl[intf][ld]);
					hash_cnt++;
				}
		}
	}
}
/* Program the three 64-bit field-hash secret key words on every NPC
 * interface. No-op on silicon without the hash extract capability.
 */
void npc_config_secret_key(struct rvu *rvu, int blkaddr)
{
	struct hw_cap *hwcap = &rvu->hw->cap;
	struct rvu_hwinfo *hw = rvu->hw;
	u8 intf;

	/* Feature not present on this silicon variant */
	if (!hwcap->npc_hash_extract)
		return;

	/* Same key triple is programmed on each interface */
	for (intf = 0; intf < hw->npc_intfs; intf++) {
		rvu_write64(rvu, blkaddr, NPC_AF_INTFX_SECRET_KEY0(intf),
			    RVU_NPC_HASH_SECRET_KEY0);
		rvu_write64(rvu, blkaddr, NPC_AF_INTFX_SECRET_KEY1(intf),
			    RVU_NPC_HASH_SECRET_KEY1);
		rvu_write64(rvu, blkaddr, NPC_AF_INTFX_SECRET_KEY2(intf),
			    RVU_NPC_HASH_SECRET_KEY2);
	}
}
/* Program mkex hash extraction (RX and TX) on every NPC interface.
 * No-op on silicon without the hash extract capability.
 */
void npc_program_mkex_hash(struct rvu *rvu, int blkaddr)
{
	struct hw_cap *hwcap = &rvu->hw->cap;
	struct rvu_hwinfo *hw = rvu->hw;
	u8 intf;

	/* Feature not present on this silicon variant */
	if (!hwcap->npc_hash_extract)
		return;

	for (intf = 0; intf < hw->npc_intfs; intf++) {
		npc_program_mkex_hash_rx(rvu, blkaddr, intf);
		npc_program_mkex_hash_tx(rvu, blkaddr, intf);
	}
}
234 void npc_update_field_hash(struct rvu *rvu, u8 intf,
235 struct mcam_entry *entry,
238 struct flow_msg *pkt,
239 struct flow_msg *mask,
240 struct flow_msg *opkt,
241 struct flow_msg *omask)
243 struct npc_mcam_kex_hash *mkex_hash = rvu->kpu.mkex_hash;
244 struct npc_get_field_hash_info_req req;
245 struct npc_get_field_hash_info_rsp rsp;
250 if (!rvu->hw->cap.npc_hash_extract) {
251 dev_dbg(rvu->dev, "%s: Field hash extract feature is not supported\n", __func__);
256 rvu_mbox_handler_npc_get_field_hash_info(rvu, &req, &rsp);
258 for (hash_idx = 0; hash_idx < NPC_MAX_HASH; hash_idx++) {
259 cfg = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_HASHX_CFG(intf, hash_idx));
260 if ((cfg & BIT_ULL(11)) && (cfg & BIT_ULL(12))) {
261 u8 lid = (cfg & GENMASK_ULL(10, 8)) >> 8;
262 u8 ltype = (cfg & GENMASK_ULL(7, 4)) >> 4;
263 u8 ltype_mask = cfg & GENMASK_ULL(3, 0);
265 if (mkex_hash->lid_lt_ld_hash_en[intf][lid][ltype][hash_idx]) {
266 switch (ltype & ltype_mask) {
267 /* If hash extract enabled is supported for IPv6 then
268 * 128 bit IPv6 source and destination addressed
269 * is hashed to 32 bit value.
272 /* ld[0] == hash_idx[0] == Source IPv6
273 * ld[1] == hash_idx[1] == Destination IPv6
275 if ((features & BIT_ULL(NPC_SIP_IPV6)) && !hash_idx) {
276 u32 src_ip[IPV6_WORDS];
278 be32_to_cpu_array(src_ip, pkt->ip6src, IPV6_WORDS);
279 ldata[1] = (u64)src_ip[0] << 32 | src_ip[1];
280 ldata[0] = (u64)src_ip[2] << 32 | src_ip[3];
281 field_hash = npc_field_hash_calc(ldata,
285 npc_update_entry(rvu, NPC_SIP_IPV6, entry,
287 GENMASK(31, 0), 0, intf);
288 memcpy(&opkt->ip6src, &pkt->ip6src,
289 sizeof(pkt->ip6src));
290 memcpy(&omask->ip6src, &mask->ip6src,
291 sizeof(mask->ip6src));
292 } else if ((features & BIT_ULL(NPC_DIP_IPV6)) && hash_idx) {
293 u32 dst_ip[IPV6_WORDS];
295 be32_to_cpu_array(dst_ip, pkt->ip6dst, IPV6_WORDS);
296 ldata[1] = (u64)dst_ip[0] << 32 | dst_ip[1];
297 ldata[0] = (u64)dst_ip[2] << 32 | dst_ip[3];
298 field_hash = npc_field_hash_calc(ldata,
302 npc_update_entry(rvu, NPC_DIP_IPV6, entry,
304 GENMASK(31, 0), 0, intf);
305 memcpy(&opkt->ip6dst, &pkt->ip6dst,
306 sizeof(pkt->ip6dst));
307 memcpy(&omask->ip6dst, &mask->ip6dst,
308 sizeof(mask->ip6dst));
/* Mbox handler: report per-interface field-hash parameters to a
 * requesting PF/VF so it can mirror the AF's hash computation. Fills
 * in the 3-word secret key (for the requested interface), the hash
 * masks for both RX and TX, and the hash control words.
 */
int rvu_mbox_handler_npc_get_field_hash_info(struct rvu *rvu,
					     struct npc_get_field_hash_info_req *req,
					     struct npc_get_field_hash_info_rsp *rsp)
{
	u64 *secret_key = rsp->secret_key;
	u8 intf = req->intf;
	int i, j, blkaddr;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
	if (blkaddr < 0) {
		dev_err(rvu->dev, "%s: NPC block not implemented\n", __func__);
		return -EINVAL;
	}

	/* Secret key is read back for the requested interface only */
	secret_key[0] = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_SECRET_KEY0(intf));
	secret_key[1] = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_SECRET_KEY1(intf));
	secret_key[2] = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_SECRET_KEY2(intf));

	/* Masks for both interfaces are returned so the caller does not
	 * need to issue two mbox requests.
	 */
	for (i = 0; i < NPC_MAX_HASH; i++) {
		for (j = 0; j < NPC_MAX_HASH_MASK; j++) {
			rsp->hash_mask[NIX_INTF_RX][i][j] =
				GET_KEX_LD_HASH_MASK(NIX_INTF_RX, i, j);
			rsp->hash_mask[NIX_INTF_TX][i][j] =
				GET_KEX_LD_HASH_MASK(NIX_INTF_TX, i, j);
		}
	}

	for (i = 0; i < NPC_MAX_INTF; i++)
		for (j = 0; j < NPC_MAX_HASH; j++)
			rsp->hash_ctrl[i][j] = GET_KEX_LD_HASH_CTRL(i, j);

	return 0;
}
353 * rvu_npc_exact_mac2u64 - utility function to convert mac address to u64.
354 * @mac_addr: MAC address.
355 * Return: mdata for exact match table.
357 static u64 rvu_npc_exact_mac2u64(u8 *mac_addr)
362 for (index = ETH_ALEN - 1; index >= 0; index--)
363 mac |= ((u64)*mac_addr++) << (8 * index);
369 * rvu_exact_prepare_mdata - Make mdata for mcam entry
371 * @chan: Channel number.
372 * @ctype: Channel Type.
376 static u64 rvu_exact_prepare_mdata(u8 *mac, u16 chan, u16 ctype, u64 mask)
378 u64 ldata = rvu_npc_exact_mac2u64(mac);
380 /* Please note that mask is 48bit which excludes chan and ctype.
381 * Increase mask bits if we need to include them as well.
383 ldata |= ((u64)chan << 48);
384 ldata |= ((u64)ctype << 60);
392 * rvu_exact_calculate_hash - calculate hash index to mem table.
393 * @rvu: resource virtualization unit.
394 * @chan: Channel number
395 * @ctype: Channel type.
398 * @table_depth: Depth of table.
/* Compute the MEM-table bucket index for (chan, ctype, mac): Toeplitz-
 * hash the 64-bit mdata with a 95-bit key derived from the secret key
 * constants, then apply the table's stored hash mask and offset.
 * NOTE(review): @table_depth appears unused here — the depth is taken
 * from table->mem_table instead; confirm against callers.
 */
static u32 rvu_exact_calculate_hash(struct rvu *rvu, u16 chan, u16 ctype, u8 *mac,
				    u64 mask, u32 table_depth)
{
	struct npc_exact_table *table = rvu->hw->table;
	u64 hash_key[2];
	u64 key_in[2];
	u64 ldata;
	u32 hash;

	key_in[0] = RVU_NPC_HASH_SECRET_KEY0;
	key_in[1] = RVU_NPC_HASH_SECRET_KEY2;

	/* Pack the two 64-bit constants into a contiguous 95-bit key */
	hash_key[0] = key_in[0] << 31;
	hash_key[0] |= key_in[1];
	hash_key[1] = key_in[0] >> 33;

	ldata = rvu_exact_prepare_mdata(mac, chan, ctype, mask);

	dev_dbg(rvu->dev, "%s: ldata=0x%llx hash_key0=0x%llx hash_key2=0x%llx\n", __func__,
		ldata, hash_key[1], hash_key[0]);
	hash = rvu_npc_toeplitz_hash(&ldata, (u64 *)hash_key, 64, 95);

	/* Fold the raw hash into the table's index range */
	hash &= table->mem_table.hash_mask;
	hash += table->mem_table.hash_offset;
	dev_dbg(rvu->dev, "%s: hash=%x\n", __func__, hash);

	return hash;
}
431 * rvu_npc_exact_alloc_mem_table_entry - find free entry in 4 way table.
432 * @rvu: resource virtualization unit.
433 * @way: Indicate way to table.
434 * @index: Hash index to 4 way table.
437 * Searches 4 way table using hash index. Returns 0 on success.
438 * Return: 0 upon success.
440 static int rvu_npc_exact_alloc_mem_table_entry(struct rvu *rvu, u8 *way,
441 u32 *index, unsigned int hash)
443 struct npc_exact_table *table;
446 table = rvu->hw->table;
447 depth = table->mem_table.depth;
449 /* Check all the 4 ways for a free slot. */
450 mutex_lock(&table->lock);
451 for (i = 0; i < table->mem_table.ways; i++) {
452 if (test_bit(hash + i * depth, table->mem_table.bmap))
455 set_bit(hash + i * depth, table->mem_table.bmap);
456 mutex_unlock(&table->lock);
458 dev_dbg(rvu->dev, "%s: mem table entry alloc success (way=%d index=%d)\n",
465 mutex_unlock(&table->lock);
467 dev_dbg(rvu->dev, "%s: No space in 4 way exact way, weight=%u\n", __func__,
468 bitmap_weight(table->mem_table.bmap, table->mem_table.depth));
473 * rvu_npc_exact_free_id - Free seq id from bitmat.
474 * @rvu: Resource virtualization unit.
475 * @seq_id: Sequence identifier to be freed.
477 static void rvu_npc_exact_free_id(struct rvu *rvu, u32 seq_id)
479 struct npc_exact_table *table;
481 table = rvu->hw->table;
482 mutex_lock(&table->lock);
483 clear_bit(seq_id, table->id_bmap);
484 mutex_unlock(&table->lock);
485 dev_dbg(rvu->dev, "%s: freed id %d\n", __func__, seq_id);
489 * rvu_npc_exact_alloc_id - Alloc seq id from bitmap.
490 * @rvu: Resource virtualization unit.
491 * @seq_id: Sequence identifier.
492 * Return: True or false.
494 static bool rvu_npc_exact_alloc_id(struct rvu *rvu, u32 *seq_id)
496 struct npc_exact_table *table;
499 table = rvu->hw->table;
501 mutex_lock(&table->lock);
502 idx = find_first_zero_bit(table->id_bmap, table->tot_ids);
503 if (idx == table->tot_ids) {
504 mutex_unlock(&table->lock);
505 dev_err(rvu->dev, "%s: No space in id bitmap (%d)\n",
506 __func__, table->tot_ids);
511 /* Mark bit map to indicate that slot is used.*/
512 set_bit(idx, table->id_bmap);
513 mutex_unlock(&table->lock);
516 dev_dbg(rvu->dev, "%s: Allocated id (%d)\n", __func__, *seq_id);
522 * rvu_npc_exact_alloc_cam_table_entry - find free slot in fully associative table.
523 * @rvu: resource virtualization unit.
524 * @index: Index to exact CAM table.
525 * Return: 0 upon success; else error number.
527 static int rvu_npc_exact_alloc_cam_table_entry(struct rvu *rvu, int *index)
529 struct npc_exact_table *table;
532 table = rvu->hw->table;
534 mutex_lock(&table->lock);
535 idx = find_first_zero_bit(table->cam_table.bmap, table->cam_table.depth);
536 if (idx == table->cam_table.depth) {
537 mutex_unlock(&table->lock);
538 dev_info(rvu->dev, "%s: No space in exact cam table, weight=%u\n", __func__,
539 bitmap_weight(table->cam_table.bmap, table->cam_table.depth));
543 /* Mark bit map to indicate that slot is used.*/
544 set_bit(idx, table->cam_table.bmap);
545 mutex_unlock(&table->lock);
548 dev_dbg(rvu->dev, "%s: cam table entry alloc success (index=%d)\n",
554 * rvu_exact_prepare_table_entry - Data for exact match table entry.
555 * @rvu: Resource virtualization unit.
556 * @enable: Enable/Disable entry
557 * @ctype: Software defined channel type. Currently set as 0.
558 * @chan: Channel number.
559 * @mac_addr: Destination mac address.
560 * Return: mdata for exact match table.
562 static u64 rvu_exact_prepare_table_entry(struct rvu *rvu, bool enable,
563 u8 ctype, u16 chan, u8 *mac_addr)
566 u64 ldata = rvu_npc_exact_mac2u64(mac_addr);
568 /* Enable or disable */
569 u64 mdata = FIELD_PREP(GENMASK_ULL(63, 63), enable ? 1 : 0);
572 mdata |= FIELD_PREP(GENMASK_ULL(61, 60), ctype);
575 mdata |= FIELD_PREP(GENMASK_ULL(59, 48), chan);
578 mdata |= FIELD_PREP(GENMASK_ULL(47, 0), ldata);
584 * rvu_exact_config_secret_key - Configure secret key.
585 * @rvu: Resource virtualization unit.
static void rvu_exact_config_secret_key(struct rvu *rvu)
{
	int blkaddr;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);

	/* Exact match is configured on the RX interface only: program
	 * all three 64-bit secret key words.
	 */
	rvu_write64(rvu, blkaddr, NPC_AF_INTFX_EXACT_SECRET0(NIX_INTF_RX),
		    RVU_NPC_HASH_SECRET_KEY0);

	rvu_write64(rvu, blkaddr, NPC_AF_INTFX_EXACT_SECRET1(NIX_INTF_RX),
		    RVU_NPC_HASH_SECRET_KEY1);

	rvu_write64(rvu, blkaddr, NPC_AF_INTFX_EXACT_SECRET2(NIX_INTF_RX),
		    RVU_NPC_HASH_SECRET_KEY2);
}
603 * rvu_exact_config_search_key - Configure search key
604 * @rvu: Resource virtualization unit.
static void rvu_exact_config_search_key(struct rvu *rvu)
{
	int blkaddr;
	u64 reg_val;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);

	/* Header offset of 0: extract from the start of the layer */
	reg_val = FIELD_PREP(GENMASK_ULL(39, 32), 0);

	/* BYTESM1, number of bytes - 1: extract a full 6-byte MAC */
	reg_val |= FIELD_PREP(GENMASK_ULL(18, 16), ETH_ALEN - 1);

	/* Enable LID and set LID to NPC_LID_LA */
	reg_val |= FIELD_PREP(GENMASK_ULL(11, 11), 1);
	reg_val |= FIELD_PREP(GENMASK_ULL(10, 8), NPC_LID_LA);

	/* Clear layer type based extraction */

	/* Disable LT_EN */
	reg_val |= FIELD_PREP(GENMASK_ULL(12, 12), 0);

	/* Set LTYPE_MATCH to 0 */
	reg_val |= FIELD_PREP(GENMASK_ULL(7, 4), 0);

	/* Set LTYPE_MASK to 0 */
	reg_val |= FIELD_PREP(GENMASK_ULL(3, 0), 0);

	rvu_write64(rvu, blkaddr, NPC_AF_INTFX_EXACT_CFG(NIX_INTF_RX), reg_val);
}
638 * rvu_exact_config_result_ctrl - Set exact table hash control
639 * @rvu: Resource virtualization unit.
640 * @depth: Depth of Exact match table.
642 * Sets mask and offset for hash for mem table.
static void rvu_exact_config_result_ctrl(struct rvu *rvu, uint32_t depth)
{
	int blkaddr;
	u64 reg = 0;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);

	/* Set mask. Note that depth is a power of 2 */
	rvu->hw->table->mem_table.hash_mask = (depth - 1);
	reg |= FIELD_PREP(GENMASK_ULL(42, 32), (depth - 1));

	/* Set offset as 0 */
	rvu->hw->table->mem_table.hash_offset = 0;
	reg |= FIELD_PREP(GENMASK_ULL(10, 0), 0);

	/* Program the result control register for RX; the same mask and
	 * offset were cached above so the software hash computation in
	 * rvu_exact_calculate_hash() matches the hardware's.
	 */
	rvu_write64(rvu, blkaddr, NPC_AF_INTFX_EXACT_RESULT_CTL(NIX_INTF_RX), reg);
	/* Store hash mask and offset for s/w algorithm */
}
665 * rvu_exact_config_table_mask - Set exact table mask.
666 * @rvu: Resource virtualization unit.
static void rvu_exact_config_table_mask(struct rvu *rvu)
{
	int blkaddr;
	u64 mask = 0;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);

	/* Don't use Ctype */
	mask |= FIELD_PREP(GENMASK_ULL(61, 60), 0);

	/* Match on the full channel field */
	mask |= GENMASK_ULL(59, 48);

	/* Match on all 48 bits of MAC data */
	mask |= GENMASK_ULL(47, 0);

	/* Store mask for s/w hash calculation */
	rvu->hw->table->mem_table.mask = mask;

	/* Set mask for RX.*/
	rvu_write64(rvu, blkaddr, NPC_AF_INTFX_EXACT_MASK(NIX_INTF_RX), mask);
}
692 * rvu_npc_exact_get_max_entries - Get total number of entries in table.
693 * @rvu: resource virtualization unit.
694 * Return: Maximum table entries possible.
696 u32 rvu_npc_exact_get_max_entries(struct rvu *rvu)
698 struct npc_exact_table *table;
700 table = rvu->hw->table;
701 return table->tot_ids;
705 * rvu_npc_exact_has_match_table - Checks support for exact match.
706 * @rvu: resource virtualization unit.
707 * Return: True if exact match table is supported/enabled.
709 bool rvu_npc_exact_has_match_table(struct rvu *rvu)
711 return rvu->hw->cap.npc_exact_match_enabled;
715 * __rvu_npc_exact_find_entry_by_seq_id - find entry by id
716 * @rvu: resource virtualization unit.
717 * @seq_id: Sequence identifier.
719 * Caller should acquire the lock.
720 * Return: Pointer to table entry.
722 static struct npc_exact_table_entry *
723 __rvu_npc_exact_find_entry_by_seq_id(struct rvu *rvu, u32 seq_id)
725 struct npc_exact_table *table = rvu->hw->table;
726 struct npc_exact_table_entry *entry = NULL;
727 struct list_head *lhead;
729 lhead = &table->lhead_gbl;
731 /* traverse to find the matching entry */
732 list_for_each_entry(entry, lhead, glist) {
733 if (entry->seq_id != seq_id)
743 * rvu_npc_exact_add_to_list - Add entry to list
744 * @rvu: resource virtualization unit.
745 * @opc_type: OPCODE to select MEM/CAM table.
746 * @ways: MEM table ways.
747 * @index: Index in MEM/CAM table.
748 * @cgx_id: CGX identifier.
749 * @lmac_id: LMAC identifier.
750 * @mac_addr: MAC address.
751 * @chan: Channel number.
752 * @ctype: Channel Type.
753 * @seq_id: Sequence identifier
754 * @cmd: True if function is called by ethtool cmd
755 * @mcam_idx: NPC mcam index of DMAC entry in NPC mcam.
756 * @pcifunc: pci function
757 * Return: 0 upon success.
759 static int rvu_npc_exact_add_to_list(struct rvu *rvu, enum npc_exact_opc_type opc_type, u8 ways,
760 u32 index, u8 cgx_id, u8 lmac_id, u8 *mac_addr, u16 chan,
761 u8 ctype, u32 *seq_id, bool cmd, u32 mcam_idx, u16 pcifunc)
763 struct npc_exact_table_entry *entry, *tmp, *iter;
764 struct npc_exact_table *table = rvu->hw->table;
765 struct list_head *lhead, *pprev;
767 WARN_ON(ways >= NPC_EXACT_TBL_MAX_WAYS);
769 if (!rvu_npc_exact_alloc_id(rvu, seq_id)) {
770 dev_err(rvu->dev, "%s: Generate seq id failed\n", __func__);
774 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
776 rvu_npc_exact_free_id(rvu, *seq_id);
777 dev_err(rvu->dev, "%s: Memory allocation failed\n", __func__);
781 mutex_lock(&table->lock);
783 case NPC_EXACT_OPC_CAM:
784 lhead = &table->lhead_cam_tbl_entry;
785 table->cam_tbl_entry_cnt++;
788 case NPC_EXACT_OPC_MEM:
789 lhead = &table->lhead_mem_tbl_entry[ways];
790 table->mem_tbl_entry_cnt++;
794 mutex_unlock(&table->lock);
796 rvu_npc_exact_free_id(rvu, *seq_id);
798 dev_err(rvu->dev, "%s: Unknown opc type%d\n", __func__, opc_type);
802 /* Add to global list */
803 INIT_LIST_HEAD(&entry->glist);
804 list_add_tail(&entry->glist, &table->lhead_gbl);
805 INIT_LIST_HEAD(&entry->list);
806 entry->index = index;
808 entry->opc_type = opc_type;
810 entry->pcifunc = pcifunc;
812 ether_addr_copy(entry->mac, mac_addr);
814 entry->ctype = ctype;
815 entry->cgx_id = cgx_id;
816 entry->lmac_id = lmac_id;
818 entry->seq_id = *seq_id;
820 entry->mcam_idx = mcam_idx;
825 /* Insert entry in ascending order of index */
826 list_for_each_entry_safe(iter, tmp, lhead, list) {
827 if (index < iter->index)
833 /* Add to each table list */
834 list_add(&entry->list, pprev);
835 mutex_unlock(&table->lock);
840 * rvu_npc_exact_mem_table_write - Wrapper for register write
841 * @rvu: resource virtualization unit.
842 * @blkaddr: Block address
843 * @ways: ways for MEM table.
844 * @index: Index in MEM
845 * @mdata: Meta data to be written to register.
static void rvu_npc_exact_mem_table_write(struct rvu *rvu, int blkaddr, u8 ways,
					  u32 index, u64 mdata)
{
	/* One 64-bit entry per (way, index) slot of the 4-way MEM table */
	rvu_write64(rvu, blkaddr, NPC_AF_EXACT_MEM_ENTRY(ways, index), mdata);
}
854 * rvu_npc_exact_cam_table_write - Wrapper for register write
855 * @rvu: resource virtualization unit.
856 * @blkaddr: Block address
857 * @index: Index in MEM
858 * @mdata: Meta data to be written to register.
static void rvu_npc_exact_cam_table_write(struct rvu *rvu, int blkaddr,
					  u32 index, u64 mdata)
{
	/* One 64-bit entry per slot of the fully associative CAM table */
	rvu_write64(rvu, blkaddr, NPC_AF_EXACT_CAM_ENTRY(index), mdata);
}
867 * rvu_npc_exact_dealloc_table_entry - dealloc table entry
868 * @rvu: resource virtualization unit.
869 * @opc_type: OPCODE for selection of table(MEM or CAM)
870 * @ways: ways if opc_type is MEM table.
871 * @index: Index of MEM or CAM table.
872 * Return: 0 upon success.
874 static int rvu_npc_exact_dealloc_table_entry(struct rvu *rvu, enum npc_exact_opc_type opc_type,
877 int blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
878 struct npc_exact_table *table;
879 u8 null_dmac[6] = { 0 };
882 /* Prepare entry with all fields set to zero */
883 u64 null_mdata = rvu_exact_prepare_table_entry(rvu, false, 0, 0, null_dmac);
885 table = rvu->hw->table;
886 depth = table->mem_table.depth;
888 mutex_lock(&table->lock);
891 case NPC_EXACT_OPC_CAM:
893 /* Check whether entry is used already */
894 if (!test_bit(index, table->cam_table.bmap)) {
895 mutex_unlock(&table->lock);
896 dev_err(rvu->dev, "%s: Trying to free an unused entry ways=%d index=%d\n",
897 __func__, ways, index);
901 rvu_npc_exact_cam_table_write(rvu, blkaddr, index, null_mdata);
902 clear_bit(index, table->cam_table.bmap);
905 case NPC_EXACT_OPC_MEM:
907 /* Check whether entry is used already */
908 if (!test_bit(index + ways * depth, table->mem_table.bmap)) {
909 mutex_unlock(&table->lock);
910 dev_err(rvu->dev, "%s: Trying to free an unused entry index=%d\n",
915 rvu_npc_exact_mem_table_write(rvu, blkaddr, ways, index, null_mdata);
916 clear_bit(index + ways * depth, table->mem_table.bmap);
920 mutex_unlock(&table->lock);
921 dev_err(rvu->dev, "%s: invalid opc type %d", __func__, opc_type);
925 mutex_unlock(&table->lock);
927 dev_dbg(rvu->dev, "%s: Successfully deleted entry (index=%d, ways=%d opc_type=%d\n",
928 __func__, index, ways, opc_type);
934 * rvu_npc_exact_alloc_table_entry - Allociate an entry
935 * @rvu: resource virtualization unit.
937 * @chan: Channel number.
938 * @ctype: Channel Type.
939 * @index: Index of MEM table or CAM table.
940 * @ways: Ways. Only valid for MEM table.
941 * @opc_type: OPCODE to select table (MEM or CAM)
943 * Try allocating a slot from MEM table. If all 4 ways
944 * slot are full for a hash index, check availability in
945 * 32-entry CAM table for allocation.
946 * Return: 0 upon success.
948 static int rvu_npc_exact_alloc_table_entry(struct rvu *rvu, char *mac, u16 chan, u8 ctype,
949 u32 *index, u8 *ways, enum npc_exact_opc_type *opc_type)
951 struct npc_exact_table *table;
955 table = rvu->hw->table;
957 /* Check in 4-ways mem entry for free slote */
958 hash = rvu_exact_calculate_hash(rvu, chan, ctype, mac, table->mem_table.mask,
959 table->mem_table.depth);
960 err = rvu_npc_exact_alloc_mem_table_entry(rvu, ways, index, hash);
962 *opc_type = NPC_EXACT_OPC_MEM;
963 dev_dbg(rvu->dev, "%s: inserted in 4 ways hash table ways=%d, index=%d\n",
964 __func__, *ways, *index);
968 dev_dbg(rvu->dev, "%s: failed to insert in 4 ways hash table\n", __func__);
970 /* wayss is 0 for cam table */
972 err = rvu_npc_exact_alloc_cam_table_entry(rvu, index);
974 *opc_type = NPC_EXACT_OPC_CAM;
975 dev_dbg(rvu->dev, "%s: inserted in fully associative hash table index=%u\n",
980 dev_err(rvu->dev, "%s: failed to insert in fully associative hash table\n", __func__);
985 * rvu_npc_exact_save_drop_rule_chan_and_mask - Save drop rules info in data base.
986 * @rvu: resource virtualization unit.
987 * @drop_mcam_idx: Drop rule index in NPC mcam.
988 * @chan_val: Channel value.
989 * @chan_mask: Channel Mask.
990 * @pcifunc: pcifunc of interface.
991 * Return: True upon success.
993 static bool rvu_npc_exact_save_drop_rule_chan_and_mask(struct rvu *rvu, int drop_mcam_idx,
994 u64 chan_val, u64 chan_mask, u16 pcifunc)
996 struct npc_exact_table *table;
999 table = rvu->hw->table;
1001 for (i = 0; i < NPC_MCAM_DROP_RULE_MAX; i++) {
1002 if (!table->drop_rule_map[i].valid)
1005 if (table->drop_rule_map[i].chan_val != (u16)chan_val)
1008 if (table->drop_rule_map[i].chan_mask != (u16)chan_mask)
1014 if (i == NPC_MCAM_DROP_RULE_MAX)
1017 table->drop_rule_map[i].drop_rule_idx = drop_mcam_idx;
1018 table->drop_rule_map[i].chan_val = (u16)chan_val;
1019 table->drop_rule_map[i].chan_mask = (u16)chan_mask;
1020 table->drop_rule_map[i].pcifunc = pcifunc;
1021 table->drop_rule_map[i].valid = true;
1026 * rvu_npc_exact_calc_drop_rule_chan_and_mask - Calculate Channel number and mask.
1027 * @rvu: resource virtualization unit.
1028 * @intf_type: Interface type (SDK, LBK or CGX)
1029 * @cgx_id: CGX identifier.
1030 * @lmac_id: LAMC identifier.
1031 * @val: Channel number.
1032 * @mask: Channel mask.
1033 * Return: True upon success.
static bool rvu_npc_exact_calc_drop_rule_chan_and_mask(struct rvu *rvu, u8 intf_type,
						       u8 cgx_id, u8 lmac_id,
						       u64 *val, u64 *mask)
{
	u16 chan_val, chan_mask;

	/* No support for SDP and LBK */
	if (intf_type != NIX_INTF_TYPE_CGX)
		return false;

	/* Base channel of this CGX/LMAC pair */
	chan_val = rvu_nix_chan_cgx(rvu, cgx_id, lmac_id, 0);
	chan_mask = 0xfff;

	/* Either output pointer may be NULL when not needed by caller */
	if (val)
		*val = chan_val;

	if (mask)
		*mask = chan_mask;

	return true;
}
1058 * rvu_npc_exact_drop_rule_to_pcifunc - Retrieve pcifunc
1059 * @rvu: resource virtualization unit.
1060 * @drop_rule_idx: Drop rule index in NPC mcam.
1062 * Debugfs (exact_drop_cnt) entry displays pcifunc for interface
1063 * by retrieving the pcifunc value from data base.
1064 * Return: Drop rule index.
1066 u16 rvu_npc_exact_drop_rule_to_pcifunc(struct rvu *rvu, u32 drop_rule_idx)
1068 struct npc_exact_table *table;
1071 table = rvu->hw->table;
1073 for (i = 0; i < NPC_MCAM_DROP_RULE_MAX; i++) {
1074 if (!table->drop_rule_map[i].valid)
1077 if (table->drop_rule_map[i].drop_rule_idx != drop_rule_idx)
1080 return table->drop_rule_map[i].pcifunc;
1083 dev_err(rvu->dev, "%s: drop mcam rule index (%d) >= NPC_MCAM_DROP_RULE_MAX\n",
1084 __func__, drop_rule_idx);
1089 * rvu_npc_exact_get_drop_rule_info - Get drop rule information.
1090 * @rvu: resource virtualization unit.
1091 * @intf_type: Interface type (CGX, SDP or LBK)
1092 * @cgx_id: CGX identifier.
1093 * @lmac_id: LMAC identifier.
1094 * @drop_mcam_idx: NPC mcam drop rule index.
1095 * @val: Channel value.
1096 * @mask: Channel mask.
1097 * @pcifunc: pcifunc of interface corresponding to the drop rule.
1098 * Return: True upon success.
static bool rvu_npc_exact_get_drop_rule_info(struct rvu *rvu, u8 intf_type, u8 cgx_id,
					     u8 lmac_id, u32 *drop_mcam_idx, u64 *val,
					     u64 *mask, u16 *pcifunc)
{
	struct npc_exact_table *table;
	u64 chan_val, chan_mask;
	bool rc;
	int i;

	table = rvu->hw->table;

	/* Drop rules exist only for CGX-mapped interfaces */
	if (intf_type != NIX_INTF_TYPE_CGX) {
		dev_err(rvu->dev, "%s: No drop rule for LBK/SDP mode\n", __func__);
		return false;
	}

	rc = rvu_npc_exact_calc_drop_rule_chan_and_mask(rvu, intf_type, cgx_id,
							lmac_id, &chan_val, &chan_mask);
	if (!rc)
		return false;

	/* Find the saved drop rule matching this channel value */
	for (i = 0; i < NPC_MCAM_DROP_RULE_MAX; i++) {
		if (!table->drop_rule_map[i].valid)
			break;

		if (table->drop_rule_map[i].chan_val != (u16)chan_val)
			continue;

		/* Output pointers are optional; fill only those provided */
		if (val)
			*val = table->drop_rule_map[i].chan_val;

		if (mask)
			*mask = table->drop_rule_map[i].chan_mask;

		if (pcifunc)
			*pcifunc = table->drop_rule_map[i].pcifunc;

		*drop_mcam_idx = i;
		return true;
	}

	if (i == NPC_MCAM_DROP_RULE_MAX) {
		dev_err(rvu->dev, "%s: drop mcam rule index (%d) >= NPC_MCAM_DROP_RULE_MAX\n",
			__func__, *drop_mcam_idx);
		return false;
	}

	dev_err(rvu->dev, "%s: Could not retrieve for cgx=%d, lmac=%d\n",
		__func__, cgx_id, lmac_id);
	return false;
}
1151 * __rvu_npc_exact_cmd_rules_cnt_update - Update number dmac rules against a drop rule.
1152 * @rvu: resource virtualization unit.
1153 * @drop_mcam_idx: NPC mcam drop rule index.
1155 * @enable_or_disable_cam: If no exact match rules against a drop rule, disable it.
1157 * when first exact match entry against a drop rule is added, enable_or_disable_cam
1158 * is set to true. When last exact match entry against a drop rule is deleted,
1159 * enable_or_disable_cam is set to true.
1160 * Return: Number of rules
1162 static u16 __rvu_npc_exact_cmd_rules_cnt_update(struct rvu *rvu, int drop_mcam_idx,
1163 int val, bool *enable_or_disable_cam)
1165 struct npc_exact_table *table;
1168 table = rvu->hw->table;
1170 cnt = &table->cnt_cmd_rules[drop_mcam_idx];
1175 if (!enable_or_disable_cam)
1178 *enable_or_disable_cam = false;
1180 /* If all rules are deleted, disable cam */
1181 if (!*cnt && val < 0) {
1182 *enable_or_disable_cam = true;
1186 /* If rule got added, enable cam */
1187 if (!old_cnt && val > 0) {
1188 *enable_or_disable_cam = true;
1197 * rvu_npc_exact_del_table_entry_by_id - Delete and free table entry.
1198 * @rvu: resource virtualization unit.
1199 * @seq_id: Sequence identifier of the entry.
1201 * Deletes entry from linked lists and free up slot in HW MEM or CAM
1203 * Return: 0 upon success.
1205 static int rvu_npc_exact_del_table_entry_by_id(struct rvu *rvu, u32 seq_id)
1207 struct npc_exact_table_entry *entry = NULL;
1208 struct npc_exact_table *table;
1209 bool disable_cam = false;
1210 u32 drop_mcam_idx = -1;
1214 table = rvu->hw->table;
1216 mutex_lock(&table->lock);
1218 /* Lookup for entry which needs to be updated */
1219 entry = __rvu_npc_exact_find_entry_by_seq_id(rvu, seq_id);
1221 dev_dbg(rvu->dev, "%s: failed to find entry for id=%d\n", __func__, seq_id);
1222 mutex_unlock(&table->lock);
1226 cnt = (entry->opc_type == NPC_EXACT_OPC_CAM) ? &table->cam_tbl_entry_cnt :
1227 &table->mem_tbl_entry_cnt;
1229 /* delete from lists */
1230 list_del_init(&entry->list);
1231 list_del_init(&entry->glist);
1235 rc = rvu_npc_exact_get_drop_rule_info(rvu, NIX_INTF_TYPE_CGX, entry->cgx_id,
1236 entry->lmac_id, &drop_mcam_idx, NULL, NULL, NULL);
1238 dev_dbg(rvu->dev, "%s: failed to retrieve drop info for id=0x%x\n",
1240 mutex_unlock(&table->lock);
1245 __rvu_npc_exact_cmd_rules_cnt_update(rvu, drop_mcam_idx, -1, &disable_cam);
1247 /* No dmac filter rules; disable drop on hit rule */
1249 rvu_npc_enable_mcam_by_entry_index(rvu, drop_mcam_idx, NIX_INTF_RX, false);
1250 dev_dbg(rvu->dev, "%s: Disabling mcam idx %d\n",
1251 __func__, drop_mcam_idx);
1254 mutex_unlock(&table->lock);
1256 rvu_npc_exact_dealloc_table_entry(rvu, entry->opc_type, entry->ways, entry->index);
1258 rvu_npc_exact_free_id(rvu, seq_id);
1260 dev_dbg(rvu->dev, "%s: delete entry success for id=0x%x, mca=%pM\n",
1261 __func__, seq_id, entry->mac);
1268 * rvu_npc_exact_add_table_entry - Adds a table entry
1269 * @rvu: resource virtualization unit.
1270 * @cgx_id: cgx identifier.
1271 * @lmac_id: lmac identifier.
1272 * @mac: MAC address.
1273 * @chan: Channel number.
1274 * @ctype: Channel Type.
1275 * @seq_id: Sequence number.
1276 * @cmd: Whether it is invoked by ethtool cmd.
1277 * @mcam_idx: NPC mcam index corresponding to MAC
1278 * @pcifunc: PCI func.
1280 * Creates a new exact match table entry in either CAM or
1282 * Return: 0 upon success.
1284 static int rvu_npc_exact_add_table_entry(struct rvu *rvu, u8 cgx_id, u8 lmac_id, u8 *mac,
1285 u16 chan, u8 ctype, u32 *seq_id, bool cmd,
1286 u32 mcam_idx, u16 pcifunc)
1288 int blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
1289 enum npc_exact_opc_type opc_type;
1290 bool enable_cam = false;
1300 err = rvu_npc_exact_alloc_table_entry(rvu, mac, chan, ctype, &index, &ways, &opc_type);
1302 dev_err(rvu->dev, "%s: Could not alloc in exact match table\n", __func__);
1306 /* Write mdata to table */
1307 mdata = rvu_exact_prepare_table_entry(rvu, true, ctype, chan, mac);
1309 if (opc_type == NPC_EXACT_OPC_CAM)
1310 rvu_npc_exact_cam_table_write(rvu, blkaddr, index, mdata);
1312 rvu_npc_exact_mem_table_write(rvu, blkaddr, ways, index, mdata);
1314 /* Insert entry to linked list */
1315 err = rvu_npc_exact_add_to_list(rvu, opc_type, ways, index, cgx_id, lmac_id,
1316 mac, chan, ctype, seq_id, cmd, mcam_idx, pcifunc);
1318 rvu_npc_exact_dealloc_table_entry(rvu, opc_type, ways, index);
1319 dev_err(rvu->dev, "%s: could not add to exact match table\n", __func__);
1323 rc = rvu_npc_exact_get_drop_rule_info(rvu, NIX_INTF_TYPE_CGX, cgx_id, lmac_id,
1324 &drop_mcam_idx, NULL, NULL, NULL);
1326 rvu_npc_exact_dealloc_table_entry(rvu, opc_type, ways, index);
1327 dev_dbg(rvu->dev, "%s: failed to get drop rule info cgx=%d lmac=%d\n",
1328 __func__, cgx_id, lmac_id);
1333 __rvu_npc_exact_cmd_rules_cnt_update(rvu, drop_mcam_idx, 1, &enable_cam);
1335 /* First command rule; enable drop on hit rule */
1337 rvu_npc_enable_mcam_by_entry_index(rvu, drop_mcam_idx, NIX_INTF_RX, true);
1338 dev_dbg(rvu->dev, "%s: Enabling mcam idx %d\n",
1339 __func__, drop_mcam_idx);
1343 "%s: Successfully added entry (index=%d, dmac=%pM, ways=%d opc_type=%d\n",
1344 __func__, index, mac, ways, opc_type);
1350 * rvu_npc_exact_update_table_entry - Update exact match table.
1351 * @rvu: resource virtualization unit.
1352 * @cgx_id: CGX identifier.
1353 * @lmac_id: LMAC identifier.
1354 * @old_mac: Existing MAC address entry.
1355 * @new_mac: New MAC address entry.
1356 * @seq_id: Sequence identifier of the entry.
1358 * Updates MAC address of an entry. If entry is in MEM table, new
1359 * hash value may not match with old one.
1360 * Return: 0 upon success.
1362 static int rvu_npc_exact_update_table_entry(struct rvu *rvu, u8 cgx_id, u8 lmac_id,
1363 u8 *old_mac, u8 *new_mac, u32 *seq_id)
1365 int blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
1366 struct npc_exact_table_entry *entry;
1367 struct npc_exact_table *table;
1371 table = rvu->hw->table;
1373 mutex_lock(&table->lock);
1375 /* Lookup for entry which needs to be updated */
1376 entry = __rvu_npc_exact_find_entry_by_seq_id(rvu, *seq_id);
1378 mutex_unlock(&table->lock);
1380 "%s: failed to find entry for cgx_id=%d lmac_id=%d old_mac=%pM\n",
1381 __func__, cgx_id, lmac_id, old_mac);
1385 /* If entry is in mem table and new hash index is different than old
1386 * hash index, we cannot update the entry. Fail in these scenarios.
1388 if (entry->opc_type == NPC_EXACT_OPC_MEM) {
1389 hash_index = rvu_exact_calculate_hash(rvu, entry->chan, entry->ctype,
1390 new_mac, table->mem_table.mask,
1391 table->mem_table.depth);
1392 if (hash_index != entry->index) {
1394 "%s: Update failed due to index mismatch(new=0x%x, old=%x)\n",
1395 __func__, hash_index, entry->index);
1396 mutex_unlock(&table->lock);
1401 mdata = rvu_exact_prepare_table_entry(rvu, true, entry->ctype, entry->chan, new_mac);
1403 if (entry->opc_type == NPC_EXACT_OPC_MEM)
1404 rvu_npc_exact_mem_table_write(rvu, blkaddr, entry->ways, entry->index, mdata);
1406 rvu_npc_exact_cam_table_write(rvu, blkaddr, entry->index, mdata);
1408 /* Update entry fields */
1409 ether_addr_copy(entry->mac, new_mac);
1410 *seq_id = entry->seq_id;
1413 "%s: Successfully updated entry (index=%d, dmac=%pM, ways=%d opc_type=%d\n",
1414 __func__, entry->index, entry->mac, entry->ways, entry->opc_type);
1416 dev_dbg(rvu->dev, "%s: Successfully updated entry (old mac=%pM new_mac=%pM\n",
1417 __func__, old_mac, new_mac);
1419 mutex_unlock(&table->lock);
1424 * rvu_npc_exact_promisc_disable - Disable promiscuous mode.
1425 * @rvu: resource virtualization unit.
1428 * Drop rule is against each PF. We dont support DMAC filter for
1430 * Return: 0 upon success
1433 int rvu_npc_exact_promisc_disable(struct rvu *rvu, u16 pcifunc)
1435 struct npc_exact_table *table;
1436 int pf = rvu_get_pf(pcifunc);
1442 table = rvu->hw->table;
1444 rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
1445 rc = rvu_npc_exact_get_drop_rule_info(rvu, NIX_INTF_TYPE_CGX, cgx_id, lmac_id,
1446 &drop_mcam_idx, NULL, NULL, NULL);
1448 dev_dbg(rvu->dev, "%s: failed to get drop rule info cgx=%d lmac=%d\n",
1449 __func__, cgx_id, lmac_id);
1453 mutex_lock(&table->lock);
1454 promisc = &table->promisc_mode[drop_mcam_idx];
1457 mutex_unlock(&table->lock);
1458 dev_dbg(rvu->dev, "%s: Err Already promisc mode disabled (cgx=%d lmac=%d)\n",
1459 __func__, cgx_id, lmac_id);
1460 return LMAC_AF_ERR_INVALID_PARAM;
1463 mutex_unlock(&table->lock);
1469 * rvu_npc_exact_promisc_enable - Enable promiscuous mode.
1470 * @rvu: resource virtualization unit.
1471 * @pcifunc: pcifunc.
1472 * Return: 0 upon success
1474 int rvu_npc_exact_promisc_enable(struct rvu *rvu, u16 pcifunc)
1476 struct npc_exact_table *table;
1477 int pf = rvu_get_pf(pcifunc);
1483 table = rvu->hw->table;
1485 rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
1486 rc = rvu_npc_exact_get_drop_rule_info(rvu, NIX_INTF_TYPE_CGX, cgx_id, lmac_id,
1487 &drop_mcam_idx, NULL, NULL, NULL);
1489 dev_dbg(rvu->dev, "%s: failed to get drop rule info cgx=%d lmac=%d\n",
1490 __func__, cgx_id, lmac_id);
1494 mutex_lock(&table->lock);
1495 promisc = &table->promisc_mode[drop_mcam_idx];
1498 mutex_unlock(&table->lock);
1499 dev_dbg(rvu->dev, "%s: Already in promisc mode (cgx=%d lmac=%d)\n",
1500 __func__, cgx_id, lmac_id);
1501 return LMAC_AF_ERR_INVALID_PARAM;
1504 mutex_unlock(&table->lock);
1510 * rvu_npc_exact_mac_addr_reset - Delete PF mac address.
1511 * @rvu: resource virtualization unit.
1512 * @req: Reset request
1513 * @rsp: Reset response.
1514 * Return: 0 upon success
1516 int rvu_npc_exact_mac_addr_reset(struct rvu *rvu, struct cgx_mac_addr_reset_req *req,
1517 struct msg_rsp *rsp)
1519 int pf = rvu_get_pf(req->hdr.pcifunc);
1520 u32 seq_id = req->index;
1521 struct rvu_pfvf *pfvf;
1525 rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
1527 pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
1529 rc = rvu_npc_exact_del_table_entry_by_id(rvu, seq_id);
1531 /* TODO: how to handle this error case ? */
1532 dev_err(rvu->dev, "%s MAC (%pM) del PF=%d failed\n", __func__, pfvf->mac_addr, pf);
1536 dev_dbg(rvu->dev, "%s MAC (%pM) del PF=%d success (seq_id=%u)\n",
1537 __func__, pfvf->mac_addr, pf, seq_id);
1542 * rvu_npc_exact_mac_addr_update - Update mac address field with new value.
1543 * @rvu: resource virtualization unit.
1544 * @req: Update request.
1545 * @rsp: Update response.
1546 * Return: 0 upon success
1548 int rvu_npc_exact_mac_addr_update(struct rvu *rvu,
1549 struct cgx_mac_addr_update_req *req,
1550 struct cgx_mac_addr_update_rsp *rsp)
1552 int pf = rvu_get_pf(req->hdr.pcifunc);
1553 struct npc_exact_table_entry *entry;
1554 struct npc_exact_table *table;
1555 struct rvu_pfvf *pfvf;
1556 u32 seq_id, mcam_idx;
1557 u8 old_mac[ETH_ALEN];
1561 if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
1562 return LMAC_AF_ERR_PERM_DENIED;
1564 dev_dbg(rvu->dev, "%s: Update request for seq_id=%d, mac=%pM\n",
1565 __func__, req->index, req->mac_addr);
1567 rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
1569 pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
1571 table = rvu->hw->table;
1573 mutex_lock(&table->lock);
1575 /* Lookup for entry which needs to be updated */
1576 entry = __rvu_npc_exact_find_entry_by_seq_id(rvu, req->index);
1578 dev_err(rvu->dev, "%s: failed to find entry for id=0x%x\n", __func__, req->index);
1579 mutex_unlock(&table->lock);
1580 return LMAC_AF_ERR_EXACT_MATCH_TBL_LOOK_UP_FAILED;
1582 ether_addr_copy(old_mac, entry->mac);
1583 seq_id = entry->seq_id;
1584 mcam_idx = entry->mcam_idx;
1585 mutex_unlock(&table->lock);
1587 rc = rvu_npc_exact_update_table_entry(rvu, cgx_id, lmac_id, old_mac,
1588 req->mac_addr, &seq_id);
1590 rsp->index = seq_id;
1591 dev_dbg(rvu->dev, "%s mac:%pM (pfvf:%pM default:%pM) update to PF=%d success\n",
1592 __func__, req->mac_addr, pfvf->mac_addr, pfvf->default_mac, pf);
1593 ether_addr_copy(pfvf->mac_addr, req->mac_addr);
1597 /* Try deleting and adding it again */
1598 rc = rvu_npc_exact_del_table_entry_by_id(rvu, req->index);
1600 /* This could be a new entry */
1601 dev_dbg(rvu->dev, "%s MAC (%pM) del PF=%d failed\n", __func__,
1602 pfvf->mac_addr, pf);
1605 rc = rvu_npc_exact_add_table_entry(rvu, cgx_id, lmac_id, req->mac_addr,
1606 pfvf->rx_chan_base, 0, &seq_id, true,
1607 mcam_idx, req->hdr.pcifunc);
1609 dev_err(rvu->dev, "%s MAC (%pM) add PF=%d failed\n", __func__,
1611 return LMAC_AF_ERR_EXACT_MATCH_TBL_ADD_FAILED;
1614 rsp->index = seq_id;
1616 "%s MAC (new:%pM, old=%pM default:%pM) del and add to PF=%d success (seq_id=%u)\n",
1617 __func__, req->mac_addr, pfvf->mac_addr, pfvf->default_mac, pf, seq_id);
1619 ether_addr_copy(pfvf->mac_addr, req->mac_addr);
1624 * rvu_npc_exact_mac_addr_add - Adds MAC address to exact match table.
1625 * @rvu: resource virtualization unit.
1626 * @req: Add request.
1627 * @rsp: Add response.
1628 * Return: 0 upon success
1630 int rvu_npc_exact_mac_addr_add(struct rvu *rvu,
1631 struct cgx_mac_addr_add_req *req,
1632 struct cgx_mac_addr_add_rsp *rsp)
1634 int pf = rvu_get_pf(req->hdr.pcifunc);
1635 struct rvu_pfvf *pfvf;
1640 rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
1641 pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
1643 rc = rvu_npc_exact_add_table_entry(rvu, cgx_id, lmac_id, req->mac_addr,
1644 pfvf->rx_chan_base, 0, &seq_id,
1645 true, -1, req->hdr.pcifunc);
1648 rsp->index = seq_id;
1649 dev_dbg(rvu->dev, "%s MAC (%pM) add to PF=%d success (seq_id=%u)\n",
1650 __func__, req->mac_addr, pf, seq_id);
1654 dev_err(rvu->dev, "%s MAC (%pM) add to PF=%d failed\n", __func__,
1656 return LMAC_AF_ERR_EXACT_MATCH_TBL_ADD_FAILED;
1660 * rvu_npc_exact_mac_addr_del - Delete DMAC filter
1661 * @rvu: resource virtualization unit.
1662 * @req: Delete request.
1663 * @rsp: Delete response.
1664 * Return: 0 upon success
1666 int rvu_npc_exact_mac_addr_del(struct rvu *rvu,
1667 struct cgx_mac_addr_del_req *req,
1668 struct msg_rsp *rsp)
1670 int pf = rvu_get_pf(req->hdr.pcifunc);
1673 rc = rvu_npc_exact_del_table_entry_by_id(rvu, req->index);
1675 dev_dbg(rvu->dev, "%s del to PF=%d success (seq_id=%u)\n",
1676 __func__, pf, req->index);
1680 dev_err(rvu->dev, "%s del to PF=%d failed (seq_id=%u)\n",
1681 __func__, pf, req->index);
1682 return LMAC_AF_ERR_EXACT_MATCH_TBL_DEL_FAILED;
1686 * rvu_npc_exact_mac_addr_set - Add PF mac address to dmac filter.
1687 * @rvu: resource virtualization unit.
1688 * @req: Set request.
1689 * @rsp: Set response.
1690 * Return: 0 upon success
1692 int rvu_npc_exact_mac_addr_set(struct rvu *rvu, struct cgx_mac_addr_set_or_get *req,
1693 struct cgx_mac_addr_set_or_get *rsp)
1695 int pf = rvu_get_pf(req->hdr.pcifunc);
1696 u32 seq_id = req->index;
1697 struct rvu_pfvf *pfvf;
1702 rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
1704 pfvf = &rvu->pf[pf];
1706 /* If table does not have an entry; both update entry and del table entry API
1707 * below fails. Those are not failure conditions.
1709 rc = rvu_npc_exact_update_table_entry(rvu, cgx_id, lmac_id, pfvf->mac_addr,
1710 req->mac_addr, &seq_id);
1712 rsp->index = seq_id;
1713 ether_addr_copy(pfvf->mac_addr, req->mac_addr);
1714 ether_addr_copy(rsp->mac_addr, req->mac_addr);
1715 dev_dbg(rvu->dev, "%s MAC (%pM) update to PF=%d success\n",
1716 __func__, req->mac_addr, pf);
1720 /* Try deleting and adding it again */
1721 rc = rvu_npc_exact_del_table_entry_by_id(rvu, req->index);
1723 dev_dbg(rvu->dev, "%s MAC (%pM) del PF=%d failed\n",
1724 __func__, pfvf->mac_addr, pf);
1727 /* find mcam entry if exist */
1728 rc = nix_get_nixlf(rvu, req->hdr.pcifunc, &nixlf, NULL);
1730 mcam_idx = npc_get_nixlf_mcam_index(&rvu->hw->mcam, req->hdr.pcifunc,
1731 nixlf, NIXLF_UCAST_ENTRY);
1734 rc = rvu_npc_exact_add_table_entry(rvu, cgx_id, lmac_id, req->mac_addr,
1735 pfvf->rx_chan_base, 0, &seq_id,
1736 true, mcam_idx, req->hdr.pcifunc);
1738 dev_err(rvu->dev, "%s MAC (%pM) add PF=%d failed\n",
1739 __func__, req->mac_addr, pf);
1740 return LMAC_AF_ERR_EXACT_MATCH_TBL_ADD_FAILED;
1743 rsp->index = seq_id;
1744 ether_addr_copy(rsp->mac_addr, req->mac_addr);
1745 ether_addr_copy(pfvf->mac_addr, req->mac_addr);
1747 "%s MAC (%pM) del and add to PF=%d success (seq_id=%u)\n",
1748 __func__, req->mac_addr, pf, seq_id);
1753 * rvu_npc_exact_can_disable_feature - Check if feature can be disabled.
1754 * @rvu: resource virtualization unit.
1755 * Return: True if exact match feature is supported.
1757 bool rvu_npc_exact_can_disable_feature(struct rvu *rvu)
1759 struct npc_exact_table *table = rvu->hw->table;
1762 if (!rvu->hw->cap.npc_exact_match_enabled)
1765 mutex_lock(&table->lock);
1766 empty = list_empty(&table->lhead_gbl);
1767 mutex_unlock(&table->lock);
1773 * rvu_npc_exact_disable_feature - Disable feature.
1774 * @rvu: resource virtualization unit.
1776 void rvu_npc_exact_disable_feature(struct rvu *rvu)
1778 rvu->hw->cap.npc_exact_match_enabled = false;
1782 * rvu_npc_exact_reset - Delete and free all entry which match pcifunc.
1783 * @rvu: resource virtualization unit.
1784 * @pcifunc: PCI func to match.
1786 void rvu_npc_exact_reset(struct rvu *rvu, u16 pcifunc)
1788 struct npc_exact_table *table = rvu->hw->table;
1789 struct npc_exact_table_entry *tmp, *iter;
1792 mutex_lock(&table->lock);
1793 list_for_each_entry_safe(iter, tmp, &table->lhead_gbl, glist) {
1794 if (pcifunc != iter->pcifunc)
1797 seq_id = iter->seq_id;
1798 dev_dbg(rvu->dev, "%s: resetting pcifun=%d seq_id=%u\n", __func__,
1801 mutex_unlock(&table->lock);
1802 rvu_npc_exact_del_table_entry_by_id(rvu, seq_id);
1803 mutex_lock(&table->lock);
1805 mutex_unlock(&table->lock);
1809 * rvu_npc_exact_init - initialize exact match table
1810 * @rvu: resource virtualization unit.
1812 * Initialize HW and SW resources to manage 4way-2K table and fully
1813 * associative 32-entry mcam table.
1814 * Return: 0 upon success.
1816 int rvu_npc_exact_init(struct rvu *rvu)
1818 u64 bcast_mcast_val, bcast_mcast_mask;
1819 struct npc_exact_table *table;
1820 u64 exact_val, exact_mask;
1821 u64 chan_val, chan_mask;
1833 /* Read NPC_AF_CONST3 and check for have exact
1834 * match functionality is present
1836 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
1838 dev_err(rvu->dev, "%s: NPC block not implemented\n", __func__);
1842 /* Check exact match feature is supported */
1843 npc_const3 = rvu_read64(rvu, blkaddr, NPC_AF_CONST3);
1844 if (!(npc_const3 & BIT_ULL(62)))
1847 /* Check if kex profile has enabled EXACT match nibble */
1848 cfg = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(NIX_INTF_RX));
1849 if (!(cfg & NPC_EXACT_NIBBLE_HIT))
1852 /* Set capability to true */
1853 rvu->hw->cap.npc_exact_match_enabled = true;
1855 table = kzalloc(sizeof(*table), GFP_KERNEL);
1859 dev_dbg(rvu->dev, "%s: Memory allocation for table success\n", __func__);
1860 rvu->hw->table = table;
1862 /* Read table size, ways and depth */
1863 table->mem_table.ways = FIELD_GET(GENMASK_ULL(19, 16), npc_const3);
1864 table->mem_table.depth = FIELD_GET(GENMASK_ULL(15, 0), npc_const3);
1865 table->cam_table.depth = FIELD_GET(GENMASK_ULL(31, 24), npc_const3);
1867 dev_dbg(rvu->dev, "%s: NPC exact match 4way_2k table(ways=%d, depth=%d)\n",
1868 __func__, table->mem_table.ways, table->cam_table.depth);
1870 /* Check if depth of table is not a power of 2
1871 * TODO: why _builtin_popcount() is not working ?
1873 if ((table->mem_table.depth & (table->mem_table.depth - 1)) != 0) {
1875 "%s: NPC exact match 4way_2k table depth(%d) is not square of 2\n",
1876 __func__, table->mem_table.depth);
1880 table_size = table->mem_table.depth * table->mem_table.ways;
1882 /* Allocate bitmap for 4way 2K table */
1883 table->mem_table.bmap = devm_bitmap_zalloc(rvu->dev, table_size,
1885 if (!table->mem_table.bmap)
1888 dev_dbg(rvu->dev, "%s: Allocated bitmap for 4way 2K entry table\n", __func__);
1890 /* Allocate bitmap for 32 entry mcam */
1891 table->cam_table.bmap = devm_bitmap_zalloc(rvu->dev, 32, GFP_KERNEL);
1893 if (!table->cam_table.bmap)
1896 dev_dbg(rvu->dev, "%s: Allocated bitmap for 32 entry cam\n", __func__);
1898 table->tot_ids = table_size + table->cam_table.depth;
1899 table->id_bmap = devm_bitmap_zalloc(rvu->dev, table->tot_ids,
1902 if (!table->id_bmap)
1905 dev_dbg(rvu->dev, "%s: Allocated bitmap for id map (total=%d)\n",
1906 __func__, table->tot_ids);
1908 /* Initialize list heads for npc_exact_table entries.
1909 * This entry is used by debugfs to show entries in
1910 * exact match table.
1912 for (i = 0; i < NPC_EXACT_TBL_MAX_WAYS; i++)
1913 INIT_LIST_HEAD(&table->lhead_mem_tbl_entry[i]);
1915 INIT_LIST_HEAD(&table->lhead_cam_tbl_entry);
1916 INIT_LIST_HEAD(&table->lhead_gbl);
1918 mutex_init(&table->lock);
1920 rvu_exact_config_secret_key(rvu);
1921 rvu_exact_config_search_key(rvu);
1923 rvu_exact_config_table_mask(rvu);
1924 rvu_exact_config_result_ctrl(rvu, table->mem_table.depth);
1926 /* - No drop rule for LBK
1927 * - Drop rules for SDP and each LMAC.
1929 exact_val = !NPC_EXACT_RESULT_HIT;
1930 exact_mask = NPC_EXACT_RESULT_HIT;
1935 bcast_mcast_val = 0b0000;
1936 bcast_mcast_mask = 0b0011;
1938 /* Install SDP drop rule */
1939 drop_mcam_idx = &table->num_drop_rules;
1941 max_lmac_cnt = rvu->cgx_cnt_max * rvu->hw->lmac_per_cgx +
1944 for (i = PF_CGXMAP_BASE; i < max_lmac_cnt; i++) {
1945 if (rvu->pf2cgxlmac_map[i] == 0xFF)
1948 rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[i], &cgx_id, &lmac_id);
1950 rc = rvu_npc_exact_calc_drop_rule_chan_and_mask(rvu, NIX_INTF_TYPE_CGX, cgx_id,
1951 lmac_id, &chan_val, &chan_mask);
1954 "%s: failed, info chan_val=0x%llx chan_mask=0x%llx rule_id=%d\n",
1955 __func__, chan_val, chan_mask, *drop_mcam_idx);
1959 /* Filter rules are only for PF */
1960 pcifunc = RVU_PFFUNC(i, 0);
1963 "%s:Drop rule cgx=%d lmac=%d chan(val=0x%llx, mask=0x%llx\n",
1964 __func__, cgx_id, lmac_id, chan_val, chan_mask);
1966 rc = rvu_npc_exact_save_drop_rule_chan_and_mask(rvu, table->num_drop_rules,
1967 chan_val, chan_mask, pcifunc);
1970 "%s: failed to set drop info for cgx=%d, lmac=%d, chan=%llx\n",
1971 __func__, cgx_id, lmac_id, chan_val);
1975 err = npc_install_mcam_drop_rule(rvu, *drop_mcam_idx,
1976 &table->counter_idx[*drop_mcam_idx],
1977 chan_val, chan_mask,
1978 exact_val, exact_mask,
1979 bcast_mcast_val, bcast_mcast_mask);
1982 "failed to configure drop rule (cgx=%d lmac=%d)\n",
1990 dev_info(rvu->dev, "initialized exact match table successfully\n");