rocker: implement get settings mode command
[linux-2.6-block.git] / drivers / net / ethernet / rocker / rocker_main.c
CommitLineData
4b8ac966
JP
1/*
2 * drivers/net/ethernet/rocker/rocker.c - Rocker switch device driver
11ce2ba3 3 * Copyright (c) 2014-2016 Jiri Pirko <jiri@mellanox.com>
4b8ac966
JP
4 * Copyright (c) 2014 Scott Feldman <sfeldma@gmail.com>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 */
11
12#include <linux/kernel.h>
13#include <linux/module.h>
14#include <linux/pci.h>
15#include <linux/interrupt.h>
16#include <linux/sched.h>
17#include <linux/wait.h>
18#include <linux/spinlock.h>
9f6bbf7c 19#include <linux/hashtable.h>
4b8ac966
JP
20#include <linux/crc32.h>
21#include <linux/sort.h>
22#include <linux/random.h>
23#include <linux/netdevice.h>
24#include <linux/inetdevice.h>
25#include <linux/skbuff.h>
26#include <linux/socket.h>
27#include <linux/etherdevice.h>
28#include <linux/ethtool.h>
29#include <linux/if_ether.h>
30#include <linux/if_vlan.h>
6c707945 31#include <linux/if_bridge.h>
9f6bbf7c 32#include <linux/bitops.h>
db19170b 33#include <linux/ctype.h>
4b8ac966
JP
34#include <net/switchdev.h>
35#include <net/rtnetlink.h>
c1beeef7
SF
36#include <net/ip_fib.h>
37#include <net/netevent.h>
38#include <net/arp.h>
2f8e2c87 39#include <linux/io-64-nonatomic-lo-hi.h>
4b8ac966
JP
40#include <generated/utsrelease.h>
41
0fe685f6 42#include "rocker_hw.h"
de152192
JP
43#include "rocker.h"
44#include "rocker_tlv.h"
4b8ac966
JP
45
46static const char rocker_driver_name[] = "rocker";
47
48static const struct pci_device_id rocker_pci_id_table[] = {
49 {PCI_VDEVICE(REDHAT, PCI_DEVICE_ID_REDHAT_ROCKER), 0},
50 {0, }
51};
52
9f6bbf7c
SF
53struct rocker_flow_tbl_key {
54 u32 priority;
55 enum rocker_of_dpa_table_id tbl_id;
56 union {
57 struct {
4a6bb6d3
SF
58 u32 in_pport;
59 u32 in_pport_mask;
9f6bbf7c
SF
60 enum rocker_of_dpa_table_id goto_tbl;
61 } ig_port;
62 struct {
4a6bb6d3 63 u32 in_pport;
9f6bbf7c
SF
64 __be16 vlan_id;
65 __be16 vlan_id_mask;
66 enum rocker_of_dpa_table_id goto_tbl;
67 bool untagged;
68 __be16 new_vlan_id;
69 } vlan;
70 struct {
4a6bb6d3
SF
71 u32 in_pport;
72 u32 in_pport_mask;
9f6bbf7c
SF
73 __be16 eth_type;
74 u8 eth_dst[ETH_ALEN];
75 u8 eth_dst_mask[ETH_ALEN];
76 __be16 vlan_id;
77 __be16 vlan_id_mask;
78 enum rocker_of_dpa_table_id goto_tbl;
79 bool copy_to_cpu;
80 } term_mac;
81 struct {
82 __be16 eth_type;
83 __be32 dst4;
84 __be32 dst4_mask;
85 enum rocker_of_dpa_table_id goto_tbl;
86 u32 group_id;
87 } ucast_routing;
88 struct {
89 u8 eth_dst[ETH_ALEN];
90 u8 eth_dst_mask[ETH_ALEN];
91 int has_eth_dst;
92 int has_eth_dst_mask;
93 __be16 vlan_id;
94 u32 tunnel_id;
95 enum rocker_of_dpa_table_id goto_tbl;
96 u32 group_id;
97 bool copy_to_cpu;
98 } bridge;
99 struct {
4a6bb6d3
SF
100 u32 in_pport;
101 u32 in_pport_mask;
9f6bbf7c
SF
102 u8 eth_src[ETH_ALEN];
103 u8 eth_src_mask[ETH_ALEN];
104 u8 eth_dst[ETH_ALEN];
105 u8 eth_dst_mask[ETH_ALEN];
106 __be16 eth_type;
107 __be16 vlan_id;
108 __be16 vlan_id_mask;
109 u8 ip_proto;
110 u8 ip_proto_mask;
111 u8 ip_tos;
112 u8 ip_tos_mask;
113 u32 group_id;
114 } acl;
115 };
116};
117
118struct rocker_flow_tbl_entry {
119 struct hlist_node entry;
c1beeef7 120 u32 cmd;
9f6bbf7c
SF
121 u64 cookie;
122 struct rocker_flow_tbl_key key;
c1beeef7 123 size_t key_len;
9f6bbf7c
SF
124 u32 key_crc32; /* key */
125};
126
127struct rocker_group_tbl_entry {
128 struct hlist_node entry;
129 u32 cmd;
130 u32 group_id; /* key */
131 u16 group_count;
132 u32 *group_ids;
133 union {
134 struct {
135 u8 pop_vlan;
136 } l2_interface;
137 struct {
138 u8 eth_src[ETH_ALEN];
139 u8 eth_dst[ETH_ALEN];
140 __be16 vlan_id;
141 u32 group_id;
142 } l2_rewrite;
143 struct {
144 u8 eth_src[ETH_ALEN];
145 u8 eth_dst[ETH_ALEN];
146 __be16 vlan_id;
147 bool ttl_check;
148 u32 group_id;
149 } l3_unicast;
150 };
151};
152
153struct rocker_fdb_tbl_entry {
154 struct hlist_node entry;
155 u32 key_crc32; /* key */
156 bool learned;
a471be41 157 unsigned long touched;
9f6bbf7c 158 struct rocker_fdb_tbl_key {
4c660496 159 struct rocker_port *rocker_port;
9f6bbf7c
SF
160 u8 addr[ETH_ALEN];
161 __be16 vlan_id;
162 } key;
163};
164
165struct rocker_internal_vlan_tbl_entry {
166 struct hlist_node entry;
167 int ifindex; /* key */
168 u32 ref_count;
169 __be16 vlan_id;
170};
171
c1beeef7
SF
172struct rocker_neigh_tbl_entry {
173 struct hlist_node entry;
174 __be32 ip_addr; /* key */
175 struct net_device *dev;
176 u32 ref_count;
177 u32 index;
178 u8 eth_dst[ETH_ALEN];
179 bool ttl_check;
180};
181
4b8ac966
JP
182struct rocker_dma_ring_info {
183 size_t size;
184 u32 head;
185 u32 tail;
186 struct rocker_desc *desc; /* mapped */
187 dma_addr_t mapaddr;
188 struct rocker_desc_info *desc_info;
189 unsigned int type;
190};
191
192struct rocker;
193
9f6bbf7c
SF
194enum {
195 ROCKER_CTRL_LINK_LOCAL_MCAST,
196 ROCKER_CTRL_LOCAL_ARP,
197 ROCKER_CTRL_IPV4_MCAST,
198 ROCKER_CTRL_IPV6_MCAST,
199 ROCKER_CTRL_DFLT_BRIDGING,
8254973f 200 ROCKER_CTRL_DFLT_OVS,
9f6bbf7c
SF
201 ROCKER_CTRL_MAX,
202};
203
204#define ROCKER_INTERNAL_VLAN_ID_BASE 0x0f00
205#define ROCKER_N_INTERNAL_VLANS 255
206#define ROCKER_VLAN_BITMAP_LEN BITS_TO_LONGS(VLAN_N_VID)
207#define ROCKER_INTERNAL_VLAN_BITMAP_LEN BITS_TO_LONGS(ROCKER_N_INTERNAL_VLANS)
208
4b8ac966
JP
209struct rocker_port {
210 struct net_device *dev;
6c707945 211 struct net_device *bridge_dev;
4b8ac966
JP
212 struct rocker *rocker;
213 unsigned int port_number;
4a6bb6d3 214 u32 pport;
9f6bbf7c 215 __be16 internal_vlan_id;
6c707945 216 int stp_state;
5111f80c 217 u32 brport_flags;
e7335703 218 unsigned long ageing_time;
9f6bbf7c
SF
219 bool ctrls[ROCKER_CTRL_MAX];
220 unsigned long vlan_bitmap[ROCKER_VLAN_BITMAP_LEN];
4b8ac966
JP
221 struct napi_struct napi_tx;
222 struct napi_struct napi_rx;
223 struct rocker_dma_ring_info tx_ring;
224 struct rocker_dma_ring_info rx_ring;
225};
226
227struct rocker {
228 struct pci_dev *pdev;
229 u8 __iomem *hw_addr;
230 struct msix_entry *msix_entries;
231 unsigned int port_count;
232 struct rocker_port **ports;
233 struct {
234 u64 id;
235 } hw;
4725ceb9 236 spinlock_t cmd_ring_lock; /* for cmd ring accesses */
4b8ac966
JP
237 struct rocker_dma_ring_info cmd_ring;
238 struct rocker_dma_ring_info event_ring;
9f6bbf7c 239 DECLARE_HASHTABLE(flow_tbl, 16);
4725ceb9 240 spinlock_t flow_tbl_lock; /* for flow tbl accesses */
9f6bbf7c
SF
241 u64 flow_tbl_next_cookie;
242 DECLARE_HASHTABLE(group_tbl, 16);
4725ceb9 243 spinlock_t group_tbl_lock; /* for group tbl accesses */
52fe3e2d 244 struct timer_list fdb_cleanup_timer;
9f6bbf7c 245 DECLARE_HASHTABLE(fdb_tbl, 16);
4725ceb9 246 spinlock_t fdb_tbl_lock; /* for fdb tbl accesses */
9f6bbf7c
SF
247 unsigned long internal_vlan_bitmap[ROCKER_INTERNAL_VLAN_BITMAP_LEN];
248 DECLARE_HASHTABLE(internal_vlan_tbl, 8);
4725ceb9 249 spinlock_t internal_vlan_tbl_lock; /* for vlan tbl accesses */
c1beeef7 250 DECLARE_HASHTABLE(neigh_tbl, 16);
4725ceb9 251 spinlock_t neigh_tbl_lock; /* for neigh tbl accesses */
c1beeef7 252 u32 neigh_tbl_next_index;
9f6bbf7c
SF
253};
254
255static const u8 zero_mac[ETH_ALEN] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
256static const u8 ff_mac[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
257static const u8 ll_mac[ETH_ALEN] = { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x00 };
258static const u8 ll_mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xf0 };
259static const u8 mcast_mac[ETH_ALEN] = { 0x01, 0x00, 0x00, 0x00, 0x00, 0x00 };
260static const u8 ipv4_mcast[ETH_ALEN] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x00 };
261static const u8 ipv4_mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0x80, 0x00, 0x00 };
262static const u8 ipv6_mcast[ETH_ALEN] = { 0x33, 0x33, 0x00, 0x00, 0x00, 0x00 };
263static const u8 ipv6_mask[ETH_ALEN] = { 0xff, 0xff, 0x00, 0x00, 0x00, 0x00 };
264
265/* Rocker priority levels for flow table entries. Higher
266 * priority match takes precedence over lower priority match.
267 */
268
269enum {
270 ROCKER_PRIORITY_UNKNOWN = 0,
271 ROCKER_PRIORITY_IG_PORT = 1,
272 ROCKER_PRIORITY_VLAN = 1,
273 ROCKER_PRIORITY_TERM_MAC_UCAST = 0,
274 ROCKER_PRIORITY_TERM_MAC_MCAST = 1,
9f6bbf7c
SF
275 ROCKER_PRIORITY_BRIDGING_VLAN_DFLT_EXACT = 1,
276 ROCKER_PRIORITY_BRIDGING_VLAN_DFLT_WILD = 2,
277 ROCKER_PRIORITY_BRIDGING_VLAN = 3,
278 ROCKER_PRIORITY_BRIDGING_TENANT_DFLT_EXACT = 1,
279 ROCKER_PRIORITY_BRIDGING_TENANT_DFLT_WILD = 2,
280 ROCKER_PRIORITY_BRIDGING_TENANT = 3,
281 ROCKER_PRIORITY_ACL_CTRL = 3,
282 ROCKER_PRIORITY_ACL_NORMAL = 2,
283 ROCKER_PRIORITY_ACL_DFLT = 1,
4b8ac966
JP
284};
285
9f6bbf7c
SF
286static bool rocker_vlan_id_is_internal(__be16 vlan_id)
287{
288 u16 start = ROCKER_INTERNAL_VLAN_ID_BASE;
289 u16 end = 0xffe;
290 u16 _vlan_id = ntohs(vlan_id);
291
292 return (_vlan_id >= start && _vlan_id <= end);
293}
294
e5054643 295static __be16 rocker_port_vid_to_vlan(const struct rocker_port *rocker_port,
9f6bbf7c
SF
296 u16 vid, bool *pop_vlan)
297{
298 __be16 vlan_id;
299
300 if (pop_vlan)
301 *pop_vlan = false;
302 vlan_id = htons(vid);
303 if (!vlan_id) {
304 vlan_id = rocker_port->internal_vlan_id;
305 if (pop_vlan)
306 *pop_vlan = true;
307 }
308
309 return vlan_id;
310}
311
e5054643 312static u16 rocker_port_vlan_to_vid(const struct rocker_port *rocker_port,
6c707945
SF
313 __be16 vlan_id)
314{
315 if (rocker_vlan_id_is_internal(vlan_id))
316 return 0;
317
318 return ntohs(vlan_id);
319}
320
e5054643 321static bool rocker_port_is_bridged(const struct rocker_port *rocker_port)
6c707945 322{
fb4bf214
JP
323 return rocker_port->bridge_dev &&
324 netif_is_bridge_master(rocker_port->bridge_dev);
8254973f
SH
325}
326
327static bool rocker_port_is_ovsed(const struct rocker_port *rocker_port)
328{
fb4bf214
JP
329 return rocker_port->bridge_dev &&
330 netif_is_ovs_master(rocker_port->bridge_dev);
6c707945
SF
331}
332
179f9a25
SF
333#define ROCKER_OP_FLAG_REMOVE BIT(0)
334#define ROCKER_OP_FLAG_NOWAIT BIT(1)
335#define ROCKER_OP_FLAG_LEARNED BIT(2)
336#define ROCKER_OP_FLAG_REFRESH BIT(3)
337
b15edf85
JP
338static void *__rocker_mem_alloc(struct switchdev_trans *trans, int flags,
339 size_t size)
c4f20321 340{
ac3dbc68 341 struct switchdev_trans_item *elem = NULL;
179f9a25
SF
342 gfp_t gfp_flags = (flags & ROCKER_OP_FLAG_NOWAIT) ?
343 GFP_ATOMIC : GFP_KERNEL;
c4f20321
SF
344
345 /* If in transaction prepare phase, allocate the memory
ac3dbc68
JP
346 * and enqueue it on a transaction. If in transaction
347 * commit phase, dequeue the memory from the transaction
c4f20321
SF
348 * rather than re-allocating the memory. The idea is the
349 * driver code paths for prepare and commit are identical
350 * so the memory allocated in the prepare phase is the
351 * memory used in the commit phase.
352 */
353
76c6f945
JP
354 if (!trans) {
355 elem = kzalloc(size + sizeof(*elem), gfp_flags);
76c6f945 356 } else if (switchdev_trans_ph_prepare(trans)) {
179f9a25 357 elem = kzalloc(size + sizeof(*elem), gfp_flags);
c4f20321
SF
358 if (!elem)
359 return NULL;
ac3dbc68 360 switchdev_trans_item_enqueue(trans, elem, kfree, elem);
76c6f945 361 } else {
ac3dbc68 362 elem = switchdev_trans_item_dequeue(trans);
c4f20321
SF
363 }
364
365 return elem ? elem + 1 : NULL;
366}
367
b15edf85
JP
368static void *rocker_kzalloc(struct switchdev_trans *trans, int flags,
369 size_t size)
c4f20321 370{
b15edf85 371 return __rocker_mem_alloc(trans, flags, size);
c4f20321
SF
372}
373
b15edf85
JP
374static void *rocker_kcalloc(struct switchdev_trans *trans, int flags,
375 size_t n, size_t size)
c4f20321 376{
b15edf85 377 return __rocker_mem_alloc(trans, flags, n * size);
c4f20321
SF
378}
379
b15edf85 380static void rocker_kfree(struct switchdev_trans *trans, const void *mem)
c4f20321 381{
ac3dbc68 382 struct switchdev_trans_item *elem;
c4f20321
SF
383
384 /* Frees are ignored if in transaction prepare phase. The
385 * memory remains on the per-port list until freed in the
386 * commit phase.
387 */
388
76c6f945 389 if (switchdev_trans_ph_prepare(trans))
c4f20321
SF
390 return;
391
ac3dbc68 392 elem = (struct switchdev_trans_item *) mem - 1;
c4f20321
SF
393 kfree(elem);
394}
395
4b8ac966
JP
396struct rocker_wait {
397 wait_queue_head_t wait;
398 bool done;
179f9a25 399 bool nowait;
4b8ac966
JP
400};
401
402static void rocker_wait_reset(struct rocker_wait *wait)
403{
404 wait->done = false;
179f9a25 405 wait->nowait = false;
4b8ac966
JP
406}
407
408static void rocker_wait_init(struct rocker_wait *wait)
409{
410 init_waitqueue_head(&wait->wait);
411 rocker_wait_reset(wait);
412}
413
c4f20321 414static struct rocker_wait *rocker_wait_create(struct rocker_port *rocker_port,
76c6f945 415 struct switchdev_trans *trans,
179f9a25 416 int flags)
4b8ac966
JP
417{
418 struct rocker_wait *wait;
419
b15edf85 420 wait = rocker_kzalloc(trans, flags, sizeof(*wait));
4b8ac966
JP
421 if (!wait)
422 return NULL;
423 rocker_wait_init(wait);
424 return wait;
425}
426
76c6f945 427static void rocker_wait_destroy(struct switchdev_trans *trans,
c4f20321 428 struct rocker_wait *wait)
4b8ac966 429{
b15edf85 430 rocker_kfree(trans, wait);
4b8ac966
JP
431}
432
433static bool rocker_wait_event_timeout(struct rocker_wait *wait,
434 unsigned long timeout)
435{
436 wait_event_timeout(wait->wait, wait->done, HZ / 10);
437 if (!wait->done)
438 return false;
439 return true;
440}
441
442static void rocker_wait_wake_up(struct rocker_wait *wait)
443{
444 wait->done = true;
445 wake_up(&wait->wait);
446}
447
e5054643 448static u32 rocker_msix_vector(const struct rocker *rocker, unsigned int vector)
4b8ac966
JP
449{
450 return rocker->msix_entries[vector].vector;
451}
452
e5054643 453static u32 rocker_msix_tx_vector(const struct rocker_port *rocker_port)
4b8ac966
JP
454{
455 return rocker_msix_vector(rocker_port->rocker,
456 ROCKER_MSIX_VEC_TX(rocker_port->port_number));
457}
458
e5054643 459static u32 rocker_msix_rx_vector(const struct rocker_port *rocker_port)
4b8ac966
JP
460{
461 return rocker_msix_vector(rocker_port->rocker,
462 ROCKER_MSIX_VEC_RX(rocker_port->port_number));
463}
464
465#define rocker_write32(rocker, reg, val) \
466 writel((val), (rocker)->hw_addr + (ROCKER_ ## reg))
467#define rocker_read32(rocker, reg) \
468 readl((rocker)->hw_addr + (ROCKER_ ## reg))
469#define rocker_write64(rocker, reg, val) \
470 writeq((val), (rocker)->hw_addr + (ROCKER_ ## reg))
471#define rocker_read64(rocker, reg) \
472 readq((rocker)->hw_addr + (ROCKER_ ## reg))
473
474/*****************************
475 * HW basic testing functions
476 *****************************/
477
e5054643 478static int rocker_reg_test(const struct rocker *rocker)
4b8ac966 479{
e5054643 480 const struct pci_dev *pdev = rocker->pdev;
4b8ac966
JP
481 u64 test_reg;
482 u64 rnd;
483
484 rnd = prandom_u32();
485 rnd >>= 1;
486 rocker_write32(rocker, TEST_REG, rnd);
487 test_reg = rocker_read32(rocker, TEST_REG);
488 if (test_reg != rnd * 2) {
489 dev_err(&pdev->dev, "unexpected 32bit register value %08llx, expected %08llx\n",
490 test_reg, rnd * 2);
491 return -EIO;
492 }
493
494 rnd = prandom_u32();
495 rnd <<= 31;
496 rnd |= prandom_u32();
497 rocker_write64(rocker, TEST_REG64, rnd);
498 test_reg = rocker_read64(rocker, TEST_REG64);
499 if (test_reg != rnd * 2) {
500 dev_err(&pdev->dev, "unexpected 64bit register value %16llx, expected %16llx\n",
501 test_reg, rnd * 2);
502 return -EIO;
503 }
504
505 return 0;
506}
507
e5054643
SH
508static int rocker_dma_test_one(const struct rocker *rocker,
509 struct rocker_wait *wait, u32 test_type,
510 dma_addr_t dma_handle, const unsigned char *buf,
511 const unsigned char *expect, size_t size)
4b8ac966 512{
e5054643 513 const struct pci_dev *pdev = rocker->pdev;
4b8ac966
JP
514 int i;
515
516 rocker_wait_reset(wait);
517 rocker_write32(rocker, TEST_DMA_CTRL, test_type);
518
519 if (!rocker_wait_event_timeout(wait, HZ / 10)) {
520 dev_err(&pdev->dev, "no interrupt received within a timeout\n");
521 return -EIO;
522 }
523
524 for (i = 0; i < size; i++) {
525 if (buf[i] != expect[i]) {
526 dev_err(&pdev->dev, "unexpected memory content %02x at byte %x\n, %02x expected",
527 buf[i], i, expect[i]);
528 return -EIO;
529 }
530 }
531 return 0;
532}
533
534#define ROCKER_TEST_DMA_BUF_SIZE (PAGE_SIZE * 4)
535#define ROCKER_TEST_DMA_FILL_PATTERN 0x96
536
e5054643 537static int rocker_dma_test_offset(const struct rocker *rocker,
4b8ac966
JP
538 struct rocker_wait *wait, int offset)
539{
540 struct pci_dev *pdev = rocker->pdev;
541 unsigned char *alloc;
542 unsigned char *buf;
543 unsigned char *expect;
544 dma_addr_t dma_handle;
545 int i;
546 int err;
547
548 alloc = kzalloc(ROCKER_TEST_DMA_BUF_SIZE * 2 + offset,
549 GFP_KERNEL | GFP_DMA);
550 if (!alloc)
551 return -ENOMEM;
552 buf = alloc + offset;
553 expect = buf + ROCKER_TEST_DMA_BUF_SIZE;
554
555 dma_handle = pci_map_single(pdev, buf, ROCKER_TEST_DMA_BUF_SIZE,
556 PCI_DMA_BIDIRECTIONAL);
557 if (pci_dma_mapping_error(pdev, dma_handle)) {
558 err = -EIO;
559 goto free_alloc;
560 }
561
562 rocker_write64(rocker, TEST_DMA_ADDR, dma_handle);
563 rocker_write32(rocker, TEST_DMA_SIZE, ROCKER_TEST_DMA_BUF_SIZE);
564
565 memset(expect, ROCKER_TEST_DMA_FILL_PATTERN, ROCKER_TEST_DMA_BUF_SIZE);
566 err = rocker_dma_test_one(rocker, wait, ROCKER_TEST_DMA_CTRL_FILL,
567 dma_handle, buf, expect,
568 ROCKER_TEST_DMA_BUF_SIZE);
569 if (err)
570 goto unmap;
571
572 memset(expect, 0, ROCKER_TEST_DMA_BUF_SIZE);
573 err = rocker_dma_test_one(rocker, wait, ROCKER_TEST_DMA_CTRL_CLEAR,
574 dma_handle, buf, expect,
575 ROCKER_TEST_DMA_BUF_SIZE);
576 if (err)
577 goto unmap;
578
579 prandom_bytes(buf, ROCKER_TEST_DMA_BUF_SIZE);
580 for (i = 0; i < ROCKER_TEST_DMA_BUF_SIZE; i++)
581 expect[i] = ~buf[i];
582 err = rocker_dma_test_one(rocker, wait, ROCKER_TEST_DMA_CTRL_INVERT,
583 dma_handle, buf, expect,
584 ROCKER_TEST_DMA_BUF_SIZE);
585 if (err)
586 goto unmap;
587
588unmap:
589 pci_unmap_single(pdev, dma_handle, ROCKER_TEST_DMA_BUF_SIZE,
590 PCI_DMA_BIDIRECTIONAL);
591free_alloc:
592 kfree(alloc);
593
594 return err;
595}
596
e5054643
SH
597static int rocker_dma_test(const struct rocker *rocker,
598 struct rocker_wait *wait)
4b8ac966
JP
599{
600 int i;
601 int err;
602
603 for (i = 0; i < 8; i++) {
604 err = rocker_dma_test_offset(rocker, wait, i);
605 if (err)
606 return err;
607 }
608 return 0;
609}
610
611static irqreturn_t rocker_test_irq_handler(int irq, void *dev_id)
612{
613 struct rocker_wait *wait = dev_id;
614
615 rocker_wait_wake_up(wait);
616
617 return IRQ_HANDLED;
618}
619
e5054643 620static int rocker_basic_hw_test(const struct rocker *rocker)
4b8ac966 621{
e5054643 622 const struct pci_dev *pdev = rocker->pdev;
4b8ac966
JP
623 struct rocker_wait wait;
624 int err;
625
626 err = rocker_reg_test(rocker);
627 if (err) {
628 dev_err(&pdev->dev, "reg test failed\n");
629 return err;
630 }
631
632 err = request_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_TEST),
633 rocker_test_irq_handler, 0,
634 rocker_driver_name, &wait);
635 if (err) {
636 dev_err(&pdev->dev, "cannot assign test irq\n");
637 return err;
638 }
639
640 rocker_wait_init(&wait);
641 rocker_write32(rocker, TEST_IRQ, ROCKER_MSIX_VEC_TEST);
642
643 if (!rocker_wait_event_timeout(&wait, HZ / 10)) {
644 dev_err(&pdev->dev, "no interrupt received within a timeout\n");
645 err = -EIO;
646 goto free_irq;
647 }
648
649 err = rocker_dma_test(rocker, &wait);
650 if (err)
651 dev_err(&pdev->dev, "dma test failed\n");
652
653free_irq:
654 free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_TEST), &wait);
655 return err;
656}
657
4b8ac966
JP
658/******************************************
659 * DMA rings and descriptors manipulations
660 ******************************************/
661
662static u32 __pos_inc(u32 pos, size_t limit)
663{
664 return ++pos == limit ? 0 : pos;
665}
666
e5054643 667static int rocker_desc_err(const struct rocker_desc_info *desc_info)
4b8ac966 668{
7eb344f8
SF
669 int err = desc_info->desc->comp_err & ~ROCKER_DMA_DESC_COMP_ERR_GEN;
670
671 switch (err) {
672 case ROCKER_OK:
673 return 0;
674 case -ROCKER_ENOENT:
675 return -ENOENT;
676 case -ROCKER_ENXIO:
677 return -ENXIO;
678 case -ROCKER_ENOMEM:
679 return -ENOMEM;
680 case -ROCKER_EEXIST:
681 return -EEXIST;
682 case -ROCKER_EINVAL:
683 return -EINVAL;
684 case -ROCKER_EMSGSIZE:
685 return -EMSGSIZE;
686 case -ROCKER_ENOTSUP:
687 return -EOPNOTSUPP;
688 case -ROCKER_ENOBUFS:
689 return -ENOBUFS;
690 }
691
692 return -EINVAL;
4b8ac966
JP
693}
694
e5054643 695static void rocker_desc_gen_clear(const struct rocker_desc_info *desc_info)
4b8ac966
JP
696{
697 desc_info->desc->comp_err &= ~ROCKER_DMA_DESC_COMP_ERR_GEN;
698}
699
e5054643 700static bool rocker_desc_gen(const struct rocker_desc_info *desc_info)
4b8ac966
JP
701{
702 u32 comp_err = desc_info->desc->comp_err;
703
704 return comp_err & ROCKER_DMA_DESC_COMP_ERR_GEN ? true : false;
705}
706
11ce2ba3
JP
707static void *
708rocker_desc_cookie_ptr_get(const struct rocker_desc_info *desc_info)
4b8ac966 709{
adedf37b 710 return (void *)(uintptr_t)desc_info->desc->cookie;
4b8ac966
JP
711}
712
e5054643 713static void rocker_desc_cookie_ptr_set(const struct rocker_desc_info *desc_info,
4b8ac966
JP
714 void *ptr)
715{
adedf37b 716 desc_info->desc->cookie = (uintptr_t) ptr;
4b8ac966
JP
717}
718
719static struct rocker_desc_info *
e5054643 720rocker_desc_head_get(const struct rocker_dma_ring_info *info)
4b8ac966
JP
721{
722 static struct rocker_desc_info *desc_info;
723 u32 head = __pos_inc(info->head, info->size);
724
725 desc_info = &info->desc_info[info->head];
726 if (head == info->tail)
727 return NULL; /* ring full */
728 desc_info->tlv_size = 0;
729 return desc_info;
730}
731
e5054643 732static void rocker_desc_commit(const struct rocker_desc_info *desc_info)
4b8ac966
JP
733{
734 desc_info->desc->buf_size = desc_info->data_size;
735 desc_info->desc->tlv_size = desc_info->tlv_size;
736}
737
e5054643 738static void rocker_desc_head_set(const struct rocker *rocker,
4b8ac966 739 struct rocker_dma_ring_info *info,
e5054643 740 const struct rocker_desc_info *desc_info)
4b8ac966
JP
741{
742 u32 head = __pos_inc(info->head, info->size);
743
744 BUG_ON(head == info->tail);
745 rocker_desc_commit(desc_info);
746 info->head = head;
747 rocker_write32(rocker, DMA_DESC_HEAD(info->type), head);
748}
749
750static struct rocker_desc_info *
751rocker_desc_tail_get(struct rocker_dma_ring_info *info)
752{
753 static struct rocker_desc_info *desc_info;
754
755 if (info->tail == info->head)
756 return NULL; /* nothing to be done between head and tail */
757 desc_info = &info->desc_info[info->tail];
758 if (!rocker_desc_gen(desc_info))
759 return NULL; /* gen bit not set, desc is not ready yet */
760 info->tail = __pos_inc(info->tail, info->size);
761 desc_info->tlv_size = desc_info->desc->tlv_size;
762 return desc_info;
763}
764
e5054643
SH
765static void rocker_dma_ring_credits_set(const struct rocker *rocker,
766 const struct rocker_dma_ring_info *info,
4b8ac966
JP
767 u32 credits)
768{
769 if (credits)
770 rocker_write32(rocker, DMA_DESC_CREDITS(info->type), credits);
771}
772
773static unsigned long rocker_dma_ring_size_fix(size_t size)
774{
775 return max(ROCKER_DMA_SIZE_MIN,
776 min(roundup_pow_of_two(size), ROCKER_DMA_SIZE_MAX));
777}
778
e5054643 779static int rocker_dma_ring_create(const struct rocker *rocker,
4b8ac966
JP
780 unsigned int type,
781 size_t size,
782 struct rocker_dma_ring_info *info)
783{
784 int i;
785
786 BUG_ON(size != rocker_dma_ring_size_fix(size));
787 info->size = size;
788 info->type = type;
789 info->head = 0;
790 info->tail = 0;
791 info->desc_info = kcalloc(info->size, sizeof(*info->desc_info),
792 GFP_KERNEL);
793 if (!info->desc_info)
794 return -ENOMEM;
795
796 info->desc = pci_alloc_consistent(rocker->pdev,
797 info->size * sizeof(*info->desc),
798 &info->mapaddr);
799 if (!info->desc) {
800 kfree(info->desc_info);
801 return -ENOMEM;
802 }
803
804 for (i = 0; i < info->size; i++)
805 info->desc_info[i].desc = &info->desc[i];
806
807 rocker_write32(rocker, DMA_DESC_CTRL(info->type),
808 ROCKER_DMA_DESC_CTRL_RESET);
809 rocker_write64(rocker, DMA_DESC_ADDR(info->type), info->mapaddr);
810 rocker_write32(rocker, DMA_DESC_SIZE(info->type), info->size);
811
812 return 0;
813}
814
e5054643
SH
815static void rocker_dma_ring_destroy(const struct rocker *rocker,
816 const struct rocker_dma_ring_info *info)
4b8ac966
JP
817{
818 rocker_write64(rocker, DMA_DESC_ADDR(info->type), 0);
819
820 pci_free_consistent(rocker->pdev,
821 info->size * sizeof(struct rocker_desc),
822 info->desc, info->mapaddr);
823 kfree(info->desc_info);
824}
825
e5054643 826static void rocker_dma_ring_pass_to_producer(const struct rocker *rocker,
4b8ac966
JP
827 struct rocker_dma_ring_info *info)
828{
829 int i;
830
831 BUG_ON(info->head || info->tail);
832
833 /* When ring is consumer, we need to advance head for each desc.
834 * That tells hw that the desc is ready to be used by it.
835 */
836 for (i = 0; i < info->size - 1; i++)
837 rocker_desc_head_set(rocker, info, &info->desc_info[i]);
838 rocker_desc_commit(&info->desc_info[i]);
839}
840
e5054643
SH
841static int rocker_dma_ring_bufs_alloc(const struct rocker *rocker,
842 const struct rocker_dma_ring_info *info,
4b8ac966
JP
843 int direction, size_t buf_size)
844{
845 struct pci_dev *pdev = rocker->pdev;
846 int i;
847 int err;
848
849 for (i = 0; i < info->size; i++) {
850 struct rocker_desc_info *desc_info = &info->desc_info[i];
851 struct rocker_desc *desc = &info->desc[i];
852 dma_addr_t dma_handle;
853 char *buf;
854
855 buf = kzalloc(buf_size, GFP_KERNEL | GFP_DMA);
856 if (!buf) {
857 err = -ENOMEM;
858 goto rollback;
859 }
860
861 dma_handle = pci_map_single(pdev, buf, buf_size, direction);
862 if (pci_dma_mapping_error(pdev, dma_handle)) {
863 kfree(buf);
864 err = -EIO;
865 goto rollback;
866 }
867
868 desc_info->data = buf;
869 desc_info->data_size = buf_size;
870 dma_unmap_addr_set(desc_info, mapaddr, dma_handle);
871
872 desc->buf_addr = dma_handle;
873 desc->buf_size = buf_size;
874 }
875 return 0;
876
877rollback:
878 for (i--; i >= 0; i--) {
e5054643 879 const struct rocker_desc_info *desc_info = &info->desc_info[i];
4b8ac966
JP
880
881 pci_unmap_single(pdev, dma_unmap_addr(desc_info, mapaddr),
882 desc_info->data_size, direction);
883 kfree(desc_info->data);
884 }
885 return err;
886}
887
e5054643
SH
888static void rocker_dma_ring_bufs_free(const struct rocker *rocker,
889 const struct rocker_dma_ring_info *info,
4b8ac966
JP
890 int direction)
891{
892 struct pci_dev *pdev = rocker->pdev;
893 int i;
894
895 for (i = 0; i < info->size; i++) {
e5054643 896 const struct rocker_desc_info *desc_info = &info->desc_info[i];
4b8ac966
JP
897 struct rocker_desc *desc = &info->desc[i];
898
899 desc->buf_addr = 0;
900 desc->buf_size = 0;
901 pci_unmap_single(pdev, dma_unmap_addr(desc_info, mapaddr),
902 desc_info->data_size, direction);
903 kfree(desc_info->data);
904 }
905}
906
907static int rocker_dma_rings_init(struct rocker *rocker)
908{
e5054643 909 const struct pci_dev *pdev = rocker->pdev;
4b8ac966
JP
910 int err;
911
912 err = rocker_dma_ring_create(rocker, ROCKER_DMA_CMD,
913 ROCKER_DMA_CMD_DEFAULT_SIZE,
914 &rocker->cmd_ring);
915 if (err) {
916 dev_err(&pdev->dev, "failed to create command dma ring\n");
917 return err;
918 }
919
920 spin_lock_init(&rocker->cmd_ring_lock);
921
922 err = rocker_dma_ring_bufs_alloc(rocker, &rocker->cmd_ring,
923 PCI_DMA_BIDIRECTIONAL, PAGE_SIZE);
924 if (err) {
925 dev_err(&pdev->dev, "failed to alloc command dma ring buffers\n");
926 goto err_dma_cmd_ring_bufs_alloc;
927 }
928
929 err = rocker_dma_ring_create(rocker, ROCKER_DMA_EVENT,
930 ROCKER_DMA_EVENT_DEFAULT_SIZE,
931 &rocker->event_ring);
932 if (err) {
933 dev_err(&pdev->dev, "failed to create event dma ring\n");
934 goto err_dma_event_ring_create;
935 }
936
937 err = rocker_dma_ring_bufs_alloc(rocker, &rocker->event_ring,
938 PCI_DMA_FROMDEVICE, PAGE_SIZE);
939 if (err) {
940 dev_err(&pdev->dev, "failed to alloc event dma ring buffers\n");
941 goto err_dma_event_ring_bufs_alloc;
942 }
943 rocker_dma_ring_pass_to_producer(rocker, &rocker->event_ring);
944 return 0;
945
946err_dma_event_ring_bufs_alloc:
947 rocker_dma_ring_destroy(rocker, &rocker->event_ring);
948err_dma_event_ring_create:
949 rocker_dma_ring_bufs_free(rocker, &rocker->cmd_ring,
950 PCI_DMA_BIDIRECTIONAL);
951err_dma_cmd_ring_bufs_alloc:
952 rocker_dma_ring_destroy(rocker, &rocker->cmd_ring);
953 return err;
954}
955
956static void rocker_dma_rings_fini(struct rocker *rocker)
957{
958 rocker_dma_ring_bufs_free(rocker, &rocker->event_ring,
959 PCI_DMA_BIDIRECTIONAL);
960 rocker_dma_ring_destroy(rocker, &rocker->event_ring);
961 rocker_dma_ring_bufs_free(rocker, &rocker->cmd_ring,
962 PCI_DMA_BIDIRECTIONAL);
963 rocker_dma_ring_destroy(rocker, &rocker->cmd_ring);
964}
965
534ba6a8 966static int rocker_dma_rx_ring_skb_map(const struct rocker_port *rocker_port,
4b8ac966
JP
967 struct rocker_desc_info *desc_info,
968 struct sk_buff *skb, size_t buf_len)
969{
534ba6a8 970 const struct rocker *rocker = rocker_port->rocker;
4b8ac966
JP
971 struct pci_dev *pdev = rocker->pdev;
972 dma_addr_t dma_handle;
973
974 dma_handle = pci_map_single(pdev, skb->data, buf_len,
975 PCI_DMA_FROMDEVICE);
976 if (pci_dma_mapping_error(pdev, dma_handle))
977 return -EIO;
978 if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_RX_FRAG_ADDR, dma_handle))
979 goto tlv_put_failure;
980 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_RX_FRAG_MAX_LEN, buf_len))
981 goto tlv_put_failure;
982 return 0;
983
984tlv_put_failure:
985 pci_unmap_single(pdev, dma_handle, buf_len, PCI_DMA_FROMDEVICE);
986 desc_info->tlv_size = 0;
987 return -EMSGSIZE;
988}
989
e5054643 990static size_t rocker_port_rx_buf_len(const struct rocker_port *rocker_port)
4b8ac966
JP
991{
992 return rocker_port->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
993}
994
534ba6a8 995static int rocker_dma_rx_ring_skb_alloc(const struct rocker_port *rocker_port,
4b8ac966
JP
996 struct rocker_desc_info *desc_info)
997{
998 struct net_device *dev = rocker_port->dev;
999 struct sk_buff *skb;
1000 size_t buf_len = rocker_port_rx_buf_len(rocker_port);
1001 int err;
1002
1003 /* Ensure that hw will see tlv_size zero in case of an error.
1004 * That tells hw to use another descriptor.
1005 */
1006 rocker_desc_cookie_ptr_set(desc_info, NULL);
1007 desc_info->tlv_size = 0;
1008
1009 skb = netdev_alloc_skb_ip_align(dev, buf_len);
1010 if (!skb)
1011 return -ENOMEM;
534ba6a8 1012 err = rocker_dma_rx_ring_skb_map(rocker_port, desc_info, skb, buf_len);
4b8ac966
JP
1013 if (err) {
1014 dev_kfree_skb_any(skb);
1015 return err;
1016 }
1017 rocker_desc_cookie_ptr_set(desc_info, skb);
1018 return 0;
1019}
1020
e5054643
SH
1021static void rocker_dma_rx_ring_skb_unmap(const struct rocker *rocker,
1022 const struct rocker_tlv **attrs)
4b8ac966
JP
1023{
1024 struct pci_dev *pdev = rocker->pdev;
1025 dma_addr_t dma_handle;
1026 size_t len;
1027
1028 if (!attrs[ROCKER_TLV_RX_FRAG_ADDR] ||
1029 !attrs[ROCKER_TLV_RX_FRAG_MAX_LEN])
1030 return;
1031 dma_handle = rocker_tlv_get_u64(attrs[ROCKER_TLV_RX_FRAG_ADDR]);
1032 len = rocker_tlv_get_u16(attrs[ROCKER_TLV_RX_FRAG_MAX_LEN]);
1033 pci_unmap_single(pdev, dma_handle, len, PCI_DMA_FROMDEVICE);
1034}
1035
e5054643
SH
1036static void rocker_dma_rx_ring_skb_free(const struct rocker *rocker,
1037 const struct rocker_desc_info *desc_info)
4b8ac966 1038{
e5054643 1039 const struct rocker_tlv *attrs[ROCKER_TLV_RX_MAX + 1];
4b8ac966
JP
1040 struct sk_buff *skb = rocker_desc_cookie_ptr_get(desc_info);
1041
1042 if (!skb)
1043 return;
1044 rocker_tlv_parse_desc(attrs, ROCKER_TLV_RX_MAX, desc_info);
1045 rocker_dma_rx_ring_skb_unmap(rocker, attrs);
1046 dev_kfree_skb_any(skb);
1047}
1048
534ba6a8 1049static int rocker_dma_rx_ring_skbs_alloc(const struct rocker_port *rocker_port)
4b8ac966 1050{
e5054643 1051 const struct rocker_dma_ring_info *rx_ring = &rocker_port->rx_ring;
534ba6a8 1052 const struct rocker *rocker = rocker_port->rocker;
4b8ac966
JP
1053 int i;
1054 int err;
1055
1056 for (i = 0; i < rx_ring->size; i++) {
534ba6a8 1057 err = rocker_dma_rx_ring_skb_alloc(rocker_port,
4b8ac966
JP
1058 &rx_ring->desc_info[i]);
1059 if (err)
1060 goto rollback;
1061 }
1062 return 0;
1063
1064rollback:
1065 for (i--; i >= 0; i--)
1066 rocker_dma_rx_ring_skb_free(rocker, &rx_ring->desc_info[i]);
1067 return err;
1068}
1069
534ba6a8 1070static void rocker_dma_rx_ring_skbs_free(const struct rocker_port *rocker_port)
4b8ac966 1071{
e5054643 1072 const struct rocker_dma_ring_info *rx_ring = &rocker_port->rx_ring;
534ba6a8 1073 const struct rocker *rocker = rocker_port->rocker;
4b8ac966
JP
1074 int i;
1075
1076 for (i = 0; i < rx_ring->size; i++)
1077 rocker_dma_rx_ring_skb_free(rocker, &rx_ring->desc_info[i]);
1078}
1079
1080static int rocker_port_dma_rings_init(struct rocker_port *rocker_port)
1081{
1082 struct rocker *rocker = rocker_port->rocker;
1083 int err;
1084
1085 err = rocker_dma_ring_create(rocker,
1086 ROCKER_DMA_TX(rocker_port->port_number),
1087 ROCKER_DMA_TX_DEFAULT_SIZE,
1088 &rocker_port->tx_ring);
1089 if (err) {
1090 netdev_err(rocker_port->dev, "failed to create tx dma ring\n");
1091 return err;
1092 }
1093
1094 err = rocker_dma_ring_bufs_alloc(rocker, &rocker_port->tx_ring,
1095 PCI_DMA_TODEVICE,
1096 ROCKER_DMA_TX_DESC_SIZE);
1097 if (err) {
1098 netdev_err(rocker_port->dev, "failed to alloc tx dma ring buffers\n");
1099 goto err_dma_tx_ring_bufs_alloc;
1100 }
1101
1102 err = rocker_dma_ring_create(rocker,
1103 ROCKER_DMA_RX(rocker_port->port_number),
1104 ROCKER_DMA_RX_DEFAULT_SIZE,
1105 &rocker_port->rx_ring);
1106 if (err) {
1107 netdev_err(rocker_port->dev, "failed to create rx dma ring\n");
1108 goto err_dma_rx_ring_create;
1109 }
1110
1111 err = rocker_dma_ring_bufs_alloc(rocker, &rocker_port->rx_ring,
1112 PCI_DMA_BIDIRECTIONAL,
1113 ROCKER_DMA_RX_DESC_SIZE);
1114 if (err) {
1115 netdev_err(rocker_port->dev, "failed to alloc rx dma ring buffers\n");
1116 goto err_dma_rx_ring_bufs_alloc;
1117 }
1118
534ba6a8 1119 err = rocker_dma_rx_ring_skbs_alloc(rocker_port);
4b8ac966
JP
1120 if (err) {
1121 netdev_err(rocker_port->dev, "failed to alloc rx dma ring skbs\n");
1122 goto err_dma_rx_ring_skbs_alloc;
1123 }
1124 rocker_dma_ring_pass_to_producer(rocker, &rocker_port->rx_ring);
1125
1126 return 0;
1127
1128err_dma_rx_ring_skbs_alloc:
1129 rocker_dma_ring_bufs_free(rocker, &rocker_port->rx_ring,
1130 PCI_DMA_BIDIRECTIONAL);
1131err_dma_rx_ring_bufs_alloc:
1132 rocker_dma_ring_destroy(rocker, &rocker_port->rx_ring);
1133err_dma_rx_ring_create:
1134 rocker_dma_ring_bufs_free(rocker, &rocker_port->tx_ring,
1135 PCI_DMA_TODEVICE);
1136err_dma_tx_ring_bufs_alloc:
1137 rocker_dma_ring_destroy(rocker, &rocker_port->tx_ring);
1138 return err;
1139}
1140
1141static void rocker_port_dma_rings_fini(struct rocker_port *rocker_port)
1142{
1143 struct rocker *rocker = rocker_port->rocker;
1144
534ba6a8 1145 rocker_dma_rx_ring_skbs_free(rocker_port);
4b8ac966
JP
1146 rocker_dma_ring_bufs_free(rocker, &rocker_port->rx_ring,
1147 PCI_DMA_BIDIRECTIONAL);
1148 rocker_dma_ring_destroy(rocker, &rocker_port->rx_ring);
1149 rocker_dma_ring_bufs_free(rocker, &rocker_port->tx_ring,
1150 PCI_DMA_TODEVICE);
1151 rocker_dma_ring_destroy(rocker, &rocker_port->tx_ring);
1152}
1153
e5054643
SH
1154static void rocker_port_set_enable(const struct rocker_port *rocker_port,
1155 bool enable)
4b8ac966
JP
1156{
1157 u64 val = rocker_read64(rocker_port->rocker, PORT_PHYS_ENABLE);
1158
1159 if (enable)
71a83a6d 1160 val |= 1ULL << rocker_port->pport;
4b8ac966 1161 else
71a83a6d 1162 val &= ~(1ULL << rocker_port->pport);
4b8ac966
JP
1163 rocker_write64(rocker_port->rocker, PORT_PHYS_ENABLE, val);
1164}
1165
1166/********************************
1167 * Interrupt handler and helpers
1168 ********************************/
1169
1170static irqreturn_t rocker_cmd_irq_handler(int irq, void *dev_id)
1171{
1172 struct rocker *rocker = dev_id;
e5054643 1173 const struct rocker_desc_info *desc_info;
4b8ac966
JP
1174 struct rocker_wait *wait;
1175 u32 credits = 0;
1176
1177 spin_lock(&rocker->cmd_ring_lock);
1178 while ((desc_info = rocker_desc_tail_get(&rocker->cmd_ring))) {
1179 wait = rocker_desc_cookie_ptr_get(desc_info);
179f9a25
SF
1180 if (wait->nowait) {
1181 rocker_desc_gen_clear(desc_info);
76c6f945 1182 rocker_wait_destroy(NULL, wait);
179f9a25
SF
1183 } else {
1184 rocker_wait_wake_up(wait);
1185 }
4b8ac966
JP
1186 credits++;
1187 }
1188 spin_unlock(&rocker->cmd_ring_lock);
1189 rocker_dma_ring_credits_set(rocker, &rocker->cmd_ring, credits);
1190
1191 return IRQ_HANDLED;
1192}
1193
e5054643 1194static void rocker_port_link_up(const struct rocker_port *rocker_port)
4b8ac966
JP
1195{
1196 netif_carrier_on(rocker_port->dev);
1197 netdev_info(rocker_port->dev, "Link is up\n");
1198}
1199
e5054643 1200static void rocker_port_link_down(const struct rocker_port *rocker_port)
4b8ac966
JP
1201{
1202 netif_carrier_off(rocker_port->dev);
1203 netdev_info(rocker_port->dev, "Link is down\n");
1204}
1205
e5054643 1206static int rocker_event_link_change(const struct rocker *rocker,
4b8ac966
JP
1207 const struct rocker_tlv *info)
1208{
e5054643 1209 const struct rocker_tlv *attrs[ROCKER_TLV_EVENT_LINK_CHANGED_MAX + 1];
4b8ac966
JP
1210 unsigned int port_number;
1211 bool link_up;
1212 struct rocker_port *rocker_port;
1213
1214 rocker_tlv_parse_nested(attrs, ROCKER_TLV_EVENT_LINK_CHANGED_MAX, info);
4a6bb6d3 1215 if (!attrs[ROCKER_TLV_EVENT_LINK_CHANGED_PPORT] ||
4b8ac966
JP
1216 !attrs[ROCKER_TLV_EVENT_LINK_CHANGED_LINKUP])
1217 return -EIO;
1218 port_number =
4a6bb6d3 1219 rocker_tlv_get_u32(attrs[ROCKER_TLV_EVENT_LINK_CHANGED_PPORT]) - 1;
4b8ac966
JP
1220 link_up = rocker_tlv_get_u8(attrs[ROCKER_TLV_EVENT_LINK_CHANGED_LINKUP]);
1221
1222 if (port_number >= rocker->port_count)
1223 return -EINVAL;
1224
1225 rocker_port = rocker->ports[port_number];
1226 if (netif_carrier_ok(rocker_port->dev) != link_up) {
1227 if (link_up)
1228 rocker_port_link_up(rocker_port);
1229 else
1230 rocker_port_link_down(rocker_port);
1231 }
1232
1233 return 0;
1234}
1235
6c707945 1236static int rocker_port_fdb(struct rocker_port *rocker_port,
76c6f945 1237 struct switchdev_trans *trans,
6c707945
SF
1238 const unsigned char *addr,
1239 __be16 vlan_id, int flags);
1240
e5054643 1241static int rocker_event_mac_vlan_seen(const struct rocker *rocker,
6c707945
SF
1242 const struct rocker_tlv *info)
1243{
e5054643 1244 const struct rocker_tlv *attrs[ROCKER_TLV_EVENT_MAC_VLAN_MAX + 1];
6c707945
SF
1245 unsigned int port_number;
1246 struct rocker_port *rocker_port;
e5054643 1247 const unsigned char *addr;
92014b97 1248 int flags = ROCKER_OP_FLAG_NOWAIT | ROCKER_OP_FLAG_LEARNED;
6c707945
SF
1249 __be16 vlan_id;
1250
1251 rocker_tlv_parse_nested(attrs, ROCKER_TLV_EVENT_MAC_VLAN_MAX, info);
4a6bb6d3 1252 if (!attrs[ROCKER_TLV_EVENT_MAC_VLAN_PPORT] ||
6c707945
SF
1253 !attrs[ROCKER_TLV_EVENT_MAC_VLAN_MAC] ||
1254 !attrs[ROCKER_TLV_EVENT_MAC_VLAN_VLAN_ID])
1255 return -EIO;
1256 port_number =
4a6bb6d3 1257 rocker_tlv_get_u32(attrs[ROCKER_TLV_EVENT_MAC_VLAN_PPORT]) - 1;
6c707945 1258 addr = rocker_tlv_data(attrs[ROCKER_TLV_EVENT_MAC_VLAN_MAC]);
9b03c71f 1259 vlan_id = rocker_tlv_get_be16(attrs[ROCKER_TLV_EVENT_MAC_VLAN_VLAN_ID]);
6c707945
SF
1260
1261 if (port_number >= rocker->port_count)
1262 return -EINVAL;
1263
1264 rocker_port = rocker->ports[port_number];
1265
1266 if (rocker_port->stp_state != BR_STATE_LEARNING &&
1267 rocker_port->stp_state != BR_STATE_FORWARDING)
1268 return 0;
1269
76c6f945 1270 return rocker_port_fdb(rocker_port, NULL, addr, vlan_id, flags);
6c707945 1271}
9f6bbf7c 1272
e5054643
SH
1273static int rocker_event_process(const struct rocker *rocker,
1274 const struct rocker_desc_info *desc_info)
4b8ac966 1275{
e5054643
SH
1276 const struct rocker_tlv *attrs[ROCKER_TLV_EVENT_MAX + 1];
1277 const struct rocker_tlv *info;
4b8ac966
JP
1278 u16 type;
1279
1280 rocker_tlv_parse_desc(attrs, ROCKER_TLV_EVENT_MAX, desc_info);
1281 if (!attrs[ROCKER_TLV_EVENT_TYPE] ||
1282 !attrs[ROCKER_TLV_EVENT_INFO])
1283 return -EIO;
1284
1285 type = rocker_tlv_get_u16(attrs[ROCKER_TLV_EVENT_TYPE]);
1286 info = attrs[ROCKER_TLV_EVENT_INFO];
1287
1288 switch (type) {
1289 case ROCKER_TLV_EVENT_TYPE_LINK_CHANGED:
1290 return rocker_event_link_change(rocker, info);
6c707945
SF
1291 case ROCKER_TLV_EVENT_TYPE_MAC_VLAN_SEEN:
1292 return rocker_event_mac_vlan_seen(rocker, info);
4b8ac966
JP
1293 }
1294
1295 return -EOPNOTSUPP;
1296}
1297
1298static irqreturn_t rocker_event_irq_handler(int irq, void *dev_id)
1299{
1300 struct rocker *rocker = dev_id;
e5054643
SH
1301 const struct pci_dev *pdev = rocker->pdev;
1302 const struct rocker_desc_info *desc_info;
4b8ac966
JP
1303 u32 credits = 0;
1304 int err;
1305
1306 while ((desc_info = rocker_desc_tail_get(&rocker->event_ring))) {
1307 err = rocker_desc_err(desc_info);
1308 if (err) {
1309 dev_err(&pdev->dev, "event desc received with err %d\n",
1310 err);
1311 } else {
1312 err = rocker_event_process(rocker, desc_info);
1313 if (err)
1314 dev_err(&pdev->dev, "event processing failed with err %d\n",
1315 err);
1316 }
1317 rocker_desc_gen_clear(desc_info);
1318 rocker_desc_head_set(rocker, &rocker->event_ring, desc_info);
1319 credits++;
1320 }
1321 rocker_dma_ring_credits_set(rocker, &rocker->event_ring, credits);
1322
1323 return IRQ_HANDLED;
1324}
1325
1326static irqreturn_t rocker_tx_irq_handler(int irq, void *dev_id)
1327{
1328 struct rocker_port *rocker_port = dev_id;
1329
1330 napi_schedule(&rocker_port->napi_tx);
1331 return IRQ_HANDLED;
1332}
1333
1334static irqreturn_t rocker_rx_irq_handler(int irq, void *dev_id)
1335{
1336 struct rocker_port *rocker_port = dev_id;
1337
1338 napi_schedule(&rocker_port->napi_rx);
1339 return IRQ_HANDLED;
1340}
1341
1342/********************
1343 * Command interface
1344 ********************/
1345
534ba6a8 1346typedef int (*rocker_cmd_prep_cb_t)(const struct rocker_port *rocker_port,
e5054643
SH
1347 struct rocker_desc_info *desc_info,
1348 void *priv);
1349
534ba6a8 1350typedef int (*rocker_cmd_proc_cb_t)(const struct rocker_port *rocker_port,
e5054643
SH
1351 const struct rocker_desc_info *desc_info,
1352 void *priv);
4b8ac966 1353
534ba6a8 1354static int rocker_cmd_exec(struct rocker_port *rocker_port,
76c6f945 1355 struct switchdev_trans *trans, int flags,
e5054643
SH
1356 rocker_cmd_prep_cb_t prepare, void *prepare_priv,
1357 rocker_cmd_proc_cb_t process, void *process_priv)
4b8ac966 1358{
534ba6a8 1359 struct rocker *rocker = rocker_port->rocker;
4b8ac966
JP
1360 struct rocker_desc_info *desc_info;
1361 struct rocker_wait *wait;
179f9a25
SF
1362 bool nowait = !!(flags & ROCKER_OP_FLAG_NOWAIT);
1363 unsigned long lock_flags;
4b8ac966
JP
1364 int err;
1365
76c6f945 1366 wait = rocker_wait_create(rocker_port, trans, flags);
4b8ac966
JP
1367 if (!wait)
1368 return -ENOMEM;
179f9a25 1369 wait->nowait = nowait;
4b8ac966 1370
179f9a25 1371 spin_lock_irqsave(&rocker->cmd_ring_lock, lock_flags);
c4f20321 1372
4b8ac966
JP
1373 desc_info = rocker_desc_head_get(&rocker->cmd_ring);
1374 if (!desc_info) {
179f9a25 1375 spin_unlock_irqrestore(&rocker->cmd_ring_lock, lock_flags);
4b8ac966
JP
1376 err = -EAGAIN;
1377 goto out;
1378 }
c4f20321 1379
534ba6a8 1380 err = prepare(rocker_port, desc_info, prepare_priv);
4b8ac966 1381 if (err) {
179f9a25 1382 spin_unlock_irqrestore(&rocker->cmd_ring_lock, lock_flags);
4b8ac966
JP
1383 goto out;
1384 }
c4f20321 1385
4b8ac966 1386 rocker_desc_cookie_ptr_set(desc_info, wait);
4b8ac966 1387
76c6f945 1388 if (!switchdev_trans_ph_prepare(trans))
c4f20321 1389 rocker_desc_head_set(rocker, &rocker->cmd_ring, desc_info);
4b8ac966 1390
179f9a25
SF
1391 spin_unlock_irqrestore(&rocker->cmd_ring_lock, lock_flags);
1392
1393 if (nowait)
1394 return 0;
c4f20321 1395
76c6f945 1396 if (!switchdev_trans_ph_prepare(trans))
c4f20321
SF
1397 if (!rocker_wait_event_timeout(wait, HZ / 10))
1398 return -EIO;
4b8ac966
JP
1399
1400 err = rocker_desc_err(desc_info);
1401 if (err)
1402 return err;
1403
1404 if (process)
534ba6a8 1405 err = process(rocker_port, desc_info, process_priv);
4b8ac966
JP
1406
1407 rocker_desc_gen_clear(desc_info);
1408out:
76c6f945 1409 rocker_wait_destroy(trans, wait);
4b8ac966
JP
1410 return err;
1411}
1412
1413static int
534ba6a8 1414rocker_cmd_get_port_settings_prep(const struct rocker_port *rocker_port,
4b8ac966
JP
1415 struct rocker_desc_info *desc_info,
1416 void *priv)
1417{
1418 struct rocker_tlv *cmd_info;
1419
1420 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
1421 ROCKER_TLV_CMD_TYPE_GET_PORT_SETTINGS))
1422 return -EMSGSIZE;
1423 cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
1424 if (!cmd_info)
1425 return -EMSGSIZE;
4a6bb6d3
SF
1426 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT,
1427 rocker_port->pport))
4b8ac966
JP
1428 return -EMSGSIZE;
1429 rocker_tlv_nest_end(desc_info, cmd_info);
1430 return 0;
1431}
1432
1433static int
534ba6a8 1434rocker_cmd_get_port_settings_ethtool_proc(const struct rocker_port *rocker_port,
e5054643 1435 const struct rocker_desc_info *desc_info,
4b8ac966
JP
1436 void *priv)
1437{
1438 struct ethtool_cmd *ecmd = priv;
e5054643
SH
1439 const struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
1440 const struct rocker_tlv *info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1];
4b8ac966
JP
1441 u32 speed;
1442 u8 duplex;
1443 u8 autoneg;
1444
1445 rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info);
1446 if (!attrs[ROCKER_TLV_CMD_INFO])
1447 return -EIO;
1448
1449 rocker_tlv_parse_nested(info_attrs, ROCKER_TLV_CMD_PORT_SETTINGS_MAX,
1450 attrs[ROCKER_TLV_CMD_INFO]);
1451 if (!info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_SPEED] ||
1452 !info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_DUPLEX] ||
1453 !info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_AUTONEG])
1454 return -EIO;
1455
1456 speed = rocker_tlv_get_u32(info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_SPEED]);
1457 duplex = rocker_tlv_get_u8(info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_DUPLEX]);
1458 autoneg = rocker_tlv_get_u8(info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_AUTONEG]);
1459
1460 ecmd->transceiver = XCVR_INTERNAL;
1461 ecmd->supported = SUPPORTED_TP;
1462 ecmd->phy_address = 0xff;
1463 ecmd->port = PORT_TP;
1464 ethtool_cmd_speed_set(ecmd, speed);
1465 ecmd->duplex = duplex ? DUPLEX_FULL : DUPLEX_HALF;
1466 ecmd->autoneg = autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE;
1467
1468 return 0;
1469}
1470
1471static int
534ba6a8 1472rocker_cmd_get_port_settings_macaddr_proc(const struct rocker_port *rocker_port,
e5054643 1473 const struct rocker_desc_info *desc_info,
4b8ac966
JP
1474 void *priv)
1475{
1476 unsigned char *macaddr = priv;
e5054643
SH
1477 const struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
1478 const struct rocker_tlv *info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1];
1479 const struct rocker_tlv *attr;
4b8ac966
JP
1480
1481 rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info);
1482 if (!attrs[ROCKER_TLV_CMD_INFO])
1483 return -EIO;
1484
1485 rocker_tlv_parse_nested(info_attrs, ROCKER_TLV_CMD_PORT_SETTINGS_MAX,
1486 attrs[ROCKER_TLV_CMD_INFO]);
1487 attr = info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MACADDR];
1488 if (!attr)
1489 return -EIO;
1490
1491 if (rocker_tlv_len(attr) != ETH_ALEN)
1492 return -EINVAL;
1493
1494 ether_addr_copy(macaddr, rocker_tlv_data(attr));
1495 return 0;
1496}
1497
e1ba3dee
JP
1498static int
1499rocker_cmd_get_port_settings_mode_proc(const struct rocker_port *rocker_port,
1500 const struct rocker_desc_info *desc_info,
1501 void *priv)
1502{
1503 u8 *p_mode = priv;
1504 const struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
1505 const struct rocker_tlv *info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1];
1506 const struct rocker_tlv *attr;
1507
1508 rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info);
1509 if (!attrs[ROCKER_TLV_CMD_INFO])
1510 return -EIO;
1511
1512 rocker_tlv_parse_nested(info_attrs, ROCKER_TLV_CMD_PORT_SETTINGS_MAX,
1513 attrs[ROCKER_TLV_CMD_INFO]);
1514 attr = info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MODE];
1515 if (!attr)
1516 return -EIO;
1517
1518 *p_mode = rocker_tlv_get_u8(info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MODE]);
1519 return 0;
1520}
1521
db19170b
DA
1522struct port_name {
1523 char *buf;
1524 size_t len;
1525};
1526
1527static int
534ba6a8 1528rocker_cmd_get_port_settings_phys_name_proc(const struct rocker_port *rocker_port,
e5054643 1529 const struct rocker_desc_info *desc_info,
db19170b
DA
1530 void *priv)
1531{
e5054643
SH
1532 const struct rocker_tlv *info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1];
1533 const struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
db19170b 1534 struct port_name *name = priv;
e5054643 1535 const struct rocker_tlv *attr;
db19170b 1536 size_t i, j, len;
e5054643 1537 const char *str;
db19170b
DA
1538
1539 rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info);
1540 if (!attrs[ROCKER_TLV_CMD_INFO])
1541 return -EIO;
1542
1543 rocker_tlv_parse_nested(info_attrs, ROCKER_TLV_CMD_PORT_SETTINGS_MAX,
1544 attrs[ROCKER_TLV_CMD_INFO]);
1545 attr = info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_PHYS_NAME];
1546 if (!attr)
1547 return -EIO;
1548
1549 len = min_t(size_t, rocker_tlv_len(attr), name->len);
1550 str = rocker_tlv_data(attr);
1551
1552 /* make sure name only contains alphanumeric characters */
1553 for (i = j = 0; i < len; ++i) {
1554 if (isalnum(str[i])) {
1555 name->buf[j] = str[i];
1556 j++;
1557 }
1558 }
1559
1560 if (j == 0)
1561 return -EIO;
1562
1563 name->buf[j] = '\0';
1564
1565 return 0;
1566}
1567
4b8ac966 1568static int
534ba6a8 1569rocker_cmd_set_port_settings_ethtool_prep(const struct rocker_port *rocker_port,
4b8ac966
JP
1570 struct rocker_desc_info *desc_info,
1571 void *priv)
1572{
1573 struct ethtool_cmd *ecmd = priv;
1574 struct rocker_tlv *cmd_info;
1575
1576 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
1577 ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS))
1578 return -EMSGSIZE;
1579 cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
1580 if (!cmd_info)
1581 return -EMSGSIZE;
4a6bb6d3
SF
1582 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT,
1583 rocker_port->pport))
4b8ac966
JP
1584 return -EMSGSIZE;
1585 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_SPEED,
1586 ethtool_cmd_speed(ecmd)))
1587 return -EMSGSIZE;
1588 if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_DUPLEX,
1589 ecmd->duplex))
1590 return -EMSGSIZE;
1591 if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_AUTONEG,
1592 ecmd->autoneg))
1593 return -EMSGSIZE;
1594 rocker_tlv_nest_end(desc_info, cmd_info);
1595 return 0;
1596}
1597
1598static int
534ba6a8 1599rocker_cmd_set_port_settings_macaddr_prep(const struct rocker_port *rocker_port,
4b8ac966
JP
1600 struct rocker_desc_info *desc_info,
1601 void *priv)
1602{
e5054643 1603 const unsigned char *macaddr = priv;
4b8ac966
JP
1604 struct rocker_tlv *cmd_info;
1605
1606 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
1607 ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS))
1608 return -EMSGSIZE;
1609 cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
1610 if (!cmd_info)
1611 return -EMSGSIZE;
4a6bb6d3
SF
1612 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT,
1613 rocker_port->pport))
4b8ac966
JP
1614 return -EMSGSIZE;
1615 if (rocker_tlv_put(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_MACADDR,
1616 ETH_ALEN, macaddr))
1617 return -EMSGSIZE;
1618 rocker_tlv_nest_end(desc_info, cmd_info);
1619 return 0;
1620}
1621
77a58c74
SF
1622static int
1623rocker_cmd_set_port_settings_mtu_prep(const struct rocker_port *rocker_port,
1624 struct rocker_desc_info *desc_info,
1625 void *priv)
1626{
1627 int mtu = *(int *)priv;
1628 struct rocker_tlv *cmd_info;
1629
1630 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
1631 ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS))
1632 return -EMSGSIZE;
1633 cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
1634 if (!cmd_info)
1635 return -EMSGSIZE;
1636 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT,
1637 rocker_port->pport))
1638 return -EMSGSIZE;
1639 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_MTU,
1640 mtu))
1641 return -EMSGSIZE;
1642 rocker_tlv_nest_end(desc_info, cmd_info);
1643 return 0;
1644}
1645
5111f80c 1646static int
534ba6a8 1647rocker_cmd_set_port_learning_prep(const struct rocker_port *rocker_port,
5111f80c
SF
1648 struct rocker_desc_info *desc_info,
1649 void *priv)
1650{
1651 struct rocker_tlv *cmd_info;
1652
1653 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
1654 ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS))
1655 return -EMSGSIZE;
1656 cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
1657 if (!cmd_info)
1658 return -EMSGSIZE;
4a6bb6d3
SF
1659 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT,
1660 rocker_port->pport))
5111f80c
SF
1661 return -EMSGSIZE;
1662 if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_LEARNING,
1663 !!(rocker_port->brport_flags & BR_LEARNING)))
1664 return -EMSGSIZE;
1665 rocker_tlv_nest_end(desc_info, cmd_info);
1666 return 0;
1667}
1668
4b8ac966
JP
1669static int rocker_cmd_get_port_settings_ethtool(struct rocker_port *rocker_port,
1670 struct ethtool_cmd *ecmd)
1671{
76c6f945 1672 return rocker_cmd_exec(rocker_port, NULL, 0,
4b8ac966
JP
1673 rocker_cmd_get_port_settings_prep, NULL,
1674 rocker_cmd_get_port_settings_ethtool_proc,
c4f20321 1675 ecmd);
4b8ac966
JP
1676}
1677
1678static int rocker_cmd_get_port_settings_macaddr(struct rocker_port *rocker_port,
1679 unsigned char *macaddr)
1680{
76c6f945 1681 return rocker_cmd_exec(rocker_port, NULL, 0,
4b8ac966
JP
1682 rocker_cmd_get_port_settings_prep, NULL,
1683 rocker_cmd_get_port_settings_macaddr_proc,
c4f20321 1684 macaddr);
4b8ac966
JP
1685}
1686
e1ba3dee
JP
1687static int rocker_cmd_get_port_settings_mode(struct rocker_port *rocker_port,
1688 u8 *p_mode)
1689{
1690 return rocker_cmd_exec(rocker_port, NULL, 0,
1691 rocker_cmd_get_port_settings_prep, NULL,
1692 rocker_cmd_get_port_settings_mode_proc, p_mode);
1693}
1694
4b8ac966
JP
1695static int rocker_cmd_set_port_settings_ethtool(struct rocker_port *rocker_port,
1696 struct ethtool_cmd *ecmd)
1697{
76c6f945 1698 return rocker_cmd_exec(rocker_port, NULL, 0,
4b8ac966 1699 rocker_cmd_set_port_settings_ethtool_prep,
c4f20321 1700 ecmd, NULL, NULL);
4b8ac966
JP
1701}
1702
1703static int rocker_cmd_set_port_settings_macaddr(struct rocker_port *rocker_port,
1704 unsigned char *macaddr)
1705{
76c6f945 1706 return rocker_cmd_exec(rocker_port, NULL, 0,
4b8ac966 1707 rocker_cmd_set_port_settings_macaddr_prep,
c4f20321 1708 macaddr, NULL, NULL);
4b8ac966
JP
1709}
1710
77a58c74
SF
1711static int rocker_cmd_set_port_settings_mtu(struct rocker_port *rocker_port,
1712 int mtu)
1713{
76c6f945 1714 return rocker_cmd_exec(rocker_port, NULL, 0,
77a58c74
SF
1715 rocker_cmd_set_port_settings_mtu_prep,
1716 &mtu, NULL, NULL);
1717}
1718
c4f20321 1719static int rocker_port_set_learning(struct rocker_port *rocker_port,
76c6f945 1720 struct switchdev_trans *trans)
5111f80c 1721{
76c6f945 1722 return rocker_cmd_exec(rocker_port, trans, 0,
5111f80c 1723 rocker_cmd_set_port_learning_prep,
c4f20321 1724 NULL, NULL, NULL);
5111f80c
SF
1725}
1726
e5054643
SH
1727static int
1728rocker_cmd_flow_tbl_add_ig_port(struct rocker_desc_info *desc_info,
1729 const struct rocker_flow_tbl_entry *entry)
9f6bbf7c 1730{
4a6bb6d3
SF
1731 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
1732 entry->key.ig_port.in_pport))
9f6bbf7c 1733 return -EMSGSIZE;
4a6bb6d3
SF
1734 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT_MASK,
1735 entry->key.ig_port.in_pport_mask))
9f6bbf7c
SF
1736 return -EMSGSIZE;
1737 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
1738 entry->key.ig_port.goto_tbl))
1739 return -EMSGSIZE;
1740
1741 return 0;
1742}
1743
e5054643
SH
1744static int
1745rocker_cmd_flow_tbl_add_vlan(struct rocker_desc_info *desc_info,
1746 const struct rocker_flow_tbl_entry *entry)
9f6bbf7c 1747{
4a6bb6d3
SF
1748 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
1749 entry->key.vlan.in_pport))
9f6bbf7c 1750 return -EMSGSIZE;
9b03c71f
JP
1751 if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
1752 entry->key.vlan.vlan_id))
9f6bbf7c 1753 return -EMSGSIZE;
9b03c71f
JP
1754 if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID_MASK,
1755 entry->key.vlan.vlan_id_mask))
9f6bbf7c
SF
1756 return -EMSGSIZE;
1757 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
1758 entry->key.vlan.goto_tbl))
1759 return -EMSGSIZE;
1760 if (entry->key.vlan.untagged &&
9b03c71f
JP
1761 rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_NEW_VLAN_ID,
1762 entry->key.vlan.new_vlan_id))
9f6bbf7c
SF
1763 return -EMSGSIZE;
1764
1765 return 0;
1766}
1767
e5054643
SH
1768static int
1769rocker_cmd_flow_tbl_add_term_mac(struct rocker_desc_info *desc_info,
1770 const struct rocker_flow_tbl_entry *entry)
9f6bbf7c 1771{
4a6bb6d3
SF
1772 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
1773 entry->key.term_mac.in_pport))
9f6bbf7c 1774 return -EMSGSIZE;
4a6bb6d3
SF
1775 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT_MASK,
1776 entry->key.term_mac.in_pport_mask))
9f6bbf7c 1777 return -EMSGSIZE;
9b03c71f
JP
1778 if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE,
1779 entry->key.term_mac.eth_type))
9f6bbf7c
SF
1780 return -EMSGSIZE;
1781 if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
1782 ETH_ALEN, entry->key.term_mac.eth_dst))
1783 return -EMSGSIZE;
1784 if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC_MASK,
1785 ETH_ALEN, entry->key.term_mac.eth_dst_mask))
1786 return -EMSGSIZE;
9b03c71f
JP
1787 if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
1788 entry->key.term_mac.vlan_id))
9f6bbf7c 1789 return -EMSGSIZE;
9b03c71f
JP
1790 if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID_MASK,
1791 entry->key.term_mac.vlan_id_mask))
9f6bbf7c
SF
1792 return -EMSGSIZE;
1793 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
1794 entry->key.term_mac.goto_tbl))
1795 return -EMSGSIZE;
1796 if (entry->key.term_mac.copy_to_cpu &&
1797 rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_COPY_CPU_ACTION,
1798 entry->key.term_mac.copy_to_cpu))
1799 return -EMSGSIZE;
1800
1801 return 0;
1802}
1803
1804static int
1805rocker_cmd_flow_tbl_add_ucast_routing(struct rocker_desc_info *desc_info,
e5054643 1806 const struct rocker_flow_tbl_entry *entry)
9f6bbf7c 1807{
9b03c71f
JP
1808 if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE,
1809 entry->key.ucast_routing.eth_type))
9f6bbf7c 1810 return -EMSGSIZE;
9b03c71f
JP
1811 if (rocker_tlv_put_be32(desc_info, ROCKER_TLV_OF_DPA_DST_IP,
1812 entry->key.ucast_routing.dst4))
9f6bbf7c 1813 return -EMSGSIZE;
9b03c71f
JP
1814 if (rocker_tlv_put_be32(desc_info, ROCKER_TLV_OF_DPA_DST_IP_MASK,
1815 entry->key.ucast_routing.dst4_mask))
9f6bbf7c
SF
1816 return -EMSGSIZE;
1817 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
1818 entry->key.ucast_routing.goto_tbl))
1819 return -EMSGSIZE;
1820 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
1821 entry->key.ucast_routing.group_id))
1822 return -EMSGSIZE;
1823
1824 return 0;
1825}
1826
e5054643
SH
1827static int
1828rocker_cmd_flow_tbl_add_bridge(struct rocker_desc_info *desc_info,
1829 const struct rocker_flow_tbl_entry *entry)
9f6bbf7c
SF
1830{
1831 if (entry->key.bridge.has_eth_dst &&
1832 rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
1833 ETH_ALEN, entry->key.bridge.eth_dst))
1834 return -EMSGSIZE;
1835 if (entry->key.bridge.has_eth_dst_mask &&
1836 rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC_MASK,
1837 ETH_ALEN, entry->key.bridge.eth_dst_mask))
1838 return -EMSGSIZE;
1839 if (entry->key.bridge.vlan_id &&
9b03c71f
JP
1840 rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
1841 entry->key.bridge.vlan_id))
9f6bbf7c
SF
1842 return -EMSGSIZE;
1843 if (entry->key.bridge.tunnel_id &&
1844 rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_TUNNEL_ID,
1845 entry->key.bridge.tunnel_id))
1846 return -EMSGSIZE;
1847 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
1848 entry->key.bridge.goto_tbl))
1849 return -EMSGSIZE;
1850 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
1851 entry->key.bridge.group_id))
1852 return -EMSGSIZE;
1853 if (entry->key.bridge.copy_to_cpu &&
1854 rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_COPY_CPU_ACTION,
1855 entry->key.bridge.copy_to_cpu))
1856 return -EMSGSIZE;
1857
1858 return 0;
1859}
1860
e5054643
SH
1861static int
1862rocker_cmd_flow_tbl_add_acl(struct rocker_desc_info *desc_info,
1863 const struct rocker_flow_tbl_entry *entry)
9f6bbf7c 1864{
4a6bb6d3
SF
1865 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
1866 entry->key.acl.in_pport))
9f6bbf7c 1867 return -EMSGSIZE;
4a6bb6d3
SF
1868 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT_MASK,
1869 entry->key.acl.in_pport_mask))
9f6bbf7c
SF
1870 return -EMSGSIZE;
1871 if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC,
1872 ETH_ALEN, entry->key.acl.eth_src))
1873 return -EMSGSIZE;
1874 if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC_MASK,
1875 ETH_ALEN, entry->key.acl.eth_src_mask))
1876 return -EMSGSIZE;
1877 if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
1878 ETH_ALEN, entry->key.acl.eth_dst))
1879 return -EMSGSIZE;
1880 if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC_MASK,
1881 ETH_ALEN, entry->key.acl.eth_dst_mask))
1882 return -EMSGSIZE;
9b03c71f
JP
1883 if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE,
1884 entry->key.acl.eth_type))
9f6bbf7c 1885 return -EMSGSIZE;
9b03c71f
JP
1886 if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
1887 entry->key.acl.vlan_id))
9f6bbf7c 1888 return -EMSGSIZE;
9b03c71f
JP
1889 if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID_MASK,
1890 entry->key.acl.vlan_id_mask))
9f6bbf7c
SF
1891 return -EMSGSIZE;
1892
1893 switch (ntohs(entry->key.acl.eth_type)) {
1894 case ETH_P_IP:
1895 case ETH_P_IPV6:
1896 if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_IP_PROTO,
1897 entry->key.acl.ip_proto))
1898 return -EMSGSIZE;
1899 if (rocker_tlv_put_u8(desc_info,
1900 ROCKER_TLV_OF_DPA_IP_PROTO_MASK,
1901 entry->key.acl.ip_proto_mask))
1902 return -EMSGSIZE;
1903 if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_IP_DSCP,
1904 entry->key.acl.ip_tos & 0x3f))
1905 return -EMSGSIZE;
1906 if (rocker_tlv_put_u8(desc_info,
1907 ROCKER_TLV_OF_DPA_IP_DSCP_MASK,
1908 entry->key.acl.ip_tos_mask & 0x3f))
1909 return -EMSGSIZE;
1910 if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_IP_ECN,
1911 (entry->key.acl.ip_tos & 0xc0) >> 6))
1912 return -EMSGSIZE;
1913 if (rocker_tlv_put_u8(desc_info,
1914 ROCKER_TLV_OF_DPA_IP_ECN_MASK,
1915 (entry->key.acl.ip_tos_mask & 0xc0) >> 6))
1916 return -EMSGSIZE;
1917 break;
1918 }
1919
1920 if (entry->key.acl.group_id != ROCKER_GROUP_NONE &&
1921 rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
1922 entry->key.acl.group_id))
1923 return -EMSGSIZE;
1924
1925 return 0;
1926}
1927
534ba6a8 1928static int rocker_cmd_flow_tbl_add(const struct rocker_port *rocker_port,
9f6bbf7c
SF
1929 struct rocker_desc_info *desc_info,
1930 void *priv)
1931{
e5054643 1932 const struct rocker_flow_tbl_entry *entry = priv;
9f6bbf7c
SF
1933 struct rocker_tlv *cmd_info;
1934 int err = 0;
1935
c1beeef7 1936 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd))
9f6bbf7c
SF
1937 return -EMSGSIZE;
1938 cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
1939 if (!cmd_info)
1940 return -EMSGSIZE;
1941 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_TABLE_ID,
1942 entry->key.tbl_id))
1943 return -EMSGSIZE;
1944 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_PRIORITY,
1945 entry->key.priority))
1946 return -EMSGSIZE;
1947 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_HARDTIME, 0))
1948 return -EMSGSIZE;
1949 if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_OF_DPA_COOKIE,
1950 entry->cookie))
1951 return -EMSGSIZE;
1952
1953 switch (entry->key.tbl_id) {
1954 case ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT:
1955 err = rocker_cmd_flow_tbl_add_ig_port(desc_info, entry);
1956 break;
1957 case ROCKER_OF_DPA_TABLE_ID_VLAN:
1958 err = rocker_cmd_flow_tbl_add_vlan(desc_info, entry);
1959 break;
1960 case ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC:
1961 err = rocker_cmd_flow_tbl_add_term_mac(desc_info, entry);
1962 break;
1963 case ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING:
1964 err = rocker_cmd_flow_tbl_add_ucast_routing(desc_info, entry);
1965 break;
1966 case ROCKER_OF_DPA_TABLE_ID_BRIDGING:
1967 err = rocker_cmd_flow_tbl_add_bridge(desc_info, entry);
1968 break;
1969 case ROCKER_OF_DPA_TABLE_ID_ACL_POLICY:
1970 err = rocker_cmd_flow_tbl_add_acl(desc_info, entry);
1971 break;
1972 default:
1973 err = -ENOTSUPP;
1974 break;
1975 }
1976
1977 if (err)
1978 return err;
1979
1980 rocker_tlv_nest_end(desc_info, cmd_info);
1981
1982 return 0;
1983}
1984
534ba6a8 1985static int rocker_cmd_flow_tbl_del(const struct rocker_port *rocker_port,
9f6bbf7c
SF
1986 struct rocker_desc_info *desc_info,
1987 void *priv)
1988{
1989 const struct rocker_flow_tbl_entry *entry = priv;
1990 struct rocker_tlv *cmd_info;
1991
c1beeef7 1992 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd))
9f6bbf7c
SF
1993 return -EMSGSIZE;
1994 cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
1995 if (!cmd_info)
1996 return -EMSGSIZE;
1997 if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_OF_DPA_COOKIE,
1998 entry->cookie))
1999 return -EMSGSIZE;
2000 rocker_tlv_nest_end(desc_info, cmd_info);
2001
2002 return 0;
2003}
2004
2005static int
2006rocker_cmd_group_tbl_add_l2_interface(struct rocker_desc_info *desc_info,
2007 struct rocker_group_tbl_entry *entry)
2008{
4a6bb6d3 2009 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_OUT_PPORT,
9f6bbf7c
SF
2010 ROCKER_GROUP_PORT_GET(entry->group_id)))
2011 return -EMSGSIZE;
2012 if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_POP_VLAN,
2013 entry->l2_interface.pop_vlan))
2014 return -EMSGSIZE;
2015
2016 return 0;
2017}
2018
2019static int
2020rocker_cmd_group_tbl_add_l2_rewrite(struct rocker_desc_info *desc_info,
e5054643 2021 const struct rocker_group_tbl_entry *entry)
9f6bbf7c
SF
2022{
2023 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID_LOWER,
2024 entry->l2_rewrite.group_id))
2025 return -EMSGSIZE;
2026 if (!is_zero_ether_addr(entry->l2_rewrite.eth_src) &&
2027 rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC,
2028 ETH_ALEN, entry->l2_rewrite.eth_src))
2029 return -EMSGSIZE;
2030 if (!is_zero_ether_addr(entry->l2_rewrite.eth_dst) &&
2031 rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
2032 ETH_ALEN, entry->l2_rewrite.eth_dst))
2033 return -EMSGSIZE;
2034 if (entry->l2_rewrite.vlan_id &&
9b03c71f
JP
2035 rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
2036 entry->l2_rewrite.vlan_id))
9f6bbf7c
SF
2037 return -EMSGSIZE;
2038
2039 return 0;
2040}
2041
2042static int
2043rocker_cmd_group_tbl_add_group_ids(struct rocker_desc_info *desc_info,
e5054643 2044 const struct rocker_group_tbl_entry *entry)
9f6bbf7c
SF
2045{
2046 int i;
2047 struct rocker_tlv *group_ids;
2048
2049 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GROUP_COUNT,
2050 entry->group_count))
2051 return -EMSGSIZE;
2052
2053 group_ids = rocker_tlv_nest_start(desc_info,
2054 ROCKER_TLV_OF_DPA_GROUP_IDS);
2055 if (!group_ids)
2056 return -EMSGSIZE;
2057
2058 for (i = 0; i < entry->group_count; i++)
2059 /* Note TLV array is 1-based */
2060 if (rocker_tlv_put_u32(desc_info, i + 1, entry->group_ids[i]))
2061 return -EMSGSIZE;
2062
2063 rocker_tlv_nest_end(desc_info, group_ids);
2064
2065 return 0;
2066}
2067
2068static int
2069rocker_cmd_group_tbl_add_l3_unicast(struct rocker_desc_info *desc_info,
e5054643 2070 const struct rocker_group_tbl_entry *entry)
9f6bbf7c
SF
2071{
2072 if (!is_zero_ether_addr(entry->l3_unicast.eth_src) &&
2073 rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC,
2074 ETH_ALEN, entry->l3_unicast.eth_src))
2075 return -EMSGSIZE;
2076 if (!is_zero_ether_addr(entry->l3_unicast.eth_dst) &&
2077 rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
2078 ETH_ALEN, entry->l3_unicast.eth_dst))
2079 return -EMSGSIZE;
2080 if (entry->l3_unicast.vlan_id &&
9b03c71f
JP
2081 rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
2082 entry->l3_unicast.vlan_id))
9f6bbf7c
SF
2083 return -EMSGSIZE;
2084 if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_TTL_CHECK,
2085 entry->l3_unicast.ttl_check))
2086 return -EMSGSIZE;
2087 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID_LOWER,
2088 entry->l3_unicast.group_id))
2089 return -EMSGSIZE;
2090
2091 return 0;
2092}
2093
534ba6a8 2094static int rocker_cmd_group_tbl_add(const struct rocker_port *rocker_port,
9f6bbf7c
SF
2095 struct rocker_desc_info *desc_info,
2096 void *priv)
2097{
2098 struct rocker_group_tbl_entry *entry = priv;
2099 struct rocker_tlv *cmd_info;
2100 int err = 0;
2101
2102 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd))
2103 return -EMSGSIZE;
2104 cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
2105 if (!cmd_info)
2106 return -EMSGSIZE;
2107
2108 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
2109 entry->group_id))
2110 return -EMSGSIZE;
2111
2112 switch (ROCKER_GROUP_TYPE_GET(entry->group_id)) {
2113 case ROCKER_OF_DPA_GROUP_TYPE_L2_INTERFACE:
2114 err = rocker_cmd_group_tbl_add_l2_interface(desc_info, entry);
2115 break;
2116 case ROCKER_OF_DPA_GROUP_TYPE_L2_REWRITE:
2117 err = rocker_cmd_group_tbl_add_l2_rewrite(desc_info, entry);
2118 break;
2119 case ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD:
2120 case ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST:
2121 err = rocker_cmd_group_tbl_add_group_ids(desc_info, entry);
2122 break;
2123 case ROCKER_OF_DPA_GROUP_TYPE_L3_UCAST:
2124 err = rocker_cmd_group_tbl_add_l3_unicast(desc_info, entry);
2125 break;
2126 default:
2127 err = -ENOTSUPP;
2128 break;
2129 }
2130
2131 if (err)
2132 return err;
2133
2134 rocker_tlv_nest_end(desc_info, cmd_info);
2135
2136 return 0;
2137}
2138
534ba6a8 2139static int rocker_cmd_group_tbl_del(const struct rocker_port *rocker_port,
9f6bbf7c
SF
2140 struct rocker_desc_info *desc_info,
2141 void *priv)
2142{
2143 const struct rocker_group_tbl_entry *entry = priv;
2144 struct rocker_tlv *cmd_info;
2145
2146 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd))
2147 return -EMSGSIZE;
2148 cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
2149 if (!cmd_info)
2150 return -EMSGSIZE;
2151 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
2152 entry->group_id))
2153 return -EMSGSIZE;
2154 rocker_tlv_nest_end(desc_info, cmd_info);
2155
2156 return 0;
2157}
2158
c1beeef7
SF
2159/***************************************************
2160 * Flow, group, FDB, internal VLAN and neigh tables
2161 ***************************************************/
9f6bbf7c
SF
2162
2163static int rocker_init_tbls(struct rocker *rocker)
2164{
2165 hash_init(rocker->flow_tbl);
2166 spin_lock_init(&rocker->flow_tbl_lock);
2167
2168 hash_init(rocker->group_tbl);
2169 spin_lock_init(&rocker->group_tbl_lock);
2170
2171 hash_init(rocker->fdb_tbl);
2172 spin_lock_init(&rocker->fdb_tbl_lock);
2173
2174 hash_init(rocker->internal_vlan_tbl);
2175 spin_lock_init(&rocker->internal_vlan_tbl_lock);
2176
c1beeef7
SF
2177 hash_init(rocker->neigh_tbl);
2178 spin_lock_init(&rocker->neigh_tbl_lock);
2179
9f6bbf7c
SF
2180 return 0;
2181}
2182
2183static void rocker_free_tbls(struct rocker *rocker)
2184{
2185 unsigned long flags;
2186 struct rocker_flow_tbl_entry *flow_entry;
2187 struct rocker_group_tbl_entry *group_entry;
2188 struct rocker_fdb_tbl_entry *fdb_entry;
2189 struct rocker_internal_vlan_tbl_entry *internal_vlan_entry;
c1beeef7 2190 struct rocker_neigh_tbl_entry *neigh_entry;
9f6bbf7c
SF
2191 struct hlist_node *tmp;
2192 int bkt;
2193
2194 spin_lock_irqsave(&rocker->flow_tbl_lock, flags);
2195 hash_for_each_safe(rocker->flow_tbl, bkt, tmp, flow_entry, entry)
2196 hash_del(&flow_entry->entry);
2197 spin_unlock_irqrestore(&rocker->flow_tbl_lock, flags);
2198
2199 spin_lock_irqsave(&rocker->group_tbl_lock, flags);
2200 hash_for_each_safe(rocker->group_tbl, bkt, tmp, group_entry, entry)
2201 hash_del(&group_entry->entry);
2202 spin_unlock_irqrestore(&rocker->group_tbl_lock, flags);
2203
2204 spin_lock_irqsave(&rocker->fdb_tbl_lock, flags);
2205 hash_for_each_safe(rocker->fdb_tbl, bkt, tmp, fdb_entry, entry)
2206 hash_del(&fdb_entry->entry);
2207 spin_unlock_irqrestore(&rocker->fdb_tbl_lock, flags);
2208
2209 spin_lock_irqsave(&rocker->internal_vlan_tbl_lock, flags);
2210 hash_for_each_safe(rocker->internal_vlan_tbl, bkt,
2211 tmp, internal_vlan_entry, entry)
2212 hash_del(&internal_vlan_entry->entry);
2213 spin_unlock_irqrestore(&rocker->internal_vlan_tbl_lock, flags);
c1beeef7
SF
2214
2215 spin_lock_irqsave(&rocker->neigh_tbl_lock, flags);
2216 hash_for_each_safe(rocker->neigh_tbl, bkt, tmp, neigh_entry, entry)
2217 hash_del(&neigh_entry->entry);
2218 spin_unlock_irqrestore(&rocker->neigh_tbl_lock, flags);
9f6bbf7c
SF
2219}
2220
2221static struct rocker_flow_tbl_entry *
e5054643
SH
2222rocker_flow_tbl_find(const struct rocker *rocker,
2223 const struct rocker_flow_tbl_entry *match)
9f6bbf7c
SF
2224{
2225 struct rocker_flow_tbl_entry *found;
c1beeef7 2226 size_t key_len = match->key_len ? match->key_len : sizeof(found->key);
9f6bbf7c
SF
2227
2228 hash_for_each_possible(rocker->flow_tbl, found,
2229 entry, match->key_crc32) {
c1beeef7 2230 if (memcmp(&found->key, &match->key, key_len) == 0)
9f6bbf7c
SF
2231 return found;
2232 }
2233
2234 return NULL;
2235}
2236
2237static int rocker_flow_tbl_add(struct rocker_port *rocker_port,
76c6f945 2238 struct switchdev_trans *trans, int flags,
c4f20321 2239 struct rocker_flow_tbl_entry *match)
9f6bbf7c
SF
2240{
2241 struct rocker *rocker = rocker_port->rocker;
2242 struct rocker_flow_tbl_entry *found;
c1beeef7 2243 size_t key_len = match->key_len ? match->key_len : sizeof(found->key);
179f9a25 2244 unsigned long lock_flags;
9f6bbf7c 2245
c1beeef7 2246 match->key_crc32 = crc32(~0, &match->key, key_len);
9f6bbf7c 2247
179f9a25 2248 spin_lock_irqsave(&rocker->flow_tbl_lock, lock_flags);
9f6bbf7c
SF
2249
2250 found = rocker_flow_tbl_find(rocker, match);
2251
2252 if (found) {
c1beeef7 2253 match->cookie = found->cookie;
76c6f945 2254 if (!switchdev_trans_ph_prepare(trans))
c4f20321 2255 hash_del(&found->entry);
b15edf85 2256 rocker_kfree(trans, found);
c1beeef7
SF
2257 found = match;
2258 found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_MOD;
9f6bbf7c
SF
2259 } else {
2260 found = match;
2261 found->cookie = rocker->flow_tbl_next_cookie++;
c1beeef7 2262 found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_ADD;
9f6bbf7c
SF
2263 }
2264
76c6f945 2265 if (!switchdev_trans_ph_prepare(trans))
c4f20321 2266 hash_add(rocker->flow_tbl, &found->entry, found->key_crc32);
9f6bbf7c 2267
179f9a25 2268 spin_unlock_irqrestore(&rocker->flow_tbl_lock, lock_flags);
9f6bbf7c 2269
76c6f945 2270 return rocker_cmd_exec(rocker_port, trans, flags,
179f9a25 2271 rocker_cmd_flow_tbl_add, found, NULL, NULL);
9f6bbf7c
SF
2272}
2273
2274static int rocker_flow_tbl_del(struct rocker_port *rocker_port,
76c6f945 2275 struct switchdev_trans *trans, int flags,
c4f20321 2276 struct rocker_flow_tbl_entry *match)
9f6bbf7c
SF
2277{
2278 struct rocker *rocker = rocker_port->rocker;
2279 struct rocker_flow_tbl_entry *found;
c1beeef7 2280 size_t key_len = match->key_len ? match->key_len : sizeof(found->key);
179f9a25 2281 unsigned long lock_flags;
9f6bbf7c
SF
2282 int err = 0;
2283
c1beeef7 2284 match->key_crc32 = crc32(~0, &match->key, key_len);
9f6bbf7c 2285
179f9a25 2286 spin_lock_irqsave(&rocker->flow_tbl_lock, lock_flags);
9f6bbf7c
SF
2287
2288 found = rocker_flow_tbl_find(rocker, match);
2289
2290 if (found) {
76c6f945 2291 if (!switchdev_trans_ph_prepare(trans))
c4f20321 2292 hash_del(&found->entry);
c1beeef7 2293 found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_DEL;
9f6bbf7c
SF
2294 }
2295
179f9a25 2296 spin_unlock_irqrestore(&rocker->flow_tbl_lock, lock_flags);
9f6bbf7c 2297
b15edf85 2298 rocker_kfree(trans, match);
9f6bbf7c 2299
c1beeef7 2300 if (found) {
76c6f945 2301 err = rocker_cmd_exec(rocker_port, trans, flags,
9f6bbf7c 2302 rocker_cmd_flow_tbl_del,
c4f20321 2303 found, NULL, NULL);
b15edf85 2304 rocker_kfree(trans, found);
9f6bbf7c
SF
2305 }
2306
2307 return err;
2308}
2309
9f6bbf7c 2310static int rocker_flow_tbl_do(struct rocker_port *rocker_port,
76c6f945 2311 struct switchdev_trans *trans, int flags,
c4f20321 2312 struct rocker_flow_tbl_entry *entry)
9f6bbf7c 2313{
9f6bbf7c 2314 if (flags & ROCKER_OP_FLAG_REMOVE)
76c6f945 2315 return rocker_flow_tbl_del(rocker_port, trans, flags, entry);
9f6bbf7c 2316 else
76c6f945 2317 return rocker_flow_tbl_add(rocker_port, trans, flags, entry);
9f6bbf7c
SF
2318}
2319
2320static int rocker_flow_tbl_ig_port(struct rocker_port *rocker_port,
76c6f945 2321 struct switchdev_trans *trans, int flags,
c4f20321 2322 u32 in_pport, u32 in_pport_mask,
9f6bbf7c
SF
2323 enum rocker_of_dpa_table_id goto_tbl)
2324{
2325 struct rocker_flow_tbl_entry *entry;
2326
b15edf85 2327 entry = rocker_kzalloc(trans, flags, sizeof(*entry));
9f6bbf7c
SF
2328 if (!entry)
2329 return -ENOMEM;
2330
2331 entry->key.priority = ROCKER_PRIORITY_IG_PORT;
2332 entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT;
4a6bb6d3
SF
2333 entry->key.ig_port.in_pport = in_pport;
2334 entry->key.ig_port.in_pport_mask = in_pport_mask;
9f6bbf7c
SF
2335 entry->key.ig_port.goto_tbl = goto_tbl;
2336
76c6f945 2337 return rocker_flow_tbl_do(rocker_port, trans, flags, entry);
9f6bbf7c
SF
2338}
2339
2340static int rocker_flow_tbl_vlan(struct rocker_port *rocker_port,
76c6f945 2341 struct switchdev_trans *trans, int flags,
c4f20321
SF
2342 u32 in_pport, __be16 vlan_id,
2343 __be16 vlan_id_mask,
9f6bbf7c
SF
2344 enum rocker_of_dpa_table_id goto_tbl,
2345 bool untagged, __be16 new_vlan_id)
2346{
2347 struct rocker_flow_tbl_entry *entry;
2348
b15edf85 2349 entry = rocker_kzalloc(trans, flags, sizeof(*entry));
9f6bbf7c
SF
2350 if (!entry)
2351 return -ENOMEM;
2352
2353 entry->key.priority = ROCKER_PRIORITY_VLAN;
2354 entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_VLAN;
4a6bb6d3 2355 entry->key.vlan.in_pport = in_pport;
9f6bbf7c
SF
2356 entry->key.vlan.vlan_id = vlan_id;
2357 entry->key.vlan.vlan_id_mask = vlan_id_mask;
2358 entry->key.vlan.goto_tbl = goto_tbl;
2359
2360 entry->key.vlan.untagged = untagged;
2361 entry->key.vlan.new_vlan_id = new_vlan_id;
2362
76c6f945 2363 return rocker_flow_tbl_do(rocker_port, trans, flags, entry);
9f6bbf7c
SF
2364}
2365
2366static int rocker_flow_tbl_term_mac(struct rocker_port *rocker_port,
76c6f945 2367 struct switchdev_trans *trans,
4a6bb6d3 2368 u32 in_pport, u32 in_pport_mask,
9f6bbf7c
SF
2369 __be16 eth_type, const u8 *eth_dst,
2370 const u8 *eth_dst_mask, __be16 vlan_id,
2371 __be16 vlan_id_mask, bool copy_to_cpu,
2372 int flags)
2373{
2374 struct rocker_flow_tbl_entry *entry;
2375
b15edf85 2376 entry = rocker_kzalloc(trans, flags, sizeof(*entry));
9f6bbf7c
SF
2377 if (!entry)
2378 return -ENOMEM;
2379
2380 if (is_multicast_ether_addr(eth_dst)) {
2381 entry->key.priority = ROCKER_PRIORITY_TERM_MAC_MCAST;
2382 entry->key.term_mac.goto_tbl =
2383 ROCKER_OF_DPA_TABLE_ID_MULTICAST_ROUTING;
2384 } else {
2385 entry->key.priority = ROCKER_PRIORITY_TERM_MAC_UCAST;
2386 entry->key.term_mac.goto_tbl =
2387 ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING;
2388 }
2389
2390 entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC;
4a6bb6d3
SF
2391 entry->key.term_mac.in_pport = in_pport;
2392 entry->key.term_mac.in_pport_mask = in_pport_mask;
9f6bbf7c
SF
2393 entry->key.term_mac.eth_type = eth_type;
2394 ether_addr_copy(entry->key.term_mac.eth_dst, eth_dst);
2395 ether_addr_copy(entry->key.term_mac.eth_dst_mask, eth_dst_mask);
2396 entry->key.term_mac.vlan_id = vlan_id;
2397 entry->key.term_mac.vlan_id_mask = vlan_id_mask;
2398 entry->key.term_mac.copy_to_cpu = copy_to_cpu;
2399
76c6f945 2400 return rocker_flow_tbl_do(rocker_port, trans, flags, entry);
9f6bbf7c
SF
2401}
2402
2403static int rocker_flow_tbl_bridge(struct rocker_port *rocker_port,
76c6f945 2404 struct switchdev_trans *trans, int flags,
9f6bbf7c
SF
2405 const u8 *eth_dst, const u8 *eth_dst_mask,
2406 __be16 vlan_id, u32 tunnel_id,
2407 enum rocker_of_dpa_table_id goto_tbl,
2408 u32 group_id, bool copy_to_cpu)
2409{
2410 struct rocker_flow_tbl_entry *entry;
2411 u32 priority;
2412 bool vlan_bridging = !!vlan_id;
2413 bool dflt = !eth_dst || (eth_dst && eth_dst_mask);
2414 bool wild = false;
2415
b15edf85 2416 entry = rocker_kzalloc(trans, flags, sizeof(*entry));
9f6bbf7c
SF
2417 if (!entry)
2418 return -ENOMEM;
2419
2420 entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_BRIDGING;
2421
2422 if (eth_dst) {
2423 entry->key.bridge.has_eth_dst = 1;
2424 ether_addr_copy(entry->key.bridge.eth_dst, eth_dst);
2425 }
2426 if (eth_dst_mask) {
2427 entry->key.bridge.has_eth_dst_mask = 1;
2428 ether_addr_copy(entry->key.bridge.eth_dst_mask, eth_dst_mask);
629161f6 2429 if (!ether_addr_equal(eth_dst_mask, ff_mac))
9f6bbf7c
SF
2430 wild = true;
2431 }
2432
2433 priority = ROCKER_PRIORITY_UNKNOWN;
51ace887 2434 if (vlan_bridging && dflt && wild)
9f6bbf7c 2435 priority = ROCKER_PRIORITY_BRIDGING_VLAN_DFLT_WILD;
51ace887 2436 else if (vlan_bridging && dflt && !wild)
9f6bbf7c 2437 priority = ROCKER_PRIORITY_BRIDGING_VLAN_DFLT_EXACT;
51ace887 2438 else if (vlan_bridging && !dflt)
9f6bbf7c 2439 priority = ROCKER_PRIORITY_BRIDGING_VLAN;
51ace887 2440 else if (!vlan_bridging && dflt && wild)
9f6bbf7c 2441 priority = ROCKER_PRIORITY_BRIDGING_TENANT_DFLT_WILD;
51ace887 2442 else if (!vlan_bridging && dflt && !wild)
9f6bbf7c 2443 priority = ROCKER_PRIORITY_BRIDGING_TENANT_DFLT_EXACT;
51ace887 2444 else if (!vlan_bridging && !dflt)
9f6bbf7c
SF
2445 priority = ROCKER_PRIORITY_BRIDGING_TENANT;
2446
2447 entry->key.priority = priority;
2448 entry->key.bridge.vlan_id = vlan_id;
2449 entry->key.bridge.tunnel_id = tunnel_id;
2450 entry->key.bridge.goto_tbl = goto_tbl;
2451 entry->key.bridge.group_id = group_id;
2452 entry->key.bridge.copy_to_cpu = copy_to_cpu;
2453
76c6f945 2454 return rocker_flow_tbl_do(rocker_port, trans, flags, entry);
9f6bbf7c
SF
2455}
2456
c1beeef7 2457static int rocker_flow_tbl_ucast4_routing(struct rocker_port *rocker_port,
76c6f945 2458 struct switchdev_trans *trans,
c1beeef7
SF
2459 __be16 eth_type, __be32 dst,
2460 __be32 dst_mask, u32 priority,
2461 enum rocker_of_dpa_table_id goto_tbl,
2462 u32 group_id, int flags)
2463{
2464 struct rocker_flow_tbl_entry *entry;
2465
b15edf85 2466 entry = rocker_kzalloc(trans, flags, sizeof(*entry));
c1beeef7
SF
2467 if (!entry)
2468 return -ENOMEM;
2469
2470 entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING;
2471 entry->key.priority = priority;
2472 entry->key.ucast_routing.eth_type = eth_type;
2473 entry->key.ucast_routing.dst4 = dst;
2474 entry->key.ucast_routing.dst4_mask = dst_mask;
2475 entry->key.ucast_routing.goto_tbl = goto_tbl;
2476 entry->key.ucast_routing.group_id = group_id;
2477 entry->key_len = offsetof(struct rocker_flow_tbl_key,
2478 ucast_routing.group_id);
2479
76c6f945 2480 return rocker_flow_tbl_do(rocker_port, trans, flags, entry);
c1beeef7
SF
2481}
2482
9f6bbf7c 2483static int rocker_flow_tbl_acl(struct rocker_port *rocker_port,
76c6f945 2484 struct switchdev_trans *trans, int flags,
c4f20321 2485 u32 in_pport, u32 in_pport_mask,
9f6bbf7c
SF
2486 const u8 *eth_src, const u8 *eth_src_mask,
2487 const u8 *eth_dst, const u8 *eth_dst_mask,
c4f20321
SF
2488 __be16 eth_type, __be16 vlan_id,
2489 __be16 vlan_id_mask, u8 ip_proto,
2490 u8 ip_proto_mask, u8 ip_tos, u8 ip_tos_mask,
9f6bbf7c
SF
2491 u32 group_id)
2492{
2493 u32 priority;
2494 struct rocker_flow_tbl_entry *entry;
2495
b15edf85 2496 entry = rocker_kzalloc(trans, flags, sizeof(*entry));
9f6bbf7c
SF
2497 if (!entry)
2498 return -ENOMEM;
2499
2500 priority = ROCKER_PRIORITY_ACL_NORMAL;
2501 if (eth_dst && eth_dst_mask) {
629161f6 2502 if (ether_addr_equal(eth_dst_mask, mcast_mac))
9f6bbf7c
SF
2503 priority = ROCKER_PRIORITY_ACL_DFLT;
2504 else if (is_link_local_ether_addr(eth_dst))
2505 priority = ROCKER_PRIORITY_ACL_CTRL;
2506 }
2507
2508 entry->key.priority = priority;
2509 entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
4a6bb6d3
SF
2510 entry->key.acl.in_pport = in_pport;
2511 entry->key.acl.in_pport_mask = in_pport_mask;
9f6bbf7c
SF
2512
2513 if (eth_src)
2514 ether_addr_copy(entry->key.acl.eth_src, eth_src);
2515 if (eth_src_mask)
2516 ether_addr_copy(entry->key.acl.eth_src_mask, eth_src_mask);
2517 if (eth_dst)
2518 ether_addr_copy(entry->key.acl.eth_dst, eth_dst);
2519 if (eth_dst_mask)
2520 ether_addr_copy(entry->key.acl.eth_dst_mask, eth_dst_mask);
2521
2522 entry->key.acl.eth_type = eth_type;
2523 entry->key.acl.vlan_id = vlan_id;
2524 entry->key.acl.vlan_id_mask = vlan_id_mask;
2525 entry->key.acl.ip_proto = ip_proto;
2526 entry->key.acl.ip_proto_mask = ip_proto_mask;
2527 entry->key.acl.ip_tos = ip_tos;
2528 entry->key.acl.ip_tos_mask = ip_tos_mask;
2529 entry->key.acl.group_id = group_id;
2530
76c6f945 2531 return rocker_flow_tbl_do(rocker_port, trans, flags, entry);
9f6bbf7c
SF
2532}
2533
2534static struct rocker_group_tbl_entry *
e5054643
SH
2535rocker_group_tbl_find(const struct rocker *rocker,
2536 const struct rocker_group_tbl_entry *match)
9f6bbf7c
SF
2537{
2538 struct rocker_group_tbl_entry *found;
2539
2540 hash_for_each_possible(rocker->group_tbl, found,
2541 entry, match->group_id) {
2542 if (found->group_id == match->group_id)
2543 return found;
2544 }
2545
2546 return NULL;
2547}
2548
76c6f945 2549static void rocker_group_tbl_entry_free(struct switchdev_trans *trans,
c4f20321 2550 struct rocker_group_tbl_entry *entry)
9f6bbf7c
SF
2551{
2552 switch (ROCKER_GROUP_TYPE_GET(entry->group_id)) {
2553 case ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD:
2554 case ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST:
b15edf85 2555 rocker_kfree(trans, entry->group_ids);
9f6bbf7c
SF
2556 break;
2557 default:
2558 break;
2559 }
b15edf85 2560 rocker_kfree(trans, entry);
9f6bbf7c
SF
2561}
2562
2563static int rocker_group_tbl_add(struct rocker_port *rocker_port,
76c6f945 2564 struct switchdev_trans *trans, int flags,
c4f20321 2565 struct rocker_group_tbl_entry *match)
9f6bbf7c
SF
2566{
2567 struct rocker *rocker = rocker_port->rocker;
2568 struct rocker_group_tbl_entry *found;
179f9a25 2569 unsigned long lock_flags;
9f6bbf7c 2570
179f9a25 2571 spin_lock_irqsave(&rocker->group_tbl_lock, lock_flags);
9f6bbf7c
SF
2572
2573 found = rocker_group_tbl_find(rocker, match);
2574
2575 if (found) {
76c6f945 2576 if (!switchdev_trans_ph_prepare(trans))
c4f20321 2577 hash_del(&found->entry);
76c6f945 2578 rocker_group_tbl_entry_free(trans, found);
9f6bbf7c
SF
2579 found = match;
2580 found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_MOD;
2581 } else {
2582 found = match;
2583 found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_ADD;
2584 }
2585
76c6f945 2586 if (!switchdev_trans_ph_prepare(trans))
c4f20321 2587 hash_add(rocker->group_tbl, &found->entry, found->group_id);
9f6bbf7c 2588
179f9a25 2589 spin_unlock_irqrestore(&rocker->group_tbl_lock, lock_flags);
9f6bbf7c 2590
76c6f945 2591 return rocker_cmd_exec(rocker_port, trans, flags,
179f9a25 2592 rocker_cmd_group_tbl_add, found, NULL, NULL);
9f6bbf7c
SF
2593}
2594
2595static int rocker_group_tbl_del(struct rocker_port *rocker_port,
76c6f945 2596 struct switchdev_trans *trans, int flags,
c4f20321 2597 struct rocker_group_tbl_entry *match)
9f6bbf7c
SF
2598{
2599 struct rocker *rocker = rocker_port->rocker;
2600 struct rocker_group_tbl_entry *found;
179f9a25 2601 unsigned long lock_flags;
9f6bbf7c
SF
2602 int err = 0;
2603
179f9a25 2604 spin_lock_irqsave(&rocker->group_tbl_lock, lock_flags);
9f6bbf7c
SF
2605
2606 found = rocker_group_tbl_find(rocker, match);
2607
2608 if (found) {
76c6f945 2609 if (!switchdev_trans_ph_prepare(trans))
c4f20321 2610 hash_del(&found->entry);
9f6bbf7c
SF
2611 found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_DEL;
2612 }
2613
179f9a25 2614 spin_unlock_irqrestore(&rocker->group_tbl_lock, lock_flags);
9f6bbf7c 2615
76c6f945 2616 rocker_group_tbl_entry_free(trans, match);
9f6bbf7c
SF
2617
2618 if (found) {
76c6f945 2619 err = rocker_cmd_exec(rocker_port, trans, flags,
9f6bbf7c 2620 rocker_cmd_group_tbl_del,
c4f20321 2621 found, NULL, NULL);
76c6f945 2622 rocker_group_tbl_entry_free(trans, found);
9f6bbf7c
SF
2623 }
2624
2625 return err;
2626}
2627
2628static int rocker_group_tbl_do(struct rocker_port *rocker_port,
76c6f945 2629 struct switchdev_trans *trans, int flags,
c4f20321 2630 struct rocker_group_tbl_entry *entry)
9f6bbf7c 2631{
9f6bbf7c 2632 if (flags & ROCKER_OP_FLAG_REMOVE)
76c6f945 2633 return rocker_group_tbl_del(rocker_port, trans, flags, entry);
9f6bbf7c 2634 else
76c6f945 2635 return rocker_group_tbl_add(rocker_port, trans, flags, entry);
9f6bbf7c
SF
2636}
2637
2638static int rocker_group_l2_interface(struct rocker_port *rocker_port,
76c6f945 2639 struct switchdev_trans *trans, int flags,
c4f20321
SF
2640 __be16 vlan_id, u32 out_pport,
2641 int pop_vlan)
9f6bbf7c
SF
2642{
2643 struct rocker_group_tbl_entry *entry;
2644
b15edf85 2645 entry = rocker_kzalloc(trans, flags, sizeof(*entry));
9f6bbf7c
SF
2646 if (!entry)
2647 return -ENOMEM;
2648
4a6bb6d3 2649 entry->group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport);
9f6bbf7c
SF
2650 entry->l2_interface.pop_vlan = pop_vlan;
2651
76c6f945 2652 return rocker_group_tbl_do(rocker_port, trans, flags, entry);
9f6bbf7c
SF
2653}
2654
2655static int rocker_group_l2_fan_out(struct rocker_port *rocker_port,
76c6f945 2656 struct switchdev_trans *trans,
9f6bbf7c 2657 int flags, u8 group_count,
e5054643 2658 const u32 *group_ids, u32 group_id)
9f6bbf7c
SF
2659{
2660 struct rocker_group_tbl_entry *entry;
2661
b15edf85 2662 entry = rocker_kzalloc(trans, flags, sizeof(*entry));
9f6bbf7c
SF
2663 if (!entry)
2664 return -ENOMEM;
2665
2666 entry->group_id = group_id;
2667 entry->group_count = group_count;
2668
b15edf85
JP
2669 entry->group_ids = rocker_kcalloc(trans, flags,
2670 group_count, sizeof(u32));
9f6bbf7c 2671 if (!entry->group_ids) {
b15edf85 2672 rocker_kfree(trans, entry);
9f6bbf7c
SF
2673 return -ENOMEM;
2674 }
2675 memcpy(entry->group_ids, group_ids, group_count * sizeof(u32));
2676
76c6f945 2677 return rocker_group_tbl_do(rocker_port, trans, flags, entry);
9f6bbf7c
SF
2678}
2679
2680static int rocker_group_l2_flood(struct rocker_port *rocker_port,
76c6f945 2681 struct switchdev_trans *trans, int flags,
c4f20321 2682 __be16 vlan_id, u8 group_count,
e5054643 2683 const u32 *group_ids, u32 group_id)
9f6bbf7c 2684{
76c6f945 2685 return rocker_group_l2_fan_out(rocker_port, trans, flags,
9f6bbf7c
SF
2686 group_count, group_ids,
2687 group_id);
2688}
2689
c1beeef7 2690static int rocker_group_l3_unicast(struct rocker_port *rocker_port,
76c6f945 2691 struct switchdev_trans *trans, int flags,
e5054643 2692 u32 index, const u8 *src_mac, const u8 *dst_mac,
c4f20321 2693 __be16 vlan_id, bool ttl_check, u32 pport)
c1beeef7
SF
2694{
2695 struct rocker_group_tbl_entry *entry;
2696
b15edf85 2697 entry = rocker_kzalloc(trans, flags, sizeof(*entry));
c1beeef7
SF
2698 if (!entry)
2699 return -ENOMEM;
2700
2701 entry->group_id = ROCKER_GROUP_L3_UNICAST(index);
2702 if (src_mac)
2703 ether_addr_copy(entry->l3_unicast.eth_src, src_mac);
2704 if (dst_mac)
2705 ether_addr_copy(entry->l3_unicast.eth_dst, dst_mac);
2706 entry->l3_unicast.vlan_id = vlan_id;
2707 entry->l3_unicast.ttl_check = ttl_check;
2708 entry->l3_unicast.group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, pport);
2709
76c6f945 2710 return rocker_group_tbl_do(rocker_port, trans, flags, entry);
c1beeef7
SF
2711}
2712
2713static struct rocker_neigh_tbl_entry *
e5054643 2714rocker_neigh_tbl_find(const struct rocker *rocker, __be32 ip_addr)
c1beeef7
SF
2715{
2716 struct rocker_neigh_tbl_entry *found;
2717
0f43deba
SF
2718 hash_for_each_possible(rocker->neigh_tbl, found,
2719 entry, be32_to_cpu(ip_addr))
c1beeef7
SF
2720 if (found->ip_addr == ip_addr)
2721 return found;
2722
2723 return NULL;
2724}
2725
2726static void _rocker_neigh_add(struct rocker *rocker,
76c6f945 2727 struct switchdev_trans *trans,
c1beeef7
SF
2728 struct rocker_neigh_tbl_entry *entry)
2729{
76c6f945 2730 if (!switchdev_trans_ph_commit(trans))
4d81db41 2731 entry->index = rocker->neigh_tbl_next_index++;
76c6f945 2732 if (switchdev_trans_ph_prepare(trans))
550ecc92 2733 return;
c1beeef7 2734 entry->ref_count++;
0f43deba
SF
2735 hash_add(rocker->neigh_tbl, &entry->entry,
2736 be32_to_cpu(entry->ip_addr));
c1beeef7
SF
2737}
2738
76c6f945 2739static void _rocker_neigh_del(struct switchdev_trans *trans,
c1beeef7
SF
2740 struct rocker_neigh_tbl_entry *entry)
2741{
76c6f945 2742 if (switchdev_trans_ph_prepare(trans))
550ecc92 2743 return;
c1beeef7
SF
2744 if (--entry->ref_count == 0) {
2745 hash_del(&entry->entry);
b15edf85 2746 rocker_kfree(trans, entry);
c1beeef7
SF
2747 }
2748}
2749
c4f20321 2750static void _rocker_neigh_update(struct rocker_neigh_tbl_entry *entry,
76c6f945 2751 struct switchdev_trans *trans,
e5054643 2752 const u8 *eth_dst, bool ttl_check)
c1beeef7
SF
2753{
2754 if (eth_dst) {
2755 ether_addr_copy(entry->eth_dst, eth_dst);
2756 entry->ttl_check = ttl_check;
76c6f945 2757 } else if (!switchdev_trans_ph_prepare(trans)) {
c1beeef7
SF
2758 entry->ref_count++;
2759 }
2760}
2761
2762static int rocker_port_ipv4_neigh(struct rocker_port *rocker_port,
76c6f945 2763 struct switchdev_trans *trans,
e5054643 2764 int flags, __be32 ip_addr, const u8 *eth_dst)
c1beeef7
SF
2765{
2766 struct rocker *rocker = rocker_port->rocker;
2767 struct rocker_neigh_tbl_entry *entry;
2768 struct rocker_neigh_tbl_entry *found;
2769 unsigned long lock_flags;
2770 __be16 eth_type = htons(ETH_P_IP);
2771 enum rocker_of_dpa_table_id goto_tbl =
2772 ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
2773 u32 group_id;
2774 u32 priority = 0;
2775 bool adding = !(flags & ROCKER_OP_FLAG_REMOVE);
2776 bool updating;
2777 bool removing;
2778 int err = 0;
2779
b15edf85 2780 entry = rocker_kzalloc(trans, flags, sizeof(*entry));
c1beeef7
SF
2781 if (!entry)
2782 return -ENOMEM;
2783
2784 spin_lock_irqsave(&rocker->neigh_tbl_lock, lock_flags);
2785
2786 found = rocker_neigh_tbl_find(rocker, ip_addr);
2787
2788 updating = found && adding;
2789 removing = found && !adding;
2790 adding = !found && adding;
2791
2792 if (adding) {
2793 entry->ip_addr = ip_addr;
2794 entry->dev = rocker_port->dev;
2795 ether_addr_copy(entry->eth_dst, eth_dst);
2796 entry->ttl_check = true;
76c6f945 2797 _rocker_neigh_add(rocker, trans, entry);
c1beeef7
SF
2798 } else if (removing) {
2799 memcpy(entry, found, sizeof(*entry));
76c6f945 2800 _rocker_neigh_del(trans, found);
c1beeef7 2801 } else if (updating) {
76c6f945 2802 _rocker_neigh_update(found, trans, eth_dst, true);
c1beeef7
SF
2803 memcpy(entry, found, sizeof(*entry));
2804 } else {
2805 err = -ENOENT;
2806 }
2807
2808 spin_unlock_irqrestore(&rocker->neigh_tbl_lock, lock_flags);
2809
2810 if (err)
2811 goto err_out;
2812
2813 /* For each active neighbor, we have an L3 unicast group and
2814 * a /32 route to the neighbor, which uses the L3 unicast
2815 * group. The L3 unicast group can also be referred to by
2816 * other routes' nexthops.
2817 */
2818
76c6f945 2819 err = rocker_group_l3_unicast(rocker_port, trans, flags,
c1beeef7
SF
2820 entry->index,
2821 rocker_port->dev->dev_addr,
2822 entry->eth_dst,
2823 rocker_port->internal_vlan_id,
2824 entry->ttl_check,
2825 rocker_port->pport);
2826 if (err) {
2827 netdev_err(rocker_port->dev,
2828 "Error (%d) L3 unicast group index %d\n",
2829 err, entry->index);
2830 goto err_out;
2831 }
2832
2833 if (adding || removing) {
2834 group_id = ROCKER_GROUP_L3_UNICAST(entry->index);
76c6f945 2835 err = rocker_flow_tbl_ucast4_routing(rocker_port, trans,
c1beeef7
SF
2836 eth_type, ip_addr,
2837 inet_make_mask(32),
2838 priority, goto_tbl,
2839 group_id, flags);
2840
2841 if (err)
2842 netdev_err(rocker_port->dev,
2843 "Error (%d) /32 unicast route %pI4 group 0x%08x\n",
2844 err, &entry->ip_addr, group_id);
2845 }
2846
2847err_out:
2848 if (!adding)
b15edf85 2849 rocker_kfree(trans, entry);
c1beeef7
SF
2850
2851 return err;
2852}
2853
2854static int rocker_port_ipv4_resolve(struct rocker_port *rocker_port,
76c6f945
JP
2855 struct switchdev_trans *trans,
2856 __be32 ip_addr)
c1beeef7
SF
2857{
2858 struct net_device *dev = rocker_port->dev;
0f43deba 2859 struct neighbour *n = __ipv4_neigh_lookup(dev, (__force u32)ip_addr);
c1beeef7
SF
2860 int err = 0;
2861
4133fc09 2862 if (!n) {
c1beeef7 2863 n = neigh_create(&arp_tbl, &ip_addr, dev);
4133fc09
YX
2864 if (IS_ERR(n))
2865 return IS_ERR(n);
2866 }
c1beeef7
SF
2867
2868 /* If the neigh is already resolved, then go ahead and
2869 * install the entry, otherwise start the ARP process to
2870 * resolve the neigh.
2871 */
2872
2873 if (n->nud_state & NUD_VALID)
76c6f945 2874 err = rocker_port_ipv4_neigh(rocker_port, trans, 0,
c4f20321 2875 ip_addr, n->ha);
c1beeef7
SF
2876 else
2877 neigh_event_send(n, NULL);
2878
4133fc09 2879 neigh_release(n);
c1beeef7
SF
2880 return err;
2881}
2882
c4f20321 2883static int rocker_port_ipv4_nh(struct rocker_port *rocker_port,
76c6f945 2884 struct switchdev_trans *trans, int flags,
c1beeef7
SF
2885 __be32 ip_addr, u32 *index)
2886{
2887 struct rocker *rocker = rocker_port->rocker;
2888 struct rocker_neigh_tbl_entry *entry;
2889 struct rocker_neigh_tbl_entry *found;
2890 unsigned long lock_flags;
2891 bool adding = !(flags & ROCKER_OP_FLAG_REMOVE);
2892 bool updating;
2893 bool removing;
2894 bool resolved = true;
2895 int err = 0;
2896
b15edf85 2897 entry = rocker_kzalloc(trans, flags, sizeof(*entry));
c1beeef7
SF
2898 if (!entry)
2899 return -ENOMEM;
2900
2901 spin_lock_irqsave(&rocker->neigh_tbl_lock, lock_flags);
2902
2903 found = rocker_neigh_tbl_find(rocker, ip_addr);
2904 if (found)
2905 *index = found->index;
2906
2907 updating = found && adding;
2908 removing = found && !adding;
2909 adding = !found && adding;
2910
2911 if (adding) {
2912 entry->ip_addr = ip_addr;
2913 entry->dev = rocker_port->dev;
76c6f945 2914 _rocker_neigh_add(rocker, trans, entry);
c1beeef7
SF
2915 *index = entry->index;
2916 resolved = false;
2917 } else if (removing) {
76c6f945 2918 _rocker_neigh_del(trans, found);
c1beeef7 2919 } else if (updating) {
76c6f945 2920 _rocker_neigh_update(found, trans, NULL, false);
c1beeef7
SF
2921 resolved = !is_zero_ether_addr(found->eth_dst);
2922 } else {
2923 err = -ENOENT;
2924 }
2925
2926 spin_unlock_irqrestore(&rocker->neigh_tbl_lock, lock_flags);
2927
2928 if (!adding)
b15edf85 2929 rocker_kfree(trans, entry);
c1beeef7
SF
2930
2931 if (err)
2932 return err;
2933
2934 /* Resolved means neigh ip_addr is resolved to neigh mac. */
2935
2936 if (!resolved)
76c6f945 2937 err = rocker_port_ipv4_resolve(rocker_port, trans, ip_addr);
c1beeef7
SF
2938
2939 return err;
2940}
2941
6c707945 2942static int rocker_port_vlan_flood_group(struct rocker_port *rocker_port,
76c6f945 2943 struct switchdev_trans *trans,
6c707945
SF
2944 int flags, __be16 vlan_id)
2945{
2946 struct rocker_port *p;
e5054643 2947 const struct rocker *rocker = rocker_port->rocker;
6c707945 2948 u32 group_id = ROCKER_GROUP_L2_FLOOD(vlan_id, 0);
04f49faf 2949 u32 *group_ids;
6c707945 2950 u8 group_count = 0;
04f49faf 2951 int err = 0;
6c707945
SF
2952 int i;
2953
b15edf85
JP
2954 group_ids = rocker_kcalloc(trans, flags,
2955 rocker->port_count, sizeof(u32));
04f49faf
SF
2956 if (!group_ids)
2957 return -ENOMEM;
2958
6c707945
SF
2959 /* Adjust the flood group for this VLAN. The flood group
2960 * references an L2 interface group for each port in this
2961 * VLAN.
2962 */
2963
2964 for (i = 0; i < rocker->port_count; i++) {
2965 p = rocker->ports[i];
bcfd7801
SF
2966 if (!p)
2967 continue;
6c707945
SF
2968 if (!rocker_port_is_bridged(p))
2969 continue;
2970 if (test_bit(ntohs(vlan_id), p->vlan_bitmap)) {
2971 group_ids[group_count++] =
4a6bb6d3 2972 ROCKER_GROUP_L2_INTERFACE(vlan_id, p->pport);
6c707945
SF
2973 }
2974 }
2975
2976 /* If there are no bridged ports in this VLAN, we're done */
2977 if (group_count == 0)
04f49faf 2978 goto no_ports_in_vlan;
6c707945 2979
76c6f945 2980 err = rocker_group_l2_flood(rocker_port, trans, flags, vlan_id,
c4f20321 2981 group_count, group_ids, group_id);
6c707945
SF
2982 if (err)
2983 netdev_err(rocker_port->dev,
2984 "Error (%d) port VLAN l2 flood group\n", err);
2985
04f49faf 2986no_ports_in_vlan:
b15edf85 2987 rocker_kfree(trans, group_ids);
6c707945
SF
2988 return err;
2989}
2990
2991static int rocker_port_vlan_l2_groups(struct rocker_port *rocker_port,
76c6f945 2992 struct switchdev_trans *trans, int flags,
c4f20321 2993 __be16 vlan_id, bool pop_vlan)
6c707945 2994{
e5054643 2995 const struct rocker *rocker = rocker_port->rocker;
6c707945
SF
2996 struct rocker_port *p;
2997 bool adding = !(flags & ROCKER_OP_FLAG_REMOVE);
4a6bb6d3 2998 u32 out_pport;
6c707945
SF
2999 int ref = 0;
3000 int err;
3001 int i;
3002
3003 /* An L2 interface group for this port in this VLAN, but
3004 * only when port STP state is LEARNING|FORWARDING.
3005 */
3006
3007 if (rocker_port->stp_state == BR_STATE_LEARNING ||
3008 rocker_port->stp_state == BR_STATE_FORWARDING) {
4a6bb6d3 3009 out_pport = rocker_port->pport;
76c6f945 3010 err = rocker_group_l2_interface(rocker_port, trans, flags,
c4f20321 3011 vlan_id, out_pport, pop_vlan);
6c707945
SF
3012 if (err) {
3013 netdev_err(rocker_port->dev,
4a6bb6d3
SF
3014 "Error (%d) port VLAN l2 group for pport %d\n",
3015 err, out_pport);
6c707945
SF
3016 return err;
3017 }
3018 }
3019
3020 /* An L2 interface group for this VLAN to CPU port.
3021 * Add when first port joins this VLAN and destroy when
3022 * last port leaves this VLAN.
3023 */
3024
3025 for (i = 0; i < rocker->port_count; i++) {
3026 p = rocker->ports[i];
bcfd7801 3027 if (p && test_bit(ntohs(vlan_id), p->vlan_bitmap))
6c707945
SF
3028 ref++;
3029 }
3030
3031 if ((!adding || ref != 1) && (adding || ref != 0))
3032 return 0;
3033
4a6bb6d3 3034 out_pport = 0;
76c6f945 3035 err = rocker_group_l2_interface(rocker_port, trans, flags,
c4f20321 3036 vlan_id, out_pport, pop_vlan);
6c707945
SF
3037 if (err) {
3038 netdev_err(rocker_port->dev,
3039 "Error (%d) port VLAN l2 group for CPU port\n", err);
3040 return err;
3041 }
3042
3043 return 0;
3044}
3045
9f6bbf7c
SF
3046static struct rocker_ctrl {
3047 const u8 *eth_dst;
3048 const u8 *eth_dst_mask;
11e6c65a 3049 __be16 eth_type;
9f6bbf7c
SF
3050 bool acl;
3051 bool bridge;
3052 bool term;
3053 bool copy_to_cpu;
3054} rocker_ctrls[] = {
3055 [ROCKER_CTRL_LINK_LOCAL_MCAST] = {
3056 /* pass link local multicast pkts up to CPU for filtering */
3057 .eth_dst = ll_mac,
3058 .eth_dst_mask = ll_mask,
3059 .acl = true,
3060 },
3061 [ROCKER_CTRL_LOCAL_ARP] = {
3062 /* pass local ARP pkts up to CPU */
3063 .eth_dst = zero_mac,
3064 .eth_dst_mask = zero_mac,
3065 .eth_type = htons(ETH_P_ARP),
3066 .acl = true,
3067 },
3068 [ROCKER_CTRL_IPV4_MCAST] = {
3069 /* pass IPv4 mcast pkts up to CPU, RFC 1112 */
3070 .eth_dst = ipv4_mcast,
3071 .eth_dst_mask = ipv4_mask,
3072 .eth_type = htons(ETH_P_IP),
3073 .term = true,
3074 .copy_to_cpu = true,
3075 },
3076 [ROCKER_CTRL_IPV6_MCAST] = {
3077 /* pass IPv6 mcast pkts up to CPU, RFC 2464 */
3078 .eth_dst = ipv6_mcast,
3079 .eth_dst_mask = ipv6_mask,
3080 .eth_type = htons(ETH_P_IPV6),
3081 .term = true,
3082 .copy_to_cpu = true,
3083 },
3084 [ROCKER_CTRL_DFLT_BRIDGING] = {
3085 /* flood any pkts on vlan */
3086 .bridge = true,
3087 .copy_to_cpu = true,
3088 },
8254973f
SH
3089 [ROCKER_CTRL_DFLT_OVS] = {
3090 /* pass all pkts up to CPU */
3091 .eth_dst = zero_mac,
3092 .eth_dst_mask = zero_mac,
3093 .acl = true,
3094 },
9f6bbf7c
SF
3095};
3096
3097static int rocker_port_ctrl_vlan_acl(struct rocker_port *rocker_port,
76c6f945 3098 struct switchdev_trans *trans, int flags,
e5054643 3099 const struct rocker_ctrl *ctrl, __be16 vlan_id)
9f6bbf7c 3100{
4a6bb6d3
SF
3101 u32 in_pport = rocker_port->pport;
3102 u32 in_pport_mask = 0xffffffff;
3103 u32 out_pport = 0;
e5054643
SH
3104 const u8 *eth_src = NULL;
3105 const u8 *eth_src_mask = NULL;
9f6bbf7c
SF
3106 __be16 vlan_id_mask = htons(0xffff);
3107 u8 ip_proto = 0;
3108 u8 ip_proto_mask = 0;
3109 u8 ip_tos = 0;
3110 u8 ip_tos_mask = 0;
4a6bb6d3 3111 u32 group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport);
9f6bbf7c
SF
3112 int err;
3113
76c6f945 3114 err = rocker_flow_tbl_acl(rocker_port, trans, flags,
4a6bb6d3 3115 in_pport, in_pport_mask,
9f6bbf7c
SF
3116 eth_src, eth_src_mask,
3117 ctrl->eth_dst, ctrl->eth_dst_mask,
3118 ctrl->eth_type,
3119 vlan_id, vlan_id_mask,
3120 ip_proto, ip_proto_mask,
3121 ip_tos, ip_tos_mask,
3122 group_id);
3123
3124 if (err)
3125 netdev_err(rocker_port->dev, "Error (%d) ctrl ACL\n", err);
3126
3127 return err;
3128}
3129
6c707945 3130static int rocker_port_ctrl_vlan_bridge(struct rocker_port *rocker_port,
76c6f945
JP
3131 struct switchdev_trans *trans,
3132 int flags,
e5054643 3133 const struct rocker_ctrl *ctrl,
6c707945
SF
3134 __be16 vlan_id)
3135{
3136 enum rocker_of_dpa_table_id goto_tbl =
3137 ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
3138 u32 group_id = ROCKER_GROUP_L2_FLOOD(vlan_id, 0);
3139 u32 tunnel_id = 0;
3140 int err;
3141
3142 if (!rocker_port_is_bridged(rocker_port))
3143 return 0;
3144
76c6f945 3145 err = rocker_flow_tbl_bridge(rocker_port, trans, flags,
6c707945
SF
3146 ctrl->eth_dst, ctrl->eth_dst_mask,
3147 vlan_id, tunnel_id,
3148 goto_tbl, group_id, ctrl->copy_to_cpu);
3149
3150 if (err)
3151 netdev_err(rocker_port->dev, "Error (%d) ctrl FLOOD\n", err);
3152
3153 return err;
3154}
3155
9f6bbf7c 3156static int rocker_port_ctrl_vlan_term(struct rocker_port *rocker_port,
76c6f945 3157 struct switchdev_trans *trans, int flags,
e5054643 3158 const struct rocker_ctrl *ctrl, __be16 vlan_id)
9f6bbf7c 3159{
4a6bb6d3 3160 u32 in_pport_mask = 0xffffffff;
9f6bbf7c
SF
3161 __be16 vlan_id_mask = htons(0xffff);
3162 int err;
3163
3164 if (ntohs(vlan_id) == 0)
3165 vlan_id = rocker_port->internal_vlan_id;
3166
76c6f945 3167 err = rocker_flow_tbl_term_mac(rocker_port, trans,
4a6bb6d3 3168 rocker_port->pport, in_pport_mask,
9f6bbf7c
SF
3169 ctrl->eth_type, ctrl->eth_dst,
3170 ctrl->eth_dst_mask, vlan_id,
3171 vlan_id_mask, ctrl->copy_to_cpu,
3172 flags);
3173
3174 if (err)
3175 netdev_err(rocker_port->dev, "Error (%d) ctrl term\n", err);
3176
3177 return err;
3178}
3179
c4f20321 3180static int rocker_port_ctrl_vlan(struct rocker_port *rocker_port,
76c6f945 3181 struct switchdev_trans *trans, int flags,
e5054643 3182 const struct rocker_ctrl *ctrl, __be16 vlan_id)
9f6bbf7c
SF
3183{
3184 if (ctrl->acl)
76c6f945 3185 return rocker_port_ctrl_vlan_acl(rocker_port, trans, flags,
9f6bbf7c 3186 ctrl, vlan_id);
6c707945 3187 if (ctrl->bridge)
76c6f945 3188 return rocker_port_ctrl_vlan_bridge(rocker_port, trans, flags,
6c707945 3189 ctrl, vlan_id);
9f6bbf7c
SF
3190
3191 if (ctrl->term)
76c6f945 3192 return rocker_port_ctrl_vlan_term(rocker_port, trans, flags,
9f6bbf7c
SF
3193 ctrl, vlan_id);
3194
3195 return -EOPNOTSUPP;
3196}
3197
3198static int rocker_port_ctrl_vlan_add(struct rocker_port *rocker_port,
76c6f945 3199 struct switchdev_trans *trans, int flags,
c4f20321 3200 __be16 vlan_id)
9f6bbf7c
SF
3201{
3202 int err = 0;
3203 int i;
3204
3205 for (i = 0; i < ROCKER_CTRL_MAX; i++) {
3206 if (rocker_port->ctrls[i]) {
76c6f945 3207 err = rocker_port_ctrl_vlan(rocker_port, trans, flags,
9f6bbf7c
SF
3208 &rocker_ctrls[i], vlan_id);
3209 if (err)
3210 return err;
3211 }
3212 }
3213
3214 return err;
3215}
3216
c4f20321 3217static int rocker_port_ctrl(struct rocker_port *rocker_port,
76c6f945 3218 struct switchdev_trans *trans, int flags,
e5054643 3219 const struct rocker_ctrl *ctrl)
9f6bbf7c
SF
3220{
3221 u16 vid;
3222 int err = 0;
3223
3224 for (vid = 1; vid < VLAN_N_VID; vid++) {
3225 if (!test_bit(vid, rocker_port->vlan_bitmap))
3226 continue;
76c6f945 3227 err = rocker_port_ctrl_vlan(rocker_port, trans, flags,
9f6bbf7c
SF
3228 ctrl, htons(vid));
3229 if (err)
3230 break;
3231 }
3232
3233 return err;
3234}
3235
c4f20321 3236static int rocker_port_vlan(struct rocker_port *rocker_port,
76c6f945 3237 struct switchdev_trans *trans, int flags, u16 vid)
6c707945
SF
3238{
3239 enum rocker_of_dpa_table_id goto_tbl =
3240 ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC;
4a6bb6d3 3241 u32 in_pport = rocker_port->pport;
6c707945
SF
3242 __be16 vlan_id = htons(vid);
3243 __be16 vlan_id_mask = htons(0xffff);
3244 __be16 internal_vlan_id;
3245 bool untagged;
3246 bool adding = !(flags & ROCKER_OP_FLAG_REMOVE);
3247 int err;
3248
3249 internal_vlan_id = rocker_port_vid_to_vlan(rocker_port, vid, &untagged);
3250
9228ad26
SF
3251 if (adding && test_bit(ntohs(internal_vlan_id),
3252 rocker_port->vlan_bitmap))
11ce2ba3 3253 return 0; /* already added */
9228ad26
SF
3254 else if (!adding && !test_bit(ntohs(internal_vlan_id),
3255 rocker_port->vlan_bitmap))
11ce2ba3 3256 return 0; /* already removed */
6c707945 3257
9228ad26
SF
3258 change_bit(ntohs(internal_vlan_id), rocker_port->vlan_bitmap);
3259
6c707945 3260 if (adding) {
76c6f945 3261 err = rocker_port_ctrl_vlan_add(rocker_port, trans, flags,
6c707945
SF
3262 internal_vlan_id);
3263 if (err) {
3264 netdev_err(rocker_port->dev,
3265 "Error (%d) port ctrl vlan add\n", err);
9228ad26 3266 goto err_out;
6c707945
SF
3267 }
3268 }
3269
76c6f945 3270 err = rocker_port_vlan_l2_groups(rocker_port, trans, flags,
6c707945
SF
3271 internal_vlan_id, untagged);
3272 if (err) {
3273 netdev_err(rocker_port->dev,
3274 "Error (%d) port VLAN l2 groups\n", err);
9228ad26 3275 goto err_out;
6c707945
SF
3276 }
3277
76c6f945 3278 err = rocker_port_vlan_flood_group(rocker_port, trans, flags,
6c707945
SF
3279 internal_vlan_id);
3280 if (err) {
3281 netdev_err(rocker_port->dev,
3282 "Error (%d) port VLAN l2 flood group\n", err);
9228ad26 3283 goto err_out;
6c707945
SF
3284 }
3285
76c6f945 3286 err = rocker_flow_tbl_vlan(rocker_port, trans, flags,
4a6bb6d3 3287 in_pport, vlan_id, vlan_id_mask,
6c707945
SF
3288 goto_tbl, untagged, internal_vlan_id);
3289 if (err)
3290 netdev_err(rocker_port->dev,
3291 "Error (%d) port VLAN table\n", err);
3292
9228ad26 3293err_out:
76c6f945 3294 if (switchdev_trans_ph_prepare(trans))
9228ad26
SF
3295 change_bit(ntohs(internal_vlan_id), rocker_port->vlan_bitmap);
3296
6c707945
SF
3297 return err;
3298}
3299
c4f20321 3300static int rocker_port_ig_tbl(struct rocker_port *rocker_port,
76c6f945 3301 struct switchdev_trans *trans, int flags)
9f6bbf7c
SF
3302{
3303 enum rocker_of_dpa_table_id goto_tbl;
4a6bb6d3
SF
3304 u32 in_pport;
3305 u32 in_pport_mask;
9f6bbf7c
SF
3306 int err;
3307
3308 /* Normal Ethernet Frames. Matches pkts from any local physical
3309 * ports. Goto VLAN tbl.
3310 */
3311
4a6bb6d3
SF
3312 in_pport = 0;
3313 in_pport_mask = 0xffff0000;
9f6bbf7c
SF
3314 goto_tbl = ROCKER_OF_DPA_TABLE_ID_VLAN;
3315
76c6f945 3316 err = rocker_flow_tbl_ig_port(rocker_port, trans, flags,
4a6bb6d3 3317 in_pport, in_pport_mask,
9f6bbf7c
SF
3318 goto_tbl);
3319 if (err)
3320 netdev_err(rocker_port->dev,
3321 "Error (%d) ingress port table entry\n", err);
3322
3323 return err;
3324}
3325
6c707945
SF
3326struct rocker_fdb_learn_work {
3327 struct work_struct work;
c4f20321 3328 struct rocker_port *rocker_port;
76c6f945 3329 struct switchdev_trans *trans;
6c707945
SF
3330 int flags;
3331 u8 addr[ETH_ALEN];
3332 u16 vid;
3333};
3334
3335static void rocker_port_fdb_learn_work(struct work_struct *work)
3336{
e5054643 3337 const struct rocker_fdb_learn_work *lw =
6c707945
SF
3338 container_of(work, struct rocker_fdb_learn_work, work);
3339 bool removing = (lw->flags & ROCKER_OP_FLAG_REMOVE);
3340 bool learned = (lw->flags & ROCKER_OP_FLAG_LEARNED);
ebb9a03a 3341 struct switchdev_notifier_fdb_info info;
3aeb6617
JP
3342
3343 info.addr = lw->addr;
3344 info.vid = lw->vid;
6c707945 3345
4f2c6ae5 3346 rtnl_lock();
51ace887 3347 if (learned && removing)
ebb9a03a 3348 call_switchdev_notifiers(SWITCHDEV_FDB_DEL,
c4f20321 3349 lw->rocker_port->dev, &info.info);
51ace887 3350 else if (learned && !removing)
ebb9a03a 3351 call_switchdev_notifiers(SWITCHDEV_FDB_ADD,
c4f20321 3352 lw->rocker_port->dev, &info.info);
4f2c6ae5 3353 rtnl_unlock();
6c707945 3354
b15edf85 3355 rocker_kfree(lw->trans, work);
6c707945
SF
3356}
3357
3358static int rocker_port_fdb_learn(struct rocker_port *rocker_port,
76c6f945 3359 struct switchdev_trans *trans, int flags,
c4f20321 3360 const u8 *addr, __be16 vlan_id)
6c707945
SF
3361{
3362 struct rocker_fdb_learn_work *lw;
3363 enum rocker_of_dpa_table_id goto_tbl =
3364 ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
4a6bb6d3 3365 u32 out_pport = rocker_port->pport;
6c707945
SF
3366 u32 tunnel_id = 0;
3367 u32 group_id = ROCKER_GROUP_NONE;
5111f80c 3368 bool syncing = !!(rocker_port->brport_flags & BR_LEARNING_SYNC);
6c707945
SF
3369 bool copy_to_cpu = false;
3370 int err;
3371
3372 if (rocker_port_is_bridged(rocker_port))
4a6bb6d3 3373 group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport);
6c707945
SF
3374
3375 if (!(flags & ROCKER_OP_FLAG_REFRESH)) {
76c6f945 3376 err = rocker_flow_tbl_bridge(rocker_port, trans, flags, addr,
c4f20321 3377 NULL, vlan_id, tunnel_id, goto_tbl,
6c707945
SF
3378 group_id, copy_to_cpu);
3379 if (err)
3380 return err;
3381 }
3382
5111f80c
SF
3383 if (!syncing)
3384 return 0;
3385
6c707945
SF
3386 if (!rocker_port_is_bridged(rocker_port))
3387 return 0;
3388
b15edf85 3389 lw = rocker_kzalloc(trans, flags, sizeof(*lw));
6c707945
SF
3390 if (!lw)
3391 return -ENOMEM;
3392
3393 INIT_WORK(&lw->work, rocker_port_fdb_learn_work);
3394
c4f20321 3395 lw->rocker_port = rocker_port;
76c6f945 3396 lw->trans = trans;
6c707945
SF
3397 lw->flags = flags;
3398 ether_addr_copy(lw->addr, addr);
3399 lw->vid = rocker_port_vlan_to_vid(rocker_port, vlan_id);
3400
76c6f945 3401 if (switchdev_trans_ph_prepare(trans))
b15edf85 3402 rocker_kfree(trans, lw);
c4f20321
SF
3403 else
3404 schedule_work(&lw->work);
6c707945
SF
3405
3406 return 0;
3407}
3408
3409static struct rocker_fdb_tbl_entry *
e5054643
SH
3410rocker_fdb_tbl_find(const struct rocker *rocker,
3411 const struct rocker_fdb_tbl_entry *match)
6c707945
SF
3412{
3413 struct rocker_fdb_tbl_entry *found;
3414
3415 hash_for_each_possible(rocker->fdb_tbl, found, entry, match->key_crc32)
3416 if (memcmp(&found->key, &match->key, sizeof(found->key)) == 0)
3417 return found;
3418
3419 return NULL;
3420}
3421
3422static int rocker_port_fdb(struct rocker_port *rocker_port,
76c6f945 3423 struct switchdev_trans *trans,
6c707945
SF
3424 const unsigned char *addr,
3425 __be16 vlan_id, int flags)
3426{
3427 struct rocker *rocker = rocker_port->rocker;
3428 struct rocker_fdb_tbl_entry *fdb;
3429 struct rocker_fdb_tbl_entry *found;
3430 bool removing = (flags & ROCKER_OP_FLAG_REMOVE);
3431 unsigned long lock_flags;
3432
b15edf85 3433 fdb = rocker_kzalloc(trans, flags, sizeof(*fdb));
6c707945
SF
3434 if (!fdb)
3435 return -ENOMEM;
3436
3437 fdb->learned = (flags & ROCKER_OP_FLAG_LEARNED);
a471be41 3438 fdb->touched = jiffies;
4c660496 3439 fdb->key.rocker_port = rocker_port;
6c707945
SF
3440 ether_addr_copy(fdb->key.addr, addr);
3441 fdb->key.vlan_id = vlan_id;
3442 fdb->key_crc32 = crc32(~0, &fdb->key, sizeof(fdb->key));
3443
3444 spin_lock_irqsave(&rocker->fdb_tbl_lock, lock_flags);
3445
3446 found = rocker_fdb_tbl_find(rocker, fdb);
3447
a471be41
SF
3448 if (found) {
3449 found->touched = jiffies;
3450 if (removing) {
b15edf85 3451 rocker_kfree(trans, fdb);
76c6f945 3452 if (!switchdev_trans_ph_prepare(trans))
a471be41
SF
3453 hash_del(&found->entry);
3454 }
3455 } else if (!removing) {
76c6f945 3456 if (!switchdev_trans_ph_prepare(trans))
a471be41
SF
3457 hash_add(rocker->fdb_tbl, &fdb->entry,
3458 fdb->key_crc32);
6c707945
SF
3459 }
3460
3461 spin_unlock_irqrestore(&rocker->fdb_tbl_lock, lock_flags);
3462
3463 /* Check if adding and already exists, or removing and can't find */
3464 if (!found != !removing) {
b15edf85 3465 rocker_kfree(trans, fdb);
6c707945
SF
3466 if (!found && removing)
3467 return 0;
3468 /* Refreshing existing to update aging timers */
3469 flags |= ROCKER_OP_FLAG_REFRESH;
3470 }
3471
76c6f945 3472 return rocker_port_fdb_learn(rocker_port, trans, flags, addr, vlan_id);
6c707945
SF
3473}
3474
c4f20321 3475static int rocker_port_fdb_flush(struct rocker_port *rocker_port,
76c6f945 3476 struct switchdev_trans *trans, int flags)
6c707945
SF
3477{
3478 struct rocker *rocker = rocker_port->rocker;
3479 struct rocker_fdb_tbl_entry *found;
3480 unsigned long lock_flags;
6c707945
SF
3481 struct hlist_node *tmp;
3482 int bkt;
3483 int err = 0;
3484
3485 if (rocker_port->stp_state == BR_STATE_LEARNING ||
3486 rocker_port->stp_state == BR_STATE_FORWARDING)
3487 return 0;
3488
d33eeb64 3489 flags |= ROCKER_OP_FLAG_NOWAIT | ROCKER_OP_FLAG_REMOVE;
179f9a25 3490
6c707945
SF
3491 spin_lock_irqsave(&rocker->fdb_tbl_lock, lock_flags);
3492
3493 hash_for_each_safe(rocker->fdb_tbl, bkt, tmp, found, entry) {
4c660496 3494 if (found->key.rocker_port != rocker_port)
6c707945
SF
3495 continue;
3496 if (!found->learned)
3497 continue;
76c6f945 3498 err = rocker_port_fdb_learn(rocker_port, trans, flags,
6c707945
SF
3499 found->key.addr,
3500 found->key.vlan_id);
3501 if (err)
3502 goto err_out;
76c6f945 3503 if (!switchdev_trans_ph_prepare(trans))
3098ac39 3504 hash_del(&found->entry);
6c707945
SF
3505 }
3506
3507err_out:
3508 spin_unlock_irqrestore(&rocker->fdb_tbl_lock, lock_flags);
3509
3510 return err;
3511}
3512
52fe3e2d
SF
3513static void rocker_fdb_cleanup(unsigned long data)
3514{
3515 struct rocker *rocker = (struct rocker *)data;
3516 struct rocker_port *rocker_port;
3517 struct rocker_fdb_tbl_entry *entry;
3518 struct hlist_node *tmp;
3519 unsigned long next_timer = jiffies + BR_MIN_AGEING_TIME;
3520 unsigned long expires;
3521 unsigned long lock_flags;
3522 int flags = ROCKER_OP_FLAG_NOWAIT | ROCKER_OP_FLAG_REMOVE |
3523 ROCKER_OP_FLAG_LEARNED;
3524 int bkt;
3525
3526 spin_lock_irqsave(&rocker->fdb_tbl_lock, lock_flags);
3527
3528 hash_for_each_safe(rocker->fdb_tbl, bkt, tmp, entry, entry) {
3529 if (!entry->learned)
3530 continue;
3531 rocker_port = entry->key.rocker_port;
3532 expires = entry->touched + rocker_port->ageing_time;
3533 if (time_before_eq(expires, jiffies)) {
76c6f945 3534 rocker_port_fdb_learn(rocker_port, NULL,
52fe3e2d
SF
3535 flags, entry->key.addr,
3536 entry->key.vlan_id);
3537 hash_del(&entry->entry);
3538 } else if (time_before(expires, next_timer)) {
3539 next_timer = expires;
3540 }
3541 }
3542
3543 spin_unlock_irqrestore(&rocker->fdb_tbl_lock, lock_flags);
3544
3545 mod_timer(&rocker->fdb_cleanup_timer, round_jiffies_up(next_timer));
3546}
3547
9f6bbf7c 3548static int rocker_port_router_mac(struct rocker_port *rocker_port,
76c6f945 3549 struct switchdev_trans *trans, int flags,
c4f20321 3550 __be16 vlan_id)
9f6bbf7c 3551{
4a6bb6d3 3552 u32 in_pport_mask = 0xffffffff;
9f6bbf7c
SF
3553 __be16 eth_type;
3554 const u8 *dst_mac_mask = ff_mac;
3555 __be16 vlan_id_mask = htons(0xffff);
3556 bool copy_to_cpu = false;
3557 int err;
3558
3559 if (ntohs(vlan_id) == 0)
3560 vlan_id = rocker_port->internal_vlan_id;
3561
3562 eth_type = htons(ETH_P_IP);
76c6f945 3563 err = rocker_flow_tbl_term_mac(rocker_port, trans,
4a6bb6d3 3564 rocker_port->pport, in_pport_mask,
9f6bbf7c
SF
3565 eth_type, rocker_port->dev->dev_addr,
3566 dst_mac_mask, vlan_id, vlan_id_mask,
3567 copy_to_cpu, flags);
3568 if (err)
3569 return err;
3570
3571 eth_type = htons(ETH_P_IPV6);
76c6f945 3572 err = rocker_flow_tbl_term_mac(rocker_port, trans,
4a6bb6d3 3573 rocker_port->pport, in_pport_mask,
9f6bbf7c
SF
3574 eth_type, rocker_port->dev->dev_addr,
3575 dst_mac_mask, vlan_id, vlan_id_mask,
3576 copy_to_cpu, flags);
3577
3578 return err;
3579}
3580
c4f20321 3581static int rocker_port_fwding(struct rocker_port *rocker_port,
76c6f945 3582 struct switchdev_trans *trans, int flags)
6c707945
SF
3583{
3584 bool pop_vlan;
4a6bb6d3 3585 u32 out_pport;
6c707945
SF
3586 __be16 vlan_id;
3587 u16 vid;
6c707945
SF
3588 int err;
3589
3590 /* Port will be forwarding-enabled if its STP state is LEARNING
3591 * or FORWARDING. Traffic from CPU can still egress, regardless of
3592 * port STP state. Use L2 interface group on port VLANs as a way
3593 * to toggle port forwarding: if forwarding is disabled, L2
3594 * interface group will not exist.
3595 */
3596
3597 if (rocker_port->stp_state != BR_STATE_LEARNING &&
3598 rocker_port->stp_state != BR_STATE_FORWARDING)
3599 flags |= ROCKER_OP_FLAG_REMOVE;
3600
4a6bb6d3 3601 out_pport = rocker_port->pport;
6c707945
SF
3602 for (vid = 1; vid < VLAN_N_VID; vid++) {
3603 if (!test_bit(vid, rocker_port->vlan_bitmap))
3604 continue;
3605 vlan_id = htons(vid);
3606 pop_vlan = rocker_vlan_id_is_internal(vlan_id);
76c6f945 3607 err = rocker_group_l2_interface(rocker_port, trans, flags,
c4f20321 3608 vlan_id, out_pport, pop_vlan);
6c707945
SF
3609 if (err) {
3610 netdev_err(rocker_port->dev,
4a6bb6d3
SF
3611 "Error (%d) port VLAN l2 group for pport %d\n",
3612 err, out_pport);
6c707945
SF
3613 return err;
3614 }
3615 }
3616
3617 return 0;
3618}
3619
c4f20321 3620static int rocker_port_stp_update(struct rocker_port *rocker_port,
76c6f945 3621 struct switchdev_trans *trans, int flags,
179f9a25 3622 u8 state)
6c707945
SF
3623{
3624 bool want[ROCKER_CTRL_MAX] = { 0, };
c4f20321 3625 bool prev_ctrls[ROCKER_CTRL_MAX];
76c6f945 3626 u8 uninitialized_var(prev_state);
6c707945
SF
3627 int err;
3628 int i;
3629
76c6f945 3630 if (switchdev_trans_ph_prepare(trans)) {
c4f20321
SF
3631 memcpy(prev_ctrls, rocker_port->ctrls, sizeof(prev_ctrls));
3632 prev_state = rocker_port->stp_state;
3633 }
3634
6c707945
SF
3635 if (rocker_port->stp_state == state)
3636 return 0;
3637
3638 rocker_port->stp_state = state;
3639
3640 switch (state) {
3641 case BR_STATE_DISABLED:
3642 /* port is completely disabled */
3643 break;
3644 case BR_STATE_LISTENING:
3645 case BR_STATE_BLOCKING:
3646 want[ROCKER_CTRL_LINK_LOCAL_MCAST] = true;
3647 break;
3648 case BR_STATE_LEARNING:
3649 case BR_STATE_FORWARDING:
8254973f
SH
3650 if (!rocker_port_is_ovsed(rocker_port))
3651 want[ROCKER_CTRL_LINK_LOCAL_MCAST] = true;
6c707945
SF
3652 want[ROCKER_CTRL_IPV4_MCAST] = true;
3653 want[ROCKER_CTRL_IPV6_MCAST] = true;
3654 if (rocker_port_is_bridged(rocker_port))
3655 want[ROCKER_CTRL_DFLT_BRIDGING] = true;
8254973f
SH
3656 else if (rocker_port_is_ovsed(rocker_port))
3657 want[ROCKER_CTRL_DFLT_OVS] = true;
6c707945
SF
3658 else
3659 want[ROCKER_CTRL_LOCAL_ARP] = true;
3660 break;
3661 }
3662
3663 for (i = 0; i < ROCKER_CTRL_MAX; i++) {
3664 if (want[i] != rocker_port->ctrls[i]) {
179f9a25
SF
3665 int ctrl_flags = flags |
3666 (want[i] ? 0 : ROCKER_OP_FLAG_REMOVE);
76c6f945 3667 err = rocker_port_ctrl(rocker_port, trans, ctrl_flags,
6c707945
SF
3668 &rocker_ctrls[i]);
3669 if (err)
c4f20321 3670 goto err_out;
6c707945
SF
3671 rocker_port->ctrls[i] = want[i];
3672 }
3673 }
3674
76c6f945 3675 err = rocker_port_fdb_flush(rocker_port, trans, flags);
6c707945 3676 if (err)
c4f20321 3677 goto err_out;
6c707945 3678
76c6f945 3679 err = rocker_port_fwding(rocker_port, trans, flags);
c4f20321
SF
3680
3681err_out:
76c6f945 3682 if (switchdev_trans_ph_prepare(trans)) {
c4f20321
SF
3683 memcpy(rocker_port->ctrls, prev_ctrls, sizeof(prev_ctrls));
3684 rocker_port->stp_state = prev_state;
3685 }
3686
3687 return err;
6c707945
SF
3688}
3689
c4f20321 3690static int rocker_port_fwd_enable(struct rocker_port *rocker_port,
76c6f945 3691 struct switchdev_trans *trans, int flags)
e47172ab
SF
3692{
3693 if (rocker_port_is_bridged(rocker_port))
3694 /* bridge STP will enable port */
3695 return 0;
3696
3697 /* port is not bridged, so simulate going to FORWARDING state */
76c6f945 3698 return rocker_port_stp_update(rocker_port, trans, flags,
179f9a25 3699 BR_STATE_FORWARDING);
e47172ab
SF
3700}
3701
c4f20321 3702static int rocker_port_fwd_disable(struct rocker_port *rocker_port,
76c6f945 3703 struct switchdev_trans *trans, int flags)
e47172ab
SF
3704{
3705 if (rocker_port_is_bridged(rocker_port))
3706 /* bridge STP will disable port */
3707 return 0;
3708
3709 /* port is not bridged, so simulate going to DISABLED state */
76c6f945 3710 return rocker_port_stp_update(rocker_port, trans, flags,
179f9a25 3711 BR_STATE_DISABLED);
e47172ab
SF
3712}
3713
9f6bbf7c 3714static struct rocker_internal_vlan_tbl_entry *
e5054643 3715rocker_internal_vlan_tbl_find(const struct rocker *rocker, int ifindex)
9f6bbf7c
SF
3716{
3717 struct rocker_internal_vlan_tbl_entry *found;
3718
3719 hash_for_each_possible(rocker->internal_vlan_tbl, found,
3720 entry, ifindex) {
3721 if (found->ifindex == ifindex)
3722 return found;
3723 }
3724
3725 return NULL;
3726}
3727
3728static __be16 rocker_port_internal_vlan_id_get(struct rocker_port *rocker_port,
3729 int ifindex)
3730{
3731 struct rocker *rocker = rocker_port->rocker;
3732 struct rocker_internal_vlan_tbl_entry *entry;
3733 struct rocker_internal_vlan_tbl_entry *found;
3734 unsigned long lock_flags;
3735 int i;
3736
df6a2067 3737 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
9f6bbf7c
SF
3738 if (!entry)
3739 return 0;
3740
3741 entry->ifindex = ifindex;
3742
3743 spin_lock_irqsave(&rocker->internal_vlan_tbl_lock, lock_flags);
3744
3745 found = rocker_internal_vlan_tbl_find(rocker, ifindex);
3746 if (found) {
df6a2067 3747 kfree(entry);
9f6bbf7c
SF
3748 goto found;
3749 }
3750
3751 found = entry;
3752 hash_add(rocker->internal_vlan_tbl, &found->entry, found->ifindex);
3753
3754 for (i = 0; i < ROCKER_N_INTERNAL_VLANS; i++) {
3755 if (test_and_set_bit(i, rocker->internal_vlan_bitmap))
3756 continue;
3757 found->vlan_id = htons(ROCKER_INTERNAL_VLAN_ID_BASE + i);
3758 goto found;
3759 }
3760
3761 netdev_err(rocker_port->dev, "Out of internal VLAN IDs\n");
3762
3763found:
3764 found->ref_count++;
3765 spin_unlock_irqrestore(&rocker->internal_vlan_tbl_lock, lock_flags);
3766
3767 return found->vlan_id;
3768}
3769
e5054643
SH
3770static void
3771rocker_port_internal_vlan_id_put(const struct rocker_port *rocker_port,
3772 int ifindex)
9f6bbf7c
SF
3773{
3774 struct rocker *rocker = rocker_port->rocker;
3775 struct rocker_internal_vlan_tbl_entry *found;
3776 unsigned long lock_flags;
3777 unsigned long bit;
3778
3779 spin_lock_irqsave(&rocker->internal_vlan_tbl_lock, lock_flags);
3780
3781 found = rocker_internal_vlan_tbl_find(rocker, ifindex);
3782 if (!found) {
3783 netdev_err(rocker_port->dev,
3784 "ifindex (%d) not found in internal VLAN tbl\n",
3785 ifindex);
3786 goto not_found;
3787 }
3788
3789 if (--found->ref_count <= 0) {
3790 bit = ntohs(found->vlan_id) - ROCKER_INTERNAL_VLAN_ID_BASE;
3791 clear_bit(bit, rocker->internal_vlan_bitmap);
3792 hash_del(&found->entry);
df6a2067 3793 kfree(found);
9f6bbf7c
SF
3794 }
3795
3796not_found:
3797 spin_unlock_irqrestore(&rocker->internal_vlan_tbl_lock, lock_flags);
3798}
3799
c4f20321 3800static int rocker_port_fib_ipv4(struct rocker_port *rocker_port,
76c6f945 3801 struct switchdev_trans *trans, __be32 dst,
e5054643
SH
3802 int dst_len, const struct fib_info *fi,
3803 u32 tb_id, int flags)
c1beeef7 3804{
e5054643 3805 const struct fib_nh *nh;
c1beeef7
SF
3806 __be16 eth_type = htons(ETH_P_IP);
3807 __be32 dst_mask = inet_make_mask(dst_len);
3808 __be16 internal_vlan_id = rocker_port->internal_vlan_id;
3809 u32 priority = fi->fib_priority;
3810 enum rocker_of_dpa_table_id goto_tbl =
3811 ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
3812 u32 group_id;
3813 bool nh_on_port;
3814 bool has_gw;
3815 u32 index;
3816 int err;
3817
3818 /* XXX support ECMP */
3819
3820 nh = fi->fib_nh;
3821 nh_on_port = (fi->fib_dev == rocker_port->dev);
3822 has_gw = !!nh->nh_gw;
3823
3824 if (has_gw && nh_on_port) {
76c6f945 3825 err = rocker_port_ipv4_nh(rocker_port, trans, flags,
c1beeef7
SF
3826 nh->nh_gw, &index);
3827 if (err)
3828 return err;
3829
3830 group_id = ROCKER_GROUP_L3_UNICAST(index);
3831 } else {
3832 /* Send to CPU for processing */
3833 group_id = ROCKER_GROUP_L2_INTERFACE(internal_vlan_id, 0);
3834 }
3835
76c6f945 3836 err = rocker_flow_tbl_ucast4_routing(rocker_port, trans, eth_type, dst,
c1beeef7
SF
3837 dst_mask, priority, goto_tbl,
3838 group_id, flags);
3839 if (err)
3840 netdev_err(rocker_port->dev, "Error (%d) IPv4 route %pI4\n",
3841 err, &dst);
3842
3843 return err;
3844}
3845
4b8ac966
JP
3846/*****************
3847 * Net device ops
3848 *****************/
3849
3850static int rocker_port_open(struct net_device *dev)
3851{
3852 struct rocker_port *rocker_port = netdev_priv(dev);
3853 int err;
3854
3855 err = rocker_port_dma_rings_init(rocker_port);
3856 if (err)
3857 return err;
3858
3859 err = request_irq(rocker_msix_tx_vector(rocker_port),
3860 rocker_tx_irq_handler, 0,
3861 rocker_driver_name, rocker_port);
3862 if (err) {
3863 netdev_err(rocker_port->dev, "cannot assign tx irq\n");
3864 goto err_request_tx_irq;
3865 }
3866
3867 err = request_irq(rocker_msix_rx_vector(rocker_port),
3868 rocker_rx_irq_handler, 0,
3869 rocker_driver_name, rocker_port);
3870 if (err) {
3871 netdev_err(rocker_port->dev, "cannot assign rx irq\n");
3872 goto err_request_rx_irq;
3873 }
3874
76c6f945 3875 err = rocker_port_fwd_enable(rocker_port, NULL, 0);
6c707945 3876 if (err)
e47172ab 3877 goto err_fwd_enable;
6c707945 3878
4b8ac966
JP
3879 napi_enable(&rocker_port->napi_tx);
3880 napi_enable(&rocker_port->napi_rx);
c3055246
AK
3881 if (!dev->proto_down)
3882 rocker_port_set_enable(rocker_port, true);
4b8ac966
JP
3883 netif_start_queue(dev);
3884 return 0;
3885
e47172ab 3886err_fwd_enable:
6c707945 3887 free_irq(rocker_msix_rx_vector(rocker_port), rocker_port);
4b8ac966
JP
3888err_request_rx_irq:
3889 free_irq(rocker_msix_tx_vector(rocker_port), rocker_port);
3890err_request_tx_irq:
3891 rocker_port_dma_rings_fini(rocker_port);
3892 return err;
3893}
3894
3895static int rocker_port_stop(struct net_device *dev)
3896{
3897 struct rocker_port *rocker_port = netdev_priv(dev);
3898
3899 netif_stop_queue(dev);
3900 rocker_port_set_enable(rocker_port, false);
3901 napi_disable(&rocker_port->napi_rx);
3902 napi_disable(&rocker_port->napi_tx);
76c6f945 3903 rocker_port_fwd_disable(rocker_port, NULL,
f66feaa9 3904 ROCKER_OP_FLAG_NOWAIT);
4b8ac966
JP
3905 free_irq(rocker_msix_rx_vector(rocker_port), rocker_port);
3906 free_irq(rocker_msix_tx_vector(rocker_port), rocker_port);
3907 rocker_port_dma_rings_fini(rocker_port);
3908
3909 return 0;
3910}
3911
e5054643
SH
3912static void rocker_tx_desc_frags_unmap(const struct rocker_port *rocker_port,
3913 const struct rocker_desc_info *desc_info)
4b8ac966 3914{
e5054643 3915 const struct rocker *rocker = rocker_port->rocker;
4b8ac966 3916 struct pci_dev *pdev = rocker->pdev;
e5054643 3917 const struct rocker_tlv *attrs[ROCKER_TLV_TX_MAX + 1];
4b8ac966
JP
3918 struct rocker_tlv *attr;
3919 int rem;
3920
3921 rocker_tlv_parse_desc(attrs, ROCKER_TLV_TX_MAX, desc_info);
3922 if (!attrs[ROCKER_TLV_TX_FRAGS])
3923 return;
3924 rocker_tlv_for_each_nested(attr, attrs[ROCKER_TLV_TX_FRAGS], rem) {
e5054643 3925 const struct rocker_tlv *frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_MAX + 1];
4b8ac966
JP
3926 dma_addr_t dma_handle;
3927 size_t len;
3928
3929 if (rocker_tlv_type(attr) != ROCKER_TLV_TX_FRAG)
3930 continue;
3931 rocker_tlv_parse_nested(frag_attrs, ROCKER_TLV_TX_FRAG_ATTR_MAX,
3932 attr);
3933 if (!frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_ADDR] ||
3934 !frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_LEN])
3935 continue;
3936 dma_handle = rocker_tlv_get_u64(frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_ADDR]);
3937 len = rocker_tlv_get_u16(frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_LEN]);
3938 pci_unmap_single(pdev, dma_handle, len, DMA_TO_DEVICE);
3939 }
3940}
3941
e5054643 3942static int rocker_tx_desc_frag_map_put(const struct rocker_port *rocker_port,
4b8ac966
JP
3943 struct rocker_desc_info *desc_info,
3944 char *buf, size_t buf_len)
3945{
e5054643 3946 const struct rocker *rocker = rocker_port->rocker;
4b8ac966
JP
3947 struct pci_dev *pdev = rocker->pdev;
3948 dma_addr_t dma_handle;
3949 struct rocker_tlv *frag;
3950
3951 dma_handle = pci_map_single(pdev, buf, buf_len, DMA_TO_DEVICE);
3952 if (unlikely(pci_dma_mapping_error(pdev, dma_handle))) {
3953 if (net_ratelimit())
3954 netdev_err(rocker_port->dev, "failed to dma map tx frag\n");
3955 return -EIO;
3956 }
3957 frag = rocker_tlv_nest_start(desc_info, ROCKER_TLV_TX_FRAG);
3958 if (!frag)
3959 goto unmap_frag;
3960 if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_TX_FRAG_ATTR_ADDR,
3961 dma_handle))
3962 goto nest_cancel;
3963 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_TX_FRAG_ATTR_LEN,
3964 buf_len))
3965 goto nest_cancel;
3966 rocker_tlv_nest_end(desc_info, frag);
3967 return 0;
3968
3969nest_cancel:
3970 rocker_tlv_nest_cancel(desc_info, frag);
3971unmap_frag:
3972 pci_unmap_single(pdev, dma_handle, buf_len, DMA_TO_DEVICE);
3973 return -EMSGSIZE;
3974}
3975
3976static netdev_tx_t rocker_port_xmit(struct sk_buff *skb, struct net_device *dev)
3977{
3978 struct rocker_port *rocker_port = netdev_priv(dev);
3979 struct rocker *rocker = rocker_port->rocker;
3980 struct rocker_desc_info *desc_info;
3981 struct rocker_tlv *frags;
3982 int i;
3983 int err;
3984
3985 desc_info = rocker_desc_head_get(&rocker_port->tx_ring);
3986 if (unlikely(!desc_info)) {
3987 if (net_ratelimit())
3988 netdev_err(dev, "tx ring full when queue awake\n");
3989 return NETDEV_TX_BUSY;
3990 }
3991
3992 rocker_desc_cookie_ptr_set(desc_info, skb);
3993
3994 frags = rocker_tlv_nest_start(desc_info, ROCKER_TLV_TX_FRAGS);
3995 if (!frags)
3996 goto out;
3997 err = rocker_tx_desc_frag_map_put(rocker_port, desc_info,
3998 skb->data, skb_headlen(skb));
3999 if (err)
4000 goto nest_cancel;
95b9be64
JP
4001 if (skb_shinfo(skb)->nr_frags > ROCKER_TX_FRAGS_MAX) {
4002 err = skb_linearize(skb);
4003 if (err)
4004 goto unmap_frags;
4005 }
4b8ac966
JP
4006
4007 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
4008 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4009
4010 err = rocker_tx_desc_frag_map_put(rocker_port, desc_info,
4011 skb_frag_address(frag),
4012 skb_frag_size(frag));
4013 if (err)
4014 goto unmap_frags;
4015 }
4016 rocker_tlv_nest_end(desc_info, frags);
4017
4018 rocker_desc_gen_clear(desc_info);
4019 rocker_desc_head_set(rocker, &rocker_port->tx_ring, desc_info);
4020
4021 desc_info = rocker_desc_head_get(&rocker_port->tx_ring);
4022 if (!desc_info)
4023 netif_stop_queue(dev);
4024
4025 return NETDEV_TX_OK;
4026
4027unmap_frags:
4028 rocker_tx_desc_frags_unmap(rocker_port, desc_info);
4029nest_cancel:
4030 rocker_tlv_nest_cancel(desc_info, frags);
4031out:
4032 dev_kfree_skb(skb);
f2bbca51
DA
4033 dev->stats.tx_dropped++;
4034
4b8ac966
JP
4035 return NETDEV_TX_OK;
4036}
4037
4038static int rocker_port_set_mac_address(struct net_device *dev, void *p)
4039{
4040 struct sockaddr *addr = p;
4041 struct rocker_port *rocker_port = netdev_priv(dev);
4042 int err;
4043
4044 if (!is_valid_ether_addr(addr->sa_data))
4045 return -EADDRNOTAVAIL;
4046
4047 err = rocker_cmd_set_port_settings_macaddr(rocker_port, addr->sa_data);
4048 if (err)
4049 return err;
4050 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
4051 return 0;
4052}
4053
77a58c74
SF
4054static int rocker_port_change_mtu(struct net_device *dev, int new_mtu)
4055{
4056 struct rocker_port *rocker_port = netdev_priv(dev);
4057 int running = netif_running(dev);
4058 int err;
4059
4060#define ROCKER_PORT_MIN_MTU 68
4061#define ROCKER_PORT_MAX_MTU 9000
4062
4063 if (new_mtu < ROCKER_PORT_MIN_MTU || new_mtu > ROCKER_PORT_MAX_MTU)
4064 return -EINVAL;
4065
4066 if (running)
4067 rocker_port_stop(dev);
4068
4069 netdev_info(dev, "MTU change from %d to %d\n", dev->mtu, new_mtu);
4070 dev->mtu = new_mtu;
4071
4072 err = rocker_cmd_set_port_settings_mtu(rocker_port, new_mtu);
4073 if (err)
4074 return err;
4075
4076 if (running)
4077 err = rocker_port_open(dev);
4078
4079 return err;
4080}
4081
db19170b
DA
4082static int rocker_port_get_phys_port_name(struct net_device *dev,
4083 char *buf, size_t len)
4084{
4085 struct rocker_port *rocker_port = netdev_priv(dev);
4086 struct port_name name = { .buf = buf, .len = len };
4087 int err;
4088
76c6f945 4089 err = rocker_cmd_exec(rocker_port, NULL, 0,
db19170b
DA
4090 rocker_cmd_get_port_settings_prep, NULL,
4091 rocker_cmd_get_port_settings_phys_name_proc,
c4f20321 4092 &name);
db19170b
DA
4093
4094 return err ? -EOPNOTSUPP : 0;
4095}
4096
c3055246
AK
4097static int rocker_port_change_proto_down(struct net_device *dev,
4098 bool proto_down)
4099{
4100 struct rocker_port *rocker_port = netdev_priv(dev);
4101
4102 if (rocker_port->dev->flags & IFF_UP)
4103 rocker_port_set_enable(rocker_port, !proto_down);
4104 rocker_port->dev->proto_down = proto_down;
4105 return 0;
4106}
4107
dd19f83d
SF
4108static void rocker_port_neigh_destroy(struct neighbour *n)
4109{
4110 struct rocker_port *rocker_port = netdev_priv(n->dev);
4111 int flags = ROCKER_OP_FLAG_REMOVE | ROCKER_OP_FLAG_NOWAIT;
4112 __be32 ip_addr = *(__be32 *)n->primary_key;
4113
76c6f945 4114 rocker_port_ipv4_neigh(rocker_port, NULL,
dd19f83d
SF
4115 flags, ip_addr, n->ha);
4116}
4117
98237d43
SF
4118static const struct net_device_ops rocker_port_netdev_ops = {
4119 .ndo_open = rocker_port_open,
4120 .ndo_stop = rocker_port_stop,
4121 .ndo_start_xmit = rocker_port_xmit,
4122 .ndo_set_mac_address = rocker_port_set_mac_address,
77a58c74 4123 .ndo_change_mtu = rocker_port_change_mtu,
85fdb956 4124 .ndo_bridge_getlink = switchdev_port_bridge_getlink,
fc8f40d8 4125 .ndo_bridge_setlink = switchdev_port_bridge_setlink,
54ba5a0b 4126 .ndo_bridge_dellink = switchdev_port_bridge_dellink,
45d4122c
SS
4127 .ndo_fdb_add = switchdev_port_fdb_add,
4128 .ndo_fdb_del = switchdev_port_fdb_del,
4129 .ndo_fdb_dump = switchdev_port_fdb_dump,
db19170b 4130 .ndo_get_phys_port_name = rocker_port_get_phys_port_name,
c3055246 4131 .ndo_change_proto_down = rocker_port_change_proto_down,
dd19f83d 4132 .ndo_neigh_destroy = rocker_port_neigh_destroy,
98237d43
SF
4133};
4134
4135/********************
4136 * swdev interface
4137 ********************/
4138
f8e20a9f
SF
4139static int rocker_port_attr_get(struct net_device *dev,
4140 struct switchdev_attr *attr)
4b8ac966 4141{
e5054643
SH
4142 const struct rocker_port *rocker_port = netdev_priv(dev);
4143 const struct rocker *rocker = rocker_port->rocker;
4b8ac966 4144
f8e20a9f 4145 switch (attr->id) {
1f868398 4146 case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
42275bd8
SF
4147 attr->u.ppid.id_len = sizeof(rocker->hw.id);
4148 memcpy(&attr->u.ppid.id, &rocker->hw.id, attr->u.ppid.id_len);
f8e20a9f 4149 break;
1f868398 4150 case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
42275bd8 4151 attr->u.brport_flags = rocker_port->brport_flags;
6004c867 4152 break;
f8e20a9f
SF
4153 default:
4154 return -EOPNOTSUPP;
4155 }
4156
4b8ac966
JP
4157 return 0;
4158}
4159
6004c867 4160static int rocker_port_brport_flags_set(struct rocker_port *rocker_port,
76c6f945 4161 struct switchdev_trans *trans,
6004c867
SF
4162 unsigned long brport_flags)
4163{
4164 unsigned long orig_flags;
4165 int err = 0;
4166
4167 orig_flags = rocker_port->brport_flags;
4168 rocker_port->brport_flags = brport_flags;
4169 if ((orig_flags ^ rocker_port->brport_flags) & BR_LEARNING)
76c6f945 4170 err = rocker_port_set_learning(rocker_port, trans);
6004c867 4171
76c6f945 4172 if (switchdev_trans_ph_prepare(trans))
6004c867
SF
4173 rocker_port->brport_flags = orig_flags;
4174
4175 return err;
4176}
4177
d0cf57f9
SF
4178static int rocker_port_bridge_ageing_time(struct rocker_port *rocker_port,
4179 struct switchdev_trans *trans,
4180 u32 ageing_time)
4181{
4182 if (!switchdev_trans_ph_prepare(trans)) {
4183 rocker_port->ageing_time = clock_t_to_jiffies(ageing_time);
4184 mod_timer(&rocker_port->rocker->fdb_cleanup_timer, jiffies);
4185 }
4186
4187 return 0;
4188}
4189
c4f20321 4190static int rocker_port_attr_set(struct net_device *dev,
f7fadf30 4191 const struct switchdev_attr *attr,
7ea6eb3f 4192 struct switchdev_trans *trans)
c4f20321
SF
4193{
4194 struct rocker_port *rocker_port = netdev_priv(dev);
4195 int err = 0;
4196
c4f20321 4197 switch (attr->id) {
1f868398 4198 case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
d33eeb64 4199 err = rocker_port_stp_update(rocker_port, trans, 0,
42275bd8 4200 attr->u.stp_state);
35636062 4201 break;
1f868398 4202 case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
76c6f945 4203 err = rocker_port_brport_flags_set(rocker_port, trans,
42275bd8 4204 attr->u.brport_flags);
6004c867 4205 break;
d0cf57f9
SF
4206 case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
4207 err = rocker_port_bridge_ageing_time(rocker_port, trans,
4208 attr->u.ageing_time);
4209 break;
c4f20321
SF
4210 default:
4211 err = -EOPNOTSUPP;
4212 break;
4213 }
4214
4215 return err;
6c707945
SF
4216}
4217
9228ad26 4218static int rocker_port_vlan_add(struct rocker_port *rocker_port,
76c6f945
JP
4219 struct switchdev_trans *trans,
4220 u16 vid, u16 flags)
9228ad26
SF
4221{
4222 int err;
4223
4224 /* XXX deal with flags for PVID and untagged */
4225
76c6f945 4226 err = rocker_port_vlan(rocker_port, trans, 0, vid);
9228ad26
SF
4227 if (err)
4228 return err;
4229
76c6f945 4230 err = rocker_port_router_mac(rocker_port, trans, 0, htons(vid));
cec04a60 4231 if (err)
76c6f945 4232 rocker_port_vlan(rocker_port, trans,
cec04a60
SF
4233 ROCKER_OP_FLAG_REMOVE, vid);
4234
4235 return err;
9228ad26
SF
4236}
4237
4238static int rocker_port_vlans_add(struct rocker_port *rocker_port,
76c6f945 4239 struct switchdev_trans *trans,
8f24f309 4240 const struct switchdev_obj_port_vlan *vlan)
9228ad26
SF
4241{
4242 u16 vid;
4243 int err;
4244
3e3a78b4 4245 for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
76c6f945 4246 err = rocker_port_vlan_add(rocker_port, trans,
9228ad26
SF
4247 vid, vlan->flags);
4248 if (err)
4249 return err;
4250 }
4251
4252 return 0;
4253}
4254
45d4122c 4255static int rocker_port_fdb_add(struct rocker_port *rocker_port,
76c6f945 4256 struct switchdev_trans *trans,
52ba57cf 4257 const struct switchdev_obj_port_fdb *fdb)
45d4122c
SS
4258{
4259 __be16 vlan_id = rocker_port_vid_to_vlan(rocker_port, fdb->vid, NULL);
4260 int flags = 0;
4261
4262 if (!rocker_port_is_bridged(rocker_port))
4263 return -EINVAL;
4264
76c6f945 4265 return rocker_port_fdb(rocker_port, trans, fdb->addr, vlan_id, flags);
45d4122c
SS
4266}
4267
9228ad26 4268static int rocker_port_obj_add(struct net_device *dev,
648b4a99 4269 const struct switchdev_obj *obj,
7ea6eb3f 4270 struct switchdev_trans *trans)
9228ad26
SF
4271{
4272 struct rocker_port *rocker_port = netdev_priv(dev);
e5054643 4273 const struct switchdev_obj_ipv4_fib *fib4;
9228ad26
SF
4274 int err = 0;
4275
9e8f4a54 4276 switch (obj->id) {
57d80838 4277 case SWITCHDEV_OBJ_ID_PORT_VLAN:
648b4a99
JP
4278 err = rocker_port_vlans_add(rocker_port, trans,
4279 SWITCHDEV_OBJ_PORT_VLAN(obj));
9228ad26 4280 break;
57d80838 4281 case SWITCHDEV_OBJ_ID_IPV4_FIB:
648b4a99 4282 fib4 = SWITCHDEV_OBJ_IPV4_FIB(obj);
76c6f945 4283 err = rocker_port_fib_ipv4(rocker_port, trans,
7a7ee531 4284 htonl(fib4->dst), fib4->dst_len,
850d0cbc 4285 &fib4->fi, fib4->tb_id, 0);
58c2cb16 4286 break;
57d80838 4287 case SWITCHDEV_OBJ_ID_PORT_FDB:
648b4a99
JP
4288 err = rocker_port_fdb_add(rocker_port, trans,
4289 SWITCHDEV_OBJ_PORT_FDB(obj));
45d4122c 4290 break;
9228ad26
SF
4291 default:
4292 err = -EOPNOTSUPP;
4293 break;
4294 }
4295
4296 return err;
4297}
4298
4299static int rocker_port_vlan_del(struct rocker_port *rocker_port,
4300 u16 vid, u16 flags)
4301{
4302 int err;
4303
76c6f945 4304 err = rocker_port_router_mac(rocker_port, NULL,
9228ad26
SF
4305 ROCKER_OP_FLAG_REMOVE, htons(vid));
4306 if (err)
4307 return err;
4308
76c6f945 4309 return rocker_port_vlan(rocker_port, NULL,
9228ad26
SF
4310 ROCKER_OP_FLAG_REMOVE, vid);
4311}
4312
4313static int rocker_port_vlans_del(struct rocker_port *rocker_port,
8f24f309 4314 const struct switchdev_obj_port_vlan *vlan)
9228ad26
SF
4315{
4316 u16 vid;
4317 int err;
4318
3e3a78b4 4319 for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
9228ad26
SF
4320 err = rocker_port_vlan_del(rocker_port, vid, vlan->flags);
4321 if (err)
4322 return err;
4323 }
4324
4325 return 0;
4326}
4327
45d4122c 4328static int rocker_port_fdb_del(struct rocker_port *rocker_port,
76c6f945 4329 struct switchdev_trans *trans,
52ba57cf 4330 const struct switchdev_obj_port_fdb *fdb)
45d4122c
SS
4331{
4332 __be16 vlan_id = rocker_port_vid_to_vlan(rocker_port, fdb->vid, NULL);
d33eeb64 4333 int flags = ROCKER_OP_FLAG_REMOVE;
45d4122c
SS
4334
4335 if (!rocker_port_is_bridged(rocker_port))
4336 return -EINVAL;
4337
76c6f945 4338 return rocker_port_fdb(rocker_port, trans, fdb->addr, vlan_id, flags);
45d4122c
SS
4339}
4340
9228ad26 4341static int rocker_port_obj_del(struct net_device *dev,
648b4a99 4342 const struct switchdev_obj *obj)
9228ad26
SF
4343{
4344 struct rocker_port *rocker_port = netdev_priv(dev);
e5054643 4345 const struct switchdev_obj_ipv4_fib *fib4;
9228ad26
SF
4346 int err = 0;
4347
9e8f4a54 4348 switch (obj->id) {
57d80838 4349 case SWITCHDEV_OBJ_ID_PORT_VLAN:
648b4a99
JP
4350 err = rocker_port_vlans_del(rocker_port,
4351 SWITCHDEV_OBJ_PORT_VLAN(obj));
9228ad26 4352 break;
57d80838 4353 case SWITCHDEV_OBJ_ID_IPV4_FIB:
648b4a99 4354 fib4 = SWITCHDEV_OBJ_IPV4_FIB(obj);
76c6f945 4355 err = rocker_port_fib_ipv4(rocker_port, NULL,
7a7ee531 4356 htonl(fib4->dst), fib4->dst_len,
850d0cbc 4357 &fib4->fi, fib4->tb_id,
7a7ee531 4358 ROCKER_OP_FLAG_REMOVE);
58c2cb16 4359 break;
57d80838 4360 case SWITCHDEV_OBJ_ID_PORT_FDB:
648b4a99
JP
4361 err = rocker_port_fdb_del(rocker_port, NULL,
4362 SWITCHDEV_OBJ_PORT_FDB(obj));
45d4122c
SS
4363 break;
4364 default:
4365 err = -EOPNOTSUPP;
4366 break;
4367 }
4368
4369 return err;
4370}
4371
e5054643 4372static int rocker_port_fdb_dump(const struct rocker_port *rocker_port,
52ba57cf 4373 struct switchdev_obj_port_fdb *fdb,
648b4a99 4374 switchdev_obj_dump_cb_t *cb)
45d4122c
SS
4375{
4376 struct rocker *rocker = rocker_port->rocker;
45d4122c
SS
4377 struct rocker_fdb_tbl_entry *found;
4378 struct hlist_node *tmp;
4379 unsigned long lock_flags;
4380 int bkt;
4381 int err = 0;
4382
4383 spin_lock_irqsave(&rocker->fdb_tbl_lock, lock_flags);
4384 hash_for_each_safe(rocker->fdb_tbl, bkt, tmp, found, entry) {
4c660496 4385 if (found->key.rocker_port != rocker_port)
45d4122c 4386 continue;
850d0cbc 4387 ether_addr_copy(fdb->addr, found->key.addr);
ce80e7bc 4388 fdb->ndm_state = NUD_REACHABLE;
45d4122c
SS
4389 fdb->vid = rocker_port_vlan_to_vid(rocker_port,
4390 found->key.vlan_id);
648b4a99 4391 err = cb(&fdb->obj);
45d4122c
SS
4392 if (err)
4393 break;
4394 }
4395 spin_unlock_irqrestore(&rocker->fdb_tbl_lock, lock_flags);
4396
4397 return err;
4398}
4399
7d4f8d87 4400static int rocker_port_vlan_dump(const struct rocker_port *rocker_port,
8f24f309 4401 struct switchdev_obj_port_vlan *vlan,
648b4a99 4402 switchdev_obj_dump_cb_t *cb)
7d4f8d87 4403{
7d4f8d87
SF
4404 u16 vid;
4405 int err = 0;
4406
4407 for (vid = 1; vid < VLAN_N_VID; vid++) {
4408 if (!test_bit(vid, rocker_port->vlan_bitmap))
4409 continue;
4410 vlan->flags = 0;
4411 if (rocker_vlan_id_is_internal(htons(vid)))
4412 vlan->flags |= BRIDGE_VLAN_INFO_PVID;
11ce2ba3
JP
4413 vlan->vid_begin = vid;
4414 vlan->vid_end = vid;
648b4a99 4415 err = cb(&vlan->obj);
7d4f8d87
SF
4416 if (err)
4417 break;
4418 }
4419
4420 return err;
4421}
4422
45d4122c 4423static int rocker_port_obj_dump(struct net_device *dev,
648b4a99
JP
4424 struct switchdev_obj *obj,
4425 switchdev_obj_dump_cb_t *cb)
45d4122c 4426{
e5054643 4427 const struct rocker_port *rocker_port = netdev_priv(dev);
45d4122c
SS
4428 int err = 0;
4429
9e8f4a54 4430 switch (obj->id) {
57d80838 4431 case SWITCHDEV_OBJ_ID_PORT_FDB:
648b4a99
JP
4432 err = rocker_port_fdb_dump(rocker_port,
4433 SWITCHDEV_OBJ_PORT_FDB(obj), cb);
45d4122c 4434 break;
57d80838 4435 case SWITCHDEV_OBJ_ID_PORT_VLAN:
648b4a99
JP
4436 err = rocker_port_vlan_dump(rocker_port,
4437 SWITCHDEV_OBJ_PORT_VLAN(obj), cb);
7d4f8d87 4438 break;
9228ad26
SF
4439 default:
4440 err = -EOPNOTSUPP;
4441 break;
4442 }
4443
4444 return err;
4445}
4446
9d47c0a2 4447static const struct switchdev_ops rocker_port_switchdev_ops = {
f8e20a9f 4448 .switchdev_port_attr_get = rocker_port_attr_get,
35636062 4449 .switchdev_port_attr_set = rocker_port_attr_set,
9228ad26
SF
4450 .switchdev_port_obj_add = rocker_port_obj_add,
4451 .switchdev_port_obj_del = rocker_port_obj_del,
45d4122c 4452 .switchdev_port_obj_dump = rocker_port_obj_dump,
4b8ac966
JP
4453};
4454
4455/********************
4456 * ethtool interface
4457 ********************/
4458
4459static int rocker_port_get_settings(struct net_device *dev,
4460 struct ethtool_cmd *ecmd)
4461{
4462 struct rocker_port *rocker_port = netdev_priv(dev);
4463
4464 return rocker_cmd_get_port_settings_ethtool(rocker_port, ecmd);
4465}
4466
4467static int rocker_port_set_settings(struct net_device *dev,
4468 struct ethtool_cmd *ecmd)
4469{
4470 struct rocker_port *rocker_port = netdev_priv(dev);
4471
4472 return rocker_cmd_set_port_settings_ethtool(rocker_port, ecmd);
4473}
4474
4475static void rocker_port_get_drvinfo(struct net_device *dev,
4476 struct ethtool_drvinfo *drvinfo)
4477{
4478 strlcpy(drvinfo->driver, rocker_driver_name, sizeof(drvinfo->driver));
4479 strlcpy(drvinfo->version, UTS_RELEASE, sizeof(drvinfo->version));
4480}
4481
9766e97a
DA
4482static struct rocker_port_stats {
4483 char str[ETH_GSTRING_LEN];
4484 int type;
4485} rocker_port_stats[] = {
4486 { "rx_packets", ROCKER_TLV_CMD_PORT_STATS_RX_PKTS, },
4487 { "rx_bytes", ROCKER_TLV_CMD_PORT_STATS_RX_BYTES, },
4488 { "rx_dropped", ROCKER_TLV_CMD_PORT_STATS_RX_DROPPED, },
4489 { "rx_errors", ROCKER_TLV_CMD_PORT_STATS_RX_ERRORS, },
4490
4491 { "tx_packets", ROCKER_TLV_CMD_PORT_STATS_TX_PKTS, },
4492 { "tx_bytes", ROCKER_TLV_CMD_PORT_STATS_TX_BYTES, },
4493 { "tx_dropped", ROCKER_TLV_CMD_PORT_STATS_TX_DROPPED, },
4494 { "tx_errors", ROCKER_TLV_CMD_PORT_STATS_TX_ERRORS, },
4495};
4496
4497#define ROCKER_PORT_STATS_LEN ARRAY_SIZE(rocker_port_stats)
4498
4499static void rocker_port_get_strings(struct net_device *netdev, u32 stringset,
4500 u8 *data)
4501{
4502 u8 *p = data;
4503 int i;
4504
4505 switch (stringset) {
4506 case ETH_SS_STATS:
4507 for (i = 0; i < ARRAY_SIZE(rocker_port_stats); i++) {
4508 memcpy(p, rocker_port_stats[i].str, ETH_GSTRING_LEN);
4509 p += ETH_GSTRING_LEN;
4510 }
4511 break;
4512 }
4513}
4514
4515static int
534ba6a8 4516rocker_cmd_get_port_stats_prep(const struct rocker_port *rocker_port,
9766e97a
DA
4517 struct rocker_desc_info *desc_info,
4518 void *priv)
4519{
4520 struct rocker_tlv *cmd_stats;
4521
4522 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
4523 ROCKER_TLV_CMD_TYPE_GET_PORT_STATS))
4524 return -EMSGSIZE;
4525
4526 cmd_stats = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
4527 if (!cmd_stats)
4528 return -EMSGSIZE;
4529
4a6bb6d3
SF
4530 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_STATS_PPORT,
4531 rocker_port->pport))
9766e97a
DA
4532 return -EMSGSIZE;
4533
4534 rocker_tlv_nest_end(desc_info, cmd_stats);
4535
4536 return 0;
4537}
4538
4539static int
534ba6a8 4540rocker_cmd_get_port_stats_ethtool_proc(const struct rocker_port *rocker_port,
e5054643 4541 const struct rocker_desc_info *desc_info,
9766e97a
DA
4542 void *priv)
4543{
e5054643
SH
4544 const struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
4545 const struct rocker_tlv *stats_attrs[ROCKER_TLV_CMD_PORT_STATS_MAX + 1];
4546 const struct rocker_tlv *pattr;
4a6bb6d3 4547 u32 pport;
9766e97a
DA
4548 u64 *data = priv;
4549 int i;
4550
4551 rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info);
4552
4553 if (!attrs[ROCKER_TLV_CMD_INFO])
4554 return -EIO;
4555
4556 rocker_tlv_parse_nested(stats_attrs, ROCKER_TLV_CMD_PORT_STATS_MAX,
4557 attrs[ROCKER_TLV_CMD_INFO]);
4558
4a6bb6d3 4559 if (!stats_attrs[ROCKER_TLV_CMD_PORT_STATS_PPORT])
9766e97a
DA
4560 return -EIO;
4561
4a6bb6d3
SF
4562 pport = rocker_tlv_get_u32(stats_attrs[ROCKER_TLV_CMD_PORT_STATS_PPORT]);
4563 if (pport != rocker_port->pport)
9766e97a
DA
4564 return -EIO;
4565
4566 for (i = 0; i < ARRAY_SIZE(rocker_port_stats); i++) {
4567 pattr = stats_attrs[rocker_port_stats[i].type];
4568 if (!pattr)
4569 continue;
4570
4571 data[i] = rocker_tlv_get_u64(pattr);
4572 }
4573
4574 return 0;
4575}
4576
4577static int rocker_cmd_get_port_stats_ethtool(struct rocker_port *rocker_port,
4578 void *priv)
4579{
76c6f945 4580 return rocker_cmd_exec(rocker_port, NULL, 0,
9766e97a
DA
4581 rocker_cmd_get_port_stats_prep, NULL,
4582 rocker_cmd_get_port_stats_ethtool_proc,
c4f20321 4583 priv);
9766e97a
DA
4584}
4585
4586static void rocker_port_get_stats(struct net_device *dev,
4587 struct ethtool_stats *stats, u64 *data)
4588{
4589 struct rocker_port *rocker_port = netdev_priv(dev);
4590
4591 if (rocker_cmd_get_port_stats_ethtool(rocker_port, data) != 0) {
4592 int i;
4593
4594 for (i = 0; i < ARRAY_SIZE(rocker_port_stats); ++i)
4595 data[i] = 0;
4596 }
9766e97a
DA
4597}
4598
4599static int rocker_port_get_sset_count(struct net_device *netdev, int sset)
4600{
4601 switch (sset) {
4602 case ETH_SS_STATS:
4603 return ROCKER_PORT_STATS_LEN;
4604 default:
4605 return -EOPNOTSUPP;
4606 }
4607}
4608
4b8ac966
JP
4609static const struct ethtool_ops rocker_port_ethtool_ops = {
4610 .get_settings = rocker_port_get_settings,
4611 .set_settings = rocker_port_set_settings,
4612 .get_drvinfo = rocker_port_get_drvinfo,
4613 .get_link = ethtool_op_get_link,
9766e97a
DA
4614 .get_strings = rocker_port_get_strings,
4615 .get_ethtool_stats = rocker_port_get_stats,
4616 .get_sset_count = rocker_port_get_sset_count,
4b8ac966
JP
4617};
4618
4619/*****************
4620 * NAPI interface
4621 *****************/
4622
4623static struct rocker_port *rocker_port_napi_tx_get(struct napi_struct *napi)
4624{
4625 return container_of(napi, struct rocker_port, napi_tx);
4626}
4627
4628static int rocker_port_poll_tx(struct napi_struct *napi, int budget)
4629{
4630 struct rocker_port *rocker_port = rocker_port_napi_tx_get(napi);
e5054643
SH
4631 const struct rocker *rocker = rocker_port->rocker;
4632 const struct rocker_desc_info *desc_info;
4b8ac966
JP
4633 u32 credits = 0;
4634 int err;
4635
4636 /* Cleanup tx descriptors */
4637 while ((desc_info = rocker_desc_tail_get(&rocker_port->tx_ring))) {
f2bbca51
DA
4638 struct sk_buff *skb;
4639
4b8ac966
JP
4640 err = rocker_desc_err(desc_info);
4641 if (err && net_ratelimit())
4642 netdev_err(rocker_port->dev, "tx desc received with err %d\n",
4643 err);
4644 rocker_tx_desc_frags_unmap(rocker_port, desc_info);
f2bbca51
DA
4645
4646 skb = rocker_desc_cookie_ptr_get(desc_info);
4647 if (err == 0) {
4648 rocker_port->dev->stats.tx_packets++;
4649 rocker_port->dev->stats.tx_bytes += skb->len;
4725ceb9 4650 } else {
f2bbca51 4651 rocker_port->dev->stats.tx_errors++;
4725ceb9 4652 }
f2bbca51
DA
4653
4654 dev_kfree_skb_any(skb);
4b8ac966
JP
4655 credits++;
4656 }
4657
4658 if (credits && netif_queue_stopped(rocker_port->dev))
4659 netif_wake_queue(rocker_port->dev);
4660
4661 napi_complete(napi);
4662 rocker_dma_ring_credits_set(rocker, &rocker_port->tx_ring, credits);
4663
4664 return 0;
4665}
4666
e5054643
SH
4667static int rocker_port_rx_proc(const struct rocker *rocker,
4668 const struct rocker_port *rocker_port,
4b8ac966
JP
4669 struct rocker_desc_info *desc_info)
4670{
e5054643 4671 const struct rocker_tlv *attrs[ROCKER_TLV_RX_MAX + 1];
4b8ac966
JP
4672 struct sk_buff *skb = rocker_desc_cookie_ptr_get(desc_info);
4673 size_t rx_len;
3f98a8e6 4674 u16 rx_flags = 0;
4b8ac966
JP
4675
4676 if (!skb)
4677 return -ENOENT;
4678
4679 rocker_tlv_parse_desc(attrs, ROCKER_TLV_RX_MAX, desc_info);
4680 if (!attrs[ROCKER_TLV_RX_FRAG_LEN])
4681 return -EINVAL;
3f98a8e6
SF
4682 if (attrs[ROCKER_TLV_RX_FLAGS])
4683 rx_flags = rocker_tlv_get_u16(attrs[ROCKER_TLV_RX_FLAGS]);
4b8ac966
JP
4684
4685 rocker_dma_rx_ring_skb_unmap(rocker, attrs);
4686
4687 rx_len = rocker_tlv_get_u16(attrs[ROCKER_TLV_RX_FRAG_LEN]);
4688 skb_put(skb, rx_len);
4689 skb->protocol = eth_type_trans(skb, rocker_port->dev);
f2bbca51 4690
3f98a8e6
SF
4691 if (rx_flags & ROCKER_RX_FLAGS_FWD_OFFLOAD)
4692 skb->offload_fwd_mark = rocker_port->dev->offload_fwd_mark;
4693
f2bbca51
DA
4694 rocker_port->dev->stats.rx_packets++;
4695 rocker_port->dev->stats.rx_bytes += skb->len;
4696
4b8ac966
JP
4697 netif_receive_skb(skb);
4698
534ba6a8 4699 return rocker_dma_rx_ring_skb_alloc(rocker_port, desc_info);
4b8ac966
JP
4700}
4701
4702static struct rocker_port *rocker_port_napi_rx_get(struct napi_struct *napi)
4703{
4704 return container_of(napi, struct rocker_port, napi_rx);
4705}
4706
4707static int rocker_port_poll_rx(struct napi_struct *napi, int budget)
4708{
4709 struct rocker_port *rocker_port = rocker_port_napi_rx_get(napi);
e5054643 4710 const struct rocker *rocker = rocker_port->rocker;
4b8ac966
JP
4711 struct rocker_desc_info *desc_info;
4712 u32 credits = 0;
4713 int err;
4714
4715 /* Process rx descriptors */
4716 while (credits < budget &&
4717 (desc_info = rocker_desc_tail_get(&rocker_port->rx_ring))) {
4718 err = rocker_desc_err(desc_info);
4719 if (err) {
4720 if (net_ratelimit())
4721 netdev_err(rocker_port->dev, "rx desc received with err %d\n",
4722 err);
4723 } else {
4724 err = rocker_port_rx_proc(rocker, rocker_port,
4725 desc_info);
4726 if (err && net_ratelimit())
4727 netdev_err(rocker_port->dev, "rx processing failed with err %d\n",
4728 err);
4729 }
f2bbca51
DA
4730 if (err)
4731 rocker_port->dev->stats.rx_errors++;
4732
4b8ac966
JP
4733 rocker_desc_gen_clear(desc_info);
4734 rocker_desc_head_set(rocker, &rocker_port->rx_ring, desc_info);
4735 credits++;
4736 }
4737
4738 if (credits < budget)
4739 napi_complete(napi);
4740
4741 rocker_dma_ring_credits_set(rocker, &rocker_port->rx_ring, credits);
4742
4743 return credits;
4744}
4745
4746/*****************
4747 * PCI driver ops
4748 *****************/
4749
e5054643 4750static void rocker_carrier_init(const struct rocker_port *rocker_port)
4b8ac966 4751{
e5054643 4752 const struct rocker *rocker = rocker_port->rocker;
4b8ac966
JP
4753 u64 link_status = rocker_read64(rocker, PORT_PHYS_LINK_STATUS);
4754 bool link_up;
4755
4a6bb6d3 4756 link_up = link_status & (1 << rocker_port->pport);
4b8ac966
JP
4757 if (link_up)
4758 netif_carrier_on(rocker_port->dev);
4759 else
4760 netif_carrier_off(rocker_port->dev);
4761}
4762
e5054643 4763static void rocker_remove_ports(const struct rocker *rocker)
4b8ac966 4764{
9f6bbf7c 4765 struct rocker_port *rocker_port;
4b8ac966
JP
4766 int i;
4767
9f6bbf7c
SF
4768 for (i = 0; i < rocker->port_count; i++) {
4769 rocker_port = rocker->ports[i];
a0720310
SF
4770 if (!rocker_port)
4771 continue;
76c6f945 4772 rocker_port_ig_tbl(rocker_port, NULL, ROCKER_OP_FLAG_REMOVE);
9f6bbf7c 4773 unregister_netdev(rocker_port->dev);
1ebd47ef 4774 free_netdev(rocker_port->dev);
9f6bbf7c 4775 }
4b8ac966
JP
4776 kfree(rocker->ports);
4777}
4778
534ba6a8 4779static void rocker_port_dev_addr_init(struct rocker_port *rocker_port)
4b8ac966 4780{
534ba6a8 4781 const struct rocker *rocker = rocker_port->rocker;
e5054643 4782 const struct pci_dev *pdev = rocker->pdev;
4b8ac966
JP
4783 int err;
4784
4785 err = rocker_cmd_get_port_settings_macaddr(rocker_port,
4786 rocker_port->dev->dev_addr);
4787 if (err) {
4788 dev_warn(&pdev->dev, "failed to get mac address, using random\n");
4789 eth_hw_addr_random(rocker_port->dev);
4790 }
4791}
4792
4793static int rocker_probe_port(struct rocker *rocker, unsigned int port_number)
4794{
e5054643 4795 const struct pci_dev *pdev = rocker->pdev;
4b8ac966
JP
4796 struct rocker_port *rocker_port;
4797 struct net_device *dev;
bcfd7801 4798 u16 untagged_vid = 0;
4b8ac966
JP
4799 int err;
4800
4801 dev = alloc_etherdev(sizeof(struct rocker_port));
4802 if (!dev)
4803 return -ENOMEM;
4804 rocker_port = netdev_priv(dev);
4805 rocker_port->dev = dev;
4806 rocker_port->rocker = rocker;
4807 rocker_port->port_number = port_number;
4a6bb6d3 4808 rocker_port->pport = port_number + 1;
5111f80c 4809 rocker_port->brport_flags = BR_LEARNING | BR_LEARNING_SYNC;
e7335703 4810 rocker_port->ageing_time = BR_DEFAULT_AGEING_TIME;
4b8ac966 4811
534ba6a8 4812 rocker_port_dev_addr_init(rocker_port);
4b8ac966
JP
4813 dev->netdev_ops = &rocker_port_netdev_ops;
4814 dev->ethtool_ops = &rocker_port_ethtool_ops;
9d47c0a2 4815 dev->switchdev_ops = &rocker_port_switchdev_ops;
d64b5e85 4816 netif_tx_napi_add(dev, &rocker_port->napi_tx, rocker_port_poll_tx,
11ce2ba3 4817 NAPI_POLL_WEIGHT);
4b8ac966
JP
4818 netif_napi_add(dev, &rocker_port->napi_rx, rocker_port_poll_rx,
4819 NAPI_POLL_WEIGHT);
4820 rocker_carrier_init(rocker_port);
4821
21518a6e 4822 dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_SG;
4b8ac966
JP
4823
4824 err = register_netdev(dev);
4825 if (err) {
4826 dev_err(&pdev->dev, "register_netdev failed\n");
4827 goto err_register_netdev;
4828 }
4829 rocker->ports[port_number] = rocker_port;
4830
3f98a8e6
SF
4831 switchdev_port_fwd_mark_set(rocker_port->dev, NULL, false);
4832
76c6f945 4833 rocker_port_set_learning(rocker_port, NULL);
5111f80c 4834
76c6f945 4835 err = rocker_port_ig_tbl(rocker_port, NULL, 0);
9f6bbf7c 4836 if (err) {
ff147028 4837 netdev_err(rocker_port->dev, "install ig port table failed\n");
9f6bbf7c
SF
4838 goto err_port_ig_tbl;
4839 }
4840
bcfd7801
SF
4841 rocker_port->internal_vlan_id =
4842 rocker_port_internal_vlan_id_get(rocker_port, dev->ifindex);
4843
76c6f945 4844 err = rocker_port_vlan_add(rocker_port, NULL, untagged_vid, 0);
bcfd7801
SF
4845 if (err) {
4846 netdev_err(rocker_port->dev, "install untagged VLAN failed\n");
4847 goto err_untagged_vlan;
4848 }
4849
4b8ac966
JP
4850 return 0;
4851
bcfd7801 4852err_untagged_vlan:
76c6f945 4853 rocker_port_ig_tbl(rocker_port, NULL, ROCKER_OP_FLAG_REMOVE);
9f6bbf7c 4854err_port_ig_tbl:
6c4f7780 4855 rocker->ports[port_number] = NULL;
9f6bbf7c 4856 unregister_netdev(dev);
4b8ac966
JP
4857err_register_netdev:
4858 free_netdev(dev);
4859 return err;
4860}
4861
4862static int rocker_probe_ports(struct rocker *rocker)
4863{
4864 int i;
4865 size_t alloc_size;
4866 int err;
4867
4868 alloc_size = sizeof(struct rocker_port *) * rocker->port_count;
27b808cb 4869 rocker->ports = kzalloc(alloc_size, GFP_KERNEL);
e65ad3be
DC
4870 if (!rocker->ports)
4871 return -ENOMEM;
4b8ac966
JP
4872 for (i = 0; i < rocker->port_count; i++) {
4873 err = rocker_probe_port(rocker, i);
4874 if (err)
4875 goto remove_ports;
4876 }
4877 return 0;
4878
4879remove_ports:
4880 rocker_remove_ports(rocker);
4881 return err;
4882}
4883
4884static int rocker_msix_init(struct rocker *rocker)
4885{
4886 struct pci_dev *pdev = rocker->pdev;
4887 int msix_entries;
4888 int i;
4889 int err;
4890
4891 msix_entries = pci_msix_vec_count(pdev);
4892 if (msix_entries < 0)
4893 return msix_entries;
4894
4895 if (msix_entries != ROCKER_MSIX_VEC_COUNT(rocker->port_count))
4896 return -EINVAL;
4897
4898 rocker->msix_entries = kmalloc_array(msix_entries,
4899 sizeof(struct msix_entry),
4900 GFP_KERNEL);
4901 if (!rocker->msix_entries)
4902 return -ENOMEM;
4903
4904 for (i = 0; i < msix_entries; i++)
4905 rocker->msix_entries[i].entry = i;
4906
4907 err = pci_enable_msix_exact(pdev, rocker->msix_entries, msix_entries);
4908 if (err < 0)
4909 goto err_enable_msix;
4910
4911 return 0;
4912
4913err_enable_msix:
4914 kfree(rocker->msix_entries);
4915 return err;
4916}
4917
e5054643 4918static void rocker_msix_fini(const struct rocker *rocker)
4b8ac966
JP
4919{
4920 pci_disable_msix(rocker->pdev);
4921 kfree(rocker->msix_entries);
4922}
4923
4924static int rocker_probe(struct pci_dev *pdev, const struct pci_device_id *id)
4925{
4926 struct rocker *rocker;
4927 int err;
4928
4929 rocker = kzalloc(sizeof(*rocker), GFP_KERNEL);
4930 if (!rocker)
4931 return -ENOMEM;
4932
4933 err = pci_enable_device(pdev);
4934 if (err) {
4935 dev_err(&pdev->dev, "pci_enable_device failed\n");
4936 goto err_pci_enable_device;
4937 }
4938
4939 err = pci_request_regions(pdev, rocker_driver_name);
4940 if (err) {
4941 dev_err(&pdev->dev, "pci_request_regions failed\n");
4942 goto err_pci_request_regions;
4943 }
4944
4945 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
4946 if (!err) {
4947 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
4948 if (err) {
4949 dev_err(&pdev->dev, "pci_set_consistent_dma_mask failed\n");
4950 goto err_pci_set_dma_mask;
4951 }
4952 } else {
4953 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
4954 if (err) {
4955 dev_err(&pdev->dev, "pci_set_dma_mask failed\n");
4956 goto err_pci_set_dma_mask;
4957 }
4958 }
4959
4960 if (pci_resource_len(pdev, 0) < ROCKER_PCI_BAR0_SIZE) {
4961 dev_err(&pdev->dev, "invalid PCI region size\n");
3122a92e 4962 err = -EINVAL;
4b8ac966
JP
4963 goto err_pci_resource_len_check;
4964 }
4965
4966 rocker->hw_addr = ioremap(pci_resource_start(pdev, 0),
4967 pci_resource_len(pdev, 0));
4968 if (!rocker->hw_addr) {
4969 dev_err(&pdev->dev, "ioremap failed\n");
4970 err = -EIO;
4971 goto err_ioremap;
4972 }
4973 pci_set_master(pdev);
4974
4975 rocker->pdev = pdev;
4976 pci_set_drvdata(pdev, rocker);
4977
4978 rocker->port_count = rocker_read32(rocker, PORT_PHYS_COUNT);
4979
4980 err = rocker_msix_init(rocker);
4981 if (err) {
4982 dev_err(&pdev->dev, "MSI-X init failed\n");
4983 goto err_msix_init;
4984 }
4985
4986 err = rocker_basic_hw_test(rocker);
4987 if (err) {
4988 dev_err(&pdev->dev, "basic hw test failed\n");
4989 goto err_basic_hw_test;
4990 }
4991
4992 rocker_write32(rocker, CONTROL, ROCKER_CONTROL_RESET);
4993
4994 err = rocker_dma_rings_init(rocker);
4995 if (err)
4996 goto err_dma_rings_init;
4997
4998 err = request_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_CMD),
4999 rocker_cmd_irq_handler, 0,
5000 rocker_driver_name, rocker);
5001 if (err) {
5002 dev_err(&pdev->dev, "cannot assign cmd irq\n");
5003 goto err_request_cmd_irq;
5004 }
5005
5006 err = request_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_EVENT),
5007 rocker_event_irq_handler, 0,
5008 rocker_driver_name, rocker);
5009 if (err) {
5010 dev_err(&pdev->dev, "cannot assign event irq\n");
5011 goto err_request_event_irq;
5012 }
5013
5014 rocker->hw.id = rocker_read64(rocker, SWITCH_ID);
5015
9f6bbf7c
SF
5016 err = rocker_init_tbls(rocker);
5017 if (err) {
5018 dev_err(&pdev->dev, "cannot init rocker tables\n");
5019 goto err_init_tbls;
5020 }
5021
52fe3e2d
SF
5022 setup_timer(&rocker->fdb_cleanup_timer, rocker_fdb_cleanup,
5023 (unsigned long) rocker);
5024 mod_timer(&rocker->fdb_cleanup_timer, jiffies);
5025
4b8ac966
JP
5026 err = rocker_probe_ports(rocker);
5027 if (err) {
5028 dev_err(&pdev->dev, "failed to probe ports\n");
5029 goto err_probe_ports;
5030 }
5031
c8beb5b2
SF
5032 dev_info(&pdev->dev, "Rocker switch with id %*phN\n",
5033 (int)sizeof(rocker->hw.id), &rocker->hw.id);
4b8ac966
JP
5034
5035 return 0;
5036
5037err_probe_ports:
52fe3e2d 5038 del_timer_sync(&rocker->fdb_cleanup_timer);
9f6bbf7c
SF
5039 rocker_free_tbls(rocker);
5040err_init_tbls:
4b8ac966
JP
5041 free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_EVENT), rocker);
5042err_request_event_irq:
5043 free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_CMD), rocker);
5044err_request_cmd_irq:
5045 rocker_dma_rings_fini(rocker);
5046err_dma_rings_init:
5047err_basic_hw_test:
5048 rocker_msix_fini(rocker);
5049err_msix_init:
5050 iounmap(rocker->hw_addr);
5051err_ioremap:
5052err_pci_resource_len_check:
5053err_pci_set_dma_mask:
5054 pci_release_regions(pdev);
5055err_pci_request_regions:
5056 pci_disable_device(pdev);
5057err_pci_enable_device:
5058 kfree(rocker);
5059 return err;
5060}
5061
5062static void rocker_remove(struct pci_dev *pdev)
5063{
5064 struct rocker *rocker = pci_get_drvdata(pdev);
5065
52fe3e2d 5066 del_timer_sync(&rocker->fdb_cleanup_timer);
9f6bbf7c 5067 rocker_free_tbls(rocker);
4b8ac966
JP
5068 rocker_write32(rocker, CONTROL, ROCKER_CONTROL_RESET);
5069 rocker_remove_ports(rocker);
5070 free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_EVENT), rocker);
5071 free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_CMD), rocker);
5072 rocker_dma_rings_fini(rocker);
5073 rocker_msix_fini(rocker);
5074 iounmap(rocker->hw_addr);
5075 pci_release_regions(rocker->pdev);
5076 pci_disable_device(rocker->pdev);
5077 kfree(rocker);
5078}
5079
5080static struct pci_driver rocker_pci_driver = {
5081 .name = rocker_driver_name,
5082 .id_table = rocker_pci_id_table,
5083 .probe = rocker_probe,
5084 .remove = rocker_remove,
5085};
5086
6c707945
SF
5087/************************************
5088 * Net device notifier event handler
5089 ************************************/
5090
e5054643 5091static bool rocker_port_dev_check(const struct net_device *dev)
6c707945
SF
5092{
5093 return dev->netdev_ops == &rocker_port_netdev_ops;
5094}
5095
5096static int rocker_port_bridge_join(struct rocker_port *rocker_port,
5097 struct net_device *bridge)
5098{
027e00dc 5099 u16 untagged_vid = 0;
6c707945
SF
5100 int err;
5101
027e00dc
SF
5102 /* Port is joining bridge, so the internal VLAN for the
5103 * port is going to change to the bridge internal VLAN.
5104 * Let's remove untagged VLAN (vid=0) from port and
5105 * re-add once internal VLAN has changed.
5106 */
6c707945 5107
027e00dc 5108 err = rocker_port_vlan_del(rocker_port, untagged_vid, 0);
6c707945
SF
5109 if (err)
5110 return err;
027e00dc
SF
5111
5112 rocker_port_internal_vlan_id_put(rocker_port,
5113 rocker_port->dev->ifindex);
6c707945 5114 rocker_port->internal_vlan_id =
df6a2067 5115 rocker_port_internal_vlan_id_get(rocker_port, bridge->ifindex);
027e00dc
SF
5116
5117 rocker_port->bridge_dev = bridge;
3f98a8e6 5118 switchdev_port_fwd_mark_set(rocker_port->dev, bridge, true);
027e00dc 5119
76c6f945 5120 return rocker_port_vlan_add(rocker_port, NULL, untagged_vid, 0);
6c707945
SF
5121}
5122
5123static int rocker_port_bridge_leave(struct rocker_port *rocker_port)
5124{
027e00dc 5125 u16 untagged_vid = 0;
6c707945
SF
5126 int err;
5127
027e00dc 5128 err = rocker_port_vlan_del(rocker_port, untagged_vid, 0);
6c707945
SF
5129 if (err)
5130 return err;
027e00dc
SF
5131
5132 rocker_port_internal_vlan_id_put(rocker_port,
5133 rocker_port->bridge_dev->ifindex);
6c707945
SF
5134 rocker_port->internal_vlan_id =
5135 rocker_port_internal_vlan_id_get(rocker_port,
5136 rocker_port->dev->ifindex);
027e00dc 5137
3f98a8e6
SF
5138 switchdev_port_fwd_mark_set(rocker_port->dev, rocker_port->bridge_dev,
5139 false);
027e00dc
SF
5140 rocker_port->bridge_dev = NULL;
5141
76c6f945 5142 err = rocker_port_vlan_add(rocker_port, NULL, untagged_vid, 0);
e47172ab
SF
5143 if (err)
5144 return err;
5145
5146 if (rocker_port->dev->flags & IFF_UP)
76c6f945 5147 err = rocker_port_fwd_enable(rocker_port, NULL, 0);
6c707945
SF
5148
5149 return err;
5150}
5151
8254973f
SH
5152static int rocker_port_ovs_changed(struct rocker_port *rocker_port,
5153 struct net_device *master)
5154{
5155 int err;
5156
5157 rocker_port->bridge_dev = master;
5158
76c6f945 5159 err = rocker_port_fwd_disable(rocker_port, NULL, 0);
8254973f
SH
5160 if (err)
5161 return err;
76c6f945 5162 err = rocker_port_fwd_enable(rocker_port, NULL, 0);
8254973f
SH
5163
5164 return err;
5165}
5166
686ed304
JP
5167static int rocker_port_master_linked(struct rocker_port *rocker_port,
5168 struct net_device *master)
5169{
5170 int err = 0;
5171
5172 if (netif_is_bridge_master(master))
5173 err = rocker_port_bridge_join(rocker_port, master);
5174 else if (netif_is_ovs_master(master))
5175 err = rocker_port_ovs_changed(rocker_port, master);
5176 return err;
5177}
5178
5179static int rocker_port_master_unlinked(struct rocker_port *rocker_port)
6c707945 5180{
6c707945
SF
5181 int err = 0;
5182
686ed304 5183 if (rocker_port_is_bridged(rocker_port))
6c707945 5184 err = rocker_port_bridge_leave(rocker_port);
686ed304 5185 else if (rocker_port_is_ovsed(rocker_port))
8254973f 5186 err = rocker_port_ovs_changed(rocker_port, NULL);
6c707945
SF
5187 return err;
5188}
5189
5190static int rocker_netdevice_event(struct notifier_block *unused,
5191 unsigned long event, void *ptr)
5192{
686ed304
JP
5193 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
5194 struct netdev_notifier_changeupper_info *info;
5195 struct rocker_port *rocker_port;
6c707945
SF
5196 int err;
5197
686ed304
JP
5198 if (!rocker_port_dev_check(dev))
5199 return NOTIFY_DONE;
5200
6c707945
SF
5201 switch (event) {
5202 case NETDEV_CHANGEUPPER:
686ed304
JP
5203 info = ptr;
5204 if (!info->master)
5205 goto out;
5206 rocker_port = netdev_priv(dev);
5207 if (info->linking) {
5208 err = rocker_port_master_linked(rocker_port,
5209 info->upper_dev);
5210 if (err)
5211 netdev_warn(dev, "failed to reflect master linked (err %d)\n",
5212 err);
5213 } else {
5214 err = rocker_port_master_unlinked(rocker_port);
5215 if (err)
5216 netdev_warn(dev, "failed to reflect master unlinked (err %d)\n",
5217 err);
5218 }
6c707945
SF
5219 break;
5220 }
686ed304 5221out:
6c707945
SF
5222 return NOTIFY_DONE;
5223}
5224
5225static struct notifier_block rocker_netdevice_nb __read_mostly = {
5226 .notifier_call = rocker_netdevice_event,
5227};
5228
c1beeef7
SF
5229/************************************
5230 * Net event notifier event handler
5231 ************************************/
5232
5233static int rocker_neigh_update(struct net_device *dev, struct neighbour *n)
5234{
5235 struct rocker_port *rocker_port = netdev_priv(dev);
02a9fbfc
SF
5236 int flags = (n->nud_state & NUD_VALID ? 0 : ROCKER_OP_FLAG_REMOVE) |
5237 ROCKER_OP_FLAG_NOWAIT;
c1beeef7
SF
5238 __be32 ip_addr = *(__be32 *)n->primary_key;
5239
76c6f945 5240 return rocker_port_ipv4_neigh(rocker_port, NULL, flags, ip_addr, n->ha);
c1beeef7
SF
5241}
5242
5243static int rocker_netevent_event(struct notifier_block *unused,
5244 unsigned long event, void *ptr)
5245{
5246 struct net_device *dev;
5247 struct neighbour *n = ptr;
5248 int err;
5249
5250 switch (event) {
5251 case NETEVENT_NEIGH_UPDATE:
5252 if (n->tbl != &arp_tbl)
5253 return NOTIFY_DONE;
5254 dev = n->dev;
5255 if (!rocker_port_dev_check(dev))
5256 return NOTIFY_DONE;
5257 err = rocker_neigh_update(dev, n);
5258 if (err)
5259 netdev_warn(dev,
5260 "failed to handle neigh update (err %d)\n",
5261 err);
5262 break;
5263 }
5264
5265 return NOTIFY_DONE;
5266}
5267
5268static struct notifier_block rocker_netevent_nb __read_mostly = {
5269 .notifier_call = rocker_netevent_event,
5270};
5271
4b8ac966
JP
5272/***********************
5273 * Module init and exit
5274 ***********************/
5275
5276static int __init rocker_module_init(void)
5277{
6c707945
SF
5278 int err;
5279
5280 register_netdevice_notifier(&rocker_netdevice_nb);
c1beeef7 5281 register_netevent_notifier(&rocker_netevent_nb);
6c707945
SF
5282 err = pci_register_driver(&rocker_pci_driver);
5283 if (err)
5284 goto err_pci_register_driver;
5285 return 0;
5286
5287err_pci_register_driver:
a076e6bf 5288 unregister_netevent_notifier(&rocker_netevent_nb);
6c707945
SF
5289 unregister_netdevice_notifier(&rocker_netdevice_nb);
5290 return err;
4b8ac966
JP
5291}
5292
5293static void __exit rocker_module_exit(void)
5294{
c1beeef7 5295 unregister_netevent_notifier(&rocker_netevent_nb);
6c707945 5296 unregister_netdevice_notifier(&rocker_netdevice_nb);
4b8ac966
JP
5297 pci_unregister_driver(&rocker_pci_driver);
5298}
5299
5300module_init(rocker_module_init);
5301module_exit(rocker_module_exit);
5302
5303MODULE_LICENSE("GPL v2");
5304MODULE_AUTHOR("Jiri Pirko <jiri@resnulli.us>");
5305MODULE_AUTHOR("Scott Feldman <sfeldma@gmail.com>");
5306MODULE_DESCRIPTION("Rocker switch device driver");
5307MODULE_DEVICE_TABLE(pci, rocker_pci_id_table);