rocker: fix non-portable err return codes
[linux-2.6-block.git] / drivers / net / ethernet / rocker / rocker.c
/*
 * drivers/net/ethernet/rocker/rocker.c - Rocker switch device driver
 * Copyright (c) 2014 Jiri Pirko <jiri@resnulli.us>
 * Copyright (c) 2014 Scott Feldman <sfeldma@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/spinlock.h>
#include <linux/hashtable.h>
#include <linux/crc32.h>
#include <linux/sort.h>
#include <linux/random.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/skbuff.h>
#include <linux/socket.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/if_bridge.h>
#include <linux/bitops.h>
#include <net/switchdev.h>
#include <net/rtnetlink.h>
#include <asm-generic/io-64-nonatomic-lo-hi.h>
#include <generated/utsrelease.h>

#include "rocker.h"

static const char rocker_driver_name[] = "rocker";

static const struct pci_device_id rocker_pci_id_table[] = {
	{PCI_VDEVICE(REDHAT, PCI_DEVICE_ID_REDHAT_ROCKER), 0},
	{0, }
};

struct rocker_flow_tbl_key {
	u32 priority;
	enum rocker_of_dpa_table_id tbl_id;
	union {
		struct {
			u32 in_lport;
			u32 in_lport_mask;
			enum rocker_of_dpa_table_id goto_tbl;
		} ig_port;
		struct {
			u32 in_lport;
			__be16 vlan_id;
			__be16 vlan_id_mask;
			enum rocker_of_dpa_table_id goto_tbl;
			bool untagged;
			__be16 new_vlan_id;
		} vlan;
		struct {
			u32 in_lport;
			u32 in_lport_mask;
			__be16 eth_type;
			u8 eth_dst[ETH_ALEN];
			u8 eth_dst_mask[ETH_ALEN];
			__be16 vlan_id;
			__be16 vlan_id_mask;
			enum rocker_of_dpa_table_id goto_tbl;
			bool copy_to_cpu;
		} term_mac;
		struct {
			__be16 eth_type;
			__be32 dst4;
			__be32 dst4_mask;
			enum rocker_of_dpa_table_id goto_tbl;
			u32 group_id;
		} ucast_routing;
		struct {
			u8 eth_dst[ETH_ALEN];
			u8 eth_dst_mask[ETH_ALEN];
			int has_eth_dst;
			int has_eth_dst_mask;
			__be16 vlan_id;
			u32 tunnel_id;
			enum rocker_of_dpa_table_id goto_tbl;
			u32 group_id;
			bool copy_to_cpu;
		} bridge;
		struct {
			u32 in_lport;
			u32 in_lport_mask;
			u8 eth_src[ETH_ALEN];
			u8 eth_src_mask[ETH_ALEN];
			u8 eth_dst[ETH_ALEN];
			u8 eth_dst_mask[ETH_ALEN];
			__be16 eth_type;
			__be16 vlan_id;
			__be16 vlan_id_mask;
			u8 ip_proto;
			u8 ip_proto_mask;
			u8 ip_tos;
			u8 ip_tos_mask;
			u32 group_id;
		} acl;
	};
};

struct rocker_flow_tbl_entry {
	struct hlist_node entry;
	u32 ref_count;
	u64 cookie;
	struct rocker_flow_tbl_key key;
	u32 key_crc32; /* key */
};

struct rocker_group_tbl_entry {
	struct hlist_node entry;
	u32 cmd;
	u32 group_id; /* key */
	u16 group_count;
	u32 *group_ids;
	union {
		struct {
			u8 pop_vlan;
		} l2_interface;
		struct {
			u8 eth_src[ETH_ALEN];
			u8 eth_dst[ETH_ALEN];
			__be16 vlan_id;
			u32 group_id;
		} l2_rewrite;
		struct {
			u8 eth_src[ETH_ALEN];
			u8 eth_dst[ETH_ALEN];
			__be16 vlan_id;
			bool ttl_check;
			u32 group_id;
		} l3_unicast;
	};
};

struct rocker_fdb_tbl_entry {
	struct hlist_node entry;
	u32 key_crc32; /* key */
	bool learned;
	struct rocker_fdb_tbl_key {
		u32 lport;
		u8 addr[ETH_ALEN];
		__be16 vlan_id;
	} key;
};

struct rocker_internal_vlan_tbl_entry {
	struct hlist_node entry;
	int ifindex; /* key */
	u32 ref_count;
	__be16 vlan_id;
};

struct rocker_desc_info {
	char *data; /* mapped */
	size_t data_size;
	size_t tlv_size;
	struct rocker_desc *desc;
	DEFINE_DMA_UNMAP_ADDR(mapaddr);
};

struct rocker_dma_ring_info {
	size_t size;
	u32 head;
	u32 tail;
	struct rocker_desc *desc; /* mapped */
	dma_addr_t mapaddr;
	struct rocker_desc_info *desc_info;
	unsigned int type;
};

struct rocker;

enum {
	ROCKER_CTRL_LINK_LOCAL_MCAST,
	ROCKER_CTRL_LOCAL_ARP,
	ROCKER_CTRL_IPV4_MCAST,
	ROCKER_CTRL_IPV6_MCAST,
	ROCKER_CTRL_DFLT_BRIDGING,
	ROCKER_CTRL_MAX,
};

#define ROCKER_INTERNAL_VLAN_ID_BASE 0x0f00
#define ROCKER_N_INTERNAL_VLANS 255
#define ROCKER_VLAN_BITMAP_LEN BITS_TO_LONGS(VLAN_N_VID)
#define ROCKER_INTERNAL_VLAN_BITMAP_LEN BITS_TO_LONGS(ROCKER_N_INTERNAL_VLANS)

struct rocker_port {
	struct net_device *dev;
	struct net_device *bridge_dev;
	struct rocker *rocker;
	unsigned int port_number;
	u32 lport;
	__be16 internal_vlan_id;
	int stp_state;
	u32 brport_flags;
	bool ctrls[ROCKER_CTRL_MAX];
	unsigned long vlan_bitmap[ROCKER_VLAN_BITMAP_LEN];
	struct napi_struct napi_tx;
	struct napi_struct napi_rx;
	struct rocker_dma_ring_info tx_ring;
	struct rocker_dma_ring_info rx_ring;
};

struct rocker {
	struct pci_dev *pdev;
	u8 __iomem *hw_addr;
	struct msix_entry *msix_entries;
	unsigned int port_count;
	struct rocker_port **ports;
	struct {
		u64 id;
	} hw;
	spinlock_t cmd_ring_lock;
	struct rocker_dma_ring_info cmd_ring;
	struct rocker_dma_ring_info event_ring;
	DECLARE_HASHTABLE(flow_tbl, 16);
	spinlock_t flow_tbl_lock;
	u64 flow_tbl_next_cookie;
	DECLARE_HASHTABLE(group_tbl, 16);
	spinlock_t group_tbl_lock;
	DECLARE_HASHTABLE(fdb_tbl, 16);
	spinlock_t fdb_tbl_lock;
	unsigned long internal_vlan_bitmap[ROCKER_INTERNAL_VLAN_BITMAP_LEN];
	DECLARE_HASHTABLE(internal_vlan_tbl, 8);
	spinlock_t internal_vlan_tbl_lock;
};

static const u8 zero_mac[ETH_ALEN]   = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
static const u8 ff_mac[ETH_ALEN]     = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
static const u8 ll_mac[ETH_ALEN]     = { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x00 };
static const u8 ll_mask[ETH_ALEN]    = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xf0 };
static const u8 mcast_mac[ETH_ALEN]  = { 0x01, 0x00, 0x00, 0x00, 0x00, 0x00 };
static const u8 ipv4_mcast[ETH_ALEN] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x00 };
static const u8 ipv4_mask[ETH_ALEN]  = { 0xff, 0xff, 0xff, 0x80, 0x00, 0x00 };
static const u8 ipv6_mcast[ETH_ALEN] = { 0x33, 0x33, 0x00, 0x00, 0x00, 0x00 };
static const u8 ipv6_mask[ETH_ALEN]  = { 0xff, 0xff, 0x00, 0x00, 0x00, 0x00 };

/* Rocker priority levels for flow table entries.  Higher
 * priority match takes precedence over lower priority match.
 */

enum {
	ROCKER_PRIORITY_UNKNOWN = 0,
	ROCKER_PRIORITY_IG_PORT = 1,
	ROCKER_PRIORITY_VLAN = 1,
	ROCKER_PRIORITY_TERM_MAC_UCAST = 0,
	ROCKER_PRIORITY_TERM_MAC_MCAST = 1,
	ROCKER_PRIORITY_UNICAST_ROUTING = 1,
	ROCKER_PRIORITY_BRIDGING_VLAN_DFLT_EXACT = 1,
	ROCKER_PRIORITY_BRIDGING_VLAN_DFLT_WILD = 2,
	ROCKER_PRIORITY_BRIDGING_VLAN = 3,
	ROCKER_PRIORITY_BRIDGING_TENANT_DFLT_EXACT = 1,
	ROCKER_PRIORITY_BRIDGING_TENANT_DFLT_WILD = 2,
	ROCKER_PRIORITY_BRIDGING_TENANT = 3,
	ROCKER_PRIORITY_ACL_CTRL = 3,
	ROCKER_PRIORITY_ACL_NORMAL = 2,
	ROCKER_PRIORITY_ACL_DFLT = 1,
};

static bool rocker_vlan_id_is_internal(__be16 vlan_id)
{
	u16 start = ROCKER_INTERNAL_VLAN_ID_BASE;
	u16 end = 0xffe;
	u16 _vlan_id = ntohs(vlan_id);

	return (_vlan_id >= start && _vlan_id <= end);
}
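
/* Worked example of the range check above: with
 * ROCKER_INTERNAL_VLAN_ID_BASE of 0x0f00 and ROCKER_N_INTERNAL_VLANS of
 * 255, internal VLAN ids occupy 0x0f00..0x0ffe inclusive
 * (0x0f00 + 255 - 1 = 0xffe), which is why the range ends at 0xffe.
 */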

static __be16 rocker_port_vid_to_vlan(struct rocker_port *rocker_port,
				      u16 vid, bool *pop_vlan)
{
	__be16 vlan_id;

	if (pop_vlan)
		*pop_vlan = false;
	vlan_id = htons(vid);
	if (!vlan_id) {
		vlan_id = rocker_port->internal_vlan_id;
		if (pop_vlan)
			*pop_vlan = true;
	}

	return vlan_id;
}

static u16 rocker_port_vlan_to_vid(struct rocker_port *rocker_port,
				   __be16 vlan_id)
{
	if (rocker_vlan_id_is_internal(vlan_id))
		return 0;

	return ntohs(vlan_id);
}

static bool rocker_port_is_bridged(struct rocker_port *rocker_port)
{
	return !!rocker_port->bridge_dev;
}

struct rocker_wait {
	wait_queue_head_t wait;
	bool done;
	bool nowait;
};

static void rocker_wait_reset(struct rocker_wait *wait)
{
	wait->done = false;
	wait->nowait = false;
}

static void rocker_wait_init(struct rocker_wait *wait)
{
	init_waitqueue_head(&wait->wait);
	rocker_wait_reset(wait);
}

static struct rocker_wait *rocker_wait_create(gfp_t gfp)
{
	struct rocker_wait *wait;

	wait = kmalloc(sizeof(*wait), gfp);
	if (!wait)
		return NULL;
	rocker_wait_init(wait);
	return wait;
}
static void rocker_wait_destroy(struct rocker_wait *wait)
{
	kfree(wait);
}

static bool rocker_wait_event_timeout(struct rocker_wait *wait,
				      unsigned long timeout)
{
	wait_event_timeout(wait->wait, wait->done, timeout);
	if (!wait->done)
		return false;
	return true;
}

static void rocker_wait_wake_up(struct rocker_wait *wait)
{
	wait->done = true;
	wake_up(&wait->wait);
}

static u32 rocker_msix_vector(struct rocker *rocker, unsigned int vector)
{
	return rocker->msix_entries[vector].vector;
}

static u32 rocker_msix_tx_vector(struct rocker_port *rocker_port)
{
	return rocker_msix_vector(rocker_port->rocker,
				  ROCKER_MSIX_VEC_TX(rocker_port->port_number));
}

static u32 rocker_msix_rx_vector(struct rocker_port *rocker_port)
{
	return rocker_msix_vector(rocker_port->rocker,
				  ROCKER_MSIX_VEC_RX(rocker_port->port_number));
}

#define rocker_write32(rocker, reg, val) \
	writel((val), (rocker)->hw_addr + (ROCKER_ ## reg))
#define rocker_read32(rocker, reg) \
	readl((rocker)->hw_addr + (ROCKER_ ## reg))
#define rocker_write64(rocker, reg, val) \
	writeq((val), (rocker)->hw_addr + (ROCKER_ ## reg))
#define rocker_read64(rocker, reg) \
	readq((rocker)->hw_addr + (ROCKER_ ## reg))

/*****************************
 * HW basic testing functions
 *****************************/

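/* rocker_reg_test() below exercises the device's test registers, which
 * are expected to read back exactly twice the value last written.  The
 * random value is shifted so that doubling it cannot overflow the
 * register width (31 bits for the 32-bit test, 63 bits for the 64-bit
 * test).
 */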
static int rocker_reg_test(struct rocker *rocker)
{
	struct pci_dev *pdev = rocker->pdev;
	u64 test_reg;
	u64 rnd;

	rnd = prandom_u32();
	rnd >>= 1;
	rocker_write32(rocker, TEST_REG, rnd);
	test_reg = rocker_read32(rocker, TEST_REG);
	if (test_reg != rnd * 2) {
		dev_err(&pdev->dev, "unexpected 32bit register value %08llx, expected %08llx\n",
			test_reg, rnd * 2);
		return -EIO;
	}

	rnd = prandom_u32();
	rnd <<= 31;
	rnd |= prandom_u32();
	rocker_write64(rocker, TEST_REG64, rnd);
	test_reg = rocker_read64(rocker, TEST_REG64);
	if (test_reg != rnd * 2) {
		dev_err(&pdev->dev, "unexpected 64bit register value %16llx, expected %16llx\n",
			test_reg, rnd * 2);
		return -EIO;
	}

	return 0;
}

static int rocker_dma_test_one(struct rocker *rocker, struct rocker_wait *wait,
			       u32 test_type, dma_addr_t dma_handle,
			       unsigned char *buf, unsigned char *expect,
			       size_t size)
{
	struct pci_dev *pdev = rocker->pdev;
	int i;

	rocker_wait_reset(wait);
	rocker_write32(rocker, TEST_DMA_CTRL, test_type);

	if (!rocker_wait_event_timeout(wait, HZ / 10)) {
		dev_err(&pdev->dev, "no interrupt received within a timeout\n");
		return -EIO;
	}

	for (i = 0; i < size; i++) {
		if (buf[i] != expect[i]) {
			dev_err(&pdev->dev, "unexpected memory content %02x at byte %x, expected %02x\n",
				buf[i], i, expect[i]);
			return -EIO;
		}
	}
	return 0;
}

#define ROCKER_TEST_DMA_BUF_SIZE (PAGE_SIZE * 4)
#define ROCKER_TEST_DMA_FILL_PATTERN 0x96

static int rocker_dma_test_offset(struct rocker *rocker,
				  struct rocker_wait *wait, int offset)
{
	struct pci_dev *pdev = rocker->pdev;
	unsigned char *alloc;
	unsigned char *buf;
	unsigned char *expect;
	dma_addr_t dma_handle;
	int i;
	int err;

	alloc = kzalloc(ROCKER_TEST_DMA_BUF_SIZE * 2 + offset,
			GFP_KERNEL | GFP_DMA);
	if (!alloc)
		return -ENOMEM;
	buf = alloc + offset;
	expect = buf + ROCKER_TEST_DMA_BUF_SIZE;

	dma_handle = pci_map_single(pdev, buf, ROCKER_TEST_DMA_BUF_SIZE,
				    PCI_DMA_BIDIRECTIONAL);
	if (pci_dma_mapping_error(pdev, dma_handle)) {
		err = -EIO;
		goto free_alloc;
	}

	rocker_write64(rocker, TEST_DMA_ADDR, dma_handle);
	rocker_write32(rocker, TEST_DMA_SIZE, ROCKER_TEST_DMA_BUF_SIZE);

	memset(expect, ROCKER_TEST_DMA_FILL_PATTERN, ROCKER_TEST_DMA_BUF_SIZE);
	err = rocker_dma_test_one(rocker, wait, ROCKER_TEST_DMA_CTRL_FILL,
				  dma_handle, buf, expect,
				  ROCKER_TEST_DMA_BUF_SIZE);
	if (err)
		goto unmap;

	memset(expect, 0, ROCKER_TEST_DMA_BUF_SIZE);
	err = rocker_dma_test_one(rocker, wait, ROCKER_TEST_DMA_CTRL_CLEAR,
				  dma_handle, buf, expect,
				  ROCKER_TEST_DMA_BUF_SIZE);
	if (err)
		goto unmap;

	prandom_bytes(buf, ROCKER_TEST_DMA_BUF_SIZE);
	for (i = 0; i < ROCKER_TEST_DMA_BUF_SIZE; i++)
		expect[i] = ~buf[i];
	err = rocker_dma_test_one(rocker, wait, ROCKER_TEST_DMA_CTRL_INVERT,
				  dma_handle, buf, expect,
				  ROCKER_TEST_DMA_BUF_SIZE);
	if (err)
		goto unmap;

unmap:
	pci_unmap_single(pdev, dma_handle, ROCKER_TEST_DMA_BUF_SIZE,
			 PCI_DMA_BIDIRECTIONAL);
free_alloc:
	kfree(alloc);

	return err;
}

static int rocker_dma_test(struct rocker *rocker, struct rocker_wait *wait)
{
	int i;
	int err;

	for (i = 0; i < 8; i++) {
		err = rocker_dma_test_offset(rocker, wait, i);
		if (err)
			return err;
	}
	return 0;
}

static irqreturn_t rocker_test_irq_handler(int irq, void *dev_id)
{
	struct rocker_wait *wait = dev_id;

	rocker_wait_wake_up(wait);

	return IRQ_HANDLED;
}

static int rocker_basic_hw_test(struct rocker *rocker)
{
	struct pci_dev *pdev = rocker->pdev;
	struct rocker_wait wait;
	int err;

	err = rocker_reg_test(rocker);
	if (err) {
		dev_err(&pdev->dev, "reg test failed\n");
		return err;
	}

	err = request_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_TEST),
			  rocker_test_irq_handler, 0,
			  rocker_driver_name, &wait);
	if (err) {
		dev_err(&pdev->dev, "cannot assign test irq\n");
		return err;
	}

	rocker_wait_init(&wait);
	rocker_write32(rocker, TEST_IRQ, ROCKER_MSIX_VEC_TEST);

	if (!rocker_wait_event_timeout(&wait, HZ / 10)) {
		dev_err(&pdev->dev, "no interrupt received within a timeout\n");
		err = -EIO;
		goto free_irq;
	}

	err = rocker_dma_test(rocker, &wait);
	if (err)
		dev_err(&pdev->dev, "dma test failed\n");

free_irq:
	free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_TEST), &wait);
	return err;
}

/******
 * TLV
 ******/

#define ROCKER_TLV_ALIGNTO 8U
#define ROCKER_TLV_ALIGN(len) \
	(((len) + ROCKER_TLV_ALIGNTO - 1) & ~(ROCKER_TLV_ALIGNTO - 1))
#define ROCKER_TLV_HDRLEN ROCKER_TLV_ALIGN(sizeof(struct rocker_tlv))

/* <------- ROCKER_TLV_HDRLEN -------> <--- ROCKER_TLV_ALIGN(payload) --->
 * +-----------------------------+- - -+- - - - - - - - - - - - - - -+- - -+
 * |            Header           | Pad |           Payload           | Pad |
 * |      (struct rocker_tlv)    | ing |                             | ing |
 * +-----------------------------+- - -+- - - - - - - - - - - - - - -+- - -+
 *  <--------------------------- tlv->len -------------------------->
 */

static struct rocker_tlv *rocker_tlv_next(const struct rocker_tlv *tlv,
					  int *remaining)
{
	int totlen = ROCKER_TLV_ALIGN(tlv->len);

	*remaining -= totlen;
	return (struct rocker_tlv *) ((char *) tlv + totlen);
}

static int rocker_tlv_ok(const struct rocker_tlv *tlv, int remaining)
{
	return remaining >= (int) ROCKER_TLV_HDRLEN &&
	       tlv->len >= ROCKER_TLV_HDRLEN &&
	       tlv->len <= remaining;
}

#define rocker_tlv_for_each(pos, head, len, rem)	\
	for (pos = head, rem = len;			\
	     rocker_tlv_ok(pos, rem);			\
	     pos = rocker_tlv_next(pos, &(rem)))

#define rocker_tlv_for_each_nested(pos, tlv, rem)	\
	rocker_tlv_for_each(pos, rocker_tlv_data(tlv),	\
			    rocker_tlv_len(tlv), rem)

static int rocker_tlv_attr_size(int payload)
{
	return ROCKER_TLV_HDRLEN + payload;
}

static int rocker_tlv_total_size(int payload)
{
	return ROCKER_TLV_ALIGN(rocker_tlv_attr_size(payload));
}

static int rocker_tlv_padlen(int payload)
{
	return rocker_tlv_total_size(payload) - rocker_tlv_attr_size(payload);
}

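/* A quick size example, assuming sizeof(struct rocker_tlv) <= 8 so that
 * ROCKER_TLV_HDRLEN is 8: a 6-byte payload (e.g. a MAC address) gives
 * rocker_tlv_attr_size(6) == 14, rocker_tlv_total_size(6) ==
 * ROCKER_TLV_ALIGN(14) == 16, and rocker_tlv_padlen(6) == 2 trailing
 * pad bytes, which rocker_tlv_put() below zeroes out.
 */
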
static int rocker_tlv_type(const struct rocker_tlv *tlv)
{
	return tlv->type;
}

static void *rocker_tlv_data(const struct rocker_tlv *tlv)
{
	return (char *) tlv + ROCKER_TLV_HDRLEN;
}

static int rocker_tlv_len(const struct rocker_tlv *tlv)
{
	return tlv->len - ROCKER_TLV_HDRLEN;
}

static u8 rocker_tlv_get_u8(const struct rocker_tlv *tlv)
{
	return *(u8 *) rocker_tlv_data(tlv);
}

static u16 rocker_tlv_get_u16(const struct rocker_tlv *tlv)
{
	return *(u16 *) rocker_tlv_data(tlv);
}

static __be16 rocker_tlv_get_be16(const struct rocker_tlv *tlv)
{
	return *(__be16 *) rocker_tlv_data(tlv);
}

static u32 rocker_tlv_get_u32(const struct rocker_tlv *tlv)
{
	return *(u32 *) rocker_tlv_data(tlv);
}

static u64 rocker_tlv_get_u64(const struct rocker_tlv *tlv)
{
	return *(u64 *) rocker_tlv_data(tlv);
}

static void rocker_tlv_parse(struct rocker_tlv **tb, int maxtype,
			     const char *buf, int buf_len)
{
	const struct rocker_tlv *tlv;
	const struct rocker_tlv *head = (const struct rocker_tlv *) buf;
	int rem;

	memset(tb, 0, sizeof(struct rocker_tlv *) * (maxtype + 1));

	rocker_tlv_for_each(tlv, head, buf_len, rem) {
		u32 type = rocker_tlv_type(tlv);

		if (type > 0 && type <= maxtype)
			tb[type] = (struct rocker_tlv *) tlv;
	}
}

static void rocker_tlv_parse_nested(struct rocker_tlv **tb, int maxtype,
				    const struct rocker_tlv *tlv)
{
	rocker_tlv_parse(tb, maxtype, rocker_tlv_data(tlv),
			 rocker_tlv_len(tlv));
}

static void rocker_tlv_parse_desc(struct rocker_tlv **tb, int maxtype,
				  struct rocker_desc_info *desc_info)
{
	rocker_tlv_parse(tb, maxtype, desc_info->data,
			 desc_info->desc->tlv_size);
}

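/* Typical parse pattern (sketch; the event and command processing code
 * below follows it):
 *
 *	struct rocker_tlv *attrs[ROCKER_TLV_EVENT_MAX + 1];
 *
 *	rocker_tlv_parse_desc(attrs, ROCKER_TLV_EVENT_MAX, desc_info);
 *	if (attrs[ROCKER_TLV_EVENT_TYPE])
 *		type = rocker_tlv_get_u16(attrs[ROCKER_TLV_EVENT_TYPE]);
 *
 * Attributes absent from the buffer remain NULL in the table, so callers
 * must check each slot before dereferencing it.
 */
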
static struct rocker_tlv *rocker_tlv_start(struct rocker_desc_info *desc_info)
{
	return (struct rocker_tlv *) ((char *) desc_info->data +
				      desc_info->tlv_size);
}

static int rocker_tlv_put(struct rocker_desc_info *desc_info,
			  int attrtype, int attrlen, const void *data)
{
	int tail_room = desc_info->data_size - desc_info->tlv_size;
	int total_size = rocker_tlv_total_size(attrlen);
	struct rocker_tlv *tlv;

	if (unlikely(tail_room < total_size))
		return -EMSGSIZE;

	tlv = rocker_tlv_start(desc_info);
	desc_info->tlv_size += total_size;
	tlv->type = attrtype;
	tlv->len = rocker_tlv_attr_size(attrlen);
	memcpy(rocker_tlv_data(tlv), data, attrlen);
	memset((char *) tlv + tlv->len, 0, rocker_tlv_padlen(attrlen));
	return 0;
}

static int rocker_tlv_put_u8(struct rocker_desc_info *desc_info,
			     int attrtype, u8 value)
{
	return rocker_tlv_put(desc_info, attrtype, sizeof(u8), &value);
}

static int rocker_tlv_put_u16(struct rocker_desc_info *desc_info,
			      int attrtype, u16 value)
{
	return rocker_tlv_put(desc_info, attrtype, sizeof(u16), &value);
}

static int rocker_tlv_put_be16(struct rocker_desc_info *desc_info,
			       int attrtype, __be16 value)
{
	return rocker_tlv_put(desc_info, attrtype, sizeof(__be16), &value);
}

static int rocker_tlv_put_u32(struct rocker_desc_info *desc_info,
			      int attrtype, u32 value)
{
	return rocker_tlv_put(desc_info, attrtype, sizeof(u32), &value);
}

static int rocker_tlv_put_be32(struct rocker_desc_info *desc_info,
			       int attrtype, __be32 value)
{
	return rocker_tlv_put(desc_info, attrtype, sizeof(__be32), &value);
}

static int rocker_tlv_put_u64(struct rocker_desc_info *desc_info,
			      int attrtype, u64 value)
{
	return rocker_tlv_put(desc_info, attrtype, sizeof(u64), &value);
}

static struct rocker_tlv *
rocker_tlv_nest_start(struct rocker_desc_info *desc_info, int attrtype)
{
	struct rocker_tlv *start = rocker_tlv_start(desc_info);

	if (rocker_tlv_put(desc_info, attrtype, 0, NULL) < 0)
		return NULL;

	return start;
}

static void rocker_tlv_nest_end(struct rocker_desc_info *desc_info,
				struct rocker_tlv *start)
{
	start->len = (char *) rocker_tlv_start(desc_info) - (char *) start;
}

static void rocker_tlv_nest_cancel(struct rocker_desc_info *desc_info,
				   struct rocker_tlv *start)
{
	desc_info->tlv_size = (char *) start - desc_info->data;
}

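/* Typical nesting pattern (sketch; SOME_ATTR is a placeholder, not a
 * real attribute type).  The command builders below follow this shape,
 * minus the explicit cancel:
 *
 *	struct rocker_tlv *nest;
 *
 *	nest = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
 *	if (!nest)
 *		return -EMSGSIZE;
 *	if (rocker_tlv_put_u32(desc_info, SOME_ATTR, val)) {
 *		rocker_tlv_nest_cancel(desc_info, nest);
 *		return -EMSGSIZE;
 *	}
 *	rocker_tlv_nest_end(desc_info, nest);
 */
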
/******************************************
 * DMA rings and descriptors manipulations
 ******************************************/

static u32 __pos_inc(u32 pos, size_t limit)
{
	return ++pos == limit ? 0 : pos;
}

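/* The device reports completion status with its own ROCKER_* error
 * codes; translate them here to standard kernel errnos so callers never
 * see a device-private value.  Note that ROCKER_ENOTSUP maps to
 * EOPNOTSUPP rather than the non-portable, kernel-internal ENOTSUPP.
 */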
static int rocker_desc_err(struct rocker_desc_info *desc_info)
{
	int err = desc_info->desc->comp_err & ~ROCKER_DMA_DESC_COMP_ERR_GEN;

	switch (err) {
	case ROCKER_OK:
		return 0;
	case -ROCKER_ENOENT:
		return -ENOENT;
	case -ROCKER_ENXIO:
		return -ENXIO;
	case -ROCKER_ENOMEM:
		return -ENOMEM;
	case -ROCKER_EEXIST:
		return -EEXIST;
	case -ROCKER_EINVAL:
		return -EINVAL;
	case -ROCKER_EMSGSIZE:
		return -EMSGSIZE;
	case -ROCKER_ENOTSUP:
		return -EOPNOTSUPP;
	case -ROCKER_ENOBUFS:
		return -ENOBUFS;
	}

	return -EINVAL;
}

static void rocker_desc_gen_clear(struct rocker_desc_info *desc_info)
{
	desc_info->desc->comp_err &= ~ROCKER_DMA_DESC_COMP_ERR_GEN;
}

static bool rocker_desc_gen(struct rocker_desc_info *desc_info)
{
	u32 comp_err = desc_info->desc->comp_err;

	return comp_err & ROCKER_DMA_DESC_COMP_ERR_GEN ? true : false;
}

static void *rocker_desc_cookie_ptr_get(struct rocker_desc_info *desc_info)
{
	return (void *)(uintptr_t)desc_info->desc->cookie;
}

static void rocker_desc_cookie_ptr_set(struct rocker_desc_info *desc_info,
				       void *ptr)
{
	desc_info->desc->cookie = (uintptr_t) ptr;
}

static struct rocker_desc_info *
rocker_desc_head_get(struct rocker_dma_ring_info *info)
{
	struct rocker_desc_info *desc_info;
	u32 head = __pos_inc(info->head, info->size);

	desc_info = &info->desc_info[info->head];
	if (head == info->tail)
		return NULL; /* ring full */
	desc_info->tlv_size = 0;
	return desc_info;
}

static void rocker_desc_commit(struct rocker_desc_info *desc_info)
{
	desc_info->desc->buf_size = desc_info->data_size;
	desc_info->desc->tlv_size = desc_info->tlv_size;
}

static void rocker_desc_head_set(struct rocker *rocker,
				 struct rocker_dma_ring_info *info,
				 struct rocker_desc_info *desc_info)
{
	u32 head = __pos_inc(info->head, info->size);

	BUG_ON(head == info->tail);
	rocker_desc_commit(desc_info);
	info->head = head;
	rocker_write32(rocker, DMA_DESC_HEAD(info->type), head);
}

static struct rocker_desc_info *
rocker_desc_tail_get(struct rocker_dma_ring_info *info)
{
	struct rocker_desc_info *desc_info;

	if (info->tail == info->head)
		return NULL; /* nothing to be done between head and tail */
	desc_info = &info->desc_info[info->tail];
	if (!rocker_desc_gen(desc_info))
		return NULL; /* gen bit not set, desc is not ready yet */
	info->tail = __pos_inc(info->tail, info->size);
	desc_info->tlv_size = desc_info->desc->tlv_size;
	return desc_info;
}

static void rocker_dma_ring_credits_set(struct rocker *rocker,
					struct rocker_dma_ring_info *info,
					u32 credits)
{
	if (credits)
		rocker_write32(rocker, DMA_DESC_CREDITS(info->type), credits);
}

static unsigned long rocker_dma_ring_size_fix(size_t size)
{
	return max(ROCKER_DMA_SIZE_MIN,
		   min(roundup_pow_of_two(size), ROCKER_DMA_SIZE_MAX));
}

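/* For example, a requested size of 100 becomes roundup_pow_of_two(100)
 * == 128, which is then clamped to the [ROCKER_DMA_SIZE_MIN,
 * ROCKER_DMA_SIZE_MAX] range defined in rocker.h.
 */
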
static int rocker_dma_ring_create(struct rocker *rocker,
				  unsigned int type,
				  size_t size,
				  struct rocker_dma_ring_info *info)
{
	int i;

	BUG_ON(size != rocker_dma_ring_size_fix(size));
	info->size = size;
	info->type = type;
	info->head = 0;
	info->tail = 0;
	info->desc_info = kcalloc(info->size, sizeof(*info->desc_info),
				  GFP_KERNEL);
	if (!info->desc_info)
		return -ENOMEM;

	info->desc = pci_alloc_consistent(rocker->pdev,
					  info->size * sizeof(*info->desc),
					  &info->mapaddr);
	if (!info->desc) {
		kfree(info->desc_info);
		return -ENOMEM;
	}

	for (i = 0; i < info->size; i++)
		info->desc_info[i].desc = &info->desc[i];

	rocker_write32(rocker, DMA_DESC_CTRL(info->type),
		       ROCKER_DMA_DESC_CTRL_RESET);
	rocker_write64(rocker, DMA_DESC_ADDR(info->type), info->mapaddr);
	rocker_write32(rocker, DMA_DESC_SIZE(info->type), info->size);

	return 0;
}

static void rocker_dma_ring_destroy(struct rocker *rocker,
				    struct rocker_dma_ring_info *info)
{
	rocker_write64(rocker, DMA_DESC_ADDR(info->type), 0);

	pci_free_consistent(rocker->pdev,
			    info->size * sizeof(struct rocker_desc),
			    info->desc, info->mapaddr);
	kfree(info->desc_info);
}

static void rocker_dma_ring_pass_to_producer(struct rocker *rocker,
					     struct rocker_dma_ring_info *info)
{
	int i;

	BUG_ON(info->head || info->tail);

	/* When ring is consumer, we need to advance head for each desc.
	 * That tells hw that the desc is ready to be used by it.
	 */
	for (i = 0; i < info->size - 1; i++)
		rocker_desc_head_set(rocker, info, &info->desc_info[i]);
	rocker_desc_commit(&info->desc_info[i]);
}

static int rocker_dma_ring_bufs_alloc(struct rocker *rocker,
				      struct rocker_dma_ring_info *info,
				      int direction, size_t buf_size)
{
	struct pci_dev *pdev = rocker->pdev;
	int i;
	int err;

	for (i = 0; i < info->size; i++) {
		struct rocker_desc_info *desc_info = &info->desc_info[i];
		struct rocker_desc *desc = &info->desc[i];
		dma_addr_t dma_handle;
		char *buf;

		buf = kzalloc(buf_size, GFP_KERNEL | GFP_DMA);
		if (!buf) {
			err = -ENOMEM;
			goto rollback;
		}

		dma_handle = pci_map_single(pdev, buf, buf_size, direction);
		if (pci_dma_mapping_error(pdev, dma_handle)) {
			kfree(buf);
			err = -EIO;
			goto rollback;
		}

		desc_info->data = buf;
		desc_info->data_size = buf_size;
		dma_unmap_addr_set(desc_info, mapaddr, dma_handle);

		desc->buf_addr = dma_handle;
		desc->buf_size = buf_size;
	}
	return 0;

rollback:
	for (i--; i >= 0; i--) {
		struct rocker_desc_info *desc_info = &info->desc_info[i];

		pci_unmap_single(pdev, dma_unmap_addr(desc_info, mapaddr),
				 desc_info->data_size, direction);
		kfree(desc_info->data);
	}
	return err;
}

static void rocker_dma_ring_bufs_free(struct rocker *rocker,
				      struct rocker_dma_ring_info *info,
				      int direction)
{
	struct pci_dev *pdev = rocker->pdev;
	int i;

	for (i = 0; i < info->size; i++) {
		struct rocker_desc_info *desc_info = &info->desc_info[i];
		struct rocker_desc *desc = &info->desc[i];

		desc->buf_addr = 0;
		desc->buf_size = 0;
		pci_unmap_single(pdev, dma_unmap_addr(desc_info, mapaddr),
				 desc_info->data_size, direction);
		kfree(desc_info->data);
	}
}

static int rocker_dma_rings_init(struct rocker *rocker)
{
	struct pci_dev *pdev = rocker->pdev;
	int err;

	err = rocker_dma_ring_create(rocker, ROCKER_DMA_CMD,
				     ROCKER_DMA_CMD_DEFAULT_SIZE,
				     &rocker->cmd_ring);
	if (err) {
		dev_err(&pdev->dev, "failed to create command dma ring\n");
		return err;
	}

	spin_lock_init(&rocker->cmd_ring_lock);

	err = rocker_dma_ring_bufs_alloc(rocker, &rocker->cmd_ring,
					 PCI_DMA_BIDIRECTIONAL, PAGE_SIZE);
	if (err) {
		dev_err(&pdev->dev, "failed to alloc command dma ring buffers\n");
		goto err_dma_cmd_ring_bufs_alloc;
	}

	err = rocker_dma_ring_create(rocker, ROCKER_DMA_EVENT,
				     ROCKER_DMA_EVENT_DEFAULT_SIZE,
				     &rocker->event_ring);
	if (err) {
		dev_err(&pdev->dev, "failed to create event dma ring\n");
		goto err_dma_event_ring_create;
	}

	err = rocker_dma_ring_bufs_alloc(rocker, &rocker->event_ring,
					 PCI_DMA_FROMDEVICE, PAGE_SIZE);
	if (err) {
		dev_err(&pdev->dev, "failed to alloc event dma ring buffers\n");
		goto err_dma_event_ring_bufs_alloc;
	}
	rocker_dma_ring_pass_to_producer(rocker, &rocker->event_ring);
	return 0;

err_dma_event_ring_bufs_alloc:
	rocker_dma_ring_destroy(rocker, &rocker->event_ring);
err_dma_event_ring_create:
	rocker_dma_ring_bufs_free(rocker, &rocker->cmd_ring,
				  PCI_DMA_BIDIRECTIONAL);
err_dma_cmd_ring_bufs_alloc:
	rocker_dma_ring_destroy(rocker, &rocker->cmd_ring);
	return err;
}

static void rocker_dma_rings_fini(struct rocker *rocker)
{
	rocker_dma_ring_bufs_free(rocker, &rocker->event_ring,
				  PCI_DMA_FROMDEVICE);
	rocker_dma_ring_destroy(rocker, &rocker->event_ring);
	rocker_dma_ring_bufs_free(rocker, &rocker->cmd_ring,
				  PCI_DMA_BIDIRECTIONAL);
	rocker_dma_ring_destroy(rocker, &rocker->cmd_ring);
}

static int rocker_dma_rx_ring_skb_map(struct rocker *rocker,
				      struct rocker_port *rocker_port,
				      struct rocker_desc_info *desc_info,
				      struct sk_buff *skb, size_t buf_len)
{
	struct pci_dev *pdev = rocker->pdev;
	dma_addr_t dma_handle;

	dma_handle = pci_map_single(pdev, skb->data, buf_len,
				    PCI_DMA_FROMDEVICE);
	if (pci_dma_mapping_error(pdev, dma_handle))
		return -EIO;
	if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_RX_FRAG_ADDR, dma_handle))
		goto tlv_put_failure;
	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_RX_FRAG_MAX_LEN, buf_len))
		goto tlv_put_failure;
	return 0;

tlv_put_failure:
	pci_unmap_single(pdev, dma_handle, buf_len, PCI_DMA_FROMDEVICE);
	desc_info->tlv_size = 0;
	return -EMSGSIZE;
}

static size_t rocker_port_rx_buf_len(struct rocker_port *rocker_port)
{
	return rocker_port->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
}

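/* For a default MTU of 1500, the rx buffer length above works out to
 * 1500 + ETH_HLEN (14) + ETH_FCS_LEN (4) + VLAN_HLEN (4) = 1522 bytes,
 * enough for a maximally sized, single-tagged frame including FCS.
 */
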
static int rocker_dma_rx_ring_skb_alloc(struct rocker *rocker,
					struct rocker_port *rocker_port,
					struct rocker_desc_info *desc_info)
{
	struct net_device *dev = rocker_port->dev;
	struct sk_buff *skb;
	size_t buf_len = rocker_port_rx_buf_len(rocker_port);
	int err;

	/* Ensure that hw will see tlv_size zero in case of an error.
	 * That tells hw to use another descriptor.
	 */
	rocker_desc_cookie_ptr_set(desc_info, NULL);
	desc_info->tlv_size = 0;

	skb = netdev_alloc_skb_ip_align(dev, buf_len);
	if (!skb)
		return -ENOMEM;
	err = rocker_dma_rx_ring_skb_map(rocker, rocker_port, desc_info,
					 skb, buf_len);
	if (err) {
		dev_kfree_skb_any(skb);
		return err;
	}
	rocker_desc_cookie_ptr_set(desc_info, skb);
	return 0;
}

static void rocker_dma_rx_ring_skb_unmap(struct rocker *rocker,
					 struct rocker_tlv **attrs)
{
	struct pci_dev *pdev = rocker->pdev;
	dma_addr_t dma_handle;
	size_t len;

	if (!attrs[ROCKER_TLV_RX_FRAG_ADDR] ||
	    !attrs[ROCKER_TLV_RX_FRAG_MAX_LEN])
		return;
	dma_handle = rocker_tlv_get_u64(attrs[ROCKER_TLV_RX_FRAG_ADDR]);
	len = rocker_tlv_get_u16(attrs[ROCKER_TLV_RX_FRAG_MAX_LEN]);
	pci_unmap_single(pdev, dma_handle, len, PCI_DMA_FROMDEVICE);
}

static void rocker_dma_rx_ring_skb_free(struct rocker *rocker,
					struct rocker_desc_info *desc_info)
{
	struct rocker_tlv *attrs[ROCKER_TLV_RX_MAX + 1];
	struct sk_buff *skb = rocker_desc_cookie_ptr_get(desc_info);

	if (!skb)
		return;
	rocker_tlv_parse_desc(attrs, ROCKER_TLV_RX_MAX, desc_info);
	rocker_dma_rx_ring_skb_unmap(rocker, attrs);
	dev_kfree_skb_any(skb);
}

static int rocker_dma_rx_ring_skbs_alloc(struct rocker *rocker,
					 struct rocker_port *rocker_port)
{
	struct rocker_dma_ring_info *rx_ring = &rocker_port->rx_ring;
	int i;
	int err;

	for (i = 0; i < rx_ring->size; i++) {
		err = rocker_dma_rx_ring_skb_alloc(rocker, rocker_port,
						   &rx_ring->desc_info[i]);
		if (err)
			goto rollback;
	}
	return 0;

rollback:
	for (i--; i >= 0; i--)
		rocker_dma_rx_ring_skb_free(rocker, &rx_ring->desc_info[i]);
	return err;
}

static void rocker_dma_rx_ring_skbs_free(struct rocker *rocker,
					 struct rocker_port *rocker_port)
{
	struct rocker_dma_ring_info *rx_ring = &rocker_port->rx_ring;
	int i;

	for (i = 0; i < rx_ring->size; i++)
		rocker_dma_rx_ring_skb_free(rocker, &rx_ring->desc_info[i]);
}

static int rocker_port_dma_rings_init(struct rocker_port *rocker_port)
{
	struct rocker *rocker = rocker_port->rocker;
	int err;

	err = rocker_dma_ring_create(rocker,
				     ROCKER_DMA_TX(rocker_port->port_number),
				     ROCKER_DMA_TX_DEFAULT_SIZE,
				     &rocker_port->tx_ring);
	if (err) {
		netdev_err(rocker_port->dev, "failed to create tx dma ring\n");
		return err;
	}

	err = rocker_dma_ring_bufs_alloc(rocker, &rocker_port->tx_ring,
					 PCI_DMA_TODEVICE,
					 ROCKER_DMA_TX_DESC_SIZE);
	if (err) {
		netdev_err(rocker_port->dev, "failed to alloc tx dma ring buffers\n");
		goto err_dma_tx_ring_bufs_alloc;
	}

	err = rocker_dma_ring_create(rocker,
				     ROCKER_DMA_RX(rocker_port->port_number),
				     ROCKER_DMA_RX_DEFAULT_SIZE,
				     &rocker_port->rx_ring);
	if (err) {
		netdev_err(rocker_port->dev, "failed to create rx dma ring\n");
		goto err_dma_rx_ring_create;
	}

	err = rocker_dma_ring_bufs_alloc(rocker, &rocker_port->rx_ring,
					 PCI_DMA_BIDIRECTIONAL,
					 ROCKER_DMA_RX_DESC_SIZE);
	if (err) {
		netdev_err(rocker_port->dev, "failed to alloc rx dma ring buffers\n");
		goto err_dma_rx_ring_bufs_alloc;
	}

	err = rocker_dma_rx_ring_skbs_alloc(rocker, rocker_port);
	if (err) {
		netdev_err(rocker_port->dev, "failed to alloc rx dma ring skbs\n");
		goto err_dma_rx_ring_skbs_alloc;
	}
	rocker_dma_ring_pass_to_producer(rocker, &rocker_port->rx_ring);

	return 0;

err_dma_rx_ring_skbs_alloc:
	rocker_dma_ring_bufs_free(rocker, &rocker_port->rx_ring,
				  PCI_DMA_BIDIRECTIONAL);
err_dma_rx_ring_bufs_alloc:
	rocker_dma_ring_destroy(rocker, &rocker_port->rx_ring);
err_dma_rx_ring_create:
	rocker_dma_ring_bufs_free(rocker, &rocker_port->tx_ring,
				  PCI_DMA_TODEVICE);
err_dma_tx_ring_bufs_alloc:
	rocker_dma_ring_destroy(rocker, &rocker_port->tx_ring);
	return err;
}

static void rocker_port_dma_rings_fini(struct rocker_port *rocker_port)
{
	struct rocker *rocker = rocker_port->rocker;

	rocker_dma_rx_ring_skbs_free(rocker, rocker_port);
	rocker_dma_ring_bufs_free(rocker, &rocker_port->rx_ring,
				  PCI_DMA_BIDIRECTIONAL);
	rocker_dma_ring_destroy(rocker, &rocker_port->rx_ring);
	rocker_dma_ring_bufs_free(rocker, &rocker_port->tx_ring,
				  PCI_DMA_TODEVICE);
	rocker_dma_ring_destroy(rocker, &rocker_port->tx_ring);
}

static void rocker_port_set_enable(struct rocker_port *rocker_port, bool enable)
{
	u64 val = rocker_read64(rocker_port->rocker, PORT_PHYS_ENABLE);

	if (enable)
		val |= 1ULL << rocker_port->lport;
	else
		val &= ~(1ULL << rocker_port->lport);
	rocker_write64(rocker_port->rocker, PORT_PHYS_ENABLE, val);
}

/********************************
 * Interrupt handler and helpers
 ********************************/

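/* The command and event ring handlers below walk their rings from tail
 * to head, handle each completed descriptor, and then return the number
 * of processed descriptors to the device as "credits" so it knows how
 * many ring slots it may reuse.  The tx and rx interrupts simply kick
 * NAPI.
 */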
static irqreturn_t rocker_cmd_irq_handler(int irq, void *dev_id)
{
	struct rocker *rocker = dev_id;
	struct rocker_desc_info *desc_info;
	struct rocker_wait *wait;
	u32 credits = 0;

	spin_lock(&rocker->cmd_ring_lock);
	while ((desc_info = rocker_desc_tail_get(&rocker->cmd_ring))) {
		wait = rocker_desc_cookie_ptr_get(desc_info);
		if (wait->nowait) {
			rocker_desc_gen_clear(desc_info);
			rocker_wait_destroy(wait);
		} else {
			rocker_wait_wake_up(wait);
		}
		credits++;
	}
	spin_unlock(&rocker->cmd_ring_lock);
	rocker_dma_ring_credits_set(rocker, &rocker->cmd_ring, credits);

	return IRQ_HANDLED;
}

static void rocker_port_link_up(struct rocker_port *rocker_port)
{
	netif_carrier_on(rocker_port->dev);
	netdev_info(rocker_port->dev, "Link is up\n");
}

static void rocker_port_link_down(struct rocker_port *rocker_port)
{
	netif_carrier_off(rocker_port->dev);
	netdev_info(rocker_port->dev, "Link is down\n");
}

static int rocker_event_link_change(struct rocker *rocker,
				    const struct rocker_tlv *info)
{
	struct rocker_tlv *attrs[ROCKER_TLV_EVENT_LINK_CHANGED_MAX + 1];
	unsigned int port_number;
	bool link_up;
	struct rocker_port *rocker_port;

	rocker_tlv_parse_nested(attrs, ROCKER_TLV_EVENT_LINK_CHANGED_MAX, info);
	if (!attrs[ROCKER_TLV_EVENT_LINK_CHANGED_LPORT] ||
	    !attrs[ROCKER_TLV_EVENT_LINK_CHANGED_LINKUP])
		return -EIO;
	port_number =
		rocker_tlv_get_u32(attrs[ROCKER_TLV_EVENT_LINK_CHANGED_LPORT]) - 1;
	link_up = rocker_tlv_get_u8(attrs[ROCKER_TLV_EVENT_LINK_CHANGED_LINKUP]);

	if (port_number >= rocker->port_count)
		return -EINVAL;

	rocker_port = rocker->ports[port_number];
	if (netif_carrier_ok(rocker_port->dev) != link_up) {
		if (link_up)
			rocker_port_link_up(rocker_port);
		else
			rocker_port_link_down(rocker_port);
	}

	return 0;
}

#define ROCKER_OP_FLAG_REMOVE		BIT(0)
#define ROCKER_OP_FLAG_NOWAIT		BIT(1)
#define ROCKER_OP_FLAG_LEARNED		BIT(2)
#define ROCKER_OP_FLAG_REFRESH		BIT(3)

static int rocker_port_fdb(struct rocker_port *rocker_port,
			   const unsigned char *addr,
			   __be16 vlan_id, int flags);

static int rocker_event_mac_vlan_seen(struct rocker *rocker,
				      const struct rocker_tlv *info)
{
	struct rocker_tlv *attrs[ROCKER_TLV_EVENT_MAC_VLAN_MAX + 1];
	unsigned int port_number;
	struct rocker_port *rocker_port;
	unsigned char *addr;
	int flags = ROCKER_OP_FLAG_NOWAIT | ROCKER_OP_FLAG_LEARNED;
	__be16 vlan_id;

	rocker_tlv_parse_nested(attrs, ROCKER_TLV_EVENT_MAC_VLAN_MAX, info);
	if (!attrs[ROCKER_TLV_EVENT_MAC_VLAN_LPORT] ||
	    !attrs[ROCKER_TLV_EVENT_MAC_VLAN_MAC] ||
	    !attrs[ROCKER_TLV_EVENT_MAC_VLAN_VLAN_ID])
		return -EIO;
	port_number =
		rocker_tlv_get_u32(attrs[ROCKER_TLV_EVENT_MAC_VLAN_LPORT]) - 1;
	addr = rocker_tlv_data(attrs[ROCKER_TLV_EVENT_MAC_VLAN_MAC]);
	vlan_id = rocker_tlv_get_be16(attrs[ROCKER_TLV_EVENT_MAC_VLAN_VLAN_ID]);

	if (port_number >= rocker->port_count)
		return -EINVAL;

	rocker_port = rocker->ports[port_number];

	if (rocker_port->stp_state != BR_STATE_LEARNING &&
	    rocker_port->stp_state != BR_STATE_FORWARDING)
		return 0;

	return rocker_port_fdb(rocker_port, addr, vlan_id, flags);
}

static int rocker_event_process(struct rocker *rocker,
				struct rocker_desc_info *desc_info)
{
	struct rocker_tlv *attrs[ROCKER_TLV_EVENT_MAX + 1];
	struct rocker_tlv *info;
	u16 type;

	rocker_tlv_parse_desc(attrs, ROCKER_TLV_EVENT_MAX, desc_info);
	if (!attrs[ROCKER_TLV_EVENT_TYPE] ||
	    !attrs[ROCKER_TLV_EVENT_INFO])
		return -EIO;

	type = rocker_tlv_get_u16(attrs[ROCKER_TLV_EVENT_TYPE]);
	info = attrs[ROCKER_TLV_EVENT_INFO];

	switch (type) {
	case ROCKER_TLV_EVENT_TYPE_LINK_CHANGED:
		return rocker_event_link_change(rocker, info);
	case ROCKER_TLV_EVENT_TYPE_MAC_VLAN_SEEN:
		return rocker_event_mac_vlan_seen(rocker, info);
	}

	return -EOPNOTSUPP;
}

static irqreturn_t rocker_event_irq_handler(int irq, void *dev_id)
{
	struct rocker *rocker = dev_id;
	struct pci_dev *pdev = rocker->pdev;
	struct rocker_desc_info *desc_info;
	u32 credits = 0;
	int err;

	while ((desc_info = rocker_desc_tail_get(&rocker->event_ring))) {
		err = rocker_desc_err(desc_info);
		if (err) {
			dev_err(&pdev->dev, "event desc received with err %d\n",
				err);
		} else {
			err = rocker_event_process(rocker, desc_info);
			if (err)
				dev_err(&pdev->dev, "event processing failed with err %d\n",
					err);
		}
		rocker_desc_gen_clear(desc_info);
		rocker_desc_head_set(rocker, &rocker->event_ring, desc_info);
		credits++;
	}
	rocker_dma_ring_credits_set(rocker, &rocker->event_ring, credits);

	return IRQ_HANDLED;
}

static irqreturn_t rocker_tx_irq_handler(int irq, void *dev_id)
{
	struct rocker_port *rocker_port = dev_id;

	napi_schedule(&rocker_port->napi_tx);
	return IRQ_HANDLED;
}

static irqreturn_t rocker_rx_irq_handler(int irq, void *dev_id)
{
	struct rocker_port *rocker_port = dev_id;

	napi_schedule(&rocker_port->napi_rx);
	return IRQ_HANDLED;
}

/********************
 * Command interface
 ********************/

typedef int (*rocker_cmd_cb_t)(struct rocker *rocker,
			       struct rocker_port *rocker_port,
			       struct rocker_desc_info *desc_info,
			       void *priv);

static int rocker_cmd_exec(struct rocker *rocker,
			   struct rocker_port *rocker_port,
			   rocker_cmd_cb_t prepare, void *prepare_priv,
			   rocker_cmd_cb_t process, void *process_priv,
			   bool nowait)
{
	struct rocker_desc_info *desc_info;
	struct rocker_wait *wait;
	unsigned long flags;
	int err;

	wait = rocker_wait_create(nowait ? GFP_ATOMIC : GFP_KERNEL);
	if (!wait)
		return -ENOMEM;
	wait->nowait = nowait;

	spin_lock_irqsave(&rocker->cmd_ring_lock, flags);
	desc_info = rocker_desc_head_get(&rocker->cmd_ring);
	if (!desc_info) {
		spin_unlock_irqrestore(&rocker->cmd_ring_lock, flags);
		err = -EAGAIN;
		goto out;
	}
	err = prepare(rocker, rocker_port, desc_info, prepare_priv);
	if (err) {
		spin_unlock_irqrestore(&rocker->cmd_ring_lock, flags);
		goto out;
	}
	rocker_desc_cookie_ptr_set(desc_info, wait);
	rocker_desc_head_set(rocker, &rocker->cmd_ring, desc_info);
	spin_unlock_irqrestore(&rocker->cmd_ring_lock, flags);

	if (nowait)
		return 0;

	if (!rocker_wait_event_timeout(wait, HZ / 10))
		return -EIO;

	err = rocker_desc_err(desc_info);
	if (err)
		goto out;

	if (process)
		err = process(rocker, rocker_port, desc_info, process_priv);

	rocker_desc_gen_clear(desc_info);
out:
	rocker_wait_destroy(wait);
	return err;
}

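/* All command wrappers below follow the same pattern: a "prepare"
 * callback fills the command descriptor with TLVs, and an optional
 * "process" callback parses the completed descriptor, e.g.:
 *
 *	err = rocker_cmd_exec(rocker, rocker_port,
 *			      rocker_cmd_get_port_settings_prep, NULL,
 *			      rocker_cmd_get_port_settings_macaddr_proc,
 *			      macaddr, false);
 */
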
static int
rocker_cmd_get_port_settings_prep(struct rocker *rocker,
				  struct rocker_port *rocker_port,
				  struct rocker_desc_info *desc_info,
				  void *priv)
{
	struct rocker_tlv *cmd_info;

	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
			       ROCKER_TLV_CMD_TYPE_GET_PORT_SETTINGS))
		return -EMSGSIZE;
	cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
	if (!cmd_info)
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_LPORT,
			       rocker_port->lport))
		return -EMSGSIZE;
	rocker_tlv_nest_end(desc_info, cmd_info);
	return 0;
}

static int
rocker_cmd_get_port_settings_ethtool_proc(struct rocker *rocker,
					  struct rocker_port *rocker_port,
					  struct rocker_desc_info *desc_info,
					  void *priv)
{
	struct ethtool_cmd *ecmd = priv;
	struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
	struct rocker_tlv *info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1];
	u32 speed;
	u8 duplex;
	u8 autoneg;

	rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info);
	if (!attrs[ROCKER_TLV_CMD_INFO])
		return -EIO;

	rocker_tlv_parse_nested(info_attrs, ROCKER_TLV_CMD_PORT_SETTINGS_MAX,
				attrs[ROCKER_TLV_CMD_INFO]);
	if (!info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_SPEED] ||
	    !info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_DUPLEX] ||
	    !info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_AUTONEG])
		return -EIO;

	speed = rocker_tlv_get_u32(info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_SPEED]);
	duplex = rocker_tlv_get_u8(info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_DUPLEX]);
	autoneg = rocker_tlv_get_u8(info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_AUTONEG]);

	ecmd->transceiver = XCVR_INTERNAL;
	ecmd->supported = SUPPORTED_TP;
	ecmd->phy_address = 0xff;
	ecmd->port = PORT_TP;
	ethtool_cmd_speed_set(ecmd, speed);
	ecmd->duplex = duplex ? DUPLEX_FULL : DUPLEX_HALF;
	ecmd->autoneg = autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE;

	return 0;
}

static int
rocker_cmd_get_port_settings_macaddr_proc(struct rocker *rocker,
					  struct rocker_port *rocker_port,
					  struct rocker_desc_info *desc_info,
					  void *priv)
{
	unsigned char *macaddr = priv;
	struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
	struct rocker_tlv *info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1];
	struct rocker_tlv *attr;

	rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info);
	if (!attrs[ROCKER_TLV_CMD_INFO])
		return -EIO;

	rocker_tlv_parse_nested(info_attrs, ROCKER_TLV_CMD_PORT_SETTINGS_MAX,
				attrs[ROCKER_TLV_CMD_INFO]);
	attr = info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MACADDR];
	if (!attr)
		return -EIO;

	if (rocker_tlv_len(attr) != ETH_ALEN)
		return -EINVAL;

	ether_addr_copy(macaddr, rocker_tlv_data(attr));
	return 0;
}

static int
rocker_cmd_set_port_settings_ethtool_prep(struct rocker *rocker,
					  struct rocker_port *rocker_port,
					  struct rocker_desc_info *desc_info,
					  void *priv)
{
	struct ethtool_cmd *ecmd = priv;
	struct rocker_tlv *cmd_info;

	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
			       ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS))
		return -EMSGSIZE;
	cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
	if (!cmd_info)
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_LPORT,
			       rocker_port->lport))
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_SPEED,
			       ethtool_cmd_speed(ecmd)))
		return -EMSGSIZE;
	if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_DUPLEX,
			      ecmd->duplex))
		return -EMSGSIZE;
	if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_AUTONEG,
			      ecmd->autoneg))
		return -EMSGSIZE;
	rocker_tlv_nest_end(desc_info, cmd_info);
	return 0;
}

static int
rocker_cmd_set_port_settings_macaddr_prep(struct rocker *rocker,
					  struct rocker_port *rocker_port,
					  struct rocker_desc_info *desc_info,
					  void *priv)
{
	unsigned char *macaddr = priv;
	struct rocker_tlv *cmd_info;

	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
			       ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS))
		return -EMSGSIZE;
	cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
	if (!cmd_info)
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_LPORT,
			       rocker_port->lport))
		return -EMSGSIZE;
	if (rocker_tlv_put(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_MACADDR,
			   ETH_ALEN, macaddr))
		return -EMSGSIZE;
	rocker_tlv_nest_end(desc_info, cmd_info);
	return 0;
}

static int
rocker_cmd_set_port_learning_prep(struct rocker *rocker,
				  struct rocker_port *rocker_port,
				  struct rocker_desc_info *desc_info,
				  void *priv)
{
	struct rocker_tlv *cmd_info;

	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
			       ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS))
		return -EMSGSIZE;
	cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
	if (!cmd_info)
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_LPORT,
			       rocker_port->lport))
		return -EMSGSIZE;
	if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_LEARNING,
			      !!(rocker_port->brport_flags & BR_LEARNING)))
		return -EMSGSIZE;
	rocker_tlv_nest_end(desc_info, cmd_info);
	return 0;
}

static int rocker_cmd_get_port_settings_ethtool(struct rocker_port *rocker_port,
						struct ethtool_cmd *ecmd)
{
	return rocker_cmd_exec(rocker_port->rocker, rocker_port,
			       rocker_cmd_get_port_settings_prep, NULL,
			       rocker_cmd_get_port_settings_ethtool_proc,
			       ecmd, false);
}

static int rocker_cmd_get_port_settings_macaddr(struct rocker_port *rocker_port,
						unsigned char *macaddr)
{
	return rocker_cmd_exec(rocker_port->rocker, rocker_port,
			       rocker_cmd_get_port_settings_prep, NULL,
			       rocker_cmd_get_port_settings_macaddr_proc,
			       macaddr, false);
}

static int rocker_cmd_set_port_settings_ethtool(struct rocker_port *rocker_port,
						struct ethtool_cmd *ecmd)
{
	return rocker_cmd_exec(rocker_port->rocker, rocker_port,
			       rocker_cmd_set_port_settings_ethtool_prep,
			       ecmd, NULL, NULL, false);
}

static int rocker_cmd_set_port_settings_macaddr(struct rocker_port *rocker_port,
						unsigned char *macaddr)
{
	return rocker_cmd_exec(rocker_port->rocker, rocker_port,
			       rocker_cmd_set_port_settings_macaddr_prep,
			       macaddr, NULL, NULL, false);
}

static int rocker_port_set_learning(struct rocker_port *rocker_port)
{
	return rocker_cmd_exec(rocker_port->rocker, rocker_port,
			       rocker_cmd_set_port_learning_prep,
			       NULL, NULL, NULL, false);
}

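/* Hypothetical caller sketch (the real probe path lives later in this
 * file): fetch the port MAC from the device and fall back to a random
 * one if the command fails.
 *
 *	err = rocker_cmd_get_port_settings_macaddr(rocker_port,
 *						   rocker_port->dev->dev_addr);
 *	if (err)
 *		eth_hw_addr_random(rocker_port->dev);
 */
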
9f6bbf7c
SF
1738static int rocker_cmd_flow_tbl_add_ig_port(struct rocker_desc_info *desc_info,
1739 struct rocker_flow_tbl_entry *entry)
1740{
1741 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_LPORT,
1742 entry->key.ig_port.in_lport))
1743 return -EMSGSIZE;
1744 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_LPORT_MASK,
1745 entry->key.ig_port.in_lport_mask))
1746 return -EMSGSIZE;
1747 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
1748 entry->key.ig_port.goto_tbl))
1749 return -EMSGSIZE;
1750
1751 return 0;
1752}
1753
1754static int rocker_cmd_flow_tbl_add_vlan(struct rocker_desc_info *desc_info,
1755 struct rocker_flow_tbl_entry *entry)
1756{
1757 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_LPORT,
1758 entry->key.vlan.in_lport))
1759 return -EMSGSIZE;
9b03c71f
JP
1760 if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
1761 entry->key.vlan.vlan_id))
9f6bbf7c 1762 return -EMSGSIZE;
9b03c71f
JP
1763 if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID_MASK,
1764 entry->key.vlan.vlan_id_mask))
9f6bbf7c
SF
1765 return -EMSGSIZE;
1766 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
1767 entry->key.vlan.goto_tbl))
1768 return -EMSGSIZE;
1769 if (entry->key.vlan.untagged &&
9b03c71f
JP
1770 rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_NEW_VLAN_ID,
1771 entry->key.vlan.new_vlan_id))
9f6bbf7c
SF
1772 return -EMSGSIZE;
1773
1774 return 0;
1775}
1776
1777static int rocker_cmd_flow_tbl_add_term_mac(struct rocker_desc_info *desc_info,
1778 struct rocker_flow_tbl_entry *entry)
1779{
1780 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_LPORT,
1781 entry->key.term_mac.in_lport))
1782 return -EMSGSIZE;
1783 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_LPORT_MASK,
1784 entry->key.term_mac.in_lport_mask))
1785 return -EMSGSIZE;
9b03c71f
JP
1786 if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE,
1787 entry->key.term_mac.eth_type))
9f6bbf7c
SF
1788 return -EMSGSIZE;
1789 if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
1790 ETH_ALEN, entry->key.term_mac.eth_dst))
1791 return -EMSGSIZE;
1792 if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC_MASK,
1793 ETH_ALEN, entry->key.term_mac.eth_dst_mask))
1794 return -EMSGSIZE;
9b03c71f
JP
1795 if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
1796 entry->key.term_mac.vlan_id))
9f6bbf7c 1797 return -EMSGSIZE;
9b03c71f
JP
1798 if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID_MASK,
1799 entry->key.term_mac.vlan_id_mask))
9f6bbf7c
SF
1800 return -EMSGSIZE;
1801 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
1802 entry->key.term_mac.goto_tbl))
1803 return -EMSGSIZE;
1804 if (entry->key.term_mac.copy_to_cpu &&
1805 rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_COPY_CPU_ACTION,
1806 entry->key.term_mac.copy_to_cpu))
1807 return -EMSGSIZE;
1808
1809 return 0;
1810}
1811
1812static int
1813rocker_cmd_flow_tbl_add_ucast_routing(struct rocker_desc_info *desc_info,
1814 struct rocker_flow_tbl_entry *entry)
1815{
9b03c71f
JP
1816 if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE,
1817 entry->key.ucast_routing.eth_type))
9f6bbf7c 1818 return -EMSGSIZE;
9b03c71f
JP
1819 if (rocker_tlv_put_be32(desc_info, ROCKER_TLV_OF_DPA_DST_IP,
1820 entry->key.ucast_routing.dst4))
9f6bbf7c 1821 return -EMSGSIZE;
9b03c71f
JP
1822 if (rocker_tlv_put_be32(desc_info, ROCKER_TLV_OF_DPA_DST_IP_MASK,
1823 entry->key.ucast_routing.dst4_mask))
9f6bbf7c
SF
1824 return -EMSGSIZE;
1825 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
1826 entry->key.ucast_routing.goto_tbl))
1827 return -EMSGSIZE;
1828 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
1829 entry->key.ucast_routing.group_id))
1830 return -EMSGSIZE;
1831
1832 return 0;
1833}
1834
1835static int rocker_cmd_flow_tbl_add_bridge(struct rocker_desc_info *desc_info,
1836 struct rocker_flow_tbl_entry *entry)
1837{
1838 if (entry->key.bridge.has_eth_dst &&
1839 rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
1840 ETH_ALEN, entry->key.bridge.eth_dst))
1841 return -EMSGSIZE;
1842 if (entry->key.bridge.has_eth_dst_mask &&
1843 rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC_MASK,
1844 ETH_ALEN, entry->key.bridge.eth_dst_mask))
1845 return -EMSGSIZE;
1846 if (entry->key.bridge.vlan_id &&
9b03c71f
JP
1847 rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
1848 entry->key.bridge.vlan_id))
9f6bbf7c
SF
1849 return -EMSGSIZE;
1850 if (entry->key.bridge.tunnel_id &&
1851 rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_TUNNEL_ID,
1852 entry->key.bridge.tunnel_id))
1853 return -EMSGSIZE;
1854 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
1855 entry->key.bridge.goto_tbl))
1856 return -EMSGSIZE;
1857 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
1858 entry->key.bridge.group_id))
1859 return -EMSGSIZE;
1860 if (entry->key.bridge.copy_to_cpu &&
1861 rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_COPY_CPU_ACTION,
1862 entry->key.bridge.copy_to_cpu))
1863 return -EMSGSIZE;
1864
1865 return 0;
1866}
1867
1868static int rocker_cmd_flow_tbl_add_acl(struct rocker_desc_info *desc_info,
1869 struct rocker_flow_tbl_entry *entry)
1870{
1871 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_LPORT,
1872 entry->key.acl.in_lport))
1873 return -EMSGSIZE;
1874 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_LPORT_MASK,
1875 entry->key.acl.in_lport_mask))
1876 return -EMSGSIZE;
1877 if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC,
1878 ETH_ALEN, entry->key.acl.eth_src))
1879 return -EMSGSIZE;
1880 if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC_MASK,
1881 ETH_ALEN, entry->key.acl.eth_src_mask))
1882 return -EMSGSIZE;
1883 if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
1884 ETH_ALEN, entry->key.acl.eth_dst))
1885 return -EMSGSIZE;
1886 if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC_MASK,
1887 ETH_ALEN, entry->key.acl.eth_dst_mask))
1888 return -EMSGSIZE;
1889 if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE,
1890 entry->key.acl.eth_type))
1891 return -EMSGSIZE;
1892 if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
1893 entry->key.acl.vlan_id))
1894 return -EMSGSIZE;
1895 if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID_MASK,
1896 entry->key.acl.vlan_id_mask))
1897 return -EMSGSIZE;
1898
1899 switch (ntohs(entry->key.acl.eth_type)) {
1900 case ETH_P_IP:
1901 case ETH_P_IPV6:
1902 if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_IP_PROTO,
1903 entry->key.acl.ip_proto))
1904 return -EMSGSIZE;
1905 if (rocker_tlv_put_u8(desc_info,
1906 ROCKER_TLV_OF_DPA_IP_PROTO_MASK,
1907 entry->key.acl.ip_proto_mask))
1908 return -EMSGSIZE;
1909 if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_IP_DSCP,
1910 entry->key.acl.ip_tos & 0x3f))
1911 return -EMSGSIZE;
1912 if (rocker_tlv_put_u8(desc_info,
1913 ROCKER_TLV_OF_DPA_IP_DSCP_MASK,
1914 entry->key.acl.ip_tos_mask & 0x3f))
1915 return -EMSGSIZE;
1916 if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_IP_ECN,
1917 (entry->key.acl.ip_tos & 0xc0) >> 6))
1918 return -EMSGSIZE;
1919 if (rocker_tlv_put_u8(desc_info,
1920 ROCKER_TLV_OF_DPA_IP_ECN_MASK,
1921 (entry->key.acl.ip_tos_mask & 0xc0) >> 6))
1922 return -EMSGSIZE;
1923 break;
1924 }
1925
1926 if (entry->key.acl.group_id != ROCKER_GROUP_NONE &&
1927 rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
1928 entry->key.acl.group_id))
1929 return -EMSGSIZE;
1930
1931 return 0;
1932}
1933
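/* Common add-flow framing: the CMD_INFO nest carries table ID,
 * priority, hardtime and the driver-assigned cookie, then the
 * table-specific key TLVs selected by the switch below.
 */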
1934static int rocker_cmd_flow_tbl_add(struct rocker *rocker,
1935 struct rocker_port *rocker_port,
1936 struct rocker_desc_info *desc_info,
1937 void *priv)
1938{
1939 struct rocker_flow_tbl_entry *entry = priv;
1940 struct rocker_tlv *cmd_info;
1941 int err = 0;
1942
1943 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
1944 ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_ADD))
1945 return -EMSGSIZE;
1946 cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
1947 if (!cmd_info)
1948 return -EMSGSIZE;
1949 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_TABLE_ID,
1950 entry->key.tbl_id))
1951 return -EMSGSIZE;
1952 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_PRIORITY,
1953 entry->key.priority))
1954 return -EMSGSIZE;
1955 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_HARDTIME, 0))
1956 return -EMSGSIZE;
1957 if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_OF_DPA_COOKIE,
1958 entry->cookie))
1959 return -EMSGSIZE;
1960
1961 switch (entry->key.tbl_id) {
1962 case ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT:
1963 err = rocker_cmd_flow_tbl_add_ig_port(desc_info, entry);
1964 break;
1965 case ROCKER_OF_DPA_TABLE_ID_VLAN:
1966 err = rocker_cmd_flow_tbl_add_vlan(desc_info, entry);
1967 break;
1968 case ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC:
1969 err = rocker_cmd_flow_tbl_add_term_mac(desc_info, entry);
1970 break;
1971 case ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING:
1972 err = rocker_cmd_flow_tbl_add_ucast_routing(desc_info, entry);
1973 break;
1974 case ROCKER_OF_DPA_TABLE_ID_BRIDGING:
1975 err = rocker_cmd_flow_tbl_add_bridge(desc_info, entry);
1976 break;
1977 case ROCKER_OF_DPA_TABLE_ID_ACL_POLICY:
1978 err = rocker_cmd_flow_tbl_add_acl(desc_info, entry);
1979 break;
1980 default:
1981 err = -EOPNOTSUPP;
1982 break;
1983 }
1984
1985 if (err)
1986 return err;
1987
1988 rocker_tlv_nest_end(desc_info, cmd_info);
1989
1990 return 0;
1991}
1992
1993static int rocker_cmd_flow_tbl_del(struct rocker *rocker,
1994 struct rocker_port *rocker_port,
1995 struct rocker_desc_info *desc_info,
1996 void *priv)
1997{
1998 const struct rocker_flow_tbl_entry *entry = priv;
1999 struct rocker_tlv *cmd_info;
2000
2001 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
2002 ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_DEL))
2003 return -EMSGSIZE;
2004 cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
2005 if (!cmd_info)
2006 return -EMSGSIZE;
2007 if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_OF_DPA_COOKIE,
2008 entry->cookie))
2009 return -EMSGSIZE;
2010 rocker_tlv_nest_end(desc_info, cmd_info);
2011
2012 return 0;
2013}
2014
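/* Group IDs pack the group type plus per-type fields (e.g. VLAN and
 * port for L2 interface groups) into one u32; the ROCKER_GROUP_*_GET()
 * macros recover the individual fields.
 */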
2015static int
2016rocker_cmd_group_tbl_add_l2_interface(struct rocker_desc_info *desc_info,
2017 struct rocker_group_tbl_entry *entry)
2018{
2019 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_OUT_LPORT,
2020 ROCKER_GROUP_PORT_GET(entry->group_id)))
2021 return -EMSGSIZE;
2022 if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_POP_VLAN,
2023 entry->l2_interface.pop_vlan))
2024 return -EMSGSIZE;
2025
2026 return 0;
2027}
2028
2029static int
2030rocker_cmd_group_tbl_add_l2_rewrite(struct rocker_desc_info *desc_info,
2031 struct rocker_group_tbl_entry *entry)
2032{
2033 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID_LOWER,
2034 entry->l2_rewrite.group_id))
2035 return -EMSGSIZE;
2036 if (!is_zero_ether_addr(entry->l2_rewrite.eth_src) &&
2037 rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC,
2038 ETH_ALEN, entry->l2_rewrite.eth_src))
2039 return -EMSGSIZE;
2040 if (!is_zero_ether_addr(entry->l2_rewrite.eth_dst) &&
2041 rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
2042 ETH_ALEN, entry->l2_rewrite.eth_dst))
2043 return -EMSGSIZE;
2044 if (entry->l2_rewrite.vlan_id &&
2045 rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
2046 entry->l2_rewrite.vlan_id))
2047 return -EMSGSIZE;
2048
2049 return 0;
2050}
2051
2052static int
2053rocker_cmd_group_tbl_add_group_ids(struct rocker_desc_info *desc_info,
2054 struct rocker_group_tbl_entry *entry)
2055{
2056 int i;
2057 struct rocker_tlv *group_ids;
2058
2059 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GROUP_COUNT,
2060 entry->group_count))
2061 return -EMSGSIZE;
2062
2063 group_ids = rocker_tlv_nest_start(desc_info,
2064 ROCKER_TLV_OF_DPA_GROUP_IDS);
2065 if (!group_ids)
2066 return -EMSGSIZE;
2067
2068 for (i = 0; i < entry->group_count; i++)
2069 /* Note TLV array is 1-based */
2070 if (rocker_tlv_put_u32(desc_info, i + 1, entry->group_ids[i]))
2071 return -EMSGSIZE;
2072
2073 rocker_tlv_nest_end(desc_info, group_ids);
2074
2075 return 0;
2076}
2077
2078static int
2079rocker_cmd_group_tbl_add_l3_unicast(struct rocker_desc_info *desc_info,
2080 struct rocker_group_tbl_entry *entry)
2081{
2082 if (!is_zero_ether_addr(entry->l3_unicast.eth_src) &&
2083 rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC,
2084 ETH_ALEN, entry->l3_unicast.eth_src))
2085 return -EMSGSIZE;
2086 if (!is_zero_ether_addr(entry->l3_unicast.eth_dst) &&
2087 rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
2088 ETH_ALEN, entry->l3_unicast.eth_dst))
2089 return -EMSGSIZE;
2090 if (entry->l3_unicast.vlan_id &&
2091 rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
2092 entry->l3_unicast.vlan_id))
2093 return -EMSGSIZE;
2094 if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_TTL_CHECK,
2095 entry->l3_unicast.ttl_check))
2096 return -EMSGSIZE;
2097 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID_LOWER,
2098 entry->l3_unicast.group_id))
2099 return -EMSGSIZE;
2100
2101 return 0;
2102}
2103
2104static int rocker_cmd_group_tbl_add(struct rocker *rocker,
2105 struct rocker_port *rocker_port,
2106 struct rocker_desc_info *desc_info,
2107 void *priv)
2108{
2109 struct rocker_group_tbl_entry *entry = priv;
2110 struct rocker_tlv *cmd_info;
2111 int err = 0;
2112
2113 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd))
2114 return -EMSGSIZE;
2115 cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
2116 if (!cmd_info)
2117 return -EMSGSIZE;
2118
2119 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
2120 entry->group_id))
2121 return -EMSGSIZE;
2122
2123 switch (ROCKER_GROUP_TYPE_GET(entry->group_id)) {
2124 case ROCKER_OF_DPA_GROUP_TYPE_L2_INTERFACE:
2125 err = rocker_cmd_group_tbl_add_l2_interface(desc_info, entry);
2126 break;
2127 case ROCKER_OF_DPA_GROUP_TYPE_L2_REWRITE:
2128 err = rocker_cmd_group_tbl_add_l2_rewrite(desc_info, entry);
2129 break;
2130 case ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD:
2131 case ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST:
2132 err = rocker_cmd_group_tbl_add_group_ids(desc_info, entry);
2133 break;
2134 case ROCKER_OF_DPA_GROUP_TYPE_L3_UCAST:
2135 err = rocker_cmd_group_tbl_add_l3_unicast(desc_info, entry);
2136 break;
2137 default:
2138 err = -EOPNOTSUPP;
2139 break;
2140 }
2141
2142 if (err)
2143 return err;
2144
2145 rocker_tlv_nest_end(desc_info, cmd_info);
2146
2147 return 0;
2148}
2149
2150static int rocker_cmd_group_tbl_del(struct rocker *rocker,
2151 struct rocker_port *rocker_port,
2152 struct rocker_desc_info *desc_info,
2153 void *priv)
2154{
2155 const struct rocker_group_tbl_entry *entry = priv;
2156 struct rocker_tlv *cmd_info;
2157
2158 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd))
2159 return -EMSGSIZE;
2160 cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
2161 if (!cmd_info)
2162 return -EMSGSIZE;
2163 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
2164 entry->group_id))
2165 return -EMSGSIZE;
2166 rocker_tlv_nest_end(desc_info, cmd_info);
2167
2168 return 0;
2169}
2170
2171/*****************************************
2172 * Flow, group, FDB, internal VLAN tables
2173 *****************************************/
2174
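/* Four driver-side shadow tables, all kernel hashtables: flow and FDB
 * entries hash on a crc32 of their key, groups on group_id, internal
 * VLANs on ifindex. Each table gets its own irqsave spinlock since
 * entries can be touched from atomic (nowait) paths.
 */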
2175static int rocker_init_tbls(struct rocker *rocker)
2176{
2177 hash_init(rocker->flow_tbl);
2178 spin_lock_init(&rocker->flow_tbl_lock);
2179
2180 hash_init(rocker->group_tbl);
2181 spin_lock_init(&rocker->group_tbl_lock);
2182
2183 hash_init(rocker->fdb_tbl);
2184 spin_lock_init(&rocker->fdb_tbl_lock);
2185
2186 hash_init(rocker->internal_vlan_tbl);
2187 spin_lock_init(&rocker->internal_vlan_tbl_lock);
2188
2189 return 0;
2190}
2191
2192static void rocker_free_tbls(struct rocker *rocker)
2193{
2194 unsigned long flags;
2195 struct rocker_flow_tbl_entry *flow_entry;
2196 struct rocker_group_tbl_entry *group_entry;
2197 struct rocker_fdb_tbl_entry *fdb_entry;
2198 struct rocker_internal_vlan_tbl_entry *internal_vlan_entry;
2199 struct hlist_node *tmp;
2200 int bkt;
2201
2202 spin_lock_irqsave(&rocker->flow_tbl_lock, flags);
2203 hash_for_each_safe(rocker->flow_tbl, bkt, tmp, flow_entry, entry)
2204 hash_del(&flow_entry->entry);
2205 spin_unlock_irqrestore(&rocker->flow_tbl_lock, flags);
2206
2207 spin_lock_irqsave(&rocker->group_tbl_lock, flags);
2208 hash_for_each_safe(rocker->group_tbl, bkt, tmp, group_entry, entry)
2209 hash_del(&group_entry->entry);
2210 spin_unlock_irqrestore(&rocker->group_tbl_lock, flags);
2211
2212 spin_lock_irqsave(&rocker->fdb_tbl_lock, flags);
2213 hash_for_each_safe(rocker->fdb_tbl, bkt, tmp, fdb_entry, entry)
2214 hash_del(&fdb_entry->entry);
2215 spin_unlock_irqrestore(&rocker->fdb_tbl_lock, flags);
2216
2217 spin_lock_irqsave(&rocker->internal_vlan_tbl_lock, flags);
2218 hash_for_each_safe(rocker->internal_vlan_tbl, bkt,
2219 tmp, internal_vlan_entry, entry)
2220 hash_del(&internal_vlan_entry->entry);
2221 spin_unlock_irqrestore(&rocker->internal_vlan_tbl_lock, flags);
2222}
2223
2224static struct rocker_flow_tbl_entry *
2225rocker_flow_tbl_find(struct rocker *rocker, struct rocker_flow_tbl_entry *match)
2226{
2227 struct rocker_flow_tbl_entry *found;
2228
2229 hash_for_each_possible(rocker->flow_tbl, found,
2230 entry, match->key_crc32) {
2231 if (memcmp(&found->key, &match->key, sizeof(found->key)) == 0)
2232 return found;
2233 }
2234
2235 return NULL;
2236}
2237
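/* Flow entries are reference counted: adding a duplicate key only
 * bumps ref_count, so just the first add programs the hardware and
 * just the last del removes the hardware entry.
 */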
2238static int rocker_flow_tbl_add(struct rocker_port *rocker_port,
2239 struct rocker_flow_tbl_entry *match,
2240 bool nowait)
2241{
2242 struct rocker *rocker = rocker_port->rocker;
2243 struct rocker_flow_tbl_entry *found;
2244 unsigned long flags;
2245 bool add_to_hw = false;
2246 int err = 0;
2247
2248 match->key_crc32 = crc32(~0, &match->key, sizeof(match->key));
2249
2250 spin_lock_irqsave(&rocker->flow_tbl_lock, flags);
2251
2252 found = rocker_flow_tbl_find(rocker, match);
2253
2254 if (found) {
2255 kfree(match);
2256 } else {
2257 found = match;
2258 found->cookie = rocker->flow_tbl_next_cookie++;
2259 hash_add(rocker->flow_tbl, &found->entry, found->key_crc32);
2260 add_to_hw = true;
2261 }
2262
2263 found->ref_count++;
2264
2265 spin_unlock_irqrestore(&rocker->flow_tbl_lock, flags);
2266
2267 if (add_to_hw) {
2268 err = rocker_cmd_exec(rocker, rocker_port,
2269 rocker_cmd_flow_tbl_add,
2270 found, NULL, NULL, nowait);
2271 if (err) {
2272 spin_lock_irqsave(&rocker->flow_tbl_lock, flags);
2273 hash_del(&found->entry);
2274 spin_unlock_irqrestore(&rocker->flow_tbl_lock, flags);
2275 kfree(found);
2276 }
2277 }
2278
2279 return err;
2280}
2281
2282static int rocker_flow_tbl_del(struct rocker_port *rocker_port,
2283 struct rocker_flow_tbl_entry *match,
2284 bool nowait)
2285{
2286 struct rocker *rocker = rocker_port->rocker;
2287 struct rocker_flow_tbl_entry *found;
2288 unsigned long flags;
2289 bool del_from_hw = false;
2290 int err = 0;
2291
2292 match->key_crc32 = crc32(~0, &match->key, sizeof(match->key));
2293
2294 spin_lock_irqsave(&rocker->flow_tbl_lock, flags);
2295
2296 found = rocker_flow_tbl_find(rocker, match);
2297
2298 if (found) {
2299 found->ref_count--;
2300 if (found->ref_count == 0) {
2301 hash_del(&found->entry);
2302 del_from_hw = true;
2303 }
2304 }
2305
2306 spin_unlock_irqrestore(&rocker->flow_tbl_lock, flags);
2307
2308 kfree(match);
2309
2310 if (del_from_hw) {
2311 err = rocker_cmd_exec(rocker, rocker_port,
2312 rocker_cmd_flow_tbl_del,
2313 found, NULL, NULL, nowait);
2314 kfree(found);
2315 }
2316
2317 return err;
2318}
2319
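/* NOWAIT callers may be in atomic context, so allocations made on
 * their behalf must not sleep.
 */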
2320static gfp_t rocker_op_flags_gfp(int flags)
2321{
2322 return flags & ROCKER_OP_FLAG_NOWAIT ? GFP_ATOMIC : GFP_KERNEL;
2323}
2324
2325static int rocker_flow_tbl_do(struct rocker_port *rocker_port,
2326 int flags, struct rocker_flow_tbl_entry *entry)
2327{
2328 bool nowait = flags & ROCKER_OP_FLAG_NOWAIT;
2329
2330 if (flags & ROCKER_OP_FLAG_REMOVE)
2331 return rocker_flow_tbl_del(rocker_port, entry, nowait);
2332 else
2333 return rocker_flow_tbl_add(rocker_port, entry, nowait);
2334}
2335
2336static int rocker_flow_tbl_ig_port(struct rocker_port *rocker_port,
2337 int flags, u32 in_lport, u32 in_lport_mask,
2338 enum rocker_of_dpa_table_id goto_tbl)
2339{
2340 struct rocker_flow_tbl_entry *entry;
2341
2342 entry = kzalloc(sizeof(*entry), rocker_op_flags_gfp(flags));
2343 if (!entry)
2344 return -ENOMEM;
2345
2346 entry->key.priority = ROCKER_PRIORITY_IG_PORT;
2347 entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT;
2348 entry->key.ig_port.in_lport = in_lport;
2349 entry->key.ig_port.in_lport_mask = in_lport_mask;
2350 entry->key.ig_port.goto_tbl = goto_tbl;
2351
2352 return rocker_flow_tbl_do(rocker_port, flags, entry);
2353}
2354
2355static int rocker_flow_tbl_vlan(struct rocker_port *rocker_port,
2356 int flags, u32 in_lport,
2357 __be16 vlan_id, __be16 vlan_id_mask,
2358 enum rocker_of_dpa_table_id goto_tbl,
2359 bool untagged, __be16 new_vlan_id)
2360{
2361 struct rocker_flow_tbl_entry *entry;
2362
2363 entry = kzalloc(sizeof(*entry), rocker_op_flags_gfp(flags));
2364 if (!entry)
2365 return -ENOMEM;
2366
2367 entry->key.priority = ROCKER_PRIORITY_VLAN;
2368 entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_VLAN;
2369 entry->key.vlan.in_lport = in_lport;
2370 entry->key.vlan.vlan_id = vlan_id;
2371 entry->key.vlan.vlan_id_mask = vlan_id_mask;
2372 entry->key.vlan.goto_tbl = goto_tbl;
2373
2374 entry->key.vlan.untagged = untagged;
2375 entry->key.vlan.new_vlan_id = new_vlan_id;
2376
2377 return rocker_flow_tbl_do(rocker_port, flags, entry);
2378}
2379
2380static int rocker_flow_tbl_term_mac(struct rocker_port *rocker_port,
2381 u32 in_lport, u32 in_lport_mask,
2382 __be16 eth_type, const u8 *eth_dst,
2383 const u8 *eth_dst_mask, __be16 vlan_id,
2384 __be16 vlan_id_mask, bool copy_to_cpu,
2385 int flags)
2386{
2387 struct rocker_flow_tbl_entry *entry;
2388
2389 entry = kzalloc(sizeof(*entry), rocker_op_flags_gfp(flags));
2390 if (!entry)
2391 return -ENOMEM;
2392
2393 if (is_multicast_ether_addr(eth_dst)) {
2394 entry->key.priority = ROCKER_PRIORITY_TERM_MAC_MCAST;
2395 entry->key.term_mac.goto_tbl =
2396 ROCKER_OF_DPA_TABLE_ID_MULTICAST_ROUTING;
2397 } else {
2398 entry->key.priority = ROCKER_PRIORITY_TERM_MAC_UCAST;
2399 entry->key.term_mac.goto_tbl =
2400 ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING;
2401 }
2402
2403 entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC;
2404 entry->key.term_mac.in_lport = in_lport;
2405 entry->key.term_mac.in_lport_mask = in_lport_mask;
2406 entry->key.term_mac.eth_type = eth_type;
2407 ether_addr_copy(entry->key.term_mac.eth_dst, eth_dst);
2408 ether_addr_copy(entry->key.term_mac.eth_dst_mask, eth_dst_mask);
2409 entry->key.term_mac.vlan_id = vlan_id;
2410 entry->key.term_mac.vlan_id_mask = vlan_id_mask;
2411 entry->key.term_mac.copy_to_cpu = copy_to_cpu;
2412
2413 return rocker_flow_tbl_do(rocker_port, flags, entry);
2414}
2415
2416static int rocker_flow_tbl_bridge(struct rocker_port *rocker_port,
2417 int flags,
2418 const u8 *eth_dst, const u8 *eth_dst_mask,
2419 __be16 vlan_id, u32 tunnel_id,
2420 enum rocker_of_dpa_table_id goto_tbl,
2421 u32 group_id, bool copy_to_cpu)
2422{
2423 struct rocker_flow_tbl_entry *entry;
2424 u32 priority;
2425 bool vlan_bridging = !!vlan_id;
2426 bool dflt = !eth_dst || (eth_dst && eth_dst_mask);
2427 bool wild = false;
2428
2429 entry = kzalloc(sizeof(*entry), rocker_op_flags_gfp(flags));
2430 if (!entry)
2431 return -ENOMEM;
2432
2433 entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_BRIDGING;
2434
2435 if (eth_dst) {
2436 entry->key.bridge.has_eth_dst = 1;
2437 ether_addr_copy(entry->key.bridge.eth_dst, eth_dst);
2438 }
2439 if (eth_dst_mask) {
2440 entry->key.bridge.has_eth_dst_mask = 1;
2441 ether_addr_copy(entry->key.bridge.eth_dst_mask, eth_dst_mask);
2442 if (memcmp(eth_dst_mask, ff_mac, ETH_ALEN))
2443 wild = true;
2444 }
2445
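/* Priority is derived from the match shape: VLAN vs tenant (tunnel)
 * bridging, default match (no dst MAC, or dst MAC plus mask) vs exact,
 * and wildcarded vs full ff:ff:ff:ff:ff:ff mask.
 */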
2446 priority = ROCKER_PRIORITY_UNKNOWN;
2447 if (vlan_bridging && dflt && wild)
2448 priority = ROCKER_PRIORITY_BRIDGING_VLAN_DFLT_WILD;
2449 else if (vlan_bridging && dflt && !wild)
2450 priority = ROCKER_PRIORITY_BRIDGING_VLAN_DFLT_EXACT;
2451 else if (vlan_bridging && !dflt)
2452 priority = ROCKER_PRIORITY_BRIDGING_VLAN;
2453 else if (!vlan_bridging && dflt && wild)
2454 priority = ROCKER_PRIORITY_BRIDGING_TENANT_DFLT_WILD;
2455 else if (!vlan_bridging && dflt && !wild)
2456 priority = ROCKER_PRIORITY_BRIDGING_TENANT_DFLT_EXACT;
2457 else if (!vlan_bridging && !dflt)
2458 priority = ROCKER_PRIORITY_BRIDGING_TENANT;
2459
2460 entry->key.priority = priority;
2461 entry->key.bridge.vlan_id = vlan_id;
2462 entry->key.bridge.tunnel_id = tunnel_id;
2463 entry->key.bridge.goto_tbl = goto_tbl;
2464 entry->key.bridge.group_id = group_id;
2465 entry->key.bridge.copy_to_cpu = copy_to_cpu;
2466
2467 return rocker_flow_tbl_do(rocker_port, flags, entry);
2468}
2469
2470static int rocker_flow_tbl_acl(struct rocker_port *rocker_port,
2471 int flags, u32 in_lport,
2472 u32 in_lport_mask,
2473 const u8 *eth_src, const u8 *eth_src_mask,
2474 const u8 *eth_dst, const u8 *eth_dst_mask,
2475 __be16 eth_type,
2476 __be16 vlan_id, __be16 vlan_id_mask,
2477 u8 ip_proto, u8 ip_proto_mask,
2478 u8 ip_tos, u8 ip_tos_mask,
2479 u32 group_id)
2480{
2481 u32 priority;
2482 struct rocker_flow_tbl_entry *entry;
2483
2484 entry = kzalloc(sizeof(*entry), rocker_op_flags_gfp(flags));
2485 if (!entry)
2486 return -ENOMEM;
2487
2488 priority = ROCKER_PRIORITY_ACL_NORMAL;
2489 if (eth_dst && eth_dst_mask) {
2490 if (memcmp(eth_dst_mask, mcast_mac, ETH_ALEN) == 0)
2491 priority = ROCKER_PRIORITY_ACL_DFLT;
2492 else if (is_link_local_ether_addr(eth_dst))
2493 priority = ROCKER_PRIORITY_ACL_CTRL;
2494 }
2495
2496 entry->key.priority = priority;
2497 entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
2498 entry->key.acl.in_lport = in_lport;
2499 entry->key.acl.in_lport_mask = in_lport_mask;
2500
2501 if (eth_src)
2502 ether_addr_copy(entry->key.acl.eth_src, eth_src);
2503 if (eth_src_mask)
2504 ether_addr_copy(entry->key.acl.eth_src_mask, eth_src_mask);
2505 if (eth_dst)
2506 ether_addr_copy(entry->key.acl.eth_dst, eth_dst);
2507 if (eth_dst_mask)
2508 ether_addr_copy(entry->key.acl.eth_dst_mask, eth_dst_mask);
2509
2510 entry->key.acl.eth_type = eth_type;
2511 entry->key.acl.vlan_id = vlan_id;
2512 entry->key.acl.vlan_id_mask = vlan_id_mask;
2513 entry->key.acl.ip_proto = ip_proto;
2514 entry->key.acl.ip_proto_mask = ip_proto_mask;
2515 entry->key.acl.ip_tos = ip_tos;
2516 entry->key.acl.ip_tos_mask = ip_tos_mask;
2517 entry->key.acl.group_id = group_id;
2518
2519 return rocker_flow_tbl_do(rocker_port, flags, entry);
2520}
2521
2522static struct rocker_group_tbl_entry *
2523rocker_group_tbl_find(struct rocker *rocker,
2524 struct rocker_group_tbl_entry *match)
2525{
2526 struct rocker_group_tbl_entry *found;
2527
2528 hash_for_each_possible(rocker->group_tbl, found,
2529 entry, match->group_id) {
2530 if (found->group_id == match->group_id)
2531 return found;
2532 }
2533
2534 return NULL;
2535}
2536
2537static void rocker_group_tbl_entry_free(struct rocker_group_tbl_entry *entry)
2538{
2539 switch (ROCKER_GROUP_TYPE_GET(entry->group_id)) {
2540 case ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD:
2541 case ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST:
2542 kfree(entry->group_ids);
2543 break;
2544 default:
2545 break;
2546 }
2547 kfree(entry);
2548}
2549
2550static int rocker_group_tbl_add(struct rocker_port *rocker_port,
2551 struct rocker_group_tbl_entry *match,
2552 bool nowait)
2553{
2554 struct rocker *rocker = rocker_port->rocker;
2555 struct rocker_group_tbl_entry *found;
2556 unsigned long flags;
2557 int err = 0;
2558
2559 spin_lock_irqsave(&rocker->group_tbl_lock, flags);
2560
2561 found = rocker_group_tbl_find(rocker, match);
2562
2563 if (found) {
2564 hash_del(&found->entry);
2565 rocker_group_tbl_entry_free(found);
2566 found = match;
2567 found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_MOD;
2568 } else {
2569 found = match;
2570 found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_ADD;
2571 }
2572
2573 hash_add(rocker->group_tbl, &found->entry, found->group_id);
2574
2575 spin_unlock_irqrestore(&rocker->group_tbl_lock, flags);
2576
2577 if (found->cmd)
2578 err = rocker_cmd_exec(rocker, rocker_port,
2579 rocker_cmd_group_tbl_add,
2580 found, NULL, NULL, nowait);
2581
2582 return err;
2583}
2584
2585static int rocker_group_tbl_del(struct rocker_port *rocker_port,
2586 struct rocker_group_tbl_entry *match,
2587 bool nowait)
2588{
2589 struct rocker *rocker = rocker_port->rocker;
2590 struct rocker_group_tbl_entry *found;
2591 unsigned long flags;
2592 int err = 0;
2593
2594 spin_lock_irqsave(&rocker->group_tbl_lock, flags);
2595
2596 found = rocker_group_tbl_find(rocker, match);
2597
2598 if (found) {
2599 hash_del(&found->entry);
2600 found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_DEL;
2601 }
2602
2603 spin_unlock_irqrestore(&rocker->group_tbl_lock, flags);
2604
2605 rocker_group_tbl_entry_free(match);
2606
2607 if (found) {
2608 err = rocker_cmd_exec(rocker, rocker_port,
2609 rocker_cmd_group_tbl_del,
2610 found, NULL, NULL, nowait);
2611 rocker_group_tbl_entry_free(found);
2612 }
2613
2614 return err;
2615}
2616
2617static int rocker_group_tbl_do(struct rocker_port *rocker_port,
2618 int flags, struct rocker_group_tbl_entry *entry)
2619{
2620 bool nowait = flags & ROCKER_OP_FLAG_NOWAIT;
2621
2622 if (flags & ROCKER_OP_FLAG_REMOVE)
2623 return rocker_group_tbl_del(rocker_port, entry, nowait);
2624 else
2625 return rocker_group_tbl_add(rocker_port, entry, nowait);
2626}
2627
2628static int rocker_group_l2_interface(struct rocker_port *rocker_port,
2629 int flags, __be16 vlan_id,
2630 u32 out_lport, int pop_vlan)
2631{
2632 struct rocker_group_tbl_entry *entry;
2633
2634 entry = kzalloc(sizeof(*entry), rocker_op_flags_gfp(flags));
2635 if (!entry)
2636 return -ENOMEM;
2637
2638 entry->group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_lport);
2639 entry->l2_interface.pop_vlan = pop_vlan;
2640
2641 return rocker_group_tbl_do(rocker_port, flags, entry);
2642}
2643
2644static int rocker_group_l2_fan_out(struct rocker_port *rocker_port,
2645 int flags, u8 group_count,
2646 u32 *group_ids, u32 group_id)
2647{
2648 struct rocker_group_tbl_entry *entry;
2649
2650 entry = kzalloc(sizeof(*entry), rocker_op_flags_gfp(flags));
2651 if (!entry)
2652 return -ENOMEM;
2653
2654 entry->group_id = group_id;
2655 entry->group_count = group_count;
2656
2657 entry->group_ids = kcalloc(group_count, sizeof(u32),
2658 rocker_op_flags_gfp(flags));
2659 if (!entry->group_ids) {
2660 kfree(entry);
2661 return -ENOMEM;
2662 }
2663 memcpy(entry->group_ids, group_ids, group_count * sizeof(u32));
2664
2665 return rocker_group_tbl_do(rocker_port, flags, entry);
2666}
2667
2668static int rocker_group_l2_flood(struct rocker_port *rocker_port,
2669 int flags, __be16 vlan_id,
2670 u8 group_count, u32 *group_ids,
2671 u32 group_id)
2672{
2673 return rocker_group_l2_fan_out(rocker_port, flags,
2674 group_count, group_ids,
2675 group_id);
2676}
2677
2678static int rocker_port_vlan_flood_group(struct rocker_port *rocker_port,
2679 int flags, __be16 vlan_id)
2680{
2681 struct rocker_port *p;
2682 struct rocker *rocker = rocker_port->rocker;
2683 u32 group_id = ROCKER_GROUP_L2_FLOOD(vlan_id, 0);
2684 u32 group_ids[rocker->port_count];
2685 u8 group_count = 0;
2686 int err;
2687 int i;
2688
2689 /* Adjust the flood group for this VLAN. The flood group
2690 * references an L2 interface group for each port in this
2691 * VLAN.
2692 */
2693
2694 for (i = 0; i < rocker->port_count; i++) {
2695 p = rocker->ports[i];
2696 if (!rocker_port_is_bridged(p))
2697 continue;
2698 if (test_bit(ntohs(vlan_id), p->vlan_bitmap)) {
2699 group_ids[group_count++] =
2700 ROCKER_GROUP_L2_INTERFACE(vlan_id,
2701 p->lport);
2702 }
2703 }
2704
2705 /* If there are no bridged ports in this VLAN, we're done */
2706 if (group_count == 0)
2707 return 0;
2708
2709 err = rocker_group_l2_flood(rocker_port, flags, vlan_id,
2710 group_count, group_ids,
2711 group_id);
2712 if (err)
2713 netdev_err(rocker_port->dev,
2714 "Error (%d) port VLAN l2 flood group\n", err);
2715
2716 return err;
2717}
2718
2719static int rocker_port_vlan_l2_groups(struct rocker_port *rocker_port,
2720 int flags, __be16 vlan_id,
2721 bool pop_vlan)
2722{
2723 struct rocker *rocker = rocker_port->rocker;
2724 struct rocker_port *p;
2725 bool adding = !(flags & ROCKER_OP_FLAG_REMOVE);
2726 u32 out_lport;
2727 int ref = 0;
2728 int err;
2729 int i;
2730
2731 /* An L2 interface group for this port in this VLAN, but
2732 * only when port STP state is LEARNING|FORWARDING.
2733 */
2734
2735 if (rocker_port->stp_state == BR_STATE_LEARNING ||
2736 rocker_port->stp_state == BR_STATE_FORWARDING) {
2737 out_lport = rocker_port->lport;
2738 err = rocker_group_l2_interface(rocker_port, flags,
2739 vlan_id, out_lport,
2740 pop_vlan);
2741 if (err) {
2742 netdev_err(rocker_port->dev,
2743 "Error (%d) port VLAN l2 group for lport %d\n",
2744 err, out_lport);
2745 return err;
2746 }
2747 }
2748
2749 /* An L2 interface group for this VLAN to CPU port.
2750 * Add when first port joins this VLAN and destroy when
2751 * last port leaves this VLAN.
2752 */
2753
2754 for (i = 0; i < rocker->port_count; i++) {
2755 p = rocker->ports[i];
2756 if (test_bit(ntohs(vlan_id), p->vlan_bitmap))
2757 ref++;
2758 }
2759
2760 if ((!adding || ref != 1) && (adding || ref != 0))
2761 return 0;
2762
2763 out_lport = 0;
2764 err = rocker_group_l2_interface(rocker_port, flags,
2765 vlan_id, out_lport,
2766 pop_vlan);
2767 if (err) {
2768 netdev_err(rocker_port->dev,
2769 "Error (%d) port VLAN l2 group for CPU port\n", err);
2770 return err;
2771 }
2772
2773 return 0;
2774}
2775
2776 static struct rocker_ctrl {
2777 const u8 *eth_dst;
2778 const u8 *eth_dst_mask;
2779 __be16 eth_type;
2780 bool acl;
2781 bool bridge;
2782 bool term;
2783 bool copy_to_cpu;
2784} rocker_ctrls[] = {
2785 [ROCKER_CTRL_LINK_LOCAL_MCAST] = {
2786 /* pass link local multicast pkts up to CPU for filtering */
2787 .eth_dst = ll_mac,
2788 .eth_dst_mask = ll_mask,
2789 .acl = true,
2790 },
2791 [ROCKER_CTRL_LOCAL_ARP] = {
2792 /* pass local ARP pkts up to CPU */
2793 .eth_dst = zero_mac,
2794 .eth_dst_mask = zero_mac,
2795 .eth_type = htons(ETH_P_ARP),
2796 .acl = true,
2797 },
2798 [ROCKER_CTRL_IPV4_MCAST] = {
2799 /* pass IPv4 mcast pkts up to CPU, RFC 1112 */
2800 .eth_dst = ipv4_mcast,
2801 .eth_dst_mask = ipv4_mask,
2802 .eth_type = htons(ETH_P_IP),
2803 .term = true,
2804 .copy_to_cpu = true,
2805 },
2806 [ROCKER_CTRL_IPV6_MCAST] = {
2807 /* pass IPv6 mcast pkts up to CPU, RFC 2464 */
2808 .eth_dst = ipv6_mcast,
2809 .eth_dst_mask = ipv6_mask,
2810 .eth_type = htons(ETH_P_IPV6),
2811 .term = true,
2812 .copy_to_cpu = true,
2813 },
2814 [ROCKER_CTRL_DFLT_BRIDGING] = {
2815 /* flood any pkts on vlan */
2816 .bridge = true,
2817 .copy_to_cpu = true,
2818 },
2819};
2820
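/* Each ctrl above is installed per VLAN through exactly one of three
 * paths, selected by its acl/bridge/term flag: an ACL rule, a
 * bridging (flood) rule, or a termination-MAC rule.
 */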
2821static int rocker_port_ctrl_vlan_acl(struct rocker_port *rocker_port,
2822 int flags, struct rocker_ctrl *ctrl,
2823 __be16 vlan_id)
2824{
2825 u32 in_lport = rocker_port->lport;
2826 u32 in_lport_mask = 0xffffffff;
2827 u32 out_lport = 0;
2828 u8 *eth_src = NULL;
2829 u8 *eth_src_mask = NULL;
2830 __be16 vlan_id_mask = htons(0xffff);
2831 u8 ip_proto = 0;
2832 u8 ip_proto_mask = 0;
2833 u8 ip_tos = 0;
2834 u8 ip_tos_mask = 0;
2835 u32 group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_lport);
2836 int err;
2837
2838 err = rocker_flow_tbl_acl(rocker_port, flags,
2839 in_lport, in_lport_mask,
2840 eth_src, eth_src_mask,
2841 ctrl->eth_dst, ctrl->eth_dst_mask,
2842 ctrl->eth_type,
2843 vlan_id, vlan_id_mask,
2844 ip_proto, ip_proto_mask,
2845 ip_tos, ip_tos_mask,
2846 group_id);
2847
2848 if (err)
2849 netdev_err(rocker_port->dev, "Error (%d) ctrl ACL\n", err);
2850
2851 return err;
2852}
2853
2854static int rocker_port_ctrl_vlan_bridge(struct rocker_port *rocker_port,
2855 int flags, struct rocker_ctrl *ctrl,
2856 __be16 vlan_id)
2857{
2858 enum rocker_of_dpa_table_id goto_tbl =
2859 ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
2860 u32 group_id = ROCKER_GROUP_L2_FLOOD(vlan_id, 0);
2861 u32 tunnel_id = 0;
2862 int err;
2863
2864 if (!rocker_port_is_bridged(rocker_port))
2865 return 0;
2866
2867 err = rocker_flow_tbl_bridge(rocker_port, flags,
2868 ctrl->eth_dst, ctrl->eth_dst_mask,
2869 vlan_id, tunnel_id,
2870 goto_tbl, group_id, ctrl->copy_to_cpu);
2871
2872 if (err)
2873 netdev_err(rocker_port->dev, "Error (%d) ctrl FLOOD\n", err);
2874
2875 return err;
2876}
2877
2878static int rocker_port_ctrl_vlan_term(struct rocker_port *rocker_port,
2879 int flags, struct rocker_ctrl *ctrl,
2880 __be16 vlan_id)
2881{
2882 u32 in_lport_mask = 0xffffffff;
2883 __be16 vlan_id_mask = htons(0xffff);
2884 int err;
2885
2886 if (ntohs(vlan_id) == 0)
2887 vlan_id = rocker_port->internal_vlan_id;
2888
2889 err = rocker_flow_tbl_term_mac(rocker_port,
2890 rocker_port->lport, in_lport_mask,
2891 ctrl->eth_type, ctrl->eth_dst,
2892 ctrl->eth_dst_mask, vlan_id,
2893 vlan_id_mask, ctrl->copy_to_cpu,
2894 flags);
2895
2896 if (err)
2897 netdev_err(rocker_port->dev, "Error (%d) ctrl term\n", err);
2898
2899 return err;
2900}
2901
2902static int rocker_port_ctrl_vlan(struct rocker_port *rocker_port, int flags,
2903 struct rocker_ctrl *ctrl, __be16 vlan_id)
2904{
2905 if (ctrl->acl)
2906 return rocker_port_ctrl_vlan_acl(rocker_port, flags,
2907 ctrl, vlan_id);
2908 if (ctrl->bridge)
2909 return rocker_port_ctrl_vlan_bridge(rocker_port, flags,
2910 ctrl, vlan_id);
2911
2912 if (ctrl->term)
2913 return rocker_port_ctrl_vlan_term(rocker_port, flags,
2914 ctrl, vlan_id);
2915
2916 return -EOPNOTSUPP;
2917}
2918
2919static int rocker_port_ctrl_vlan_add(struct rocker_port *rocker_port,
2920 int flags, __be16 vlan_id)
2921{
2922 int err = 0;
2923 int i;
2924
2925 for (i = 0; i < ROCKER_CTRL_MAX; i++) {
2926 if (rocker_port->ctrls[i]) {
2927 err = rocker_port_ctrl_vlan(rocker_port, flags,
2928 &rocker_ctrls[i], vlan_id);
2929 if (err)
2930 return err;
2931 }
2932 }
2933
2934 return err;
2935}
2936
2937static int rocker_port_ctrl(struct rocker_port *rocker_port, int flags,
2938 struct rocker_ctrl *ctrl)
2939{
2940 u16 vid;
2941 int err = 0;
2942
2943 for (vid = 1; vid < VLAN_N_VID; vid++) {
2944 if (!test_bit(vid, rocker_port->vlan_bitmap))
2945 continue;
2946 err = rocker_port_ctrl_vlan(rocker_port, flags,
2947 ctrl, htons(vid));
2948 if (err)
2949 break;
2950 }
2951
2952 return err;
2953}
2954
2955static int rocker_port_vlan(struct rocker_port *rocker_port, int flags,
2956 u16 vid)
2957{
2958 enum rocker_of_dpa_table_id goto_tbl =
2959 ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC;
2960 u32 in_lport = rocker_port->lport;
2961 __be16 vlan_id = htons(vid);
2962 __be16 vlan_id_mask = htons(0xffff);
2963 __be16 internal_vlan_id;
2964 bool untagged;
2965 bool adding = !(flags & ROCKER_OP_FLAG_REMOVE);
2966 int err;
2967
2968 internal_vlan_id = rocker_port_vid_to_vlan(rocker_port, vid, &untagged);
2969
2970 if (adding && test_and_set_bit(ntohs(internal_vlan_id),
2971 rocker_port->vlan_bitmap))
2972 return 0; /* already added */
2973 else if (!adding && !test_and_clear_bit(ntohs(internal_vlan_id),
2974 rocker_port->vlan_bitmap))
2975 return 0; /* already removed */
2976
2977 if (adding) {
2978 err = rocker_port_ctrl_vlan_add(rocker_port, flags,
2979 internal_vlan_id);
2980 if (err) {
2981 netdev_err(rocker_port->dev,
2982 "Error (%d) port ctrl vlan add\n", err);
2983 return err;
2984 }
2985 }
2986
2987 err = rocker_port_vlan_l2_groups(rocker_port, flags,
2988 internal_vlan_id, untagged);
2989 if (err) {
2990 netdev_err(rocker_port->dev,
2991 "Error (%d) port VLAN l2 groups\n", err);
2992 return err;
2993 }
2994
2995 err = rocker_port_vlan_flood_group(rocker_port, flags,
2996 internal_vlan_id);
2997 if (err) {
2998 netdev_err(rocker_port->dev,
2999 "Error (%d) port VLAN l2 flood group\n", err);
3000 return err;
3001 }
3002
3003 err = rocker_flow_tbl_vlan(rocker_port, flags,
3004 in_lport, vlan_id, vlan_id_mask,
3005 goto_tbl, untagged, internal_vlan_id);
3006 if (err)
3007 netdev_err(rocker_port->dev,
3008 "Error (%d) port VLAN table\n", err);
3009
3010 return err;
3011}
3012
3013static int rocker_port_ig_tbl(struct rocker_port *rocker_port, int flags)
3014{
3015 enum rocker_of_dpa_table_id goto_tbl;
3016 u32 in_lport;
3017 u32 in_lport_mask;
3018 int err;
3019
3020 /* Normal Ethernet Frames. Matches pkts from any local physical
3021 * ports. Goto VLAN tbl.
3022 */
3023
3024 in_lport = 0;
3025 in_lport_mask = 0xffff0000;
3026 goto_tbl = ROCKER_OF_DPA_TABLE_ID_VLAN;
3027
3028 err = rocker_flow_tbl_ig_port(rocker_port, flags,
3029 in_lport, in_lport_mask,
3030 goto_tbl);
3031 if (err)
3032 netdev_err(rocker_port->dev,
3033 "Error (%d) ingress port table entry\n", err);
3034
3035 return err;
3036}
3037
3038struct rocker_fdb_learn_work {
3039 struct work_struct work;
3040 struct net_device *dev;
3041 int flags;
3042 u8 addr[ETH_ALEN];
3043 u16 vid;
3044};
3045
3046static void rocker_port_fdb_learn_work(struct work_struct *work)
3047{
3048 struct rocker_fdb_learn_work *lw =
3049 container_of(work, struct rocker_fdb_learn_work, work);
3050 bool removing = (lw->flags & ROCKER_OP_FLAG_REMOVE);
3051 bool learned = (lw->flags & ROCKER_OP_FLAG_LEARNED);
3052 struct netdev_switch_notifier_fdb_info info;
3053
3054 info.addr = lw->addr;
3055 info.vid = lw->vid;
3056
3057 if (learned && removing)
3058 call_netdev_switch_notifiers(NETDEV_SWITCH_FDB_DEL,
3059 lw->dev, &info.info);
3060 else if (learned && !removing)
3061 call_netdev_switch_notifiers(NETDEV_SWITCH_FDB_ADD,
3062 lw->dev, &info.info);
3063
3064 kfree(work);
3065}
3066
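/* Learn notifications go through a work item: the caller may be in
 * atomic (NOWAIT) context, while notifying the bridge of the learned
 * address needs process context.
 */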
3067static int rocker_port_fdb_learn(struct rocker_port *rocker_port,
3068 int flags, const u8 *addr, __be16 vlan_id)
3069{
3070 struct rocker_fdb_learn_work *lw;
3071 enum rocker_of_dpa_table_id goto_tbl =
3072 ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
3073 u32 out_lport = rocker_port->lport;
3074 u32 tunnel_id = 0;
3075 u32 group_id = ROCKER_GROUP_NONE;
3076 bool syncing = !!(rocker_port->brport_flags & BR_LEARNING_SYNC);
3077 bool copy_to_cpu = false;
3078 int err;
3079
3080 if (rocker_port_is_bridged(rocker_port))
3081 group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_lport);
3082
3083 if (!(flags & ROCKER_OP_FLAG_REFRESH)) {
3084 err = rocker_flow_tbl_bridge(rocker_port, flags, addr, NULL,
3085 vlan_id, tunnel_id, goto_tbl,
3086 group_id, copy_to_cpu);
3087 if (err)
3088 return err;
3089 }
3090
3091 if (!syncing)
3092 return 0;
3093
3094 if (!rocker_port_is_bridged(rocker_port))
3095 return 0;
3096
3097 lw = kmalloc(sizeof(*lw), rocker_op_flags_gfp(flags));
3098 if (!lw)
3099 return -ENOMEM;
3100
3101 INIT_WORK(&lw->work, rocker_port_fdb_learn_work);
3102
3103 lw->dev = rocker_port->dev;
3104 lw->flags = flags;
3105 ether_addr_copy(lw->addr, addr);
3106 lw->vid = rocker_port_vlan_to_vid(rocker_port, vlan_id);
3107
3108 schedule_work(&lw->work);
3109
3110 return 0;
3111}
3112
3113static struct rocker_fdb_tbl_entry *
3114rocker_fdb_tbl_find(struct rocker *rocker, struct rocker_fdb_tbl_entry *match)
3115{
3116 struct rocker_fdb_tbl_entry *found;
3117
3118 hash_for_each_possible(rocker->fdb_tbl, found, entry, match->key_crc32)
3119 if (memcmp(&found->key, &match->key, sizeof(found->key)) == 0)
3120 return found;
3121
3122 return NULL;
3123}
3124
3125static int rocker_port_fdb(struct rocker_port *rocker_port,
3126 const unsigned char *addr,
3127 __be16 vlan_id, int flags)
3128{
3129 struct rocker *rocker = rocker_port->rocker;
3130 struct rocker_fdb_tbl_entry *fdb;
3131 struct rocker_fdb_tbl_entry *found;
3132 bool removing = (flags & ROCKER_OP_FLAG_REMOVE);
3133 unsigned long lock_flags;
3134
3135 fdb = kzalloc(sizeof(*fdb), rocker_op_flags_gfp(flags));
3136 if (!fdb)
3137 return -ENOMEM;
3138
3139 fdb->learned = (flags & ROCKER_OP_FLAG_LEARNED);
3140 fdb->key.lport = rocker_port->lport;
3141 ether_addr_copy(fdb->key.addr, addr);
3142 fdb->key.vlan_id = vlan_id;
3143 fdb->key_crc32 = crc32(~0, &fdb->key, sizeof(fdb->key));
3144
3145 spin_lock_irqsave(&rocker->fdb_tbl_lock, lock_flags);
3146
3147 found = rocker_fdb_tbl_find(rocker, fdb);
3148
3149 if (removing && found) {
3150 kfree(fdb);
3151 hash_del(&found->entry);
3152 } else if (!removing && !found) {
3153 hash_add(rocker->fdb_tbl, &fdb->entry, fdb->key_crc32);
3154 }
3155
3156 spin_unlock_irqrestore(&rocker->fdb_tbl_lock, lock_flags);
3157
3158 /* Check if adding and already exists, or removing and can't find */
3159 if (!found != !removing) {
3160 kfree(fdb);
3161 if (!found && removing)
3162 return 0;
3163 /* Refreshing existing to update aging timers */
3164 flags |= ROCKER_OP_FLAG_REFRESH;
3165 }
3166
3167 return rocker_port_fdb_learn(rocker_port, flags, addr, vlan_id);
3168}
3169
3170static int rocker_port_fdb_flush(struct rocker_port *rocker_port)
3171{
3172 struct rocker *rocker = rocker_port->rocker;
3173 struct rocker_fdb_tbl_entry *found;
3174 unsigned long lock_flags;
3175 int flags = ROCKER_OP_FLAG_NOWAIT | ROCKER_OP_FLAG_REMOVE;
3176 struct hlist_node *tmp;
3177 int bkt;
3178 int err = 0;
3179
3180 if (rocker_port->stp_state == BR_STATE_LEARNING ||
3181 rocker_port->stp_state == BR_STATE_FORWARDING)
3182 return 0;
3183
3184 spin_lock_irqsave(&rocker->fdb_tbl_lock, lock_flags);
3185
3186 hash_for_each_safe(rocker->fdb_tbl, bkt, tmp, found, entry) {
3187 if (found->key.lport != rocker_port->lport)
3188 continue;
3189 if (!found->learned)
3190 continue;
3191 err = rocker_port_fdb_learn(rocker_port, flags,
3192 found->key.addr,
3193 found->key.vlan_id);
3194 if (err)
3195 goto err_out;
3196 hash_del(&found->entry);
3197 }
3198
3199err_out:
3200 spin_unlock_irqrestore(&rocker->fdb_tbl_lock, lock_flags);
3201
3202 return err;
3203}
3204
3205static int rocker_port_router_mac(struct rocker_port *rocker_port,
3206 int flags, __be16 vlan_id)
3207{
3208 u32 in_lport_mask = 0xffffffff;
3209 __be16 eth_type;
3210 const u8 *dst_mac_mask = ff_mac;
3211 __be16 vlan_id_mask = htons(0xffff);
3212 bool copy_to_cpu = false;
3213 int err;
3214
3215 if (ntohs(vlan_id) == 0)
3216 vlan_id = rocker_port->internal_vlan_id;
3217
3218 eth_type = htons(ETH_P_IP);
3219 err = rocker_flow_tbl_term_mac(rocker_port,
3220 rocker_port->lport, in_lport_mask,
3221 eth_type, rocker_port->dev->dev_addr,
3222 dst_mac_mask, vlan_id, vlan_id_mask,
3223 copy_to_cpu, flags);
3224 if (err)
3225 return err;
3226
3227 eth_type = htons(ETH_P_IPV6);
3228 err = rocker_flow_tbl_term_mac(rocker_port,
3229 rocker_port->lport, in_lport_mask,
3230 eth_type, rocker_port->dev->dev_addr,
3231 dst_mac_mask, vlan_id, vlan_id_mask,
3232 copy_to_cpu, flags);
3233
3234 return err;
3235}
3236
3237static int rocker_port_fwding(struct rocker_port *rocker_port)
3238{
3239 bool pop_vlan;
3240 u32 out_lport;
3241 __be16 vlan_id;
3242 u16 vid;
3243 int flags = ROCKER_OP_FLAG_NOWAIT;
3244 int err;
3245
3246 /* Port will be forwarding-enabled if its STP state is LEARNING
3247 * or FORWARDING. Traffic from CPU can still egress, regardless of
3248 * port STP state. Use L2 interface group on port VLANs as a way
3249 * to toggle port forwarding: if forwarding is disabled, L2
3250 * interface group will not exist.
3251 */
3252
3253 if (rocker_port->stp_state != BR_STATE_LEARNING &&
3254 rocker_port->stp_state != BR_STATE_FORWARDING)
3255 flags |= ROCKER_OP_FLAG_REMOVE;
3256
3257 out_lport = rocker_port->lport;
3258 for (vid = 1; vid < VLAN_N_VID; vid++) {
3259 if (!test_bit(vid, rocker_port->vlan_bitmap))
3260 continue;
3261 vlan_id = htons(vid);
3262 pop_vlan = rocker_vlan_id_is_internal(vlan_id);
3263 err = rocker_group_l2_interface(rocker_port, flags,
3264 vlan_id, out_lport,
3265 pop_vlan);
3266 if (err) {
3267 netdev_err(rocker_port->dev,
3268 "Error (%d) port VLAN l2 group for lport %d\n",
3269 err, out_lport);
3270 return err;
3271 }
3272 }
3273
3274 return 0;
3275}
3276
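/* STP state picks the control traps a port needs: blocked/listening
 * ports still trap link-local multicast (e.g. BPDUs); learning and
 * forwarding ports also trap IPv4/IPv6 multicast, plus either default
 * bridging or local ARP depending on bridge membership.
 */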
3277static int rocker_port_stp_update(struct rocker_port *rocker_port, u8 state)
3278{
3279 bool want[ROCKER_CTRL_MAX] = { 0, };
3280 int flags;
3281 int err;
3282 int i;
3283
3284 if (rocker_port->stp_state == state)
3285 return 0;
3286
3287 rocker_port->stp_state = state;
3288
3289 switch (state) {
3290 case BR_STATE_DISABLED:
3291 /* port is completely disabled */
3292 break;
3293 case BR_STATE_LISTENING:
3294 case BR_STATE_BLOCKING:
3295 want[ROCKER_CTRL_LINK_LOCAL_MCAST] = true;
3296 break;
3297 case BR_STATE_LEARNING:
3298 case BR_STATE_FORWARDING:
3299 want[ROCKER_CTRL_LINK_LOCAL_MCAST] = true;
3300 want[ROCKER_CTRL_IPV4_MCAST] = true;
3301 want[ROCKER_CTRL_IPV6_MCAST] = true;
3302 if (rocker_port_is_bridged(rocker_port))
3303 want[ROCKER_CTRL_DFLT_BRIDGING] = true;
3304 else
3305 want[ROCKER_CTRL_LOCAL_ARP] = true;
3306 break;
3307 }
3308
3309 for (i = 0; i < ROCKER_CTRL_MAX; i++) {
3310 if (want[i] != rocker_port->ctrls[i]) {
3311 flags = ROCKER_OP_FLAG_NOWAIT |
3312 (want[i] ? 0 : ROCKER_OP_FLAG_REMOVE);
3313 err = rocker_port_ctrl(rocker_port, flags,
3314 &rocker_ctrls[i]);
3315 if (err)
3316 return err;
3317 rocker_port->ctrls[i] = want[i];
3318 }
3319 }
3320
3321 err = rocker_port_fdb_flush(rocker_port);
3322 if (err)
3323 return err;
3324
3325 return rocker_port_fwding(rocker_port);
3326}
3327
3328static struct rocker_internal_vlan_tbl_entry *
3329rocker_internal_vlan_tbl_find(struct rocker *rocker, int ifindex)
3330{
3331 struct rocker_internal_vlan_tbl_entry *found;
3332
3333 hash_for_each_possible(rocker->internal_vlan_tbl, found,
3334 entry, ifindex) {
3335 if (found->ifindex == ifindex)
3336 return found;
3337 }
3338
3339 return NULL;
3340}
3341
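/* Internal VLAN IDs are handed out per ifindex from a fixed pool
 * starting at ROCKER_INTERNAL_VLAN_ID_BASE and are reference counted,
 * so repeat users of one ifindex share a single ID.
 */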
3342static __be16 rocker_port_internal_vlan_id_get(struct rocker_port *rocker_port,
3343 int ifindex)
3344{
3345 struct rocker *rocker = rocker_port->rocker;
3346 struct rocker_internal_vlan_tbl_entry *entry;
3347 struct rocker_internal_vlan_tbl_entry *found;
3348 unsigned long lock_flags;
3349 int i;
3350
3351 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
3352 if (!entry)
3353 return 0;
3354
3355 entry->ifindex = ifindex;
3356
3357 spin_lock_irqsave(&rocker->internal_vlan_tbl_lock, lock_flags);
3358
3359 found = rocker_internal_vlan_tbl_find(rocker, ifindex);
3360 if (found) {
3361 kfree(entry);
3362 goto found;
3363 }
3364
3365 found = entry;
3366 hash_add(rocker->internal_vlan_tbl, &found->entry, found->ifindex);
3367
3368 for (i = 0; i < ROCKER_N_INTERNAL_VLANS; i++) {
3369 if (test_and_set_bit(i, rocker->internal_vlan_bitmap))
3370 continue;
3371 found->vlan_id = htons(ROCKER_INTERNAL_VLAN_ID_BASE + i);
3372 goto found;
3373 }
3374
3375 netdev_err(rocker_port->dev, "Out of internal VLAN IDs\n");
3376
3377found:
3378 found->ref_count++;
3379 spin_unlock_irqrestore(&rocker->internal_vlan_tbl_lock, lock_flags);
3380
3381 return found->vlan_id;
3382}
3383
3384static void rocker_port_internal_vlan_id_put(struct rocker_port *rocker_port,
3385 int ifindex)
3386{
3387 struct rocker *rocker = rocker_port->rocker;
3388 struct rocker_internal_vlan_tbl_entry *found;
3389 unsigned long lock_flags;
3390 unsigned long bit;
3391
3392 spin_lock_irqsave(&rocker->internal_vlan_tbl_lock, lock_flags);
3393
3394 found = rocker_internal_vlan_tbl_find(rocker, ifindex);
3395 if (!found) {
3396 netdev_err(rocker_port->dev,
3397 "ifindex (%d) not found in internal VLAN tbl\n",
3398 ifindex);
3399 goto not_found;
3400 }
3401
3402 if (--found->ref_count <= 0) {
3403 bit = ntohs(found->vlan_id) - ROCKER_INTERNAL_VLAN_ID_BASE;
3404 clear_bit(bit, rocker->internal_vlan_bitmap);
3405 hash_del(&found->entry);
3406 kfree(found);
3407 }
3408
3409not_found:
3410 spin_unlock_irqrestore(&rocker->internal_vlan_tbl_lock, lock_flags);
3411}
3412
3413/*****************
3414 * Net device ops
3415 *****************/
3416
3417static int rocker_port_open(struct net_device *dev)
3418{
3419 struct rocker_port *rocker_port = netdev_priv(dev);
3420 u8 stp_state = rocker_port_is_bridged(rocker_port) ?
3421 BR_STATE_BLOCKING : BR_STATE_FORWARDING;
3422 int err;
3423
3424 err = rocker_port_dma_rings_init(rocker_port);
3425 if (err)
3426 return err;
3427
3428 err = request_irq(rocker_msix_tx_vector(rocker_port),
3429 rocker_tx_irq_handler, 0,
3430 rocker_driver_name, rocker_port);
3431 if (err) {
3432 netdev_err(rocker_port->dev, "cannot assign tx irq\n");
3433 goto err_request_tx_irq;
3434 }
3435
3436 err = request_irq(rocker_msix_rx_vector(rocker_port),
3437 rocker_rx_irq_handler, 0,
3438 rocker_driver_name, rocker_port);
3439 if (err) {
3440 netdev_err(rocker_port->dev, "cannot assign rx irq\n");
3441 goto err_request_rx_irq;
3442 }
3443
3444 err = rocker_port_stp_update(rocker_port, stp_state);
3445 if (err)
3446 goto err_stp_update;
3447
3448 napi_enable(&rocker_port->napi_tx);
3449 napi_enable(&rocker_port->napi_rx);
3450 rocker_port_set_enable(rocker_port, true);
3451 netif_start_queue(dev);
3452 return 0;
3453
3454err_stp_update:
3455 free_irq(rocker_msix_rx_vector(rocker_port), rocker_port);
3456err_request_rx_irq:
3457 free_irq(rocker_msix_tx_vector(rocker_port), rocker_port);
3458err_request_tx_irq:
3459 rocker_port_dma_rings_fini(rocker_port);
3460 return err;
3461}
3462
3463static int rocker_port_stop(struct net_device *dev)
3464{
3465 struct rocker_port *rocker_port = netdev_priv(dev);
3466
3467 netif_stop_queue(dev);
3468 rocker_port_set_enable(rocker_port, false);
3469 napi_disable(&rocker_port->napi_rx);
3470 napi_disable(&rocker_port->napi_tx);
3471 rocker_port_stp_update(rocker_port, BR_STATE_DISABLED);
3472 free_irq(rocker_msix_rx_vector(rocker_port), rocker_port);
3473 free_irq(rocker_msix_tx_vector(rocker_port), rocker_port);
3474 rocker_port_dma_rings_fini(rocker_port);
3475
3476 return 0;
3477}
3478
3479static void rocker_tx_desc_frags_unmap(struct rocker_port *rocker_port,
3480 struct rocker_desc_info *desc_info)
3481{
3482 struct rocker *rocker = rocker_port->rocker;
3483 struct pci_dev *pdev = rocker->pdev;
3484 struct rocker_tlv *attrs[ROCKER_TLV_TX_MAX + 1];
3485 struct rocker_tlv *attr;
3486 int rem;
3487
3488 rocker_tlv_parse_desc(attrs, ROCKER_TLV_TX_MAX, desc_info);
3489 if (!attrs[ROCKER_TLV_TX_FRAGS])
3490 return;
3491 rocker_tlv_for_each_nested(attr, attrs[ROCKER_TLV_TX_FRAGS], rem) {
3492 struct rocker_tlv *frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_MAX + 1];
3493 dma_addr_t dma_handle;
3494 size_t len;
3495
3496 if (rocker_tlv_type(attr) != ROCKER_TLV_TX_FRAG)
3497 continue;
3498 rocker_tlv_parse_nested(frag_attrs, ROCKER_TLV_TX_FRAG_ATTR_MAX,
3499 attr);
3500 if (!frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_ADDR] ||
3501 !frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_LEN])
3502 continue;
3503 dma_handle = rocker_tlv_get_u64(frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_ADDR]);
3504 len = rocker_tlv_get_u16(frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_LEN]);
3505 pci_unmap_single(pdev, dma_handle, len, DMA_TO_DEVICE);
3506 }
3507}
3508
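/* Maps one tx buffer for DMA and advertises it to the device as a
 * nested FRAG TLV (address + length); on TLV overflow the DMA mapping
 * is undone so the caller sees no side effects.
 */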
3509static int rocker_tx_desc_frag_map_put(struct rocker_port *rocker_port,
3510 struct rocker_desc_info *desc_info,
3511 char *buf, size_t buf_len)
3512{
3513 struct rocker *rocker = rocker_port->rocker;
3514 struct pci_dev *pdev = rocker->pdev;
3515 dma_addr_t dma_handle;
3516 struct rocker_tlv *frag;
3517
3518 dma_handle = pci_map_single(pdev, buf, buf_len, DMA_TO_DEVICE);
3519 if (unlikely(pci_dma_mapping_error(pdev, dma_handle))) {
3520 if (net_ratelimit())
3521 netdev_err(rocker_port->dev, "failed to dma map tx frag\n");
3522 return -EIO;
3523 }
3524 frag = rocker_tlv_nest_start(desc_info, ROCKER_TLV_TX_FRAG);
3525 if (!frag)
3526 goto unmap_frag;
3527 if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_TX_FRAG_ATTR_ADDR,
3528 dma_handle))
3529 goto nest_cancel;
3530 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_TX_FRAG_ATTR_LEN,
3531 buf_len))
3532 goto nest_cancel;
3533 rocker_tlv_nest_end(desc_info, frag);
3534 return 0;
3535
3536nest_cancel:
3537 rocker_tlv_nest_cancel(desc_info, frag);
3538unmap_frag:
3539 pci_unmap_single(pdev, dma_handle, buf_len, DMA_TO_DEVICE);
3540 return -EMSGSIZE;
3541}
3542
3543static netdev_tx_t rocker_port_xmit(struct sk_buff *skb, struct net_device *dev)
3544{
3545 struct rocker_port *rocker_port = netdev_priv(dev);
3546 struct rocker *rocker = rocker_port->rocker;
3547 struct rocker_desc_info *desc_info;
3548 struct rocker_tlv *frags;
3549 int i;
3550 int err;
3551
3552 desc_info = rocker_desc_head_get(&rocker_port->tx_ring);
3553 if (unlikely(!desc_info)) {
3554 if (net_ratelimit())
3555 netdev_err(dev, "tx ring full when queue awake\n");
3556 return NETDEV_TX_BUSY;
3557 }
3558
3559 rocker_desc_cookie_ptr_set(desc_info, skb);
3560
3561 frags = rocker_tlv_nest_start(desc_info, ROCKER_TLV_TX_FRAGS);
3562 if (!frags)
3563 goto out;
3564 err = rocker_tx_desc_frag_map_put(rocker_port, desc_info,
3565 skb->data, skb_headlen(skb));
3566 if (err)
3567 goto nest_cancel;
3568 if (skb_shinfo(skb)->nr_frags > ROCKER_TX_FRAGS_MAX)
3569 goto nest_cancel;
3570
3571 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
3572 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3573
3574 err = rocker_tx_desc_frag_map_put(rocker_port, desc_info,
3575 skb_frag_address(frag),
3576 skb_frag_size(frag));
3577 if (err)
3578 goto unmap_frags;
3579 }
3580 rocker_tlv_nest_end(desc_info, frags);
3581
3582 rocker_desc_gen_clear(desc_info);
3583 rocker_desc_head_set(rocker, &rocker_port->tx_ring, desc_info);
3584
3585 desc_info = rocker_desc_head_get(&rocker_port->tx_ring);
3586 if (!desc_info)
3587 netif_stop_queue(dev);
3588
3589 return NETDEV_TX_OK;
3590
3591unmap_frags:
3592 rocker_tx_desc_frags_unmap(rocker_port, desc_info);
3593nest_cancel:
3594 rocker_tlv_nest_cancel(desc_info, frags);
3595out:
3596 dev_kfree_skb(skb);
3597 dev->stats.tx_dropped++;
3598
3599 return NETDEV_TX_OK;
3600}
3601
3602static int rocker_port_set_mac_address(struct net_device *dev, void *p)
3603{
3604 struct sockaddr *addr = p;
3605 struct rocker_port *rocker_port = netdev_priv(dev);
3606 int err;
3607
3608 if (!is_valid_ether_addr(addr->sa_data))
3609 return -EADDRNOTAVAIL;
3610
3611 err = rocker_cmd_set_port_settings_macaddr(rocker_port, addr->sa_data);
3612 if (err)
3613 return err;
3614 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
3615 return 0;
3616}
3617
3618static int rocker_port_vlan_rx_add_vid(struct net_device *dev,
3619 __be16 proto, u16 vid)
3620{
3621 struct rocker_port *rocker_port = netdev_priv(dev);
3622 int err;
3623
3624 err = rocker_port_vlan(rocker_port, 0, vid);
3625 if (err)
3626 return err;
3627
3628 return rocker_port_router_mac(rocker_port, 0, htons(vid));
3629}
3630
3631static int rocker_port_vlan_rx_kill_vid(struct net_device *dev,
3632 __be16 proto, u16 vid)
3633{
3634 struct rocker_port *rocker_port = netdev_priv(dev);
3635 int err;
3636
3637 err = rocker_port_router_mac(rocker_port, ROCKER_OP_FLAG_REMOVE,
3638 htons(vid));
3639 if (err)
3640 return err;
3641
3642 return rocker_port_vlan(rocker_port, ROCKER_OP_FLAG_REMOVE, vid);
3643}
3644
3645static int rocker_port_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
3646 struct net_device *dev,
3647 const unsigned char *addr, u16 vid,
3648 u16 nlm_flags)
3649{
3650 struct rocker_port *rocker_port = netdev_priv(dev);
3651 __be16 vlan_id = rocker_port_vid_to_vlan(rocker_port, vid, NULL);
3652 int flags = 0;
3653
3654 if (!rocker_port_is_bridged(rocker_port))
3655 return -EINVAL;
3656
3657 return rocker_port_fdb(rocker_port, addr, vlan_id, flags);
3658}
3659
3660static int rocker_port_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
3661 struct net_device *dev,
3662 const unsigned char *addr, u16 vid)
3663{
3664 struct rocker_port *rocker_port = netdev_priv(dev);
3665 __be16 vlan_id = rocker_port_vid_to_vlan(rocker_port, vid, NULL);
3666 int flags = ROCKER_OP_FLAG_REMOVE;
3667
3668 if (!rocker_port_is_bridged(rocker_port))
3669 return -EINVAL;
3670
3671 return rocker_port_fdb(rocker_port, addr, vlan_id, flags);
3672}
3673
3674static int rocker_fdb_fill_info(struct sk_buff *skb,
3675 struct rocker_port *rocker_port,
3676 const unsigned char *addr, u16 vid,
3677 u32 portid, u32 seq, int type,
3678 unsigned int flags)
3679{
3680 struct nlmsghdr *nlh;
3681 struct ndmsg *ndm;
3682
3683 nlh = nlmsg_put(skb, portid, seq, type, sizeof(*ndm), flags);
3684 if (!nlh)
3685 return -EMSGSIZE;
3686
3687 ndm = nlmsg_data(nlh);
3688 ndm->ndm_family = AF_BRIDGE;
3689 ndm->ndm_pad1 = 0;
3690 ndm->ndm_pad2 = 0;
3691 ndm->ndm_flags = NTF_SELF;
3692 ndm->ndm_type = 0;
3693 ndm->ndm_ifindex = rocker_port->dev->ifindex;
3694 ndm->ndm_state = NUD_REACHABLE;
3695
3696 if (nla_put(skb, NDA_LLADDR, ETH_ALEN, addr))
3697 goto nla_put_failure;
3698
3699 if (vid && nla_put_u16(skb, NDA_VLAN, vid))
3700 goto nla_put_failure;
3701
3702 nlmsg_end(skb, nlh);
3703 return 0;
3704
3705nla_put_failure:
3706 nlmsg_cancel(skb, nlh);
3707 return -EMSGSIZE;
3708}
3709
3710static int rocker_port_fdb_dump(struct sk_buff *skb,
3711 struct netlink_callback *cb,
3712 struct net_device *dev,
3713 struct net_device *filter_dev,
3714 int idx)
3715{
3716 struct rocker_port *rocker_port = netdev_priv(dev);
3717 struct rocker *rocker = rocker_port->rocker;
3718 struct rocker_fdb_tbl_entry *found;
3719 struct hlist_node *tmp;
3720 int bkt;
3721 unsigned long lock_flags;
3722 const unsigned char *addr;
3723 u16 vid;
3724 int err;
3725
3726 spin_lock_irqsave(&rocker->fdb_tbl_lock, lock_flags);
3727 hash_for_each_safe(rocker->fdb_tbl, bkt, tmp, found, entry) {
3728 if (found->key.lport != rocker_port->lport)
3729 continue;
3730 if (idx < cb->args[0])
3731 goto skip;
3732 addr = found->key.addr;
3733 vid = rocker_port_vlan_to_vid(rocker_port, found->key.vlan_id);
3734 err = rocker_fdb_fill_info(skb, rocker_port, addr, vid,
3735 NETLINK_CB(cb->skb).portid,
3736 cb->nlh->nlmsg_seq,
3737 RTM_NEWNEIGH, NLM_F_MULTI);
3738 if (err < 0)
3739 break;
3740skip:
3741 ++idx;
3742 }
3743 spin_unlock_irqrestore(&rocker->fdb_tbl_lock, lock_flags);
3744 return idx;
3745}
3746
3747 static int rocker_port_bridge_setlink(struct net_device *dev,
3748 struct nlmsghdr *nlh, u16 flags)
3749 {
3750 struct rocker_port *rocker_port = netdev_priv(dev);
3751 struct nlattr *protinfo;
3752 struct nlattr *attr;
3753 int err;
3754
3755 protinfo = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg),
3756 IFLA_PROTINFO);
3757 if (protinfo) {
3758 attr = nla_find_nested(protinfo, IFLA_BRPORT_LEARNING);
3759 if (attr) {
3760 if (nla_len(attr) < sizeof(u8))
3761 return -EINVAL;
3762
3763 if (nla_get_u8(attr))
3764 rocker_port->brport_flags |= BR_LEARNING;
3765 else
3766 rocker_port->brport_flags &= ~BR_LEARNING;
3767 err = rocker_port_set_learning(rocker_port);
3768 if (err)
3769 return err;
3770 }
3771 attr = nla_find_nested(protinfo, IFLA_BRPORT_LEARNING_SYNC);
3772 if (attr) {
e7560582
TG
3773 if (nla_len(attr) < sizeof(u8))
3774 return -EINVAL;
3775
5111f80c
SF
3776 if (nla_get_u8(attr))
3777 rocker_port->brport_flags |= BR_LEARNING_SYNC;
3778 else
3779 rocker_port->brport_flags &= ~BR_LEARNING_SYNC;
3780 }
3781 }
3782
3783 return 0;
3784}
3785
static int rocker_port_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
				      struct net_device *dev,
				      u32 filter_mask)
{
	struct rocker_port *rocker_port = netdev_priv(dev);
	u16 mode = BRIDGE_MODE_UNDEF;
	u32 mask = BR_LEARNING | BR_LEARNING_SYNC;

	return ndo_dflt_bridge_getlink(skb, pid, seq, dev, mode,
				       rocker_port->brport_flags, mask);
}

static int rocker_port_switch_parent_id_get(struct net_device *dev,
					    struct netdev_phys_item_id *psid)
{
	struct rocker_port *rocker_port = netdev_priv(dev);
	struct rocker *rocker = rocker_port->rocker;

	psid->id_len = sizeof(rocker->hw.id);
	memcpy(&psid->id, &rocker->hw.id, psid->id_len);
	return 0;
}

static int rocker_port_switch_port_stp_update(struct net_device *dev, u8 state)
{
	struct rocker_port *rocker_port = netdev_priv(dev);

	return rocker_port_stp_update(rocker_port, state);
}

static const struct net_device_ops rocker_port_netdev_ops = {
	.ndo_open			= rocker_port_open,
	.ndo_stop			= rocker_port_stop,
	.ndo_start_xmit			= rocker_port_xmit,
	.ndo_set_mac_address		= rocker_port_set_mac_address,
	.ndo_vlan_rx_add_vid		= rocker_port_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid		= rocker_port_vlan_rx_kill_vid,
	.ndo_fdb_add			= rocker_port_fdb_add,
	.ndo_fdb_del			= rocker_port_fdb_del,
	.ndo_fdb_dump			= rocker_port_fdb_dump,
	.ndo_bridge_setlink		= rocker_port_bridge_setlink,
	.ndo_bridge_getlink		= rocker_port_bridge_getlink,
	.ndo_switch_parent_id_get	= rocker_port_switch_parent_id_get,
	.ndo_switch_port_stp_update	= rocker_port_switch_port_stp_update,
};

/********************
 * ethtool interface
 ********************/

static int rocker_port_get_settings(struct net_device *dev,
				    struct ethtool_cmd *ecmd)
{
	struct rocker_port *rocker_port = netdev_priv(dev);

	return rocker_cmd_get_port_settings_ethtool(rocker_port, ecmd);
}

static int rocker_port_set_settings(struct net_device *dev,
				    struct ethtool_cmd *ecmd)
{
	struct rocker_port *rocker_port = netdev_priv(dev);

	return rocker_cmd_set_port_settings_ethtool(rocker_port, ecmd);
}

static void rocker_port_get_drvinfo(struct net_device *dev,
				    struct ethtool_drvinfo *drvinfo)
{
	strlcpy(drvinfo->driver, rocker_driver_name, sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, UTS_RELEASE, sizeof(drvinfo->version));
}

static struct rocker_port_stats {
	char str[ETH_GSTRING_LEN];
	int type;
} rocker_port_stats[] = {
	{ "rx_packets",	ROCKER_TLV_CMD_PORT_STATS_RX_PKTS, },
	{ "rx_bytes",	ROCKER_TLV_CMD_PORT_STATS_RX_BYTES, },
	{ "rx_dropped",	ROCKER_TLV_CMD_PORT_STATS_RX_DROPPED, },
	{ "rx_errors",	ROCKER_TLV_CMD_PORT_STATS_RX_ERRORS, },

	{ "tx_packets",	ROCKER_TLV_CMD_PORT_STATS_TX_PKTS, },
	{ "tx_bytes",	ROCKER_TLV_CMD_PORT_STATS_TX_BYTES, },
	{ "tx_dropped",	ROCKER_TLV_CMD_PORT_STATS_TX_DROPPED, },
	{ "tx_errors",	ROCKER_TLV_CMD_PORT_STATS_TX_ERRORS, },
};

#define ROCKER_PORT_STATS_LEN  ARRAY_SIZE(rocker_port_stats)

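/* Stats counters exported to ethtool; "ethtool -S <port>" first pulls
 * the strings below, then the values via rocker_port_get_stats().
 */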
static void rocker_port_get_strings(struct net_device *netdev, u32 stringset,
				    u8 *data)
{
	u8 *p = data;
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < ARRAY_SIZE(rocker_port_stats); i++) {
			memcpy(p, rocker_port_stats[i].str, ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		break;
	}
}

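/* Build a GET_PORT_STATS command descriptor: a CMD_TYPE TLV followed by
 * a nested CMD_INFO TLV holding the lport to query.
 */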
static int
rocker_cmd_get_port_stats_prep(struct rocker *rocker,
			       struct rocker_port *rocker_port,
			       struct rocker_desc_info *desc_info,
			       void *priv)
{
	struct rocker_tlv *cmd_stats;

	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
			       ROCKER_TLV_CMD_TYPE_GET_PORT_STATS))
		return -EMSGSIZE;

	cmd_stats = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
	if (!cmd_stats)
		return -EMSGSIZE;

	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_STATS_LPORT,
			       rocker_port->lport))
		return -EMSGSIZE;

	rocker_tlv_nest_end(desc_info, cmd_stats);

	return 0;
}

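/* Parse the GET_PORT_STATS reply: validate that the lport matches, then
 * copy each returned 64-bit counter into the ethtool data array in
 * rocker_port_stats[] order.
 */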
static int
rocker_cmd_get_port_stats_ethtool_proc(struct rocker *rocker,
				       struct rocker_port *rocker_port,
				       struct rocker_desc_info *desc_info,
				       void *priv)
{
	struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
	struct rocker_tlv *stats_attrs[ROCKER_TLV_CMD_PORT_STATS_MAX + 1];
	struct rocker_tlv *pattr;
	u32 lport;
	u64 *data = priv;
	int i;

	rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info);

	if (!attrs[ROCKER_TLV_CMD_INFO])
		return -EIO;

	rocker_tlv_parse_nested(stats_attrs, ROCKER_TLV_CMD_PORT_STATS_MAX,
				attrs[ROCKER_TLV_CMD_INFO]);

	if (!stats_attrs[ROCKER_TLV_CMD_PORT_STATS_LPORT])
		return -EIO;

	lport = rocker_tlv_get_u32(stats_attrs[ROCKER_TLV_CMD_PORT_STATS_LPORT]);
	if (lport != rocker_port->lport)
		return -EIO;

	for (i = 0; i < ARRAY_SIZE(rocker_port_stats); i++) {
		pattr = stats_attrs[rocker_port_stats[i].type];
		if (!pattr)
			continue;

		data[i] = rocker_tlv_get_u64(pattr);
	}

	return 0;
}

static int rocker_cmd_get_port_stats_ethtool(struct rocker_port *rocker_port,
					     void *priv)
{
	return rocker_cmd_exec(rocker_port->rocker, rocker_port,
			       rocker_cmd_get_port_stats_prep, NULL,
			       rocker_cmd_get_port_stats_ethtool_proc,
			       priv, false);
}

static void rocker_port_get_stats(struct net_device *dev,
				  struct ethtool_stats *stats, u64 *data)
{
	struct rocker_port *rocker_port = netdev_priv(dev);

	if (rocker_cmd_get_port_stats_ethtool(rocker_port, data) != 0) {
		int i;

		/* Command failed: report zeroed counters rather than
		 * uninitialized data.
		 */
		for (i = 0; i < ARRAY_SIZE(rocker_port_stats); ++i)
			data[i] = 0;
	}
}

static int rocker_port_get_sset_count(struct net_device *netdev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return ROCKER_PORT_STATS_LEN;
	default:
		return -EOPNOTSUPP;
	}
}

static const struct ethtool_ops rocker_port_ethtool_ops = {
	.get_settings		= rocker_port_get_settings,
	.set_settings		= rocker_port_set_settings,
	.get_drvinfo		= rocker_port_get_drvinfo,
	.get_link		= ethtool_op_get_link,
	.get_strings		= rocker_port_get_strings,
	.get_ethtool_stats	= rocker_port_get_stats,
	.get_sset_count		= rocker_port_get_sset_count,
};

/*****************
 * NAPI interface
 *****************/

static struct rocker_port *rocker_port_napi_tx_get(struct napi_struct *napi)
{
	return container_of(napi, struct rocker_port, napi_tx);
}

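/* TX completion NAPI poll: reclaim finished tx descriptors, account
 * stats, and hand the descriptors back to the ring as credits.
 */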
static int rocker_port_poll_tx(struct napi_struct *napi, int budget)
{
	struct rocker_port *rocker_port = rocker_port_napi_tx_get(napi);
	struct rocker *rocker = rocker_port->rocker;
	struct rocker_desc_info *desc_info;
	u32 credits = 0;
	int err;

	/* Cleanup tx descriptors */
	while ((desc_info = rocker_desc_tail_get(&rocker_port->tx_ring))) {
		struct sk_buff *skb;

		err = rocker_desc_err(desc_info);
		if (err && net_ratelimit())
			netdev_err(rocker_port->dev, "tx desc received with err %d\n",
				   err);
		rocker_tx_desc_frags_unmap(rocker_port, desc_info);

		skb = rocker_desc_cookie_ptr_get(desc_info);
		if (err == 0) {
			rocker_port->dev->stats.tx_packets++;
			rocker_port->dev->stats.tx_bytes += skb->len;
		} else {
			rocker_port->dev->stats.tx_errors++;
		}

		dev_kfree_skb_any(skb);
		credits++;
	}

	if (credits && netif_queue_stopped(rocker_port->dev))
		netif_wake_queue(rocker_port->dev);

	napi_complete(napi);
	rocker_dma_ring_credits_set(rocker, &rocker_port->tx_ring, credits);

	return 0;
}

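/* Hand one received frame to the stack: unmap its DMA buffer, trim the
 * skb to the reported frag length, and refill the descriptor with a
 * fresh skb for the next packet.
 */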
static int rocker_port_rx_proc(struct rocker *rocker,
			       struct rocker_port *rocker_port,
			       struct rocker_desc_info *desc_info)
{
	struct rocker_tlv *attrs[ROCKER_TLV_RX_MAX + 1];
	struct sk_buff *skb = rocker_desc_cookie_ptr_get(desc_info);
	size_t rx_len;

	if (!skb)
		return -ENOENT;

	rocker_tlv_parse_desc(attrs, ROCKER_TLV_RX_MAX, desc_info);
	if (!attrs[ROCKER_TLV_RX_FRAG_LEN])
		return -EINVAL;

	rocker_dma_rx_ring_skb_unmap(rocker, attrs);

	rx_len = rocker_tlv_get_u16(attrs[ROCKER_TLV_RX_FRAG_LEN]);
	skb_put(skb, rx_len);
	skb->protocol = eth_type_trans(skb, rocker_port->dev);

	rocker_port->dev->stats.rx_packets++;
	rocker_port->dev->stats.rx_bytes += skb->len;

	netif_receive_skb(skb);

	return rocker_dma_rx_ring_skb_alloc(rocker, rocker_port, desc_info);
}

static struct rocker_port *rocker_port_napi_rx_get(struct napi_struct *napi)
{
	return container_of(napi, struct rocker_port, napi_rx);
}

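/* RX NAPI poll: process up to @budget descriptors, re-arm each one on
 * the ring, and complete NAPI only when the ring is drained.
 */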
static int rocker_port_poll_rx(struct napi_struct *napi, int budget)
{
	struct rocker_port *rocker_port = rocker_port_napi_rx_get(napi);
	struct rocker *rocker = rocker_port->rocker;
	struct rocker_desc_info *desc_info;
	u32 credits = 0;
	int err;

	/* Process rx descriptors */
	while (credits < budget &&
	       (desc_info = rocker_desc_tail_get(&rocker_port->rx_ring))) {
		err = rocker_desc_err(desc_info);
		if (err) {
			if (net_ratelimit())
				netdev_err(rocker_port->dev, "rx desc received with err %d\n",
					   err);
		} else {
			err = rocker_port_rx_proc(rocker, rocker_port,
						  desc_info);
			if (err && net_ratelimit())
				netdev_err(rocker_port->dev, "rx processing failed with err %d\n",
					   err);
		}
		if (err)
			rocker_port->dev->stats.rx_errors++;

		rocker_desc_gen_clear(desc_info);
		rocker_desc_head_set(rocker, &rocker_port->rx_ring, desc_info);
		credits++;
	}

	if (credits < budget)
		napi_complete(napi);

	rocker_dma_ring_credits_set(rocker, &rocker_port->rx_ring, credits);

	return credits;
}

/*****************
 * PCI driver ops
 *****************/

static void rocker_carrier_init(struct rocker_port *rocker_port)
{
	struct rocker *rocker = rocker_port->rocker;
	u64 link_status = rocker_read64(rocker, PORT_PHYS_LINK_STATUS);
	bool link_up;

	/* Use a 64-bit mask: lport can exceed the width of a plain int. */
	link_up = link_status & (1ULL << rocker_port->lport);
	if (link_up)
		netif_carrier_on(rocker_port->dev);
	else
		netif_carrier_off(rocker_port->dev);
}

static void rocker_remove_ports(struct rocker *rocker)
{
	struct rocker_port *rocker_port;
	int i;

	for (i = 0; i < rocker->port_count; i++) {
		rocker_port = rocker->ports[i];
		rocker_port_ig_tbl(rocker_port, ROCKER_OP_FLAG_REMOVE);
		unregister_netdev(rocker_port->dev);
	}
	kfree(rocker->ports);
}

static void rocker_port_dev_addr_init(struct rocker *rocker,
				      struct rocker_port *rocker_port)
{
	struct pci_dev *pdev = rocker->pdev;
	int err;

	err = rocker_cmd_get_port_settings_macaddr(rocker_port,
						   rocker_port->dev->dev_addr);
	if (err) {
		dev_warn(&pdev->dev, "failed to get mac address, using random\n");
		eth_hw_addr_random(rocker_port->dev);
	}
}

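/* Create and register one switch port netdev and install its default
 * ingress table entry.
 */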
static int rocker_probe_port(struct rocker *rocker, unsigned int port_number)
{
	struct pci_dev *pdev = rocker->pdev;
	struct rocker_port *rocker_port;
	struct net_device *dev;
	int err;

	dev = alloc_etherdev(sizeof(struct rocker_port));
	if (!dev)
		return -ENOMEM;
	rocker_port = netdev_priv(dev);
	rocker_port->dev = dev;
	rocker_port->rocker = rocker;
	rocker_port->port_number = port_number;
	rocker_port->lport = port_number + 1;
	rocker_port->brport_flags = BR_LEARNING | BR_LEARNING_SYNC;

	rocker_port_dev_addr_init(rocker, rocker_port);
	dev->netdev_ops = &rocker_port_netdev_ops;
	dev->ethtool_ops = &rocker_port_ethtool_ops;
	netif_napi_add(dev, &rocker_port->napi_tx, rocker_port_poll_tx,
		       NAPI_POLL_WEIGHT);
	netif_napi_add(dev, &rocker_port->napi_rx, rocker_port_poll_rx,
		       NAPI_POLL_WEIGHT);
	rocker_carrier_init(rocker_port);

	dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER |
			 NETIF_F_HW_SWITCH_OFFLOAD;

	err = register_netdev(dev);
	if (err) {
		dev_err(&pdev->dev, "register_netdev failed\n");
		goto err_register_netdev;
	}
	rocker->ports[port_number] = rocker_port;

	rocker_port_set_learning(rocker_port);

	rocker_port->internal_vlan_id =
		rocker_port_internal_vlan_id_get(rocker_port, dev->ifindex);
	err = rocker_port_ig_tbl(rocker_port, 0);
	if (err) {
		dev_err(&pdev->dev, "install ig port table failed\n");
		goto err_port_ig_tbl;
	}

	return 0;

err_port_ig_tbl:
	unregister_netdev(dev);
err_register_netdev:
	free_netdev(dev);
	return err;
}

static int rocker_probe_ports(struct rocker *rocker)
{
	int i;
	size_t alloc_size;
	int err;

	alloc_size = sizeof(struct rocker_port *) * rocker->port_count;
	rocker->ports = kmalloc(alloc_size, GFP_KERNEL);
	if (!rocker->ports)
		return -ENOMEM;
	for (i = 0; i < rocker->port_count; i++) {
		err = rocker_probe_port(rocker, i);
		if (err)
			goto remove_ports;
	}
	return 0;

remove_ports:
	rocker_remove_ports(rocker);
	return err;
}

static int rocker_msix_init(struct rocker *rocker)
{
	struct pci_dev *pdev = rocker->pdev;
	int msix_entries;
	int i;
	int err;

	msix_entries = pci_msix_vec_count(pdev);
	if (msix_entries < 0)
		return msix_entries;

	if (msix_entries != ROCKER_MSIX_VEC_COUNT(rocker->port_count))
		return -EINVAL;

	rocker->msix_entries = kmalloc_array(msix_entries,
					     sizeof(struct msix_entry),
					     GFP_KERNEL);
	if (!rocker->msix_entries)
		return -ENOMEM;

	for (i = 0; i < msix_entries; i++)
		rocker->msix_entries[i].entry = i;

	err = pci_enable_msix_exact(pdev, rocker->msix_entries, msix_entries);
	if (err < 0)
		goto err_enable_msix;

	return 0;

err_enable_msix:
	kfree(rocker->msix_entries);
	return err;
}

static void rocker_msix_fini(struct rocker *rocker)
{
	pci_disable_msix(rocker->pdev);
	kfree(rocker->msix_entries);
}

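/* Bring up the whole switch: PCI/DMA setup, MSI-X vectors, command and
 * event rings, OF-DPA tables, then one netdev per physical port.
 */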
static int rocker_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct rocker *rocker;
	int err;

	rocker = kzalloc(sizeof(*rocker), GFP_KERNEL);
	if (!rocker)
		return -ENOMEM;

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "pci_enable_device failed\n");
		goto err_pci_enable_device;
	}

	err = pci_request_regions(pdev, rocker_driver_name);
	if (err) {
		dev_err(&pdev->dev, "pci_request_regions failed\n");
		goto err_pci_request_regions;
	}

	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (!err) {
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
		if (err) {
			dev_err(&pdev->dev, "pci_set_consistent_dma_mask failed\n");
			goto err_pci_set_dma_mask;
		}
	} else {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev, "pci_set_dma_mask failed\n");
			goto err_pci_set_dma_mask;
		}
	}

	if (pci_resource_len(pdev, 0) < ROCKER_PCI_BAR0_SIZE) {
		dev_err(&pdev->dev, "invalid PCI region size\n");
		err = -EINVAL;
		goto err_pci_resource_len_check;
	}

	rocker->hw_addr = ioremap(pci_resource_start(pdev, 0),
				  pci_resource_len(pdev, 0));
	if (!rocker->hw_addr) {
		dev_err(&pdev->dev, "ioremap failed\n");
		err = -EIO;
		goto err_ioremap;
	}
	pci_set_master(pdev);

	rocker->pdev = pdev;
	pci_set_drvdata(pdev, rocker);

	rocker->port_count = rocker_read32(rocker, PORT_PHYS_COUNT);

	err = rocker_msix_init(rocker);
	if (err) {
		dev_err(&pdev->dev, "MSI-X init failed\n");
		goto err_msix_init;
	}

	err = rocker_basic_hw_test(rocker);
	if (err) {
		dev_err(&pdev->dev, "basic hw test failed\n");
		goto err_basic_hw_test;
	}

	rocker_write32(rocker, CONTROL, ROCKER_CONTROL_RESET);

	err = rocker_dma_rings_init(rocker);
	if (err)
		goto err_dma_rings_init;

	err = request_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_CMD),
			  rocker_cmd_irq_handler, 0,
			  rocker_driver_name, rocker);
	if (err) {
		dev_err(&pdev->dev, "cannot assign cmd irq\n");
		goto err_request_cmd_irq;
	}

	err = request_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_EVENT),
			  rocker_event_irq_handler, 0,
			  rocker_driver_name, rocker);
	if (err) {
		dev_err(&pdev->dev, "cannot assign event irq\n");
		goto err_request_event_irq;
	}

	rocker->hw.id = rocker_read64(rocker, SWITCH_ID);

	err = rocker_init_tbls(rocker);
	if (err) {
		dev_err(&pdev->dev, "cannot init rocker tables\n");
		goto err_init_tbls;
	}

	err = rocker_probe_ports(rocker);
	if (err) {
		dev_err(&pdev->dev, "failed to probe ports\n");
		goto err_probe_ports;
	}

	dev_info(&pdev->dev, "Rocker switch with id %016llx\n", rocker->hw.id);

	return 0;

err_probe_ports:
	rocker_free_tbls(rocker);
err_init_tbls:
	free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_EVENT), rocker);
err_request_event_irq:
	free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_CMD), rocker);
err_request_cmd_irq:
	rocker_dma_rings_fini(rocker);
err_dma_rings_init:
err_basic_hw_test:
	rocker_msix_fini(rocker);
err_msix_init:
	iounmap(rocker->hw_addr);
err_ioremap:
err_pci_resource_len_check:
err_pci_set_dma_mask:
	pci_release_regions(pdev);
err_pci_request_regions:
	pci_disable_device(pdev);
err_pci_enable_device:
	kfree(rocker);
	return err;
}

static void rocker_remove(struct pci_dev *pdev)
{
	struct rocker *rocker = pci_get_drvdata(pdev);

	rocker_free_tbls(rocker);
	rocker_write32(rocker, CONTROL, ROCKER_CONTROL_RESET);
	rocker_remove_ports(rocker);
	free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_EVENT), rocker);
	free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_CMD), rocker);
	rocker_dma_rings_fini(rocker);
	rocker_msix_fini(rocker);
	iounmap(rocker->hw_addr);
	pci_release_regions(rocker->pdev);
	pci_disable_device(rocker->pdev);
	kfree(rocker);
}

static struct pci_driver rocker_pci_driver = {
	.name		= rocker_driver_name,
	.id_table	= rocker_pci_id_table,
	.probe		= rocker_probe,
	.remove		= rocker_remove,
};

/************************************
 * Net device notifier event handler
 ************************************/

static bool rocker_port_dev_check(struct net_device *dev)
{
	return dev->netdev_ops == &rocker_port_netdev_ops;
}

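/* Joining a bridge: untagged traffic switches from the port's own
 * internal VLAN ID to the bridge's, so all bridged ports share one
 * broadcast domain.
 */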
static int rocker_port_bridge_join(struct rocker_port *rocker_port,
				   struct net_device *bridge)
{
	int err;

	rocker_port_internal_vlan_id_put(rocker_port,
					 rocker_port->dev->ifindex);

	rocker_port->bridge_dev = bridge;

	/* Use bridge internal VLAN ID for untagged pkts */
	err = rocker_port_vlan(rocker_port, ROCKER_OP_FLAG_REMOVE, 0);
	if (err)
		return err;
	rocker_port->internal_vlan_id =
		rocker_port_internal_vlan_id_get(rocker_port,
						 bridge->ifindex);
	err = rocker_port_vlan(rocker_port, 0, 0);

	return err;
}

static int rocker_port_bridge_leave(struct rocker_port *rocker_port)
{
	int err;

	rocker_port_internal_vlan_id_put(rocker_port,
					 rocker_port->bridge_dev->ifindex);

	rocker_port->bridge_dev = NULL;

	/* Use port internal VLAN ID for untagged pkts */
	err = rocker_port_vlan(rocker_port, ROCKER_OP_FLAG_REMOVE, 0);
	if (err)
		return err;
	rocker_port->internal_vlan_id =
		rocker_port_internal_vlan_id_get(rocker_port,
						 rocker_port->dev->ifindex);
	err = rocker_port_vlan(rocker_port, 0, 0);

	return err;
}

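/* NETDEV_CHANGEUPPER handler: join when the new master is a bridge,
 * otherwise fall back to standalone operation.
 */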
static int rocker_port_master_changed(struct net_device *dev)
{
	struct rocker_port *rocker_port = netdev_priv(dev);
	struct net_device *master = netdev_master_upper_dev_get(dev);
	int err = 0;

	if (master && master->rtnl_link_ops &&
	    !strcmp(master->rtnl_link_ops->kind, "bridge"))
		err = rocker_port_bridge_join(rocker_port, master);
	else
		err = rocker_port_bridge_leave(rocker_port);

	return err;
}

static int rocker_netdevice_event(struct notifier_block *unused,
				  unsigned long event, void *ptr)
{
	struct net_device *dev;
	int err;

	switch (event) {
	case NETDEV_CHANGEUPPER:
		dev = netdev_notifier_info_to_dev(ptr);
		if (!rocker_port_dev_check(dev))
			return NOTIFY_DONE;
		err = rocker_port_master_changed(dev);
		if (err)
			netdev_warn(dev,
				    "failed to reflect master change (err %d)\n",
				    err);
		break;
	}

	return NOTIFY_DONE;
}

static struct notifier_block rocker_netdevice_nb __read_mostly = {
	.notifier_call = rocker_netdevice_event,
};

/***********************
 * Module init and exit
 ***********************/

static int __init rocker_module_init(void)
{
	int err;

	register_netdevice_notifier(&rocker_netdevice_nb);
	err = pci_register_driver(&rocker_pci_driver);
	if (err)
		goto err_pci_register_driver;
	return 0;

err_pci_register_driver:
	unregister_netdevice_notifier(&rocker_netdevice_nb);
	return err;
}

static void __exit rocker_module_exit(void)
{
	unregister_netdevice_notifier(&rocker_netdevice_nb);
	pci_unregister_driver(&rocker_pci_driver);
}

module_init(rocker_module_init);
module_exit(rocker_module_exit);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Jiri Pirko <jiri@resnulli.us>");
MODULE_AUTHOR("Scott Feldman <sfeldma@gmail.com>");
MODULE_DESCRIPTION("Rocker switch device driver");
MODULE_DEVICE_TABLE(pci, rocker_pci_id_table);