1 // SPDX-License-Identifier: GPL-2.0
2 /* Texas Instruments ICSSG Ethernet Driver
4 * Copyright (C) 2018-2022 Texas Instruments Incorporated - https://www.ti.com/
8 #include <linux/etherdevice.h>
9 #include <linux/types.h>
10 #include <linux/regmap.h>
12 #include "icssg_prueth.h"
14 #define ICSSG_NUM_CLASSIFIERS 16
15 #define ICSSG_NUM_FT1_SLOTS 8
16 #define ICSSG_NUM_FT3_SLOTS 16
18 #define ICSSG_NUM_CLASSIFIERS_IN_USE 5
21 #define FT1_NUM_SLOTS 8
22 #define FT1_SLOT_SIZE 0x10 /* bytes */
24 /* offsets from FT1 slot base i.e. slot 1 start */
27 #define FT1_DA0_MASK 0x8
28 #define FT1_DA1_MASK 0xc
/* Register offset of register 'reg' within FT1 slot 'n' of slice 'slice'.
 * The parameter was previously misspelled 'slize' while the body used
 * 'slice', which only compiled because every call site happens to pass a
 * local named 'slice'; make the macro hygienic.
 */
#define FT1_N_REG(slice, n, reg) \
	(offs[slice].ft1_slot_base + FT1_SLOT_SIZE * (n) + (reg))
33 #define FT1_LEN_MASK GENMASK(19, 16)
34 #define FT1_LEN_SHIFT 16
35 #define FT1_LEN(len) (((len) << FT1_LEN_SHIFT) & FT1_LEN_MASK)
36 #define FT1_START_MASK GENMASK(14, 0)
37 #define FT1_START(start) ((start) & FT1_START_MASK)
38 #define FT1_MATCH_SLOT(n) (GENMASK(23, 16) & (BIT(n) << 16))
42 FT1_CFG_TYPE_DISABLED = 0,
48 #define FT1_CFG_SHIFT(n) (2 * (n))
49 #define FT1_CFG_MASK(n) (0x3 << FT1_CFG_SHIFT((n)))
52 #define FT3_NUM_SLOTS 16
53 #define FT3_SLOT_SIZE 0x20 /* bytes */
55 /* offsets from FT3 slot n's base */
57 #define FT3_START_AUTO 0x4
58 #define FT3_START_OFFSET 0x8
59 #define FT3_JUMP_OFFSET 0xc
63 #define FT3_T_MASK 0x1c
/* Register offset of register 'reg' within FT3 slot 'n' of slice 'slice'.
 * Parameter renamed from the misspelled 'slize' so the macro no longer
 * relies on capturing a caller-local variable named 'slice'.
 */
#define FT3_N_REG(slice, n, reg) \
	(offs[slice].ft3_slot_base + FT3_SLOT_SIZE * (n) + (reg))
68 /* offsets from rx_class n's base */
69 #define RX_CLASS_AND_EN 0
70 #define RX_CLASS_OR_EN 0x4
71 #define RX_CLASS_NUM_SLOTS 16
72 #define RX_CLASS_EN_SIZE 0x8 /* bytes */
74 #define RX_CLASS_N_REG(slice, n, reg) \
75 (offs[slice].rx_class_base + RX_CLASS_EN_SIZE * (n) + (reg))
78 #define RX_CLASS_GATES_SIZE 0x4 /* bytes */
80 #define RX_CLASS_GATES_N_REG(slice, n) \
81 (offs[slice].rx_class_gates_base + RX_CLASS_GATES_SIZE * (n))
83 #define RX_CLASS_GATES_ALLOW_MASK BIT(6)
84 #define RX_CLASS_GATES_RAW_MASK BIT(5)
85 #define RX_CLASS_GATES_PHASE_MASK BIT(4)
87 /* RX Class traffic data matching bits */
88 #define RX_CLASS_FT_UC BIT(31)
89 #define RX_CLASS_FT_MC BIT(30)
90 #define RX_CLASS_FT_BC BIT(29)
91 #define RX_CLASS_FT_FW BIT(28)
92 #define RX_CLASS_FT_RCV BIT(27)
93 #define RX_CLASS_FT_VLAN BIT(26)
94 #define RX_CLASS_FT_DA_P BIT(25)
95 #define RX_CLASS_FT_DA_I BIT(24)
96 #define RX_CLASS_FT_FT1_MATCH_MASK GENMASK(23, 16)
97 #define RX_CLASS_FT_FT1_MATCH_SHIFT 16
98 #define RX_CLASS_FT_FT3_MATCH_MASK GENMASK(15, 0)
99 #define RX_CLASS_FT_FT3_MATCH_SHIFT 0
101 #define RX_CLASS_FT_FT1_MATCH(slot) \
102 ((BIT(slot) << RX_CLASS_FT_FT1_MATCH_SHIFT) & \
103 RX_CLASS_FT_FT1_MATCH_MASK)
/* Classifier result-combination mode, programmed 2 bits per classifier
 * into rx_class_cfg1 (see RX_CLASS_SEL_SHIFT/RX_CLASS_SEL_MASK).
 */
enum rx_class_sel_type {
	RX_CLASS_SEL_TYPE_OR = 0,
	RX_CLASS_SEL_TYPE_AND = 1,
	RX_CLASS_SEL_TYPE_OR_AND_AND = 2,
	RX_CLASS_SEL_TYPE_OR_OR_AND = 3,
};
/* 2-bit selection field per classifier 'n' in rx_class_cfg1.
 * (The byte-identical duplicate redefinitions of FT1_CFG_SHIFT/FT1_CFG_MASK
 * that used to sit here are removed; they are already defined above.)
 */
#define RX_CLASS_SEL_SHIFT(n)	(2 * (n))
#define RX_CLASS_SEL_MASK(n)	(0x3 << RX_CLASS_SEL_SHIFT((n)))
119 #define ICSSG_CFG_OFFSET 0
120 #define MAC_INTERFACE_0 0x18
121 #define MAC_INTERFACE_1 0x1c
123 #define ICSSG_CFG_RX_L2_G_EN BIT(2)
/* These are register offsets per PRU */
struct miig_rt_offsets {
	/* NOTE(review): this struct is truncated in this view. Members used
	 * elsewhere in the file (mac0, mac1, ft1_start_len, ft1_cfg,
	 * ft1_slot_base, ft3_slot_base, rx_class_base, rx_class_cfg1,
	 * rx_class_cfg2) and the closing brace are not visible here —
	 * confirm against the full source before relying on layout.
	 */
	u32 rx_class_gates_base;
	/* presumably RX/TX rate-limiter config registers — not referenced in
	 * the code visible here; TODO confirm
	 */
	u32 rx_rate_cfg_base;
	u32 rx_rate_src_sel0;
	u32 rx_rate_src_sel1;
	u32 tx_rate_cfg_base;
/* These are the offset values for miig_rt_offsets registers */
static const struct miig_rt_offsets offs[] = {
	/* NOTE(review): the initializer entries (one per slice/PRU, indexed
	 * by 'slice' throughout this file) are outside this view — do not
	 * modify without the full table.
	 */
205 static void rx_class_ft1_set_start_len(struct regmap *miig_rt, int slice,
210 offset = offs[slice].ft1_start_len;
211 val = FT1_LEN(len) | FT1_START(start);
212 regmap_write(miig_rt, offset, val);
215 static void rx_class_ft1_set_da(struct regmap *miig_rt, int slice,
216 int n, const u8 *addr)
220 offset = FT1_N_REG(slice, n, FT1_DA0);
221 regmap_write(miig_rt, offset, (u32)(addr[0] | addr[1] << 8 |
222 addr[2] << 16 | addr[3] << 24));
223 offset = FT1_N_REG(slice, n, FT1_DA1);
224 regmap_write(miig_rt, offset, (u32)(addr[4] | addr[5] << 8));
227 static void rx_class_ft1_set_da_mask(struct regmap *miig_rt, int slice,
228 int n, const u8 *addr)
232 offset = FT1_N_REG(slice, n, FT1_DA0_MASK);
233 regmap_write(miig_rt, offset, (u32)(addr[0] | addr[1] << 8 |
234 addr[2] << 16 | addr[3] << 24));
235 offset = FT1_N_REG(slice, n, FT1_DA1_MASK);
236 regmap_write(miig_rt, offset, (u32)(addr[4] | addr[5] << 8));
239 static void rx_class_ft1_cfg_set_type(struct regmap *miig_rt, int slice, int n,
240 enum ft1_cfg_type type)
244 offset = offs[slice].ft1_cfg;
245 regmap_update_bits(miig_rt, offset, FT1_CFG_MASK(n),
246 type << FT1_CFG_SHIFT(n));
249 static void rx_class_sel_set_type(struct regmap *miig_rt, int slice, int n,
250 enum rx_class_sel_type type)
254 offset = offs[slice].rx_class_cfg1;
255 regmap_update_bits(miig_rt, offset, RX_CLASS_SEL_MASK(n),
256 type << RX_CLASS_SEL_SHIFT(n));
259 static void rx_class_set_and(struct regmap *miig_rt, int slice, int n,
264 offset = RX_CLASS_N_REG(slice, n, RX_CLASS_AND_EN);
265 regmap_write(miig_rt, offset, data);
268 static void rx_class_set_or(struct regmap *miig_rt, int slice, int n,
273 offset = RX_CLASS_N_REG(slice, n, RX_CLASS_OR_EN);
274 regmap_write(miig_rt, offset, data);
277 static u32 rx_class_get_or(struct regmap *miig_rt, int slice, int n)
281 offset = RX_CLASS_N_REG(slice, n, RX_CLASS_OR_EN);
282 regmap_read(miig_rt, offset, &val);
287 void icssg_class_set_host_mac_addr(struct regmap *miig_rt, const u8 *mac)
289 regmap_write(miig_rt, MAC_INTERFACE_0, (u32)(mac[0] | mac[1] << 8 |
290 mac[2] << 16 | mac[3] << 24));
291 regmap_write(miig_rt, MAC_INTERFACE_1, (u32)(mac[4] | mac[5] << 8));
294 void icssg_class_set_mac_addr(struct regmap *miig_rt, int slice, u8 *mac)
296 regmap_write(miig_rt, offs[slice].mac0, (u32)(mac[0] | mac[1] << 8 |
297 mac[2] << 16 | mac[3] << 24));
298 regmap_write(miig_rt, offs[slice].mac1, (u32)(mac[4] | mac[5] << 8));
301 static void icssg_class_ft1_add_mcast(struct regmap *miig_rt, int slice,
302 int slot, const u8 *addr, const u8 *mask)
307 WARN(slot >= FT1_NUM_SLOTS, "invalid slot: %d\n", slot);
309 rx_class_ft1_set_da(miig_rt, slice, slot, addr);
310 rx_class_ft1_set_da_mask(miig_rt, slice, slot, mask);
311 rx_class_ft1_cfg_set_type(miig_rt, slice, slot, FT1_CFG_TYPE_EQ);
313 /* Enable the FT1 slot in OR enable for all classifiers */
314 for (i = 0; i < ICSSG_NUM_CLASSIFIERS_IN_USE; i++) {
315 val = rx_class_get_or(miig_rt, slice, i);
316 val |= RX_CLASS_FT_FT1_MATCH(slot);
317 rx_class_set_or(miig_rt, slice, i, val);
321 /* disable all RX traffic */
322 void icssg_class_disable(struct regmap *miig_rt, int slice)
328 regmap_update_bits(miig_rt, ICSSG_CFG_OFFSET, ICSSG_CFG_RX_L2_G_EN,
329 ICSSG_CFG_RX_L2_G_EN);
331 for (n = 0; n < ICSSG_NUM_CLASSIFIERS; n++) {
333 rx_class_set_and(miig_rt, slice, n, 0);
335 rx_class_set_or(miig_rt, slice, n, 0);
338 rx_class_sel_set_type(miig_rt, slice, n, RX_CLASS_SEL_TYPE_OR);
341 offset = RX_CLASS_GATES_N_REG(slice, n);
342 regmap_read(miig_rt, offset, &data);
343 /* clear class_raw so we go through filters */
344 data &= ~RX_CLASS_GATES_RAW_MASK;
345 /* set allow and phase mask */
346 data |= RX_CLASS_GATES_ALLOW_MASK | RX_CLASS_GATES_PHASE_MASK;
347 regmap_write(miig_rt, offset, data);
351 for (n = 0; n < ICSSG_NUM_FT1_SLOTS; n++) {
352 const u8 addr[] = { 0, 0, 0, 0, 0, 0, };
354 rx_class_ft1_cfg_set_type(miig_rt, slice, n,
355 FT1_CFG_TYPE_DISABLED);
356 rx_class_ft1_set_da(miig_rt, slice, n, addr);
357 rx_class_ft1_set_da_mask(miig_rt, slice, n, addr);
361 regmap_write(miig_rt, offs[slice].rx_class_cfg2, 0);
364 void icssg_class_default(struct regmap *miig_rt, int slice, bool allmulti,
367 int num_classifiers = is_sr1 ? ICSSG_NUM_CLASSIFIERS_IN_USE : 1;
372 icssg_class_disable(miig_rt, slice);
374 /* Setup Classifier */
375 for (n = 0; n < num_classifiers; n++) {
376 /* match on Broadcast or MAC_PRU address */
377 data = RX_CLASS_FT_BC | RX_CLASS_FT_DA_P;
381 data |= RX_CLASS_FT_MC;
383 rx_class_set_or(miig_rt, slice, n, data);
385 /* set CFG1 for OR_OR_AND for classifier */
386 rx_class_sel_set_type(miig_rt, slice, n,
387 RX_CLASS_SEL_TYPE_OR_OR_AND);
391 regmap_write(miig_rt, offs[slice].rx_class_cfg2, 0);
394 void icssg_class_promiscuous_sr1(struct regmap *miig_rt, int slice)
400 icssg_class_disable(miig_rt, slice);
402 /* Setup Classifier */
403 for (n = 0; n < ICSSG_NUM_CLASSIFIERS_IN_USE; n++) {
404 /* set RAW_MASK to bypass filters */
405 offset = RX_CLASS_GATES_N_REG(slice, n);
406 regmap_read(miig_rt, offset, &data);
407 data |= RX_CLASS_GATES_RAW_MASK;
408 regmap_write(miig_rt, offset, data);
412 void icssg_class_add_mcast_sr1(struct regmap *miig_rt, int slice,
413 struct net_device *ndev)
415 u8 mask_addr[6] = { 0, 0, 0, 0, 0, 0xff };
416 struct netdev_hw_addr *ha;
419 rx_class_ft1_set_start_len(miig_rt, slice, 0, 6);
420 /* reserve first 2 slots for
421 * 1) 01-80-C2-00-00-XX Known Service Ethernet Multicast addresses
422 * 2) 01-00-5e-00-00-XX Local Network Control Block
423 * (224.0.0.0 - 224.0.0.255 (224.0.0/24))
425 icssg_class_ft1_add_mcast(miig_rt, slice, 0,
426 eth_reserved_addr_base, mask_addr);
427 icssg_class_ft1_add_mcast(miig_rt, slice, 1,
428 eth_ipv4_mcast_addr_base, mask_addr);
430 netdev_for_each_mc_addr(ha, ndev) {
431 /* skip addresses matching reserved slots */
432 if (!memcmp(eth_reserved_addr_base, ha->addr, 5) ||
433 !memcmp(eth_ipv4_mcast_addr_base, ha->addr, 5)) {
434 netdev_dbg(ndev, "mcast skip %pM\n", ha->addr);
438 if (slot >= FT1_NUM_SLOTS) {
440 "can't add more than %d MC addresses, enabling allmulti\n",
442 icssg_class_default(miig_rt, slice, 1, true);
446 netdev_dbg(ndev, "mcast add %pM\n", ha->addr);
447 icssg_class_ft1_add_mcast(miig_rt, slice, slot,
448 ha->addr, mask_addr);
453 /* required for SAV check */
454 void icssg_ft1_set_mac_addr(struct regmap *miig_rt, int slice, u8 *mac_addr)
456 const u8 mask_addr[] = { 0, 0, 0, 0, 0, 0, };
458 rx_class_ft1_set_start_len(miig_rt, slice, 0, 6);
459 rx_class_ft1_set_da(miig_rt, slice, 0, mac_addr);
460 rx_class_ft1_set_da_mask(miig_rt, slice, 0, mask_addr);
461 rx_class_ft1_cfg_set_type(miig_rt, slice, 0, FT1_CFG_TYPE_EQ);