Commit | Line | Data |
---|---|---|
227d07a0 VO |
1 | // SPDX-License-Identifier: GPL-2.0 |
2 | /* Copyright (c) 2019, Vladimir Oltean <olteanv@gmail.com> | |
3 | */ | |
4 | #include <linux/if_vlan.h> | |
5 | #include <linux/dsa/sja1105.h> | |
6 | #include <linux/dsa/8021q.h> | |
7 | #include <linux/packing.h> | |
bd954b82 VO |
8 | |
9 | #include "tag.h" | |
227d07a0 | 10 | |
94793a56 VO |
11 | #define SJA1105_NAME "sja1105" |
12 | #define SJA1110_NAME "sja1110" | |
13 | ||
4913b8eb VO |
14 | /* Is this a TX or an RX header? */ |
15 | #define SJA1110_HEADER_HOST_TO_SWITCH BIT(15) | |
16 | ||
17 | /* RX header */ | |
18 | #define SJA1110_RX_HEADER_IS_METADATA BIT(14) | |
19 | #define SJA1110_RX_HEADER_HOST_ONLY BIT(13) | |
20 | #define SJA1110_RX_HEADER_HAS_TRAILER BIT(12) | |
21 | ||
22 | /* Trap-to-host format (no trailer present) */ | |
23 | #define SJA1110_RX_HEADER_SRC_PORT(x) (((x) & GENMASK(7, 4)) >> 4) | |
24 | #define SJA1110_RX_HEADER_SWITCH_ID(x) ((x) & GENMASK(3, 0)) | |
25 | ||
26 | /* Timestamp format (trailer present) */ | |
27 | #define SJA1110_RX_HEADER_TRAILER_POS(x) ((x) & GENMASK(11, 0)) | |
28 | ||
29 | #define SJA1110_RX_TRAILER_SWITCH_ID(x) (((x) & GENMASK(7, 4)) >> 4) | |
30 | #define SJA1110_RX_TRAILER_SRC_PORT(x) ((x) & GENMASK(3, 0)) | |
31 | ||
566b18c8 VO |
32 | /* Meta frame format (for 2-step TX timestamps) */ |
33 | #define SJA1110_RX_HEADER_N_TS(x) (((x) & GENMASK(8, 4)) >> 4) | |
34 | ||
4913b8eb VO |
35 | /* TX header */ |
36 | #define SJA1110_TX_HEADER_UPDATE_TC BIT(14) | |
37 | #define SJA1110_TX_HEADER_TAKE_TS BIT(13) | |
38 | #define SJA1110_TX_HEADER_TAKE_TS_CASC BIT(12) | |
39 | #define SJA1110_TX_HEADER_HAS_TRAILER BIT(11) | |
40 | ||
41 | /* Only valid if SJA1110_TX_HEADER_HAS_TRAILER is false */ | |
42 | #define SJA1110_TX_HEADER_PRIO(x) (((x) << 7) & GENMASK(10, 7)) | |
43 | #define SJA1110_TX_HEADER_TSTAMP_ID(x) ((x) & GENMASK(7, 0)) | |
44 | ||
45 | /* Only valid if SJA1110_TX_HEADER_HAS_TRAILER is true */ | |
46 | #define SJA1110_TX_HEADER_TRAILER_POS(x) ((x) & GENMASK(10, 0)) | |
47 | ||
48 | #define SJA1110_TX_TRAILER_TSTAMP_ID(x) (((x) << 24) & GENMASK(31, 24)) | |
49 | #define SJA1110_TX_TRAILER_PRIO(x) (((x) << 21) & GENMASK(23, 21)) | |
50 | #define SJA1110_TX_TRAILER_SWITCHID(x) (((x) << 12) & GENMASK(15, 12)) | |
51 | #define SJA1110_TX_TRAILER_DESTPORTS(x) (((x) << 1) & GENMASK(11, 1)) | |
52 | ||
566b18c8 VO |
53 | #define SJA1110_META_TSTAMP_SIZE 10 |
54 | ||
4913b8eb VO |
55 | #define SJA1110_HEADER_LEN 4 |
56 | #define SJA1110_RX_TRAILER_LEN 13 | |
57 | #define SJA1110_TX_TRAILER_LEN 4 | |
58 | #define SJA1110_MAX_PADDING_LEN 15 | |
59 | ||
950a419d VO |
60 | #define SJA1105_HWTS_RX_EN 0 |
61 | ||
/* Tagger-private state, allocated in sja1105_connect() and shared between
 * the tagger and the switch driver. The embedded sja1105_tagger_data must
 * stay the first member so that sja1105_tagger_data(ds) and
 * sja1105_tagger_private(ds) can alias the same ds->tagger_data pointer.
 */
struct sja1105_tagger_private {
	struct sja1105_tagger_data data; /* Must be first */
	/* Bit field of SJA1105_HWTS_RX_EN, accessed with {test,set,clear}_bit */
	unsigned long state;
	/* Protects concurrent access to the meta state machine
	 * from taggers running on multiple ports on SMP systems
	 */
	spinlock_t meta_lock;
	/* Link-local frame buffered while waiting for its meta follow-up */
	struct sk_buff *stampable_skb;
	/* Worker servicing deferred (management-route) transmission */
	struct kthread_worker *xmit_worker;
};

/* Downcast ds->tagger_data to the private structure (valid because the
 * public sja1105_tagger_data is its first member).
 */
static struct sja1105_tagger_private *
sja1105_tagger_private(struct dsa_switch *ds)
{
	return ds->tagger_data;
}
78 | ||
227d07a0 VO |
79 | /* Similar to is_link_local_ether_addr(hdr->h_dest) but also covers PTP */ |
80 | static inline bool sja1105_is_link_local(const struct sk_buff *skb) | |
81 | { | |
82 | const struct ethhdr *hdr = eth_hdr(skb); | |
83 | u64 dmac = ether_addr_to_u64(hdr->h_dest); | |
84 | ||
79fa7061 VO |
85 | if (ntohs(hdr->h_proto) == ETH_P_SJA1105_META) |
86 | return false; | |
227d07a0 VO |
87 | if ((dmac & SJA1105_LINKLOCAL_FILTER_A_MASK) == |
88 | SJA1105_LINKLOCAL_FILTER_A) | |
89 | return true; | |
90 | if ((dmac & SJA1105_LINKLOCAL_FILTER_B_MASK) == | |
91 | SJA1105_LINKLOCAL_FILTER_B) | |
92 | return true; | |
93 | return false; | |
94 | } | |
95 | ||
/* Fields decoded from an SJA1105 meta (follow-up) frame: the partial RX
 * timestamp, the two DMAC bytes the switch mangled in the original
 * link-local frame, and the frame's origin. All stored host-endian after
 * sja1105_meta_unpack().
 */
struct sja1105_meta {
	u64 tstamp;
	u64 dmac_byte_4;
	u64 dmac_byte_3;
	u64 source_port;
	u64 switch_id;
};
103 | ||
/* Decode the payload of a meta frame (located right after the Ethernet
 * header) into @meta. The byte offsets below follow the hardware layout;
 * do not reorder these calls without consulting the manual.
 */
static void sja1105_meta_unpack(const struct sk_buff *skb,
				struct sja1105_meta *meta)
{
	u8 *buf = skb_mac_header(skb) + ETH_HLEN;

	/* UM10944.pdf section 4.2.17 AVB Parameters:
	 * Structure of the meta-data follow-up frame.
	 * It is in network byte order, so there are no quirks
	 * while unpacking the meta frame.
	 *
	 * Also SJA1105 E/T only populates bits 23:0 of the timestamp
	 * whereas P/Q/R/S does 32 bits. Since the structure is the
	 * same and the E/T puts zeroes in the high-order byte, use
	 * a unified unpacking command for both device series.
	 */
	packing(buf,     &meta->tstamp,     31, 0, 4, UNPACK, 0);
	packing(buf + 4, &meta->dmac_byte_4, 7, 0, 1, UNPACK, 0);
	packing(buf + 5, &meta->dmac_byte_3, 7, 0, 1, UNPACK, 0);
	packing(buf + 6, &meta->source_port, 7, 0, 1, UNPACK, 0);
	packing(buf + 7, &meta->switch_id,   7, 0, 1, UNPACK, 0);
}
125 | ||
d3f9b90b VO |
126 | static inline bool sja1105_is_meta_frame(const struct sk_buff *skb) |
127 | { | |
128 | const struct ethhdr *hdr = eth_hdr(skb); | |
129 | u64 smac = ether_addr_to_u64(hdr->h_source); | |
130 | u64 dmac = ether_addr_to_u64(hdr->h_dest); | |
131 | ||
132 | if (smac != SJA1105_META_SMAC) | |
133 | return false; | |
134 | if (dmac != SJA1105_META_DMAC) | |
135 | return false; | |
136 | if (ntohs(hdr->h_proto) != ETH_P_SJA1105_META) | |
137 | return false; | |
138 | return true; | |
139 | } | |
140 | ||
/* Calls sja1105_port_deferred_xmit in sja1105_main.c
 *
 * Hand the skb over to the switch driver's kthread worker, which installs
 * a management route over SPI before transmitting. Always returns NULL so
 * the DSA hot path stops processing this skb; on any setup failure the skb
 * is simply dropped (caller's kfree_skb releases it).
 */
static struct sk_buff *sja1105_defer_xmit(struct dsa_port *dp,
					  struct sk_buff *skb)
{
	struct sja1105_tagger_data *tagger_data = sja1105_tagger_data(dp->ds);
	struct sja1105_tagger_private *priv = sja1105_tagger_private(dp->ds);
	void (*xmit_work_fn)(struct kthread_work *work);
	struct sja1105_deferred_xmit_work *xmit_work;
	struct kthread_worker *xmit_worker;

	xmit_work_fn = tagger_data->xmit_work_fn;
	xmit_worker = priv->xmit_worker;

	/* The switch driver has not (or no longer) exported its half of the
	 * deferred-xmit machinery; drop the packet.
	 */
	if (!xmit_work_fn || !xmit_worker)
		return NULL;

	/* GFP_ATOMIC: we may be called from the xmit softirq path */
	xmit_work = kzalloc(sizeof(*xmit_work), GFP_ATOMIC);
	if (!xmit_work)
		return NULL;

	kthread_init_work(&xmit_work->work, xmit_work_fn);
	/* Increase refcount so the kfree_skb in dsa_slave_xmit
	 * won't really free the packet.
	 */
	xmit_work->dp = dp;
	xmit_work->skb = skb_get(skb);

	kthread_queue_work(xmit_worker, &xmit_work->work);

	return NULL;
}
172 | ||
/* Send VLAN tags with a TPID that blends in with whatever VLAN protocol a
 * bridge spanning ports of this switch might have.
 *
 * Returns the EtherType (TPID) to use for TX VLAN tags on @dp: the private
 * tag_8021q TPID when VLAN-unaware, otherwise the protocol of the (single)
 * VLAN-aware bridge found on any port of this switch.
 */
static u16 sja1105_xmit_tpid(struct dsa_port *dp)
{
	struct dsa_switch *ds = dp->ds;
	struct dsa_port *other_dp;
	u16 proto;

	/* Since VLAN awareness is global, then if this port is VLAN-unaware,
	 * all ports are. Use the VLAN-unaware TPID used for tag_8021q.
	 */
	if (!dsa_port_is_vlan_filtering(dp))
		return ETH_P_SJA1105;

	/* Port is VLAN-aware, so there is a bridge somewhere (a single one,
	 * we're sure about that). It may not be on this port though, so we
	 * need to find it.
	 */
	dsa_switch_for_each_port(other_dp, ds) {
		struct net_device *br = dsa_port_bridge_dev_get(other_dp);

		if (!br)
			continue;

		/* Error is returned only if CONFIG_BRIDGE_VLAN_FILTERING,
		 * which seems pointless to handle, as our port cannot become
		 * VLAN-aware in that case.
		 */
		br_vlan_get_proto(br, &proto);

		return proto;
	}

	/* Should be unreachable given the invariant above */
	WARN_ONCE(1, "Port is VLAN-aware but cannot find associated bridge!\n");

	return ETH_P_SJA1105;
}
211 | ||
/* TX path for bridged (offload_fwd_mark) traffic, where we cannot steer the
 * packet to one precise egress port. Either let the switch forward based on
 * its FDB (VLAN-aware bridge) or tag with the bridge-wide tag_8021q VLAN
 * (VLAN-unaware bridge).
 */
static struct sk_buff *sja1105_imprecise_xmit(struct sk_buff *skb,
					      struct net_device *netdev)
{
	struct dsa_port *dp = dsa_slave_to_port(netdev);
	unsigned int bridge_num = dsa_port_bridge_num_get(dp);
	struct net_device *br = dsa_port_bridge_dev_get(dp);
	u16 tx_vid;

	/* If the port is under a VLAN-aware bridge, just slide the
	 * VLAN-tagged packet into the FDB and hope for the best.
	 * This works because we support a single VLAN-aware bridge
	 * across the entire dst, and its VLANs cannot be shared with
	 * any standalone port.
	 */
	if (br_vlan_enabled(br))
		return skb;

	/* If the port is under a VLAN-unaware bridge, use an imprecise
	 * TX VLAN that targets the bridge's entire broadcast domain,
	 * instead of just the specific port.
	 */
	tx_vid = dsa_tag_8021q_bridge_vid(bridge_num);

	return dsa_8021q_xmit(skb, netdev, sja1105_xmit_tpid(dp), tx_vid);
}
237 | ||
/* Transform untagged control packets into pvid-tagged control packets so that
 * all packets sent by this tagger are VLAN-tagged and we can configure the
 * switch to drop untagged packets coming from the DSA master.
 *
 * May free and reallocate @skb; returns the (possibly new) skb, or NULL on
 * allocation failure, in which case the original skb has been consumed.
 */
static struct sk_buff *sja1105_pvid_tag_control_pkt(struct dsa_port *dp,
						    struct sk_buff *skb, u8 pcp)
{
	__be16 xmit_tpid = htons(sja1105_xmit_tpid(dp));
	struct vlan_ethhdr *hdr;

	/* If VLAN tag is in hwaccel area, move it to the payload
	 * to deal with both cases uniformly and to ensure that
	 * the VLANs are added in the right order.
	 */
	if (unlikely(skb_vlan_tag_present(skb))) {
		skb = __vlan_hwaccel_push_inside(skb);
		if (!skb)
			return NULL;
	}

	hdr = (struct vlan_ethhdr *)skb_mac_header(skb);

	/* If skb is already VLAN-tagged, leave that VLAN ID in place */
	if (hdr->h_vlan_proto == xmit_tpid)
		return skb;

	return vlan_insert_tag(skb, xmit_tpid, (pcp << VLAN_PRIO_SHIFT) |
			       SJA1105_DEFAULT_VLAN);
}
267 | ||
/* SJA1105 TX hook: bridged traffic goes through the imprecise path,
 * link-local (management) traffic is deferred to the SPI worker, and
 * everything else gets a tag_8021q VLAN encoding port and priority.
 */
static struct sk_buff *sja1105_xmit(struct sk_buff *skb,
				    struct net_device *netdev)
{
	struct dsa_port *dp = dsa_slave_to_port(netdev);
	u16 queue_mapping = skb_get_queue_mapping(skb);
	u8 pcp = netdev_txq_to_tc(netdev, queue_mapping);
	u16 tx_vid = dsa_tag_8021q_standalone_vid(dp);

	if (skb->offload_fwd_mark)
		return sja1105_imprecise_xmit(skb, netdev);

	/* Transmitting management traffic does not rely upon switch tagging,
	 * but instead SPI-installed management routes. Part 2 of this
	 * is the .port_deferred_xmit driver callback.
	 */
	if (unlikely(sja1105_is_link_local(skb))) {
		skb = sja1105_pvid_tag_control_pkt(dp, skb, pcp);
		if (!skb)
			return NULL;

		return sja1105_defer_xmit(dp, skb);
	}

	return dsa_8021q_xmit(skb, netdev, sja1105_xmit_tpid(dp),
			     ((pcp << VLAN_PRIO_SHIFT) | tx_vid));
}
294 | ||
/* SJA1110 TX hook. Data packets use tag_8021q VLANs; control (link-local)
 * packets get an in-band control extension: a 4-byte DSA header after the
 * source MAC plus a 4-byte trailer at the end of the frame, the latter
 * encoding priority, switch ID and the destination port mask.
 */
static struct sk_buff *sja1110_xmit(struct sk_buff *skb,
				    struct net_device *netdev)
{
	struct sk_buff *clone = SJA1105_SKB_CB(skb)->clone;
	struct dsa_port *dp = dsa_slave_to_port(netdev);
	u16 queue_mapping = skb_get_queue_mapping(skb);
	u8 pcp = netdev_txq_to_tc(netdev, queue_mapping);
	u16 tx_vid = dsa_tag_8021q_standalone_vid(dp);
	__be32 *tx_trailer;
	__be16 *tx_header;
	int trailer_pos;

	if (skb->offload_fwd_mark)
		return sja1105_imprecise_xmit(skb, netdev);

	/* Transmitting control packets is done using in-band control
	 * extensions, while data packets are transmitted using
	 * tag_8021q TX VLANs.
	 */
	if (likely(!sja1105_is_link_local(skb)))
		return dsa_8021q_xmit(skb, netdev, sja1105_xmit_tpid(dp),
				     ((pcp << VLAN_PRIO_SHIFT) | tx_vid));

	skb = sja1105_pvid_tag_control_pkt(dp, skb, pcp);
	if (!skb)
		return NULL;

	skb_push(skb, SJA1110_HEADER_LEN);

	dsa_alloc_etype_header(skb, SJA1110_HEADER_LEN);

	/* Record where the trailer will go, counted from the MAC header,
	 * before skb_put() grows the frame.
	 */
	trailer_pos = skb->len;

	tx_header = dsa_etype_header_pos_tx(skb);
	tx_trailer = skb_put(skb, SJA1110_TX_TRAILER_LEN);

	tx_header[0] = htons(ETH_P_SJA1110);
	tx_header[1] = htons(SJA1110_HEADER_HOST_TO_SWITCH |
			     SJA1110_TX_HEADER_HAS_TRAILER |
			     SJA1110_TX_HEADER_TRAILER_POS(trailer_pos));
	*tx_trailer = cpu_to_be32(SJA1110_TX_TRAILER_PRIO(pcp) |
				  SJA1110_TX_TRAILER_SWITCHID(dp->ds->index) |
				  SJA1110_TX_TRAILER_DESTPORTS(BIT(dp->index)));
	/* A clone set up by the PTP code means this frame wants a 2-step TX
	 * timestamp; ask the switch to take one under this timestamp ID.
	 */
	if (clone) {
		u8 ts_id = SJA1105_SKB_CB(clone)->ts_id;

		tx_header[1] |= htons(SJA1110_TX_HEADER_TAKE_TS);
		*tx_trailer |= cpu_to_be32(SJA1110_TX_TRAILER_TSTAMP_ID(ts_id));
	}

	return skb;
}
347 | ||
f3097be2 VO |
348 | static void sja1105_transfer_meta(struct sk_buff *skb, |
349 | const struct sja1105_meta *meta) | |
350 | { | |
351 | struct ethhdr *hdr = eth_hdr(skb); | |
352 | ||
353 | hdr->h_dest[3] = meta->dmac_byte_3; | |
354 | hdr->h_dest[4] = meta->dmac_byte_4; | |
617ef8d9 | 355 | SJA1105_SKB_CB(skb)->tstamp = meta->tstamp; |
f3097be2 VO |
356 | } |
357 | ||
/* This is a simple state machine which follows the hardware mechanism of
 * generating RX timestamps:
 *
 * After each timestampable skb (all traffic for which send_meta1 and
 * send_meta0 is true, aka all MAC-filtered link-local traffic) a meta frame
 * containing a partial timestamp is immediately generated by the switch and
 * sent as a follow-up to the link-local frame on the CPU port.
 *
 * The meta frames have no unique identifier (such as sequence number) by which
 * one may pair them to the correct timestampable frame.
 * Instead, the switch has internal logic that ensures no frames are sent on
 * the CPU port between a link-local timestampable frame and its corresponding
 * meta follow-up. It also ensures strict ordering between ports (lower ports
 * have higher priority towards the CPU port). For this reason, a per-port
 * data structure is not needed/desirable.
 *
 * This function pairs the link-local frame with its partial timestamp from the
 * meta follow-up frame. The full timestamp will be reconstructed later in a
 * work queue.
 */
static struct sk_buff
*sja1105_rcv_meta_state_machine(struct sk_buff *skb,
				struct sja1105_meta *meta,
				bool is_link_local,
				bool is_meta)
{
	/* Step 1: A timestampable frame was received.
	 * Buffer it until we get its meta frame.
	 */
	if (is_link_local) {
		struct dsa_port *dp = dsa_slave_to_port(skb->dev);
		struct sja1105_tagger_private *priv;
		struct dsa_switch *ds = dp->ds;

		priv = sja1105_tagger_private(ds);

		if (!test_bit(SJA1105_HWTS_RX_EN, &priv->state))
			/* Do normal processing. */
			return skb;

		spin_lock(&priv->meta_lock);
		/* Was this a link-local frame instead of the meta
		 * that we were expecting?
		 */
		if (priv->stampable_skb) {
			dev_err_ratelimited(ds->dev,
					    "Expected meta frame, is %12llx "
					    "in the DSA master multicast filter?\n",
					    SJA1105_META_DMAC);
			kfree_skb(priv->stampable_skb);
		}

		/* Hold a reference to avoid dsa_switch_rcv
		 * from freeing the skb.
		 */
		priv->stampable_skb = skb_get(skb);
		spin_unlock(&priv->meta_lock);

		/* Tell DSA we got nothing */
		return NULL;

	/* Step 2: The meta frame arrived.
	 * Time to take the stampable skb out of the closet, annotate it
	 * with the partial timestamp, and pretend that we received it
	 * just now (basically masquerade the buffered frame as the meta
	 * frame, which serves no further purpose).
	 */
	} else if (is_meta) {
		struct dsa_port *dp = dsa_slave_to_port(skb->dev);
		struct sja1105_tagger_private *priv;
		struct dsa_switch *ds = dp->ds;
		struct sk_buff *stampable_skb;

		priv = sja1105_tagger_private(ds);

		/* Drop the meta frame if we're not in the right state
		 * to process it.
		 */
		if (!test_bit(SJA1105_HWTS_RX_EN, &priv->state))
			return NULL;

		spin_lock(&priv->meta_lock);

		stampable_skb = priv->stampable_skb;
		priv->stampable_skb = NULL;

		/* Was this a meta frame instead of the link-local
		 * that we were expecting?
		 */
		if (!stampable_skb) {
			dev_err_ratelimited(ds->dev,
					    "Unexpected meta frame\n");
			spin_unlock(&priv->meta_lock);
			return NULL;
		}

		if (stampable_skb->dev != skb->dev) {
			dev_err_ratelimited(ds->dev,
					    "Meta frame on wrong port\n");
			/* NOTE(review): stampable_skb was detached above but
			 * is not kfree_skb'd on this path — looks like the
			 * skb_get() reference taken in step 1 is dropped
			 * without a free; confirm against upstream before
			 * changing.
			 */
			spin_unlock(&priv->meta_lock);
			return NULL;
		}

		/* Free the meta frame and give DSA the buffered stampable_skb
		 * for further processing up the network stack.
		 */
		kfree_skb(skb);
		skb = stampable_skb;
		sja1105_transfer_meta(skb, meta);

		spin_unlock(&priv->meta_lock);
	}

	return skb;
}
473 | ||
950a419d VO |
474 | static bool sja1105_rxtstamp_get_state(struct dsa_switch *ds) |
475 | { | |
476 | struct sja1105_tagger_private *priv = sja1105_tagger_private(ds); | |
477 | ||
478 | return test_bit(SJA1105_HWTS_RX_EN, &priv->state); | |
479 | } | |
480 | ||
481 | static void sja1105_rxtstamp_set_state(struct dsa_switch *ds, bool on) | |
482 | { | |
483 | struct sja1105_tagger_private *priv = sja1105_tagger_private(ds); | |
484 | ||
485 | if (on) | |
486 | set_bit(SJA1105_HWTS_RX_EN, &priv->state); | |
487 | else | |
488 | clear_bit(SJA1105_HWTS_RX_EN, &priv->state); | |
489 | ||
490 | /* Initialize the meta state machine to a known state */ | |
491 | if (!priv->stampable_skb) | |
492 | return; | |
493 | ||
494 | kfree_skb(priv->stampable_skb); | |
495 | priv->stampable_skb = NULL; | |
496 | } | |
497 | ||
233697b3 VO |
498 | static bool sja1105_skb_has_tag_8021q(const struct sk_buff *skb) |
499 | { | |
500 | u16 tpid = ntohs(eth_hdr(skb)->h_proto); | |
501 | ||
502 | return tpid == ETH_P_SJA1105 || tpid == ETH_P_8021Q || | |
503 | skb_vlan_tag_present(skb); | |
504 | } | |
505 | ||
4913b8eb VO |
506 | static bool sja1110_skb_has_inband_control_extension(const struct sk_buff *skb) |
507 | { | |
508 | return ntohs(eth_hdr(skb)->h_proto) == ETH_P_SJA1110; | |
509 | } | |
510 | ||
04a17583 VO |
511 | /* If the VLAN in the packet is a tag_8021q one, set @source_port and |
512 | * @switch_id and strip the header. Otherwise set @vid and keep it in the | |
513 | * packet. | |
884be12f | 514 | */ |
04a17583 | 515 | static void sja1105_vlan_rcv(struct sk_buff *skb, int *source_port, |
d7f9787a | 516 | int *switch_id, int *vbid, u16 *vid) |
884be12f VO |
517 | { |
518 | struct vlan_ethhdr *hdr = (struct vlan_ethhdr *)skb_mac_header(skb); | |
519 | u16 vlan_tci; | |
520 | ||
521 | if (skb_vlan_tag_present(skb)) | |
522 | vlan_tci = skb_vlan_tag_get(skb); | |
523 | else | |
524 | vlan_tci = ntohs(hdr->h_vlan_TCI); | |
525 | ||
d7f9787a VO |
526 | if (vid_is_dsa_8021q(vlan_tci & VLAN_VID_MASK)) |
527 | return dsa_8021q_rcv(skb, source_port, switch_id, vbid); | |
884be12f VO |
528 | |
529 | /* Try our best with imprecise RX */ | |
530 | *vid = vlan_tci & VLAN_VID_MASK; | |
884be12f VO |
531 | } |
532 | ||
/* SJA1105 RX hook. Decode the source port/switch from whichever tagging
 * mechanism applies (tag_8021q VLAN, mangled DMAC of link-local frames, or
 * a meta frame's payload), steer the skb to the right slave netdev, then
 * run the RX timestamping state machine.
 */
static struct sk_buff *sja1105_rcv(struct sk_buff *skb,
				   struct net_device *netdev)
{
	int source_port = -1, switch_id = -1, vbid = -1;
	struct sja1105_meta meta = {0};
	struct ethhdr *hdr;
	bool is_link_local;
	bool is_meta;
	u16 vid;

	hdr = eth_hdr(skb);
	is_link_local = sja1105_is_link_local(skb);
	is_meta = sja1105_is_meta_frame(skb);

	if (sja1105_skb_has_tag_8021q(skb)) {
		/* Normal traffic path. */
		sja1105_vlan_rcv(skb, &source_port, &switch_id, &vbid, &vid);
	} else if (is_link_local) {
		/* Management traffic path. Switch embeds the switch ID and
		 * port ID into bytes of the destination MAC, courtesy of
		 * the incl_srcpt options.
		 */
		source_port = hdr->h_dest[3];
		switch_id = hdr->h_dest[4];
		/* Clear the DMAC bytes that were mangled by the switch */
		hdr->h_dest[3] = 0;
		hdr->h_dest[4] = 0;
	} else if (is_meta) {
		sja1105_meta_unpack(skb, &meta);
		source_port = meta.source_port;
		switch_id = meta.switch_id;
	} else {
		/* Untagged, non-management, non-meta: nothing we can decode */
		return NULL;
	}

	if (vbid >= 1)
		skb->dev = dsa_tag_8021q_find_port_by_vbid(netdev, vbid);
	else if (source_port == -1 || switch_id == -1)
		skb->dev = dsa_find_designated_bridge_port_by_vid(netdev, vid);
	else
		skb->dev = dsa_master_find_slave(netdev, switch_id, source_port);
	if (!skb->dev) {
		netdev_warn(netdev, "Couldn't decode source port\n");
		return NULL;
	}

	if (!is_link_local)
		dsa_default_offload_fwd_mark(skb);

	return sja1105_rcv_meta_state_machine(skb, &meta, is_link_local,
					      is_meta);
}
585 | ||
/* Parse an SJA1110 metadata frame carrying 2-step TX timestamps and feed
 * each record to the switch driver's handler. The payload after the DSA
 * header is an array of SJA1110_META_TSTAMP_SIZE-byte records:
 * byte 0 = timestamp ID, byte 1 = source port (7:4) and direction (bit 3),
 * bytes 2..9 = big-endian 64-bit timestamp. Always consumes the frame.
 */
static struct sk_buff *sja1110_rcv_meta(struct sk_buff *skb, u16 rx_header)
{
	u8 *buf = dsa_etype_header_pos_rx(skb) + SJA1110_HEADER_LEN;
	int switch_id = SJA1110_RX_HEADER_SWITCH_ID(rx_header);
	int n_ts = SJA1110_RX_HEADER_N_TS(rx_header);
	struct sja1105_tagger_data *tagger_data;
	struct net_device *master = skb->dev;
	struct dsa_port *cpu_dp;
	struct dsa_switch *ds;
	int i;

	cpu_dp = master->dsa_ptr;
	ds = dsa_switch_find(cpu_dp->dst->index, switch_id);
	if (!ds) {
		net_err_ratelimited("%s: cannot find switch id %d\n",
				    master->name, switch_id);
		return NULL;
	}

	tagger_data = sja1105_tagger_data(ds);
	if (!tagger_data->meta_tstamp_handler)
		return NULL;

	/* n_ts is the highest valid record index, hence <= */
	for (i = 0; i <= n_ts; i++) {
		u8 ts_id, source_port, dir;
		u64 tstamp;

		ts_id = buf[0];
		source_port = (buf[1] & GENMASK(7, 4)) >> 4;
		dir = (buf[1] & BIT(3)) >> 3;
		tstamp = be64_to_cpu(*(__be64 *)(buf + 2));

		tagger_data->meta_tstamp_handler(ds, source_port, ts_id, dir,
						 tstamp);

		buf += SJA1110_META_TSTAMP_SIZE;
	}

	/* Discard the meta frame, we've consumed the timestamps it contained */
	return NULL;
}
627 | ||
/* Strip and interpret the SJA1110 in-band control extension (4-byte DSA
 * header, plus an optional 13-byte trailer holding an RX timestamp).
 * Fills in @source_port/@switch_id, sets @host_only for trapped frames,
 * and dispatches metadata frames to sja1110_rcv_meta(). Returns the skb
 * with the DSA header removed, or NULL if the frame was consumed/invalid.
 */
static struct sk_buff *sja1110_rcv_inband_control_extension(struct sk_buff *skb,
							    int *source_port,
							    int *switch_id,
							    bool *host_only)
{
	u16 rx_header;

	if (unlikely(!pskb_may_pull(skb, SJA1110_HEADER_LEN)))
		return NULL;

	/* skb->data points to skb_mac_header(skb) + ETH_HLEN, which is exactly
	 * what we need because the caller has checked the EtherType (which is
	 * located 2 bytes back) and we just need a pointer to the header that
	 * comes afterwards.
	 */
	rx_header = ntohs(*(__be16 *)skb->data);

	if (rx_header & SJA1110_RX_HEADER_HOST_ONLY)
		*host_only = true;

	if (rx_header & SJA1110_RX_HEADER_IS_METADATA)
		return sja1110_rcv_meta(skb, rx_header);

	/* Timestamp frame, we have a trailer */
	if (rx_header & SJA1110_RX_HEADER_HAS_TRAILER) {
		int start_of_padding = SJA1110_RX_HEADER_TRAILER_POS(rx_header);
		u8 *rx_trailer = skb_tail_pointer(skb) - SJA1110_RX_TRAILER_LEN;
		u64 *tstamp = &SJA1105_SKB_CB(skb)->tstamp;
		u8 last_byte = rx_trailer[12];

		/* The timestamp is unaligned, so we need to use packing()
		 * to get it
		 */
		packing(rx_trailer, tstamp, 63, 0, 8, UNPACK, 0);

		*source_port = SJA1110_RX_TRAILER_SRC_PORT(last_byte);
		*switch_id = SJA1110_RX_TRAILER_SWITCH_ID(last_byte);

		/* skb->len counts from skb->data, while start_of_padding
		 * counts from the destination MAC address. Right now skb->data
		 * is still as set by the DSA master, so to trim away the
		 * padding and trailer we need to account for the fact that
		 * skb->data points to skb_mac_header(skb) + ETH_HLEN.
		 */
		pskb_trim_rcsum(skb, start_of_padding - ETH_HLEN);
	/* Trap-to-host frame, no timestamp trailer */
	} else {
		*source_port = SJA1110_RX_HEADER_SRC_PORT(rx_header);
		*switch_id = SJA1110_RX_HEADER_SWITCH_ID(rx_header);
	}

	/* Advance skb->data past the DSA header */
	skb_pull_rcsum(skb, SJA1110_HEADER_LEN);

	dsa_strip_etype_header(skb, SJA1110_HEADER_LEN);

	/* With skb->data in its final place, update the MAC header
	 * so that eth_hdr() continues to works properly.
	 */
	skb_set_mac_header(skb, -ETH_HLEN);

	return skb;
}
691 | ||
/* SJA1110 RX hook: strip the in-band control extension (if present), then
 * the tag_8021q VLAN (if present), steer the skb to the decoded slave
 * netdev, and set the offload mark for forwarded (non-trapped) traffic.
 */
static struct sk_buff *sja1110_rcv(struct sk_buff *skb,
				   struct net_device *netdev)
{
	int source_port = -1, switch_id = -1, vbid = -1;
	bool host_only = false;
	u16 vid = 0;

	if (sja1110_skb_has_inband_control_extension(skb)) {
		skb = sja1110_rcv_inband_control_extension(skb, &source_port,
							   &switch_id,
							   &host_only);
		if (!skb)
			return NULL;
	}

	/* Packets with in-band control extensions might still have RX VLANs */
	if (likely(sja1105_skb_has_tag_8021q(skb)))
		sja1105_vlan_rcv(skb, &source_port, &switch_id, &vbid, &vid);

	if (vbid >= 1)
		skb->dev = dsa_tag_8021q_find_port_by_vbid(netdev, vbid);
	else if (source_port == -1 || switch_id == -1)
		skb->dev = dsa_find_designated_bridge_port_by_vid(netdev, vid);
	else
		skb->dev = dsa_master_find_slave(netdev, switch_id, source_port);
	if (!skb->dev) {
		netdev_warn(netdev, "Couldn't decode source port\n");
		return NULL;
	}

	if (!host_only)
		dsa_default_offload_fwd_mark(skb);

	return skb;
}
727 | ||
e6652979 VO |
728 | static void sja1105_flow_dissect(const struct sk_buff *skb, __be16 *proto, |
729 | int *offset) | |
730 | { | |
731 | /* No tag added for management frames, all ok */ | |
732 | if (unlikely(sja1105_is_link_local(skb))) | |
733 | return; | |
734 | ||
735 | dsa_tag_generic_flow_dissect(skb, proto, offset); | |
736 | } | |
737 | ||
4913b8eb VO |
738 | static void sja1110_flow_dissect(const struct sk_buff *skb, __be16 *proto, |
739 | int *offset) | |
740 | { | |
741 | /* Management frames have 2 DSA tags on RX, so the needed_headroom we | |
742 | * declared is fine for the generic dissector adjustment procedure. | |
743 | */ | |
744 | if (unlikely(sja1105_is_link_local(skb))) | |
745 | return dsa_tag_generic_flow_dissect(skb, proto, offset); | |
746 | ||
747 | /* For the rest, there is a single DSA tag, the tag_8021q one */ | |
748 | *offset = VLAN_HLEN; | |
749 | *proto = ((__be16 *)skb->data)[(VLAN_HLEN / 2) - 1]; | |
750 | } | |
751 | ||
/* Tear down tagger-private state: stop the deferred-xmit worker before
 * freeing the structure that refers to it.
 */
static void sja1105_disconnect(struct dsa_switch *ds)
{
	struct sja1105_tagger_private *priv = ds->tagger_data;

	kthread_destroy_worker(priv->xmit_worker);
	kfree(priv);
	ds->tagger_data = NULL;
}
760 | ||
/* Allocate tagger-private state, spawn the deferred-xmit kthread worker and
 * publish the rxtstamp callbacks to the switch driver via ds->tagger_data.
 * Returns 0 or a negative errno; on failure nothing is left allocated.
 */
static int sja1105_connect(struct dsa_switch *ds)
{
	struct sja1105_tagger_data *tagger_data;
	struct sja1105_tagger_private *priv;
	struct kthread_worker *xmit_worker;
	int err;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	spin_lock_init(&priv->meta_lock);

	xmit_worker = kthread_create_worker(0, "dsa%d:%d_xmit",
					    ds->dst->index, ds->index);
	if (IS_ERR(xmit_worker)) {
		err = PTR_ERR(xmit_worker);
		kfree(priv);
		return err;
	}

	priv->xmit_worker = xmit_worker;
	/* Export functions for switch driver use */
	tagger_data = &priv->data;
	tagger_data->rxtstamp_get_state = sja1105_rxtstamp_get_state;
	tagger_data->rxtstamp_set_state = sja1105_rxtstamp_set_state;
	ds->tagger_data = priv;

	return 0;
}
791 | ||
/* Tagger ops for the SJA1105 family (tag_8021q VLANs + meta frames) */
static const struct dsa_device_ops sja1105_netdev_ops = {
	.name = SJA1105_NAME,
	.proto = DSA_TAG_PROTO_SJA1105,
	.xmit = sja1105_xmit,
	.rcv = sja1105_rcv,
	.connect = sja1105_connect,
	.disconnect = sja1105_disconnect,
	.needed_headroom = VLAN_HLEN,
	.flow_dissect = sja1105_flow_dissect,
	.promisc_on_master = true,
};

DSA_TAG_DRIVER(sja1105_netdev_ops);
MODULE_ALIAS_DSA_TAG_DRIVER(DSA_TAG_PROTO_SJA1105, SJA1105_NAME);

/* Tagger ops for the SJA1110 family (in-band control extensions); shares
 * connect/disconnect with SJA1105.
 */
static const struct dsa_device_ops sja1110_netdev_ops = {
	.name = SJA1110_NAME,
	.proto = DSA_TAG_PROTO_SJA1110,
	.xmit = sja1110_xmit,
	.rcv = sja1110_rcv,
	.connect = sja1105_connect,
	.disconnect = sja1105_disconnect,
	.flow_dissect = sja1110_flow_dissect,
	.needed_headroom = SJA1110_HEADER_LEN + VLAN_HLEN,
	.needed_tailroom = SJA1110_RX_TRAILER_LEN + SJA1110_MAX_PADDING_LEN,
};

DSA_TAG_DRIVER(sja1110_netdev_ops);
MODULE_ALIAS_DSA_TAG_DRIVER(DSA_TAG_PROTO_SJA1110, SJA1110_NAME);

/* Register both taggers from this single module */
static struct dsa_tag_driver *sja1105_tag_driver_array[] = {
	&DSA_TAG_DRIVER_NAME(sja1105_netdev_ops),
	&DSA_TAG_DRIVER_NAME(sja1110_netdev_ops),
};

module_dsa_tag_drivers(sja1105_tag_driver_array);

MODULE_LICENSE("GPL v2");