1 | // SPDX-License-Identifier: GPL-2.0-or-later |
2 | ||
3 | #include <linux/mrp_bridge.h> | |
4 | #include "br_private_mrp.h" | |
5 | ||
6 | static const u8 mrp_test_dmac[ETH_ALEN] = { 0x1, 0x15, 0x4e, 0x0, 0x0, 0x1 }; | |
7 | static const u8 mrp_in_test_dmac[ETH_ALEN] = { 0x1, 0x15, 0x4e, 0x0, 0x0, 0x3 }; |
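/* These are the destination MAC addresses used for MRP_Test and MRP_InTest
 * frames (01:15:4e:00:00:01 and 01:15:4e:00:00:03). They appear to match the
 * multicast group addresses reserved for these frame types by the MRP
 * specification (IEC 62439-2).
 */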
8 | ||
9 | static int br_mrp_process(struct net_bridge_port *p, struct sk_buff *skb); |
10 | ||
11 | static struct br_frame_type mrp_frame_type __read_mostly = { | |
12 | .type = cpu_to_be16(ETH_P_MRP), | |
13 | .frame_handler = br_mrp_process, | |
14 | }; | |
15 | ||
16 | static bool br_mrp_is_ring_port(struct net_bridge_port *p_port, |
17 | struct net_bridge_port *s_port, | |
18 | struct net_bridge_port *port) | |
19 | { | |
20 | if (port == p_port || | |
21 | port == s_port) | |
22 | return true; | |
23 | ||
24 | return false; | |
25 | } | |
26 | ||
27 | static bool br_mrp_is_in_port(struct net_bridge_port *i_port, | |
28 | struct net_bridge_port *port) | |
29 | { | |
30 | if (port == i_port) | |
31 | return true; | |
32 | ||
33 | return false; | |
34 | } | |
35 | |
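/* Walks the bridge port list looking for the port with the given ifindex.
 * The list is not traversed under RCU here, so callers are expected to hold
 * rtnl_lock (the callers in this file run under rtnl).
 */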
36 | static struct net_bridge_port *br_mrp_get_port(struct net_bridge *br, | |
37 | u32 ifindex) | |
38 | { | |
39 | struct net_bridge_port *res = NULL; | |
40 | struct net_bridge_port *port; | |
41 | ||
42 | list_for_each_entry(port, &br->port_list, list) { | |
43 | if (port->dev->ifindex == ifindex) { | |
44 | res = port; | |
45 | break; | |
46 | } | |
47 | } | |
48 | ||
49 | return res; | |
50 | } | |
51 | ||
52 | static struct br_mrp *br_mrp_find_id(struct net_bridge *br, u32 ring_id) | |
53 | { | |
54 | struct br_mrp *res = NULL; | |
55 | struct br_mrp *mrp; | |
56 | ||
57 | hlist_for_each_entry_rcu(mrp, &br->mrp_list, list, |
58 | lockdep_rtnl_is_held()) { | |
59 | if (mrp->ring_id == ring_id) { |
60 | res = mrp; | |
61 | break; | |
62 | } | |
63 | } | |
64 | ||
65 | return res; | |
66 | } | |
67 | ||
68 | static struct br_mrp *br_mrp_find_in_id(struct net_bridge *br, u32 in_id) |
69 | { | |
70 | struct br_mrp *res = NULL; | |
71 | struct br_mrp *mrp; | |
72 | ||
73 | hlist_for_each_entry_rcu(mrp, &br->mrp_list, list, |
74 | lockdep_rtnl_is_held()) { | |
75 | if (mrp->in_id == in_id) { |
76 | res = mrp; | |
77 | break; | |
78 | } | |
79 | } | |
80 | ||
81 | return res; | |
82 | } | |
83 | ||
84 | static bool br_mrp_unique_ifindex(struct net_bridge *br, u32 ifindex) |
85 | { | |
86 | struct br_mrp *mrp; | |
87 | ||
88 | hlist_for_each_entry_rcu(mrp, &br->mrp_list, list, |
89 | lockdep_rtnl_is_held()) { | |
90 | struct net_bridge_port *p; |
91 | ||
92 | p = rtnl_dereference(mrp->p_port); | |
93 | if (p && p->dev->ifindex == ifindex) | |
94 | return false; | |
95 | ||
96 | p = rtnl_dereference(mrp->s_port); | |
97 | if (p && p->dev->ifindex == ifindex) | |
98 | return false; | |
99 | |
100 | p = rtnl_dereference(mrp->i_port); | |
101 | if (p && p->dev->ifindex == ifindex) | |
102 | return false; | |
103 | } |
104 | ||
105 | return true; | |
106 | } | |
107 | ||
108 | static struct br_mrp *br_mrp_find_port(struct net_bridge *br, |
109 | struct net_bridge_port *p) | |
110 | { | |
111 | struct br_mrp *res = NULL; | |
112 | struct br_mrp *mrp; | |
113 | ||
114 | hlist_for_each_entry_rcu(mrp, &br->mrp_list, list, |
115 | lockdep_rtnl_is_held()) { | |
116 | if (rcu_access_pointer(mrp->p_port) == p ||
117 | rcu_access_pointer(mrp->s_port) == p || |
118 | rcu_access_pointer(mrp->i_port) == p) { | |
119 | res = mrp; |
120 | break; | |
121 | } | |
122 | } | |
123 | ||
124 | return res; | |
125 | } | |
126 | ||
127 | static int br_mrp_next_seq(struct br_mrp *mrp) | |
128 | { | |
129 | mrp->seq_id++; | |
130 | return mrp->seq_id; | |
131 | } | |
132 | ||
133 | static struct sk_buff *br_mrp_skb_alloc(struct net_bridge_port *p, | |
134 | const u8 *src, const u8 *dst) | |
135 | { | |
136 | struct ethhdr *eth_hdr; | |
137 | struct sk_buff *skb; | |
138 | __be16 *version;
139 | |
140 | skb = dev_alloc_skb(MRP_MAX_FRAME_LENGTH); | |
141 | if (!skb) | |
142 | return NULL; | |
143 | ||
144 | skb->dev = p->dev; | |
145 | skb->protocol = htons(ETH_P_MRP); | |
146 | skb->priority = MRP_FRAME_PRIO; | |
147 | skb_reserve(skb, sizeof(*eth_hdr)); | |
148 | ||
149 | eth_hdr = skb_push(skb, sizeof(*eth_hdr)); | |
150 | ether_addr_copy(eth_hdr->h_dest, dst); | |
151 | ether_addr_copy(eth_hdr->h_source, src); | |
152 | eth_hdr->h_proto = htons(ETH_P_MRP); | |
153 | ||
154 | version = skb_put(skb, sizeof(*version)); | |
155 | *version = cpu_to_be16(MRP_VERSION); | |
156 | ||
157 | return skb; | |
158 | } | |
159 | ||
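/* The helpers below build the MRP PDU: an Ethernet header, a 16-bit MRP
 * version field, then a sequence of TLVs (each a type/length header followed
 * by its payload), terminated by a TLV of type BR_MRP_TLV_HEADER_END.
 */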
160 | static void br_mrp_skb_tlv(struct sk_buff *skb, | |
161 | enum br_mrp_tlv_header_type type, | |
162 | u8 length) | |
163 | { | |
164 | struct br_mrp_tlv_hdr *hdr; | |
165 | ||
166 | hdr = skb_put(skb, sizeof(*hdr)); | |
167 | hdr->type = type; | |
168 | hdr->length = length; | |
169 | } | |
170 | ||
171 | static void br_mrp_skb_common(struct sk_buff *skb, struct br_mrp *mrp) | |
172 | { | |
173 | struct br_mrp_common_hdr *hdr; | |
174 | ||
175 | br_mrp_skb_tlv(skb, BR_MRP_TLV_HEADER_COMMON, sizeof(*hdr)); | |
176 | ||
177 | hdr = skb_put(skb, sizeof(*hdr)); | |
178 | hdr->seq_id = cpu_to_be16(br_mrp_next_seq(mrp)); | |
179 | memset(hdr->domain, 0xff, MRP_DOMAIN_UUID_LENGTH); | |
180 | } | |
181 | ||
182 | static struct sk_buff *br_mrp_alloc_test_skb(struct br_mrp *mrp, | |
183 | struct net_bridge_port *p, | |
184 | enum br_mrp_port_role_type port_role) | |
185 | { | |
186 | struct br_mrp_ring_test_hdr *hdr = NULL; | |
187 | struct sk_buff *skb = NULL; | |
188 | ||
189 | if (!p) | |
190 | return NULL; | |
191 | ||
192 | skb = br_mrp_skb_alloc(p, p->dev->dev_addr, mrp_test_dmac); | |
193 | if (!skb) | |
194 | return NULL; | |
195 | ||
196 | br_mrp_skb_tlv(skb, BR_MRP_TLV_HEADER_RING_TEST, sizeof(*hdr)); | |
197 | hdr = skb_put(skb, sizeof(*hdr)); | |
198 | ||
199 | hdr->prio = cpu_to_be16(mrp->prio);
200 | ether_addr_copy(hdr->sa, p->br->dev->dev_addr); |
201 | hdr->port_role = cpu_to_be16(port_role); | |
202 | hdr->state = cpu_to_be16(mrp->ring_state); | |
203 | hdr->transitions = cpu_to_be16(mrp->ring_transitions); | |
204 | hdr->timestamp = cpu_to_be32(jiffies_to_msecs(jiffies)); | |
205 | ||
206 | br_mrp_skb_common(skb, mrp); | |
207 | br_mrp_skb_tlv(skb, BR_MRP_TLV_HEADER_END, 0x0); | |
208 | ||
209 | return skb; | |
210 | } | |
211 | ||
212 | static struct sk_buff *br_mrp_alloc_in_test_skb(struct br_mrp *mrp, |
213 | struct net_bridge_port *p, | |
214 | enum br_mrp_port_role_type port_role) | |
215 | { | |
216 | struct br_mrp_in_test_hdr *hdr = NULL; | |
217 | struct sk_buff *skb = NULL; | |
218 | ||
219 | if (!p) | |
220 | return NULL; | |
221 | ||
222 | skb = br_mrp_skb_alloc(p, p->dev->dev_addr, mrp_in_test_dmac); | |
223 | if (!skb) | |
224 | return NULL; | |
225 | ||
226 | br_mrp_skb_tlv(skb, BR_MRP_TLV_HEADER_IN_TEST, sizeof(*hdr)); | |
227 | hdr = skb_put(skb, sizeof(*hdr)); | |
228 | ||
229 | hdr->id = cpu_to_be16(mrp->in_id); | |
230 | ether_addr_copy(hdr->sa, p->br->dev->dev_addr); | |
231 | hdr->port_role = cpu_to_be16(port_role); | |
232 | hdr->state = cpu_to_be16(mrp->in_state); | |
233 | hdr->transitions = cpu_to_be16(mrp->in_transitions); | |
234 | hdr->timestamp = cpu_to_be32(jiffies_to_msecs(jiffies)); | |
235 | ||
236 | br_mrp_skb_common(skb, mrp); | |
237 | br_mrp_skb_tlv(skb, BR_MRP_TLV_HEADER_END, 0x0); | |
238 | ||
239 | return skb; | |
240 | } | |
241 | ||
242 | /* This function is continuously called in the following cases:
243 |  * - when the node role is MRM, in this case test_monitor is always set to false
244 |  *   because it needs to notify the userspace that the ring is open and needs to
245 |  *   send MRP_Test frames
246 |  * - when the node role is MRA, there are 2 subcases:
247 |  *   - when the MRA behaves as MRM, in this case it behaves the same as the MRM role
248 |  *   - when the MRA behaves as MRC, in this case test_monitor is set to true,
249 |  *     because it needs to detect when it stops seeing MRP_Test frames
250 |  *     from the MRM node but it doesn't need to send MRP_Test frames.
251 |  */
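/* The work below requeues itself every test_interval microseconds and stops
 * requeueing once jiffies passes test_end, i.e. after the configured test
 * period has elapsed.
 */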
252 | static void br_mrp_test_work_expired(struct work_struct *work) |
253 | { | |
254 | struct delayed_work *del_work = to_delayed_work(work); | |
255 | struct br_mrp *mrp = container_of(del_work, struct br_mrp, test_work); | |
256 | struct net_bridge_port *p; | |
257 | bool notify_open = false; | |
258 | struct sk_buff *skb; | |
259 | ||
260 | if (time_before_eq(mrp->test_end, jiffies)) | |
261 | return; | |
262 | ||
263 | if (mrp->test_count_miss < mrp->test_max_miss) { | |
264 | mrp->test_count_miss++; | |
265 | } else { | |
266 | /* Notify that the ring is open only if the ring state is
267 |  * closed, otherwise it would continue to notify at every
268 |  * interval.
269 |  * Also notify that the ring is open when the node has the
270 |  * role MRA and behaves as MRC. The reason is that the
271 |  * userspace needs to know when the MRM stopped sending
272 |  * MRP_Test frames so that the current node can try to take
273 |  * the MRM role.
274 |  */
275 | if (mrp->ring_state == BR_MRP_RING_STATE_CLOSED ||
276 | mrp->test_monitor)
277 | notify_open = true; |
278 | } | |
279 | ||
280 | rcu_read_lock(); | |
281 | ||
282 | p = rcu_dereference(mrp->p_port); | |
283 | if (p) { | |
284 | if (!mrp->test_monitor) { |
285 | skb = br_mrp_alloc_test_skb(mrp, p, | |
286 | BR_MRP_PORT_ROLE_PRIMARY); | |
287 | if (!skb) | |
288 | goto out; | |
289 | ||
290 | skb_reset_network_header(skb); | |
291 | dev_queue_xmit(skb); | |
292 | } | |
293 | |
294 | if (notify_open && !mrp->ring_role_offloaded) | |
295 | br_mrp_ring_port_open(p->dev, true);
296 | } |
297 | ||
298 | p = rcu_dereference(mrp->s_port); | |
299 | if (p) { | |
300 | if (!mrp->test_monitor) { |
301 | skb = br_mrp_alloc_test_skb(mrp, p, | |
302 | BR_MRP_PORT_ROLE_SECONDARY); | |
303 | if (!skb) | |
304 | goto out; | |
305 | ||
306 | skb_reset_network_header(skb); | |
307 | dev_queue_xmit(skb); | |
308 | } | |
309 | |
310 | if (notify_open && !mrp->ring_role_offloaded) | |
311 | br_mrp_ring_port_open(p->dev, true);
312 | } |
313 | ||
314 | out: | |
315 | rcu_read_unlock(); | |
316 | ||
317 | queue_delayed_work(system_wq, &mrp->test_work, | |
318 | usecs_to_jiffies(mrp->test_interval)); | |
319 | } | |
320 | ||
321 | /* This function is continuously called when the node has the interconnect role
322 |  * MIM. It generates interconnect test frames and sends them on all 3 ports.
323 |  * It also checks whether it has stopped receiving interconnect test frames.
324 | */ | |
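/* Like the ring test work above, this work requeues itself every
 * in_test_interval microseconds and, once in_test_count_miss reaches
 * in_test_max_miss, reports the interconnect ring as open to userspace
 * (unless the MIM role was offloaded to hardware).
 */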
325 | static void br_mrp_in_test_work_expired(struct work_struct *work) | |
326 | { | |
327 | struct delayed_work *del_work = to_delayed_work(work); | |
328 | struct br_mrp *mrp = container_of(del_work, struct br_mrp, in_test_work); | |
329 | struct net_bridge_port *p; | |
330 | bool notify_open = false; | |
331 | struct sk_buff *skb; | |
332 | ||
333 | if (time_before_eq(mrp->in_test_end, jiffies)) | |
334 | return; | |
335 | ||
336 | if (mrp->in_test_count_miss < mrp->in_test_max_miss) { | |
337 | mrp->in_test_count_miss++; | |
338 | } else { | |
339 | /* Notify that the interconnect ring is open only if the | |
340 | * interconnect ring state is closed, otherwise it would | |
341 | * continue to notify at every interval. | |
342 | */ | |
343 | if (mrp->in_state == BR_MRP_IN_STATE_CLOSED) | |
344 | notify_open = true; | |
345 | } | |
346 | ||
347 | rcu_read_lock(); | |
348 | ||
349 | p = rcu_dereference(mrp->p_port); | |
350 | if (p) { | |
351 | skb = br_mrp_alloc_in_test_skb(mrp, p, | |
352 | BR_MRP_PORT_ROLE_PRIMARY); | |
353 | if (!skb) | |
354 | goto out; | |
355 | ||
356 | skb_reset_network_header(skb); | |
357 | dev_queue_xmit(skb); | |
358 | ||
359 | if (notify_open && !mrp->in_role_offloaded) | |
360 | br_mrp_in_port_open(p->dev, true); | |
361 | } | |
362 | ||
363 | p = rcu_dereference(mrp->s_port); | |
364 | if (p) { | |
365 | skb = br_mrp_alloc_in_test_skb(mrp, p, | |
366 | BR_MRP_PORT_ROLE_SECONDARY); | |
367 | if (!skb) | |
368 | goto out; | |
369 | ||
370 | skb_reset_network_header(skb); | |
371 | dev_queue_xmit(skb); | |
372 | ||
373 | if (notify_open && !mrp->in_role_offloaded) | |
374 | br_mrp_in_port_open(p->dev, true); | |
375 | } | |
376 | ||
377 | p = rcu_dereference(mrp->i_port); | |
378 | if (p) { | |
379 | skb = br_mrp_alloc_in_test_skb(mrp, p, | |
380 | BR_MRP_PORT_ROLE_INTER); | |
381 | if (!skb) | |
382 | goto out; | |
383 | ||
384 | skb_reset_network_header(skb); | |
385 | dev_queue_xmit(skb); | |
386 | ||
387 | if (notify_open && !mrp->in_role_offloaded) | |
388 | br_mrp_in_port_open(p->dev, true); | |
389 | } | |
390 | ||
391 | out: | |
392 | rcu_read_unlock(); | |
393 | ||
394 | queue_delayed_work(system_wq, &mrp->in_test_work, | |
395 | usecs_to_jiffies(mrp->in_test_interval)); | |
396 | } | |
397 | ||
398 | /* Deletes the MRP instance. |
399 | * note: called under rtnl_lock | |
400 | */ | |
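/* Summary of the teardown below: stop the test work items and their hardware
 * offload, remove the switchdev instance, put every ring/interconnect port
 * back to forwarding (or disabled if the bridge is down) and clear
 * BR_MRP_AWARE, then unlink the instance and free it after an RCU grace
 * period. The MRP frame handler is unregistered once no instance is left.
 */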
401 | static void br_mrp_del_impl(struct net_bridge *br, struct br_mrp *mrp) | |
402 | { | |
403 | struct net_bridge_port *p; | |
404 | u8 state;
405 | |
406 | /* Stop sending MRP_Test frames */ | |
407 | cancel_delayed_work_sync(&mrp->test_work); | |
408 | br_mrp_switchdev_send_ring_test(br, mrp, 0, 0, 0, 0);
409 |
410 | /* Stop sending MRP_InTest frames if it has an interconnect role */
411 | cancel_delayed_work_sync(&mrp->in_test_work); | |
412 | br_mrp_switchdev_send_in_test(br, mrp, 0, 0, 0); | |
413 | ||
414 | br_mrp_switchdev_del(br, mrp); |
415 | ||
416 | /* Reset the ports */ | |
417 | p = rtnl_dereference(mrp->p_port); | |
418 | if (p) { | |
419 | spin_lock_bh(&br->lock); | |
420 | state = netif_running(br->dev) ? |
421 | BR_STATE_FORWARDING : BR_STATE_DISABLED; | |
422 | p->state = state; | |
423 | p->flags &= ~BR_MRP_AWARE; |
424 | spin_unlock_bh(&br->lock); | |
425 | br_mrp_port_switchdev_set_state(p, state);
426 | rcu_assign_pointer(mrp->p_port, NULL); |
427 | } | |
428 | ||
429 | p = rtnl_dereference(mrp->s_port); | |
430 | if (p) { | |
431 | spin_lock_bh(&br->lock); | |
432 | state = netif_running(br->dev) ? |
433 | BR_STATE_FORWARDING : BR_STATE_DISABLED; | |
434 | p->state = state; | |
435 | p->flags &= ~BR_MRP_AWARE; |
436 | spin_unlock_bh(&br->lock); | |
437 | br_mrp_port_switchdev_set_state(p, state);
438 | rcu_assign_pointer(mrp->s_port, NULL); |
439 | } | |
440 | ||
441 | p = rtnl_dereference(mrp->i_port); |
442 | if (p) { | |
443 | spin_lock_bh(&br->lock); | |
444 | state = netif_running(br->dev) ? | |
445 | BR_STATE_FORWARDING : BR_STATE_DISABLED; | |
446 | p->state = state; | |
447 | p->flags &= ~BR_MRP_AWARE; | |
448 | spin_unlock_bh(&br->lock); | |
449 | br_mrp_port_switchdev_set_state(p, state); | |
450 | rcu_assign_pointer(mrp->i_port, NULL); | |
451 | } | |
452 | ||
453 | hlist_del_rcu(&mrp->list);
454 | kfree_rcu(mrp, rcu);
455 |
456 | if (hlist_empty(&br->mrp_list))
457 | br_del_frame(br, &mrp_frame_type);
458 | } |
459 | ||
460 | /* Adds a new MRP instance. | |
461 | * note: called under rtnl_lock | |
462 | */ | |
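/* The steps below: reject duplicate ring_ids and ports that are either not
 * bridge ports or already used by another ring, mark both ring ports as
 * forwarding and MRP aware, register the ETH_P_MRP frame handler for the
 * first instance, add the instance to the bridge list and finally try to
 * offload it via switchdev, rolling everything back on failure.
 */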
463 | int br_mrp_add(struct net_bridge *br, struct br_mrp_instance *instance) | |
464 | { | |
465 | struct net_bridge_port *p; | |
466 | struct br_mrp *mrp; | |
467 | int err; | |
468 | ||
469 | /* If the ring exists, it is not possible to create another one with the | |
470 | * same ring_id | |
471 | */ | |
472 | mrp = br_mrp_find_id(br, instance->ring_id); | |
473 | if (mrp) | |
474 | return -EINVAL; | |
475 | ||
476 | if (!br_mrp_get_port(br, instance->p_ifindex) || | |
477 | !br_mrp_get_port(br, instance->s_ifindex)) | |
478 | return -EINVAL; | |
479 | ||
480 | /* It is not possible to have the same port part of multiple rings */ |
481 | if (!br_mrp_unique_ifindex(br, instance->p_ifindex) || | |
482 | !br_mrp_unique_ifindex(br, instance->s_ifindex)) | |
483 | return -EINVAL; | |
484 | ||
485 | mrp = kzalloc(sizeof(*mrp), GFP_KERNEL); |
486 | if (!mrp) | |
487 | return -ENOMEM; | |
488 | ||
489 | mrp->ring_id = instance->ring_id; | |
490 | mrp->prio = instance->prio;
491 | |
492 | p = br_mrp_get_port(br, instance->p_ifindex); | |
493 | spin_lock_bh(&br->lock); | |
494 | p->state = BR_STATE_FORWARDING; | |
495 | p->flags |= BR_MRP_AWARE; | |
496 | spin_unlock_bh(&br->lock); | |
497 | rcu_assign_pointer(mrp->p_port, p); | |
498 | ||
499 | p = br_mrp_get_port(br, instance->s_ifindex); | |
500 | spin_lock_bh(&br->lock); | |
501 | p->state = BR_STATE_FORWARDING; | |
502 | p->flags |= BR_MRP_AWARE; | |
503 | spin_unlock_bh(&br->lock); | |
504 | rcu_assign_pointer(mrp->s_port, p); | |
505 | ||
506 | if (hlist_empty(&br->mrp_list))
507 | br_add_frame(br, &mrp_frame_type); |
508 | ||
509 | INIT_DELAYED_WORK(&mrp->test_work, br_mrp_test_work_expired);
510 | INIT_DELAYED_WORK(&mrp->in_test_work, br_mrp_in_test_work_expired);
511 | hlist_add_tail_rcu(&mrp->list, &br->mrp_list);
512 | |
513 | err = br_mrp_switchdev_add(br, mrp); | |
514 | if (err) | |
515 | goto delete_mrp; | |
516 | ||
517 | return 0; | |
518 | ||
519 | delete_mrp: | |
520 | br_mrp_del_impl(br, mrp); | |
521 | ||
522 | return err; | |
523 | } | |
524 | ||
525 | /* Deletes the MRP instance that the port is part of
526 | * note: called under rtnl_lock | |
527 | */ | |
528 | void br_mrp_port_del(struct net_bridge *br, struct net_bridge_port *p) | |
529 | { | |
530 | struct br_mrp *mrp = br_mrp_find_port(br, p); | |
531 | ||
532 | /* If the port is not part of an MRP instance just bail out */
533 | if (!mrp) | |
534 | return; | |
535 | ||
536 | br_mrp_del_impl(br, mrp); | |
537 | } | |
538 | ||
539 | /* Deletes existing MRP instance based on ring_id | |
540 | * note: called under rtnl_lock | |
541 | */ | |
542 | int br_mrp_del(struct net_bridge *br, struct br_mrp_instance *instance) | |
543 | { | |
544 | struct br_mrp *mrp = br_mrp_find_id(br, instance->ring_id); | |
545 | ||
546 | if (!mrp) | |
547 | return -EINVAL; | |
548 | ||
549 | br_mrp_del_impl(br, mrp); | |
550 | ||
551 | return 0; | |
552 | } | |
553 | ||
554 | /* Set port state, port state can be forwarding, blocked or disabled | |
555 | * note: already called with rtnl_lock | |
556 | */ | |
557 | int br_mrp_set_port_state(struct net_bridge_port *p, | |
558 | enum br_mrp_port_state_type state) | |
559 | { | |
560 | if (!p || !(p->flags & BR_MRP_AWARE)) | |
561 | return -EINVAL; | |
562 | ||
563 | spin_lock_bh(&p->br->lock); | |
564 | ||
565 | if (state == BR_MRP_PORT_STATE_FORWARDING) | |
566 | p->state = BR_STATE_FORWARDING; | |
567 | else | |
568 | p->state = BR_STATE_BLOCKING; | |
569 | ||
570 | spin_unlock_bh(&p->br->lock); | |
571 | ||
572 | br_mrp_port_switchdev_set_state(p, state); | |
573 | ||
574 | return 0; | |
575 | } | |
576 | ||
577 | /* Set port role, port role can be primary or secondary | |
578 | * note: already called with rtnl_lock | |
579 | */ | |
580 | int br_mrp_set_port_role(struct net_bridge_port *p, | |
581 | enum br_mrp_port_role_type role)
582 | { |
583 | struct br_mrp *mrp; | |
584 | ||
585 | if (!p || !(p->flags & BR_MRP_AWARE)) | |
586 | return -EINVAL; | |
587 | ||
588 | mrp = br_mrp_find_port(p->br, p);
589 | |
590 | if (!mrp) | |
591 | return -EINVAL; | |
592 | ||
593 | switch (role) { |
594 | case BR_MRP_PORT_ROLE_PRIMARY: | |
595 | rcu_assign_pointer(mrp->p_port, p);
596 | break; |
597 | case BR_MRP_PORT_ROLE_SECONDARY: | |
598 | rcu_assign_pointer(mrp->s_port, p);
599 | break; |
600 | default: | |
601 | return -EINVAL; | |
602 | } | |
603 |
604 | br_mrp_port_switchdev_set_role(p, role);
605 | |
606 | return 0; | |
607 | } | |
608 | ||
609 | /* Set ring state, ring state can be only Open or Closed | |
610 | * note: already called with rtnl_lock | |
611 | */ | |
612 | int br_mrp_set_ring_state(struct net_bridge *br, | |
613 | struct br_mrp_ring_state *state) | |
614 | { | |
615 | struct br_mrp *mrp = br_mrp_find_id(br, state->ring_id); | |
616 | ||
617 | if (!mrp) | |
618 | return -EINVAL; | |
619 | ||
620 | if (mrp->ring_state == BR_MRP_RING_STATE_CLOSED && | |
621 | state->ring_state != BR_MRP_RING_STATE_CLOSED) | |
622 | mrp->ring_transitions++; | |
623 | ||
624 | mrp->ring_state = state->ring_state; | |
625 | ||
626 | br_mrp_switchdev_set_ring_state(br, mrp, state->ring_state); | |
627 | ||
628 | return 0; | |
629 | } | |
630 | ||
631 | /* Set ring role, ring role can be only MRM(Media Redundancy Manager) or | |
632 | * MRC(Media Redundancy Client). | |
633 | * note: already called with rtnl_lock | |
634 | */ | |
635 | int br_mrp_set_ring_role(struct net_bridge *br, | |
636 | struct br_mrp_ring_role *role) | |
637 | { | |
638 | struct br_mrp *mrp = br_mrp_find_id(br, role->ring_id); | |
639 | int err; | |
640 | ||
641 | if (!mrp) | |
642 | return -EINVAL; | |
643 | ||
644 | mrp->ring_role = role->ring_role; | |
645 | ||
646 | /* If there is an error just bail out */
647 | err = br_mrp_switchdev_set_ring_role(br, mrp, role->ring_role); | |
648 | if (err && err != -EOPNOTSUPP) | |
649 | return err; | |
650 | ||
651 | /* Now detect if the HW actually applied the role or not. If the HW
652 |  * applied the role it means that the SW will not need to do those
653 |  * operations anymore. For example if the role is MRM then the HW will
654 |  * notify the SW when the ring is open, but if the role is not pushed to
655 |  * the HW the SW will need to detect when the ring is open.
656 |  */
657 | mrp->ring_role_offloaded = err == -EOPNOTSUPP ? 0 : 1; | |
658 | ||
659 | return 0; | |
660 | } | |
661 | ||
662 | /* Start to generate or monitor MRP test frames, the frames are generated by
663 |  * the HW and if that fails, they are generated by the SW.
664 | * note: already called with rtnl_lock |
665 | */ | |
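/* If the switchdev call below succeeds, the hardware generates (or monitors)
 * the MRP_Test frames and the delayed work is never scheduled; otherwise the
 * test_work item sends or monitors the frames from the CPU at the requested
 * interval.
 */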
666 | int br_mrp_start_test(struct net_bridge *br, | |
667 | struct br_mrp_start_test *test) | |
668 | { | |
669 | struct br_mrp *mrp = br_mrp_find_id(br, test->ring_id); | |
670 | ||
671 | if (!mrp) | |
672 | return -EINVAL; | |
673 | ||
674 | /* Try to push it to the HW and if it fails then continue with SW |
675 | * implementation and if that also fails then return error. | |
676 | */ |
677 | if (!br_mrp_switchdev_send_ring_test(br, mrp, test->interval, | |
678 | test->max_miss, test->period, |
679 | test->monitor)) | |
680 | return 0; |
681 | ||
682 | mrp->test_interval = test->interval; | |
683 | mrp->test_end = jiffies + usecs_to_jiffies(test->period); | |
684 | mrp->test_max_miss = test->max_miss; | |
685 | mrp->test_monitor = test->monitor;
686 | mrp->test_count_miss = 0; |
687 | queue_delayed_work(system_wq, &mrp->test_work, | |
688 | usecs_to_jiffies(test->interval)); | |
689 | ||
690 | return 0; | |
691 | } | |
692 | ||
693 | /* Set in state, in state can be only Open or Closed
694 | * note: already called with rtnl_lock | |
695 | */ | |
696 | int br_mrp_set_in_state(struct net_bridge *br, struct br_mrp_in_state *state) | |
697 | { | |
698 | struct br_mrp *mrp = br_mrp_find_in_id(br, state->in_id); | |
699 | ||
700 | if (!mrp) | |
701 | return -EINVAL; | |
702 | ||
703 | if (mrp->in_state == BR_MRP_IN_STATE_CLOSED && | |
704 | state->in_state != BR_MRP_IN_STATE_CLOSED) | |
705 | mrp->in_transitions++; | |
706 | ||
707 | mrp->in_state = state->in_state; | |
708 | ||
709 | br_mrp_switchdev_set_in_state(br, mrp, state->in_state); | |
710 | ||
711 | return 0; | |
712 | } | |
713 | ||
714 | /* Set in role, in role can be only MIM(Media Interconnection Manager) or | |
715 | * MIC(Media Interconnection Client). | |
716 | * note: already called with rtnl_lock | |
717 | */ | |
718 | int br_mrp_set_in_role(struct net_bridge *br, struct br_mrp_in_role *role) | |
719 | { | |
720 | struct br_mrp *mrp = br_mrp_find_id(br, role->ring_id); | |
721 | struct net_bridge_port *p; | |
722 | int err; | |
723 | ||
724 | if (!mrp) | |
725 | return -EINVAL; | |
726 | ||
727 | if (!br_mrp_get_port(br, role->i_ifindex)) | |
728 | return -EINVAL; | |
729 | ||
730 | if (role->in_role == BR_MRP_IN_ROLE_DISABLED) { | |
731 | u8 state; | |
732 | ||
733 | /* It is not allowed to disable a port that doesn't exist */ | |
734 | p = rtnl_dereference(mrp->i_port); | |
735 | if (!p) | |
736 | return -EINVAL; | |
737 | ||
738 | /* Stop generating MRP_InTest frames */
739 | cancel_delayed_work_sync(&mrp->in_test_work); | |
740 | br_mrp_switchdev_send_in_test(br, mrp, 0, 0, 0); | |
741 | ||
742 | /* Remove the port */ | |
743 | spin_lock_bh(&br->lock); | |
744 | state = netif_running(br->dev) ? | |
745 | BR_STATE_FORWARDING : BR_STATE_DISABLED; | |
746 | p->state = state; | |
747 | p->flags &= ~BR_MRP_AWARE; | |
748 | spin_unlock_bh(&br->lock); | |
749 | br_mrp_port_switchdev_set_state(p, state); | |
750 | rcu_assign_pointer(mrp->i_port, NULL); | |
751 | ||
752 | mrp->in_role = role->in_role; | |
753 | mrp->in_id = 0; | |
754 | ||
755 | return 0; | |
756 | } | |
757 | ||
758 | /* It is not possible to have the same port part of multiple rings */ | |
759 | if (!br_mrp_unique_ifindex(br, role->i_ifindex)) | |
760 | return -EINVAL; | |
761 | ||
762 | /* It is not allowed to set a different interconnect port if the mrp | |
763 |  * instance already has one. First it needs to be disabled and only
764 |  * then can the new port be set
765 | */ | |
766 | if (rcu_access_pointer(mrp->i_port)) | |
767 | return -EINVAL; | |
768 | ||
769 | p = br_mrp_get_port(br, role->i_ifindex); | |
770 | spin_lock_bh(&br->lock); | |
771 | p->state = BR_STATE_FORWARDING; | |
772 | p->flags |= BR_MRP_AWARE; | |
773 | spin_unlock_bh(&br->lock); | |
774 | rcu_assign_pointer(mrp->i_port, p); | |
775 | ||
776 | mrp->in_role = role->in_role; | |
777 | mrp->in_id = role->in_id; | |
778 | ||
779 | /* If there is an error just bail out */
780 | err = br_mrp_switchdev_set_in_role(br, mrp, role->in_id, | |
781 | role->ring_id, role->in_role); | |
782 | if (err && err != -EOPNOTSUPP) | |
783 | return err; | |
784 | ||
785 | /* Now detect if the HW actually applied the role or not. If the HW
786 |  * applied the role it means that the SW will not need to do those
787 |  * operations anymore. For example if the role is MIM then the HW will
788 |  * notify the SW when the interconnect ring is open, but if it is not
789 |  * pushed to the HW the SW will need to detect when the interconnect ring is open.
790 |  */
791 | mrp->in_role_offloaded = err == -EOPNOTSUPP ? 0 : 1; | |
792 | ||
793 | return 0; | |
794 | } | |
795 | ||
796 | /* Start to generate MRP_InTest frames, the frames are generated by | |
797 |  * the HW and if that fails, they are generated by the SW.
798 | * note: already called with rtnl_lock | |
799 | */ | |
800 | int br_mrp_start_in_test(struct net_bridge *br, | |
801 | struct br_mrp_start_in_test *in_test) | |
802 | { | |
803 | struct br_mrp *mrp = br_mrp_find_in_id(br, in_test->in_id); | |
804 | ||
805 | if (!mrp) | |
806 | return -EINVAL; | |
807 | ||
808 | if (mrp->in_role != BR_MRP_IN_ROLE_MIM) | |
809 | return -EINVAL; | |
810 | ||
811 | /* Try to push it to the HW and if it fails then continue with SW | |
812 | * implementation and if that also fails then return error. | |
813 | */ | |
814 | if (!br_mrp_switchdev_send_in_test(br, mrp, in_test->interval, | |
815 | in_test->max_miss, in_test->period)) | |
816 | return 0; | |
817 | ||
818 | mrp->in_test_interval = in_test->interval; | |
819 | mrp->in_test_end = jiffies + usecs_to_jiffies(in_test->period); | |
820 | mrp->in_test_max_miss = in_test->max_miss; | |
821 | mrp->in_test_count_miss = 0; | |
822 | queue_delayed_work(system_wq, &mrp->in_test_work, | |
823 | usecs_to_jiffies(in_test->interval)); | |
824 | ||
825 | return 0; | |
826 | } | |
827 | ||
828 | /* Determine if the frame type is a ring frame */
829 | static bool br_mrp_ring_frame(struct sk_buff *skb) | |
830 | { | |
831 | const struct br_mrp_tlv_hdr *hdr; | |
832 | struct br_mrp_tlv_hdr _hdr; | |
833 | ||
834 | hdr = skb_header_pointer(skb, sizeof(uint16_t), sizeof(_hdr), &_hdr); | |
835 | if (!hdr) | |
836 | return false; | |
837 | ||
838 | if (hdr->type == BR_MRP_TLV_HEADER_RING_TEST || | |
839 | hdr->type == BR_MRP_TLV_HEADER_RING_TOPO || | |
840 | hdr->type == BR_MRP_TLV_HEADER_RING_LINK_DOWN || | |
841 | hdr->type == BR_MRP_TLV_HEADER_RING_LINK_UP || | |
842 | hdr->type == BR_MRP_TLV_HEADER_OPTION) | |
843 | return true; | |
844 | ||
845 | return false; | |
846 | } | |
847 | ||
848 | /* Determine if the frame type is an interconnect frame */
849 | static bool br_mrp_in_frame(struct sk_buff *skb) | |
850 | { | |
851 | const struct br_mrp_tlv_hdr *hdr; | |
852 | struct br_mrp_tlv_hdr _hdr; | |
853 | ||
854 | hdr = skb_header_pointer(skb, sizeof(uint16_t), sizeof(_hdr), &_hdr); | |
855 | if (!hdr) | |
856 | return false; | |
857 | ||
858 | if (hdr->type == BR_MRP_TLV_HEADER_IN_TEST || | |
859 | hdr->type == BR_MRP_TLV_HEADER_IN_TOPO || | |
860 | hdr->type == BR_MRP_TLV_HEADER_IN_LINK_DOWN || | |
861 | hdr->type == BR_MRP_TLV_HEADER_IN_LINK_UP) | |
862 | return true; | |
863 | ||
864 | return false; | |
865 | } | |
866 | ||
867 | /* Process only MRP Test frames. All the other MRP frames are processed by
868 | * userspace application | |
869 | * note: already called with rcu_read_lock | |
870 | */ | |
871 | static void br_mrp_mrm_process(struct br_mrp *mrp, struct net_bridge_port *port, | |
872 | struct sk_buff *skb) | |
873 | { | |
874 | const struct br_mrp_tlv_hdr *hdr; | |
875 | struct br_mrp_tlv_hdr _hdr; | |
876 | ||
877 | /* Each MRP header starts with a version field which is 16 bits. | |
878 | * Therefore skip the version and get directly the TLV header. | |
879 | */ | |
880 | hdr = skb_header_pointer(skb, sizeof(uint16_t), sizeof(_hdr), &_hdr); | |
881 | if (!hdr) | |
882 | return; | |
883 | ||
884 | if (hdr->type != BR_MRP_TLV_HEADER_RING_TEST) | |
885 | return; | |
886 | ||
887 | mrp->test_count_miss = 0; | |
888 | ||
889 | /* Notify the userspace that the ring is closed only when the ring is | |
890 | * not closed | |
891 | */ | |
892 | if (mrp->ring_state != BR_MRP_RING_STATE_CLOSED) | |
893 | br_mrp_ring_port_open(port->dev, false);
894 | } |
895 | ||
896 | /* Determine if the test hdr has a better priority than the node */
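/* A numerically lower prio value is better; on equal priority the lower
 * bridge MAC address wins. This appears to mirror the manager election used
 * when an MRA decides whether to keep monitoring or take over as MRM.
 */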
897 | static bool br_mrp_test_better_than_own(struct br_mrp *mrp, | |
898 | struct net_bridge *br, | |
899 | const struct br_mrp_ring_test_hdr *hdr) | |
900 | { | |
901 | u16 prio = be16_to_cpu(hdr->prio); | |
902 | ||
903 | if (prio < mrp->prio || | |
904 | (prio == mrp->prio && | |
905 | ether_addr_to_u64(hdr->sa) < ether_addr_to_u64(br->dev->dev_addr))) | |
906 | return true; | |
907 | ||
908 | return false; | |
909 | } | |
910 | ||
911 | /* Process only MRP Test frames. All the other MRP frames are processed by
912 | * userspace application | |
913 | * note: already called with rcu_read_lock | |
914 | */ | |
915 | static void br_mrp_mra_process(struct br_mrp *mrp, struct net_bridge *br, | |
916 | struct net_bridge_port *port, | |
917 | struct sk_buff *skb) | |
918 | { | |
919 | const struct br_mrp_ring_test_hdr *test_hdr; | |
920 | struct br_mrp_ring_test_hdr _test_hdr; | |
921 | const struct br_mrp_tlv_hdr *hdr; | |
922 | struct br_mrp_tlv_hdr _hdr; | |
923 | ||
924 | /* Each MRP header starts with a version field which is 16 bits. | |
925 | * Therefore skip the version and get directly the TLV header. | |
926 | */ | |
927 | hdr = skb_header_pointer(skb, sizeof(uint16_t), sizeof(_hdr), &_hdr); | |
928 | if (!hdr) | |
929 | return; | |
930 | ||
931 | if (hdr->type != BR_MRP_TLV_HEADER_RING_TEST) | |
932 | return; | |
933 | ||
934 | test_hdr = skb_header_pointer(skb, sizeof(uint16_t) + sizeof(_hdr), | |
935 | sizeof(_test_hdr), &_test_hdr); | |
936 | if (!test_hdr) | |
937 | return; | |
938 | ||
939 | /* Only frames that have a better priority than the node will | |
940 | * clear the miss counter because otherwise the node will need to behave | |
941 | * as MRM. | |
942 | */ | |
943 | if (br_mrp_test_better_than_own(mrp, br, test_hdr)) | |
944 | mrp->test_count_miss = 0; | |
945 | } | |
946 | ||
947 | /* Process only MRP InTest frames. All the other MRP frames are processed by
948 | * userspace application | |
949 | * note: already called with rcu_read_lock | |
950 | */ | |
951 | static bool br_mrp_mim_process(struct br_mrp *mrp, struct net_bridge_port *port, | |
952 | struct sk_buff *skb) | |
953 | { | |
954 | const struct br_mrp_in_test_hdr *in_hdr; | |
955 | struct br_mrp_in_test_hdr _in_hdr; | |
956 | const struct br_mrp_tlv_hdr *hdr; | |
957 | struct br_mrp_tlv_hdr _hdr; | |
958 | ||
959 | /* Each MRP header starts with a version field which is 16 bits. | |
960 | * Therefore skip the version and get directly the TLV header. | |
961 | */ | |
962 | hdr = skb_header_pointer(skb, sizeof(uint16_t), sizeof(_hdr), &_hdr); | |
963 | if (!hdr) | |
964 | return false; | |
965 | ||
966 | /* The check for InTest frame type was already done */ | |
967 | in_hdr = skb_header_pointer(skb, sizeof(uint16_t) + sizeof(_hdr), | |
968 | sizeof(_in_hdr), &_in_hdr); | |
969 | if (!in_hdr) | |
970 | return false; | |
971 | ||
972 | /* It needs to process only its own InTest frames. */
973 | if (mrp->in_id != ntohs(in_hdr->id)) | |
974 | return false; | |
975 | ||
976 | mrp->in_test_count_miss = 0; | |
977 | ||
978 | /* Notify the userspace that the ring is closed only when the ring is | |
979 | * not closed | |
980 | */ | |
981 | if (mrp->in_state != BR_MRP_IN_STATE_CLOSED) | |
982 | br_mrp_in_port_open(port->dev, false); | |
983 | ||
984 | return true; | |
985 | } | |
986 | ||
987 | /* Get the MRP frame type | |
988 | * note: already called with rcu_read_lock | |
989 | */ | |
990 | static u8 br_mrp_get_frame_type(struct sk_buff *skb) | |
991 | { | |
992 | const struct br_mrp_tlv_hdr *hdr; | |
993 | struct br_mrp_tlv_hdr _hdr; | |
994 | ||
995 | /* Each MRP header starts with a version field which is 16 bits. | |
996 | * Therefore skip the version and get directly the TLV header. | |
997 | */ | |
998 | hdr = skb_header_pointer(skb, sizeof(uint16_t), sizeof(_hdr), &_hdr); | |
999 | if (!hdr) | |
1000 | return 0xff; | |
1001 | ||
1002 | return hdr->type; | |
1003 | } | |
1004 | ||
1005 | static bool br_mrp_mrm_behaviour(struct br_mrp *mrp) | |
1006 | { | |
1007 | if (mrp->ring_role == BR_MRP_RING_ROLE_MRM || | |
1008 | (mrp->ring_role == BR_MRP_RING_ROLE_MRA && !mrp->test_monitor)) | |
1009 | return true; | |
1010 | ||
1011 | return false; | |
1012 | } | |
1013 | ||
1014 | static bool br_mrp_mrc_behaviour(struct br_mrp *mrp) | |
1015 | { | |
1016 | if (mrp->ring_role == BR_MRP_RING_ROLE_MRC || | |
1017 | (mrp->ring_role == BR_MRP_RING_ROLE_MRA && mrp->test_monitor)) | |
1018 | return true; | |
1019 | ||
1020 | return false; | |
1021 | } | |
1022 | ||
1023 | /* This will just forward the frame to the other mrp ring ports, depending on | |
1024 | * the frame type, ring role and interconnect role | |
1025 | * note: already called with rcu_read_lock |
1026 | */ | |
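/* Ring frames are either consumed here (MRM, or MRA behaving as MRM) or
 * forwarded between the ring ports. Interconnect frames are additionally
 * steered to or away from the interconnect port depending on the MIM/MIC
 * role. The non-zero return tells the bridge RX path not to process the
 * frame any further.
 */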
1027 | static int br_mrp_rcv(struct net_bridge_port *p, | |
1028 | struct sk_buff *skb, struct net_device *dev) | |
1029 | { | |
1030 | struct net_bridge_port *p_port, *s_port, *i_port = NULL; |
1031 | struct net_bridge_port *p_dst, *s_dst, *i_dst = NULL; | |
1032 | struct net_bridge *br;
1033 | struct br_mrp *mrp; |
1034 | ||
1035 | /* If port is disabled don't accept any frames */ | |
1036 | if (p->state == BR_STATE_DISABLED) | |
1037 | return 0; | |
1038 | ||
1039 | br = p->br; | |
1040 | mrp = br_mrp_find_port(br, p); | |
1041 | if (unlikely(!mrp)) | |
1042 | return 0; | |
1043 | ||
1044 | p_port = rcu_dereference(mrp->p_port); | |
1045 | if (!p_port) | |
1046 | return 0; | |
1047 | p_dst = p_port;
1048 | |
1049 | s_port = rcu_dereference(mrp->s_port); | |
1050 | if (!s_port) | |
1051 | return 0; | |
1052 | s_dst = s_port;
1053 |
1054 | /* If the frame is a ring frame then it is not required to check the |
1055 | * interconnect role and ports to process or forward the frame | |
1056 | */
1057 | if (br_mrp_ring_frame(skb)) { |
1058 | /* If the role is MRM then don't forward the frames */ | |
1059 | if (mrp->ring_role == BR_MRP_RING_ROLE_MRM) { | |
1060 | br_mrp_mrm_process(mrp, p, skb);
1061 | goto no_forward;
1062 | } |
1063 | ||
1064 | /* If the role is MRA then don't forward the frames if it |
1065 | * behaves as MRM node | |
1066 | */ | |
1067 | if (mrp->ring_role == BR_MRP_RING_ROLE_MRA) { | |
1068 | if (!mrp->test_monitor) { | |
1069 | br_mrp_mrm_process(mrp, p, skb); | |
1070 | goto no_forward; | |
1071 | } | |
1072 | ||
1073 | br_mrp_mra_process(mrp, br, p, skb); | |
1074 | } | |
1075 | ||
1076 | goto forward; | |
1077 | } |
1078 | ||
1079 | if (br_mrp_in_frame(skb)) { |
1080 | u8 in_type = br_mrp_get_frame_type(skb); | |
1081 |
1082 | i_port = rcu_dereference(mrp->i_port); |
1083 | i_dst = i_port; | |
1084 |
1085 | /* If the ring port is in the blocking state it should not forward
1086 | * In_Test frames | |
1087 | */ | |
1088 | if (br_mrp_is_ring_port(p_port, s_port, p) && | |
1089 | p->state == BR_STATE_BLOCKING && | |
1090 | in_type == BR_MRP_TLV_HEADER_IN_TEST) | |
1091 | goto no_forward; | |
1092 | ||
1093 | /* Nodes that behave as MRM need to stop forwarding the
1094 |  * frames in case the ring is closed, otherwise there will be a loop.
1095 |  * In this case the frame is not forwarded between the ring ports.
1096 | */ | |
1097 | if (br_mrp_mrm_behaviour(mrp) && | |
1098 | br_mrp_is_ring_port(p_port, s_port, p) && | |
1099 | (s_port->state != BR_STATE_FORWARDING || | |
1100 | p_port->state != BR_STATE_FORWARDING)) { | |
1101 | p_dst = NULL; | |
1102 | s_dst = NULL; | |
1103 | } | |
1104 | ||
1105 | /* A node that behaves as MRC and doesn't have an interconnect
1106 |  * role should forward all frames between the ring ports
1107 |  * because it doesn't have an interconnect port
1108 | */ | |
1109 | if (br_mrp_mrc_behaviour(mrp) && | |
1110 | mrp->in_role == BR_MRP_IN_ROLE_DISABLED) | |
1111 | goto forward; | |
1112 | ||
1113 | if (mrp->in_role == BR_MRP_IN_ROLE_MIM) { | |
1114 | if (in_type == BR_MRP_TLV_HEADER_IN_TEST) { | |
1115 | /* MIM should not forward its own InTest
1116 | * frames | |
1117 | */ | |
1118 | if (br_mrp_mim_process(mrp, p, skb)) { | |
1119 | goto no_forward; | |
1120 | } else { | |
1121 | if (br_mrp_is_ring_port(p_port, s_port, | |
1122 | p)) | |
1123 | i_dst = NULL; | |
1124 | ||
1125 | if (br_mrp_is_in_port(i_port, p)) | |
1126 | goto no_forward; | |
1127 | } | |
1128 | } else { | |
1129 | /* MIM should forward IntLinkChange and | |
1130 | * IntTopoChange between ring ports but MIM | |
1131 | * should not forward IntLinkChange and | |
1132 | * IntTopoChange if the frame was received at | |
1133 | * the interconnect port | |
1134 | */ | |
1135 | if (br_mrp_is_ring_port(p_port, s_port, p)) | |
1136 | i_dst = NULL; | |
1137 | ||
1138 | if (br_mrp_is_in_port(i_port, p)) | |
1139 | goto no_forward; | |
1140 | } | |
1141 | } | |
1142 | ||
1143 | if (mrp->in_role == BR_MRP_IN_ROLE_MIC) { | |
1144 | /* MIC should forward InTest frames on all ports | |
1145 | * regardless of the received port | |
1146 | */ | |
1147 | if (in_type == BR_MRP_TLV_HEADER_IN_TEST) | |
1148 | goto forward; | |
1149 | ||
1150 | /* MIC should forward IntLinkChange frames only if they | |
1151 | * are received on ring ports to all the ports | |
1152 | */ | |
1153 | if (br_mrp_is_ring_port(p_port, s_port, p) && | |
1154 | (in_type == BR_MRP_TLV_HEADER_IN_LINK_UP || | |
1155 | in_type == BR_MRP_TLV_HEADER_IN_LINK_DOWN)) | |
1156 | goto forward; | |
1157 | ||
1158 | /* Should forward the InTopo frames only between the | |
1159 | * ring ports | |
1160 | */ | |
1161 | if (in_type == BR_MRP_TLV_HEADER_IN_TOPO) { | |
1162 | i_dst = NULL; | |
1163 | goto forward; | |
1164 | } | |
1165 | ||
1166 | /* In all the other cases don't forward the frames */ | |
1167 | goto no_forward; | |
1168 | } | |
1169 | } | |
1170 |
1171 | forward: |
1172 | if (p_dst) | |
1173 | br_forward(p_dst, skb, true, false); | |
1174 | if (s_dst) | |
1175 | br_forward(s_dst, skb, true, false); | |
1176 | if (i_dst) | |
1177 | br_forward(i_dst, skb, true, false); | |
1178 |
1179 | no_forward:
1180 | return 1; |
1181 | } | |
1182 | ||
1183 | /* Check if the frame was received on a port that is part of an MRP ring
1184 |  * and if the frame has the MRP ethertype. In that case process the frame,
1185 |  * otherwise do normal forwarding.
1186 | * note: already called with rcu_read_lock | |
1187 | */ | |
90c628dd | 1188 | static int br_mrp_process(struct net_bridge_port *p, struct sk_buff *skb) |
9a9f26e8 HV |
1189 | { |
1190 | /* If there is no MRP instance do normal forwarding */ | |
1191 | if (likely(!(p->flags & BR_MRP_AWARE))) | |
1192 | goto out; | |
1193 | ||
1194 | return br_mrp_rcv(p, skb, p->dev);
1195 | out: |
1196 | return 0; | |
1197 | } | |
1198 | ||
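/* Reports whether at least one MRP instance is configured on this bridge. */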
1199 | bool br_mrp_enabled(struct net_bridge *br) | |
1200 | { | |
1201 | return !hlist_empty(&br->mrp_list);
1202 | }