/* Copyright (c) 2014 Mahesh Bandewar <maheshb@google.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of
 * the License, or (at your option) any later version.
 */

#include "ipvlan.h"

static u32 ipvlan_jhash_secret __read_mostly;

void ipvlan_init_secret(void)
{
        net_get_random_once(&ipvlan_jhash_secret, sizeof(ipvlan_jhash_secret));
}

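/* Account a received packet in the slave's per-CPU stats; on failure only
 * the rx_errs counter is bumped.
 */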
static void ipvlan_count_rx(const struct ipvl_dev *ipvlan,
                            unsigned int len, bool success, bool mcast)
{
        if (likely(success)) {
                struct ipvl_pcpu_stats *pcptr;

                pcptr = this_cpu_ptr(ipvlan->pcpu_stats);
                u64_stats_update_begin(&pcptr->syncp);
                pcptr->rx_pkts++;
                pcptr->rx_bytes += len;
                if (mcast)
                        pcptr->rx_mcast++;
                u64_stats_update_end(&pcptr->syncp);
        } else {
                this_cpu_inc(ipvlan->pcpu_stats->rx_errs);
        }
}

static u8 ipvlan_get_v6_hash(const void *iaddr)
{
        const struct in6_addr *ip6_addr = iaddr;

        return __ipv6_addr_jhash(ip6_addr, ipvlan_jhash_secret) &
               IPVLAN_HASH_MASK;
}

static u8 ipvlan_get_v4_hash(const void *iaddr)
{
        const struct in_addr *ip4_addr = iaddr;

        return jhash_1word(ip4_addr->s_addr, ipvlan_jhash_secret) &
               IPVLAN_HASH_MASK;
}

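/* Look up an IPv4/IPv6 address in the port's hash table; called under RCU. */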
static struct ipvl_addr *ipvlan_ht_addr_lookup(const struct ipvl_port *port,
                                               const void *iaddr, bool is_v6)
{
        struct ipvl_addr *addr;
        u8 hash;

        hash = is_v6 ? ipvlan_get_v6_hash(iaddr) :
               ipvlan_get_v4_hash(iaddr);
        hlist_for_each_entry_rcu(addr, &port->hlhead[hash], hlnode) {
                if (is_v6 && addr->atype == IPVL_IPV6 &&
                    ipv6_addr_equal(&addr->ip6addr, iaddr))
                        return addr;
                else if (!is_v6 && addr->atype == IPVL_IPV4 &&
                         addr->ip4addr.s_addr ==
                         ((struct in_addr *)iaddr)->s_addr)
                        return addr;
        }
        return NULL;
}

void ipvlan_ht_addr_add(struct ipvl_dev *ipvlan, struct ipvl_addr *addr)
{
        struct ipvl_port *port = ipvlan->port;
        u8 hash;

        hash = (addr->atype == IPVL_IPV6) ?
               ipvlan_get_v6_hash(&addr->ip6addr) :
               ipvlan_get_v4_hash(&addr->ip4addr);
        if (hlist_unhashed(&addr->hlnode))
                hlist_add_head_rcu(&addr->hlnode, &port->hlhead[hash]);
}

void ipvlan_ht_addr_del(struct ipvl_addr *addr)
{
        hlist_del_init_rcu(&addr->hlnode);
}

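/* Walk a single slave's address list looking for an exact IPv4/IPv6 match. */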
struct ipvl_addr *ipvlan_find_addr(const struct ipvl_dev *ipvlan,
                                   const void *iaddr, bool is_v6)
{
        struct ipvl_addr *addr;

        list_for_each_entry(addr, &ipvlan->addrs, anode) {
                if ((is_v6 && addr->atype == IPVL_IPV6 &&
                     ipv6_addr_equal(&addr->ip6addr, iaddr)) ||
                    (!is_v6 && addr->atype == IPVL_IPV4 &&
                     addr->ip4addr.s_addr == ((struct in_addr *)iaddr)->s_addr))
                        return addr;
        }
        return NULL;
}

bool ipvlan_addr_busy(struct ipvl_port *port, void *iaddr, bool is_v6)
{
        struct ipvl_dev *ipvlan;

        ASSERT_RTNL();

        list_for_each_entry(ipvlan, &port->ipvlans, pnode) {
                if (ipvlan_find_addr(ipvlan, iaddr, is_v6))
                        return true;
        }
        return false;
}

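/* Validate the L3 header and classify the packet as ARP, IPv4, IPv6 or
 * ICMPv6 (neighbour solicitation); returns a pointer to the L3 header,
 * or NULL if the packet is malformed or of an unsupported type.
 */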
static void *ipvlan_get_L3_hdr(struct sk_buff *skb, int *type)
{
        void *lyr3h = NULL;

        switch (skb->protocol) {
        case htons(ETH_P_ARP): {
                struct arphdr *arph;

                if (unlikely(!pskb_may_pull(skb, sizeof(*arph))))
                        return NULL;
                arph = arp_hdr(skb);
                *type = IPVL_ARP;
                lyr3h = arph;
                break;
        }
        case htons(ETH_P_IP): {
                u32 pktlen;
                struct iphdr *ip4h;

                if (unlikely(!pskb_may_pull(skb, sizeof(*ip4h))))
                        return NULL;
                ip4h = ip_hdr(skb);
                pktlen = ntohs(ip4h->tot_len);
                if (ip4h->ihl < 5 || ip4h->version != 4)
                        return NULL;
                if (skb->len < pktlen || pktlen < (ip4h->ihl * 4))
                        return NULL;
                *type = IPVL_IPV4;
                lyr3h = ip4h;
                break;
        }
        case htons(ETH_P_IPV6): {
                struct ipv6hdr *ip6h;

                if (unlikely(!pskb_may_pull(skb, sizeof(*ip6h))))
                        return NULL;
                ip6h = ipv6_hdr(skb);
                if (ip6h->version != 6)
                        return NULL;
                *type = IPVL_IPV6;
                lyr3h = ip6h;
                /* Only Neighbour Solicitation pkts need different treatment */
                if (ipv6_addr_any(&ip6h->saddr) &&
                    ip6h->nexthdr == NEXTHDR_ICMP) {
                        *type = IPVL_ICMPV6;
                        lyr3h = ip6h + 1;
                }
                break;
        }
        default:
                return NULL;
        }
        return lyr3h;
}

unsigned int ipvlan_mac_hash(const unsigned char *addr)
{
        u32 hash = jhash_1word(__get_unaligned_cpu32(addr+2),
                               ipvlan_jhash_secret);

        return hash & IPVLAN_MAC_FILTER_MASK;
}

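/* Work-queue handler: drain the port backlog and deliver each queued
 * multicast/broadcast packet to every slave whose MAC filter matches;
 * packets that originated locally are also sent out through the master.
 */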
void ipvlan_process_multicast(struct work_struct *work)
{
        struct ipvl_port *port = container_of(work, struct ipvl_port, wq);
        struct ethhdr *ethh;
        struct ipvl_dev *ipvlan;
        struct sk_buff *skb, *nskb;
        struct sk_buff_head list;
        unsigned int len;
        unsigned int mac_hash;
        int ret;
        u8 pkt_type;
        bool hlocal;

        __skb_queue_head_init(&list);

        spin_lock_bh(&port->backlog.lock);
        skb_queue_splice_tail_init(&port->backlog, &list);
        spin_unlock_bh(&port->backlog.lock);

        while ((skb = __skb_dequeue(&list)) != NULL) {
                struct net_device *dev = skb->dev;
                bool consumed = false;

                ethh = eth_hdr(skb);
                hlocal = ether_addr_equal(ethh->h_source, port->dev->dev_addr);
                mac_hash = ipvlan_mac_hash(ethh->h_dest);

                if (ether_addr_equal(ethh->h_dest, port->dev->broadcast))
                        pkt_type = PACKET_BROADCAST;
                else
                        pkt_type = PACKET_MULTICAST;

                rcu_read_lock();
                list_for_each_entry_rcu(ipvlan, &port->ipvlans, pnode) {
                        if (hlocal && (ipvlan->dev == dev)) {
                                consumed = true;
                                continue;
                        }
                        if (!test_bit(mac_hash, ipvlan->mac_filters))
                                continue;
                        if (!(ipvlan->dev->flags & IFF_UP))
                                continue;

                        ret = NET_RX_DROP;
                        len = skb->len + ETH_HLEN;
                        nskb = skb_clone(skb, GFP_ATOMIC);
                        local_bh_disable();
                        if (nskb) {
                                consumed = true;
                                nskb->pkt_type = pkt_type;
                                nskb->dev = ipvlan->dev;
                                if (hlocal)
                                        ret = dev_forward_skb(ipvlan->dev, nskb);
                                else
                                        ret = netif_rx(nskb);
                        }
                        ipvlan_count_rx(ipvlan, len, ret == NET_RX_SUCCESS, true);
                        local_bh_enable();
                }
                rcu_read_unlock();

                if (hlocal) {
                        /* If the packet originated here, send it out. */
                        skb->dev = port->dev;
                        skb->pkt_type = pkt_type;
                        dev_queue_xmit(skb);
                } else {
                        if (consumed)
                                consume_skb(skb);
                        else
                                kfree_skb(skb);
                }
                if (dev)
                        dev_put(dev);
        }
}

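/* Prepare an skb to cross into the namespace of @dev: scrub state tied to
 * the old namespace if the two differ and reassign skb->dev.
 */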
static void ipvlan_skb_crossing_ns(struct sk_buff *skb, struct net_device *dev)
{
        bool xnet = true;

        if (dev)
                xnet = !net_eq(dev_net(skb->dev), dev_net(dev));

        skb_scrub_packet(skb, xnet);
        skb->dev = dev;
}

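/* Deliver an skb to the slave that owns @addr and account it; "local"
 * denotes slave-to-slave traffic, which is forwarded with dev_forward_skb().
 */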
static int ipvlan_rcv_frame(struct ipvl_addr *addr, struct sk_buff **pskb,
                            bool local)
{
        struct ipvl_dev *ipvlan = addr->master;
        struct net_device *dev = ipvlan->dev;
        unsigned int len;
        rx_handler_result_t ret = RX_HANDLER_CONSUMED;
        bool success = false;
        struct sk_buff *skb = *pskb;

        len = skb->len + ETH_HLEN;
        /* Only packets exchanged between two local slaves need to have
         * the device-up check as well as the skb-share check.
         */
        if (local) {
                if (unlikely(!(dev->flags & IFF_UP))) {
                        kfree_skb(skb);
                        goto out;
                }

                skb = skb_share_check(skb, GFP_ATOMIC);
                if (!skb)
                        goto out;

                *pskb = skb;
        }
        ipvlan_skb_crossing_ns(skb, dev);

        if (local) {
                skb->pkt_type = PACKET_HOST;
                if (dev_forward_skb(ipvlan->dev, skb) == NET_RX_SUCCESS)
                        success = true;
        } else {
                ret = RX_HANDLER_ANOTHER;
                success = true;
        }

out:
        ipvlan_count_rx(ipvlan, len, success, false);
        return ret;
}

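/* Pick the relevant address out of the L3 header (destination or source,
 * per @use_dest) and look it up in the port's address hash table.
 */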
static struct ipvl_addr *ipvlan_addr_lookup(struct ipvl_port *port,
                                            void *lyr3h, int addr_type,
                                            bool use_dest)
{
        struct ipvl_addr *addr = NULL;

        if (addr_type == IPVL_IPV6) {
                struct ipv6hdr *ip6h;
                struct in6_addr *i6addr;

                ip6h = (struct ipv6hdr *)lyr3h;
                i6addr = use_dest ? &ip6h->daddr : &ip6h->saddr;
                addr = ipvlan_ht_addr_lookup(port, i6addr, true);
        } else if (addr_type == IPVL_ICMPV6) {
                struct nd_msg *ndmh;
                struct in6_addr *i6addr;

                /* Make sure that the NeighborSolicitation ICMPv6 packets
                 * are handled to avoid DAD issues.
                 */
                ndmh = (struct nd_msg *)lyr3h;
                if (ndmh->icmph.icmp6_type == NDISC_NEIGHBOUR_SOLICITATION) {
                        i6addr = &ndmh->target;
                        addr = ipvlan_ht_addr_lookup(port, i6addr, true);
                }
        } else if (addr_type == IPVL_IPV4) {
                struct iphdr *ip4h;
                __be32 *i4addr;

                ip4h = (struct iphdr *)lyr3h;
                i4addr = use_dest ? &ip4h->daddr : &ip4h->saddr;
                addr = ipvlan_ht_addr_lookup(port, i4addr, false);
        } else if (addr_type == IPVL_ARP) {
                struct arphdr *arph;
                unsigned char *arp_ptr;
                __be32 dip;

                arph = (struct arphdr *)lyr3h;
                arp_ptr = (unsigned char *)(arph + 1);
                if (use_dest)
                        arp_ptr += (2 * port->dev->addr_len) + 4;
                else
                        arp_ptr += port->dev->addr_len;

                memcpy(&dip, arp_ptr, 4);
                addr = ipvlan_ht_addr_lookup(port, &dip, false);
        }

        return addr;
}

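/* Route and transmit an IPv4 packet via the master device's namespace. */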
static int ipvlan_process_v4_outbound(struct sk_buff *skb)
{
        const struct iphdr *ip4h = ip_hdr(skb);
        struct net_device *dev = skb->dev;
        struct net *net = dev_net(dev);
        struct rtable *rt;
        int err, ret = NET_XMIT_DROP;
        struct flowi4 fl4 = {
                .flowi4_oif = dev->ifindex,
                .flowi4_tos = RT_TOS(ip4h->tos),
                .flowi4_flags = FLOWI_FLAG_ANYSRC,
                .daddr = ip4h->daddr,
                .saddr = ip4h->saddr,
        };

        rt = ip_route_output_flow(net, &fl4, NULL);
        if (IS_ERR(rt))
                goto err;

        if (rt->rt_type != RTN_UNICAST && rt->rt_type != RTN_LOCAL) {
                ip_rt_put(rt);
                goto err;
        }
        skb_dst_set(skb, &rt->dst);
        err = ip_local_out(net, skb->sk, skb);
        if (unlikely(net_xmit_eval(err)))
                dev->stats.tx_errors++;
        else
                ret = NET_XMIT_SUCCESS;
        goto out;
err:
        dev->stats.tx_errors++;
        kfree_skb(skb);
out:
        return ret;
}

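/* Route and transmit an IPv6 packet via the master device's namespace. */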
static int ipvlan_process_v6_outbound(struct sk_buff *skb)
{
        const struct ipv6hdr *ip6h = ipv6_hdr(skb);
        struct net_device *dev = skb->dev;
        struct net *net = dev_net(dev);
        struct dst_entry *dst;
        int err, ret = NET_XMIT_DROP;
        struct flowi6 fl6 = {
                .flowi6_iif = dev->ifindex,
                .daddr = ip6h->daddr,
                .saddr = ip6h->saddr,
                .flowi6_flags = FLOWI_FLAG_ANYSRC,
                .flowlabel = ip6_flowinfo(ip6h),
                .flowi6_mark = skb->mark,
                .flowi6_proto = ip6h->nexthdr,
        };

        dst = ip6_route_output(net, NULL, &fl6);
        if (dst->error) {
                ret = dst->error;
                dst_release(dst);
                goto err;
        }
        skb_dst_set(skb, dst);
        err = ip6_local_out(net, skb->sk, skb);
        if (unlikely(net_xmit_eval(err)))
                dev->stats.tx_errors++;
        else
                ret = NET_XMIT_SUCCESS;
        goto out;
err:
        dev->stats.tx_errors++;
        kfree_skb(skb);
out:
        return ret;
}

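/* Transmit path for L3/L3S mode: strip the L2 header and hand the packet
 * to the IPv4/IPv6 output path in the master device's namespace.
 */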
static int ipvlan_process_outbound(struct sk_buff *skb)
{
        struct ethhdr *ethh = eth_hdr(skb);
        int ret = NET_XMIT_DROP;

        /* In this mode we don't care about multicast and broadcast traffic */
        if (is_multicast_ether_addr(ethh->h_dest)) {
                pr_warn_ratelimited("Dropped {multi|broad}cast of type= [%x]\n",
                                    ntohs(skb->protocol));
                kfree_skb(skb);
                goto out;
        }

        /* The ipvlan is a pseudo-L2 device, so the packets that we receive
         * will have an L2 header, which needs to be discarded before the
         * packet is processed further in the net-ns of the main device.
         */
        if (skb_mac_header_was_set(skb)) {
                skb_pull(skb, sizeof(*ethh));
                skb->mac_header = (typeof(skb->mac_header))~0U;
                skb_reset_network_header(skb);
        }

        if (skb->protocol == htons(ETH_P_IPV6))
                ret = ipvlan_process_v6_outbound(skb);
        else if (skb->protocol == htons(ETH_P_IP))
                ret = ipvlan_process_v4_outbound(skb);
        else {
                pr_warn_ratelimited("Dropped outbound packet type=%x\n",
                                    ntohs(skb->protocol));
                kfree_skb(skb);
        }
out:
        return ret;
}

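/* Queue a multicast/broadcast skb on the port backlog for deferred handling
 * by ipvlan_process_multicast(); the skb is dropped if the backlog is full.
 */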
static void ipvlan_multicast_enqueue(struct ipvl_port *port,
                                     struct sk_buff *skb)
{
        if (skb->protocol == htons(ETH_P_PAUSE)) {
                kfree_skb(skb);
                return;
        }

        spin_lock(&port->backlog.lock);
        if (skb_queue_len(&port->backlog) < IPVLAN_QBACKLOG_LIMIT) {
                if (skb->dev)
                        dev_hold(skb->dev);
                __skb_queue_tail(&port->backlog, skb);
                spin_unlock(&port->backlog.lock);
                schedule_work(&port->wq);
        } else {
                spin_unlock(&port->backlog.lock);
                atomic_long_inc(&skb->dev->rx_dropped);
                kfree_skb(skb);
        }
}

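/* L3/L3S transmit: if the destination belongs to another slave on the same
 * port, deliver it directly; otherwise route it out through the master.
 */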
static int ipvlan_xmit_mode_l3(struct sk_buff *skb, struct net_device *dev)
{
        const struct ipvl_dev *ipvlan = netdev_priv(dev);
        void *lyr3h;
        struct ipvl_addr *addr;
        int addr_type;

        lyr3h = ipvlan_get_L3_hdr(skb, &addr_type);
        if (!lyr3h)
                goto out;

        addr = ipvlan_addr_lookup(ipvlan->port, lyr3h, addr_type, true);
        if (addr)
                return ipvlan_rcv_frame(addr, &skb, true);

out:
        ipvlan_skb_crossing_ns(skb, ipvlan->phy_dev);
        return ipvlan_process_outbound(skb);
}

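/* L2 transmit: slave-to-slave and locally destined frames are delivered
 * internally, multicast/broadcast is queued for the work-queue, and
 * everything else is sent out through the master device.
 */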
static int ipvlan_xmit_mode_l2(struct sk_buff *skb, struct net_device *dev)
{
        const struct ipvl_dev *ipvlan = netdev_priv(dev);
        struct ethhdr *eth = eth_hdr(skb);
        struct ipvl_addr *addr;
        void *lyr3h;
        int addr_type;

        if (ether_addr_equal(eth->h_dest, eth->h_source)) {
                lyr3h = ipvlan_get_L3_hdr(skb, &addr_type);
                if (lyr3h) {
                        addr = ipvlan_addr_lookup(ipvlan->port, lyr3h, addr_type, true);
                        if (addr)
                                return ipvlan_rcv_frame(addr, &skb, true);
                }
                skb = skb_share_check(skb, GFP_ATOMIC);
                if (!skb)
                        return NET_XMIT_DROP;

                /* Packet definitely does not belong to any of the
                 * virtual devices, but the dest is local. So forward
                 * the skb for the main-dev. At the RX side we just return
                 * RX_PASS for it to be processed further on the stack.
                 */
                return dev_forward_skb(ipvlan->phy_dev, skb);

        } else if (is_multicast_ether_addr(eth->h_dest)) {
                ipvlan_skb_crossing_ns(skb, NULL);
                ipvlan_multicast_enqueue(ipvlan->port, skb);
                return NET_XMIT_SUCCESS;
        }

        ipvlan_skb_crossing_ns(skb, ipvlan->phy_dev);
        return dev_queue_xmit(skb);
}

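/* Transmit entry point called from the slave's ndo_start_xmit: dispatch to
 * the L2 or L3/L3S transmit path based on the port mode.
 */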
int ipvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct ipvl_dev *ipvlan = netdev_priv(dev);
        struct ipvl_port *port = ipvlan_port_get_rcu_bh(ipvlan->phy_dev);

        if (!port)
                goto out;

        if (unlikely(!pskb_may_pull(skb, sizeof(struct ethhdr))))
                goto out;

        switch (port->mode) {
        case IPVLAN_MODE_L2:
                return ipvlan_xmit_mode_l2(skb, dev);
        case IPVLAN_MODE_L3:
        case IPVLAN_MODE_L3S:
                return ipvlan_xmit_mode_l3(skb, dev);
        }

        /* Should not reach here */
        WARN_ONCE(true, "ipvlan_queue_xmit() called for mode = [%hx]\n",
                  port->mode);
out:
        kfree_skb(skb);
        return NET_XMIT_DROP;
}

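/* Returns false only when the frame was generated by one of this port's own
 * slaves (master's source MAC and an L3 source address owned by a slave);
 * anything else is treated as external.
 */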
static bool ipvlan_external_frame(struct sk_buff *skb, struct ipvl_port *port)
{
        struct ethhdr *eth = eth_hdr(skb);
        struct ipvl_addr *addr;
        void *lyr3h;
        int addr_type;

        if (ether_addr_equal(eth->h_source, skb->dev->dev_addr)) {
                lyr3h = ipvlan_get_L3_hdr(skb, &addr_type);
                if (!lyr3h)
                        return true;

                addr = ipvlan_addr_lookup(port, lyr3h, addr_type, false);
                if (addr)
                        return false;
        }

        return true;
}

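/* L3 receive: deliver the packet to the slave owning the destination
 * address, or pass it up the master's stack if no slave matches.
 */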
static rx_handler_result_t ipvlan_handle_mode_l3(struct sk_buff **pskb,
                                                 struct ipvl_port *port)
{
        void *lyr3h;
        int addr_type;
        struct ipvl_addr *addr;
        struct sk_buff *skb = *pskb;
        rx_handler_result_t ret = RX_HANDLER_PASS;

        lyr3h = ipvlan_get_L3_hdr(skb, &addr_type);
        if (!lyr3h)
                goto out;

        addr = ipvlan_addr_lookup(port, lyr3h, addr_type, true);
        if (addr)
                ret = ipvlan_rcv_frame(addr, pskb, false);

out:
        return ret;
}

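/* L2 receive: multicast/broadcast frames are cloned onto the backlog for
 * per-slave distribution, unicast frames go to the owning slave.
 */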
static rx_handler_result_t ipvlan_handle_mode_l2(struct sk_buff **pskb,
                                                 struct ipvl_port *port)
{
        struct sk_buff *skb = *pskb;
        struct ethhdr *eth = eth_hdr(skb);
        rx_handler_result_t ret = RX_HANDLER_PASS;
        void *lyr3h;
        int addr_type;

        if (is_multicast_ether_addr(eth->h_dest)) {
                if (ipvlan_external_frame(skb, port)) {
                        struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);

                        /* External frames are queued for device local
                         * distribution, but a copy is given to the master
                         * straight away to avoid sending duplicates later
                         * when the work-queue processes this frame. This is
                         * achieved by returning RX_HANDLER_PASS.
                         */
                        if (nskb) {
                                ipvlan_skb_crossing_ns(nskb, NULL);
                                ipvlan_multicast_enqueue(port, nskb);
                        }
                }
        } else {
                struct ipvl_addr *addr;

                lyr3h = ipvlan_get_L3_hdr(skb, &addr_type);
                if (!lyr3h)
                        return ret;

                addr = ipvlan_addr_lookup(port, lyr3h, addr_type, true);
                if (addr)
                        ret = ipvlan_rcv_frame(addr, pskb, false);
        }

        return ret;
}

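/* rx_handler attached to the master device: dispatch received frames
 * according to the port mode; in L3S mode everything is passed up and the
 * re-steering happens in the l3mdev/netfilter hooks below.
 */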
rx_handler_result_t ipvlan_handle_frame(struct sk_buff **pskb)
{
        struct sk_buff *skb = *pskb;
        struct ipvl_port *port = ipvlan_port_get_rcu(skb->dev);

        if (!port)
                return RX_HANDLER_PASS;

        switch (port->mode) {
        case IPVLAN_MODE_L2:
                return ipvlan_handle_mode_l2(pskb, port);
        case IPVLAN_MODE_L3:
                return ipvlan_handle_mode_l3(pskb, port);
        case IPVLAN_MODE_L3S:
                return RX_HANDLER_PASS;
        }

        /* Should not reach here */
        WARN_ONCE(true, "ipvlan_handle_frame() called for mode = [%hx]\n",
                  port->mode);
        kfree_skb(skb);
        return RX_HANDLER_CONSUMED;
}

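/* Map an skb received on an L3S port to the slave address owning its
 * destination; returns NULL if the device is not an ipvlan port in L3S mode.
 */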
static struct ipvl_addr *ipvlan_skb_to_addr(struct sk_buff *skb,
                                            struct net_device *dev)
{
        struct ipvl_addr *addr = NULL;
        struct ipvl_port *port;
        void *lyr3h;
        int addr_type;

        if (!dev || !netif_is_ipvlan_port(dev))
                goto out;

        port = ipvlan_port_get_rcu(dev);
        if (!port || port->mode != IPVLAN_MODE_L3S)
                goto out;

        lyr3h = ipvlan_get_L3_hdr(skb, &addr_type);
        if (!lyr3h)
                goto out;

        addr = ipvlan_addr_lookup(port, lyr3h, addr_type, true);
out:
        return addr;
}

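/* L3S receive hook (l3mdev_l3_rcv): re-route the packet with the owning
 * slave as the input device so it is delivered in that slave's namespace.
 */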
struct sk_buff *ipvlan_l3_rcv(struct net_device *dev, struct sk_buff *skb,
                              u16 proto)
{
        struct ipvl_addr *addr;
        struct net_device *sdev;

        addr = ipvlan_skb_to_addr(skb, dev);
        if (!addr)
                goto out;

        sdev = addr->master->dev;
        switch (proto) {
        case AF_INET:
        {
                int err;
                struct iphdr *ip4h = ip_hdr(skb);

                err = ip_route_input_noref(skb, ip4h->daddr, ip4h->saddr,
                                           ip4h->tos, sdev);
                if (unlikely(err))
                        goto out;
                break;
        }
        case AF_INET6:
        {
                struct dst_entry *dst;
                struct ipv6hdr *ip6h = ipv6_hdr(skb);
                int flags = RT6_LOOKUP_F_HAS_SADDR;
                struct flowi6 fl6 = {
                        .flowi6_iif = sdev->ifindex,
                        .daddr = ip6h->daddr,
                        .saddr = ip6h->saddr,
                        .flowlabel = ip6_flowinfo(ip6h),
                        .flowi6_mark = skb->mark,
                        .flowi6_proto = ip6h->nexthdr,
                };

                skb_dst_drop(skb);
                dst = ip6_route_input_lookup(dev_net(sdev), sdev, &fl6, flags);
                skb_dst_set(skb, dst);
                break;
        }
        default:
                break;
        }

out:
        return skb;
}

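/* Netfilter LOCAL_IN hook for L3S mode: switch skb->dev to the owning slave
 * and account the packet against that slave before it is delivered.
 */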
unsigned int ipvlan_nf_input(void *priv, struct sk_buff *skb,
                             const struct nf_hook_state *state)
{
        struct ipvl_addr *addr;
        unsigned int len;

        addr = ipvlan_skb_to_addr(skb, skb->dev);
        if (!addr)
                goto out;

        skb->dev = addr->master->dev;
        len = skb->len + ETH_HLEN;
        ipvlan_count_rx(addr->master, len, true, false);
out:
        return NF_ACCEPT;
}