2 * DECnet An implementation of the DECnet protocol suite for the LINUX
3 * operating system. DECnet is implemented using the BSD Socket
4 * interface as the means of communication with the user level.
6 * DECnet Routing Functions (Endnode and Router)
8 * Authors: Steve Whitehouse <SteveW@ACM.org>
9 * Eduardo Marcelo Serrat <emserrat@geocities.com>
12 * Steve Whitehouse : Fixes to allow "intra-ethernet" and
13 * "return-to-sender" bits on outgoing
15 * Steve Whitehouse : Timeouts for cached routes.
16 * Steve Whitehouse : Use dst cache for input routes too.
17 * Steve Whitehouse : Fixed error values in dn_send_skb.
18 * Steve Whitehouse : Rework routing functions to better fit
19 * DECnet routing design
20 * Alexey Kuznetsov : New SMP locking
21 * Steve Whitehouse : More SMP locking changes & dn_cache_dump()
22 * Steve Whitehouse : Prerouting NF hook, now really is prerouting.
23 * Fixed possible skb leak in rtnetlink funcs.
24 * Steve Whitehouse : Dave Miller's dynamic hash table sizing and
25 * Alexey Kuznetsov's finer grained locking
27 * Steve Whitehouse : Routing is now starting to look like a
28 * sensible set of code, mainly due to
29 * my copying the IPv4 routing code. The
30 * hooks here are modified and will continue
31 * to evolve for a while.
32 * Steve Whitehouse : Real SMP at last :-) Also new netfilter
33 * stuff. Look out raw sockets, your days
35 * Steve Whitehouse : Added return-to-sender functions. Added
36 * backlog congestion level return codes.
37 * Steve Whitehouse : Fixed bug where routes were set up with
38 * no ref count on net devices.
39 * Steve Whitehouse : RCU for the route cache
40 * Steve Whitehouse : Preparations for the flow cache
41 * Steve Whitehouse : Prepare for nonlinear skbs
44 /******************************************************************************
45 (c) 1995-1998 E.M. Serrat emserrat@geocities.com
47 This program is free software; you can redistribute it and/or modify
48 it under the terms of the GNU General Public License as published by
49 the Free Software Foundation; either version 2 of the License, or
52 This program is distributed in the hope that it will be useful,
53 but WITHOUT ANY WARRANTY; without even the implied warranty of
54 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
55 GNU General Public License for more details.
56 *******************************************************************************/
58 #include <linux/errno.h>
59 #include <linux/types.h>
60 #include <linux/socket.h>
62 #include <linux/kernel.h>
63 #include <linux/sockios.h>
64 #include <linux/net.h>
65 #include <linux/netdevice.h>
66 #include <linux/inet.h>
67 #include <linux/route.h>
68 #include <linux/in_route.h>
69 #include <linux/slab.h>
72 #include <linux/proc_fs.h>
73 #include <linux/seq_file.h>
74 #include <linux/init.h>
75 #include <linux/rtnetlink.h>
76 #include <linux/string.h>
77 #include <linux/netfilter_decnet.h>
78 #include <linux/rcupdate.h>
79 #include <linux/times.h>
80 #include <linux/export.h>
81 #include <asm/errno.h>
82 #include <net/net_namespace.h>
83 #include <net/netlink.h>
84 #include <net/neighbour.h>
87 #include <net/fib_rules.h>
89 #include <net/dn_dev.h>
90 #include <net/dn_nsp.h>
91 #include <net/dn_route.h>
92 #include <net/dn_neigh.h>
93 #include <net/dn_fib.h>
95 struct dn_rt_hash_bucket
97 struct dn_route __rcu *chain;
101 extern struct neigh_table dn_neigh_table;
104 static unsigned char dn_hiord_addr[6] = {0xAA,0x00,0x04,0x00,0x00,0x00};
106 static const int dn_rt_min_delay = 2 * HZ;
107 static const int dn_rt_max_delay = 10 * HZ;
108 static const int dn_rt_mtu_expires = 10 * 60 * HZ;
110 static unsigned long dn_rt_deadline;
112 static int dn_dst_gc(struct dst_ops *ops);
113 static struct dst_entry *dn_dst_check(struct dst_entry *, __u32);
114 static unsigned int dn_dst_default_advmss(const struct dst_entry *dst);
115 static unsigned int dn_dst_mtu(const struct dst_entry *dst);
116 static void dn_dst_destroy(struct dst_entry *);
117 static void dn_dst_ifdown(struct dst_entry *, struct net_device *dev, int how);
118 static struct dst_entry *dn_dst_negative_advice(struct dst_entry *);
119 static void dn_dst_link_failure(struct sk_buff *);
120 static void dn_dst_update_pmtu(struct dst_entry *dst, u32 mtu);
121 static void dn_dst_redirect(struct dst_entry *dst, struct sk_buff *skb);
122 static struct neighbour *dn_dst_neigh_lookup(const struct dst_entry *dst,
125 static int dn_route_input(struct sk_buff *);
126 static void dn_run_flush(unsigned long dummy);
128 static struct dn_rt_hash_bucket *dn_rt_hash_table;
129 static unsigned int dn_rt_hash_mask;
131 static struct timer_list dn_route_timer;
132 static DEFINE_TIMER(dn_rt_flush_timer, dn_run_flush, 0, 0);
133 int decnet_dst_gc_interval = 2;
135 static struct dst_ops dn_dst_ops = {
137 .protocol = cpu_to_be16(ETH_P_DNA_RT),
140 .check = dn_dst_check,
141 .default_advmss = dn_dst_default_advmss,
143 .cow_metrics = dst_cow_metrics_generic,
144 .destroy = dn_dst_destroy,
145 .ifdown = dn_dst_ifdown,
146 .negative_advice = dn_dst_negative_advice,
147 .link_failure = dn_dst_link_failure,
148 .update_pmtu = dn_dst_update_pmtu,
149 .redirect = dn_dst_redirect,
150 .neigh_lookup = dn_dst_neigh_lookup,
153 static void dn_dst_destroy(struct dst_entry *dst)
155 struct dn_route *rt = (struct dn_route *) dst;
158 neigh_release(rt->n);
159 dst_destroy_metrics_generic(dst);
162 static void dn_dst_ifdown(struct dst_entry *dst, struct net_device *dev, int how)
165 struct dn_route *rt = (struct dn_route *) dst;
166 struct neighbour *n = rt->n;
168 if (n && n->dev == dev) {
169 n->dev = dev_net(dev)->loopback_dev;
176 static __inline__ unsigned int dn_hash(__le16 src, __le16 dst)
178 __u16 tmp = (__u16 __force)(src ^ dst);
182 return dn_rt_hash_mask & (unsigned int)tmp;
185 static inline void dnrt_free(struct dn_route *rt)
187 call_rcu_bh(&rt->dst.rcu_head, dst_rcu_free);
190 static inline void dnrt_drop(struct dn_route *rt)
192 dst_release(&rt->dst);
193 call_rcu_bh(&rt->dst.rcu_head, dst_rcu_free);
196 static void dn_dst_check_expire(unsigned long dummy)
200 struct dn_route __rcu **rtp;
201 unsigned long now = jiffies;
202 unsigned long expire = 120 * HZ;
204 for (i = 0; i <= dn_rt_hash_mask; i++) {
205 rtp = &dn_rt_hash_table[i].chain;
207 spin_lock(&dn_rt_hash_table[i].lock);
208 while ((rt = rcu_dereference_protected(*rtp,
209 lockdep_is_held(&dn_rt_hash_table[i].lock))) != NULL) {
210 if (atomic_read(&rt->dst.__refcnt) ||
211 (now - rt->dst.lastuse) < expire) {
212 rtp = &rt->dst.dn_next;
215 *rtp = rt->dst.dn_next;
216 rt->dst.dn_next = NULL;
219 spin_unlock(&dn_rt_hash_table[i].lock);
221 if ((jiffies - now) > 0)
225 mod_timer(&dn_route_timer, now + decnet_dst_gc_interval * HZ);
228 static int dn_dst_gc(struct dst_ops *ops)
231 struct dn_route __rcu **rtp;
233 unsigned long now = jiffies;
234 unsigned long expire = 10 * HZ;
236 for (i = 0; i <= dn_rt_hash_mask; i++) {
238 spin_lock_bh(&dn_rt_hash_table[i].lock);
239 rtp = &dn_rt_hash_table[i].chain;
241 while ((rt = rcu_dereference_protected(*rtp,
242 lockdep_is_held(&dn_rt_hash_table[i].lock))) != NULL) {
243 if (atomic_read(&rt->dst.__refcnt) ||
244 (now - rt->dst.lastuse) < expire) {
245 rtp = &rt->dst.dn_next;
248 *rtp = rt->dst.dn_next;
249 rt->dst.dn_next = NULL;
253 spin_unlock_bh(&dn_rt_hash_table[i].lock);
260 * The DECnet standards don't impose a particular minimum MTU; what they
261 * do insist on is that the routing layer accepts a datagram at least
262 * 230 bytes long. Here we have to subtract the routing header length from
263 * 230 to get the minimum acceptable MTU. If there is no neighbour, then we
264 * assume the worst and use the long header size.
266 * We update both the mtu and the advertised mss (i.e. the segment size we
267 * advertise to the other end).
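 *
 * A rough worked example, assuming the header sizes used elsewhere in this
 * file (21 bytes for the long format, 6 bytes for the short format): with
 * short headers the minimum acceptable MTU comes out at 230 - 6 = 224
 * bytes, and with long headers (the worst case assumed above) it is
 * 230 - 21 = 209 bytes.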
269 static void dn_dst_update_pmtu(struct dst_entry *dst, u32 mtu)
271 struct dn_route *rt = (struct dn_route *) dst;
272 struct neighbour *n = rt->n;
276 dn = n ? rcu_dereference_raw(n->dev->dn_ptr) : NULL;
278 if (dn && dn->use_long == 0)
283 if (dst_metric(dst, RTAX_MTU) > mtu && mtu >= min_mtu) {
284 if (!(dst_metric_locked(dst, RTAX_MTU))) {
285 dst_metric_set(dst, RTAX_MTU, mtu);
286 dst_set_expires(dst, dn_rt_mtu_expires);
288 if (!(dst_metric_locked(dst, RTAX_ADVMSS))) {
289 u32 mss = mtu - DN_MAX_NSP_DATA_HEADER;
290 u32 existing_mss = dst_metric_raw(dst, RTAX_ADVMSS);
291 if (!existing_mss || existing_mss > mss)
292 dst_metric_set(dst, RTAX_ADVMSS, mss);
297 static void dn_dst_redirect(struct dst_entry *dst, struct sk_buff *skb)
302 * When a route has been marked obsolete (e.g. by a routing cache flush).
304 static struct dst_entry *dn_dst_check(struct dst_entry *dst, __u32 cookie)
309 static struct dst_entry *dn_dst_negative_advice(struct dst_entry *dst)
315 static void dn_dst_link_failure(struct sk_buff *skb)
319 static inline int compare_keys(struct flowidn *fl1, struct flowidn *fl2)
321 return ((fl1->daddr ^ fl2->daddr) |
322 (fl1->saddr ^ fl2->saddr) |
323 (fl1->flowidn_mark ^ fl2->flowidn_mark) |
324 (fl1->flowidn_scope ^ fl2->flowidn_scope) |
325 (fl1->flowidn_oif ^ fl2->flowidn_oif) |
326 (fl1->flowidn_iif ^ fl2->flowidn_iif)) == 0;
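/*
 * A note on compare_keys() above: instead of a chain of && comparisons it
 * ORs together the XOR of each pair of flow key fields, so the result is
 * non-zero as soon as any field differs. A sketch of the equivalent, more
 * conventional form (illustration only, not part of the original code):
 *
 *	return fl1->daddr == fl2->daddr &&
 *	       fl1->saddr == fl2->saddr &&
 *	       fl1->flowidn_mark == fl2->flowidn_mark &&
 *	       fl1->flowidn_scope == fl2->flowidn_scope &&
 *	       fl1->flowidn_oif == fl2->flowidn_oif &&
 *	       fl1->flowidn_iif == fl2->flowidn_iif;
 */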
329 static int dn_insert_route(struct dn_route *rt, unsigned int hash, struct dn_route **rp)
331 struct dn_route *rth;
332 struct dn_route __rcu **rthp;
333 unsigned long now = jiffies;
335 rthp = &dn_rt_hash_table[hash].chain;
337 spin_lock_bh(&dn_rt_hash_table[hash].lock);
338 while ((rth = rcu_dereference_protected(*rthp,
339 lockdep_is_held(&dn_rt_hash_table[hash].lock))) != NULL) {
340 if (compare_keys(&rth->fld, &rt->fld)) {
342 *rthp = rth->dst.dn_next;
343 rcu_assign_pointer(rth->dst.dn_next,
344 dn_rt_hash_table[hash].chain);
345 rcu_assign_pointer(dn_rt_hash_table[hash].chain, rth);
347 dst_use(&rth->dst, now);
348 spin_unlock_bh(&dn_rt_hash_table[hash].lock);
354 rthp = &rth->dst.dn_next;
357 rcu_assign_pointer(rt->dst.dn_next, dn_rt_hash_table[hash].chain);
358 rcu_assign_pointer(dn_rt_hash_table[hash].chain, rt);
360 dst_use(&rt->dst, now);
361 spin_unlock_bh(&dn_rt_hash_table[hash].lock);
366 static void dn_run_flush(unsigned long dummy)
369 struct dn_route *rt, *next;
371 for (i = 0; i <= dn_rt_hash_mask; i++) {
372 spin_lock_bh(&dn_rt_hash_table[i].lock);
374 if ((rt = xchg((struct dn_route **)&dn_rt_hash_table[i].chain, NULL)) == NULL)
375 goto nothing_to_declare;
377 for(; rt; rt = next) {
378 next = rcu_dereference_raw(rt->dst.dn_next);
379 RCU_INIT_POINTER(rt->dst.dn_next, NULL);
380 dst_free((struct dst_entry *)rt);
384 spin_unlock_bh(&dn_rt_hash_table[i].lock);
388 static DEFINE_SPINLOCK(dn_rt_flush_lock);
390 void dn_rt_cache_flush(int delay)
392 unsigned long now = jiffies;
393 int user_mode = !in_interrupt();
396 delay = dn_rt_min_delay;
398 spin_lock_bh(&dn_rt_flush_lock);
400 if (del_timer(&dn_rt_flush_timer) && delay > 0 && dn_rt_deadline) {
401 long tmo = (long)(dn_rt_deadline - now);
403 if (user_mode && tmo < dn_rt_max_delay - dn_rt_min_delay)
411 spin_unlock_bh(&dn_rt_flush_lock);
416 if (dn_rt_deadline == 0)
417 dn_rt_deadline = now + dn_rt_max_delay;
419 dn_rt_flush_timer.expires = now + delay;
420 add_timer(&dn_rt_flush_timer);
421 spin_unlock_bh(&dn_rt_flush_lock);
425 * dn_return_short - Return a short packet to its sender
426 * @skb: The packet to return
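 *
 * As the body below shows, the return-request bit is cleared, the
 * return-to-sender bit is set, the hop count is zeroed and the packet is
 * handed to dn_rt_finish_output() marked as PACKET_OUTGOING.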
429 static int dn_return_short(struct sk_buff *skb)
431 struct dn_skb_cb *cb;
436 /* Add back headers */
437 skb_push(skb, skb->data - skb_network_header(skb));
439 if ((skb = skb_unshare(skb, GFP_ATOMIC)) == NULL)
443 /* Skip packet length and point to flags */
445 *ptr++ = (cb->rt_flags & ~DN_RT_F_RQR) | DN_RT_F_RTS;
451 *ptr = 0; /* Zero hop count */
455 skb->pkt_type = PACKET_OUTGOING;
456 dn_rt_finish_output(skb, NULL, NULL);
457 return NET_RX_SUCCESS;
461 * dn_return_long - Return a long packet to its sender
462 * @skb: The long format packet to return
465 static int dn_return_long(struct sk_buff *skb)
467 struct dn_skb_cb *cb;
469 unsigned char *src_addr, *dst_addr;
470 unsigned char tmp[ETH_ALEN];
472 /* Add back all headers */
473 skb_push(skb, skb->data - skb_network_header(skb));
475 if ((skb = skb_unshare(skb, GFP_ATOMIC)) == NULL)
479 /* Ignore packet length and point to flags */
483 if (*ptr & DN_RT_F_PF) {
484 char padlen = (*ptr & ~DN_RT_F_PF);
488 *ptr++ = (cb->rt_flags & ~DN_RT_F_RQR) | DN_RT_F_RTS;
494 *ptr = 0; /* Zero hop count */
496 /* Swap source and destination */
497 memcpy(tmp, src_addr, ETH_ALEN);
498 memcpy(src_addr, dst_addr, ETH_ALEN);
499 memcpy(dst_addr, tmp, ETH_ALEN);
501 skb->pkt_type = PACKET_OUTGOING;
502 dn_rt_finish_output(skb, dst_addr, src_addr);
503 return NET_RX_SUCCESS;
507 * dn_route_rx_packet - Try and find a route for an incoming packet
508 * @skb: The packet to find a route for
510 * Returns: result of input function if route is found, error code otherwise
512 static int dn_route_rx_packet(struct sk_buff *skb)
514 struct dn_skb_cb *cb;
517 if ((err = dn_route_input(skb)) == 0)
518 return dst_input(skb);
521 if (decnet_debug_level & 4) {
522 char *devname = skb->dev ? skb->dev->name : "???";
525 "DECnet: dn_route_rx_packet: rt_flags=0x%02x dev=%s len=%d src=0x%04hx dst=0x%04hx err=%d type=%d\n",
526 (int)cb->rt_flags, devname, skb->len,
527 le16_to_cpu(cb->src), le16_to_cpu(cb->dst),
531 if ((skb->pkt_type == PACKET_HOST) && (cb->rt_flags & DN_RT_F_RQR)) {
532 switch (cb->rt_flags & DN_RT_PKT_MSK) {
533 case DN_RT_PKT_SHORT:
534 return dn_return_short(skb);
536 return dn_return_long(skb);
544 static int dn_route_rx_long(struct sk_buff *skb)
546 struct dn_skb_cb *cb = DN_SKB_CB(skb);
547 unsigned char *ptr = skb->data;
549 if (!pskb_may_pull(skb, 21)) /* 20 for long header, 1 for shortest nsp */
553 skb_reset_transport_header(skb);
555 /* Destination info */
557 cb->dst = dn_eth2dn(ptr);
558 if (memcmp(ptr, dn_hiord_addr, 4) != 0)
565 cb->src = dn_eth2dn(ptr);
566 if (memcmp(ptr, dn_hiord_addr, 4) != 0)
571 cb->hops = *ptr++; /* Visit Count */
573 return NF_HOOK(NFPROTO_DECNET, NF_DN_PRE_ROUTING, skb, skb->dev, NULL,
583 static int dn_route_rx_short(struct sk_buff *skb)
585 struct dn_skb_cb *cb = DN_SKB_CB(skb);
586 unsigned char *ptr = skb->data;
588 if (!pskb_may_pull(skb, 6)) /* 5 for short header + 1 for shortest nsp */
592 skb_reset_transport_header(skb);
594 cb->dst = *(__le16 *)ptr;
596 cb->src = *(__le16 *)ptr;
598 cb->hops = *ptr & 0x3f;
600 return NF_HOOK(NFPROTO_DECNET, NF_DN_PRE_ROUTING, skb, skb->dev, NULL,
608 static int dn_route_discard(struct sk_buff *skb)
611 * I know we drop the packet here, but that's considered success in
615 return NET_RX_SUCCESS;
618 static int dn_route_ptp_hello(struct sk_buff *skb)
621 dn_neigh_pointopoint_hello(skb);
622 return NET_RX_SUCCESS;
625 int dn_route_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev)
627 struct dn_skb_cb *cb;
628 unsigned char flags = 0;
629 __u16 len = le16_to_cpu(*(__le16 *)skb->data);
630 struct dn_dev *dn = rcu_dereference(dev->dn_ptr);
631 unsigned char padlen = 0;
633 if (!net_eq(dev_net(dev), &init_net))
639 if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL)
642 if (!pskb_may_pull(skb, 3))
656 cb->iif = dev->ifindex;
659 * If we have padding, remove it.
661 if (flags & DN_RT_F_PF) {
662 padlen = flags & ~DN_RT_F_PF;
663 if (!pskb_may_pull(skb, padlen + 1))
665 skb_pull(skb, padlen);
669 skb_reset_network_header(skb);
672 * Weed out future DECnet versions
674 if (flags & DN_RT_F_VER)
677 cb->rt_flags = flags;
679 if (decnet_debug_level & 1)
681 "dn_route_rcv: got 0x%02x from %s [%d %d %d]\n",
682 (int)flags, (dev) ? dev->name : "???", len, skb->len,
685 if (flags & DN_RT_PKT_CNTL) {
686 if (unlikely(skb_linearize(skb)))
689 switch (flags & DN_RT_CNTL_MSK) {
691 dn_dev_init_pkt(skb);
694 dn_dev_veri_pkt(skb);
698 if (dn->parms.state != DN_DEV_S_RU)
701 switch (flags & DN_RT_CNTL_MSK) {
703 return NF_HOOK(NFPROTO_DECNET, NF_DN_HELLO,
709 return NF_HOOK(NFPROTO_DECNET, NF_DN_ROUTE,
713 return NF_HOOK(NFPROTO_DECNET, NF_DN_HELLO,
715 dn_neigh_router_hello);
718 return NF_HOOK(NFPROTO_DECNET, NF_DN_HELLO,
720 dn_neigh_endnode_hello);
723 if (dn->parms.state != DN_DEV_S_RU)
726 skb_pull(skb, 1); /* Pull flags */
728 switch (flags & DN_RT_PKT_MSK) {
730 return dn_route_rx_long(skb);
731 case DN_RT_PKT_SHORT:
732 return dn_route_rx_short(skb);
742 static int dn_to_neigh_output(struct sk_buff *skb)
744 struct dst_entry *dst = skb_dst(skb);
745 struct dn_route *rt = (struct dn_route *) dst;
746 struct neighbour *n = rt->n;
748 return n->output(n, skb);
751 static int dn_output(struct sk_buff *skb)
753 struct dst_entry *dst = skb_dst(skb);
754 struct dn_route *rt = (struct dn_route *)dst;
755 struct net_device *dev = dst->dev;
756 struct dn_skb_cb *cb = DN_SKB_CB(skb);
765 cb->src = rt->rt_saddr;
766 cb->dst = rt->rt_daddr;
769 * Always set the Intra-Ethernet bit on all outgoing packets
770 * originated on this node. The only valid flag from the upper layers
771 * is return-to-sender-requested. Set the hop count to 0 too.
773 cb->rt_flags &= ~DN_RT_F_RQR;
774 cb->rt_flags |= DN_RT_F_IE;
777 return NF_HOOK(NFPROTO_DECNET, NF_DN_LOCAL_OUT, skb, NULL, dev,
781 net_dbg_ratelimited("dn_output: This should not happen\n");
788 static int dn_forward(struct sk_buff *skb)
790 struct dn_skb_cb *cb = DN_SKB_CB(skb);
791 struct dst_entry *dst = skb_dst(skb);
792 struct dn_dev *dn_db = rcu_dereference(dst->dev->dn_ptr);
795 #ifdef CONFIG_NETFILTER
796 struct net_device *dev = skb->dev;
799 if (skb->pkt_type != PACKET_HOST)
802 /* Ensure that we have enough space for headers */
803 rt = (struct dn_route *)skb_dst(skb);
804 header_len = dn_db->use_long ? 21 : 6;
805 if (skb_cow(skb, LL_RESERVED_SPACE(rt->dst.dev)+header_len))
809 * Hop count exceeded.
814 skb->dev = rt->dst.dev;
817 * If the packet goes out the same interface it came in on, then set
818 * the Intra-Ethernet bit. This has no effect for short
819 * packets, so we don't need to test for them here.
821 cb->rt_flags &= ~DN_RT_F_IE;
822 if (rt->rt_flags & RTCF_DOREDIRECT)
823 cb->rt_flags |= DN_RT_F_IE;
825 return NF_HOOK(NFPROTO_DECNET, NF_DN_FORWARD, skb, dev, skb->dev,
834 * Used to catch bugs. This should never normally get called.
837 static int dn_rt_bug(struct sk_buff *skb)
839 struct dn_skb_cb *cb = DN_SKB_CB(skb);
841 net_dbg_ratelimited("dn_rt_bug: skb from:%04x to:%04x\n",
842 le16_to_cpu(cb->src), le16_to_cpu(cb->dst));
849 static unsigned int dn_dst_default_advmss(const struct dst_entry *dst)
851 return dn_mss_from_pmtu(dst->dev, dst_mtu(dst));
854 static unsigned int dn_dst_mtu(const struct dst_entry *dst)
856 unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);
858 return mtu ? : dst->dev->mtu;
861 static struct neighbour *dn_dst_neigh_lookup(const struct dst_entry *dst,
865 return __neigh_lookup_errno(&dn_neigh_table, daddr, dst->dev);
868 static int dn_rt_set_next_hop(struct dn_route *rt, struct dn_fib_res *res)
870 struct dn_fib_info *fi = res->fi;
871 struct net_device *dev = rt->dst.dev;
872 unsigned int mss_metric;
876 if (DN_FIB_RES_GW(*res) &&
877 DN_FIB_RES_NH(*res).nh_scope == RT_SCOPE_LINK)
878 rt->rt_gateway = DN_FIB_RES_GW(*res);
879 dst_init_metrics(&rt->dst, fi->fib_metrics, true);
881 rt->rt_type = res->type;
883 if (dev != NULL && rt->n == NULL) {
884 n = __neigh_lookup_errno(&dn_neigh_table, &rt->rt_gateway, dev);
890 if (dst_metric(&rt->dst, RTAX_MTU) > rt->dst.dev->mtu)
891 dst_metric_set(&rt->dst, RTAX_MTU, rt->dst.dev->mtu);
892 mss_metric = dst_metric_raw(&rt->dst, RTAX_ADVMSS);
894 unsigned int mss = dn_mss_from_pmtu(dev, dst_mtu(&rt->dst));
895 if (mss_metric > mss)
896 dst_metric_set(&rt->dst, RTAX_ADVMSS, mss);
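/*
 * dn_match_addr() below is used by dnet_select_source() as a closeness
 * score: it appears to return the number of most-significant bits that the
 * two addresses have in common, so the interface address sharing the
 * longest prefix with the destination wins.
 */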
901 static inline int dn_match_addr(__le16 addr1, __le16 addr2)
903 __u16 tmp = le16_to_cpu(addr1) ^ le16_to_cpu(addr2);
912 static __le16 dnet_select_source(const struct net_device *dev, __le16 daddr, int scope)
915 struct dn_dev *dn_db;
916 struct dn_ifaddr *ifa;
921 dn_db = rcu_dereference(dev->dn_ptr);
922 for (ifa = rcu_dereference(dn_db->ifa_list);
924 ifa = rcu_dereference(ifa->ifa_next)) {
925 if (ifa->ifa_scope > scope)
928 saddr = ifa->ifa_local;
931 ret = dn_match_addr(daddr, ifa->ifa_local);
932 if (ret > best_match)
933 saddr = ifa->ifa_local;
935 saddr = ifa->ifa_local;
942 static inline __le16 __dn_fib_res_prefsrc(struct dn_fib_res *res)
944 return dnet_select_source(DN_FIB_RES_DEV(*res), DN_FIB_RES_GW(*res), res->scope);
947 static inline __le16 dn_fib_rules_map_destination(__le16 daddr, struct dn_fib_res *res)
949 __le16 mask = dnet_make_mask(res->prefixlen);
950 return (daddr&~mask)|res->fi->fib_nh->nh_gw;
953 static int dn_route_output_slow(struct dst_entry **pprt, const struct flowidn *oldflp, int try_hard)
955 struct flowidn fld = {
956 .daddr = oldflp->daddr,
957 .saddr = oldflp->saddr,
958 .flowidn_scope = RT_SCOPE_UNIVERSE,
959 .flowidn_mark = oldflp->flowidn_mark,
960 .flowidn_iif = init_net.loopback_dev->ifindex,
961 .flowidn_oif = oldflp->flowidn_oif,
963 struct dn_route *rt = NULL;
964 struct net_device *dev_out = NULL, *dev;
965 struct neighbour *neigh = NULL;
967 unsigned int flags = 0;
968 struct dn_fib_res res = { .fi = NULL, .type = RTN_UNICAST };
973 if (decnet_debug_level & 16)
975 "dn_route_output_slow: dst=%04x src=%04x mark=%d"
976 " iif=%d oif=%d\n", le16_to_cpu(oldflp->daddr),
977 le16_to_cpu(oldflp->saddr),
978 oldflp->flowidn_mark, init_net.loopback_dev->ifindex,
979 oldflp->flowidn_oif);
981 /* If we have an output interface, verify it's a DECnet device */
982 if (oldflp->flowidn_oif) {
983 dev_out = dev_get_by_index(&init_net, oldflp->flowidn_oif);
985 if (dev_out && dev_out->dn_ptr == NULL) {
993 /* If we have a source address, verify that it's a local address */
995 err = -EADDRNOTAVAIL;
998 if (dn_dev_islocal(dev_out, oldflp->saddr))
1004 for_each_netdev_rcu(&init_net, dev) {
1007 if (!dn_dev_islocal(dev, oldflp->saddr))
1009 if ((dev->flags & IFF_LOOPBACK) &&
1011 !dn_dev_islocal(dev, oldflp->daddr))
1018 if (dev_out == NULL)
1025 /* No destination? Assume it's local */
1027 fld.daddr = fld.saddr;
1029 err = -EADDRNOTAVAIL;
1032 dev_out = init_net.loopback_dev;
1036 fld.saddr = dnet_select_source(dev_out, 0,
1041 fld.flowidn_oif = init_net.loopback_dev->ifindex;
1042 res.type = RTN_LOCAL;
1046 if (decnet_debug_level & 16)
1048 "dn_route_output_slow: initial checks complete."
1049 " dst=%04x src=%04x oif=%d try_hard=%d\n",
1050 le16_to_cpu(fld.daddr), le16_to_cpu(fld.saddr),
1051 fld.flowidn_oif, try_hard);
1054 * N.B. If the kernel is compiled without router support then
1055 * dn_fib_lookup() will evaluate to non-zero, so this if () block
1056 * will always be executed.
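 * (On endnode-only builds dn_fib_lookup() is presumably stubbed out in
 * dn_fib.h to return an error such as -ESRCH, which is what makes the
 * condition below always true there.)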
1059 if (try_hard || (err = dn_fib_lookup(&fld, &res)) != 0) {
1060 struct dn_dev *dn_db;
1064 * Here the fallback is basically the standard algorithm for
1065 * routing in endnodes which is described in the DECnet routing
1068 * If we are not trying hard, look in neighbour cache.
1069 * The result is tested to ensure that if a specific output
1070 * device/source address was requested, then we honour that
1074 neigh = neigh_lookup_nodev(&dn_neigh_table, &init_net, &fld.daddr);
1076 if ((oldflp->flowidn_oif &&
1077 (neigh->dev->ifindex != oldflp->flowidn_oif)) ||
1079 (!dn_dev_islocal(neigh->dev,
1081 neigh_release(neigh);
1086 if (dn_dev_islocal(neigh->dev, fld.daddr)) {
1087 dev_out = init_net.loopback_dev;
1088 res.type = RTN_LOCAL;
1090 dev_out = neigh->dev;
1098 /* Not there? Perhaps it's a local address */
1099 if (dev_out == NULL)
1100 dev_out = dn_dev_get_default();
1102 if (dev_out == NULL)
1104 dn_db = rcu_dereference_raw(dev_out->dn_ptr);
1105 /* Possible improvement - check all devices for local addr */
1106 if (dn_dev_islocal(dev_out, fld.daddr)) {
1108 dev_out = init_net.loopback_dev;
1110 res.type = RTN_LOCAL;
1113 /* Not local either.... try sending it to the default router */
1114 neigh = neigh_clone(dn_db->router);
1115 BUG_ON(neigh && neigh->dev != dev_out);
1117 /* Ok then, we assume it's directly connected and move on */
1120 gateway = ((struct dn_neigh *)neigh)->addr;
1122 gateway = fld.daddr;
1123 if (fld.saddr == 0) {
1124 fld.saddr = dnet_select_source(dev_out, gateway,
1125 res.type == RTN_LOCAL ?
1128 if (fld.saddr == 0 && res.type != RTN_LOCAL)
1131 fld.flowidn_oif = dev_out->ifindex;
1136 if (res.type == RTN_NAT)
1139 if (res.type == RTN_LOCAL) {
1141 fld.saddr = fld.daddr;
1144 dev_out = init_net.loopback_dev;
1146 fld.flowidn_oif = dev_out->ifindex;
1148 dn_fib_info_put(res.fi);
1153 if (res.fi->fib_nhs > 1 && fld.flowidn_oif == 0)
1154 dn_fib_select_multipath(&fld, &res);
1157 * We could add some logic to deal with default routes here and
1158 * get rid of some of the special casing above.
1162 fld.saddr = DN_FIB_RES_PREFSRC(res);
1166 dev_out = DN_FIB_RES_DEV(res);
1168 fld.flowidn_oif = dev_out->ifindex;
1169 gateway = DN_FIB_RES_GW(res);
1172 if (dev_out->flags & IFF_LOOPBACK)
1173 flags |= RTCF_LOCAL;
1175 rt = dst_alloc(&dn_dst_ops, dev_out, 1, 0, DST_HOST);
1179 memset(&rt->fld, 0, sizeof(rt->fld));
1180 rt->fld.saddr = oldflp->saddr;
1181 rt->fld.daddr = oldflp->daddr;
1182 rt->fld.flowidn_oif = oldflp->flowidn_oif;
1183 rt->fld.flowidn_iif = 0;
1184 rt->fld.flowidn_mark = oldflp->flowidn_mark;
1186 rt->rt_saddr = fld.saddr;
1187 rt->rt_daddr = fld.daddr;
1188 rt->rt_gateway = gateway ? gateway : fld.daddr;
1189 rt->rt_local_src = fld.saddr;
1191 rt->rt_dst_map = fld.daddr;
1192 rt->rt_src_map = fld.saddr;
1197 rt->dst.lastuse = jiffies;
1198 rt->dst.output = dn_output;
1199 rt->dst.input = dn_rt_bug;
1200 rt->rt_flags = flags;
1201 if (flags & RTCF_LOCAL)
1202 rt->dst.input = dn_nsp_rx;
1204 err = dn_rt_set_next_hop(rt, &res);
1208 hash = dn_hash(rt->fld.saddr, rt->fld.daddr);
1209 dn_insert_route(rt, hash, (struct dn_route **)pprt);
1213 neigh_release(neigh);
1215 dn_fib_res_put(&res);
1222 err = -EADDRNOTAVAIL;
1237 * N.B. The flags may be moved into the flowi at some future stage.
1239 static int __dn_route_output_key(struct dst_entry **pprt, const struct flowidn *flp, int flags)
1241 unsigned int hash = dn_hash(flp->saddr, flp->daddr);
1242 struct dn_route *rt = NULL;
1244 if (!(flags & MSG_TRYHARD)) {
1246 for (rt = rcu_dereference_bh(dn_rt_hash_table[hash].chain); rt;
1247 rt = rcu_dereference_bh(rt->dst.dn_next)) {
1248 if ((flp->daddr == rt->fld.daddr) &&
1249 (flp->saddr == rt->fld.saddr) &&
1250 (flp->flowidn_mark == rt->fld.flowidn_mark) &&
1251 dn_is_output_route(rt) &&
1252 (rt->fld.flowidn_oif == flp->flowidn_oif)) {
1253 dst_use(&rt->dst, jiffies);
1254 rcu_read_unlock_bh();
1259 rcu_read_unlock_bh();
1262 return dn_route_output_slow(pprt, flp, flags);
1265 static int dn_route_output_key(struct dst_entry **pprt, struct flowidn *flp, int flags)
1269 err = __dn_route_output_key(pprt, flp, flags);
1270 if (err == 0 && flp->flowidn_proto) {
1271 *pprt = xfrm_lookup(&init_net, *pprt,
1272 flowidn_to_flowi(flp), NULL, 0);
1273 if (IS_ERR(*pprt)) {
1274 err = PTR_ERR(*pprt);
1281 int dn_route_output_sock(struct dst_entry **pprt, struct flowidn *fl, struct sock *sk, int flags)
1285 err = __dn_route_output_key(pprt, fl, flags & MSG_TRYHARD);
1286 if (err == 0 && fl->flowidn_proto) {
1287 if (!(flags & MSG_DONTWAIT))
1288 fl->flowidn_flags |= FLOWI_FLAG_CAN_SLEEP;
1289 *pprt = xfrm_lookup(&init_net, *pprt,
1290 flowidn_to_flowi(fl), sk, 0);
1291 if (IS_ERR(*pprt)) {
1292 err = PTR_ERR(*pprt);
1299 static int dn_route_input_slow(struct sk_buff *skb)
1301 struct dn_route *rt = NULL;
1302 struct dn_skb_cb *cb = DN_SKB_CB(skb);
1303 struct net_device *in_dev = skb->dev;
1304 struct net_device *out_dev = NULL;
1305 struct dn_dev *dn_db;
1306 struct neighbour *neigh = NULL;
1310 __le16 local_src = 0;
1311 struct flowidn fld = {
1314 .flowidn_scope = RT_SCOPE_UNIVERSE,
1315 .flowidn_mark = skb->mark,
1316 .flowidn_iif = skb->dev->ifindex,
1318 struct dn_fib_res res = { .fi = NULL, .type = RTN_UNREACHABLE };
1324 if ((dn_db = rcu_dereference(in_dev->dn_ptr)) == NULL)
1327 /* Zero source addresses are not allowed */
1332 * In this case we've just received a packet from a source
1333 * outside ourselves pretending to come from us. We don't
1334 * allow it any further to prevent routing loops, spoofing and
1335 * other nasties. Loopback packets already have the dst attached
1336 * so this only affects packets which have originated elsewhere.
1339 if (dn_dev_islocal(in_dev, cb->src))
1342 err = dn_fib_lookup(&fld, &res);
1347 * Is the destination us ?
1349 if (!dn_dev_islocal(in_dev, cb->dst))
1352 res.type = RTN_LOCAL;
1354 __le16 src_map = fld.saddr;
1357 out_dev = DN_FIB_RES_DEV(res);
1358 if (out_dev == NULL) {
1359 net_crit_ratelimited("Bug in dn_route_input_slow() No output device\n");
1365 src_map = fld.saddr; /* no NAT support for now */
1367 gateway = DN_FIB_RES_GW(res);
1368 if (res.type == RTN_NAT) {
1369 fld.daddr = dn_fib_rules_map_destination(fld.daddr, &res);
1370 dn_fib_res_put(&res);
1372 if (dn_fib_lookup(&fld, &res))
1375 if (res.type != RTN_UNICAST)
1378 gateway = fld.daddr;
1380 fld.saddr = src_map;
1386 * Forwarding check here; we only check for forwarding
1387 * being turned off. If you want to forward only within the
1388 * area, it's up to you to set up the routing tables correctly.
1391 if (dn_db->parms.forwarding == 0)
1394 if (res.fi->fib_nhs > 1 && fld.flowidn_oif == 0)
1395 dn_fib_select_multipath(&fld, &res);
1398 * Check for out_dev == in_dev. We use the RTCF_DOREDIRECT
1399 * flag as a hint to set the intra-ethernet bit when
1400 * forwarding. If we've got NAT in operation, we don't do
1401 * this optimisation.
1403 if (out_dev == in_dev && !(flags & RTCF_NAT))
1404 flags |= RTCF_DOREDIRECT;
1406 local_src = DN_FIB_RES_PREFSRC(res);
1409 case RTN_UNREACHABLE:
1412 flags |= RTCF_LOCAL;
1413 fld.saddr = cb->dst;
1414 fld.daddr = cb->src;
1416 /* Routing tables gave us a gateway */
1420 /* Packet was intra-ethernet, so we know it's on-link */
1421 if (cb->rt_flags & DN_RT_F_IE) {
1423 flags |= RTCF_DIRECTSRC;
1427 /* Use the default router if there is one */
1428 neigh = neigh_clone(dn_db->router);
1430 gateway = ((struct dn_neigh *)neigh)->addr;
1434 /* Close eyes and pray */
1436 flags |= RTCF_DIRECTSRC;
1443 rt = dst_alloc(&dn_dst_ops, out_dev, 0, 0, DST_HOST);
1447 memset(&rt->fld, 0, sizeof(rt->fld));
1448 rt->rt_saddr = fld.saddr;
1449 rt->rt_daddr = fld.daddr;
1450 rt->rt_gateway = fld.daddr;
1452 rt->rt_gateway = gateway;
1453 rt->rt_local_src = local_src ? local_src : rt->rt_saddr;
1455 rt->rt_dst_map = fld.daddr;
1456 rt->rt_src_map = fld.saddr;
1458 rt->fld.saddr = cb->src;
1459 rt->fld.daddr = cb->dst;
1460 rt->fld.flowidn_oif = 0;
1461 rt->fld.flowidn_iif = in_dev->ifindex;
1462 rt->fld.flowidn_mark = fld.flowidn_mark;
1465 rt->dst.lastuse = jiffies;
1466 rt->dst.output = dn_rt_bug;
1469 rt->dst.input = dn_forward;
1472 rt->dst.output = dn_output;
1473 rt->dst.input = dn_nsp_rx;
1474 rt->dst.dev = in_dev;
1475 flags |= RTCF_LOCAL;
1478 case RTN_UNREACHABLE:
1480 rt->dst.input = dst_discard;
1482 rt->rt_flags = flags;
1484 err = dn_rt_set_next_hop(rt, &res);
1488 hash = dn_hash(rt->fld.saddr, rt->fld.daddr);
1489 dn_insert_route(rt, hash, &rt);
1490 skb_dst_set(skb, &rt->dst);
1494 neigh_release(neigh);
1496 dn_fib_res_put(&res);
1516 static int dn_route_input(struct sk_buff *skb)
1518 struct dn_route *rt;
1519 struct dn_skb_cb *cb = DN_SKB_CB(skb);
1520 unsigned int hash = dn_hash(cb->src, cb->dst);
1526 for(rt = rcu_dereference(dn_rt_hash_table[hash].chain); rt != NULL;
1527 rt = rcu_dereference(rt->dst.dn_next)) {
1528 if ((rt->fld.saddr == cb->src) &&
1529 (rt->fld.daddr == cb->dst) &&
1530 (rt->fld.flowidn_oif == 0) &&
1531 (rt->fld.flowidn_mark == skb->mark) &&
1532 (rt->fld.flowidn_iif == cb->iif)) {
1533 dst_use(&rt->dst, jiffies);
1535 skb_dst_set(skb, (struct dst_entry *)rt);
1541 return dn_route_input_slow(skb);
1544 static int dn_rt_fill_info(struct sk_buff *skb, u32 pid, u32 seq,
1545 int event, int nowait, unsigned int flags)
1547 struct dn_route *rt = (struct dn_route *)skb_dst(skb);
1549 struct nlmsghdr *nlh;
1552 nlh = nlmsg_put(skb, pid, seq, event, sizeof(*r), flags);
1556 r = nlmsg_data(nlh);
1557 r->rtm_family = AF_DECnet;
1558 r->rtm_dst_len = 16;
1561 r->rtm_table = RT_TABLE_MAIN;
1562 r->rtm_type = rt->rt_type;
1563 r->rtm_flags = (rt->rt_flags & ~0xFFFF) | RTM_F_CLONED;
1564 r->rtm_scope = RT_SCOPE_UNIVERSE;
1565 r->rtm_protocol = RTPROT_UNSPEC;
1567 if (rt->rt_flags & RTCF_NOTIFY)
1568 r->rtm_flags |= RTM_F_NOTIFY;
1570 if (nla_put_u32(skb, RTA_TABLE, RT_TABLE_MAIN) < 0 ||
1571 nla_put_le16(skb, RTA_DST, rt->rt_daddr) < 0)
1574 if (rt->fld.saddr) {
1575 r->rtm_src_len = 16;
1576 if (nla_put_le16(skb, RTA_SRC, rt->fld.saddr) < 0)
1580 nla_put_u32(skb, RTA_OIF, rt->dst.dev->ifindex) < 0)
1584 * Note to self - change this if input routes reverse direction when
1585 * they deal only with inputs and not with replies like they do
1588 if (nla_put_le16(skb, RTA_PREFSRC, rt->rt_local_src) < 0)
1591 if (rt->rt_daddr != rt->rt_gateway &&
1592 nla_put_le16(skb, RTA_GATEWAY, rt->rt_gateway) < 0)
1595 if (rtnetlink_put_metrics(skb, dst_metrics_ptr(&rt->dst)) < 0)
1598 expires = rt->dst.expires ? rt->dst.expires - jiffies : 0;
1599 if (rtnl_put_cacheinfo(skb, &rt->dst, 0, expires,
1603 if (dn_is_input_route(rt) &&
1604 nla_put_u32(skb, RTA_IIF, rt->fld.flowidn_iif) < 0)
1607 return nlmsg_end(skb, nlh);
1610 nlmsg_cancel(skb, nlh);
1615 * This is called by both endnodes and routers now.
1617 static int dn_cache_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh, void *arg)
1619 struct net *net = sock_net(in_skb->sk);
1620 struct rtattr **rta = arg;
1621 struct rtmsg *rtm = nlmsg_data(nlh);
1622 struct dn_route *rt = NULL;
1623 struct dn_skb_cb *cb;
1625 struct sk_buff *skb;
1628 if (!net_eq(net, &init_net))
1631 memset(&fld, 0, sizeof(fld));
1632 fld.flowidn_proto = DNPROTO_NSP;
1634 skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
1637 skb_reset_mac_header(skb);
1638 cb = DN_SKB_CB(skb);
1641 memcpy(&fld.saddr, RTA_DATA(rta[RTA_SRC-1]), 2);
1643 memcpy(&fld.daddr, RTA_DATA(rta[RTA_DST-1]), 2);
1645 memcpy(&fld.flowidn_iif, RTA_DATA(rta[RTA_IIF-1]), sizeof(int));
1647 if (fld.flowidn_iif) {
1648 struct net_device *dev;
1649 if ((dev = dev_get_by_index(&init_net, fld.flowidn_iif)) == NULL) {
1658 skb->protocol = htons(ETH_P_DNA_RT);
1660 cb->src = fld.saddr;
1661 cb->dst = fld.daddr;
1663 err = dn_route_input(skb);
1665 memset(cb, 0, sizeof(struct dn_skb_cb));
1666 rt = (struct dn_route *)skb_dst(skb);
1667 if (!err && rt->dst.error)
1668 err = rt->dst.error;
1671 if (rta[RTA_OIF - 1])
1672 memcpy(&oif, RTA_DATA(rta[RTA_OIF - 1]), sizeof(int));
1673 fld.flowidn_oif = oif;
1674 err = dn_route_output_key((struct dst_entry **)&rt, &fld, 0);
1682 skb_dst_set(skb, &rt->dst);
1683 if (rtm->rtm_flags & RTM_F_NOTIFY)
1684 rt->rt_flags |= RTCF_NOTIFY;
1686 err = dn_rt_fill_info(skb, NETLINK_CB(in_skb).pid, nlh->nlmsg_seq, RTM_NEWROUTE, 0, 0);
1695 return rtnl_unicast(skb, &init_net, NETLINK_CB(in_skb).pid);
1703 * For routers, this is called from dn_fib_dump, but for endnodes it's
1704 * called directly from the rtnetlink dispatch table.
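 * (See the rtnl_register() calls guarded by CONFIG_DECNET_ROUTER at the
 * end of this file for how the two cases are wired up.)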
1706 int dn_cache_dump(struct sk_buff *skb, struct netlink_callback *cb)
1708 struct net *net = sock_net(skb->sk);
1709 struct dn_route *rt;
1714 if (!net_eq(net, &init_net))
1717 if (nlmsg_len(cb->nlh) < sizeof(struct rtmsg))
1720 rtm = nlmsg_data(cb->nlh);
1721 if (!(rtm->rtm_flags & RTM_F_CLONED))
1725 s_idx = idx = cb->args[1];
1726 for(h = 0; h <= dn_rt_hash_mask; h++) {
1732 for(rt = rcu_dereference_bh(dn_rt_hash_table[h].chain), idx = 0;
1734 rt = rcu_dereference_bh(rt->dst.dn_next), idx++) {
1737 skb_dst_set(skb, dst_clone(&rt->dst));
1738 if (dn_rt_fill_info(skb, NETLINK_CB(cb->skb).pid,
1739 cb->nlh->nlmsg_seq, RTM_NEWROUTE,
1740 1, NLM_F_MULTI) <= 0) {
1742 rcu_read_unlock_bh();
1747 rcu_read_unlock_bh();
1756 #ifdef CONFIG_PROC_FS
1757 struct dn_rt_cache_iter_state {
1761 static struct dn_route *dn_rt_cache_get_first(struct seq_file *seq)
1763 struct dn_route *rt = NULL;
1764 struct dn_rt_cache_iter_state *s = seq->private;
1766 for(s->bucket = dn_rt_hash_mask; s->bucket >= 0; --s->bucket) {
1768 rt = rcu_dereference_bh(dn_rt_hash_table[s->bucket].chain);
1771 rcu_read_unlock_bh();
1776 static struct dn_route *dn_rt_cache_get_next(struct seq_file *seq, struct dn_route *rt)
1778 struct dn_rt_cache_iter_state *s = seq->private;
1780 rt = rcu_dereference_bh(rt->dst.dn_next);
1782 rcu_read_unlock_bh();
1783 if (--s->bucket < 0)
1786 rt = rcu_dereference_bh(dn_rt_hash_table[s->bucket].chain);
1791 static void *dn_rt_cache_seq_start(struct seq_file *seq, loff_t *pos)
1793 struct dn_route *rt = dn_rt_cache_get_first(seq);
1796 while(*pos && (rt = dn_rt_cache_get_next(seq, rt)))
1799 return *pos ? NULL : rt;
1802 static void *dn_rt_cache_seq_next(struct seq_file *seq, void *v, loff_t *pos)
1804 struct dn_route *rt = dn_rt_cache_get_next(seq, v);
1809 static void dn_rt_cache_seq_stop(struct seq_file *seq, void *v)
1812 rcu_read_unlock_bh();
1815 static int dn_rt_cache_seq_show(struct seq_file *seq, void *v)
1817 struct dn_route *rt = v;
1818 char buf1[DN_ASCBUF_LEN], buf2[DN_ASCBUF_LEN];
1820 seq_printf(seq, "%-8s %-7s %-7s %04d %04d %04d\n",
1821 rt->dst.dev ? rt->dst.dev->name : "*",
1822 dn_addr2asc(le16_to_cpu(rt->rt_daddr), buf1),
1823 dn_addr2asc(le16_to_cpu(rt->rt_saddr), buf2),
1824 atomic_read(&rt->dst.__refcnt),
1829 static const struct seq_operations dn_rt_cache_seq_ops = {
1830 .start = dn_rt_cache_seq_start,
1831 .next = dn_rt_cache_seq_next,
1832 .stop = dn_rt_cache_seq_stop,
1833 .show = dn_rt_cache_seq_show,
1836 static int dn_rt_cache_seq_open(struct inode *inode, struct file *file)
1838 return seq_open_private(file, &dn_rt_cache_seq_ops,
1839 sizeof(struct dn_rt_cache_iter_state));
1842 static const struct file_operations dn_rt_cache_seq_fops = {
1843 .owner = THIS_MODULE,
1844 .open = dn_rt_cache_seq_open,
1846 .llseek = seq_lseek,
1847 .release = seq_release_private,
1850 #endif /* CONFIG_PROC_FS */
1852 void __init dn_route_init(void)
1856 dn_dst_ops.kmem_cachep =
1857 kmem_cache_create("dn_dst_cache", sizeof(struct dn_route), 0,
1858 SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
1859 dst_entries_init(&dn_dst_ops);
1860 setup_timer(&dn_route_timer, dn_dst_check_expire, 0);
1861 dn_route_timer.expires = jiffies + decnet_dst_gc_interval * HZ;
1862 add_timer(&dn_route_timer);
1864 goal = totalram_pages >> (26 - PAGE_SHIFT);
1866 for(order = 0; (1UL << order) < goal; order++)
1870 * Cap the table at a couple of thousand entries at most, since it is
1871 * very, very unlikely ever to need to be larger than that.
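 *
 * A rough worked example, assuming 4KiB pages and buckets of roughly 16
 * bytes: the goal below is one page of hash table per 64MiB of RAM, so a
 * 256MiB machine asks for 4 pages (order 2), i.e. about 1024 buckets; the
 * loop further down only trims the order once the bucket count would
 * reach 2048.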
1873 while(order && ((((1UL << order) * PAGE_SIZE) /
1874 sizeof(struct dn_rt_hash_bucket)) >= 2048))
1878 dn_rt_hash_mask = (1UL << order) * PAGE_SIZE /
1879 sizeof(struct dn_rt_hash_bucket);
1880 while(dn_rt_hash_mask & (dn_rt_hash_mask - 1))
1882 dn_rt_hash_table = (struct dn_rt_hash_bucket *)
1883 __get_free_pages(GFP_ATOMIC, order);
1884 } while (dn_rt_hash_table == NULL && --order > 0);
1886 if (!dn_rt_hash_table)
1887 panic("Failed to allocate DECnet route cache hash table\n");
1890 "DECnet: Routing cache hash table of %u buckets, %ldKbytes\n",
1892 (long)(dn_rt_hash_mask*sizeof(struct dn_rt_hash_bucket))/1024);
1895 for(i = 0; i <= dn_rt_hash_mask; i++) {
1896 spin_lock_init(&dn_rt_hash_table[i].lock);
1897 dn_rt_hash_table[i].chain = NULL;
1900 dn_dst_ops.gc_thresh = (dn_rt_hash_mask + 1);
1902 proc_net_fops_create(&init_net, "decnet_cache", S_IRUGO, &dn_rt_cache_seq_fops);
1904 #ifdef CONFIG_DECNET_ROUTER
1905 rtnl_register(PF_DECnet, RTM_GETROUTE, dn_cache_getroute,
1908 rtnl_register(PF_DECnet, RTM_GETROUTE, dn_cache_getroute,
1909 dn_cache_dump, NULL);
1913 void __exit dn_route_cleanup(void)
1915 del_timer(&dn_route_timer);
1918 proc_net_remove(&init_net, "decnet_cache");
1919 dst_entries_destroy(&dn_dst_ops);