Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* |
2 | * net/dst.h Protocol independent destination cache definitions. | |
3 | * | |
4 | * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru> | |
5 | * | |
6 | */ | |
7 | ||
8 | #ifndef _NET_DST_H | |
9 | #define _NET_DST_H | |
10 | ||
86393e52 | 11 | #include <net/dst_ops.h> |
14c85021 | 12 | #include <linux/netdevice.h> |
1da177e4 LT |
13 | #include <linux/rtnetlink.h> |
14 | #include <linux/rcupdate.h> | |
187f1882 | 15 | #include <linux/bug.h> |
1da177e4 | 16 | #include <linux/jiffies.h> |
9620fef2 | 17 | #include <linux/refcount.h> |
1da177e4 LT |
18 | #include <net/neighbour.h> |
19 | #include <asm/processor.h> | |
20 | ||
/* Garbage-collection timing bounds for the dst cache, in jiffies. */
#define DST_GC_MIN	(HZ/10)
#define DST_GC_INC	(HZ/2)
#define DST_GC_MAX	(120*HZ)

/* Each dst_entry has reference count and sits in some parent list(s).
 * When it is removed from parent list, it is "freed" (dst_free).
 * After this it enters dead state (dst->obsolete > 0) and if its refcnt
 * is zero, it can be destroyed immediately, otherwise it is added
 * to gc list and garbage collector periodically checks the refcnt.
 */
31 | ||
32 | struct sk_buff; | |
33 | ||
/* Protocol-independent destination cache entry.  Field order is
 * deliberate: __refcnt is pushed onto a different cache line from the
 * hot read-mostly fields (see the padding below), so do not reorder
 * members without re-checking the BUILD_BUG_ON in dst_hold().
 */
struct dst_entry {
	struct net_device	*dev;
	struct rcu_head		rcu_head;	/* for RCU-deferred destruction */
	struct dst_entry	*child;
	struct dst_ops		*ops;		/* per-protocol operations */
	unsigned long		_metrics;	/* flag bits | pointer to u32 metrics */
	unsigned long		expires;	/* 0 means "never expires" */
	struct dst_entry	*path;
	struct dst_entry	*from;
#ifdef CONFIG_XFRM
	struct xfrm_state	*xfrm;
#else
	void			*__pad1;	/* keep layout identical without XFRM */
#endif
	int			(*input)(struct sk_buff *);
	int			(*output)(struct net *net, struct sock *sk, struct sk_buff *skb);

	unsigned short		flags;
#define DST_HOST		0x0001
#define DST_NOXFRM		0x0002
#define DST_NOPOLICY		0x0004
#define DST_NOCOUNT		0x0008
#define DST_FAKE_RTABLE		0x0010
#define DST_XFRM_TUNNEL		0x0020
#define DST_XFRM_QUEUE		0x0040
#define DST_METADATA		0x0080

	short			error;

	/* A non-zero value of dst->obsolete forces by-hand validation
	 * of the route entry.  Positive values are set by the generic
	 * dst layer to indicate that the entry has been forcefully
	 * destroyed.
	 *
	 * Negative values are used by the implementation layer code to
	 * force invocation of the dst_ops->check() method.
	 */
	short			obsolete;
#define DST_OBSOLETE_NONE	0
#define DST_OBSOLETE_DEAD	2
#define DST_OBSOLETE_FORCE_CHK	-1
#define DST_OBSOLETE_KILL	-2
	unsigned short		header_len;	/* more space at head required */
	unsigned short		trailer_len;	/* space to reserve at tail */
	unsigned short		__pad3;

#ifdef CONFIG_IP_ROUTE_CLASSID
	__u32			tclassid;	/* routing classifier tag */
#else
	__u32			__pad2;
#endif

#ifdef CONFIG_64BIT
	/*
	 * Align __refcnt to a 64 bytes alignment
	 * (L1_CACHE_SIZE would be too much)
	 */
	long			__pad_to_align_refcnt[2];
#endif
	/*
	 * __refcnt wants to be on a different cache line from
	 * input/output/ops or performance tanks badly
	 */
	atomic_t		__refcnt;	/* client references */
	int			__use;		/* use counter, see dst_use() */
	unsigned long		lastuse;	/* last-use timestamp in jiffies */
	struct lwtunnel_state	*lwtstate;
	union {
		struct dst_entry	*next;
		struct rtable __rcu	*rt_next;
		struct rt6_info		*rt6_next;
		struct dn_route __rcu	*dn_next;
	};
};
108 | ||
/* A shareable block of route metrics (RTAX_* indexed).  refcnt tracks
 * sharing when the owning dst has DST_METRICS_REFCOUNTED set.
 */
struct dst_metrics {
	u32		metrics[RTAX_MAX];
	refcount_t	refcnt;
};
extern const struct dst_metrics dst_default_metrics;
u32 *dst_cow_metrics_generic(struct dst_entry *dst, unsigned long old);

/* dst->_metrics packs a pointer and two flag bits in its low bits;
 * mask the flags off to recover the u32 metrics array.
 */
#define DST_METRICS_READ_ONLY		0x1UL
#define DST_METRICS_REFCOUNTED		0x2UL
#define DST_METRICS_FLAGS		0x3UL
#define __DST_METRICS_PTR(Y)	\
	((u32 *)((Y) & ~DST_METRICS_FLAGS))
#define DST_METRICS_PTR(X)	__DST_METRICS_PTR((X)->_metrics)
123 | ||
124 | static inline bool dst_metrics_read_only(const struct dst_entry *dst) | |
125 | { | |
126 | return dst->_metrics & DST_METRICS_READ_ONLY; | |
127 | } | |
128 | ||
a4023dd0 | 129 | void __dst_destroy_metrics_generic(struct dst_entry *dst, unsigned long old); |
62fa8a84 DM |
130 | |
131 | static inline void dst_destroy_metrics_generic(struct dst_entry *dst) | |
132 | { | |
133 | unsigned long val = dst->_metrics; | |
134 | if (!(val & DST_METRICS_READ_ONLY)) | |
135 | __dst_destroy_metrics_generic(dst, val); | |
136 | } | |
137 | ||
138 | static inline u32 *dst_metrics_write_ptr(struct dst_entry *dst) | |
139 | { | |
140 | unsigned long p = dst->_metrics; | |
141 | ||
1f37070d SH |
142 | BUG_ON(!p); |
143 | ||
62fa8a84 DM |
144 | if (p & DST_METRICS_READ_ONLY) |
145 | return dst->ops->cow_metrics(dst, p); | |
146 | return __DST_METRICS_PTR(p); | |
147 | } | |
148 | ||
149 | /* This may only be invoked before the entry has reached global | |
150 | * visibility. | |
151 | */ | |
152 | static inline void dst_init_metrics(struct dst_entry *dst, | |
153 | const u32 *src_metrics, | |
154 | bool read_only) | |
155 | { | |
156 | dst->_metrics = ((unsigned long) src_metrics) | | |
157 | (read_only ? DST_METRICS_READ_ONLY : 0); | |
158 | } | |
159 | ||
160 | static inline void dst_copy_metrics(struct dst_entry *dest, const struct dst_entry *src) | |
161 | { | |
162 | u32 *dst_metrics = dst_metrics_write_ptr(dest); | |
163 | ||
164 | if (dst_metrics) { | |
165 | u32 *src_metrics = DST_METRICS_PTR(src); | |
166 | ||
167 | memcpy(dst_metrics, src_metrics, RTAX_MAX * sizeof(u32)); | |
168 | } | |
169 | } | |
170 | ||
/* Read-only view of the metrics array (no copy-on-write performed). */
static inline u32 *dst_metrics_ptr(struct dst_entry *dst)
{
	return DST_METRICS_PTR(dst);
}
175 | ||
/* Fetch a metric without the sanity checks done by dst_metric().
 * RTAX_* identifiers are 1-based, hence the -1 when indexing.
 */
static inline u32
dst_metric_raw(const struct dst_entry *dst, const int metric)
{
	u32 *p = DST_METRICS_PTR(dst);

	return p[metric-1];
}
183 | ||
/* Generic metric accessor.  HOPLIMIT, ADVMSS and MTU have dedicated
 * helpers with fallback logic and must not be read through this path —
 * hence the one-shot warning.
 */
static inline u32
dst_metric(const struct dst_entry *dst, const int metric)
{
	WARN_ON_ONCE(metric == RTAX_HOPLIMIT ||
		     metric == RTAX_ADVMSS ||
		     metric == RTAX_MTU);
	return dst_metric_raw(dst, metric);
}
192 | ||
0dbaee3b DM |
193 | static inline u32 |
194 | dst_metric_advmss(const struct dst_entry *dst) | |
195 | { | |
196 | u32 advmss = dst_metric_raw(dst, RTAX_ADVMSS); | |
197 | ||
198 | if (!advmss) | |
199 | advmss = dst->ops->default_advmss(dst); | |
200 | ||
201 | return advmss; | |
202 | } | |
203 | ||
defb3519 DM |
204 | static inline void dst_metric_set(struct dst_entry *dst, int metric, u32 val) |
205 | { | |
62fa8a84 | 206 | u32 *p = dst_metrics_write_ptr(dst); |
defb3519 | 207 | |
62fa8a84 DM |
208 | if (p) |
209 | p[metric-1] = val; | |
1da177e4 LT |
210 | } |
211 | ||
/* Kernel-internal feature bits that are unallocated in user space. */
#define DST_FEATURE_ECN_CA	(1 << 31)

#define DST_FEATURE_MASK	(DST_FEATURE_ECN_CA)
#define DST_FEATURE_ECN_MASK	(DST_FEATURE_ECN_CA | RTAX_FEATURE_ECN)
217 | ||
/* Test @feature bit(s) in the route's RTAX_FEATURES metric. */
static inline u32
dst_feature(const struct dst_entry *dst, u32 feature)
{
	return dst_metric(dst, RTAX_FEATURES) & feature;
}
223 | ||
/* Path MTU for this route, computed by the protocol's mtu() method. */
static inline u32 dst_mtu(const struct dst_entry *dst)
{
	return dst->ops->mtu(dst);
}
228 | ||
/* RTT metrics are stored in milliseconds for user ABI, but used as jiffies */
static inline unsigned long dst_metric_rtt(const struct dst_entry *dst, int metric)
{
	return msecs_to_jiffies(dst_metric(dst, metric));
}
234 | ||
1da177e4 LT |
235 | static inline u32 |
236 | dst_allfrag(const struct dst_entry *dst) | |
237 | { | |
0c3adfb8 | 238 | int ret = dst_feature(dst, RTAX_FEATURE_ALLFRAG); |
1da177e4 LT |
239 | return ret; |
240 | } | |
241 | ||
/* True when @metric is locked (bit set in RTAX_LOCK) and must not be
 * auto-adjusted by the stack.
 */
static inline int
dst_metric_locked(const struct dst_entry *dst, int metric)
{
	return dst_metric(dst, RTAX_LOCK) & (1<<metric);
}
247 | ||
/* Take a reference on @dst.  Warns (and does not increment) if the
 * refcount already hit zero — that would be a use-after-free by the
 * caller.
 */
static inline void dst_hold(struct dst_entry *dst)
{
	/*
	 * If your kernel compilation stops here, please check
	 * __pad_to_align_refcnt declaration in struct dst_entry
	 */
	BUILD_BUG_ON(offsetof(struct dst_entry, __refcnt) & 63);
	WARN_ON(atomic_inc_not_zero(&dst->__refcnt) == 0);
}
257 | ||
/* Take a reference and record a use (counter + timestamp) on @dst. */
static inline void dst_use(struct dst_entry *dst, unsigned long time)
{
	dst_hold(dst);
	dst->__use++;
	dst->lastuse = time;
}
264 | ||
/* Record a use without taking a reference; the caller must already
 * guarantee the entry stays alive (cf. SKB_DST_NOREF handling below).
 */
static inline void dst_use_noref(struct dst_entry *dst, unsigned long time)
{
	dst->__use++;
	dst->lastuse = time;
}
270 | ||
/* Take an extra reference on @dst and return it; NULL passes through. */
static inline struct dst_entry *dst_clone(struct dst_entry *dst)
{
	if (dst)
		atomic_inc(&dst->__refcnt);
	return dst;
}
277 | ||
a4023dd0 | 278 | void dst_release(struct dst_entry *dst); |
7fee226a | 279 | |
5f56f409 WW |
280 | void dst_release_immediate(struct dst_entry *dst); |
281 | ||
/* Release the dst encoded in an skb->_skb_refdst word, unless it was
 * stored reference-less (SKB_DST_NOREF bit set).
 */
static inline void refdst_drop(unsigned long refdst)
{
	if (!(refdst & SKB_DST_NOREF))
		dst_release((struct dst_entry *)(refdst & SKB_DST_PTRMASK));
}
287 | ||
288 | /** | |
289 | * skb_dst_drop - drops skb dst | |
290 | * @skb: buffer | |
291 | * | |
292 | * Drops dst reference count if a reference was taken. | |
293 | */ | |
adf30907 ED |
294 | static inline void skb_dst_drop(struct sk_buff *skb) |
295 | { | |
7fee226a ED |
296 | if (skb->_skb_refdst) { |
297 | refdst_drop(skb->_skb_refdst); | |
298 | skb->_skb_refdst = 0UL; | |
299 | } | |
300 | } | |
301 | ||
e79e2595 | 302 | static inline void __skb_dst_copy(struct sk_buff *nskb, unsigned long refdst) |
7fee226a | 303 | { |
e79e2595 | 304 | nskb->_skb_refdst = refdst; |
7fee226a ED |
305 | if (!(nskb->_skb_refdst & SKB_DST_NOREF)) |
306 | dst_clone(skb_dst(nskb)); | |
307 | } | |
308 | ||
/* Copy @oskb's dst (and its noref-ness) onto @nskb. */
static inline void skb_dst_copy(struct sk_buff *nskb, const struct sk_buff *oskb)
{
	__skb_dst_copy(nskb, oskb->_skb_refdst);
}
313 | ||
/**
 * skb_dst_force - makes sure skb dst is refcounted
 * @skb: buffer
 *
 * If dst is not yet refcounted, let's do it
 */
static inline void skb_dst_force(struct sk_buff *skb)
{
	if (skb_dst_is_noref(skb)) {
		/* A noref dst is only valid under rcu_read_lock(). */
		WARN_ON(!rcu_read_lock_held());
		skb->_skb_refdst &= ~SKB_DST_NOREF;
		dst_clone(skb_dst(skb));
	}
}
1da177e4 | 328 | |
/**
 * dst_hold_safe - Take a reference on a dst if possible
 * @dst: pointer to dst entry
 *
 * This helper returns false if it could not safely
 * take a reference on a dst.
 */
static inline bool dst_hold_safe(struct dst_entry *dst)
{
	/* Fails (returns false) when the refcount already dropped to 0. */
	return atomic_inc_not_zero(&dst->__refcnt);
}
340 | ||
341 | /** | |
342 | * skb_dst_force_safe - makes sure skb dst is refcounted | |
343 | * @skb: buffer | |
344 | * | |
345 | * If dst is not yet refcounted and not destroyed, grab a ref on it. | |
346 | */ | |
347 | static inline void skb_dst_force_safe(struct sk_buff *skb) | |
348 | { | |
349 | if (skb_dst_is_noref(skb)) { | |
350 | struct dst_entry *dst = skb_dst(skb); | |
351 | ||
352 | if (!dst_hold_safe(dst)) | |
353 | dst = NULL; | |
354 | ||
355 | skb->_skb_refdst = (unsigned long)dst; | |
356 | } | |
357 | } | |
358 | ||
d19d56dd | 359 | |
290b895e ED |
/**
 * __skb_tunnel_rx - prepare skb for rx reinsert
 * @skb: buffer
 * @dev: tunnel device
 * @net: netns for packet i/o
 *
 * After decapsulation, packet is going to re-enter (netif_rx()) our stack,
 * so make some cleanups. (no accounting done)
 */
static inline void __skb_tunnel_rx(struct sk_buff *skb, struct net_device *dev,
				   struct net *net)
{
	skb->dev = dev;

	/*
	 * Clear hash so that we can recalculate the hash for the
	 * encapsulated packet, unless we have already determined the hash
	 * over the L4 4-tuple.
	 */
	skb_clear_hash_if_not_l4(skb);
	skb_set_queue_mapping(skb, 0);
	/* Scrub skb state when crossing a netns boundary. */
	skb_scrub_packet(skb, !net_eq(net, dev_net(dev)));
}
383 | ||
/**
 * skb_tunnel_rx - prepare skb for rx reinsert
 * @skb: buffer
 * @dev: tunnel device
 * @net: netns for packet i/o
 *
 * After decapsulation, packet is going to re-enter (netif_rx()) our stack,
 * so make some cleanups, and perform accounting.
 * Note: this accounting is not SMP safe.
 */
static inline void skb_tunnel_rx(struct sk_buff *skb, struct net_device *dev,
				 struct net *net)
{
	/* TODO : stats should be SMP safe */
	dev->stats.rx_packets++;
	dev->stats.rx_bytes += skb->len;
	__skb_tunnel_rx(skb, dev, net);
}
401 | ||
/* Routing classid attached to @skb's dst; 0 when no dst is attached or
 * when CONFIG_IP_ROUTE_CLASSID is disabled.
 */
static inline u32 dst_tclassid(const struct sk_buff *skb)
{
#ifdef CONFIG_IP_ROUTE_CLASSID
	const struct dst_entry *dst;

	dst = skb_dst(skb);
	if (dst)
		return dst->tclassid;
#endif
	return 0;
}
413 | ||
int dst_discard_out(struct net *net, struct sock *sk, struct sk_buff *skb);

/* Discard helper for callers lacking a netns context; uses init_net. */
static inline int dst_discard(struct sk_buff *skb)
{
	return dst_discard_out(&init_net, skb->sk, skb);
}
a4023dd0 JP |
419 | void *dst_alloc(struct dst_ops *ops, struct net_device *dev, int initial_ref, |
420 | int initial_obsolete, unsigned short flags); | |
f38a9eb1 TG |
421 | void dst_init(struct dst_entry *dst, struct dst_ops *ops, |
422 | struct net_device *dev, int initial_ref, int initial_obsolete, | |
423 | unsigned short flags); | |
a4023dd0 | 424 | struct dst_entry *dst_destroy(struct dst_entry *dst); |
4a6ce2b6 | 425 | void dst_dev_put(struct dst_entry *dst); |
1da177e4 | 426 | |
static inline void dst_confirm(struct dst_entry *dst)
{
	/* Intentionally empty.  NOTE(review): presumably superseded by
	 * dst_confirm_neigh() below; kept so existing callers compile —
	 * confirm against callers before removing.
	 */
}
f2c31e32 | 430 | |
/* Look up the neighbour for @daddr via the protocol's neigh_lookup op.
 * Returns NULL (never an ERR_PTR) on failure.
 */
static inline struct neighbour *dst_neigh_lookup(const struct dst_entry *dst, const void *daddr)
{
	struct neighbour *n = dst->ops->neigh_lookup(dst, NULL, daddr);
	return IS_ERR(n) ? NULL : n;
}
436 | ||
/* As dst_neigh_lookup(), but lets the protocol derive the destination
 * address from @skb instead of an explicit daddr.
 */
static inline struct neighbour *dst_neigh_lookup_skb(const struct dst_entry *dst,
						     struct sk_buff *skb)
{
	struct neighbour *n = dst->ops->neigh_lookup(dst, skb, NULL);
	return IS_ERR(n) ? NULL : n;
}
443 | ||
/* Confirm reachability of the neighbour for @daddr, when the protocol
 * implements the confirm_neigh hook.
 */
static inline void dst_confirm_neigh(const struct dst_entry *dst,
				     const void *daddr)
{
	if (dst->ops->confirm_neigh)
		dst->ops->confirm_neigh(dst, daddr);
}
450 | ||
/* Notify the protocol that the route attached to @skb failed to
 * deliver; safe to call with no dst or no link_failure hook.
 */
static inline void dst_link_failure(struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	if (dst && dst->ops && dst->ops->link_failure)
		dst->ops->link_failure(skb);
}
457 | ||
/* Arm (or tighten) the expiry time, @timeout jiffies from now.
 * dst->expires == 0 means "never expires", so a computed value of 0 is
 * bumped to 1; an existing earlier expiry is never pushed later.
 */
static inline void dst_set_expires(struct dst_entry *dst, int timeout)
{
	unsigned long expires = jiffies + timeout;

	if (expires == 0)
		expires = 1;

	if (dst->expires == 0 || time_before(expires, dst->expires))
		dst->expires = expires;
}
468 | ||
/* Output packet to network from transport: dispatch through the output
 * hook installed on the skb's dst.
 */
static inline int dst_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	return skb_dst(skb)->output(net, sk, skb);
}
1da177e4 LT |
474 | |
/* Input packet from network to transport: dispatch through the input
 * hook installed on the skb's dst.
 */
static inline int dst_input(struct sk_buff *skb)
{
	return skb_dst(skb)->input(skb);
}
480 | ||
/* Revalidate a cached dst: a non-zero ->obsolete forces the protocol's
 * check() method (see the DST_OBSOLETE_* comment on struct dst_entry).
 */
static inline struct dst_entry *dst_check(struct dst_entry *dst, u32 cookie)
{
	if (dst->obsolete)
		dst = dst->ops->check(dst, cookie);
	return dst;
}
487 | ||
/* Flags for xfrm_lookup flags argument. */
enum {
	XFRM_LOOKUP_ICMP = 1 << 0,
	XFRM_LOOKUP_QUEUE = 1 << 1,
	XFRM_LOOKUP_KEEP_DST_REF = 1 << 2,
};
494 | ||
1da177e4 LT |
495 | struct flowi; |
496 | #ifndef CONFIG_XFRM | |
452edd59 DM |
497 | static inline struct dst_entry *xfrm_lookup(struct net *net, |
498 | struct dst_entry *dst_orig, | |
6f9c9615 ED |
499 | const struct flowi *fl, |
500 | const struct sock *sk, | |
452edd59 | 501 | int flags) |
1da177e4 | 502 | { |
452edd59 | 503 | return dst_orig; |
f92ee619 SK |
504 | } |
505 | ||
506 | static inline struct dst_entry *xfrm_lookup_route(struct net *net, | |
507 | struct dst_entry *dst_orig, | |
508 | const struct flowi *fl, | |
6f9c9615 | 509 | const struct sock *sk, |
f92ee619 SK |
510 | int flags) |
511 | { | |
512 | return dst_orig; | |
513 | } | |
e87b3998 VY |
514 | |
515 | static inline struct xfrm_state *dst_xfrm(const struct dst_entry *dst) | |
516 | { | |
517 | return NULL; | |
518 | } | |
519 | ||
1da177e4 | 520 | #else |
a4023dd0 | 521 | struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig, |
6f9c9615 | 522 | const struct flowi *fl, const struct sock *sk, |
a4023dd0 | 523 | int flags); |
e87b3998 | 524 | |
f92ee619 | 525 | struct dst_entry *xfrm_lookup_route(struct net *net, struct dst_entry *dst_orig, |
6f9c9615 | 526 | const struct flowi *fl, const struct sock *sk, |
f92ee619 SK |
527 | int flags); |
528 | ||
e87b3998 VY |
529 | /* skb attached with this dst needs transformation if dst->xfrm is valid */ |
530 | static inline struct xfrm_state *dst_xfrm(const struct dst_entry *dst) | |
531 | { | |
532 | return dst->xfrm; | |
533 | } | |
1da177e4 | 534 | #endif |
1da177e4 LT |
535 | |
536 | #endif /* _NET_DST_H */ |