Commit | Line | Data |
---|---|---|
b2441318 | 1 | /* SPDX-License-Identifier: GPL-2.0 */ |
1da177e4 LT |
2 | /* |
3 | * net/dst.h Protocol independent destination cache definitions. | |
4 | * | |
5 | * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru> | |
6 | * | |
7 | */ | |
8 | ||
9 | #ifndef _NET_DST_H | |
10 | #define _NET_DST_H | |
11 | ||
86393e52 | 12 | #include <net/dst_ops.h> |
14c85021 | 13 | #include <linux/netdevice.h> |
1da177e4 LT |
14 | #include <linux/rtnetlink.h> |
15 | #include <linux/rcupdate.h> | |
187f1882 | 16 | #include <linux/bug.h> |
1da177e4 | 17 | #include <linux/jiffies.h> |
9620fef2 | 18 | #include <linux/refcount.h> |
1da177e4 LT |
19 | #include <net/neighbour.h> |
20 | #include <asm/processor.h> | |
e43b2190 | 21 | #include <linux/indirect_call_wrapper.h> |
1da177e4 | 22 | |
1da177e4 LT |
23 | struct sk_buff; |
24 | ||
struct dst_entry {
	struct net_device	*dev;
	struct dst_ops		*ops;
	unsigned long		_metrics;	/* metrics pointer ORed with DST_METRICS_* flag bits */
	unsigned long		expires;	/* jiffies deadline; 0 == never expires */
#ifdef CONFIG_XFRM
	struct xfrm_state	*xfrm;
#else
	void			*__pad1;	/* keep struct layout identical when XFRM is off */
#endif
	int			(*input)(struct sk_buff *);
	int			(*output)(struct net *net, struct sock *sk, struct sk_buff *skb);

	unsigned short		flags;
#define DST_NOXFRM		0x0002
#define DST_NOPOLICY		0x0004
#define DST_NOCOUNT		0x0008
#define DST_FAKE_RTABLE		0x0010
#define DST_XFRM_TUNNEL		0x0020
#define DST_XFRM_QUEUE		0x0040
#define DST_METADATA		0x0080

	/* A non-zero value of dst->obsolete forces by-hand validation
	 * of the route entry.  Positive values are set by the generic
	 * dst layer to indicate that the entry has been forcefully
	 * destroyed.
	 *
	 * Negative values are used by the implementation layer code to
	 * force invocation of the dst_ops->check() method.
	 */
	short			obsolete;
#define DST_OBSOLETE_NONE	0
#define DST_OBSOLETE_DEAD	2
#define DST_OBSOLETE_FORCE_CHK	-1
#define DST_OBSOLETE_KILL	-2
	unsigned short		header_len;	/* more space at head required */
	unsigned short		trailer_len;	/* space to reserve at tail */

	/*
	 * __refcnt wants to be on a different cache line from
	 * input/output/ops or performance tanks badly
	 */
#ifdef CONFIG_64BIT
	atomic_t		__refcnt;	/* 64-bit offset 64 */
#endif
	int			__use;
	unsigned long		lastuse;
	struct lwtunnel_state	*lwtstate;
	struct rcu_head		rcu_head;
	short			error;
	short			__pad;
	__u32			tclassid;	/* only written under CONFIG_IP_ROUTE_CLASSID; see dst_tclassid() */
#ifndef CONFIG_64BIT
	atomic_t		__refcnt;	/* 32-bit offset 64 */
#endif
};
81 | ||
3fb07daf ED |
/* Refcounted metrics array that several dst entries may share.  The
 * pointer stored in dst->_metrics carries DST_METRICS_FLAGS in its low
 * two bits, hence the explicit 4-byte alignment.
 */
struct dst_metrics {
	u32		metrics[RTAX_MAX];
	refcount_t	refcnt;
} __aligned(4);		/* Low pointer bits contain DST_METRICS_FLAGS */
3fb07daf ED |
86 | extern const struct dst_metrics dst_default_metrics; |
87 | ||
a4023dd0 | 88 | u32 *dst_cow_metrics_generic(struct dst_entry *dst, unsigned long old); |
62fa8a84 | 89 | |
e5fd387a | 90 | #define DST_METRICS_READ_ONLY 0x1UL |
3fb07daf | 91 | #define DST_METRICS_REFCOUNTED 0x2UL |
e5fd387a | 92 | #define DST_METRICS_FLAGS 0x3UL |
62fa8a84 | 93 | #define __DST_METRICS_PTR(Y) \ |
e5fd387a | 94 | ((u32 *)((Y) & ~DST_METRICS_FLAGS)) |
62fa8a84 DM |
95 | #define DST_METRICS_PTR(X) __DST_METRICS_PTR((X)->_metrics) |
96 | ||
97 | static inline bool dst_metrics_read_only(const struct dst_entry *dst) | |
98 | { | |
99 | return dst->_metrics & DST_METRICS_READ_ONLY; | |
100 | } | |
101 | ||
a4023dd0 | 102 | void __dst_destroy_metrics_generic(struct dst_entry *dst, unsigned long old); |
62fa8a84 DM |
103 | |
104 | static inline void dst_destroy_metrics_generic(struct dst_entry *dst) | |
105 | { | |
106 | unsigned long val = dst->_metrics; | |
107 | if (!(val & DST_METRICS_READ_ONLY)) | |
108 | __dst_destroy_metrics_generic(dst, val); | |
109 | } | |
110 | ||
111 | static inline u32 *dst_metrics_write_ptr(struct dst_entry *dst) | |
112 | { | |
113 | unsigned long p = dst->_metrics; | |
114 | ||
1f37070d SH |
115 | BUG_ON(!p); |
116 | ||
62fa8a84 DM |
117 | if (p & DST_METRICS_READ_ONLY) |
118 | return dst->ops->cow_metrics(dst, p); | |
119 | return __DST_METRICS_PTR(p); | |
120 | } | |
121 | ||
122 | /* This may only be invoked before the entry has reached global | |
123 | * visibility. | |
124 | */ | |
125 | static inline void dst_init_metrics(struct dst_entry *dst, | |
126 | const u32 *src_metrics, | |
127 | bool read_only) | |
128 | { | |
129 | dst->_metrics = ((unsigned long) src_metrics) | | |
130 | (read_only ? DST_METRICS_READ_ONLY : 0); | |
131 | } | |
132 | ||
133 | static inline void dst_copy_metrics(struct dst_entry *dest, const struct dst_entry *src) | |
134 | { | |
135 | u32 *dst_metrics = dst_metrics_write_ptr(dest); | |
136 | ||
137 | if (dst_metrics) { | |
138 | u32 *src_metrics = DST_METRICS_PTR(src); | |
139 | ||
140 | memcpy(dst_metrics, src_metrics, RTAX_MAX * sizeof(u32)); | |
141 | } | |
142 | } | |
143 | ||
144 | static inline u32 *dst_metrics_ptr(struct dst_entry *dst) | |
145 | { | |
146 | return DST_METRICS_PTR(dst); | |
147 | } | |
148 | ||
1da177e4 | 149 | static inline u32 |
5170ae82 | 150 | dst_metric_raw(const struct dst_entry *dst, const int metric) |
1da177e4 | 151 | { |
62fa8a84 DM |
152 | u32 *p = DST_METRICS_PTR(dst); |
153 | ||
154 | return p[metric-1]; | |
defb3519 DM |
155 | } |
156 | ||
5170ae82 DM |
/* Checked metric read.  RTAX_HOPLIMIT, RTAX_ADVMSS and RTAX_MTU must not
 * be read through this helper because their raw value of 0 means "use a
 * default" (see dst_metric_advmss() and dst_mtu()), hence the warning.
 */
static inline u32
dst_metric(const struct dst_entry *dst, const int metric)
{
	WARN_ON_ONCE(metric == RTAX_HOPLIMIT ||
		     metric == RTAX_ADVMSS ||
		     metric == RTAX_MTU);
	return dst_metric_raw(dst, metric);
}
165 | ||
0dbaee3b DM |
166 | static inline u32 |
167 | dst_metric_advmss(const struct dst_entry *dst) | |
168 | { | |
169 | u32 advmss = dst_metric_raw(dst, RTAX_ADVMSS); | |
170 | ||
171 | if (!advmss) | |
172 | advmss = dst->ops->default_advmss(dst); | |
173 | ||
174 | return advmss; | |
175 | } | |
176 | ||
defb3519 DM |
177 | static inline void dst_metric_set(struct dst_entry *dst, int metric, u32 val) |
178 | { | |
62fa8a84 | 179 | u32 *p = dst_metrics_write_ptr(dst); |
defb3519 | 180 | |
62fa8a84 DM |
181 | if (p) |
182 | p[metric-1] = val; | |
1da177e4 LT |
183 | } |
184 | ||
c3a8d947 | 185 | /* Kernel-internal feature bits that are unallocated in user space. */ |
40f6a2cb | 186 | #define DST_FEATURE_ECN_CA (1U << 31) |
c3a8d947 DB |
187 | |
188 | #define DST_FEATURE_MASK (DST_FEATURE_ECN_CA) | |
189 | #define DST_FEATURE_ECN_MASK (DST_FEATURE_ECN_CA | RTAX_FEATURE_ECN) | |
190 | ||
0c3adfb8 GBY |
191 | static inline u32 |
192 | dst_feature(const struct dst_entry *dst, u32 feature) | |
193 | { | |
bb5b7c11 | 194 | return dst_metric(dst, RTAX_FEATURES) & feature; |
0c3adfb8 GBY |
195 | } |
196 | ||
f67fbeae BV |
197 | INDIRECT_CALLABLE_DECLARE(unsigned int ip6_mtu(const struct dst_entry *)); |
198 | INDIRECT_CALLABLE_DECLARE(unsigned int ipv4_mtu(const struct dst_entry *)); | |
1da177e4 LT |
/* Path MTU of @dst via dst_ops->mtu().  INDIRECT_CALL_INET avoids the
 * retpoline cost for the common ip6_mtu/ipv4_mtu implementations.
 */
static inline u32 dst_mtu(const struct dst_entry *dst)
{
	return INDIRECT_CALL_INET(dst->ops->mtu, ip6_mtu, ipv4_mtu, dst);
}
203 | ||
c1e20f7c SH |
204 | /* RTT metrics are stored in milliseconds for user ABI, but used as jiffies */ |
205 | static inline unsigned long dst_metric_rtt(const struct dst_entry *dst, int metric) | |
206 | { | |
207 | return msecs_to_jiffies(dst_metric(dst, metric)); | |
208 | } | |
209 | ||
1da177e4 LT |
210 | static inline u32 |
211 | dst_allfrag(const struct dst_entry *dst) | |
212 | { | |
0c3adfb8 | 213 | int ret = dst_feature(dst, RTAX_FEATURE_ALLFRAG); |
1da177e4 LT |
214 | return ret; |
215 | } | |
216 | ||
217 | static inline int | |
d33e4553 | 218 | dst_metric_locked(const struct dst_entry *dst, int metric) |
1da177e4 | 219 | { |
5af68891 | 220 | return dst_metric(dst, RTAX_LOCK) & (1 << metric); |
1da177e4 LT |
221 | } |
222 | ||
static inline void dst_hold(struct dst_entry *dst)
{
	/*
	 * If your kernel compilation stops here, please check
	 * the placement of __refcnt in struct dst_entry
	 */
	BUILD_BUG_ON(offsetof(struct dst_entry, __refcnt) & 63);
	/* Incrementing from zero would resurrect a dst that is already
	 * being destroyed, so such an attempt only triggers a warning.
	 */
	WARN_ON(atomic_inc_not_zero(&dst->__refcnt) == 0);
}
232 | ||
0da4af00 | 233 | static inline void dst_use_noref(struct dst_entry *dst, unsigned long time) |
03f49f34 | 234 | { |
32d18ab1 | 235 | if (unlikely(time != dst->lastuse)) { |
0da4af00 WW |
236 | dst->__use++; |
237 | dst->lastuse = time; | |
238 | } | |
03f49f34 PE |
239 | } |
240 | ||
/* Take a full reference on @dst and record its use at @time. */
static inline void dst_hold_and_use(struct dst_entry *dst, unsigned long time)
{
	dst_hold(dst);
	dst_use_noref(dst, time);
}
246 | ||
7f95e188 | 247 | static inline struct dst_entry *dst_clone(struct dst_entry *dst) |
1da177e4 LT |
248 | { |
249 | if (dst) | |
222d7dbd | 250 | dst_hold(dst); |
1da177e4 LT |
251 | return dst; |
252 | } | |
253 | ||
a4023dd0 | 254 | void dst_release(struct dst_entry *dst); |
7fee226a | 255 | |
5f56f409 WW |
256 | void dst_release_immediate(struct dst_entry *dst); |
257 | ||
7fee226a ED |
258 | static inline void refdst_drop(unsigned long refdst) |
259 | { | |
260 | if (!(refdst & SKB_DST_NOREF)) | |
261 | dst_release((struct dst_entry *)(refdst & SKB_DST_PTRMASK)); | |
262 | } | |
263 | ||
264 | /** | |
265 | * skb_dst_drop - drops skb dst | |
266 | * @skb: buffer | |
267 | * | |
268 | * Drops dst reference count if a reference was taken. | |
269 | */ | |
adf30907 ED |
270 | static inline void skb_dst_drop(struct sk_buff *skb) |
271 | { | |
7fee226a ED |
272 | if (skb->_skb_refdst) { |
273 | refdst_drop(skb->_skb_refdst); | |
274 | skb->_skb_refdst = 0UL; | |
275 | } | |
276 | } | |
277 | ||
/* Install a raw _skb_refdst word on @nskb, taking a reference when the
 * word carries a real (non-noref) dst.  slow_gro is flagged whenever a
 * dst is attached.
 */
static inline void __skb_dst_copy(struct sk_buff *nskb, unsigned long refdst)
{
	nskb->slow_gro |= !!refdst;
	nskb->_skb_refdst = refdst;
	if (!(nskb->_skb_refdst & SKB_DST_NOREF))
		dst_clone(skb_dst(nskb));
}
285 | ||
e79e2595 JS |
/* Copy the dst (and its reference, if one is held) from @oskb to @nskb. */
static inline void skb_dst_copy(struct sk_buff *nskb, const struct sk_buff *oskb)
{
	__skb_dst_copy(nskb, oskb->_skb_refdst);
}
290 | ||
5037e9ef ED |
291 | /** |
292 | * dst_hold_safe - Take a reference on a dst if possible | |
293 | * @dst: pointer to dst entry | |
294 | * | |
295 | * This helper returns false if it could not safely | |
296 | * take a reference on a dst. | |
297 | */ | |
static inline bool dst_hold_safe(struct dst_entry *dst)
{
	/* Refuse the 0 -> 1 transition: a zero refcount means the dst is
	 * already on its way to destruction.
	 */
	return atomic_inc_not_zero(&dst->__refcnt);
}
302 | ||
303 | /** | |
222d7dbd | 304 | * skb_dst_force - makes sure skb dst is refcounted |
5037e9ef ED |
305 | * @skb: buffer |
306 | * | |
307 | * If dst is not yet refcounted and not destroyed, grab a ref on it. | |
b60a7738 | 308 | * Returns true if dst is refcounted. |
5037e9ef | 309 | */ |
static inline bool skb_dst_force(struct sk_buff *skb)
{
	if (skb_dst_is_noref(skb)) {
		struct dst_entry *dst = skb_dst(skb);

		/* a noref dst is only valid under RCU protection */
		WARN_ON(!rcu_read_lock_held());
		if (!dst_hold_safe(dst))
			dst = NULL;	/* dst was already being destroyed */

		skb->_skb_refdst = (unsigned long)dst;
		skb->slow_gro |= !!dst;
	}

	return skb->_skb_refdst != 0UL;
}
325 | ||
d19d56dd | 326 | |
290b895e ED |
327 | /** |
328 | * __skb_tunnel_rx - prepare skb for rx reinsert | |
329 | * @skb: buffer | |
330 | * @dev: tunnel device | |
ea23192e | 331 | * @net: netns for packet i/o |
290b895e ED |
332 | * |
333 | * After decapsulation, packet is going to re-enter (netif_rx()) our stack, | |
334 | * so make some cleanups. (no accounting done) | |
335 | */ | |
ea23192e ND |
static inline void __skb_tunnel_rx(struct sk_buff *skb, struct net_device *dev,
				   struct net *net)
{
	skb->dev = dev;

	/*
	 * Clear hash so that we can recalculate the hash for the
	 * encapsulated packet, unless we have already determined the hash
	 * over the L4 4-tuple.
	 */
	skb_clear_hash_if_not_l4(skb);
	skb_set_queue_mapping(skb, 0);
	/* scrub harder when the packet crosses a netns boundary */
	skb_scrub_packet(skb, !net_eq(net, dev_net(dev)));
}
350 | ||
d19d56dd ED |
351 | /** |
352 | * skb_tunnel_rx - prepare skb for rx reinsert | |
353 | * @skb: buffer | |
354 | * @dev: tunnel device | |
8eb1a859 | 355 | * @net: netns for packet i/o |
d19d56dd ED |
356 | * |
357 | * After decapsulation, packet is going to re-enter (netif_rx()) our stack, | |
358 | * so make some cleanups, and perform accounting. | |
290b895e | 359 | * Note: this accounting is not SMP safe. |
d19d56dd | 360 | */ |
ea23192e ND |
static inline void skb_tunnel_rx(struct sk_buff *skb, struct net_device *dev,
				 struct net *net)
{
	/* TODO : stats should be SMP safe */
	dev->stats.rx_packets++;
	dev->stats.rx_bytes += skb->len;
	/* then do the cleanups common with the non-accounting variant */
	__skb_tunnel_rx(skb, dev, net);
}
369 | ||
808c1b69 DB |
370 | static inline u32 dst_tclassid(const struct sk_buff *skb) |
371 | { | |
372 | #ifdef CONFIG_IP_ROUTE_CLASSID | |
373 | const struct dst_entry *dst; | |
374 | ||
375 | dst = skb_dst(skb); | |
376 | if (dst) | |
377 | return dst->tclassid; | |
378 | #endif | |
379 | return 0; | |
380 | } | |
381 | ||
ede2059d | 382 | int dst_discard_out(struct net *net, struct sock *sk, struct sk_buff *skb); |
aad88724 ED |
/* Convenience wrapper: discard @skb through dst_discard_out() in init_net. */
static inline int dst_discard(struct sk_buff *skb)
{
	return dst_discard_out(&init_net, skb->sk, skb);
}
a4023dd0 JP |
387 | void *dst_alloc(struct dst_ops *ops, struct net_device *dev, int initial_ref, |
388 | int initial_obsolete, unsigned short flags); | |
f38a9eb1 TG |
389 | void dst_init(struct dst_entry *dst, struct dst_ops *ops, |
390 | struct net_device *dev, int initial_ref, int initial_obsolete, | |
391 | unsigned short flags); | |
a4023dd0 | 392 | struct dst_entry *dst_destroy(struct dst_entry *dst); |
4a6ce2b6 | 393 | void dst_dev_put(struct dst_entry *dst); |
1da177e4 | 394 | |
1da177e4 LT |
/* Intentionally empty; neighbour confirmation is done through
 * dst_confirm_neigh() below.
 */
static inline void dst_confirm(struct dst_entry *dst)
{
}
f2c31e32 | 398 | |
d3aaeb38 DM |
399 | static inline struct neighbour *dst_neigh_lookup(const struct dst_entry *dst, const void *daddr) |
400 | { | |
aaa0c23c ZZ |
401 | struct neighbour *n = dst->ops->neigh_lookup(dst, NULL, daddr); |
402 | return IS_ERR(n) ? NULL : n; | |
f894cbf8 DM |
403 | } |
404 | ||
/* Resolve the neighbour for @skb's destination via dst_ops.  Returns
 * NULL (with a one-time warning) when the dst has no neigh_lookup op,
 * and NULL on lookup error.
 */
static inline struct neighbour *dst_neigh_lookup_skb(const struct dst_entry *dst,
						     struct sk_buff *skb)
{
	struct neighbour *n;

	if (WARN_ON_ONCE(!dst->ops->neigh_lookup))
		return NULL;

	n = dst->ops->neigh_lookup(dst, skb, NULL);

	return IS_ERR(n) ? NULL : n;
}
417 | ||
63fca65d JA |
418 | static inline void dst_confirm_neigh(const struct dst_entry *dst, |
419 | const void *daddr) | |
420 | { | |
421 | if (dst->ops->confirm_neigh) | |
422 | dst->ops->confirm_neigh(dst, daddr); | |
423 | } | |
424 | ||
1da177e4 LT |
425 | static inline void dst_link_failure(struct sk_buff *skb) |
426 | { | |
adf30907 | 427 | struct dst_entry *dst = skb_dst(skb); |
1da177e4 LT |
428 | if (dst && dst->ops && dst->ops->link_failure) |
429 | dst->ops->link_failure(skb); | |
430 | } | |
431 | ||
/* Arm (or tighten) the expiry of @dst to @timeout jiffies from now.
 * An existing earlier deadline is never pushed back.
 */
static inline void dst_set_expires(struct dst_entry *dst, int timeout)
{
	unsigned long expires = jiffies + timeout;

	/* 0 means "no expiry", so nudge an exact-zero deadline to 1 */
	if (expires == 0)
		expires = 1;

	if (dst->expires == 0 || time_before(expires, dst->expires))
		dst->expires = expires;
}
442 | ||
6585d7dc BV |
443 | INDIRECT_CALLABLE_DECLARE(int ip6_output(struct net *, struct sock *, |
444 | struct sk_buff *)); | |
445 | INDIRECT_CALLABLE_DECLARE(int ip_output(struct net *, struct sock *, | |
446 | struct sk_buff *)); | |
/* Output packet to network from transport. */
static inline int dst_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	/* INDIRECT_CALL avoids a retpoline for the common IPv4/IPv6 paths */
	return INDIRECT_CALL_INET(skb_dst(skb)->output,
				  ip6_output, ip_output,
				  net, sk, skb);
}
1da177e4 | 454 | |
e43b2190 BV |
455 | INDIRECT_CALLABLE_DECLARE(int ip6_input(struct sk_buff *)); |
456 | INDIRECT_CALLABLE_DECLARE(int ip_local_deliver(struct sk_buff *)); | |
1da177e4 LT |
/* Input packet from network to transport. */
static inline int dst_input(struct sk_buff *skb)
{
	/* INDIRECT_CALL avoids a retpoline for the common IPv4/IPv6 paths */
	return INDIRECT_CALL_INET(skb_dst(skb)->input,
				  ip6_input, ip_local_deliver, skb);
}
463 | ||
bbd807df BV |
464 | INDIRECT_CALLABLE_DECLARE(struct dst_entry *ip6_dst_check(struct dst_entry *, |
465 | u32)); | |
466 | INDIRECT_CALLABLE_DECLARE(struct dst_entry *ipv4_dst_check(struct dst_entry *, | |
467 | u32)); | |
1da177e4 LT |
/* Revalidate @dst through dst_ops->check() when it is marked obsolete
 * (see the DST_OBSOLETE_* values above); otherwise return it unchanged.
 */
static inline struct dst_entry *dst_check(struct dst_entry *dst, u32 cookie)
{
	if (dst->obsolete)
		dst = INDIRECT_CALL_INET(dst->ops->check, ip6_dst_check,
					 ipv4_dst_check, dst, cookie);
	return dst;
}
475 | ||
815f4e57 HX |
476 | /* Flags for xfrm_lookup flags argument. */ |
477 | enum { | |
80c0bc9e | 478 | XFRM_LOOKUP_ICMP = 1 << 0, |
b8c203b2 | 479 | XFRM_LOOKUP_QUEUE = 1 << 1, |
ac37e251 | 480 | XFRM_LOOKUP_KEEP_DST_REF = 1 << 2, |
815f4e57 HX |
481 | }; |
482 | ||
1da177e4 LT |
483 | struct flowi; |
484 | #ifndef CONFIG_XFRM | |
452edd59 DM |
/* !CONFIG_XFRM stub: no policy lookup, hand back the original route. */
static inline struct dst_entry *xfrm_lookup(struct net *net,
					    struct dst_entry *dst_orig,
					    const struct flowi *fl,
					    const struct sock *sk,
					    int flags)
{
	return dst_orig;
}
493 | ||
bc56b334 BW |
/* !CONFIG_XFRM stub: no policy lookup, hand back the original route. */
static inline struct dst_entry *
xfrm_lookup_with_ifid(struct net *net, struct dst_entry *dst_orig,
		      const struct flowi *fl, const struct sock *sk,
		      int flags, u32 if_id)
{
	return dst_orig;
}
501 | ||
f92ee619 SK |
/* !CONFIG_XFRM stub: no policy lookup, hand back the original route. */
static inline struct dst_entry *xfrm_lookup_route(struct net *net,
						  struct dst_entry *dst_orig,
						  const struct flowi *fl,
						  const struct sock *sk,
						  int flags)
{
	return dst_orig;
}
e87b3998 VY |
510 | |
/* !CONFIG_XFRM stub: a dst can never carry xfrm state. */
static inline struct xfrm_state *dst_xfrm(const struct dst_entry *dst)
{
	return NULL;
}
515 | ||
1da177e4 | 516 | #else |
a4023dd0 | 517 | struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig, |
6f9c9615 | 518 | const struct flowi *fl, const struct sock *sk, |
a4023dd0 | 519 | int flags); |
e87b3998 | 520 | |
bc56b334 BW |
521 | struct dst_entry *xfrm_lookup_with_ifid(struct net *net, |
522 | struct dst_entry *dst_orig, | |
523 | const struct flowi *fl, | |
524 | const struct sock *sk, int flags, | |
525 | u32 if_id); | |
526 | ||
f92ee619 | 527 | struct dst_entry *xfrm_lookup_route(struct net *net, struct dst_entry *dst_orig, |
6f9c9615 | 528 | const struct flowi *fl, const struct sock *sk, |
f92ee619 SK |
529 | int flags); |
530 | ||
e87b3998 VY |
/* skb attached with this dst needs transformation if dst->xfrm is valid */
static inline struct xfrm_state *dst_xfrm(const struct dst_entry *dst)
{
	return dst->xfrm;
}
1da177e4 | 536 | #endif |
1da177e4 | 537 | |
f15ca723 ND |
538 | static inline void skb_dst_update_pmtu(struct sk_buff *skb, u32 mtu) |
539 | { | |
540 | struct dst_entry *dst = skb_dst(skb); | |
541 | ||
542 | if (dst && dst->ops->update_pmtu) | |
bd085ef6 | 543 | dst->ops->update_pmtu(dst, NULL, skb, mtu, true); |
f15ca723 ND |
544 | } |
545 | ||
07dc35c6 HL |
546 | /* update dst pmtu but not do neighbor confirm */ |
547 | static inline void skb_dst_update_pmtu_no_confirm(struct sk_buff *skb, u32 mtu) | |
548 | { | |
549 | struct dst_entry *dst = skb_dst(skb); | |
550 | ||
551 | if (dst && dst->ops->update_pmtu) | |
552 | dst->ops->update_pmtu(dst, NULL, skb, mtu, false); | |
553 | } | |
554 | ||
c4c877b2 DB |
555 | struct dst_entry *dst_blackhole_check(struct dst_entry *dst, u32 cookie); |
556 | void dst_blackhole_update_pmtu(struct dst_entry *dst, struct sock *sk, | |
557 | struct sk_buff *skb, u32 mtu, bool confirm_neigh); | |
558 | void dst_blackhole_redirect(struct dst_entry *dst, struct sock *sk, | |
559 | struct sk_buff *skb); | |
560 | u32 *dst_blackhole_cow_metrics(struct dst_entry *dst, unsigned long old); | |
561 | struct neighbour *dst_blackhole_neigh_lookup(const struct dst_entry *dst, | |
562 | struct sk_buff *skb, | |
563 | const void *daddr); | |
564 | unsigned int dst_blackhole_mtu(const struct dst_entry *dst); | |
565 | ||
1da177e4 | 566 | #endif /* _NET_DST_H */ |