Commit | Line | Data |
---|---|---|
b2441318 | 1 | /* SPDX-License-Identifier: GPL-2.0 */ |
1da177e4 LT |
2 | /* |
3 | * net/dst.h Protocol independent destination cache definitions. | |
4 | * | |
5 | * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru> | |
6 | * | |
7 | */ | |
8 | ||
9 | #ifndef _NET_DST_H | |
10 | #define _NET_DST_H | |
11 | ||
86393e52 | 12 | #include <net/dst_ops.h> |
14c85021 | 13 | #include <linux/netdevice.h> |
1da177e4 LT |
14 | #include <linux/rtnetlink.h> |
15 | #include <linux/rcupdate.h> | |
187f1882 | 16 | #include <linux/bug.h> |
1da177e4 | 17 | #include <linux/jiffies.h> |
9620fef2 | 18 | #include <linux/refcount.h> |
bc9d3a9f | 19 | #include <linux/rcuref.h> |
1da177e4 LT |
20 | #include <net/neighbour.h> |
21 | #include <asm/processor.h> | |
e43b2190 | 22 | #include <linux/indirect_call_wrapper.h> |
1da177e4 | 23 | |
1da177e4 LT |
24 | struct sk_buff; |
25 | ||
struct dst_entry {
	/* Egress device; a tracked reference is held via @dev_tracker. */
	struct net_device	*dev;
	struct dst_ops		*ops;
	/* Pointer to the metrics array with DST_METRICS_FLAGS stuffed into
	 * the two low bits (see __DST_METRICS_PTR()/DST_METRICS_PTR()).
	 */
	unsigned long		_metrics;
	unsigned long		expires;	/* jiffies; 0 == no expiry set */
#ifdef CONFIG_XFRM
	struct xfrm_state	*xfrm;
#else
	void			*__pad1;	/* keep layout identical without XFRM */
#endif
	int			(*input)(struct sk_buff *);
	int			(*output)(struct net *net, struct sock *sk, struct sk_buff *skb);

	unsigned short		flags;
#define DST_NOXFRM		0x0002
#define DST_NOPOLICY		0x0004
#define DST_NOCOUNT		0x0008
#define DST_FAKE_RTABLE		0x0010
#define DST_XFRM_TUNNEL		0x0020
#define DST_XFRM_QUEUE		0x0040
#define DST_METADATA		0x0080

	/* A non-zero value of dst->obsolete forces by-hand validation
	 * of the route entry.  Positive values are set by the generic
	 * dst layer to indicate that the entry has been forcefully
	 * destroyed.
	 *
	 * Negative values are used by the implementation layer code to
	 * force invocation of the dst_ops->check() method.
	 */
	short			obsolete;
#define DST_OBSOLETE_NONE	0
#define DST_OBSOLETE_DEAD	2
#define DST_OBSOLETE_FORCE_CHK	-1
#define DST_OBSOLETE_KILL	-2
	unsigned short		header_len;	/* more space at head required */
	unsigned short		trailer_len;	/* space to reserve at tail */

	/*
	 * __rcuref wants to be on a different cache line from
	 * input/output/ops or performance tanks badly
	 */
#ifdef CONFIG_64BIT
	rcuref_t		__rcuref;	/* 64-bit offset 64 */
#endif
	int			__use;		/* use counter, see dst_use_noref() */
	unsigned long		lastuse;	/* jiffies of last use */
	struct rcu_head		rcu_head;
	short			error;
	short			__pad;
	__u32			tclassid;	/* routing classid, see dst_tclassid() */
#ifndef CONFIG_64BIT
	struct lwtunnel_state	*lwtstate;
	rcuref_t		__rcuref;	/* 32-bit offset 64 */
#endif
	netdevice_tracker	dev_tracker;	/* tracks the reference on @dev */

	/*
	 * Used by rtable and rt6_info. Moves lwtstate into the next cache
	 * line on 64bit so that lwtstate does not cause false sharing with
	 * __rcuref under contention of __rcuref. This also puts the
	 * frequently accessed members of rtable and rt6_info out of the
	 * __rcuref cache line.
	 */
	struct list_head	rt_uncached;
	struct uncached_list	*rt_uncached_list;
#ifdef CONFIG_64BIT
	struct lwtunnel_state	*lwtstate;
#endif
};
96 | ||
3fb07daf ED |
/* Shared, possibly refcounted metrics block; dst->_metrics points at
 * @metrics, with flag bits stored in the pointer's low bits.
 */
struct dst_metrics {
	u32		metrics[RTAX_MAX];
	refcount_t	refcnt;		/* used when DST_METRICS_REFCOUNTED is set */
} __aligned(4);		/* Low pointer bits contain DST_METRICS_FLAGS */
3fb07daf ED |
101 | extern const struct dst_metrics dst_default_metrics; |
102 | ||
a4023dd0 | 103 | u32 *dst_cow_metrics_generic(struct dst_entry *dst, unsigned long old); |
62fa8a84 | 104 | |
e5fd387a | 105 | #define DST_METRICS_READ_ONLY 0x1UL |
3fb07daf | 106 | #define DST_METRICS_REFCOUNTED 0x2UL |
e5fd387a | 107 | #define DST_METRICS_FLAGS 0x3UL |
62fa8a84 | 108 | #define __DST_METRICS_PTR(Y) \ |
e5fd387a | 109 | ((u32 *)((Y) & ~DST_METRICS_FLAGS)) |
62fa8a84 DM |
110 | #define DST_METRICS_PTR(X) __DST_METRICS_PTR((X)->_metrics) |
111 | ||
112 | static inline bool dst_metrics_read_only(const struct dst_entry *dst) | |
113 | { | |
114 | return dst->_metrics & DST_METRICS_READ_ONLY; | |
115 | } | |
116 | ||
a4023dd0 | 117 | void __dst_destroy_metrics_generic(struct dst_entry *dst, unsigned long old); |
62fa8a84 DM |
118 | |
119 | static inline void dst_destroy_metrics_generic(struct dst_entry *dst) | |
120 | { | |
121 | unsigned long val = dst->_metrics; | |
122 | if (!(val & DST_METRICS_READ_ONLY)) | |
123 | __dst_destroy_metrics_generic(dst, val); | |
124 | } | |
125 | ||
126 | static inline u32 *dst_metrics_write_ptr(struct dst_entry *dst) | |
127 | { | |
128 | unsigned long p = dst->_metrics; | |
129 | ||
1f37070d SH |
130 | BUG_ON(!p); |
131 | ||
62fa8a84 DM |
132 | if (p & DST_METRICS_READ_ONLY) |
133 | return dst->ops->cow_metrics(dst, p); | |
134 | return __DST_METRICS_PTR(p); | |
135 | } | |
136 | ||
137 | /* This may only be invoked before the entry has reached global | |
138 | * visibility. | |
139 | */ | |
140 | static inline void dst_init_metrics(struct dst_entry *dst, | |
141 | const u32 *src_metrics, | |
142 | bool read_only) | |
143 | { | |
144 | dst->_metrics = ((unsigned long) src_metrics) | | |
145 | (read_only ? DST_METRICS_READ_ONLY : 0); | |
146 | } | |
147 | ||
148 | static inline void dst_copy_metrics(struct dst_entry *dest, const struct dst_entry *src) | |
149 | { | |
150 | u32 *dst_metrics = dst_metrics_write_ptr(dest); | |
151 | ||
152 | if (dst_metrics) { | |
153 | u32 *src_metrics = DST_METRICS_PTR(src); | |
154 | ||
155 | memcpy(dst_metrics, src_metrics, RTAX_MAX * sizeof(u32)); | |
156 | } | |
157 | } | |
158 | ||
159 | static inline u32 *dst_metrics_ptr(struct dst_entry *dst) | |
160 | { | |
161 | return DST_METRICS_PTR(dst); | |
162 | } | |
163 | ||
1da177e4 | 164 | static inline u32 |
5170ae82 | 165 | dst_metric_raw(const struct dst_entry *dst, const int metric) |
1da177e4 | 166 | { |
62fa8a84 DM |
167 | u32 *p = DST_METRICS_PTR(dst); |
168 | ||
169 | return p[metric-1]; | |
defb3519 DM |
170 | } |
171 | ||
5170ae82 DM |
/* Checked metric read: warns (once) about metrics that have dedicated
 * accessors applying defaults (dst_mtu(), dst_metric_advmss(), ...) —
 * reading those raw here is almost certainly a bug.
 */
static inline u32
dst_metric(const struct dst_entry *dst, const int metric)
{
	WARN_ON_ONCE(metric == RTAX_HOPLIMIT ||
		     metric == RTAX_ADVMSS ||
		     metric == RTAX_MTU);
	return dst_metric_raw(dst, metric);
}
180 | ||
0dbaee3b DM |
181 | static inline u32 |
182 | dst_metric_advmss(const struct dst_entry *dst) | |
183 | { | |
184 | u32 advmss = dst_metric_raw(dst, RTAX_ADVMSS); | |
185 | ||
186 | if (!advmss) | |
187 | advmss = dst->ops->default_advmss(dst); | |
188 | ||
189 | return advmss; | |
190 | } | |
191 | ||
defb3519 DM |
192 | static inline void dst_metric_set(struct dst_entry *dst, int metric, u32 val) |
193 | { | |
62fa8a84 | 194 | u32 *p = dst_metrics_write_ptr(dst); |
defb3519 | 195 | |
62fa8a84 DM |
196 | if (p) |
197 | p[metric-1] = val; | |
1da177e4 LT |
198 | } |
199 | ||
c3a8d947 | 200 | /* Kernel-internal feature bits that are unallocated in user space. */ |
40f6a2cb | 201 | #define DST_FEATURE_ECN_CA (1U << 31) |
c3a8d947 DB |
202 | |
203 | #define DST_FEATURE_MASK (DST_FEATURE_ECN_CA) | |
204 | #define DST_FEATURE_ECN_MASK (DST_FEATURE_ECN_CA | RTAX_FEATURE_ECN) | |
205 | ||
0c3adfb8 GBY |
206 | static inline u32 |
207 | dst_feature(const struct dst_entry *dst, u32 feature) | |
208 | { | |
bb5b7c11 | 209 | return dst_metric(dst, RTAX_FEATURES) & feature; |
0c3adfb8 GBY |
210 | } |
211 | ||
f67fbeae BV |
212 | INDIRECT_CALLABLE_DECLARE(unsigned int ip6_mtu(const struct dst_entry *)); |
213 | INDIRECT_CALLABLE_DECLARE(unsigned int ipv4_mtu(const struct dst_entry *)); | |
1da177e4 LT |
/* Path MTU for this dst. The indirect-call wrapper lets the common
 * IPv4/IPv6 implementations be called directly, avoiding a retpoline
 * penalty for the ops->mtu indirect call.
 */
static inline u32 dst_mtu(const struct dst_entry *dst)
{
	return INDIRECT_CALL_INET(dst->ops->mtu, ip6_mtu, ipv4_mtu, dst);
}
218 | ||
c1e20f7c SH |
219 | /* RTT metrics are stored in milliseconds for user ABI, but used as jiffies */ |
220 | static inline unsigned long dst_metric_rtt(const struct dst_entry *dst, int metric) | |
221 | { | |
222 | return msecs_to_jiffies(dst_metric(dst, metric)); | |
223 | } | |
224 | ||
1da177e4 | 225 | static inline int |
d33e4553 | 226 | dst_metric_locked(const struct dst_entry *dst, int metric) |
1da177e4 | 227 | { |
5af68891 | 228 | return dst_metric(dst, RTAX_LOCK) & (1 << metric); |
1da177e4 LT |
229 | } |
230 | ||
7f95e188 | 231 | static inline void dst_hold(struct dst_entry *dst) |
1da177e4 | 232 | { |
5635c10d ED |
233 | /* |
234 | * If your kernel compilation stops here, please check | |
bc9d3a9f | 235 | * the placement of __rcuref in struct dst_entry |
5635c10d | 236 | */ |
bc9d3a9f TG |
237 | BUILD_BUG_ON(offsetof(struct dst_entry, __rcuref) & 63); |
238 | WARN_ON(!rcuref_get(&dst->__rcuref)); | |
1da177e4 LT |
239 | } |
240 | ||
0da4af00 | 241 | static inline void dst_use_noref(struct dst_entry *dst, unsigned long time) |
03f49f34 | 242 | { |
32d18ab1 | 243 | if (unlikely(time != dst->lastuse)) { |
0da4af00 WW |
244 | dst->__use++; |
245 | dst->lastuse = time; | |
246 | } | |
03f49f34 PE |
247 | } |
248 | ||
7f95e188 | 249 | static inline struct dst_entry *dst_clone(struct dst_entry *dst) |
1da177e4 LT |
250 | { |
251 | if (dst) | |
222d7dbd | 252 | dst_hold(dst); |
1da177e4 LT |
253 | return dst; |
254 | } | |
255 | ||
a4023dd0 | 256 | void dst_release(struct dst_entry *dst); |
7fee226a | 257 | |
5f56f409 WW |
258 | void dst_release_immediate(struct dst_entry *dst); |
259 | ||
7fee226a ED |
260 | static inline void refdst_drop(unsigned long refdst) |
261 | { | |
262 | if (!(refdst & SKB_DST_NOREF)) | |
263 | dst_release((struct dst_entry *)(refdst & SKB_DST_PTRMASK)); | |
264 | } | |
265 | ||
266 | /** | |
267 | * skb_dst_drop - drops skb dst | |
268 | * @skb: buffer | |
269 | * | |
270 | * Drops dst reference count if a reference was taken. | |
271 | */ | |
adf30907 ED |
272 | static inline void skb_dst_drop(struct sk_buff *skb) |
273 | { | |
7fee226a ED |
274 | if (skb->_skb_refdst) { |
275 | refdst_drop(skb->_skb_refdst); | |
276 | skb->_skb_refdst = 0UL; | |
277 | } | |
278 | } | |
279 | ||
e79e2595 | 280 | static inline void __skb_dst_copy(struct sk_buff *nskb, unsigned long refdst) |
7fee226a | 281 | { |
8a886b14 | 282 | nskb->slow_gro |= !!refdst; |
e79e2595 | 283 | nskb->_skb_refdst = refdst; |
7fee226a ED |
284 | if (!(nskb->_skb_refdst & SKB_DST_NOREF)) |
285 | dst_clone(skb_dst(nskb)); | |
286 | } | |
287 | ||
e79e2595 JS |
288 | static inline void skb_dst_copy(struct sk_buff *nskb, const struct sk_buff *oskb) |
289 | { | |
290 | __skb_dst_copy(nskb, oskb->_skb_refdst); | |
291 | } | |
292 | ||
5037e9ef ED |
/**
 * dst_hold_safe - Take a reference on a dst if possible
 * @dst: pointer to dst entry
 *
 * This helper returns false if it could not safely
 * take a reference on a dst.
 */
static inline bool dst_hold_safe(struct dst_entry *dst)
{
	/* rcuref_get() fails once the entry can no longer be referenced */
	return rcuref_get(&dst->__rcuref);
}
304 | ||
/**
 * skb_dst_force - makes sure skb dst is refcounted
 * @skb: buffer
 *
 * If dst is not yet refcounted and not destroyed, grab a ref on it.
 * Returns true if dst is refcounted.
 */
static inline bool skb_dst_force(struct sk_buff *skb)
{
	if (skb_dst_is_noref(skb)) {
		struct dst_entry *dst = skb_dst(skb);

		/* noref dsts are only valid under RCU protection */
		WARN_ON(!rcu_read_lock_held());
		if (!dst_hold_safe(dst))
			dst = NULL;	/* entry already dead, drop it */

		skb->_skb_refdst = (unsigned long)dst;
		skb->slow_gro |= !!dst;
	}

	return skb->_skb_refdst != 0UL;
}
327 | ||
d19d56dd | 328 | |
290b895e ED |
/**
 * __skb_tunnel_rx - prepare skb for rx reinsert
 * @skb: buffer
 * @dev: tunnel device
 * @net: netns for packet i/o
 *
 * After decapsulation, packet is going to re-enter (netif_rx()) our stack,
 * so make some cleanups. (no accounting done)
 */
static inline void __skb_tunnel_rx(struct sk_buff *skb, struct net_device *dev,
				   struct net *net)
{
	skb->dev = dev;

	/*
	 * Clear hash so that we can recalculate the hash for the
	 * encapsulated packet, unless we have already determined the hash
	 * over the L4 4-tuple.
	 */
	skb_clear_hash_if_not_l4(skb);
	skb_set_queue_mapping(skb, 0);
	/* scrub only when crossing a netns boundary */
	skb_scrub_packet(skb, !net_eq(net, dev_net(dev)));
}
352 | ||
d19d56dd ED |
/**
 * skb_tunnel_rx - prepare skb for rx reinsert
 * @skb: buffer
 * @dev: tunnel device
 * @net: netns for packet i/o
 *
 * After decapsulation, packet is going to re-enter (netif_rx()) our stack,
 * so make some cleanups, and perform accounting.
 * Note: this accounting is not SMP safe.
 */
static inline void skb_tunnel_rx(struct sk_buff *skb, struct net_device *dev,
				 struct net *net)
{
	DEV_STATS_INC(dev, rx_packets);
	DEV_STATS_ADD(dev, rx_bytes, skb->len);
	__skb_tunnel_rx(skb, dev, net);
}
370 | ||
808c1b69 DB |
371 | static inline u32 dst_tclassid(const struct sk_buff *skb) |
372 | { | |
373 | #ifdef CONFIG_IP_ROUTE_CLASSID | |
374 | const struct dst_entry *dst; | |
375 | ||
376 | dst = skb_dst(skb); | |
377 | if (dst) | |
378 | return dst->tclassid; | |
379 | #endif | |
380 | return 0; | |
381 | } | |
382 | ||
ede2059d | 383 | int dst_discard_out(struct net *net, struct sock *sk, struct sk_buff *skb); |
aad88724 ED |
/* Discard @skb through the init-netns discard output path. */
static inline int dst_discard(struct sk_buff *skb)
{
	return dst_discard_out(&init_net, skb->sk, skb);
}
762c8dc7 | 388 | void *dst_alloc(struct dst_ops *ops, struct net_device *dev, |
a4023dd0 | 389 | int initial_obsolete, unsigned short flags); |
f38a9eb1 | 390 | void dst_init(struct dst_entry *dst, struct dst_ops *ops, |
762c8dc7 | 391 | struct net_device *dev, int initial_obsolete, |
f38a9eb1 | 392 | unsigned short flags); |
a4023dd0 | 393 | struct dst_entry *dst_destroy(struct dst_entry *dst); |
4a6ce2b6 | 394 | void dst_dev_put(struct dst_entry *dst); |
1da177e4 | 395 | |
1da177e4 LT |
/* Intentionally a no-op; neighbour confirmation is done via
 * dst_confirm_neigh() with an explicit destination address.
 */
static inline void dst_confirm(struct dst_entry *dst)
{
}
f2c31e32 | 399 | |
d3aaeb38 DM |
400 | static inline struct neighbour *dst_neigh_lookup(const struct dst_entry *dst, const void *daddr) |
401 | { | |
aaa0c23c ZZ |
402 | struct neighbour *n = dst->ops->neigh_lookup(dst, NULL, daddr); |
403 | return IS_ERR(n) ? NULL : n; | |
f894cbf8 DM |
404 | } |
405 | ||
406 | static inline struct neighbour *dst_neigh_lookup_skb(const struct dst_entry *dst, | |
407 | struct sk_buff *skb) | |
408 | { | |
0992d67b | 409 | struct neighbour *n; |
394de110 | 410 | |
0992d67b GN |
411 | if (WARN_ON_ONCE(!dst->ops->neigh_lookup)) |
412 | return NULL; | |
413 | ||
414 | n = dst->ops->neigh_lookup(dst, skb, NULL); | |
394de110 | 415 | |
aaa0c23c | 416 | return IS_ERR(n) ? NULL : n; |
d3aaeb38 DM |
417 | } |
418 | ||
63fca65d JA |
419 | static inline void dst_confirm_neigh(const struct dst_entry *dst, |
420 | const void *daddr) | |
421 | { | |
422 | if (dst->ops->confirm_neigh) | |
423 | dst->ops->confirm_neigh(dst, daddr); | |
424 | } | |
425 | ||
1da177e4 LT |
426 | static inline void dst_link_failure(struct sk_buff *skb) |
427 | { | |
adf30907 | 428 | struct dst_entry *dst = skb_dst(skb); |
1da177e4 LT |
429 | if (dst && dst->ops && dst->ops->link_failure) |
430 | dst->ops->link_failure(skb); | |
431 | } | |
432 | ||
433 | static inline void dst_set_expires(struct dst_entry *dst, int timeout) | |
434 | { | |
435 | unsigned long expires = jiffies + timeout; | |
436 | ||
437 | if (expires == 0) | |
438 | expires = 1; | |
439 | ||
440 | if (dst->expires == 0 || time_before(expires, dst->expires)) | |
441 | dst->expires = expires; | |
442 | } | |
443 | ||
6585d7dc BV |
444 | INDIRECT_CALLABLE_DECLARE(int ip6_output(struct net *, struct sock *, |
445 | struct sk_buff *)); | |
446 | INDIRECT_CALLABLE_DECLARE(int ip_output(struct net *, struct sock *, | |
447 | struct sk_buff *)); | |
1da177e4 | 448 | /* Output packet to network from transport. */ |
13206b6b | 449 | static inline int dst_output(struct net *net, struct sock *sk, struct sk_buff *skb) |
aad88724 | 450 | { |
6585d7dc BV |
451 | return INDIRECT_CALL_INET(skb_dst(skb)->output, |
452 | ip6_output, ip_output, | |
453 | net, sk, skb); | |
aad88724 | 454 | } |
1da177e4 | 455 | |
e43b2190 BV |
456 | INDIRECT_CALLABLE_DECLARE(int ip6_input(struct sk_buff *)); |
457 | INDIRECT_CALLABLE_DECLARE(int ip_local_deliver(struct sk_buff *)); | |
1da177e4 LT |
/* Input packet from network to transport. */
static inline int dst_input(struct sk_buff *skb)
{
	/* Indirect-call wrapper: the common IPv4/IPv6 input handlers are
	 * called directly, avoiding a retpoline for the indirect call.
	 */
	return INDIRECT_CALL_INET(skb_dst(skb)->input,
				  ip6_input, ip_local_deliver, skb);
}
464 | ||
bbd807df BV |
465 | INDIRECT_CALLABLE_DECLARE(struct dst_entry *ip6_dst_check(struct dst_entry *, |
466 | u32)); | |
467 | INDIRECT_CALLABLE_DECLARE(struct dst_entry *ipv4_dst_check(struct dst_entry *, | |
468 | u32)); | |
1da177e4 LT |
469 | static inline struct dst_entry *dst_check(struct dst_entry *dst, u32 cookie) |
470 | { | |
471 | if (dst->obsolete) | |
bbd807df BV |
472 | dst = INDIRECT_CALL_INET(dst->ops->check, ip6_dst_check, |
473 | ipv4_dst_check, dst, cookie); | |
1da177e4 LT |
474 | return dst; |
475 | } | |
476 | ||
815f4e57 HX |
477 | /* Flags for xfrm_lookup flags argument. */ |
478 | enum { | |
80c0bc9e | 479 | XFRM_LOOKUP_ICMP = 1 << 0, |
b8c203b2 | 480 | XFRM_LOOKUP_QUEUE = 1 << 1, |
ac37e251 | 481 | XFRM_LOOKUP_KEEP_DST_REF = 1 << 2, |
815f4e57 HX |
482 | }; |
483 | ||
1da177e4 LT |
484 | struct flowi; |
485 | #ifndef CONFIG_XFRM | |
452edd59 DM |
/* !CONFIG_XFRM stub: no transformation applied, pass the original
 * route straight through.
 */
static inline struct dst_entry *xfrm_lookup(struct net *net,
					    struct dst_entry *dst_orig,
					    const struct flowi *fl,
					    const struct sock *sk,
					    int flags)
{
	return dst_orig;
}
494 | ||
bc56b334 BW |
/* !CONFIG_XFRM stub: @if_id is ignored, original route is returned. */
static inline struct dst_entry *
xfrm_lookup_with_ifid(struct net *net, struct dst_entry *dst_orig,
		      const struct flowi *fl, const struct sock *sk,
		      int flags, u32 if_id)
{
	return dst_orig;
}
502 | ||
f92ee619 SK |
/* !CONFIG_XFRM stub: no policy routing, original route is returned. */
static inline struct dst_entry *xfrm_lookup_route(struct net *net,
						  struct dst_entry *dst_orig,
						  const struct flowi *fl,
						  const struct sock *sk,
						  int flags)
{
	return dst_orig;
}
e87b3998 VY |
511 | |
/* !CONFIG_XFRM stub: there is never an attached xfrm state. */
static inline struct xfrm_state *dst_xfrm(const struct dst_entry *dst)
{
	return NULL;
}
516 | ||
1da177e4 | 517 | #else |
a4023dd0 | 518 | struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig, |
6f9c9615 | 519 | const struct flowi *fl, const struct sock *sk, |
a4023dd0 | 520 | int flags); |
e87b3998 | 521 | |
bc56b334 BW |
522 | struct dst_entry *xfrm_lookup_with_ifid(struct net *net, |
523 | struct dst_entry *dst_orig, | |
524 | const struct flowi *fl, | |
525 | const struct sock *sk, int flags, | |
526 | u32 if_id); | |
527 | ||
f92ee619 | 528 | struct dst_entry *xfrm_lookup_route(struct net *net, struct dst_entry *dst_orig, |
6f9c9615 | 529 | const struct flowi *fl, const struct sock *sk, |
f92ee619 SK |
530 | int flags); |
531 | ||
e87b3998 VY |
/* skb attached with this dst needs transformation if dst->xfrm is valid */
static inline struct xfrm_state *dst_xfrm(const struct dst_entry *dst)
{
	return dst->xfrm;
}
1da177e4 | 537 | #endif |
1da177e4 | 538 | |
f15ca723 ND |
539 | static inline void skb_dst_update_pmtu(struct sk_buff *skb, u32 mtu) |
540 | { | |
541 | struct dst_entry *dst = skb_dst(skb); | |
542 | ||
543 | if (dst && dst->ops->update_pmtu) | |
bd085ef6 | 544 | dst->ops->update_pmtu(dst, NULL, skb, mtu, true); |
f15ca723 ND |
545 | } |
546 | ||
07dc35c6 HL |
547 | /* update dst pmtu but not do neighbor confirm */ |
548 | static inline void skb_dst_update_pmtu_no_confirm(struct sk_buff *skb, u32 mtu) | |
549 | { | |
550 | struct dst_entry *dst = skb_dst(skb); | |
551 | ||
552 | if (dst && dst->ops->update_pmtu) | |
553 | dst->ops->update_pmtu(dst, NULL, skb, mtu, false); | |
554 | } | |
555 | ||
c4c877b2 DB |
556 | struct dst_entry *dst_blackhole_check(struct dst_entry *dst, u32 cookie); |
557 | void dst_blackhole_update_pmtu(struct dst_entry *dst, struct sock *sk, | |
558 | struct sk_buff *skb, u32 mtu, bool confirm_neigh); | |
559 | void dst_blackhole_redirect(struct dst_entry *dst, struct sock *sk, | |
560 | struct sk_buff *skb); | |
561 | u32 *dst_blackhole_cow_metrics(struct dst_entry *dst, unsigned long old); | |
562 | struct neighbour *dst_blackhole_neigh_lookup(const struct dst_entry *dst, | |
563 | struct sk_buff *skb, | |
564 | const void *daddr); | |
565 | unsigned int dst_blackhole_mtu(const struct dst_entry *dst); | |
566 | ||
1da177e4 | 567 | #endif /* _NET_DST_H */ |