net/openvswitch/actions.c
/*
 * Copyright (c) 2007-2014 Nicira, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/skbuff.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/openvswitch.h>
#include <linux/netfilter_ipv6.h>
#include <linux/sctp.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/in6.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>

#include <net/dst.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/ip6_fib.h>
#include <net/checksum.h>
#include <net/dsfield.h>
#include <net/mpls.h>
#include <net/sctp/checksum.h>

#include "datapath.h"
#include "flow.h"
#include "conntrack.h"
#include "vport.h"

static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
			      struct sw_flow_key *key,
			      const struct nlattr *attr, int len);

struct deferred_action {
	struct sk_buff *skb;
	const struct nlattr *actions;

	/* Store pkt_key clone when creating deferred action. */
	struct sw_flow_key pkt_key;
};

#define MAX_L2_LEN	(VLAN_ETH_HLEN + 3 * MPLS_HLEN)
struct ovs_frag_data {
	unsigned long dst;
	struct vport *vport;
	struct ovs_skb_cb cb;
	__be16 inner_protocol;
	u16 network_offset;	/* valid only for MPLS */
	u16 vlan_tci;
	__be16 vlan_proto;
	unsigned int l2_len;
	u8 l2_data[MAX_L2_LEN];
};

static DEFINE_PER_CPU(struct ovs_frag_data, ovs_frag_data_storage);

#define DEFERRED_ACTION_FIFO_SIZE 10
#define OVS_RECURSION_LIMIT 5
#define OVS_DEFERRED_ACTION_THRESHOLD (OVS_RECURSION_LIMIT - 2)
struct action_fifo {
	int head;
	int tail;
	/* Deferred action fifo queue storage. */
	struct deferred_action fifo[DEFERRED_ACTION_FIFO_SIZE];
};

struct recirc_keys {
	struct sw_flow_key key[OVS_DEFERRED_ACTION_THRESHOLD];
};

static struct action_fifo __percpu *action_fifos;
static struct recirc_keys __percpu *recirc_keys;
static DEFINE_PER_CPU(int, exec_actions_level);

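/* Explanatory note on the recursion and deferral bookkeeping above:
 * exec_actions_level counts how deeply ovs_execute_actions() is nested on
 * this CPU.  Recirculation within the first OVS_DEFERRED_ACTION_THRESHOLD
 * levels reuses a per-CPU sw_flow_key slot from 'recirc_keys' and simply
 * recurses; anything deeper (and nested sample() action lists) is queued in
 * the per-CPU 'action_fifos' and drained by process_deferred_actions() once
 * the outermost call unwinds.  OVS_RECURSION_LIMIT is the hard cap past
 * which packets are dropped.
 */
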
static void action_fifo_init(struct action_fifo *fifo)
{
	fifo->head = 0;
	fifo->tail = 0;
}

static bool action_fifo_is_empty(const struct action_fifo *fifo)
{
	return (fifo->head == fifo->tail);
}

static struct deferred_action *action_fifo_get(struct action_fifo *fifo)
{
	if (action_fifo_is_empty(fifo))
		return NULL;

	return &fifo->fifo[fifo->tail++];
}

static struct deferred_action *action_fifo_put(struct action_fifo *fifo)
{
	if (fifo->head >= DEFERRED_ACTION_FIFO_SIZE - 1)
		return NULL;

	return &fifo->fifo[fifo->head++];
}

/* Return true if fifo is not full */
static struct deferred_action *add_deferred_actions(struct sk_buff *skb,
						    const struct sw_flow_key *key,
						    const struct nlattr *attr)
{
	struct action_fifo *fifo;
	struct deferred_action *da;

	fifo = this_cpu_ptr(action_fifos);
	da = action_fifo_put(fifo);
	if (da) {
		da->skb = skb;
		da->actions = attr;
		da->pkt_key = *key;
	}

	return da;
}

static void invalidate_flow_key(struct sw_flow_key *key)
{
	key->eth.type = htons(0);
}

static bool is_flow_key_valid(const struct sw_flow_key *key)
{
	return !!key->eth.type;
}

static void update_ethertype(struct sk_buff *skb, struct ethhdr *hdr,
			     __be16 ethertype)
{
	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		__be16 diff[] = { ~(hdr->h_proto), ethertype };

		skb->csum = ~csum_partial((char *)diff, sizeof(diff),
					  ~skb->csum);
	}

	hdr->h_proto = ethertype;
}

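/* Note on update_ethertype() above: when the NIC has supplied a full packet
 * checksum (CHECKSUM_COMPLETE), skb->csum must stay in sync with the bytes
 * it covers, so the old h_proto is folded out of the sum and the new
 * ethertype folded in via the two-element 'diff' array before the header
 * field itself is rewritten.
 */
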
static int push_mpls(struct sk_buff *skb, struct sw_flow_key *key,
		     const struct ovs_action_push_mpls *mpls)
{
	struct mpls_shim_hdr *new_mpls_lse;

	/* The networking stack does not allow simultaneous Tunnel and MPLS GSO. */
	if (skb->encapsulation)
		return -ENOTSUPP;

	if (skb_cow_head(skb, MPLS_HLEN) < 0)
		return -ENOMEM;

	if (!skb->inner_protocol) {
		skb_set_inner_network_header(skb, skb->mac_len);
		skb_set_inner_protocol(skb, skb->protocol);
	}

	skb_push(skb, MPLS_HLEN);
	memmove(skb_mac_header(skb) - MPLS_HLEN, skb_mac_header(skb),
		skb->mac_len);
	skb_reset_mac_header(skb);
	skb_set_network_header(skb, skb->mac_len);

	new_mpls_lse = mpls_hdr(skb);
	new_mpls_lse->label_stack_entry = mpls->mpls_lse;

	skb_postpush_rcsum(skb, new_mpls_lse, MPLS_HLEN);

	update_ethertype(skb, eth_hdr(skb), mpls->mpls_ethertype);
	skb->protocol = mpls->mpls_ethertype;

	invalidate_flow_key(key);
	return 0;
}

static int pop_mpls(struct sk_buff *skb, struct sw_flow_key *key,
		    const __be16 ethertype)
{
	struct ethhdr *hdr;
	int err;

	err = skb_ensure_writable(skb, skb->mac_len + MPLS_HLEN);
	if (unlikely(err))
		return err;

	skb_postpull_rcsum(skb, mpls_hdr(skb), MPLS_HLEN);

	memmove(skb_mac_header(skb) + MPLS_HLEN, skb_mac_header(skb),
		skb->mac_len);

	__skb_pull(skb, MPLS_HLEN);
	skb_reset_mac_header(skb);
	skb_set_network_header(skb, skb->mac_len);

	/* mpls_hdr() is used to locate the ethertype field correctly in the
	 * presence of VLAN tags.
	 */
	hdr = (struct ethhdr *)((void *)mpls_hdr(skb) - ETH_HLEN);
	update_ethertype(skb, hdr, ethertype);
	if (eth_p_mpls(skb->protocol))
		skb->protocol = ethertype;

	invalidate_flow_key(key);
	return 0;
}

static int set_mpls(struct sk_buff *skb, struct sw_flow_key *flow_key,
		    const __be32 *mpls_lse, const __be32 *mask)
{
	struct mpls_shim_hdr *stack;
	__be32 lse;
	int err;

	err = skb_ensure_writable(skb, skb->mac_len + MPLS_HLEN);
	if (unlikely(err))
		return err;

	stack = mpls_hdr(skb);
	lse = OVS_MASKED(stack->label_stack_entry, *mpls_lse, *mask);
	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		__be32 diff[] = { ~(stack->label_stack_entry), lse };

		skb->csum = ~csum_partial((char *)diff, sizeof(diff),
					  ~skb->csum);
	}

	stack->label_stack_entry = lse;
	flow_key->mpls.top_lse = lse;
	return 0;
}

static int pop_vlan(struct sk_buff *skb, struct sw_flow_key *key)
{
	int err;

	err = skb_vlan_pop(skb);
	if (skb_vlan_tag_present(skb)) {
		invalidate_flow_key(key);
	} else {
		key->eth.vlan.tci = 0;
		key->eth.vlan.tpid = 0;
	}
	return err;
}

static int push_vlan(struct sk_buff *skb, struct sw_flow_key *key,
		     const struct ovs_action_push_vlan *vlan)
{
	if (skb_vlan_tag_present(skb)) {
		invalidate_flow_key(key);
	} else {
		key->eth.vlan.tci = vlan->vlan_tci;
		key->eth.vlan.tpid = vlan->vlan_tpid;
	}
	return skb_vlan_push(skb, vlan->vlan_tpid,
			     ntohs(vlan->vlan_tci) & ~VLAN_TAG_PRESENT);
}

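/* Note: push_mpls()/pop_mpls() and pop_vlan()/push_vlan() above call
 * invalidate_flow_key() (or update the VLAN key fields directly) because
 * the cached flow key no longer describes the packet after the header
 * change.  Later actions that need an up-to-date key (recirculation,
 * conntrack) check is_flow_key_valid() and re-extract it with
 * ovs_flow_key_update().
 */
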
/* 'src' is already properly masked. */
static void ether_addr_copy_masked(u8 *dst_, const u8 *src_, const u8 *mask_)
{
	u16 *dst = (u16 *)dst_;
	const u16 *src = (const u16 *)src_;
	const u16 *mask = (const u16 *)mask_;

	OVS_SET_MASKED(dst[0], src[0], mask[0]);
	OVS_SET_MASKED(dst[1], src[1], mask[1]);
	OVS_SET_MASKED(dst[2], src[2], mask[2]);
}

static int set_eth_addr(struct sk_buff *skb, struct sw_flow_key *flow_key,
			const struct ovs_key_ethernet *key,
			const struct ovs_key_ethernet *mask)
{
	int err;

	err = skb_ensure_writable(skb, ETH_HLEN);
	if (unlikely(err))
		return err;

	skb_postpull_rcsum(skb, eth_hdr(skb), ETH_ALEN * 2);

	ether_addr_copy_masked(eth_hdr(skb)->h_source, key->eth_src,
			       mask->eth_src);
	ether_addr_copy_masked(eth_hdr(skb)->h_dest, key->eth_dst,
			       mask->eth_dst);

	skb_postpush_rcsum(skb, eth_hdr(skb), ETH_ALEN * 2);

	ether_addr_copy(flow_key->eth.src, eth_hdr(skb)->h_source);
	ether_addr_copy(flow_key->eth.dst, eth_hdr(skb)->h_dest);
	return 0;
}

static void update_ip_l4_checksum(struct sk_buff *skb, struct iphdr *nh,
				  __be32 addr, __be32 new_addr)
{
	int transport_len = skb->len - skb_transport_offset(skb);

	if (nh->frag_off & htons(IP_OFFSET))
		return;

	if (nh->protocol == IPPROTO_TCP) {
		if (likely(transport_len >= sizeof(struct tcphdr)))
			inet_proto_csum_replace4(&tcp_hdr(skb)->check, skb,
						 addr, new_addr, true);
	} else if (nh->protocol == IPPROTO_UDP) {
		if (likely(transport_len >= sizeof(struct udphdr))) {
			struct udphdr *uh = udp_hdr(skb);

			if (uh->check || skb->ip_summed == CHECKSUM_PARTIAL) {
				inet_proto_csum_replace4(&uh->check, skb,
							 addr, new_addr, true);
				if (!uh->check)
					uh->check = CSUM_MANGLED_0;
			}
		}
	}
}

static void set_ip_addr(struct sk_buff *skb, struct iphdr *nh,
			__be32 *addr, __be32 new_addr)
{
	update_ip_l4_checksum(skb, nh, *addr, new_addr);
	csum_replace4(&nh->check, *addr, new_addr);
	skb_clear_hash(skb);
	*addr = new_addr;
}

static void update_ipv6_checksum(struct sk_buff *skb, u8 l4_proto,
				 __be32 addr[4], const __be32 new_addr[4])
{
	int transport_len = skb->len - skb_transport_offset(skb);

	if (l4_proto == NEXTHDR_TCP) {
		if (likely(transport_len >= sizeof(struct tcphdr)))
			inet_proto_csum_replace16(&tcp_hdr(skb)->check, skb,
						  addr, new_addr, true);
	} else if (l4_proto == NEXTHDR_UDP) {
		if (likely(transport_len >= sizeof(struct udphdr))) {
			struct udphdr *uh = udp_hdr(skb);

			if (uh->check || skb->ip_summed == CHECKSUM_PARTIAL) {
				inet_proto_csum_replace16(&uh->check, skb,
							  addr, new_addr, true);
				if (!uh->check)
					uh->check = CSUM_MANGLED_0;
			}
		}
	} else if (l4_proto == NEXTHDR_ICMP) {
		if (likely(transport_len >= sizeof(struct icmp6hdr)))
			inet_proto_csum_replace16(&icmp6_hdr(skb)->icmp6_cksum,
						  skb, addr, new_addr, true);
	}
}

static void mask_ipv6_addr(const __be32 old[4], const __be32 addr[4],
			   const __be32 mask[4], __be32 masked[4])
{
	masked[0] = OVS_MASKED(old[0], addr[0], mask[0]);
	masked[1] = OVS_MASKED(old[1], addr[1], mask[1]);
	masked[2] = OVS_MASKED(old[2], addr[2], mask[2]);
	masked[3] = OVS_MASKED(old[3], addr[3], mask[3]);
}

static void set_ipv6_addr(struct sk_buff *skb, u8 l4_proto,
			  __be32 addr[4], const __be32 new_addr[4],
			  bool recalculate_csum)
{
	if (recalculate_csum)
		update_ipv6_checksum(skb, l4_proto, addr, new_addr);

	skb_clear_hash(skb);
	memcpy(addr, new_addr, sizeof(__be32[4]));
}

static void set_ipv6_fl(struct ipv6hdr *nh, u32 fl, u32 mask)
{
	/* Bits 21-24 are always unmasked, so this retains their values. */
	OVS_SET_MASKED(nh->flow_lbl[0], (u8)(fl >> 16), (u8)(mask >> 16));
	OVS_SET_MASKED(nh->flow_lbl[1], (u8)(fl >> 8), (u8)(mask >> 8));
	OVS_SET_MASKED(nh->flow_lbl[2], (u8)fl, (u8)mask);
}

static void set_ip_ttl(struct sk_buff *skb, struct iphdr *nh, u8 new_ttl,
		       u8 mask)
{
	new_ttl = OVS_MASKED(nh->ttl, new_ttl, mask);

	csum_replace2(&nh->check, htons(nh->ttl << 8), htons(new_ttl << 8));
	nh->ttl = new_ttl;
}

static int set_ipv4(struct sk_buff *skb, struct sw_flow_key *flow_key,
		    const struct ovs_key_ipv4 *key,
		    const struct ovs_key_ipv4 *mask)
{
	struct iphdr *nh;
	__be32 new_addr;
	int err;

	err = skb_ensure_writable(skb, skb_network_offset(skb) +
				  sizeof(struct iphdr));
	if (unlikely(err))
		return err;

	nh = ip_hdr(skb);

	/* Setting an IP address is typically only a side effect of
	 * matching on it in the current userspace implementation, so it
	 * makes sense to check if the value actually changed.
	 */
	if (mask->ipv4_src) {
		new_addr = OVS_MASKED(nh->saddr, key->ipv4_src, mask->ipv4_src);

		if (unlikely(new_addr != nh->saddr)) {
			set_ip_addr(skb, nh, &nh->saddr, new_addr);
			flow_key->ipv4.addr.src = new_addr;
		}
	}
	if (mask->ipv4_dst) {
		new_addr = OVS_MASKED(nh->daddr, key->ipv4_dst, mask->ipv4_dst);

		if (unlikely(new_addr != nh->daddr)) {
			set_ip_addr(skb, nh, &nh->daddr, new_addr);
			flow_key->ipv4.addr.dst = new_addr;
		}
	}
	if (mask->ipv4_tos) {
		ipv4_change_dsfield(nh, ~mask->ipv4_tos, key->ipv4_tos);
		flow_key->ip.tos = nh->tos;
	}
	if (mask->ipv4_ttl) {
		set_ip_ttl(skb, nh, key->ipv4_ttl, mask->ipv4_ttl);
		flow_key->ip.ttl = nh->ttl;
	}

	return 0;
}

static bool is_ipv6_mask_nonzero(const __be32 addr[4])
{
	return !!(addr[0] | addr[1] | addr[2] | addr[3]);
}

static int set_ipv6(struct sk_buff *skb, struct sw_flow_key *flow_key,
		    const struct ovs_key_ipv6 *key,
		    const struct ovs_key_ipv6 *mask)
{
	struct ipv6hdr *nh;
	int err;

	err = skb_ensure_writable(skb, skb_network_offset(skb) +
				  sizeof(struct ipv6hdr));
	if (unlikely(err))
		return err;

	nh = ipv6_hdr(skb);

	/* Setting an IP address is typically only a side effect of
	 * matching on it in the current userspace implementation, so it
	 * makes sense to check if the value actually changed.
	 */
	if (is_ipv6_mask_nonzero(mask->ipv6_src)) {
		__be32 *saddr = (__be32 *)&nh->saddr;
		__be32 masked[4];

		mask_ipv6_addr(saddr, key->ipv6_src, mask->ipv6_src, masked);

		if (unlikely(memcmp(saddr, masked, sizeof(masked)))) {
			set_ipv6_addr(skb, flow_key->ip.proto, saddr, masked,
				      true);
			memcpy(&flow_key->ipv6.addr.src, masked,
			       sizeof(flow_key->ipv6.addr.src));
		}
	}
	if (is_ipv6_mask_nonzero(mask->ipv6_dst)) {
		unsigned int offset = 0;
		int flags = IP6_FH_F_SKIP_RH;
		bool recalc_csum = true;
		__be32 *daddr = (__be32 *)&nh->daddr;
		__be32 masked[4];

		mask_ipv6_addr(daddr, key->ipv6_dst, mask->ipv6_dst, masked);

		if (unlikely(memcmp(daddr, masked, sizeof(masked)))) {
			if (ipv6_ext_hdr(nh->nexthdr))
				recalc_csum = (ipv6_find_hdr(skb, &offset,
							     NEXTHDR_ROUTING,
							     NULL, &flags)
					       != NEXTHDR_ROUTING);

			set_ipv6_addr(skb, flow_key->ip.proto, daddr, masked,
				      recalc_csum);
			memcpy(&flow_key->ipv6.addr.dst, masked,
			       sizeof(flow_key->ipv6.addr.dst));
		}
	}
	if (mask->ipv6_tclass) {
		ipv6_change_dsfield(nh, ~mask->ipv6_tclass, key->ipv6_tclass);
		flow_key->ip.tos = ipv6_get_dsfield(nh);
	}
	if (mask->ipv6_label) {
		set_ipv6_fl(nh, ntohl(key->ipv6_label),
			    ntohl(mask->ipv6_label));
		flow_key->ipv6.label =
		    *(__be32 *)nh & htonl(IPV6_FLOWINFO_FLOWLABEL);
	}
	if (mask->ipv6_hlimit) {
		OVS_SET_MASKED(nh->hop_limit, key->ipv6_hlimit,
			       mask->ipv6_hlimit);
		flow_key->ip.ttl = nh->hop_limit;
	}
	return 0;
}

/* Must follow skb_ensure_writable() since that can move the skb data. */
static void set_tp_port(struct sk_buff *skb, __be16 *port,
			__be16 new_port, __sum16 *check)
{
	inet_proto_csum_replace2(check, skb, *port, new_port, false);
	*port = new_port;
}

static int set_udp(struct sk_buff *skb, struct sw_flow_key *flow_key,
		   const struct ovs_key_udp *key,
		   const struct ovs_key_udp *mask)
{
	struct udphdr *uh;
	__be16 src, dst;
	int err;

	err = skb_ensure_writable(skb, skb_transport_offset(skb) +
				  sizeof(struct udphdr));
	if (unlikely(err))
		return err;

	uh = udp_hdr(skb);
	/* Either of the masks is non-zero, so do not bother checking them. */
	src = OVS_MASKED(uh->source, key->udp_src, mask->udp_src);
	dst = OVS_MASKED(uh->dest, key->udp_dst, mask->udp_dst);

	if (uh->check && skb->ip_summed != CHECKSUM_PARTIAL) {
		if (likely(src != uh->source)) {
			set_tp_port(skb, &uh->source, src, &uh->check);
			flow_key->tp.src = src;
		}
		if (likely(dst != uh->dest)) {
			set_tp_port(skb, &uh->dest, dst, &uh->check);
			flow_key->tp.dst = dst;
		}

		if (unlikely(!uh->check))
			uh->check = CSUM_MANGLED_0;
	} else {
		uh->source = src;
		uh->dest = dst;
		flow_key->tp.src = src;
		flow_key->tp.dst = dst;
	}

	skb_clear_hash(skb);

	return 0;
}

static int set_tcp(struct sk_buff *skb, struct sw_flow_key *flow_key,
		   const struct ovs_key_tcp *key,
		   const struct ovs_key_tcp *mask)
{
	struct tcphdr *th;
	__be16 src, dst;
	int err;

	err = skb_ensure_writable(skb, skb_transport_offset(skb) +
				  sizeof(struct tcphdr));
	if (unlikely(err))
		return err;

	th = tcp_hdr(skb);
	src = OVS_MASKED(th->source, key->tcp_src, mask->tcp_src);
	if (likely(src != th->source)) {
		set_tp_port(skb, &th->source, src, &th->check);
		flow_key->tp.src = src;
	}
	dst = OVS_MASKED(th->dest, key->tcp_dst, mask->tcp_dst);
	if (likely(dst != th->dest)) {
		set_tp_port(skb, &th->dest, dst, &th->check);
		flow_key->tp.dst = dst;
	}
	skb_clear_hash(skb);

	return 0;
}

static int set_sctp(struct sk_buff *skb, struct sw_flow_key *flow_key,
		    const struct ovs_key_sctp *key,
		    const struct ovs_key_sctp *mask)
{
	unsigned int sctphoff = skb_transport_offset(skb);
	struct sctphdr *sh;
	__le32 old_correct_csum, new_csum, old_csum;
	int err;

	err = skb_ensure_writable(skb, sctphoff + sizeof(struct sctphdr));
	if (unlikely(err))
		return err;

	sh = sctp_hdr(skb);
	old_csum = sh->checksum;
	old_correct_csum = sctp_compute_cksum(skb, sctphoff);

	sh->source = OVS_MASKED(sh->source, key->sctp_src, mask->sctp_src);
	sh->dest = OVS_MASKED(sh->dest, key->sctp_dst, mask->sctp_dst);

	new_csum = sctp_compute_cksum(skb, sctphoff);

	/* Carry any checksum errors through. */
	sh->checksum = old_csum ^ old_correct_csum ^ new_csum;

	skb_clear_hash(skb);
	flow_key->tp.src = sh->source;
	flow_key->tp.dst = sh->dest;

	return 0;
}

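/* Note on set_sctp() above: the new checksum is written as
 * old_csum ^ old_correct_csum ^ new_csum, so a packet that arrived with a
 * corrupt SCTP checksum keeps an equally incorrect checksum after the port
 * rewrite instead of being silently "repaired" by the datapath.
 */
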
static int ovs_vport_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct ovs_frag_data *data = this_cpu_ptr(&ovs_frag_data_storage);
	struct vport *vport = data->vport;

	if (skb_cow_head(skb, data->l2_len) < 0) {
		kfree_skb(skb);
		return -ENOMEM;
	}

	__skb_dst_copy(skb, data->dst);
	*OVS_CB(skb) = data->cb;
	skb->inner_protocol = data->inner_protocol;
	skb->vlan_tci = data->vlan_tci;
	skb->vlan_proto = data->vlan_proto;

	/* Reconstruct the MAC header. */
	skb_push(skb, data->l2_len);
	memcpy(skb->data, &data->l2_data, data->l2_len);
	skb_postpush_rcsum(skb, skb->data, data->l2_len);
	skb_reset_mac_header(skb);

	if (eth_p_mpls(skb->protocol)) {
		skb->inner_network_header = skb->network_header;
		skb_set_network_header(skb, data->network_offset);
		skb_reset_mac_len(skb);
	}

	ovs_vport_send(vport, skb);
	return 0;
}

static unsigned int
ovs_dst_get_mtu(const struct dst_entry *dst)
{
	return dst->dev->mtu;
}

static struct dst_ops ovs_dst_ops = {
	.family = AF_UNSPEC,
	.mtu = ovs_dst_get_mtu,
};

/* prepare_frag() is called once per (larger-than-MTU) frame; its inverse is
 * ovs_vport_output(), which is called once per fragmented packet.
 */
static void prepare_frag(struct vport *vport, struct sk_buff *skb,
			 u16 orig_network_offset)
{
	unsigned int hlen = skb_network_offset(skb);
	struct ovs_frag_data *data;

	data = this_cpu_ptr(&ovs_frag_data_storage);
	data->dst = skb->_skb_refdst;
	data->vport = vport;
	data->cb = *OVS_CB(skb);
	data->inner_protocol = skb->inner_protocol;
	data->network_offset = orig_network_offset;
	data->vlan_tci = skb->vlan_tci;
	data->vlan_proto = skb->vlan_proto;
	data->l2_len = hlen;
	memcpy(&data->l2_data, skb->data, hlen);

	memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
	skb_pull(skb, hlen);
}

static void ovs_fragment(struct net *net, struct vport *vport,
			 struct sk_buff *skb, u16 mru, __be16 ethertype)
{
	u16 orig_network_offset = 0;

	if (eth_p_mpls(skb->protocol)) {
		orig_network_offset = skb_network_offset(skb);
		skb->network_header = skb->inner_network_header;
	}

	if (skb_network_offset(skb) > MAX_L2_LEN) {
		OVS_NLERR(1, "L2 header too long to fragment");
		goto err;
	}

	if (ethertype == htons(ETH_P_IP)) {
		struct dst_entry ovs_dst;
		unsigned long orig_dst;

		prepare_frag(vport, skb, orig_network_offset);
		dst_init(&ovs_dst, &ovs_dst_ops, NULL, 1,
			 DST_OBSOLETE_NONE, DST_NOCOUNT);
		ovs_dst.dev = vport->dev;

		orig_dst = skb->_skb_refdst;
		skb_dst_set_noref(skb, &ovs_dst);
		IPCB(skb)->frag_max_size = mru;

		ip_do_fragment(net, skb->sk, skb, ovs_vport_output);
		refdst_drop(orig_dst);
	} else if (ethertype == htons(ETH_P_IPV6)) {
		const struct nf_ipv6_ops *v6ops = nf_get_ipv6_ops();
		unsigned long orig_dst;
		struct rt6_info ovs_rt;

		if (!v6ops) {
			goto err;
		}

		prepare_frag(vport, skb, orig_network_offset);
		memset(&ovs_rt, 0, sizeof(ovs_rt));
		dst_init(&ovs_rt.dst, &ovs_dst_ops, NULL, 1,
			 DST_OBSOLETE_NONE, DST_NOCOUNT);
		ovs_rt.dst.dev = vport->dev;

		orig_dst = skb->_skb_refdst;
		skb_dst_set_noref(skb, &ovs_rt.dst);
		IP6CB(skb)->frag_max_size = mru;

		v6ops->fragment(net, skb->sk, skb, ovs_vport_output);
		refdst_drop(orig_dst);
	} else {
		WARN_ONCE(1, "Failed fragment ->%s: eth=%04x, MRU=%d, MTU=%d.",
			  ovs_vport_name(vport), ntohs(ethertype), mru,
			  vport->dev->mtu);
		goto err;
	}

	return;
err:
	kfree_skb(skb);
}

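/* Fragmentation design note: ovs_fragment() borrows the IPv4/IPv6 stack's
 * fragmentation code (ip_do_fragment() / v6ops->fragment()) by installing a
 * throwaway, non-refcounted dst whose ->dev supplies the egress MTU.
 * prepare_frag() stashes the original L2 header and skb metadata in the
 * per-CPU ovs_frag_data_storage, and ovs_vport_output() restores them on
 * every fragment before handing it to ovs_vport_send().
 */
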
static void do_output(struct datapath *dp, struct sk_buff *skb, int out_port,
		      struct sw_flow_key *key)
{
	struct vport *vport = ovs_vport_rcu(dp, out_port);

	if (likely(vport)) {
		u16 mru = OVS_CB(skb)->mru;
		u32 cutlen = OVS_CB(skb)->cutlen;

		if (unlikely(cutlen > 0)) {
			if (skb->len - cutlen > ETH_HLEN)
				pskb_trim(skb, skb->len - cutlen);
			else
				pskb_trim(skb, ETH_HLEN);
		}

		if (likely(!mru || (skb->len <= mru + ETH_HLEN))) {
			ovs_vport_send(vport, skb);
		} else if (mru <= vport->dev->mtu) {
			struct net *net = read_pnet(&dp->net);
			__be16 ethertype = key->eth.type;

			if (!is_flow_key_valid(key)) {
				if (eth_p_mpls(skb->protocol))
					ethertype = skb->inner_protocol;
				else
					ethertype = vlan_get_protocol(skb);
			}

			ovs_fragment(net, vport, skb, mru, ethertype);
		} else {
			kfree_skb(skb);
		}
	} else {
		kfree_skb(skb);
	}
}

static int output_userspace(struct datapath *dp, struct sk_buff *skb,
			    struct sw_flow_key *key, const struct nlattr *attr,
			    const struct nlattr *actions, int actions_len,
			    uint32_t cutlen)
{
	struct dp_upcall_info upcall;
	const struct nlattr *a;
	int rem;

	memset(&upcall, 0, sizeof(upcall));
	upcall.cmd = OVS_PACKET_CMD_ACTION;
	upcall.mru = OVS_CB(skb)->mru;

	for (a = nla_data(attr), rem = nla_len(attr); rem > 0;
	     a = nla_next(a, &rem)) {
		switch (nla_type(a)) {
		case OVS_USERSPACE_ATTR_USERDATA:
			upcall.userdata = a;
			break;

		case OVS_USERSPACE_ATTR_PID:
			upcall.portid = nla_get_u32(a);
			break;

		case OVS_USERSPACE_ATTR_EGRESS_TUN_PORT: {
			/* Get out tunnel info. */
			struct vport *vport;

			vport = ovs_vport_rcu(dp, nla_get_u32(a));
			if (vport) {
				int err;

				err = dev_fill_metadata_dst(vport->dev, skb);
				if (!err)
					upcall.egress_tun_info = skb_tunnel_info(skb);
			}

			break;
		}

		case OVS_USERSPACE_ATTR_ACTIONS: {
			/* Include actions. */
			upcall.actions = actions;
			upcall.actions_len = actions_len;
			break;
		}

		} /* End of switch. */
	}

	return ovs_dp_upcall(dp, skb, key, &upcall, cutlen);
}

static int sample(struct datapath *dp, struct sk_buff *skb,
		  struct sw_flow_key *key, const struct nlattr *attr,
		  const struct nlattr *actions, int actions_len)
{
	const struct nlattr *acts_list = NULL;
	const struct nlattr *a;
	int rem;
	u32 cutlen = 0;

	for (a = nla_data(attr), rem = nla_len(attr); rem > 0;
	     a = nla_next(a, &rem)) {
		u32 probability;

		switch (nla_type(a)) {
		case OVS_SAMPLE_ATTR_PROBABILITY:
			probability = nla_get_u32(a);
			if (!probability || prandom_u32() > probability)
				return 0;
			break;

		case OVS_SAMPLE_ATTR_ACTIONS:
			acts_list = a;
			break;
		}
	}

	rem = nla_len(acts_list);
	a = nla_data(acts_list);

	/* Actions list is empty, do nothing */
	if (unlikely(!rem))
		return 0;

	/* The only known usage of sample action is having a single user-space
	 * action, or having a truncate action followed by a single user-space
	 * action. Treat this usage as a special case.
	 * The output_userspace() should clone the skb to be sent to the
	 * user space. This skb will be consumed by its caller.
	 */
	if (unlikely(nla_type(a) == OVS_ACTION_ATTR_TRUNC)) {
		struct ovs_action_trunc *trunc = nla_data(a);

		if (skb->len > trunc->max_len)
			cutlen = skb->len - trunc->max_len;

		a = nla_next(a, &rem);
	}

	if (likely(nla_type(a) == OVS_ACTION_ATTR_USERSPACE &&
		   nla_is_last(a, rem)))
		return output_userspace(dp, skb, key, a, actions,
					actions_len, cutlen);

	skb = skb_clone(skb, GFP_ATOMIC);
	if (!skb)
		/* Skip the sample action when out of memory. */
		return 0;

	if (!add_deferred_actions(skb, key, a)) {
		if (net_ratelimit())
			pr_warn("%s: deferred actions limit reached, dropping sample action\n",
				ovs_dp_name(dp));

		kfree_skb(skb);
	}
	return 0;
}

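/* Note on sample() above: the common case of a sample action whose nested
 * list is a single userspace action (optionally preceded by a truncate) is
 * executed in place via output_userspace(), with the truncation expressed
 * as 'cutlen'.  Any other nested action list runs on a clone of the skb
 * through the deferred-action FIFO, and the sample is dropped if that FIFO
 * is full.
 */
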
static void execute_hash(struct sk_buff *skb, struct sw_flow_key *key,
			 const struct nlattr *attr)
{
	struct ovs_action_hash *hash_act = nla_data(attr);
	u32 hash = 0;

	/* OVS_HASH_ALG_L4 is the only possible hash algorithm. */
	hash = skb_get_hash(skb);
	hash = jhash_1word(hash, hash_act->hash_basis);
	if (!hash)
		hash = 0x1;

	key->ovs_flow_hash = hash;
}

static int execute_set_action(struct sk_buff *skb,
			      struct sw_flow_key *flow_key,
			      const struct nlattr *a)
{
	/* Only tunnel set execution is supported without a mask. */
	if (nla_type(a) == OVS_KEY_ATTR_TUNNEL_INFO) {
		struct ovs_tunnel_info *tun = nla_data(a);

		skb_dst_drop(skb);
		dst_hold((struct dst_entry *)tun->tun_dst);
		skb_dst_set(skb, (struct dst_entry *)tun->tun_dst);
		return 0;
	}

	return -EINVAL;
}

/* Mask is at the midpoint of the data. */
#define get_mask(a, type) ((const type)nla_data(a) + 1)

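/* Layout assumed by get_mask(): a masked-set attribute carries the value
 * structure immediately followed by a mask of the same type, so
 * "(const type)nla_data(a) + 1" points at the mask half.  For example, an
 * OVS_KEY_ATTR_UDP payload here is two struct ovs_key_udp back to back:
 * the ports first, the port masks second.
 */
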
static int execute_masked_set_action(struct sk_buff *skb,
				     struct sw_flow_key *flow_key,
				     const struct nlattr *a)
{
	int err = 0;

	switch (nla_type(a)) {
	case OVS_KEY_ATTR_PRIORITY:
		OVS_SET_MASKED(skb->priority, nla_get_u32(a),
			       *get_mask(a, u32 *));
		flow_key->phy.priority = skb->priority;
		break;

	case OVS_KEY_ATTR_SKB_MARK:
		OVS_SET_MASKED(skb->mark, nla_get_u32(a), *get_mask(a, u32 *));
		flow_key->phy.skb_mark = skb->mark;
		break;

	case OVS_KEY_ATTR_TUNNEL_INFO:
		/* Masked data not supported for tunnel. */
		err = -EINVAL;
		break;

	case OVS_KEY_ATTR_ETHERNET:
		err = set_eth_addr(skb, flow_key, nla_data(a),
				   get_mask(a, struct ovs_key_ethernet *));
		break;

	case OVS_KEY_ATTR_IPV4:
		err = set_ipv4(skb, flow_key, nla_data(a),
			       get_mask(a, struct ovs_key_ipv4 *));
		break;

	case OVS_KEY_ATTR_IPV6:
		err = set_ipv6(skb, flow_key, nla_data(a),
			       get_mask(a, struct ovs_key_ipv6 *));
		break;

	case OVS_KEY_ATTR_TCP:
		err = set_tcp(skb, flow_key, nla_data(a),
			      get_mask(a, struct ovs_key_tcp *));
		break;

	case OVS_KEY_ATTR_UDP:
		err = set_udp(skb, flow_key, nla_data(a),
			      get_mask(a, struct ovs_key_udp *));
		break;

	case OVS_KEY_ATTR_SCTP:
		err = set_sctp(skb, flow_key, nla_data(a),
			       get_mask(a, struct ovs_key_sctp *));
		break;

	case OVS_KEY_ATTR_MPLS:
		err = set_mpls(skb, flow_key, nla_data(a),
			       get_mask(a, __be32 *));
		break;

	case OVS_KEY_ATTR_CT_STATE:
	case OVS_KEY_ATTR_CT_ZONE:
	case OVS_KEY_ATTR_CT_MARK:
	case OVS_KEY_ATTR_CT_LABELS:
		err = -EINVAL;
		break;
	}

	return err;
}

static int execute_recirc(struct datapath *dp, struct sk_buff *skb,
			  struct sw_flow_key *key,
			  const struct nlattr *a, int rem)
{
	struct deferred_action *da;
	int level;

	if (!is_flow_key_valid(key)) {
		int err;

		err = ovs_flow_key_update(skb, key);
		if (err)
			return err;
	}
	BUG_ON(!is_flow_key_valid(key));

	if (!nla_is_last(a, rem)) {
		/* The recirc action is not the last action
		 * of the action list, so we need to clone the skb.
		 */
		skb = skb_clone(skb, GFP_ATOMIC);

		/* Skip the recirc action when out of memory, but
		 * continue on with the rest of the action list.
		 */
		if (!skb)
			return 0;
	}

	level = this_cpu_read(exec_actions_level);
	if (level <= OVS_DEFERRED_ACTION_THRESHOLD) {
		struct recirc_keys *rks = this_cpu_ptr(recirc_keys);
		struct sw_flow_key *recirc_key = &rks->key[level - 1];

		*recirc_key = *key;
		recirc_key->recirc_id = nla_get_u32(a);
		ovs_dp_process_packet(skb, recirc_key);

		return 0;
	}

	da = add_deferred_actions(skb, key, NULL);
	if (da) {
		da->pkt_key.recirc_id = nla_get_u32(a);
	} else {
		kfree_skb(skb);

		if (net_ratelimit())
			pr_warn("%s: deferred action limit reached, drop recirc action\n",
				ovs_dp_name(dp));
	}

	return 0;
}

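/* Note on execute_recirc() above: exec_actions_level is 1 for the outermost
 * ovs_execute_actions() call, so 'key[level - 1]' indexes the per-CPU
 * recirc_keys slot belonging to the current nesting depth.  Only once the
 * depth exceeds OVS_DEFERRED_ACTION_THRESHOLD does recirculation fall back
 * to the deferred-action FIFO.
 */
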
/* Execute a list of actions against 'skb'. */
static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
			      struct sw_flow_key *key,
			      const struct nlattr *attr, int len)
{
	/* Every output action needs a separate clone of 'skb', but the common
	 * case is just a single output action, so doing a clone and then
	 * freeing the original skbuff would be wasteful.  The following code
	 * is slightly obscure just to avoid that.
	 */
	int prev_port = -1;
	const struct nlattr *a;
	int rem;

	for (a = attr, rem = len; rem > 0;
	     a = nla_next(a, &rem)) {
		int err = 0;

		if (unlikely(prev_port != -1)) {
			struct sk_buff *out_skb = skb_clone(skb, GFP_ATOMIC);

			if (out_skb)
				do_output(dp, out_skb, prev_port, key);

			OVS_CB(skb)->cutlen = 0;
			prev_port = -1;
		}

		switch (nla_type(a)) {
		case OVS_ACTION_ATTR_OUTPUT:
			prev_port = nla_get_u32(a);
			break;

		case OVS_ACTION_ATTR_TRUNC: {
			struct ovs_action_trunc *trunc = nla_data(a);

			if (skb->len > trunc->max_len)
				OVS_CB(skb)->cutlen = skb->len - trunc->max_len;
			break;
		}

		case OVS_ACTION_ATTR_USERSPACE:
			output_userspace(dp, skb, key, a, attr,
					 len, OVS_CB(skb)->cutlen);
			OVS_CB(skb)->cutlen = 0;
			break;

		case OVS_ACTION_ATTR_HASH:
			execute_hash(skb, key, a);
			break;

		case OVS_ACTION_ATTR_PUSH_MPLS:
			err = push_mpls(skb, key, nla_data(a));
			break;

		case OVS_ACTION_ATTR_POP_MPLS:
			err = pop_mpls(skb, key, nla_get_be16(a));
			break;

		case OVS_ACTION_ATTR_PUSH_VLAN:
			err = push_vlan(skb, key, nla_data(a));
			break;

		case OVS_ACTION_ATTR_POP_VLAN:
			err = pop_vlan(skb, key);
			break;

		case OVS_ACTION_ATTR_RECIRC:
			err = execute_recirc(dp, skb, key, a, rem);
			if (nla_is_last(a, rem)) {
				/* If this is the last action, the skb has
				 * been consumed or freed.
				 * Return immediately.
				 */
				return err;
			}
			break;

		case OVS_ACTION_ATTR_SET:
			err = execute_set_action(skb, key, nla_data(a));
			break;

		case OVS_ACTION_ATTR_SET_MASKED:
		case OVS_ACTION_ATTR_SET_TO_MASKED:
			err = execute_masked_set_action(skb, key, nla_data(a));
			break;

		case OVS_ACTION_ATTR_SAMPLE:
			err = sample(dp, skb, key, a, attr, len);
			break;

		case OVS_ACTION_ATTR_CT:
			if (!is_flow_key_valid(key)) {
				err = ovs_flow_key_update(skb, key);
				if (err)
					return err;
			}

			err = ovs_ct_execute(ovs_dp_get_net(dp), skb, key,
					     nla_data(a));

			/* Hide stolen IP fragments from user space. */
			if (err)
				return err == -EINPROGRESS ? 0 : err;
			break;
		}

		if (unlikely(err)) {
			kfree_skb(skb);
			return err;
		}
	}

	if (prev_port != -1)
		do_output(dp, skb, prev_port, key);
	else
		consume_skb(skb);

	return 0;
}

static void process_deferred_actions(struct datapath *dp)
{
	struct action_fifo *fifo = this_cpu_ptr(action_fifos);

	/* Do not touch the FIFO in case there are no deferred actions. */
	if (action_fifo_is_empty(fifo))
		return;

	/* Finish executing all deferred actions. */
	do {
		struct deferred_action *da = action_fifo_get(fifo);
		struct sk_buff *skb = da->skb;
		struct sw_flow_key *key = &da->pkt_key;
		const struct nlattr *actions = da->actions;

		if (actions)
			do_execute_actions(dp, skb, key, actions,
					   nla_len(actions));
		else
			ovs_dp_process_packet(skb, key);
	} while (!action_fifo_is_empty(fifo));

	/* Reset FIFO for the next packet. */
	action_fifo_init(fifo);
}

/* Execute a list of actions against 'skb'. */
int ovs_execute_actions(struct datapath *dp, struct sk_buff *skb,
			const struct sw_flow_actions *acts,
			struct sw_flow_key *key)
{
	int err, level;

	level = __this_cpu_inc_return(exec_actions_level);
	if (unlikely(level > OVS_RECURSION_LIMIT)) {
		net_crit_ratelimited("ovs: recursion limit reached on datapath %s, probable configuration error\n",
				     ovs_dp_name(dp));
		kfree_skb(skb);
		err = -ENETDOWN;
		goto out;
	}

	err = do_execute_actions(dp, skb, key,
				 acts->actions, acts->actions_len);

	if (level == 1)
		process_deferred_actions(dp);

out:
	__this_cpu_dec(exec_actions_level);
	return err;
}

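/* Note: action_fifos_init()/action_fifos_exit() below allocate and free the
 * per-CPU deferred-action and recirculation scratch space.  They are
 * presumably paired with the datapath module's init/exit paths; the per-CPU
 * buffers themselves need no locking since each CPU only ever touches its
 * own instance while processing packets.
 */
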
int action_fifos_init(void)
{
	action_fifos = alloc_percpu(struct action_fifo);
	if (!action_fifos)
		return -ENOMEM;

	recirc_keys = alloc_percpu(struct recirc_keys);
	if (!recirc_keys) {
		free_percpu(action_fifos);
		return -ENOMEM;
	}

	return 0;
}

void action_fifos_exit(void)
{
	free_percpu(action_fifos);
	free_percpu(recirc_keys);
}