openvswitch: netlink: support L3 packets
[linux-block.git] / net / openvswitch / actions.c
/*
 * Copyright (c) 2007-2014 Nicira, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/skbuff.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/openvswitch.h>
#include <linux/netfilter_ipv6.h>
#include <linux/sctp.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/in6.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>

#include <net/dst.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/ip6_fib.h>
#include <net/checksum.h>
#include <net/dsfield.h>
#include <net/mpls.h>
#include <net/sctp/checksum.h>

#include "datapath.h"
#include "flow.h"
#include "conntrack.h"
#include "vport.h"

47static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
2ff3e4e4 48 struct sw_flow_key *key,
651887b0 49 const struct nlattr *attr, int len);
ccb1352e 50
971427f3
AZ
51struct deferred_action {
52 struct sk_buff *skb;
53 const struct nlattr *actions;
54
55 /* Store pkt_key clone when creating deferred action. */
56 struct sw_flow_key pkt_key;
57};
58
7f8a436e
JS
59#define MAX_L2_LEN (VLAN_ETH_HLEN + 3 * MPLS_HLEN)
60struct ovs_frag_data {
61 unsigned long dst;
62 struct vport *vport;
63 struct ovs_skb_cb cb;
64 __be16 inner_protocol;
c66549ff
JB
65 u16 network_offset; /* valid only for MPLS */
66 u16 vlan_tci;
7f8a436e
JS
67 __be16 vlan_proto;
68 unsigned int l2_len;
e2d9d835 69 u8 mac_proto;
7f8a436e
JS
70 u8 l2_data[MAX_L2_LEN];
71};
72
73static DEFINE_PER_CPU(struct ovs_frag_data, ovs_frag_data_storage);
74
971427f3 75#define DEFERRED_ACTION_FIFO_SIZE 10
2679d040
LR
76#define OVS_RECURSION_LIMIT 5
77#define OVS_DEFERRED_ACTION_THRESHOLD (OVS_RECURSION_LIMIT - 2)
971427f3
AZ
78struct action_fifo {
79 int head;
80 int tail;
81 /* Deferred action fifo queue storage. */
82 struct deferred_action fifo[DEFERRED_ACTION_FIFO_SIZE];
83};
84
2679d040
LR
85struct recirc_keys {
86 struct sw_flow_key key[OVS_DEFERRED_ACTION_THRESHOLD];
87};
88
971427f3 89static struct action_fifo __percpu *action_fifos;
2679d040 90static struct recirc_keys __percpu *recirc_keys;
971427f3
AZ
91static DEFINE_PER_CPU(int, exec_actions_level);
92
93static void action_fifo_init(struct action_fifo *fifo)
94{
95 fifo->head = 0;
96 fifo->tail = 0;
97}
98
12eb18f7 99static bool action_fifo_is_empty(const struct action_fifo *fifo)
971427f3
AZ
100{
101 return (fifo->head == fifo->tail);
102}
103
104static struct deferred_action *action_fifo_get(struct action_fifo *fifo)
105{
106 if (action_fifo_is_empty(fifo))
107 return NULL;
108
109 return &fifo->fifo[fifo->tail++];
110}
111
112static struct deferred_action *action_fifo_put(struct action_fifo *fifo)
113{
114 if (fifo->head >= DEFERRED_ACTION_FIFO_SIZE - 1)
115 return NULL;
116
117 return &fifo->fifo[fifo->head++];
118}
119
120/* Return true if fifo is not full */
121static struct deferred_action *add_deferred_actions(struct sk_buff *skb,
12eb18f7 122 const struct sw_flow_key *key,
971427f3
AZ
123 const struct nlattr *attr)
124{
125 struct action_fifo *fifo;
126 struct deferred_action *da;
127
128 fifo = this_cpu_ptr(action_fifos);
129 da = action_fifo_put(fifo);
130 if (da) {
131 da->skb = skb;
132 da->actions = attr;
133 da->pkt_key = *key;
134 }
135
136 return da;
137}
138
fff06c36
PS
139static void invalidate_flow_key(struct sw_flow_key *key)
140{
329f45bc 141 key->mac_proto |= SW_FLOW_KEY_INVALID;
fff06c36
PS
142}
143
144static bool is_flow_key_valid(const struct sw_flow_key *key)
145{
329f45bc 146 return !(key->mac_proto & SW_FLOW_KEY_INVALID);
fff06c36
PS
147}
148
bc7cc599
SH
149static void update_ethertype(struct sk_buff *skb, struct ethhdr *hdr,
150 __be16 ethertype)
151{
152 if (skb->ip_summed == CHECKSUM_COMPLETE) {
153 __be16 diff[] = { ~(hdr->h_proto), ethertype };
154
155 skb->csum = ~csum_partial((char *)diff, sizeof(diff),
156 ~skb->csum);
157 }
158
159 hdr->h_proto = ethertype;
160}
161
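/* Push an MPLS label stack entry onto 'skb' just after the MAC header and
 * switch skb->protocol (and, for Ethernet packets, the Ethernet type) to the
 * new MPLS ethertype.  The flow key is invalidated because the L3 offsets
 * change.
 */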
static int push_mpls(struct sk_buff *skb, struct sw_flow_key *key,
		     const struct ovs_action_push_mpls *mpls)
{
	struct mpls_shim_hdr *new_mpls_lse;

	/* The networking stack does not allow simultaneous Tunnel and MPLS GSO. */
	if (skb->encapsulation)
		return -ENOTSUPP;

	if (skb_cow_head(skb, MPLS_HLEN) < 0)
		return -ENOMEM;

	if (!skb->inner_protocol) {
		skb_set_inner_network_header(skb, skb->mac_len);
		skb_set_inner_protocol(skb, skb->protocol);
	}

	skb_push(skb, MPLS_HLEN);
	memmove(skb_mac_header(skb) - MPLS_HLEN, skb_mac_header(skb),
		skb->mac_len);
	skb_reset_mac_header(skb);
	skb_set_network_header(skb, skb->mac_len);

	new_mpls_lse = mpls_hdr(skb);
	new_mpls_lse->label_stack_entry = mpls->mpls_lse;

	skb_postpush_rcsum(skb, new_mpls_lse, MPLS_HLEN);

	if (ovs_key_mac_proto(key) == MAC_PROTO_ETHERNET)
		update_ethertype(skb, eth_hdr(skb), mpls->mpls_ethertype);
	skb->protocol = mpls->mpls_ethertype;

	invalidate_flow_key(key);
	return 0;
}

static int pop_mpls(struct sk_buff *skb, struct sw_flow_key *key,
		    const __be16 ethertype)
{
	int err;

	err = skb_ensure_writable(skb, skb->mac_len + MPLS_HLEN);
	if (unlikely(err))
		return err;

	skb_postpull_rcsum(skb, mpls_hdr(skb), MPLS_HLEN);

	memmove(skb_mac_header(skb) + MPLS_HLEN, skb_mac_header(skb),
		skb->mac_len);

	__skb_pull(skb, MPLS_HLEN);
	skb_reset_mac_header(skb);
	skb_set_network_header(skb, skb->mac_len);

	if (ovs_key_mac_proto(key) == MAC_PROTO_ETHERNET) {
		struct ethhdr *hdr;

		/* mpls_hdr() is used to locate the ethertype field correctly
		 * in the presence of VLAN tags.
		 */
		hdr = (struct ethhdr *)((void *)mpls_hdr(skb) - ETH_HLEN);
		update_ethertype(skb, hdr, ethertype);
	}
	if (eth_p_mpls(skb->protocol))
		skb->protocol = ethertype;

	invalidate_flow_key(key);
	return 0;
}

static int set_mpls(struct sk_buff *skb, struct sw_flow_key *flow_key,
		    const __be32 *mpls_lse, const __be32 *mask)
{
	struct mpls_shim_hdr *stack;
	__be32 lse;
	int err;

	err = skb_ensure_writable(skb, skb->mac_len + MPLS_HLEN);
	if (unlikely(err))
		return err;

	stack = mpls_hdr(skb);
	lse = OVS_MASKED(stack->label_stack_entry, *mpls_lse, *mask);
	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		__be32 diff[] = { ~(stack->label_stack_entry), lse };

		skb->csum = ~csum_partial((char *)diff, sizeof(diff),
					  ~skb->csum);
	}

	stack->label_stack_entry = lse;
	flow_key->mpls.top_lse = lse;
	return 0;
}

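/* pop_vlan()/push_vlan() strip or add an 802.1Q/802.1ad tag.  The flow key is
 * only invalidated when another tag remains after the pop, or when a tag was
 * already present before the push, since only then do the key's VLAN fields
 * go stale.
 */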
static int pop_vlan(struct sk_buff *skb, struct sw_flow_key *key)
{
	int err;

	err = skb_vlan_pop(skb);
	if (skb_vlan_tag_present(skb)) {
		invalidate_flow_key(key);
	} else {
		key->eth.vlan.tci = 0;
		key->eth.vlan.tpid = 0;
	}
	return err;
}

static int push_vlan(struct sk_buff *skb, struct sw_flow_key *key,
		     const struct ovs_action_push_vlan *vlan)
{
	if (skb_vlan_tag_present(skb)) {
		invalidate_flow_key(key);
	} else {
		key->eth.vlan.tci = vlan->vlan_tci;
		key->eth.vlan.tpid = vlan->vlan_tpid;
	}
	return skb_vlan_push(skb, vlan->vlan_tpid,
			     ntohs(vlan->vlan_tci) & ~VLAN_TAG_PRESENT);
}

/* 'src' is already properly masked. */
static void ether_addr_copy_masked(u8 *dst_, const u8 *src_, const u8 *mask_)
{
	u16 *dst = (u16 *)dst_;
	const u16 *src = (const u16 *)src_;
	const u16 *mask = (const u16 *)mask_;

	OVS_SET_MASKED(dst[0], src[0], mask[0]);
	OVS_SET_MASKED(dst[1], src[1], mask[1]);
	OVS_SET_MASKED(dst[2], src[2], mask[2]);
}

static int set_eth_addr(struct sk_buff *skb, struct sw_flow_key *flow_key,
			const struct ovs_key_ethernet *key,
			const struct ovs_key_ethernet *mask)
{
	int err;

	err = skb_ensure_writable(skb, ETH_HLEN);
	if (unlikely(err))
		return err;

	skb_postpull_rcsum(skb, eth_hdr(skb), ETH_ALEN * 2);

	ether_addr_copy_masked(eth_hdr(skb)->h_source, key->eth_src,
			       mask->eth_src);
	ether_addr_copy_masked(eth_hdr(skb)->h_dest, key->eth_dst,
			       mask->eth_dst);

	skb_postpush_rcsum(skb, eth_hdr(skb), ETH_ALEN * 2);

	ether_addr_copy(flow_key->eth.src, eth_hdr(skb)->h_source);
	ether_addr_copy(flow_key->eth.dst, eth_hdr(skb)->h_dest);
	return 0;
}

static void update_ip_l4_checksum(struct sk_buff *skb, struct iphdr *nh,
				  __be32 addr, __be32 new_addr)
{
	int transport_len = skb->len - skb_transport_offset(skb);

	if (nh->frag_off & htons(IP_OFFSET))
		return;

	if (nh->protocol == IPPROTO_TCP) {
		if (likely(transport_len >= sizeof(struct tcphdr)))
			inet_proto_csum_replace4(&tcp_hdr(skb)->check, skb,
						 addr, new_addr, true);
	} else if (nh->protocol == IPPROTO_UDP) {
		if (likely(transport_len >= sizeof(struct udphdr))) {
			struct udphdr *uh = udp_hdr(skb);

			if (uh->check || skb->ip_summed == CHECKSUM_PARTIAL) {
				inet_proto_csum_replace4(&uh->check, skb,
							 addr, new_addr, true);
				if (!uh->check)
					uh->check = CSUM_MANGLED_0;
			}
		}
	}
}

static void set_ip_addr(struct sk_buff *skb, struct iphdr *nh,
			__be32 *addr, __be32 new_addr)
{
	update_ip_l4_checksum(skb, nh, *addr, new_addr);
	csum_replace4(&nh->check, *addr, new_addr);
	skb_clear_hash(skb);
	*addr = new_addr;
}

static void update_ipv6_checksum(struct sk_buff *skb, u8 l4_proto,
				 __be32 addr[4], const __be32 new_addr[4])
{
	int transport_len = skb->len - skb_transport_offset(skb);

	if (l4_proto == NEXTHDR_TCP) {
		if (likely(transport_len >= sizeof(struct tcphdr)))
			inet_proto_csum_replace16(&tcp_hdr(skb)->check, skb,
						  addr, new_addr, true);
	} else if (l4_proto == NEXTHDR_UDP) {
		if (likely(transport_len >= sizeof(struct udphdr))) {
			struct udphdr *uh = udp_hdr(skb);

			if (uh->check || skb->ip_summed == CHECKSUM_PARTIAL) {
				inet_proto_csum_replace16(&uh->check, skb,
							  addr, new_addr, true);
				if (!uh->check)
					uh->check = CSUM_MANGLED_0;
			}
		}
	} else if (l4_proto == NEXTHDR_ICMP) {
		if (likely(transport_len >= sizeof(struct icmp6hdr)))
			inet_proto_csum_replace16(&icmp6_hdr(skb)->icmp6_cksum,
						  skb, addr, new_addr, true);
	}
}

static void mask_ipv6_addr(const __be32 old[4], const __be32 addr[4],
			   const __be32 mask[4], __be32 masked[4])
{
	masked[0] = OVS_MASKED(old[0], addr[0], mask[0]);
	masked[1] = OVS_MASKED(old[1], addr[1], mask[1]);
	masked[2] = OVS_MASKED(old[2], addr[2], mask[2]);
	masked[3] = OVS_MASKED(old[3], addr[3], mask[3]);
}

static void set_ipv6_addr(struct sk_buff *skb, u8 l4_proto,
			  __be32 addr[4], const __be32 new_addr[4],
			  bool recalculate_csum)
{
	if (recalculate_csum)
		update_ipv6_checksum(skb, l4_proto, addr, new_addr);

	skb_clear_hash(skb);
	memcpy(addr, new_addr, sizeof(__be32[4]));
}

static void set_ipv6_fl(struct ipv6hdr *nh, u32 fl, u32 mask)
{
	/* Bits 21-24 are always unmasked, so this retains their values. */
	OVS_SET_MASKED(nh->flow_lbl[0], (u8)(fl >> 16), (u8)(mask >> 16));
	OVS_SET_MASKED(nh->flow_lbl[1], (u8)(fl >> 8), (u8)(mask >> 8));
	OVS_SET_MASKED(nh->flow_lbl[2], (u8)fl, (u8)mask);
}

static void set_ip_ttl(struct sk_buff *skb, struct iphdr *nh, u8 new_ttl,
		       u8 mask)
{
	new_ttl = OVS_MASKED(nh->ttl, new_ttl, mask);

	csum_replace2(&nh->check, htons(nh->ttl << 8), htons(new_ttl << 8));
	nh->ttl = new_ttl;
}

static int set_ipv4(struct sk_buff *skb, struct sw_flow_key *flow_key,
		    const struct ovs_key_ipv4 *key,
		    const struct ovs_key_ipv4 *mask)
{
	struct iphdr *nh;
	__be32 new_addr;
	int err;

	err = skb_ensure_writable(skb, skb_network_offset(skb) +
				  sizeof(struct iphdr));
	if (unlikely(err))
		return err;

	nh = ip_hdr(skb);

	/* Setting IP addresses is typically only a side effect of matching
	 * on them in the current userspace implementation, so it makes sense
	 * to check if the value actually changed.
	 */
	if (mask->ipv4_src) {
		new_addr = OVS_MASKED(nh->saddr, key->ipv4_src, mask->ipv4_src);

		if (unlikely(new_addr != nh->saddr)) {
			set_ip_addr(skb, nh, &nh->saddr, new_addr);
			flow_key->ipv4.addr.src = new_addr;
		}
	}
	if (mask->ipv4_dst) {
		new_addr = OVS_MASKED(nh->daddr, key->ipv4_dst, mask->ipv4_dst);

		if (unlikely(new_addr != nh->daddr)) {
			set_ip_addr(skb, nh, &nh->daddr, new_addr);
			flow_key->ipv4.addr.dst = new_addr;
		}
	}
	if (mask->ipv4_tos) {
		ipv4_change_dsfield(nh, ~mask->ipv4_tos, key->ipv4_tos);
		flow_key->ip.tos = nh->tos;
	}
	if (mask->ipv4_ttl) {
		set_ip_ttl(skb, nh, key->ipv4_ttl, mask->ipv4_ttl);
		flow_key->ip.ttl = nh->ttl;
	}

	return 0;
}

static bool is_ipv6_mask_nonzero(const __be32 addr[4])
{
	return !!(addr[0] | addr[1] | addr[2] | addr[3]);
}

static int set_ipv6(struct sk_buff *skb, struct sw_flow_key *flow_key,
		    const struct ovs_key_ipv6 *key,
		    const struct ovs_key_ipv6 *mask)
{
	struct ipv6hdr *nh;
	int err;

	err = skb_ensure_writable(skb, skb_network_offset(skb) +
				  sizeof(struct ipv6hdr));
	if (unlikely(err))
		return err;

	nh = ipv6_hdr(skb);

	/* Setting IP addresses is typically only a side effect of matching
	 * on them in the current userspace implementation, so it makes sense
	 * to check if the value actually changed.
	 */
	if (is_ipv6_mask_nonzero(mask->ipv6_src)) {
		__be32 *saddr = (__be32 *)&nh->saddr;
		__be32 masked[4];

		mask_ipv6_addr(saddr, key->ipv6_src, mask->ipv6_src, masked);

		if (unlikely(memcmp(saddr, masked, sizeof(masked)))) {
			set_ipv6_addr(skb, flow_key->ip.proto, saddr, masked,
				      true);
			memcpy(&flow_key->ipv6.addr.src, masked,
			       sizeof(flow_key->ipv6.addr.src));
		}
	}
	if (is_ipv6_mask_nonzero(mask->ipv6_dst)) {
		unsigned int offset = 0;
		int flags = IP6_FH_F_SKIP_RH;
		bool recalc_csum = true;
		__be32 *daddr = (__be32 *)&nh->daddr;
		__be32 masked[4];

		mask_ipv6_addr(daddr, key->ipv6_dst, mask->ipv6_dst, masked);

		if (unlikely(memcmp(daddr, masked, sizeof(masked)))) {
			if (ipv6_ext_hdr(nh->nexthdr))
				recalc_csum = (ipv6_find_hdr(skb, &offset,
							     NEXTHDR_ROUTING,
							     NULL, &flags)
					       != NEXTHDR_ROUTING);

			set_ipv6_addr(skb, flow_key->ip.proto, daddr, masked,
				      recalc_csum);
			memcpy(&flow_key->ipv6.addr.dst, masked,
			       sizeof(flow_key->ipv6.addr.dst));
		}
	}
	if (mask->ipv6_tclass) {
		ipv6_change_dsfield(nh, ~mask->ipv6_tclass, key->ipv6_tclass);
		flow_key->ip.tos = ipv6_get_dsfield(nh);
	}
	if (mask->ipv6_label) {
		set_ipv6_fl(nh, ntohl(key->ipv6_label),
			    ntohl(mask->ipv6_label));
		flow_key->ipv6.label =
		    *(__be32 *)nh & htonl(IPV6_FLOWINFO_FLOWLABEL);
	}
	if (mask->ipv6_hlimit) {
		OVS_SET_MASKED(nh->hop_limit, key->ipv6_hlimit,
			       mask->ipv6_hlimit);
		flow_key->ip.ttl = nh->hop_limit;
	}
	return 0;
}

/* Must follow skb_ensure_writable() since that can move the skb data. */
static void set_tp_port(struct sk_buff *skb, __be16 *port,
			__be16 new_port, __sum16 *check)
{
	inet_proto_csum_replace2(check, skb, *port, new_port, false);
	*port = new_port;
}

static int set_udp(struct sk_buff *skb, struct sw_flow_key *flow_key,
		   const struct ovs_key_udp *key,
		   const struct ovs_key_udp *mask)
{
	struct udphdr *uh;
	__be16 src, dst;
	int err;

	err = skb_ensure_writable(skb, skb_transport_offset(skb) +
				  sizeof(struct udphdr));
	if (unlikely(err))
		return err;

	uh = udp_hdr(skb);
	/* Either of the masks is non-zero, so do not bother checking them. */
	src = OVS_MASKED(uh->source, key->udp_src, mask->udp_src);
	dst = OVS_MASKED(uh->dest, key->udp_dst, mask->udp_dst);

	if (uh->check && skb->ip_summed != CHECKSUM_PARTIAL) {
		if (likely(src != uh->source)) {
			set_tp_port(skb, &uh->source, src, &uh->check);
			flow_key->tp.src = src;
		}
		if (likely(dst != uh->dest)) {
			set_tp_port(skb, &uh->dest, dst, &uh->check);
			flow_key->tp.dst = dst;
		}

		if (unlikely(!uh->check))
			uh->check = CSUM_MANGLED_0;
	} else {
		uh->source = src;
		uh->dest = dst;
		flow_key->tp.src = src;
		flow_key->tp.dst = dst;
	}

	skb_clear_hash(skb);

	return 0;
}

static int set_tcp(struct sk_buff *skb, struct sw_flow_key *flow_key,
		   const struct ovs_key_tcp *key,
		   const struct ovs_key_tcp *mask)
{
	struct tcphdr *th;
	__be16 src, dst;
	int err;

	err = skb_ensure_writable(skb, skb_transport_offset(skb) +
				  sizeof(struct tcphdr));
	if (unlikely(err))
		return err;

	th = tcp_hdr(skb);
	src = OVS_MASKED(th->source, key->tcp_src, mask->tcp_src);
	if (likely(src != th->source)) {
		set_tp_port(skb, &th->source, src, &th->check);
		flow_key->tp.src = src;
	}
	dst = OVS_MASKED(th->dest, key->tcp_dst, mask->tcp_dst);
	if (likely(dst != th->dest)) {
		set_tp_port(skb, &th->dest, dst, &th->check);
		flow_key->tp.dst = dst;
	}
	skb_clear_hash(skb);

	return 0;
}

static int set_sctp(struct sk_buff *skb, struct sw_flow_key *flow_key,
		    const struct ovs_key_sctp *key,
		    const struct ovs_key_sctp *mask)
{
	unsigned int sctphoff = skb_transport_offset(skb);
	struct sctphdr *sh;
	__le32 old_correct_csum, new_csum, old_csum;
	int err;

	err = skb_ensure_writable(skb, sctphoff + sizeof(struct sctphdr));
	if (unlikely(err))
		return err;

	sh = sctp_hdr(skb);
	old_csum = sh->checksum;
	old_correct_csum = sctp_compute_cksum(skb, sctphoff);

	sh->source = OVS_MASKED(sh->source, key->sctp_src, mask->sctp_src);
	sh->dest = OVS_MASKED(sh->dest, key->sctp_dst, mask->sctp_dst);

	new_csum = sctp_compute_cksum(skb, sctphoff);

	/* Carry any checksum errors through. */
	sh->checksum = old_csum ^ old_correct_csum ^ new_csum;

	skb_clear_hash(skb);
	flow_key->tp.src = sh->source;
	flow_key->tp.dst = sh->dest;

	return 0;
}

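/* Output handler passed to the IPv4/IPv6 fragmentation code; it runs once
 * per fragment, restoring the metadata and L2 header saved by prepare_frag()
 * before sending the fragment out through ovs_vport_send().
 */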
static int ovs_vport_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct ovs_frag_data *data = this_cpu_ptr(&ovs_frag_data_storage);
	struct vport *vport = data->vport;

	if (skb_cow_head(skb, data->l2_len) < 0) {
		kfree_skb(skb);
		return -ENOMEM;
	}

	__skb_dst_copy(skb, data->dst);
	*OVS_CB(skb) = data->cb;
	skb->inner_protocol = data->inner_protocol;
	skb->vlan_tci = data->vlan_tci;
	skb->vlan_proto = data->vlan_proto;

	/* Reconstruct the MAC header. */
	skb_push(skb, data->l2_len);
	memcpy(skb->data, &data->l2_data, data->l2_len);
	skb_postpush_rcsum(skb, skb->data, data->l2_len);
	skb_reset_mac_header(skb);

	if (eth_p_mpls(skb->protocol)) {
		skb->inner_network_header = skb->network_header;
		skb_set_network_header(skb, data->network_offset);
		skb_reset_mac_len(skb);
	}

	ovs_vport_send(vport, skb, data->mac_proto);
	return 0;
}

static unsigned int
ovs_dst_get_mtu(const struct dst_entry *dst)
{
	return dst->dev->mtu;
}

static struct dst_ops ovs_dst_ops = {
	.family = AF_UNSPEC,
	.mtu = ovs_dst_get_mtu,
};

/* prepare_frag() is called once per (larger-than-MTU) frame; its inverse is
 * ovs_vport_output(), which is called once per fragmented packet.
 */
static void prepare_frag(struct vport *vport, struct sk_buff *skb,
			 u16 orig_network_offset, u8 mac_proto)
{
	unsigned int hlen = skb_network_offset(skb);
	struct ovs_frag_data *data;

	data = this_cpu_ptr(&ovs_frag_data_storage);
	data->dst = skb->_skb_refdst;
	data->vport = vport;
	data->cb = *OVS_CB(skb);
	data->inner_protocol = skb->inner_protocol;
	data->network_offset = orig_network_offset;
	data->vlan_tci = skb->vlan_tci;
	data->vlan_proto = skb->vlan_proto;
	data->mac_proto = mac_proto;
	data->l2_len = hlen;
	memcpy(&data->l2_data, skb->data, hlen);

	memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
	skb_pull(skb, hlen);
}

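/* Fragment an over-MRU IPv4 or IPv6 packet and transmit the fragments
 * through 'vport'.  A temporary noref dst pointing at the vport's netdev is
 * installed so the stack's fragmentation helpers can size the fragments;
 * packets of any other ethertype trigger a warning and are dropped.
 */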
static void ovs_fragment(struct net *net, struct vport *vport,
			 struct sk_buff *skb, u16 mru,
			 struct sw_flow_key *key)
{
	u16 orig_network_offset = 0;

	if (eth_p_mpls(skb->protocol)) {
		orig_network_offset = skb_network_offset(skb);
		skb->network_header = skb->inner_network_header;
	}

	if (skb_network_offset(skb) > MAX_L2_LEN) {
		OVS_NLERR(1, "L2 header too long to fragment");
		goto err;
	}

	if (key->eth.type == htons(ETH_P_IP)) {
		struct dst_entry ovs_dst;
		unsigned long orig_dst;

		prepare_frag(vport, skb, orig_network_offset,
			     ovs_key_mac_proto(key));
		dst_init(&ovs_dst, &ovs_dst_ops, NULL, 1,
			 DST_OBSOLETE_NONE, DST_NOCOUNT);
		ovs_dst.dev = vport->dev;

		orig_dst = skb->_skb_refdst;
		skb_dst_set_noref(skb, &ovs_dst);
		IPCB(skb)->frag_max_size = mru;

		ip_do_fragment(net, skb->sk, skb, ovs_vport_output);
		refdst_drop(orig_dst);
	} else if (key->eth.type == htons(ETH_P_IPV6)) {
		const struct nf_ipv6_ops *v6ops = nf_get_ipv6_ops();
		unsigned long orig_dst;
		struct rt6_info ovs_rt;

		if (!v6ops) {
			goto err;
		}

		prepare_frag(vport, skb, orig_network_offset,
			     ovs_key_mac_proto(key));
		memset(&ovs_rt, 0, sizeof(ovs_rt));
		dst_init(&ovs_rt.dst, &ovs_dst_ops, NULL, 1,
			 DST_OBSOLETE_NONE, DST_NOCOUNT);
		ovs_rt.dst.dev = vport->dev;

		orig_dst = skb->_skb_refdst;
		skb_dst_set_noref(skb, &ovs_rt.dst);
		IP6CB(skb)->frag_max_size = mru;

		v6ops->fragment(net, skb->sk, skb, ovs_vport_output);
		refdst_drop(orig_dst);
	} else {
		WARN_ONCE(1, "Failed fragment ->%s: eth=%04x, MRU=%d, MTU=%d.",
			  ovs_vport_name(vport), ntohs(key->eth.type), mru,
			  vport->dev->mtu);
		goto err;
	}

	return;
err:
	kfree_skb(skb);
}

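/* Transmit 'skb' on the vport identified by 'out_port', first trimming it
 * when a truncate action recorded a cutlen and fragmenting it when it
 * exceeds the maximum received unit (MRU) recorded for the flow.
 */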
static void do_output(struct datapath *dp, struct sk_buff *skb, int out_port,
		      struct sw_flow_key *key)
{
	struct vport *vport = ovs_vport_rcu(dp, out_port);

	if (likely(vport)) {
		u16 mru = OVS_CB(skb)->mru;
		u32 cutlen = OVS_CB(skb)->cutlen;

		if (unlikely(cutlen > 0)) {
			if (skb->len - cutlen > ovs_mac_header_len(key))
				pskb_trim(skb, skb->len - cutlen);
			else
				pskb_trim(skb, ovs_mac_header_len(key));
		}

		if (likely(!mru ||
			   (skb->len <= mru + vport->dev->hard_header_len))) {
			ovs_vport_send(vport, skb, ovs_key_mac_proto(key));
		} else if (mru <= vport->dev->mtu) {
			struct net *net = read_pnet(&dp->net);

			ovs_fragment(net, vport, skb, mru, key);
		} else {
			kfree_skb(skb);
		}
	} else {
		kfree_skb(skb);
	}
}

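/* Send 'skb' to userspace as an OVS_PACKET_CMD_ACTION upcall, filling in the
 * upcall metadata (userdata, Netlink PID, egress tunnel info, sampled
 * actions) from the attributes nested in the userspace action.
 */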
static int output_userspace(struct datapath *dp, struct sk_buff *skb,
			    struct sw_flow_key *key, const struct nlattr *attr,
			    const struct nlattr *actions, int actions_len,
			    uint32_t cutlen)
{
	struct dp_upcall_info upcall;
	const struct nlattr *a;
	int rem;

	memset(&upcall, 0, sizeof(upcall));
	upcall.cmd = OVS_PACKET_CMD_ACTION;
	upcall.mru = OVS_CB(skb)->mru;

	for (a = nla_data(attr), rem = nla_len(attr); rem > 0;
	     a = nla_next(a, &rem)) {
		switch (nla_type(a)) {
		case OVS_USERSPACE_ATTR_USERDATA:
			upcall.userdata = a;
			break;

		case OVS_USERSPACE_ATTR_PID:
			upcall.portid = nla_get_u32(a);
			break;

		case OVS_USERSPACE_ATTR_EGRESS_TUN_PORT: {
			/* Get out tunnel info. */
			struct vport *vport;

			vport = ovs_vport_rcu(dp, nla_get_u32(a));
			if (vport) {
				int err;

				err = dev_fill_metadata_dst(vport->dev, skb);
				if (!err)
					upcall.egress_tun_info = skb_tunnel_info(skb);
			}

			break;
		}

		case OVS_USERSPACE_ATTR_ACTIONS: {
			/* Include actions. */
			upcall.actions = actions;
			upcall.actions_len = actions_len;
			break;
		}

		} /* End of switch. */
	}

	return ovs_dp_upcall(dp, skb, key, &upcall, cutlen);
}

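/* Execute an OVS_ACTION_ATTR_SAMPLE action: with the configured probability,
 * run its nested action list on the packet.  The common case of a single
 * userspace action (optionally preceded by a truncate) is handled inline;
 * anything else is run on a clone via the deferred-action FIFO.
 */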
static int sample(struct datapath *dp, struct sk_buff *skb,
		  struct sw_flow_key *key, const struct nlattr *attr,
		  const struct nlattr *actions, int actions_len)
{
	const struct nlattr *acts_list = NULL;
	const struct nlattr *a;
	int rem;
	u32 cutlen = 0;

	for (a = nla_data(attr), rem = nla_len(attr); rem > 0;
	     a = nla_next(a, &rem)) {
		u32 probability;

		switch (nla_type(a)) {
		case OVS_SAMPLE_ATTR_PROBABILITY:
			probability = nla_get_u32(a);
			if (!probability || prandom_u32() > probability)
				return 0;
			break;

		case OVS_SAMPLE_ATTR_ACTIONS:
			acts_list = a;
			break;
		}
	}

	rem = nla_len(acts_list);
	a = nla_data(acts_list);

	/* Actions list is empty, do nothing */
	if (unlikely(!rem))
		return 0;

	/* The only known usage of sample action is having a single user-space
	 * action, or having a truncate action followed by a single user-space
	 * action. Treat this usage as a special case.
	 * The output_userspace() should clone the skb to be sent to the
	 * user space. This skb will be consumed by its caller.
	 */
	if (unlikely(nla_type(a) == OVS_ACTION_ATTR_TRUNC)) {
		struct ovs_action_trunc *trunc = nla_data(a);

		if (skb->len > trunc->max_len)
			cutlen = skb->len - trunc->max_len;

		a = nla_next(a, &rem);
	}

	if (likely(nla_type(a) == OVS_ACTION_ATTR_USERSPACE &&
		   nla_is_last(a, rem)))
		return output_userspace(dp, skb, key, a, actions,
					actions_len, cutlen);

	skb = skb_clone(skb, GFP_ATOMIC);
	if (!skb)
		/* Skip the sample action when out of memory. */
		return 0;

	if (!add_deferred_actions(skb, key, a)) {
		if (net_ratelimit())
			pr_warn("%s: deferred actions limit reached, dropping sample action\n",
				ovs_dp_name(dp));

		kfree_skb(skb);
	}
	return 0;
}

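/* Compute an L4 flow hash over 'skb', mix in the requested basis, and store
 * the (non-zero) result in the flow key so later actions can match on it.
 */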
static void execute_hash(struct sk_buff *skb, struct sw_flow_key *key,
			 const struct nlattr *attr)
{
	struct ovs_action_hash *hash_act = nla_data(attr);
	u32 hash = 0;

	/* OVS_HASH_ALG_L4 is the only possible hash algorithm. */
	hash = skb_get_hash(skb);
	hash = jhash_1word(hash, hash_act->hash_basis);
	if (!hash)
		hash = 0x1;

	key->ovs_flow_hash = hash;
}

static int execute_set_action(struct sk_buff *skb,
			      struct sw_flow_key *flow_key,
			      const struct nlattr *a)
{
	/* Only tunnel set execution is supported without a mask. */
	if (nla_type(a) == OVS_KEY_ATTR_TUNNEL_INFO) {
		struct ovs_tunnel_info *tun = nla_data(a);

		skb_dst_drop(skb);
		dst_hold((struct dst_entry *)tun->tun_dst);
		skb_dst_set(skb, (struct dst_entry *)tun->tun_dst);
		return 0;
	}

	return -EINVAL;
}

/* Mask is at the midpoint of the data. */
#define get_mask(a, type) ((const type)nla_data(a) + 1)

static int execute_masked_set_action(struct sk_buff *skb,
				     struct sw_flow_key *flow_key,
				     const struct nlattr *a)
{
	int err = 0;

	switch (nla_type(a)) {
	case OVS_KEY_ATTR_PRIORITY:
		OVS_SET_MASKED(skb->priority, nla_get_u32(a),
			       *get_mask(a, u32 *));
		flow_key->phy.priority = skb->priority;
		break;

	case OVS_KEY_ATTR_SKB_MARK:
		OVS_SET_MASKED(skb->mark, nla_get_u32(a), *get_mask(a, u32 *));
		flow_key->phy.skb_mark = skb->mark;
		break;

	case OVS_KEY_ATTR_TUNNEL_INFO:
		/* Masked data not supported for tunnel. */
		err = -EINVAL;
		break;

	case OVS_KEY_ATTR_ETHERNET:
		err = set_eth_addr(skb, flow_key, nla_data(a),
				   get_mask(a, struct ovs_key_ethernet *));
		break;

	case OVS_KEY_ATTR_IPV4:
		err = set_ipv4(skb, flow_key, nla_data(a),
			       get_mask(a, struct ovs_key_ipv4 *));
		break;

	case OVS_KEY_ATTR_IPV6:
		err = set_ipv6(skb, flow_key, nla_data(a),
			       get_mask(a, struct ovs_key_ipv6 *));
		break;

	case OVS_KEY_ATTR_TCP:
		err = set_tcp(skb, flow_key, nla_data(a),
			      get_mask(a, struct ovs_key_tcp *));
		break;

	case OVS_KEY_ATTR_UDP:
		err = set_udp(skb, flow_key, nla_data(a),
			      get_mask(a, struct ovs_key_udp *));
		break;

	case OVS_KEY_ATTR_SCTP:
		err = set_sctp(skb, flow_key, nla_data(a),
			       get_mask(a, struct ovs_key_sctp *));
		break;

	case OVS_KEY_ATTR_MPLS:
		err = set_mpls(skb, flow_key, nla_data(a), get_mask(a,
								    __be32 *));
		break;

	case OVS_KEY_ATTR_CT_STATE:
	case OVS_KEY_ATTR_CT_ZONE:
	case OVS_KEY_ATTR_CT_MARK:
	case OVS_KEY_ATTR_CT_LABELS:
		err = -EINVAL;
		break;
	}

	return err;
}

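/* Execute an OVS_ACTION_ATTR_RECIRC action: re-run the packet through the
 * flow table with the given recirc_id.  Up to OVS_DEFERRED_ACTION_THRESHOLD
 * nesting levels recirculate immediately using a per-CPU key copy; deeper
 * nesting falls back to the deferred-action FIFO drained after the
 * top-level call returns.
 */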
static int execute_recirc(struct datapath *dp, struct sk_buff *skb,
			  struct sw_flow_key *key,
			  const struct nlattr *a, int rem)
{
	struct deferred_action *da;
	int level;

	if (!is_flow_key_valid(key)) {
		int err;

		err = ovs_flow_key_update(skb, key);
		if (err)
			return err;
	}
	BUG_ON(!is_flow_key_valid(key));

	if (!nla_is_last(a, rem)) {
		/* Recirc action is not the last action of the action list,
		 * so we need to clone the skb.
		 */
		skb = skb_clone(skb, GFP_ATOMIC);

		/* Skip the recirc action when out of memory, but
		 * continue on with the rest of the action list.
		 */
		if (!skb)
			return 0;
	}

	level = this_cpu_read(exec_actions_level);
	if (level <= OVS_DEFERRED_ACTION_THRESHOLD) {
		struct recirc_keys *rks = this_cpu_ptr(recirc_keys);
		struct sw_flow_key *recirc_key = &rks->key[level - 1];

		*recirc_key = *key;
		recirc_key->recirc_id = nla_get_u32(a);
		ovs_dp_process_packet(skb, recirc_key);

		return 0;
	}

	da = add_deferred_actions(skb, key, NULL);
	if (da) {
		da->pkt_key.recirc_id = nla_get_u32(a);
	} else {
		kfree_skb(skb);

		if (net_ratelimit())
			pr_warn("%s: deferred action limit reached, drop recirc action\n",
				ovs_dp_name(dp));
	}

	return 0;
}

/* Execute a list of actions against 'skb'. */
static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
			      struct sw_flow_key *key,
			      const struct nlattr *attr, int len)
{
	/* Every output action needs a separate clone of 'skb', but the common
	 * case is just a single output action, so that doing a clone and
	 * then freeing the original skbuff is wasteful. So the following code
	 * is slightly obscure just to avoid that.
	 */
	int prev_port = -1;
	const struct nlattr *a;
	int rem;

	for (a = attr, rem = len; rem > 0;
	     a = nla_next(a, &rem)) {
		int err = 0;

		if (unlikely(prev_port != -1)) {
			struct sk_buff *out_skb = skb_clone(skb, GFP_ATOMIC);

			if (out_skb)
				do_output(dp, out_skb, prev_port, key);

			OVS_CB(skb)->cutlen = 0;
			prev_port = -1;
		}

		switch (nla_type(a)) {
		case OVS_ACTION_ATTR_OUTPUT:
			prev_port = nla_get_u32(a);
			break;

		case OVS_ACTION_ATTR_TRUNC: {
			struct ovs_action_trunc *trunc = nla_data(a);

			if (skb->len > trunc->max_len)
				OVS_CB(skb)->cutlen = skb->len - trunc->max_len;
			break;
		}

		case OVS_ACTION_ATTR_USERSPACE:
			output_userspace(dp, skb, key, a, attr,
					 len, OVS_CB(skb)->cutlen);
			OVS_CB(skb)->cutlen = 0;
			break;

		case OVS_ACTION_ATTR_HASH:
			execute_hash(skb, key, a);
			break;

		case OVS_ACTION_ATTR_PUSH_MPLS:
			err = push_mpls(skb, key, nla_data(a));
			break;

		case OVS_ACTION_ATTR_POP_MPLS:
			err = pop_mpls(skb, key, nla_get_be16(a));
			break;

		case OVS_ACTION_ATTR_PUSH_VLAN:
			err = push_vlan(skb, key, nla_data(a));
			break;

		case OVS_ACTION_ATTR_POP_VLAN:
			err = pop_vlan(skb, key);
			break;

		case OVS_ACTION_ATTR_RECIRC:
			err = execute_recirc(dp, skb, key, a, rem);
			if (nla_is_last(a, rem)) {
				/* If this is the last action, the skb has
				 * been consumed or freed.
				 * Return immediately.
				 */
				return err;
			}
			break;

		case OVS_ACTION_ATTR_SET:
			err = execute_set_action(skb, key, nla_data(a));
			break;

		case OVS_ACTION_ATTR_SET_MASKED:
		case OVS_ACTION_ATTR_SET_TO_MASKED:
			err = execute_masked_set_action(skb, key, nla_data(a));
			break;

		case OVS_ACTION_ATTR_SAMPLE:
			err = sample(dp, skb, key, a, attr, len);
			break;

		case OVS_ACTION_ATTR_CT:
			if (!is_flow_key_valid(key)) {
				err = ovs_flow_key_update(skb, key);
				if (err)
					return err;
			}

			err = ovs_ct_execute(ovs_dp_get_net(dp), skb, key,
					     nla_data(a));

			/* Hide stolen IP fragments from user space. */
			if (err)
				return err == -EINPROGRESS ? 0 : err;
			break;
		}

		if (unlikely(err)) {
			kfree_skb(skb);
			return err;
		}
	}

	if (prev_port != -1)
		do_output(dp, skb, prev_port, key);
	else
		consume_skb(skb);

	return 0;
}

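/* Drain the per-CPU deferred-action FIFO filled by add_deferred_actions():
 * each entry either re-runs do_execute_actions() on its saved action list or
 * recirculates the packet through ovs_dp_process_packet().
 */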
static void process_deferred_actions(struct datapath *dp)
{
	struct action_fifo *fifo = this_cpu_ptr(action_fifos);

	/* Do not touch the FIFO if there are no deferred actions. */
	if (action_fifo_is_empty(fifo))
		return;

	/* Finish executing all deferred actions. */
	do {
		struct deferred_action *da = action_fifo_get(fifo);
		struct sk_buff *skb = da->skb;
		struct sw_flow_key *key = &da->pkt_key;
		const struct nlattr *actions = da->actions;

		if (actions)
			do_execute_actions(dp, skb, key, actions,
					   nla_len(actions));
		else
			ovs_dp_process_packet(skb, key);
	} while (!action_fifo_is_empty(fifo));

	/* Reset FIFO for the next packet. */
	action_fifo_init(fifo);
}

/* Execute a list of actions against 'skb'. */
int ovs_execute_actions(struct datapath *dp, struct sk_buff *skb,
			const struct sw_flow_actions *acts,
			struct sw_flow_key *key)
{
	int err, level;

	level = __this_cpu_inc_return(exec_actions_level);
	if (unlikely(level > OVS_RECURSION_LIMIT)) {
		net_crit_ratelimited("ovs: recursion limit reached on datapath %s, probable configuration error\n",
				     ovs_dp_name(dp));
		kfree_skb(skb);
		err = -ENETDOWN;
		goto out;
	}

	err = do_execute_actions(dp, skb, key,
				 acts->actions, acts->actions_len);

	if (level == 1)
		process_deferred_actions(dp);

out:
	__this_cpu_dec(exec_actions_level);
	return err;
}

int action_fifos_init(void)
{
	action_fifos = alloc_percpu(struct action_fifo);
	if (!action_fifos)
		return -ENOMEM;

	recirc_keys = alloc_percpu(struct recirc_keys);
	if (!recirc_keys) {
		free_percpu(action_fifos);
		return -ENOMEM;
	}

	return 0;
}

void action_fifos_exit(void)
{
	free_percpu(action_fifos);
	free_percpu(recirc_keys);
}