/*
 * Copyright (c) 2007-2014 Nicira, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/skbuff.h>
#include <linux/openvswitch.h>
#include <linux/netfilter_ipv6.h>
#include <linux/sctp.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/in6.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>

#include <net/ip6_fib.h>
#include <net/checksum.h>
#include <net/dsfield.h>
#include <net/sctp/checksum.h>

#include "conntrack.h"
static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
                              struct sw_flow_key *key,
                              const struct nlattr *attr, int len);
struct deferred_action {
        struct sk_buff *skb;
        const struct nlattr *actions;

        /* Store pkt_key clone when creating deferred action. */
        struct sw_flow_key pkt_key;
};
#define MAX_L2_LEN      (VLAN_ETH_HLEN + 3 * MPLS_HLEN)
struct ovs_frag_data {
        unsigned long dst;
        struct vport *vport;
        struct ovs_skb_cb cb;
        __be16 inner_protocol;
        u16 vlan_tci;
        __be16 vlan_proto;
        unsigned int l2_len;
        u8 l2_data[MAX_L2_LEN];
};

static DEFINE_PER_CPU(struct ovs_frag_data, ovs_frag_data_storage);
#define DEFERRED_ACTION_FIFO_SIZE 10
#define OVS_RECURSION_LIMIT 5
#define OVS_DEFERRED_ACTION_THRESHOLD (OVS_RECURSION_LIMIT - 2)
struct action_fifo {
        int head;
        int tail;
        /* Deferred action fifo queue storage. */
        struct deferred_action fifo[DEFERRED_ACTION_FIFO_SIZE];
};

struct recirc_keys {
        struct sw_flow_key key[OVS_DEFERRED_ACTION_THRESHOLD];
};
static struct action_fifo __percpu *action_fifos;
static struct recirc_keys __percpu *recirc_keys;
static DEFINE_PER_CPU(int, exec_actions_level);
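
/*
 * Rough picture of how the per-CPU state above fits together:
 * ovs_execute_actions() bumps exec_actions_level on entry, so the counter
 * tells how deeply action execution is nested on this CPU.  Shallow
 * recirculation (level <= OVS_DEFERRED_ACTION_THRESHOLD) borrows a key slot
 * from recirc_keys and re-enters the datapath directly; deeper recirc and
 * deferred sample actions are queued on the per-CPU action_fifo instead and
 * drained by process_deferred_actions() once the action list finishes.
 */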
static void action_fifo_init(struct action_fifo *fifo)
{
        fifo->head = 0;
        fifo->tail = 0;
}

static bool action_fifo_is_empty(const struct action_fifo *fifo)
{
        return (fifo->head == fifo->tail);
}
static struct deferred_action *action_fifo_get(struct action_fifo *fifo)
{
        if (action_fifo_is_empty(fifo))
                return NULL;

        return &fifo->fifo[fifo->tail++];
}
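
/* Reserve the next free slot; returns NULL when the FIFO is full so the
 * caller can drop the deferred work instead of overwriting older entries.
 */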
static struct deferred_action *action_fifo_put(struct action_fifo *fifo)
{
        if (fifo->head >= DEFERRED_ACTION_FIFO_SIZE - 1)
                return NULL;

        return &fifo->fifo[fifo->head++];
}
/* Returns the reserved deferred action slot, or NULL if the FIFO is full. */
static struct deferred_action *add_deferred_actions(struct sk_buff *skb,
                                                    const struct sw_flow_key *key,
                                                    const struct nlattr *attr)
        struct action_fifo *fifo;
        struct deferred_action *da;

        fifo = this_cpu_ptr(action_fifos);
        da = action_fifo_put(fifo);
static void invalidate_flow_key(struct sw_flow_key *key)
        key->eth.type = htons(0);

static bool is_flow_key_valid(const struct sw_flow_key *key)
        return !!key->eth.type;
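
/*
 * Flow-key staleness convention (see invalidate_flow_key() and
 * is_flow_key_valid() above): a zero Ethertype marks the key as stale.
 * Actions that rewrite headers (MPLS push/pop, VLAN changes, conntrack)
 * invalidate the key this way, and later consumers such as execute_recirc()
 * re-extract it with ovs_flow_key_update() before trusting it again.
 */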
static void update_ethertype(struct sk_buff *skb, struct ethhdr *hdr,
        if (skb->ip_summed == CHECKSUM_COMPLETE) {
                __be16 diff[] = { ~(hdr->h_proto), ethertype };

                skb->csum = csum_partial((char *)diff, sizeof(diff), skb->csum);

        hdr->h_proto = ethertype;
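
/*
 * push_mpls() opens a gap for one MPLS shim header directly after the L2
 * header: the existing MAC header is shifted down by MPLS_HLEN, the new
 * label stack entry is written into the gap, and both the Ethernet
 * ethertype and skb->protocol are switched to the MPLS ethertype carried in
 * the action.
 */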
static int push_mpls(struct sk_buff *skb, struct sw_flow_key *key,
                     const struct ovs_action_push_mpls *mpls)
        struct mpls_shim_hdr *new_mpls_lse;

        /* The networking stack does not allow simultaneous tunnel and MPLS GSO. */
        if (skb->encapsulation)

        if (skb_cow_head(skb, MPLS_HLEN) < 0)

        if (!skb->inner_protocol) {
                skb_set_inner_network_header(skb, skb->mac_len);
                skb_set_inner_protocol(skb, skb->protocol);

        skb_push(skb, MPLS_HLEN);
        memmove(skb_mac_header(skb) - MPLS_HLEN, skb_mac_header(skb),
        skb_reset_mac_header(skb);
        skb_set_network_header(skb, skb->mac_len);

        new_mpls_lse = mpls_hdr(skb);
        new_mpls_lse->label_stack_entry = mpls->mpls_lse;

        skb_postpush_rcsum(skb, new_mpls_lse, MPLS_HLEN);

        update_ethertype(skb, eth_hdr(skb), mpls->mpls_ethertype);
        skb->protocol = mpls->mpls_ethertype;

        invalidate_flow_key(key);
static int pop_mpls(struct sk_buff *skb, struct sw_flow_key *key,
                    const __be16 ethertype)
        err = skb_ensure_writable(skb, skb->mac_len + MPLS_HLEN);

        skb_postpull_rcsum(skb, mpls_hdr(skb), MPLS_HLEN);

        memmove(skb_mac_header(skb) + MPLS_HLEN, skb_mac_header(skb),

        __skb_pull(skb, MPLS_HLEN);
        skb_reset_mac_header(skb);
        skb_set_network_header(skb, skb->mac_len);

        /* mpls_hdr() is used to locate the ethertype field correctly in the
         * presence of VLAN tags.
         */
        hdr = (struct ethhdr *)((void *)mpls_hdr(skb) - ETH_HLEN);
        update_ethertype(skb, hdr, ethertype);

        if (eth_p_mpls(skb->protocol))
                skb->protocol = ethertype;

        invalidate_flow_key(key);
static int set_mpls(struct sk_buff *skb, struct sw_flow_key *flow_key,
                    const __be32 *mpls_lse, const __be32 *mask)
        struct mpls_shim_hdr *stack;

        err = skb_ensure_writable(skb, skb->mac_len + MPLS_HLEN);

        stack = mpls_hdr(skb);
        lse = OVS_MASKED(stack->label_stack_entry, *mpls_lse, *mask);
        if (skb->ip_summed == CHECKSUM_COMPLETE) {
                __be32 diff[] = { ~(stack->label_stack_entry), lse };

                skb->csum = csum_partial((char *)diff, sizeof(diff), skb->csum);

        stack->label_stack_entry = lse;
        flow_key->mpls.top_lse = lse;
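
/*
 * VLAN pop/push keep the flow key in sync only for the common single-tag
 * case.  When a second tag comes into play (a tag is still present after a
 * pop, or one is already present before a push), the key is invalidated
 * instead and will be re-extracted on demand.
 */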
static int pop_vlan(struct sk_buff *skb, struct sw_flow_key *key)
        err = skb_vlan_pop(skb);
        if (skb_vlan_tag_present(skb)) {
                invalidate_flow_key(key);
                key->eth.vlan.tci = 0;
                key->eth.vlan.tpid = 0;

static int push_vlan(struct sk_buff *skb, struct sw_flow_key *key,
                     const struct ovs_action_push_vlan *vlan)
        if (skb_vlan_tag_present(skb)) {
                invalidate_flow_key(key);
                key->eth.vlan.tci = vlan->vlan_tci;
                key->eth.vlan.tpid = vlan->vlan_tpid;

        return skb_vlan_push(skb, vlan->vlan_tpid,
                             ntohs(vlan->vlan_tci) & ~VLAN_TAG_PRESENT);
/* 'src' is already properly masked. */
static void ether_addr_copy_masked(u8 *dst_, const u8 *src_, const u8 *mask_)
        u16 *dst = (u16 *)dst_;
        const u16 *src = (const u16 *)src_;
        const u16 *mask = (const u16 *)mask_;

        OVS_SET_MASKED(dst[0], src[0], mask[0]);
        OVS_SET_MASKED(dst[1], src[1], mask[1]);
        OVS_SET_MASKED(dst[2], src[2], mask[2]);
static int set_eth_addr(struct sk_buff *skb, struct sw_flow_key *flow_key,
                        const struct ovs_key_ethernet *key,
                        const struct ovs_key_ethernet *mask)
        err = skb_ensure_writable(skb, ETH_HLEN);

        skb_postpull_rcsum(skb, eth_hdr(skb), ETH_ALEN * 2);

        ether_addr_copy_masked(eth_hdr(skb)->h_source, key->eth_src,
        ether_addr_copy_masked(eth_hdr(skb)->h_dest, key->eth_dst,

        skb_postpush_rcsum(skb, eth_hdr(skb), ETH_ALEN * 2);

        ether_addr_copy(flow_key->eth.src, eth_hdr(skb)->h_source);
        ether_addr_copy(flow_key->eth.dst, eth_hdr(skb)->h_dest);
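
/*
 * When an IPv4 address changes, any L4 checksum that covers the pseudo
 * header has to be patched as well.  Non-first fragments carry no L4 header
 * and are skipped.  UDP is special: a zero checksum means "no checksum" on
 * IPv4, so it is only touched when a checksum is already present or still
 * to be computed (CHECKSUM_PARTIAL).
 */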
static void update_ip_l4_checksum(struct sk_buff *skb, struct iphdr *nh,
                                  __be32 addr, __be32 new_addr)
        int transport_len = skb->len - skb_transport_offset(skb);

        if (nh->frag_off & htons(IP_OFFSET))

        if (nh->protocol == IPPROTO_TCP) {
                if (likely(transport_len >= sizeof(struct tcphdr)))
                        inet_proto_csum_replace4(&tcp_hdr(skb)->check, skb,
                                                 addr, new_addr, true);
        } else if (nh->protocol == IPPROTO_UDP) {
                if (likely(transport_len >= sizeof(struct udphdr))) {
                        struct udphdr *uh = udp_hdr(skb);

                        if (uh->check || skb->ip_summed == CHECKSUM_PARTIAL) {
                                inet_proto_csum_replace4(&uh->check, skb,
                                                         addr, new_addr, true);
                                uh->check = CSUM_MANGLED_0;
static void set_ip_addr(struct sk_buff *skb, struct iphdr *nh,
                        __be32 *addr, __be32 new_addr)
        update_ip_l4_checksum(skb, nh, *addr, new_addr);
        csum_replace4(&nh->check, *addr, new_addr);
static void update_ipv6_checksum(struct sk_buff *skb, u8 l4_proto,
                                 __be32 addr[4], const __be32 new_addr[4])
        int transport_len = skb->len - skb_transport_offset(skb);

        if (l4_proto == NEXTHDR_TCP) {
                if (likely(transport_len >= sizeof(struct tcphdr)))
                        inet_proto_csum_replace16(&tcp_hdr(skb)->check, skb,
                                                  addr, new_addr, true);
        } else if (l4_proto == NEXTHDR_UDP) {
                if (likely(transport_len >= sizeof(struct udphdr))) {
                        struct udphdr *uh = udp_hdr(skb);

                        if (uh->check || skb->ip_summed == CHECKSUM_PARTIAL) {
                                inet_proto_csum_replace16(&uh->check, skb,
                                                          addr, new_addr, true);
                                uh->check = CSUM_MANGLED_0;
        } else if (l4_proto == NEXTHDR_ICMP) {
                if (likely(transport_len >= sizeof(struct icmp6hdr)))
                        inet_proto_csum_replace16(&icmp6_hdr(skb)->icmp6_cksum,
                                                  skb, addr, new_addr, true);
static void mask_ipv6_addr(const __be32 old[4], const __be32 addr[4],
                           const __be32 mask[4], __be32 masked[4])
        masked[0] = OVS_MASKED(old[0], addr[0], mask[0]);
        masked[1] = OVS_MASKED(old[1], addr[1], mask[1]);
        masked[2] = OVS_MASKED(old[2], addr[2], mask[2]);
        masked[3] = OVS_MASKED(old[3], addr[3], mask[3]);
static void set_ipv6_addr(struct sk_buff *skb, u8 l4_proto,
                          __be32 addr[4], const __be32 new_addr[4],
                          bool recalculate_csum)
        if (recalculate_csum)
                update_ipv6_checksum(skb, l4_proto, addr, new_addr);

        memcpy(addr, new_addr, sizeof(__be32[4]));
static void set_ipv6_dsfield(struct sk_buff *skb, struct ipv6hdr *nh, u8 ipv6_tclass, u8 mask)
        u8 old_ipv6_tclass = ipv6_get_dsfield(nh);

        ipv6_tclass = OVS_MASKED(old_ipv6_tclass, ipv6_tclass, mask);

        if (skb->ip_summed == CHECKSUM_COMPLETE)
                csum_replace(&skb->csum, (__force __wsum)(old_ipv6_tclass << 12),
                             (__force __wsum)(ipv6_tclass << 12));

        ipv6_change_dsfield(nh, ~mask, ipv6_tclass);
static void set_ipv6_fl(struct sk_buff *skb, struct ipv6hdr *nh, u32 fl, u32 mask)
        ofl = nh->flow_lbl[0] << 16 | nh->flow_lbl[1] << 8 | nh->flow_lbl[2];
        fl = OVS_MASKED(ofl, fl, mask);

        /* Bits 21-24 are always unmasked, so this retains their values. */
        nh->flow_lbl[0] = (u8)(fl >> 16);
        nh->flow_lbl[1] = (u8)(fl >> 8);
        nh->flow_lbl[2] = (u8)fl;

        if (skb->ip_summed == CHECKSUM_COMPLETE)
                csum_replace(&skb->csum, (__force __wsum)htonl(ofl), (__force __wsum)htonl(fl));
static void set_ipv6_ttl(struct sk_buff *skb, struct ipv6hdr *nh, u8 new_ttl, u8 mask)
        new_ttl = OVS_MASKED(nh->hop_limit, new_ttl, mask);

        if (skb->ip_summed == CHECKSUM_COMPLETE)
                csum_replace(&skb->csum, (__force __wsum)(nh->hop_limit << 8),
                             (__force __wsum)(new_ttl << 8));
        nh->hop_limit = new_ttl;

static void set_ip_ttl(struct sk_buff *skb, struct iphdr *nh, u8 new_ttl,
        new_ttl = OVS_MASKED(nh->ttl, new_ttl, mask);

        csum_replace2(&nh->check, htons(nh->ttl << 8), htons(new_ttl << 8));
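
/*
 * Note on the OVS_MASKED()/OVS_SET_MASKED() helpers used by the set_*()
 * functions: the new value supplied with a masked set action is expected to
 * be pre-masked (see the ether_addr_copy_masked() comment above), so the
 * result is simply the new bits OR'ed with the old value's bits outside the
 * mask; only fields selected by the mask actually change in the packet.
 */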
static int set_ipv4(struct sk_buff *skb, struct sw_flow_key *flow_key,
                    const struct ovs_key_ipv4 *key,
                    const struct ovs_key_ipv4 *mask)
        err = skb_ensure_writable(skb, skb_network_offset(skb) +
                                  sizeof(struct iphdr));

        /* Setting IP addresses is typically only a side effect of
         * matching on them in the current userspace implementation, so it
         * makes sense to check if the value actually changed.
         */
        if (mask->ipv4_src) {
                new_addr = OVS_MASKED(nh->saddr, key->ipv4_src, mask->ipv4_src);

                if (unlikely(new_addr != nh->saddr)) {
                        set_ip_addr(skb, nh, &nh->saddr, new_addr);
                        flow_key->ipv4.addr.src = new_addr;

        if (mask->ipv4_dst) {
                new_addr = OVS_MASKED(nh->daddr, key->ipv4_dst, mask->ipv4_dst);

                if (unlikely(new_addr != nh->daddr)) {
                        set_ip_addr(skb, nh, &nh->daddr, new_addr);
                        flow_key->ipv4.addr.dst = new_addr;

        if (mask->ipv4_tos) {
                ipv4_change_dsfield(nh, ~mask->ipv4_tos, key->ipv4_tos);
                flow_key->ip.tos = nh->tos;

        if (mask->ipv4_ttl) {
                set_ip_ttl(skb, nh, key->ipv4_ttl, mask->ipv4_ttl);
                flow_key->ip.ttl = nh->ttl;
static bool is_ipv6_mask_nonzero(const __be32 addr[4])
        return !!(addr[0] | addr[1] | addr[2] | addr[3]);
static int set_ipv6(struct sk_buff *skb, struct sw_flow_key *flow_key,
                    const struct ovs_key_ipv6 *key,
                    const struct ovs_key_ipv6 *mask)
        err = skb_ensure_writable(skb, skb_network_offset(skb) +
                                  sizeof(struct ipv6hdr));

        /* Setting IP addresses is typically only a side effect of
         * matching on them in the current userspace implementation, so it
         * makes sense to check if the value actually changed.
         */
        if (is_ipv6_mask_nonzero(mask->ipv6_src)) {
                __be32 *saddr = (__be32 *)&nh->saddr;

                mask_ipv6_addr(saddr, key->ipv6_src, mask->ipv6_src, masked);

                if (unlikely(memcmp(saddr, masked, sizeof(masked)))) {
                        set_ipv6_addr(skb, flow_key->ip.proto, saddr, masked,
                        memcpy(&flow_key->ipv6.addr.src, masked,
                               sizeof(flow_key->ipv6.addr.src));

        if (is_ipv6_mask_nonzero(mask->ipv6_dst)) {
                unsigned int offset = 0;
                int flags = IP6_FH_F_SKIP_RH;
                bool recalc_csum = true;
                __be32 *daddr = (__be32 *)&nh->daddr;

                mask_ipv6_addr(daddr, key->ipv6_dst, mask->ipv6_dst, masked);

                if (unlikely(memcmp(daddr, masked, sizeof(masked)))) {
                        if (ipv6_ext_hdr(nh->nexthdr))
                                recalc_csum = (ipv6_find_hdr(skb, &offset,

                        set_ipv6_addr(skb, flow_key->ip.proto, daddr, masked,
                        memcpy(&flow_key->ipv6.addr.dst, masked,
                               sizeof(flow_key->ipv6.addr.dst));

        if (mask->ipv6_tclass) {
                set_ipv6_dsfield(skb, nh, key->ipv6_tclass, mask->ipv6_tclass);
                flow_key->ip.tos = ipv6_get_dsfield(nh);

        if (mask->ipv6_label) {
                set_ipv6_fl(skb, nh, ntohl(key->ipv6_label),
                            ntohl(mask->ipv6_label));
                flow_key->ipv6.label =
                        *(__be32 *)nh & htonl(IPV6_FLOWINFO_FLOWLABEL);

        if (mask->ipv6_hlimit) {
                set_ipv6_ttl(skb, nh, key->ipv6_hlimit, mask->ipv6_hlimit);
                flow_key->ip.ttl = nh->hop_limit;
/* Must follow skb_ensure_writable() since that can move the skb data. */
static void set_tp_port(struct sk_buff *skb, __be16 *port,
                        __be16 new_port, __sum16 *check)
        inet_proto_csum_replace2(check, skb, *port, new_port, false);
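
/*
 * UDP over IPv4 may carry a zero checksum, meaning "no checksum".  If a
 * checksum is present and is not going to be recomputed later
 * (CHECKSUM_PARTIAL), it is patched incrementally via set_tp_port() and a
 * zero result is re-encoded as CSUM_MANGLED_0; otherwise only the ports and
 * the flow key are rewritten.
 */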
static int set_udp(struct sk_buff *skb, struct sw_flow_key *flow_key,
                   const struct ovs_key_udp *key,
                   const struct ovs_key_udp *mask)
        err = skb_ensure_writable(skb, skb_transport_offset(skb) +
                                  sizeof(struct udphdr));

        /* Either of the masks is non-zero, so do not bother checking them. */
        src = OVS_MASKED(uh->source, key->udp_src, mask->udp_src);
        dst = OVS_MASKED(uh->dest, key->udp_dst, mask->udp_dst);

        if (uh->check && skb->ip_summed != CHECKSUM_PARTIAL) {
                if (likely(src != uh->source)) {
                        set_tp_port(skb, &uh->source, src, &uh->check);
                        flow_key->tp.src = src;
                if (likely(dst != uh->dest)) {
                        set_tp_port(skb, &uh->dest, dst, &uh->check);
                        flow_key->tp.dst = dst;

                if (unlikely(!uh->check))
                        uh->check = CSUM_MANGLED_0;

                flow_key->tp.src = src;
                flow_key->tp.dst = dst;
static int set_tcp(struct sk_buff *skb, struct sw_flow_key *flow_key,
                   const struct ovs_key_tcp *key,
                   const struct ovs_key_tcp *mask)
        err = skb_ensure_writable(skb, skb_transport_offset(skb) +
                                  sizeof(struct tcphdr));

        src = OVS_MASKED(th->source, key->tcp_src, mask->tcp_src);
        if (likely(src != th->source)) {
                set_tp_port(skb, &th->source, src, &th->check);
                flow_key->tp.src = src;

        dst = OVS_MASKED(th->dest, key->tcp_dst, mask->tcp_dst);
        if (likely(dst != th->dest)) {
                set_tp_port(skb, &th->dest, dst, &th->check);
                flow_key->tp.dst = dst;
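
/*
 * SCTP uses a CRC32c over the whole packet rather than an incremental
 * Internet checksum, so set_sctp() recomputes it before and after the port
 * rewrite and XORs the difference into the stored value.  A packet that
 * arrived with a bad checksum therefore still carries a bad (rather than a
 * silently repaired) checksum afterwards.
 */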
static int set_sctp(struct sk_buff *skb, struct sw_flow_key *flow_key,
                    const struct ovs_key_sctp *key,
                    const struct ovs_key_sctp *mask)
        unsigned int sctphoff = skb_transport_offset(skb);
        __le32 old_correct_csum, new_csum, old_csum;

        err = skb_ensure_writable(skb, sctphoff + sizeof(struct sctphdr));

        old_csum = sh->checksum;
        old_correct_csum = sctp_compute_cksum(skb, sctphoff);

        sh->source = OVS_MASKED(sh->source, key->sctp_src, mask->sctp_src);
        sh->dest = OVS_MASKED(sh->dest, key->sctp_dst, mask->sctp_dst);

        new_csum = sctp_compute_cksum(skb, sctphoff);

        /* Carry any checksum errors through. */
        sh->checksum = old_csum ^ old_correct_csum ^ new_csum;

        flow_key->tp.src = sh->source;
        flow_key->tp.dst = sh->dest;
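
/*
 * Output callback handed to the IPv4/IPv6 fragmentation code.  Each
 * fragment is given back the datapath metadata and the saved L2 header from
 * ovs_frag_data_storage before being sent out of the original vport.
 */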
static int ovs_vport_output(struct net *net, struct sock *sk, struct sk_buff *skb)
        struct ovs_frag_data *data = this_cpu_ptr(&ovs_frag_data_storage);
        struct vport *vport = data->vport;

        if (skb_cow_head(skb, data->l2_len) < 0) {

        __skb_dst_copy(skb, data->dst);
        *OVS_CB(skb) = data->cb;
        skb->inner_protocol = data->inner_protocol;
        skb->vlan_tci = data->vlan_tci;
        skb->vlan_proto = data->vlan_proto;

        /* Reconstruct the MAC header. */
        skb_push(skb, data->l2_len);
        memcpy(skb->data, &data->l2_data, data->l2_len);
        skb_postpush_rcsum(skb, skb->data, data->l2_len);
        skb_reset_mac_header(skb);

        ovs_vport_send(vport, skb);

ovs_dst_get_mtu(const struct dst_entry *dst)
        return dst->dev->mtu;
static struct dst_ops ovs_dst_ops = {
        .mtu = ovs_dst_get_mtu,
};
/* prepare_frag() is called once per larger-than-MTU frame; its inverse is
 * ovs_vport_output(), which is called once per resulting fragment.
 */
static void prepare_frag(struct vport *vport, struct sk_buff *skb)
        unsigned int hlen = skb_network_offset(skb);
        struct ovs_frag_data *data;

        data = this_cpu_ptr(&ovs_frag_data_storage);
        data->dst = skb->_skb_refdst;
        data->cb = *OVS_CB(skb);
        data->inner_protocol = skb->inner_protocol;
        data->vlan_tci = skb->vlan_tci;
        data->vlan_proto = skb->vlan_proto;
        memcpy(&data->l2_data, skb->data, hlen);

        memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
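
/*
 * Fragment an over-MTU packet on behalf of a vport.  The skb has no real
 * route attached, so a throwaway on-stack rtable/rt6_info backed by
 * ovs_dst_ops is installed just long enough for ip_do_fragment() or the
 * IPv6 netfilter fragmentation hook to run, with ovs_vport_output() doing
 * the per-fragment transmit.
 */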
static void ovs_fragment(struct net *net, struct vport *vport,
                         struct sk_buff *skb, u16 mru, __be16 ethertype)
        if (skb_network_offset(skb) > MAX_L2_LEN) {
                OVS_NLERR(1, "L2 header too long to fragment");

        if (ethertype == htons(ETH_P_IP)) {
                struct rtable ovs_rt = { 0 };
                unsigned long orig_dst;

                prepare_frag(vport, skb);
                dst_init(&ovs_rt.dst, &ovs_dst_ops, NULL, 1,
                         DST_OBSOLETE_NONE, DST_NOCOUNT);
                ovs_rt.dst.dev = vport->dev;

                orig_dst = skb->_skb_refdst;
                skb_dst_set_noref(skb, &ovs_rt.dst);
                IPCB(skb)->frag_max_size = mru;

                ip_do_fragment(net, skb->sk, skb, ovs_vport_output);
                refdst_drop(orig_dst);
        } else if (ethertype == htons(ETH_P_IPV6)) {
                const struct nf_ipv6_ops *v6ops = nf_get_ipv6_ops();
                unsigned long orig_dst;
                struct rt6_info ovs_rt;

                prepare_frag(vport, skb);
                memset(&ovs_rt, 0, sizeof(ovs_rt));
                dst_init(&ovs_rt.dst, &ovs_dst_ops, NULL, 1,
                         DST_OBSOLETE_NONE, DST_NOCOUNT);
                ovs_rt.dst.dev = vport->dev;

                orig_dst = skb->_skb_refdst;
                skb_dst_set_noref(skb, &ovs_rt.dst);
                IP6CB(skb)->frag_max_size = mru;

                v6ops->fragment(net, skb->sk, skb, ovs_vport_output);
                refdst_drop(orig_dst);

                WARN_ONCE(1, "Failed fragment ->%s: eth=%04x, MRU=%d, MTU=%d.",
                          ovs_vport_name(vport), ntohs(ethertype), mru,
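
/*
 * do_output() consumes the skb: it first applies any pending truncation
 * (cutlen), then either transmits directly when the packet fits within the
 * MRU, or hands it to ovs_fragment() when fragmentation to the egress MRU
 * is required.
 */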
static void do_output(struct datapath *dp, struct sk_buff *skb, int out_port,
                      struct sw_flow_key *key)
        struct vport *vport = ovs_vport_rcu(dp, out_port);

                u16 mru = OVS_CB(skb)->mru;
                u32 cutlen = OVS_CB(skb)->cutlen;

                if (unlikely(cutlen > 0)) {
                        if (skb->len - cutlen > ETH_HLEN)
                                pskb_trim(skb, skb->len - cutlen);
                                pskb_trim(skb, ETH_HLEN);

                if (likely(!mru || (skb->len <= mru + ETH_HLEN))) {
                        ovs_vport_send(vport, skb);
                } else if (mru <= vport->dev->mtu) {
                        struct net *net = read_pnet(&dp->net);
                        __be16 ethertype = key->eth.type;

                        if (!is_flow_key_valid(key)) {
                                if (eth_p_mpls(skb->protocol))
                                        ethertype = skb->inner_protocol;
                                        ethertype = vlan_get_protocol(skb);

                        ovs_fragment(net, vport, skb, mru, ethertype);
static int output_userspace(struct datapath *dp, struct sk_buff *skb,
                            struct sw_flow_key *key, const struct nlattr *attr,
                            const struct nlattr *actions, int actions_len,
        struct dp_upcall_info upcall;
        const struct nlattr *a;

        memset(&upcall, 0, sizeof(upcall));
        upcall.cmd = OVS_PACKET_CMD_ACTION;
        upcall.mru = OVS_CB(skb)->mru;

        for (a = nla_data(attr), rem = nla_len(attr); rem > 0;
             a = nla_next(a, &rem)) {
                switch (nla_type(a)) {
                case OVS_USERSPACE_ATTR_USERDATA:

                case OVS_USERSPACE_ATTR_PID:
                        upcall.portid = nla_get_u32(a);

                case OVS_USERSPACE_ATTR_EGRESS_TUN_PORT: {
                        /* Get out tunnel info. */

                        vport = ovs_vport_rcu(dp, nla_get_u32(a));

                        err = dev_fill_metadata_dst(vport->dev, skb);
                                upcall.egress_tun_info = skb_tunnel_info(skb);

                case OVS_USERSPACE_ATTR_ACTIONS: {
                        /* Include actions. */
                        upcall.actions = actions;
                        upcall.actions_len = actions_len;

                } /* End of switch. */

        return ovs_dp_upcall(dp, skb, key, &upcall, cutlen);
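
/*
 * The sample() action runs its nested action list with a given probability.
 * The probability attribute is scaled to the full u32 range, so a value of
 * UINT32_MAX means "always sample" and a random draw above the value skips
 * the nested actions entirely.
 */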
static int sample(struct datapath *dp, struct sk_buff *skb,
                  struct sw_flow_key *key, const struct nlattr *attr,
                  const struct nlattr *actions, int actions_len)
        const struct nlattr *acts_list = NULL;
        const struct nlattr *a;

        for (a = nla_data(attr), rem = nla_len(attr); rem > 0;
             a = nla_next(a, &rem)) {

                switch (nla_type(a)) {
                case OVS_SAMPLE_ATTR_PROBABILITY:
                        probability = nla_get_u32(a);
                        if (!probability || prandom_u32() > probability)

                case OVS_SAMPLE_ATTR_ACTIONS:

        rem = nla_len(acts_list);
        a = nla_data(acts_list);

        /* Actions list is empty, do nothing */

        /* The only known usage of sample action is having a single user-space
         * action, or having a truncate action followed by a single user-space
         * action.  Treat this usage as a special case.
         * output_userspace() should clone the skb to be sent to user space;
         * this skb will be consumed by its caller.
         */
        if (unlikely(nla_type(a) == OVS_ACTION_ATTR_TRUNC)) {
                struct ovs_action_trunc *trunc = nla_data(a);

                if (skb->len > trunc->max_len)
                        cutlen = skb->len - trunc->max_len;

                a = nla_next(a, &rem);

        if (likely(nla_type(a) == OVS_ACTION_ATTR_USERSPACE &&
                   nla_is_last(a, rem)))
                return output_userspace(dp, skb, key, a, actions,
                                        actions_len, cutlen);

        skb = skb_clone(skb, GFP_ATOMIC);

        /* Skip the sample action when out of memory. */

        if (!add_deferred_actions(skb, key, a)) {
                pr_warn("%s: deferred actions limit reached, dropping sample action\n",
static void execute_hash(struct sk_buff *skb, struct sw_flow_key *key,
                         const struct nlattr *attr)
        struct ovs_action_hash *hash_act = nla_data(attr);

        /* OVS_HASH_ALG_L4 is the only possible hash algorithm. */
        hash = skb_get_hash(skb);
        hash = jhash_1word(hash, hash_act->hash_basis);

        key->ovs_flow_hash = hash;
static int execute_set_action(struct sk_buff *skb,
                              struct sw_flow_key *flow_key,
                              const struct nlattr *a)
        /* Only tunnel set execution is supported without a mask. */
        if (nla_type(a) == OVS_KEY_ATTR_TUNNEL_INFO) {
                struct ovs_tunnel_info *tun = nla_data(a);

                dst_hold((struct dst_entry *)tun->tun_dst);
                skb_dst_set(skb, (struct dst_entry *)tun->tun_dst);
/* Mask is at the midpoint of the data. */
#define get_mask(a, type) ((const type)nla_data(a) + 1)
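
/*
 * The attribute payload of a masked set action holds the value followed by
 * a mask of the same size; for OVS_KEY_ATTR_IPV4 the layout is roughly:
 *
 *      struct ovs_key_ipv4 value;      <- nla_data(a)
 *      struct ovs_key_ipv4 mask;       <- get_mask(a, struct ovs_key_ipv4 *)
 *
 * so casting nla_data() to the key pointer type and adding one steps over
 * the value and lands on the mask.
 */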
static int execute_masked_set_action(struct sk_buff *skb,
                                     struct sw_flow_key *flow_key,
                                     const struct nlattr *a)
        switch (nla_type(a)) {
        case OVS_KEY_ATTR_PRIORITY:
                OVS_SET_MASKED(skb->priority, nla_get_u32(a),
                               *get_mask(a, u32 *));
                flow_key->phy.priority = skb->priority;

        case OVS_KEY_ATTR_SKB_MARK:
                OVS_SET_MASKED(skb->mark, nla_get_u32(a), *get_mask(a, u32 *));
                flow_key->phy.skb_mark = skb->mark;

        case OVS_KEY_ATTR_TUNNEL_INFO:
                /* Masked data not supported for tunnel. */

        case OVS_KEY_ATTR_ETHERNET:
                err = set_eth_addr(skb, flow_key, nla_data(a),
                                   get_mask(a, struct ovs_key_ethernet *));

        case OVS_KEY_ATTR_IPV4:
                err = set_ipv4(skb, flow_key, nla_data(a),
                               get_mask(a, struct ovs_key_ipv4 *));

        case OVS_KEY_ATTR_IPV6:
                err = set_ipv6(skb, flow_key, nla_data(a),
                               get_mask(a, struct ovs_key_ipv6 *));

        case OVS_KEY_ATTR_TCP:
                err = set_tcp(skb, flow_key, nla_data(a),
                              get_mask(a, struct ovs_key_tcp *));

        case OVS_KEY_ATTR_UDP:
                err = set_udp(skb, flow_key, nla_data(a),
                              get_mask(a, struct ovs_key_udp *));

        case OVS_KEY_ATTR_SCTP:
                err = set_sctp(skb, flow_key, nla_data(a),
                               get_mask(a, struct ovs_key_sctp *));

        case OVS_KEY_ATTR_MPLS:
                err = set_mpls(skb, flow_key, nla_data(a), get_mask(a,

        case OVS_KEY_ATTR_CT_STATE:
        case OVS_KEY_ATTR_CT_ZONE:
        case OVS_KEY_ATTR_CT_MARK:
        case OVS_KEY_ATTR_CT_LABELS:
static int execute_recirc(struct datapath *dp, struct sk_buff *skb,
                          struct sw_flow_key *key,
                          const struct nlattr *a, int rem)
        struct deferred_action *da;

        if (!is_flow_key_valid(key)) {
                err = ovs_flow_key_update(skb, key);

        BUG_ON(!is_flow_key_valid(key));

        if (!nla_is_last(a, rem)) {
                /* The recirc action is not the last action of the action
                 * list; the skb needs to be cloned so the rest of the list
                 * can still be applied to the original packet.
                 */
                skb = skb_clone(skb, GFP_ATOMIC);

                /* Skip the recirc action when out of memory, but
                 * continue on with the rest of the action list.
                 */

        level = this_cpu_read(exec_actions_level);
        if (level <= OVS_DEFERRED_ACTION_THRESHOLD) {
                struct recirc_keys *rks = this_cpu_ptr(recirc_keys);
                struct sw_flow_key *recirc_key = &rks->key[level - 1];

                recirc_key->recirc_id = nla_get_u32(a);
                ovs_dp_process_packet(skb, recirc_key);

        da = add_deferred_actions(skb, key, NULL);
                da->pkt_key.recirc_id = nla_get_u32(a);

                if (net_ratelimit())
                        pr_warn("%s: deferred action limit reached, drop recirc action\n",
/* Execute a list of actions against 'skb'. */
static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
                              struct sw_flow_key *key,
                              const struct nlattr *attr, int len)
        /* Every output action needs a separate clone of 'skb', but the common
         * case is just a single output action, so doing a clone and then
         * freeing the original skbuff is wasteful.  So the following code is
         * slightly obscure just to avoid that.
         */
        const struct nlattr *a;

        for (a = attr, rem = len; rem > 0;
             a = nla_next(a, &rem)) {

                if (unlikely(prev_port != -1)) {
                        struct sk_buff *out_skb = skb_clone(skb, GFP_ATOMIC);

                                do_output(dp, out_skb, prev_port, key);

                        OVS_CB(skb)->cutlen = 0;
                switch (nla_type(a)) {
                case OVS_ACTION_ATTR_OUTPUT:
                        prev_port = nla_get_u32(a);

                case OVS_ACTION_ATTR_TRUNC: {
                        struct ovs_action_trunc *trunc = nla_data(a);

                        if (skb->len > trunc->max_len)
                                OVS_CB(skb)->cutlen = skb->len - trunc->max_len;

                case OVS_ACTION_ATTR_USERSPACE:
                        output_userspace(dp, skb, key, a, attr,
                                         len, OVS_CB(skb)->cutlen);
                        OVS_CB(skb)->cutlen = 0;

                case OVS_ACTION_ATTR_HASH:
                        execute_hash(skb, key, a);

                case OVS_ACTION_ATTR_PUSH_MPLS:
                        err = push_mpls(skb, key, nla_data(a));

                case OVS_ACTION_ATTR_POP_MPLS:
                        err = pop_mpls(skb, key, nla_get_be16(a));

                case OVS_ACTION_ATTR_PUSH_VLAN:
                        err = push_vlan(skb, key, nla_data(a));

                case OVS_ACTION_ATTR_POP_VLAN:
                        err = pop_vlan(skb, key);

                case OVS_ACTION_ATTR_RECIRC:
                        err = execute_recirc(dp, skb, key, a, rem);
                        if (nla_is_last(a, rem)) {
                                /* If this is the last action, the skb has
                                 * been consumed or freed.
                                 * Return immediately.
                                 */

                case OVS_ACTION_ATTR_SET:
                        err = execute_set_action(skb, key, nla_data(a));

                case OVS_ACTION_ATTR_SET_MASKED:
                case OVS_ACTION_ATTR_SET_TO_MASKED:
                        err = execute_masked_set_action(skb, key, nla_data(a));

                case OVS_ACTION_ATTR_SAMPLE:
                        err = sample(dp, skb, key, a, attr, len);

                case OVS_ACTION_ATTR_CT:
                        if (!is_flow_key_valid(key)) {
                                err = ovs_flow_key_update(skb, key);

                        err = ovs_ct_execute(ovs_dp_get_net(dp), skb, key,

                        /* Hide stolen IP fragments from user space. */
                                return err == -EINPROGRESS ? 0 : err;
                if (unlikely(err)) {

        if (prev_port != -1)
                do_output(dp, skb, prev_port, key);
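
/*
 * Drain the per-CPU FIFO that execute_recirc() and sample() may have filled
 * while the action list above was running.  Entries carrying an action list
 * are executed with do_execute_actions(); entries queued by execute_recirc()
 * (no action list) re-enter the datapath via ovs_dp_process_packet().
 */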
static void process_deferred_actions(struct datapath *dp)
        struct action_fifo *fifo = this_cpu_ptr(action_fifos);

        /* Do not touch the FIFO if there are no deferred actions. */
        if (action_fifo_is_empty(fifo))

        /* Finish executing all deferred actions. */
                struct deferred_action *da = action_fifo_get(fifo);
                struct sk_buff *skb = da->skb;
                struct sw_flow_key *key = &da->pkt_key;
                const struct nlattr *actions = da->actions;

                        do_execute_actions(dp, skb, key, actions,
                        ovs_dp_process_packet(skb, key);
        } while (!action_fifo_is_empty(fifo));

        /* Reset FIFO for the next packet. */
        action_fifo_init(fifo);
/* Execute a list of actions against 'skb'. */
int ovs_execute_actions(struct datapath *dp, struct sk_buff *skb,
                        const struct sw_flow_actions *acts,
                        struct sw_flow_key *key)
        level = __this_cpu_inc_return(exec_actions_level);
        if (unlikely(level > OVS_RECURSION_LIMIT)) {
                net_crit_ratelimited("ovs: recursion limit reached on datapath %s, probable configuration error\n",

        OVS_CB(skb)->acts_origlen = acts->orig_len;
        err = do_execute_actions(dp, skb, key,
                                 acts->actions, acts->actions_len);

        process_deferred_actions(dp);

        __this_cpu_dec(exec_actions_level);
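
/*
 * Allocate the per-CPU deferred-action FIFOs and recirculation key storage
 * at module init; ovs_execute_actions() relies on both being available.
 */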
int action_fifos_init(void)
        action_fifos = alloc_percpu(struct action_fifo);

        recirc_keys = alloc_percpu(struct recirc_keys);
                free_percpu(action_fifos);

void action_fifos_exit(void)
        free_percpu(action_fifos);
        free_percpu(recirc_keys);