GNU Linux-libre 4.4.288-gnu1: net/openvswitch/datapath.c
1 /*
2  * Copyright (c) 2007-2014 Nicira, Inc.
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of version 2 of the GNU General Public
6  * License as published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope that it will be useful, but
9  * WITHOUT ANY WARRANTY; without even the implied warranty of
10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11  * General Public License for more details.
12  *
13  * You should have received a copy of the GNU General Public License
14  * along with this program; if not, write to the Free Software
15  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
16  * 02110-1301, USA
17  */
18
19 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
20
21 #include <linux/init.h>
22 #include <linux/module.h>
23 #include <linux/if_arp.h>
24 #include <linux/if_vlan.h>
25 #include <linux/in.h>
26 #include <linux/ip.h>
27 #include <linux/jhash.h>
28 #include <linux/delay.h>
29 #include <linux/time.h>
30 #include <linux/etherdevice.h>
31 #include <linux/genetlink.h>
32 #include <linux/kernel.h>
33 #include <linux/kthread.h>
34 #include <linux/mutex.h>
35 #include <linux/percpu.h>
36 #include <linux/rcupdate.h>
37 #include <linux/tcp.h>
38 #include <linux/udp.h>
39 #include <linux/ethtool.h>
40 #include <linux/wait.h>
41 #include <asm/div64.h>
42 #include <linux/highmem.h>
43 #include <linux/netfilter_bridge.h>
44 #include <linux/netfilter_ipv4.h>
45 #include <linux/inetdevice.h>
46 #include <linux/list.h>
47 #include <linux/openvswitch.h>
48 #include <linux/rculist.h>
49 #include <linux/dmi.h>
50 #include <net/genetlink.h>
51 #include <net/net_namespace.h>
52 #include <net/netns/generic.h>
53
54 #include "datapath.h"
55 #include "flow.h"
56 #include "flow_table.h"
57 #include "flow_netlink.h"
58 #include "vport-internal_dev.h"
59 #include "vport-netdev.h"
60
61 int ovs_net_id __read_mostly;
62 EXPORT_SYMBOL_GPL(ovs_net_id);
63
64 static struct genl_family dp_packet_genl_family;
65 static struct genl_family dp_flow_genl_family;
66 static struct genl_family dp_datapath_genl_family;
67
68 static const struct nla_policy flow_policy[];
69
70 static const struct genl_multicast_group ovs_dp_flow_multicast_group = {
71         .name = OVS_FLOW_MCGROUP,
72 };
73
74 static const struct genl_multicast_group ovs_dp_datapath_multicast_group = {
75         .name = OVS_DATAPATH_MCGROUP,
76 };
77
78 static const struct genl_multicast_group ovs_dp_vport_multicast_group = {
79         .name = OVS_VPORT_MCGROUP,
80 };
81
 82 /* Check if we need to build a reply message.
 83  * OVS userspace sets the NLM_F_ECHO flag if it needs the reply. */
84 static bool ovs_must_notify(struct genl_family *family, struct genl_info *info,
85                             unsigned int group)
86 {
87         return info->nlhdr->nlmsg_flags & NLM_F_ECHO ||
88                genl_has_listeners(family, genl_info_net(info), group);
89 }
90
91 static void ovs_notify(struct genl_family *family,
92                        struct sk_buff *skb, struct genl_info *info)
93 {
94         genl_notify(family, skb, info, 0, GFP_KERNEL);
95 }
96
97 /**
98  * DOC: Locking:
99  *
 100  * All writes, e.g. writes to device state (add/remove datapath, port, set
 101  * operations on vports, etc.) and writes to other state (flow table
 102  * modifications, setting miscellaneous datapath parameters, etc.), are
 103  * protected by ovs_lock.
104  *
105  * Reads are protected by RCU.
106  *
107  * There are a few special cases (mostly stats) that have their own
 108  * synchronization, but they nest under all of the above and don't interact
 109  * with each other.
110  *
111  * The RTNL lock nests inside ovs_mutex.
112  */
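
/* Illustrative sketch of the scheme above (not code that is compiled here):
 * a writer brackets its update with ovs_lock()/ovs_unlock(), while a reader
 * only needs an RCU read-side critical section, e.g.
 *
 *	ovs_lock();
 *	... add/remove a vport, modify the flow table, etc. ...
 *	ovs_unlock();
 *
 *	rcu_read_lock();
 *	dp = get_dp_rcu(net, dp_ifindex);
 *	... read-only use of dp ...
 *	rcu_read_unlock();
 */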
113
114 static DEFINE_MUTEX(ovs_mutex);
115
116 void ovs_lock(void)
117 {
118         mutex_lock(&ovs_mutex);
119 }
120
121 void ovs_unlock(void)
122 {
123         mutex_unlock(&ovs_mutex);
124 }
125
126 #ifdef CONFIG_LOCKDEP
127 int lockdep_ovsl_is_held(void)
128 {
129         if (debug_locks)
130                 return lockdep_is_held(&ovs_mutex);
131         else
132                 return 1;
133 }
134 EXPORT_SYMBOL_GPL(lockdep_ovsl_is_held);
135 #endif
136
137 static struct vport *new_vport(const struct vport_parms *);
138 static int queue_gso_packets(struct datapath *dp, struct sk_buff *,
139                              const struct sw_flow_key *,
140                              const struct dp_upcall_info *);
141 static int queue_userspace_packet(struct datapath *dp, struct sk_buff *,
142                                   const struct sw_flow_key *,
143                                   const struct dp_upcall_info *);
144
145 /* Must be called with rcu_read_lock. */
146 static struct datapath *get_dp_rcu(struct net *net, int dp_ifindex)
147 {
148         struct net_device *dev = dev_get_by_index_rcu(net, dp_ifindex);
149
150         if (dev) {
151                 struct vport *vport = ovs_internal_dev_get_vport(dev);
152                 if (vport)
153                         return vport->dp;
154         }
155
156         return NULL;
157 }
158
159 /* The caller must hold either ovs_mutex or rcu_read_lock to keep the
160  * returned dp pointer valid.
161  */
162 static inline struct datapath *get_dp(struct net *net, int dp_ifindex)
163 {
164         struct datapath *dp;
165
166         WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_ovsl_is_held());
167         rcu_read_lock();
168         dp = get_dp_rcu(net, dp_ifindex);
169         rcu_read_unlock();
170
171         return dp;
172 }
173
174 /* Must be called with rcu_read_lock or ovs_mutex. */
175 const char *ovs_dp_name(const struct datapath *dp)
176 {
177         struct vport *vport = ovs_vport_ovsl_rcu(dp, OVSP_LOCAL);
178         return ovs_vport_name(vport);
179 }
180
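/* Return the ifindex of the datapath's local port, or 0 if the datapath has
 * no local port.
 */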
181 static int get_dpifindex(const struct datapath *dp)
182 {
183         struct vport *local;
184         int ifindex;
185
186         rcu_read_lock();
187
188         local = ovs_vport_rcu(dp, OVSP_LOCAL);
189         if (local)
190                 ifindex = local->dev->ifindex;
191         else
192                 ifindex = 0;
193
194         rcu_read_unlock();
195
196         return ifindex;
197 }
198
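/* RCU callback that releases a datapath's flow table, statistics, port table
 * and the datapath itself once the grace period has elapsed.
 */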
199 static void destroy_dp_rcu(struct rcu_head *rcu)
200 {
201         struct datapath *dp = container_of(rcu, struct datapath, rcu);
202
203         ovs_flow_tbl_destroy(&dp->table);
204         free_percpu(dp->stats_percpu);
205         kfree(dp->ports);
206         kfree(dp);
207 }
208
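/* Map a port number to its bucket in the datapath's vport hash table.  The
 * mask below assumes DP_VPORT_HASH_BUCKETS is a power of two.
 */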
209 static struct hlist_head *vport_hash_bucket(const struct datapath *dp,
210                                             u16 port_no)
211 {
212         return &dp->ports[port_no & (DP_VPORT_HASH_BUCKETS - 1)];
213 }
214
215 /* Called with ovs_mutex or RCU read lock. */
216 struct vport *ovs_lookup_vport(const struct datapath *dp, u16 port_no)
217 {
218         struct vport *vport;
219         struct hlist_head *head;
220
221         head = vport_hash_bucket(dp, port_no);
222         hlist_for_each_entry_rcu(vport, head, dp_hash_node) {
223                 if (vport->port_no == port_no)
224                         return vport;
225         }
226         return NULL;
227 }
228
229 /* Called with ovs_mutex. */
230 static struct vport *new_vport(const struct vport_parms *parms)
231 {
232         struct vport *vport;
233
234         vport = ovs_vport_add(parms);
235         if (!IS_ERR(vport)) {
236                 struct datapath *dp = parms->dp;
237                 struct hlist_head *head = vport_hash_bucket(dp, vport->port_no);
238
239                 hlist_add_head_rcu(&vport->dp_hash_node, head);
240         }
241         return vport;
242 }
243
244 void ovs_dp_detach_port(struct vport *p)
245 {
246         ASSERT_OVSL();
247
248         /* First drop references to device. */
249         hlist_del_rcu(&p->dp_hash_node);
250
251         /* Then destroy it. */
252         ovs_vport_del(p);
253 }
254
255 /* Must be called with rcu_read_lock. */
256 void ovs_dp_process_packet(struct sk_buff *skb, struct sw_flow_key *key)
257 {
258         const struct vport *p = OVS_CB(skb)->input_vport;
259         struct datapath *dp = p->dp;
260         struct sw_flow *flow;
261         struct sw_flow_actions *sf_acts;
262         struct dp_stats_percpu *stats;
263         u64 *stats_counter;
264         u32 n_mask_hit;
265
266         stats = this_cpu_ptr(dp->stats_percpu);
267
268         /* Look up flow. */
269         flow = ovs_flow_tbl_lookup_stats(&dp->table, key, &n_mask_hit);
270         if (unlikely(!flow)) {
271                 struct dp_upcall_info upcall;
272                 int error;
273
274                 memset(&upcall, 0, sizeof(upcall));
275                 upcall.cmd = OVS_PACKET_CMD_MISS;
276                 upcall.portid = ovs_vport_find_upcall_portid(p, skb);
277                 upcall.mru = OVS_CB(skb)->mru;
278                 error = ovs_dp_upcall(dp, skb, key, &upcall);
279                 if (unlikely(error))
280                         kfree_skb(skb);
281                 else
282                         consume_skb(skb);
283                 stats_counter = &stats->n_missed;
284                 goto out;
285         }
286
287         ovs_flow_stats_update(flow, key->tp.flags, skb);
288         sf_acts = rcu_dereference(flow->sf_acts);
289         ovs_execute_actions(dp, skb, sf_acts, key);
290
291         stats_counter = &stats->n_hit;
292
293 out:
294         /* Update datapath statistics. */
295         u64_stats_update_begin(&stats->syncp);
296         (*stats_counter)++;
297         stats->n_mask_hit += n_mask_hit;
298         u64_stats_update_end(&stats->syncp);
299 }
300
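/* Queue a packet to the userspace port given in @upcall_info, segmenting it
 * first if it is GSO.  On failure the per-CPU n_lost counter is incremented.
 */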
301 int ovs_dp_upcall(struct datapath *dp, struct sk_buff *skb,
302                   const struct sw_flow_key *key,
303                   const struct dp_upcall_info *upcall_info)
304 {
305         struct dp_stats_percpu *stats;
306         int err;
307
308         if (upcall_info->portid == 0) {
309                 err = -ENOTCONN;
310                 goto err;
311         }
312
313         if (!skb_is_gso(skb))
314                 err = queue_userspace_packet(dp, skb, key, upcall_info);
315         else
316                 err = queue_gso_packets(dp, skb, key, upcall_info);
317         if (err)
318                 goto err;
319
320         return 0;
321
322 err:
323         stats = this_cpu_ptr(dp->stats_percpu);
324
325         u64_stats_update_begin(&stats->syncp);
326         stats->n_lost++;
327         u64_stats_update_end(&stats->syncp);
328
329         return err;
330 }
331
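/* Segment a GSO packet and queue every resulting segment to userspace as a
 * separate upcall.  For UDP fragmentation, segments after the first are
 * reported with a key marked OVS_FRAG_TYPE_LATER.
 */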
332 static int queue_gso_packets(struct datapath *dp, struct sk_buff *skb,
333                              const struct sw_flow_key *key,
334                              const struct dp_upcall_info *upcall_info)
335 {
336         unsigned short gso_type = skb_shinfo(skb)->gso_type;
337         struct sw_flow_key later_key;
338         struct sk_buff *segs, *nskb;
339         int err;
340
341         BUILD_BUG_ON(sizeof(*OVS_CB(skb)) > SKB_SGO_CB_OFFSET);
342         segs = __skb_gso_segment(skb, NETIF_F_SG, false);
343         if (IS_ERR(segs))
344                 return PTR_ERR(segs);
345         if (segs == NULL)
346                 return -EINVAL;
347
348         if (gso_type & SKB_GSO_UDP) {
349                 /* The initial flow key extracted by ovs_flow_key_extract()
350                  * in this case is for a first fragment, so we need to
351                  * properly mark later fragments.
352                  */
353                 later_key = *key;
354                 later_key.ip.frag = OVS_FRAG_TYPE_LATER;
355         }
356
357         /* Queue all of the segments. */
358         skb = segs;
359         do {
360                 if (gso_type & SKB_GSO_UDP && skb != segs)
361                         key = &later_key;
362
363                 err = queue_userspace_packet(dp, skb, key, upcall_info);
364                 if (err)
365                         break;
366
367         } while ((skb = skb->next));
368
369         /* Free all of the segments. */
370         skb = segs;
371         do {
372                 nskb = skb->next;
373                 if (err)
374                         kfree_skb(skb);
375                 else
376                         consume_skb(skb);
377         } while ((skb = nskb));
378         return err;
379 }
380
381 static size_t upcall_msg_size(const struct dp_upcall_info *upcall_info,
382                               unsigned int hdrlen)
383 {
384         size_t size = NLMSG_ALIGN(sizeof(struct ovs_header))
385                 + nla_total_size(hdrlen) /* OVS_PACKET_ATTR_PACKET */
386                 + nla_total_size(ovs_key_attr_size()); /* OVS_PACKET_ATTR_KEY */
387
388         /* OVS_PACKET_ATTR_USERDATA */
389         if (upcall_info->userdata)
390                 size += NLA_ALIGN(upcall_info->userdata->nla_len);
391
392         /* OVS_PACKET_ATTR_EGRESS_TUN_KEY */
393         if (upcall_info->egress_tun_info)
394                 size += nla_total_size(ovs_tun_key_attr_size());
395
396         /* OVS_PACKET_ATTR_ACTIONS */
397         if (upcall_info->actions_len)
398                 size += nla_total_size(upcall_info->actions_len);
399
400         /* OVS_PACKET_ATTR_MRU */
401         if (upcall_info->mru)
402                 size += nla_total_size(sizeof(upcall_info->mru));
403
404         return size;
405 }
406
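/* Zero-pad the skb tail up to the next NLA_ALIGN boundary, unless userspace
 * negotiated OVS_DP_F_UNALIGNED and accepts unaligned attributes.
 */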
407 static void pad_packet(struct datapath *dp, struct sk_buff *skb)
408 {
409         if (!(dp->user_features & OVS_DP_F_UNALIGNED)) {
410                 size_t plen = NLA_ALIGN(skb->len) - skb->len;
411
412                 if (plen > 0)
413                         memset(skb_put(skb, plen), 0, plen);
414         }
415 }
416
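/* Build and unicast a single upcall message to userspace.  The message built
 * below roughly looks like this (optional attributes are present only when
 * @upcall_info carries them):
 *
 *	struct ovs_header               dp_ifindex
 *	OVS_PACKET_ATTR_KEY             flow key extracted from the packet
 *	OVS_PACKET_ATTR_USERDATA        cookie from the userspace action
 *	OVS_PACKET_ATTR_EGRESS_TUN_KEY  egress tunnel info
 *	OVS_PACKET_ATTR_ACTIONS         actions attached to the upcall
 *	OVS_PACKET_ATTR_MRU             MRU recorded for the packet
 *	OVS_PACKET_ATTR_PACKET          the packet data itself
 */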
417 static int queue_userspace_packet(struct datapath *dp, struct sk_buff *skb,
418                                   const struct sw_flow_key *key,
419                                   const struct dp_upcall_info *upcall_info)
420 {
421         struct ovs_header *upcall;
422         struct sk_buff *nskb = NULL;
423         struct sk_buff *user_skb = NULL; /* to be queued to userspace */
424         struct nlattr *nla;
425         struct genl_info info = {
426                 .dst_sk = ovs_dp_get_net(dp)->genl_sock,
427                 .snd_portid = upcall_info->portid,
428         };
429         size_t len;
430         unsigned int hlen;
431         int err, dp_ifindex;
432
433         dp_ifindex = get_dpifindex(dp);
434         if (!dp_ifindex)
435                 return -ENODEV;
436
437         if (skb_vlan_tag_present(skb)) {
438                 nskb = skb_clone(skb, GFP_ATOMIC);
439                 if (!nskb)
440                         return -ENOMEM;
441
442                 nskb = __vlan_hwaccel_push_inside(nskb);
443                 if (!nskb)
444                         return -ENOMEM;
445
446                 skb = nskb;
447         }
448
449         if (nla_attr_size(skb->len) > USHRT_MAX) {
450                 err = -EFBIG;
451                 goto out;
452         }
453
454         /* Complete checksum if needed */
455         if (skb->ip_summed == CHECKSUM_PARTIAL &&
456             (err = skb_checksum_help(skb)))
457                 goto out;
458
459         /* Older versions of OVS user space enforce alignment of the last
460          * Netlink attribute to NLA_ALIGNTO which would require extensive
461          * padding logic. Only perform zerocopy if padding is not required.
462          */
463         if (dp->user_features & OVS_DP_F_UNALIGNED)
464                 hlen = skb_zerocopy_headlen(skb);
465         else
466                 hlen = skb->len;
467
468         len = upcall_msg_size(upcall_info, hlen);
469         user_skb = genlmsg_new_unicast(len, &info, GFP_ATOMIC);
470         if (!user_skb) {
471                 err = -ENOMEM;
472                 goto out;
473         }
474
475         upcall = genlmsg_put(user_skb, 0, 0, &dp_packet_genl_family,
476                              0, upcall_info->cmd);
477         upcall->dp_ifindex = dp_ifindex;
478
479         err = ovs_nla_put_key(key, key, OVS_PACKET_ATTR_KEY, false, user_skb);
480         BUG_ON(err);
481
482         if (upcall_info->userdata)
483                 __nla_put(user_skb, OVS_PACKET_ATTR_USERDATA,
484                           nla_len(upcall_info->userdata),
485                           nla_data(upcall_info->userdata));
486
487         if (upcall_info->egress_tun_info) {
488                 nla = nla_nest_start(user_skb, OVS_PACKET_ATTR_EGRESS_TUN_KEY);
489                 err = ovs_nla_put_tunnel_info(user_skb,
490                                               upcall_info->egress_tun_info);
491                 BUG_ON(err);
492                 nla_nest_end(user_skb, nla);
493         }
494
495         if (upcall_info->actions_len) {
496                 nla = nla_nest_start(user_skb, OVS_PACKET_ATTR_ACTIONS);
497                 err = ovs_nla_put_actions(upcall_info->actions,
498                                           upcall_info->actions_len,
499                                           user_skb);
500                 if (!err)
501                         nla_nest_end(user_skb, nla);
502                 else
503                         nla_nest_cancel(user_skb, nla);
504         }
505
506         /* Add OVS_PACKET_ATTR_MRU */
507         if (upcall_info->mru) {
508                 if (nla_put_u16(user_skb, OVS_PACKET_ATTR_MRU,
509                                 upcall_info->mru)) {
510                         err = -ENOBUFS;
511                         goto out;
512                 }
513                 pad_packet(dp, user_skb);
514         }
515
 516         /* Only reserve room for the attribute header; packet data is
 517          * added in skb_zerocopy(). */
518         if (!(nla = nla_reserve(user_skb, OVS_PACKET_ATTR_PACKET, 0))) {
519                 err = -ENOBUFS;
520                 goto out;
521         }
522         nla->nla_len = nla_attr_size(skb->len);
523
524         err = skb_zerocopy(user_skb, skb, skb->len, hlen);
525         if (err)
526                 goto out;
527
528         /* Pad OVS_PACKET_ATTR_PACKET if linear copy was performed */
529         pad_packet(dp, user_skb);
530
531         ((struct nlmsghdr *) user_skb->data)->nlmsg_len = user_skb->len;
532
533         err = genlmsg_unicast(ovs_dp_get_net(dp), user_skb, upcall_info->portid);
534         user_skb = NULL;
535 out:
536         if (err)
537                 skb_tx_error(skb);
538         kfree_skb(user_skb);
539         kfree_skb(nskb);
540         return err;
541 }
542
543 static int ovs_packet_cmd_execute(struct sk_buff *skb, struct genl_info *info)
544 {
545         struct ovs_header *ovs_header = info->userhdr;
546         struct net *net = sock_net(skb->sk);
547         struct nlattr **a = info->attrs;
548         struct sw_flow_actions *acts;
549         struct sk_buff *packet;
550         struct sw_flow *flow;
551         struct sw_flow_actions *sf_acts;
552         struct datapath *dp;
553         struct ethhdr *eth;
554         struct vport *input_vport;
555         u16 mru = 0;
556         int len;
557         int err;
558         bool log = !a[OVS_PACKET_ATTR_PROBE];
559
560         err = -EINVAL;
561         if (!a[OVS_PACKET_ATTR_PACKET] || !a[OVS_PACKET_ATTR_KEY] ||
562             !a[OVS_PACKET_ATTR_ACTIONS])
563                 goto err;
564
565         len = nla_len(a[OVS_PACKET_ATTR_PACKET]);
566         packet = __dev_alloc_skb(NET_IP_ALIGN + len, GFP_KERNEL);
567         err = -ENOMEM;
568         if (!packet)
569                 goto err;
570         skb_reserve(packet, NET_IP_ALIGN);
571
572         nla_memcpy(__skb_put(packet, len), a[OVS_PACKET_ATTR_PACKET], len);
573
574         skb_reset_mac_header(packet);
575         eth = eth_hdr(packet);
576
577         /* Normally, setting the skb 'protocol' field would be handled by a
578          * call to eth_type_trans(), but it assumes there's a sending
579          * device, which we may not have. */
580         if (eth_proto_is_802_3(eth->h_proto))
581                 packet->protocol = eth->h_proto;
582         else
583                 packet->protocol = htons(ETH_P_802_2);
584
585         /* Set packet's mru */
586         if (a[OVS_PACKET_ATTR_MRU]) {
587                 mru = nla_get_u16(a[OVS_PACKET_ATTR_MRU]);
588                 packet->ignore_df = 1;
589         }
590         OVS_CB(packet)->mru = mru;
591
592         /* Build an sw_flow for sending this packet. */
593         flow = ovs_flow_alloc();
594         err = PTR_ERR(flow);
595         if (IS_ERR(flow))
596                 goto err_kfree_skb;
597
598         err = ovs_flow_key_extract_userspace(net, a[OVS_PACKET_ATTR_KEY],
599                                              packet, &flow->key, log);
600         if (err)
601                 goto err_flow_free;
602
603         err = ovs_nla_copy_actions(net, a[OVS_PACKET_ATTR_ACTIONS],
604                                    &flow->key, &acts, log);
605         if (err)
606                 goto err_flow_free;
607
608         rcu_assign_pointer(flow->sf_acts, acts);
609         packet->priority = flow->key.phy.priority;
610         packet->mark = flow->key.phy.skb_mark;
611
612         rcu_read_lock();
613         dp = get_dp_rcu(net, ovs_header->dp_ifindex);
614         err = -ENODEV;
615         if (!dp)
616                 goto err_unlock;
617
618         input_vport = ovs_vport_rcu(dp, flow->key.phy.in_port);
619         if (!input_vport)
620                 input_vport = ovs_vport_rcu(dp, OVSP_LOCAL);
621
622         if (!input_vport)
623                 goto err_unlock;
624
625         packet->dev = input_vport->dev;
626         OVS_CB(packet)->input_vport = input_vport;
627         sf_acts = rcu_dereference(flow->sf_acts);
628
629         local_bh_disable();
630         err = ovs_execute_actions(dp, packet, sf_acts, &flow->key);
631         local_bh_enable();
632         rcu_read_unlock();
633
634         ovs_flow_free(flow, false);
635         return err;
636
637 err_unlock:
638         rcu_read_unlock();
639 err_flow_free:
640         ovs_flow_free(flow, false);
641 err_kfree_skb:
642         kfree_skb(packet);
643 err:
644         return err;
645 }
646
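/* Netlink attribute policy for OVS_PACKET_* requests.  OVS_PACKET_ATTR_PACKET
 * must carry at least a full Ethernet header (ETH_HLEN bytes).
 */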
647 static const struct nla_policy packet_policy[OVS_PACKET_ATTR_MAX + 1] = {
648         [OVS_PACKET_ATTR_PACKET] = { .len = ETH_HLEN },
649         [OVS_PACKET_ATTR_KEY] = { .type = NLA_NESTED },
650         [OVS_PACKET_ATTR_ACTIONS] = { .type = NLA_NESTED },
651         [OVS_PACKET_ATTR_PROBE] = { .type = NLA_FLAG },
652         [OVS_PACKET_ATTR_MRU] = { .type = NLA_U16 },
653 };
654
655 static const struct genl_ops dp_packet_genl_ops[] = {
656         { .cmd = OVS_PACKET_CMD_EXECUTE,
657           .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
658           .policy = packet_policy,
659           .doit = ovs_packet_cmd_execute
660         }
661 };
662
663 static struct genl_family dp_packet_genl_family = {
664         .id = GENL_ID_GENERATE,
665         .hdrsize = sizeof(struct ovs_header),
666         .name = OVS_PACKET_FAMILY,
667         .version = OVS_PACKET_VERSION,
668         .maxattr = OVS_PACKET_ATTR_MAX,
669         .netnsok = true,
670         .parallel_ops = true,
671         .ops = dp_packet_genl_ops,
672         .n_ops = ARRAY_SIZE(dp_packet_genl_ops),
673 };
674
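/* Aggregate the per-CPU datapath counters into @stats and @mega_stats,
 * re-reading any CPU whose u64_stats sequence changed during the copy.
 */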
675 static void get_dp_stats(const struct datapath *dp, struct ovs_dp_stats *stats,
676                          struct ovs_dp_megaflow_stats *mega_stats)
677 {
678         int i;
679
680         memset(mega_stats, 0, sizeof(*mega_stats));
681
682         stats->n_flows = ovs_flow_tbl_count(&dp->table);
683         mega_stats->n_masks = ovs_flow_tbl_num_masks(&dp->table);
684
685         stats->n_hit = stats->n_missed = stats->n_lost = 0;
686
687         for_each_possible_cpu(i) {
688                 const struct dp_stats_percpu *percpu_stats;
689                 struct dp_stats_percpu local_stats;
690                 unsigned int start;
691
692                 percpu_stats = per_cpu_ptr(dp->stats_percpu, i);
693
694                 do {
695                         start = u64_stats_fetch_begin_irq(&percpu_stats->syncp);
696                         local_stats = *percpu_stats;
697                 } while (u64_stats_fetch_retry_irq(&percpu_stats->syncp, start));
698
699                 stats->n_hit += local_stats.n_hit;
700                 stats->n_missed += local_stats.n_missed;
701                 stats->n_lost += local_stats.n_lost;
702                 mega_stats->n_mask_hit += local_stats.n_mask_hit;
703         }
704 }
705
706 static bool should_fill_key(const struct sw_flow_id *sfid, uint32_t ufid_flags)
707 {
708         return ovs_identifier_is_ufid(sfid) &&
709                !(ufid_flags & OVS_UFID_F_OMIT_KEY);
710 }
711
712 static bool should_fill_mask(uint32_t ufid_flags)
713 {
714         return !(ufid_flags & OVS_UFID_F_OMIT_MASK);
715 }
716
717 static bool should_fill_actions(uint32_t ufid_flags)
718 {
719         return !(ufid_flags & OVS_UFID_F_OMIT_ACTIONS);
720 }
721
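/* Upper bound on the Netlink message size for a single flow reply; the
 * ufid_flags determine which of the key/mask/actions attributes may be
 * omitted.
 */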
722 static size_t ovs_flow_cmd_msg_size(const struct sw_flow_actions *acts,
723                                     const struct sw_flow_id *sfid,
724                                     uint32_t ufid_flags)
725 {
726         size_t len = NLMSG_ALIGN(sizeof(struct ovs_header));
727
728         /* OVS_FLOW_ATTR_UFID, or unmasked flow key as fallback
729          * see ovs_nla_put_identifier()
730          */
731         if (sfid && ovs_identifier_is_ufid(sfid))
732                 len += nla_total_size(sfid->ufid_len);
733         else
734                 len += nla_total_size(ovs_key_attr_size());
735
736         /* OVS_FLOW_ATTR_KEY */
737         if (!sfid || should_fill_key(sfid, ufid_flags))
738                 len += nla_total_size(ovs_key_attr_size());
739
740         /* OVS_FLOW_ATTR_MASK */
741         if (should_fill_mask(ufid_flags))
742                 len += nla_total_size(ovs_key_attr_size());
743
744         /* OVS_FLOW_ATTR_ACTIONS */
745         if (should_fill_actions(ufid_flags))
746                 len += nla_total_size(acts->orig_len);
747
748         return len
749                 + nla_total_size(sizeof(struct ovs_flow_stats)) /* OVS_FLOW_ATTR_STATS */
750                 + nla_total_size(1) /* OVS_FLOW_ATTR_TCP_FLAGS */
751                 + nla_total_size(8); /* OVS_FLOW_ATTR_USED */
752 }
753
754 /* Called with ovs_mutex or RCU read lock. */
755 static int ovs_flow_cmd_fill_stats(const struct sw_flow *flow,
756                                    struct sk_buff *skb)
757 {
758         struct ovs_flow_stats stats;
759         __be16 tcp_flags;
760         unsigned long used;
761
762         ovs_flow_stats_get(flow, &stats, &used, &tcp_flags);
763
764         if (used &&
765             nla_put_u64(skb, OVS_FLOW_ATTR_USED, ovs_flow_used_time(used)))
766                 return -EMSGSIZE;
767
768         if (stats.n_packets &&
769             nla_put(skb, OVS_FLOW_ATTR_STATS, sizeof(struct ovs_flow_stats), &stats))
770                 return -EMSGSIZE;
771
772         if ((u8)ntohs(tcp_flags) &&
773              nla_put_u8(skb, OVS_FLOW_ATTR_TCP_FLAGS, (u8)ntohs(tcp_flags)))
774                 return -EMSGSIZE;
775
776         return 0;
777 }
778
779 /* Called with ovs_mutex or RCU read lock. */
780 static int ovs_flow_cmd_fill_actions(const struct sw_flow *flow,
781                                      struct sk_buff *skb, int skb_orig_len)
782 {
783         struct nlattr *start;
784         int err;
785
786         /* If OVS_FLOW_ATTR_ACTIONS doesn't fit, skip dumping the actions if
787          * this is the first flow to be dumped into 'skb'.  This is unusual for
788          * Netlink but individual action lists can be longer than
789          * NLMSG_GOODSIZE and thus entirely undumpable if we didn't do this.
790          * The userspace caller can always fetch the actions separately if it
791          * really wants them.  (Most userspace callers in fact don't care.)
792          *
793          * This can only fail for dump operations because the skb is always
794          * properly sized for single flows.
795          */
796         start = nla_nest_start(skb, OVS_FLOW_ATTR_ACTIONS);
797         if (start) {
798                 const struct sw_flow_actions *sf_acts;
799
800                 sf_acts = rcu_dereference_ovsl(flow->sf_acts);
801                 err = ovs_nla_put_actions(sf_acts->actions,
802                                           sf_acts->actions_len, skb);
803
804                 if (!err)
805                         nla_nest_end(skb, start);
806                 else {
807                         if (skb_orig_len)
808                                 return err;
809
810                         nla_nest_cancel(skb, start);
811                 }
812         } else if (skb_orig_len) {
813                 return -EMSGSIZE;
814         }
815
816         return 0;
817 }
818
819 /* Called with ovs_mutex or RCU read lock. */
820 static int ovs_flow_cmd_fill_info(const struct sw_flow *flow, int dp_ifindex,
821                                   struct sk_buff *skb, u32 portid,
822                                   u32 seq, u32 flags, u8 cmd, u32 ufid_flags)
823 {
824         const int skb_orig_len = skb->len;
825         struct ovs_header *ovs_header;
826         int err;
827
828         ovs_header = genlmsg_put(skb, portid, seq, &dp_flow_genl_family,
829                                  flags, cmd);
830         if (!ovs_header)
831                 return -EMSGSIZE;
832
833         ovs_header->dp_ifindex = dp_ifindex;
834
835         err = ovs_nla_put_identifier(flow, skb);
836         if (err)
837                 goto error;
838
839         if (should_fill_key(&flow->id, ufid_flags)) {
840                 err = ovs_nla_put_masked_key(flow, skb);
841                 if (err)
842                         goto error;
843         }
844
845         if (should_fill_mask(ufid_flags)) {
846                 err = ovs_nla_put_mask(flow, skb);
847                 if (err)
848                         goto error;
849         }
850
851         err = ovs_flow_cmd_fill_stats(flow, skb);
852         if (err)
853                 goto error;
854
855         if (should_fill_actions(ufid_flags)) {
856                 err = ovs_flow_cmd_fill_actions(flow, skb, skb_orig_len);
857                 if (err)
858                         goto error;
859         }
860
861         genlmsg_end(skb, ovs_header);
862         return 0;
863
864 error:
865         genlmsg_cancel(skb, ovs_header);
866         return err;
867 }
868
869 /* May not be called with RCU read lock. */
870 static struct sk_buff *ovs_flow_cmd_alloc_info(const struct sw_flow_actions *acts,
871                                                const struct sw_flow_id *sfid,
872                                                struct genl_info *info,
873                                                bool always,
874                                                uint32_t ufid_flags)
875 {
876         struct sk_buff *skb;
877         size_t len;
878
879         if (!always && !ovs_must_notify(&dp_flow_genl_family, info, 0))
880                 return NULL;
881
882         len = ovs_flow_cmd_msg_size(acts, sfid, ufid_flags);
883         skb = genlmsg_new_unicast(len, info, GFP_KERNEL);
884         if (!skb)
885                 return ERR_PTR(-ENOMEM);
886
887         return skb;
888 }
889
890 /* Called with ovs_mutex. */
891 static struct sk_buff *ovs_flow_cmd_build_info(const struct sw_flow *flow,
892                                                int dp_ifindex,
893                                                struct genl_info *info, u8 cmd,
894                                                bool always, u32 ufid_flags)
895 {
896         struct sk_buff *skb;
897         int retval;
898
899         skb = ovs_flow_cmd_alloc_info(ovsl_dereference(flow->sf_acts),
900                                       &flow->id, info, always, ufid_flags);
901         if (IS_ERR_OR_NULL(skb))
902                 return skb;
903
904         retval = ovs_flow_cmd_fill_info(flow, dp_ifindex, skb,
905                                         info->snd_portid, info->snd_seq, 0,
906                                         cmd, ufid_flags);
907         if (WARN_ON_ONCE(retval < 0)) {
908                 kfree_skb(skb);
909                 skb = ERR_PTR(retval);
910         }
911         return skb;
912 }
913
914 static int ovs_flow_cmd_new(struct sk_buff *skb, struct genl_info *info)
915 {
916         struct net *net = sock_net(skb->sk);
917         struct nlattr **a = info->attrs;
918         struct ovs_header *ovs_header = info->userhdr;
919         struct sw_flow *flow = NULL, *new_flow;
920         struct sw_flow_mask mask;
921         struct sk_buff *reply;
922         struct datapath *dp;
923         struct sw_flow_key key;
924         struct sw_flow_actions *acts;
925         struct sw_flow_match match;
926         u32 ufid_flags = ovs_nla_get_ufid_flags(a[OVS_FLOW_ATTR_UFID_FLAGS]);
927         int error;
928         bool log = !a[OVS_FLOW_ATTR_PROBE];
929
930         /* Must have key and actions. */
931         error = -EINVAL;
932         if (!a[OVS_FLOW_ATTR_KEY]) {
933                 OVS_NLERR(log, "Flow key attr not present in new flow.");
934                 goto error;
935         }
936         if (!a[OVS_FLOW_ATTR_ACTIONS]) {
937                 OVS_NLERR(log, "Flow actions attr not present in new flow.");
938                 goto error;
939         }
940
 941         /* Most of the time we need to allocate a new flow, so do it
 942          * before taking the lock.
 943          */
944         new_flow = ovs_flow_alloc();
945         if (IS_ERR(new_flow)) {
946                 error = PTR_ERR(new_flow);
947                 goto error;
948         }
949
950         /* Extract key. */
951         ovs_match_init(&match, &key, &mask);
952         error = ovs_nla_get_match(net, &match, a[OVS_FLOW_ATTR_KEY],
953                                   a[OVS_FLOW_ATTR_MASK], log);
954         if (error)
955                 goto err_kfree_flow;
956
957         ovs_flow_mask_key(&new_flow->key, &key, true, &mask);
958
959         /* Extract flow identifier. */
960         error = ovs_nla_get_identifier(&new_flow->id, a[OVS_FLOW_ATTR_UFID],
961                                        &key, log);
962         if (error)
963                 goto err_kfree_flow;
964
965         /* Validate actions. */
966         error = ovs_nla_copy_actions(net, a[OVS_FLOW_ATTR_ACTIONS],
967                                      &new_flow->key, &acts, log);
968         if (error) {
969                 OVS_NLERR(log, "Flow actions may not be safe on all matching packets.");
970                 goto err_kfree_flow;
971         }
972
973         reply = ovs_flow_cmd_alloc_info(acts, &new_flow->id, info, false,
974                                         ufid_flags);
975         if (IS_ERR(reply)) {
976                 error = PTR_ERR(reply);
977                 goto err_kfree_acts;
978         }
979
980         ovs_lock();
981         dp = get_dp(net, ovs_header->dp_ifindex);
982         if (unlikely(!dp)) {
983                 error = -ENODEV;
984                 goto err_unlock_ovs;
985         }
986
987         /* Check if this is a duplicate flow */
988         if (ovs_identifier_is_ufid(&new_flow->id))
989                 flow = ovs_flow_tbl_lookup_ufid(&dp->table, &new_flow->id);
990         if (!flow)
991                 flow = ovs_flow_tbl_lookup(&dp->table, &key);
992         if (likely(!flow)) {
993                 rcu_assign_pointer(new_flow->sf_acts, acts);
994
995                 /* Put flow in bucket. */
996                 error = ovs_flow_tbl_insert(&dp->table, new_flow, &mask);
997                 if (unlikely(error)) {
998                         acts = NULL;
999                         goto err_unlock_ovs;
1000                 }
1001
1002                 if (unlikely(reply)) {
1003                         error = ovs_flow_cmd_fill_info(new_flow,
1004                                                        ovs_header->dp_ifindex,
1005                                                        reply, info->snd_portid,
1006                                                        info->snd_seq, 0,
1007                                                        OVS_FLOW_CMD_NEW,
1008                                                        ufid_flags);
1009                         BUG_ON(error < 0);
1010                 }
1011                 ovs_unlock();
1012         } else {
1013                 struct sw_flow_actions *old_acts;
1014
1015                 /* Bail out if we're not allowed to modify an existing flow.
1016                  * We accept NLM_F_CREATE in place of the intended NLM_F_EXCL
1017                  * because Generic Netlink treats the latter as a dump
1018                  * request.  We also accept NLM_F_EXCL in case that bug ever
1019                  * gets fixed.
1020                  */
1021                 if (unlikely(info->nlhdr->nlmsg_flags & (NLM_F_CREATE
1022                                                          | NLM_F_EXCL))) {
1023                         error = -EEXIST;
1024                         goto err_unlock_ovs;
1025                 }
1026                 /* The flow identifier has to be the same for flow updates.
1027                  * Look for any overlapping flow.
1028                  */
1029                 if (unlikely(!ovs_flow_cmp(flow, &match))) {
1030                         if (ovs_identifier_is_key(&flow->id))
1031                                 flow = ovs_flow_tbl_lookup_exact(&dp->table,
1032                                                                  &match);
1033                         else /* UFID matches but key is different */
1034                                 flow = NULL;
1035                         if (!flow) {
1036                                 error = -ENOENT;
1037                                 goto err_unlock_ovs;
1038                         }
1039                 }
1040                 /* Update actions. */
1041                 old_acts = ovsl_dereference(flow->sf_acts);
1042                 rcu_assign_pointer(flow->sf_acts, acts);
1043
1044                 if (unlikely(reply)) {
1045                         error = ovs_flow_cmd_fill_info(flow,
1046                                                        ovs_header->dp_ifindex,
1047                                                        reply, info->snd_portid,
1048                                                        info->snd_seq, 0,
1049                                                        OVS_FLOW_CMD_NEW,
1050                                                        ufid_flags);
1051                         BUG_ON(error < 0);
1052                 }
1053                 ovs_unlock();
1054
1055                 ovs_nla_free_flow_actions_rcu(old_acts);
1056                 ovs_flow_free(new_flow, false);
1057         }
1058
1059         if (reply)
1060                 ovs_notify(&dp_flow_genl_family, reply, info);
1061         return 0;
1062
1063 err_unlock_ovs:
1064         ovs_unlock();
1065         kfree_skb(reply);
1066 err_kfree_acts:
1067         ovs_nla_free_flow_actions(acts);
1068 err_kfree_flow:
1069         ovs_flow_free(new_flow, false);
1070 error:
1071         return error;
1072 }
1073
1074 /* Factor out action copy to avoid "-Wframe-larger-than=1024" warning. */
1075 static struct sw_flow_actions *get_flow_actions(struct net *net,
1076                                                 const struct nlattr *a,
1077                                                 const struct sw_flow_key *key,
1078                                                 const struct sw_flow_mask *mask,
1079                                                 bool log)
1080 {
1081         struct sw_flow_actions *acts;
1082         struct sw_flow_key masked_key;
1083         int error;
1084
1085         ovs_flow_mask_key(&masked_key, key, true, mask);
1086         error = ovs_nla_copy_actions(net, a, &masked_key, &acts, log);
1087         if (error) {
1088                 OVS_NLERR(log,
1089                           "Actions may not be safe on all matching packets");
1090                 return ERR_PTR(error);
1091         }
1092
1093         return acts;
1094 }
1095
1096 static int ovs_flow_cmd_set(struct sk_buff *skb, struct genl_info *info)
1097 {
1098         struct net *net = sock_net(skb->sk);
1099         struct nlattr **a = info->attrs;
1100         struct ovs_header *ovs_header = info->userhdr;
1101         struct sw_flow_key key;
1102         struct sw_flow *flow;
1103         struct sw_flow_mask mask;
1104         struct sk_buff *reply = NULL;
1105         struct datapath *dp;
1106         struct sw_flow_actions *old_acts = NULL, *acts = NULL;
1107         struct sw_flow_match match;
1108         struct sw_flow_id sfid;
1109         u32 ufid_flags = ovs_nla_get_ufid_flags(a[OVS_FLOW_ATTR_UFID_FLAGS]);
1110         int error;
1111         bool log = !a[OVS_FLOW_ATTR_PROBE];
1112         bool ufid_present;
1113
1114         /* Extract key. */
1115         error = -EINVAL;
1116         if (!a[OVS_FLOW_ATTR_KEY]) {
1117                 OVS_NLERR(log, "Flow key attribute not present in set flow.");
1118                 goto error;
1119         }
1120
1121         ufid_present = ovs_nla_get_ufid(&sfid, a[OVS_FLOW_ATTR_UFID], log);
1122         ovs_match_init(&match, &key, &mask);
1123         error = ovs_nla_get_match(net, &match, a[OVS_FLOW_ATTR_KEY],
1124                                   a[OVS_FLOW_ATTR_MASK], log);
1125         if (error)
1126                 goto error;
1127
1128         /* Validate actions. */
1129         if (a[OVS_FLOW_ATTR_ACTIONS]) {
1130                 acts = get_flow_actions(net, a[OVS_FLOW_ATTR_ACTIONS], &key,
1131                                         &mask, log);
1132                 if (IS_ERR(acts)) {
1133                         error = PTR_ERR(acts);
1134                         goto error;
1135                 }
1136
1137                 /* Can allocate before locking if we have acts. */
1138                 reply = ovs_flow_cmd_alloc_info(acts, &sfid, info, false,
1139                                                 ufid_flags);
1140                 if (IS_ERR(reply)) {
1141                         error = PTR_ERR(reply);
1142                         goto err_kfree_acts;
1143                 }
1144         }
1145
1146         ovs_lock();
1147         dp = get_dp(net, ovs_header->dp_ifindex);
1148         if (unlikely(!dp)) {
1149                 error = -ENODEV;
1150                 goto err_unlock_ovs;
1151         }
1152         /* Check that the flow exists. */
1153         if (ufid_present)
1154                 flow = ovs_flow_tbl_lookup_ufid(&dp->table, &sfid);
1155         else
1156                 flow = ovs_flow_tbl_lookup_exact(&dp->table, &match);
1157         if (unlikely(!flow)) {
1158                 error = -ENOENT;
1159                 goto err_unlock_ovs;
1160         }
1161
1162         /* Update actions, if present. */
1163         if (likely(acts)) {
1164                 old_acts = ovsl_dereference(flow->sf_acts);
1165                 rcu_assign_pointer(flow->sf_acts, acts);
1166
1167                 if (unlikely(reply)) {
1168                         error = ovs_flow_cmd_fill_info(flow,
1169                                                        ovs_header->dp_ifindex,
1170                                                        reply, info->snd_portid,
1171                                                        info->snd_seq, 0,
1172                                                        OVS_FLOW_CMD_NEW,
1173                                                        ufid_flags);
1174                         BUG_ON(error < 0);
1175                 }
1176         } else {
1177                 /* Could not alloc without acts before locking. */
1178                 reply = ovs_flow_cmd_build_info(flow, ovs_header->dp_ifindex,
1179                                                 info, OVS_FLOW_CMD_NEW, false,
1180                                                 ufid_flags);
1181
1182                 if (IS_ERR(reply)) {
1183                         error = PTR_ERR(reply);
1184                         goto err_unlock_ovs;
1185                 }
1186         }
1187
1188         /* Clear stats. */
1189         if (a[OVS_FLOW_ATTR_CLEAR])
1190                 ovs_flow_stats_clear(flow);
1191         ovs_unlock();
1192
1193         if (reply)
1194                 ovs_notify(&dp_flow_genl_family, reply, info);
1195         if (old_acts)
1196                 ovs_nla_free_flow_actions_rcu(old_acts);
1197
1198         return 0;
1199
1200 err_unlock_ovs:
1201         ovs_unlock();
1202         kfree_skb(reply);
1203 err_kfree_acts:
1204         ovs_nla_free_flow_actions(acts);
1205 error:
1206         return error;
1207 }
1208
1209 static int ovs_flow_cmd_get(struct sk_buff *skb, struct genl_info *info)
1210 {
1211         struct nlattr **a = info->attrs;
1212         struct ovs_header *ovs_header = info->userhdr;
1213         struct net *net = sock_net(skb->sk);
1214         struct sw_flow_key key;
1215         struct sk_buff *reply;
1216         struct sw_flow *flow;
1217         struct datapath *dp;
1218         struct sw_flow_match match;
1219         struct sw_flow_id ufid;
1220         u32 ufid_flags = ovs_nla_get_ufid_flags(a[OVS_FLOW_ATTR_UFID_FLAGS]);
1221         int err = 0;
1222         bool log = !a[OVS_FLOW_ATTR_PROBE];
1223         bool ufid_present;
1224
1225         ufid_present = ovs_nla_get_ufid(&ufid, a[OVS_FLOW_ATTR_UFID], log);
1226         if (a[OVS_FLOW_ATTR_KEY]) {
1227                 ovs_match_init(&match, &key, NULL);
1228                 err = ovs_nla_get_match(net, &match, a[OVS_FLOW_ATTR_KEY], NULL,
1229                                         log);
1230         } else if (!ufid_present) {
1231                 OVS_NLERR(log,
1232                           "Flow get message rejected, Key attribute missing.");
1233                 err = -EINVAL;
1234         }
1235         if (err)
1236                 return err;
1237
1238         ovs_lock();
1239         dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
1240         if (!dp) {
1241                 err = -ENODEV;
1242                 goto unlock;
1243         }
1244
1245         if (ufid_present)
1246                 flow = ovs_flow_tbl_lookup_ufid(&dp->table, &ufid);
1247         else
1248                 flow = ovs_flow_tbl_lookup_exact(&dp->table, &match);
1249         if (!flow) {
1250                 err = -ENOENT;
1251                 goto unlock;
1252         }
1253
1254         reply = ovs_flow_cmd_build_info(flow, ovs_header->dp_ifindex, info,
1255                                         OVS_FLOW_CMD_NEW, true, ufid_flags);
1256         if (IS_ERR(reply)) {
1257                 err = PTR_ERR(reply);
1258                 goto unlock;
1259         }
1260
1261         ovs_unlock();
1262         return genlmsg_reply(reply, info);
1263 unlock:
1264         ovs_unlock();
1265         return err;
1266 }
1267
1268 static int ovs_flow_cmd_del(struct sk_buff *skb, struct genl_info *info)
1269 {
1270         struct nlattr **a = info->attrs;
1271         struct ovs_header *ovs_header = info->userhdr;
1272         struct net *net = sock_net(skb->sk);
1273         struct sw_flow_key key;
1274         struct sk_buff *reply;
1275         struct sw_flow *flow = NULL;
1276         struct datapath *dp;
1277         struct sw_flow_match match;
1278         struct sw_flow_id ufid;
1279         u32 ufid_flags = ovs_nla_get_ufid_flags(a[OVS_FLOW_ATTR_UFID_FLAGS]);
1280         int err;
1281         bool log = !a[OVS_FLOW_ATTR_PROBE];
1282         bool ufid_present;
1283
1284         ufid_present = ovs_nla_get_ufid(&ufid, a[OVS_FLOW_ATTR_UFID], log);
1285         if (a[OVS_FLOW_ATTR_KEY]) {
1286                 ovs_match_init(&match, &key, NULL);
1287                 err = ovs_nla_get_match(net, &match, a[OVS_FLOW_ATTR_KEY],
1288                                         NULL, log);
1289                 if (unlikely(err))
1290                         return err;
1291         }
1292
1293         ovs_lock();
1294         dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
1295         if (unlikely(!dp)) {
1296                 err = -ENODEV;
1297                 goto unlock;
1298         }
1299
1300         if (unlikely(!a[OVS_FLOW_ATTR_KEY] && !ufid_present)) {
1301                 err = ovs_flow_tbl_flush(&dp->table);
1302                 goto unlock;
1303         }
1304
1305         if (ufid_present)
1306                 flow = ovs_flow_tbl_lookup_ufid(&dp->table, &ufid);
1307         else
1308                 flow = ovs_flow_tbl_lookup_exact(&dp->table, &match);
1309         if (unlikely(!flow)) {
1310                 err = -ENOENT;
1311                 goto unlock;
1312         }
1313
1314         ovs_flow_tbl_remove(&dp->table, flow);
1315         ovs_unlock();
1316
1317         reply = ovs_flow_cmd_alloc_info((const struct sw_flow_actions __force *) flow->sf_acts,
1318                                         &flow->id, info, false, ufid_flags);
1319         if (likely(reply)) {
1320                 if (likely(!IS_ERR(reply))) {
1321                         rcu_read_lock();        /* To keep RCU checker happy. */
1322                         err = ovs_flow_cmd_fill_info(flow, ovs_header->dp_ifindex,
1323                                                      reply, info->snd_portid,
1324                                                      info->snd_seq, 0,
1325                                                      OVS_FLOW_CMD_DEL,
1326                                                      ufid_flags);
1327                         rcu_read_unlock();
1328                         if (WARN_ON_ONCE(err < 0)) {
1329                                 kfree_skb(reply);
1330                                 goto out_free;
1331                         }
1332
1333                         ovs_notify(&dp_flow_genl_family, reply, info);
1334                 } else {
1335                         netlink_set_err(sock_net(skb->sk)->genl_sock, 0, 0, PTR_ERR(reply));
1336                 }
1337         }
1338
1339 out_free:
1340         ovs_flow_free(flow, true);
1341         return 0;
1342 unlock:
1343         ovs_unlock();
1344         return err;
1345 }
1346
1347 static int ovs_flow_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
1348 {
1349         struct nlattr *a[__OVS_FLOW_ATTR_MAX];
1350         struct ovs_header *ovs_header = genlmsg_data(nlmsg_data(cb->nlh));
1351         struct table_instance *ti;
1352         struct datapath *dp;
1353         u32 ufid_flags;
1354         int err;
1355
1356         err = genlmsg_parse(cb->nlh, &dp_flow_genl_family, a,
1357                             OVS_FLOW_ATTR_MAX, flow_policy);
1358         if (err)
1359                 return err;
1360         ufid_flags = ovs_nla_get_ufid_flags(a[OVS_FLOW_ATTR_UFID_FLAGS]);
1361
1362         rcu_read_lock();
1363         dp = get_dp_rcu(sock_net(skb->sk), ovs_header->dp_ifindex);
1364         if (!dp) {
1365                 rcu_read_unlock();
1366                 return -ENODEV;
1367         }
1368
1369         ti = rcu_dereference(dp->table.ti);
1370         for (;;) {
1371                 struct sw_flow *flow;
1372                 u32 bucket, obj;
1373
1374                 bucket = cb->args[0];
1375                 obj = cb->args[1];
1376                 flow = ovs_flow_tbl_dump_next(ti, &bucket, &obj);
1377                 if (!flow)
1378                         break;
1379
1380                 if (ovs_flow_cmd_fill_info(flow, ovs_header->dp_ifindex, skb,
1381                                            NETLINK_CB(cb->skb).portid,
1382                                            cb->nlh->nlmsg_seq, NLM_F_MULTI,
1383                                            OVS_FLOW_CMD_NEW, ufid_flags) < 0)
1384                         break;
1385
1386                 cb->args[0] = bucket;
1387                 cb->args[1] = obj;
1388         }
1389         rcu_read_unlock();
1390         return skb->len;
1391 }
1392
1393 static const struct nla_policy flow_policy[OVS_FLOW_ATTR_MAX + 1] = {
1394         [OVS_FLOW_ATTR_KEY] = { .type = NLA_NESTED },
1395         [OVS_FLOW_ATTR_MASK] = { .type = NLA_NESTED },
1396         [OVS_FLOW_ATTR_ACTIONS] = { .type = NLA_NESTED },
1397         [OVS_FLOW_ATTR_CLEAR] = { .type = NLA_FLAG },
1398         [OVS_FLOW_ATTR_PROBE] = { .type = NLA_FLAG },
1399         [OVS_FLOW_ATTR_UFID] = { .type = NLA_UNSPEC, .len = 1 },
1400         [OVS_FLOW_ATTR_UFID_FLAGS] = { .type = NLA_U32 },
1401 };
1402
1403 static const struct genl_ops dp_flow_genl_ops[] = {
1404         { .cmd = OVS_FLOW_CMD_NEW,
1405           .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1406           .policy = flow_policy,
1407           .doit = ovs_flow_cmd_new
1408         },
1409         { .cmd = OVS_FLOW_CMD_DEL,
1410           .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1411           .policy = flow_policy,
1412           .doit = ovs_flow_cmd_del
1413         },
1414         { .cmd = OVS_FLOW_CMD_GET,
1415           .flags = 0,               /* OK for unprivileged users. */
1416           .policy = flow_policy,
1417           .doit = ovs_flow_cmd_get,
1418           .dumpit = ovs_flow_cmd_dump
1419         },
1420         { .cmd = OVS_FLOW_CMD_SET,
1421           .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1422           .policy = flow_policy,
1423           .doit = ovs_flow_cmd_set,
1424         },
1425 };
1426
1427 static struct genl_family dp_flow_genl_family = {
1428         .id = GENL_ID_GENERATE,
1429         .hdrsize = sizeof(struct ovs_header),
1430         .name = OVS_FLOW_FAMILY,
1431         .version = OVS_FLOW_VERSION,
1432         .maxattr = OVS_FLOW_ATTR_MAX,
1433         .netnsok = true,
1434         .parallel_ops = true,
1435         .ops = dp_flow_genl_ops,
1436         .n_ops = ARRAY_SIZE(dp_flow_genl_ops),
1437         .mcgrps = &ovs_dp_flow_multicast_group,
1438         .n_mcgrps = 1,
1439 };
1440
1441 static size_t ovs_dp_cmd_msg_size(void)
1442 {
1443         size_t msgsize = NLMSG_ALIGN(sizeof(struct ovs_header));
1444
1445         msgsize += nla_total_size(IFNAMSIZ);
1446         msgsize += nla_total_size(sizeof(struct ovs_dp_stats));
1447         msgsize += nla_total_size(sizeof(struct ovs_dp_megaflow_stats));
1448         msgsize += nla_total_size(sizeof(u32)); /* OVS_DP_ATTR_USER_FEATURES */
1449
1450         return msgsize;
1451 }
1452
1453 /* Called with ovs_mutex. */
1454 static int ovs_dp_cmd_fill_info(struct datapath *dp, struct sk_buff *skb,
1455                                 u32 portid, u32 seq, u32 flags, u8 cmd)
1456 {
1457         struct ovs_header *ovs_header;
1458         struct ovs_dp_stats dp_stats;
1459         struct ovs_dp_megaflow_stats dp_megaflow_stats;
1460         int err;
1461
1462         ovs_header = genlmsg_put(skb, portid, seq, &dp_datapath_genl_family,
1463                                    flags, cmd);
1464         if (!ovs_header)
1465                 goto error;
1466
1467         ovs_header->dp_ifindex = get_dpifindex(dp);
1468
1469         err = nla_put_string(skb, OVS_DP_ATTR_NAME, ovs_dp_name(dp));
1470         if (err)
1471                 goto nla_put_failure;
1472
1473         get_dp_stats(dp, &dp_stats, &dp_megaflow_stats);
1474         if (nla_put(skb, OVS_DP_ATTR_STATS, sizeof(struct ovs_dp_stats),
1475                         &dp_stats))
1476                 goto nla_put_failure;
1477
1478         if (nla_put(skb, OVS_DP_ATTR_MEGAFLOW_STATS,
1479                         sizeof(struct ovs_dp_megaflow_stats),
1480                         &dp_megaflow_stats))
1481                 goto nla_put_failure;
1482
1483         if (nla_put_u32(skb, OVS_DP_ATTR_USER_FEATURES, dp->user_features))
1484                 goto nla_put_failure;
1485
1486         genlmsg_end(skb, ovs_header);
1487         return 0;
1488
1489 nla_put_failure:
1490         genlmsg_cancel(skb, ovs_header);
1491 error:
1492         return -EMSGSIZE;
1493 }
1494
1495 static struct sk_buff *ovs_dp_cmd_alloc_info(struct genl_info *info)
1496 {
1497         return genlmsg_new_unicast(ovs_dp_cmd_msg_size(), info, GFP_KERNEL);
1498 }
1499
1500 /* Called with rcu_read_lock or ovs_mutex. */
1501 static struct datapath *lookup_datapath(struct net *net,
1502                                         const struct ovs_header *ovs_header,
1503                                         struct nlattr *a[OVS_DP_ATTR_MAX + 1])
1504 {
1505         struct datapath *dp;
1506
1507         if (!a[OVS_DP_ATTR_NAME])
1508                 dp = get_dp(net, ovs_header->dp_ifindex);
1509         else {
1510                 struct vport *vport;
1511
1512                 vport = ovs_vport_locate(net, nla_data(a[OVS_DP_ATTR_NAME]));
1513                 dp = vport && vport->port_no == OVSP_LOCAL ? vport->dp : NULL;
1514         }
1515         return dp ? dp : ERR_PTR(-ENODEV);
1516 }
1517
1518 static void ovs_dp_reset_user_features(struct sk_buff *skb, struct genl_info *info)
1519 {
1520         struct datapath *dp;
1521
1522         dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);
1523         if (IS_ERR(dp))
1524                 return;
1525
1526         WARN(dp->user_features, "Dropping previously announced user features\n");
1527         dp->user_features = 0;
1528 }
1529
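/* OVS_DP_ATTR_USER_FEATURES carries a u32 bitmask of OVS_DP_F_* flags
 * announced by user space (e.g. OVS_DP_F_UNALIGNED, OVS_DP_F_VPORT_PIDS;
 * see include/uapi/linux/openvswitch.h).
 */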
1530 static void ovs_dp_change(struct datapath *dp, struct nlattr *a[])
1531 {
1532         if (a[OVS_DP_ATTR_USER_FEATURES])
1533                 dp->user_features = nla_get_u32(a[OVS_DP_ATTR_USER_FEATURES]);
1534 }
1535
1536 static int ovs_dp_cmd_new(struct sk_buff *skb, struct genl_info *info)
1537 {
1538         struct nlattr **a = info->attrs;
1539         struct vport_parms parms;
1540         struct sk_buff *reply;
1541         struct datapath *dp;
1542         struct vport *vport;
1543         struct ovs_net *ovs_net;
1544         int err, i;
1545
1546         err = -EINVAL;
1547         if (!a[OVS_DP_ATTR_NAME] || !a[OVS_DP_ATTR_UPCALL_PID])
1548                 goto err;
1549
1550         reply = ovs_dp_cmd_alloc_info(info);
1551         if (!reply)
1552                 return -ENOMEM;
1553
1554         err = -ENOMEM;
1555         dp = kzalloc(sizeof(*dp), GFP_KERNEL);
1556         if (dp == NULL)
1557                 goto err_free_reply;
1558
1559         ovs_dp_set_net(dp, sock_net(skb->sk));
1560
1561         /* Allocate the flow table. */
1562         err = ovs_flow_tbl_init(&dp->table);
1563         if (err)
1564                 goto err_free_dp;
1565
1566         dp->stats_percpu = netdev_alloc_pcpu_stats(struct dp_stats_percpu);
1567         if (!dp->stats_percpu) {
1568                 err = -ENOMEM;
1569                 goto err_destroy_table;
1570         }
1571
1572         dp->ports = kmalloc(DP_VPORT_HASH_BUCKETS * sizeof(struct hlist_head),
1573                             GFP_KERNEL);
1574         if (!dp->ports) {
1575                 err = -ENOMEM;
1576                 goto err_destroy_percpu;
1577         }
1578
1579         for (i = 0; i < DP_VPORT_HASH_BUCKETS; i++)
1580                 INIT_HLIST_HEAD(&dp->ports[i]);
1581
1582         /* Set up our datapath device. */
1583         parms.name = nla_data(a[OVS_DP_ATTR_NAME]);
1584         parms.type = OVS_VPORT_TYPE_INTERNAL;
1585         parms.options = NULL;
1586         parms.dp = dp;
1587         parms.port_no = OVSP_LOCAL;
1588         parms.upcall_portids = a[OVS_DP_ATTR_UPCALL_PID];
1589
1590         ovs_dp_change(dp, a);
1591
1592         /* So far only local changes have been made; now we need the lock. */
1593         ovs_lock();
1594
1595         vport = new_vport(&parms);
1596         if (IS_ERR(vport)) {
1597                 err = PTR_ERR(vport);
1598                 if (err == -EBUSY)
1599                         err = -EEXIST;
1600
1601                 if (err == -EEXIST) {
1602                         /* An outdated user space instance that does not understand
1603                          * the concept of user_features has attempted to create a new
1604                          * datapath and is likely to reuse it. Drop all user features.
1605                          */
1606                         if (info->genlhdr->version < OVS_DP_VER_FEATURES)
1607                                 ovs_dp_reset_user_features(skb, info);
1608                 }
1609
1610                 goto err_destroy_ports_array;
1611         }
1612
1613         err = ovs_dp_cmd_fill_info(dp, reply, info->snd_portid,
1614                                    info->snd_seq, 0, OVS_DP_CMD_NEW);
1615         BUG_ON(err < 0);
1616
1617         ovs_net = net_generic(ovs_dp_get_net(dp), ovs_net_id);
1618         list_add_tail_rcu(&dp->list_node, &ovs_net->dps);
1619
1620         ovs_unlock();
1621
1622         ovs_notify(&dp_datapath_genl_family, reply, info);
1623         return 0;
1624
1625 err_destroy_ports_array:
1626         ovs_unlock();
1627         kfree(dp->ports);
1628 err_destroy_percpu:
1629         free_percpu(dp->stats_percpu);
1630 err_destroy_table:
1631         ovs_flow_tbl_destroy(&dp->table);
1632 err_free_dp:
1633         kfree(dp);
1634 err_free_reply:
1635         kfree_skb(reply);
1636 err:
1637         return err;
1638 }
1639
1640 /* Called with ovs_mutex. */
1641 static void __dp_destroy(struct datapath *dp)
1642 {
1643         int i;
1644
1645         for (i = 0; i < DP_VPORT_HASH_BUCKETS; i++) {
1646                 struct vport *vport;
1647                 struct hlist_node *n;
1648
1649                 hlist_for_each_entry_safe(vport, n, &dp->ports[i], dp_hash_node)
1650                         if (vport->port_no != OVSP_LOCAL)
1651                                 ovs_dp_detach_port(vport);
1652         }
1653
1654         list_del_rcu(&dp->list_node);
1655
1656         /* OVSP_LOCAL is the datapath's internal port. Make sure that all
1657          * ports in the datapath are destroyed before the datapath is freed.
1658          */
1659         ovs_dp_detach_port(ovs_vport_ovsl(dp, OVSP_LOCAL));
1660
1661         /* Free the flow table and datapath after an RCU grace period. */
1662         call_rcu(&dp->rcu, destroy_dp_rcu);
1663 }
1664
1665 static int ovs_dp_cmd_del(struct sk_buff *skb, struct genl_info *info)
1666 {
1667         struct sk_buff *reply;
1668         struct datapath *dp;
1669         int err;
1670
1671         reply = ovs_dp_cmd_alloc_info(info);
1672         if (!reply)
1673                 return -ENOMEM;
1674
1675         ovs_lock();
1676         dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);
1677         err = PTR_ERR(dp);
1678         if (IS_ERR(dp))
1679                 goto err_unlock_free;
1680
1681         err = ovs_dp_cmd_fill_info(dp, reply, info->snd_portid,
1682                                    info->snd_seq, 0, OVS_DP_CMD_DEL);
1683         BUG_ON(err < 0);
1684
1685         __dp_destroy(dp);
1686         ovs_unlock();
1687
1688         ovs_notify(&dp_datapath_genl_family, reply, info);
1689
1690         return 0;
1691
1692 err_unlock_free:
1693         ovs_unlock();
1694         kfree_skb(reply);
1695         return err;
1696 }
1697
1698 static int ovs_dp_cmd_set(struct sk_buff *skb, struct genl_info *info)
1699 {
1700         struct sk_buff *reply;
1701         struct datapath *dp;
1702         int err;
1703
1704         reply = ovs_dp_cmd_alloc_info(info);
1705         if (!reply)
1706                 return -ENOMEM;
1707
1708         ovs_lock();
1709         dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);
1710         err = PTR_ERR(dp);
1711         if (IS_ERR(dp))
1712                 goto err_unlock_free;
1713
1714         ovs_dp_change(dp, info->attrs);
1715
1716         err = ovs_dp_cmd_fill_info(dp, reply, info->snd_portid,
1717                                    info->snd_seq, 0, OVS_DP_CMD_NEW);
1718         BUG_ON(err < 0);
1719
1720         ovs_unlock();
1721         ovs_notify(&dp_datapath_genl_family, reply, info);
1722
1723         return 0;
1724
1725 err_unlock_free:
1726         ovs_unlock();
1727         kfree_skb(reply);
1728         return err;
1729 }
1730
1731 static int ovs_dp_cmd_get(struct sk_buff *skb, struct genl_info *info)
1732 {
1733         struct sk_buff *reply;
1734         struct datapath *dp;
1735         int err;
1736
1737         reply = ovs_dp_cmd_alloc_info(info);
1738         if (!reply)
1739                 return -ENOMEM;
1740
1741         ovs_lock();
1742         dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);
1743         if (IS_ERR(dp)) {
1744                 err = PTR_ERR(dp);
1745                 goto err_unlock_free;
1746         }
1747         err = ovs_dp_cmd_fill_info(dp, reply, info->snd_portid,
1748                                    info->snd_seq, 0, OVS_DP_CMD_NEW);
1749         BUG_ON(err < 0);
1750         ovs_unlock();
1751
1752         return genlmsg_reply(reply, info);
1753
1754 err_unlock_free:
1755         ovs_unlock();
1756         kfree_skb(reply);
1757         return err;
1758 }
1759
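/* Dumps store their cursor in cb->args[0]: the number of datapaths already
 * emitted, so an interrupted dump resumes where it left off.
 */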
1760 static int ovs_dp_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
1761 {
1762         struct ovs_net *ovs_net = net_generic(sock_net(skb->sk), ovs_net_id);
1763         struct datapath *dp;
1764         int skip = cb->args[0];
1765         int i = 0;
1766
1767         ovs_lock();
1768         list_for_each_entry(dp, &ovs_net->dps, list_node) {
1769                 if (i >= skip &&
1770                     ovs_dp_cmd_fill_info(dp, skb, NETLINK_CB(cb->skb).portid,
1771                                          cb->nlh->nlmsg_seq, NLM_F_MULTI,
1772                                          OVS_DP_CMD_NEW) < 0)
1773                         break;
1774                 i++;
1775         }
1776         ovs_unlock();
1777
1778         cb->args[0] = i;
1779
1780         return skb->len;
1781 }
1782
1783 static const struct nla_policy datapath_policy[OVS_DP_ATTR_MAX + 1] = {
1784         [OVS_DP_ATTR_NAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ - 1 },
1785         [OVS_DP_ATTR_UPCALL_PID] = { .type = NLA_U32 },
1786         [OVS_DP_ATTR_USER_FEATURES] = { .type = NLA_U32 },
1787 };
1788
1789 static const struct genl_ops dp_datapath_genl_ops[] = {
1790         { .cmd = OVS_DP_CMD_NEW,
1791           .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1792           .policy = datapath_policy,
1793           .doit = ovs_dp_cmd_new
1794         },
1795         { .cmd = OVS_DP_CMD_DEL,
1796           .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1797           .policy = datapath_policy,
1798           .doit = ovs_dp_cmd_del
1799         },
1800         { .cmd = OVS_DP_CMD_GET,
1801           .flags = 0,               /* OK for unprivileged users. */
1802           .policy = datapath_policy,
1803           .doit = ovs_dp_cmd_get,
1804           .dumpit = ovs_dp_cmd_dump
1805         },
1806         { .cmd = OVS_DP_CMD_SET,
1807           .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1808           .policy = datapath_policy,
1809           .doit = ovs_dp_cmd_set,
1810         },
1811 };
1812
1813 static struct genl_family dp_datapath_genl_family = {
1814         .id = GENL_ID_GENERATE,
1815         .hdrsize = sizeof(struct ovs_header),
1816         .name = OVS_DATAPATH_FAMILY,
1817         .version = OVS_DATAPATH_VERSION,
1818         .maxattr = OVS_DP_ATTR_MAX,
1819         .netnsok = true,
1820         .parallel_ops = true,
1821         .ops = dp_datapath_genl_ops,
1822         .n_ops = ARRAY_SIZE(dp_datapath_genl_ops),
1823         .mcgrps = &ovs_dp_datapath_multicast_group,
1824         .n_mcgrps = 1,
1825 };
1826
1827 /* Called with ovs_mutex or RCU read lock. */
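/* A vport message carries the port number, type, name, stats, upcall portids
 * and any vport-specific options.
 */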
1828 static int ovs_vport_cmd_fill_info(struct vport *vport, struct sk_buff *skb,
1829                                    u32 portid, u32 seq, u32 flags, u8 cmd)
1830 {
1831         struct ovs_header *ovs_header;
1832         struct ovs_vport_stats vport_stats;
1833         int err;
1834
1835         ovs_header = genlmsg_put(skb, portid, seq, &dp_vport_genl_family,
1836                                  flags, cmd);
1837         if (!ovs_header)
1838                 return -EMSGSIZE;
1839
1840         ovs_header->dp_ifindex = get_dpifindex(vport->dp);
1841
1842         if (nla_put_u32(skb, OVS_VPORT_ATTR_PORT_NO, vport->port_no) ||
1843             nla_put_u32(skb, OVS_VPORT_ATTR_TYPE, vport->ops->type) ||
1844             nla_put_string(skb, OVS_VPORT_ATTR_NAME,
1845                            ovs_vport_name(vport)))
1846                 goto nla_put_failure;
1847
1848         ovs_vport_get_stats(vport, &vport_stats);
1849         if (nla_put(skb, OVS_VPORT_ATTR_STATS, sizeof(struct ovs_vport_stats),
1850                     &vport_stats))
1851                 goto nla_put_failure;
1852
1853         if (ovs_vport_get_upcall_portids(vport, skb))
1854                 goto nla_put_failure;
1855
1856         err = ovs_vport_get_options(vport, skb);
1857         if (err == -EMSGSIZE)
1858                 goto error;
1859
1860         genlmsg_end(skb, ovs_header);
1861         return 0;
1862
1863 nla_put_failure:
1864         err = -EMSGSIZE;
1865 error:
1866         genlmsg_cancel(skb, ovs_header);
1867         return err;
1868 }
1869
1870 static struct sk_buff *ovs_vport_cmd_alloc_info(void)
1871 {
1872         return nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
1873 }
1874
1875 /* Called with ovs_mutex, only via ovs_dp_notify_wq(). */
1876 struct sk_buff *ovs_vport_cmd_build_info(struct vport *vport, u32 portid,
1877                                          u32 seq, u8 cmd)
1878 {
1879         struct sk_buff *skb;
1880         int retval;
1881
1882         skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
1883         if (!skb)
1884                 return ERR_PTR(-ENOMEM);
1885
1886         retval = ovs_vport_cmd_fill_info(vport, skb, portid, seq, 0, cmd);
1887         BUG_ON(retval < 0);
1888
1889         return skb;
1890 }
1891
1892 /* Called with ovs_mutex or RCU read lock. */
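/* A vport is identified either by OVS_VPORT_ATTR_NAME (optionally checked
 * against dp_ifindex) or by dp_ifindex plus OVS_VPORT_ATTR_PORT_NO.
 */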
1893 static struct vport *lookup_vport(struct net *net,
1894                                   const struct ovs_header *ovs_header,
1895                                   struct nlattr *a[OVS_VPORT_ATTR_MAX + 1])
1896 {
1897         struct datapath *dp;
1898         struct vport *vport;
1899
1900         if (a[OVS_VPORT_ATTR_NAME]) {
1901                 vport = ovs_vport_locate(net, nla_data(a[OVS_VPORT_ATTR_NAME]));
1902                 if (!vport)
1903                         return ERR_PTR(-ENODEV);
1904                 if (ovs_header->dp_ifindex &&
1905                     ovs_header->dp_ifindex != get_dpifindex(vport->dp))
1906                         return ERR_PTR(-ENODEV);
1907                 return vport;
1908         } else if (a[OVS_VPORT_ATTR_PORT_NO]) {
1909                 u32 port_no = nla_get_u32(a[OVS_VPORT_ATTR_PORT_NO]);
1910
1911                 if (port_no >= DP_MAX_PORTS)
1912                         return ERR_PTR(-EFBIG);
1913
1914                 dp = get_dp(net, ovs_header->dp_ifindex);
1915                 if (!dp)
1916                         return ERR_PTR(-ENODEV);
1917
1918                 vport = ovs_vport_ovsl_rcu(dp, port_no);
1919                 if (!vport)
1920                         return ERR_PTR(-ENODEV);
1921                 return vport;
1922         } else
1923                 return ERR_PTR(-EINVAL);
1924 }
1925
1926 static int ovs_vport_cmd_new(struct sk_buff *skb, struct genl_info *info)
1927 {
1928         struct nlattr **a = info->attrs;
1929         struct ovs_header *ovs_header = info->userhdr;
1930         struct vport_parms parms;
1931         struct sk_buff *reply;
1932         struct vport *vport;
1933         struct datapath *dp;
1934         u32 port_no;
1935         int err;
1936
1937         if (!a[OVS_VPORT_ATTR_NAME] || !a[OVS_VPORT_ATTR_TYPE] ||
1938             !a[OVS_VPORT_ATTR_UPCALL_PID])
1939                 return -EINVAL;
1940
1941         port_no = a[OVS_VPORT_ATTR_PORT_NO]
1942                 ? nla_get_u32(a[OVS_VPORT_ATTR_PORT_NO]) : 0;
1943         if (port_no >= DP_MAX_PORTS)
1944                 return -EFBIG;
1945
1946         reply = ovs_vport_cmd_alloc_info();
1947         if (!reply)
1948                 return -ENOMEM;
1949
1950         ovs_lock();
1951 restart:
1952         dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
1953         err = -ENODEV;
1954         if (!dp)
1955                 goto exit_unlock_free;
1956
1957         if (port_no) {
1958                 vport = ovs_vport_ovsl(dp, port_no);
1959                 err = -EBUSY;
1960                 if (vport)
1961                         goto exit_unlock_free;
1962         } else {
1963                 for (port_no = 1; ; port_no++) {
1964                         if (port_no >= DP_MAX_PORTS) {
1965                                 err = -EFBIG;
1966                                 goto exit_unlock_free;
1967                         }
1968                         vport = ovs_vport_ovsl(dp, port_no);
1969                         if (!vport)
1970                                 break;
1971                 }
1972         }
1973
1974         parms.name = nla_data(a[OVS_VPORT_ATTR_NAME]);
1975         parms.type = nla_get_u32(a[OVS_VPORT_ATTR_TYPE]);
1976         parms.options = a[OVS_VPORT_ATTR_OPTIONS];
1977         parms.dp = dp;
1978         parms.port_no = port_no;
1979         parms.upcall_portids = a[OVS_VPORT_ATTR_UPCALL_PID];
1980
1981         vport = new_vport(&parms);
1982         err = PTR_ERR(vport);
1983         if (IS_ERR(vport)) {
1984                 if (err == -EAGAIN)
1985                         goto restart;
1986                 goto exit_unlock_free;
1987         }
1988
1989         err = ovs_vport_cmd_fill_info(vport, reply, info->snd_portid,
1990                                       info->snd_seq, 0, OVS_VPORT_CMD_NEW);
1991         BUG_ON(err < 0);
1992         ovs_unlock();
1993
1994         ovs_notify(&dp_vport_genl_family, reply, info);
1995         return 0;
1996
1997 exit_unlock_free:
1998         ovs_unlock();
1999         kfree_skb(reply);
2000         return err;
2001 }
2002
2003 static int ovs_vport_cmd_set(struct sk_buff *skb, struct genl_info *info)
2004 {
2005         struct nlattr **a = info->attrs;
2006         struct sk_buff *reply;
2007         struct vport *vport;
2008         int err;
2009
2010         reply = ovs_vport_cmd_alloc_info();
2011         if (!reply)
2012                 return -ENOMEM;
2013
2014         ovs_lock();
2015         vport = lookup_vport(sock_net(skb->sk), info->userhdr, a);
2016         err = PTR_ERR(vport);
2017         if (IS_ERR(vport))
2018                 goto exit_unlock_free;
2019
2020         if (a[OVS_VPORT_ATTR_TYPE] &&
2021             nla_get_u32(a[OVS_VPORT_ATTR_TYPE]) != vport->ops->type) {
2022                 err = -EINVAL;
2023                 goto exit_unlock_free;
2024         }
2025
2026         if (a[OVS_VPORT_ATTR_OPTIONS]) {
2027                 err = ovs_vport_set_options(vport, a[OVS_VPORT_ATTR_OPTIONS]);
2028                 if (err)
2029                         goto exit_unlock_free;
2030         }
2031
2032
2033         if (a[OVS_VPORT_ATTR_UPCALL_PID]) {
2034                 struct nlattr *ids = a[OVS_VPORT_ATTR_UPCALL_PID];
2035
2036                 err = ovs_vport_set_upcall_portids(vport, ids);
2037                 if (err)
2038                         goto exit_unlock_free;
2039         }
2040
2041         err = ovs_vport_cmd_fill_info(vport, reply, info->snd_portid,
2042                                       info->snd_seq, 0, OVS_VPORT_CMD_NEW);
2043         BUG_ON(err < 0);
2044
2045         ovs_unlock();
2046         ovs_notify(&dp_vport_genl_family, reply, info);
2047         return 0;
2048
2049 exit_unlock_free:
2050         ovs_unlock();
2051         kfree_skb(reply);
2052         return err;
2053 }
2054
2055 static int ovs_vport_cmd_del(struct sk_buff *skb, struct genl_info *info)
2056 {
2057         struct nlattr **a = info->attrs;
2058         struct sk_buff *reply;
2059         struct vport *vport;
2060         int err;
2061
2062         reply = ovs_vport_cmd_alloc_info();
2063         if (!reply)
2064                 return -ENOMEM;
2065
2066         ovs_lock();
2067         vport = lookup_vport(sock_net(skb->sk), info->userhdr, a);
2068         err = PTR_ERR(vport);
2069         if (IS_ERR(vport))
2070                 goto exit_unlock_free;
2071
2072         if (vport->port_no == OVSP_LOCAL) {
2073                 err = -EINVAL;
2074                 goto exit_unlock_free;
2075         }
2076
2077         err = ovs_vport_cmd_fill_info(vport, reply, info->snd_portid,
2078                                       info->snd_seq, 0, OVS_VPORT_CMD_DEL);
2079         BUG_ON(err < 0);
2080         ovs_dp_detach_port(vport);
2081         ovs_unlock();
2082
2083         ovs_notify(&dp_vport_genl_family, reply, info);
2084         return 0;
2085
2086 exit_unlock_free:
2087         ovs_unlock();
2088         kfree_skb(reply);
2089         return err;
2090 }
2091
2092 static int ovs_vport_cmd_get(struct sk_buff *skb, struct genl_info *info)
2093 {
2094         struct nlattr **a = info->attrs;
2095         struct ovs_header *ovs_header = info->userhdr;
2096         struct sk_buff *reply;
2097         struct vport *vport;
2098         int err;
2099
2100         reply = ovs_vport_cmd_alloc_info();
2101         if (!reply)
2102                 return -ENOMEM;
2103
2104         rcu_read_lock();
2105         vport = lookup_vport(sock_net(skb->sk), ovs_header, a);
2106         err = PTR_ERR(vport);
2107         if (IS_ERR(vport))
2108                 goto exit_unlock_free;
2109         err = ovs_vport_cmd_fill_info(vport, reply, info->snd_portid,
2110                                       info->snd_seq, 0, OVS_VPORT_CMD_NEW);
2111         BUG_ON(err < 0);
2112         rcu_read_unlock();
2113
2114         return genlmsg_reply(reply, info);
2115
2116 exit_unlock_free:
2117         rcu_read_unlock();
2118         kfree_skb(reply);
2119         return err;
2120 }
2121
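/* Vport dumps keep a two-level cursor: cb->args[0] is the hash bucket and
 * cb->args[1] the offset within that bucket, both updated before returning.
 */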
2122 static int ovs_vport_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
2123 {
2124         struct ovs_header *ovs_header = genlmsg_data(nlmsg_data(cb->nlh));
2125         struct datapath *dp;
2126         int bucket = cb->args[0], skip = cb->args[1];
2127         int i, j = 0;
2128
2129         rcu_read_lock();
2130         dp = get_dp_rcu(sock_net(skb->sk), ovs_header->dp_ifindex);
2131         if (!dp) {
2132                 rcu_read_unlock();
2133                 return -ENODEV;
2134         }
2135         for (i = bucket; i < DP_VPORT_HASH_BUCKETS; i++) {
2136                 struct vport *vport;
2137
2138                 j = 0;
2139                 hlist_for_each_entry_rcu(vport, &dp->ports[i], dp_hash_node) {
2140                         if (j >= skip &&
2141                             ovs_vport_cmd_fill_info(vport, skb,
2142                                                     NETLINK_CB(cb->skb).portid,
2143                                                     cb->nlh->nlmsg_seq,
2144                                                     NLM_F_MULTI,
2145                                                     OVS_VPORT_CMD_NEW) < 0)
2146                                 goto out;
2147
2148                         j++;
2149                 }
2150                 skip = 0;
2151         }
2152 out:
2153         rcu_read_unlock();
2154
2155         cb->args[0] = i;
2156         cb->args[1] = j;
2157
2158         return skb->len;
2159 }
2160
2161 static const struct nla_policy vport_policy[OVS_VPORT_ATTR_MAX + 1] = {
2162         [OVS_VPORT_ATTR_NAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ - 1 },
2163         [OVS_VPORT_ATTR_STATS] = { .len = sizeof(struct ovs_vport_stats) },
2164         [OVS_VPORT_ATTR_PORT_NO] = { .type = NLA_U32 },
2165         [OVS_VPORT_ATTR_TYPE] = { .type = NLA_U32 },
2166         [OVS_VPORT_ATTR_UPCALL_PID] = { .type = NLA_UNSPEC },
2167         [OVS_VPORT_ATTR_OPTIONS] = { .type = NLA_NESTED },
2168 };
2169
2170 static const struct genl_ops dp_vport_genl_ops[] = {
2171         { .cmd = OVS_VPORT_CMD_NEW,
2172           .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
2173           .policy = vport_policy,
2174           .doit = ovs_vport_cmd_new
2175         },
2176         { .cmd = OVS_VPORT_CMD_DEL,
2177           .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
2178           .policy = vport_policy,
2179           .doit = ovs_vport_cmd_del
2180         },
2181         { .cmd = OVS_VPORT_CMD_GET,
2182           .flags = 0,               /* OK for unprivileged users. */
2183           .policy = vport_policy,
2184           .doit = ovs_vport_cmd_get,
2185           .dumpit = ovs_vport_cmd_dump
2186         },
2187         { .cmd = OVS_VPORT_CMD_SET,
2188           .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
2189           .policy = vport_policy,
2190           .doit = ovs_vport_cmd_set,
2191         },
2192 };
2193
2194 struct genl_family dp_vport_genl_family = {
2195         .id = GENL_ID_GENERATE,
2196         .hdrsize = sizeof(struct ovs_header),
2197         .name = OVS_VPORT_FAMILY,
2198         .version = OVS_VPORT_VERSION,
2199         .maxattr = OVS_VPORT_ATTR_MAX,
2200         .netnsok = true,
2201         .parallel_ops = true,
2202         .ops = dp_vport_genl_ops,
2203         .n_ops = ARRAY_SIZE(dp_vport_genl_ops),
2204         .mcgrps = &ovs_dp_vport_multicast_group,
2205         .n_mcgrps = 1,
2206 };
2207
2208 static struct genl_family * const dp_genl_families[] = {
2209         &dp_datapath_genl_family,
2210         &dp_vport_genl_family,
2211         &dp_flow_genl_family,
2212         &dp_packet_genl_family,
2213 };
2214
2215 static void dp_unregister_genl(int n_families)
2216 {
2217         int i;
2218
2219         for (i = 0; i < n_families; i++)
2220                 genl_unregister_family(dp_genl_families[i]);
2221 }
2222
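/* Register every generic netlink family; on failure, unregister the families
 * that were already registered.
 */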
2223 static int dp_register_genl(void)
2224 {
2225         int err;
2226         int i;
2227
2228         for (i = 0; i < ARRAY_SIZE(dp_genl_families); i++) {
2229
2230                 err = genl_register_family(dp_genl_families[i]);
2231                 if (err)
2232                         goto error;
2233         }
2234
2235         return 0;
2236
2237 error:
2238         dp_unregister_genl(i);
2239         return err;
2240 }
2241
2242 static int __net_init ovs_init_net(struct net *net)
2243 {
2244         struct ovs_net *ovs_net = net_generic(net, ovs_net_id);
2245
2246         INIT_LIST_HEAD(&ovs_net->dps);
2247         INIT_WORK(&ovs_net->dp_notify_work, ovs_dp_notify_wq);
2248         ovs_ct_init(net);
2249         return 0;
2250 }
2251
2252 static void __net_exit list_vports_from_net(struct net *net, struct net *dnet,
2253                                             struct list_head *head)
2254 {
2255         struct ovs_net *ovs_net = net_generic(net, ovs_net_id);
2256         struct datapath *dp;
2257
2258         list_for_each_entry(dp, &ovs_net->dps, list_node) {
2259                 int i;
2260
2261                 for (i = 0; i < DP_VPORT_HASH_BUCKETS; i++) {
2262                         struct vport *vport;
2263
2264                         hlist_for_each_entry(vport, &dp->ports[i], dp_hash_node) {
2265                                 if (vport->ops->type != OVS_VPORT_TYPE_INTERNAL)
2266                                         continue;
2267
2268                                 if (dev_net(vport->dev) == dnet)
2269                                         list_add(&vport->detach_list, head);
2270                         }
2271                 }
2272         }
2273 }
2274
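/* Namespace teardown: destroy every datapath created in the exiting
 * namespace, then detach any internal vports (in datapaths of any namespace)
 * whose underlying netdev lives in the exiting namespace.
 */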
2275 static void __net_exit ovs_exit_net(struct net *dnet)
2276 {
2277         struct datapath *dp, *dp_next;
2278         struct ovs_net *ovs_net = net_generic(dnet, ovs_net_id);
2279         struct vport *vport, *vport_next;
2280         struct net *net;
2281         LIST_HEAD(head);
2282
2283         ovs_ct_exit(dnet);
2284         ovs_lock();
2285         list_for_each_entry_safe(dp, dp_next, &ovs_net->dps, list_node)
2286                 __dp_destroy(dp);
2287
2288         rtnl_lock();
2289         for_each_net(net)
2290                 list_vports_from_net(net, dnet, &head);
2291         rtnl_unlock();
2292
2293         /* Detach all internal vports from the exiting namespace. */
2294         list_for_each_entry_safe(vport, vport_next, &head, detach_list) {
2295                 list_del(&vport->detach_list);
2296                 ovs_dp_detach_port(vport);
2297         }
2298
2299         ovs_unlock();
2300
2301         cancel_work_sync(&ovs_net->dp_notify_work);
2302 }
2303
2304 static struct pernet_operations ovs_net_ops = {
2305         .init = ovs_init_net,
2306         .exit = ovs_exit_net,
2307         .id   = &ovs_net_id,
2308         .size = sizeof(struct ovs_net),
2309 };
2310
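/* Module init: set up action FIFOs, internal-dev rtnl_link handling, the flow
 * and vport subsystems, per-net state, the netdev notifier and finally the
 * generic netlink families, unwinding in reverse order on failure.
 */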
2311 static int __init dp_init(void)
2312 {
2313         int err;
2314
2315         BUILD_BUG_ON(sizeof(struct ovs_skb_cb) > FIELD_SIZEOF(struct sk_buff, cb));
2316
2317         pr_info("Open vSwitch switching datapath\n");
2318
2319         err = action_fifos_init();
2320         if (err)
2321                 goto error;
2322
2323         err = ovs_internal_dev_rtnl_link_register();
2324         if (err)
2325                 goto error_action_fifos_exit;
2326
2327         err = ovs_flow_init();
2328         if (err)
2329                 goto error_unreg_rtnl_link;
2330
2331         err = ovs_vport_init();
2332         if (err)
2333                 goto error_flow_exit;
2334
2335         err = register_pernet_device(&ovs_net_ops);
2336         if (err)
2337                 goto error_vport_exit;
2338
2339         err = register_netdevice_notifier(&ovs_dp_device_notifier);
2340         if (err)
2341                 goto error_netns_exit;
2342
2343         err = ovs_netdev_init();
2344         if (err)
2345                 goto error_unreg_notifier;
2346
2347         err = dp_register_genl();
2348         if (err < 0)
2349                 goto error_unreg_netdev;
2350
2351         return 0;
2352
2353 error_unreg_netdev:
2354         ovs_netdev_exit();
2355 error_unreg_notifier:
2356         unregister_netdevice_notifier(&ovs_dp_device_notifier);
2357 error_netns_exit:
2358         unregister_pernet_device(&ovs_net_ops);
2359 error_vport_exit:
2360         ovs_vport_exit();
2361 error_flow_exit:
2362         ovs_flow_exit();
2363 error_unreg_rtnl_link:
2364         ovs_internal_dev_rtnl_link_unregister();
2365 error_action_fifos_exit:
2366         action_fifos_exit();
2367 error:
2368         return err;
2369 }
2370
2371 static void dp_cleanup(void)
2372 {
2373         dp_unregister_genl(ARRAY_SIZE(dp_genl_families));
2374         ovs_netdev_exit();
2375         unregister_netdevice_notifier(&ovs_dp_device_notifier);
2376         unregister_pernet_device(&ovs_net_ops);
2377         rcu_barrier();
2378         ovs_vport_exit();
2379         ovs_flow_exit();
2380         ovs_internal_dev_rtnl_link_unregister();
2381         action_fifos_exit();
2382 }
2383
2384 module_init(dp_init);
2385 module_exit(dp_cleanup);
2386
2387 MODULE_DESCRIPTION("Open vSwitch switching datapath");
2388 MODULE_LICENSE("GPL");