GNU Linux-libre 4.9.337-gnu1
net/bridge/br_vlan.c
1 #include <linux/kernel.h>
2 #include <linux/netdevice.h>
3 #include <linux/rtnetlink.h>
4 #include <linux/slab.h>
5 #include <net/switchdev.h>
6
7 #include "br_private.h"
8
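/* Per-vlan entries live in an rhashtable keyed by VID (vg->vlan_hash) and, in
 * parallel, on a VID-sorted list (vg->vlan_list) used for ordered walks.
 * br_vlan_cmp() is the rhashtable compare callback: it returns 0 only when the
 * entry's vid matches the lookup key.
 */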
9 static inline int br_vlan_cmp(struct rhashtable_compare_arg *arg,
10                               const void *ptr)
11 {
12         const struct net_bridge_vlan *vle = ptr;
13         u16 vid = *(u16 *)arg->key;
14
15         return vle->vid != vid;
16 }
17
18 static const struct rhashtable_params br_vlan_rht_params = {
19         .head_offset = offsetof(struct net_bridge_vlan, vnode),
20         .key_offset = offsetof(struct net_bridge_vlan, vid),
21         .key_len = sizeof(u16),
22         .nelem_hint = 3,
23         .locks_mul = 1,
24         .max_size = VLAN_N_VID,
25         .obj_cmpfn = br_vlan_cmp,
26         .automatic_shrinking = true,
27 };
28
29 static struct net_bridge_vlan *br_vlan_lookup(struct rhashtable *tbl, u16 vid)
30 {
31         return rhashtable_lookup_fast(tbl, &vid, br_vlan_rht_params);
32 }
33
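/* vg->pvid is read locklessly on the fast path, so these helpers order the
 * preceding vlan entry updates before the pvid store with smp_wmb(); this is
 * meant to pair with a read barrier on the br_get_pvid() side.
 */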
34 static void __vlan_add_pvid(struct net_bridge_vlan_group *vg, u16 vid)
35 {
36         if (vg->pvid == vid)
37                 return;
38
39         smp_wmb();
40         vg->pvid = vid;
41 }
42
43 static void __vlan_delete_pvid(struct net_bridge_vlan_group *vg, u16 vid)
44 {
45         if (vg->pvid != vid)
46                 return;
47
48         smp_wmb();
49         vg->pvid = 0;
50 }
51
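/* Apply the PVID and UNTAGGED bits of a BRIDGE_VLAN_INFO_* flags word to an
 * existing entry: the pvid lives in the vlan group, the untagged bit on the
 * entry itself.
 */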
52 static void __vlan_add_flags(struct net_bridge_vlan *v, u16 flags)
53 {
54         struct net_bridge_vlan_group *vg;
55
56         if (br_vlan_is_master(v))
57                 vg = br_vlan_group(v->br);
58         else
59                 vg = nbp_vlan_group(v->port);
60
61         if (flags & BRIDGE_VLAN_INFO_PVID)
62                 __vlan_add_pvid(vg, v->vid);
63         else
64                 __vlan_delete_pvid(vg, v->vid);
65
66         if (flags & BRIDGE_VLAN_INFO_UNTAGGED)
67                 v->flags |= BRIDGE_VLAN_INFO_UNTAGGED;
68         else
69                 v->flags &= ~BRIDGE_VLAN_INFO_UNTAGGED;
70 }
71
72 static int __vlan_vid_add(struct net_device *dev, struct net_bridge *br,
73                           u16 vid, u16 flags)
74 {
75         struct switchdev_obj_port_vlan v = {
76                 .obj.orig_dev = dev,
77                 .obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
78                 .flags = flags,
79                 .vid_begin = vid,
80                 .vid_end = vid,
81         };
82         int err;
83
84         /* Try switchdev op first. If it is not supported, fall back to
85          * 8021q add.
86          */
87         err = switchdev_port_obj_add(dev, &v.obj);
88         if (err == -EOPNOTSUPP)
89                 return vlan_vid_add(dev, br->vlan_proto, vid);
90         return err;
91 }
92
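/* Insert the new entry into vg->vlan_list while keeping the list sorted by VID
 * in ascending order; the list is walked under RCU, hence list_add_rcu().
 */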
93 static void __vlan_add_list(struct net_bridge_vlan *v)
94 {
95         struct net_bridge_vlan_group *vg;
96         struct list_head *headp, *hpos;
97         struct net_bridge_vlan *vent;
98
99         if (br_vlan_is_master(v))
100                 vg = br_vlan_group(v->br);
101         else
102                 vg = nbp_vlan_group(v->port);
103
104         headp = &vg->vlan_list;
105         list_for_each_prev(hpos, headp) {
106                 vent = list_entry(hpos, struct net_bridge_vlan, vlist);
107                 if (v->vid < vent->vid)
108                         continue;
109                 else
110                         break;
111         }
112         list_add_rcu(&v->vlist, hpos);
113 }
114
115 static void __vlan_del_list(struct net_bridge_vlan *v)
116 {
117         list_del_rcu(&v->vlist);
118 }
119
120 static int __vlan_vid_del(struct net_device *dev, struct net_bridge *br,
121                           u16 vid)
122 {
123         struct switchdev_obj_port_vlan v = {
124                 .obj.orig_dev = dev,
125                 .obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
126                 .vid_begin = vid,
127                 .vid_end = vid,
128         };
129         int err;
130
131         /* Try switchdev op first. If it is not supported, fall back to
132          * 8021q del.
133          */
134         err = switchdev_port_obj_del(dev, &v.obj);
135         if (err == -EOPNOTSUPP) {
136                 vlan_vid_del(dev, br->vlan_proto, vid);
137                 return 0;
138         }
139         return err;
140 }
141
142 /* Returns the master vlan; if it doesn't exist it gets created. In all cases
143  * a reference is taken to the master vlan before returning.
144  */
145 static struct net_bridge_vlan *br_vlan_get_master(struct net_bridge *br, u16 vid)
146 {
147         struct net_bridge_vlan_group *vg;
148         struct net_bridge_vlan *masterv;
149
150         vg = br_vlan_group(br);
151         masterv = br_vlan_find(vg, vid);
152         if (!masterv) {
153                 /* missing global ctx, create it now */
154                 if (br_vlan_add(br, vid, 0))
155                         return NULL;
156                 masterv = br_vlan_find(vg, vid);
157                 if (WARN_ON(!masterv))
158                         return NULL;
159         }
160         atomic_inc(&masterv->refcnt);
161
162         return masterv;
163 }
164
165 static void br_master_vlan_rcu_free(struct rcu_head *rcu)
166 {
167         struct net_bridge_vlan *v;
168
169         v = container_of(rcu, struct net_bridge_vlan, rcu);
170         WARN_ON(!br_vlan_is_master(v));
171         free_percpu(v->stats);
172         v->stats = NULL;
173         kfree(v);
174 }
175
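/* Drop a reference to the master (bridge) vlan taken by br_vlan_get_master();
 * the final put unlinks the entry and frees it after an RCU grace period.
 */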
176 static void br_vlan_put_master(struct net_bridge_vlan *masterv)
177 {
178         struct net_bridge_vlan_group *vg;
179
180         if (!br_vlan_is_master(masterv))
181                 return;
182
183         vg = br_vlan_group(masterv->br);
184         if (atomic_dec_and_test(&masterv->refcnt)) {
185                 rhashtable_remove_fast(&vg->vlan_hash,
186                                        &masterv->vnode, br_vlan_rht_params);
187                 __vlan_del_list(masterv);
188                 call_rcu(&masterv->rcu, br_master_vlan_rcu_free);
189         }
190 }
191
192 /* This is the shared VLAN add function which works for both ports and bridge
193  * devices. There are four possible calls to this function in terms of the
194  * vlan entry type:
195  * 1. vlan is being added on a port (no master flags, global entry exists)
196  * 2. vlan is being added on a bridge (both master and brentry flags)
197  * 3. vlan is being added on a port, but a global entry didn't exist, so one
198  *    is being created right now (master flag set, brentry flag unset); the
199  *    global entry is used for global per-vlan features, but not for filtering
200  * 4. same as 3 but with both master and brentry flags set so the entry
201  *    will be used for filtering in both the port and the bridge
202  */
203 static int __vlan_add(struct net_bridge_vlan *v, u16 flags)
204 {
205         struct net_bridge_vlan *masterv = NULL;
206         struct net_bridge_port *p = NULL;
207         struct net_bridge_vlan_group *vg;
208         struct net_device *dev;
209         struct net_bridge *br;
210         int err;
211
212         if (br_vlan_is_master(v)) {
213                 br = v->br;
214                 dev = br->dev;
215                 vg = br_vlan_group(br);
216         } else {
217                 p = v->port;
218                 br = p->br;
219                 dev = p->dev;
220                 vg = nbp_vlan_group(p);
221         }
222
223         if (p) {
224                 /* Add VLAN to the device filter if it is supported.
225                  * This ensures tagged traffic enters the bridge when
226                  * promiscuous mode is disabled by br_manage_promisc().
227                  */
228                 err = __vlan_vid_add(dev, br, v->vid, flags);
229                 if (err)
230                         goto out;
231
232                 /* need to work on the master vlan too */
233                 if (flags & BRIDGE_VLAN_INFO_MASTER) {
234                         err = br_vlan_add(br, v->vid, flags |
235                                                       BRIDGE_VLAN_INFO_BRENTRY);
236                         if (err)
237                                 goto out_filt;
238                 }
239
240                 masterv = br_vlan_get_master(br, v->vid);
241                 if (!masterv) {
242                         err = -ENOMEM;
243                         goto out_filt;
244                 }
245                 v->brvlan = masterv;
246                 v->stats = masterv->stats;
247         }
248
249         /* Add the dev mac and count the vlan only if it's usable */
250         if (br_vlan_should_use(v)) {
251                 err = br_fdb_insert(br, p, dev->dev_addr, v->vid);
252                 if (err) {
253                         br_err(br, "failed insert local address into bridge forwarding table\n");
254                         goto out_filt;
255                 }
256                 vg->num_vlans++;
257         }
258
259         err = rhashtable_lookup_insert_fast(&vg->vlan_hash, &v->vnode,
260                                             br_vlan_rht_params);
261         if (err)
262                 goto out_fdb_insert;
263
264         __vlan_add_list(v);
265         __vlan_add_flags(v, flags);
266 out:
267         return err;
268
269 out_fdb_insert:
270         if (br_vlan_should_use(v)) {
271                 br_fdb_find_delete_local(br, p, dev->dev_addr, v->vid);
272                 vg->num_vlans--;
273         }
274
275 out_filt:
276         if (p) {
277                 __vlan_vid_del(dev, br, v->vid);
278                 if (masterv) {
279                         br_vlan_put_master(masterv);
280                         v->brvlan = NULL;
281                 }
282         }
283
284         goto out;
285 }
286
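/* Counterpart of __vlan_add(): removes the port's vid filter, unlinks the
 * per-port entry from the hash and list, and drops the reference held on the
 * master vlan (which unlinks the bridge entry once it is no longer used).
 */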
287 static int __vlan_del(struct net_bridge_vlan *v)
288 {
289         struct net_bridge_vlan *masterv = v;
290         struct net_bridge_vlan_group *vg;
291         struct net_bridge_port *p = NULL;
292         int err = 0;
293
294         if (br_vlan_is_master(v)) {
295                 vg = br_vlan_group(v->br);
296         } else {
297                 p = v->port;
298                 vg = nbp_vlan_group(v->port);
299                 masterv = v->brvlan;
300         }
301
302         __vlan_delete_pvid(vg, v->vid);
303         if (p) {
304                 err = __vlan_vid_del(p->dev, p->br, v->vid);
305                 if (err)
306                         goto out;
307         }
308
309         if (br_vlan_should_use(v)) {
310                 v->flags &= ~BRIDGE_VLAN_INFO_BRENTRY;
311                 vg->num_vlans--;
312         }
313
314         if (masterv != v) {
315                 rhashtable_remove_fast(&vg->vlan_hash, &v->vnode,
316                                        br_vlan_rht_params);
317                 __vlan_del_list(v);
318                 kfree_rcu(v, rcu);
319         }
320
321         br_vlan_put_master(masterv);
322 out:
323         return err;
324 }
325
326 static void __vlan_group_free(struct net_bridge_vlan_group *vg)
327 {
328         WARN_ON(!list_empty(&vg->vlan_list));
329         rhashtable_destroy(&vg->vlan_hash);
330         kfree(vg);
331 }
332
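/* Remove every vlan in the group; used when the vlan group of a bridge or
 * port is torn down (see br_vlan_flush() and nbp_vlan_flush() below).
 */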
333 static void __vlan_flush(struct net_bridge_vlan_group *vg)
334 {
335         struct net_bridge_vlan *vlan, *tmp;
336
337         __vlan_delete_pvid(vg, vg->pvid);
338         list_for_each_entry_safe(vlan, tmp, &vg->vlan_list, vlist)
339                 __vlan_del(vlan);
340 }
341
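/* Transmit-side vlan handling for an already-filtered skb: strip the tag for
 * vlans configured as untagged, update per-vlan TX stats, and drop frames
 * whose vlan entry is missing or unusable (except those destined to a
 * promiscuous bridge device).
 */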
342 struct sk_buff *br_handle_vlan(struct net_bridge *br,
343                                struct net_bridge_vlan_group *vg,
344                                struct sk_buff *skb)
345 {
346         struct br_vlan_stats *stats;
347         struct net_bridge_vlan *v;
348         u16 vid;
349
350         /* If this packet was not filtered at input, let it pass */
351         if (!BR_INPUT_SKB_CB(skb)->vlan_filtered)
352                 goto out;
353
354         /* At this point, we know that the frame was filtered and contains
355          * a valid vlan id.  If the vlan id has the untagged flag set,
356          * send untagged; otherwise, send tagged.
357          */
358         br_vlan_get_tag(skb, &vid);
359         v = br_vlan_find(vg, vid);
360         /* Vlan entry must be configured at this point.  The
361          * only exception is the bridge is set in promisc mode and the
362          * only exception is when the bridge is in promisc mode and the
363          * pass the packet as is.
364          */
365         if (!v || !br_vlan_should_use(v)) {
366                 if ((br->dev->flags & IFF_PROMISC) && skb->dev == br->dev) {
367                         goto out;
368                 } else {
369                         kfree_skb(skb);
370                         return NULL;
371                 }
372         }
373         if (br->vlan_stats_enabled) {
374                 stats = this_cpu_ptr(v->stats);
375                 u64_stats_update_begin(&stats->syncp);
376                 stats->tx_bytes += skb->len;
377                 stats->tx_packets++;
378                 u64_stats_update_end(&stats->syncp);
379         }
380
381         if (v->flags & BRIDGE_VLAN_INFO_UNTAGGED)
382                 skb->vlan_tci = 0;
383 out:
384         return skb;
385 }
386
387 /* Called under RCU */
388 static bool __allowed_ingress(const struct net_bridge *br,
389                               struct net_bridge_vlan_group *vg,
390                               struct sk_buff *skb, u16 *vid)
391 {
392         struct br_vlan_stats *stats;
393         struct net_bridge_vlan *v;
394         bool tagged;
395
396         BR_INPUT_SKB_CB(skb)->vlan_filtered = true;
397         /* If vlan tx offload is disabled on the bridge device and the frame was
398          * sent from a vlan device on top of the bridge device, it does not have
399          * a HW-accelerated vlan tag.
400          */
401         if (unlikely(!skb_vlan_tag_present(skb) &&
402                      skb->protocol == br->vlan_proto)) {
403                 skb = skb_vlan_untag(skb);
404                 if (unlikely(!skb))
405                         return false;
406         }
407
408         if (!br_vlan_get_tag(skb, vid)) {
409                 /* Tagged frame */
410                 if (skb->vlan_proto != br->vlan_proto) {
411                         /* Protocol-mismatch, empty out vlan_tci for new tag */
412                         skb_push(skb, ETH_HLEN);
413                         skb = vlan_insert_tag_set_proto(skb, skb->vlan_proto,
414                                                         skb_vlan_tag_get(skb));
415                         if (unlikely(!skb))
416                                 return false;
417
418                         skb_pull(skb, ETH_HLEN);
419                         skb_reset_mac_len(skb);
420                         *vid = 0;
421                         tagged = false;
422                 } else {
423                         tagged = true;
424                 }
425         } else {
426                 /* Untagged frame */
427                 tagged = false;
428         }
429
430         if (!*vid) {
431                 u16 pvid = br_get_pvid(vg);
432
433                 /* Frame had a tag with VID 0 or did not have a tag.
434                  * See if pvid is set on this port.  That tells us which
435                  * vlan untagged or priority-tagged traffic belongs to.
436                  */
437                 if (!pvid)
438                         goto drop;
439
440                 /* PVID is set on this port.  Any untagged or priority-tagged
441                  * ingress frame is considered to belong to this vlan.
442                  */
443                 *vid = pvid;
444                 if (likely(!tagged))
445                         /* Untagged Frame. */
446                         __vlan_hwaccel_put_tag(skb, br->vlan_proto, pvid);
447                 else
448                         /* Priority-tagged Frame.
449                          * At this point, we know that skb->vlan_tci had the
450                          * VLAN_TAG_PRESENT bit set and its VID field was 0x000.
451                          * We update only the VID field and preserve the PCP field.
452                          */
453                         skb->vlan_tci |= pvid;
454
455                 /* if stats are disabled we can avoid the lookup */
456                 if (!br->vlan_stats_enabled)
457                         return true;
458         }
459         v = br_vlan_find(vg, *vid);
460         if (!v || !br_vlan_should_use(v))
461                 goto drop;
462
463         if (br->vlan_stats_enabled) {
464                 stats = this_cpu_ptr(v->stats);
465                 u64_stats_update_begin(&stats->syncp);
466                 stats->rx_bytes += skb->len;
467                 stats->rx_packets++;
468                 u64_stats_update_end(&stats->syncp);
469         }
470
471         return true;
472
473 drop:
474         kfree_skb(skb);
475         return false;
476 }
477
478 bool br_allowed_ingress(const struct net_bridge *br,
479                         struct net_bridge_vlan_group *vg, struct sk_buff *skb,
480                         u16 *vid)
481 {
482         /* If VLAN filtering is disabled on the bridge, all packets are
483          * permitted.
484          */
485         if (!br->vlan_enabled) {
486                 BR_INPUT_SKB_CB(skb)->vlan_filtered = false;
487                 return true;
488         }
489
490         return __allowed_ingress(br, vg, skb, vid);
491 }
492
493 /* Called under RCU. */
494 bool br_allowed_egress(struct net_bridge_vlan_group *vg,
495                        const struct sk_buff *skb)
496 {
497         const struct net_bridge_vlan *v;
498         u16 vid;
499
500         /* If this packet was not filtered at input, let it pass */
501         if (!BR_INPUT_SKB_CB(skb)->vlan_filtered)
502                 return true;
503
504         br_vlan_get_tag(skb, &vid);
505         v = br_vlan_find(vg, vid);
506         if (v && br_vlan_should_use(v))
507                 return true;
508
509         return false;
510 }
511
512 /* Called under RCU */
513 bool br_should_learn(struct net_bridge_port *p, struct sk_buff *skb, u16 *vid)
514 {
515         struct net_bridge_vlan_group *vg;
516         struct net_bridge *br = p->br;
517
518         /* If filtering was disabled at input, let it pass. */
519         if (!br->vlan_enabled)
520                 return true;
521
522         vg = nbp_vlan_group_rcu(p);
523         if (!vg || !vg->num_vlans)
524                 return false;
525
526         if (!br_vlan_get_tag(skb, vid) && skb->vlan_proto != br->vlan_proto)
527                 *vid = 0;
528
529         if (!*vid) {
530                 *vid = br_get_pvid(vg);
531                 if (!*vid)
532                         return false;
533
534                 return true;
535         }
536
537         if (br_vlan_find(vg, *vid))
538                 return true;
539
540         return false;
541 }
542
543 /* Must be protected by RTNL.
544  * Must be called with vid in range from 1 to 4094 inclusive.
545  */
546 int br_vlan_add(struct net_bridge *br, u16 vid, u16 flags)
547 {
548         struct net_bridge_vlan_group *vg;
549         struct net_bridge_vlan *vlan;
550         int ret;
551
552         ASSERT_RTNL();
553
554         vg = br_vlan_group(br);
555         vlan = br_vlan_find(vg, vid);
556         if (vlan) {
557                 if (!br_vlan_is_brentry(vlan)) {
558                         /* Trying to change flags of non-existent bridge vlan */
559                         if (!(flags & BRIDGE_VLAN_INFO_BRENTRY))
560                                 return -EINVAL;
561                         /* It was only kept for port vlans, now make it real */
562                         ret = br_fdb_insert(br, NULL, br->dev->dev_addr,
563                                             vlan->vid);
564                         if (ret) {
565                                 br_err(br, "failed insert local address into bridge forwarding table\n");
566                                 return ret;
567                         }
568                         atomic_inc(&vlan->refcnt);
569                         vlan->flags |= BRIDGE_VLAN_INFO_BRENTRY;
570                         vg->num_vlans++;
571                 }
572                 __vlan_add_flags(vlan, flags);
573                 return 0;
574         }
575
576         vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
577         if (!vlan)
578                 return -ENOMEM;
579
580         vlan->stats = netdev_alloc_pcpu_stats(struct br_vlan_stats);
581         if (!vlan->stats) {
582                 kfree(vlan);
583                 return -ENOMEM;
584         }
585         vlan->vid = vid;
586         vlan->flags = flags | BRIDGE_VLAN_INFO_MASTER;
587         vlan->flags &= ~BRIDGE_VLAN_INFO_PVID;
588         vlan->br = br;
589         if (flags & BRIDGE_VLAN_INFO_BRENTRY)
590                 atomic_set(&vlan->refcnt, 1);
591         ret = __vlan_add(vlan, flags);
592         if (ret) {
593                 free_percpu(vlan->stats);
594                 kfree(vlan);
595         }
596
597         return ret;
598 }
599
600 /* Must be protected by RTNL.
601  * Must be called with vid in range from 1 to 4094 inclusive.
602  */
603 int br_vlan_delete(struct net_bridge *br, u16 vid)
604 {
605         struct net_bridge_vlan_group *vg;
606         struct net_bridge_vlan *v;
607
608         ASSERT_RTNL();
609
610         vg = br_vlan_group(br);
611         v = br_vlan_find(vg, vid);
612         if (!v || !br_vlan_is_brentry(v))
613                 return -ENOENT;
614
615         br_fdb_find_delete_local(br, NULL, br->dev->dev_addr, vid);
616         br_fdb_delete_by_port(br, NULL, vid, 0);
617
618         return __vlan_del(v);
619 }
620
621 void br_vlan_flush(struct net_bridge *br)
622 {
623         struct net_bridge_vlan_group *vg;
624
625         ASSERT_RTNL();
626
627         /* delete auto-added default pvid local fdb before flushing vlans,
628          * otherwise it will be leaked on bridge device init failure
629          */
630         br_fdb_delete_by_port(br, NULL, 0, 1);
631
632         vg = br_vlan_group(br);
633         __vlan_flush(vg);
634         RCU_INIT_POINTER(br->vlgrp, NULL);
635         synchronize_rcu();
636         __vlan_group_free(vg);
637 }
638
639 struct net_bridge_vlan *br_vlan_find(struct net_bridge_vlan_group *vg, u16 vid)
640 {
641         if (!vg)
642                 return NULL;
643
644         return br_vlan_lookup(&vg->vlan_hash, vid);
645 }
646
647 /* Must be protected by RTNL. */
648 static void recalculate_group_addr(struct net_bridge *br)
649 {
650         if (br->group_addr_set)
651                 return;
652
653         spin_lock_bh(&br->lock);
654         if (!br->vlan_enabled || br->vlan_proto == htons(ETH_P_8021Q)) {
655                 /* Bridge Group Address */
656                 br->group_addr[5] = 0x00;
657         } else { /* vlan_enabled && ETH_P_8021AD */
658                 /* Provider Bridge Group Address */
659                 br->group_addr[5] = 0x08;
660         }
661         spin_unlock_bh(&br->lock);
662 }
663
664 /* Must be protected by RTNL. */
665 void br_recalculate_fwd_mask(struct net_bridge *br)
666 {
667         if (!br->vlan_enabled || br->vlan_proto == htons(ETH_P_8021Q))
668                 br->group_fwd_mask_required = BR_GROUPFWD_DEFAULT;
669         else /* vlan_enabled && ETH_P_8021AD */
670                 br->group_fwd_mask_required = BR_GROUPFWD_8021AD &
671                                               ~(1u << br->group_addr[5]);
672 }
673
674 int __br_vlan_filter_toggle(struct net_bridge *br, unsigned long val)
675 {
676         struct switchdev_attr attr = {
677                 .orig_dev = br->dev,
678                 .id = SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING,
679                 .flags = SWITCHDEV_F_SKIP_EOPNOTSUPP,
680                 .u.vlan_filtering = val,
681         };
682         int err;
683
684         if (br->vlan_enabled == val)
685                 return 0;
686
687         err = switchdev_port_attr_set(br->dev, &attr);
688         if (err && err != -EOPNOTSUPP)
689                 return err;
690
691         br->vlan_enabled = val;
692         br_manage_promisc(br);
693         recalculate_group_addr(br);
694         br_recalculate_fwd_mask(br);
695
696         return 0;
697 }
698
699 int br_vlan_filter_toggle(struct net_bridge *br, unsigned long val)
700 {
701         return __br_vlan_filter_toggle(br, val);
702 }
703
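/* Switch the bridge's vlan protocol (802.1Q <-> 802.1ad). The new-proto VIDs
 * are added to every port's device filter first; only if all adds succeed is
 * the protocol flipped and the old-proto VIDs removed, otherwise everything
 * added so far is rolled back.
 */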
704 int __br_vlan_set_proto(struct net_bridge *br, __be16 proto)
705 {
706         int err = 0;
707         struct net_bridge_port *p;
708         struct net_bridge_vlan *vlan;
709         struct net_bridge_vlan_group *vg;
710         __be16 oldproto;
711
712         if (br->vlan_proto == proto)
713                 return 0;
714
715         /* Add VLANs for the new proto to the device filter. */
716         list_for_each_entry(p, &br->port_list, list) {
717                 vg = nbp_vlan_group(p);
718                 list_for_each_entry(vlan, &vg->vlan_list, vlist) {
719                         err = vlan_vid_add(p->dev, proto, vlan->vid);
720                         if (err)
721                                 goto err_filt;
722                 }
723         }
724
725         oldproto = br->vlan_proto;
726         br->vlan_proto = proto;
727
728         recalculate_group_addr(br);
729         br_recalculate_fwd_mask(br);
730
731         /* Delete VLANs for the old proto from the device filter. */
732         list_for_each_entry(p, &br->port_list, list) {
733                 vg = nbp_vlan_group(p);
734                 list_for_each_entry(vlan, &vg->vlan_list, vlist)
735                         vlan_vid_del(p->dev, oldproto, vlan->vid);
736         }
737
738         return 0;
739
740 err_filt:
741         list_for_each_entry_continue_reverse(vlan, &vg->vlan_list, vlist)
742                 vlan_vid_del(p->dev, proto, vlan->vid);
743
744         list_for_each_entry_continue_reverse(p, &br->port_list, list) {
745                 vg = nbp_vlan_group(p);
746                 list_for_each_entry(vlan, &vg->vlan_list, vlist)
747                         vlan_vid_del(p->dev, proto, vlan->vid);
748         }
749
750         return err;
751 }
752
753 int br_vlan_set_proto(struct net_bridge *br, unsigned long val)
754 {
755         if (val != ETH_P_8021Q && val != ETH_P_8021AD)
756                 return -EPROTONOSUPPORT;
757
758         return __br_vlan_set_proto(br, htons(val));
759 }
760
761 int br_vlan_set_stats(struct net_bridge *br, unsigned long val)
762 {
763         switch (val) {
764         case 0:
765         case 1:
766                 br->vlan_stats_enabled = val;
767                 break;
768         default:
769                 return -EINVAL;
770         }
771
772         return 0;
773 }
774
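/* True if @vid is still the group's pvid and the entry looks like the
 * auto-installed default (usable and untagged), i.e. it has not been
 * reconfigured by the user.
 */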
775 static bool vlan_default_pvid(struct net_bridge_vlan_group *vg, u16 vid)
776 {
777         struct net_bridge_vlan *v;
778
779         if (vid != vg->pvid)
780                 return false;
781
782         v = br_vlan_lookup(&vg->vlan_hash, vid);
783         if (v && br_vlan_should_use(v) &&
784             (v->flags & BRIDGE_VLAN_INFO_UNTAGGED))
785                 return true;
786
787         return false;
788 }
789
790 static void br_vlan_disable_default_pvid(struct net_bridge *br)
791 {
792         struct net_bridge_port *p;
793         u16 pvid = br->default_pvid;
794
795         /* Disable default_pvid on all ports where it is still
796          * configured.
797          */
798         if (vlan_default_pvid(br_vlan_group(br), pvid))
799                 br_vlan_delete(br, pvid);
800
801         list_for_each_entry(p, &br->port_list, list) {
802                 if (vlan_default_pvid(nbp_vlan_group(p), pvid))
803                         nbp_vlan_delete(p, pvid);
804         }
805
806         br->default_pvid = 0;
807 }
808
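/* Move the default pvid to @pvid on the bridge and on every port that still
 * uses the old default. Ports changed along the way are tracked in a bitmap
 * (bit 0 for the bridge itself) so the operation can be rolled back on error.
 */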
809 int __br_vlan_set_default_pvid(struct net_bridge *br, u16 pvid)
810 {
811         const struct net_bridge_vlan *pvent;
812         struct net_bridge_vlan_group *vg;
813         struct net_bridge_port *p;
814         u16 old_pvid;
815         int err = 0;
816         unsigned long *changed;
817
818         if (!pvid) {
819                 br_vlan_disable_default_pvid(br);
820                 return 0;
821         }
822
823         changed = kcalloc(BITS_TO_LONGS(BR_MAX_PORTS), sizeof(unsigned long),
824                           GFP_KERNEL);
825         if (!changed)
826                 return -ENOMEM;
827
828         old_pvid = br->default_pvid;
829
830         /* Update default_pvid config only if we do not conflict with
831          * user configuration.
832          */
833         vg = br_vlan_group(br);
834         pvent = br_vlan_find(vg, pvid);
835         if ((!old_pvid || vlan_default_pvid(vg, old_pvid)) &&
836             (!pvent || !br_vlan_should_use(pvent))) {
837                 err = br_vlan_add(br, pvid,
838                                   BRIDGE_VLAN_INFO_PVID |
839                                   BRIDGE_VLAN_INFO_UNTAGGED |
840                                   BRIDGE_VLAN_INFO_BRENTRY);
841                 if (err)
842                         goto out;
843                 br_vlan_delete(br, old_pvid);
844                 set_bit(0, changed);
845         }
846
847         list_for_each_entry(p, &br->port_list, list) {
848                 /* Update default_pvid config only if we do not conflict with
849                  * user configuration.
850                  */
851                 vg = nbp_vlan_group(p);
852                 if ((old_pvid &&
853                      !vlan_default_pvid(vg, old_pvid)) ||
854                     br_vlan_find(vg, pvid))
855                         continue;
856
857                 err = nbp_vlan_add(p, pvid,
858                                    BRIDGE_VLAN_INFO_PVID |
859                                    BRIDGE_VLAN_INFO_UNTAGGED);
860                 if (err)
861                         goto err_port;
862                 nbp_vlan_delete(p, old_pvid);
863                 set_bit(p->port_no, changed);
864         }
865
866         br->default_pvid = pvid;
867
868 out:
869         kfree(changed);
870         return err;
871
872 err_port:
873         list_for_each_entry_continue_reverse(p, &br->port_list, list) {
874                 if (!test_bit(p->port_no, changed))
875                         continue;
876
877                 if (old_pvid)
878                         nbp_vlan_add(p, old_pvid,
879                                      BRIDGE_VLAN_INFO_PVID |
880                                      BRIDGE_VLAN_INFO_UNTAGGED);
881                 nbp_vlan_delete(p, pvid);
882         }
883
884         if (test_bit(0, changed)) {
885                 if (old_pvid)
886                         br_vlan_add(br, old_pvid,
887                                     BRIDGE_VLAN_INFO_PVID |
888                                     BRIDGE_VLAN_INFO_UNTAGGED |
889                                     BRIDGE_VLAN_INFO_BRENTRY);
890                 br_vlan_delete(br, pvid);
891         }
892         goto out;
893 }
894
895 int br_vlan_set_default_pvid(struct net_bridge *br, unsigned long val)
896 {
897         u16 pvid = val;
898         int err = 0;
899
900         if (val >= VLAN_VID_MASK)
901                 return -EINVAL;
902
903         if (pvid == br->default_pvid)
904                 goto out;
905
906         /* Only allow default pvid change when filtering is disabled */
907         if (br->vlan_enabled) {
908                 pr_info_once("Please disable vlan filtering to change default_pvid\n");
909                 err = -EPERM;
910                 goto out;
911         }
912         err = __br_vlan_set_default_pvid(br, pvid);
913 out:
914         return err;
915 }
916
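/* Allocate and initialize the bridge's vlan group and install VID 1 as the
 * default pvid, untagged bridge entry.
 */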
917 int br_vlan_init(struct net_bridge *br)
918 {
919         struct net_bridge_vlan_group *vg;
920         int ret = -ENOMEM;
921
922         vg = kzalloc(sizeof(*vg), GFP_KERNEL);
923         if (!vg)
924                 goto out;
925         ret = rhashtable_init(&vg->vlan_hash, &br_vlan_rht_params);
926         if (ret)
927                 goto err_rhtbl;
928         INIT_LIST_HEAD(&vg->vlan_list);
929         br->vlan_proto = htons(ETH_P_8021Q);
930         br->default_pvid = 1;
931         rcu_assign_pointer(br->vlgrp, vg);
932         ret = br_vlan_add(br, 1,
933                           BRIDGE_VLAN_INFO_PVID | BRIDGE_VLAN_INFO_UNTAGGED |
934                           BRIDGE_VLAN_INFO_BRENTRY);
935         if (ret)
936                 goto err_vlan_add;
937
938 out:
939         return ret;
940
941 err_vlan_add:
942         rhashtable_destroy(&vg->vlan_hash);
943 err_rhtbl:
944         kfree(vg);
945
946         goto out;
947 }
948
949 int nbp_vlan_init(struct net_bridge_port *p)
950 {
951         struct switchdev_attr attr = {
952                 .orig_dev = p->br->dev,
953                 .id = SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING,
954                 .flags = SWITCHDEV_F_SKIP_EOPNOTSUPP,
955                 .u.vlan_filtering = p->br->vlan_enabled,
956         };
957         struct net_bridge_vlan_group *vg;
958         int ret = -ENOMEM;
959
960         vg = kzalloc(sizeof(struct net_bridge_vlan_group), GFP_KERNEL);
961         if (!vg)
962                 goto out;
963
964         ret = switchdev_port_attr_set(p->dev, &attr);
965         if (ret && ret != -EOPNOTSUPP)
966                 goto err_vlan_enabled;
967
968         ret = rhashtable_init(&vg->vlan_hash, &br_vlan_rht_params);
969         if (ret)
970                 goto err_rhtbl;
971         INIT_LIST_HEAD(&vg->vlan_list);
972         rcu_assign_pointer(p->vlgrp, vg);
973         if (p->br->default_pvid) {
974                 ret = nbp_vlan_add(p, p->br->default_pvid,
975                                    BRIDGE_VLAN_INFO_PVID |
976                                    BRIDGE_VLAN_INFO_UNTAGGED);
977                 if (ret)
978                         goto err_vlan_add;
979         }
980 out:
981         return ret;
982
983 err_vlan_add:
984         RCU_INIT_POINTER(p->vlgrp, NULL);
985         synchronize_rcu();
986         rhashtable_destroy(&vg->vlan_hash);
987 err_vlan_enabled:
988 err_rhtbl:
989         kfree(vg);
990
991         goto out;
992 }
993
994 /* Must be protected by RTNL.
995  * Must be called with vid in range from 1 to 4094 inclusive.
996  */
997 int nbp_vlan_add(struct net_bridge_port *port, u16 vid, u16 flags)
998 {
999         struct switchdev_obj_port_vlan v = {
1000                 .obj.orig_dev = port->dev,
1001                 .obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
1002                 .flags = flags,
1003                 .vid_begin = vid,
1004                 .vid_end = vid,
1005         };
1006         struct net_bridge_vlan *vlan;
1007         int ret;
1008
1009         ASSERT_RTNL();
1010
1011         vlan = br_vlan_find(nbp_vlan_group(port), vid);
1012         if (vlan) {
1013                 /* Pass the flags to the hardware bridge */
1014                 ret = switchdev_port_obj_add(port->dev, &v.obj);
1015                 if (ret && ret != -EOPNOTSUPP)
1016                         return ret;
1017                 __vlan_add_flags(vlan, flags);
1018                 return 0;
1019         }
1020
1021         vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
1022         if (!vlan)
1023                 return -ENOMEM;
1024
1025         vlan->vid = vid;
1026         vlan->port = port;
1027         ret = __vlan_add(vlan, flags);
1028         if (ret)
1029                 kfree(vlan);
1030
1031         return ret;
1032 }
1033
1034 /* Must be protected by RTNL.
1035  * Must be called with vid in range from 1 to 4094 inclusive.
1036  */
1037 int nbp_vlan_delete(struct net_bridge_port *port, u16 vid)
1038 {
1039         struct net_bridge_vlan *v;
1040
1041         ASSERT_RTNL();
1042
1043         v = br_vlan_find(nbp_vlan_group(port), vid);
1044         if (!v)
1045                 return -ENOENT;
1046         br_fdb_find_delete_local(port->br, port, port->dev->dev_addr, vid);
1047         br_fdb_delete_by_port(port->br, port, vid, 0);
1048
1049         return __vlan_del(v);
1050 }
1051
1052 void nbp_vlan_flush(struct net_bridge_port *port)
1053 {
1054         struct net_bridge_vlan_group *vg;
1055
1056         ASSERT_RTNL();
1057
1058         vg = nbp_vlan_group(port);
1059         __vlan_flush(vg);
1060         RCU_INIT_POINTER(port->vlgrp, NULL);
1061         synchronize_rcu();
1062         __vlan_group_free(vg);
1063 }
1064
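/* Sum the per-cpu counters of a vlan entry into @stats, using the u64_stats
 * seqcount to get consistent 64-bit snapshots on 32-bit hosts.
 */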
1065 void br_vlan_get_stats(const struct net_bridge_vlan *v,
1066                        struct br_vlan_stats *stats)
1067 {
1068         int i;
1069
1070         memset(stats, 0, sizeof(*stats));
1071         for_each_possible_cpu(i) {
1072                 u64 rxpackets, rxbytes, txpackets, txbytes;
1073                 struct br_vlan_stats *cpu_stats;
1074                 unsigned int start;
1075
1076                 cpu_stats = per_cpu_ptr(v->stats, i);
1077                 do {
1078                         start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
1079                         rxpackets = cpu_stats->rx_packets;
1080                         rxbytes = cpu_stats->rx_bytes;
1081                         txbytes = cpu_stats->tx_bytes;
1082                         txpackets = cpu_stats->tx_packets;
1083                 } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));
1084
1085                 stats->rx_packets += rxpackets;
1086                 stats->rx_bytes += rxbytes;
1087                 stats->tx_bytes += txbytes;
1088                 stats->tx_packets += txpackets;
1089         }
1090 }