GNU Linux-libre 4.4.284-gnu1
drivers/net/ipvlan/ipvlan_main.c
/* Copyright (c) 2014 Mahesh Bandewar <maheshb@google.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of
 * the License, or (at your option) any later version.
 *
 */

#include "ipvlan.h"

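/* Keep a slave's MTU in sync with its master's, minus any per-device
 * adjustment recorded in mtu_adj.
 */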
void ipvlan_adjust_mtu(struct ipvl_dev *ipvlan, struct net_device *dev)
{
        ipvlan->dev->mtu = dev->mtu - ipvlan->mtu_adj;
}

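/* Switch the port between L2 and L3 mode.  In L3 mode the slaves do not
 * participate in ARP, so IFF_NOARP is toggled on every slave accordingly.
 */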
void ipvlan_set_port_mode(struct ipvl_port *port, u32 nval)
{
        struct ipvl_dev *ipvlan;

        if (port->mode != nval) {
                list_for_each_entry(ipvlan, &port->ipvlans, pnode) {
                        if (nval == IPVLAN_MODE_L3)
                                ipvlan->dev->flags |= IFF_NOARP;
                        else
                                ipvlan->dev->flags &= ~IFF_NOARP;
                }
                port->mode = nval;
        }
}

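/* Allocate the per-master port structure, initialise its address hash and
 * multicast backlog, and attach it to the master via the rx_handler.  Only
 * non-loopback Ethernet devices that are not macvlan ports are accepted.
 */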
static int ipvlan_port_create(struct net_device *dev)
{
        struct ipvl_port *port;
        int err, idx;

        if (dev->type != ARPHRD_ETHER || dev->flags & IFF_LOOPBACK) {
                netdev_err(dev, "Master is either lo or non-ether device\n");
                return -EINVAL;
        }

        if (netif_is_macvlan_port(dev)) {
                netdev_err(dev, "Master is a macvlan port.\n");
                return -EBUSY;
        }

        port = kzalloc(sizeof(struct ipvl_port), GFP_KERNEL);
        if (!port)
                return -ENOMEM;

        port->dev = dev;
        port->mode = IPVLAN_MODE_L3;
        INIT_LIST_HEAD(&port->ipvlans);
        for (idx = 0; idx < IPVLAN_HASH_SIZE; idx++)
                INIT_HLIST_HEAD(&port->hlhead[idx]);

        skb_queue_head_init(&port->backlog);
        INIT_WORK(&port->wq, ipvlan_process_multicast);

        err = netdev_rx_handler_register(dev, ipvlan_handle_frame, port);
        if (err)
                goto err;

        dev->priv_flags |= IFF_IPVLAN_MASTER;
        return 0;

err:
        kfree_rcu(port, rcu);
        return err;
}

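/* Tear the port down: detach the rx_handler from the master, flush any
 * pending multicast work and free the port structure via RCU.
 */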
static void ipvlan_port_destroy(struct net_device *dev)
{
        struct ipvl_port *port = ipvlan_port_get_rtnl(dev);

        dev->priv_flags &= ~IFF_IPVLAN_MASTER;
        netdev_rx_handler_unregister(dev);
        cancel_work_sync(&port->wq);
        __skb_queue_purge(&port->backlog);
        kfree_rcu(port, rcu);
}

/* ipvlan network devices have devices nesting below them and are a special
 * "super class" of normal network devices; split their locks off into a
 * separate class since they always nest.
 */
static struct lock_class_key ipvlan_netdev_xmit_lock_key;
static struct lock_class_key ipvlan_netdev_addr_lock_key;

#define IPVLAN_ALWAYS_ON_OFLOADS \
        (NETIF_F_SG | NETIF_F_HW_CSUM | \
         NETIF_F_GSO_ROBUST | NETIF_F_GSO_SOFTWARE | NETIF_F_GSO_ENCAP_ALL)

#define IPVLAN_ALWAYS_ON \
        (IPVLAN_ALWAYS_ON_OFLOADS | NETIF_F_LLTX | NETIF_F_VLAN_CHALLENGED)

#define IPVLAN_FEATURES \
        (NETIF_F_SG | NETIF_F_ALL_CSUM | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST | \
         NETIF_F_GSO | NETIF_F_TSO | NETIF_F_UFO | NETIF_F_GSO_ROBUST | \
         NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_GRO | NETIF_F_RXCSUM | \
         NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_STAG_FILTER)

        /* NETIF_F_GSO_ENCAP_ALL and NETIF_F_GSO_SOFTWARE are handled via
         * IPVLAN_ALWAYS_ON_OFLOADS above rather than IPVLAN_FEATURES.
         */

#define IPVLAN_STATE_MASK \
        ((1<<__LINK_STATE_NOCARRIER) | (1<<__LINK_STATE_DORMANT))

static void ipvlan_set_lockdep_class_one(struct net_device *dev,
                                         struct netdev_queue *txq,
                                         void *_unused)
{
        lockdep_set_class(&txq->_xmit_lock, &ipvlan_netdev_xmit_lock_key);
}

static void ipvlan_set_lockdep_class(struct net_device *dev)
{
        lockdep_set_class(&dev->addr_list_lock, &ipvlan_netdev_addr_lock_key);
        netdev_for_each_tx_queue(dev, ipvlan_set_lockdep_class_one, NULL);
}

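/* ndo_init: inherit carrier/dormant state, features, GSO limits and header
 * length from the physical device and allocate the per-CPU statistics.
 */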
static int ipvlan_init(struct net_device *dev)
{
        struct ipvl_dev *ipvlan = netdev_priv(dev);
        const struct net_device *phy_dev = ipvlan->phy_dev;

        dev->state = (dev->state & ~IPVLAN_STATE_MASK) |
                     (phy_dev->state & IPVLAN_STATE_MASK);
        dev->features = phy_dev->features & IPVLAN_FEATURES;
        dev->features |= IPVLAN_ALWAYS_ON;
        dev->vlan_features = phy_dev->vlan_features & IPVLAN_FEATURES;
        dev->vlan_features |= IPVLAN_ALWAYS_ON_OFLOADS;
        dev->gso_max_size = phy_dev->gso_max_size;
        dev->hard_header_len = phy_dev->hard_header_len;

        ipvlan_set_lockdep_class(dev);

        ipvlan->pcpu_stats = alloc_percpu(struct ipvl_pcpu_stats);
        if (!ipvlan->pcpu_stats)
                return -ENOMEM;

        return 0;
}

static void ipvlan_uninit(struct net_device *dev)
{
        struct ipvl_dev *ipvlan = netdev_priv(dev);
        struct ipvl_port *port = ipvlan->port;

        free_percpu(ipvlan->pcpu_stats);

        port->count -= 1;
        if (!port->count)
                ipvlan_port_destroy(port->dev);
}

static int ipvlan_open(struct net_device *dev)
{
        struct ipvl_dev *ipvlan = netdev_priv(dev);
        struct ipvl_addr *addr;

        if (ipvlan->port->mode == IPVLAN_MODE_L3)
                dev->flags |= IFF_NOARP;
        else
                dev->flags &= ~IFF_NOARP;

        list_for_each_entry(addr, &ipvlan->addrs, anode)
                ipvlan_ht_addr_add(ipvlan, addr);

        return 0;
}

static int ipvlan_stop(struct net_device *dev)
{
        struct ipvl_dev *ipvlan = netdev_priv(dev);
        struct net_device *phy_dev = ipvlan->phy_dev;
        struct ipvl_addr *addr;

        dev_uc_unsync(phy_dev, dev);
        dev_mc_unsync(phy_dev, dev);

        list_for_each_entry(addr, &ipvlan->addrs, anode)
                ipvlan_ht_addr_del(addr);

        return 0;
}

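/* ndo_start_xmit: hand the skb to the mode-specific xmit path and account
 * the result in the per-CPU statistics.
 */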
static netdev_tx_t ipvlan_start_xmit(struct sk_buff *skb,
                                     struct net_device *dev)
{
        const struct ipvl_dev *ipvlan = netdev_priv(dev);
        int skblen = skb->len;
        int ret;

        ret = ipvlan_queue_xmit(skb, dev);
        if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) {
                struct ipvl_pcpu_stats *pcptr;

                pcptr = this_cpu_ptr(ipvlan->pcpu_stats);

                u64_stats_update_begin(&pcptr->syncp);
                pcptr->tx_pkts++;
                pcptr->tx_bytes += skblen;
                u64_stats_update_end(&pcptr->syncp);
        } else {
                this_cpu_inc(ipvlan->pcpu_stats->tx_drps);
        }
        return ret;
}

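/* ndo_fix_features: constrain the requested feature set to what the
 * physical device supports while keeping the always-on flags set.
 */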
static netdev_features_t ipvlan_fix_features(struct net_device *dev,
                                             netdev_features_t features)
{
        struct ipvl_dev *ipvlan = netdev_priv(dev);

        features |= NETIF_F_ALL_FOR_ALL;
        features &= (ipvlan->sfeatures | ~IPVLAN_FEATURES);
        features = netdev_increment_features(ipvlan->phy_dev->features,
                                             features, features);
        features |= IPVLAN_ALWAYS_ON;
        features &= (IPVLAN_FEATURES | IPVLAN_ALWAYS_ON);

        return features;
}

static void ipvlan_change_rx_flags(struct net_device *dev, int change)
{
        struct ipvl_dev *ipvlan = netdev_priv(dev);
        struct net_device *phy_dev = ipvlan->phy_dev;

        if (change & IFF_ALLMULTI)
                dev_set_allmulti(phy_dev, dev->flags & IFF_ALLMULTI ? 1 : -1);
}

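/* ndo_set_rx_mode: rebuild the multicast MAC hash filter used by the
 * rx_handler and sync the unicast/multicast lists to the physical device.
 */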
static void ipvlan_set_multicast_mac_filter(struct net_device *dev)
{
        struct ipvl_dev *ipvlan = netdev_priv(dev);

        if (dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) {
                bitmap_fill(ipvlan->mac_filters, IPVLAN_MAC_FILTER_SIZE);
        } else {
                struct netdev_hw_addr *ha;
                DECLARE_BITMAP(mc_filters, IPVLAN_MAC_FILTER_SIZE);

                bitmap_zero(mc_filters, IPVLAN_MAC_FILTER_SIZE);
                netdev_for_each_mc_addr(ha, dev)
                        __set_bit(ipvlan_mac_hash(ha->addr), mc_filters);

                /* Turn on the broadcast bit irrespective of address family;
                 * broadcast is deferred to a work-queue, so this has no
                 * impact on fast-path processing.
                 */
                __set_bit(ipvlan_mac_hash(dev->broadcast), mc_filters);

                bitmap_copy(ipvlan->mac_filters, mc_filters,
                            IPVLAN_MAC_FILTER_SIZE);
        }
        dev_uc_sync(ipvlan->phy_dev, dev);
        dev_mc_sync(ipvlan->phy_dev, dev);
}

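/* ndo_get_stats64: fold the per-CPU counters into the rtnl_link_stats64
 * reply, using the u64_stats syncp for the 64-bit counters.
 */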
static struct rtnl_link_stats64 *ipvlan_get_stats64(struct net_device *dev,
                                                    struct rtnl_link_stats64 *s)
{
        struct ipvl_dev *ipvlan = netdev_priv(dev);

        if (ipvlan->pcpu_stats) {
                struct ipvl_pcpu_stats *pcptr;
                u64 rx_pkts, rx_bytes, rx_mcast, tx_pkts, tx_bytes;
                u32 rx_errs = 0, tx_drps = 0;
                u32 strt;
                int idx;

                for_each_possible_cpu(idx) {
                        pcptr = per_cpu_ptr(ipvlan->pcpu_stats, idx);
                        do {
                                strt = u64_stats_fetch_begin_irq(&pcptr->syncp);
                                rx_pkts = pcptr->rx_pkts;
                                rx_bytes = pcptr->rx_bytes;
                                rx_mcast = pcptr->rx_mcast;
                                tx_pkts = pcptr->tx_pkts;
                                tx_bytes = pcptr->tx_bytes;
                        } while (u64_stats_fetch_retry_irq(&pcptr->syncp,
                                                           strt));

                        s->rx_packets += rx_pkts;
                        s->rx_bytes += rx_bytes;
                        s->multicast += rx_mcast;
                        s->tx_packets += tx_pkts;
                        s->tx_bytes += tx_bytes;

                        /* u32 values are updated without syncp protection. */
                        rx_errs += pcptr->rx_errs;
                        tx_drps += pcptr->tx_drps;
                }
                s->rx_errors = rx_errs;
                s->rx_dropped = rx_errs;
                s->tx_dropped = tx_drps;
        }
        return s;
}

static int ipvlan_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
{
        struct ipvl_dev *ipvlan = netdev_priv(dev);
        struct net_device *phy_dev = ipvlan->phy_dev;

        return vlan_vid_add(phy_dev, proto, vid);
}

static int ipvlan_vlan_rx_kill_vid(struct net_device *dev, __be16 proto,
                                   u16 vid)
{
        struct ipvl_dev *ipvlan = netdev_priv(dev);
        struct net_device *phy_dev = ipvlan->phy_dev;

        vlan_vid_del(phy_dev, proto, vid);
        return 0;
}

static int ipvlan_get_iflink(const struct net_device *dev)
{
        struct ipvl_dev *ipvlan = netdev_priv(dev);

        return ipvlan->phy_dev->ifindex;
}

static const struct net_device_ops ipvlan_netdev_ops = {
        .ndo_init               = ipvlan_init,
        .ndo_uninit             = ipvlan_uninit,
        .ndo_open               = ipvlan_open,
        .ndo_stop               = ipvlan_stop,
        .ndo_start_xmit         = ipvlan_start_xmit,
        .ndo_fix_features       = ipvlan_fix_features,
        .ndo_change_rx_flags    = ipvlan_change_rx_flags,
        .ndo_set_rx_mode        = ipvlan_set_multicast_mac_filter,
        .ndo_get_stats64        = ipvlan_get_stats64,
        .ndo_vlan_rx_add_vid    = ipvlan_vlan_rx_add_vid,
        .ndo_vlan_rx_kill_vid   = ipvlan_vlan_rx_kill_vid,
        .ndo_get_iflink         = ipvlan_get_iflink,
};

static int ipvlan_hard_header(struct sk_buff *skb, struct net_device *dev,
                              unsigned short type, const void *daddr,
                              const void *saddr, unsigned len)
{
        const struct ipvl_dev *ipvlan = netdev_priv(dev);
        struct net_device *phy_dev = ipvlan->phy_dev;

        /* TODO Probably use a different field than dev_addr so that the
         * mac-address on the virtual device is portable and can be carried
         * while the packets use the mac-addr on the physical device.
         */
        return dev_hard_header(skb, phy_dev, type, daddr,
                               saddr ? : dev->dev_addr, len);
}

static const struct header_ops ipvlan_header_ops = {
        .create         = ipvlan_hard_header,
        .parse          = eth_header_parse,
        .cache          = eth_header_cache,
        .cache_update   = eth_header_cache_update,
};

static int ipvlan_ethtool_get_settings(struct net_device *dev,
                                       struct ethtool_cmd *cmd)
{
        const struct ipvl_dev *ipvlan = netdev_priv(dev);

        return __ethtool_get_settings(ipvlan->phy_dev, cmd);
}

static void ipvlan_ethtool_get_drvinfo(struct net_device *dev,
                                       struct ethtool_drvinfo *drvinfo)
{
        strlcpy(drvinfo->driver, IPVLAN_DRV, sizeof(drvinfo->driver));
        strlcpy(drvinfo->version, IPV_DRV_VER, sizeof(drvinfo->version));
}

static u32 ipvlan_ethtool_get_msglevel(struct net_device *dev)
{
        const struct ipvl_dev *ipvlan = netdev_priv(dev);

        return ipvlan->msg_enable;
}

static void ipvlan_ethtool_set_msglevel(struct net_device *dev, u32 value)
{
        struct ipvl_dev *ipvlan = netdev_priv(dev);

        ipvlan->msg_enable = value;
}

static const struct ethtool_ops ipvlan_ethtool_ops = {
        .get_link       = ethtool_op_get_link,
        .get_settings   = ipvlan_ethtool_get_settings,
        .get_drvinfo    = ipvlan_ethtool_get_drvinfo,
        .get_msglevel   = ipvlan_ethtool_get_msglevel,
        .set_msglevel   = ipvlan_ethtool_set_msglevel,
};

static int ipvlan_nl_changelink(struct net_device *dev,
                                struct nlattr *tb[], struct nlattr *data[])
{
        struct ipvl_dev *ipvlan = netdev_priv(dev);
        struct ipvl_port *port = ipvlan_port_get_rtnl(ipvlan->phy_dev);

        if (!data)
                return 0;
        if (!ns_capable(dev_net(ipvlan->phy_dev)->user_ns, CAP_NET_ADMIN))
                return -EPERM;

        if (data[IFLA_IPVLAN_MODE]) {
                u16 nmode = nla_get_u16(data[IFLA_IPVLAN_MODE]);

                ipvlan_set_port_mode(port, nmode);
        }
        return 0;
}

static size_t ipvlan_nl_getsize(const struct net_device *dev)
{
        return (0
                + nla_total_size(2) /* IFLA_IPVLAN_MODE */
                );
}

static int ipvlan_nl_validate(struct nlattr *tb[], struct nlattr *data[])
{
        if (data && data[IFLA_IPVLAN_MODE]) {
                u16 mode = nla_get_u16(data[IFLA_IPVLAN_MODE]);

                if (mode < IPVLAN_MODE_L2 || mode >= IPVLAN_MODE_MAX)
                        return -EINVAL;
        }
        return 0;
}

static int ipvlan_nl_fillinfo(struct sk_buff *skb,
                              const struct net_device *dev)
{
        struct ipvl_dev *ipvlan = netdev_priv(dev);
        struct ipvl_port *port = ipvlan_port_get_rtnl(ipvlan->phy_dev);
        int ret = -EINVAL;

        if (!port)
                goto err;

        ret = -EMSGSIZE;
        if (nla_put_u16(skb, IFLA_IPVLAN_MODE, port->mode))
                goto err;

        return 0;

err:
        return ret;
}

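/* newlink: resolve the master device (creating its ipvlan port on first
 * use), inherit its MAC address, register the slave device and link it
 * into the port's list of ipvlans.
 */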
static int ipvlan_link_new(struct net *src_net, struct net_device *dev,
                           struct nlattr *tb[], struct nlattr *data[])
{
        struct ipvl_dev *ipvlan = netdev_priv(dev);
        struct ipvl_port *port;
        struct net_device *phy_dev;
        int err;

        if (!tb[IFLA_LINK])
                return -EINVAL;

        phy_dev = __dev_get_by_index(src_net, nla_get_u32(tb[IFLA_LINK]));
        if (!phy_dev)
                return -ENODEV;

        if (netif_is_ipvlan(phy_dev)) {
                struct ipvl_dev *tmp = netdev_priv(phy_dev);

                phy_dev = tmp->phy_dev;
                if (!ns_capable(dev_net(phy_dev)->user_ns, CAP_NET_ADMIN))
                        return -EPERM;
        } else if (!netif_is_ipvlan_port(phy_dev)) {
                err = ipvlan_port_create(phy_dev);
                if (err < 0)
                        return err;
        }

        port = ipvlan_port_get_rtnl(phy_dev);
        if (data && data[IFLA_IPVLAN_MODE])
                port->mode = nla_get_u16(data[IFLA_IPVLAN_MODE]);

        ipvlan->phy_dev = phy_dev;
        ipvlan->dev = dev;
        ipvlan->port = port;
        ipvlan->sfeatures = IPVLAN_FEATURES;
        INIT_LIST_HEAD(&ipvlan->addrs);

        /* TODO Probably put random address here to be presented to the
         * world but keep using the physical-dev address for the outgoing
         * packets.
         */
        memcpy(dev->dev_addr, phy_dev->dev_addr, ETH_ALEN);

        dev->priv_flags |= IFF_IPVLAN_SLAVE;

        port->count += 1;
        err = register_netdevice(dev);
        if (err < 0)
                goto ipvlan_destroy_port;

        err = netdev_upper_dev_link(phy_dev, dev);
        if (err)
                goto ipvlan_destroy_port;

        list_add_tail_rcu(&ipvlan->pnode, &port->ipvlans);
        netif_stacked_transfer_operstate(phy_dev, dev);
        return 0;

ipvlan_destroy_port:
        port->count -= 1;
        if (!port->count)
                ipvlan_port_destroy(phy_dev);

        return err;
}

static void ipvlan_link_delete(struct net_device *dev, struct list_head *head)
{
        struct ipvl_dev *ipvlan = netdev_priv(dev);
        struct ipvl_addr *addr, *next;

        list_for_each_entry_safe(addr, next, &ipvlan->addrs, anode) {
                ipvlan_ht_addr_del(addr);
                list_del(&addr->anode);
                kfree_rcu(addr, rcu);
        }

        list_del_rcu(&ipvlan->pnode);
        unregister_netdevice_queue(dev, head);
        netdev_upper_dev_unlink(ipvlan->phy_dev, dev);
}

static void ipvlan_link_setup(struct net_device *dev)
{
        ether_setup(dev);

        dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING);
        dev->priv_flags |= IFF_UNICAST_FLT | IFF_NO_QUEUE;
        dev->netdev_ops = &ipvlan_netdev_ops;
        dev->destructor = free_netdev;
        dev->header_ops = &ipvlan_header_ops;
        dev->ethtool_ops = &ipvlan_ethtool_ops;
}

static const struct nla_policy ipvlan_nl_policy[IFLA_IPVLAN_MAX + 1] =
{
        [IFLA_IPVLAN_MODE] = { .type = NLA_U16 },
};

static struct rtnl_link_ops ipvlan_link_ops = {
        .kind           = "ipvlan",
        .priv_size      = sizeof(struct ipvl_dev),

        .get_size       = ipvlan_nl_getsize,
        .policy         = ipvlan_nl_policy,
        .validate       = ipvlan_nl_validate,
        .fill_info      = ipvlan_nl_fillinfo,
        .changelink     = ipvlan_nl_changelink,
        .maxtype        = IFLA_IPVLAN_MAX,

        .setup          = ipvlan_link_setup,
        .newlink        = ipvlan_link_new,
        .dellink        = ipvlan_link_delete,
};

static int ipvlan_link_register(struct rtnl_link_ops *ops)
{
        return rtnl_link_register(ops);
}

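/* Netdevice notifier for the master device: propagate operstate, feature,
 * GSO and MTU changes to all slaves, remove the slaves when the master is
 * unregistered and refuse a change of the master's device type.
 */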
static int ipvlan_device_event(struct notifier_block *unused,
                               unsigned long event, void *ptr)
{
        struct net_device *dev = netdev_notifier_info_to_dev(ptr);
        struct ipvl_dev *ipvlan, *next;
        struct ipvl_port *port;
        LIST_HEAD(lst_kill);

        if (!netif_is_ipvlan_port(dev))
                return NOTIFY_DONE;

        port = ipvlan_port_get_rtnl(dev);

        switch (event) {
        case NETDEV_CHANGE:
                list_for_each_entry(ipvlan, &port->ipvlans, pnode)
                        netif_stacked_transfer_operstate(ipvlan->phy_dev,
                                                         ipvlan->dev);
                break;

        case NETDEV_UNREGISTER:
                if (dev->reg_state != NETREG_UNREGISTERING)
                        break;

                list_for_each_entry_safe(ipvlan, next, &port->ipvlans,
                                         pnode)
                        ipvlan->dev->rtnl_link_ops->dellink(ipvlan->dev,
                                                            &lst_kill);
                unregister_netdevice_many(&lst_kill);
                break;

        case NETDEV_FEAT_CHANGE:
                list_for_each_entry(ipvlan, &port->ipvlans, pnode) {
                        ipvlan->dev->gso_max_size = dev->gso_max_size;
                        netdev_update_features(ipvlan->dev);
                }
                break;

        case NETDEV_CHANGEMTU:
                list_for_each_entry(ipvlan, &port->ipvlans, pnode)
                        ipvlan_adjust_mtu(ipvlan, dev);
                break;

        case NETDEV_PRE_TYPE_CHANGE:
                /* Forbid underlying device to change its type. */
                return NOTIFY_BAD;
        }
        return NOTIFY_DONE;
}

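/* Track an IPv6 address assigned to a slave.  The address is refused if it
 * is already claimed by another slave on the same port; otherwise it is
 * added to the slave's list and, if the device is up, to the port's hash.
 */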
static int ipvlan_add_addr6(struct ipvl_dev *ipvlan, struct in6_addr *ip6_addr)
{
        struct ipvl_addr *addr;

        if (ipvlan_addr_busy(ipvlan->port, ip6_addr, true)) {
                netif_err(ipvlan, ifup, ipvlan->dev,
                          "Failed to add IPv6=%pI6c addr for %s intf\n",
                          ip6_addr, ipvlan->dev->name);
                return -EINVAL;
        }
        addr = kzalloc(sizeof(struct ipvl_addr), GFP_ATOMIC);
        if (!addr)
                return -ENOMEM;

        addr->master = ipvlan;
        memcpy(&addr->ip6addr, ip6_addr, sizeof(struct in6_addr));
        addr->atype = IPVL_IPV6;
        list_add_tail(&addr->anode, &ipvlan->addrs);

        /* If the interface is not up, the address will be added to the hash
         * list by ipvlan_open.
         */
        if (netif_running(ipvlan->dev))
                ipvlan_ht_addr_add(ipvlan, addr);

        return 0;
}

static void ipvlan_del_addr6(struct ipvl_dev *ipvlan, struct in6_addr *ip6_addr)
{
        struct ipvl_addr *addr;

        addr = ipvlan_find_addr(ipvlan, ip6_addr, true);
        if (!addr)
                return;

        ipvlan_ht_addr_del(addr);
        list_del(&addr->anode);
        kfree_rcu(addr, rcu);

        return;
}

static int ipvlan_addr6_event(struct notifier_block *unused,
                              unsigned long event, void *ptr)
{
        struct inet6_ifaddr *if6 = (struct inet6_ifaddr *)ptr;
        struct net_device *dev = (struct net_device *)if6->idev->dev;
        struct ipvl_dev *ipvlan = netdev_priv(dev);

        /* FIXME IPv6 autoconf calls us from bh without RTNL */
        if (in_softirq())
                return NOTIFY_DONE;

        if (!netif_is_ipvlan(dev))
                return NOTIFY_DONE;

        if (!ipvlan || !ipvlan->port)
                return NOTIFY_DONE;

        switch (event) {
        case NETDEV_UP:
                if (ipvlan_add_addr6(ipvlan, &if6->addr))
                        return NOTIFY_BAD;
                break;

        case NETDEV_DOWN:
                ipvlan_del_addr6(ipvlan, &if6->addr);
                break;
        }

        return NOTIFY_OK;
}

static int ipvlan_add_addr4(struct ipvl_dev *ipvlan, struct in_addr *ip4_addr)
{
        struct ipvl_addr *addr;

        if (ipvlan_addr_busy(ipvlan->port, ip4_addr, false)) {
                netif_err(ipvlan, ifup, ipvlan->dev,
                          "Failed to add IPv4=%pI4 on %s intf.\n",
                          ip4_addr, ipvlan->dev->name);
                return -EINVAL;
        }
        addr = kzalloc(sizeof(struct ipvl_addr), GFP_KERNEL);
        if (!addr)
                return -ENOMEM;

        addr->master = ipvlan;
        memcpy(&addr->ip4addr, ip4_addr, sizeof(struct in_addr));
        addr->atype = IPVL_IPV4;
        list_add_tail(&addr->anode, &ipvlan->addrs);

        /* If the interface is not up, the address will be added to the hash
         * list by ipvlan_open.
         */
        if (netif_running(ipvlan->dev))
                ipvlan_ht_addr_add(ipvlan, addr);

        return 0;
}

static void ipvlan_del_addr4(struct ipvl_dev *ipvlan, struct in_addr *ip4_addr)
{
        struct ipvl_addr *addr;

        addr = ipvlan_find_addr(ipvlan, ip4_addr, false);
        if (!addr)
                return;

        ipvlan_ht_addr_del(addr);
        list_del(&addr->anode);
        kfree_rcu(addr, rcu);

        return;
}

static int ipvlan_addr4_event(struct notifier_block *unused,
                              unsigned long event, void *ptr)
{
        struct in_ifaddr *if4 = (struct in_ifaddr *)ptr;
        struct net_device *dev = (struct net_device *)if4->ifa_dev->dev;
        struct ipvl_dev *ipvlan = netdev_priv(dev);
        struct in_addr ip4_addr;

        if (!netif_is_ipvlan(dev))
                return NOTIFY_DONE;

        if (!ipvlan || !ipvlan->port)
                return NOTIFY_DONE;

        switch (event) {
        case NETDEV_UP:
                ip4_addr.s_addr = if4->ifa_address;
                if (ipvlan_add_addr4(ipvlan, &ip4_addr))
                        return NOTIFY_BAD;
                break;

        case NETDEV_DOWN:
                ip4_addr.s_addr = if4->ifa_address;
                ipvlan_del_addr4(ipvlan, &ip4_addr);
                break;
        }

        return NOTIFY_OK;
}

static struct notifier_block ipvlan_addr4_notifier_block __read_mostly = {
        .notifier_call = ipvlan_addr4_event,
};

static struct notifier_block ipvlan_notifier_block __read_mostly = {
        .notifier_call = ipvlan_device_event,
};

static struct notifier_block ipvlan_addr6_notifier_block __read_mostly = {
        .notifier_call = ipvlan_addr6_event,
};

static int __init ipvlan_init_module(void)
{
        int err;

        ipvlan_init_secret();
        register_netdevice_notifier(&ipvlan_notifier_block);
        register_inet6addr_notifier(&ipvlan_addr6_notifier_block);
        register_inetaddr_notifier(&ipvlan_addr4_notifier_block);

        err = ipvlan_link_register(&ipvlan_link_ops);
        if (err < 0)
                goto error;

        return 0;
error:
        unregister_inetaddr_notifier(&ipvlan_addr4_notifier_block);
        unregister_inet6addr_notifier(&ipvlan_addr6_notifier_block);
        unregister_netdevice_notifier(&ipvlan_notifier_block);
        return err;
}

static void __exit ipvlan_cleanup_module(void)
{
        rtnl_link_unregister(&ipvlan_link_ops);
        unregister_netdevice_notifier(&ipvlan_notifier_block);
        unregister_inetaddr_notifier(&ipvlan_addr4_notifier_block);
        unregister_inet6addr_notifier(&ipvlan_addr6_notifier_block);
}

module_init(ipvlan_init_module);
module_exit(ipvlan_cleanup_module);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mahesh Bandewar <maheshb@google.com>");
MODULE_DESCRIPTION("Driver for L3 (IPv6/IPv4) based VLANs");
MODULE_ALIAS_RTNL_LINK("ipvlan");