2 * GRE over IPv6 protocol decoder.
4 * Authors: Dmitry Kozlov (xeb@mail.ru)
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
13 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
15 #include <linux/capability.h>
16 #include <linux/module.h>
17 #include <linux/types.h>
18 #include <linux/kernel.h>
19 #include <linux/slab.h>
20 #include <linux/uaccess.h>
21 #include <linux/skbuff.h>
22 #include <linux/netdevice.h>
24 #include <linux/tcp.h>
25 #include <linux/udp.h>
26 #include <linux/if_arp.h>
27 #include <linux/init.h>
28 #include <linux/in6.h>
29 #include <linux/inetdevice.h>
30 #include <linux/igmp.h>
31 #include <linux/netfilter_ipv4.h>
32 #include <linux/etherdevice.h>
33 #include <linux/if_ether.h>
34 #include <linux/hash.h>
35 #include <linux/if_tunnel.h>
36 #include <linux/ip6_tunnel.h>
40 #include <net/ip_tunnels.h>
42 #include <net/protocol.h>
43 #include <net/addrconf.h>
45 #include <net/checksum.h>
46 #include <net/dsfield.h>
47 #include <net/inet_ecn.h>
49 #include <net/net_namespace.h>
50 #include <net/netns/generic.h>
51 #include <net/rtnetlink.h>
54 #include <net/ip6_fib.h>
55 #include <net/ip6_route.h>
56 #include <net/ip6_tunnel.h>
58 #include <net/erspan.h>
59 #include <net/dst_metadata.h>
62 static bool log_ecn_error = true;
63 module_param(log_ecn_error, bool, 0644);
64 MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
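/* The 0644 mode exposes log_ecn_error under the module's
 * /sys/module/<module name>/parameters/ directory, so root can toggle
 * logging of corrupted-ECN packets at runtime.
 */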
66 #define IP6_GRE_HASH_SIZE_SHIFT 5
67 #define IP6_GRE_HASH_SIZE (1 << IP6_GRE_HASH_SIZE_SHIFT)
69 static unsigned int ip6gre_net_id __read_mostly;
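/* Per-netns state (struct ip6gre_net): a hash table of tunnels indexed by
 * [prio][hash], the optional collect_md tunnels, and the ip6gre0 fallback
 * device.
 */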
71 struct ip6_tnl __rcu *tunnels[4][IP6_GRE_HASH_SIZE];
73 struct ip6_tnl __rcu *collect_md_tun;
74 struct ip6_tnl __rcu *collect_md_tun_erspan;
75 struct net_device *fb_tunnel_dev;
78 static struct rtnl_link_ops ip6gre_link_ops __read_mostly;
79 static struct rtnl_link_ops ip6gre_tap_ops __read_mostly;
80 static struct rtnl_link_ops ip6erspan_tap_ops __read_mostly;
81 static int ip6gre_tunnel_init(struct net_device *dev);
82 static void ip6gre_tunnel_setup(struct net_device *dev);
83 static void ip6gre_tunnel_link(struct ip6gre_net *ign, struct ip6_tnl *t);
84 static void ip6gre_tnl_link_config(struct ip6_tnl *t, int set_mtu);
85 static void ip6erspan_tnl_link_config(struct ip6_tnl *t, int set_mtu);
87 /* Tunnel hash table */
97   We require an exact key match, i.e. if a key is present in the packet
98   it will match only a tunnel with the same key; if it is not present,
99   it will match only a keyless tunnel.
101   All keyless packets, if not matched against a configured keyless tunnel,
102   will match the fallback tunnel.
105 #define HASH_KEY(key) (((__force u32)key^((__force u32)key>>4))&(IP6_GRE_HASH_SIZE - 1))
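/* Illustrative example: HASH_KEY xors each nibble of the key with the one
 * above it and keeps the low 5 bits, e.g. key 0x12345678:
 * 0x12345678 ^ 0x01234567 = 0x1317131f, & 0x1f = bucket 31.
 */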
106 static u32 HASH_ADDR(const struct in6_addr *addr)
108 u32 hash = ipv6_addr_hash(addr);
110 return hash_32(hash, IP6_GRE_HASH_SIZE_SHIFT);
113 #define tunnels_r_l tunnels[3]
114 #define tunnels_r tunnels[2]
115 #define tunnels_l tunnels[1]
116 #define tunnels_wc tunnels[0]
118 /* Given src, dst and key, find the appropriate tunnel for input. */
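/* The lookup below walks four buckets in decreasing specificity: exact
 * (remote, local), remote-only, local-only (including multicast), then
 * wildcard. Candidates are penalised for link and device-type mismatches;
 * an exact match wins immediately, otherwise the lowest-scoring candidate
 * is used. With no candidate, a collect_md tunnel and finally the fallback
 * device are tried.
 */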
120 static struct ip6_tnl *ip6gre_tunnel_lookup(struct net_device *dev,
121 const struct in6_addr *remote, const struct in6_addr *local,
122 __be32 key, __be16 gre_proto)
124 struct net *net = dev_net(dev);
125 int link = dev->ifindex;
126 unsigned int h0 = HASH_ADDR(remote);
127 unsigned int h1 = HASH_KEY(key);
128 struct ip6_tnl *t, *cand = NULL;
129 struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
130 int dev_type = (gre_proto == htons(ETH_P_TEB) ||
131 gre_proto == htons(ETH_P_ERSPAN) ||
132 gre_proto == htons(ETH_P_ERSPAN2)) ?
133 ARPHRD_ETHER : ARPHRD_IP6GRE;
134 int score, cand_score = 4;
135 struct net_device *ndev;
137 for_each_ip_tunnel_rcu(t, ign->tunnels_r_l[h0 ^ h1]) {
138 if (!ipv6_addr_equal(local, &t->parms.laddr) ||
139 !ipv6_addr_equal(remote, &t->parms.raddr) ||
140 key != t->parms.i_key ||
141 !(t->dev->flags & IFF_UP))
144 if (t->dev->type != ARPHRD_IP6GRE &&
145 t->dev->type != dev_type)
149 if (t->parms.link != link)
151 if (t->dev->type != dev_type)
156 if (score < cand_score) {
162 for_each_ip_tunnel_rcu(t, ign->tunnels_r[h0 ^ h1]) {
163 if (!ipv6_addr_equal(remote, &t->parms.raddr) ||
164 key != t->parms.i_key ||
165 !(t->dev->flags & IFF_UP))
168 if (t->dev->type != ARPHRD_IP6GRE &&
169 t->dev->type != dev_type)
173 if (t->parms.link != link)
175 if (t->dev->type != dev_type)
180 if (score < cand_score) {
186 for_each_ip_tunnel_rcu(t, ign->tunnels_l[h1]) {
187 if ((!ipv6_addr_equal(local, &t->parms.laddr) &&
188 (!ipv6_addr_equal(local, &t->parms.raddr) ||
189 !ipv6_addr_is_multicast(local))) ||
190 key != t->parms.i_key ||
191 !(t->dev->flags & IFF_UP))
194 if (t->dev->type != ARPHRD_IP6GRE &&
195 t->dev->type != dev_type)
199 if (t->parms.link != link)
201 if (t->dev->type != dev_type)
206 if (score < cand_score) {
212 for_each_ip_tunnel_rcu(t, ign->tunnels_wc[h1]) {
213 if (t->parms.i_key != key ||
214 !(t->dev->flags & IFF_UP))
217 if (t->dev->type != ARPHRD_IP6GRE &&
218 t->dev->type != dev_type)
222 if (t->parms.link != link)
224 if (t->dev->type != dev_type)
229 if (score < cand_score) {
238 if (gre_proto == htons(ETH_P_ERSPAN) ||
239 gre_proto == htons(ETH_P_ERSPAN2))
240 t = rcu_dereference(ign->collect_md_tun_erspan);
242 t = rcu_dereference(ign->collect_md_tun);
244 if (t && t->dev->flags & IFF_UP)
247 ndev = READ_ONCE(ign->fb_tunnel_dev);
248 if (ndev && ndev->flags & IFF_UP)
249 return netdev_priv(ndev);
254 static struct ip6_tnl __rcu **__ip6gre_bucket(struct ip6gre_net *ign,
255 const struct __ip6_tnl_parm *p)
257 const struct in6_addr *remote = &p->raddr;
258 const struct in6_addr *local = &p->laddr;
259 unsigned int h = HASH_KEY(p->i_key);
262 if (!ipv6_addr_any(local))
264 if (!ipv6_addr_any(remote) && !ipv6_addr_is_multicast(remote)) {
266 h ^= HASH_ADDR(remote);
269 return &ign->tunnels[prio][h];
272 static void ip6gre_tunnel_link_md(struct ip6gre_net *ign, struct ip6_tnl *t)
274 if (t->parms.collect_md)
275 rcu_assign_pointer(ign->collect_md_tun, t);
278 static void ip6erspan_tunnel_link_md(struct ip6gre_net *ign, struct ip6_tnl *t)
280 if (t->parms.collect_md)
281 rcu_assign_pointer(ign->collect_md_tun_erspan, t);
284 static void ip6gre_tunnel_unlink_md(struct ip6gre_net *ign, struct ip6_tnl *t)
286 if (t->parms.collect_md)
287 rcu_assign_pointer(ign->collect_md_tun, NULL);
290 static void ip6erspan_tunnel_unlink_md(struct ip6gre_net *ign,
293 if (t->parms.collect_md)
294 rcu_assign_pointer(ign->collect_md_tun_erspan, NULL);
297 static inline struct ip6_tnl __rcu **ip6gre_bucket(struct ip6gre_net *ign,
298 const struct ip6_tnl *t)
300 return __ip6gre_bucket(ign, &t->parms);
303 static void ip6gre_tunnel_link(struct ip6gre_net *ign, struct ip6_tnl *t)
305 struct ip6_tnl __rcu **tp = ip6gre_bucket(ign, t);
307 rcu_assign_pointer(t->next, rtnl_dereference(*tp));
308 rcu_assign_pointer(*tp, t);
311 static void ip6gre_tunnel_unlink(struct ip6gre_net *ign, struct ip6_tnl *t)
313 struct ip6_tnl __rcu **tp;
314 struct ip6_tnl *iter;
316 for (tp = ip6gre_bucket(ign, t);
317 (iter = rtnl_dereference(*tp)) != NULL;
320 rcu_assign_pointer(*tp, t->next);
326 static struct ip6_tnl *ip6gre_tunnel_find(struct net *net,
327 const struct __ip6_tnl_parm *parms,
330 const struct in6_addr *remote = &parms->raddr;
331 const struct in6_addr *local = &parms->laddr;
332 __be32 key = parms->i_key;
333 int link = parms->link;
335 struct ip6_tnl __rcu **tp;
336 struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
338 for (tp = __ip6gre_bucket(ign, parms);
339 (t = rtnl_dereference(*tp)) != NULL;
341 if (ipv6_addr_equal(local, &t->parms.laddr) &&
342 ipv6_addr_equal(remote, &t->parms.raddr) &&
343 key == t->parms.i_key &&
344 link == t->parms.link &&
345 type == t->dev->type)
351 static struct ip6_tnl *ip6gre_tunnel_locate(struct net *net,
352 const struct __ip6_tnl_parm *parms, int create)
354 struct ip6_tnl *t, *nt;
355 struct net_device *dev;
357 struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
359 t = ip6gre_tunnel_find(net, parms, ARPHRD_IP6GRE);
365 if (parms->name[0]) {
366 if (!dev_valid_name(parms->name))
368 strlcpy(name, parms->name, IFNAMSIZ);
370 strcpy(name, "ip6gre%d");
372 dev = alloc_netdev(sizeof(*t), name, NET_NAME_UNKNOWN,
373 ip6gre_tunnel_setup);
377 dev_net_set(dev, net);
379 nt = netdev_priv(dev);
381 dev->rtnl_link_ops = &ip6gre_link_ops;
384 nt->net = dev_net(dev);
386 if (register_netdevice(dev) < 0)
389 ip6gre_tnl_link_config(nt, 1);
391 /* Can use a lockless transmit, unless we generate output sequences */
392 if (!(nt->parms.o_flags & TUNNEL_SEQ))
393 dev->features |= NETIF_F_LLTX;
395 ip6gre_tunnel_link(ign, nt);
403 static void ip6erspan_tunnel_uninit(struct net_device *dev)
405 struct ip6_tnl *t = netdev_priv(dev);
406 struct ip6gre_net *ign = net_generic(t->net, ip6gre_net_id);
408 ip6erspan_tunnel_unlink_md(ign, t);
409 ip6gre_tunnel_unlink(ign, t);
410 dst_cache_reset(&t->dst_cache);
414 static void ip6gre_tunnel_uninit(struct net_device *dev)
416 struct ip6_tnl *t = netdev_priv(dev);
417 struct ip6gre_net *ign = net_generic(t->net, ip6gre_net_id);
419 ip6gre_tunnel_unlink_md(ign, t);
420 ip6gre_tunnel_unlink(ign, t);
421 if (ign->fb_tunnel_dev == dev)
422 WRITE_ONCE(ign->fb_tunnel_dev, NULL);
423 dst_cache_reset(&t->dst_cache);
428 static void ip6gre_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
429 u8 type, u8 code, int offset, __be32 info)
431 struct net *net = dev_net(skb->dev);
432 const struct gre_base_hdr *greh;
433 const struct ipv6hdr *ipv6h;
434 int grehlen = sizeof(*greh);
440 if (!pskb_may_pull(skb, offset + grehlen))
442 greh = (const struct gre_base_hdr *)(skb->data + offset);
444 if (flags & (GRE_VERSION | GRE_ROUTING))
446 if (flags & GRE_CSUM)
448 if (flags & GRE_KEY) {
449 key_off = grehlen + offset;
453 if (!pskb_may_pull(skb, offset + grehlen))
455 ipv6h = (const struct ipv6hdr *)skb->data;
456 greh = (const struct gre_base_hdr *)(skb->data + offset);
457 key = key_off ? *(__be32 *)(skb->data + key_off) : 0;
459 t = ip6gre_tunnel_lookup(skb->dev, &ipv6h->daddr, &ipv6h->saddr,
460 key, greh->protocol);
465 struct ipv6_tlv_tnl_enc_lim *tel;
467 case ICMPV6_DEST_UNREACH:
468 net_dbg_ratelimited("%s: Path to destination invalid or inactive!\n",
470 if (code != ICMPV6_PORT_UNREACH)
473 case ICMPV6_TIME_EXCEED:
474 if (code == ICMPV6_EXC_HOPLIMIT) {
475 net_dbg_ratelimited("%s: Too small hop limit or routing loop in tunnel!\n",
480 case ICMPV6_PARAMPROB:
482 if (code == ICMPV6_HDR_FIELD)
483 teli = ip6_tnl_parse_tlv_enc_lim(skb, skb->data);
485 if (teli && teli == be32_to_cpu(info) - 2) {
486 tel = (struct ipv6_tlv_tnl_enc_lim *) &skb->data[teli];
487 if (tel->encap_limit == 0) {
488 net_dbg_ratelimited("%s: Too small encapsulation limit or routing loop in tunnel!\n",
492 net_dbg_ratelimited("%s: Recipient unable to parse tunneled packet!\n",
496 case ICMPV6_PKT_TOOBIG:
497 ip6_update_pmtu(skb, net, info, 0, 0, sock_net_uid(net, NULL));
500 ip6_redirect(skb, net, skb->dev->ifindex, 0,
501 sock_net_uid(net, NULL));
505 if (time_before(jiffies, t->err_time + IP6TUNNEL_ERR_TIMEO))
509 t->err_time = jiffies;
512 static int ip6gre_rcv(struct sk_buff *skb, const struct tnl_ptk_info *tpi)
514 const struct ipv6hdr *ipv6h;
515 struct ip6_tnl *tunnel;
517 ipv6h = ipv6_hdr(skb);
518 tunnel = ip6gre_tunnel_lookup(skb->dev,
519 &ipv6h->saddr, &ipv6h->daddr, tpi->key,
522 if (tunnel->parms.collect_md) {
523 struct metadata_dst *tun_dst;
528 tun_id = key32_to_tunnel_id(tpi->key);
530 tun_dst = ipv6_tun_rx_dst(skb, flags, tun_id, 0);
532 return PACKET_REJECT;
534 ip6_tnl_rcv(tunnel, skb, tpi, tun_dst, log_ecn_error);
536 ip6_tnl_rcv(tunnel, skb, tpi, NULL, log_ecn_error);
542 return PACKET_REJECT;
545 static int ip6erspan_rcv(struct sk_buff *skb, struct tnl_ptk_info *tpi,
548 struct erspan_base_hdr *ershdr;
549 const struct ipv6hdr *ipv6h;
550 struct erspan_md2 *md2;
551 struct ip6_tnl *tunnel;
554 ipv6h = ipv6_hdr(skb);
555 ershdr = (struct erspan_base_hdr *)skb->data;
558 tunnel = ip6gre_tunnel_lookup(skb->dev,
559 &ipv6h->saddr, &ipv6h->daddr, tpi->key,
562 int len = erspan_hdr_len(ver);
564 if (unlikely(!pskb_may_pull(skb, len)))
565 return PACKET_REJECT;
567 if (__iptunnel_pull_header(skb, len,
570 return PACKET_REJECT;
572 if (tunnel->parms.collect_md) {
573 struct erspan_metadata *pkt_md, *md;
574 struct metadata_dst *tun_dst;
575 struct ip_tunnel_info *info;
580 tpi->flags |= TUNNEL_KEY;
582 tun_id = key32_to_tunnel_id(tpi->key);
584 tun_dst = ipv6_tun_rx_dst(skb, flags, tun_id,
587 return PACKET_REJECT;
589 			/* skb can be uncloned in __iptunnel_pull_header, so the old
590 			 * pkt_md is no longer valid; re-read it from the pulled data. */
593 gh = skb_network_header(skb) +
594 skb_network_header_len(skb);
595 pkt_md = (struct erspan_metadata *)(gh + gre_hdr_len +
597 info = &tun_dst->u.tun_info;
598 md = ip_tunnel_info_opts(info);
601 memcpy(md2, pkt_md, ver == 1 ? ERSPAN_V1_MDSIZE :
603 info->key.tun_flags |= TUNNEL_ERSPAN_OPT;
604 info->options_len = sizeof(*md);
606 ip6_tnl_rcv(tunnel, skb, tpi, tun_dst, log_ecn_error);
609 ip6_tnl_rcv(tunnel, skb, tpi, NULL, log_ecn_error);
615 return PACKET_REJECT;
618 static int gre_rcv(struct sk_buff *skb)
620 struct tnl_ptk_info tpi;
621 bool csum_err = false;
624 hdr_len = gre_parse_header(skb, &tpi, &csum_err, htons(ETH_P_IPV6), 0);
628 if (iptunnel_pull_header(skb, hdr_len, tpi.proto, false))
631 if (unlikely(tpi.proto == htons(ETH_P_ERSPAN) ||
632 tpi.proto == htons(ETH_P_ERSPAN2))) {
633 if (ip6erspan_rcv(skb, &tpi, hdr_len) == PACKET_RCVD)
638 if (ip6gre_rcv(skb, &tpi) == PACKET_RCVD)
642 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
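/* Mark the inner skb for GRE segmentation/checksum offload before the
 * outer headers are pushed; csum selects SKB_GSO_GRE_CSUM over SKB_GSO_GRE.
 */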
648 static int gre_handle_offloads(struct sk_buff *skb, bool csum)
650 return iptunnel_handle_offloads(skb,
651 csum ? SKB_GSO_GRE_CSUM : SKB_GSO_GRE);
654 static void prepare_ip6gre_xmit_ipv4(struct sk_buff *skb,
655 struct net_device *dev,
656 struct flowi6 *fl6, __u8 *dsfield,
659 const struct iphdr *iph = ip_hdr(skb);
660 struct ip6_tnl *t = netdev_priv(dev);
662 if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
663 *encap_limit = t->parms.encap_limit;
665 memcpy(fl6, &t->fl.u.ip6, sizeof(*fl6));
667 if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)
668 *dsfield = ipv4_get_dsfield(iph);
670 *dsfield = ip6_tclass(t->parms.flowinfo);
672 if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK)
673 fl6->flowi6_mark = skb->mark;
675 fl6->flowi6_mark = t->parms.fwmark;
677 fl6->flowi6_uid = sock_net_uid(dev_net(dev), NULL);
680 static int prepare_ip6gre_xmit_ipv6(struct sk_buff *skb,
681 struct net_device *dev,
682 struct flowi6 *fl6, __u8 *dsfield,
685 struct ipv6hdr *ipv6h;
686 struct ip6_tnl *t = netdev_priv(dev);
689 offset = ip6_tnl_parse_tlv_enc_lim(skb, skb_network_header(skb));
690 /* ip6_tnl_parse_tlv_enc_lim() might have reallocated skb->head */
691 ipv6h = ipv6_hdr(skb);
694 struct ipv6_tlv_tnl_enc_lim *tel;
696 tel = (struct ipv6_tlv_tnl_enc_lim *)&skb_network_header(skb)[offset];
697 if (tel->encap_limit == 0) {
698 icmpv6_send(skb, ICMPV6_PARAMPROB,
699 ICMPV6_HDR_FIELD, offset + 2);
702 *encap_limit = tel->encap_limit - 1;
703 } else if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT)) {
704 *encap_limit = t->parms.encap_limit;
707 memcpy(fl6, &t->fl.u.ip6, sizeof(*fl6));
709 if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)
710 *dsfield = ipv6_get_dsfield(ipv6h);
712 *dsfield = ip6_tclass(t->parms.flowinfo);
714 if (t->parms.flags & IP6_TNL_F_USE_ORIG_FLOWLABEL)
715 fl6->flowlabel |= ip6_flowlabel(ipv6h);
717 if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK)
718 fl6->flowi6_mark = skb->mark;
720 fl6->flowi6_mark = t->parms.fwmark;
722 fl6->flowi6_uid = sock_net_uid(dev_net(dev), NULL);
727 static netdev_tx_t __gre6_xmit(struct sk_buff *skb,
728 struct net_device *dev, __u8 dsfield,
729 struct flowi6 *fl6, int encap_limit,
730 __u32 *pmtu, __be16 proto)
732 struct ip6_tnl *tunnel = netdev_priv(dev);
736 if (dev->type == ARPHRD_ETHER)
737 IPCB(skb)->flags = 0;
739 if (dev->header_ops && dev->type == ARPHRD_IP6GRE)
740 fl6->daddr = ((struct ipv6hdr *)skb->data)->daddr;
742 fl6->daddr = tunnel->parms.raddr;
744 /* Push GRE header. */
745 protocol = (dev->type == ARPHRD_ETHER) ? htons(ETH_P_TEB) : proto;
747 if (tunnel->parms.collect_md) {
748 struct ip_tunnel_info *tun_info;
749 const struct ip_tunnel_key *key;
752 tun_info = skb_tunnel_info(skb);
753 if (unlikely(!tun_info ||
754 !(tun_info->mode & IP_TUNNEL_INFO_TX) ||
755 ip_tunnel_info_af(tun_info) != AF_INET6))
758 key = &tun_info->key;
759 memset(fl6, 0, sizeof(*fl6));
760 fl6->flowi6_proto = IPPROTO_GRE;
761 fl6->daddr = key->u.ipv6.dst;
762 fl6->flowlabel = key->label;
763 fl6->flowi6_uid = sock_net_uid(dev_net(dev), NULL);
766 flags = key->tun_flags &
767 (TUNNEL_CSUM | TUNNEL_KEY | TUNNEL_SEQ);
768 tun_hlen = gre_calc_hlen(flags);
770 if (skb_cow_head(skb, dev->needed_headroom ?: tun_hlen + tunnel->encap_hlen))
773 gre_build_header(skb, tun_hlen,
775 tunnel_id_to_key32(tun_info->key.tun_id),
776 (flags & TUNNEL_SEQ) ? htonl(atomic_fetch_inc(&tunnel->o_seqno))
780 if (skb_cow_head(skb, dev->needed_headroom ?: tunnel->hlen))
783 flags = tunnel->parms.o_flags;
785 gre_build_header(skb, tunnel->tun_hlen, flags,
786 protocol, tunnel->parms.o_key,
787 (flags & TUNNEL_SEQ) ? htonl(atomic_fetch_inc(&tunnel->o_seqno))
791 return ip6_tnl_xmit(skb, dev, dsfield, fl6, encap_limit, pmtu,
795 static inline int ip6gre_xmit_ipv4(struct sk_buff *skb, struct net_device *dev)
797 struct ip6_tnl *t = netdev_priv(dev);
798 int encap_limit = -1;
804 memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
806 if (!t->parms.collect_md)
807 prepare_ip6gre_xmit_ipv4(skb, dev, &fl6,
808 &dsfield, &encap_limit);
810 err = gre_handle_offloads(skb, !!(t->parms.o_flags & TUNNEL_CSUM));
814 err = __gre6_xmit(skb, dev, dsfield, &fl6, encap_limit, &mtu,
817 /* XXX: send ICMP error even if DF is not set. */
818 if (err == -EMSGSIZE)
819 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
827 static inline int ip6gre_xmit_ipv6(struct sk_buff *skb, struct net_device *dev)
829 struct ip6_tnl *t = netdev_priv(dev);
830 struct ipv6hdr *ipv6h = ipv6_hdr(skb);
831 int encap_limit = -1;
837 if (ipv6_addr_equal(&t->parms.raddr, &ipv6h->saddr))
840 if (!t->parms.collect_md &&
841 prepare_ip6gre_xmit_ipv6(skb, dev, &fl6, &dsfield, &encap_limit))
844 if (gre_handle_offloads(skb, !!(t->parms.o_flags & TUNNEL_CSUM)))
847 err = __gre6_xmit(skb, dev, dsfield, &fl6, encap_limit,
848 &mtu, skb->protocol);
850 if (err == -EMSGSIZE)
851 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
859 * ip6gre_tnl_addr_conflict - compare packet addresses to tunnel's own
860 * @t: the outgoing tunnel device
861 * @hdr: IPv6 header from the incoming packet
864 * Avoid trivial tunneling loop by checking that tunnel exit-point
865 * doesn't match source of incoming packet.
872 static inline bool ip6gre_tnl_addr_conflict(const struct ip6_tnl *t,
873 const struct ipv6hdr *hdr)
875 return ipv6_addr_equal(&t->parms.raddr, &hdr->saddr);
878 static int ip6gre_xmit_other(struct sk_buff *skb, struct net_device *dev)
880 struct ip6_tnl *t = netdev_priv(dev);
881 int encap_limit = -1;
886 if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
887 encap_limit = t->parms.encap_limit;
889 if (!t->parms.collect_md)
890 memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6));
892 err = gre_handle_offloads(skb, !!(t->parms.o_flags & TUNNEL_CSUM));
896 err = __gre6_xmit(skb, dev, 0, &fl6, encap_limit, &mtu, skb->protocol);
901 static netdev_tx_t ip6gre_tunnel_xmit(struct sk_buff *skb,
902 struct net_device *dev)
904 struct ip6_tnl *t = netdev_priv(dev);
905 struct net_device_stats *stats = &t->dev->stats;
908 if (!pskb_inet_may_pull(skb))
911 if (!ip6_tnl_xmit_ctl(t, &t->parms.laddr, &t->parms.raddr))
914 switch (skb->protocol) {
915 case htons(ETH_P_IP):
916 ret = ip6gre_xmit_ipv4(skb, dev);
918 case htons(ETH_P_IPV6):
919 ret = ip6gre_xmit_ipv6(skb, dev);
922 ret = ip6gre_xmit_other(skb, dev);
938 static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb,
939 struct net_device *dev)
941 struct ip6_tnl *t = netdev_priv(dev);
942 struct dst_entry *dst = skb_dst(skb);
943 struct net_device_stats *stats;
944 bool truncate = false;
945 int encap_limit = -1;
946 	__u8 dsfield = 0;
953 if (!pskb_inet_may_pull(skb))
956 if (!ip6_tnl_xmit_ctl(t, &t->parms.laddr, &t->parms.raddr))
959 if (gre_handle_offloads(skb, false))
962 if (skb->len > dev->mtu + dev->hard_header_len) {
963 pskb_trim(skb, dev->mtu + dev->hard_header_len);
967 nhoff = skb_network_offset(skb);
968 if (skb->protocol == htons(ETH_P_IP) &&
969 (ntohs(ip_hdr(skb)->tot_len) > skb->len - nhoff))
972 if (skb->protocol == htons(ETH_P_IPV6)) {
975 if (skb_transport_header_was_set(skb))
976 thoff = skb_transport_offset(skb);
978 thoff = nhoff + sizeof(struct ipv6hdr);
979 if (ntohs(ipv6_hdr(skb)->payload_len) > skb->len - thoff)
983 if (skb_cow_head(skb, dev->needed_headroom ?: t->hlen))
986 t->parms.o_flags &= ~TUNNEL_KEY;
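	/* The ERSPAN session ID is carried in the ERSPAN header (built from
	 * o_key below), so the GRE KEY bit is cleared; the GRE header itself
	 * carries only a sequence number.
	 */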
987 IPCB(skb)->flags = 0;
989 	/* For collect_md mode, derive fl6 from the tunnel key;
990 * for native mode, call prepare_ip6gre_xmit_{ipv4,ipv6}.
992 if (t->parms.collect_md) {
993 struct ip_tunnel_info *tun_info;
994 const struct ip_tunnel_key *key;
995 struct erspan_metadata *md;
998 tun_info = skb_tunnel_info(skb);
999 if (unlikely(!tun_info ||
1000 !(tun_info->mode & IP_TUNNEL_INFO_TX) ||
1001 ip_tunnel_info_af(tun_info) != AF_INET6))
1004 key = &tun_info->key;
1005 memset(&fl6, 0, sizeof(fl6));
1006 fl6.flowi6_proto = IPPROTO_GRE;
1007 fl6.daddr = key->u.ipv6.dst;
1008 fl6.flowlabel = key->label;
1009 fl6.flowi6_uid = sock_net_uid(dev_net(dev), NULL);
1012 if (!(tun_info->key.tun_flags & TUNNEL_ERSPAN_OPT))
1014 if (tun_info->options_len < sizeof(*md))
1016 md = ip_tunnel_info_opts(tun_info);
1018 tun_id = tunnel_id_to_key32(key->tun_id);
1019 if (md->version == 1) {
1020 erspan_build_header(skb,
1022 ntohl(md->u.index), truncate,
1024 proto = htons(ETH_P_ERSPAN);
1025 } else if (md->version == 2) {
1026 erspan_build_header_v2(skb,
1029 get_hwid(&md->u.md2),
1031 proto = htons(ETH_P_ERSPAN2);
1036 switch (skb->protocol) {
1037 case htons(ETH_P_IP):
1038 memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
1039 prepare_ip6gre_xmit_ipv4(skb, dev, &fl6,
1040 &dsfield, &encap_limit);
1042 case htons(ETH_P_IPV6):
1043 if (ipv6_addr_equal(&t->parms.raddr, &ipv6_hdr(skb)->saddr))
1045 if (prepare_ip6gre_xmit_ipv6(skb, dev, &fl6,
1046 &dsfield, &encap_limit))
1050 memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6));
1054 if (t->parms.erspan_ver == 1) {
1055 erspan_build_header(skb, ntohl(t->parms.o_key),
1058 proto = htons(ETH_P_ERSPAN);
1059 } else if (t->parms.erspan_ver == 2) {
1060 erspan_build_header_v2(skb, ntohl(t->parms.o_key),
1064 proto = htons(ETH_P_ERSPAN2);
1069 fl6.daddr = t->parms.raddr;
1072 /* Push GRE header. */
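	/* The fixed header length of 8 is the 4-byte GRE base header plus the
	 * 4-byte sequence number.
	 */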
1073 gre_build_header(skb, 8, TUNNEL_SEQ, proto, 0, htonl(atomic_fetch_inc(&t->o_seqno)));
1075 /* TooBig packet may have updated dst->dev's mtu */
1076 if (!t->parms.collect_md && dst && dst_mtu(dst) > dst->dev->mtu)
1077 dst->ops->update_pmtu(dst, NULL, skb, dst->dev->mtu, false);
1079 err = ip6_tnl_xmit(skb, dev, dsfield, &fl6, encap_limit, &mtu,
1082 /* XXX: send ICMP error even if DF is not set. */
1083 if (err == -EMSGSIZE) {
1084 if (skb->protocol == htons(ETH_P_IP))
1085 icmp_send(skb, ICMP_DEST_UNREACH,
1086 ICMP_FRAG_NEEDED, htonl(mtu));
1088 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
1093 return NETDEV_TX_OK;
1096 stats = &t->dev->stats;
1098 stats->tx_dropped++;
1100 return NETDEV_TX_OK;
1103 static void ip6gre_tnl_link_config_common(struct ip6_tnl *t)
1105 struct net_device *dev = t->dev;
1106 struct __ip6_tnl_parm *p = &t->parms;
1107 struct flowi6 *fl6 = &t->fl.u.ip6;
1109 if (dev->type != ARPHRD_ETHER) {
1110 memcpy(dev->dev_addr, &p->laddr, sizeof(struct in6_addr));
1111 memcpy(dev->broadcast, &p->raddr, sizeof(struct in6_addr));
1114 /* Set up flowi template */
1115 fl6->saddr = p->laddr;
1116 fl6->daddr = p->raddr;
1117 fl6->flowi6_oif = p->link;
1119 fl6->flowi6_proto = IPPROTO_GRE;
1121 if (!(p->flags&IP6_TNL_F_USE_ORIG_TCLASS))
1122 fl6->flowlabel |= IPV6_TCLASS_MASK & p->flowinfo;
1123 if (!(p->flags&IP6_TNL_F_USE_ORIG_FLOWLABEL))
1124 fl6->flowlabel |= IPV6_FLOWLABEL_MASK & p->flowinfo;
1126 p->flags &= ~(IP6_TNL_F_CAP_XMIT|IP6_TNL_F_CAP_RCV|IP6_TNL_F_CAP_PER_PACKET);
1127 p->flags |= ip6_tnl_get_cap(t, &p->laddr, &p->raddr);
1129 if (p->flags&IP6_TNL_F_CAP_XMIT &&
1130 p->flags&IP6_TNL_F_CAP_RCV && dev->type != ARPHRD_ETHER)
1131 dev->flags |= IFF_POINTOPOINT;
1133 dev->flags &= ~IFF_POINTOPOINT;
1136 static void ip6gre_tnl_link_config_route(struct ip6_tnl *t, int set_mtu,
1139 const struct __ip6_tnl_parm *p = &t->parms;
1140 struct net_device *dev = t->dev;
1142 if (p->flags & IP6_TNL_F_CAP_XMIT) {
1143 int strict = (ipv6_addr_type(&p->raddr) &
1144 (IPV6_ADDR_MULTICAST|IPV6_ADDR_LINKLOCAL));
1146 struct rt6_info *rt = rt6_lookup(t->net,
1147 &p->raddr, &p->laddr,
1148 p->link, NULL, strict);
1154 unsigned short dst_len = rt->dst.dev->hard_header_len +
1157 if (t->dev->header_ops)
1158 dev->hard_header_len = dst_len;
1160 dev->needed_headroom = dst_len;
1163 int mtu = rt->dst.dev->mtu - t_hlen;
1165 if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
1167 if (dev->type == ARPHRD_ETHER)
1170 if (mtu < IPV6_MIN_MTU)
1172 WRITE_ONCE(dev->mtu, mtu);
1179 static int ip6gre_calc_hlen(struct ip6_tnl *tunnel)
1183 tunnel->tun_hlen = gre_calc_hlen(tunnel->parms.o_flags);
1184 tunnel->hlen = tunnel->tun_hlen + tunnel->encap_hlen;
1186 t_hlen = tunnel->hlen + sizeof(struct ipv6hdr);
1188 if (tunnel->dev->header_ops)
1189 tunnel->dev->hard_header_len = LL_MAX_HEADER + t_hlen;
1191 tunnel->dev->needed_headroom = LL_MAX_HEADER + t_hlen;
1196 static void ip6gre_tnl_link_config(struct ip6_tnl *t, int set_mtu)
1198 ip6gre_tnl_link_config_common(t);
1199 ip6gre_tnl_link_config_route(t, set_mtu, ip6gre_calc_hlen(t));
1202 static void ip6gre_tnl_copy_tnl_parm(struct ip6_tnl *t,
1203 const struct __ip6_tnl_parm *p)
1205 t->parms.laddr = p->laddr;
1206 t->parms.raddr = p->raddr;
1207 t->parms.flags = p->flags;
1208 t->parms.hop_limit = p->hop_limit;
1209 t->parms.encap_limit = p->encap_limit;
1210 t->parms.flowinfo = p->flowinfo;
1211 t->parms.link = p->link;
1212 t->parms.proto = p->proto;
1213 t->parms.i_key = p->i_key;
1214 t->parms.o_key = p->o_key;
1215 t->parms.i_flags = p->i_flags;
1216 t->parms.o_flags = p->o_flags;
1217 t->parms.fwmark = p->fwmark;
1218 t->parms.erspan_ver = p->erspan_ver;
1219 t->parms.index = p->index;
1220 t->parms.dir = p->dir;
1221 t->parms.hwid = p->hwid;
1222 dst_cache_reset(&t->dst_cache);
1225 static int ip6gre_tnl_change(struct ip6_tnl *t, const struct __ip6_tnl_parm *p,
1228 ip6gre_tnl_copy_tnl_parm(t, p);
1229 ip6gre_tnl_link_config(t, set_mtu);
1233 static void ip6gre_tnl_parm_from_user(struct __ip6_tnl_parm *p,
1234 const struct ip6_tnl_parm2 *u)
1236 p->laddr = u->laddr;
1237 p->raddr = u->raddr;
1238 p->flags = u->flags;
1239 p->hop_limit = u->hop_limit;
1240 p->encap_limit = u->encap_limit;
1241 p->flowinfo = u->flowinfo;
1243 p->i_key = u->i_key;
1244 p->o_key = u->o_key;
1245 p->i_flags = gre_flags_to_tnl_flags(u->i_flags);
1246 p->o_flags = gre_flags_to_tnl_flags(u->o_flags);
1247 memcpy(p->name, u->name, sizeof(u->name));
1250 static void ip6gre_tnl_parm_to_user(struct ip6_tnl_parm2 *u,
1251 const struct __ip6_tnl_parm *p)
1253 u->proto = IPPROTO_GRE;
1254 u->laddr = p->laddr;
1255 u->raddr = p->raddr;
1256 u->flags = p->flags;
1257 u->hop_limit = p->hop_limit;
1258 u->encap_limit = p->encap_limit;
1259 u->flowinfo = p->flowinfo;
1261 u->i_key = p->i_key;
1262 u->o_key = p->o_key;
1263 u->i_flags = gre_tnl_flags_to_gre_flags(p->i_flags);
1264 u->o_flags = gre_tnl_flags_to_gre_flags(p->o_flags);
1265 memcpy(u->name, p->name, sizeof(u->name));
1268 static int ip6gre_tunnel_ioctl(struct net_device *dev,
1269 struct ifreq *ifr, int cmd)
1272 struct ip6_tnl_parm2 p;
1273 struct __ip6_tnl_parm p1;
1274 struct ip6_tnl *t = netdev_priv(dev);
1275 struct net *net = t->net;
1276 struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
1278 memset(&p1, 0, sizeof(p1));
1282 if (dev == ign->fb_tunnel_dev) {
1283 if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p))) {
1287 ip6gre_tnl_parm_from_user(&p1, &p);
1288 t = ip6gre_tunnel_locate(net, &p1, 0);
1290 t = netdev_priv(dev);
1292 memset(&p, 0, sizeof(p));
1293 ip6gre_tnl_parm_to_user(&p, &t->parms);
1294 if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
1301 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
1305 if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
1309 if ((p.i_flags|p.o_flags)&(GRE_VERSION|GRE_ROUTING))
1312 if (!(p.i_flags&GRE_KEY))
1314 if (!(p.o_flags&GRE_KEY))
1317 ip6gre_tnl_parm_from_user(&p1, &p);
1318 t = ip6gre_tunnel_locate(net, &p1, cmd == SIOCADDTUNNEL);
1320 if (dev != ign->fb_tunnel_dev && cmd == SIOCCHGTUNNEL) {
1322 if (t->dev != dev) {
1327 t = netdev_priv(dev);
1329 ip6gre_tunnel_unlink(ign, t);
1331 ip6gre_tnl_change(t, &p1, 1);
1332 ip6gre_tunnel_link(ign, t);
1333 netdev_state_change(dev);
1340 memset(&p, 0, sizeof(p));
1341 ip6gre_tnl_parm_to_user(&p, &t->parms);
1342 if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
1345 err = (cmd == SIOCADDTUNNEL ? -ENOBUFS : -ENOENT);
1350 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
1353 if (dev == ign->fb_tunnel_dev) {
1355 if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
1358 ip6gre_tnl_parm_from_user(&p1, &p);
1359 t = ip6gre_tunnel_locate(net, &p1, 0);
1363 if (t == netdev_priv(ign->fb_tunnel_dev))
1367 unregister_netdevice(dev);
1379 static int ip6gre_header(struct sk_buff *skb, struct net_device *dev,
1380 unsigned short type, const void *daddr,
1381 const void *saddr, unsigned int len)
1383 struct ip6_tnl *t = netdev_priv(dev);
1384 struct ipv6hdr *ipv6h;
1387 ipv6h = skb_push(skb, t->hlen + sizeof(*ipv6h));
1388 ip6_flow_hdr(ipv6h, 0, ip6_make_flowlabel(dev_net(dev), skb,
1389 t->fl.u.ip6.flowlabel,
1390 true, &t->fl.u.ip6));
1391 ipv6h->hop_limit = t->parms.hop_limit;
1392 ipv6h->nexthdr = NEXTHDR_GRE;
1393 ipv6h->saddr = t->parms.laddr;
1394 ipv6h->daddr = t->parms.raddr;
1396 p = (__be16 *)(ipv6h + 1);
1397 p[0] = t->parms.o_flags;
1401 * Set the source hardware address.
1405 memcpy(&ipv6h->saddr, saddr, sizeof(struct in6_addr));
1407 memcpy(&ipv6h->daddr, daddr, sizeof(struct in6_addr));
1408 if (!ipv6_addr_any(&ipv6h->daddr))
1414 static const struct header_ops ip6gre_header_ops = {
1415 .create = ip6gre_header,
1418 static const struct net_device_ops ip6gre_netdev_ops = {
1419 .ndo_init = ip6gre_tunnel_init,
1420 .ndo_uninit = ip6gre_tunnel_uninit,
1421 .ndo_start_xmit = ip6gre_tunnel_xmit,
1422 .ndo_do_ioctl = ip6gre_tunnel_ioctl,
1423 .ndo_change_mtu = ip6_tnl_change_mtu,
1424 .ndo_get_stats64 = ip_tunnel_get_stats64,
1425 .ndo_get_iflink = ip6_tnl_get_iflink,
1428 static void ip6gre_dev_free(struct net_device *dev)
1430 struct ip6_tnl *t = netdev_priv(dev);
1432 gro_cells_destroy(&t->gro_cells);
1433 dst_cache_destroy(&t->dst_cache);
1434 free_percpu(dev->tstats);
1437 static void ip6gre_tunnel_setup(struct net_device *dev)
1439 dev->netdev_ops = &ip6gre_netdev_ops;
1440 dev->needs_free_netdev = true;
1441 dev->priv_destructor = ip6gre_dev_free;
1443 dev->type = ARPHRD_IP6GRE;
1445 dev->flags |= IFF_NOARP;
1446 dev->addr_len = sizeof(struct in6_addr);
1447 netif_keep_dst(dev);
1448 /* This perm addr will be used as interface identifier by IPv6 */
1449 dev->addr_assign_type = NET_ADDR_RANDOM;
1450 eth_random_addr(dev->perm_addr);
1453 #define GRE6_FEATURES (NETIF_F_SG | \
1454 NETIF_F_FRAGLIST | \
1458 static void ip6gre_tnl_init_features(struct net_device *dev)
1460 struct ip6_tnl *nt = netdev_priv(dev);
1462 dev->features |= GRE6_FEATURES;
1463 dev->hw_features |= GRE6_FEATURES;
1465 if (!(nt->parms.o_flags & TUNNEL_SEQ)) {
1466 		/* TCP offload with GRE SEQ is not supported, nor can we
1467 		 * support 2 levels of outer headers requiring an update. */
1470 if (!(nt->parms.o_flags & TUNNEL_CSUM) ||
1471 nt->encap.type == TUNNEL_ENCAP_NONE) {
1472 dev->features |= NETIF_F_GSO_SOFTWARE;
1473 dev->hw_features |= NETIF_F_GSO_SOFTWARE;
1476 		/* Can use a lockless transmit, unless we generate output sequences */
1479 dev->features |= NETIF_F_LLTX;
1483 static int ip6gre_tunnel_init_common(struct net_device *dev)
1485 struct ip6_tnl *tunnel;
1489 tunnel = netdev_priv(dev);
1492 tunnel->net = dev_net(dev);
1493 strcpy(tunnel->parms.name, dev->name);
1495 dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
1499 ret = dst_cache_init(&tunnel->dst_cache, GFP_KERNEL);
1501 goto cleanup_alloc_pcpu_stats;
1503 ret = gro_cells_init(&tunnel->gro_cells, dev);
1505 goto cleanup_dst_cache_init;
1507 t_hlen = ip6gre_calc_hlen(tunnel);
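	/* Default MTU: the 1500-byte Ethernet payload (ETH_DATA_LEN) minus the
	 * IPv6 + GRE overhead just computed; Ethernet-type (gretap) devices
	 * also subtract the inner MAC header.
	 */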
1508 dev->mtu = ETH_DATA_LEN - t_hlen;
1509 if (dev->type == ARPHRD_ETHER)
1510 dev->mtu -= ETH_HLEN;
1511 if (!(tunnel->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
1514 if (tunnel->parms.collect_md) {
1515 netif_keep_dst(dev);
1517 ip6gre_tnl_init_features(dev);
1522 cleanup_dst_cache_init:
1523 dst_cache_destroy(&tunnel->dst_cache);
1524 cleanup_alloc_pcpu_stats:
1525 free_percpu(dev->tstats);
1530 static int ip6gre_tunnel_init(struct net_device *dev)
1532 struct ip6_tnl *tunnel;
1535 ret = ip6gre_tunnel_init_common(dev);
1539 tunnel = netdev_priv(dev);
1541 if (tunnel->parms.collect_md)
1544 memcpy(dev->dev_addr, &tunnel->parms.laddr, sizeof(struct in6_addr));
1545 memcpy(dev->broadcast, &tunnel->parms.raddr, sizeof(struct in6_addr));
1547 if (ipv6_addr_any(&tunnel->parms.raddr))
1548 dev->header_ops = &ip6gre_header_ops;
1553 static void ip6gre_fb_tunnel_init(struct net_device *dev)
1555 struct ip6_tnl *tunnel = netdev_priv(dev);
1558 tunnel->net = dev_net(dev);
1559 strcpy(tunnel->parms.name, dev->name);
1561 tunnel->hlen = sizeof(struct ipv6hdr) + 4;
1564 static struct inet6_protocol ip6gre_protocol __read_mostly = {
1566 .err_handler = ip6gre_err,
1567 .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
1570 static void ip6gre_destroy_tunnels(struct net *net, struct list_head *head)
1572 struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
1573 struct net_device *dev, *aux;
1576 for_each_netdev_safe(net, dev, aux)
1577 if (dev->rtnl_link_ops == &ip6gre_link_ops ||
1578 dev->rtnl_link_ops == &ip6gre_tap_ops ||
1579 dev->rtnl_link_ops == &ip6erspan_tap_ops)
1580 unregister_netdevice_queue(dev, head);
1582 for (prio = 0; prio < 4; prio++) {
1584 for (h = 0; h < IP6_GRE_HASH_SIZE; h++) {
1587 t = rtnl_dereference(ign->tunnels[prio][h]);
1590 /* If dev is in the same netns, it has already
1591 * been added to the list by the previous loop.
1593 if (!net_eq(dev_net(t->dev), net))
1594 unregister_netdevice_queue(t->dev,
1596 t = rtnl_dereference(t->next);
1602 static int __net_init ip6gre_init_net(struct net *net)
1604 struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
1605 struct net_device *ndev;
1608 if (!net_has_fallback_tunnels(net))
1610 ndev = alloc_netdev(sizeof(struct ip6_tnl), "ip6gre0",
1611 NET_NAME_UNKNOWN, ip6gre_tunnel_setup);
1616 ign->fb_tunnel_dev = ndev;
1617 dev_net_set(ign->fb_tunnel_dev, net);
1618 /* FB netdevice is special: we have one, and only one per netns.
1619 	 * Allowing it to be moved to another netns is clearly unsafe. */
1621 ign->fb_tunnel_dev->features |= NETIF_F_NETNS_LOCAL;
1624 ip6gre_fb_tunnel_init(ign->fb_tunnel_dev);
1625 ign->fb_tunnel_dev->rtnl_link_ops = &ip6gre_link_ops;
1627 err = register_netdev(ign->fb_tunnel_dev);
1631 rcu_assign_pointer(ign->tunnels_wc[0],
1632 netdev_priv(ign->fb_tunnel_dev));
1641 static void __net_exit ip6gre_exit_batch_net(struct list_head *net_list)
1647 list_for_each_entry(net, net_list, exit_list)
1648 ip6gre_destroy_tunnels(net, &list);
1649 unregister_netdevice_many(&list);
1653 static struct pernet_operations ip6gre_net_ops = {
1654 .init = ip6gre_init_net,
1655 .exit_batch = ip6gre_exit_batch_net,
1656 .id = &ip6gre_net_id,
1657 .size = sizeof(struct ip6gre_net),
1660 static int ip6gre_tunnel_validate(struct nlattr *tb[], struct nlattr *data[],
1661 struct netlink_ext_ack *extack)
1669 if (data[IFLA_GRE_IFLAGS])
1670 flags |= nla_get_be16(data[IFLA_GRE_IFLAGS]);
1671 if (data[IFLA_GRE_OFLAGS])
1672 flags |= nla_get_be16(data[IFLA_GRE_OFLAGS]);
1673 if (flags & (GRE_VERSION|GRE_ROUTING))
1679 static int ip6gre_tap_validate(struct nlattr *tb[], struct nlattr *data[],
1680 struct netlink_ext_ack *extack)
1682 struct in6_addr daddr;
1684 if (tb[IFLA_ADDRESS]) {
1685 if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
1687 if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
1688 return -EADDRNOTAVAIL;
1694 if (data[IFLA_GRE_REMOTE]) {
1695 daddr = nla_get_in6_addr(data[IFLA_GRE_REMOTE]);
1696 if (ipv6_addr_any(&daddr))
1701 return ip6gre_tunnel_validate(tb, data, extack);
1704 static int ip6erspan_tap_validate(struct nlattr *tb[], struct nlattr *data[],
1705 struct netlink_ext_ack *extack)
1713 ret = ip6gre_tap_validate(tb, data, extack);
1717 /* ERSPAN should only have GRE sequence and key flag */
1718 if (data[IFLA_GRE_OFLAGS])
1719 flags |= nla_get_be16(data[IFLA_GRE_OFLAGS]);
1720 if (data[IFLA_GRE_IFLAGS])
1721 flags |= nla_get_be16(data[IFLA_GRE_IFLAGS]);
1722 if (!data[IFLA_GRE_COLLECT_METADATA] &&
1723 flags != (GRE_SEQ | GRE_KEY))
1726 	/* The ERSPAN session ID is only 10 bits. Since we reuse the
1727 	 * 32-bit key field as the ID, check its range. */
1729 if (data[IFLA_GRE_IKEY] &&
1730 (ntohl(nla_get_be32(data[IFLA_GRE_IKEY])) & ~ID_MASK))
1733 if (data[IFLA_GRE_OKEY] &&
1734 (ntohl(nla_get_be32(data[IFLA_GRE_OKEY])) & ~ID_MASK))
1737 if (data[IFLA_GRE_ERSPAN_VER]) {
1738 ver = nla_get_u8(data[IFLA_GRE_ERSPAN_VER]);
1739 if (ver != 1 && ver != 2)
1744 if (data[IFLA_GRE_ERSPAN_INDEX]) {
1745 u32 index = nla_get_u32(data[IFLA_GRE_ERSPAN_INDEX]);
1747 if (index & ~INDEX_MASK)
1750 } else if (ver == 2) {
1751 if (data[IFLA_GRE_ERSPAN_DIR]) {
1752 u16 dir = nla_get_u8(data[IFLA_GRE_ERSPAN_DIR]);
1754 if (dir & ~(DIR_MASK >> DIR_OFFSET))
1758 if (data[IFLA_GRE_ERSPAN_HWID]) {
1759 u16 hwid = nla_get_u16(data[IFLA_GRE_ERSPAN_HWID]);
1761 if (hwid & ~(HWID_MASK >> HWID_OFFSET))
1769 static void ip6erspan_set_version(struct nlattr *data[],
1770 struct __ip6_tnl_parm *parms)
1775 parms->erspan_ver = 1;
1776 if (data[IFLA_GRE_ERSPAN_VER])
1777 parms->erspan_ver = nla_get_u8(data[IFLA_GRE_ERSPAN_VER]);
1779 if (parms->erspan_ver == 1) {
1780 if (data[IFLA_GRE_ERSPAN_INDEX])
1781 parms->index = nla_get_u32(data[IFLA_GRE_ERSPAN_INDEX]);
1782 } else if (parms->erspan_ver == 2) {
1783 if (data[IFLA_GRE_ERSPAN_DIR])
1784 parms->dir = nla_get_u8(data[IFLA_GRE_ERSPAN_DIR]);
1785 if (data[IFLA_GRE_ERSPAN_HWID])
1786 parms->hwid = nla_get_u16(data[IFLA_GRE_ERSPAN_HWID]);
1790 static void ip6gre_netlink_parms(struct nlattr *data[],
1791 struct __ip6_tnl_parm *parms)
1793 memset(parms, 0, sizeof(*parms));
1798 if (data[IFLA_GRE_LINK])
1799 parms->link = nla_get_u32(data[IFLA_GRE_LINK]);
1801 if (data[IFLA_GRE_IFLAGS])
1802 parms->i_flags = gre_flags_to_tnl_flags(
1803 nla_get_be16(data[IFLA_GRE_IFLAGS]));
1805 if (data[IFLA_GRE_OFLAGS])
1806 parms->o_flags = gre_flags_to_tnl_flags(
1807 nla_get_be16(data[IFLA_GRE_OFLAGS]));
1809 if (data[IFLA_GRE_IKEY])
1810 parms->i_key = nla_get_be32(data[IFLA_GRE_IKEY]);
1812 if (data[IFLA_GRE_OKEY])
1813 parms->o_key = nla_get_be32(data[IFLA_GRE_OKEY]);
1815 if (data[IFLA_GRE_LOCAL])
1816 parms->laddr = nla_get_in6_addr(data[IFLA_GRE_LOCAL]);
1818 if (data[IFLA_GRE_REMOTE])
1819 parms->raddr = nla_get_in6_addr(data[IFLA_GRE_REMOTE]);
1821 if (data[IFLA_GRE_TTL])
1822 parms->hop_limit = nla_get_u8(data[IFLA_GRE_TTL]);
1824 if (data[IFLA_GRE_ENCAP_LIMIT])
1825 parms->encap_limit = nla_get_u8(data[IFLA_GRE_ENCAP_LIMIT]);
1827 if (data[IFLA_GRE_FLOWINFO])
1828 parms->flowinfo = nla_get_be32(data[IFLA_GRE_FLOWINFO]);
1830 if (data[IFLA_GRE_FLAGS])
1831 parms->flags = nla_get_u32(data[IFLA_GRE_FLAGS]);
1833 if (data[IFLA_GRE_FWMARK])
1834 parms->fwmark = nla_get_u32(data[IFLA_GRE_FWMARK]);
1836 if (data[IFLA_GRE_COLLECT_METADATA])
1837 parms->collect_md = true;
1840 static int ip6gre_tap_init(struct net_device *dev)
1844 ret = ip6gre_tunnel_init_common(dev);
1848 dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
1853 static const struct net_device_ops ip6gre_tap_netdev_ops = {
1854 .ndo_init = ip6gre_tap_init,
1855 .ndo_uninit = ip6gre_tunnel_uninit,
1856 .ndo_start_xmit = ip6gre_tunnel_xmit,
1857 .ndo_set_mac_address = eth_mac_addr,
1858 .ndo_validate_addr = eth_validate_addr,
1859 .ndo_change_mtu = ip6_tnl_change_mtu,
1860 .ndo_get_stats64 = ip_tunnel_get_stats64,
1861 .ndo_get_iflink = ip6_tnl_get_iflink,
1864 static int ip6erspan_calc_hlen(struct ip6_tnl *tunnel)
1868 tunnel->tun_hlen = 8;
1869 tunnel->hlen = tunnel->tun_hlen + tunnel->encap_hlen +
1870 erspan_hdr_len(tunnel->parms.erspan_ver);
1872 t_hlen = tunnel->hlen + sizeof(struct ipv6hdr);
1873 tunnel->dev->needed_headroom = LL_MAX_HEADER + t_hlen;
1877 static int ip6erspan_tap_init(struct net_device *dev)
1879 struct ip6_tnl *tunnel;
1883 tunnel = netdev_priv(dev);
1886 tunnel->net = dev_net(dev);
1887 strcpy(tunnel->parms.name, dev->name);
1889 dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
1893 ret = dst_cache_init(&tunnel->dst_cache, GFP_KERNEL);
1895 goto cleanup_alloc_pcpu_stats;
1897 ret = gro_cells_init(&tunnel->gro_cells, dev);
1899 goto cleanup_dst_cache_init;
1901 t_hlen = ip6erspan_calc_hlen(tunnel);
1902 dev->mtu = ETH_DATA_LEN - t_hlen;
1903 if (dev->type == ARPHRD_ETHER)
1904 dev->mtu -= ETH_HLEN;
1905 if (!(tunnel->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
1908 dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
1909 ip6erspan_tnl_link_config(tunnel, 1);
1914 cleanup_dst_cache_init:
1915 dst_cache_destroy(&tunnel->dst_cache);
1916 cleanup_alloc_pcpu_stats:
1917 free_percpu(dev->tstats);
1922 static const struct net_device_ops ip6erspan_netdev_ops = {
1923 .ndo_init = ip6erspan_tap_init,
1924 .ndo_uninit = ip6erspan_tunnel_uninit,
1925 .ndo_start_xmit = ip6erspan_tunnel_xmit,
1926 .ndo_set_mac_address = eth_mac_addr,
1927 .ndo_validate_addr = eth_validate_addr,
1928 .ndo_change_mtu = ip6_tnl_change_mtu,
1929 .ndo_get_stats64 = ip_tunnel_get_stats64,
1930 .ndo_get_iflink = ip6_tnl_get_iflink,
1933 static void ip6gre_tap_setup(struct net_device *dev)
1939 dev->netdev_ops = &ip6gre_tap_netdev_ops;
1940 dev->needs_free_netdev = true;
1941 dev->priv_destructor = ip6gre_dev_free;
1943 dev->priv_flags &= ~IFF_TX_SKB_SHARING;
1944 dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
1945 netif_keep_dst(dev);
1948 bool is_ip6gretap_dev(const struct net_device *dev)
1950 return dev->netdev_ops == &ip6gre_tap_netdev_ops;
1952 EXPORT_SYMBOL_GPL(is_ip6gretap_dev);
1954 static bool ip6gre_netlink_encap_parms(struct nlattr *data[],
1955 struct ip_tunnel_encap *ipencap)
1959 memset(ipencap, 0, sizeof(*ipencap));
1964 if (data[IFLA_GRE_ENCAP_TYPE]) {
1966 ipencap->type = nla_get_u16(data[IFLA_GRE_ENCAP_TYPE]);
1969 if (data[IFLA_GRE_ENCAP_FLAGS]) {
1971 ipencap->flags = nla_get_u16(data[IFLA_GRE_ENCAP_FLAGS]);
1974 if (data[IFLA_GRE_ENCAP_SPORT]) {
1976 ipencap->sport = nla_get_be16(data[IFLA_GRE_ENCAP_SPORT]);
1979 if (data[IFLA_GRE_ENCAP_DPORT]) {
1981 ipencap->dport = nla_get_be16(data[IFLA_GRE_ENCAP_DPORT]);
1987 static int ip6gre_newlink_common(struct net *src_net, struct net_device *dev,
1988 struct nlattr *tb[], struct nlattr *data[],
1989 struct netlink_ext_ack *extack)
1992 struct ip_tunnel_encap ipencap;
1995 nt = netdev_priv(dev);
1997 if (ip6gre_netlink_encap_parms(data, &ipencap)) {
1998 int err = ip6_tnl_encap_setup(nt, &ipencap);
2004 if (dev->type == ARPHRD_ETHER && !tb[IFLA_ADDRESS])
2005 eth_hw_addr_random(dev);
2008 nt->net = dev_net(dev);
2010 err = register_netdevice(dev);
2015 ip6_tnl_change_mtu(dev, nla_get_u32(tb[IFLA_MTU]));
2021 static int ip6gre_newlink(struct net *src_net, struct net_device *dev,
2022 struct nlattr *tb[], struct nlattr *data[],
2023 struct netlink_ext_ack *extack)
2025 struct ip6_tnl *nt = netdev_priv(dev);
2026 struct net *net = dev_net(dev);
2027 struct ip6gre_net *ign;
2030 ip6gre_netlink_parms(data, &nt->parms);
2031 ign = net_generic(net, ip6gre_net_id);
2033 if (nt->parms.collect_md) {
2034 if (rtnl_dereference(ign->collect_md_tun))
2037 if (ip6gre_tunnel_find(net, &nt->parms, dev->type))
2041 err = ip6gre_newlink_common(src_net, dev, tb, data, extack);
2043 ip6gre_tnl_link_config(nt, !tb[IFLA_MTU]);
2044 ip6gre_tunnel_link_md(ign, nt);
2045 ip6gre_tunnel_link(net_generic(net, ip6gre_net_id), nt);
2050 static struct ip6_tnl *
2051 ip6gre_changelink_common(struct net_device *dev, struct nlattr *tb[],
2052 struct nlattr *data[], struct __ip6_tnl_parm *p_p,
2053 struct netlink_ext_ack *extack)
2055 struct ip6_tnl *t, *nt = netdev_priv(dev);
2056 struct net *net = nt->net;
2057 struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
2058 struct ip_tunnel_encap ipencap;
2060 if (dev == ign->fb_tunnel_dev)
2061 return ERR_PTR(-EINVAL);
2063 if (ip6gre_netlink_encap_parms(data, &ipencap)) {
2064 int err = ip6_tnl_encap_setup(nt, &ipencap);
2067 return ERR_PTR(err);
2070 ip6gre_netlink_parms(data, p_p);
2072 t = ip6gre_tunnel_locate(net, p_p, 0);
2076 return ERR_PTR(-EEXIST);
2084 static int ip6gre_changelink(struct net_device *dev, struct nlattr *tb[],
2085 struct nlattr *data[],
2086 struct netlink_ext_ack *extack)
2088 struct ip6_tnl *t = netdev_priv(dev);
2089 struct ip6gre_net *ign = net_generic(t->net, ip6gre_net_id);
2090 struct __ip6_tnl_parm p;
2092 t = ip6gre_changelink_common(dev, tb, data, &p, extack);
2096 ip6gre_tunnel_unlink_md(ign, t);
2097 ip6gre_tunnel_unlink(ign, t);
2098 ip6gre_tnl_change(t, &p, !tb[IFLA_MTU]);
2099 ip6gre_tunnel_link_md(ign, t);
2100 ip6gre_tunnel_link(ign, t);
2104 static void ip6gre_dellink(struct net_device *dev, struct list_head *head)
2106 struct net *net = dev_net(dev);
2107 struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
2109 if (dev != ign->fb_tunnel_dev)
2110 unregister_netdevice_queue(dev, head);
2113 static size_t ip6gre_get_size(const struct net_device *dev)
2118 /* IFLA_GRE_IFLAGS */
2120 /* IFLA_GRE_OFLAGS */
2126 /* IFLA_GRE_LOCAL */
2127 nla_total_size(sizeof(struct in6_addr)) +
2128 /* IFLA_GRE_REMOTE */
2129 nla_total_size(sizeof(struct in6_addr)) +
2132 /* IFLA_GRE_ENCAP_LIMIT */
2134 /* IFLA_GRE_FLOWINFO */
2136 /* IFLA_GRE_FLAGS */
2138 /* IFLA_GRE_ENCAP_TYPE */
2140 /* IFLA_GRE_ENCAP_FLAGS */
2142 /* IFLA_GRE_ENCAP_SPORT */
2144 /* IFLA_GRE_ENCAP_DPORT */
2146 /* IFLA_GRE_COLLECT_METADATA */
2148 /* IFLA_GRE_FWMARK */
2150 /* IFLA_GRE_ERSPAN_INDEX */
2155 static int ip6gre_fill_info(struct sk_buff *skb, const struct net_device *dev)
2157 struct ip6_tnl *t = netdev_priv(dev);
2158 struct __ip6_tnl_parm *p = &t->parms;
2159 __be16 o_flags = p->o_flags;
2161 if (p->erspan_ver == 1 || p->erspan_ver == 2) {
2163 o_flags |= TUNNEL_KEY;
2165 if (nla_put_u8(skb, IFLA_GRE_ERSPAN_VER, p->erspan_ver))
2166 goto nla_put_failure;
2168 if (p->erspan_ver == 1) {
2169 if (nla_put_u32(skb, IFLA_GRE_ERSPAN_INDEX, p->index))
2170 goto nla_put_failure;
2172 if (nla_put_u8(skb, IFLA_GRE_ERSPAN_DIR, p->dir))
2173 goto nla_put_failure;
2174 if (nla_put_u16(skb, IFLA_GRE_ERSPAN_HWID, p->hwid))
2175 goto nla_put_failure;
2179 if (nla_put_u32(skb, IFLA_GRE_LINK, p->link) ||
2180 nla_put_be16(skb, IFLA_GRE_IFLAGS,
2181 gre_tnl_flags_to_gre_flags(p->i_flags)) ||
2182 nla_put_be16(skb, IFLA_GRE_OFLAGS,
2183 gre_tnl_flags_to_gre_flags(o_flags)) ||
2184 nla_put_be32(skb, IFLA_GRE_IKEY, p->i_key) ||
2185 nla_put_be32(skb, IFLA_GRE_OKEY, p->o_key) ||
2186 nla_put_in6_addr(skb, IFLA_GRE_LOCAL, &p->laddr) ||
2187 nla_put_in6_addr(skb, IFLA_GRE_REMOTE, &p->raddr) ||
2188 nla_put_u8(skb, IFLA_GRE_TTL, p->hop_limit) ||
2189 nla_put_u8(skb, IFLA_GRE_ENCAP_LIMIT, p->encap_limit) ||
2190 nla_put_be32(skb, IFLA_GRE_FLOWINFO, p->flowinfo) ||
2191 nla_put_u32(skb, IFLA_GRE_FLAGS, p->flags) ||
2192 nla_put_u32(skb, IFLA_GRE_FWMARK, p->fwmark))
2193 goto nla_put_failure;
2195 if (nla_put_u16(skb, IFLA_GRE_ENCAP_TYPE,
2197 nla_put_be16(skb, IFLA_GRE_ENCAP_SPORT,
2199 nla_put_be16(skb, IFLA_GRE_ENCAP_DPORT,
2201 nla_put_u16(skb, IFLA_GRE_ENCAP_FLAGS,
2203 goto nla_put_failure;
2205 if (p->collect_md) {
2206 if (nla_put_flag(skb, IFLA_GRE_COLLECT_METADATA))
2207 goto nla_put_failure;
2216 static const struct nla_policy ip6gre_policy[IFLA_GRE_MAX + 1] = {
2217 [IFLA_GRE_LINK] = { .type = NLA_U32 },
2218 [IFLA_GRE_IFLAGS] = { .type = NLA_U16 },
2219 [IFLA_GRE_OFLAGS] = { .type = NLA_U16 },
2220 [IFLA_GRE_IKEY] = { .type = NLA_U32 },
2221 [IFLA_GRE_OKEY] = { .type = NLA_U32 },
2222 [IFLA_GRE_LOCAL] = { .len = FIELD_SIZEOF(struct ipv6hdr, saddr) },
2223 [IFLA_GRE_REMOTE] = { .len = FIELD_SIZEOF(struct ipv6hdr, daddr) },
2224 [IFLA_GRE_TTL] = { .type = NLA_U8 },
2225 [IFLA_GRE_ENCAP_LIMIT] = { .type = NLA_U8 },
2226 [IFLA_GRE_FLOWINFO] = { .type = NLA_U32 },
2227 [IFLA_GRE_FLAGS] = { .type = NLA_U32 },
2228 [IFLA_GRE_ENCAP_TYPE] = { .type = NLA_U16 },
2229 [IFLA_GRE_ENCAP_FLAGS] = { .type = NLA_U16 },
2230 [IFLA_GRE_ENCAP_SPORT] = { .type = NLA_U16 },
2231 [IFLA_GRE_ENCAP_DPORT] = { .type = NLA_U16 },
2232 [IFLA_GRE_COLLECT_METADATA] = { .type = NLA_FLAG },
2233 [IFLA_GRE_FWMARK] = { .type = NLA_U32 },
2234 [IFLA_GRE_ERSPAN_INDEX] = { .type = NLA_U32 },
2235 [IFLA_GRE_ERSPAN_VER] = { .type = NLA_U8 },
2236 [IFLA_GRE_ERSPAN_DIR] = { .type = NLA_U8 },
2237 [IFLA_GRE_ERSPAN_HWID] = { .type = NLA_U16 },
2240 static void ip6erspan_tap_setup(struct net_device *dev)
2245 dev->netdev_ops = &ip6erspan_netdev_ops;
2246 dev->needs_free_netdev = true;
2247 dev->priv_destructor = ip6gre_dev_free;
2249 dev->priv_flags &= ~IFF_TX_SKB_SHARING;
2250 dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
2251 netif_keep_dst(dev);
2254 static int ip6erspan_newlink(struct net *src_net, struct net_device *dev,
2255 struct nlattr *tb[], struct nlattr *data[],
2256 struct netlink_ext_ack *extack)
2258 struct ip6_tnl *nt = netdev_priv(dev);
2259 struct net *net = dev_net(dev);
2260 struct ip6gre_net *ign;
2263 ip6gre_netlink_parms(data, &nt->parms);
2264 ip6erspan_set_version(data, &nt->parms);
2265 ign = net_generic(net, ip6gre_net_id);
2267 if (nt->parms.collect_md) {
2268 if (rtnl_dereference(ign->collect_md_tun_erspan))
2271 if (ip6gre_tunnel_find(net, &nt->parms, dev->type))
2275 err = ip6gre_newlink_common(src_net, dev, tb, data, extack);
2277 ip6erspan_tnl_link_config(nt, !tb[IFLA_MTU]);
2278 ip6erspan_tunnel_link_md(ign, nt);
2279 ip6gre_tunnel_link(net_generic(net, ip6gre_net_id), nt);
2284 static void ip6erspan_tnl_link_config(struct ip6_tnl *t, int set_mtu)
2286 ip6gre_tnl_link_config_common(t);
2287 ip6gre_tnl_link_config_route(t, set_mtu, ip6erspan_calc_hlen(t));
2290 static int ip6erspan_tnl_change(struct ip6_tnl *t,
2291 const struct __ip6_tnl_parm *p, int set_mtu)
2293 ip6gre_tnl_copy_tnl_parm(t, p);
2294 ip6erspan_tnl_link_config(t, set_mtu);
2298 static int ip6erspan_changelink(struct net_device *dev, struct nlattr *tb[],
2299 struct nlattr *data[],
2300 struct netlink_ext_ack *extack)
2302 struct ip6gre_net *ign = net_generic(dev_net(dev), ip6gre_net_id);
2303 struct __ip6_tnl_parm p;
2306 t = ip6gre_changelink_common(dev, tb, data, &p, extack);
2310 ip6erspan_set_version(data, &p);
2311 ip6gre_tunnel_unlink_md(ign, t);
2312 ip6gre_tunnel_unlink(ign, t);
2313 ip6erspan_tnl_change(t, &p, !tb[IFLA_MTU]);
2314 ip6erspan_tunnel_link_md(ign, t);
2315 ip6gre_tunnel_link(ign, t);
2319 static struct rtnl_link_ops ip6gre_link_ops __read_mostly = {
2321 .maxtype = IFLA_GRE_MAX,
2322 .policy = ip6gre_policy,
2323 .priv_size = sizeof(struct ip6_tnl),
2324 .setup = ip6gre_tunnel_setup,
2325 .validate = ip6gre_tunnel_validate,
2326 .newlink = ip6gre_newlink,
2327 .changelink = ip6gre_changelink,
2328 .dellink = ip6gre_dellink,
2329 .get_size = ip6gre_get_size,
2330 .fill_info = ip6gre_fill_info,
2331 .get_link_net = ip6_tnl_get_link_net,
2334 static struct rtnl_link_ops ip6gre_tap_ops __read_mostly = {
2335 .kind = "ip6gretap",
2336 .maxtype = IFLA_GRE_MAX,
2337 .policy = ip6gre_policy,
2338 .priv_size = sizeof(struct ip6_tnl),
2339 .setup = ip6gre_tap_setup,
2340 .validate = ip6gre_tap_validate,
2341 .newlink = ip6gre_newlink,
2342 .changelink = ip6gre_changelink,
2343 .get_size = ip6gre_get_size,
2344 .fill_info = ip6gre_fill_info,
2345 .get_link_net = ip6_tnl_get_link_net,
2348 static struct rtnl_link_ops ip6erspan_tap_ops __read_mostly = {
2349 .kind = "ip6erspan",
2350 .maxtype = IFLA_GRE_MAX,
2351 .policy = ip6gre_policy,
2352 .priv_size = sizeof(struct ip6_tnl),
2353 .setup = ip6erspan_tap_setup,
2354 .validate = ip6erspan_tap_validate,
2355 .newlink = ip6erspan_newlink,
2356 .changelink = ip6erspan_changelink,
2357 .get_size = ip6gre_get_size,
2358 .fill_info = ip6gre_fill_info,
2359 .get_link_net = ip6_tnl_get_link_net,
2363  *	And now the module's code and kernel interface.
2366 static int __init ip6gre_init(void)
2370 pr_info("GRE over IPv6 tunneling driver\n");
2372 err = register_pernet_device(&ip6gre_net_ops);
2376 err = inet6_add_protocol(&ip6gre_protocol, IPPROTO_GRE);
2378 pr_info("%s: can't add protocol\n", __func__);
2379 goto add_proto_failed;
2382 err = rtnl_link_register(&ip6gre_link_ops);
2384 goto rtnl_link_failed;
2386 err = rtnl_link_register(&ip6gre_tap_ops);
2388 goto tap_ops_failed;
2390 err = rtnl_link_register(&ip6erspan_tap_ops);
2392 goto erspan_link_failed;
2398 rtnl_link_unregister(&ip6gre_tap_ops);
2400 rtnl_link_unregister(&ip6gre_link_ops);
2402 inet6_del_protocol(&ip6gre_protocol, IPPROTO_GRE);
2404 unregister_pernet_device(&ip6gre_net_ops);
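/* The error labels above unwind the registrations in reverse order. */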
2408 static void __exit ip6gre_fini(void)
2410 rtnl_link_unregister(&ip6gre_tap_ops);
2411 rtnl_link_unregister(&ip6gre_link_ops);
2412 rtnl_link_unregister(&ip6erspan_tap_ops);
2413 inet6_del_protocol(&ip6gre_protocol, IPPROTO_GRE);
2414 unregister_pernet_device(&ip6gre_net_ops);
2417 module_init(ip6gre_init);
2418 module_exit(ip6gre_fini);
2419 MODULE_LICENSE("GPL");
2420 MODULE_AUTHOR("D. Kozlov (xeb@mail.ru)");
2421 MODULE_DESCRIPTION("GRE over IPv6 tunneling device");
2422 MODULE_ALIAS_RTNL_LINK("ip6gre");
2423 MODULE_ALIAS_RTNL_LINK("ip6gretap");
2424 MODULE_ALIAS_RTNL_LINK("ip6erspan");
2425 MODULE_ALIAS_NETDEV("ip6gre0");
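/* Illustrative iproute2 usage (addresses are documentation-prefix examples):
 *   ip link add name gre1 type ip6gre local 2001:db8::1 remote 2001:db8::2
 *   ip link set gre1 up
 */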