/*
 *	Linux NET3:	GRE over IP protocol decoder.
 *
 *	Authors: Alexey Kuznetsov (kuznet@ms2.inr.ac.ru)
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/capability.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <asm/uaccess.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/init.h>
#include <linux/in6.h>
#include <linux/inetdevice.h>
#include <linux/igmp.h>
#include <linux/netfilter_ipv4.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>

#include <net/protocol.h>
#include <net/ip_tunnels.h>
#include <net/checksum.h>
#include <net/dsfield.h>
#include <net/inet_ecn.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/rtnetlink.h>
#include <net/dst_metadata.h>
/*
   1. The most important issue is detecting local dead loops.
   They would cause complete host lockup in transmit, which
   would be "resolved" by stack overflow or, if queueing is enabled,
   by infinite looping in net_bh.

   We cannot track such dead loops during route installation;
   it is an infeasible task. The most general solution would be
   to keep an skb->encapsulation counter (a sort of local ttl),
   and silently drop the packet when it expires. It is a good
   solution, but it supposes maintaining a new variable in ALL
   skbs, even if no tunneling is used.

   Current solution: xmit_recursion breaks dead loops. This is a percpu
   counter, since when we enter the first ndo_xmit(), cpu migration is
   forbidden. We force an exit if this counter reaches RECURSION_LIMIT
   (see the illustrative sketch after this comment).

   2. Networking dead loops would not kill routers, but they would really
   kill the network. The IP hop limit plays the role of "t->recursion" in
   this case, if we copy it from the packet being encapsulated to the
   upper header. It is a very good solution, but it introduces two problems:

   - Routing protocols that use packets with ttl=1 (OSPF, RIP2)
     do not work over tunnels.
   - traceroute does not work. I planned to relay ICMP from the tunnel,
     so that this problem would be solved and traceroute output
     would be even more informative. This idea appeared to be wrong:
     only Linux complies with rfc1812 now (yes, guys, Linux is the only
     true router now :-)); all other routers (at least, in my neighbourhood)
     return only 8 bytes of payload. That is the end of it.

   Hence, if we want OSPF to work, or traceroute to say something reasonable,
   we should search for another solution.

   One of them is to parse the packet, trying to detect inner encapsulation
   made by our node. It is difficult or even impossible, especially
   taking fragmentation into account. To be short, ttl is not a solution at all.

   Current solution: the solution was UNEXPECTEDLY SIMPLE.
   We force the DF flag on tunnels with a preconfigured hop limit,
   that is ALL. :-) Well, it does not remove the problem completely,
   but exponential growth of network traffic is changed to linear
   (branches that exceed the pmtu are pruned) and the tunnel mtu
   rapidly degrades to a value <68, where looping stops.
   Yes, it is not good if there exists a router in the loop
   which does not force DF, even when encapsulating packets have DF set.
   But it is not our problem! Nobody could accuse us; we did
   all that we could do. Even if it is your gated who injected
   the fatal route into the network, even if it was you who configured
   the fatal static route: you are innocent. :-)
 */
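/* Illustrative sketch of the xmit_recursion guard described in point 1
 * above.  That guard lives in the core networking stack, not in this file,
 * and the exact shape and limit value shown here are assumptions made for
 * illustration only, simplified from the transmit path:
 *
 *	static DEFINE_PER_CPU(int, xmit_recursion);
 *	#define RECURSION_LIMIT	10	// assumed value
 *
 *	// around each nested transmit on this cpu:
 *	if (__this_cpu_read(xmit_recursion) > RECURSION_LIMIT) {
 *		// too many nested transmits: assume a local dead loop
 *		kfree_skb(skb);
 *		return NET_XMIT_DROP;
 *	}
 *	__this_cpu_inc(xmit_recursion);
 *	// ... hand the skb to the device's ndo_start_xmit() ...
 *	__this_cpu_dec(xmit_recursion);
 */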
static bool log_ecn_error = true;
module_param(log_ecn_error, bool, 0644);
MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");

static struct rtnl_link_ops ipgre_link_ops __read_mostly;
static int ipgre_tunnel_init(struct net_device *dev);

static int ipgre_net_id __read_mostly;
static int gre_tap_net_id __read_mostly;
static void ipgre_err(struct sk_buff *skb, u32 info,
		      const struct tnl_ptk_info *tpi)

	/* All the routers (except for Linux) return only
	   8 bytes of packet payload. This means that precise relaying of
	   ICMP in the real Internet is absolutely infeasible.

	   Moreover, Cisco "wise men" put the GRE key in the third word
	   of the GRE header. That makes it impossible to maintain even soft
	   state for keyed GRE tunnels with checksums enabled. Tell
	   them "thank you".

	   Well, I wonder, rfc1812 was written by a Cisco employee;
	   what the hell makes these idiots break the standards they
	   established themselves?
	 */
	struct net *net = dev_net(skb->dev);
	struct ip_tunnel_net *itn;
	const struct iphdr *iph;
	const int type = icmp_hdr(skb)->type;
	const int code = icmp_hdr(skb)->code;
	unsigned int data_len = 0;

	case ICMP_PARAMETERPROB:

	case ICMP_DEST_UNREACH:
		case ICMP_PORT_UNREACH:
			/* Impossible event. */

			/* All others are translated to HOST_UNREACH.
			   rfc2003 contains "deep thoughts" about NET_UNREACH,
			   I believe they are just ether pollution. --ANK
			 */

	case ICMP_TIME_EXCEEDED:
		if (code != ICMP_EXC_TTL)

		data_len = icmp_hdr(skb)->un.reserved[1] * 4; /* RFC 4884 4.1 */
	if (tpi->proto == htons(ETH_P_TEB))
		itn = net_generic(net, gre_tap_net_id);

		itn = net_generic(net, ipgre_net_id);

	iph = (const struct iphdr *)(icmp_hdr(skb) + 1);
	t = ip_tunnel_lookup(itn, skb->dev->ifindex, tpi->flags,
			     iph->daddr, iph->saddr, tpi->key);

#if IS_ENABLED(CONFIG_IPV6)
	if (tpi->proto == htons(ETH_P_IPV6) &&
	    !ip6_err_gen_icmpv6_unreach(skb, iph->ihl * 4 + tpi->hdr_len,

	if (t->parms.iph.daddr == 0 ||
	    ipv4_is_multicast(t->parms.iph.daddr))

	if (t->parms.iph.ttl == 0 && type == ICMP_TIME_EXCEEDED)

	if (time_before(jiffies, t->err_time + IPTUNNEL_ERR_TIMEO))

	t->err_time = jiffies;
static void gre_err(struct sk_buff *skb, u32 info)

	/* All the routers (except for Linux) return only
	 * 8 bytes of packet payload. This means that precise relaying of
	 * ICMP in the real Internet is absolutely infeasible.
	 *
	 * Moreover, Cisco "wise men" put the GRE key in the third word
	 * of the GRE header. That makes it impossible to maintain even
	 * soft state for keyed GRE tunnels with checksums enabled.
	 * Tell them "thank you".
	 *
	 * Well, I wonder, rfc1812 was written by a Cisco employee;
	 * what the hell makes these idiots break the standards they
	 * established themselves?
	 */
	const struct iphdr *iph = (struct iphdr *)skb->data;
	const int type = icmp_hdr(skb)->type;
	const int code = icmp_hdr(skb)->code;
	struct tnl_ptk_info tpi;

	if (gre_parse_header(skb, &tpi, NULL, htons(ETH_P_IP),

	if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) {
		ipv4_update_pmtu(skb, dev_net(skb->dev), info,
				 skb->dev->ifindex, 0, IPPROTO_GRE, 0);

	if (type == ICMP_REDIRECT) {
		ipv4_redirect(skb, dev_net(skb->dev), skb->dev->ifindex, 0,

	ipgre_err(skb, info, &tpi);
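/* Common GRE receive path: look up the tunnel by outer addresses, GRE
 * flags and key, strip the outer and GRE headers and hand the packet to
 * ip_tunnel_rcv().  For collect_md (flow-based) tunnels a metadata dst
 * carrying the tunnel id and the CSUM/KEY flags is attached instead of
 * relying on per-device configuration.
 */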
static int __ipgre_rcv(struct sk_buff *skb, const struct tnl_ptk_info *tpi,
		       struct ip_tunnel_net *itn, int hdr_len, bool raw_proto)
	struct metadata_dst *tun_dst = NULL;
	const struct iphdr *iph;
	struct ip_tunnel *tunnel;

	tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, tpi->flags,
				  iph->saddr, iph->daddr, tpi->key);

		if (__iptunnel_pull_header(skb, hdr_len, tpi->proto,
					   raw_proto, false) < 0)

		if (tunnel->dev->type != ARPHRD_NONE)
			skb_pop_mac_header(skb);

			skb_reset_mac_header(skb);
		if (tunnel->collect_md) {

			flags = tpi->flags & (TUNNEL_CSUM | TUNNEL_KEY);
			tun_id = key32_to_tunnel_id(tpi->key);
			tun_dst = ip_tun_rx_dst(skb, flags, tun_id, 0);

				return PACKET_REJECT;

		ip_tunnel_rcv(tunnel, skb, tpi, tun_dst, log_ecn_error);
static int ipgre_rcv(struct sk_buff *skb, const struct tnl_ptk_info *tpi,

	struct net *net = dev_net(skb->dev);
	struct ip_tunnel_net *itn;

	if (tpi->proto == htons(ETH_P_TEB))
		itn = net_generic(net, gre_tap_net_id);

		itn = net_generic(net, ipgre_net_id);

	res = __ipgre_rcv(skb, tpi, itn, hdr_len, false);
	if (res == PACKET_NEXT && tpi->proto == htons(ETH_P_TEB)) {
		/* ipgre tunnels in collect metadata mode should also
		 * receive ETH_P_TEB traffic.
		 */
		itn = net_generic(net, ipgre_net_id);
		res = __ipgre_rcv(skb, tpi, itn, hdr_len, true);
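/* GRE protocol receive handler (registered below for GREPROTO_CISCO):
 * drop multicast packets looped back to us, parse the base GRE header
 * and dispatch to ipgre_rcv(); packets that no tunnel claims are
 * answered with ICMP port unreachable.
 */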
static int gre_rcv(struct sk_buff *skb)
	struct tnl_ptk_info tpi;
	bool csum_err = false;

#ifdef CONFIG_NET_IPGRE_BROADCAST
	if (ipv4_is_multicast(ip_hdr(skb)->daddr)) {
		/* Looped back packet, drop it! */
		if (rt_is_output_route(skb_rtable(skb)))

	hdr_len = gre_parse_header(skb, &tpi, &csum_err, htons(ETH_P_IP), 0);

	if (ipgre_rcv(skb, &tpi, hdr_len) == PACKET_RCVD)

	icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
static void __gre_xmit(struct sk_buff *skb, struct net_device *dev,
		       const struct iphdr *tnl_params,

	struct ip_tunnel *tunnel = netdev_priv(dev);

	if (tunnel->parms.o_flags & TUNNEL_SEQ)

	/* Push GRE header. */
	gre_build_header(skb, tunnel->tun_hlen,
			 tunnel->parms.o_flags, proto, tunnel->parms.o_key,
			 htonl(tunnel->o_seqno));

	ip_tunnel_xmit(skb, dev, tnl_params, tnl_params->protocol);
static int gre_handle_offloads(struct sk_buff *skb, bool csum)
	if (csum && skb_checksum_start(skb) < skb->data)

	return iptunnel_handle_offloads(skb, csum ? SKB_GSO_GRE_CSUM : SKB_GSO_GRE);
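/* Resolve the outer route for a flow-based transmit: fill a flowi4 from
 * the ip_tunnel_key (outer addresses, TOS) and the skb mark, and look it
 * up as an IPPROTO_GRE flow.
 */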
static struct rtable *gre_get_rt(struct sk_buff *skb,
				 struct net_device *dev,
				 const struct ip_tunnel_key *key)
	struct net *net = dev_net(dev);

	memset(fl, 0, sizeof(*fl));
	fl->daddr = key->u.ipv4.dst;
	fl->saddr = key->u.ipv4.src;
	fl->flowi4_tos = RT_TOS(key->tos);
	fl->flowi4_mark = skb->mark;
	fl->flowi4_proto = IPPROTO_GRE;

	return ip_route_output_key(net, fl);
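/* Flow-based ("external"/collect_md) transmit: per-packet tunnel
 * parameters are taken from the skb's tunnel metadata rather than from
 * the device, the route is reused from the metadata dst cache when
 * possible, headroom is grown as needed, and the GRE and outer IP
 * headers are built from the ip_tunnel_key.
 */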
static void gre_fb_xmit(struct sk_buff *skb, struct net_device *dev,

	struct ip_tunnel_info *tun_info;
	const struct ip_tunnel_key *key;
	struct rtable *rt = NULL;

	tun_info = skb_tunnel_info(skb);
	if (unlikely(!tun_info || !(tun_info->mode & IP_TUNNEL_INFO_TX) ||
		     ip_tunnel_info_af(tun_info) != AF_INET))

	key = &tun_info->key;
	use_cache = ip_tunnel_dst_cache_usable(skb, tun_info);

		rt = dst_cache_get_ip4(&tun_info->dst_cache, &fl.saddr);

		rt = gre_get_rt(skb, dev, &fl, key);

			dst_cache_set_ip4(&tun_info->dst_cache, &rt->dst,

	tunnel_hlen = gre_calc_hlen(key->tun_flags);

	min_headroom = LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len
			+ tunnel_hlen + sizeof(struct iphdr);
	if (skb_headroom(skb) < min_headroom || skb_header_cloned(skb)) {
		int head_delta = SKB_DATA_ALIGN(min_headroom -

		err = pskb_expand_head(skb, max_t(int, head_delta, 0),

	/* Push Tunnel header. */
	if (gre_handle_offloads(skb, !!(tun_info->key.tun_flags & TUNNEL_CSUM)))

	flags = tun_info->key.tun_flags & (TUNNEL_CSUM | TUNNEL_KEY);
	gre_build_header(skb, tunnel_hlen, flags, proto,
			 tunnel_id_to_key32(tun_info->key.tun_id), 0);

	df = key->tun_flags & TUNNEL_DONT_FRAGMENT ? htons(IP_DF) : 0;

	iptunnel_xmit(skb->sk, rt, skb, fl.saddr, key->u.ipv4.dst, IPPROTO_GRE,
		      key->tos, key->ttl, df, false);

	dev->stats.tx_dropped++;
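/* .ndo_fill_metadata_dst: resolve the route this skb's tunnel metadata
 * would use and write the chosen local address back into the metadata,
 * so that callers such as openvswitch can learn the outer source address.
 */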
static int gre_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
	struct ip_tunnel_info *info = skb_tunnel_info(skb);

	if (ip_tunnel_info_af(info) != AF_INET)

	rt = gre_get_rt(skb, dev, &fl4, &info->key);

	info->key.u.ipv4.src = fl4.saddr;
static netdev_tx_t ipgre_xmit(struct sk_buff *skb,
			      struct net_device *dev)
	struct ip_tunnel *tunnel = netdev_priv(dev);
	const struct iphdr *tnl_params;

	if (tunnel->collect_md) {
		gre_fb_xmit(skb, dev, skb->protocol);

	if (dev->header_ops) {
		/* Need space for new headers */
		if (skb_cow_head(skb, dev->needed_headroom -
				      (tunnel->hlen + sizeof(struct iphdr))))

		tnl_params = (const struct iphdr *)skb->data;

		/* Pull skb since ip_tunnel_xmit() needs skb->data pointing
		 * past the outer IP and GRE headers that were pre-built via
		 * header_ops.
		 */
		skb_pull(skb, tunnel->hlen + sizeof(struct iphdr));
		skb_reset_mac_header(skb);

		if (skb_cow_head(skb, dev->needed_headroom))

		tnl_params = &tunnel->parms.iph;

	if (gre_handle_offloads(skb, !!(tunnel->parms.o_flags & TUNNEL_CSUM)))

	__gre_xmit(skb, dev, tnl_params, skb->protocol);

	dev->stats.tx_dropped++;
static netdev_tx_t gre_tap_xmit(struct sk_buff *skb,
				struct net_device *dev)
	struct ip_tunnel *tunnel = netdev_priv(dev);

	if (tunnel->collect_md) {
		gre_fb_xmit(skb, dev, htons(ETH_P_TEB));

	if (gre_handle_offloads(skb, !!(tunnel->parms.o_flags & TUNNEL_CSUM)))

	if (skb_cow_head(skb, dev->needed_headroom))

	__gre_xmit(skb, dev, &tunnel->parms.iph, htons(ETH_P_TEB));

	dev->stats.tx_dropped++;
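/* Legacy SIOCADDTUNNEL/SIOCCHGTUNNEL/... ioctl path: copy the parameters
 * from userspace, validate the GRE-specific constraints for add/change
 * (plain IPv4 header, IPPROTO_GRE, only DF in frag_off, no
 * GRE_VERSION/GRE_ROUTING bits), translate between on-wire GRE flag bits
 * and internal TUNNEL_* flags around ip_tunnel_ioctl(), and copy the
 * result back to userspace.
 */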
static int ipgre_tunnel_ioctl(struct net_device *dev,
			      struct ifreq *ifr, int cmd)
	struct ip_tunnel_parm p;

	if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))

	if (cmd == SIOCADDTUNNEL || cmd == SIOCCHGTUNNEL) {
		if (p.iph.version != 4 || p.iph.protocol != IPPROTO_GRE ||
		    p.iph.ihl != 5 || (p.iph.frag_off & htons(~IP_DF)) ||
		    ((p.i_flags | p.o_flags) & (GRE_VERSION | GRE_ROUTING)))

	p.i_flags = gre_flags_to_tnl_flags(p.i_flags);
	p.o_flags = gre_flags_to_tnl_flags(p.o_flags);

	err = ip_tunnel_ioctl(dev, &p, cmd);

	p.i_flags = gre_tnl_flags_to_gre_flags(p.i_flags);
	p.o_flags = gre_tnl_flags_to_gre_flags(p.o_flags);

	if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
/* Nice toy. Unfortunately, useless in real life :-)
   It allows one to construct a virtual multiprotocol broadcast "LAN"
   over the Internet, provided multicast routing is tuned.

   I have no idea whether this bicycle was invented before me,
   so I had to set ARPHRD_IPGRE to a random value.
   I have the impression that Cisco could make something similar,
   but this feature is apparently missing in IOS<=11.2(8).

   I set up 10.66.66/24 and fec0:6666:6666::0/96 as virtual networks
   with broadcast 224.66.66.66. If you have access to mbone, play with me :-)

   ping -t 255 224.66.66.66

   If nobody answers, mbone does not work.

   ip tunnel add Universe mode gre remote 224.66.66.66 local <Your_real_addr> ttl 255
   ip addr add 10.66.66.<somewhat>/24 dev Universe
   ifconfig Universe add fe80::<Your_real_addr>/10
   ifconfig Universe add fec0:6666:6666::<Your_real_addr>/96

   ftp fec0:6666:6666::193.233.7.65

   (An rtnetlink-based equivalent of the tunnel creation is sketched in
   the comment after this one.)
 */
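/* Illustrative sketch (not part of the original example): the
 * "ip tunnel add" line above uses the legacy ioctl path
 * (ipgre_tunnel_ioctl); the same tunnel can be created through the
 * rtnetlink ops registered later in this file with modern iproute2:
 *
 *	ip link add Universe type gre remote 224.66.66.66 \
 *		local <Your_real_addr> ttl 255
 *	ip link set Universe up
 *
 * The placeholder <Your_real_addr> is kept from the original example.
 */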
static int ipgre_header(struct sk_buff *skb, struct net_device *dev,
			const void *daddr, const void *saddr, unsigned int len)
	struct ip_tunnel *t = netdev_priv(dev);
	struct gre_base_hdr *greh;

	iph = (struct iphdr *)skb_push(skb, t->hlen + sizeof(*iph));
	greh = (struct gre_base_hdr *)(iph + 1);
	greh->flags = gre_tnl_flags_to_gre_flags(t->parms.o_flags);
	greh->protocol = htons(type);

	memcpy(iph, &t->parms.iph, sizeof(struct iphdr));

	/* Set the source hardware address. */
		memcpy(&iph->saddr, saddr, 4);
		memcpy(&iph->daddr, daddr, 4);
		return t->hlen + sizeof(*iph);

	return -(t->hlen + sizeof(*iph));

static int ipgre_header_parse(const struct sk_buff *skb, unsigned char *haddr)
	const struct iphdr *iph = (const struct iphdr *)skb_mac_header(skb);

	memcpy(haddr, &iph->saddr, 4);

static const struct header_ops ipgre_header_ops = {
	.create	= ipgre_header,
	.parse	= ipgre_header_parse,
#ifdef CONFIG_NET_IPGRE_BROADCAST
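/* For multicast ("broadcast LAN") tunnels the device joins the multicast
 * group on the interface the outer route resolves to when it is brought
 * up, and leaves it again on close.
 */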
static int ipgre_open(struct net_device *dev)
	struct ip_tunnel *t = netdev_priv(dev);

	if (ipv4_is_multicast(t->parms.iph.daddr)) {

		rt = ip_route_output_gre(t->net, &fl4,

					 RT_TOS(t->parms.iph.tos),

			return -EADDRNOTAVAIL;

		if (!__in_dev_get_rtnl(dev))
			return -EADDRNOTAVAIL;
		t->mlink = dev->ifindex;
		ip_mc_inc_group(__in_dev_get_rtnl(dev), t->parms.iph.daddr);

static int ipgre_close(struct net_device *dev)
	struct ip_tunnel *t = netdev_priv(dev);

	if (ipv4_is_multicast(t->parms.iph.daddr) && t->mlink) {
		struct in_device *in_dev;

		in_dev = inetdev_by_index(t->net, t->mlink);

			ip_mc_dec_group(in_dev, t->parms.iph.daddr);

static const struct net_device_ops ipgre_netdev_ops = {
	.ndo_init = ipgre_tunnel_init,
	.ndo_uninit = ip_tunnel_uninit,
#ifdef CONFIG_NET_IPGRE_BROADCAST
	.ndo_open = ipgre_open,
	.ndo_stop = ipgre_close,
	.ndo_start_xmit = ipgre_xmit,
	.ndo_do_ioctl = ipgre_tunnel_ioctl,
	.ndo_change_mtu = ip_tunnel_change_mtu,
	.ndo_get_stats64 = ip_tunnel_get_stats64,
	.ndo_get_iflink = ip_tunnel_get_iflink,
#define GRE_FEATURES	(NETIF_F_SG |		\

static void ipgre_tunnel_setup(struct net_device *dev)
	dev->netdev_ops = &ipgre_netdev_ops;
	dev->type = ARPHRD_IPGRE;
	ip_tunnel_setup(dev, ipgre_net_id);
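/* Common init for gre and gretap devices: derive the GRE header length
 * from the configured output flags, size needed_headroom and the default
 * MTU from it, and advertise checksum/GSO features where the chosen flag
 * combination can still be offloaded.
 */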
static void __gre_tunnel_init(struct net_device *dev)
	struct ip_tunnel *tunnel;

	tunnel = netdev_priv(dev);
	tunnel->tun_hlen = gre_calc_hlen(tunnel->parms.o_flags);
	tunnel->parms.iph.protocol = IPPROTO_GRE;

	tunnel->hlen = tunnel->tun_hlen + tunnel->encap_hlen;

	t_hlen = tunnel->hlen + sizeof(struct iphdr);

	dev->needed_headroom = LL_MAX_HEADER + t_hlen + 4;
	dev->mtu = ETH_DATA_LEN - t_hlen - 4;

	dev->features |= GRE_FEATURES;
	dev->hw_features |= GRE_FEATURES;
	if (!(tunnel->parms.o_flags & TUNNEL_SEQ)) {
		/* TCP offload with GRE SEQ is not supported, nor can we
		 * support two levels of outer headers requiring an update.
		 */
		if (!(tunnel->parms.o_flags & TUNNEL_CSUM) ||
		    (tunnel->encap.type == TUNNEL_ENCAP_NONE)) {
			dev->features |= NETIF_F_GSO_SOFTWARE;
			dev->hw_features |= NETIF_F_GSO_SOFTWARE;

		/* Can use a lockless transmit, unless we generate
		 * output sequences.
		 */
		dev->features |= NETIF_F_LLTX;
static int ipgre_tunnel_init(struct net_device *dev)
	struct ip_tunnel *tunnel = netdev_priv(dev);
	struct iphdr *iph = &tunnel->parms.iph;

	__gre_tunnel_init(dev);

	memcpy(dev->dev_addr, &iph->saddr, 4);
	memcpy(dev->broadcast, &iph->daddr, 4);

	dev->flags = IFF_NOARP;

	if (iph->daddr && !tunnel->collect_md) {
#ifdef CONFIG_NET_IPGRE_BROADCAST
		if (ipv4_is_multicast(iph->daddr)) {

			dev->flags = IFF_BROADCAST;
			dev->header_ops = &ipgre_header_ops;

	} else if (!tunnel->collect_md) {
		dev->header_ops = &ipgre_header_ops;

	return ip_tunnel_init(dev);
static const struct gre_protocol ipgre_protocol = {

	.err_handler = gre_err,

static int __net_init ipgre_init_net(struct net *net)
	return ip_tunnel_init_net(net, ipgre_net_id, &ipgre_link_ops, NULL);

static void __net_exit ipgre_exit_net(struct net *net)
	struct ip_tunnel_net *itn = net_generic(net, ipgre_net_id);

	ip_tunnel_delete_net(itn, &ipgre_link_ops);

static struct pernet_operations ipgre_net_ops = {
	.init = ipgre_init_net,
	.exit = ipgre_exit_net,

	.size = sizeof(struct ip_tunnel_net),
static int ipgre_tunnel_validate(struct nlattr *tb[], struct nlattr *data[])

	if (data[IFLA_GRE_IFLAGS])
		flags |= nla_get_be16(data[IFLA_GRE_IFLAGS]);
	if (data[IFLA_GRE_OFLAGS])
		flags |= nla_get_be16(data[IFLA_GRE_OFLAGS]);
	if (flags & (GRE_VERSION | GRE_ROUTING))

	if (data[IFLA_GRE_COLLECT_METADATA] &&
	    data[IFLA_GRE_ENCAP_TYPE] &&
	    nla_get_u16(data[IFLA_GRE_ENCAP_TYPE]) != TUNNEL_ENCAP_NONE)

static int ipgre_tap_validate(struct nlattr *tb[], struct nlattr *data[])

	if (tb[IFLA_ADDRESS]) {
		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)

		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
			return -EADDRNOTAVAIL;

	if (data[IFLA_GRE_REMOTE]) {
		memcpy(&daddr, nla_data(data[IFLA_GRE_REMOTE]), 4);

	return ipgre_tunnel_validate(tb, data);
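/* Translate IFLA_GRE_* netlink attributes into the common ip_tunnel_parm
 * structure (flags, keys, addresses, ttl, tos, pmtudisc) and into device
 * state such as collect_md and ignore_df.
 */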
static int ipgre_netlink_parms(struct net_device *dev,
			       struct nlattr *data[],
			       struct ip_tunnel_parm *parms)
	struct ip_tunnel *t = netdev_priv(dev);

	memset(parms, 0, sizeof(*parms));

	parms->iph.protocol = IPPROTO_GRE;

	if (data[IFLA_GRE_LINK])
		parms->link = nla_get_u32(data[IFLA_GRE_LINK]);

	if (data[IFLA_GRE_IFLAGS])
		parms->i_flags = gre_flags_to_tnl_flags(nla_get_be16(data[IFLA_GRE_IFLAGS]));

	if (data[IFLA_GRE_OFLAGS])
		parms->o_flags = gre_flags_to_tnl_flags(nla_get_be16(data[IFLA_GRE_OFLAGS]));

	if (data[IFLA_GRE_IKEY])
		parms->i_key = nla_get_be32(data[IFLA_GRE_IKEY]);

	if (data[IFLA_GRE_OKEY])
		parms->o_key = nla_get_be32(data[IFLA_GRE_OKEY]);

	if (data[IFLA_GRE_LOCAL])
		parms->iph.saddr = nla_get_in_addr(data[IFLA_GRE_LOCAL]);

	if (data[IFLA_GRE_REMOTE])
		parms->iph.daddr = nla_get_in_addr(data[IFLA_GRE_REMOTE]);

	if (data[IFLA_GRE_TTL])
		parms->iph.ttl = nla_get_u8(data[IFLA_GRE_TTL]);

	if (data[IFLA_GRE_TOS])
		parms->iph.tos = nla_get_u8(data[IFLA_GRE_TOS]);

	if (!data[IFLA_GRE_PMTUDISC] || nla_get_u8(data[IFLA_GRE_PMTUDISC])) {

		parms->iph.frag_off = htons(IP_DF);

	if (data[IFLA_GRE_COLLECT_METADATA]) {
		t->collect_md = true;
		if (dev->type == ARPHRD_IPGRE)
			dev->type = ARPHRD_NONE;

	if (data[IFLA_GRE_IGNORE_DF]) {
		if (nla_get_u8(data[IFLA_GRE_IGNORE_DF]) &&
		    (parms->iph.frag_off & htons(IP_DF)))

		t->ignore_df = !!nla_get_u8(data[IFLA_GRE_IGNORE_DF]);
/* This function returns true when ENCAP attributes are present in the nl msg */
static bool ipgre_netlink_encap_parms(struct nlattr *data[],
				      struct ip_tunnel_encap *ipencap)

	memset(ipencap, 0, sizeof(*ipencap));

	if (data[IFLA_GRE_ENCAP_TYPE]) {

		ipencap->type = nla_get_u16(data[IFLA_GRE_ENCAP_TYPE]);

	if (data[IFLA_GRE_ENCAP_FLAGS]) {

		ipencap->flags = nla_get_u16(data[IFLA_GRE_ENCAP_FLAGS]);

	if (data[IFLA_GRE_ENCAP_SPORT]) {

		ipencap->sport = nla_get_be16(data[IFLA_GRE_ENCAP_SPORT]);

	if (data[IFLA_GRE_ENCAP_DPORT]) {

		ipencap->dport = nla_get_be16(data[IFLA_GRE_ENCAP_DPORT]);

static int gre_tap_init(struct net_device *dev)
	__gre_tunnel_init(dev);
	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;

	return ip_tunnel_init(dev);

static const struct net_device_ops gre_tap_netdev_ops = {
	.ndo_init = gre_tap_init,
	.ndo_uninit = ip_tunnel_uninit,
	.ndo_start_xmit = gre_tap_xmit,
	.ndo_set_mac_address = eth_mac_addr,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_change_mtu = ip_tunnel_change_mtu,
	.ndo_get_stats64 = ip_tunnel_get_stats64,
	.ndo_get_iflink = ip_tunnel_get_iflink,
	.ndo_fill_metadata_dst = gre_fill_metadata_dst,

static void ipgre_tap_setup(struct net_device *dev)

	dev->netdev_ops = &gre_tap_netdev_ops;
	dev->priv_flags &= ~IFF_TX_SKB_SHARING;
	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
	ip_tunnel_setup(dev, gre_tap_net_id);
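/* rtnetlink newlink/changelink: apply any UDP encapsulation attributes
 * (IFLA_GRE_ENCAP_*) first, then convert the GRE attributes into an
 * ip_tunnel_parm and hand off to the generic ip_tunnel_newlink() or
 * ip_tunnel_changelink().
 */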
static int ipgre_newlink(struct net *src_net, struct net_device *dev,
			 struct nlattr *tb[], struct nlattr *data[])
	struct ip_tunnel_parm p;
	struct ip_tunnel_encap ipencap;

	if (ipgre_netlink_encap_parms(data, &ipencap)) {
		struct ip_tunnel *t = netdev_priv(dev);

		err = ip_tunnel_encap_setup(t, &ipencap);

	err = ipgre_netlink_parms(dev, data, tb, &p);

	return ip_tunnel_newlink(dev, tb, &p);

static int ipgre_changelink(struct net_device *dev, struct nlattr *tb[],
			    struct nlattr *data[])
	struct ip_tunnel_parm p;
	struct ip_tunnel_encap ipencap;

	if (ipgre_netlink_encap_parms(data, &ipencap)) {
		struct ip_tunnel *t = netdev_priv(dev);

		err = ip_tunnel_encap_setup(t, &ipencap);

	err = ipgre_netlink_parms(dev, data, tb, &p);

	return ip_tunnel_changelink(dev, tb, &p);

static size_t ipgre_get_size(const struct net_device *dev)

		/* IFLA_GRE_IFLAGS */
		/* IFLA_GRE_OFLAGS */
		/* IFLA_GRE_LOCAL */
		/* IFLA_GRE_REMOTE */
		/* IFLA_GRE_PMTUDISC */
		/* IFLA_GRE_ENCAP_TYPE */
		/* IFLA_GRE_ENCAP_FLAGS */
		/* IFLA_GRE_ENCAP_SPORT */
		/* IFLA_GRE_ENCAP_DPORT */
		/* IFLA_GRE_COLLECT_METADATA */
		/* IFLA_GRE_IGNORE_DF */
static int ipgre_fill_info(struct sk_buff *skb, const struct net_device *dev)
	struct ip_tunnel *t = netdev_priv(dev);
	struct ip_tunnel_parm *p = &t->parms;

	if (nla_put_u32(skb, IFLA_GRE_LINK, p->link) ||
	    nla_put_be16(skb, IFLA_GRE_IFLAGS,
			 gre_tnl_flags_to_gre_flags(p->i_flags)) ||
	    nla_put_be16(skb, IFLA_GRE_OFLAGS,
			 gre_tnl_flags_to_gre_flags(p->o_flags)) ||
	    nla_put_be32(skb, IFLA_GRE_IKEY, p->i_key) ||
	    nla_put_be32(skb, IFLA_GRE_OKEY, p->o_key) ||
	    nla_put_in_addr(skb, IFLA_GRE_LOCAL, p->iph.saddr) ||
	    nla_put_in_addr(skb, IFLA_GRE_REMOTE, p->iph.daddr) ||
	    nla_put_u8(skb, IFLA_GRE_TTL, p->iph.ttl) ||
	    nla_put_u8(skb, IFLA_GRE_TOS, p->iph.tos) ||
	    nla_put_u8(skb, IFLA_GRE_PMTUDISC,
		       !!(p->iph.frag_off & htons(IP_DF))))
		goto nla_put_failure;

	if (nla_put_u16(skb, IFLA_GRE_ENCAP_TYPE,

	    nla_put_be16(skb, IFLA_GRE_ENCAP_SPORT,

	    nla_put_be16(skb, IFLA_GRE_ENCAP_DPORT,

	    nla_put_u16(skb, IFLA_GRE_ENCAP_FLAGS,

		goto nla_put_failure;

	if (nla_put_u8(skb, IFLA_GRE_IGNORE_DF, t->ignore_df))
		goto nla_put_failure;

	if (t->collect_md) {
		if (nla_put_flag(skb, IFLA_GRE_COLLECT_METADATA))
			goto nla_put_failure;
static const struct nla_policy ipgre_policy[IFLA_GRE_MAX + 1] = {
	[IFLA_GRE_LINK] = { .type = NLA_U32 },
	[IFLA_GRE_IFLAGS] = { .type = NLA_U16 },
	[IFLA_GRE_OFLAGS] = { .type = NLA_U16 },
	[IFLA_GRE_IKEY] = { .type = NLA_U32 },
	[IFLA_GRE_OKEY] = { .type = NLA_U32 },
	[IFLA_GRE_LOCAL] = { .len = FIELD_SIZEOF(struct iphdr, saddr) },
	[IFLA_GRE_REMOTE] = { .len = FIELD_SIZEOF(struct iphdr, daddr) },
	[IFLA_GRE_TTL] = { .type = NLA_U8 },
	[IFLA_GRE_TOS] = { .type = NLA_U8 },
	[IFLA_GRE_PMTUDISC] = { .type = NLA_U8 },
	[IFLA_GRE_ENCAP_TYPE] = { .type = NLA_U16 },
	[IFLA_GRE_ENCAP_FLAGS] = { .type = NLA_U16 },
	[IFLA_GRE_ENCAP_SPORT] = { .type = NLA_U16 },
	[IFLA_GRE_ENCAP_DPORT] = { .type = NLA_U16 },
	[IFLA_GRE_COLLECT_METADATA] = { .type = NLA_FLAG },
	[IFLA_GRE_IGNORE_DF] = { .type = NLA_U8 },
static struct rtnl_link_ops ipgre_link_ops __read_mostly = {

	.maxtype = IFLA_GRE_MAX,
	.policy = ipgre_policy,
	.priv_size = sizeof(struct ip_tunnel),
	.setup = ipgre_tunnel_setup,
	.validate = ipgre_tunnel_validate,
	.newlink = ipgre_newlink,
	.changelink = ipgre_changelink,
	.dellink = ip_tunnel_dellink,
	.get_size = ipgre_get_size,
	.fill_info = ipgre_fill_info,
	.get_link_net = ip_tunnel_get_link_net,

static struct rtnl_link_ops ipgre_tap_ops __read_mostly = {

	.maxtype = IFLA_GRE_MAX,
	.policy = ipgre_policy,
	.priv_size = sizeof(struct ip_tunnel),
	.setup = ipgre_tap_setup,
	.validate = ipgre_tap_validate,
	.newlink = ipgre_newlink,
	.changelink = ipgre_changelink,
	.dellink = ip_tunnel_dellink,
	.get_size = ipgre_get_size,
	.fill_info = ipgre_fill_info,
	.get_link_net = ip_tunnel_get_link_net,
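/* Create a flow-based ("fallback"/collect_md) gretap device on behalf of
 * an in-kernel user such as openvswitch: the device is registered through
 * the gretap rtnl ops above, put into collect_md mode and given the
 * largest possible MTU.
 */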
struct net_device *gretap_fb_dev_create(struct net *net, const char *name,
					u8 name_assign_type)
	struct nlattr *tb[IFLA_MAX + 1];
	struct net_device *dev;
	LIST_HEAD(list_kill);
	struct ip_tunnel *t;

	memset(&tb, 0, sizeof(tb));

	dev = rtnl_create_link(net, name, name_assign_type,
			       &ipgre_tap_ops, tb);

	/* Configure flow based GRE device. */
	t = netdev_priv(dev);
	t->collect_md = true;

	err = ipgre_newlink(net, dev, tb, NULL);

		return ERR_PTR(err);

	/* openvswitch users expect packet sizes to be unrestricted,
	 * so set the largest MTU we can.
	 */
	err = __ip_tunnel_change_mtu(dev, IP_MAX_MTU, false);

	err = rtnl_configure_link(dev, NULL);

	ip_tunnel_dellink(dev, &list_kill);
	unregister_netdevice_many(&list_kill);
	return ERR_PTR(err);

EXPORT_SYMBOL_GPL(gretap_fb_dev_create);
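/* Hedged usage sketch (hypothetical caller, not from this file).  The
 * function is built on rtnl_create_link()/rtnl_configure_link(), so the
 * caller is assumed to hold the rtnl lock, e.g.:
 *
 *	struct net_device *dev;
 *
 *	rtnl_lock();
 *	dev = gretap_fb_dev_create(net, "gretap-demo", NET_NAME_USER);
 *	if (!IS_ERR(dev))
 *		dev_change_flags(dev, dev->flags | IFF_UP);
 *	rtnl_unlock();
 */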
static int __net_init ipgre_tap_init_net(struct net *net)
	return ip_tunnel_init_net(net, gre_tap_net_id, &ipgre_tap_ops, "gretap0");

static void __net_exit ipgre_tap_exit_net(struct net *net)
	struct ip_tunnel_net *itn = net_generic(net, gre_tap_net_id);

	ip_tunnel_delete_net(itn, &ipgre_tap_ops);

static struct pernet_operations ipgre_tap_net_ops = {
	.init = ipgre_tap_init_net,
	.exit = ipgre_tap_exit_net,
	.id   = &gre_tap_net_id,
	.size = sizeof(struct ip_tunnel_net),
static int __init ipgre_init(void)

	pr_info("GRE over IPv4 tunneling driver\n");

	err = register_pernet_device(&ipgre_net_ops);

	err = register_pernet_device(&ipgre_tap_net_ops);

		goto pnet_tap_failed;

	err = gre_add_protocol(&ipgre_protocol, GREPROTO_CISCO);

		pr_info("%s: can't add protocol\n", __func__);
		goto add_proto_failed;

	err = rtnl_link_register(&ipgre_link_ops);

		goto rtnl_link_failed;

	err = rtnl_link_register(&ipgre_tap_ops);

		goto tap_ops_failed;

	rtnl_link_unregister(&ipgre_link_ops);

	gre_del_protocol(&ipgre_protocol, GREPROTO_CISCO);

	unregister_pernet_device(&ipgre_tap_net_ops);

	unregister_pernet_device(&ipgre_net_ops);

static void __exit ipgre_fini(void)
	rtnl_link_unregister(&ipgre_tap_ops);
	rtnl_link_unregister(&ipgre_link_ops);
	gre_del_protocol(&ipgre_protocol, GREPROTO_CISCO);
	unregister_pernet_device(&ipgre_tap_net_ops);
	unregister_pernet_device(&ipgre_net_ops);
module_init(ipgre_init);
module_exit(ipgre_fini);
MODULE_LICENSE("GPL");
MODULE_ALIAS_RTNL_LINK("gre");
MODULE_ALIAS_RTNL_LINK("gretap");
MODULE_ALIAS_NETDEV("gre0");
MODULE_ALIAS_NETDEV("gretap0");