/*
 * IP multicast routing support for mrouted 3.6/3.8
 *
 * (c) 1995 Alan Cox, <alan@lxorguk.ukuu.org.uk>
 * Linux Consultancy and Custom Driver Development
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Michael Chastain	:	Incorrect size of copying.
 * Alan Cox		:	Added the cache manager code
 * Alan Cox		:	Fixed the clone/copy bug and device race.
 * Mike McLagan		:	Routing by source
 * Malcolm Beattie	:	Buffer handling fixes.
 * Alexey Kuznetsov	:	Double buffer free and other fixes.
 * SVR Anand		:	Fixed several multicast bugs and problems.
 * Alexey Kuznetsov	:	Status, optimisations and more.
 * Brad Parker		:	Better behaviour on mrouted upcall
 * Carlos Picoto	:	PIMv1 Support
 * Pavlin Ivanov Radoslavov:	PIMv2 Registers must checksum only PIM header
 *				Relax this requirement to work with older peers.
 */
#include <linux/uaccess.h>
#include <linux/types.h>
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/kernel.h>
#include <linux/fcntl.h>
#include <linux/stat.h>
#include <linux/socket.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/igmp.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/mroute.h>
#include <linux/init.h>
#include <linux/if_ether.h>
#include <linux/slab.h>
#include <net/net_namespace.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
#include <net/route.h>
#include <linux/notifier.h>
#include <linux/if_arp.h>
#include <linux/netfilter_ipv4.h>
#include <linux/compat.h>
#include <linux/export.h>
#include <net/ip_tunnels.h>
#include <net/checksum.h>
#include <net/netlink.h>
#include <net/fib_rules.h>
#include <linux/netconf.h>
#include <net/nexthop.h>
#include <linux/nospec.h>
	struct fib_rule		common;

/* Big lock, protecting vif table, mrt cache and mroute socket state.
 * Note that the changes are semaphored via rtnl_lock.
 */
static DEFINE_RWLOCK(mrt_lock);

/* Multicast router control variables */

/* Special spinlock for queue of unresolved entries */
static DEFINE_SPINLOCK(mfc_unres_lock);
/* We return to Alan's original scheme. The hash table of resolved
 * entries is changed only in process context and protected
 * with the weak lock mrt_lock. The queue of unresolved entries is
 * protected with the strong spinlock mfc_unres_lock.
 *
 * In this case the data path is free of exclusive locks at all.
 */
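/* Concretely: the data path reads under rcu_read_lock() (resolved
 * cache) or a read-held mrt_lock, while every modification happens
 * under RTNL plus a write-locked mrt_lock, so forwarding itself never
 * has to take an exclusive lock.
 */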
static struct kmem_cache *mrt_cachep __read_mostly;

static struct mr_table *ipmr_new_table(struct net *net, u32 id);
static void ipmr_free_table(struct mr_table *mrt);

static void ip_mr_forward(struct net *net, struct mr_table *mrt,
			  struct net_device *dev, struct sk_buff *skb,
			  struct mfc_cache *cache, int local);
static int ipmr_cache_report(struct mr_table *mrt,
			     struct sk_buff *pkt, vifi_t vifi, int assert);
static int __ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
			      struct mfc_cache *c, struct rtmsg *rtm);
static void mroute_netlink_event(struct mr_table *mrt, struct mfc_cache *mfc,
static void igmpmsg_netlink_event(struct mr_table *mrt, struct sk_buff *pkt);
static void mroute_clean_tables(struct mr_table *mrt, bool all);
static void ipmr_expire_process(unsigned long arg);

#ifdef CONFIG_IP_MROUTE_MULTIPLE_TABLES
#define ipmr_for_each_table(mrt, net) \
	list_for_each_entry_rcu(mrt, &net->ipv4.mr_tables, list)

static struct mr_table *ipmr_get_table(struct net *net, u32 id)
	struct mr_table *mrt;

	ipmr_for_each_table(mrt, net) {

static int ipmr_fib_lookup(struct net *net, struct flowi4 *flp4,
			   struct mr_table **mrt)
	struct ipmr_result res;
	struct fib_lookup_arg arg = {
		.flags = FIB_LOOKUP_NOREF,

	/* update flow if oif or iif point to device enslaved to l3mdev */
	l3mdev_update_flow(net, flowi4_to_flowi(flp4));

	err = fib_rules_lookup(net->ipv4.mr_rules_ops,
			       flowi4_to_flowi(flp4), 0, &arg);

static int ipmr_rule_action(struct fib_rule *rule, struct flowi *flp,
			    int flags, struct fib_lookup_arg *arg)
	struct ipmr_result *res = arg->result;
	struct mr_table *mrt;

	switch (rule->action) {
	case FR_ACT_UNREACHABLE:
	case FR_ACT_PROHIBIT:
	case FR_ACT_BLACKHOLE:

	arg->table = fib_rule_get_table(rule, arg);

	mrt = ipmr_get_table(rule->fr_net, arg->table);

static int ipmr_rule_match(struct fib_rule *rule, struct flowi *fl, int flags)

static const struct nla_policy ipmr_rule_policy[FRA_MAX + 1] = {

static int ipmr_rule_configure(struct fib_rule *rule, struct sk_buff *skb,
			       struct fib_rule_hdr *frh, struct nlattr **tb)

static int ipmr_rule_compare(struct fib_rule *rule, struct fib_rule_hdr *frh,

static int ipmr_rule_fill(struct fib_rule *rule, struct sk_buff *skb,
			  struct fib_rule_hdr *frh)

static const struct fib_rules_ops __net_initconst ipmr_rules_ops_template = {
	.family		= RTNL_FAMILY_IPMR,
	.rule_size	= sizeof(struct ipmr_rule),
	.addr_size	= sizeof(u32),
	.action		= ipmr_rule_action,
	.match		= ipmr_rule_match,
	.configure	= ipmr_rule_configure,
	.compare	= ipmr_rule_compare,
	.fill		= ipmr_rule_fill,
	.nlgroup	= RTNLGRP_IPV4_RULE,
	.policy		= ipmr_rule_policy,
	.owner		= THIS_MODULE,

static int __net_init ipmr_rules_init(struct net *net)
	struct fib_rules_ops *ops;
	struct mr_table *mrt;

	ops = fib_rules_register(&ipmr_rules_ops_template, net);

	INIT_LIST_HEAD(&net->ipv4.mr_tables);

	mrt = ipmr_new_table(net, RT_TABLE_DEFAULT);

	err = fib_default_rule_add(ops, 0x7fff, RT_TABLE_DEFAULT, 0);

	net->ipv4.mr_rules_ops = ops;

	ipmr_free_table(mrt);

	fib_rules_unregister(ops);

static void __net_exit ipmr_rules_exit(struct net *net)
	struct mr_table *mrt, *next;

	list_for_each_entry_safe(mrt, next, &net->ipv4.mr_tables, list) {
		list_del(&mrt->list);
		ipmr_free_table(mrt);
	fib_rules_unregister(net->ipv4.mr_rules_ops);

#define ipmr_for_each_table(mrt, net) \
	for (mrt = net->ipv4.mrt; mrt; mrt = NULL)

static struct mr_table *ipmr_get_table(struct net *net, u32 id)
	return net->ipv4.mrt;

static int ipmr_fib_lookup(struct net *net, struct flowi4 *flp4,
			   struct mr_table **mrt)
	*mrt = net->ipv4.mrt;

static int __net_init ipmr_rules_init(struct net *net)
	struct mr_table *mrt;

	mrt = ipmr_new_table(net, RT_TABLE_DEFAULT);

static void __net_exit ipmr_rules_exit(struct net *net)
	ipmr_free_table(net->ipv4.mrt);
	net->ipv4.mrt = NULL;

static inline int ipmr_hash_cmp(struct rhashtable_compare_arg *arg,
	const struct mfc_cache_cmp_arg *cmparg = arg->key;
	struct mfc_cache *c = (struct mfc_cache *)ptr;

	return cmparg->mfc_mcastgrp != c->mfc_mcastgrp ||
	       cmparg->mfc_origin != c->mfc_origin;

static const struct rhashtable_params ipmr_rht_params = {
	.head_offset = offsetof(struct mfc_cache, mnode),
	.key_offset = offsetof(struct mfc_cache, cmparg),
	.key_len = sizeof(struct mfc_cache_cmp_arg),
	.obj_cmpfn = ipmr_hash_cmp,
	.automatic_shrinking = true,
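/* Resolved entries thus hash on the (origin, group) pair held in each
 * entry's cmparg; entries that share (S,G) but differ in parent vif
 * sit on the same rhlist chain.
 */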
static struct mr_table *ipmr_new_table(struct net *net, u32 id)
	struct mr_table *mrt;

	/* "pimreg%u" should not exceed 16 bytes (IFNAMSIZ) */
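	/* ("pimreg" is 6 characters, so an id below 1000000000 leaves
	 * room for at most 9 digits plus the trailing NUL)
	 */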
	if (id != RT_TABLE_DEFAULT && id >= 1000000000)
		return ERR_PTR(-EINVAL);

	mrt = ipmr_get_table(net, id);

	mrt = kzalloc(sizeof(*mrt), GFP_KERNEL);
		return ERR_PTR(-ENOMEM);
	write_pnet(&mrt->net, net);

	err = rhltable_init(&mrt->mfc_hash, &ipmr_rht_params);

	INIT_LIST_HEAD(&mrt->mfc_cache_list);
	INIT_LIST_HEAD(&mrt->mfc_unres_queue);

	setup_timer(&mrt->ipmr_expire_timer, ipmr_expire_process,

	mrt->mroute_reg_vif_num = -1;
#ifdef CONFIG_IP_MROUTE_MULTIPLE_TABLES
	list_add_tail_rcu(&mrt->list, &net->ipv4.mr_tables);

static void ipmr_free_table(struct mr_table *mrt)
	del_timer_sync(&mrt->ipmr_expire_timer);
	mroute_clean_tables(mrt, true);
	rhltable_destroy(&mrt->mfc_hash);

/* Service routines creating virtual interfaces: DVMRP tunnels and PIMREG */

static void ipmr_del_tunnel(struct net_device *dev, struct vifctl *v)
	struct net *net = dev_net(dev);

	dev = __dev_get_by_name(net, "tunl0");
		const struct net_device_ops *ops = dev->netdev_ops;
		struct ip_tunnel_parm p;

		memset(&p, 0, sizeof(p));
		p.iph.daddr = v->vifc_rmt_addr.s_addr;
		p.iph.saddr = v->vifc_lcl_addr.s_addr;
		p.iph.protocol = IPPROTO_IPIP;
		sprintf(p.name, "dvmrp%d", v->vifc_vifi);
		ifr.ifr_ifru.ifru_data = (__force void __user *)&p;

		if (ops->ndo_do_ioctl) {
			mm_segment_t oldfs = get_fs();

			ops->ndo_do_ioctl(dev, &ifr, SIOCDELTUNNEL);

/* Initialize ipmr pimreg/tunnel in_device */
static bool ipmr_init_vif_indev(const struct net_device *dev)
	struct in_device *in_dev;

	in_dev = __in_dev_get_rtnl(dev);

	ipv4_devconf_setall(in_dev);
	neigh_parms_data_state_setall(in_dev->arp_parms);
	IPV4_DEVCONF(in_dev->cnf, RP_FILTER) = 0;

static struct net_device *ipmr_new_tunnel(struct net *net, struct vifctl *v)
	struct net_device *dev;

	dev = __dev_get_by_name(net, "tunl0");
		const struct net_device_ops *ops = dev->netdev_ops;
		struct ip_tunnel_parm p;

		memset(&p, 0, sizeof(p));
		p.iph.daddr = v->vifc_rmt_addr.s_addr;
		p.iph.saddr = v->vifc_lcl_addr.s_addr;
		p.iph.protocol = IPPROTO_IPIP;
		sprintf(p.name, "dvmrp%d", v->vifc_vifi);
		ifr.ifr_ifru.ifru_data = (__force void __user *)&p;

		if (ops->ndo_do_ioctl) {
			mm_segment_t oldfs = get_fs();

			err = ops->ndo_do_ioctl(dev, &ifr, SIOCADDTUNNEL);

	    (dev = __dev_get_by_name(net, p.name)) != NULL) {
		dev->flags |= IFF_MULTICAST;
		if (!ipmr_init_vif_indev(dev))

	unregister_netdevice(dev);

#if defined(CONFIG_IP_PIMSM_V1) || defined(CONFIG_IP_PIMSM_V2)
static netdev_tx_t reg_vif_xmit(struct sk_buff *skb, struct net_device *dev)
	struct net *net = dev_net(dev);
	struct mr_table *mrt;
	struct flowi4 fl4 = {
		.flowi4_oif	= dev->ifindex,
		.flowi4_iif	= skb->skb_iif ? : LOOPBACK_IFINDEX,
		.flowi4_mark	= skb->mark,

	err = ipmr_fib_lookup(net, &fl4, &mrt);

	read_lock(&mrt_lock);
	dev->stats.tx_bytes += skb->len;
	dev->stats.tx_packets++;
	ipmr_cache_report(mrt, skb, mrt->mroute_reg_vif_num, IGMPMSG_WHOLEPKT);
	read_unlock(&mrt_lock);

static int reg_vif_get_iflink(const struct net_device *dev)

static const struct net_device_ops reg_vif_netdev_ops = {
	.ndo_start_xmit	= reg_vif_xmit,
	.ndo_get_iflink = reg_vif_get_iflink,

static void reg_vif_setup(struct net_device *dev)
	dev->type		= ARPHRD_PIMREG;
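	/* room for the outer IPv4 header (20 bytes) plus the 8-byte PIM
	 * register header in front of the encapsulated packet
	 */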
	dev->mtu		= ETH_DATA_LEN - sizeof(struct iphdr) - 8;
	dev->flags		= IFF_NOARP;
	dev->netdev_ops		= &reg_vif_netdev_ops;
	dev->needs_free_netdev	= true;
	dev->features		|= NETIF_F_NETNS_LOCAL;
static struct net_device *ipmr_reg_vif(struct net *net, struct mr_table *mrt)
	struct net_device *dev;

	if (mrt->id == RT_TABLE_DEFAULT)
		sprintf(name, "pimreg");
	else
		sprintf(name, "pimreg%u", mrt->id);

	dev = alloc_netdev(0, name, NET_NAME_UNKNOWN, reg_vif_setup);

	dev_net_set(dev, net);

	if (register_netdevice(dev)) {

	if (!ipmr_init_vif_indev(dev))

	unregister_netdevice(dev);

/* called with rcu_read_lock() */
static int __pim_rcv(struct mr_table *mrt, struct sk_buff *skb,
	struct net_device *reg_dev = NULL;

	encap = (struct iphdr *)(skb_transport_header(skb) + pimlen);
	/* Check that:
	 * a. packet is really sent to a multicast group
	 * b. packet is not a NULL-REGISTER
	 * c. packet is not truncated
	 */
	if (!ipv4_is_multicast(encap->daddr) ||
	    encap->tot_len == 0 ||
	    ntohs(encap->tot_len) + pimlen > skb->len)
	read_lock(&mrt_lock);
	if (mrt->mroute_reg_vif_num >= 0)
		reg_dev = mrt->vif_table[mrt->mroute_reg_vif_num].dev;
	read_unlock(&mrt_lock);

	skb->mac_header = skb->network_header;
	skb_pull(skb, (u8 *)encap - skb->data);
	skb_reset_network_header(skb);
	skb->protocol = htons(ETH_P_IP);
	skb->ip_summed = CHECKSUM_NONE;

	skb_tunnel_rx(skb, reg_dev, dev_net(reg_dev));

	return NET_RX_SUCCESS;

static struct net_device *ipmr_reg_vif(struct net *net, struct mr_table *mrt)

/**
 * vif_delete - Delete a VIF entry
 * @notify: Set to 1, if the caller is a notifier_call
 */
static int vif_delete(struct mr_table *mrt, int vifi, int notify,
		      struct list_head *head)
	struct vif_device *v;
	struct net_device *dev;
	struct in_device *in_dev;

	if (vifi < 0 || vifi >= mrt->maxvif)
		return -EADDRNOTAVAIL;

	v = &mrt->vif_table[vifi];

	write_lock_bh(&mrt_lock);
		write_unlock_bh(&mrt_lock);
		return -EADDRNOTAVAIL;

	if (vifi == mrt->mroute_reg_vif_num)
		mrt->mroute_reg_vif_num = -1;

	if (vifi + 1 == mrt->maxvif) {
		for (tmp = vifi - 1; tmp >= 0; tmp--) {
			if (VIF_EXISTS(mrt, tmp))
	write_unlock_bh(&mrt_lock);

	dev_set_allmulti(dev, -1);

	in_dev = __in_dev_get_rtnl(dev);
		IPV4_DEVCONF(in_dev->cnf, MC_FORWARDING)--;
		inet_netconf_notify_devconf(dev_net(dev), RTM_NEWNETCONF,
					    NETCONFA_MC_FORWARDING,
					    dev->ifindex, &in_dev->cnf);
		ip_rt_multicast_event(in_dev);

	if (v->flags & (VIFF_TUNNEL | VIFF_REGISTER) && !notify)
		unregister_netdevice_queue(dev, head);

static void ipmr_cache_free_rcu(struct rcu_head *head)
	struct mfc_cache *c = container_of(head, struct mfc_cache, rcu);

	kmem_cache_free(mrt_cachep, c);

static inline void ipmr_cache_free(struct mfc_cache *c)
	call_rcu(&c->rcu, ipmr_cache_free_rcu);

/* Destroy an unresolved cache entry, killing queued skbs
 * and reporting error to netlink readers.
 */
static void ipmr_destroy_unres(struct mr_table *mrt, struct mfc_cache *c)
	struct net *net = read_pnet(&mrt->net);

	atomic_dec(&mrt->cache_resolve_queue_len);

	while ((skb = skb_dequeue(&c->mfc_un.unres.unresolved))) {
		if (ip_hdr(skb)->version == 0) {
			struct nlmsghdr *nlh = skb_pull(skb,
							sizeof(struct iphdr));
			nlh->nlmsg_type = NLMSG_ERROR;
			nlh->nlmsg_len = nlmsg_msg_size(sizeof(struct nlmsgerr));
			skb_trim(skb, nlh->nlmsg_len);
			e->error = -ETIMEDOUT;
			memset(&e->msg, 0, sizeof(e->msg));

			rtnl_unicast(skb, net, NETLINK_CB(skb).portid);

/* Timer process for the unresolved queue. */
static void ipmr_expire_process(unsigned long arg)
	struct mr_table *mrt = (struct mr_table *)arg;
	unsigned long expires;
	struct mfc_cache *c, *next;

	if (!spin_trylock(&mfc_unres_lock)) {
		mod_timer(&mrt->ipmr_expire_timer, jiffies + HZ / 10);

	if (list_empty(&mrt->mfc_unres_queue))

	list_for_each_entry_safe(c, next, &mrt->mfc_unres_queue, list) {
		if (time_after(c->mfc_un.unres.expires, now)) {
			unsigned long interval = c->mfc_un.unres.expires - now;
			if (interval < expires)

		mroute_netlink_event(mrt, c, RTM_DELROUTE);
		ipmr_destroy_unres(mrt, c);

	if (!list_empty(&mrt->mfc_unres_queue))
		mod_timer(&mrt->ipmr_expire_timer, jiffies + expires);

	spin_unlock(&mfc_unres_lock);
/* Fill the oifs list. Called under write-locked mrt_lock. */
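/* A threshold of 0 or 255 in ttls[] means "never forward on that vif";
 * minvif/maxvif merely bound the scan that ip_mr_forward() performs.
 */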
static void ipmr_update_thresholds(struct mr_table *mrt, struct mfc_cache *cache,

	cache->mfc_un.res.minvif = MAXVIFS;
	cache->mfc_un.res.maxvif = 0;
	memset(cache->mfc_un.res.ttls, 255, MAXVIFS);

	for (vifi = 0; vifi < mrt->maxvif; vifi++) {
		if (VIF_EXISTS(mrt, vifi) &&
		    ttls[vifi] && ttls[vifi] < 255) {
			cache->mfc_un.res.ttls[vifi] = ttls[vifi];
			if (cache->mfc_un.res.minvif > vifi)
				cache->mfc_un.res.minvif = vifi;
			if (cache->mfc_un.res.maxvif <= vifi)
				cache->mfc_un.res.maxvif = vifi + 1;

	cache->mfc_un.res.lastuse = jiffies;
static int vif_add(struct net *net, struct mr_table *mrt,
		   struct vifctl *vifc, int mrtsock)
	int vifi = vifc->vifc_vifi;
	struct vif_device *v = &mrt->vif_table[vifi];
	struct net_device *dev;
	struct in_device *in_dev;

	if (VIF_EXISTS(mrt, vifi))

	switch (vifc->vifc_flags) {
		if (!ipmr_pimsm_enabled())
		/* Special Purpose VIF in PIM
		 * All the packets will be sent to the daemon
		 */
		if (mrt->mroute_reg_vif_num >= 0)
		dev = ipmr_reg_vif(net, mrt);
		err = dev_set_allmulti(dev, 1);
			unregister_netdevice(dev);

		dev = ipmr_new_tunnel(net, vifc);
		err = dev_set_allmulti(dev, 1);
			ipmr_del_tunnel(dev, vifc);

	case VIFF_USE_IFINDEX:
		if (vifc->vifc_flags == VIFF_USE_IFINDEX) {
			dev = dev_get_by_index(net, vifc->vifc_lcl_ifindex);
			if (dev && !__in_dev_get_rtnl(dev)) {
				return -EADDRNOTAVAIL;
			dev = ip_dev_find(net, vifc->vifc_lcl_addr.s_addr);
			return -EADDRNOTAVAIL;
		err = dev_set_allmulti(dev, 1);

	in_dev = __in_dev_get_rtnl(dev);
		return -EADDRNOTAVAIL;
	IPV4_DEVCONF(in_dev->cnf, MC_FORWARDING)++;
	inet_netconf_notify_devconf(net, RTM_NEWNETCONF, NETCONFA_MC_FORWARDING,
				    dev->ifindex, &in_dev->cnf);
	ip_rt_multicast_event(in_dev);

	/* Fill in the VIF structures */
	v->rate_limit = vifc->vifc_rate_limit;
	v->local = vifc->vifc_lcl_addr.s_addr;
	v->remote = vifc->vifc_rmt_addr.s_addr;
	v->flags = vifc->vifc_flags;
		v->flags |= VIFF_STATIC;
	v->threshold = vifc->vifc_threshold;
	v->link = dev->ifindex;
	if (v->flags & (VIFF_TUNNEL | VIFF_REGISTER))
		v->link = dev_get_iflink(dev);

	/* And finish update writing critical data */
	write_lock_bh(&mrt_lock);
	if (v->flags & VIFF_REGISTER)
		mrt->mroute_reg_vif_num = vifi;
	if (vifi + 1 > mrt->maxvif)
		mrt->maxvif = vifi + 1;
	write_unlock_bh(&mrt_lock);
/* called with rcu_read_lock() */
static struct mfc_cache *ipmr_cache_find(struct mr_table *mrt,
	struct mfc_cache_cmp_arg arg = {
		.mfc_mcastgrp = mcastgrp,
	struct rhlist_head *tmp, *list;

	list = rhltable_lookup(&mrt->mfc_hash, &arg, ipmr_rht_params);
	rhl_for_each_entry_rcu(c, tmp, list, mnode)

/* Look for a (*,*,oif) entry */
static struct mfc_cache *ipmr_cache_find_any_parent(struct mr_table *mrt,
	struct mfc_cache_cmp_arg arg = {
		.mfc_mcastgrp = htonl(INADDR_ANY),
		.mfc_origin = htonl(INADDR_ANY)
	struct rhlist_head *tmp, *list;

	list = rhltable_lookup(&mrt->mfc_hash, &arg, ipmr_rht_params);
	rhl_for_each_entry_rcu(c, tmp, list, mnode)
		if (c->mfc_un.res.ttls[vifi] < 255)

/* Look for a (*,G) entry */
static struct mfc_cache *ipmr_cache_find_any(struct mr_table *mrt,
					     __be32 mcastgrp, int vifi)
	struct mfc_cache_cmp_arg arg = {
		.mfc_mcastgrp = mcastgrp,
		.mfc_origin = htonl(INADDR_ANY)
	struct rhlist_head *tmp, *list;
	struct mfc_cache *c, *proxy;

	if (mcastgrp == htonl(INADDR_ANY))

	list = rhltable_lookup(&mrt->mfc_hash, &arg, ipmr_rht_params);
	rhl_for_each_entry_rcu(c, tmp, list, mnode) {
		if (c->mfc_un.res.ttls[vifi] < 255)

		/* It's ok if the vifi is part of the static tree */
		proxy = ipmr_cache_find_any_parent(mrt, c->mfc_parent);
		if (proxy && proxy->mfc_un.res.ttls[vifi] < 255)

	return ipmr_cache_find_any_parent(mrt, vifi);

/* Look for a (S,G,iif) entry if parent != -1 */
static struct mfc_cache *ipmr_cache_find_parent(struct mr_table *mrt,
						__be32 origin, __be32 mcastgrp,
	struct mfc_cache_cmp_arg arg = {
		.mfc_mcastgrp = mcastgrp,
		.mfc_origin = origin,
	struct rhlist_head *tmp, *list;

	list = rhltable_lookup(&mrt->mfc_hash, &arg, ipmr_rht_params);
	rhl_for_each_entry_rcu(c, tmp, list, mnode)
		if (parent == -1 || parent == c->mfc_parent)

/* Allocate a multicast cache entry */
static struct mfc_cache *ipmr_cache_alloc(void)
	struct mfc_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_KERNEL);

		c->mfc_un.res.last_assert = jiffies - MFC_ASSERT_THRESH - 1;
		c->mfc_un.res.minvif = MAXVIFS;
static struct mfc_cache *ipmr_cache_alloc_unres(void)
	struct mfc_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_ATOMIC);

		skb_queue_head_init(&c->mfc_un.unres.unresolved);
		c->mfc_un.unres.expires = jiffies + 10 * HZ;

/* A cache entry has gone into a resolved state from queued */
static void ipmr_cache_resolve(struct net *net, struct mr_table *mrt,
			       struct mfc_cache *uc, struct mfc_cache *c)

	/* Play the pending entries through our router */
	while ((skb = __skb_dequeue(&uc->mfc_un.unres.unresolved))) {
		if (ip_hdr(skb)->version == 0) {
			struct nlmsghdr *nlh = skb_pull(skb,
							sizeof(struct iphdr));

			if (__ipmr_fill_mroute(mrt, skb, c, nlmsg_data(nlh)) > 0) {
				nlh->nlmsg_len = skb_tail_pointer(skb) -
				nlh->nlmsg_type = NLMSG_ERROR;
				nlh->nlmsg_len = nlmsg_msg_size(sizeof(struct nlmsgerr));
				skb_trim(skb, nlh->nlmsg_len);
				e->error = -EMSGSIZE;
				memset(&e->msg, 0, sizeof(e->msg));

			rtnl_unicast(skb, net, NETLINK_CB(skb).portid);

			ip_mr_forward(net, mrt, skb->dev, skb, c, 0);
/* Bounce a cache query up to mrouted and netlink.
 *
 * Called under mrt_lock.
 */
static int ipmr_cache_report(struct mr_table *mrt,
			     struct sk_buff *pkt, vifi_t vifi, int assert)
	const int ihl = ip_hdrlen(pkt);
	struct sock *mroute_sk;
	struct igmphdr *igmp;
	struct igmpmsg *msg;
	struct sk_buff *skb;

	if (assert == IGMPMSG_WHOLEPKT)
		skb = skb_realloc_headroom(pkt, sizeof(struct iphdr));
	else
		skb = alloc_skb(128, GFP_ATOMIC);

	if (assert == IGMPMSG_WHOLEPKT) {
		/* Ugly, but we have no choice with this interface.
		 * Duplicate old header, fix ihl, length etc.
		 * And all this only to mangle msg->im_msgtype and
		 * to set msg->im_mbz to "mbz" :-)
		 */
		skb_push(skb, sizeof(struct iphdr));
		skb_reset_network_header(skb);
		skb_reset_transport_header(skb);
		msg = (struct igmpmsg *)skb_network_header(skb);
		memcpy(msg, skb_network_header(pkt), sizeof(struct iphdr));
		msg->im_msgtype = IGMPMSG_WHOLEPKT;
		msg->im_vif = mrt->mroute_reg_vif_num;
		ip_hdr(skb)->ihl = sizeof(struct iphdr) >> 2;
		ip_hdr(skb)->tot_len = htons(ntohs(ip_hdr(pkt)->tot_len) +
					     sizeof(struct iphdr));

		/* Copy the IP header */
		skb_set_network_header(skb, skb->len);
		skb_copy_to_linear_data(skb, pkt->data, ihl);
		/* Flag to the kernel this is a route add */
		ip_hdr(skb)->protocol = 0;
		msg = (struct igmpmsg *)skb_network_header(skb);
		skb_dst_set(skb, dst_clone(skb_dst(pkt)));

		/* Add our header */
		igmp = skb_put(skb, sizeof(struct igmphdr));
		igmp->type = assert;
		msg->im_msgtype = assert;
		ip_hdr(skb)->tot_len = htons(skb->len);	/* Fix the length */
		skb->transport_header = skb->network_header;

	mroute_sk = rcu_dereference(mrt->mroute_sk);

	igmpmsg_netlink_event(mrt, skb);

	/* Deliver to mrouted */
	ret = sock_queue_rcv_skb(mroute_sk, skb);
		net_warn_ratelimited("mroute: pending queue full, dropping entries\n");
/* Queue a packet for resolution. It gets a locked cache entry! */
static int ipmr_cache_unresolved(struct mr_table *mrt, vifi_t vifi,
				 struct sk_buff *skb, struct net_device *dev)
	const struct iphdr *iph = ip_hdr(skb);
	struct mfc_cache *c;

	spin_lock_bh(&mfc_unres_lock);
	list_for_each_entry(c, &mrt->mfc_unres_queue, list) {
		if (c->mfc_mcastgrp == iph->daddr &&
		    c->mfc_origin == iph->saddr) {

	/* Create a new entry if allowable */
	if (atomic_read(&mrt->cache_resolve_queue_len) >= 10 ||
	    (c = ipmr_cache_alloc_unres()) == NULL) {
		spin_unlock_bh(&mfc_unres_lock);

	/* Fill in the new cache entry */
	c->mfc_origin = iph->saddr;
	c->mfc_mcastgrp = iph->daddr;

	/* Reflect first query at mrouted. */
	err = ipmr_cache_report(mrt, skb, vifi, IGMPMSG_NOCACHE);
		/* If the report failed, throw the cache entry out. */
		spin_unlock_bh(&mfc_unres_lock);

	atomic_inc(&mrt->cache_resolve_queue_len);
	list_add(&c->list, &mrt->mfc_unres_queue);
	mroute_netlink_event(mrt, c, RTM_NEWROUTE);

	if (atomic_read(&mrt->cache_resolve_queue_len) == 1)
		mod_timer(&mrt->ipmr_expire_timer, c->mfc_un.unres.expires);

	/* See if we can append the packet */
	if (c->mfc_un.unres.unresolved.qlen > 3) {

	skb->skb_iif = dev->ifindex;

	skb_queue_tail(&c->mfc_un.unres.unresolved, skb);

	spin_unlock_bh(&mfc_unres_lock);
/* MFC cache manipulation by user space mroute daemon */
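/* e.g. (illustrative sketch, from the daemon side): an (S,G) route is
 * pushed down with MRT_ADD_MFC on the mroute socket:
 *
 *	struct mfcctl mc = {0};
 *	mc.mfcc_origin.s_addr   = inet_addr("10.0.0.1");    // example S
 *	mc.mfcc_mcastgrp.s_addr = inet_addr("239.1.1.1");   // example G
 *	mc.mfcc_parent          = 0;        // incoming vif
 *	mc.mfcc_ttls[1]         = 1;        // forward on vif 1
 *	setsockopt(s, IPPROTO_IP, MRT_ADD_MFC, &mc, sizeof(mc));
 *
 * (the addresses and vif numbers above are made up for illustration)
 */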
static int ipmr_mfc_delete(struct mr_table *mrt, struct mfcctl *mfc, int parent)
	struct mfc_cache *c;

	/* The entries are added/deleted only under RTNL */
	c = ipmr_cache_find_parent(mrt, mfc->mfcc_origin.s_addr,
				   mfc->mfcc_mcastgrp.s_addr, parent);
	rhltable_remove(&mrt->mfc_hash, &c->mnode, ipmr_rht_params);
	list_del_rcu(&c->list);
	mroute_netlink_event(mrt, c, RTM_DELROUTE);
static int ipmr_mfc_add(struct net *net, struct mr_table *mrt,
			struct mfcctl *mfc, int mrtsock, int parent)
	struct mfc_cache *uc, *c;

	if (mfc->mfcc_parent >= MAXVIFS)

	/* The entries are added/deleted only under RTNL */
	c = ipmr_cache_find_parent(mrt, mfc->mfcc_origin.s_addr,
				   mfc->mfcc_mcastgrp.s_addr, parent);
		write_lock_bh(&mrt_lock);
		c->mfc_parent = mfc->mfcc_parent;
		ipmr_update_thresholds(mrt, c, mfc->mfcc_ttls);
			c->mfc_flags |= MFC_STATIC;
		write_unlock_bh(&mrt_lock);
		mroute_netlink_event(mrt, c, RTM_NEWROUTE);

	if (mfc->mfcc_mcastgrp.s_addr != htonl(INADDR_ANY) &&
	    !ipv4_is_multicast(mfc->mfcc_mcastgrp.s_addr))

	c = ipmr_cache_alloc();

	c->mfc_origin = mfc->mfcc_origin.s_addr;
	c->mfc_mcastgrp = mfc->mfcc_mcastgrp.s_addr;
	c->mfc_parent = mfc->mfcc_parent;
	ipmr_update_thresholds(mrt, c, mfc->mfcc_ttls);
		c->mfc_flags |= MFC_STATIC;

	ret = rhltable_insert_key(&mrt->mfc_hash, &c->cmparg, &c->mnode,
		pr_err("ipmr: rhtable insert error %d\n", ret);

	list_add_tail_rcu(&c->list, &mrt->mfc_cache_list);

	/* Check to see if we resolved a queued list. If so we
	 * need to send on the frames and tidy up.
	 */
	spin_lock_bh(&mfc_unres_lock);
	list_for_each_entry(uc, &mrt->mfc_unres_queue, list) {
		if (uc->mfc_origin == c->mfc_origin &&
		    uc->mfc_mcastgrp == c->mfc_mcastgrp) {
			list_del(&uc->list);
			atomic_dec(&mrt->cache_resolve_queue_len);

	if (list_empty(&mrt->mfc_unres_queue))
		del_timer(&mrt->ipmr_expire_timer);
	spin_unlock_bh(&mfc_unres_lock);

		ipmr_cache_resolve(net, mrt, uc, c);
		ipmr_cache_free(uc);

	mroute_netlink_event(mrt, c, RTM_NEWROUTE);
/* Close the multicast socket, and clear the vif tables etc */
static void mroute_clean_tables(struct mr_table *mrt, bool all)
	struct mfc_cache *c, *tmp;

	/* Shut down all active vif entries */
	for (i = 0; i < mrt->maxvif; i++) {
		if (!all && (mrt->vif_table[i].flags & VIFF_STATIC))
		vif_delete(mrt, i, 0, &list);
	unregister_netdevice_many(&list);

	/* Wipe the cache */
	list_for_each_entry_safe(c, tmp, &mrt->mfc_cache_list, list) {
		if (!all && (c->mfc_flags & MFC_STATIC))
		rhltable_remove(&mrt->mfc_hash, &c->mnode, ipmr_rht_params);
		list_del_rcu(&c->list);
		mroute_netlink_event(mrt, c, RTM_DELROUTE);

	if (atomic_read(&mrt->cache_resolve_queue_len) != 0) {
		spin_lock_bh(&mfc_unres_lock);
		list_for_each_entry_safe(c, tmp, &mrt->mfc_unres_queue, list) {
			mroute_netlink_event(mrt, c, RTM_DELROUTE);
			ipmr_destroy_unres(mrt, c);
		spin_unlock_bh(&mfc_unres_lock);
/* Called from ip_ra_control(), before an RCU grace period;
 * we don't need to call synchronize_rcu() here.
 */
static void mrtsock_destruct(struct sock *sk)
	struct net *net = sock_net(sk);
	struct mr_table *mrt;

	ipmr_for_each_table(mrt, net) {
		if (sk == rtnl_dereference(mrt->mroute_sk)) {
			IPV4_DEVCONF_ALL(net, MC_FORWARDING)--;
			inet_netconf_notify_devconf(net, RTM_NEWNETCONF,
						    NETCONFA_MC_FORWARDING,
						    NETCONFA_IFINDEX_ALL,
						    net->ipv4.devconf_all);
			RCU_INIT_POINTER(mrt->mroute_sk, NULL);
			mroute_clean_tables(mrt, false);
/* Socket options and virtual interface manipulation. The whole
 * virtual interface system is a complete heap, but unfortunately
 * that's how BSD mrouted happens to think. Maybe one day with a proper
 * MOSPF/PIM router set up we can clean this up.
 */
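/* e.g. (illustrative sketch): a daemon such as mrouted becomes the
 * multicast router for a table by opening a raw IGMP socket and
 * issuing MRT_INIT:
 *
 *	int s = socket(AF_INET, SOCK_RAW, IPPROTO_IGMP);
 *	int on = 1;
 *	setsockopt(s, IPPROTO_IP, MRT_INIT, &on, sizeof(on));
 *
 * Every other MRT_* option below is then only honoured on this socket,
 * or from a CAP_NET_ADMIN caller.
 */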
int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval,
			 unsigned int optlen)
	struct net *net = sock_net(sk);
	int val, ret = 0, parent = 0;
	struct mr_table *mrt;

	/* There's one exception to the lock - MRT_DONE which needs to unlock */
	if (sk->sk_type != SOCK_RAW ||
	    inet_sk(sk)->inet_num != IPPROTO_IGMP) {

	mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT);

	if (optname != MRT_INIT) {
		if (sk != rcu_access_pointer(mrt->mroute_sk) &&
		    !ns_capable(net->user_ns, CAP_NET_ADMIN)) {

		if (optlen != sizeof(int)) {
		if (rtnl_dereference(mrt->mroute_sk)) {

		ret = ip_ra_control(sk, 1, mrtsock_destruct);
			rcu_assign_pointer(mrt->mroute_sk, sk);
			IPV4_DEVCONF_ALL(net, MC_FORWARDING)++;
			inet_netconf_notify_devconf(net, RTM_NEWNETCONF,
						    NETCONFA_MC_FORWARDING,
						    NETCONFA_IFINDEX_ALL,
						    net->ipv4.devconf_all);

		if (sk != rcu_access_pointer(mrt->mroute_sk)) {
			ret = ip_ra_control(sk, 0, NULL);

		if (optlen != sizeof(vif)) {
		if (copy_from_user(&vif, optval, sizeof(vif))) {
		if (vif.vifc_vifi >= MAXVIFS) {
		if (optname == MRT_ADD_VIF) {
			ret = vif_add(net, mrt, &vif,
				      sk == rtnl_dereference(mrt->mroute_sk));
			ret = vif_delete(mrt, vif.vifc_vifi, 0, NULL);

	/* Manipulate the forwarding caches. These live
	 * in a sort of kernel/user symbiosis.
	 */
	case MRT_ADD_MFC_PROXY:
	case MRT_DEL_MFC_PROXY:
		if (optlen != sizeof(mfc)) {
		if (copy_from_user(&mfc, optval, sizeof(mfc))) {
		parent = mfc.mfcc_parent;
		if (optname == MRT_DEL_MFC || optname == MRT_DEL_MFC_PROXY)
			ret = ipmr_mfc_delete(mrt, &mfc, parent);
			ret = ipmr_mfc_add(net, mrt, &mfc,
					   sk == rtnl_dereference(mrt->mroute_sk),
	/* Control PIM assert. */
		if (optlen != sizeof(val)) {
		if (get_user(val, (int __user *)optval)) {
		mrt->mroute_do_assert = val;
		if (!ipmr_pimsm_enabled()) {
		if (optlen != sizeof(val)) {
		if (get_user(val, (int __user *)optval)) {

		if (val != mrt->mroute_do_pim) {
			mrt->mroute_do_pim = val;
			mrt->mroute_do_assert = val;
		if (!IS_BUILTIN(CONFIG_IP_MROUTE_MULTIPLE_TABLES)) {
		if (optlen != sizeof(uval)) {
		if (get_user(uval, (u32 __user *)optval)) {

		if (sk == rtnl_dereference(mrt->mroute_sk)) {
			mrt = ipmr_new_table(net, uval);
				raw_sk(sk)->ipmr_table = uval;

	/* Spurious command, or MRT_VERSION which you cannot set. */
/* Getsockopt support for the multicast routing system. */
int ip_mroute_getsockopt(struct sock *sk, int optname, char __user *optval, int __user *optlen)
	struct net *net = sock_net(sk);
	struct mr_table *mrt;

	if (sk->sk_type != SOCK_RAW ||
	    inet_sk(sk)->inet_num != IPPROTO_IGMP)

	mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT);

		if (!ipmr_pimsm_enabled())
			return -ENOPROTOOPT;
		val = mrt->mroute_do_pim;
		val = mrt->mroute_do_assert;
		return -ENOPROTOOPT;

	if (get_user(olr, optlen))
	olr = min_t(unsigned int, olr, sizeof(int));
	if (put_user(olr, optlen))
	if (copy_to_user(optval, &val, olr))
/* The IP multicast ioctl support routines. */
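/* e.g. (illustrative sketch): per-vif and per-flow counters are read
 * back on the mroute socket s with SIOCGETVIFCNT / SIOCGETSGCNT:
 *
 *	struct sioc_vif_req vr = { .vifi = 0 };
 *	ioctl(s, SIOCGETVIFCNT, &vr);	// fills vr.icount, vr.ibytes, ...
 *
 *	struct sioc_sg_req sr = { .src = src, .grp = grp };
 *	ioctl(s, SIOCGETSGCNT, &sr);	// fills sr.pktcnt, sr.bytecnt, ...
 */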
int ipmr_ioctl(struct sock *sk, int cmd, void __user *arg)
	struct sioc_sg_req sr;
	struct sioc_vif_req vr;
	struct vif_device *vif;
	struct mfc_cache *c;
	struct net *net = sock_net(sk);
	struct mr_table *mrt;

	mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT);

		if (copy_from_user(&vr, arg, sizeof(vr)))
		if (vr.vifi >= mrt->maxvif)
		read_lock(&mrt_lock);
		vif = &mrt->vif_table[vr.vifi];
		if (VIF_EXISTS(mrt, vr.vifi)) {
			vr.icount = vif->pkt_in;
			vr.ocount = vif->pkt_out;
			vr.ibytes = vif->bytes_in;
			vr.obytes = vif->bytes_out;
			read_unlock(&mrt_lock);

			if (copy_to_user(arg, &vr, sizeof(vr)))
		read_unlock(&mrt_lock);
		return -EADDRNOTAVAIL;
		if (copy_from_user(&sr, arg, sizeof(sr)))

		c = ipmr_cache_find(mrt, sr.src.s_addr, sr.grp.s_addr);
			sr.pktcnt = c->mfc_un.res.pkt;
			sr.bytecnt = c->mfc_un.res.bytes;
			sr.wrong_if = c->mfc_un.res.wrong_if;

			if (copy_to_user(arg, &sr, sizeof(sr)))
		return -EADDRNOTAVAIL;
		return -ENOIOCTLCMD;
#ifdef CONFIG_COMPAT
struct compat_sioc_sg_req {
	compat_ulong_t pktcnt;
	compat_ulong_t bytecnt;
	compat_ulong_t wrong_if;

struct compat_sioc_vif_req {
	vifi_t	vifi;		/* Which iface */
	compat_ulong_t icount;
	compat_ulong_t ocount;
	compat_ulong_t ibytes;
	compat_ulong_t obytes;

int ipmr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg)
	struct compat_sioc_sg_req sr;
	struct compat_sioc_vif_req vr;
	struct vif_device *vif;
	struct mfc_cache *c;
	struct net *net = sock_net(sk);
	struct mr_table *mrt;

	mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT);

		if (copy_from_user(&vr, arg, sizeof(vr)))
		if (vr.vifi >= mrt->maxvif)
		vr.vifi = array_index_nospec(vr.vifi, mrt->maxvif);
		read_lock(&mrt_lock);
		vif = &mrt->vif_table[vr.vifi];
		if (VIF_EXISTS(mrt, vr.vifi)) {
			vr.icount = vif->pkt_in;
			vr.ocount = vif->pkt_out;
			vr.ibytes = vif->bytes_in;
			vr.obytes = vif->bytes_out;
			read_unlock(&mrt_lock);

			if (copy_to_user(arg, &vr, sizeof(vr)))
		read_unlock(&mrt_lock);
		return -EADDRNOTAVAIL;
		if (copy_from_user(&sr, arg, sizeof(sr)))

		c = ipmr_cache_find(mrt, sr.src.s_addr, sr.grp.s_addr);
			sr.pktcnt = c->mfc_un.res.pkt;
			sr.bytecnt = c->mfc_un.res.bytes;
			sr.wrong_if = c->mfc_un.res.wrong_if;

			if (copy_to_user(arg, &sr, sizeof(sr)))
		return -EADDRNOTAVAIL;
		return -ENOIOCTLCMD;
static int ipmr_device_event(struct notifier_block *this, unsigned long event, void *ptr)
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct net *net = dev_net(dev);
	struct mr_table *mrt;
	struct vif_device *v;

	if (event != NETDEV_UNREGISTER)

	ipmr_for_each_table(mrt, net) {
		v = &mrt->vif_table[0];
		for (ct = 0; ct < mrt->maxvif; ct++, v++) {
				vif_delete(mrt, ct, 1, NULL);

static struct notifier_block ip_mr_notifier = {
	.notifier_call = ipmr_device_event,
/* Encapsulate a packet by attaching a valid IPIP header to it.
 * This avoids tunnel drivers and other mess and gives us the speed so
 * important for multicast video.
 */
static void ip_encap(struct net *net, struct sk_buff *skb,
		     __be32 saddr, __be32 daddr)
	const struct iphdr *old_iph = ip_hdr(skb);

	skb_push(skb, sizeof(struct iphdr));
	skb->transport_header = skb->network_header;
	skb_reset_network_header(skb);

	iph->tos = old_iph->tos;
	iph->ttl = old_iph->ttl;
	iph->protocol = IPPROTO_IPIP;
	iph->tot_len = htons(skb->len);
	ip_select_ident(net, skb, NULL);

	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
static inline int ipmr_forward_finish(struct net *net, struct sock *sk,
				      struct sk_buff *skb)
	struct ip_options *opt = &(IPCB(skb)->opt);

	IP_INC_STATS(net, IPSTATS_MIB_OUTFORWDATAGRAMS);
	IP_ADD_STATS(net, IPSTATS_MIB_OUTOCTETS, skb->len);

	if (unlikely(opt->optlen))
		ip_forward_options(skb);

	return dst_output(net, sk, skb);
/* Processing handlers for ipmr_forward */

static void ipmr_queue_xmit(struct net *net, struct mr_table *mrt,
			    struct sk_buff *skb, struct mfc_cache *c, int vifi)
	const struct iphdr *iph = ip_hdr(skb);
	struct vif_device *vif = &mrt->vif_table[vifi];
	struct net_device *dev;

	if (vif->flags & VIFF_REGISTER) {
		vif->bytes_out += skb->len;
		vif->dev->stats.tx_bytes += skb->len;
		vif->dev->stats.tx_packets++;
		ipmr_cache_report(mrt, skb, vifi, IGMPMSG_WHOLEPKT);

	if (vif->flags & VIFF_TUNNEL) {
		rt = ip_route_output_ports(net, &fl4, NULL,
					   vif->remote, vif->local,
					   RT_TOS(iph->tos), vif->link);
		encap = sizeof(struct iphdr);
		rt = ip_route_output_ports(net, &fl4, NULL, iph->daddr, 0,
					   RT_TOS(iph->tos), vif->link);
	if (skb->len + encap > dst_mtu(&rt->dst) && (ntohs(iph->frag_off) & IP_DF)) {
		/* Do not fragment multicasts. Alas, IPv4 does not
		 * allow us to send ICMP, so such packets simply
		 * disappear.
		 */
		IP_INC_STATS(net, IPSTATS_MIB_FRAGFAILS);
	encap += LL_RESERVED_SPACE(dev) + rt->dst.header_len;

	if (skb_cow(skb, encap)) {

	vif->bytes_out += skb->len;
	skb_dst_set(skb, &rt->dst);
	ip_decrease_ttl(ip_hdr(skb));

	/* FIXME: forward and output firewalls used to be called here.
	 * What do we do with netfilter? -- RR
	 */
	if (vif->flags & VIFF_TUNNEL) {
		ip_encap(net, skb, vif->local, vif->remote);
		/* FIXME: extra output firewall step used to be here. --RR */
		vif->dev->stats.tx_packets++;
		vif->dev->stats.tx_bytes += skb->len;

	IPCB(skb)->flags |= IPSKB_FORWARDED;
	/* RFC 1584 teaches that a DVMRP/PIM router must deliver packets
	 * locally not only before forwarding, but also after forwarding on
	 * all output interfaces. Clearly, if an mrouter runs a multicasting
	 * program, that program should receive packets regardless of the
	 * interface it has joined on.
	 * If we did not do this, the program would have to join on all
	 * interfaces. On the other hand, a multihoming host (or router, but
	 * not an mrouter) cannot join on more than one interface - that
	 * would result in receiving multiple packets.
	 */
	NF_HOOK(NFPROTO_IPV4, NF_INET_FORWARD,
		net, NULL, skb, skb->dev, dev,
		ipmr_forward_finish);

static int ipmr_find_vif(struct mr_table *mrt, struct net_device *dev)
	for (ct = mrt->maxvif - 1; ct >= 0; ct--) {
		if (mrt->vif_table[ct].dev == dev)
1847 /* "local" means that we should preserve one skb (for local delivery) */
1848 static void ip_mr_forward(struct net *net, struct mr_table *mrt,
1849 struct net_device *dev, struct sk_buff *skb,
1850 struct mfc_cache *cache, int local)
1852 int true_vifi = ipmr_find_vif(mrt, dev);
1856 vif = cache->mfc_parent;
1857 cache->mfc_un.res.pkt++;
1858 cache->mfc_un.res.bytes += skb->len;
1859 cache->mfc_un.res.lastuse = jiffies;
1861 if (cache->mfc_origin == htonl(INADDR_ANY) && true_vifi >= 0) {
		struct mfc_cache *cache_proxy;

		/* For an (*,G) entry, we only check that the incoming
		 * interface is part of the static tree.
		 */
		cache_proxy = ipmr_cache_find_any_parent(mrt, vif);
		    cache_proxy->mfc_un.res.ttls[true_vifi] < 255)
	/* Wrong interface: drop packet and (maybe) send PIM assert. */
	if (mrt->vif_table[vif].dev != dev) {
		if (rt_is_output_route(skb_rtable(skb))) {
			/* It is our own packet, looped back.
			 * Very complicated situation...
			 *
			 * The best workaround until routing daemons are
			 * fixed is not to redistribute a packet if it was
			 * sent through the wrong interface. It means that
			 * multicast applications WILL NOT work for
			 * (S,G) entries whose default multicast route points
			 * to a wrong oif. In any case, it is not a good
			 * idea to use multicasting applications on a router.
			 */
		cache->mfc_un.res.wrong_if++;

		if (true_vifi >= 0 && mrt->mroute_do_assert &&
		    /* pimsm uses asserts, when switching from RPT to SPT,
		     * so that we cannot check that packet arrived on an oif.
		     * It is bad, but otherwise we would need to move pretty
		     * large chunk of pimd to kernel. Ough... --ANK
		     */
		    (mrt->mroute_do_pim ||
		     cache->mfc_un.res.ttls[true_vifi] < 255) &&
			  cache->mfc_un.res.last_assert + MFC_ASSERT_THRESH)) {
			cache->mfc_un.res.last_assert = jiffies;
			ipmr_cache_report(mrt, skb, true_vifi, IGMPMSG_WRONGVIF);

	mrt->vif_table[vif].pkt_in++;
	mrt->vif_table[vif].bytes_in += skb->len;

	/* Forward the frame */
	if (cache->mfc_origin == htonl(INADDR_ANY) &&
	    cache->mfc_mcastgrp == htonl(INADDR_ANY)) {
		if (true_vifi >= 0 &&
		    true_vifi != cache->mfc_parent &&
			    cache->mfc_un.res.ttls[cache->mfc_parent]) {
			/* It's an (*,*) entry and the packet is not coming
			 * from the upstream: forward the packet to the
			 * upstream only.
			 */
			psend = cache->mfc_parent;

	for (ct = cache->mfc_un.res.maxvif - 1;
	     ct >= cache->mfc_un.res.minvif; ct--) {
		/* For (*,G) entry, don't forward to the incoming interface */
		if ((cache->mfc_origin != htonl(INADDR_ANY) ||
		    ip_hdr(skb)->ttl > cache->mfc_un.res.ttls[ct]) {
				struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
					ipmr_queue_xmit(net, mrt, skb2, cache,

		struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
			ipmr_queue_xmit(net, mrt, skb2, cache, psend);
			ipmr_queue_xmit(net, mrt, skb, cache, psend);
static struct mr_table *ipmr_rt_fib_lookup(struct net *net, struct sk_buff *skb)
	struct rtable *rt = skb_rtable(skb);
	struct iphdr *iph = ip_hdr(skb);
	struct flowi4 fl4 = {
		.daddr = iph->daddr,
		.saddr = iph->saddr,
		.flowi4_tos = RT_TOS(iph->tos),
		.flowi4_oif = (rt_is_output_route(rt) ?
			       skb->dev->ifindex : 0),
		.flowi4_iif = (rt_is_output_route(rt) ?
		.flowi4_mark = skb->mark,
	struct mr_table *mrt;

	err = ipmr_fib_lookup(net, &fl4, &mrt);
		return ERR_PTR(err);
/* Multicast packets for forwarding arrive here
 * Called with rcu_read_lock();
 */
int ip_mr_input(struct sk_buff *skb)
	struct mfc_cache *cache;
	struct net *net = dev_net(skb->dev);
	int local = skb_rtable(skb)->rt_flags & RTCF_LOCAL;
	struct mr_table *mrt;
	struct net_device *dev;

	/* skb->dev passed in is the loX master dev for vrfs.
	 * As there are no vifs associated with loopback devices,
	 * get the proper interface that does have a vif associated with it.
	 */
	if (netif_is_l3_master(skb->dev)) {
		dev = dev_get_by_index_rcu(net, IPCB(skb)->iif);

	/* Packet is looped back after forward, it should not be
	 * forwarded second time, but still can be delivered locally.
	 */
	if (IPCB(skb)->flags & IPSKB_FORWARDED)

	mrt = ipmr_rt_fib_lookup(net, skb);
		return PTR_ERR(mrt);

	if (IPCB(skb)->opt.router_alert) {
		if (ip_call_ra_chain(skb))
	} else if (ip_hdr(skb)->protocol == IPPROTO_IGMP) {
		/* IGMPv1 (and broken IGMPv2 implementations such as
		 * Cisco IOS <= 11.2(8)) do not put the router alert
		 * option into IGMP packets destined to routable
		 * groups. It is very bad, because it means
		 * that we can forward NO IGMP messages.
		 */
		struct sock *mroute_sk;

		mroute_sk = rcu_dereference(mrt->mroute_sk);
			raw_rcv(mroute_sk, skb);

	/* already under rcu_read_lock() */
	cache = ipmr_cache_find(mrt, ip_hdr(skb)->saddr, ip_hdr(skb)->daddr);
		int vif = ipmr_find_vif(mrt, dev);

			cache = ipmr_cache_find_any(mrt, ip_hdr(skb)->daddr,

	/* No usable cache entry */
			struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
			ip_local_deliver(skb);

		read_lock(&mrt_lock);
		vif = ipmr_find_vif(mrt, dev);
			int err2 = ipmr_cache_unresolved(mrt, vif, skb, dev);
			read_unlock(&mrt_lock);
		read_unlock(&mrt_lock);

	read_lock(&mrt_lock);
	ip_mr_forward(net, mrt, dev, skb, cache, local);
	read_unlock(&mrt_lock);
		return ip_local_deliver(skb);

	return ip_local_deliver(skb);
#ifdef CONFIG_IP_PIMSM_V1
/* Handle IGMP messages of PIMv1 */
int pim_rcv_v1(struct sk_buff *skb)
	struct igmphdr *pim;
	struct net *net = dev_net(skb->dev);
	struct mr_table *mrt;

	if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(struct iphdr)))

	pim = igmp_hdr(skb);

	mrt = ipmr_rt_fib_lookup(net, skb);
	if (!mrt->mroute_do_pim ||
	    pim->group != PIM_V1_VERSION || pim->code != PIM_V1_REGISTER)

	if (__pim_rcv(mrt, skb, sizeof(*pim))) {
#ifdef CONFIG_IP_PIMSM_V2
static int pim_rcv(struct sk_buff *skb)
	struct pimreghdr *pim;
	struct net *net = dev_net(skb->dev);
	struct mr_table *mrt;

	if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(struct iphdr)))

	pim = (struct pimreghdr *)skb_transport_header(skb);
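	/* A valid register has the right version/type, is not a
	 * NULL-REGISTER, and checksums correctly - per the RFC over the
	 * PIM header alone, or over the whole packet for older peers
	 * (see the note in the file header).
	 */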
	if (pim->type != ((PIM_VERSION << 4) | (PIM_TYPE_REGISTER)) ||
	    (pim->flags & PIM_NULL_REGISTER) ||
	    (ip_compute_csum((void *)pim, sizeof(*pim)) != 0 &&
	     csum_fold(skb_checksum(skb, 0, skb->len, 0))))

	mrt = ipmr_rt_fib_lookup(net, skb);
	if (__pim_rcv(mrt, skb, sizeof(*pim))) {
static int __ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
			      struct mfc_cache *c, struct rtmsg *rtm)
	struct rta_mfc_stats mfcs;
	struct nlattr *mp_attr;
	struct rtnexthop *nhp;
	unsigned long lastuse;

	/* If cache is unresolved, don't try to parse IIF and OIF */
	if (c->mfc_parent >= MAXVIFS) {
		rtm->rtm_flags |= RTNH_F_UNRESOLVED;

	if (VIF_EXISTS(mrt, c->mfc_parent) &&
	    nla_put_u32(skb, RTA_IIF, mrt->vif_table[c->mfc_parent].dev->ifindex) < 0)

	if (!(mp_attr = nla_nest_start(skb, RTA_MULTIPATH)))

	for (ct = c->mfc_un.res.minvif; ct < c->mfc_un.res.maxvif; ct++) {
		if (VIF_EXISTS(mrt, ct) && c->mfc_un.res.ttls[ct] < 255) {
			if (!(nhp = nla_reserve_nohdr(skb, sizeof(*nhp)))) {
				nla_nest_cancel(skb, mp_attr);

			nhp->rtnh_flags = 0;
			nhp->rtnh_hops = c->mfc_un.res.ttls[ct];
			nhp->rtnh_ifindex = mrt->vif_table[ct].dev->ifindex;
			nhp->rtnh_len = sizeof(*nhp);

	nla_nest_end(skb, mp_attr);

	lastuse = READ_ONCE(c->mfc_un.res.lastuse);
	lastuse = time_after_eq(jiffies, lastuse) ? jiffies - lastuse : 0;

	mfcs.mfcs_packets = c->mfc_un.res.pkt;
	mfcs.mfcs_bytes = c->mfc_un.res.bytes;
	mfcs.mfcs_wrong_if = c->mfc_un.res.wrong_if;
	if (nla_put_64bit(skb, RTA_MFC_STATS, sizeof(mfcs), &mfcs, RTA_PAD) ||
	    nla_put_u64_64bit(skb, RTA_EXPIRES, jiffies_to_clock_t(lastuse),

	rtm->rtm_type = RTN_MULTICAST;
int ipmr_get_route(struct net *net, struct sk_buff *skb,
		   __be32 saddr, __be32 daddr,
		   struct rtmsg *rtm, u32 portid)
	struct mfc_cache *cache;
	struct mr_table *mrt;

	mrt = ipmr_get_table(net, RT_TABLE_DEFAULT);

	cache = ipmr_cache_find(mrt, saddr, daddr);
	if (!cache && skb->dev) {
		int vif = ipmr_find_vif(mrt, skb->dev);

			cache = ipmr_cache_find_any(mrt, daddr, vif);
		struct sk_buff *skb2;
		struct net_device *dev;

		read_lock(&mrt_lock);
		vif = ipmr_find_vif(mrt, dev);
			read_unlock(&mrt_lock);

		skb2 = skb_clone(skb, GFP_ATOMIC);
			read_unlock(&mrt_lock);

		NETLINK_CB(skb2).portid = portid;
		skb_push(skb2, sizeof(struct iphdr));
		skb_reset_network_header(skb2);
		iph->ihl = sizeof(struct iphdr) >> 2;

		err = ipmr_cache_unresolved(mrt, vif, skb2, dev);
		read_unlock(&mrt_lock);

	read_lock(&mrt_lock);
	err = __ipmr_fill_mroute(mrt, skb, cache, rtm);
	read_unlock(&mrt_lock);
static int ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
			    u32 portid, u32 seq, struct mfc_cache *c, int cmd,
	struct nlmsghdr *nlh;

	nlh = nlmsg_put(skb, portid, seq, cmd, sizeof(*rtm), flags);

	rtm = nlmsg_data(nlh);
	rtm->rtm_family   = RTNL_FAMILY_IPMR;
	rtm->rtm_dst_len  = 32;
	rtm->rtm_src_len  = 32;
	rtm->rtm_table    = mrt->id;
	if (nla_put_u32(skb, RTA_TABLE, mrt->id))
		goto nla_put_failure;
	rtm->rtm_type     = RTN_MULTICAST;
	rtm->rtm_scope    = RT_SCOPE_UNIVERSE;
	if (c->mfc_flags & MFC_STATIC)
		rtm->rtm_protocol = RTPROT_STATIC;
	else
		rtm->rtm_protocol = RTPROT_MROUTED;

	if (nla_put_in_addr(skb, RTA_SRC, c->mfc_origin) ||
	    nla_put_in_addr(skb, RTA_DST, c->mfc_mcastgrp))
		goto nla_put_failure;
	err = __ipmr_fill_mroute(mrt, skb, c, rtm);
	/* do not break the dump if cache is unresolved */
	if (err < 0 && err != -ENOENT)
		goto nla_put_failure;

	nlmsg_end(skb, nlh);

	nlmsg_cancel(skb, nlh);
static size_t mroute_msgsize(bool unresolved, int maxvif)
		NLMSG_ALIGN(sizeof(struct rtmsg))
		+ nla_total_size(4)	/* RTA_TABLE */
		+ nla_total_size(4)	/* RTA_SRC */
		+ nla_total_size(4)	/* RTA_DST */
		+ nla_total_size(4)	/* RTA_IIF */
		+ nla_total_size(0)	/* RTA_MULTIPATH */
		+ maxvif * NLA_ALIGN(sizeof(struct rtnexthop))
		+ nla_total_size_64bit(sizeof(struct rta_mfc_stats))
static void mroute_netlink_event(struct mr_table *mrt, struct mfc_cache *mfc,
	struct net *net = read_pnet(&mrt->net);
	struct sk_buff *skb;

	skb = nlmsg_new(mroute_msgsize(mfc->mfc_parent >= MAXVIFS, mrt->maxvif),

	err = ipmr_fill_mroute(mrt, skb, 0, 0, mfc, cmd, 0);

	rtnl_notify(skb, net, 0, RTNLGRP_IPV4_MROUTE, NULL, GFP_ATOMIC);

	rtnl_set_sk_err(net, RTNLGRP_IPV4_MROUTE, err);
static size_t igmpmsg_netlink_msgsize(size_t payloadlen)
		NLMSG_ALIGN(sizeof(struct rtgenmsg))
		+ nla_total_size(1)	/* IPMRA_CREPORT_MSGTYPE */
		+ nla_total_size(4)	/* IPMRA_CREPORT_VIF_ID */
		+ nla_total_size(4)	/* IPMRA_CREPORT_SRC_ADDR */
		+ nla_total_size(4)	/* IPMRA_CREPORT_DST_ADDR */
					/* IPMRA_CREPORT_PKT */
		+ nla_total_size(payloadlen)
static void igmpmsg_netlink_event(struct mr_table *mrt, struct sk_buff *pkt)
	struct net *net = read_pnet(&mrt->net);
	struct nlmsghdr *nlh;
	struct rtgenmsg *rtgenm;
	struct igmpmsg *msg;
	struct sk_buff *skb;

	payloadlen = pkt->len - sizeof(struct igmpmsg);
	msg = (struct igmpmsg *)skb_network_header(pkt);

	skb = nlmsg_new(igmpmsg_netlink_msgsize(payloadlen), GFP_ATOMIC);

	nlh = nlmsg_put(skb, 0, 0, RTM_NEWCACHEREPORT,
			sizeof(struct rtgenmsg), 0);
	rtgenm = nlmsg_data(nlh);
	rtgenm->rtgen_family = RTNL_FAMILY_IPMR;
	if (nla_put_u8(skb, IPMRA_CREPORT_MSGTYPE, msg->im_msgtype) ||
	    nla_put_u32(skb, IPMRA_CREPORT_VIF_ID, msg->im_vif) ||
	    nla_put_in_addr(skb, IPMRA_CREPORT_SRC_ADDR,
			    msg->im_src.s_addr) ||
	    nla_put_in_addr(skb, IPMRA_CREPORT_DST_ADDR,
			    msg->im_dst.s_addr))
		goto nla_put_failure;

	nla = nla_reserve(skb, IPMRA_CREPORT_PKT, payloadlen);
	if (!nla || skb_copy_bits(pkt, sizeof(struct igmpmsg),
				  nla_data(nla), payloadlen))
		goto nla_put_failure;

	nlmsg_end(skb, nlh);

	rtnl_notify(skb, net, 0, RTNLGRP_IPV4_MROUTE_R, NULL, GFP_ATOMIC);

	nlmsg_cancel(skb, nlh);

	rtnl_set_sk_err(net, RTNLGRP_IPV4_MROUTE_R, -ENOBUFS);
static int ipmr_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
			     struct netlink_ext_ack *extack)
	struct net *net = sock_net(in_skb->sk);
	struct nlattr *tb[RTA_MAX + 1];
	struct sk_buff *skb = NULL;
	struct mfc_cache *cache;
	struct mr_table *mrt;

	err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX,
			  rtm_ipv4_policy, extack);

	rtm = nlmsg_data(nlh);

	src = tb[RTA_SRC] ? nla_get_in_addr(tb[RTA_SRC]) : 0;
	grp = tb[RTA_DST] ? nla_get_in_addr(tb[RTA_DST]) : 0;
	tableid = tb[RTA_TABLE] ? nla_get_u32(tb[RTA_TABLE]) : 0;

	mrt = ipmr_get_table(net, tableid ? tableid : RT_TABLE_DEFAULT);

	/* entries are added/deleted only under RTNL */
	cache = ipmr_cache_find(mrt, src, grp);

	skb = nlmsg_new(mroute_msgsize(false, mrt->maxvif), GFP_KERNEL);

	err = ipmr_fill_mroute(mrt, skb, NETLINK_CB(in_skb).portid,
			       nlh->nlmsg_seq, cache,

	err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
static int ipmr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb)
	struct net *net = sock_net(skb->sk);
	struct mr_table *mrt;
	struct mfc_cache *mfc;
	unsigned int t = 0, s_t;
	unsigned int e = 0, s_e;

	ipmr_for_each_table(mrt, net) {
		list_for_each_entry_rcu(mfc, &mrt->mfc_cache_list, list) {
			if (ipmr_fill_mroute(mrt, skb,
					     NETLINK_CB(cb->skb).portid,

		spin_lock_bh(&mfc_unres_lock);
		list_for_each_entry(mfc, &mrt->mfc_unres_queue, list) {
			if (ipmr_fill_mroute(mrt, skb,
					     NETLINK_CB(cb->skb).portid,
				spin_unlock_bh(&mfc_unres_lock);

		spin_unlock_bh(&mfc_unres_lock);
static const struct nla_policy rtm_ipmr_policy[RTA_MAX + 1] = {
	[RTA_SRC]	= { .type = NLA_U32 },
	[RTA_DST]	= { .type = NLA_U32 },
	[RTA_IIF]	= { .type = NLA_U32 },
	[RTA_TABLE]	= { .type = NLA_U32 },
	[RTA_MULTIPATH]	= { .len = sizeof(struct rtnexthop) },

static bool ipmr_rtm_validate_proto(unsigned char rtm_protocol)
	switch (rtm_protocol) {
	case RTPROT_MROUTED:

static int ipmr_nla_get_ttls(const struct nlattr *nla, struct mfcctl *mfcc)
	struct rtnexthop *rtnh = nla_data(nla);
	int remaining = nla_len(nla), vifi = 0;

	while (rtnh_ok(rtnh, remaining)) {
		mfcc->mfcc_ttls[vifi] = rtnh->rtnh_hops;
		if (++vifi == MAXVIFS)
		rtnh = rtnh_next(rtnh, &remaining);

	return remaining > 0 ? -EINVAL : vifi;
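/* (The oif list thus arrives as RTA_MULTIPATH: one struct rtnexthop per
 * vif, with the TTL threshold carried in rtnh_hops - mirroring what
 * __ipmr_fill_mroute() emits on dumps.)
 */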
/* returns < 0 on error, 0 for ADD_MFC and 1 for ADD_MFC_PROXY */
static int rtm_to_ipmr_mfcc(struct net *net, struct nlmsghdr *nlh,
			    struct mfcctl *mfcc, int *mrtsock,
			    struct mr_table **mrtret,
			    struct netlink_ext_ack *extack)
	struct net_device *dev = NULL;
	u32 tblid = RT_TABLE_DEFAULT;
	struct mr_table *mrt;
	struct nlattr *attr;

	ret = nlmsg_validate(nlh, sizeof(*rtm), RTA_MAX, rtm_ipmr_policy,

	rtm = nlmsg_data(nlh);

	if (rtm->rtm_family != RTNL_FAMILY_IPMR || rtm->rtm_dst_len != 32 ||
	    rtm->rtm_type != RTN_MULTICAST ||
	    rtm->rtm_scope != RT_SCOPE_UNIVERSE ||
	    !ipmr_rtm_validate_proto(rtm->rtm_protocol))

	memset(mfcc, 0, sizeof(*mfcc));
	mfcc->mfcc_parent = -1;

	nlmsg_for_each_attr(attr, nlh, sizeof(struct rtmsg), rem) {
		switch (nla_type(attr)) {
			mfcc->mfcc_origin.s_addr = nla_get_be32(attr);
			mfcc->mfcc_mcastgrp.s_addr = nla_get_be32(attr);
			dev = __dev_get_by_index(net, nla_get_u32(attr));
			if (ipmr_nla_get_ttls(attr, mfcc) < 0) {
			tblid = nla_get_u32(attr);

	mrt = ipmr_get_table(net, tblid);

	*mrtsock = rtm->rtm_protocol == RTPROT_MROUTED ? 1 : 0;
		mfcc->mfcc_parent = ipmr_find_vif(mrt, dev);
/* takes care of both newroute and delroute */
static int ipmr_rtm_route(struct sk_buff *skb, struct nlmsghdr *nlh,
			  struct netlink_ext_ack *extack)
	struct net *net = sock_net(skb->sk);
	int ret, mrtsock, parent;
	struct mr_table *tbl;

	ret = rtm_to_ipmr_mfcc(net, nlh, &mfcc, &mrtsock, &tbl, extack);

	parent = ret ? mfcc.mfcc_parent : -1;
	if (nlh->nlmsg_type == RTM_NEWROUTE)
		return ipmr_mfc_add(net, tbl, &mfcc, mrtsock, parent);
	else
		return ipmr_mfc_delete(tbl, &mfcc, parent);
static bool ipmr_fill_table(struct mr_table *mrt, struct sk_buff *skb)
	u32 queue_len = atomic_read(&mrt->cache_resolve_queue_len);

	if (nla_put_u32(skb, IPMRA_TABLE_ID, mrt->id) ||
	    nla_put_u32(skb, IPMRA_TABLE_CACHE_RES_QUEUE_LEN, queue_len) ||
	    nla_put_s32(skb, IPMRA_TABLE_MROUTE_REG_VIF_NUM,
			mrt->mroute_reg_vif_num) ||
	    nla_put_u8(skb, IPMRA_TABLE_MROUTE_DO_ASSERT,
		       mrt->mroute_do_assert) ||
	    nla_put_u8(skb, IPMRA_TABLE_MROUTE_DO_PIM, mrt->mroute_do_pim))
static bool ipmr_fill_vif(struct mr_table *mrt, u32 vifid, struct sk_buff *skb)
	struct nlattr *vif_nest;
	struct vif_device *vif;

	/* if the VIF doesn't exist just continue */
	if (!VIF_EXISTS(mrt, vifid))

	vif = &mrt->vif_table[vifid];
	vif_nest = nla_nest_start(skb, IPMRA_VIF);
	if (nla_put_u32(skb, IPMRA_VIFA_IFINDEX, vif->dev->ifindex) ||
	    nla_put_u32(skb, IPMRA_VIFA_VIF_ID, vifid) ||
	    nla_put_u16(skb, IPMRA_VIFA_FLAGS, vif->flags) ||
	    nla_put_u64_64bit(skb, IPMRA_VIFA_BYTES_IN, vif->bytes_in,
	    nla_put_u64_64bit(skb, IPMRA_VIFA_BYTES_OUT, vif->bytes_out,
	    nla_put_u64_64bit(skb, IPMRA_VIFA_PACKETS_IN, vif->pkt_in,
	    nla_put_u64_64bit(skb, IPMRA_VIFA_PACKETS_OUT, vif->pkt_out,
	    nla_put_be32(skb, IPMRA_VIFA_LOCAL_ADDR, vif->local) ||
	    nla_put_be32(skb, IPMRA_VIFA_REMOTE_ADDR, vif->remote)) {
		nla_nest_cancel(skb, vif_nest);

	nla_nest_end(skb, vif_nest);

static int ipmr_rtm_dumplink(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct nlmsghdr *nlh = NULL;
	unsigned int t = 0, s_t;
	unsigned int e = 0, s_e;
	struct mr_table *mrt;

	s_t = cb->args[0];
	s_e = cb->args[1];

	ipmr_for_each_table(mrt, net) {
		struct nlattr *vifs, *af;
		struct ifinfomsg *hdr;
		u32 i;

		if (t < s_t)
			goto skip_table;
		nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid,
				cb->nlh->nlmsg_seq, RTM_NEWLINK,
				sizeof(*hdr), NLM_F_MULTI);
		if (!nlh)
			break;

		hdr = nlmsg_data(nlh);
		memset(hdr, 0, sizeof(*hdr));
		hdr->ifi_family = RTNL_FAMILY_IPMR;

		af = nla_nest_start(skb, IFLA_AF_SPEC);
		if (!af) {
			nlmsg_cancel(skb, nlh);
			goto out;
		}
		if (!ipmr_fill_table(mrt, skb)) {
			nlmsg_cancel(skb, nlh);
			goto out;
		}

		vifs = nla_nest_start(skb, IPMRA_TABLE_VIFS);
		if (!vifs) {
			nla_nest_end(skb, af);
			nlmsg_end(skb, nlh);
			goto out;
		}
		for (i = 0; i < mrt->maxvif; i++) {
			if (e < s_e)
				goto skip_entry;
			if (!ipmr_fill_vif(mrt, i, skb)) {
				nla_nest_end(skb, vifs);
				nla_nest_end(skb, af);
				nlmsg_end(skb, nlh);
				goto out;
			}
skip_entry:
			e++;
		}
		s_e = 0;
		e = 0;
		nla_nest_end(skb, vifs);
		nla_nest_end(skb, af);
		nlmsg_end(skb, nlh);
skip_table:
		t++;
	}

out:
	cb->args[1] = e;
	cb->args[0] = t;

	return skb->len;
}
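
/* Dump state lives in cb->args: args[0] is the table index to resume
 * from, args[1] the vif index within it.  Both counters keep advancing
 * even for skipped entries, so a dump that filled its skb resumes
 * exactly where the previous pass stopped.
 */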

#ifdef CONFIG_PROC_FS
/* The /proc interfaces to multicast routing :
 * /proc/net/ip_mr_cache & /proc/net/ip_mr_vif
 */
struct ipmr_vif_iter {
	struct seq_net_private p;
	struct mr_table *mrt;
	int ct;
};

static struct vif_device *ipmr_vif_seq_idx(struct net *net,
					   struct ipmr_vif_iter *iter,
					   loff_t pos)
{
	struct mr_table *mrt = iter->mrt;

	for (iter->ct = 0; iter->ct < mrt->maxvif; ++iter->ct) {
		if (!VIF_EXISTS(mrt, iter->ct))
			continue;
		if (pos-- == 0)
			return &mrt->vif_table[iter->ct];
	}
	return NULL;
}

static void *ipmr_vif_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(mrt_lock)
{
	struct ipmr_vif_iter *iter = seq->private;
	struct net *net = seq_file_net(seq);
	struct mr_table *mrt;

	mrt = ipmr_get_table(net, RT_TABLE_DEFAULT);
	if (!mrt)
		return ERR_PTR(-ENOENT);

	iter->mrt = mrt;

	read_lock(&mrt_lock);
	return *pos ? ipmr_vif_seq_idx(net, seq->private, *pos - 1)
		: SEQ_START_TOKEN;
}

static void *ipmr_vif_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct ipmr_vif_iter *iter = seq->private;
	struct net *net = seq_file_net(seq);
	struct mr_table *mrt = iter->mrt;

	++*pos;
	if (v == SEQ_START_TOKEN)
		return ipmr_vif_seq_idx(net, iter, 0);

	while (++iter->ct < mrt->maxvif) {
		if (!VIF_EXISTS(mrt, iter->ct))
			continue;
		return &mrt->vif_table[iter->ct];
	}
	return NULL;
}

static void ipmr_vif_seq_stop(struct seq_file *seq, void *v)
	__releases(mrt_lock)
{
	read_unlock(&mrt_lock);
}

static int ipmr_vif_seq_show(struct seq_file *seq, void *v)
{
	struct ipmr_vif_iter *iter = seq->private;
	struct mr_table *mrt = iter->mrt;

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq,
			 "Interface      BytesIn  PktsIn  BytesOut PktsOut Flags Local    Remote\n");
	} else {
		const struct vif_device *vif = v;
		const char *name = vif->dev ? vif->dev->name : "none";

		seq_printf(seq,
			   "%2zd %-10s %8ld %7ld  %8ld %7ld %05X %08X %08X\n",
			   vif - mrt->vif_table,
			   name, vif->bytes_in, vif->pkt_in,
			   vif->bytes_out, vif->pkt_out,
			   vif->flags, vif->local, vif->remote);
	}
	return 0;
}
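
/* Example of the resulting /proc/net/ip_mr_vif output (values made up
 * for illustration; the two addresses are the raw %08X rendering of the
 * big-endian vif->local and vif->remote words):
 *
 *	Interface      BytesIn  PktsIn  BytesOut PktsOut Flags Local    Remote
 *	 0 eth0            1500      10      3000      20 00000 0100000A 00000000
 */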

static const struct seq_operations ipmr_vif_seq_ops = {
	.start = ipmr_vif_seq_start,
	.next  = ipmr_vif_seq_next,
	.stop  = ipmr_vif_seq_stop,
	.show  = ipmr_vif_seq_show,
};

static int ipmr_vif_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &ipmr_vif_seq_ops,
			    sizeof(struct ipmr_vif_iter));
}

static const struct file_operations ipmr_vif_fops = {
	.owner	 = THIS_MODULE,
	.open	 = ipmr_vif_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release_net,
};

struct ipmr_mfc_iter {
	struct seq_net_private p;
	struct mr_table *mrt;
	struct list_head *cache;
};
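
/* The MFC iteration below covers two lists under two different locks:
 * resolved entries in mrt->mfc_cache_list are walked under
 * rcu_read_lock(), while the unresolved queue needs mfc_unres_lock.
 * it->cache records which list (and therefore which lock) the iterator
 * currently holds, so ->next and ->stop can hand over or drop the
 * right one.
 */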

static struct mfc_cache *ipmr_mfc_seq_idx(struct net *net,
					  struct ipmr_mfc_iter *it, loff_t pos)
{
	struct mr_table *mrt = it->mrt;
	struct mfc_cache *mfc;

	rcu_read_lock();
	it->cache = &mrt->mfc_cache_list;
	list_for_each_entry_rcu(mfc, &mrt->mfc_cache_list, list)
		if (pos-- == 0)
			return mfc;
	rcu_read_unlock();

	spin_lock_bh(&mfc_unres_lock);
	it->cache = &mrt->mfc_unres_queue;
	list_for_each_entry(mfc, it->cache, list)
		if (pos-- == 0)
			return mfc;
	spin_unlock_bh(&mfc_unres_lock);

	it->cache = NULL;
	return NULL;
}

static void *ipmr_mfc_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct ipmr_mfc_iter *it = seq->private;
	struct net *net = seq_file_net(seq);
	struct mr_table *mrt;

	mrt = ipmr_get_table(net, RT_TABLE_DEFAULT);
	if (!mrt)
		return ERR_PTR(-ENOENT);

	it->mrt = mrt;
	it->cache = NULL;
	return *pos ? ipmr_mfc_seq_idx(net, seq->private, *pos - 1)
		: SEQ_START_TOKEN;
}

static void *ipmr_mfc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct ipmr_mfc_iter *it = seq->private;
	struct net *net = seq_file_net(seq);
	struct mr_table *mrt = it->mrt;
	struct mfc_cache *mfc = v;

	++*pos;

	if (v == SEQ_START_TOKEN)
		return ipmr_mfc_seq_idx(net, seq->private, 0);

	if (mfc->list.next != it->cache)
		return list_entry(mfc->list.next, struct mfc_cache, list);

	if (it->cache == &mrt->mfc_unres_queue)
		goto end_of_list;

	/* exhausted cache_array, show unresolved */
	rcu_read_unlock();
	it->cache = &mrt->mfc_unres_queue;

	spin_lock_bh(&mfc_unres_lock);
	if (!list_empty(it->cache))
		return list_first_entry(it->cache, struct mfc_cache, list);

end_of_list:
	spin_unlock_bh(&mfc_unres_lock);
	it->cache = NULL;

	return NULL;
}

static void ipmr_mfc_seq_stop(struct seq_file *seq, void *v)
{
	struct ipmr_mfc_iter *it = seq->private;
	struct mr_table *mrt = it->mrt;

	if (it->cache == &mrt->mfc_unres_queue)
		spin_unlock_bh(&mfc_unres_lock);
	else if (it->cache == &mrt->mfc_cache_list)
		rcu_read_unlock();
}

static int ipmr_mfc_seq_show(struct seq_file *seq, void *v)
{
	int n;

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq,
		 "Group    Origin   Iif     Pkts    Bytes    Wrong Oifs\n");
	} else {
		const struct mfc_cache *mfc = v;
		const struct ipmr_mfc_iter *it = seq->private;
		const struct mr_table *mrt = it->mrt;

		seq_printf(seq, "%08X %08X %-3hd",
			   (__force u32) mfc->mfc_mcastgrp,
			   (__force u32) mfc->mfc_origin,
			   mfc->mfc_parent);

		if (it->cache != &mrt->mfc_unres_queue) {
			seq_printf(seq, " %8lu %8lu %8lu",
				   mfc->mfc_un.res.pkt,
				   mfc->mfc_un.res.bytes,
				   mfc->mfc_un.res.wrong_if);
			for (n = mfc->mfc_un.res.minvif;
			     n < mfc->mfc_un.res.maxvif; n++) {
				if (VIF_EXISTS(mrt, n) &&
				    mfc->mfc_un.res.ttls[n] < 255)
					seq_printf(seq,
					   " %2d:%-3d",
					   n, mfc->mfc_un.res.ttls[n]);
			}
		} else {
			/* unresolved mfc_caches don't contain
			 * pkt, bytes and wrong_if values
			 */
			seq_printf(seq, " %8lu %8lu %8lu", 0ul, 0ul, 0ul);
		}
		seq_putc(seq, '\n');
	}
	return 0;
}
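
/* Example /proc/net/ip_mr_cache output (illustrative values, alignment
 * approximate): a resolved (S,G) entry lists its forwarding counters
 * and trailing "oif:ttl" pairs; an unresolved entry shows zeros:
 *
 *	Group    Origin   Iif     Pkts    Bytes    Wrong Oifs
 *	E10000FF 0100000A 1         10     1500        0  2:1
 */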

static const struct seq_operations ipmr_mfc_seq_ops = {
	.start = ipmr_mfc_seq_start,
	.next  = ipmr_mfc_seq_next,
	.stop  = ipmr_mfc_seq_stop,
	.show  = ipmr_mfc_seq_show,
};

static int ipmr_mfc_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &ipmr_mfc_seq_ops,
			    sizeof(struct ipmr_mfc_iter));
}

static const struct file_operations ipmr_mfc_fops = {
	.owner	 = THIS_MODULE,
	.open	 = ipmr_mfc_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release_net,
};
#endif /* CONFIG_PROC_FS */

#ifdef CONFIG_IP_PIMSM_V2
static const struct net_protocol pim_protocol = {
	.handler	=	pim_rcv,
	.netns_ok	=	1,
};
#endif

/* Setup for IP multicast routing */
static int __net_init ipmr_net_init(struct net *net)
{
	int err;

	err = ipmr_rules_init(net);
	if (err < 0)
		goto fail;

#ifdef CONFIG_PROC_FS
	err = -ENOMEM;
	if (!proc_create("ip_mr_vif", 0, net->proc_net, &ipmr_vif_fops))
		goto proc_vif_fail;
	if (!proc_create("ip_mr_cache", 0, net->proc_net, &ipmr_mfc_fops))
		goto proc_cache_fail;
#endif
	return 0;

#ifdef CONFIG_PROC_FS
proc_cache_fail:
	remove_proc_entry("ip_mr_vif", net->proc_net);
proc_vif_fail:
	ipmr_rules_exit(net);
#endif
fail:
	return err;
}

static void __net_exit ipmr_net_exit(struct net *net)
{
#ifdef CONFIG_PROC_FS
	remove_proc_entry("ip_mr_cache", net->proc_net);
	remove_proc_entry("ip_mr_vif", net->proc_net);
#endif
	ipmr_rules_exit(net);
}

static struct pernet_operations ipmr_net_ops = {
	.init = ipmr_net_init,
	.exit = ipmr_net_exit,
};
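
/* ipmr_net_ops runs once per network namespace: ipmr_net_init() when a
 * netns is created (including init_net when register_pernet_subsys() is
 * called below), ipmr_net_exit() when it is torn down.
 */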

int __init ip_mr_init(void)
{
	int err;

	mrt_cachep = kmem_cache_create("ip_mrt_cache",
				       sizeof(struct mfc_cache),
				       0, SLAB_HWCACHE_ALIGN | SLAB_PANIC,
				       NULL);

	err = register_pernet_subsys(&ipmr_net_ops);
	if (err)
		goto reg_pernet_fail;

	err = register_netdevice_notifier(&ip_mr_notifier);
	if (err)
		goto reg_notif_fail;
#ifdef CONFIG_IP_PIMSM_V2
	if (inet_add_protocol(&pim_protocol, IPPROTO_PIM) < 0) {
		pr_err("%s: can't add PIM protocol\n", __func__);
		err = -EAGAIN;
		goto add_proto_fail;
	}
#endif
	rtnl_register(RTNL_FAMILY_IPMR, RTM_GETROUTE,
		      ipmr_rtm_getroute, ipmr_rtm_dumproute, 0);
	rtnl_register(RTNL_FAMILY_IPMR, RTM_NEWROUTE,
		      ipmr_rtm_route, NULL, 0);
	rtnl_register(RTNL_FAMILY_IPMR, RTM_DELROUTE,
		      ipmr_rtm_route, NULL, 0);

	rtnl_register(RTNL_FAMILY_IPMR, RTM_GETLINK,
		      NULL, ipmr_rtm_dumplink, 0);
	return 0;

#ifdef CONFIG_IP_PIMSM_V2
add_proto_fail:
	unregister_netdevice_notifier(&ip_mr_notifier);
#endif
reg_notif_fail:
	unregister_pernet_subsys(&ipmr_net_ops);
reg_pernet_fail:
	kmem_cache_destroy(mrt_cachep);
	return err;
}
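
/* The error unwinding above mirrors the registration order: each label
 * undoes everything registered before the failing step, ending with the
 * kmem_cache.  The rtnl_register() calls need no unwind path here: at
 * this point in the kernel's history they panicked on failure instead
 * of returning an error.
 */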