/*
 * net/sched/act_mirred.c       packet mirroring and redirect actions
 *
 *              This program is free software; you can redistribute it and/or
 *              modify it under the terms of the GNU General Public License
 *              as published by the Free Software Foundation; either version
 *              2 of the License, or (at your option) any later version.
 *
 * Authors:     Jamal Hadi Salim (2002-4)
 *
 * TODO: Add ingress support (and socket redirect support)
 *
 */
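
/* Illustrative userspace usage (not part of this file; assumes the
 * iproute2 "tc" tool, and the device names are placeholders): mirror
 * all ingress traffic of eth0 to eth1 with
 *
 *   tc qdisc add dev eth0 handle ffff: ingress
 *   tc filter add dev eth0 parent ffff: u32 match u32 0 0 \
 *           action mirred egress mirror dev eth1
 *
 * Using "egress redirect" instead of "egress mirror" steals the packet
 * rather than sending a copy.
 */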

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/gfp.h>
#include <net/net_namespace.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <linux/tc_act/tc_mirred.h>
#include <net/tc_act/tc_mirred.h>

#include <linux/if_arp.h>

#define MIRRED_TAB_MASK     7
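
/* Every mirred instance sits on mirred_list so that the netdevice
 * notifier at the bottom of this file can clear its cached target
 * device pointer when that device is unregistered.  mirred_list_lock
 * protects the list and those pointer updates.
 */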
static LIST_HEAD(mirred_list);
static DEFINE_SPINLOCK(mirred_list_lock);

static void tcf_mirred_release(struct tc_action *a, int bind)
{
        struct tcf_mirred *m = to_mirred(a);
        struct net_device *dev;

        /* We could be called either in a RCU callback or with RTNL lock held. */
        spin_lock_bh(&mirred_list_lock);
        list_del(&m->tcfm_list);
        dev = rcu_dereference_protected(m->tcfm_dev, 1);
        if (dev)
                dev_put(dev);
        spin_unlock_bh(&mirred_list_lock);
}

static const struct nla_policy mirred_policy[TCA_MIRRED_MAX + 1] = {
        [TCA_MIRRED_PARMS]      = { .len = sizeof(struct tc_mirred) },
};

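/* Control path: create a new mirred action, or update an existing one,
 * from its netlink attributes (TCA_MIRRED_PARMS).
 */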
static int tcf_mirred_init(struct net *net, struct nlattr *nla,
                           struct nlattr *est, struct tc_action *a, int ovr,
                           int bind)
{
        struct nlattr *tb[TCA_MIRRED_MAX + 1];
        struct tc_mirred *parm;
        struct tcf_mirred *m;
        struct net_device *dev;
        int ret, ok_push = 0;

        if (nla == NULL)
                return -EINVAL;
        ret = nla_parse_nested(tb, TCA_MIRRED_MAX, nla, mirred_policy);
        if (ret < 0)
                return ret;
        if (tb[TCA_MIRRED_PARMS] == NULL)
                return -EINVAL;
        parm = nla_data(tb[TCA_MIRRED_PARMS]);
        switch (parm->eaction) {
        case TCA_EGRESS_MIRROR:
        case TCA_EGRESS_REDIR:
                break;
        default:
                return -EINVAL;
        }
        if (parm->ifindex) {
                dev = __dev_get_by_index(net, parm->ifindex);
                if (dev == NULL)
                        return -ENODEV;
                switch (dev->type) {
                case ARPHRD_TUNNEL:
                case ARPHRD_TUNNEL6:
                case ARPHRD_SIT:
                case ARPHRD_IPGRE:
                case ARPHRD_VOID:
                case ARPHRD_NONE:
                        /* no link-layer header to restore before xmit */
                        ok_push = 0;
                        break;
                default:
                        /* target expects a MAC header; tcf_mirred() pushes
                         * it back for packets grabbed at ingress
                         */
                        ok_push = 1;
                        break;
                }
        } else {
                dev = NULL;
        }

        if (!tcf_hash_check(parm->index, a, bind)) {
                if (dev == NULL)
                        return -EINVAL;
                ret = tcf_hash_create(parm->index, est, a, sizeof(*m),
                                      bind, true);
                if (ret)
                        return ret;
                ret = ACT_P_CREATED;
        } else {
                if (bind)
                        return 0;

                tcf_hash_release(a, bind);
                if (!ovr)
                        return -EEXIST;
        }
        m = to_mirred(a);

        ASSERT_RTNL();
        m->tcf_action = parm->action;
        m->tcfm_eaction = parm->eaction;
        if (dev != NULL) {
                m->tcfm_ifindex = parm->ifindex;
                if (ret != ACT_P_CREATED)
                        dev_put(rcu_dereference_protected(m->tcfm_dev, 1));
                dev_hold(dev);
                rcu_assign_pointer(m->tcfm_dev, dev);
                m->tcfm_ok_push = ok_push;
        }

        if (ret == ACT_P_CREATED) {
                spin_lock_bh(&mirred_list_lock);
                list_add(&m->tcfm_list, &mirred_list);
                spin_unlock_bh(&mirred_list_lock);
                tcf_hash_insert(a);
        }

        return ret;
}

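/* Data path: clone the skb under RCU and queue the clone on the target
 * device.  Returns the configured control action; for redirect, the
 * result is downgraded to TC_ACT_SHOT when the clone cannot be sent.
 */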
static int tcf_mirred(struct sk_buff *skb, const struct tc_action *a,
                      struct tcf_result *res)
{
        struct tcf_mirred *m = a->priv;
        struct net_device *dev;
        struct sk_buff *skb2;
        int retval, err;
        u32 at;

        tcf_lastuse_update(&m->tcf_tm);

        bstats_cpu_update(this_cpu_ptr(m->common.cpu_bstats), skb);

        rcu_read_lock();
        retval = READ_ONCE(m->tcf_action);
        dev = rcu_dereference(m->tcfm_dev);
        if (unlikely(!dev)) {
                pr_notice_once("tc mirred: target device is gone\n");
                goto out;
        }

        if (unlikely(!(dev->flags & IFF_UP))) {
                net_notice_ratelimited("tc mirred to Houston: device %s is down\n",
                                       dev->name);
                goto out;
        }

        at = G_TC_AT(skb->tc_verd);
        skb2 = skb_clone(skb, GFP_ATOMIC);
        if (!skb2)
                goto out;

        if (!(at & AT_EGRESS)) {
                if (m->tcfm_ok_push)
                        skb_push_rcsum(skb2, skb->mac_len);
        }

        /* mirror is always swallowed */
        if (m->tcfm_eaction != TCA_EGRESS_MIRROR)
                skb2->tc_verd = SET_TC_FROM(skb2->tc_verd, at);

        skb2->skb_iif = skb->dev->ifindex;
        skb2->dev = dev;
        skb_sender_cpu_clear(skb2);
        err = dev_queue_xmit(skb2);

        if (err) {
out:
                qstats_overlimit_inc(this_cpu_ptr(m->common.cpu_qstats));
                if (m->tcfm_eaction != TCA_EGRESS_MIRROR)
                        retval = TC_ACT_SHOT;
        }
        rcu_read_unlock();

        return retval;
}

static int tcf_mirred_dump(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
{
        unsigned char *b = skb_tail_pointer(skb);
        struct tcf_mirred *m = a->priv;
        struct tc_mirred opt = {
                .index   = m->tcf_index,
                .action  = m->tcf_action,
                .refcnt  = m->tcf_refcnt - ref,
                .bindcnt = m->tcf_bindcnt - bind,
                .eaction = m->tcfm_eaction,
                .ifindex = m->tcfm_ifindex,
        };
        struct tcf_t t;

        if (nla_put(skb, TCA_MIRRED_PARMS, sizeof(opt), &opt))
                goto nla_put_failure;
        t.install = jiffies_to_clock_t(jiffies - m->tcf_tm.install);
        t.lastuse = jiffies_to_clock_t(jiffies - m->tcf_tm.lastuse);
        t.expires = jiffies_to_clock_t(m->tcf_tm.expires);
        if (nla_put(skb, TCA_MIRRED_TM, sizeof(t), &t))
                goto nla_put_failure;
        return skb->len;

nla_put_failure:
        nlmsg_trim(skb, b);
        return -1;
}

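/* Netdevice notifier: when an action's target device is unregistered,
 * drop the reference taken on it and clear the cached pointer so the
 * data path sees a NULL tcfm_dev instead of a stale device.
 */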
static int mirred_device_event(struct notifier_block *unused,
                               unsigned long event, void *ptr)
{
        struct net_device *dev = netdev_notifier_info_to_dev(ptr);
        struct tcf_mirred *m;

        ASSERT_RTNL();
        if (event == NETDEV_UNREGISTER) {
                spin_lock_bh(&mirred_list_lock);
                list_for_each_entry(m, &mirred_list, tcfm_list) {
                        if (rcu_access_pointer(m->tcfm_dev) == dev) {
                                dev_put(dev);
                                /* Note: no RCU grace period is necessary
                                 * here, as net_device is already RCU
                                 * protected.
                                 */
                                RCU_INIT_POINTER(m->tcfm_dev, NULL);
                        }
                }
                spin_unlock_bh(&mirred_list_lock);
        }

        return NOTIFY_DONE;
}

static struct notifier_block mirred_device_notifier = {
        .notifier_call = mirred_device_event,
};

static struct tc_action_ops act_mirred_ops = {
        .kind           =       "mirred",
        .type           =       TCA_ACT_MIRRED,
        .owner          =       THIS_MODULE,
        .act            =       tcf_mirred,
        .dump           =       tcf_mirred_dump,
        .cleanup        =       tcf_mirred_release,
        .init           =       tcf_mirred_init,
};

MODULE_AUTHOR("Jamal Hadi Salim(2002)");
MODULE_DESCRIPTION("Device Mirror/redirect actions");
MODULE_LICENSE("GPL");

static int __init mirred_init_module(void)
{
        int err = register_netdevice_notifier(&mirred_device_notifier);
        if (err)
                return err;

        pr_info("Mirror/redirect action on\n");
        return tcf_register_action(&act_mirred_ops, MIRRED_TAB_MASK);
}

static void __exit mirred_cleanup_module(void)
{
        tcf_unregister_action(&act_mirred_ops);
        unregister_netdevice_notifier(&mirred_device_notifier);
}

module_init(mirred_init_module);
module_exit(mirred_cleanup_module);