/* GNU Linux-libre 4.19.264-gnu1
 * net/netfilter/nf_flow_table_core.c
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netfilter.h>
#include <linux/rhashtable.h>
#include <linux/netdevice.h>
#include <net/ip.h>
#include <net/ip6_route.h>
#include <net/netfilter/nf_tables.h>
#include <net/netfilter/nf_flow_table.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_tuple.h>

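/* An offloaded flow is stored together with the conntrack entry it
 * shadows and an rcu_head used to free both after a grace period.
 */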
struct flow_offload_entry {
	struct flow_offload	flow;
	struct nf_conn		*ct;
	struct rcu_head		rcu_head;
};

static DEFINE_MUTEX(flowtable_lock);
static LIST_HEAD(flowtables);

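/* Fill one direction of the flow tuple from the conntrack tuple:
 * addresses, ports, L3/L4 protocol numbers, MTU and the cached route.
 * The input interface comes from the dst of the opposite direction.
 */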
static void
flow_offload_fill_dir(struct flow_offload *flow, struct nf_conn *ct,
		      struct nf_flow_route *route,
		      enum flow_offload_tuple_dir dir)
{
	struct flow_offload_tuple *ft = &flow->tuplehash[dir].tuple;
	struct nf_conntrack_tuple *ctt = &ct->tuplehash[dir].tuple;
	struct dst_entry *other_dst = route->tuple[!dir].dst;
	struct dst_entry *dst = route->tuple[dir].dst;

	ft->dir = dir;

	switch (ctt->src.l3num) {
	case NFPROTO_IPV4:
		ft->src_v4 = ctt->src.u3.in;
		ft->dst_v4 = ctt->dst.u3.in;
		ft->mtu = ip_dst_mtu_maybe_forward(dst, true);
		break;
	case NFPROTO_IPV6:
		ft->src_v6 = ctt->src.u3.in6;
		ft->dst_v6 = ctt->dst.u3.in6;
		ft->mtu = ip6_dst_mtu_forward(dst);
		break;
	}

	ft->l3proto = ctt->src.l3num;
	ft->l4proto = ctt->dst.protonum;
	ft->src_port = ctt->src.u.tcp.port;
	ft->dst_port = ctt->dst.u.tcp.port;

	ft->iifidx = other_dst->dev->ifindex;
	ft->oifidx = dst->dev->ifindex;
	ft->dst_cache = dst;
}

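/* Allocate a flow entry for @ct, taking a reference on the conntrack
 * entry and on both cached routes. Fails if the conntrack entry is
 * already dying or any of the references cannot be taken.
 */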
struct flow_offload *
flow_offload_alloc(struct nf_conn *ct, struct nf_flow_route *route)
{
	struct flow_offload_entry *entry;
	struct flow_offload *flow;

	if (unlikely(nf_ct_is_dying(ct) ||
	    !atomic_inc_not_zero(&ct->ct_general.use)))
		return NULL;

	entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
	if (!entry)
		goto err_ct_refcnt;

	flow = &entry->flow;

	if (!dst_hold_safe(route->tuple[FLOW_OFFLOAD_DIR_ORIGINAL].dst))
		goto err_dst_cache_original;

	if (!dst_hold_safe(route->tuple[FLOW_OFFLOAD_DIR_REPLY].dst))
		goto err_dst_cache_reply;

	entry->ct = ct;

	flow_offload_fill_dir(flow, ct, route, FLOW_OFFLOAD_DIR_ORIGINAL);
	flow_offload_fill_dir(flow, ct, route, FLOW_OFFLOAD_DIR_REPLY);

	if (ct->status & IPS_SRC_NAT)
		flow->flags |= FLOW_OFFLOAD_SNAT;
	if (ct->status & IPS_DST_NAT)
		flow->flags |= FLOW_OFFLOAD_DNAT;

	return flow;

err_dst_cache_reply:
	dst_release(route->tuple[FLOW_OFFLOAD_DIR_ORIGINAL].dst);
err_dst_cache_original:
	kfree(entry);
err_ct_refcnt:
	nf_ct_put(ct);

	return NULL;
}
EXPORT_SYMBOL_GPL(flow_offload_alloc);

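/* Reset TCP tracking to ESTABLISHED and clear both peers' tracked
 * windows so they are re-learned once the flow is handled by the
 * slow path again.
 */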
static void flow_offload_fixup_tcp(struct ip_ct_tcp *tcp)
{
	tcp->state = TCP_CONNTRACK_ESTABLISHED;
	tcp->seen[0].td_maxwin = 0;
	tcp->seen[1].td_maxwin = 0;
}

#define NF_FLOWTABLE_TCP_PICKUP_TIMEOUT	(120 * HZ)
#define NF_FLOWTABLE_UDP_PICKUP_TIMEOUT	(30 * HZ)

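/* Give the conntrack entry a fresh pickup timeout so it does not
 * expire right after the flow leaves the offload path. Only TCP and
 * UDP are handled; other protocols are left untouched.
 */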
static void flow_offload_fixup_ct_state(struct nf_conn *ct)
{
	const struct nf_conntrack_l4proto *l4proto;
	unsigned int timeout;
	int l4num;

	l4num = nf_ct_protonum(ct);
	if (l4num == IPPROTO_TCP)
		flow_offload_fixup_tcp(&ct->proto.tcp);

	l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), l4num);
	if (!l4proto)
		return;

	if (l4num == IPPROTO_TCP)
		timeout = NF_FLOWTABLE_TCP_PICKUP_TIMEOUT;
	else if (l4num == IPPROTO_UDP)
		timeout = NF_FLOWTABLE_UDP_PICKUP_TIMEOUT;
	else
		return;

	ct->timeout = nfct_time_stamp + timeout;
}

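/* Release both cached routes and the conntrack reference. If the flow
 * was marked dying, the conntrack entry itself is deleted as well.
 */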
void flow_offload_free(struct flow_offload *flow)
{
	struct flow_offload_entry *e;

	dst_release(flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.dst_cache);
	dst_release(flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_cache);
	e = container_of(flow, struct flow_offload_entry, flow);
	if (flow->flags & FLOW_OFFLOAD_DYING)
		nf_ct_delete(e->ct, 0, 0);
	nf_ct_put(e->ct);
	kfree_rcu(e, rcu_head);
}
EXPORT_SYMBOL_GPL(flow_offload_free);

/* Mark a flow as dying; the garbage collector unlinks it and
 * flow_offload_free() then deletes the underlying conntrack entry.
 * Called from nf_flow_table_do_cleanup() below.
 */
void flow_offload_dead(struct flow_offload *flow)
{
	flow->flags |= FLOW_OFFLOAD_DYING;
}
EXPORT_SYMBOL_GPL(flow_offload_dead);

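/* The table is keyed on the leading fields of the tuple, up to but
 * not including 'dir', so the two directions of a flow are hashed as
 * independent entries.
 */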
static u32 flow_offload_hash(const void *data, u32 len, u32 seed)
{
	const struct flow_offload_tuple *tuple = data;

	return jhash(tuple, offsetof(struct flow_offload_tuple, dir), seed);
}

static u32 flow_offload_hash_obj(const void *data, u32 len, u32 seed)
{
	const struct flow_offload_tuple_rhash *tuplehash = data;

	return jhash(&tuplehash->tuple, offsetof(struct flow_offload_tuple, dir), seed);
}

static int flow_offload_hash_cmp(struct rhashtable_compare_arg *arg,
				 const void *ptr)
{
	const struct flow_offload_tuple *tuple = arg->key;
	const struct flow_offload_tuple_rhash *x = ptr;

	if (memcmp(&x->tuple, tuple, offsetof(struct flow_offload_tuple, dir)))
		return 1;

	return 0;
}

static const struct rhashtable_params nf_flow_offload_rhash_params = {
	.head_offset		= offsetof(struct flow_offload_tuple_rhash, node),
	.hashfn			= flow_offload_hash,
	.obj_hashfn		= flow_offload_hash_obj,
	.obj_cmpfn		= flow_offload_hash_cmp,
	.automatic_shrinking	= true,
};

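/* Insert both directions of the flow into the table and arm the flow
 * timeout. If the second insertion fails, the first one is undone so
 * the table is never left with a half-inserted flow.
 */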
int flow_offload_add(struct nf_flowtable *flow_table, struct flow_offload *flow)
{
	int err;

	flow->timeout = (u32)jiffies + NF_FLOW_TIMEOUT;

	err = rhashtable_insert_fast(&flow_table->rhashtable,
				     &flow->tuplehash[0].node,
				     nf_flow_offload_rhash_params);
	if (err < 0)
		return err;

	err = rhashtable_insert_fast(&flow_table->rhashtable,
				     &flow->tuplehash[1].node,
				     nf_flow_offload_rhash_params);
	if (err < 0) {
		rhashtable_remove_fast(&flow_table->rhashtable,
				       &flow->tuplehash[0].node,
				       nf_flow_offload_rhash_params);
		return err;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(flow_offload_add);

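/* Unlink both directions, clear IPS_OFFLOAD so conntrack resumes
 * normal timeout handling for the entry, then free the flow.
 */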
static void flow_offload_del(struct nf_flowtable *flow_table,
			     struct flow_offload *flow)
{
	struct flow_offload_entry *e;

	rhashtable_remove_fast(&flow_table->rhashtable,
			       &flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].node,
			       nf_flow_offload_rhash_params);
	rhashtable_remove_fast(&flow_table->rhashtable,
			       &flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].node,
			       nf_flow_offload_rhash_params);

	e = container_of(flow, struct flow_offload_entry, flow);
	clear_bit(IPS_OFFLOAD_BIT, &e->ct->status);

	flow_offload_free(flow);
}

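/* Hand a flow back to the slow path: flag it for teardown and fix up
 * the conntrack state so classic conntrack can take over again.
 */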
void flow_offload_teardown(struct flow_offload *flow)
{
	struct flow_offload_entry *e;

	flow->flags |= FLOW_OFFLOAD_TEARDOWN;

	e = container_of(flow, struct flow_offload_entry, flow);
	flow_offload_fixup_ct_state(e->ct);
}
EXPORT_SYMBOL_GPL(flow_offload_teardown);

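/* Look up one direction of a flow by tuple. Flows already flagged
 * dying or torn down are treated as absent.
 */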
struct flow_offload_tuple_rhash *
flow_offload_lookup(struct nf_flowtable *flow_table,
		    struct flow_offload_tuple *tuple)
{
	struct flow_offload_tuple_rhash *tuplehash;
	struct flow_offload *flow;
	int dir;

	tuplehash = rhashtable_lookup_fast(&flow_table->rhashtable, tuple,
					   nf_flow_offload_rhash_params);
	if (!tuplehash)
		return NULL;

	dir = tuplehash->tuple.dir;
	flow = container_of(tuplehash, struct flow_offload, tuplehash[dir]);
	if (flow->flags & (FLOW_OFFLOAD_DYING | FLOW_OFFLOAD_TEARDOWN))
		return NULL;

	return tuplehash;
}
EXPORT_SYMBOL_GPL(flow_offload_lookup);

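/* Walk the whole table, calling @iter once per flow. Only the
 * ORIGINAL direction entry is visited so each flow is seen exactly
 * once; -EAGAIN from the walker simply restarts the iteration.
 */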
int nf_flow_table_iterate(struct nf_flowtable *flow_table,
			  void (*iter)(struct flow_offload *flow, void *data),
			  void *data)
{
	struct flow_offload_tuple_rhash *tuplehash;
	struct rhashtable_iter hti;
	struct flow_offload *flow;
	int err;

	err = rhashtable_walk_init(&flow_table->rhashtable, &hti, GFP_KERNEL);
	if (err)
		return err;

	rhashtable_walk_start(&hti);

	while ((tuplehash = rhashtable_walk_next(&hti))) {
		if (IS_ERR(tuplehash)) {
			err = PTR_ERR(tuplehash);
			if (err != -EAGAIN)
				goto out;

			continue;
		}
		if (tuplehash->tuple.dir)
			continue;

		flow = container_of(tuplehash, struct flow_offload, tuplehash[0]);

		iter(flow, data);
	}
out:
	rhashtable_walk_stop(&hti);
	rhashtable_walk_exit(&hti);

	return err;
}
EXPORT_SYMBOL_GPL(nf_flow_table_iterate);

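/* Compare against jiffies with wrap-around in mind: the signed
 * difference goes negative once the deadline has passed.
 */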
static inline bool nf_flow_has_expired(const struct flow_offload *flow)
{
	return (__s32)(flow->timeout - (u32)jiffies) <= 0;
}

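/* One garbage-collection pass: drop flows that have expired or are
 * flagged dying/teardown. Returns 0 only when the walk cannot start.
 */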
static int nf_flow_offload_gc_step(struct nf_flowtable *flow_table)
{
	struct flow_offload_tuple_rhash *tuplehash;
	struct rhashtable_iter hti;
	struct flow_offload *flow;
	int err;

	err = rhashtable_walk_init(&flow_table->rhashtable, &hti, GFP_KERNEL);
	if (err)
		return 0;

	rhashtable_walk_start(&hti);

	while ((tuplehash = rhashtable_walk_next(&hti))) {
		if (IS_ERR(tuplehash)) {
			err = PTR_ERR(tuplehash);
			if (err != -EAGAIN)
				goto out;

			continue;
		}
		if (tuplehash->tuple.dir)
			continue;

		flow = container_of(tuplehash, struct flow_offload, tuplehash[0]);

		if (nf_flow_has_expired(flow) ||
		    (flow->flags & (FLOW_OFFLOAD_DYING |
				    FLOW_OFFLOAD_TEARDOWN)))
			flow_offload_del(flow_table, flow);
	}
out:
	rhashtable_walk_stop(&hti);
	rhashtable_walk_exit(&hti);

	return 1;
}

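/* Periodic GC work, re-armed once per second on the power-efficient
 * workqueue.
 */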
static void nf_flow_offload_work_gc(struct work_struct *work)
{
	struct nf_flowtable *flow_table;

	flow_table = container_of(work, struct nf_flowtable, gc_work.work);
	nf_flow_offload_gc_step(flow_table);
	queue_delayed_work(system_power_efficient_wq, &flow_table->gc_work, HZ);
}

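/* The helpers below only fix up the L4 checksum after a port change;
 * the port bytes themselves are rewritten by nf_flow_snat_port() and
 * nf_flow_dnat_port(). A zero UDP checksum means "no checksum" and is
 * left alone unless the device still has to fill it in
 * (CHECKSUM_PARTIAL).
 */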
static int nf_flow_nat_port_tcp(struct sk_buff *skb, unsigned int thoff,
				__be16 port, __be16 new_port)
{
	struct tcphdr *tcph;

	if (!pskb_may_pull(skb, thoff + sizeof(*tcph)) ||
	    skb_try_make_writable(skb, thoff + sizeof(*tcph)))
		return -1;

	tcph = (void *)(skb_network_header(skb) + thoff);
	inet_proto_csum_replace2(&tcph->check, skb, port, new_port, false);

	return 0;
}

static int nf_flow_nat_port_udp(struct sk_buff *skb, unsigned int thoff,
				__be16 port, __be16 new_port)
{
	struct udphdr *udph;

	if (!pskb_may_pull(skb, thoff + sizeof(*udph)) ||
	    skb_try_make_writable(skb, thoff + sizeof(*udph)))
		return -1;

	udph = (void *)(skb_network_header(skb) + thoff);
	if (udph->check || skb->ip_summed == CHECKSUM_PARTIAL) {
		inet_proto_csum_replace2(&udph->check, skb, port,
					 new_port, false);
		if (!udph->check)
			udph->check = CSUM_MANGLED_0;
	}

	return 0;
}

static int nf_flow_nat_port(struct sk_buff *skb, unsigned int thoff,
			    u8 protocol, __be16 port, __be16 new_port)
{
	switch (protocol) {
	case IPPROTO_TCP:
		if (nf_flow_nat_port_tcp(skb, thoff, port, new_port) < 0)
			return NF_DROP;
		break;
	case IPPROTO_UDP:
		if (nf_flow_nat_port_udp(skb, thoff, port, new_port) < 0)
			return NF_DROP;
		break;
	}

	return 0;
}

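/* Source NAT: rewrite the source port of ORIGINAL traffic and the
 * destination port of REPLY traffic, taking the mapped port from the
 * opposite direction's tuple, then fix up the checksum.
 */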
int nf_flow_snat_port(const struct flow_offload *flow,
		      struct sk_buff *skb, unsigned int thoff,
		      u8 protocol, enum flow_offload_tuple_dir dir)
{
	struct flow_ports *hdr;
	__be16 port, new_port;

	if (!pskb_may_pull(skb, thoff + sizeof(*hdr)) ||
	    skb_try_make_writable(skb, thoff + sizeof(*hdr)))
		return -1;

	hdr = (void *)(skb_network_header(skb) + thoff);

	switch (dir) {
	case FLOW_OFFLOAD_DIR_ORIGINAL:
		port = hdr->source;
		new_port = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_port;
		hdr->source = new_port;
		break;
	case FLOW_OFFLOAD_DIR_REPLY:
		port = hdr->dest;
		new_port = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.src_port;
		hdr->dest = new_port;
		break;
	default:
		return -1;
	}

	return nf_flow_nat_port(skb, thoff, protocol, port, new_port);
}
EXPORT_SYMBOL_GPL(nf_flow_snat_port);

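/* Destination NAT: the mirror image of nf_flow_snat_port(). */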
int nf_flow_dnat_port(const struct flow_offload *flow,
		      struct sk_buff *skb, unsigned int thoff,
		      u8 protocol, enum flow_offload_tuple_dir dir)
{
	struct flow_ports *hdr;
	__be16 port, new_port;

	if (!pskb_may_pull(skb, thoff + sizeof(*hdr)) ||
	    skb_try_make_writable(skb, thoff + sizeof(*hdr)))
		return -1;

	hdr = (void *)(skb_network_header(skb) + thoff);

	switch (dir) {
	case FLOW_OFFLOAD_DIR_ORIGINAL:
		port = hdr->dest;
		new_port = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.src_port;
		hdr->dest = new_port;
		break;
	case FLOW_OFFLOAD_DIR_REPLY:
		port = hdr->source;
		new_port = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.dst_port;
		hdr->source = new_port;
		break;
	default:
		return -1;
	}

	return nf_flow_nat_port(skb, thoff, protocol, port, new_port);
}
EXPORT_SYMBOL_GPL(nf_flow_dnat_port);

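/* Set up a flowtable: initialise the rhashtable, start the periodic
 * GC work and register the table on the global flowtables list.
 */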
int nf_flow_table_init(struct nf_flowtable *flowtable)
{
	int err;

	INIT_DEFERRABLE_WORK(&flowtable->gc_work, nf_flow_offload_work_gc);

	err = rhashtable_init(&flowtable->rhashtable,
			      &nf_flow_offload_rhash_params);
	if (err < 0)
		return err;

	queue_delayed_work(system_power_efficient_wq,
			   &flowtable->gc_work, HZ);

	mutex_lock(&flowtable_lock);
	list_add(&flowtable->list, &flowtables);
	mutex_unlock(&flowtable_lock);

	return 0;
}
EXPORT_SYMBOL_GPL(nf_flow_table_init);

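/* Cleanup iterator: with no device, tear every flow down; with a
 * device, kill only flows in its netns that use it as input interface.
 */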
static void nf_flow_table_do_cleanup(struct flow_offload *flow, void *data)
{
	struct net_device *dev = data;
	struct flow_offload_entry *e;

	e = container_of(flow, struct flow_offload_entry, flow);

	if (!dev) {
		flow_offload_teardown(flow);
		return;
	}
	if (net_eq(nf_ct_net(e->ct), dev_net(dev)) &&
	    (flow->tuplehash[0].tuple.iifidx == dev->ifindex ||
	     flow->tuplehash[1].tuple.iifidx == dev->ifindex))
		flow_offload_dead(flow);
}

static void nf_flow_table_iterate_cleanup(struct nf_flowtable *flowtable,
					  struct net_device *dev)
{
	nf_flow_table_iterate(flowtable, nf_flow_table_do_cleanup, dev);
	flush_delayed_work(&flowtable->gc_work);
}

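/* Detach all flows using @dev from every registered flowtable and
 * flush the GC work so the removals have taken effect on return.
 */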
void nf_flow_table_cleanup(struct net *net, struct net_device *dev)
{
	struct nf_flowtable *flowtable;

	mutex_lock(&flowtable_lock);
	list_for_each_entry(flowtable, &flowtables, list)
		nf_flow_table_iterate_cleanup(flowtable, dev);
	mutex_unlock(&flowtable_lock);
}
EXPORT_SYMBOL_GPL(nf_flow_table_cleanup);

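/* Tear down a flowtable: unregister it, stop the GC work, release any
 * remaining flows and destroy the hashtable.
 */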
void nf_flow_table_free(struct nf_flowtable *flow_table)
{
	mutex_lock(&flowtable_lock);
	list_del(&flow_table->list);
	mutex_unlock(&flowtable_lock);
	cancel_delayed_work_sync(&flow_table->gc_work);
	nf_flow_table_iterate(flow_table, nf_flow_table_do_cleanup, NULL);
	WARN_ON(!nf_flow_offload_gc_step(flow_table));
	rhashtable_destroy(&flow_table->rhashtable);
}
EXPORT_SYMBOL_GPL(nf_flow_table_free);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");