/*
 * (C) 1999-2001 Paul `Rusty' Russell
 * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
 * (C) 2011 Patrick McHardy <kaber@trash.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/timer.h>
#include <linux/skbuff.h>
#include <linux/gfp.h>
#include <net/xfrm.h>
#include <linux/jhash.h>
#include <linux/rtnetlink.h>

#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_nat.h>
#include <net/netfilter/nf_nat_l3proto.h>
#include <net/netfilter/nf_nat_l4proto.h>
#include <net/netfilter/nf_nat_core.h>
#include <net/netfilter/nf_nat_helper.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_seqadj.h>
#include <net/netfilter/nf_conntrack_l3proto.h>
#include <net/netfilter/nf_conntrack_zones.h>
#include <linux/netfilter/nf_nat.h>

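/* Module-global NAT state: nf_nat_bysource is an extra hash table,
 * indexed by the original source tuple, used to reuse an existing
 * source mapping for new connections from the same endpoint.  Writers
 * partition it across nf_nat_locks; readers walk it under RCU.
 */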
static spinlock_t nf_nat_locks[CONNTRACK_LOCKS];

static DEFINE_MUTEX(nf_nat_proto_mutex);
static const struct nf_nat_l3proto __rcu *nf_nat_l3protos[NFPROTO_NUMPROTO]
						__read_mostly;
static const struct nf_nat_l4proto __rcu **nf_nat_l4protos[NFPROTO_NUMPROTO]
						__read_mostly;

static struct hlist_head *nf_nat_bysource __read_mostly;
static unsigned int nf_nat_htable_size __read_mostly;
static unsigned int nf_nat_hash_rnd __read_mostly;

inline const struct nf_nat_l3proto *
__nf_nat_l3proto_find(u8 family)
{
	return rcu_dereference(nf_nat_l3protos[family]);
}

inline const struct nf_nat_l4proto *
__nf_nat_l4proto_find(u8 family, u8 protonum)
{
	return rcu_dereference(nf_nat_l4protos[family][protonum]);
}
EXPORT_SYMBOL_GPL(__nf_nat_l4proto_find);

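/* With CONFIG_XFRM, NAT has to cooperate with IPsec policy lookups:
 * the flow used for the lookup must describe the packet as it will
 * appear on the wire, i.e. after any NAT rewrite in this direction.
 */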
#ifdef CONFIG_XFRM
static void __nf_nat_decode_session(struct sk_buff *skb, struct flowi *fl)
{
	const struct nf_nat_l3proto *l3proto;
	const struct nf_conn *ct;
	enum ip_conntrack_info ctinfo;
	enum ip_conntrack_dir dir;
	unsigned long statusbit;
	u8 family;

	ct = nf_ct_get(skb, &ctinfo);
	if (ct == NULL)
		return;

	family = nf_ct_l3num(ct);
	l3proto = __nf_nat_l3proto_find(family);
	if (l3proto == NULL)
		return;

	dir = CTINFO2DIR(ctinfo);
	if (dir == IP_CT_DIR_ORIGINAL)
		statusbit = IPS_DST_NAT;
	else
		statusbit = IPS_SRC_NAT;

	l3proto->decode_session(skb, ct, dir, statusbit, fl);
}

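/* Re-do the routing decision after NAT has mangled the packet: decode
 * the (post-NAT) flow, run it through xfrm_lookup() again, and make
 * sure there is enough headroom for the possibly-changed device.
 */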
int nf_xfrm_me_harder(struct net *net, struct sk_buff *skb, unsigned int family)
{
	struct flowi fl;
	unsigned int hh_len;
	struct dst_entry *dst;
	int err;

	err = xfrm_decode_session(skb, &fl, family);
	if (err < 0)
		return err;

	dst = skb_dst(skb);
	if (dst->xfrm)
		dst = ((struct xfrm_dst *)dst)->route;
	if (!dst_hold_safe(dst))
		return -EHOSTUNREACH;

	dst = xfrm_lookup(net, dst, &fl, skb->sk, 0);
	if (IS_ERR(dst))
		return PTR_ERR(dst);

	skb_dst_drop(skb);
	skb_dst_set(skb, dst);

	/* Change in oif may mean change in hh_len. */
	hh_len = skb_dst(skb)->dev->hard_header_len;
	if (skb_headroom(skb) < hh_len &&
	    pskb_expand_head(skb, hh_len - skb_headroom(skb), 0, GFP_ATOMIC))
		return -ENOMEM;
	return 0;
}
EXPORT_SYMBOL(nf_xfrm_me_harder);
#endif /* CONFIG_XFRM */

/* We keep an extra hash for each conntrack, for fast searching. */
static unsigned int
hash_by_src(const struct net *n, const struct nf_conntrack_tuple *tuple)
{
	unsigned int hash;

	get_random_once(&nf_nat_hash_rnd, sizeof(nf_nat_hash_rnd));

	/* Original src, to ensure we map it consistently if poss. */
	hash = jhash2((u32 *)&tuple->src, sizeof(tuple->src) / sizeof(u32),
		      tuple->dst.protonum ^ nf_nat_hash_rnd ^ net_hash_mix(n));

	return reciprocal_scale(hash, nf_nat_htable_size);
}

/* Is this tuple already taken? (not by us) */
int
nf_nat_used_tuple(const struct nf_conntrack_tuple *tuple,
		  const struct nf_conn *ignored_conntrack)
{
	/* Conntrack tracking doesn't keep track of outgoing tuples; only
	 * incoming ones.  NAT means they don't have a fixed mapping,
	 * so we invert the tuple and look for the incoming reply.
	 *
	 * We could keep a separate hash if this proves too slow.
	 */
	struct nf_conntrack_tuple reply;

	nf_ct_invert_tuplepr(&reply, tuple);
	return nf_conntrack_tuple_taken(&reply, ignored_conntrack);
}
EXPORT_SYMBOL(nf_nat_used_tuple);

/* If we source map this tuple so reply looks like reply_tuple, will
 * that meet the constraints of range.
 */
static int in_range(const struct nf_nat_l3proto *l3proto,
		    const struct nf_nat_l4proto *l4proto,
		    const struct nf_conntrack_tuple *tuple,
		    const struct nf_nat_range *range)
{
	/* If we are supposed to map IPs, then we must be in the
	 * range specified, otherwise let this drag us onto a new src IP.
	 */
	if (range->flags & NF_NAT_RANGE_MAP_IPS &&
	    !l3proto->in_range(tuple, range))
		return 0;

	if (!(range->flags & NF_NAT_RANGE_PROTO_SPECIFIED) ||
	    l4proto->in_range(tuple, NF_NAT_MANIP_SRC,
			      &range->min_proto, &range->max_proto))
		return 1;

	return 0;
}

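/* Does @ct originate from the same source endpoint (address, protocol
 * and source port/id) as @tuple?  Used when scanning a bysource chain.
 */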
static inline int
same_src(const struct nf_conn *ct,
	 const struct nf_conntrack_tuple *tuple)
{
	const struct nf_conntrack_tuple *t;

	t = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple;
	return (t->dst.protonum == tuple->dst.protonum &&
		nf_inet_addr_cmp(&t->src.u3, &tuple->src.u3) &&
		t->src.u.all == tuple->src.u.all);
}

/* Only called for SRC manip */
static int
find_appropriate_src(struct net *net,
		     const struct nf_conntrack_zone *zone,
		     const struct nf_nat_l3proto *l3proto,
		     const struct nf_nat_l4proto *l4proto,
		     const struct nf_conntrack_tuple *tuple,
		     struct nf_conntrack_tuple *result,
		     const struct nf_nat_range *range)
{
	unsigned int h = hash_by_src(net, tuple);
	const struct nf_conn *ct;

	hlist_for_each_entry_rcu(ct, &nf_nat_bysource[h], nat_bysource) {
		if (same_src(ct, tuple) &&
		    net_eq(net, nf_ct_net(ct)) &&
		    nf_ct_zone_equal(ct, zone, IP_CT_DIR_ORIGINAL)) {
			/* Copy source part from reply tuple. */
			nf_ct_invert_tuplepr(result,
					     &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
			result->dst = tuple->dst;

			if (in_range(l3proto, l4proto, result, range))
				return 1;
		}
	}
	return 0;
}

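/* Illustrative example for find_best_ips_proto() below: with a range
 * of 10.0.0.1-10.0.0.14, a given client is hashed onto one address in
 * that span and keeps it for all of its connections; with
 * NF_NAT_RANGE_PERSISTENT the destination is left out of the hash, so
 * the chosen address is also stable across different destinations.
 */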
/* For [FUTURE] fragmentation handling, we want the least-used
 * src-ip/dst-ip/proto triple.  Fairness doesn't come into it.  Thus
 * if the range specifies 1.2.3.4 ports 10000-10005 and 1.2.3.5 ports
 * 1-65535, we don't do pro-rata allocation based on ports; we choose
 * the ip with the lowest src-ip/dst-ip/proto usage.
 */
static void
find_best_ips_proto(const struct nf_conntrack_zone *zone,
		    struct nf_conntrack_tuple *tuple,
		    const struct nf_nat_range *range,
		    const struct nf_conn *ct,
		    enum nf_nat_manip_type maniptype)
{
	union nf_inet_addr *var_ipp;
	unsigned int i, max;
	/* Host order */
	u32 minip, maxip, j, dist;
	bool full_range;

	/* No IP mapping?  Do nothing. */
	if (!(range->flags & NF_NAT_RANGE_MAP_IPS))
		return;

	if (maniptype == NF_NAT_MANIP_SRC)
		var_ipp = &tuple->src.u3;
	else
		var_ipp = &tuple->dst.u3;

	/* Fast path: only one choice. */
	if (nf_inet_addr_cmp(&range->min_addr, &range->max_addr)) {
		*var_ipp = range->min_addr;
		return;
	}

	if (nf_ct_l3num(ct) == NFPROTO_IPV4)
		max = sizeof(var_ipp->ip) / sizeof(u32) - 1;
	else
		max = sizeof(var_ipp->ip6) / sizeof(u32) - 1;

	/* Hashing source and destination IPs gives a fairly even
	 * spread in practice (if there are a small number of IPs
	 * involved, there usually aren't that many connections
	 * anyway).  The consistency means that servers see the same
	 * client coming from the same IP (some Internet Banking sites
	 * like this), even across reboots.
	 */
	j = jhash2((u32 *)&tuple->src.u3, sizeof(tuple->src.u3) / sizeof(u32),
		   range->flags & NF_NAT_RANGE_PERSISTENT ?
			0 : (__force u32)tuple->dst.u3.all[max] ^ zone->id);

	full_range = false;
	for (i = 0; i <= max; i++) {
		/* If first bytes of the address are at the maximum, use the
		 * distance. Otherwise use the full range.
		 */
		if (!full_range) {
			minip = ntohl((__force __be32)range->min_addr.all[i]);
			maxip = ntohl((__force __be32)range->max_addr.all[i]);
			dist  = maxip - minip + 1;
		} else {
			minip = 0;
			dist  = ~0;
		}

		var_ipp->all[i] = (__force __u32)
			htonl(minip + reciprocal_scale(j, dist));
		if (var_ipp->all[i] != range->max_addr.all[i])
			full_range = true;

		if (!(range->flags & NF_NAT_RANGE_PERSISTENT))
			j ^= (__force u32)tuple->dst.u3.all[i];
	}
}

/* Manipulate the tuple into the range given. For NF_INET_POST_ROUTING,
 * we change the source to map into the range. For NF_INET_PRE_ROUTING
 * and NF_INET_LOCAL_OUT, we change the destination to map into the
 * range. It might not be possible to get a unique tuple, but we try.
 * At worst (or if we race), we will end up with a final duplicate in
 * __ip_conntrack_confirm and drop the packet. */
static void
get_unique_tuple(struct nf_conntrack_tuple *tuple,
		 const struct nf_conntrack_tuple *orig_tuple,
		 const struct nf_nat_range *range,
		 struct nf_conn *ct,
		 enum nf_nat_manip_type maniptype)
{
	const struct nf_conntrack_zone *zone;
	const struct nf_nat_l3proto *l3proto;
	const struct nf_nat_l4proto *l4proto;
	struct net *net = nf_ct_net(ct);

	zone = nf_ct_zone(ct);

	rcu_read_lock();
	l3proto = __nf_nat_l3proto_find(orig_tuple->src.l3num);
	l4proto = __nf_nat_l4proto_find(orig_tuple->src.l3num,
					orig_tuple->dst.protonum);

	/* 1) If this srcip/proto/src-proto-part is currently mapped,
	 * and that same mapping gives a unique tuple within the given
	 * range, use that.
	 *
	 * This is only required for source (ie. NAT/masq) mappings.
	 * So far, we don't do local source mappings, so multiple
	 * manips not an issue.
	 */
	if (maniptype == NF_NAT_MANIP_SRC &&
	    !(range->flags & NF_NAT_RANGE_PROTO_RANDOM_ALL)) {
		/* try the original tuple first */
		if (in_range(l3proto, l4proto, orig_tuple, range)) {
			if (!nf_nat_used_tuple(orig_tuple, ct)) {
				*tuple = *orig_tuple;
				goto out;
			}
		} else if (find_appropriate_src(net, zone, l3proto, l4proto,
						orig_tuple, tuple, range)) {
			pr_debug("get_unique_tuple: Found current src map\n");
			if (!nf_nat_used_tuple(tuple, ct))
				goto out;
		}
	}

	/* 2) Select the least-used IP/proto combination in the given range */
	*tuple = *orig_tuple;
	find_best_ips_proto(zone, tuple, range, ct, maniptype);

	/* 3) The per-protocol part of the manip is made to map into
	 * the range to make a unique tuple.
	 */

	/* Only bother mapping if it's not already in range and unique */
	if (!(range->flags & NF_NAT_RANGE_PROTO_RANDOM_ALL)) {
		if (range->flags & NF_NAT_RANGE_PROTO_SPECIFIED) {
			if (l4proto->in_range(tuple, maniptype,
					      &range->min_proto,
					      &range->max_proto) &&
			    (range->min_proto.all == range->max_proto.all ||
			     !nf_nat_used_tuple(tuple, ct)))
				goto out;
		} else if (!nf_nat_used_tuple(tuple, ct)) {
			goto out;
		}
	}

	/* Last chance: get protocol to try to obtain unique tuple. */
	l4proto->unique_tuple(l3proto, tuple, range, maniptype, ct);
out:
	rcu_read_unlock();
}

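/* Attach the NAT conntrack extension to @ct if it isn't there already.
 * Only unconfirmed conntracks may grow extensions, so this can still
 * return NULL.
 */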
struct nf_conn_nat *nf_ct_nat_ext_add(struct nf_conn *ct)
{
	struct nf_conn_nat *nat = nfct_nat(ct);

	if (nat)
		return nat;

	if (!nf_ct_is_confirmed(ct))
		nat = nf_ct_ext_add(ct, NF_CT_EXT_NAT, GFP_ATOMIC);

	return nat;
}
EXPORT_SYMBOL_GPL(nf_ct_nat_ext_add);

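/* Set up NAT for an unconfirmed conntrack: pick a unique tuple for the
 * requested manip and range, rewrite the reply tuple to match, set the
 * IPS_*_NAT/IPS_*_NAT_DONE bits, and for source manips hash the
 * conntrack into nf_nat_bysource.  Returns an NF_ACCEPT/NF_DROP
 * verdict.
 */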
unsigned int
nf_nat_setup_info(struct nf_conn *ct,
		  const struct nf_nat_range *range,
		  enum nf_nat_manip_type maniptype)
{
	struct net *net = nf_ct_net(ct);
	struct nf_conntrack_tuple curr_tuple, new_tuple;

	/* Can't setup nat info for confirmed ct. */
	if (nf_ct_is_confirmed(ct))
		return NF_ACCEPT;

	WARN_ON(maniptype != NF_NAT_MANIP_SRC &&
		maniptype != NF_NAT_MANIP_DST);

	if (WARN_ON(nf_nat_initialized(ct, maniptype)))
		return NF_DROP;

	/* What we've got will look like inverse of reply. Normally
	 * this is what is in the conntrack, except for prior
	 * manipulations (future optimization: if num_manips == 0,
	 * orig_tp = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple)
	 */
	nf_ct_invert_tuplepr(&curr_tuple,
			     &ct->tuplehash[IP_CT_DIR_REPLY].tuple);

	get_unique_tuple(&new_tuple, &curr_tuple, range, ct, maniptype);

	if (!nf_ct_tuple_equal(&new_tuple, &curr_tuple)) {
		struct nf_conntrack_tuple reply;

		/* Alter conntrack table so will recognize replies. */
		nf_ct_invert_tuplepr(&reply, &new_tuple);
		nf_conntrack_alter_reply(ct, &reply);

		/* Non-atomic: we own this at the moment. */
		if (maniptype == NF_NAT_MANIP_SRC)
			ct->status |= IPS_SRC_NAT;
		else
			ct->status |= IPS_DST_NAT;

		if (nfct_help(ct) && !nfct_seqadj(ct))
			if (!nfct_seqadj_ext_add(ct))
				return NF_DROP;
	}

	if (maniptype == NF_NAT_MANIP_SRC) {
		unsigned int srchash;
		spinlock_t *lock;

		srchash = hash_by_src(net,
				      &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
		lock = &nf_nat_locks[srchash % CONNTRACK_LOCKS];
		spin_lock_bh(lock);
		hlist_add_head_rcu(&ct->nat_bysource,
				   &nf_nat_bysource[srchash]);
		spin_unlock_bh(lock);
	}

	/* It's done. */
	if (maniptype == NF_NAT_MANIP_DST)
		ct->status |= IPS_DST_NAT_DONE;
	else
		ct->status |= IPS_SRC_NAT_DONE;

	return NF_ACCEPT;
}
EXPORT_SYMBOL(nf_nat_setup_info);

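/* Example (illustrative only, mirroring __nf_nat_alloc_null_binding
 * below): an SNAT-style caller maps the source onto a single address
 * by passing a degenerate range:
 *
 *	struct nf_nat_range range = {
 *		.flags	  = NF_NAT_RANGE_MAP_IPS,
 *		.min_addr = addr,
 *		.max_addr = addr,
 *	};
 *	verdict = nf_nat_setup_info(ct, &range, NF_NAT_MANIP_SRC);
 */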
static unsigned int
__nf_nat_alloc_null_binding(struct nf_conn *ct, enum nf_nat_manip_type manip)
{
	/* Force range to this IP; let proto decide mapping for
	 * per-proto parts (hence not IP_NAT_RANGE_PROTO_SPECIFIED).
	 * Use reply in case it's already been mangled (eg local packet).
	 */
	union nf_inet_addr ip =
		(manip == NF_NAT_MANIP_SRC ?
		ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u3 :
		ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.u3);
	struct nf_nat_range range = {
		.flags		= NF_NAT_RANGE_MAP_IPS,
		.min_addr	= ip,
		.max_addr	= ip,
	};
	return nf_nat_setup_info(ct, &range, manip);
}

unsigned int
nf_nat_alloc_null_binding(struct nf_conn *ct, unsigned int hooknum)
{
	return __nf_nat_alloc_null_binding(ct, HOOK2MANIP(hooknum));
}
EXPORT_SYMBOL_GPL(nf_nat_alloc_null_binding);

/* Do packet manipulations according to nf_nat_setup_info. */
unsigned int nf_nat_packet(struct nf_conn *ct,
			   enum ip_conntrack_info ctinfo,
			   unsigned int hooknum,
			   struct sk_buff *skb)
{
	const struct nf_nat_l3proto *l3proto;
	const struct nf_nat_l4proto *l4proto;
	enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
	unsigned long statusbit;
	enum nf_nat_manip_type mtype = HOOK2MANIP(hooknum);

	if (mtype == NF_NAT_MANIP_SRC)
		statusbit = IPS_SRC_NAT;
	else
		statusbit = IPS_DST_NAT;

	/* Invert if this is reply dir. */
	if (dir == IP_CT_DIR_REPLY)
		statusbit ^= IPS_NAT_MASK;

	/* Non-atomic: these bits don't change. */
	if (ct->status & statusbit) {
		struct nf_conntrack_tuple target;

		/* We are aiming to look like inverse of other direction. */
		nf_ct_invert_tuplepr(&target, &ct->tuplehash[!dir].tuple);

		l3proto = __nf_nat_l3proto_find(target.src.l3num);
		l4proto = __nf_nat_l4proto_find(target.src.l3num,
						target.dst.protonum);
		if (!l3proto->manip_pkt(skb, 0, l4proto, &target, mtype))
			return NF_DROP;
	}
	return NF_ACCEPT;
}
EXPORT_SYMBOL_GPL(nf_nat_packet);

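/* Filter for the conntrack iteration callbacks below; a zero
 * l3proto/l4proto field acts as a wildcard and matches everything.
 */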
struct nf_nat_proto_clean {
	u8	l3proto;
	u8	l4proto;
};

/* kill conntracks with affected NAT section */
static int nf_nat_proto_remove(struct nf_conn *i, void *data)
{
	const struct nf_nat_proto_clean *clean = data;

	if ((clean->l3proto && nf_ct_l3num(i) != clean->l3proto) ||
	    (clean->l4proto && nf_ct_protonum(i) != clean->l4proto))
		return 0;

	return i->status & IPS_NAT_MASK ? 1 : 0;
}

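/* Unhash @ct from the bysource table.  The caller is responsible for
 * making sure the entry was hashed (IPS_SRC_NAT_DONE is set).
 */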
static void __nf_nat_cleanup_conntrack(struct nf_conn *ct)
{
	unsigned int h;

	h = hash_by_src(nf_ct_net(ct), &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
	spin_lock_bh(&nf_nat_locks[h % CONNTRACK_LOCKS]);
	hlist_del_rcu(&ct->nat_bysource);
	spin_unlock_bh(&nf_nat_locks[h % CONNTRACK_LOCKS]);
}

static int nf_nat_proto_clean(struct nf_conn *ct, void *data)
{
	if (nf_nat_proto_remove(ct, data))
		return 1;

	if ((ct->status & IPS_SRC_NAT_DONE) == 0)
		return 0;

	/* This netns is being destroyed, and conntrack has nat null binding.
	 * Remove it from bysource hash, as the table will be freed soon.
	 *
	 * Else, when the conntrack is destroyed, nf_nat_cleanup_conntrack()
	 * will delete entry from already-freed table.
	 */
	clear_bit(IPS_SRC_NAT_DONE_BIT, &ct->status);
	__nf_nat_cleanup_conntrack(ct);

	/* don't delete conntrack.  Although that would make things a lot
	 * simpler, we'd end up flushing all conntracks on nat rmmod.
	 */
	return 0;
}

static void nf_nat_l4proto_clean(u8 l3proto, u8 l4proto)
{
	struct nf_nat_proto_clean clean = {
		.l3proto = l3proto,
		.l4proto = l4proto,
	};

	nf_ct_iterate_destroy(nf_nat_proto_remove, &clean);
}

static void nf_nat_l3proto_clean(u8 l3proto)
{
	struct nf_nat_proto_clean clean = {
		.l3proto = l3proto,
	};

	nf_ct_iterate_destroy(nf_nat_proto_remove, &clean);
}

/* Protocol registration. */
int nf_nat_l4proto_register(u8 l3proto, const struct nf_nat_l4proto *l4proto)
{
	const struct nf_nat_l4proto **l4protos;
	unsigned int i;
	int ret = 0;

	mutex_lock(&nf_nat_proto_mutex);
	if (nf_nat_l4protos[l3proto] == NULL) {
		l4protos = kmalloc(IPPROTO_MAX * sizeof(struct nf_nat_l4proto *),
				   GFP_KERNEL);
		if (l4protos == NULL) {
			ret = -ENOMEM;
			goto out;
		}

		for (i = 0; i < IPPROTO_MAX; i++)
			RCU_INIT_POINTER(l4protos[i], &nf_nat_l4proto_unknown);

		/* Before making proto_array visible to lockless readers,
		 * we must make sure its content is committed to memory.
		 */
		smp_wmb();

		nf_nat_l4protos[l3proto] = l4protos;
	}

	if (rcu_dereference_protected(
			nf_nat_l4protos[l3proto][l4proto->l4proto],
			lockdep_is_held(&nf_nat_proto_mutex)
			) != &nf_nat_l4proto_unknown) {
		ret = -EBUSY;
		goto out;
	}

	RCU_INIT_POINTER(nf_nat_l4protos[l3proto][l4proto->l4proto], l4proto);
 out:
	mutex_unlock(&nf_nat_proto_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(nf_nat_l4proto_register);

/* No one stores the protocol anywhere; simply delete it. */
void nf_nat_l4proto_unregister(u8 l3proto, const struct nf_nat_l4proto *l4proto)
{
	mutex_lock(&nf_nat_proto_mutex);
	RCU_INIT_POINTER(nf_nat_l4protos[l3proto][l4proto->l4proto],
			 &nf_nat_l4proto_unknown);
	mutex_unlock(&nf_nat_proto_mutex);
	synchronize_rcu();

	nf_nat_l4proto_clean(l3proto, l4proto->l4proto);
}
EXPORT_SYMBOL_GPL(nf_nat_l4proto_unregister);

int nf_nat_l3proto_register(const struct nf_nat_l3proto *l3proto)
{
	int err;

	err = nf_ct_l3proto_try_module_get(l3proto->l3proto);
	if (err < 0)
		return err;

	mutex_lock(&nf_nat_proto_mutex);
	RCU_INIT_POINTER(nf_nat_l4protos[l3proto->l3proto][IPPROTO_TCP],
			 &nf_nat_l4proto_tcp);
	RCU_INIT_POINTER(nf_nat_l4protos[l3proto->l3proto][IPPROTO_UDP],
			 &nf_nat_l4proto_udp);
#ifdef CONFIG_NF_NAT_PROTO_DCCP
	RCU_INIT_POINTER(nf_nat_l4protos[l3proto->l3proto][IPPROTO_DCCP],
			 &nf_nat_l4proto_dccp);
#endif
#ifdef CONFIG_NF_NAT_PROTO_SCTP
	RCU_INIT_POINTER(nf_nat_l4protos[l3proto->l3proto][IPPROTO_SCTP],
			 &nf_nat_l4proto_sctp);
#endif
#ifdef CONFIG_NF_NAT_PROTO_UDPLITE
	RCU_INIT_POINTER(nf_nat_l4protos[l3proto->l3proto][IPPROTO_UDPLITE],
			 &nf_nat_l4proto_udplite);
#endif
	mutex_unlock(&nf_nat_proto_mutex);

	RCU_INIT_POINTER(nf_nat_l3protos[l3proto->l3proto], l3proto);
	return 0;
}
EXPORT_SYMBOL_GPL(nf_nat_l3proto_register);

void nf_nat_l3proto_unregister(const struct nf_nat_l3proto *l3proto)
{
	mutex_lock(&nf_nat_proto_mutex);
	RCU_INIT_POINTER(nf_nat_l3protos[l3proto->l3proto], NULL);
	mutex_unlock(&nf_nat_proto_mutex);
	synchronize_rcu();

	nf_nat_l3proto_clean(l3proto->l3proto);
	nf_ct_l3proto_module_put(l3proto->l3proto);
}
EXPORT_SYMBOL_GPL(nf_nat_l3proto_unregister);

/* No one is using the conntrack by the time this is called. */
static void nf_nat_cleanup_conntrack(struct nf_conn *ct)
{
	if (ct->status & IPS_SRC_NAT_DONE)
		__nf_nat_cleanup_conntrack(ct);
}

static struct nf_ct_ext_type nat_extend __read_mostly = {
	.len		= sizeof(struct nf_conn_nat),
	.align		= __alignof__(struct nf_conn_nat),
	.destroy	= nf_nat_cleanup_conntrack,
	.id		= NF_CT_EXT_NAT,
};

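/* ctnetlink interface: translate CTA_NAT_* netlink attributes from
 * userspace into an nf_nat_range and set up the mapping.
 */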
#if IS_ENABLED(CONFIG_NF_CT_NETLINK)

#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_conntrack.h>

static const struct nla_policy protonat_nla_policy[CTA_PROTONAT_MAX+1] = {
	[CTA_PROTONAT_PORT_MIN]	= { .type = NLA_U16 },
	[CTA_PROTONAT_PORT_MAX]	= { .type = NLA_U16 },
};

static int nfnetlink_parse_nat_proto(struct nlattr *attr,
				     const struct nf_conn *ct,
				     struct nf_nat_range *range)
{
	struct nlattr *tb[CTA_PROTONAT_MAX+1];
	const struct nf_nat_l4proto *l4proto;
	int err;

	err = nla_parse_nested(tb, CTA_PROTONAT_MAX, attr,
			       protonat_nla_policy, NULL);
	if (err < 0)
		return err;

	l4proto = __nf_nat_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct));
	if (l4proto->nlattr_to_range)
		err = l4proto->nlattr_to_range(tb, range);

	return err;
}

static const struct nla_policy nat_nla_policy[CTA_NAT_MAX+1] = {
	[CTA_NAT_V4_MINIP]	= { .type = NLA_U32 },
	[CTA_NAT_V4_MAXIP]	= { .type = NLA_U32 },
	[CTA_NAT_V6_MINIP]	= { .len = sizeof(struct in6_addr) },
	[CTA_NAT_V6_MAXIP]	= { .len = sizeof(struct in6_addr) },
	[CTA_NAT_PROTO]		= { .type = NLA_NESTED },
};

static int
nfnetlink_parse_nat(const struct nlattr *nat,
		    const struct nf_conn *ct, struct nf_nat_range *range,
		    const struct nf_nat_l3proto *l3proto)
{
	struct nlattr *tb[CTA_NAT_MAX+1];
	int err;

	memset(range, 0, sizeof(*range));

	err = nla_parse_nested(tb, CTA_NAT_MAX, nat, nat_nla_policy, NULL);
	if (err < 0)
		return err;

	err = l3proto->nlattr_to_range(tb, range);
	if (err < 0)
		return err;

	if (!tb[CTA_NAT_PROTO])
		return 0;

	return nfnetlink_parse_nat_proto(tb[CTA_NAT_PROTO], ct, range);
}

/* This function is called under rcu_read_lock() */
static int
nfnetlink_parse_nat_setup(struct nf_conn *ct,
			  enum nf_nat_manip_type manip,
			  const struct nlattr *attr)
{
	struct nf_nat_range range;
	const struct nf_nat_l3proto *l3proto;
	int err;

	/* Should not happen, restricted to creating new conntracks
	 * via ctnetlink.
	 */
	if (WARN_ON_ONCE(nf_nat_initialized(ct, manip)))
		return -EEXIST;

	/* Make sure that L3 NAT is there by when we call nf_nat_setup_info to
	 * attach the null binding, otherwise this may oops.
	 */
	l3proto = __nf_nat_l3proto_find(nf_ct_l3num(ct));
	if (l3proto == NULL)
		return -EAGAIN;

	/* No NAT information has been passed, allocate the null-binding */
	if (attr == NULL)
		return __nf_nat_alloc_null_binding(ct, manip) == NF_DROP ? -ENOMEM : 0;

	err = nfnetlink_parse_nat(attr, ct, &range, l3proto);
	if (err < 0)
		return err;

	return nf_nat_setup_info(ct, &range, manip) == NF_DROP ? -ENOMEM : 0;
}
#else
static int
nfnetlink_parse_nat_setup(struct nf_conn *ct,
			  enum nf_nat_manip_type manip,
			  const struct nlattr *attr)
{
	return -EOPNOTSUPP;
}
#endif

static struct nf_ct_helper_expectfn follow_master_nat = {
	.name		= "nat-follow-master",
	.expectfn	= nf_nat_follow_master,
};

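/* Module init.  The bysource table is sized like the main conntrack
 * table but never smaller than CONNTRACK_LOCKS, presumably so that the
 * srchash % CONNTRACK_LOCKS partitioning leaves no lock without at
 * least one bucket.
 */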
static int __init nf_nat_init(void)
{
	int ret, i;

	/* Leave them the same for the moment. */
	nf_nat_htable_size = nf_conntrack_htable_size;
	if (nf_nat_htable_size < CONNTRACK_LOCKS)
		nf_nat_htable_size = CONNTRACK_LOCKS;

	nf_nat_bysource = nf_ct_alloc_hashtable(&nf_nat_htable_size, 0);
	if (!nf_nat_bysource)
		return -ENOMEM;

	ret = nf_ct_extend_register(&nat_extend);
	if (ret < 0) {
		nf_ct_free_hashtable(nf_nat_bysource, nf_nat_htable_size);
		printk(KERN_ERR "nf_nat_core: Unable to register extension\n");
		return ret;
	}

	for (i = 0; i < CONNTRACK_LOCKS; i++)
		spin_lock_init(&nf_nat_locks[i]);

	nf_ct_helper_expectfn_register(&follow_master_nat);

	BUG_ON(nfnetlink_parse_nat_setup_hook != NULL);
	RCU_INIT_POINTER(nfnetlink_parse_nat_setup_hook,
			 nfnetlink_parse_nat_setup);
#ifdef CONFIG_XFRM
	BUG_ON(nf_nat_decode_session_hook != NULL);
	RCU_INIT_POINTER(nf_nat_decode_session_hook, __nf_nat_decode_session);
#endif
	return 0;
}

static void __exit nf_nat_cleanup(void)
{
	struct nf_nat_proto_clean clean = {};
	unsigned int i;

	nf_ct_iterate_destroy(nf_nat_proto_clean, &clean);

	nf_ct_extend_unregister(&nat_extend);
	nf_ct_helper_expectfn_unregister(&follow_master_nat);
	RCU_INIT_POINTER(nfnetlink_parse_nat_setup_hook, NULL);
#ifdef CONFIG_XFRM
	RCU_INIT_POINTER(nf_nat_decode_session_hook, NULL);
#endif
	synchronize_rcu();

	for (i = 0; i < NFPROTO_NUMPROTO; i++)
		kfree(nf_nat_l4protos[i]);

	nf_ct_free_hashtable(nf_nat_bysource, nf_nat_htable_size);
}

MODULE_LICENSE("GPL");

module_init(nf_nat_init);
module_exit(nf_nat_cleanup);