/* Copyright (C) 2013 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef _IP_SET_HASH_GEN_H
#define _IP_SET_HASH_GEN_H
#include <linux/rcupdate.h>
#include <linux/jhash.h>
#include <linux/types.h>
#include <linux/netfilter/ipset/ip_set_timeout.h>
#define __ipset_dereference_protected(p, c) rcu_dereference_protected(p, c)
#define ipset_dereference_protected(p, set) \
__ipset_dereference_protected(p, spin_is_locked(&(set)->lock))
#define rcu_dereference_bh_nfnl(p) rcu_dereference_bh_check(p, 1)
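/* Writers modify the table under set->lock, hence the spin_is_locked()
 * check above; rcu_dereference_bh_nfnl() is for paths already serialized
 * by the nfnl mutex, while plain readers rely on RCU (bottom half)
 * protection.
 */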
/* Hashing which uses arrays to resolve clashes. The hash table is resized
 * (doubled) when searching becomes too long.
 * Internally jhash is used with the assumption that the size of the
 * stored data is a multiple of sizeof(u32).
 *
 * Readers and resizing
 *
 * Resizing can be triggered by userspace command only, and those
 * are serialized by the nfnl mutex. During resizing the set is
 * read-locked, so the only possible concurrent operations are
 * the kernel side readers. Those must be protected by proper RCU locking.
 */
/* Number of elements to store in an initial array block */
#define AHASH_INIT_SIZE 4
/* Max number of elements to store in an array block */
#define AHASH_MAX_SIZE (3 * AHASH_INIT_SIZE)
/* Max number of elements in the array block when tuned */
#define AHASH_MAX_TUNED 64
/* The max number of elements can be tuned */
#ifdef IP_SET_HASH_WITH_MULTI
#define AHASH_MAX(h) ((h)->ahash_max)
tune_ahash_max(u8 curr, u32 multi)
n = curr + AHASH_INIT_SIZE;
/* Currently, when listing, one hash bucket must fit into a single
 * netlink message; therefore we have a hard limit here.
 */
return n > curr && n <= AHASH_MAX_TUNED ? n : curr;
#define TUNE_AHASH_MAX(h, multi) \
((h)->ahash_max = tune_ahash_max((h)->ahash_max, multi))
#define AHASH_MAX(h) AHASH_MAX_SIZE
#define TUNE_AHASH_MAX(h, multi)
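/* An array block (bucket) of the hash: a bitmap of the used slots,
 * the size of the block, the first free position and the flexible
 * array of the stored values, each set->dsize bytes long.
 */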
struct rcu_head rcu; /* for call_rcu_bh */
/* Which positions are used in the array */
DECLARE_BITMAP(used, AHASH_MAX_TUNED);
u8 size; /* size of the array */
u8 pos; /* position of the first free entry */
unsigned char value[0] /* the array of the values */
__aligned(__alignof__(u64));
/* The hash table: the table size stored here in order to make resizing easy */
atomic_t ref; /* References for resizing */
atomic_t uref; /* References for dumping */
u8 htable_bits; /* size of hash table == 2^htable_bits */
struct hbucket __rcu *bucket[0]; /* hashtable buckets */
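/* ref is set while a resize is replacing the table, uref counts parallel
 * dumpers; whoever drops the last uref frees the replaced table, see
 * mtype_resize() and mtype_uref() below.
 */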
#define hbucket(h, i) ((h)->bucket[i])
#define ext_size(n, dsize) \
(sizeof(struct hbucket) + (n) * (dsize))
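/* ext_size() is the amount charged to set->ext_size for a block of
 * n elements of dsize bytes each, including the bucket header.
 */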
#ifndef IPSET_NET_COUNT
#define IPSET_NET_COUNT 1
/* Book-keeping of the prefixes added to the set */
u32 nets[IPSET_NET_COUNT]; /* number of elements for this cidr */
u8 cidr[IPSET_NET_COUNT]; /* the cidr value */
/* Compute the hash table size */
htable_size(u8 hbits)
/* We must fit both into u32 in jhash and INT_MAX in kvmalloc_node() */
hsize = jhash_size(hbits);
if ((INT_MAX - sizeof(struct htable)) / sizeof(struct hbucket *)
return hsize * sizeof(struct hbucket *) + sizeof(struct htable);
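/* Example: hbits = 10 gives jhash_size(10) = 1024 buckets, i.e.
 * 1024 * sizeof(struct hbucket *) + sizeof(struct htable) bytes.
 */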
#ifdef IP_SET_HASH_WITH_NETS
#if IPSET_NET_COUNT > 1
#define __CIDR(cidr, i) (cidr[i])
#define __CIDR(cidr, i) (cidr)
/* cidr + 1 is stored in net_prefixes to support /0 */
#define NCIDR_PUT(cidr) ((cidr) + 1)
#define NCIDR_GET(cidr) ((cidr) - 1)
#ifdef IP_SET_HASH_WITH_NETS_PACKED
/* When cidr is packed with nomatch, cidr - 1 is stored in the data entry */
#define DCIDR_PUT(cidr) ((cidr) - 1)
#define DCIDR_GET(cidr, i) (__CIDR(cidr, i) + 1)
#define DCIDR_PUT(cidr) (cidr)
#define DCIDR_GET(cidr, i) __CIDR(cidr, i)
#define INIT_CIDR(cidr, host_mask) \
DCIDR_PUT(((cidr) ? NCIDR_GET(cidr) : host_mask))
#ifdef IP_SET_HASH_WITH_NET0
/* cidr from 0 to HOST_MASK value and c = cidr + 1 */
#define NLEN (HOST_MASK + 1)
#define CIDR_POS(c) ((c) - 1)
/* cidr from 1 to HOST_MASK value and c = cidr + 1 */
#define NLEN HOST_MASK
#define CIDR_POS(c) ((c) - 2)
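/* Example: a /24 network is stored as c = cidr + 1 = 25; with
 * IP_SET_HASH_WITH_NET0 it occupies slot CIDR_POS(25) = 24, without
 * it /0 cannot be stored and the same /24 uses slot 23.
 */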
#endif /* IP_SET_HASH_WITH_NETS */
#endif /* _IP_SET_HASH_GEN_H */
#error "MTYPE is not defined!"
#error "HTYPE is not defined!"
#error "HOST_MASK is not defined!"
/* Family dependent templates */
#undef mtype_data_equal
#undef mtype_do_data_match
#undef mtype_data_set_flags
#undef mtype_data_reset_elem
#undef mtype_data_reset_flags
#undef mtype_data_netmask
#undef mtype_data_list
#undef mtype_data_next
#undef mtype_ahash_destroy
#undef mtype_ext_cleanup
#undef mtype_add_cidr
#undef mtype_del_cidr
#undef mtype_ahash_memsize
#undef mtype_same_set
#undef mtype_test_cidrs
#undef mtype_data_match
#define mtype_data_equal IPSET_TOKEN(MTYPE, _data_equal)
#ifdef IP_SET_HASH_WITH_NETS
#define mtype_do_data_match IPSET_TOKEN(MTYPE, _do_data_match)
#define mtype_do_data_match(d) 1
#define mtype_data_set_flags IPSET_TOKEN(MTYPE, _data_set_flags)
#define mtype_data_reset_elem IPSET_TOKEN(MTYPE, _data_reset_elem)
#define mtype_data_reset_flags IPSET_TOKEN(MTYPE, _data_reset_flags)
#define mtype_data_netmask IPSET_TOKEN(MTYPE, _data_netmask)
#define mtype_data_list IPSET_TOKEN(MTYPE, _data_list)
#define mtype_data_next IPSET_TOKEN(MTYPE, _data_next)
#define mtype_elem IPSET_TOKEN(MTYPE, _elem)
#define mtype_ahash_destroy IPSET_TOKEN(MTYPE, _ahash_destroy)
#define mtype_ext_cleanup IPSET_TOKEN(MTYPE, _ext_cleanup)
#define mtype_add_cidr IPSET_TOKEN(MTYPE, _add_cidr)
#define mtype_del_cidr IPSET_TOKEN(MTYPE, _del_cidr)
#define mtype_ahash_memsize IPSET_TOKEN(MTYPE, _ahash_memsize)
#define mtype_flush IPSET_TOKEN(MTYPE, _flush)
#define mtype_destroy IPSET_TOKEN(MTYPE, _destroy)
#define mtype_same_set IPSET_TOKEN(MTYPE, _same_set)
#define mtype_kadt IPSET_TOKEN(MTYPE, _kadt)
#define mtype_uadt IPSET_TOKEN(MTYPE, _uadt)
#define mtype_add IPSET_TOKEN(MTYPE, _add)
#define mtype_del IPSET_TOKEN(MTYPE, _del)
#define mtype_test_cidrs IPSET_TOKEN(MTYPE, _test_cidrs)
#define mtype_test IPSET_TOKEN(MTYPE, _test)
#define mtype_uref IPSET_TOKEN(MTYPE, _uref)
#define mtype_expire IPSET_TOKEN(MTYPE, _expire)
#define mtype_resize IPSET_TOKEN(MTYPE, _resize)
#define mtype_head IPSET_TOKEN(MTYPE, _head)
#define mtype_list IPSET_TOKEN(MTYPE, _list)
#define mtype_gc IPSET_TOKEN(MTYPE, _gc)
#define mtype_gc_init IPSET_TOKEN(MTYPE, _gc_init)
#define mtype_variant IPSET_TOKEN(MTYPE, _variant)
#define mtype_data_match IPSET_TOKEN(MTYPE, _data_match)
#define HKEY_DATALEN sizeof(struct mtype_elem)
#define HKEY(data, initval, htable_bits) \
const u32 *__k = (const u32 *)data; \
u32 __l = HKEY_DATALEN / sizeof(u32); \
BUILD_BUG_ON(HKEY_DATALEN % sizeof(u32) != 0); \
jhash2(__k, __l, initval) & jhash_mask(htable_bits); \
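/* Example: for a 16 byte element and htable_bits = 10, HKEY() feeds
 * 4 u32 words to jhash2() and masks the result down to a bucket
 * index in [0, 1023].
 */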
/* The generic hash structure */
struct htable __rcu *table; /* the hash table */
struct timer_list gc; /* garbage collection when timeout enabled */
u32 maxelem; /* max elements in the hash */
u32 initval; /* random jhash init value */
#ifdef IP_SET_HASH_WITH_MARKMASK
u32 markmask; /* mask applied to the mark value to store */
#ifdef IP_SET_HASH_WITH_MULTI
u8 ahash_max; /* max elements in an array block */
#ifdef IP_SET_HASH_WITH_NETMASK
u8 netmask; /* netmask value for subnets to store */
struct mtype_elem next; /* temporary storage for uadd */
#ifdef IP_SET_HASH_WITH_NETS
struct net_prefixes nets[NLEN]; /* book-keeping of prefixes */
#ifdef IP_SET_HASH_WITH_NETS
/* Network cidr size bookkeeping when the hash stores different
 * sized networks. cidr == real cidr + 1 to support /0.
 */
mtype_add_cidr(struct htype *h, u8 cidr, u8 n)
/* Add in increasing prefix order, so larger cidr first */
for (i = 0, j = -1; i < NLEN && h->nets[i].cidr[n]; i++) {
} else if (h->nets[i].cidr[n] < cidr) {
} else if (h->nets[i].cidr[n] == cidr) {
h->nets[CIDR_POS(cidr)].nets[n]++;
h->nets[i].cidr[n] = h->nets[i - 1].cidr[n];
h->nets[i].cidr[n] = cidr;
h->nets[CIDR_POS(cidr)].nets[n] = 1;
mtype_del_cidr(struct htype *h, u8 cidr, u8 n)
u8 i, j, net_end = NLEN - 1;
for (i = 0; i < NLEN; i++) {
if (h->nets[i].cidr[n] != cidr)
h->nets[CIDR_POS(cidr)].nets[n]--;
if (h->nets[CIDR_POS(cidr)].nets[n] > 0)
for (j = i; j < net_end && h->nets[j].cidr[n]; j++)
h->nets[j].cidr[n] = h->nets[j + 1].cidr[n];
h->nets[j].cidr[n] = 0;
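/* The nets[] bookkeeping above keeps the cidr values sorted in
 * decreasing order (most specific prefix first) and counts the
 * elements stored per prefix length, so mtype_test_cidrs() can probe
 * the possible prefixes in that order.
 */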
/* Calculate the actual memory size of the set data */
mtype_ahash_memsize(const struct htype *h, const struct htable *t)
return sizeof(*h) + sizeof(*t);
/* Get the ith element from the array block n */
#define ahash_data(n, i, dsize) \
((struct mtype_elem *)((n)->value + ((i) * (dsize))))
mtype_ext_cleanup(struct ip_set *set, struct hbucket *n)
for (i = 0; i < n->pos; i++)
if (test_bit(i, n->used))
ip_set_ext_destroy(set, ahash_data(n, i, set->dsize));
/* Flush a hash type of set: destroy all elements */
mtype_flush(struct ip_set *set)
struct htype *h = set->data;
t = ipset_dereference_protected(h->table, set);
for (i = 0; i < jhash_size(t->htable_bits); i++) {
n = __ipset_dereference_protected(hbucket(t, i), 1);
if (set->extensions & IPSET_EXT_DESTROY)
mtype_ext_cleanup(set, n);
/* FIXME: use slab cache */
rcu_assign_pointer(hbucket(t, i), NULL);
#ifdef IP_SET_HASH_WITH_NETS
memset(h->nets, 0, sizeof(h->nets));
/* Destroy the hashtable part of the set */
mtype_ahash_destroy(struct ip_set *set, struct htable *t, bool ext_destroy)
for (i = 0; i < jhash_size(t->htable_bits); i++) {
n = __ipset_dereference_protected(hbucket(t, i), 1);
if (set->extensions & IPSET_EXT_DESTROY && ext_destroy)
mtype_ext_cleanup(set, n);
/* FIXME: use slab cache */
/* Destroy a hash type of set */
mtype_destroy(struct ip_set *set)
struct htype *h = set->data;
if (SET_WITH_TIMEOUT(set))
del_timer_sync(&h->gc);
mtype_ahash_destroy(set,
__ipset_dereference_protected(h->table, 1), true);
mtype_gc_init(struct ip_set *set, void (*gc)(unsigned long ul_set))
struct htype *h = set->data;
setup_timer(&h->gc, gc, (unsigned long)set);
mod_timer(&h->gc, jiffies + IPSET_GC_PERIOD(set->timeout) * HZ);
pr_debug("gc initialized, run in every %u\n",
IPSET_GC_PERIOD(set->timeout));
mtype_same_set(const struct ip_set *a, const struct ip_set *b)
const struct htype *x = a->data;
const struct htype *y = b->data;
/* Resizing changes htable_bits, so we ignore it */
return x->maxelem == y->maxelem &&
a->timeout == b->timeout &&
#ifdef IP_SET_HASH_WITH_NETMASK
x->netmask == y->netmask &&
#ifdef IP_SET_HASH_WITH_MARKMASK
x->markmask == y->markmask &&
a->extensions == b->extensions;
/* Delete expired elements from the hashtable */
mtype_expire(struct ip_set *set, struct htype *h)
struct hbucket *n, *tmp;
struct mtype_elem *data;
size_t dsize = set->dsize;
#ifdef IP_SET_HASH_WITH_NETS
t = ipset_dereference_protected(h->table, set);
for (i = 0; i < jhash_size(t->htable_bits); i++) {
n = __ipset_dereference_protected(hbucket(t, i), 1);
for (j = 0, d = 0; j < n->pos; j++) {
if (!test_bit(j, n->used)) {
data = ahash_data(n, j, dsize);
if (!ip_set_timeout_expired(ext_timeout(data, set)))
pr_debug("expired %u/%u\n", i, j);
clear_bit(j, n->used);
smp_mb__after_atomic();
#ifdef IP_SET_HASH_WITH_NETS
for (k = 0; k < IPSET_NET_COUNT; k++)
NCIDR_PUT(DCIDR_GET(data->cidr, k)),
ip_set_ext_destroy(set, data);
if (d >= AHASH_INIT_SIZE) {
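/* Enough slots were freed: either drop the now empty bucket or copy
 * the surviving entries into a smaller block and publish it via RCU.
 */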
rcu_assign_pointer(hbucket(t, i), NULL);
tmp = kzalloc(sizeof(*tmp) +
(n->size - AHASH_INIT_SIZE) * dsize,
/* Still try to delete expired elements */
tmp->size = n->size - AHASH_INIT_SIZE;
for (j = 0, d = 0; j < n->pos; j++) {
if (!test_bit(j, n->used))
data = ahash_data(n, j, dsize);
memcpy(tmp->value + d * dsize, data, dsize);
set_bit(d, tmp->used);
set->ext_size -= ext_size(AHASH_INIT_SIZE, dsize);
rcu_assign_pointer(hbucket(t, i), tmp);
mtype_gc(unsigned long ul_set)
struct ip_set *set = (struct ip_set *)ul_set;
struct htype *h = set->data;
pr_debug("called\n");
spin_lock_bh(&set->lock);
mtype_expire(set, h);
spin_unlock_bh(&set->lock);
h->gc.expires = jiffies + IPSET_GC_PERIOD(set->timeout) * HZ;
/* Resize a hash: create a new hash table, doubling the hashsize, and
 * insert the elements into it. Repeat until we succeed or fail due to
 * memory pressure.
 */
mtype_resize(struct ip_set *set, bool retried)
struct htype *h = set->data;
struct htable *t, *orig;
size_t extsize, dsize = set->dsize;
#ifdef IP_SET_HASH_WITH_NETS
struct mtype_elem *tmp;
struct mtype_elem *data;
struct mtype_elem *d;
struct hbucket *n, *m;
#ifdef IP_SET_HASH_WITH_NETS
tmp = kmalloc(dsize, GFP_KERNEL);
orig = rcu_dereference_bh_nfnl(h->table);
htable_bits = orig->htable_bits;
rcu_read_unlock_bh();
/* In case we have plenty of memory :-) */
pr_warn("Cannot increase the hashsize of set %s further\n",
ret = -IPSET_ERR_HASH_FULL;
t = ip_set_alloc(htable_size(htable_bits));
t->htable_bits = htable_bits;
spin_lock_bh(&set->lock);
orig = __ipset_dereference_protected(h->table, 1);
/* There can't be another parallel resizing, but dumping is possible */
atomic_set(&orig->ref, 1);
atomic_inc(&orig->uref);
pr_debug("attempt to resize set %s from %u to %u, t %p\n",
set->name, orig->htable_bits, htable_bits, orig);
for (i = 0; i < jhash_size(orig->htable_bits); i++) {
n = __ipset_dereference_protected(hbucket(orig, i), 1);
for (j = 0; j < n->pos; j++) {
if (!test_bit(j, n->used))
data = ahash_data(n, j, dsize);
#ifdef IP_SET_HASH_WITH_NETS
/* We have readers running parallel with us,
 * so the live data cannot be modified.
 */
memcpy(tmp, data, dsize);
mtype_data_reset_flags(data, &flags);
key = HKEY(data, h->initval, htable_bits);
m = __ipset_dereference_protected(hbucket(t, key), 1);
m = kzalloc(sizeof(*m) +
AHASH_INIT_SIZE * dsize,
m->size = AHASH_INIT_SIZE;
extsize += ext_size(AHASH_INIT_SIZE, dsize);
RCU_INIT_POINTER(hbucket(t, key), m);
} else if (m->pos >= m->size) {
if (m->size >= AHASH_MAX(h)) {
ht = kzalloc(sizeof(*ht) +
(m->size + AHASH_INIT_SIZE)
memcpy(ht, m, sizeof(struct hbucket) +
ht->size = m->size + AHASH_INIT_SIZE;
extsize += ext_size(AHASH_INIT_SIZE, dsize);
RCU_INIT_POINTER(hbucket(t, key), ht);
d = ahash_data(m, m->pos, dsize);
memcpy(d, data, dsize);
set_bit(m->pos++, m->used);
#ifdef IP_SET_HASH_WITH_NETS
mtype_data_reset_flags(d, &flags);
rcu_assign_pointer(h->table, t);
set->ext_size = extsize;
spin_unlock_bh(&set->lock);
/* Give time to other readers of the set */
synchronize_rcu_bh();
pr_debug("set %s resized from %u (%p) to %u (%p)\n", set->name,
orig->htable_bits, orig, t->htable_bits, t);
/* If there's nobody else dumping the table, destroy it */
if (atomic_dec_and_test(&orig->uref)) {
pr_debug("Table destroy by resize %p\n", orig);
mtype_ahash_destroy(set, orig, false);
#ifdef IP_SET_HASH_WITH_NETS
atomic_set(&orig->ref, 0);
atomic_dec(&orig->uref);
spin_unlock_bh(&set->lock);
mtype_ahash_destroy(set, t, false);
/* Add an element to a hash and update the internal counters when it
 * succeeds, otherwise report the proper error code.
 */
mtype_add(struct ip_set *set, void *value, const struct ip_set_ext *ext,
struct ip_set_ext *mext, u32 flags)
struct htype *h = set->data;
const struct mtype_elem *d = value;
struct mtype_elem *data;
struct hbucket *n, *old = ERR_PTR(-ENOENT);
bool flag_exist = flags & IPSET_FLAG_EXIST;
bool deleted = false, forceadd = false, reuse = false;
if (set->elements >= h->maxelem) {
if (SET_WITH_TIMEOUT(set))
/* FIXME: when set is full, we slow down here */
mtype_expire(set, h);
if (set->elements >= h->maxelem && SET_WITH_FORCEADD(set))
t = ipset_dereference_protected(h->table, set);
key = HKEY(value, h->initval, t->htable_bits);
n = __ipset_dereference_protected(hbucket(t, key), 1);
if (forceadd || set->elements >= h->maxelem)
n = kzalloc(sizeof(*n) + AHASH_INIT_SIZE * set->dsize,
n->size = AHASH_INIT_SIZE;
set->ext_size += ext_size(AHASH_INIT_SIZE, set->dsize);
for (i = 0; i < n->pos; i++) {
if (!test_bit(i, n->used)) {
/* Reuse first deleted entry */
deleted = reuse = true;
data = ahash_data(n, i, set->dsize);
if (mtype_data_equal(data, d, &multi)) {
(SET_WITH_TIMEOUT(set) &&
ip_set_timeout_expired(ext_timeout(data, set)))) {
/* Just the extensions could be overwritten */
goto overwrite_extensions;
return -IPSET_ERR_EXIST;
/* Reuse first timed out entry */
if (SET_WITH_TIMEOUT(set) &&
ip_set_timeout_expired(ext_timeout(data, set)) &&
if (reuse || forceadd) {
data = ahash_data(n, j, set->dsize);
#ifdef IP_SET_HASH_WITH_NETS
for (i = 0; i < IPSET_NET_COUNT; i++)
NCIDR_PUT(DCIDR_GET(data->cidr, i)),
ip_set_ext_destroy(set, data);
if (set->elements >= h->maxelem)
/* Create a new slot */
if (n->pos >= n->size) {
TUNE_AHASH_MAX(h, multi);
if (n->size >= AHASH_MAX(h)) {
/* Trigger rehashing */
mtype_data_next(&h->next, d);
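/* The bucket cannot grow further: note the current element in h->next
 * and let the caller resize the hash and retry the add (-EAGAIN path);
 * otherwise a larger block is allocated below.
 */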
n = kzalloc(sizeof(*n) +
(old->size + AHASH_INIT_SIZE) * set->dsize,
memcpy(n, old, sizeof(struct hbucket) +
old->size * set->dsize);
n->size = old->size + AHASH_INIT_SIZE;
set->ext_size += ext_size(AHASH_INIT_SIZE, set->dsize);
data = ahash_data(n, j, set->dsize);
#ifdef IP_SET_HASH_WITH_NETS
for (i = 0; i < IPSET_NET_COUNT; i++)
mtype_add_cidr(h, NCIDR_PUT(DCIDR_GET(d->cidr, i)), i);
memcpy(data, d, sizeof(struct mtype_elem));
overwrite_extensions:
#ifdef IP_SET_HASH_WITH_NETS
mtype_data_set_flags(data, flags);
if (SET_WITH_COUNTER(set))
ip_set_init_counter(ext_counter(data, set), ext);
if (SET_WITH_COMMENT(set))
ip_set_init_comment(set, ext_comment(data, set), ext);
if (SET_WITH_SKBINFO(set))
ip_set_init_skbinfo(ext_skbinfo(data, set), ext);
/* Must come last for the case when timed out entry is reused */
if (SET_WITH_TIMEOUT(set))
ip_set_timeout_set(ext_timeout(data, set), ext->timeout);
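/* Pair with the lockless readers: the element data and extensions
 * written above must be visible before the slot is marked used and
 * the (possibly new) bucket is published.
 */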
smp_mb__before_atomic();
if (old != ERR_PTR(-ENOENT)) {
rcu_assign_pointer(hbucket(t, key), n);
pr_warn("Set %s is full, maxelem %u reached\n",
set->name, h->maxelem);
return -IPSET_ERR_HASH_FULL;
/* Delete an element from the hash and free up space if possible. */
mtype_del(struct ip_set *set, void *value, const struct ip_set_ext *ext,
struct ip_set_ext *mext, u32 flags)
struct htype *h = set->data;
const struct mtype_elem *d = value;
struct mtype_elem *data;
int i, j, k, ret = -IPSET_ERR_EXIST;
size_t dsize = set->dsize;
t = ipset_dereference_protected(h->table, set);
key = HKEY(value, h->initval, t->htable_bits);
n = __ipset_dereference_protected(hbucket(t, key), 1);
for (i = 0, k = 0; i < n->pos; i++) {
if (!test_bit(i, n->used)) {
data = ahash_data(n, i, dsize);
if (!mtype_data_equal(data, d, &multi))
if (SET_WITH_TIMEOUT(set) &&
ip_set_timeout_expired(ext_timeout(data, set)))
clear_bit(i, n->used);
smp_mb__after_atomic();
#ifdef IP_SET_HASH_WITH_NETS
for (j = 0; j < IPSET_NET_COUNT; j++)
mtype_del_cidr(h, NCIDR_PUT(DCIDR_GET(d->cidr, j)),
ip_set_ext_destroy(set, data);
for (; i < n->pos; i++) {
if (!test_bit(i, n->used))
if (n->pos == 0 && k == 0) {
set->ext_size -= ext_size(n->size, dsize);
rcu_assign_pointer(hbucket(t, key), NULL);
} else if (k >= AHASH_INIT_SIZE) {
struct hbucket *tmp = kzalloc(sizeof(*tmp) +
(n->size - AHASH_INIT_SIZE) * dsize,
tmp->size = n->size - AHASH_INIT_SIZE;
for (j = 0, k = 0; j < n->pos; j++) {
if (!test_bit(j, n->used))
data = ahash_data(n, j, dsize);
memcpy(tmp->value + k * dsize, data, dsize);
set_bit(k, tmp->used);
set->ext_size -= ext_size(AHASH_INIT_SIZE, dsize);
rcu_assign_pointer(hbucket(t, key), tmp);
mtype_data_match(struct mtype_elem *data, const struct ip_set_ext *ext,
struct ip_set_ext *mext, struct ip_set *set, u32 flags)
if (SET_WITH_COUNTER(set))
ip_set_update_counter(ext_counter(data, set),
if (SET_WITH_SKBINFO(set))
ip_set_get_skbinfo(ext_skbinfo(data, set),
return mtype_do_data_match(data);
#ifdef IP_SET_HASH_WITH_NETS
/* Special test function which takes into account the different network
 * sizes added to the set
 */
mtype_test_cidrs(struct ip_set *set, struct mtype_elem *d,
const struct ip_set_ext *ext,
struct ip_set_ext *mext, u32 flags)
struct htype *h = set->data;
struct htable *t = rcu_dereference_bh(h->table);
struct mtype_elem *data;
#if IPSET_NET_COUNT == 2
struct mtype_elem orig = *d;
pr_debug("test by nets\n");
for (; j < NLEN && h->nets[j].cidr[0] && !multi; j++) {
#if IPSET_NET_COUNT == 2
mtype_data_reset_elem(d, &orig);
mtype_data_netmask(d, NCIDR_GET(h->nets[j].cidr[0]), false);
for (k = 0; k < NLEN && h->nets[k].cidr[1] && !multi;
mtype_data_netmask(d, NCIDR_GET(h->nets[k].cidr[1]),
mtype_data_netmask(d, NCIDR_GET(h->nets[j].cidr[0]));
key = HKEY(d, h->initval, t->htable_bits);
n = rcu_dereference_bh(hbucket(t, key));
for (i = 0; i < n->pos; i++) {
if (!test_bit(i, n->used))
data = ahash_data(n, i, set->dsize);
if (!mtype_data_equal(data, d, &multi))
if (SET_WITH_TIMEOUT(set)) {
if (!ip_set_timeout_expired(
ext_timeout(data, set)))
return mtype_data_match(data, ext,
#ifdef IP_SET_HASH_WITH_MULTI
return mtype_data_match(data, ext,
#if IPSET_NET_COUNT == 2
/* Test whether the element is added to the set */
mtype_test(struct ip_set *set, void *value, const struct ip_set_ext *ext,
struct ip_set_ext *mext, u32 flags)
struct htype *h = set->data;
struct mtype_elem *d = value;
struct mtype_elem *data;
t = rcu_dereference_bh(h->table);
#ifdef IP_SET_HASH_WITH_NETS
/* If we test an IP address and not a network address,
 * try all possible network sizes
 */
for (i = 0; i < IPSET_NET_COUNT; i++)
if (DCIDR_GET(d->cidr, i) != HOST_MASK)
if (i == IPSET_NET_COUNT) {
ret = mtype_test_cidrs(set, d, ext, mext, flags);
key = HKEY(d, h->initval, t->htable_bits);
n = rcu_dereference_bh(hbucket(t, key));
for (i = 0; i < n->pos; i++) {
if (!test_bit(i, n->used))
data = ahash_data(n, i, set->dsize);
if (mtype_data_equal(data, d, &multi) &&
!(SET_WITH_TIMEOUT(set) &&
ip_set_timeout_expired(ext_timeout(data, set)))) {
ret = mtype_data_match(data, ext, mext, set, flags);
/* Reply a HEADER request: fill out the header part of the set */
mtype_head(struct ip_set *set, struct sk_buff *skb)
struct htype *h = set->data;
const struct htable *t;
struct nlattr *nested;
/* If any members have expired, set->elements will be wrong;
 * mtype_expire() will update it with the right count.
 * We do not hold set->lock here, so grab it first.
 * set->elements can still be incorrect in the case of a huge set,
 * because elements might time out during the listing.
 */
if (SET_WITH_TIMEOUT(set)) {
spin_lock_bh(&set->lock);
mtype_expire(set, h);
spin_unlock_bh(&set->lock);
t = rcu_dereference_bh_nfnl(h->table);
memsize = mtype_ahash_memsize(h, t) + set->ext_size;
htable_bits = t->htable_bits;
rcu_read_unlock_bh();
nested = ipset_nest_start(skb, IPSET_ATTR_DATA);
goto nla_put_failure;
if (nla_put_net32(skb, IPSET_ATTR_HASHSIZE,
htonl(jhash_size(htable_bits))) ||
nla_put_net32(skb, IPSET_ATTR_MAXELEM, htonl(h->maxelem)))
goto nla_put_failure;
#ifdef IP_SET_HASH_WITH_NETMASK
if (h->netmask != HOST_MASK &&
nla_put_u8(skb, IPSET_ATTR_NETMASK, h->netmask))
goto nla_put_failure;
#ifdef IP_SET_HASH_WITH_MARKMASK
if (nla_put_u32(skb, IPSET_ATTR_MARKMASK, h->markmask))
goto nla_put_failure;
if (nla_put_net32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref)) ||
nla_put_net32(skb, IPSET_ATTR_MEMSIZE, htonl(memsize)) ||
nla_put_net32(skb, IPSET_ATTR_ELEMENTS, htonl(set->elements)))
goto nla_put_failure;
if (unlikely(ip_set_put_flags(skb, set)))
goto nla_put_failure;
ipset_nest_end(skb, nested);
/* Make it possible to run dumping in parallel with resizing */
mtype_uref(struct ip_set *set, struct netlink_callback *cb, bool start)
struct htype *h = set->data;
t = rcu_dereference_bh_nfnl(h->table);
atomic_inc(&t->uref);
cb->args[IPSET_CB_PRIVATE] = (unsigned long)t;
rcu_read_unlock_bh();
} else if (cb->args[IPSET_CB_PRIVATE]) {
t = (struct htable *)cb->args[IPSET_CB_PRIVATE];
if (atomic_dec_and_test(&t->uref) && atomic_read(&t->ref)) {
/* Resizing didn't destroy the hash table */
pr_debug("Table destroy by dump: %p\n", t);
mtype_ahash_destroy(set, t, false);
cb->args[IPSET_CB_PRIVATE] = 0;
/* Reply a LIST/SAVE request: dump the elements of the specified set */
mtype_list(const struct ip_set *set,
struct sk_buff *skb, struct netlink_callback *cb)
const struct htable *t;
struct nlattr *atd, *nested;
const struct hbucket *n;
const struct mtype_elem *e;
u32 first = cb->args[IPSET_CB_ARG0];
/* We assume that one hash bucket fits into one page */
atd = ipset_nest_start(skb, IPSET_ATTR_ADT);
pr_debug("list hash set %s\n", set->name);
t = (const struct htable *)cb->args[IPSET_CB_PRIVATE];
/* Expire may replace a hbucket with another one */
for (; cb->args[IPSET_CB_ARG0] < jhash_size(t->htable_bits);
cb->args[IPSET_CB_ARG0]++) {
incomplete = skb_tail_pointer(skb);
n = rcu_dereference(hbucket(t, cb->args[IPSET_CB_ARG0]));
pr_debug("cb->arg bucket: %lu, t %p n %p\n",
cb->args[IPSET_CB_ARG0], t, n);
for (i = 0; i < n->pos; i++) {
if (!test_bit(i, n->used))
e = ahash_data(n, i, set->dsize);
if (SET_WITH_TIMEOUT(set) &&
ip_set_timeout_expired(ext_timeout(e, set)))
pr_debug("list hash %lu hbucket %p i %u, data %p\n",
cb->args[IPSET_CB_ARG0], n, i, e);
nested = ipset_nest_start(skb, IPSET_ATTR_DATA);
if (cb->args[IPSET_CB_ARG0] == first) {
nla_nest_cancel(skb, atd);
goto nla_put_failure;
if (mtype_data_list(skb, e))
goto nla_put_failure;
if (ip_set_put_extensions(skb, set, e, true))
goto nla_put_failure;
ipset_nest_end(skb, nested);
ipset_nest_end(skb, atd);
/* Set listing finished */
cb->args[IPSET_CB_ARG0] = 0;
nlmsg_trim(skb, incomplete);
if (unlikely(first == cb->args[IPSET_CB_ARG0])) {
pr_warn("Can't list set %s: one bucket does not fit into a message. Please report it!\n",
cb->args[IPSET_CB_ARG0] = 0;
ipset_nest_end(skb, atd);
IPSET_TOKEN(MTYPE, _kadt)(struct ip_set *set, const struct sk_buff *skb,
const struct xt_action_param *par,
enum ipset_adt adt, struct ip_set_adt_opt *opt);
IPSET_TOKEN(MTYPE, _uadt)(struct ip_set *set, struct nlattr *tb[],
enum ipset_adt adt, u32 *lineno, u32 flags,
static const struct ip_set_type_variant mtype_variant = {
[IPSET_ADD] = mtype_add,
[IPSET_DEL] = mtype_del,
[IPSET_TEST] = mtype_test,
.destroy = mtype_destroy,
.flush = mtype_flush,
.resize = mtype_resize,
.same_set = mtype_same_set,
#ifdef IP_SET_EMIT_CREATE
IPSET_TOKEN(HTYPE, _create)(struct net *net, struct ip_set *set,
struct nlattr *tb[], u32 flags)
u32 hashsize = IPSET_DEFAULT_HASHSIZE, maxelem = IPSET_DEFAULT_MAXELEM;
#ifdef IP_SET_HASH_WITH_MARKMASK
#ifdef IP_SET_HASH_WITH_NETMASK
pr_debug("Create set %s with family %s\n",
set->name, set->family == NFPROTO_IPV4 ? "inet" : "inet6");
#ifdef IP_SET_PROTO_UNDEF
if (set->family != NFPROTO_UNSPEC)
return -IPSET_ERR_INVALID_FAMILY;
if (!(set->family == NFPROTO_IPV4 || set->family == NFPROTO_IPV6))
return -IPSET_ERR_INVALID_FAMILY;
if (unlikely(!ip_set_optattr_netorder(tb, IPSET_ATTR_HASHSIZE) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_MAXELEM) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS)))
return -IPSET_ERR_PROTOCOL;
#ifdef IP_SET_HASH_WITH_MARKMASK
/* Separated condition in order to avoid directive in argument list */
if (unlikely(!ip_set_optattr_netorder(tb, IPSET_ATTR_MARKMASK)))
return -IPSET_ERR_PROTOCOL;
markmask = 0xffffffff;
if (tb[IPSET_ATTR_MARKMASK]) {
markmask = ntohl(nla_get_be32(tb[IPSET_ATTR_MARKMASK]));
return -IPSET_ERR_INVALID_MARKMASK;
#ifdef IP_SET_HASH_WITH_NETMASK
netmask = set->family == NFPROTO_IPV4 ? 32 : 128;
if (tb[IPSET_ATTR_NETMASK]) {
netmask = nla_get_u8(tb[IPSET_ATTR_NETMASK]);
if ((set->family == NFPROTO_IPV4 && netmask > 32) ||
(set->family == NFPROTO_IPV6 && netmask > 128) ||
return -IPSET_ERR_INVALID_NETMASK;
if (tb[IPSET_ATTR_HASHSIZE]) {
hashsize = ip_set_get_h32(tb[IPSET_ATTR_HASHSIZE]);
if (hashsize < IPSET_MIMINAL_HASHSIZE)
hashsize = IPSET_MIMINAL_HASHSIZE;
if (tb[IPSET_ATTR_MAXELEM])
maxelem = ip_set_get_h32(tb[IPSET_ATTR_MAXELEM]);
h = kzalloc(hsize, GFP_KERNEL);
/* Compute htable_bits from the user input parameter hashsize.
 * Assume that hashsize == 2^htable_bits,
 * otherwise round up to the first 2^n value.
 */
hbits = fls(hashsize - 1);
hsize = htable_size(hbits);
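/* Example: hashsize = 1000 gives hbits = fls(999) = 10, i.e. a table of
 * jhash_size(10) = 1024 buckets.
 */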
t = ip_set_alloc(hsize);
h->maxelem = maxelem;
#ifdef IP_SET_HASH_WITH_NETMASK
h->netmask = netmask;
#ifdef IP_SET_HASH_WITH_MARKMASK
h->markmask = markmask;
get_random_bytes(&h->initval, sizeof(h->initval));
t->htable_bits = hbits;
RCU_INIT_POINTER(h->table, t);
#ifndef IP_SET_PROTO_UNDEF
if (set->family == NFPROTO_IPV4) {
set->variant = &IPSET_TOKEN(HTYPE, 4_variant);
set->dsize = ip_set_elem_len(set, tb,
sizeof(struct IPSET_TOKEN(HTYPE, 4_elem)),
__alignof__(struct IPSET_TOKEN(HTYPE, 4_elem)));
#ifndef IP_SET_PROTO_UNDEF
set->variant = &IPSET_TOKEN(HTYPE, 6_variant);
set->dsize = ip_set_elem_len(set, tb,
sizeof(struct IPSET_TOKEN(HTYPE, 6_elem)),
__alignof__(struct IPSET_TOKEN(HTYPE, 6_elem)));
set->timeout = IPSET_NO_TIMEOUT;
if (tb[IPSET_ATTR_TIMEOUT]) {
set->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
#ifndef IP_SET_PROTO_UNDEF
if (set->family == NFPROTO_IPV4)
IPSET_TOKEN(HTYPE, 4_gc_init)(set,
IPSET_TOKEN(HTYPE, 4_gc));
#ifndef IP_SET_PROTO_UNDEF
IPSET_TOKEN(HTYPE, 6_gc_init)(set,
IPSET_TOKEN(HTYPE, 6_gc));
pr_debug("create %s hashsize %u (%u) maxelem %u: %p(%p)\n",
set->name, jhash_size(t->htable_bits),
t->htable_bits, h->maxelem, set->data, t);
#endif /* IP_SET_EMIT_CREATE */