/* Copyright (C) 2013 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifndef _IP_SET_HASH_GEN_H
#define _IP_SET_HASH_GEN_H

#include <linux/rcupdate.h>
#include <linux/jhash.h>
#include <linux/types.h>
#include <linux/netfilter/ipset/ip_set_timeout.h>

#define __ipset_dereference_protected(p, c)	rcu_dereference_protected(p, c)
#define ipset_dereference_protected(p, set) \
	__ipset_dereference_protected(p, spin_is_locked(&(set)->lock))

#define rcu_dereference_bh_nfnl(p)	rcu_dereference_bh_check(p, 1)

/* Hashing which uses arrays to resolve clashes. The hash table is resized
 * (doubled) when searching becomes too long.
 * Internally jhash is used with the assumption that the size of the
 * stored data is a multiple of sizeof(u32).
 *
 * Readers and resizing
 *
 * Resizing can be triggered by userspace command only, and those
 * are serialized by the nfnl mutex. During resizing the set is
 * read-locked, so the only possible concurrent operations are
 * the kernel side readers. Those must be protected by proper RCU locking.
 */
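
/* Illustrative sketch (added comment, not part of the original code):
 * the reader side pairs with the resize logic below roughly as
 *
 *	reader (kadt)				resizer (userspace command)
 *	------------				---------------------------
 *	rcu_read_lock_bh();			t = ip_set_alloc(...);
 *	t = rcu_dereference_bh(h->table);	... rehash orig into t ...
 *	... lookup in t ...			rcu_assign_pointer(h->table, t);
 *	rcu_read_unlock_bh();			synchronize_rcu_bh();
 *						mtype_ahash_destroy(set, orig, false);
 */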

/* Number of elements to store in an initial array block */
#define AHASH_INIT_SIZE		4
/* Max number of elements to store in an array block */
#define AHASH_MAX_SIZE		(3 * AHASH_INIT_SIZE)
/* Max number of elements in the array block when tuned */
#define AHASH_MAX_TUNED		64

/* The max number of elements in an array block can be tuned */
#ifdef IP_SET_HASH_WITH_MULTI
#define AHASH_MAX(h)		((h)->ahash_max)

static inline u8
tune_ahash_max(u8 curr, u32 multi)
{
	u32 n;

	if (multi < curr)
		return curr;

	n = curr + AHASH_INIT_SIZE;
	/* Currently, when listing, one hash bucket must fit into a message.
	 * Therefore we have a hard limit here.
	 */
	return n > curr && n <= AHASH_MAX_TUNED ? n : curr;
}

#define TUNE_AHASH_MAX(h, multi) \
	((h)->ahash_max = tune_ahash_max((h)->ahash_max, multi))
#else
#define AHASH_MAX(h)		AHASH_MAX_SIZE
#define TUNE_AHASH_MAX(h, multi)
#endif

/* A hash bucket */
struct hbucket {
	struct rcu_head rcu;	/* for call_rcu_bh */
	/* Which positions are used in the array */
	DECLARE_BITMAP(used, AHASH_MAX_TUNED);
	u8 size;		/* size of the array */
	u8 pos;			/* position of the first free entry */
	unsigned char value[0]	/* the array of the values */
		__aligned(__alignof__(u64));
};
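
/* Illustration (added comment): an hbucket is a single allocation,
 * laid out as
 *
 *	[ rcu | used bitmap | size | pos | value[0] .. value[size - 1] ]
 *
 * where slot i starts at value + i * set->dsize. Slots below pos may be
 * live or deleted as tracked by the used bitmap; pos itself is the first
 * never-used slot.
 */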

/* The hash table: the table size stored here in order to make resizing easy */
struct htable {
	atomic_t ref;		/* References for resizing */
	atomic_t uref;		/* References for dumping */
	u8 htable_bits;		/* size of hash table == 2^htable_bits */
	struct hbucket __rcu *bucket[0]; /* hashtable buckets */
};

#define hbucket(h, i)		((h)->bucket[i])
#define ext_size(n, dsize)	\
	(sizeof(struct hbucket) + (n) * (dsize))

#ifndef IPSET_NET_COUNT
#define IPSET_NET_COUNT		1
#endif

/* Book-keeping of the prefixes added to the set */
struct net_prefixes {
	u32 nets[IPSET_NET_COUNT]; /* number of elements for this cidr */
	u8 cidr[IPSET_NET_COUNT];  /* the cidr value */
};

/* Compute the hash table size */
static size_t
htable_size(u8 hbits)
{
	size_t hsize;

	/* We must fit both into u32 in jhash and INT_MAX in kvmalloc_node() */
	if (hbits > 31)
		return 0;
	hsize = jhash_size(hbits);
	if ((INT_MAX - sizeof(struct htable)) / sizeof(struct hbucket *)
	    < hsize)
		return 0;

	return hsize * sizeof(struct hbucket *) + sizeof(struct htable);
}
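
/* Worked example (added comment): for hbits = 10, jhash_size(10) == 1024
 * buckets, so on a typical 64-bit build this returns
 *	1024 * sizeof(struct hbucket *) + sizeof(struct htable)
 * i.e. 8 KiB plus a small header. The guards above return 0 whenever
 * hbits cannot be represented or the bucket pointer array would exceed
 * INT_MAX, and callers treat a zero size as an allocation failure.
 */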

#ifdef IP_SET_HASH_WITH_NETS
#if IPSET_NET_COUNT > 1
#define __CIDR(cidr, i)		(cidr[i])
#else
#define __CIDR(cidr, i)		(cidr)
#endif

/* cidr + 1 is stored in net_prefixes to support /0 */
#define NCIDR_PUT(cidr)		((cidr) + 1)
#define NCIDR_GET(cidr)		((cidr) - 1)

#ifdef IP_SET_HASH_WITH_NETS_PACKED
/* When cidr is packed with nomatch, cidr - 1 is stored in the data entry */
#define DCIDR_PUT(cidr)		((cidr) - 1)
#define DCIDR_GET(cidr, i)	(__CIDR(cidr, i) + 1)
#else
#define DCIDR_PUT(cidr)		(cidr)
#define DCIDR_GET(cidr, i)	__CIDR(cidr, i)
#endif

#define INIT_CIDR(cidr, host_mask)	\
	DCIDR_PUT(((cidr) ? NCIDR_GET(cidr) : host_mask))

#ifdef IP_SET_HASH_WITH_NET0
/* cidr from 0 to HOST_MASK value and c = cidr + 1 */
#define NLEN			(HOST_MASK + 1)
#define CIDR_POS(c)		((c) - 1)
#else
/* cidr from 1 to HOST_MASK value and c = cidr + 1 */
#define NLEN			HOST_MASK
#define CIDR_POS(c)		((c) - 2)
#endif

#else
#define NLEN			0
#endif /* IP_SET_HASH_WITH_NETS */
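
/* Worked example (added comment): storing a /24 net records
 * NCIDR_PUT(24) == 25 in h->nets[].cidr[], so a /0 (stored as 1) remains
 * distinguishable from an unused slot (0), and NCIDR_GET() undoes the
 * shift. With IP_SET_HASH_WITH_NETS_PACKED the data entry instead stores
 * cidr - 1, presumably so the largest prefix value still fits the
 * narrower field packed together with the nomatch flag.
 */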

#endif /* _IP_SET_HASH_GEN_H */

#ifndef MTYPE
#error "MTYPE is not defined!"
#endif

#ifndef HTYPE
#error "HTYPE is not defined!"
#endif

#ifndef HOST_MASK
#error "HOST_MASK is not defined!"
#endif

/* Family dependent templates */

#undef ahash_data
#undef mtype_data_equal
#undef mtype_do_data_match
#undef mtype_data_set_flags
#undef mtype_data_reset_elem
#undef mtype_data_reset_flags
#undef mtype_data_netmask
#undef mtype_data_list
#undef mtype_data_next
#undef mtype_elem

#undef mtype_ahash_destroy
#undef mtype_ext_cleanup
#undef mtype_add_cidr
#undef mtype_del_cidr
#undef mtype_ahash_memsize
#undef mtype_flush
#undef mtype_destroy
#undef mtype_same_set
#undef mtype_kadt
#undef mtype_uadt

#undef mtype_add
#undef mtype_del
#undef mtype_test_cidrs
#undef mtype_test
#undef mtype_uref
#undef mtype_expire
#undef mtype_resize
#undef mtype_head
#undef mtype_list
#undef mtype_gc
#undef mtype_gc_init
#undef mtype_variant
#undef mtype_data_match

#undef htype
#undef HKEY
#undef HKEY_DATALEN

#define mtype_data_equal	IPSET_TOKEN(MTYPE, _data_equal)
#ifdef IP_SET_HASH_WITH_NETS
#define mtype_do_data_match	IPSET_TOKEN(MTYPE, _do_data_match)
#else
#define mtype_do_data_match(d)	1
#endif
#define mtype_data_set_flags	IPSET_TOKEN(MTYPE, _data_set_flags)
#define mtype_data_reset_elem	IPSET_TOKEN(MTYPE, _data_reset_elem)
#define mtype_data_reset_flags	IPSET_TOKEN(MTYPE, _data_reset_flags)
#define mtype_data_netmask	IPSET_TOKEN(MTYPE, _data_netmask)
#define mtype_data_list		IPSET_TOKEN(MTYPE, _data_list)
#define mtype_data_next		IPSET_TOKEN(MTYPE, _data_next)
#define mtype_elem		IPSET_TOKEN(MTYPE, _elem)

#define mtype_ahash_destroy	IPSET_TOKEN(MTYPE, _ahash_destroy)
#define mtype_ext_cleanup	IPSET_TOKEN(MTYPE, _ext_cleanup)
#define mtype_add_cidr		IPSET_TOKEN(MTYPE, _add_cidr)
#define mtype_del_cidr		IPSET_TOKEN(MTYPE, _del_cidr)
#define mtype_ahash_memsize	IPSET_TOKEN(MTYPE, _ahash_memsize)
#define mtype_flush		IPSET_TOKEN(MTYPE, _flush)
#define mtype_destroy		IPSET_TOKEN(MTYPE, _destroy)
#define mtype_same_set		IPSET_TOKEN(MTYPE, _same_set)
#define mtype_kadt		IPSET_TOKEN(MTYPE, _kadt)
#define mtype_uadt		IPSET_TOKEN(MTYPE, _uadt)

#define mtype_add		IPSET_TOKEN(MTYPE, _add)
#define mtype_del		IPSET_TOKEN(MTYPE, _del)
#define mtype_test_cidrs	IPSET_TOKEN(MTYPE, _test_cidrs)
#define mtype_test		IPSET_TOKEN(MTYPE, _test)
#define mtype_uref		IPSET_TOKEN(MTYPE, _uref)
#define mtype_expire		IPSET_TOKEN(MTYPE, _expire)
#define mtype_resize		IPSET_TOKEN(MTYPE, _resize)
#define mtype_head		IPSET_TOKEN(MTYPE, _head)
#define mtype_list		IPSET_TOKEN(MTYPE, _list)
#define mtype_gc		IPSET_TOKEN(MTYPE, _gc)
#define mtype_gc_init		IPSET_TOKEN(MTYPE, _gc_init)
#define mtype_variant		IPSET_TOKEN(MTYPE, _variant)
#define mtype_data_match	IPSET_TOKEN(MTYPE, _data_match)

#ifndef HKEY_DATALEN
#define HKEY_DATALEN		sizeof(struct mtype_elem)
#endif

#define htype			MTYPE

#define HKEY(data, initval, htable_bits)			\
({								\
	const u32 *__k = (const u32 *)data;			\
	u32 __l = HKEY_DATALEN / sizeof(u32);			\
								\
	BUILD_BUG_ON(HKEY_DATALEN % sizeof(u32) != 0);		\
								\
	jhash2(__k, __l, initval) & jhash_mask(htable_bits);	\
})
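
/* Usage sketch (added comment): lookups index the bucket array as in
 *
 *	key = HKEY(d, h->initval, t->htable_bits);
 *	n = rcu_dereference_bh(hbucket(t, key));
 *
 * jhash_mask() keeps only the htable_bits low bits, so key is always a
 * valid bucket index; initval is the per-set random seed generated at
 * create time.
 */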

/* The generic hash structure */
struct htype {
	struct htable __rcu *table; /* the hash table */
	struct timer_list gc;	/* garbage collection when timeout enabled */
	struct ip_set *set;	/* attached to this ip_set */
	u32 maxelem;		/* max elements in the hash */
	u32 initval;		/* random jhash init value */
#ifdef IP_SET_HASH_WITH_MARKMASK
	u32 markmask;		/* markmask value for mark mask to store */
#endif
#ifdef IP_SET_HASH_WITH_MULTI
	u8 ahash_max;		/* max elements in an array block */
#endif
#ifdef IP_SET_HASH_WITH_NETMASK
	u8 netmask;		/* netmask value for subnets to store */
#endif
	struct mtype_elem next; /* temporary storage for uadd */
#ifdef IP_SET_HASH_WITH_NETS
	struct net_prefixes nets[NLEN]; /* book-keeping of prefixes */
#endif
};

#ifdef IP_SET_HASH_WITH_NETS
/* Network cidr size book keeping when the hash stores different
 * sized networks. cidr == real cidr + 1 to support /0.
 */
static void
mtype_add_cidr(struct htype *h, u8 cidr, u8 n)
{
	int i, j;

	/* Add in increasing prefix order, so larger cidr first */
	for (i = 0, j = -1; i < NLEN && h->nets[i].cidr[n]; i++) {
		if (j != -1) {
			continue;
		} else if (h->nets[i].cidr[n] < cidr) {
			j = i;
		} else if (h->nets[i].cidr[n] == cidr) {
			h->nets[CIDR_POS(cidr)].nets[n]++;
			return;
		}
	}
	if (j != -1) {
		for (; i > j; i--)
			h->nets[i].cidr[n] = h->nets[i - 1].cidr[n];
	}
	h->nets[i].cidr[n] = cidr;
	h->nets[CIDR_POS(cidr)].nets[n] = 1;
}

static void
mtype_del_cidr(struct htype *h, u8 cidr, u8 n)
{
	u8 i, j, net_end = NLEN - 1;

	for (i = 0; i < NLEN; i++) {
		if (h->nets[i].cidr[n] != cidr)
			continue;
		h->nets[CIDR_POS(cidr)].nets[n]--;
		if (h->nets[CIDR_POS(cidr)].nets[n] > 0)
			return;
		for (j = i; j < net_end && h->nets[j].cidr[n]; j++)
			h->nets[j].cidr[n] = h->nets[j + 1].cidr[n];
		h->nets[j].cidr[n] = 0;
		return;
	}
}
#endif
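
/* Worked example (added comment): after adding one /24 and two /16
 * entries with n == 0 and HOST_MASK == 32, the sorted h->nets[i].cidr[0]
 * values read 25, 17, 0, ... (larger cidr value first), while the
 * counters live at the fixed slots nets[CIDR_POS(25)].nets[0] == 1 and
 * nets[CIDR_POS(17)].nets[0] == 2. mtype_test_cidrs() below walks the
 * sorted list, so lookups only probe prefix lengths actually present.
 */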

/* Calculate the actual memory size of the set data */
static size_t
mtype_ahash_memsize(const struct htype *h, const struct htable *t)
{
	return sizeof(*h) + sizeof(*t);
}

/* Get the ith element from the array block n */
#define ahash_data(n, i, dsize)	\
	((struct mtype_elem *)((n)->value + ((i) * (dsize))))

static void
mtype_ext_cleanup(struct ip_set *set, struct hbucket *n)
{
	int i;

	for (i = 0; i < n->pos; i++)
		if (test_bit(i, n->used))
			ip_set_ext_destroy(set, ahash_data(n, i, set->dsize));
}

/* Flush a hash type of set: destroy all elements */
static void
mtype_flush(struct ip_set *set)
{
	struct htype *h = set->data;
	struct htable *t;
	struct hbucket *n;
	u32 i;

	t = ipset_dereference_protected(h->table, set);
	for (i = 0; i < jhash_size(t->htable_bits); i++) {
		n = __ipset_dereference_protected(hbucket(t, i), 1);
		if (!n)
			continue;
		if (set->extensions & IPSET_EXT_DESTROY)
			mtype_ext_cleanup(set, n);
		/* FIXME: use slab cache */
		rcu_assign_pointer(hbucket(t, i), NULL);
		kfree_rcu(n, rcu);
	}
#ifdef IP_SET_HASH_WITH_NETS
	memset(h->nets, 0, sizeof(h->nets));
#endif
	set->elements = 0;
	set->ext_size = 0;
}

/* Destroy the hashtable part of the set */
static void
mtype_ahash_destroy(struct ip_set *set, struct htable *t, bool ext_destroy)
{
	struct hbucket *n;
	u32 i;

	for (i = 0; i < jhash_size(t->htable_bits); i++) {
		n = __ipset_dereference_protected(hbucket(t, i), 1);
		if (!n)
			continue;
		if (set->extensions & IPSET_EXT_DESTROY && ext_destroy)
			mtype_ext_cleanup(set, n);
		/* FIXME: use slab cache */
		kfree(n);
	}

	ip_set_free(t);
}

/* Destroy a hash type of set */
static void
mtype_destroy(struct ip_set *set)
{
	struct htype *h = set->data;

	if (SET_WITH_TIMEOUT(set))
		del_timer_sync(&h->gc);

	mtype_ahash_destroy(set,
			    __ipset_dereference_protected(h->table, 1), true);
	kfree(h);

	set->data = NULL;
}

static void
mtype_gc_init(struct ip_set *set, void (*gc)(struct timer_list *t))
{
	struct htype *h = set->data;

	timer_setup(&h->gc, gc, 0);
	mod_timer(&h->gc, jiffies + IPSET_GC_PERIOD(set->timeout) * HZ);
	pr_debug("gc initialized, run in every %u\n",
		 IPSET_GC_PERIOD(set->timeout));
}

static bool
mtype_same_set(const struct ip_set *a, const struct ip_set *b)
{
	const struct htype *x = a->data;
	const struct htype *y = b->data;

	/* Resizing changes htable_bits, so we ignore it */
	return x->maxelem == y->maxelem &&
	       a->timeout == b->timeout &&
#ifdef IP_SET_HASH_WITH_NETMASK
	       x->netmask == y->netmask &&
#endif
#ifdef IP_SET_HASH_WITH_MARKMASK
	       x->markmask == y->markmask &&
#endif
	       a->extensions == b->extensions;
}

/* Delete expired elements from the hashtable */
static void
mtype_expire(struct ip_set *set, struct htype *h)
{
	struct htable *t;
	struct hbucket *n, *tmp;
	struct mtype_elem *data;
	u32 i, j, d;
	size_t dsize = set->dsize;
#ifdef IP_SET_HASH_WITH_NETS
	u8 k;
#endif

	t = ipset_dereference_protected(h->table, set);
	for (i = 0; i < jhash_size(t->htable_bits); i++) {
		n = __ipset_dereference_protected(hbucket(t, i), 1);
		if (!n)
			continue;
		for (j = 0, d = 0; j < n->pos; j++) {
			if (!test_bit(j, n->used)) {
				d++;
				continue;
			}
			data = ahash_data(n, j, dsize);
			if (!ip_set_timeout_expired(ext_timeout(data, set)))
				continue;
			pr_debug("expired %u/%u\n", i, j);
			clear_bit(j, n->used);
			smp_mb__after_atomic();
#ifdef IP_SET_HASH_WITH_NETS
			for (k = 0; k < IPSET_NET_COUNT; k++)
				mtype_del_cidr(h,
					       NCIDR_PUT(DCIDR_GET(data->cidr, k)),
					       k);
#endif
			ip_set_ext_destroy(set, data);
			set->elements--;
			d++;
		}
		if (d >= AHASH_INIT_SIZE) {
			if (d >= n->size) {
				rcu_assign_pointer(hbucket(t, i), NULL);
				kfree_rcu(n, rcu);
				continue;
			}
			tmp = kzalloc(sizeof(*tmp) +
				      (n->size - AHASH_INIT_SIZE) * dsize,
				      GFP_ATOMIC);
			if (!tmp)
				/* Still try to delete expired elements */
				continue;
			tmp->size = n->size - AHASH_INIT_SIZE;
			for (j = 0, d = 0; j < n->pos; j++) {
				if (!test_bit(j, n->used))
					continue;
				data = ahash_data(n, j, dsize);
				memcpy(tmp->value + d * dsize, data, dsize);
				set_bit(d, tmp->used);
				d++;
			}
			tmp->pos = d;
			set->ext_size -= ext_size(AHASH_INIT_SIZE, dsize);
			rcu_assign_pointer(hbucket(t, i), tmp);
			kfree_rcu(n, rcu);
		}
	}
}
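
/* Note (added comment): the bucket above is shrunk only when at least
 * AHASH_INIT_SIZE slots were freed, and then by exactly one block. The
 * smaller replacement is fully populated before being published with
 * rcu_assign_pointer(), and the old bucket is freed via kfree_rcu(), so
 * lockless readers always see either the old or the new bucket intact.
 */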

static void
mtype_gc(struct timer_list *t)
{
	struct htype *h = from_timer(h, t, gc);
	struct ip_set *set = h->set;

	pr_debug("called\n");
	spin_lock_bh(&set->lock);
	mtype_expire(set, h);
	spin_unlock_bh(&set->lock);

	h->gc.expires = jiffies + IPSET_GC_PERIOD(set->timeout) * HZ;
	add_timer(&h->gc);
}

/* Resize a hash: create a new hash table with doubling the hashsize
 * and inserting the elements to it. Repeat until we succeed or
 * fail due to memory pressures.
 */
static int
mtype_resize(struct ip_set *set, bool retried)
{
	struct htype *h = set->data;
	struct htable *t, *orig;
	u8 htable_bits;
	size_t extsize, dsize = set->dsize;
#ifdef IP_SET_HASH_WITH_NETS
	u8 flags;
	struct mtype_elem *tmp;
#endif
	struct mtype_elem *data;
	struct mtype_elem *d;
	struct hbucket *n, *m;
	u32 i, j, key;
	int ret;

#ifdef IP_SET_HASH_WITH_NETS
	tmp = kmalloc(dsize, GFP_KERNEL);
	if (!tmp)
		return -ENOMEM;
#endif
	rcu_read_lock_bh();
	orig = rcu_dereference_bh_nfnl(h->table);
	htable_bits = orig->htable_bits;
	rcu_read_unlock_bh();

retry:
	ret = 0;
	htable_bits++;
	if (!htable_bits) {
		/* In case we have plenty of memory :-) */
		pr_warn("Cannot increase the hashsize of set %s further\n",
			set->name);
		ret = -IPSET_ERR_HASH_FULL;
		goto out;
	}
	t = ip_set_alloc(htable_size(htable_bits));
	if (!t) {
		ret = -ENOMEM;
		goto out;
	}
	t->htable_bits = htable_bits;

	spin_lock_bh(&set->lock);
	orig = __ipset_dereference_protected(h->table, 1);
	/* There can't be another parallel resizing, but dumping is possible */
	atomic_set(&orig->ref, 1);
	atomic_inc(&orig->uref);
	extsize = 0;
	pr_debug("attempt to resize set %s from %u to %u, t %p\n",
		 set->name, orig->htable_bits, htable_bits, orig);
	for (i = 0; i < jhash_size(orig->htable_bits); i++) {
		n = __ipset_dereference_protected(hbucket(orig, i), 1);
		if (!n)
			continue;
		for (j = 0; j < n->pos; j++) {
			if (!test_bit(j, n->used))
				continue;
			data = ahash_data(n, j, dsize);
#ifdef IP_SET_HASH_WITH_NETS
			/* We have readers running parallel with us,
			 * so the live data cannot be modified.
			 */
			flags = 0;
			memcpy(tmp, data, dsize);
			data = tmp;
			mtype_data_reset_flags(data, &flags);
#endif
			key = HKEY(data, h->initval, htable_bits);
			m = __ipset_dereference_protected(hbucket(t, key), 1);
			if (!m) {
				m = kzalloc(sizeof(*m) +
					    AHASH_INIT_SIZE * dsize,
					    GFP_ATOMIC);
				if (!m) {
					ret = -ENOMEM;
					goto cleanup;
				}
				m->size = AHASH_INIT_SIZE;
				extsize += ext_size(AHASH_INIT_SIZE, dsize);
				RCU_INIT_POINTER(hbucket(t, key), m);
			} else if (m->pos >= m->size) {
				struct hbucket *ht;

				if (m->size >= AHASH_MAX(h)) {
					ret = -EAGAIN;
				} else {
					ht = kzalloc(sizeof(*ht) +
						     (m->size + AHASH_INIT_SIZE)
						     * dsize,
						     GFP_ATOMIC);
					if (!ht)
						ret = -ENOMEM;
				}
				if (ret < 0)
					goto cleanup;
				memcpy(ht, m, sizeof(struct hbucket) +
					      m->size * dsize);
				ht->size = m->size + AHASH_INIT_SIZE;
				extsize += ext_size(AHASH_INIT_SIZE, dsize);
				kfree(m);
				m = ht;
				RCU_INIT_POINTER(hbucket(t, key), ht);
			}
			d = ahash_data(m, m->pos, dsize);
			memcpy(d, data, dsize);
			set_bit(m->pos++, m->used);
#ifdef IP_SET_HASH_WITH_NETS
			mtype_data_reset_flags(d, &flags);
#endif
		}
	}
	rcu_assign_pointer(h->table, t);
	set->ext_size = extsize;

	spin_unlock_bh(&set->lock);

	/* Give time to other readers of the set */
	synchronize_rcu_bh();

	pr_debug("set %s resized from %u (%p) to %u (%p)\n", set->name,
		 orig->htable_bits, orig, t->htable_bits, t);
	/* If there's nobody else dumping the table, destroy it */
	if (atomic_dec_and_test(&orig->uref)) {
		pr_debug("Table destroy by resize %p\n", orig);
		mtype_ahash_destroy(set, orig, false);
	}

out:
#ifdef IP_SET_HASH_WITH_NETS
	kfree(tmp);
#endif
	return ret;

cleanup:
	atomic_set(&orig->ref, 0);
	atomic_dec(&orig->uref);
	spin_unlock_bh(&set->lock);
	mtype_ahash_destroy(set, t, false);
	if (ret == -EAGAIN)
		goto retry;
	goto out;
}
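
/* Note (added comment): the ref/uref counters implement a small handshake
 * between resizing and dumping. Resize sets orig->ref and takes an uref
 * before rehashing; a dumper holds its own uref for the table cached in
 * cb->args[]. Whoever drops the last uref (here, or in mtype_uref()
 * below) destroys the old table, and a nonzero ref tells the dumper that
 * resize has already unpublished it.
 */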

/* Add an element to a hash and update the internal counters when succeeded,
 * otherwise report the proper error code.
 */
static int
mtype_add(struct ip_set *set, void *value, const struct ip_set_ext *ext,
	  struct ip_set_ext *mext, u32 flags)
{
	struct htype *h = set->data;
	struct htable *t;
	const struct mtype_elem *d = value;
	struct mtype_elem *data;
	struct hbucket *n, *old = ERR_PTR(-ENOENT);
	int i, j = -1;
	bool flag_exist = flags & IPSET_FLAG_EXIST;
	bool deleted = false, forceadd = false, reuse = false;
	u32 key, multi = 0;

	if (set->elements >= h->maxelem) {
		if (SET_WITH_TIMEOUT(set))
			/* FIXME: when set is full, we slow down here */
			mtype_expire(set, h);
		if (set->elements >= h->maxelem && SET_WITH_FORCEADD(set))
			forceadd = true;
	}

	t = ipset_dereference_protected(h->table, set);
	key = HKEY(value, h->initval, t->htable_bits);
	n = __ipset_dereference_protected(hbucket(t, key), 1);
	if (!n) {
		if (forceadd || set->elements >= h->maxelem)
			goto set_full;
		old = NULL;
		n = kzalloc(sizeof(*n) + AHASH_INIT_SIZE * set->dsize,
			    GFP_ATOMIC);
		if (!n)
			return -ENOMEM;
		n->size = AHASH_INIT_SIZE;
		set->ext_size += ext_size(AHASH_INIT_SIZE, set->dsize);
		goto copy_elem;
	}
	for (i = 0; i < n->pos; i++) {
		if (!test_bit(i, n->used)) {
			/* Reuse first deleted entry */
			if (j == -1) {
				deleted = reuse = true;
				j = i;
			}
			continue;
		}
		data = ahash_data(n, i, set->dsize);
		if (mtype_data_equal(data, d, &multi)) {
			if (flag_exist ||
			    (SET_WITH_TIMEOUT(set) &&
			     ip_set_timeout_expired(ext_timeout(data, set)))) {
				/* Just the extensions could be overwritten */
				j = i;
				goto overwrite_extensions;
			}
			return -IPSET_ERR_EXIST;
		}
		/* Reuse first timed out entry */
		if (SET_WITH_TIMEOUT(set) &&
		    ip_set_timeout_expired(ext_timeout(data, set)) &&
		    j == -1) {
			j = i;
			reuse = true;
		}
	}
	if (reuse || forceadd) {
		data = ahash_data(n, j, set->dsize);
		if (!deleted) {
#ifdef IP_SET_HASH_WITH_NETS
			for (i = 0; i < IPSET_NET_COUNT; i++)
				mtype_del_cidr(h,
					       NCIDR_PUT(DCIDR_GET(data->cidr, i)),
					       i);
#endif
			ip_set_ext_destroy(set, data);
			set->elements--;
		}
		goto copy_data;
	}
	if (set->elements >= h->maxelem)
		goto set_full;
	/* Create a new slot */
	if (n->pos >= n->size) {
		TUNE_AHASH_MAX(h, multi);
		if (n->size >= AHASH_MAX(h)) {
			/* Trigger rehashing */
			mtype_data_next(&h->next, d);
			return -EAGAIN;
		}
		old = n;
		n = kzalloc(sizeof(*n) +
			    (old->size + AHASH_INIT_SIZE) * set->dsize,
			    GFP_ATOMIC);
		if (!n)
			return -ENOMEM;
		memcpy(n, old, sizeof(struct hbucket) +
		       old->size * set->dsize);
		n->size = old->size + AHASH_INIT_SIZE;
		set->ext_size += ext_size(AHASH_INIT_SIZE, set->dsize);
	}

copy_elem:
	j = n->pos++;
	data = ahash_data(n, j, set->dsize);
copy_data:
	set->elements++;
#ifdef IP_SET_HASH_WITH_NETS
	for (i = 0; i < IPSET_NET_COUNT; i++)
		mtype_add_cidr(h, NCIDR_PUT(DCIDR_GET(d->cidr, i)), i);
#endif
	memcpy(data, d, sizeof(struct mtype_elem));
overwrite_extensions:
#ifdef IP_SET_HASH_WITH_NETS
	mtype_data_set_flags(data, flags);
#endif
	if (SET_WITH_COUNTER(set))
		ip_set_init_counter(ext_counter(data, set), ext);
	if (SET_WITH_COMMENT(set))
		ip_set_init_comment(set, ext_comment(data, set), ext);
	if (SET_WITH_SKBINFO(set))
		ip_set_init_skbinfo(ext_skbinfo(data, set), ext);
	/* Must come last for the case when timed out entry is reused */
	if (SET_WITH_TIMEOUT(set))
		ip_set_timeout_set(ext_timeout(data, set), ext->timeout);
	smp_mb__before_atomic();
	set_bit(j, n->used);
	if (old != ERR_PTR(-ENOENT)) {
		rcu_assign_pointer(hbucket(t, key), n);
		if (old)
			kfree_rcu(old, rcu);
	}

	return 0;
set_full:
	if (net_ratelimit())
		pr_warn("Set %s is full, maxelem %u reached\n",
			set->name, h->maxelem);
	return -IPSET_ERR_HASH_FULL;
}
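
/* Note (added comment): the -EAGAIN returned above when a bucket cannot
 * grow further is not reported to userspace; the ip_set core is expected
 * to call mtype_resize() and retry the add, with h->next (filled via
 * mtype_data_next()) preserving the iteration state of multi-element
 * adds across the retry.
 */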

/* Delete an element from the hash and free up space if possible.
 */
static int
mtype_del(struct ip_set *set, void *value, const struct ip_set_ext *ext,
	  struct ip_set_ext *mext, u32 flags)
{
	struct htype *h = set->data;
	struct htable *t;
	const struct mtype_elem *d = value;
	struct mtype_elem *data;
	struct hbucket *n;
	int i, j, k, ret = -IPSET_ERR_EXIST;
	u32 key, multi = 0;
	size_t dsize = set->dsize;

	t = ipset_dereference_protected(h->table, set);
	key = HKEY(value, h->initval, t->htable_bits);
	n = __ipset_dereference_protected(hbucket(t, key), 1);
	if (!n)
		goto out;
	for (i = 0, k = 0; i < n->pos; i++) {
		if (!test_bit(i, n->used)) {
			k++;
			continue;
		}
		data = ahash_data(n, i, dsize);
		if (!mtype_data_equal(data, d, &multi))
			continue;
		if (SET_WITH_TIMEOUT(set) &&
		    ip_set_timeout_expired(ext_timeout(data, set)))
			goto out;

		ret = 0;
		clear_bit(i, n->used);
		smp_mb__after_atomic();
		if (i + 1 == n->pos)
			n->pos--;
		set->elements--;
#ifdef IP_SET_HASH_WITH_NETS
		for (j = 0; j < IPSET_NET_COUNT; j++)
			mtype_del_cidr(h, NCIDR_PUT(DCIDR_GET(d->cidr, j)),
				       j);
#endif
		ip_set_ext_destroy(set, data);

		for (; i < n->pos; i++) {
			if (!test_bit(i, n->used))
				k++;
		}
		if (n->pos == 0 && k == 0) {
			set->ext_size -= ext_size(n->size, dsize);
			rcu_assign_pointer(hbucket(t, key), NULL);
			kfree_rcu(n, rcu);
		} else if (k >= AHASH_INIT_SIZE) {
			struct hbucket *tmp = kzalloc(sizeof(*tmp) +
					(n->size - AHASH_INIT_SIZE) * dsize,
					GFP_ATOMIC);
			if (!tmp)
				goto out;
			tmp->size = n->size - AHASH_INIT_SIZE;
			for (j = 0, k = 0; j < n->pos; j++) {
				if (!test_bit(j, n->used))
					continue;
				data = ahash_data(n, j, dsize);
				memcpy(tmp->value + k * dsize, data, dsize);
				set_bit(k, tmp->used);
				k++;
			}
			tmp->pos = k;
			set->ext_size -= ext_size(AHASH_INIT_SIZE, dsize);
			rcu_assign_pointer(hbucket(t, key), tmp);
			kfree_rcu(n, rcu);
		}
		goto out;
	}

out:
	return ret;
}

static inline int
mtype_data_match(struct mtype_elem *data, const struct ip_set_ext *ext,
		 struct ip_set_ext *mext, struct ip_set *set, u32 flags)
{
	if (!ip_set_match_extensions(set, ext, mext, flags, data))
		return 0;
	/* nomatch entries return -ENOTEMPTY */
	return mtype_do_data_match(data);
}

#ifdef IP_SET_HASH_WITH_NETS
/* Special test function which takes into account the different network
 * sizes added to the set
 */
static int
mtype_test_cidrs(struct ip_set *set, struct mtype_elem *d,
		 const struct ip_set_ext *ext,
		 struct ip_set_ext *mext, u32 flags)
{
	struct htype *h = set->data;
	struct htable *t = rcu_dereference_bh(h->table);
	struct hbucket *n;
	struct mtype_elem *data;
#if IPSET_NET_COUNT == 2
	struct mtype_elem orig = *d;
	int ret, i, j = 0, k;
#else
	int ret, i, j = 0;
#endif
	u32 key, multi = 0;

	pr_debug("test by nets\n");
	for (; j < NLEN && h->nets[j].cidr[0] && !multi; j++) {
#if IPSET_NET_COUNT == 2
		mtype_data_reset_elem(d, &orig);
		mtype_data_netmask(d, NCIDR_GET(h->nets[j].cidr[0]), false);
		for (k = 0; k < NLEN && h->nets[k].cidr[1] && !multi;
		     k++) {
			mtype_data_netmask(d, NCIDR_GET(h->nets[k].cidr[1]),
					   true);
#else
		mtype_data_netmask(d, NCIDR_GET(h->nets[j].cidr[0]));
#endif
		key = HKEY(d, h->initval, t->htable_bits);
		n = rcu_dereference_bh(hbucket(t, key));
		if (!n)
			continue;
		for (i = 0; i < n->pos; i++) {
			if (!test_bit(i, n->used))
				continue;
			data = ahash_data(n, i, set->dsize);
			if (!mtype_data_equal(data, d, &multi))
				continue;
			ret = mtype_data_match(data, ext, mext, set, flags);
			if (ret != 0)
				return ret;
#ifdef IP_SET_HASH_WITH_MULTI
			/* No match, reset multiple match flag */
			multi = 0;
#endif
		}
#if IPSET_NET_COUNT == 2
		}
#endif
	}
	return 0;
}
#endif

/* Test whether the element is added to the set */
static int
mtype_test(struct ip_set *set, void *value, const struct ip_set_ext *ext,
	   struct ip_set_ext *mext, u32 flags)
{
	struct htype *h = set->data;
	struct htable *t;
	struct mtype_elem *d = value;
	struct hbucket *n;
	struct mtype_elem *data;
	int i, ret = 0;
	u32 key, multi = 0;

	t = rcu_dereference_bh(h->table);
#ifdef IP_SET_HASH_WITH_NETS
	/* If we test an IP address and not a network address,
	 * try all possible network sizes
	 */
	for (i = 0; i < IPSET_NET_COUNT; i++)
		if (DCIDR_GET(d->cidr, i) != HOST_MASK)
			break;
	if (i == IPSET_NET_COUNT) {
		ret = mtype_test_cidrs(set, d, ext, mext, flags);
		goto out;
	}
#endif

	key = HKEY(d, h->initval, t->htable_bits);
	n = rcu_dereference_bh(hbucket(t, key));
	if (!n) {
		ret = 0;
		goto out;
	}
	for (i = 0; i < n->pos; i++) {
		if (!test_bit(i, n->used))
			continue;
		data = ahash_data(n, i, set->dsize);
		if (!mtype_data_equal(data, d, &multi))
			continue;
		ret = mtype_data_match(data, ext, mext, set, flags);
		if (ret != 0)
			goto out;
	}
out:
	return ret;
}

/* Reply a HEADER request: fill out the header part of the set */
static int
mtype_head(struct ip_set *set, struct sk_buff *skb)
{
	struct htype *h = set->data;
	const struct htable *t;
	struct nlattr *nested;
	size_t memsize;
	u8 htable_bits;

	/* If any members have expired, set->elements will be wrong;
	 * the mtype_expire function will update it with the right count.
	 * We do not hold set->lock here, so grab it first.
	 * set->elements can still be incorrect in the case of a huge set,
	 * because elements might time out during the listing.
	 */
	if (SET_WITH_TIMEOUT(set)) {
		spin_lock_bh(&set->lock);
		mtype_expire(set, h);
		spin_unlock_bh(&set->lock);
	}

	rcu_read_lock_bh();
	t = rcu_dereference_bh_nfnl(h->table);
	memsize = mtype_ahash_memsize(h, t) + set->ext_size;
	htable_bits = t->htable_bits;
	rcu_read_unlock_bh();

	nested = ipset_nest_start(skb, IPSET_ATTR_DATA);
	if (!nested)
		goto nla_put_failure;
	if (nla_put_net32(skb, IPSET_ATTR_HASHSIZE,
			  htonl(jhash_size(htable_bits))) ||
	    nla_put_net32(skb, IPSET_ATTR_MAXELEM, htonl(h->maxelem)))
		goto nla_put_failure;
#ifdef IP_SET_HASH_WITH_NETMASK
	if (h->netmask != HOST_MASK &&
	    nla_put_u8(skb, IPSET_ATTR_NETMASK, h->netmask))
		goto nla_put_failure;
#endif
#ifdef IP_SET_HASH_WITH_MARKMASK
	if (nla_put_u32(skb, IPSET_ATTR_MARKMASK, h->markmask))
		goto nla_put_failure;
#endif
	if (nla_put_net32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref)) ||
	    nla_put_net32(skb, IPSET_ATTR_MEMSIZE, htonl(memsize)) ||
	    nla_put_net32(skb, IPSET_ATTR_ELEMENTS, htonl(set->elements)))
		goto nla_put_failure;
	if (unlikely(ip_set_put_flags(skb, set)))
		goto nla_put_failure;
	ipset_nest_end(skb, nested);

	return 0;
nla_put_failure:
	return -EMSGSIZE;
}

/* Make it possible to run dumping in parallel with resizing */
static void
mtype_uref(struct ip_set *set, struct netlink_callback *cb, bool start)
{
	struct htype *h = set->data;
	struct htable *t;

	if (start) {
		rcu_read_lock_bh();
		t = rcu_dereference_bh_nfnl(h->table);
		atomic_inc(&t->uref);
		cb->args[IPSET_CB_PRIVATE] = (unsigned long)t;
		rcu_read_unlock_bh();
	} else if (cb->args[IPSET_CB_PRIVATE]) {
		t = (struct htable *)cb->args[IPSET_CB_PRIVATE];
		if (atomic_dec_and_test(&t->uref) && atomic_read(&t->ref)) {
			/* Resizing didn't destroy the hash table */
			pr_debug("Table destroy by dump: %p\n", t);
			mtype_ahash_destroy(set, t, false);
		}
		cb->args[IPSET_CB_PRIVATE] = 0;
	}
}

/* Reply a LIST/SAVE request: dump the elements of the specified set */
static int
mtype_list(const struct ip_set *set,
	   struct sk_buff *skb, struct netlink_callback *cb)
{
	const struct htable *t;
	struct nlattr *atd, *nested;
	const struct hbucket *n;
	const struct mtype_elem *e;
	u32 first = cb->args[IPSET_CB_ARG0];
	/* We assume that one hash bucket fits into one page */
	void *incomplete;
	u32 i;
	int ret = 0;

	atd = ipset_nest_start(skb, IPSET_ATTR_ADT);
	if (!atd)
		return -EMSGSIZE;

	pr_debug("list hash set %s\n", set->name);
	t = (const struct htable *)cb->args[IPSET_CB_PRIVATE];
	/* Expire may replace a hbucket with another one */
	rcu_read_lock();
	for (; cb->args[IPSET_CB_ARG0] < jhash_size(t->htable_bits);
	     cb->args[IPSET_CB_ARG0]++) {
		cond_resched_rcu();
		incomplete = skb_tail_pointer(skb);
		n = rcu_dereference(hbucket(t, cb->args[IPSET_CB_ARG0]));
		pr_debug("cb->arg bucket: %lu, t %p n %p\n",
			 cb->args[IPSET_CB_ARG0], t, n);
		if (!n)
			continue;
		for (i = 0; i < n->pos; i++) {
			if (!test_bit(i, n->used))
				continue;
			e = ahash_data(n, i, set->dsize);
			if (SET_WITH_TIMEOUT(set) &&
			    ip_set_timeout_expired(ext_timeout(e, set)))
				continue;
			pr_debug("list hash %lu hbucket %p i %u, data %p\n",
				 cb->args[IPSET_CB_ARG0], n, i, e);
			nested = ipset_nest_start(skb, IPSET_ATTR_DATA);
			if (!nested) {
				if (cb->args[IPSET_CB_ARG0] == first) {
					nla_nest_cancel(skb, atd);
					ret = -EMSGSIZE;
					goto out;
				}
				goto nla_put_failure;
			}
			if (mtype_data_list(skb, e))
				goto nla_put_failure;
			if (ip_set_put_extensions(skb, set, e, true))
				goto nla_put_failure;
			ipset_nest_end(skb, nested);
		}
	}
	ipset_nest_end(skb, atd);
	/* Set listing finished */
	cb->args[IPSET_CB_ARG0] = 0;

	goto out;

nla_put_failure:
	nlmsg_trim(skb, incomplete);
	if (unlikely(first == cb->args[IPSET_CB_ARG0])) {
		pr_warn("Can't list set %s: one bucket does not fit into a message. Please report it!\n",
			set->name);
		cb->args[IPSET_CB_ARG0] = 0;
		ret = -EMSGSIZE;
	} else {
		ipset_nest_end(skb, atd);
	}
out:
	rcu_read_unlock();
	return ret;
}

static int
IPSET_TOKEN(MTYPE, _kadt)(struct ip_set *set, const struct sk_buff *skb,
			  const struct xt_action_param *par,
			  enum ipset_adt adt, struct ip_set_adt_opt *opt);

static int
IPSET_TOKEN(MTYPE, _uadt)(struct ip_set *set, struct nlattr *tb[],
			  enum ipset_adt adt, u32 *lineno, u32 flags,
			  bool retried);

static const struct ip_set_type_variant mtype_variant = {
	.kadt	= mtype_kadt,
	.uadt	= mtype_uadt,
	.adt	= {
		[IPSET_ADD] = mtype_add,
		[IPSET_DEL] = mtype_del,
		[IPSET_TEST] = mtype_test,
	},
	.destroy = mtype_destroy,
	.flush	= mtype_flush,
	.head	= mtype_head,
	.list	= mtype_list,
	.uref	= mtype_uref,
	.resize	= mtype_resize,
	.same_set = mtype_same_set,
};
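
/* Usage sketch (added comment): a concrete set type instantiates this
 * template by defining the tokens before inclusion, roughly:
 *
 *	#define HTYPE		hash_ip
 *	#define MTYPE		hash_ip4
 *	#define HOST_MASK	32
 *	#include "ip_set_hash_gen.h"
 *
 * which emits hash_ip4_add(), hash_ip4_test(), the hash_ip4_variant
 * structure above and, with IP_SET_EMIT_CREATE, the create function
 * below.
 */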

#ifdef IP_SET_EMIT_CREATE
static int
IPSET_TOKEN(HTYPE, _create)(struct net *net, struct ip_set *set,
			    struct nlattr *tb[], u32 flags)
{
	u32 hashsize = IPSET_DEFAULT_HASHSIZE, maxelem = IPSET_DEFAULT_MAXELEM;
#ifdef IP_SET_HASH_WITH_MARKMASK
	u32 markmask;
#endif
	u8 hbits;
#ifdef IP_SET_HASH_WITH_NETMASK
	u8 netmask;
#endif
	size_t hsize;
	struct htype *h;
	struct htable *t;

	pr_debug("Create set %s with family %s\n",
		 set->name, set->family == NFPROTO_IPV4 ? "inet" : "inet6");

#ifdef IP_SET_PROTO_UNDEF
	if (set->family != NFPROTO_UNSPEC)
		return -IPSET_ERR_INVALID_FAMILY;
#else
	if (!(set->family == NFPROTO_IPV4 || set->family == NFPROTO_IPV6))
		return -IPSET_ERR_INVALID_FAMILY;
#endif

	if (unlikely(!ip_set_optattr_netorder(tb, IPSET_ATTR_HASHSIZE) ||
		     !ip_set_optattr_netorder(tb, IPSET_ATTR_MAXELEM) ||
		     !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
		     !ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS)))
		return -IPSET_ERR_PROTOCOL;

#ifdef IP_SET_HASH_WITH_MARKMASK
	/* Separated condition in order to avoid directive in argument list */
	if (unlikely(!ip_set_optattr_netorder(tb, IPSET_ATTR_MARKMASK)))
		return -IPSET_ERR_PROTOCOL;

	markmask = 0xffffffff;
	if (tb[IPSET_ATTR_MARKMASK]) {
		markmask = ntohl(nla_get_be32(tb[IPSET_ATTR_MARKMASK]));
		if (markmask == 0)
			return -IPSET_ERR_INVALID_MARKMASK;
	}
#endif

#ifdef IP_SET_HASH_WITH_NETMASK
	netmask = set->family == NFPROTO_IPV4 ? 32 : 128;
	if (tb[IPSET_ATTR_NETMASK]) {
		netmask = nla_get_u8(tb[IPSET_ATTR_NETMASK]);

		if ((set->family == NFPROTO_IPV4 && netmask > 32) ||
		    (set->family == NFPROTO_IPV6 && netmask > 128) ||
		    netmask == 0)
			return -IPSET_ERR_INVALID_NETMASK;
	}
#endif

	if (tb[IPSET_ATTR_HASHSIZE]) {
		hashsize = ip_set_get_h32(tb[IPSET_ATTR_HASHSIZE]);
		if (hashsize < IPSET_MIMINAL_HASHSIZE)
			hashsize = IPSET_MIMINAL_HASHSIZE;
	}

	if (tb[IPSET_ATTR_MAXELEM])
		maxelem = ip_set_get_h32(tb[IPSET_ATTR_MAXELEM]);

	hsize = sizeof(*h);
	h = kzalloc(hsize, GFP_KERNEL);
	if (!h)
		return -ENOMEM;

	/* Compute htable_bits from the user input parameter hashsize.
	 * Assume that hashsize == 2^htable_bits,
	 * otherwise round up to the first 2^n value.
	 */
	hbits = fls(hashsize - 1);
	hsize = htable_size(hbits);
	if (hsize == 0) {
		kfree(h);
		return -ENOMEM;
	}
	t = ip_set_alloc(hsize);
	if (!t) {
		kfree(h);
		return -ENOMEM;
	}
	h->maxelem = maxelem;
#ifdef IP_SET_HASH_WITH_NETMASK
	h->netmask = netmask;
#endif
#ifdef IP_SET_HASH_WITH_MARKMASK
	h->markmask = markmask;
#endif
	get_random_bytes(&h->initval, sizeof(h->initval));

	t->htable_bits = hbits;
	RCU_INIT_POINTER(h->table, t);

	h->set = set;
	set->data = h;

#ifndef IP_SET_PROTO_UNDEF
	if (set->family == NFPROTO_IPV4) {
#endif
		set->variant = &IPSET_TOKEN(HTYPE, 4_variant);
		set->dsize = ip_set_elem_len(set, tb,
			sizeof(struct IPSET_TOKEN(HTYPE, 4_elem)),
			__alignof__(struct IPSET_TOKEN(HTYPE, 4_elem)));
#ifndef IP_SET_PROTO_UNDEF
	} else {
		set->variant = &IPSET_TOKEN(HTYPE, 6_variant);
		set->dsize = ip_set_elem_len(set, tb,
			sizeof(struct IPSET_TOKEN(HTYPE, 6_elem)),
			__alignof__(struct IPSET_TOKEN(HTYPE, 6_elem)));
	}
#endif
	set->timeout = IPSET_NO_TIMEOUT;
	if (tb[IPSET_ATTR_TIMEOUT]) {
		set->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
#ifndef IP_SET_PROTO_UNDEF
		if (set->family == NFPROTO_IPV4)
#endif
			IPSET_TOKEN(HTYPE, 4_gc_init)(set,
				IPSET_TOKEN(HTYPE, 4_gc));
#ifndef IP_SET_PROTO_UNDEF
		else
			IPSET_TOKEN(HTYPE, 6_gc_init)(set,
				IPSET_TOKEN(HTYPE, 6_gc));
#endif
	}
	pr_debug("create %s hashsize %u (%u) maxelem %u: %p(%p)\n",
		 set->name, jhash_size(t->htable_bits),
		 t->htable_bits, h->maxelem, set->data, t);

	return 0;
}
#endif /* IP_SET_EMIT_CREATE */