GNU Linux-libre 4.19.286-gnu1
[releases.git] / net / xfrm / xfrm_state.c
1 /*
2  * xfrm_state.c
3  *
4  * Changes:
5  *      Mitsuru KANDA @USAGI
6  *      Kazunori MIYAZAWA @USAGI
7  *      Kunihiro Ishiguro <kunihiro@ipinfusion.com>
8  *              IPv6 support
9  *      YOSHIFUJI Hideaki @USAGI
10  *              Split up af-specific functions
11  *      Derek Atkins <derek@ihtfp.com>
12  *              Add UDP Encapsulation
13  *
14  */
15
16 #include <linux/workqueue.h>
17 #include <net/xfrm.h>
18 #include <linux/pfkeyv2.h>
19 #include <linux/ipsec.h>
20 #include <linux/module.h>
21 #include <linux/cache.h>
22 #include <linux/audit.h>
23 #include <linux/uaccess.h>
24 #include <linux/ktime.h>
25 #include <linux/slab.h>
26 #include <linux/interrupt.h>
27 #include <linux/kernel.h>
28
29 #include "xfrm_hash.h"
30
31 #define xfrm_state_deref_prot(table, net) \
32         rcu_dereference_protected((table), lockdep_is_held(&(net)->xfrm.xfrm_state_lock))
33
34 static void xfrm_state_gc_task(struct work_struct *work);
35
36 /* Each xfrm_state may be linked to two tables:
37
38    1. Hash table by (spi,daddr,ah/esp) to find SA by SPI. (input,ctl)
39    2. Hash table by (daddr,family,reqid) to find what SAs exist for given
40       destination/tunnel endpoint. (output)
41  */
42
43 static unsigned int xfrm_state_hashmax __read_mostly = 1 * 1024 * 1024;
44 static struct kmem_cache *xfrm_state_cache __ro_after_init;
45
46 static DECLARE_WORK(xfrm_state_gc_work, xfrm_state_gc_task);
47 static HLIST_HEAD(xfrm_state_gc_list);
48
49 static inline bool xfrm_state_hold_rcu(struct xfrm_state __rcu *x)
50 {
51         return refcount_inc_not_zero(&x->refcnt);
52 }
53
54 static inline unsigned int xfrm_dst_hash(struct net *net,
55                                          const xfrm_address_t *daddr,
56                                          const xfrm_address_t *saddr,
57                                          u32 reqid,
58                                          unsigned short family)
59 {
60         return __xfrm_dst_hash(daddr, saddr, reqid, family, net->xfrm.state_hmask);
61 }
62
63 static inline unsigned int xfrm_src_hash(struct net *net,
64                                          const xfrm_address_t *daddr,
65                                          const xfrm_address_t *saddr,
66                                          unsigned short family)
67 {
68         return __xfrm_src_hash(daddr, saddr, family, net->xfrm.state_hmask);
69 }
70
71 static inline unsigned int
72 xfrm_spi_hash(struct net *net, const xfrm_address_t *daddr,
73               __be32 spi, u8 proto, unsigned short family)
74 {
75         return __xfrm_spi_hash(daddr, spi, proto, family, net->xfrm.state_hmask);
76 }
77
78 static void xfrm_hash_transfer(struct hlist_head *list,
79                                struct hlist_head *ndsttable,
80                                struct hlist_head *nsrctable,
81                                struct hlist_head *nspitable,
82                                unsigned int nhashmask)
83 {
84         struct hlist_node *tmp;
85         struct xfrm_state *x;
86
87         hlist_for_each_entry_safe(x, tmp, list, bydst) {
88                 unsigned int h;
89
90                 h = __xfrm_dst_hash(&x->id.daddr, &x->props.saddr,
91                                     x->props.reqid, x->props.family,
92                                     nhashmask);
93                 hlist_add_head_rcu(&x->bydst, ndsttable + h);
94
95                 h = __xfrm_src_hash(&x->id.daddr, &x->props.saddr,
96                                     x->props.family,
97                                     nhashmask);
98                 hlist_add_head_rcu(&x->bysrc, nsrctable + h);
99
100                 if (x->id.spi) {
101                         h = __xfrm_spi_hash(&x->id.daddr, x->id.spi,
102                                             x->id.proto, x->props.family,
103                                             nhashmask);
104                         hlist_add_head_rcu(&x->byspi, nspitable + h);
105                 }
106         }
107 }
108
109 static unsigned long xfrm_hash_new_size(unsigned int state_hmask)
110 {
111         return ((state_hmask + 1) << 1) * sizeof(struct hlist_head);
112 }
113
114 static void xfrm_hash_resize(struct work_struct *work)
115 {
116         struct net *net = container_of(work, struct net, xfrm.state_hash_work);
117         struct hlist_head *ndst, *nsrc, *nspi, *odst, *osrc, *ospi;
118         unsigned long nsize, osize;
119         unsigned int nhashmask, ohashmask;
120         int i;
121
122         nsize = xfrm_hash_new_size(net->xfrm.state_hmask);
123         ndst = xfrm_hash_alloc(nsize);
124         if (!ndst)
125                 return;
126         nsrc = xfrm_hash_alloc(nsize);
127         if (!nsrc) {
128                 xfrm_hash_free(ndst, nsize);
129                 return;
130         }
131         nspi = xfrm_hash_alloc(nsize);
132         if (!nspi) {
133                 xfrm_hash_free(ndst, nsize);
134                 xfrm_hash_free(nsrc, nsize);
135                 return;
136         }
137
138         spin_lock_bh(&net->xfrm.xfrm_state_lock);
139         write_seqcount_begin(&net->xfrm.xfrm_state_hash_generation);
140
141         nhashmask = (nsize / sizeof(struct hlist_head)) - 1U;
142         odst = xfrm_state_deref_prot(net->xfrm.state_bydst, net);
143         for (i = net->xfrm.state_hmask; i >= 0; i--)
144                 xfrm_hash_transfer(odst + i, ndst, nsrc, nspi, nhashmask);
145
146         osrc = xfrm_state_deref_prot(net->xfrm.state_bysrc, net);
147         ospi = xfrm_state_deref_prot(net->xfrm.state_byspi, net);
148         ohashmask = net->xfrm.state_hmask;
149
150         rcu_assign_pointer(net->xfrm.state_bydst, ndst);
151         rcu_assign_pointer(net->xfrm.state_bysrc, nsrc);
152         rcu_assign_pointer(net->xfrm.state_byspi, nspi);
153         net->xfrm.state_hmask = nhashmask;
154
155         write_seqcount_end(&net->xfrm.xfrm_state_hash_generation);
156         spin_unlock_bh(&net->xfrm.xfrm_state_lock);
157
158         osize = (ohashmask + 1) * sizeof(struct hlist_head);
159
160         synchronize_rcu();
161
162         xfrm_hash_free(odst, osize);
163         xfrm_hash_free(osrc, osize);
164         xfrm_hash_free(ospi, osize);
165 }
166
167 static DEFINE_SPINLOCK(xfrm_state_afinfo_lock);
168 static struct xfrm_state_afinfo __rcu *xfrm_state_afinfo[NPROTO];
169
170 static DEFINE_SPINLOCK(xfrm_state_gc_lock);
171
172 int __xfrm_state_delete(struct xfrm_state *x);
173
174 int km_query(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *pol);
175 bool km_is_alive(const struct km_event *c);
176 void km_state_expired(struct xfrm_state *x, int hard, u32 portid);
177
178 static DEFINE_SPINLOCK(xfrm_type_lock);
179 int xfrm_register_type(const struct xfrm_type *type, unsigned short family)
180 {
181         struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
182         const struct xfrm_type **typemap;
183         int err = 0;
184
185         if (unlikely(afinfo == NULL))
186                 return -EAFNOSUPPORT;
187         typemap = afinfo->type_map;
188         spin_lock_bh(&xfrm_type_lock);
189
190         if (likely(typemap[type->proto] == NULL))
191                 typemap[type->proto] = type;
192         else
193                 err = -EEXIST;
194         spin_unlock_bh(&xfrm_type_lock);
195         rcu_read_unlock();
196         return err;
197 }
198 EXPORT_SYMBOL(xfrm_register_type);
199
200 int xfrm_unregister_type(const struct xfrm_type *type, unsigned short family)
201 {
202         struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
203         const struct xfrm_type **typemap;
204         int err = 0;
205
206         if (unlikely(afinfo == NULL))
207                 return -EAFNOSUPPORT;
208         typemap = afinfo->type_map;
209         spin_lock_bh(&xfrm_type_lock);
210
211         if (unlikely(typemap[type->proto] != type))
212                 err = -ENOENT;
213         else
214                 typemap[type->proto] = NULL;
215         spin_unlock_bh(&xfrm_type_lock);
216         rcu_read_unlock();
217         return err;
218 }
219 EXPORT_SYMBOL(xfrm_unregister_type);
220
221 static const struct xfrm_type *xfrm_get_type(u8 proto, unsigned short family)
222 {
223         struct xfrm_state_afinfo *afinfo;
224         const struct xfrm_type **typemap;
225         const struct xfrm_type *type;
226         int modload_attempted = 0;
227
228 retry:
229         afinfo = xfrm_state_get_afinfo(family);
230         if (unlikely(afinfo == NULL))
231                 return NULL;
232         typemap = afinfo->type_map;
233
234         type = READ_ONCE(typemap[proto]);
235         if (unlikely(type && !try_module_get(type->owner)))
236                 type = NULL;
237
238         rcu_read_unlock();
239
240         if (!type && !modload_attempted) {
241                 request_module("xfrm-type-%d-%d", family, proto);
242                 modload_attempted = 1;
243                 goto retry;
244         }
245
246         return type;
247 }
248
249 static void xfrm_put_type(const struct xfrm_type *type)
250 {
251         module_put(type->owner);
252 }
253
254 static DEFINE_SPINLOCK(xfrm_type_offload_lock);
255 int xfrm_register_type_offload(const struct xfrm_type_offload *type,
256                                unsigned short family)
257 {
258         struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
259         const struct xfrm_type_offload **typemap;
260         int err = 0;
261
262         if (unlikely(afinfo == NULL))
263                 return -EAFNOSUPPORT;
264         typemap = afinfo->type_offload_map;
265         spin_lock_bh(&xfrm_type_offload_lock);
266
267         if (likely(typemap[type->proto] == NULL))
268                 typemap[type->proto] = type;
269         else
270                 err = -EEXIST;
271         spin_unlock_bh(&xfrm_type_offload_lock);
272         rcu_read_unlock();
273         return err;
274 }
275 EXPORT_SYMBOL(xfrm_register_type_offload);
276
277 int xfrm_unregister_type_offload(const struct xfrm_type_offload *type,
278                                  unsigned short family)
279 {
280         struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
281         const struct xfrm_type_offload **typemap;
282         int err = 0;
283
284         if (unlikely(afinfo == NULL))
285                 return -EAFNOSUPPORT;
286         typemap = afinfo->type_offload_map;
287         spin_lock_bh(&xfrm_type_offload_lock);
288
289         if (unlikely(typemap[type->proto] != type))
290                 err = -ENOENT;
291         else
292                 typemap[type->proto] = NULL;
293         spin_unlock_bh(&xfrm_type_offload_lock);
294         rcu_read_unlock();
295         return err;
296 }
297 EXPORT_SYMBOL(xfrm_unregister_type_offload);
298
299 static const struct xfrm_type_offload *
300 xfrm_get_type_offload(u8 proto, unsigned short family, bool try_load)
301 {
302         struct xfrm_state_afinfo *afinfo;
303         const struct xfrm_type_offload **typemap;
304         const struct xfrm_type_offload *type;
305
306 retry:
307         afinfo = xfrm_state_get_afinfo(family);
308         if (unlikely(afinfo == NULL))
309                 return NULL;
310         typemap = afinfo->type_offload_map;
311
312         type = typemap[proto];
313         if ((type && !try_module_get(type->owner)))
314                 type = NULL;
315
316         rcu_read_unlock();
317
318         if (!type && try_load) {
319                 request_module("xfrm-offload-%d-%d", family, proto);
320                 try_load = false;
321                 goto retry;
322         }
323
324         return type;
325 }
326
327 static void xfrm_put_type_offload(const struct xfrm_type_offload *type)
328 {
329         module_put(type->owner);
330 }
331
332 static DEFINE_SPINLOCK(xfrm_mode_lock);
333 int xfrm_register_mode(struct xfrm_mode *mode, int family)
334 {
335         struct xfrm_state_afinfo *afinfo;
336         struct xfrm_mode **modemap;
337         int err;
338
339         if (unlikely(mode->encap >= XFRM_MODE_MAX))
340                 return -EINVAL;
341
342         afinfo = xfrm_state_get_afinfo(family);
343         if (unlikely(afinfo == NULL))
344                 return -EAFNOSUPPORT;
345
346         err = -EEXIST;
347         modemap = afinfo->mode_map;
348         spin_lock_bh(&xfrm_mode_lock);
349         if (modemap[mode->encap])
350                 goto out;
351
352         err = -ENOENT;
353         if (!try_module_get(afinfo->owner))
354                 goto out;
355
356         mode->afinfo = afinfo;
357         modemap[mode->encap] = mode;
358         err = 0;
359
360 out:
361         spin_unlock_bh(&xfrm_mode_lock);
362         rcu_read_unlock();
363         return err;
364 }
365 EXPORT_SYMBOL(xfrm_register_mode);
366
367 int xfrm_unregister_mode(struct xfrm_mode *mode, int family)
368 {
369         struct xfrm_state_afinfo *afinfo;
370         struct xfrm_mode **modemap;
371         int err;
372
373         if (unlikely(mode->encap >= XFRM_MODE_MAX))
374                 return -EINVAL;
375
376         afinfo = xfrm_state_get_afinfo(family);
377         if (unlikely(afinfo == NULL))
378                 return -EAFNOSUPPORT;
379
380         err = -ENOENT;
381         modemap = afinfo->mode_map;
382         spin_lock_bh(&xfrm_mode_lock);
383         if (likely(modemap[mode->encap] == mode)) {
384                 modemap[mode->encap] = NULL;
385                 module_put(mode->afinfo->owner);
386                 err = 0;
387         }
388
389         spin_unlock_bh(&xfrm_mode_lock);
390         rcu_read_unlock();
391         return err;
392 }
393 EXPORT_SYMBOL(xfrm_unregister_mode);
394
395 static struct xfrm_mode *xfrm_get_mode(unsigned int encap, int family)
396 {
397         struct xfrm_state_afinfo *afinfo;
398         struct xfrm_mode *mode;
399         int modload_attempted = 0;
400
401         if (unlikely(encap >= XFRM_MODE_MAX))
402                 return NULL;
403
404 retry:
405         afinfo = xfrm_state_get_afinfo(family);
406         if (unlikely(afinfo == NULL))
407                 return NULL;
408
409         mode = READ_ONCE(afinfo->mode_map[encap]);
410         if (unlikely(mode && !try_module_get(mode->owner)))
411                 mode = NULL;
412
413         rcu_read_unlock();
414         if (!mode && !modload_attempted) {
415                 request_module("xfrm-mode-%d-%d", family, encap);
416                 modload_attempted = 1;
417                 goto retry;
418         }
419
420         return mode;
421 }
422
423 static void xfrm_put_mode(struct xfrm_mode *mode)
424 {
425         module_put(mode->owner);
426 }
427
428 void xfrm_state_free(struct xfrm_state *x)
429 {
430         kmem_cache_free(xfrm_state_cache, x);
431 }
432 EXPORT_SYMBOL(xfrm_state_free);
433
434 static void ___xfrm_state_destroy(struct xfrm_state *x)
435 {
436         tasklet_hrtimer_cancel(&x->mtimer);
437         del_timer_sync(&x->rtimer);
438         kfree(x->aead);
439         kfree(x->aalg);
440         kfree(x->ealg);
441         kfree(x->calg);
442         kfree(x->encap);
443         kfree(x->coaddr);
444         kfree(x->replay_esn);
445         kfree(x->preplay_esn);
446         if (x->inner_mode)
447                 xfrm_put_mode(x->inner_mode);
448         if (x->inner_mode_iaf)
449                 xfrm_put_mode(x->inner_mode_iaf);
450         if (x->outer_mode)
451                 xfrm_put_mode(x->outer_mode);
452         if (x->type_offload)
453                 xfrm_put_type_offload(x->type_offload);
454         if (x->type) {
455                 x->type->destructor(x);
456                 xfrm_put_type(x->type);
457         }
458         if (x->xfrag.page)
459                 put_page(x->xfrag.page);
460         xfrm_dev_state_free(x);
461         security_xfrm_state_free(x);
462         xfrm_state_free(x);
463 }
464
465 static void xfrm_state_gc_task(struct work_struct *work)
466 {
467         struct xfrm_state *x;
468         struct hlist_node *tmp;
469         struct hlist_head gc_list;
470
471         spin_lock_bh(&xfrm_state_gc_lock);
472         hlist_move_list(&xfrm_state_gc_list, &gc_list);
473         spin_unlock_bh(&xfrm_state_gc_lock);
474
475         synchronize_rcu();
476
477         hlist_for_each_entry_safe(x, tmp, &gc_list, gclist)
478                 ___xfrm_state_destroy(x);
479 }
480
481 static enum hrtimer_restart xfrm_timer_handler(struct hrtimer *me)
482 {
483         struct tasklet_hrtimer *thr = container_of(me, struct tasklet_hrtimer, timer);
484         struct xfrm_state *x = container_of(thr, struct xfrm_state, mtimer);
485         time64_t now = ktime_get_real_seconds();
486         time64_t next = TIME64_MAX;
487         int warn = 0;
488         int err = 0;
489
490         spin_lock(&x->lock);
491         if (x->km.state == XFRM_STATE_DEAD)
492                 goto out;
493         if (x->km.state == XFRM_STATE_EXPIRED)
494                 goto expired;
495         if (x->lft.hard_add_expires_seconds) {
496                 long tmo = x->lft.hard_add_expires_seconds +
497                         x->curlft.add_time - now;
498                 if (tmo <= 0) {
499                         if (x->xflags & XFRM_SOFT_EXPIRE) {
500                                 /* enter hard expire without soft expire first?!
501                                  * setting a new date could trigger this.
502                                  * workaround: fix x->curflt.add_time by below:
503                                  */
504                                 x->curlft.add_time = now - x->saved_tmo - 1;
505                                 tmo = x->lft.hard_add_expires_seconds - x->saved_tmo;
506                         } else
507                                 goto expired;
508                 }
509                 if (tmo < next)
510                         next = tmo;
511         }
512         if (x->lft.hard_use_expires_seconds) {
513                 long tmo = x->lft.hard_use_expires_seconds +
514                         (x->curlft.use_time ? : now) - now;
515                 if (tmo <= 0)
516                         goto expired;
517                 if (tmo < next)
518                         next = tmo;
519         }
520         if (x->km.dying)
521                 goto resched;
522         if (x->lft.soft_add_expires_seconds) {
523                 long tmo = x->lft.soft_add_expires_seconds +
524                         x->curlft.add_time - now;
525                 if (tmo <= 0) {
526                         warn = 1;
527                         x->xflags &= ~XFRM_SOFT_EXPIRE;
528                 } else if (tmo < next) {
529                         next = tmo;
530                         x->xflags |= XFRM_SOFT_EXPIRE;
531                         x->saved_tmo = tmo;
532                 }
533         }
534         if (x->lft.soft_use_expires_seconds) {
535                 long tmo = x->lft.soft_use_expires_seconds +
536                         (x->curlft.use_time ? : now) - now;
537                 if (tmo <= 0)
538                         warn = 1;
539                 else if (tmo < next)
540                         next = tmo;
541         }
542
543         x->km.dying = warn;
544         if (warn)
545                 km_state_expired(x, 0, 0);
546 resched:
547         if (next != TIME64_MAX) {
548                 tasklet_hrtimer_start(&x->mtimer, ktime_set(next, 0), HRTIMER_MODE_REL);
549         }
550
551         goto out;
552
553 expired:
554         if (x->km.state == XFRM_STATE_ACQ && x->id.spi == 0)
555                 x->km.state = XFRM_STATE_EXPIRED;
556
557         err = __xfrm_state_delete(x);
558         if (!err)
559                 km_state_expired(x, 1, 0);
560
561         xfrm_audit_state_delete(x, err ? 0 : 1, true);
562
563 out:
564         spin_unlock(&x->lock);
565         return HRTIMER_NORESTART;
566 }
567
568 static void xfrm_replay_timer_handler(struct timer_list *t);
569
570 struct xfrm_state *xfrm_state_alloc(struct net *net)
571 {
572         struct xfrm_state *x;
573
574         x = kmem_cache_alloc(xfrm_state_cache, GFP_ATOMIC | __GFP_ZERO);
575
576         if (x) {
577                 write_pnet(&x->xs_net, net);
578                 refcount_set(&x->refcnt, 1);
579                 atomic_set(&x->tunnel_users, 0);
580                 INIT_LIST_HEAD(&x->km.all);
581                 INIT_HLIST_NODE(&x->bydst);
582                 INIT_HLIST_NODE(&x->bysrc);
583                 INIT_HLIST_NODE(&x->byspi);
584                 tasklet_hrtimer_init(&x->mtimer, xfrm_timer_handler,
585                                         CLOCK_BOOTTIME, HRTIMER_MODE_ABS);
586                 timer_setup(&x->rtimer, xfrm_replay_timer_handler, 0);
587                 x->curlft.add_time = ktime_get_real_seconds();
588                 x->lft.soft_byte_limit = XFRM_INF;
589                 x->lft.soft_packet_limit = XFRM_INF;
590                 x->lft.hard_byte_limit = XFRM_INF;
591                 x->lft.hard_packet_limit = XFRM_INF;
592                 x->replay_maxage = 0;
593                 x->replay_maxdiff = 0;
594                 x->inner_mode = NULL;
595                 x->inner_mode_iaf = NULL;
596                 spin_lock_init(&x->lock);
597         }
598         return x;
599 }
600 EXPORT_SYMBOL(xfrm_state_alloc);
601
602 void __xfrm_state_destroy(struct xfrm_state *x, bool sync)
603 {
604         WARN_ON(x->km.state != XFRM_STATE_DEAD);
605
606         if (sync) {
607                 synchronize_rcu();
608                 ___xfrm_state_destroy(x);
609         } else {
610                 spin_lock_bh(&xfrm_state_gc_lock);
611                 hlist_add_head(&x->gclist, &xfrm_state_gc_list);
612                 spin_unlock_bh(&xfrm_state_gc_lock);
613                 schedule_work(&xfrm_state_gc_work);
614         }
615 }
616 EXPORT_SYMBOL(__xfrm_state_destroy);
617
618 int __xfrm_state_delete(struct xfrm_state *x)
619 {
620         struct net *net = xs_net(x);
621         int err = -ESRCH;
622
623         if (x->km.state != XFRM_STATE_DEAD) {
624                 x->km.state = XFRM_STATE_DEAD;
625                 spin_lock(&net->xfrm.xfrm_state_lock);
626                 list_del(&x->km.all);
627                 hlist_del_rcu(&x->bydst);
628                 hlist_del_rcu(&x->bysrc);
629                 if (x->id.spi)
630                         hlist_del_rcu(&x->byspi);
631                 net->xfrm.state_num--;
632                 spin_unlock(&net->xfrm.xfrm_state_lock);
633
634                 xfrm_dev_state_delete(x);
635
636                 /* All xfrm_state objects are created by xfrm_state_alloc.
637                  * The xfrm_state_alloc call gives a reference, and that
638                  * is what we are dropping here.
639                  */
640                 xfrm_state_put(x);
641                 err = 0;
642         }
643
644         return err;
645 }
646 EXPORT_SYMBOL(__xfrm_state_delete);
647
648 int xfrm_state_delete(struct xfrm_state *x)
649 {
650         int err;
651
652         spin_lock_bh(&x->lock);
653         err = __xfrm_state_delete(x);
654         spin_unlock_bh(&x->lock);
655
656         return err;
657 }
658 EXPORT_SYMBOL(xfrm_state_delete);
659
660 #ifdef CONFIG_SECURITY_NETWORK_XFRM
661 static inline int
662 xfrm_state_flush_secctx_check(struct net *net, u8 proto, bool task_valid)
663 {
664         int i, err = 0;
665
666         for (i = 0; i <= net->xfrm.state_hmask; i++) {
667                 struct xfrm_state *x;
668
669                 hlist_for_each_entry(x, net->xfrm.state_bydst+i, bydst) {
670                         if (xfrm_id_proto_match(x->id.proto, proto) &&
671                            (err = security_xfrm_state_delete(x)) != 0) {
672                                 xfrm_audit_state_delete(x, 0, task_valid);
673                                 return err;
674                         }
675                 }
676         }
677
678         return err;
679 }
680
681 static inline int
682 xfrm_dev_state_flush_secctx_check(struct net *net, struct net_device *dev, bool task_valid)
683 {
684         int i, err = 0;
685
686         for (i = 0; i <= net->xfrm.state_hmask; i++) {
687                 struct xfrm_state *x;
688                 struct xfrm_state_offload *xso;
689
690                 hlist_for_each_entry(x, net->xfrm.state_bydst+i, bydst) {
691                         xso = &x->xso;
692
693                         if (xso->dev == dev &&
694                            (err = security_xfrm_state_delete(x)) != 0) {
695                                 xfrm_audit_state_delete(x, 0, task_valid);
696                                 return err;
697                         }
698                 }
699         }
700
701         return err;
702 }
703 #else
704 static inline int
705 xfrm_state_flush_secctx_check(struct net *net, u8 proto, bool task_valid)
706 {
707         return 0;
708 }
709
710 static inline int
711 xfrm_dev_state_flush_secctx_check(struct net *net, struct net_device *dev, bool task_valid)
712 {
713         return 0;
714 }
715 #endif
716
717 int xfrm_state_flush(struct net *net, u8 proto, bool task_valid, bool sync)
718 {
719         int i, err = 0, cnt = 0;
720
721         spin_lock_bh(&net->xfrm.xfrm_state_lock);
722         err = xfrm_state_flush_secctx_check(net, proto, task_valid);
723         if (err)
724                 goto out;
725
726         err = -ESRCH;
727         for (i = 0; i <= net->xfrm.state_hmask; i++) {
728                 struct xfrm_state *x;
729 restart:
730                 hlist_for_each_entry(x, net->xfrm.state_bydst+i, bydst) {
731                         if (!xfrm_state_kern(x) &&
732                             xfrm_id_proto_match(x->id.proto, proto)) {
733                                 xfrm_state_hold(x);
734                                 spin_unlock_bh(&net->xfrm.xfrm_state_lock);
735
736                                 err = xfrm_state_delete(x);
737                                 xfrm_audit_state_delete(x, err ? 0 : 1,
738                                                         task_valid);
739                                 if (sync)
740                                         xfrm_state_put_sync(x);
741                                 else
742                                         xfrm_state_put(x);
743                                 if (!err)
744                                         cnt++;
745
746                                 spin_lock_bh(&net->xfrm.xfrm_state_lock);
747                                 goto restart;
748                         }
749                 }
750         }
751 out:
752         spin_unlock_bh(&net->xfrm.xfrm_state_lock);
753         if (cnt)
754                 err = 0;
755
756         return err;
757 }
758 EXPORT_SYMBOL(xfrm_state_flush);
759
760 int xfrm_dev_state_flush(struct net *net, struct net_device *dev, bool task_valid)
761 {
762         int i, err = 0, cnt = 0;
763
764         spin_lock_bh(&net->xfrm.xfrm_state_lock);
765         err = xfrm_dev_state_flush_secctx_check(net, dev, task_valid);
766         if (err)
767                 goto out;
768
769         err = -ESRCH;
770         for (i = 0; i <= net->xfrm.state_hmask; i++) {
771                 struct xfrm_state *x;
772                 struct xfrm_state_offload *xso;
773 restart:
774                 hlist_for_each_entry(x, net->xfrm.state_bydst+i, bydst) {
775                         xso = &x->xso;
776
777                         if (!xfrm_state_kern(x) && xso->dev == dev) {
778                                 xfrm_state_hold(x);
779                                 spin_unlock_bh(&net->xfrm.xfrm_state_lock);
780
781                                 err = xfrm_state_delete(x);
782                                 xfrm_audit_state_delete(x, err ? 0 : 1,
783                                                         task_valid);
784                                 xfrm_state_put(x);
785                                 if (!err)
786                                         cnt++;
787
788                                 spin_lock_bh(&net->xfrm.xfrm_state_lock);
789                                 goto restart;
790                         }
791                 }
792         }
793         if (cnt)
794                 err = 0;
795
796 out:
797         spin_unlock_bh(&net->xfrm.xfrm_state_lock);
798         return err;
799 }
800 EXPORT_SYMBOL(xfrm_dev_state_flush);
801
802 void xfrm_sad_getinfo(struct net *net, struct xfrmk_sadinfo *si)
803 {
804         spin_lock_bh(&net->xfrm.xfrm_state_lock);
805         si->sadcnt = net->xfrm.state_num;
806         si->sadhcnt = net->xfrm.state_hmask + 1;
807         si->sadhmcnt = xfrm_state_hashmax;
808         spin_unlock_bh(&net->xfrm.xfrm_state_lock);
809 }
810 EXPORT_SYMBOL(xfrm_sad_getinfo);
811
812 static void
813 xfrm_init_tempstate(struct xfrm_state *x, const struct flowi *fl,
814                     const struct xfrm_tmpl *tmpl,
815                     const xfrm_address_t *daddr, const xfrm_address_t *saddr,
816                     unsigned short family)
817 {
818         struct xfrm_state_afinfo *afinfo = xfrm_state_afinfo_get_rcu(family);
819
820         if (!afinfo)
821                 return;
822
823         afinfo->init_tempsel(&x->sel, fl);
824
825         if (family != tmpl->encap_family) {
826                 afinfo = xfrm_state_afinfo_get_rcu(tmpl->encap_family);
827                 if (!afinfo)
828                         return;
829         }
830         afinfo->init_temprop(x, tmpl, daddr, saddr);
831 }
832
833 static struct xfrm_state *__xfrm_state_lookup(struct net *net, u32 mark,
834                                               const xfrm_address_t *daddr,
835                                               __be32 spi, u8 proto,
836                                               unsigned short family)
837 {
838         unsigned int h = xfrm_spi_hash(net, daddr, spi, proto, family);
839         struct xfrm_state *x;
840
841         hlist_for_each_entry_rcu(x, net->xfrm.state_byspi + h, byspi) {
842                 if (x->props.family != family ||
843                     x->id.spi       != spi ||
844                     x->id.proto     != proto ||
845                     !xfrm_addr_equal(&x->id.daddr, daddr, family))
846                         continue;
847
848                 if ((mark & x->mark.m) != x->mark.v)
849                         continue;
850                 if (!xfrm_state_hold_rcu(x))
851                         continue;
852                 return x;
853         }
854
855         return NULL;
856 }
857
858 static struct xfrm_state *__xfrm_state_lookup_byaddr(struct net *net, u32 mark,
859                                                      const xfrm_address_t *daddr,
860                                                      const xfrm_address_t *saddr,
861                                                      u8 proto, unsigned short family)
862 {
863         unsigned int h = xfrm_src_hash(net, daddr, saddr, family);
864         struct xfrm_state *x;
865
866         hlist_for_each_entry_rcu(x, net->xfrm.state_bysrc + h, bysrc) {
867                 if (x->props.family != family ||
868                     x->id.proto     != proto ||
869                     !xfrm_addr_equal(&x->id.daddr, daddr, family) ||
870                     !xfrm_addr_equal(&x->props.saddr, saddr, family))
871                         continue;
872
873                 if ((mark & x->mark.m) != x->mark.v)
874                         continue;
875                 if (!xfrm_state_hold_rcu(x))
876                         continue;
877                 return x;
878         }
879
880         return NULL;
881 }
882
883 static inline struct xfrm_state *
884 __xfrm_state_locate(struct xfrm_state *x, int use_spi, int family)
885 {
886         struct net *net = xs_net(x);
887         u32 mark = x->mark.v & x->mark.m;
888
889         if (use_spi)
890                 return __xfrm_state_lookup(net, mark, &x->id.daddr,
891                                            x->id.spi, x->id.proto, family);
892         else
893                 return __xfrm_state_lookup_byaddr(net, mark,
894                                                   &x->id.daddr,
895                                                   &x->props.saddr,
896                                                   x->id.proto, family);
897 }
898
899 static void xfrm_hash_grow_check(struct net *net, int have_hash_collision)
900 {
901         if (have_hash_collision &&
902             (net->xfrm.state_hmask + 1) < xfrm_state_hashmax &&
903             net->xfrm.state_num > net->xfrm.state_hmask)
904                 schedule_work(&net->xfrm.state_hash_work);
905 }
906
907 static void xfrm_state_look_at(struct xfrm_policy *pol, struct xfrm_state *x,
908                                const struct flowi *fl, unsigned short family,
909                                struct xfrm_state **best, int *acq_in_progress,
910                                int *error)
911 {
912         /* Resolution logic:
913          * 1. There is a valid state with matching selector. Done.
914          * 2. Valid state with inappropriate selector. Skip.
915          *
916          * Entering area of "sysdeps".
917          *
918          * 3. If state is not valid, selector is temporary, it selects
919          *    only session which triggered previous resolution. Key
920          *    manager will do something to install a state with proper
921          *    selector.
922          */
923         if (x->km.state == XFRM_STATE_VALID) {
924                 if ((x->sel.family &&
925                      (x->sel.family != family ||
926                       !xfrm_selector_match(&x->sel, fl, family))) ||
927                     !security_xfrm_state_pol_flow_match(x, pol, fl))
928                         return;
929
930                 if (!*best ||
931                     (*best)->km.dying > x->km.dying ||
932                     ((*best)->km.dying == x->km.dying &&
933                      (*best)->curlft.add_time < x->curlft.add_time))
934                         *best = x;
935         } else if (x->km.state == XFRM_STATE_ACQ) {
936                 *acq_in_progress = 1;
937         } else if (x->km.state == XFRM_STATE_ERROR ||
938                    x->km.state == XFRM_STATE_EXPIRED) {
939                 if ((!x->sel.family ||
940                      (x->sel.family == family &&
941                       xfrm_selector_match(&x->sel, fl, family))) &&
942                     security_xfrm_state_pol_flow_match(x, pol, fl))
943                         *error = -ESRCH;
944         }
945 }
946
947 struct xfrm_state *
948 xfrm_state_find(const xfrm_address_t *daddr, const xfrm_address_t *saddr,
949                 const struct flowi *fl, struct xfrm_tmpl *tmpl,
950                 struct xfrm_policy *pol, int *err,
951                 unsigned short family, u32 if_id)
952 {
953         static xfrm_address_t saddr_wildcard = { };
954         struct net *net = xp_net(pol);
955         unsigned int h, h_wildcard;
956         struct xfrm_state *x, *x0, *to_put;
957         int acquire_in_progress = 0;
958         int error = 0;
959         struct xfrm_state *best = NULL;
960         u32 mark = pol->mark.v & pol->mark.m;
961         unsigned short encap_family = tmpl->encap_family;
962         unsigned int sequence;
963         struct km_event c;
964
965         to_put = NULL;
966
967         sequence = read_seqcount_begin(&net->xfrm.xfrm_state_hash_generation);
968
969         rcu_read_lock();
970         h = xfrm_dst_hash(net, daddr, saddr, tmpl->reqid, encap_family);
971         hlist_for_each_entry_rcu(x, net->xfrm.state_bydst + h, bydst) {
972                 if (x->props.family == encap_family &&
973                     x->props.reqid == tmpl->reqid &&
974                     (mark & x->mark.m) == x->mark.v &&
975                     x->if_id == if_id &&
976                     !(x->props.flags & XFRM_STATE_WILDRECV) &&
977                     xfrm_state_addr_check(x, daddr, saddr, encap_family) &&
978                     tmpl->mode == x->props.mode &&
979                     tmpl->id.proto == x->id.proto &&
980                     (tmpl->id.spi == x->id.spi || !tmpl->id.spi))
981                         xfrm_state_look_at(pol, x, fl, family,
982                                            &best, &acquire_in_progress, &error);
983         }
984         if (best || acquire_in_progress)
985                 goto found;
986
987         h_wildcard = xfrm_dst_hash(net, daddr, &saddr_wildcard, tmpl->reqid, encap_family);
988         hlist_for_each_entry_rcu(x, net->xfrm.state_bydst + h_wildcard, bydst) {
989                 if (x->props.family == encap_family &&
990                     x->props.reqid == tmpl->reqid &&
991                     (mark & x->mark.m) == x->mark.v &&
992                     x->if_id == if_id &&
993                     !(x->props.flags & XFRM_STATE_WILDRECV) &&
994                     xfrm_addr_equal(&x->id.daddr, daddr, encap_family) &&
995                     tmpl->mode == x->props.mode &&
996                     tmpl->id.proto == x->id.proto &&
997                     (tmpl->id.spi == x->id.spi || !tmpl->id.spi))
998                         xfrm_state_look_at(pol, x, fl, family,
999                                            &best, &acquire_in_progress, &error);
1000         }
1001
1002 found:
1003         x = best;
1004         if (!x && !error && !acquire_in_progress) {
1005                 if (tmpl->id.spi &&
1006                     (x0 = __xfrm_state_lookup(net, mark, daddr, tmpl->id.spi,
1007                                               tmpl->id.proto, encap_family)) != NULL) {
1008                         to_put = x0;
1009                         error = -EEXIST;
1010                         goto out;
1011                 }
1012
1013                 c.net = net;
1014                 /* If the KMs have no listeners (yet...), avoid allocating an SA
1015                  * for each and every packet - garbage collection might not
1016                  * handle the flood.
1017                  */
1018                 if (!km_is_alive(&c)) {
1019                         error = -ESRCH;
1020                         goto out;
1021                 }
1022
1023                 x = xfrm_state_alloc(net);
1024                 if (x == NULL) {
1025                         error = -ENOMEM;
1026                         goto out;
1027                 }
1028                 /* Initialize temporary state matching only
1029                  * to current session. */
1030                 xfrm_init_tempstate(x, fl, tmpl, daddr, saddr, family);
1031                 memcpy(&x->mark, &pol->mark, sizeof(x->mark));
1032                 x->if_id = if_id;
1033
1034                 error = security_xfrm_state_alloc_acquire(x, pol->security, fl->flowi_secid);
1035                 if (error) {
1036                         x->km.state = XFRM_STATE_DEAD;
1037                         to_put = x;
1038                         x = NULL;
1039                         goto out;
1040                 }
1041
1042                 if (km_query(x, tmpl, pol) == 0) {
1043                         spin_lock_bh(&net->xfrm.xfrm_state_lock);
1044                         x->km.state = XFRM_STATE_ACQ;
1045                         list_add(&x->km.all, &net->xfrm.state_all);
1046                         hlist_add_head_rcu(&x->bydst, net->xfrm.state_bydst + h);
1047                         h = xfrm_src_hash(net, daddr, saddr, encap_family);
1048                         hlist_add_head_rcu(&x->bysrc, net->xfrm.state_bysrc + h);
1049                         if (x->id.spi) {
1050                                 h = xfrm_spi_hash(net, &x->id.daddr, x->id.spi, x->id.proto, encap_family);
1051                                 hlist_add_head_rcu(&x->byspi, net->xfrm.state_byspi + h);
1052                         }
1053                         x->lft.hard_add_expires_seconds = net->xfrm.sysctl_acq_expires;
1054                         tasklet_hrtimer_start(&x->mtimer, ktime_set(net->xfrm.sysctl_acq_expires, 0), HRTIMER_MODE_REL);
1055                         net->xfrm.state_num++;
1056                         xfrm_hash_grow_check(net, x->bydst.next != NULL);
1057                         spin_unlock_bh(&net->xfrm.xfrm_state_lock);
1058                 } else {
1059                         x->km.state = XFRM_STATE_DEAD;
1060                         to_put = x;
1061                         x = NULL;
1062                         error = -ESRCH;
1063                 }
1064         }
1065 out:
1066         if (x) {
1067                 if (!xfrm_state_hold_rcu(x)) {
1068                         *err = -EAGAIN;
1069                         x = NULL;
1070                 }
1071         } else {
1072                 *err = acquire_in_progress ? -EAGAIN : error;
1073         }
1074         rcu_read_unlock();
1075         if (to_put)
1076                 xfrm_state_put(to_put);
1077
1078         if (read_seqcount_retry(&net->xfrm.xfrm_state_hash_generation, sequence)) {
1079                 *err = -EAGAIN;
1080                 if (x) {
1081                         xfrm_state_put(x);
1082                         x = NULL;
1083                 }
1084         }
1085
1086         return x;
1087 }
1088
1089 struct xfrm_state *
1090 xfrm_stateonly_find(struct net *net, u32 mark, u32 if_id,
1091                     xfrm_address_t *daddr, xfrm_address_t *saddr,
1092                     unsigned short family, u8 mode, u8 proto, u32 reqid)
1093 {
1094         unsigned int h;
1095         struct xfrm_state *rx = NULL, *x = NULL;
1096
1097         spin_lock_bh(&net->xfrm.xfrm_state_lock);
1098         h = xfrm_dst_hash(net, daddr, saddr, reqid, family);
1099         hlist_for_each_entry(x, net->xfrm.state_bydst+h, bydst) {
1100                 if (x->props.family == family &&
1101                     x->props.reqid == reqid &&
1102                     (mark & x->mark.m) == x->mark.v &&
1103                     x->if_id == if_id &&
1104                     !(x->props.flags & XFRM_STATE_WILDRECV) &&
1105                     xfrm_state_addr_check(x, daddr, saddr, family) &&
1106                     mode == x->props.mode &&
1107                     proto == x->id.proto &&
1108                     x->km.state == XFRM_STATE_VALID) {
1109                         rx = x;
1110                         break;
1111                 }
1112         }
1113
1114         if (rx)
1115                 xfrm_state_hold(rx);
1116         spin_unlock_bh(&net->xfrm.xfrm_state_lock);
1117
1118
1119         return rx;
1120 }
1121 EXPORT_SYMBOL(xfrm_stateonly_find);
1122
1123 struct xfrm_state *xfrm_state_lookup_byspi(struct net *net, __be32 spi,
1124                                               unsigned short family)
1125 {
1126         struct xfrm_state *x;
1127         struct xfrm_state_walk *w;
1128
1129         spin_lock_bh(&net->xfrm.xfrm_state_lock);
1130         list_for_each_entry(w, &net->xfrm.state_all, all) {
1131                 x = container_of(w, struct xfrm_state, km);
1132                 if (x->props.family != family ||
1133                         x->id.spi != spi)
1134                         continue;
1135
1136                 xfrm_state_hold(x);
1137                 spin_unlock_bh(&net->xfrm.xfrm_state_lock);
1138                 return x;
1139         }
1140         spin_unlock_bh(&net->xfrm.xfrm_state_lock);
1141         return NULL;
1142 }
1143 EXPORT_SYMBOL(xfrm_state_lookup_byspi);
1144
1145 static void __xfrm_state_insert(struct xfrm_state *x)
1146 {
1147         struct net *net = xs_net(x);
1148         unsigned int h;
1149
1150         list_add(&x->km.all, &net->xfrm.state_all);
1151
1152         h = xfrm_dst_hash(net, &x->id.daddr, &x->props.saddr,
1153                           x->props.reqid, x->props.family);
1154         hlist_add_head_rcu(&x->bydst, net->xfrm.state_bydst + h);
1155
1156         h = xfrm_src_hash(net, &x->id.daddr, &x->props.saddr, x->props.family);
1157         hlist_add_head_rcu(&x->bysrc, net->xfrm.state_bysrc + h);
1158
1159         if (x->id.spi) {
1160                 h = xfrm_spi_hash(net, &x->id.daddr, x->id.spi, x->id.proto,
1161                                   x->props.family);
1162
1163                 hlist_add_head_rcu(&x->byspi, net->xfrm.state_byspi + h);
1164         }
1165
1166         tasklet_hrtimer_start(&x->mtimer, ktime_set(1, 0), HRTIMER_MODE_REL);
1167         if (x->replay_maxage)
1168                 mod_timer(&x->rtimer, jiffies + x->replay_maxage);
1169
1170         net->xfrm.state_num++;
1171
1172         xfrm_hash_grow_check(net, x->bydst.next != NULL);
1173 }
1174
1175 /* net->xfrm.xfrm_state_lock is held */
1176 static void __xfrm_state_bump_genids(struct xfrm_state *xnew)
1177 {
1178         struct net *net = xs_net(xnew);
1179         unsigned short family = xnew->props.family;
1180         u32 reqid = xnew->props.reqid;
1181         struct xfrm_state *x;
1182         unsigned int h;
1183         u32 mark = xnew->mark.v & xnew->mark.m;
1184         u32 if_id = xnew->if_id;
1185
1186         h = xfrm_dst_hash(net, &xnew->id.daddr, &xnew->props.saddr, reqid, family);
1187         hlist_for_each_entry(x, net->xfrm.state_bydst+h, bydst) {
1188                 if (x->props.family     == family &&
1189                     x->props.reqid      == reqid &&
1190                     x->if_id            == if_id &&
1191                     (mark & x->mark.m) == x->mark.v &&
1192                     xfrm_addr_equal(&x->id.daddr, &xnew->id.daddr, family) &&
1193                     xfrm_addr_equal(&x->props.saddr, &xnew->props.saddr, family))
1194                         x->genid++;
1195         }
1196 }
1197
1198 void xfrm_state_insert(struct xfrm_state *x)
1199 {
1200         struct net *net = xs_net(x);
1201
1202         spin_lock_bh(&net->xfrm.xfrm_state_lock);
1203         __xfrm_state_bump_genids(x);
1204         __xfrm_state_insert(x);
1205         spin_unlock_bh(&net->xfrm.xfrm_state_lock);
1206 }
1207 EXPORT_SYMBOL(xfrm_state_insert);
1208
1209 /* net->xfrm.xfrm_state_lock is held */
1210 static struct xfrm_state *__find_acq_core(struct net *net,
1211                                           const struct xfrm_mark *m,
1212                                           unsigned short family, u8 mode,
1213                                           u32 reqid, u32 if_id, u8 proto,
1214                                           const xfrm_address_t *daddr,
1215                                           const xfrm_address_t *saddr,
1216                                           int create)
1217 {
1218         unsigned int h = xfrm_dst_hash(net, daddr, saddr, reqid, family);
1219         struct xfrm_state *x;
1220         u32 mark = m->v & m->m;
1221
1222         hlist_for_each_entry(x, net->xfrm.state_bydst+h, bydst) {
1223                 if (x->props.reqid  != reqid ||
1224                     x->props.mode   != mode ||
1225                     x->props.family != family ||
1226                     x->km.state     != XFRM_STATE_ACQ ||
1227                     x->id.spi       != 0 ||
1228                     x->id.proto     != proto ||
1229                     (mark & x->mark.m) != x->mark.v ||
1230                     !xfrm_addr_equal(&x->id.daddr, daddr, family) ||
1231                     !xfrm_addr_equal(&x->props.saddr, saddr, family))
1232                         continue;
1233
1234                 xfrm_state_hold(x);
1235                 return x;
1236         }
1237
1238         if (!create)
1239                 return NULL;
1240
1241         x = xfrm_state_alloc(net);
1242         if (likely(x)) {
1243                 switch (family) {
1244                 case AF_INET:
1245                         x->sel.daddr.a4 = daddr->a4;
1246                         x->sel.saddr.a4 = saddr->a4;
1247                         x->sel.prefixlen_d = 32;
1248                         x->sel.prefixlen_s = 32;
1249                         x->props.saddr.a4 = saddr->a4;
1250                         x->id.daddr.a4 = daddr->a4;
1251                         break;
1252
1253                 case AF_INET6:
1254                         x->sel.daddr.in6 = daddr->in6;
1255                         x->sel.saddr.in6 = saddr->in6;
1256                         x->sel.prefixlen_d = 128;
1257                         x->sel.prefixlen_s = 128;
1258                         x->props.saddr.in6 = saddr->in6;
1259                         x->id.daddr.in6 = daddr->in6;
1260                         break;
1261                 }
1262
1263                 x->km.state = XFRM_STATE_ACQ;
1264                 x->id.proto = proto;
1265                 x->props.family = family;
1266                 x->props.mode = mode;
1267                 x->props.reqid = reqid;
1268                 x->if_id = if_id;
1269                 x->mark.v = m->v;
1270                 x->mark.m = m->m;
1271                 x->lft.hard_add_expires_seconds = net->xfrm.sysctl_acq_expires;
1272                 xfrm_state_hold(x);
1273                 tasklet_hrtimer_start(&x->mtimer, ktime_set(net->xfrm.sysctl_acq_expires, 0), HRTIMER_MODE_REL);
1274                 list_add(&x->km.all, &net->xfrm.state_all);
1275                 hlist_add_head_rcu(&x->bydst, net->xfrm.state_bydst + h);
1276                 h = xfrm_src_hash(net, daddr, saddr, family);
1277                 hlist_add_head_rcu(&x->bysrc, net->xfrm.state_bysrc + h);
1278
1279                 net->xfrm.state_num++;
1280
1281                 xfrm_hash_grow_check(net, x->bydst.next != NULL);
1282         }
1283
1284         return x;
1285 }
1286
1287 static struct xfrm_state *__xfrm_find_acq_byseq(struct net *net, u32 mark, u32 seq);
1288
1289 int xfrm_state_add(struct xfrm_state *x)
1290 {
1291         struct net *net = xs_net(x);
1292         struct xfrm_state *x1, *to_put;
1293         int family;
1294         int err;
1295         u32 mark = x->mark.v & x->mark.m;
1296         int use_spi = xfrm_id_proto_match(x->id.proto, IPSEC_PROTO_ANY);
1297
1298         family = x->props.family;
1299
1300         to_put = NULL;
1301
1302         spin_lock_bh(&net->xfrm.xfrm_state_lock);
1303
1304         x1 = __xfrm_state_locate(x, use_spi, family);
1305         if (x1) {
1306                 to_put = x1;
1307                 x1 = NULL;
1308                 err = -EEXIST;
1309                 goto out;
1310         }
1311
1312         if (use_spi && x->km.seq) {
1313                 x1 = __xfrm_find_acq_byseq(net, mark, x->km.seq);
1314                 if (x1 && ((x1->id.proto != x->id.proto) ||
1315                     !xfrm_addr_equal(&x1->id.daddr, &x->id.daddr, family))) {
1316                         to_put = x1;
1317                         x1 = NULL;
1318                 }
1319         }
1320
1321         if (use_spi && !x1)
1322                 x1 = __find_acq_core(net, &x->mark, family, x->props.mode,
1323                                      x->props.reqid, x->if_id, x->id.proto,
1324                                      &x->id.daddr, &x->props.saddr, 0);
1325
1326         __xfrm_state_bump_genids(x);
1327         __xfrm_state_insert(x);
1328         err = 0;
1329
1330 out:
1331         spin_unlock_bh(&net->xfrm.xfrm_state_lock);
1332
1333         if (x1) {
1334                 xfrm_state_delete(x1);
1335                 xfrm_state_put(x1);
1336         }
1337
1338         if (to_put)
1339                 xfrm_state_put(to_put);
1340
1341         return err;
1342 }
1343 EXPORT_SYMBOL(xfrm_state_add);
1344
1345 #ifdef CONFIG_XFRM_MIGRATE
1346 static inline int clone_security(struct xfrm_state *x, struct xfrm_sec_ctx *security)
1347 {
1348         struct xfrm_user_sec_ctx *uctx;
1349         int size = sizeof(*uctx) + security->ctx_len;
1350         int err;
1351
1352         uctx = kmalloc(size, GFP_KERNEL);
1353         if (!uctx)
1354                 return -ENOMEM;
1355
1356         uctx->exttype = XFRMA_SEC_CTX;
1357         uctx->len = size;
1358         uctx->ctx_doi = security->ctx_doi;
1359         uctx->ctx_alg = security->ctx_alg;
1360         uctx->ctx_len = security->ctx_len;
1361         memcpy(uctx + 1, security->ctx_str, security->ctx_len);
1362         err = security_xfrm_state_alloc(x, uctx);
1363         kfree(uctx);
1364         if (err)
1365                 return err;
1366
1367         return 0;
1368 }
1369
1370 static struct xfrm_state *xfrm_state_clone(struct xfrm_state *orig,
1371                                            struct xfrm_encap_tmpl *encap)
1372 {
1373         struct net *net = xs_net(orig);
1374         struct xfrm_state *x = xfrm_state_alloc(net);
1375         if (!x)
1376                 goto out;
1377
1378         memcpy(&x->id, &orig->id, sizeof(x->id));
1379         memcpy(&x->sel, &orig->sel, sizeof(x->sel));
1380         memcpy(&x->lft, &orig->lft, sizeof(x->lft));
1381         x->props.mode = orig->props.mode;
1382         x->props.replay_window = orig->props.replay_window;
1383         x->props.reqid = orig->props.reqid;
1384         x->props.family = orig->props.family;
1385         x->props.saddr = orig->props.saddr;
1386
1387         if (orig->aalg) {
1388                 x->aalg = xfrm_algo_auth_clone(orig->aalg);
1389                 if (!x->aalg)
1390                         goto error;
1391         }
1392         x->props.aalgo = orig->props.aalgo;
1393
1394         if (orig->aead) {
1395                 x->aead = xfrm_algo_aead_clone(orig->aead);
1396                 x->geniv = orig->geniv;
1397                 if (!x->aead)
1398                         goto error;
1399         }
1400         if (orig->ealg) {
1401                 x->ealg = xfrm_algo_clone(orig->ealg);
1402                 if (!x->ealg)
1403                         goto error;
1404         }
1405         x->props.ealgo = orig->props.ealgo;
1406
1407         if (orig->calg) {
1408                 x->calg = xfrm_algo_clone(orig->calg);
1409                 if (!x->calg)
1410                         goto error;
1411         }
1412         x->props.calgo = orig->props.calgo;
1413
1414         if (encap || orig->encap) {
1415                 if (encap)
1416                         x->encap = kmemdup(encap, sizeof(*x->encap),
1417                                         GFP_KERNEL);
1418                 else
1419                         x->encap = kmemdup(orig->encap, sizeof(*x->encap),
1420                                         GFP_KERNEL);
1421
1422                 if (!x->encap)
1423                         goto error;
1424         }
1425
1426         if (orig->security)
1427                 if (clone_security(x, orig->security))
1428                         goto error;
1429
1430         if (orig->coaddr) {
1431                 x->coaddr = kmemdup(orig->coaddr, sizeof(*x->coaddr),
1432                                     GFP_KERNEL);
1433                 if (!x->coaddr)
1434                         goto error;
1435         }
1436
1437         if (orig->replay_esn) {
1438                 if (xfrm_replay_clone(x, orig))
1439                         goto error;
1440         }
1441
1442         memcpy(&x->mark, &orig->mark, sizeof(x->mark));
1443         memcpy(&x->props.smark, &orig->props.smark, sizeof(x->props.smark));
1444
1445         x->props.flags = orig->props.flags;
1446         x->props.extra_flags = orig->props.extra_flags;
1447
1448         x->if_id = orig->if_id;
1449         x->tfcpad = orig->tfcpad;
1450         x->replay_maxdiff = orig->replay_maxdiff;
1451         x->replay_maxage = orig->replay_maxage;
1452         memcpy(&x->curlft, &orig->curlft, sizeof(x->curlft));
1453         x->km.state = orig->km.state;
1454         x->km.seq = orig->km.seq;
1455         x->replay = orig->replay;
1456         x->preplay = orig->preplay;
1457
1458         return x;
1459
1460  error:
1461         xfrm_state_put(x);
1462 out:
1463         return NULL;
1464 }
1465
1466 struct xfrm_state *xfrm_migrate_state_find(struct xfrm_migrate *m, struct net *net,
1467                                                 u32 if_id)
1468 {
1469         unsigned int h;
1470         struct xfrm_state *x = NULL;
1471
1472         spin_lock_bh(&net->xfrm.xfrm_state_lock);
1473
1474         if (m->reqid) {
1475                 h = xfrm_dst_hash(net, &m->old_daddr, &m->old_saddr,
1476                                   m->reqid, m->old_family);
1477                 hlist_for_each_entry(x, net->xfrm.state_bydst+h, bydst) {
1478                         if (x->props.mode != m->mode ||
1479                             x->id.proto != m->proto)
1480                                 continue;
1481                         if (m->reqid && x->props.reqid != m->reqid)
1482                                 continue;
1483                         if (if_id != 0 && x->if_id != if_id)
1484                                 continue;
1485                         if (!xfrm_addr_equal(&x->id.daddr, &m->old_daddr,
1486                                              m->old_family) ||
1487                             !xfrm_addr_equal(&x->props.saddr, &m->old_saddr,
1488                                              m->old_family))
1489                                 continue;
1490                         xfrm_state_hold(x);
1491                         break;
1492                 }
1493         } else {
1494                 h = xfrm_src_hash(net, &m->old_daddr, &m->old_saddr,
1495                                   m->old_family);
1496                 hlist_for_each_entry(x, net->xfrm.state_bysrc+h, bysrc) {
1497                         if (x->props.mode != m->mode ||
1498                             x->id.proto != m->proto)
1499                                 continue;
1500                         if (if_id != 0 && x->if_id != if_id)
1501                                 continue;
1502                         if (!xfrm_addr_equal(&x->id.daddr, &m->old_daddr,
1503                                              m->old_family) ||
1504                             !xfrm_addr_equal(&x->props.saddr, &m->old_saddr,
1505                                              m->old_family))
1506                                 continue;
1507                         xfrm_state_hold(x);
1508                         break;
1509                 }
1510         }
1511
1512         spin_unlock_bh(&net->xfrm.xfrm_state_lock);
1513
1514         return x;
1515 }
1516 EXPORT_SYMBOL(xfrm_migrate_state_find);
1517
1518 struct xfrm_state *xfrm_state_migrate(struct xfrm_state *x,
1519                                       struct xfrm_migrate *m,
1520                                       struct xfrm_encap_tmpl *encap)
1521 {
1522         struct xfrm_state *xc;
1523
1524         xc = xfrm_state_clone(x, encap);
1525         if (!xc)
1526                 return NULL;
1527
1528         xc->props.family = m->new_family;
1529
1530         if (xfrm_init_state(xc) < 0)
1531                 goto error;
1532
1533         memcpy(&xc->id.daddr, &m->new_daddr, sizeof(xc->id.daddr));
1534         memcpy(&xc->props.saddr, &m->new_saddr, sizeof(xc->props.saddr));
1535
1536         /* add state */
1537         if (xfrm_addr_equal(&x->id.daddr, &m->new_daddr, m->new_family)) {
1538                 /* a care is needed when the destination address of the
1539                    state is to be updated as it is a part of triplet */
1540                 xfrm_state_insert(xc);
1541         } else {
1542                 if (xfrm_state_add(xc) < 0)
1543                         goto error;
1544         }
1545
1546         return xc;
1547 error:
1548         xfrm_state_put(xc);
1549         return NULL;
1550 }
1551 EXPORT_SYMBOL(xfrm_state_migrate);
1552 #endif
1553
1554 int xfrm_state_update(struct xfrm_state *x)
1555 {
1556         struct xfrm_state *x1, *to_put;
1557         int err;
1558         int use_spi = xfrm_id_proto_match(x->id.proto, IPSEC_PROTO_ANY);
1559         struct net *net = xs_net(x);
1560
1561         to_put = NULL;
1562
1563         spin_lock_bh(&net->xfrm.xfrm_state_lock);
1564         x1 = __xfrm_state_locate(x, use_spi, x->props.family);
1565
1566         err = -ESRCH;
1567         if (!x1)
1568                 goto out;
1569
1570         if (xfrm_state_kern(x1)) {
1571                 to_put = x1;
1572                 err = -EEXIST;
1573                 goto out;
1574         }
1575
1576         if (x1->km.state == XFRM_STATE_ACQ) {
1577                 __xfrm_state_insert(x);
1578                 x = NULL;
1579         }
1580         err = 0;
1581
1582 out:
1583         spin_unlock_bh(&net->xfrm.xfrm_state_lock);
1584
1585         if (to_put)
1586                 xfrm_state_put(to_put);
1587
1588         if (err)
1589                 return err;
1590
1591         if (!x) {
1592                 xfrm_state_delete(x1);
1593                 xfrm_state_put(x1);
1594                 return 0;
1595         }
1596
1597         err = -EINVAL;
1598         spin_lock_bh(&x1->lock);
1599         if (likely(x1->km.state == XFRM_STATE_VALID)) {
1600                 if (x->encap && x1->encap &&
1601                     x->encap->encap_type == x1->encap->encap_type)
1602                         memcpy(x1->encap, x->encap, sizeof(*x1->encap));
1603                 else if (x->encap || x1->encap)
1604                         goto fail;
1605
1606                 if (x->coaddr && x1->coaddr) {
1607                         memcpy(x1->coaddr, x->coaddr, sizeof(*x1->coaddr));
1608                 }
1609                 if (!use_spi && memcmp(&x1->sel, &x->sel, sizeof(x1->sel)))
1610                         memcpy(&x1->sel, &x->sel, sizeof(x1->sel));
1611                 memcpy(&x1->lft, &x->lft, sizeof(x1->lft));
1612                 x1->km.dying = 0;
1613
1614                 tasklet_hrtimer_start(&x1->mtimer, ktime_set(1, 0), HRTIMER_MODE_REL);
1615                 if (x1->curlft.use_time)
1616                         xfrm_state_check_expire(x1);
1617
1618                 if (x->props.smark.m || x->props.smark.v || x->if_id) {
1619                         spin_lock_bh(&net->xfrm.xfrm_state_lock);
1620
1621                         if (x->props.smark.m || x->props.smark.v)
1622                                 x1->props.smark = x->props.smark;
1623
1624                         if (x->if_id)
1625                                 x1->if_id = x->if_id;
1626
1627                         __xfrm_state_bump_genids(x1);
1628                         spin_unlock_bh(&net->xfrm.xfrm_state_lock);
1629                 }
1630
1631                 err = 0;
1632                 x->km.state = XFRM_STATE_DEAD;
1633                 __xfrm_state_put(x);
1634         }
1635
1636 fail:
1637         spin_unlock_bh(&x1->lock);
1638
1639         xfrm_state_put(x1);
1640
1641         return err;
1642 }
1643 EXPORT_SYMBOL(xfrm_state_update);
1644
1645 int xfrm_state_check_expire(struct xfrm_state *x)
1646 {
1647         if (!x->curlft.use_time)
1648                 x->curlft.use_time = ktime_get_real_seconds();
1649
1650         if (x->curlft.bytes >= x->lft.hard_byte_limit ||
1651             x->curlft.packets >= x->lft.hard_packet_limit) {
1652                 x->km.state = XFRM_STATE_EXPIRED;
1653                 tasklet_hrtimer_start(&x->mtimer, 0, HRTIMER_MODE_REL);
1654                 return -EINVAL;
1655         }
1656
1657         if (!x->km.dying &&
1658             (x->curlft.bytes >= x->lft.soft_byte_limit ||
1659              x->curlft.packets >= x->lft.soft_packet_limit)) {
1660                 x->km.dying = 1;
1661                 km_state_expired(x, 0, 0);
1662         }
1663         return 0;
1664 }
1665 EXPORT_SYMBOL(xfrm_state_check_expire);
1666
1667 struct xfrm_state *
1668 xfrm_state_lookup(struct net *net, u32 mark, const xfrm_address_t *daddr, __be32 spi,
1669                   u8 proto, unsigned short family)
1670 {
1671         struct xfrm_state *x;
1672
1673         rcu_read_lock();
1674         x = __xfrm_state_lookup(net, mark, daddr, spi, proto, family);
1675         rcu_read_unlock();
1676         return x;
1677 }
1678 EXPORT_SYMBOL(xfrm_state_lookup);
1679
1680 struct xfrm_state *
1681 xfrm_state_lookup_byaddr(struct net *net, u32 mark,
1682                          const xfrm_address_t *daddr, const xfrm_address_t *saddr,
1683                          u8 proto, unsigned short family)
1684 {
1685         struct xfrm_state *x;
1686
1687         spin_lock_bh(&net->xfrm.xfrm_state_lock);
1688         x = __xfrm_state_lookup_byaddr(net, mark, daddr, saddr, proto, family);
1689         spin_unlock_bh(&net->xfrm.xfrm_state_lock);
1690         return x;
1691 }
1692 EXPORT_SYMBOL(xfrm_state_lookup_byaddr);
1693
1694 struct xfrm_state *
1695 xfrm_find_acq(struct net *net, const struct xfrm_mark *mark, u8 mode, u32 reqid,
1696               u32 if_id, u8 proto, const xfrm_address_t *daddr,
1697               const xfrm_address_t *saddr, int create, unsigned short family)
1698 {
1699         struct xfrm_state *x;
1700
1701         spin_lock_bh(&net->xfrm.xfrm_state_lock);
1702         x = __find_acq_core(net, mark, family, mode, reqid, if_id, proto, daddr, saddr, create);
1703         spin_unlock_bh(&net->xfrm.xfrm_state_lock);
1704
1705         return x;
1706 }
1707 EXPORT_SYMBOL(xfrm_find_acq);
1708
1709 #ifdef CONFIG_XFRM_SUB_POLICY
1710 int
1711 xfrm_tmpl_sort(struct xfrm_tmpl **dst, struct xfrm_tmpl **src, int n,
1712                unsigned short family, struct net *net)
1713 {
1714         int i;
1715         int err = 0;
1716         struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
1717         if (!afinfo)
1718                 return -EAFNOSUPPORT;
1719
1720         spin_lock_bh(&net->xfrm.xfrm_state_lock); /*FIXME*/
1721         if (afinfo->tmpl_sort)
1722                 err = afinfo->tmpl_sort(dst, src, n);
1723         else
1724                 for (i = 0; i < n; i++)
1725                         dst[i] = src[i];
1726         spin_unlock_bh(&net->xfrm.xfrm_state_lock);
1727         rcu_read_unlock();
1728         return err;
1729 }
1730 EXPORT_SYMBOL(xfrm_tmpl_sort);
1731
1732 int
1733 xfrm_state_sort(struct xfrm_state **dst, struct xfrm_state **src, int n,
1734                 unsigned short family)
1735 {
1736         int i;
1737         int err = 0;
1738         struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
1739         struct net *net = xs_net(*src);
1740
1741         if (!afinfo)
1742                 return -EAFNOSUPPORT;
1743
1744         spin_lock_bh(&net->xfrm.xfrm_state_lock);
1745         if (afinfo->state_sort)
1746                 err = afinfo->state_sort(dst, src, n);
1747         else
1748                 for (i = 0; i < n; i++)
1749                         dst[i] = src[i];
1750         spin_unlock_bh(&net->xfrm.xfrm_state_lock);
1751         rcu_read_unlock();
1752         return err;
1753 }
1754 EXPORT_SYMBOL(xfrm_state_sort);
1755 #endif
1756
1757 /* Silly enough, but I'm lazy to build resolution list */
1758
1759 static struct xfrm_state *__xfrm_find_acq_byseq(struct net *net, u32 mark, u32 seq)
1760 {
1761         int i;
1762
1763         for (i = 0; i <= net->xfrm.state_hmask; i++) {
1764                 struct xfrm_state *x;
1765
1766                 hlist_for_each_entry(x, net->xfrm.state_bydst+i, bydst) {
1767                         if (x->km.seq == seq &&
1768                             (mark & x->mark.m) == x->mark.v &&
1769                             x->km.state == XFRM_STATE_ACQ) {
1770                                 xfrm_state_hold(x);
1771                                 return x;
1772                         }
1773                 }
1774         }
1775         return NULL;
1776 }
1777
1778 struct xfrm_state *xfrm_find_acq_byseq(struct net *net, u32 mark, u32 seq)
1779 {
1780         struct xfrm_state *x;
1781
1782         spin_lock_bh(&net->xfrm.xfrm_state_lock);
1783         x = __xfrm_find_acq_byseq(net, mark, seq);
1784         spin_unlock_bh(&net->xfrm.xfrm_state_lock);
1785         return x;
1786 }
1787 EXPORT_SYMBOL(xfrm_find_acq_byseq);
1788
1789 u32 xfrm_get_acqseq(void)
1790 {
1791         u32 res;
1792         static atomic_t acqseq;
1793
1794         do {
1795                 res = atomic_inc_return(&acqseq);
1796         } while (!res);
1797
1798         return res;
1799 }
1800 EXPORT_SYMBOL(xfrm_get_acqseq);
1801
1802 int verify_spi_info(u8 proto, u32 min, u32 max)
1803 {
1804         switch (proto) {
1805         case IPPROTO_AH:
1806         case IPPROTO_ESP:
1807                 break;
1808
1809         case IPPROTO_COMP:
1810                 /* IPCOMP spi is 16-bits. */
1811                 if (max >= 0x10000)
1812                         return -EINVAL;
1813                 break;
1814
1815         default:
1816                 return -EINVAL;
1817         }
1818
1819         if (min > max)
1820                 return -EINVAL;
1821
1822         return 0;
1823 }
1824 EXPORT_SYMBOL(verify_spi_info);
1825
1826 int xfrm_alloc_spi(struct xfrm_state *x, u32 low, u32 high)
1827 {
1828         struct net *net = xs_net(x);
1829         unsigned int h;
1830         struct xfrm_state *x0;
1831         int err = -ENOENT;
1832         __be32 minspi = htonl(low);
1833         __be32 maxspi = htonl(high);
1834         __be32 newspi = 0;
1835         u32 mark = x->mark.v & x->mark.m;
1836
1837         spin_lock_bh(&x->lock);
1838         if (x->km.state == XFRM_STATE_DEAD)
1839                 goto unlock;
1840
1841         err = 0;
1842         if (x->id.spi)
1843                 goto unlock;
1844
1845         err = -ENOENT;
1846
1847         if (minspi == maxspi) {
1848                 x0 = xfrm_state_lookup(net, mark, &x->id.daddr, minspi, x->id.proto, x->props.family);
1849                 if (x0) {
1850                         xfrm_state_put(x0);
1851                         goto unlock;
1852                 }
1853                 newspi = minspi;
1854         } else {
1855                 u32 spi = 0;
1856                 for (h = 0; h < high-low+1; h++) {
1857                         spi = low + prandom_u32()%(high-low+1);
1858                         x0 = xfrm_state_lookup(net, mark, &x->id.daddr, htonl(spi), x->id.proto, x->props.family);
1859                         if (x0 == NULL) {
1860                                 newspi = htonl(spi);
1861                                 break;
1862                         }
1863                         xfrm_state_put(x0);
1864                 }
1865         }
1866         if (newspi) {
1867                 spin_lock_bh(&net->xfrm.xfrm_state_lock);
1868                 x->id.spi = newspi;
1869                 h = xfrm_spi_hash(net, &x->id.daddr, x->id.spi, x->id.proto, x->props.family);
1870                 hlist_add_head_rcu(&x->byspi, net->xfrm.state_byspi + h);
1871                 spin_unlock_bh(&net->xfrm.xfrm_state_lock);
1872
1873                 err = 0;
1874         }
1875
1876 unlock:
1877         spin_unlock_bh(&x->lock);
1878
1879         return err;
1880 }
1881 EXPORT_SYMBOL(xfrm_alloc_spi);
1882
1883 static bool __xfrm_state_filter_match(struct xfrm_state *x,
1884                                       struct xfrm_address_filter *filter)
1885 {
1886         if (filter) {
1887                 if ((filter->family == AF_INET ||
1888                      filter->family == AF_INET6) &&
1889                     x->props.family != filter->family)
1890                         return false;
1891
1892                 return addr_match(&x->props.saddr, &filter->saddr,
1893                                   filter->splen) &&
1894                        addr_match(&x->id.daddr, &filter->daddr,
1895                                   filter->dplen);
1896         }
1897         return true;
1898 }
1899
1900 int xfrm_state_walk(struct net *net, struct xfrm_state_walk *walk,
1901                     int (*func)(struct xfrm_state *, int, void*),
1902                     void *data)
1903 {
1904         struct xfrm_state *state;
1905         struct xfrm_state_walk *x;
1906         int err = 0;
1907
1908         if (walk->seq != 0 && list_empty(&walk->all))
1909                 return 0;
1910
1911         spin_lock_bh(&net->xfrm.xfrm_state_lock);
1912         if (list_empty(&walk->all))
1913                 x = list_first_entry(&net->xfrm.state_all, struct xfrm_state_walk, all);
1914         else
1915                 x = list_first_entry(&walk->all, struct xfrm_state_walk, all);
1916         list_for_each_entry_from(x, &net->xfrm.state_all, all) {
1917                 if (x->state == XFRM_STATE_DEAD)
1918                         continue;
1919                 state = container_of(x, struct xfrm_state, km);
1920                 if (!xfrm_id_proto_match(state->id.proto, walk->proto))
1921                         continue;
1922                 if (!__xfrm_state_filter_match(state, walk->filter))
1923                         continue;
1924                 err = func(state, walk->seq, data);
1925                 if (err) {
1926                         list_move_tail(&walk->all, &x->all);
1927                         goto out;
1928                 }
1929                 walk->seq++;
1930         }
1931         if (walk->seq == 0) {
1932                 err = -ENOENT;
1933                 goto out;
1934         }
1935         list_del_init(&walk->all);
1936 out:
1937         spin_unlock_bh(&net->xfrm.xfrm_state_lock);
1938         return err;
1939 }
1940 EXPORT_SYMBOL(xfrm_state_walk);
1941
1942 void xfrm_state_walk_init(struct xfrm_state_walk *walk, u8 proto,
1943                           struct xfrm_address_filter *filter)
1944 {
1945         INIT_LIST_HEAD(&walk->all);
1946         walk->proto = proto;
1947         walk->state = XFRM_STATE_DEAD;
1948         walk->seq = 0;
1949         walk->filter = filter;
1950 }
1951 EXPORT_SYMBOL(xfrm_state_walk_init);
1952
1953 void xfrm_state_walk_done(struct xfrm_state_walk *walk, struct net *net)
1954 {
1955         kfree(walk->filter);
1956
1957         if (list_empty(&walk->all))
1958                 return;
1959
1960         spin_lock_bh(&net->xfrm.xfrm_state_lock);
1961         list_del(&walk->all);
1962         spin_unlock_bh(&net->xfrm.xfrm_state_lock);
1963 }
1964 EXPORT_SYMBOL(xfrm_state_walk_done);
1965
1966 static void xfrm_replay_timer_handler(struct timer_list *t)
1967 {
1968         struct xfrm_state *x = from_timer(x, t, rtimer);
1969
1970         spin_lock(&x->lock);
1971
1972         if (x->km.state == XFRM_STATE_VALID) {
1973                 if (xfrm_aevent_is_on(xs_net(x)))
1974                         x->repl->notify(x, XFRM_REPLAY_TIMEOUT);
1975                 else
1976                         x->xflags |= XFRM_TIME_DEFER;
1977         }
1978
1979         spin_unlock(&x->lock);
1980 }
1981
1982 static LIST_HEAD(xfrm_km_list);
1983
1984 void km_policy_notify(struct xfrm_policy *xp, int dir, const struct km_event *c)
1985 {
1986         struct xfrm_mgr *km;
1987
1988         rcu_read_lock();
1989         list_for_each_entry_rcu(km, &xfrm_km_list, list)
1990                 if (km->notify_policy)
1991                         km->notify_policy(xp, dir, c);
1992         rcu_read_unlock();
1993 }
1994
1995 void km_state_notify(struct xfrm_state *x, const struct km_event *c)
1996 {
1997         struct xfrm_mgr *km;
1998         rcu_read_lock();
1999         list_for_each_entry_rcu(km, &xfrm_km_list, list)
2000                 if (km->notify)
2001                         km->notify(x, c);
2002         rcu_read_unlock();
2003 }
2004
2005 EXPORT_SYMBOL(km_policy_notify);
2006 EXPORT_SYMBOL(km_state_notify);
2007
2008 void km_state_expired(struct xfrm_state *x, int hard, u32 portid)
2009 {
2010         struct km_event c;
2011
2012         c.data.hard = hard;
2013         c.portid = portid;
2014         c.event = XFRM_MSG_EXPIRE;
2015         km_state_notify(x, &c);
2016 }
2017
2018 EXPORT_SYMBOL(km_state_expired);
2019 /*
2020  * We send to all registered managers regardless of failure
2021  * We are happy with one success
2022 */
2023 int km_query(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *pol)
2024 {
2025         int err = -EINVAL, acqret;
2026         struct xfrm_mgr *km;
2027
2028         rcu_read_lock();
2029         list_for_each_entry_rcu(km, &xfrm_km_list, list) {
2030                 acqret = km->acquire(x, t, pol);
2031                 if (!acqret)
2032                         err = acqret;
2033         }
2034         rcu_read_unlock();
2035         return err;
2036 }
2037 EXPORT_SYMBOL(km_query);
2038
2039 int km_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr, __be16 sport)
2040 {
2041         int err = -EINVAL;
2042         struct xfrm_mgr *km;
2043
2044         rcu_read_lock();
2045         list_for_each_entry_rcu(km, &xfrm_km_list, list) {
2046                 if (km->new_mapping)
2047                         err = km->new_mapping(x, ipaddr, sport);
2048                 if (!err)
2049                         break;
2050         }
2051         rcu_read_unlock();
2052         return err;
2053 }
2054 EXPORT_SYMBOL(km_new_mapping);
2055
2056 void km_policy_expired(struct xfrm_policy *pol, int dir, int hard, u32 portid)
2057 {
2058         struct km_event c;
2059
2060         c.data.hard = hard;
2061         c.portid = portid;
2062         c.event = XFRM_MSG_POLEXPIRE;
2063         km_policy_notify(pol, dir, &c);
2064 }
2065 EXPORT_SYMBOL(km_policy_expired);
2066
2067 #ifdef CONFIG_XFRM_MIGRATE
2068 int km_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
2069                const struct xfrm_migrate *m, int num_migrate,
2070                const struct xfrm_kmaddress *k,
2071                const struct xfrm_encap_tmpl *encap)
2072 {
2073         int err = -EINVAL;
2074         int ret;
2075         struct xfrm_mgr *km;
2076
2077         rcu_read_lock();
2078         list_for_each_entry_rcu(km, &xfrm_km_list, list) {
2079                 if (km->migrate) {
2080                         ret = km->migrate(sel, dir, type, m, num_migrate, k,
2081                                           encap);
2082                         if (!ret)
2083                                 err = ret;
2084                 }
2085         }
2086         rcu_read_unlock();
2087         return err;
2088 }
2089 EXPORT_SYMBOL(km_migrate);
2090 #endif
2091
2092 int km_report(struct net *net, u8 proto, struct xfrm_selector *sel, xfrm_address_t *addr)
2093 {
2094         int err = -EINVAL;
2095         int ret;
2096         struct xfrm_mgr *km;
2097
2098         rcu_read_lock();
2099         list_for_each_entry_rcu(km, &xfrm_km_list, list) {
2100                 if (km->report) {
2101                         ret = km->report(net, proto, sel, addr);
2102                         if (!ret)
2103                                 err = ret;
2104                 }
2105         }
2106         rcu_read_unlock();
2107         return err;
2108 }
2109 EXPORT_SYMBOL(km_report);
2110
2111 bool km_is_alive(const struct km_event *c)
2112 {
2113         struct xfrm_mgr *km;
2114         bool is_alive = false;
2115
2116         rcu_read_lock();
2117         list_for_each_entry_rcu(km, &xfrm_km_list, list) {
2118                 if (km->is_alive && km->is_alive(c)) {
2119                         is_alive = true;
2120                         break;
2121                 }
2122         }
2123         rcu_read_unlock();
2124
2125         return is_alive;
2126 }
2127 EXPORT_SYMBOL(km_is_alive);
2128
2129 int xfrm_user_policy(struct sock *sk, int optname, u8 __user *optval, int optlen)
2130 {
2131         int err;
2132         u8 *data;
2133         struct xfrm_mgr *km;
2134         struct xfrm_policy *pol = NULL;
2135
2136 #ifdef CONFIG_COMPAT
2137         if (in_compat_syscall())
2138                 return -EOPNOTSUPP;
2139 #endif
2140
2141         if (!optval && !optlen) {
2142                 xfrm_sk_policy_insert(sk, XFRM_POLICY_IN, NULL);
2143                 xfrm_sk_policy_insert(sk, XFRM_POLICY_OUT, NULL);
2144                 __sk_dst_reset(sk);
2145                 return 0;
2146         }
2147
2148         if (optlen <= 0 || optlen > PAGE_SIZE)
2149                 return -EMSGSIZE;
2150
2151         data = memdup_user(optval, optlen);
2152         if (IS_ERR(data))
2153                 return PTR_ERR(data);
2154
2155         err = -EINVAL;
2156         rcu_read_lock();
2157         list_for_each_entry_rcu(km, &xfrm_km_list, list) {
2158                 pol = km->compile_policy(sk, optname, data,
2159                                          optlen, &err);
2160                 if (err >= 0)
2161                         break;
2162         }
2163         rcu_read_unlock();
2164
2165         if (err >= 0) {
2166                 xfrm_sk_policy_insert(sk, err, pol);
2167                 xfrm_pol_put(pol);
2168                 __sk_dst_reset(sk);
2169                 err = 0;
2170         }
2171
2172         kfree(data);
2173         return err;
2174 }
2175 EXPORT_SYMBOL(xfrm_user_policy);
2176
2177 static DEFINE_SPINLOCK(xfrm_km_lock);
2178
2179 int xfrm_register_km(struct xfrm_mgr *km)
2180 {
2181         spin_lock_bh(&xfrm_km_lock);
2182         list_add_tail_rcu(&km->list, &xfrm_km_list);
2183         spin_unlock_bh(&xfrm_km_lock);
2184         return 0;
2185 }
2186 EXPORT_SYMBOL(xfrm_register_km);
2187
2188 int xfrm_unregister_km(struct xfrm_mgr *km)
2189 {
2190         spin_lock_bh(&xfrm_km_lock);
2191         list_del_rcu(&km->list);
2192         spin_unlock_bh(&xfrm_km_lock);
2193         synchronize_rcu();
2194         return 0;
2195 }
2196 EXPORT_SYMBOL(xfrm_unregister_km);
2197
2198 int xfrm_state_register_afinfo(struct xfrm_state_afinfo *afinfo)
2199 {
2200         int err = 0;
2201
2202         if (WARN_ON(afinfo->family >= NPROTO))
2203                 return -EAFNOSUPPORT;
2204
2205         spin_lock_bh(&xfrm_state_afinfo_lock);
2206         if (unlikely(xfrm_state_afinfo[afinfo->family] != NULL))
2207                 err = -EEXIST;
2208         else
2209                 rcu_assign_pointer(xfrm_state_afinfo[afinfo->family], afinfo);
2210         spin_unlock_bh(&xfrm_state_afinfo_lock);
2211         return err;
2212 }
2213 EXPORT_SYMBOL(xfrm_state_register_afinfo);
2214
2215 int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo)
2216 {
2217         int err = 0, family = afinfo->family;
2218
2219         if (WARN_ON(family >= NPROTO))
2220                 return -EAFNOSUPPORT;
2221
2222         spin_lock_bh(&xfrm_state_afinfo_lock);
2223         if (likely(xfrm_state_afinfo[afinfo->family] != NULL)) {
2224                 if (rcu_access_pointer(xfrm_state_afinfo[family]) != afinfo)
2225                         err = -EINVAL;
2226                 else
2227                         RCU_INIT_POINTER(xfrm_state_afinfo[afinfo->family], NULL);
2228         }
2229         spin_unlock_bh(&xfrm_state_afinfo_lock);
2230         synchronize_rcu();
2231         return err;
2232 }
2233 EXPORT_SYMBOL(xfrm_state_unregister_afinfo);
2234
2235 struct xfrm_state_afinfo *xfrm_state_afinfo_get_rcu(unsigned int family)
2236 {
2237         if (unlikely(family >= NPROTO))
2238                 return NULL;
2239
2240         return rcu_dereference(xfrm_state_afinfo[family]);
2241 }
2242
2243 struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned int family)
2244 {
2245         struct xfrm_state_afinfo *afinfo;
2246         if (unlikely(family >= NPROTO))
2247                 return NULL;
2248         rcu_read_lock();
2249         afinfo = rcu_dereference(xfrm_state_afinfo[family]);
2250         if (unlikely(!afinfo))
2251                 rcu_read_unlock();
2252         return afinfo;
2253 }
2254
2255 void xfrm_flush_gc(void)
2256 {
2257         flush_work(&xfrm_state_gc_work);
2258 }
2259 EXPORT_SYMBOL(xfrm_flush_gc);
2260
2261 /* Temporarily located here until net/xfrm/xfrm_tunnel.c is created */
2262 void xfrm_state_delete_tunnel(struct xfrm_state *x)
2263 {
2264         if (x->tunnel) {
2265                 struct xfrm_state *t = x->tunnel;
2266
2267                 if (atomic_read(&t->tunnel_users) == 2)
2268                         xfrm_state_delete(t);
2269                 atomic_dec(&t->tunnel_users);
2270                 xfrm_state_put_sync(t);
2271                 x->tunnel = NULL;
2272         }
2273 }
2274 EXPORT_SYMBOL(xfrm_state_delete_tunnel);
2275
2276 int xfrm_state_mtu(struct xfrm_state *x, int mtu)
2277 {
2278         const struct xfrm_type *type = READ_ONCE(x->type);
2279
2280         if (x->km.state == XFRM_STATE_VALID &&
2281             type && type->get_mtu)
2282                 return type->get_mtu(x, mtu);
2283
2284         return mtu - x->props.header_len;
2285 }
2286
2287 int __xfrm_init_state(struct xfrm_state *x, bool init_replay, bool offload)
2288 {
2289         struct xfrm_state_afinfo *afinfo;
2290         struct xfrm_mode *inner_mode;
2291         int family = x->props.family;
2292         int err;
2293
2294         err = -EAFNOSUPPORT;
2295         afinfo = xfrm_state_get_afinfo(family);
2296         if (!afinfo)
2297                 goto error;
2298
2299         err = 0;
2300         if (afinfo->init_flags)
2301                 err = afinfo->init_flags(x);
2302
2303         rcu_read_unlock();
2304
2305         if (err)
2306                 goto error;
2307
2308         err = -EPROTONOSUPPORT;
2309
2310         if (x->sel.family != AF_UNSPEC) {
2311                 inner_mode = xfrm_get_mode(x->props.mode, x->sel.family);
2312                 if (inner_mode == NULL)
2313                         goto error;
2314
2315                 if (!(inner_mode->flags & XFRM_MODE_FLAG_TUNNEL) &&
2316                     family != x->sel.family) {
2317                         xfrm_put_mode(inner_mode);
2318                         goto error;
2319                 }
2320
2321                 x->inner_mode = inner_mode;
2322         } else {
2323                 struct xfrm_mode *inner_mode_iaf;
2324                 int iafamily = AF_INET;
2325
2326                 inner_mode = xfrm_get_mode(x->props.mode, x->props.family);
2327                 if (inner_mode == NULL)
2328                         goto error;
2329
2330                 if (!(inner_mode->flags & XFRM_MODE_FLAG_TUNNEL)) {
2331                         xfrm_put_mode(inner_mode);
2332                         goto error;
2333                 }
2334                 x->inner_mode = inner_mode;
2335
2336                 if (x->props.family == AF_INET)
2337                         iafamily = AF_INET6;
2338
2339                 inner_mode_iaf = xfrm_get_mode(x->props.mode, iafamily);
2340                 if (inner_mode_iaf) {
2341                         if (inner_mode_iaf->flags & XFRM_MODE_FLAG_TUNNEL)
2342                                 x->inner_mode_iaf = inner_mode_iaf;
2343                         else
2344                                 xfrm_put_mode(inner_mode_iaf);
2345                 }
2346         }
2347
2348         x->type = xfrm_get_type(x->id.proto, family);
2349         if (x->type == NULL)
2350                 goto error;
2351
2352         x->type_offload = xfrm_get_type_offload(x->id.proto, family, offload);
2353
2354         err = x->type->init_state(x);
2355         if (err)
2356                 goto error;
2357
2358         x->outer_mode = xfrm_get_mode(x->props.mode, family);
2359         if (x->outer_mode == NULL) {
2360                 err = -EPROTONOSUPPORT;
2361                 goto error;
2362         }
2363
2364         if (init_replay) {
2365                 err = xfrm_init_replay(x);
2366                 if (err)
2367                         goto error;
2368         }
2369
2370 error:
2371         return err;
2372 }
2373
2374 EXPORT_SYMBOL(__xfrm_init_state);
2375
2376 int xfrm_init_state(struct xfrm_state *x)
2377 {
2378         int err;
2379
2380         err = __xfrm_init_state(x, true, false);
2381         if (!err)
2382                 x->km.state = XFRM_STATE_VALID;
2383
2384         return err;
2385 }
2386
2387 EXPORT_SYMBOL(xfrm_init_state);
2388
2389 int __net_init xfrm_state_init(struct net *net)
2390 {
2391         unsigned int sz;
2392
2393         if (net_eq(net, &init_net))
2394                 xfrm_state_cache = KMEM_CACHE(xfrm_state,
2395                                               SLAB_HWCACHE_ALIGN | SLAB_PANIC);
2396
2397         INIT_LIST_HEAD(&net->xfrm.state_all);
2398
2399         sz = sizeof(struct hlist_head) * 8;
2400
2401         net->xfrm.state_bydst = xfrm_hash_alloc(sz);
2402         if (!net->xfrm.state_bydst)
2403                 goto out_bydst;
2404         net->xfrm.state_bysrc = xfrm_hash_alloc(sz);
2405         if (!net->xfrm.state_bysrc)
2406                 goto out_bysrc;
2407         net->xfrm.state_byspi = xfrm_hash_alloc(sz);
2408         if (!net->xfrm.state_byspi)
2409                 goto out_byspi;
2410         net->xfrm.state_hmask = ((sz / sizeof(struct hlist_head)) - 1);
2411
2412         net->xfrm.state_num = 0;
2413         INIT_WORK(&net->xfrm.state_hash_work, xfrm_hash_resize);
2414         spin_lock_init(&net->xfrm.xfrm_state_lock);
2415         seqcount_init(&net->xfrm.xfrm_state_hash_generation);
2416         return 0;
2417
2418 out_byspi:
2419         xfrm_hash_free(net->xfrm.state_bysrc, sz);
2420 out_bysrc:
2421         xfrm_hash_free(net->xfrm.state_bydst, sz);
2422 out_bydst:
2423         return -ENOMEM;
2424 }
2425
2426 void xfrm_state_fini(struct net *net)
2427 {
2428         unsigned int sz;
2429
2430         flush_work(&net->xfrm.state_hash_work);
2431         flush_work(&xfrm_state_gc_work);
2432         xfrm_state_flush(net, 0, false, true);
2433
2434         WARN_ON(!list_empty(&net->xfrm.state_all));
2435
2436         sz = (net->xfrm.state_hmask + 1) * sizeof(struct hlist_head);
2437         WARN_ON(!hlist_empty(net->xfrm.state_byspi));
2438         xfrm_hash_free(net->xfrm.state_byspi, sz);
2439         WARN_ON(!hlist_empty(net->xfrm.state_bysrc));
2440         xfrm_hash_free(net->xfrm.state_bysrc, sz);
2441         WARN_ON(!hlist_empty(net->xfrm.state_bydst));
2442         xfrm_hash_free(net->xfrm.state_bydst, sz);
2443 }
2444
2445 #ifdef CONFIG_AUDITSYSCALL
2446 static void xfrm_audit_helper_sainfo(struct xfrm_state *x,
2447                                      struct audit_buffer *audit_buf)
2448 {
2449         struct xfrm_sec_ctx *ctx = x->security;
2450         u32 spi = ntohl(x->id.spi);
2451
2452         if (ctx)
2453                 audit_log_format(audit_buf, " sec_alg=%u sec_doi=%u sec_obj=%s",
2454                                  ctx->ctx_alg, ctx->ctx_doi, ctx->ctx_str);
2455
2456         switch (x->props.family) {
2457         case AF_INET:
2458                 audit_log_format(audit_buf, " src=%pI4 dst=%pI4",
2459                                  &x->props.saddr.a4, &x->id.daddr.a4);
2460                 break;
2461         case AF_INET6:
2462                 audit_log_format(audit_buf, " src=%pI6 dst=%pI6",
2463                                  x->props.saddr.a6, x->id.daddr.a6);
2464                 break;
2465         }
2466
2467         audit_log_format(audit_buf, " spi=%u(0x%x)", spi, spi);
2468 }
2469
2470 static void xfrm_audit_helper_pktinfo(struct sk_buff *skb, u16 family,
2471                                       struct audit_buffer *audit_buf)
2472 {
2473         const struct iphdr *iph4;
2474         const struct ipv6hdr *iph6;
2475
2476         switch (family) {
2477         case AF_INET:
2478                 iph4 = ip_hdr(skb);
2479                 audit_log_format(audit_buf, " src=%pI4 dst=%pI4",
2480                                  &iph4->saddr, &iph4->daddr);
2481                 break;
2482         case AF_INET6:
2483                 iph6 = ipv6_hdr(skb);
2484                 audit_log_format(audit_buf,
2485                                  " src=%pI6 dst=%pI6 flowlbl=0x%x%02x%02x",
2486                                  &iph6->saddr, &iph6->daddr,
2487                                  iph6->flow_lbl[0] & 0x0f,
2488                                  iph6->flow_lbl[1],
2489                                  iph6->flow_lbl[2]);
2490                 break;
2491         }
2492 }
2493
2494 void xfrm_audit_state_add(struct xfrm_state *x, int result, bool task_valid)
2495 {
2496         struct audit_buffer *audit_buf;
2497
2498         audit_buf = xfrm_audit_start("SAD-add");
2499         if (audit_buf == NULL)
2500                 return;
2501         xfrm_audit_helper_usrinfo(task_valid, audit_buf);
2502         xfrm_audit_helper_sainfo(x, audit_buf);
2503         audit_log_format(audit_buf, " res=%u", result);
2504         audit_log_end(audit_buf);
2505 }
2506 EXPORT_SYMBOL_GPL(xfrm_audit_state_add);
2507
2508 void xfrm_audit_state_delete(struct xfrm_state *x, int result, bool task_valid)
2509 {
2510         struct audit_buffer *audit_buf;
2511
2512         audit_buf = xfrm_audit_start("SAD-delete");
2513         if (audit_buf == NULL)
2514                 return;
2515         xfrm_audit_helper_usrinfo(task_valid, audit_buf);
2516         xfrm_audit_helper_sainfo(x, audit_buf);
2517         audit_log_format(audit_buf, " res=%u", result);
2518         audit_log_end(audit_buf);
2519 }
2520 EXPORT_SYMBOL_GPL(xfrm_audit_state_delete);
2521
2522 void xfrm_audit_state_replay_overflow(struct xfrm_state *x,
2523                                       struct sk_buff *skb)
2524 {
2525         struct audit_buffer *audit_buf;
2526         u32 spi;
2527
2528         audit_buf = xfrm_audit_start("SA-replay-overflow");
2529         if (audit_buf == NULL)
2530                 return;
2531         xfrm_audit_helper_pktinfo(skb, x->props.family, audit_buf);
2532         /* don't record the sequence number because it's inherent in this kind
2533          * of audit message */
2534         spi = ntohl(x->id.spi);
2535         audit_log_format(audit_buf, " spi=%u(0x%x)", spi, spi);
2536         audit_log_end(audit_buf);
2537 }
2538 EXPORT_SYMBOL_GPL(xfrm_audit_state_replay_overflow);
2539
2540 void xfrm_audit_state_replay(struct xfrm_state *x,
2541                              struct sk_buff *skb, __be32 net_seq)
2542 {
2543         struct audit_buffer *audit_buf;
2544         u32 spi;
2545
2546         audit_buf = xfrm_audit_start("SA-replayed-pkt");
2547         if (audit_buf == NULL)
2548                 return;
2549         xfrm_audit_helper_pktinfo(skb, x->props.family, audit_buf);
2550         spi = ntohl(x->id.spi);
2551         audit_log_format(audit_buf, " spi=%u(0x%x) seqno=%u",
2552                          spi, spi, ntohl(net_seq));
2553         audit_log_end(audit_buf);
2554 }
2555 EXPORT_SYMBOL_GPL(xfrm_audit_state_replay);
2556
2557 void xfrm_audit_state_notfound_simple(struct sk_buff *skb, u16 family)
2558 {
2559         struct audit_buffer *audit_buf;
2560
2561         audit_buf = xfrm_audit_start("SA-notfound");
2562         if (audit_buf == NULL)
2563                 return;
2564         xfrm_audit_helper_pktinfo(skb, family, audit_buf);
2565         audit_log_end(audit_buf);
2566 }
2567 EXPORT_SYMBOL_GPL(xfrm_audit_state_notfound_simple);
2568
2569 void xfrm_audit_state_notfound(struct sk_buff *skb, u16 family,
2570                                __be32 net_spi, __be32 net_seq)
2571 {
2572         struct audit_buffer *audit_buf;
2573         u32 spi;
2574
2575         audit_buf = xfrm_audit_start("SA-notfound");
2576         if (audit_buf == NULL)
2577                 return;
2578         xfrm_audit_helper_pktinfo(skb, family, audit_buf);
2579         spi = ntohl(net_spi);
2580         audit_log_format(audit_buf, " spi=%u(0x%x) seqno=%u",
2581                          spi, spi, ntohl(net_seq));
2582         audit_log_end(audit_buf);
2583 }
2584 EXPORT_SYMBOL_GPL(xfrm_audit_state_notfound);
2585
2586 void xfrm_audit_state_icvfail(struct xfrm_state *x,
2587                               struct sk_buff *skb, u8 proto)
2588 {
2589         struct audit_buffer *audit_buf;
2590         __be32 net_spi;
2591         __be32 net_seq;
2592
2593         audit_buf = xfrm_audit_start("SA-icv-failure");
2594         if (audit_buf == NULL)
2595                 return;
2596         xfrm_audit_helper_pktinfo(skb, x->props.family, audit_buf);
2597         if (xfrm_parse_spi(skb, proto, &net_spi, &net_seq) == 0) {
2598                 u32 spi = ntohl(net_spi);
2599                 audit_log_format(audit_buf, " spi=%u(0x%x) seqno=%u",
2600                                  spi, spi, ntohl(net_seq));
2601         }
2602         audit_log_end(audit_buf);
2603 }
2604 EXPORT_SYMBOL_GPL(xfrm_audit_state_icvfail);
2605 #endif /* CONFIG_AUDITSYSCALL */