GNU Linux-libre 4.19.286-gnu1
net/ipv6/ip6_flowlabel.c
1 /*
2  *      ip6_flowlabel.c         IPv6 flowlabel manager.
3  *
4  *      This program is free software; you can redistribute it and/or
5  *      modify it under the terms of the GNU General Public License
6  *      as published by the Free Software Foundation; either version
7  *      2 of the License, or (at your option) any later version.
8  *
9  *      Authors:        Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
10  */
11
12 #include <linux/capability.h>
13 #include <linux/errno.h>
14 #include <linux/types.h>
15 #include <linux/socket.h>
16 #include <linux/net.h>
17 #include <linux/netdevice.h>
18 #include <linux/in6.h>
19 #include <linux/proc_fs.h>
20 #include <linux/seq_file.h>
21 #include <linux/slab.h>
22 #include <linux/export.h>
23 #include <linux/pid_namespace.h>
24
25 #include <net/net_namespace.h>
26 #include <net/sock.h>
27
28 #include <net/ipv6.h>
29 #include <net/rawv6.h>
30 #include <net/transp_v6.h>
31
32 #include <linux/uaccess.h>
33
34 #define FL_MIN_LINGER   6       /* Minimal linger timeout, in seconds. Set to the
35                                    6s specified in the old IPv6 RFC; still a
36                                    reasonable value. */
37 #define FL_MAX_LINGER   150     /* Maximal linger timeout */
38
39 /* FL hash table */
40
41 #define FL_MAX_PER_SOCK 32
42 #define FL_MAX_SIZE     4096
43 #define FL_HASH_MASK    255
44 #define FL_HASH(l)      (ntohl(l)&FL_HASH_MASK)
45
46 static atomic_t fl_size = ATOMIC_INIT(0);
47 static struct ip6_flowlabel __rcu *fl_ht[FL_HASH_MASK+1];
48
49 static void ip6_fl_gc(struct timer_list *unused);
50 static DEFINE_TIMER(ip6_fl_gc_timer, ip6_fl_gc);
51
52 /* FL hash table lock: it protects only the GC */
53
54 static DEFINE_SPINLOCK(ip6_fl_lock);
55
56 /* Lock for the per-socket flowlabel lists (np->ipv6_fl_list) */
57
58 static DEFINE_SPINLOCK(ip6_sk_fl_lock);
59
60 #define for_each_fl_rcu(hash, fl)                               \
61         for (fl = rcu_dereference_bh(fl_ht[(hash)]);            \
62              fl != NULL;                                        \
63              fl = rcu_dereference_bh(fl->next))
64 #define for_each_fl_continue_rcu(fl)                            \
65         for (fl = rcu_dereference_bh(fl->next);                 \
66              fl != NULL;                                        \
67              fl = rcu_dereference_bh(fl->next))
68
69 #define for_each_sk_fl_rcu(np, sfl)                             \
70         for (sfl = rcu_dereference_bh(np->ipv6_fl_list);        \
71              sfl != NULL;                                       \
72              sfl = rcu_dereference_bh(sfl->next))
73
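    /* Hash-table lookup by label within a netns; no reference is taken.
     * Callers hold rcu_read_lock_bh() or ip6_fl_lock.
     */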
74 static inline struct ip6_flowlabel *__fl_lookup(struct net *net, __be32 label)
75 {
76         struct ip6_flowlabel *fl;
77
78         for_each_fl_rcu(FL_HASH(label), fl) {
79                 if (fl->label == label && net_eq(fl->fl_net, net))
80                         return fl;
81         }
82         return NULL;
83 }
84
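    /* As __fl_lookup(), but also takes a user reference; an entry whose
     * refcount has already dropped to zero (pending release) is treated
     * as not found.
     */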
85 static struct ip6_flowlabel *fl_lookup(struct net *net, __be32 label)
86 {
87         struct ip6_flowlabel *fl;
88
89         rcu_read_lock_bh();
90         fl = __fl_lookup(net, label);
91         if (fl && !atomic_inc_not_zero(&fl->users))
92                 fl = NULL;
93         rcu_read_unlock_bh();
94         return fl;
95 }
96
97 static void fl_free_rcu(struct rcu_head *head)
98 {
99         struct ip6_flowlabel *fl = container_of(head, struct ip6_flowlabel, rcu);
100
101         if (fl->share == IPV6_FL_S_PROCESS)
102                 put_pid(fl->owner.pid);
103         kfree(fl->opt);
104         kfree(fl);
105 }
106
107
108 static void fl_free(struct ip6_flowlabel *fl)
109 {
110         if (fl)
111                 call_rcu(&fl->rcu, fl_free_rcu);
112 }
113
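    /* Drop one user reference. On the last put, push the expiry out by the
     * linger time, free exclusive-use options early, and make sure the GC
     * timer fires no later than the new expiry.
     */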
114 static void fl_release(struct ip6_flowlabel *fl)
115 {
116         spin_lock_bh(&ip6_fl_lock);
117
118         fl->lastuse = jiffies;
119         if (atomic_dec_and_test(&fl->users)) {
120                 unsigned long ttd = fl->lastuse + fl->linger;
121                 if (time_after(ttd, fl->expires))
122                         fl->expires = ttd;
123                 ttd = fl->expires;
124                 if (fl->opt && fl->share == IPV6_FL_S_EXCL) {
125                         struct ipv6_txoptions *opt = fl->opt;
126                         fl->opt = NULL;
127                         kfree(opt);
128                 }
129                 if (!timer_pending(&ip6_fl_gc_timer) ||
130                     time_after(ip6_fl_gc_timer.expires, ttd))
131                         mod_timer(&ip6_fl_gc_timer, ttd);
132         }
133         spin_unlock_bh(&ip6_fl_lock);
134 }
135
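    /* Garbage-collector timer: scan every hash bucket, free labels that
     * have no users and whose expiry time has passed, then re-arm the
     * timer for the earliest remaining expiry.
     */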
136 static void ip6_fl_gc(struct timer_list *unused)
137 {
138         int i;
139         unsigned long now = jiffies;
140         unsigned long sched = 0;
141
142         spin_lock(&ip6_fl_lock);
143
144         for (i = 0; i <= FL_HASH_MASK; i++) {
145                 struct ip6_flowlabel *fl;
146                 struct ip6_flowlabel __rcu **flp;
147
148                 flp = &fl_ht[i];
149                 while ((fl = rcu_dereference_protected(*flp,
150                                                        lockdep_is_held(&ip6_fl_lock))) != NULL) {
151                         if (atomic_read(&fl->users) == 0) {
152                                 unsigned long ttd = fl->lastuse + fl->linger;
153                                 if (time_after(ttd, fl->expires))
154                                         fl->expires = ttd;
155                                 ttd = fl->expires;
156                                 if (time_after_eq(now, ttd)) {
157                                         *flp = fl->next;
158                                         fl_free(fl);
159                                         atomic_dec(&fl_size);
160                                         continue;
161                                 }
162                                 if (!sched || time_before(ttd, sched))
163                                         sched = ttd;
164                         }
165                         flp = &fl->next;
166                 }
167         }
168         if (!sched && atomic_read(&fl_size))
169                 sched = now + FL_MAX_LINGER;
170         if (sched) {
171                 mod_timer(&ip6_fl_gc_timer, sched);
172         }
173         spin_unlock(&ip6_fl_lock);
174 }
175
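    /* Network-namespace teardown: drop all unused labels that belong to
     * the dying namespace.
     */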
176 static void __net_exit ip6_fl_purge(struct net *net)
177 {
178         int i;
179
180         spin_lock_bh(&ip6_fl_lock);
181         for (i = 0; i <= FL_HASH_MASK; i++) {
182                 struct ip6_flowlabel *fl;
183                 struct ip6_flowlabel __rcu **flp;
184
185                 flp = &fl_ht[i];
186                 while ((fl = rcu_dereference_protected(*flp,
187                                                        lockdep_is_held(&ip6_fl_lock))) != NULL) {
188                         if (net_eq(fl->fl_net, net) &&
189                             atomic_read(&fl->users) == 0) {
190                                 *flp = fl->next;
191                                 fl_free(fl);
192                                 atomic_dec(&fl_size);
193                                 continue;
194                         }
195                         flp = &fl->next;
196                 }
197         }
198         spin_unlock_bh(&ip6_fl_lock);
199 }
200
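    /* Insert a freshly created label into the global hash. A zero label
     * means "pick a random unused value". For an explicit label, an entry
     * that appeared while ip6_fl_lock was dropped is returned instead
     * (with a user reference taken); NULL is returned on successful insert.
     */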
201 static struct ip6_flowlabel *fl_intern(struct net *net,
202                                        struct ip6_flowlabel *fl, __be32 label)
203 {
204         struct ip6_flowlabel *lfl;
205
206         fl->label = label & IPV6_FLOWLABEL_MASK;
207
208         spin_lock_bh(&ip6_fl_lock);
209         if (label == 0) {
210                 for (;;) {
211                         fl->label = htonl(prandom_u32())&IPV6_FLOWLABEL_MASK;
212                         if (fl->label) {
213                                 lfl = __fl_lookup(net, fl->label);
214                                 if (!lfl)
215                                         break;
216                         }
217                 }
218         } else {
219                 /*
220                  * We dropped the ip6_fl_lock, so this entry could have
221                  * reappeared in the meantime and we need to recheck for it.
222                  *
223                  * OTOH there is no need to search the socket's own list first,
224                  * as ipv6_flowlabel_opt does - the sock is locked, so a new
225                  * entry with the same label can only appear on another sock.
226                  */
227                 lfl = __fl_lookup(net, fl->label);
228                 if (lfl) {
229                         atomic_inc(&lfl->users);
230                         spin_unlock_bh(&ip6_fl_lock);
231                         return lfl;
232                 }
233         }
234
235         fl->lastuse = jiffies;
236         fl->next = fl_ht[FL_HASH(fl->label)];
237         rcu_assign_pointer(fl_ht[FL_HASH(fl->label)], fl);
238         atomic_inc(&fl_size);
239         spin_unlock_bh(&ip6_fl_lock);
240         return NULL;
241 }
242
243
244
245 /* Socket flowlabel lists */
246
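    /* Look the label up on the socket's own list and take a user reference;
     * returns NULL if the socket does not hold that label.
     */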
247 struct ip6_flowlabel *fl6_sock_lookup(struct sock *sk, __be32 label)
248 {
249         struct ipv6_fl_socklist *sfl;
250         struct ipv6_pinfo *np = inet6_sk(sk);
251
252         label &= IPV6_FLOWLABEL_MASK;
253
254         rcu_read_lock_bh();
255         for_each_sk_fl_rcu(np, sfl) {
256                 struct ip6_flowlabel *fl = sfl->fl;
257
258                 if (fl->label == label && atomic_inc_not_zero(&fl->users)) {
259                         fl->lastuse = jiffies;
260                         rcu_read_unlock_bh();
261                         return fl;
262                 }
263         }
264         rcu_read_unlock_bh();
265         return NULL;
266 }
267 EXPORT_SYMBOL_GPL(fl6_sock_lookup);
268
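    /* Detach and release every flowlabel attached to the socket. */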
269 void fl6_free_socklist(struct sock *sk)
270 {
271         struct ipv6_pinfo *np = inet6_sk(sk);
272         struct ipv6_fl_socklist *sfl;
273
274         if (!rcu_access_pointer(np->ipv6_fl_list))
275                 return;
276
277         spin_lock_bh(&ip6_sk_fl_lock);
278         while ((sfl = rcu_dereference_protected(np->ipv6_fl_list,
279                                                 lockdep_is_held(&ip6_sk_fl_lock))) != NULL) {
280                 np->ipv6_fl_list = sfl->next;
281                 spin_unlock_bh(&ip6_sk_fl_lock);
282
283                 fl_release(sfl->fl);
284                 kfree_rcu(sfl, rcu);
285
286                 spin_lock_bh(&ip6_sk_fl_lock);
287         }
288         spin_unlock_bh(&ip6_sk_fl_lock);
289 }
290
291 /* Service routines */
292
293
294 /*
295    This is the only difficult place: a flow label enforces identical headers
296    up to and including the routing header, but the user may still supply
297    options that follow the rthdr (dst1opt), which must be merged in here.
298  */
299
300 struct ipv6_txoptions *fl6_merge_options(struct ipv6_txoptions *opt_space,
301                                          struct ip6_flowlabel *fl,
302                                          struct ipv6_txoptions *fopt)
303 {
304         struct ipv6_txoptions *fl_opt = fl->opt;
305
306         if (!fopt || fopt->opt_flen == 0)
307                 return fl_opt;
308
309         if (fl_opt) {
310                 opt_space->hopopt = fl_opt->hopopt;
311                 opt_space->dst0opt = fl_opt->dst0opt;
312                 opt_space->srcrt = fl_opt->srcrt;
313                 opt_space->opt_nflen = fl_opt->opt_nflen;
314         } else {
315                 if (fopt->opt_nflen == 0)
316                         return fopt;
317                 opt_space->hopopt = NULL;
318                 opt_space->dst0opt = NULL;
319                 opt_space->srcrt = NULL;
320                 opt_space->opt_nflen = 0;
321         }
322         opt_space->dst1opt = fopt->dst1opt;
323         opt_space->opt_flen = fopt->opt_flen;
324         opt_space->tot_len = fopt->tot_len;
325         return opt_space;
326 }
327 EXPORT_SYMBOL_GPL(fl6_merge_options);
328
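    /* Convert a linger/expiry value from seconds to jiffies, enforcing the
     * FL_MIN_LINGER floor; values above FL_MAX_LINGER require CAP_NET_ADMIN,
     * otherwise 0 is returned and the request is rejected.
     */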
329 static unsigned long check_linger(unsigned long ttl)
330 {
331         if (ttl < FL_MIN_LINGER)
332                 return FL_MIN_LINGER*HZ;
333         if (ttl > FL_MAX_LINGER && !capable(CAP_NET_ADMIN))
334                 return 0;
335         return ttl*HZ;
336 }
337
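    /* Extend a label's linger and expiry times; both may only grow, never
     * shrink. Returns -EPERM if check_linger() rejects the values.
     */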
338 static int fl6_renew(struct ip6_flowlabel *fl, unsigned long linger, unsigned long expires)
339 {
340         linger = check_linger(linger);
341         if (!linger)
342                 return -EPERM;
343         expires = check_linger(expires);
344         if (!expires)
345                 return -EPERM;
346
347         spin_lock_bh(&ip6_fl_lock);
348         fl->lastuse = jiffies;
349         if (time_before(fl->linger, linger))
350                 fl->linger = linger;
351         if (time_before(expires, fl->linger))
352                 expires = fl->linger;
353         if (time_before(fl->expires, fl->lastuse + expires))
354                 fl->expires = fl->lastuse + expires;
355         spin_unlock_bh(&ip6_fl_lock);
356
357         return 0;
358 }
359
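    /* Allocate and initialise a new label from a userspace in6_flowlabel_req.
     * Any ancillary data following the request is parsed as IPv6 options;
     * only non-fragmentable options (hop-by-hop, dst0opt, routing header)
     * are accepted.
     */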
360 static struct ip6_flowlabel *
361 fl_create(struct net *net, struct sock *sk, struct in6_flowlabel_req *freq,
362           char __user *optval, int optlen, int *err_p)
363 {
364         struct ip6_flowlabel *fl = NULL;
365         int olen;
366         int addr_type;
367         int err;
368
369         olen = optlen - CMSG_ALIGN(sizeof(*freq));
370         err = -EINVAL;
371         if (olen > 64 * 1024)
372                 goto done;
373
374         err = -ENOMEM;
375         fl = kzalloc(sizeof(*fl), GFP_KERNEL);
376         if (!fl)
377                 goto done;
378
379         if (olen > 0) {
380                 struct msghdr msg;
381                 struct flowi6 flowi6;
382                 struct ipcm6_cookie ipc6;
383
384                 err = -ENOMEM;
385                 fl->opt = kmalloc(sizeof(*fl->opt) + olen, GFP_KERNEL);
386                 if (!fl->opt)
387                         goto done;
388
389                 memset(fl->opt, 0, sizeof(*fl->opt));
390                 fl->opt->tot_len = sizeof(*fl->opt) + olen;
391                 err = -EFAULT;
392                 if (copy_from_user(fl->opt+1, optval+CMSG_ALIGN(sizeof(*freq)), olen))
393                         goto done;
394
395                 msg.msg_controllen = olen;
396                 msg.msg_control = (void *)(fl->opt+1);
397                 memset(&flowi6, 0, sizeof(flowi6));
398
399                 ipc6.opt = fl->opt;
400                 err = ip6_datagram_send_ctl(net, sk, &msg, &flowi6, &ipc6);
401                 if (err)
402                         goto done;
403                 err = -EINVAL;
404                 if (fl->opt->opt_flen)
405                         goto done;
406                 if (fl->opt->opt_nflen == 0) {
407                         kfree(fl->opt);
408                         fl->opt = NULL;
409                 }
410         }
411
412         fl->fl_net = net;
413         fl->expires = jiffies;
414         err = fl6_renew(fl, freq->flr_linger, freq->flr_expires);
415         if (err)
416                 goto done;
417         fl->share = freq->flr_share;
418         addr_type = ipv6_addr_type(&freq->flr_dst);
419         if ((addr_type & IPV6_ADDR_MAPPED) ||
420             addr_type == IPV6_ADDR_ANY) {
421                 err = -EINVAL;
422                 goto done;
423         }
424         fl->dst = freq->flr_dst;
425         atomic_set(&fl->users, 1);
426         switch (fl->share) {
427         case IPV6_FL_S_EXCL:
428         case IPV6_FL_S_ANY:
429                 break;
430         case IPV6_FL_S_PROCESS:
431                 fl->owner.pid = get_task_pid(current, PIDTYPE_PID);
432                 break;
433         case IPV6_FL_S_USER:
434                 fl->owner.uid = current_euid();
435                 break;
436         default:
437                 err = -EINVAL;
438                 goto done;
439         }
440         return fl;
441
442 done:
443         fl_free(fl);
444         *err_p = err;
445         return NULL;
446 }
447
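    /* Resource limits: the table as a whole may never exceed FL_MAX_SIZE
     * entries, and unprivileged users are additionally held to
     * FL_MAX_PER_SOCK labels per socket plus soft limits once the table
     * starts filling up.
     */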
448 static int mem_check(struct sock *sk)
449 {
450         struct ipv6_pinfo *np = inet6_sk(sk);
451         struct ipv6_fl_socklist *sfl;
452         int room = FL_MAX_SIZE - atomic_read(&fl_size);
453         int count = 0;
454
455         if (room > FL_MAX_SIZE - FL_MAX_PER_SOCK)
456                 return 0;
457
458         rcu_read_lock_bh();
459         for_each_sk_fl_rcu(np, sfl)
460                 count++;
461         rcu_read_unlock_bh();
462
463         if (room <= 0 ||
464             ((count >= FL_MAX_PER_SOCK ||
465               (count > 0 && room < FL_MAX_SIZE/2) || room < FL_MAX_SIZE/4) &&
466              !capable(CAP_NET_ADMIN)))
467                 return -ENOBUFS;
468
469         return 0;
470 }
471
472 static inline void fl_link(struct ipv6_pinfo *np, struct ipv6_fl_socklist *sfl,
473                 struct ip6_flowlabel *fl)
474 {
475         spin_lock_bh(&ip6_sk_fl_lock);
476         sfl->fl = fl;
477         sfl->next = np->ipv6_fl_list;
478         rcu_assign_pointer(np->ipv6_fl_list, sfl);
479         spin_unlock_bh(&ip6_sk_fl_lock);
480 }
481
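    /* getsockopt(IPV6_FLOWLABEL_MGR) handler: report the received label
     * (IPV6_FL_F_REMOTE), the reflected label, or the entry on the socket's
     * list matching its current flow label.
     */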
482 int ipv6_flowlabel_opt_get(struct sock *sk, struct in6_flowlabel_req *freq,
483                            int flags)
484 {
485         struct ipv6_pinfo *np = inet6_sk(sk);
486         struct ipv6_fl_socklist *sfl;
487
488         if (flags & IPV6_FL_F_REMOTE) {
489                 freq->flr_label = np->rcv_flowinfo & IPV6_FLOWLABEL_MASK;
490                 return 0;
491         }
492
493         if (np->repflow) {
494                 freq->flr_label = np->flow_label;
495                 return 0;
496         }
497
498         rcu_read_lock_bh();
499
500         for_each_sk_fl_rcu(np, sfl) {
501                 if (sfl->fl->label == (np->flow_label & IPV6_FLOWLABEL_MASK)) {
502                         spin_lock_bh(&ip6_fl_lock);
503                         freq->flr_label = sfl->fl->label;
504                         freq->flr_dst = sfl->fl->dst;
505                         freq->flr_share = sfl->fl->share;
506                         freq->flr_expires = (sfl->fl->expires - jiffies) / HZ;
507                         freq->flr_linger = sfl->fl->linger / HZ;
508
509                         spin_unlock_bh(&ip6_fl_lock);
510                         rcu_read_unlock_bh();
511                         return 0;
512                 }
513         }
514         rcu_read_unlock_bh();
515
516         return -ENOENT;
517 }
518
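    /* setsockopt(IPV6_FLOWLABEL_MGR) handler: IPV6_FL_A_GET creates or
     * attaches a label, IPV6_FL_A_RENEW extends it, IPV6_FL_A_PUT detaches it.
     *
     * A rough userspace sketch of the usual create/attach call (illustrative
     * only; dst_addr stands for the peer address):
     *
     *	struct in6_flowlabel_req freq = {
     *		.flr_action = IPV6_FL_A_GET,
     *		.flr_flags  = IPV6_FL_F_CREATE,
     *		.flr_share  = IPV6_FL_S_EXCL,
     *		.flr_dst    = dst_addr,
     *	};
     *	setsockopt(fd, IPPROTO_IPV6, IPV6_FLOWLABEL_MGR, &freq, sizeof(freq));
     *
     * When flr_label is 0 on input, the allocated label is copied back into
     * the caller's buffer (see the copy_to_user() near the end below).
     */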
519 int ipv6_flowlabel_opt(struct sock *sk, char __user *optval, int optlen)
520 {
521         int uninitialized_var(err);
522         struct net *net = sock_net(sk);
523         struct ipv6_pinfo *np = inet6_sk(sk);
524         struct in6_flowlabel_req freq;
525         struct ipv6_fl_socklist *sfl1 = NULL;
526         struct ipv6_fl_socklist *sfl;
527         struct ipv6_fl_socklist __rcu **sflp;
528         struct ip6_flowlabel *fl, *fl1 = NULL;
529
530
531         if (optlen < sizeof(freq))
532                 return -EINVAL;
533
534         if (copy_from_user(&freq, optval, sizeof(freq)))
535                 return -EFAULT;
536
537         switch (freq.flr_action) {
538         case IPV6_FL_A_PUT:
539                 if (freq.flr_flags & IPV6_FL_F_REFLECT) {
540                         if (sk->sk_protocol != IPPROTO_TCP)
541                                 return -ENOPROTOOPT;
542                         if (!np->repflow)
543                                 return -ESRCH;
544                         np->flow_label = 0;
545                         np->repflow = 0;
546                         return 0;
547                 }
548                 spin_lock_bh(&ip6_sk_fl_lock);
549                 for (sflp = &np->ipv6_fl_list;
550                      (sfl = rcu_dereference_protected(*sflp,
551                                                       lockdep_is_held(&ip6_sk_fl_lock))) != NULL;
552                      sflp = &sfl->next) {
553                         if (sfl->fl->label == freq.flr_label) {
554                                 if (freq.flr_label == (np->flow_label&IPV6_FLOWLABEL_MASK))
555                                         np->flow_label &= ~IPV6_FLOWLABEL_MASK;
556                                 *sflp = sfl->next;
557                                 spin_unlock_bh(&ip6_sk_fl_lock);
558                                 fl_release(sfl->fl);
559                                 kfree_rcu(sfl, rcu);
560                                 return 0;
561                         }
562                 }
563                 spin_unlock_bh(&ip6_sk_fl_lock);
564                 return -ESRCH;
565
566         case IPV6_FL_A_RENEW:
567                 rcu_read_lock_bh();
568                 for_each_sk_fl_rcu(np, sfl) {
569                         if (sfl->fl->label == freq.flr_label) {
570                                 err = fl6_renew(sfl->fl, freq.flr_linger, freq.flr_expires);
571                                 rcu_read_unlock_bh();
572                                 return err;
573                         }
574                 }
575                 rcu_read_unlock_bh();
576
577                 if (freq.flr_share == IPV6_FL_S_NONE &&
578                     ns_capable(net->user_ns, CAP_NET_ADMIN)) {
579                         fl = fl_lookup(net, freq.flr_label);
580                         if (fl) {
581                                 err = fl6_renew(fl, freq.flr_linger, freq.flr_expires);
582                                 fl_release(fl);
583                                 return err;
584                         }
585                 }
586                 return -ESRCH;
587
588         case IPV6_FL_A_GET:
589                 if (freq.flr_flags & IPV6_FL_F_REFLECT) {
590                         struct net *net = sock_net(sk);
591                         if (net->ipv6.sysctl.flowlabel_consistency) {
592                                 net_info_ratelimited("Cannot set IPV6_FL_F_REFLECT if flowlabel_consistency sysctl is enabled\n");
593                                 return -EPERM;
594                         }
595
596                         if (sk->sk_protocol != IPPROTO_TCP)
597                                 return -ENOPROTOOPT;
598
599                         np->repflow = 1;
600                         return 0;
601                 }
602
603                 if (freq.flr_label & ~IPV6_FLOWLABEL_MASK)
604                         return -EINVAL;
605
606                 if (net->ipv6.sysctl.flowlabel_state_ranges &&
607                     (freq.flr_label & IPV6_FLOWLABEL_STATELESS_FLAG))
608                         return -ERANGE;
609
610                 fl = fl_create(net, sk, &freq, optval, optlen, &err);
611                 if (!fl)
612                         return err;
613                 sfl1 = kmalloc(sizeof(*sfl1), GFP_KERNEL);
614
615                 if (freq.flr_label) {
616                         err = -EEXIST;
617                         rcu_read_lock_bh();
618                         for_each_sk_fl_rcu(np, sfl) {
619                                 if (sfl->fl->label == freq.flr_label) {
620                                         if (freq.flr_flags&IPV6_FL_F_EXCL) {
621                                                 rcu_read_unlock_bh();
622                                                 goto done;
623                                         }
624                                         fl1 = sfl->fl;
625                                         if (!atomic_inc_not_zero(&fl1->users))
626                                                 fl1 = NULL;
627                                         break;
628                                 }
629                         }
630                         rcu_read_unlock_bh();
631
632                         if (!fl1)
633                                 fl1 = fl_lookup(net, freq.flr_label);
634                         if (fl1) {
635 recheck:
636                                 err = -EEXIST;
637                                 if (freq.flr_flags&IPV6_FL_F_EXCL)
638                                         goto release;
639                                 err = -EPERM;
640                                 if (fl1->share == IPV6_FL_S_EXCL ||
641                                     fl1->share != fl->share ||
642                                     ((fl1->share == IPV6_FL_S_PROCESS) &&
643                                      (fl1->owner.pid != fl->owner.pid)) ||
644                                     ((fl1->share == IPV6_FL_S_USER) &&
645                                      !uid_eq(fl1->owner.uid, fl->owner.uid)))
646                                         goto release;
647
648                                 err = -ENOMEM;
649                                 if (!sfl1)
650                                         goto release;
651                                 if (fl->linger > fl1->linger)
652                                         fl1->linger = fl->linger;
653                                 if ((long)(fl->expires - fl1->expires) > 0)
654                                         fl1->expires = fl->expires;
655                                 fl_link(np, sfl1, fl1);
656                                 fl_free(fl);
657                                 return 0;
658
659 release:
660                                 fl_release(fl1);
661                                 goto done;
662                         }
663                 }
664                 err = -ENOENT;
665                 if (!(freq.flr_flags&IPV6_FL_F_CREATE))
666                         goto done;
667
668                 err = -ENOMEM;
669                 if (!sfl1)
670                         goto done;
671
672                 err = mem_check(sk);
673                 if (err != 0)
674                         goto done;
675
676                 fl1 = fl_intern(net, fl, freq.flr_label);
677                 if (fl1)
678                         goto recheck;
679
680                 if (!freq.flr_label) {
681                         if (copy_to_user(&((struct in6_flowlabel_req __user *) optval)->flr_label,
682                                          &fl->label, sizeof(fl->label))) {
683                                 /* Intentionally ignore fault. */
684                         }
685                 }
686
687                 fl_link(np, sfl1, fl);
688                 return 0;
689
690         default:
691                 return -EINVAL;
692         }
693
694 done:
695         fl_free(fl);
696         kfree(sfl1);
697         return err;
698 }
699
700 #ifdef CONFIG_PROC_FS
701
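    /* /proc/net/ip6_flowlabel: seq_file iterator that walks the global hash
     * table under rcu_read_lock_bh() and prints one line per label.
     */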
702 struct ip6fl_iter_state {
703         struct seq_net_private p;
704         struct pid_namespace *pid_ns;
705         int bucket;
706 };
707
708 #define ip6fl_seq_private(seq)  ((struct ip6fl_iter_state *)(seq)->private)
709
710 static struct ip6_flowlabel *ip6fl_get_first(struct seq_file *seq)
711 {
712         struct ip6_flowlabel *fl = NULL;
713         struct ip6fl_iter_state *state = ip6fl_seq_private(seq);
714         struct net *net = seq_file_net(seq);
715
716         for (state->bucket = 0; state->bucket <= FL_HASH_MASK; ++state->bucket) {
717                 for_each_fl_rcu(state->bucket, fl) {
718                         if (net_eq(fl->fl_net, net))
719                                 goto out;
720                 }
721         }
722         fl = NULL;
723 out:
724         return fl;
725 }
726
727 static struct ip6_flowlabel *ip6fl_get_next(struct seq_file *seq, struct ip6_flowlabel *fl)
728 {
729         struct ip6fl_iter_state *state = ip6fl_seq_private(seq);
730         struct net *net = seq_file_net(seq);
731
732         for_each_fl_continue_rcu(fl) {
733                 if (net_eq(fl->fl_net, net))
734                         goto out;
735         }
736
737 try_again:
738         if (++state->bucket <= FL_HASH_MASK) {
739                 for_each_fl_rcu(state->bucket, fl) {
740                         if (net_eq(fl->fl_net, net))
741                                 goto out;
742                 }
743                 goto try_again;
744         }
745         fl = NULL;
746
747 out:
748         return fl;
749 }
750
751 static struct ip6_flowlabel *ip6fl_get_idx(struct seq_file *seq, loff_t pos)
752 {
753         struct ip6_flowlabel *fl = ip6fl_get_first(seq);
754         if (fl)
755                 while (pos && (fl = ip6fl_get_next(seq, fl)) != NULL)
756                         --pos;
757         return pos ? NULL : fl;
758 }
759
760 static void *ip6fl_seq_start(struct seq_file *seq, loff_t *pos)
761         __acquires(RCU)
762 {
763         struct ip6fl_iter_state *state = ip6fl_seq_private(seq);
764
765         state->pid_ns = proc_pid_ns(file_inode(seq->file));
766
767         rcu_read_lock_bh();
768         return *pos ? ip6fl_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
769 }
770
771 static void *ip6fl_seq_next(struct seq_file *seq, void *v, loff_t *pos)
772 {
773         struct ip6_flowlabel *fl;
774
775         if (v == SEQ_START_TOKEN)
776                 fl = ip6fl_get_first(seq);
777         else
778                 fl = ip6fl_get_next(seq, v);
779         ++*pos;
780         return fl;
781 }
782
783 static void ip6fl_seq_stop(struct seq_file *seq, void *v)
784         __releases(RCU)
785 {
786         rcu_read_unlock_bh();
787 }
788
789 static int ip6fl_seq_show(struct seq_file *seq, void *v)
790 {
791         struct ip6fl_iter_state *state = ip6fl_seq_private(seq);
792         if (v == SEQ_START_TOKEN) {
793                 seq_puts(seq, "Label S Owner  Users  Linger Expires  Dst                              Opt\n");
794         } else {
795                 struct ip6_flowlabel *fl = v;
796                 seq_printf(seq,
797                            "%05X %-1d %-6d %-6d %-6ld %-8ld %pi6 %-4d\n",
798                            (unsigned int)ntohl(fl->label),
799                            fl->share,
800                            ((fl->share == IPV6_FL_S_PROCESS) ?
801                             pid_nr_ns(fl->owner.pid, state->pid_ns) :
802                             ((fl->share == IPV6_FL_S_USER) ?
803                              from_kuid_munged(seq_user_ns(seq), fl->owner.uid) :
804                              0)),
805                            atomic_read(&fl->users),
806                            fl->linger/HZ,
807                            (long)(fl->expires - jiffies)/HZ,
808                            &fl->dst,
809                            fl->opt ? fl->opt->opt_nflen : 0);
810         }
811         return 0;
812 }
813
814 static const struct seq_operations ip6fl_seq_ops = {
815         .start  =       ip6fl_seq_start,
816         .next   =       ip6fl_seq_next,
817         .stop   =       ip6fl_seq_stop,
818         .show   =       ip6fl_seq_show,
819 };
820
821 static int __net_init ip6_flowlabel_proc_init(struct net *net)
822 {
823         if (!proc_create_net("ip6_flowlabel", 0444, net->proc_net,
824                         &ip6fl_seq_ops, sizeof(struct ip6fl_iter_state)))
825                 return -ENOMEM;
826         return 0;
827 }
828
829 static void __net_exit ip6_flowlabel_proc_fini(struct net *net)
830 {
831         remove_proc_entry("ip6_flowlabel", net->proc_net);
832 }
833 #else
834 static inline int ip6_flowlabel_proc_init(struct net *net)
835 {
836         return 0;
837 }
838 static inline void ip6_flowlabel_proc_fini(struct net *net)
839 {
840 }
841 #endif
842
843 static void __net_exit ip6_flowlabel_net_exit(struct net *net)
844 {
845         ip6_fl_purge(net);
846         ip6_flowlabel_proc_fini(net);
847 }
848
849 static struct pernet_operations ip6_flowlabel_net_ops = {
850         .init = ip6_flowlabel_proc_init,
851         .exit = ip6_flowlabel_net_exit,
852 };
853
854 int ip6_flowlabel_init(void)
855 {
856         return register_pernet_subsys(&ip6_flowlabel_net_ops);
857 }
858
859 void ip6_flowlabel_cleanup(void)
860 {
861         del_timer(&ip6_fl_gc_timer);
862         unregister_pernet_subsys(&ip6_flowlabel_net_ops);
863 }