net/ipv6/ip6_flowlabel.c
1 /*
2  *      ip6_flowlabel.c         IPv6 flowlabel manager.
3  *
4  *      This program is free software; you can redistribute it and/or
5  *      modify it under the terms of the GNU General Public License
6  *      as published by the Free Software Foundation; either version
7  *      2 of the License, or (at your option) any later version.
8  *
9  *      Authors:        Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
10  */
11
12 #include <linux/capability.h>
13 #include <linux/errno.h>
14 #include <linux/types.h>
15 #include <linux/socket.h>
16 #include <linux/net.h>
17 #include <linux/netdevice.h>
18 #include <linux/in6.h>
19 #include <linux/proc_fs.h>
20 #include <linux/seq_file.h>
21 #include <linux/slab.h>
22 #include <linux/export.h>
23 #include <linux/pid_namespace.h>
24
25 #include <net/net_namespace.h>
26 #include <net/sock.h>
27
28 #include <net/ipv6.h>
29 #include <net/addrconf.h>
30 #include <net/rawv6.h>
31 #include <net/transp_v6.h>
32
33 #include <asm/uaccess.h>
34
35 #define FL_MIN_LINGER   6       /* Minimal linger. It is set to the 6 seconds
36                                    specified in the old IPv6 RFC; a reasonable value.
37                                  */
38 #define FL_MAX_LINGER   150     /* Maximal linger timeout */
39
40 /* FL hash table */
41
42 #define FL_MAX_PER_SOCK 32
43 #define FL_MAX_SIZE     4096
44 #define FL_HASH_MASK    255
45 #define FL_HASH(l)      (ntohl(l)&FL_HASH_MASK)
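/*
 * A label hashes on the low 8 bits of its host-order value: e.g. a label of
 * 0x12345 lands in bucket 0x45.  Collisions are chained through fl->next.
 */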
46
47 static atomic_t fl_size = ATOMIC_INIT(0);
48 static struct ip6_flowlabel __rcu *fl_ht[FL_HASH_MASK+1];
49
50 static void ip6_fl_gc(unsigned long dummy);
51 static DEFINE_TIMER(ip6_fl_gc_timer, ip6_fl_gc, 0, 0);
52
53 /* FL hash table lock: it protects only the GC */
54
55 static DEFINE_SPINLOCK(ip6_fl_lock);
56
57 /* Lock protecting the per-socket flowlabel lists */
58
59 static DEFINE_SPINLOCK(ip6_sk_fl_lock);
60
61 #define for_each_fl_rcu(hash, fl)                               \
62         for (fl = rcu_dereference_bh(fl_ht[(hash)]);            \
63              fl != NULL;                                        \
64              fl = rcu_dereference_bh(fl->next))
65 #define for_each_fl_continue_rcu(fl)                            \
66         for (fl = rcu_dereference_bh(fl->next);                 \
67              fl != NULL;                                        \
68              fl = rcu_dereference_bh(fl->next))
69
70 #define for_each_sk_fl_rcu(np, sfl)                             \
71         for (sfl = rcu_dereference_bh(np->ipv6_fl_list);        \
72              sfl != NULL;                                       \
73              sfl = rcu_dereference_bh(sfl->next))
74
75 static inline struct ip6_flowlabel *__fl_lookup(struct net *net, __be32 label)
76 {
77         struct ip6_flowlabel *fl;
78
79         for_each_fl_rcu(FL_HASH(label), fl) {
80                 if (fl->label == label && net_eq(fl->fl_net, net))
81                         return fl;
82         }
83         return NULL;
84 }
85
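/*
 * fl_lookup() below takes a reference only via atomic_inc_not_zero(): an
 * entry whose user count has already dropped to zero belongs to the GC
 * timer and must not be resurrected.  Lookups run under rcu_read_lock_bh();
 * writers serialize on ip6_fl_lock.
 */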
86 static struct ip6_flowlabel *fl_lookup(struct net *net, __be32 label)
87 {
88         struct ip6_flowlabel *fl;
89
90         rcu_read_lock_bh();
91         fl = __fl_lookup(net, label);
92         if (fl && !atomic_inc_not_zero(&fl->users))
93                 fl = NULL;
94         rcu_read_unlock_bh();
95         return fl;
96 }
97
98
99 static void fl_free(struct ip6_flowlabel *fl)
100 {
101         if (fl) {
102                 if (fl->share == IPV6_FL_S_PROCESS)
103                         put_pid(fl->owner.pid);
104                 release_net(fl->fl_net);
105                 kfree(fl->opt);
106                 kfree_rcu(fl, rcu);
107         }
108 }
109
110 static void fl_release(struct ip6_flowlabel *fl)
111 {
112         spin_lock_bh(&ip6_fl_lock);
113
114         fl->lastuse = jiffies;
115         if (atomic_dec_and_test(&fl->users)) {
116                 unsigned long ttd = fl->lastuse + fl->linger;
117                 if (time_after(ttd, fl->expires))
118                         fl->expires = ttd;
119                 ttd = fl->expires;
120                 if (fl->opt && fl->share == IPV6_FL_S_EXCL) {
121                         struct ipv6_txoptions *opt = fl->opt;
122                         fl->opt = NULL;
123                         kfree(opt);
124                 }
125                 if (!timer_pending(&ip6_fl_gc_timer) ||
126                     time_after(ip6_fl_gc_timer.expires, ttd))
127                         mod_timer(&ip6_fl_gc_timer, ttd);
128         }
129         spin_unlock_bh(&ip6_fl_lock);
130 }
131
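/*
 * Garbage collection: walk every bucket, free unused entries whose expiry
 * time has passed, and re-arm the timer for the earliest remaining expiry.
 * fl_release() arms this timer when an entry's last user goes away.
 */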
132 static void ip6_fl_gc(unsigned long dummy)
133 {
134         int i;
135         unsigned long now = jiffies;
136         unsigned long sched = 0;
137
138         spin_lock(&ip6_fl_lock);
139
140         for (i=0; i<=FL_HASH_MASK; i++) {
141                 struct ip6_flowlabel *fl;
142                 struct ip6_flowlabel __rcu **flp;
143
144                 flp = &fl_ht[i];
145                 while ((fl = rcu_dereference_protected(*flp,
146                                                        lockdep_is_held(&ip6_fl_lock))) != NULL) {
147                         if (atomic_read(&fl->users) == 0) {
148                                 unsigned long ttd = fl->lastuse + fl->linger;
149                                 if (time_after(ttd, fl->expires))
150                                         fl->expires = ttd;
151                                 ttd = fl->expires;
152                                 if (time_after_eq(now, ttd)) {
153                                         *flp = fl->next;
154                                         fl_free(fl);
155                                         atomic_dec(&fl_size);
156                                         continue;
157                                 }
158                                 if (!sched || time_before(ttd, sched))
159                                         sched = ttd;
160                         }
161                         flp = &fl->next;
162                 }
163         }
164         if (!sched && atomic_read(&fl_size))
165                 sched = now + FL_MAX_LINGER;
166         if (sched) {
167                 mod_timer(&ip6_fl_gc_timer, sched);
168         }
169         spin_unlock(&ip6_fl_lock);
170 }
171
172 static void __net_exit ip6_fl_purge(struct net *net)
173 {
174         int i;
175
176         spin_lock(&ip6_fl_lock);
177         for (i = 0; i <= FL_HASH_MASK; i++) {
178                 struct ip6_flowlabel *fl;
179                 struct ip6_flowlabel __rcu **flp;
180
181                 flp = &fl_ht[i];
182                 while ((fl = rcu_dereference_protected(*flp,
183                                                        lockdep_is_held(&ip6_fl_lock))) != NULL) {
184                         if (net_eq(fl->fl_net, net) &&
185                             atomic_read(&fl->users) == 0) {
186                                 *flp = fl->next;
187                                 fl_free(fl);
188                                 atomic_dec(&fl_size);
189                                 continue;
190                         }
191                         flp = &fl->next;
192                 }
193         }
194         spin_unlock(&ip6_fl_lock);
195 }
196
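/*
 * fl_intern() links a new label into the hash table.  With label == 0 it
 * keeps drawing random 20-bit labels until it finds an unused one; with an
 * explicit label, an entry that appeared while the lock was dropped is
 * returned with its user count raised so the caller can redo its sharing
 * checks ("recheck" in ipv6_flowlabel_opt).
 */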
197 static struct ip6_flowlabel *fl_intern(struct net *net,
198                                        struct ip6_flowlabel *fl, __be32 label)
199 {
200         struct ip6_flowlabel *lfl;
201
202         fl->label = label & IPV6_FLOWLABEL_MASK;
203
204         spin_lock_bh(&ip6_fl_lock);
205         if (label == 0) {
206                 for (;;) {
207                         fl->label = htonl(prandom_u32())&IPV6_FLOWLABEL_MASK;
208                         if (fl->label) {
209                                 lfl = __fl_lookup(net, fl->label);
210                                 if (lfl == NULL)
211                                         break;
212                         }
213                 }
214         } else {
215                 /*
216                  * we dropped the ip6_fl_lock, so this entry could reappear
217                  * and we need to recheck for it.
218                  *
219                  * OTOH no need to search the active socket first, like it is
220                  * done in ipv6_flowlabel_opt - sock is locked, so new entry
221                  * with the same label can only appear on another sock
222                  */
223                 lfl = __fl_lookup(net, fl->label);
224                 if (lfl != NULL) {
225                         atomic_inc(&lfl->users);
226                         spin_unlock_bh(&ip6_fl_lock);
227                         return lfl;
228                 }
229         }
230
231         fl->lastuse = jiffies;
232         fl->next = fl_ht[FL_HASH(fl->label)];
233         rcu_assign_pointer(fl_ht[FL_HASH(fl->label)], fl);
234         atomic_inc(&fl_size);
235         spin_unlock_bh(&ip6_fl_lock);
236         return NULL;
237 }
238
239
240
241 /* Socket flowlabel lists */
242
243 struct ip6_flowlabel * fl6_sock_lookup(struct sock *sk, __be32 label)
244 {
245         struct ipv6_fl_socklist *sfl;
246         struct ipv6_pinfo *np = inet6_sk(sk);
247
248         label &= IPV6_FLOWLABEL_MASK;
249
250         rcu_read_lock_bh();
251         for_each_sk_fl_rcu(np, sfl) {
252                 struct ip6_flowlabel *fl = sfl->fl;
253                 if (fl->label == label) {
254                         fl->lastuse = jiffies;
255                         atomic_inc(&fl->users);
256                         rcu_read_unlock_bh();
257                         return fl;
258                 }
259         }
260         rcu_read_unlock_bh();
261         return NULL;
262 }
263
264 EXPORT_SYMBOL_GPL(fl6_sock_lookup);
265
266 void fl6_free_socklist(struct sock *sk)
267 {
268         struct ipv6_pinfo *np = inet6_sk(sk);
269         struct ipv6_fl_socklist *sfl;
270
271         if (!rcu_access_pointer(np->ipv6_fl_list))
272                 return;
273
274         spin_lock_bh(&ip6_sk_fl_lock);
275         while ((sfl = rcu_dereference_protected(np->ipv6_fl_list,
276                                                 lockdep_is_held(&ip6_sk_fl_lock))) != NULL) {
277                 np->ipv6_fl_list = sfl->next;
278                 spin_unlock_bh(&ip6_sk_fl_lock);
279
280                 fl_release(sfl->fl);
281                 kfree_rcu(sfl, rcu);
282
283                 spin_lock_bh(&ip6_sk_fl_lock);
284         }
285         spin_unlock_bh(&ip6_sk_fl_lock);
286 }
287
288 /* Service routines */
289
290
291 /*
292    This is the only difficult place: a flow label enforces identical headers
293    up to and including the routing header, but the user may still supply
294    options following the rthdr.
295  */
296
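/*
 * Merge rule in short: when the flow label carries options, hopopt, dst0opt,
 * srcrt and opt_nflen always come from the label, while dst1opt and opt_flen
 * come from the per-packet options supplied by the sender.  Only a label
 * without options lets fopt through unchanged.
 */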
297 struct ipv6_txoptions *fl6_merge_options(struct ipv6_txoptions * opt_space,
298                                          struct ip6_flowlabel * fl,
299                                          struct ipv6_txoptions * fopt)
300 {
301         struct ipv6_txoptions * fl_opt = fl->opt;
302
303         if (fopt == NULL || fopt->opt_flen == 0)
304                 return fl_opt;
305
306         if (fl_opt != NULL) {
307                 opt_space->hopopt = fl_opt->hopopt;
308                 opt_space->dst0opt = fl_opt->dst0opt;
309                 opt_space->srcrt = fl_opt->srcrt;
310                 opt_space->opt_nflen = fl_opt->opt_nflen;
311         } else {
312                 if (fopt->opt_nflen == 0)
313                         return fopt;
314                 opt_space->hopopt = NULL;
315                 opt_space->dst0opt = NULL;
316                 opt_space->srcrt = NULL;
317                 opt_space->opt_nflen = 0;
318         }
319         opt_space->dst1opt = fopt->dst1opt;
320         opt_space->opt_flen = fopt->opt_flen;
321         return opt_space;
322 }
323 EXPORT_SYMBOL_GPL(fl6_merge_options);
324
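/*
 * check_linger() converts a user-supplied time in seconds to jiffies and
 * clamps it: a value below FL_MIN_LINGER is raised to 6 seconds, and a value
 * above FL_MAX_LINGER (150 s) is refused for callers without CAP_NET_ADMIN
 * (returning 0, which fl6_renew() turns into -EPERM).
 */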
325 static unsigned long check_linger(unsigned long ttl)
326 {
327         if (ttl < FL_MIN_LINGER)
328                 return FL_MIN_LINGER*HZ;
329         if (ttl > FL_MAX_LINGER && !capable(CAP_NET_ADMIN))
330                 return 0;
331         return ttl*HZ;
332 }
333
334 static int fl6_renew(struct ip6_flowlabel *fl, unsigned long linger, unsigned long expires)
335 {
336         linger = check_linger(linger);
337         if (!linger)
338                 return -EPERM;
339         expires = check_linger(expires);
340         if (!expires)
341                 return -EPERM;
342
343         spin_lock_bh(&ip6_fl_lock);
344         fl->lastuse = jiffies;
345         if (time_before(fl->linger, linger))
346                 fl->linger = linger;
347         if (time_before(expires, fl->linger))
348                 expires = fl->linger;
349         if (time_before(fl->expires, fl->lastuse + expires))
350                 fl->expires = fl->lastuse + expires;
351         spin_unlock_bh(&ip6_fl_lock);
352
353         return 0;
354 }
355
356 static struct ip6_flowlabel *
357 fl_create(struct net *net, struct sock *sk, struct in6_flowlabel_req *freq,
358           char __user *optval, int optlen, int *err_p)
359 {
360         struct ip6_flowlabel *fl = NULL;
361         int olen;
362         int addr_type;
363         int err;
364
365         olen = optlen - CMSG_ALIGN(sizeof(*freq));
366         err = -EINVAL;
367         if (olen > 64 * 1024)
368                 goto done;
369
370         err = -ENOMEM;
371         fl = kzalloc(sizeof(*fl), GFP_KERNEL);
372         if (fl == NULL)
373                 goto done;
374
375         if (olen > 0) {
376                 struct msghdr msg;
377                 struct flowi6 flowi6;
378                 int junk;
379
380                 err = -ENOMEM;
381                 fl->opt = kmalloc(sizeof(*fl->opt) + olen, GFP_KERNEL);
382                 if (fl->opt == NULL)
383                         goto done;
384
385                 memset(fl->opt, 0, sizeof(*fl->opt));
386                 fl->opt->tot_len = sizeof(*fl->opt) + olen;
387                 err = -EFAULT;
388                 if (copy_from_user(fl->opt+1, optval+CMSG_ALIGN(sizeof(*freq)), olen))
389                         goto done;
390
391                 msg.msg_controllen = olen;
392                 msg.msg_control = (void*)(fl->opt+1);
393                 memset(&flowi6, 0, sizeof(flowi6));
394
395                 err = ip6_datagram_send_ctl(net, sk, &msg, &flowi6, fl->opt,
396                                             &junk, &junk, &junk);
397                 if (err)
398                         goto done;
399                 err = -EINVAL;
400                 if (fl->opt->opt_flen)
401                         goto done;
402                 if (fl->opt->opt_nflen == 0) {
403                         kfree(fl->opt);
404                         fl->opt = NULL;
405                 }
406         }
407
408         fl->fl_net = hold_net(net);
409         fl->expires = jiffies;
410         err = fl6_renew(fl, freq->flr_linger, freq->flr_expires);
411         if (err)
412                 goto done;
413         fl->share = freq->flr_share;
414         addr_type = ipv6_addr_type(&freq->flr_dst);
415         if ((addr_type & IPV6_ADDR_MAPPED) ||
416             addr_type == IPV6_ADDR_ANY) {
417                 err = -EINVAL;
418                 goto done;
419         }
420         fl->dst = freq->flr_dst;
421         atomic_set(&fl->users, 1);
422         switch (fl->share) {
423         case IPV6_FL_S_EXCL:
424         case IPV6_FL_S_ANY:
425                 break;
426         case IPV6_FL_S_PROCESS:
427                 fl->owner.pid = get_task_pid(current, PIDTYPE_PID);
428                 break;
429         case IPV6_FL_S_USER:
430                 fl->owner.uid = current_euid();
431                 break;
432         default:
433                 err = -EINVAL;
434                 goto done;
435         }
436         return fl;
437
438 done:
439         fl_free(fl);
440         *err_p = err;
441         return NULL;
442 }
443
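/*
 * Resource limits: the table as a whole is capped at FL_MAX_SIZE (4096)
 * labels.  Callers without CAP_NET_ADMIN are additionally limited to
 * FL_MAX_PER_SOCK (32) labels per socket and are refused once the table is
 * more than half full (if they already hold a label) or more than
 * three-quarters full.
 */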
444 static int mem_check(struct sock *sk)
445 {
446         struct ipv6_pinfo *np = inet6_sk(sk);
447         struct ipv6_fl_socklist *sfl;
448         int room = FL_MAX_SIZE - atomic_read(&fl_size);
449         int count = 0;
450
451         if (room > FL_MAX_SIZE - FL_MAX_PER_SOCK)
452                 return 0;
453
454         rcu_read_lock_bh();
455         for_each_sk_fl_rcu(np, sfl)
456                 count++;
457         rcu_read_unlock_bh();
458
459         if (room <= 0 ||
460             ((count >= FL_MAX_PER_SOCK ||
461               (count > 0 && room < FL_MAX_SIZE/2) || room < FL_MAX_SIZE/4) &&
462              !capable(CAP_NET_ADMIN)))
463                 return -ENOBUFS;
464
465         return 0;
466 }
467
468 static inline void fl_link(struct ipv6_pinfo *np, struct ipv6_fl_socklist *sfl,
469                 struct ip6_flowlabel *fl)
470 {
471         spin_lock_bh(&ip6_sk_fl_lock);
472         sfl->fl = fl;
473         sfl->next = np->ipv6_fl_list;
474         rcu_assign_pointer(np->ipv6_fl_list, sfl);
475         spin_unlock_bh(&ip6_sk_fl_lock);
476 }
477
478 int ipv6_flowlabel_opt_get(struct sock *sk, struct in6_flowlabel_req *freq,
479                            int flags)
480 {
481         struct ipv6_pinfo *np = inet6_sk(sk);
482         struct ipv6_fl_socklist *sfl;
483
484         if (flags & IPV6_FL_F_REMOTE) {
485                 freq->flr_label = np->rcv_flowinfo & IPV6_FLOWLABEL_MASK;
486                 return 0;
487         }
488
489         if (np->repflow) {
490                 freq->flr_label = np->flow_label;
491                 return 0;
492         }
493
494         rcu_read_lock_bh();
495
496         for_each_sk_fl_rcu(np, sfl) {
497                 if (sfl->fl->label == (np->flow_label & IPV6_FLOWLABEL_MASK)) {
498                         spin_lock_bh(&ip6_fl_lock);
499                         freq->flr_label = sfl->fl->label;
500                         freq->flr_dst = sfl->fl->dst;
501                         freq->flr_share = sfl->fl->share;
502                         freq->flr_expires = (sfl->fl->expires - jiffies) / HZ;
503                         freq->flr_linger = sfl->fl->linger / HZ;
504
505                         spin_unlock_bh(&ip6_fl_lock);
506                         rcu_read_unlock_bh();
507                         return 0;
508                 }
509         }
510         rcu_read_unlock_bh();
511
512         return -ENOENT;
513 }
514
515 int ipv6_flowlabel_opt(struct sock *sk, char __user *optval, int optlen)
516 {
517         int uninitialized_var(err);
518         struct net *net = sock_net(sk);
519         struct ipv6_pinfo *np = inet6_sk(sk);
520         struct in6_flowlabel_req freq;
521         struct ipv6_fl_socklist *sfl1=NULL;
522         struct ipv6_fl_socklist *sfl;
523         struct ipv6_fl_socklist __rcu **sflp;
524         struct ip6_flowlabel *fl, *fl1 = NULL;
525
526
527         if (optlen < sizeof(freq))
528                 return -EINVAL;
529
530         if (copy_from_user(&freq, optval, sizeof(freq)))
531                 return -EFAULT;
532
533         switch (freq.flr_action) {
534         case IPV6_FL_A_PUT:
535                 if (freq.flr_flags & IPV6_FL_F_REFLECT) {
536                         if (sk->sk_protocol != IPPROTO_TCP)
537                                 return -ENOPROTOOPT;
538                         if (!np->repflow)
539                                 return -ESRCH;
540                         np->flow_label = 0;
541                         np->repflow = 0;
542                         return 0;
543                 }
544                 spin_lock_bh(&ip6_sk_fl_lock);
545                 for (sflp = &np->ipv6_fl_list;
546                      (sfl = rcu_dereference(*sflp))!=NULL;
547                      sflp = &sfl->next) {
548                         if (sfl->fl->label == freq.flr_label) {
549                                 if (freq.flr_label == (np->flow_label&IPV6_FLOWLABEL_MASK))
550                                         np->flow_label &= ~IPV6_FLOWLABEL_MASK;
551                                 *sflp = rcu_dereference(sfl->next);
552                                 spin_unlock_bh(&ip6_sk_fl_lock);
553                                 fl_release(sfl->fl);
554                                 kfree_rcu(sfl, rcu);
555                                 return 0;
556                         }
557                 }
558                 spin_unlock_bh(&ip6_sk_fl_lock);
559                 return -ESRCH;
560
561         case IPV6_FL_A_RENEW:
562                 rcu_read_lock_bh();
563                 for_each_sk_fl_rcu(np, sfl) {
564                         if (sfl->fl->label == freq.flr_label) {
565                                 err = fl6_renew(sfl->fl, freq.flr_linger, freq.flr_expires);
566                                 rcu_read_unlock_bh();
567                                 return err;
568                         }
569                 }
570                 rcu_read_unlock_bh();
571
572                 if (freq.flr_share == IPV6_FL_S_NONE &&
573                     ns_capable(net->user_ns, CAP_NET_ADMIN)) {
574                         fl = fl_lookup(net, freq.flr_label);
575                         if (fl) {
576                                 err = fl6_renew(fl, freq.flr_linger, freq.flr_expires);
577                                 fl_release(fl);
578                                 return err;
579                         }
580                 }
581                 return -ESRCH;
582
583         case IPV6_FL_A_GET:
584                 if (freq.flr_flags & IPV6_FL_F_REFLECT) {
585                         struct net *net = sock_net(sk);
586                         if (net->ipv6.sysctl.flowlabel_consistency) {
587                                 net_info_ratelimited("Cannot set IPV6_FL_F_REFLECT if flowlabel_consistency sysctl is enabled\n");
588                                 return -EPERM;
589                         }
590
591                         if (sk->sk_protocol != IPPROTO_TCP)
592                                 return -ENOPROTOOPT;
593
594                         np->repflow = 1;
595                         return 0;
596                 }
597
598                 if (freq.flr_label & ~IPV6_FLOWLABEL_MASK)
599                         return -EINVAL;
600
601                 fl = fl_create(net, sk, &freq, optval, optlen, &err);
602                 if (fl == NULL)
603                         return err;
604                 sfl1 = kmalloc(sizeof(*sfl1), GFP_KERNEL);
605
606                 if (freq.flr_label) {
607                         err = -EEXIST;
608                         rcu_read_lock_bh();
609                         for_each_sk_fl_rcu(np, sfl) {
610                                 if (sfl->fl->label == freq.flr_label) {
611                                         if (freq.flr_flags&IPV6_FL_F_EXCL) {
612                                                 rcu_read_unlock_bh();
613                                                 goto done;
614                                         }
615                                         fl1 = sfl->fl;
616                                         atomic_inc(&fl1->users);
617                                         break;
618                                 }
619                         }
620                         rcu_read_unlock_bh();
621
622                         if (fl1 == NULL)
623                                 fl1 = fl_lookup(net, freq.flr_label);
624                         if (fl1) {
625 recheck:
626                                 err = -EEXIST;
627                                 if (freq.flr_flags&IPV6_FL_F_EXCL)
628                                         goto release;
629                                 err = -EPERM;
630                                 if (fl1->share == IPV6_FL_S_EXCL ||
631                                     fl1->share != fl->share ||
632                                     ((fl1->share == IPV6_FL_S_PROCESS) &&
633                                      (fl1->owner.pid == fl->owner.pid)) ||
634                                     ((fl1->share == IPV6_FL_S_USER) &&
635                                      uid_eq(fl1->owner.uid, fl->owner.uid)))
636                                         goto release;
637
638                                 err = -ENOMEM;
639                                 if (sfl1 == NULL)
640                                         goto release;
641                                 if (fl->linger > fl1->linger)
642                                         fl1->linger = fl->linger;
643                                 if ((long)(fl->expires - fl1->expires) > 0)
644                                         fl1->expires = fl->expires;
645                                 fl_link(np, sfl1, fl1);
646                                 fl_free(fl);
647                                 return 0;
648
649 release:
650                                 fl_release(fl1);
651                                 goto done;
652                         }
653                 }
654                 err = -ENOENT;
655                 if (!(freq.flr_flags&IPV6_FL_F_CREATE))
656                         goto done;
657
658                 err = -ENOMEM;
659                 if (sfl1 == NULL || (err = mem_check(sk)) != 0)
660                         goto done;
661
662                 fl1 = fl_intern(net, fl, freq.flr_label);
663                 if (fl1 != NULL)
664                         goto recheck;
665
666                 if (!freq.flr_label) {
667                         if (copy_to_user(&((struct in6_flowlabel_req __user *) optval)->flr_label,
668                                          &fl->label, sizeof(fl->label))) {
669                                 /* Intentionally ignore fault. */
670                         }
671                 }
672
673                 fl_link(np, sfl1, fl);
674                 return 0;
675
676         default:
677                 return -EINVAL;
678         }
679
680 done:
681         fl_free(fl);
682         kfree(sfl1);
683         return err;
684 }
685
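/*
 * Usage sketch (not part of the kernel code): how userspace typically drives
 * this setsockopt() interface.  It assumes the IPV6_FLOWLABEL_MGR,
 * IPV6_FLOWINFO_SEND and struct in6_flowlabel_req definitions from
 * <linux/in6.h> are in scope; error handling is omitted.
 *
 *	struct in6_flowlabel_req freq;
 *	struct sockaddr_in6 dst;	// filled in with the peer's address
 *	int fd = socket(AF_INET6, SOCK_DGRAM, 0);
 *	int on = 1;
 *
 *	memset(&freq, 0, sizeof(freq));
 *	freq.flr_dst     = dst.sin6_addr;	// labels are bound to a destination
 *	freq.flr_label   = 0;			// 0: let the kernel pick a free label
 *	freq.flr_action  = IPV6_FL_A_GET;
 *	freq.flr_flags   = IPV6_FL_F_CREATE;	// create it if it does not exist
 *	freq.flr_share   = IPV6_FL_S_EXCL;	// see the share checks in fl_create()
 *	freq.flr_linger  = 6;			// seconds, clamped by check_linger()
 *	freq.flr_expires = 30;
 *	setsockopt(fd, IPPROTO_IPV6, IPV6_FLOWLABEL_MGR, &freq, sizeof(freq));
 *
 *	// The chosen label is copied back into freq.flr_label (see the
 *	// copy_to_user() above).  To put it on outgoing packets, enable
 *	// IPV6_FLOWINFO_SEND and place the label in sin6_flowinfo.
 *	setsockopt(fd, IPPROTO_IPV6, IPV6_FLOWINFO_SEND, &on, sizeof(on));
 *	dst.sin6_flowinfo = freq.flr_label;
 */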
686 #ifdef CONFIG_PROC_FS
687
688 struct ip6fl_iter_state {
689         struct seq_net_private p;
690         struct pid_namespace *pid_ns;
691         int bucket;
692 };
693
694 #define ip6fl_seq_private(seq)  ((struct ip6fl_iter_state *)(seq)->private)
695
696 static struct ip6_flowlabel *ip6fl_get_first(struct seq_file *seq)
697 {
698         struct ip6_flowlabel *fl = NULL;
699         struct ip6fl_iter_state *state = ip6fl_seq_private(seq);
700         struct net *net = seq_file_net(seq);
701
702         for (state->bucket = 0; state->bucket <= FL_HASH_MASK; ++state->bucket) {
703                 for_each_fl_rcu(state->bucket, fl) {
704                         if (net_eq(fl->fl_net, net))
705                                 goto out;
706                 }
707         }
708         fl = NULL;
709 out:
710         return fl;
711 }
712
713 static struct ip6_flowlabel *ip6fl_get_next(struct seq_file *seq, struct ip6_flowlabel *fl)
714 {
715         struct ip6fl_iter_state *state = ip6fl_seq_private(seq);
716         struct net *net = seq_file_net(seq);
717
718         for_each_fl_continue_rcu(fl) {
719                 if (net_eq(fl->fl_net, net))
720                         goto out;
721         }
722
723 try_again:
724         if (++state->bucket <= FL_HASH_MASK) {
725                 for_each_fl_rcu(state->bucket, fl) {
726                         if (net_eq(fl->fl_net, net))
727                                 goto out;
728                 }
729                 goto try_again;
730         }
731         fl = NULL;
732
733 out:
734         return fl;
735 }
736
737 static struct ip6_flowlabel *ip6fl_get_idx(struct seq_file *seq, loff_t pos)
738 {
739         struct ip6_flowlabel *fl = ip6fl_get_first(seq);
740         if (fl)
741                 while (pos && (fl = ip6fl_get_next(seq, fl)) != NULL)
742                         --pos;
743         return pos ? NULL : fl;
744 }
745
746 static void *ip6fl_seq_start(struct seq_file *seq, loff_t *pos)
747         __acquires(RCU)
748 {
749         rcu_read_lock_bh();
750         return *pos ? ip6fl_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
751 }
752
753 static void *ip6fl_seq_next(struct seq_file *seq, void *v, loff_t *pos)
754 {
755         struct ip6_flowlabel *fl;
756
757         if (v == SEQ_START_TOKEN)
758                 fl = ip6fl_get_first(seq);
759         else
760                 fl = ip6fl_get_next(seq, v);
761         ++*pos;
762         return fl;
763 }
764
765 static void ip6fl_seq_stop(struct seq_file *seq, void *v)
766         __releases(RCU)
767 {
768         rcu_read_unlock_bh();
769 }
770
771 static int ip6fl_seq_show(struct seq_file *seq, void *v)
772 {
773         struct ip6fl_iter_state *state = ip6fl_seq_private(seq);
774         if (v == SEQ_START_TOKEN)
775                 seq_printf(seq, "%-5s %-1s %-6s %-6s %-6s %-8s %-32s %s\n",
776                            "Label", "S", "Owner", "Users", "Linger", "Expires", "Dst", "Opt");
777         else {
778                 struct ip6_flowlabel *fl = v;
779                 seq_printf(seq,
780                            "%05X %-1d %-6d %-6d %-6ld %-8ld %pi6 %-4d\n",
781                            (unsigned int)ntohl(fl->label),
782                            fl->share,
783                            ((fl->share == IPV6_FL_S_PROCESS) ?
784                             pid_nr_ns(fl->owner.pid, state->pid_ns) :
785                             ((fl->share == IPV6_FL_S_USER) ?
786                              from_kuid_munged(seq_user_ns(seq), fl->owner.uid) :
787                              0)),
788                            atomic_read(&fl->users),
789                            fl->linger/HZ,
790                            (long)(fl->expires - jiffies)/HZ,
791                            &fl->dst,
792                            fl->opt ? fl->opt->opt_nflen : 0);
793         }
794         return 0;
795 }
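/*
 * Example of the resulting /proc/net/ip6_flowlabel output (one hypothetical
 * entry, column widths approximate):
 *
 *	Label S Owner  Users  Linger Expires  Dst                              Opt
 *	1ABCD 1 0      1      6      6        2001:0db8:0000:0000:0000:0000:0000:0001 0
 */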
796
797 static const struct seq_operations ip6fl_seq_ops = {
798         .start  =       ip6fl_seq_start,
799         .next   =       ip6fl_seq_next,
800         .stop   =       ip6fl_seq_stop,
801         .show   =       ip6fl_seq_show,
802 };
803
804 static int ip6fl_seq_open(struct inode *inode, struct file *file)
805 {
806         struct seq_file *seq;
807         struct ip6fl_iter_state *state;
808         int err;
809
810         err = seq_open_net(inode, file, &ip6fl_seq_ops,
811                            sizeof(struct ip6fl_iter_state));
812
813         if (!err) {
814                 seq = file->private_data;
815                 state = ip6fl_seq_private(seq);
816                 rcu_read_lock();
817                 state->pid_ns = get_pid_ns(task_active_pid_ns(current));
818                 rcu_read_unlock();
819         }
820         return err;
821 }
822
823 static int ip6fl_seq_release(struct inode *inode, struct file *file)
824 {
825         struct seq_file *seq = file->private_data;
826         struct ip6fl_iter_state *state = ip6fl_seq_private(seq);
827         put_pid_ns(state->pid_ns);
828         return seq_release_net(inode, file);
829 }
830
831 static const struct file_operations ip6fl_seq_fops = {
832         .owner          =       THIS_MODULE,
833         .open           =       ip6fl_seq_open,
834         .read           =       seq_read,
835         .llseek         =       seq_lseek,
836         .release        =       ip6fl_seq_release,
837 };
838
839 static int __net_init ip6_flowlabel_proc_init(struct net *net)
840 {
841         if (!proc_create("ip6_flowlabel", S_IRUGO, net->proc_net,
842                          &ip6fl_seq_fops))
843                 return -ENOMEM;
844         return 0;
845 }
846
847 static void __net_exit ip6_flowlabel_proc_fini(struct net *net)
848 {
849         remove_proc_entry("ip6_flowlabel", net->proc_net);
850 }
851 #else
852 static inline int ip6_flowlabel_proc_init(struct net *net)
853 {
854         return 0;
855 }
856 static inline void ip6_flowlabel_proc_fini(struct net *net)
857 {
858 }
859 #endif
860
861 static void __net_exit ip6_flowlabel_net_exit(struct net *net)
862 {
863         ip6_fl_purge(net);
864         ip6_flowlabel_proc_fini(net);
865 }
866
867 static struct pernet_operations ip6_flowlabel_net_ops = {
868         .init = ip6_flowlabel_proc_init,
869         .exit = ip6_flowlabel_net_exit,
870 };
871
872 int ip6_flowlabel_init(void)
873 {
874         return register_pernet_subsys(&ip6_flowlabel_net_ops);
875 }
876
877 void ip6_flowlabel_cleanup(void)
878 {
879         del_timer(&ip6_fl_gc_timer);
880         unregister_pernet_subsys(&ip6_flowlabel_net_ops);
881 }