/*
 * net/sunrpc/cache.c
 *
 * Generic code for various authentication-related caches
 * used by sunrpc clients and servers.
 *
 * Copyright (C) 2002 Neil Brown <neilb@cse.unsw.edu.au>
 *
 * Released under terms in GPL version 2.  See COPYING.
 *
 */

#include <linux/types.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kmod.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/ctype.h>
#include <asm/uaccess.h>
#include <linux/poll.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/net.h>
#include <linux/workqueue.h>
#include <linux/mutex.h>
#include <linux/pagemap.h>
#include <asm/ioctls.h>
#include <linux/sunrpc/types.h>
#include <linux/sunrpc/cache.h>
#include <linux/sunrpc/stats.h>
#include <linux/sunrpc/rpc_pipe_fs.h>
#include "netns.h"

#define RPCDBG_FACILITY RPCDBG_CACHE

static bool cache_defer_req(struct cache_req *req, struct cache_head *item);
static void cache_revisit_request(struct cache_head *item);

static void cache_init(struct cache_head *h)
{
	time_t now = seconds_since_boot();
	h->next = NULL;
	h->flags = 0;
	kref_init(&h->ref);
	h->expiry_time = now + CACHE_NEW_EXPIRY;
	h->last_refresh = now;
}

struct cache_head *sunrpc_cache_lookup(struct cache_detail *detail,
				       struct cache_head *key, int hash)
{
	struct cache_head **head, **hp;
	struct cache_head *new = NULL, *freeme = NULL;

	head = &detail->hash_table[hash];

	read_lock(&detail->hash_lock);

	for (hp = head; *hp != NULL; hp = &(*hp)->next) {
		struct cache_head *tmp = *hp;
		if (detail->match(tmp, key)) {
			if (cache_is_expired(detail, tmp))
				/* This entry is expired, we will discard it. */
				break;
			cache_get(tmp);
			read_unlock(&detail->hash_lock);
			return tmp;
		}
	}
	read_unlock(&detail->hash_lock);
	/* Didn't find anything, insert an empty entry */

	new = detail->alloc();
	if (!new)
		return NULL;
	/* must fully initialise 'new', else
	 * we might get lost if we need to
	 * cache_put it soon.
	 */
	cache_init(new);
	detail->init(new, key);

	write_lock(&detail->hash_lock);

	/* check if entry appeared while we slept */
	for (hp = head; *hp != NULL; hp = &(*hp)->next) {
		struct cache_head *tmp = *hp;
		if (detail->match(tmp, key)) {
			if (cache_is_expired(detail, tmp)) {
				*hp = tmp->next;
				tmp->next = NULL;
				detail->entries--;
				freeme = tmp;
				break;
			}
			cache_get(tmp);
			write_unlock(&detail->hash_lock);
			cache_put(new, detail);
			return tmp;
		}
	}
	new->next = *head;
	*head = new;
	detail->entries++;
	cache_get(new);
	write_unlock(&detail->hash_lock);

	if (freeme)
		cache_put(freeme, detail);
	return new;
}
EXPORT_SYMBOL_GPL(sunrpc_cache_lookup);
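
/*
 * Typical usage (illustrative sketch only; 'struct mycache' and its
 * members are hypothetical, not part of this file): a cache wraps
 * sunrpc_cache_lookup() in a type-safe helper that fills a key on
 * the stack and converts the returned cache_head back to the
 * containing structure:
 *
 *	static struct mycache *mycache_lookup(struct cache_detail *cd,
 *					      struct mycache *key, int hash)
 *	{
 *		struct cache_head *ch;
 *
 *		ch = sunrpc_cache_lookup(cd, &key->h, hash);
 *		if (ch)
 *			return container_of(ch, struct mycache, h);
 *		return NULL;
 *	}
 */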


static void cache_dequeue(struct cache_detail *detail, struct cache_head *ch);

static void cache_fresh_locked(struct cache_head *head, time_t expiry)
{
	head->expiry_time = expiry;
	head->last_refresh = seconds_since_boot();
	smp_wmb(); /* paired with smp_rmb() in cache_is_valid() */
	set_bit(CACHE_VALID, &head->flags);
}

static void cache_fresh_unlocked(struct cache_head *head,
				 struct cache_detail *detail)
{
	if (test_and_clear_bit(CACHE_PENDING, &head->flags)) {
		cache_revisit_request(head);
		cache_dequeue(detail, head);
	}
}

struct cache_head *sunrpc_cache_update(struct cache_detail *detail,
				       struct cache_head *new, struct cache_head *old, int hash)
{
	/* The 'old' entry is to be replaced by 'new'.
	 * If 'old' is not VALID, we update it directly,
	 * otherwise we need to replace it.
	 */
	struct cache_head **head;
	struct cache_head *tmp;

	if (!test_bit(CACHE_VALID, &old->flags)) {
		write_lock(&detail->hash_lock);
		if (!test_bit(CACHE_VALID, &old->flags)) {
			if (test_bit(CACHE_NEGATIVE, &new->flags))
				set_bit(CACHE_NEGATIVE, &old->flags);
			else
				detail->update(old, new);
			cache_fresh_locked(old, new->expiry_time);
			write_unlock(&detail->hash_lock);
			cache_fresh_unlocked(old, detail);
			return old;
		}
		write_unlock(&detail->hash_lock);
	}
	/* We need to insert a new entry */
	tmp = detail->alloc();
	if (!tmp) {
		cache_put(old, detail);
		return NULL;
	}
	cache_init(tmp);
	detail->init(tmp, old);
	head = &detail->hash_table[hash];

	write_lock(&detail->hash_lock);
	if (test_bit(CACHE_NEGATIVE, &new->flags))
		set_bit(CACHE_NEGATIVE, &tmp->flags);
	else
		detail->update(tmp, new);
	tmp->next = *head;
	*head = tmp;
	detail->entries++;
	cache_get(tmp);
	cache_fresh_locked(tmp, new->expiry_time);
	cache_fresh_locked(old, 0);
	write_unlock(&detail->hash_lock);
	cache_fresh_unlocked(tmp, detail);
	cache_fresh_unlocked(old, detail);
	cache_put(old, detail);
	return tmp;
}
EXPORT_SYMBOL_GPL(sunrpc_cache_update);
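
/*
 * Typical usage (illustrative sketch; 'struct mycache', 'key' and
 * 'val' are hypothetical): after parsing a downcall that describes
 * entry 'key' with fresh content 'val', a cache looks the key up and
 * swaps the new content in:
 *
 *	ch = sunrpc_cache_lookup(cd, &key->h, hash);
 *	if (!ch)
 *		return -ENOMEM;
 *	item = container_of(ch, struct mycache, h);
 *	ch = sunrpc_cache_update(cd, &val->h, &item->h, hash);
 *	if (!ch)
 *		return -ENOMEM;
 *	cache_put(ch, cd);
 *
 * Note that sunrpc_cache_update() consumes the reference on 'old' and
 * returns a counted reference on the resulting entry, which the caller
 * must cache_put() when done.
 */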

static int cache_make_upcall(struct cache_detail *cd, struct cache_head *h)
{
	if (cd->cache_upcall)
		return cd->cache_upcall(cd, h);
	return sunrpc_cache_pipe_upcall(cd, h);
}

static inline int cache_is_valid(struct cache_head *h)
{
	if (!test_bit(CACHE_VALID, &h->flags))
		return -EAGAIN;
	else {
		/* entry is valid */
		if (test_bit(CACHE_NEGATIVE, &h->flags))
			return -ENOENT;
		else {
			/*
			 * In combination with write barrier in
			 * sunrpc_cache_update, ensures that anyone
			 * using the cache entry after this sees the
			 * updated contents:
			 */
			smp_rmb();
			return 0;
		}
	}
}

static int try_to_negate_entry(struct cache_detail *detail, struct cache_head *h)
{
	int rv;

	write_lock(&detail->hash_lock);
	rv = cache_is_valid(h);
	if (rv == -EAGAIN) {
		set_bit(CACHE_NEGATIVE, &h->flags);
		cache_fresh_locked(h, seconds_since_boot()+CACHE_NEW_EXPIRY);
		rv = -ENOENT;
	}
	write_unlock(&detail->hash_lock);
	cache_fresh_unlocked(h, detail);
	return rv;
}

/*
 * This is the generic cache management routine for all
 * the authentication caches.
 * It checks the currency of a cache item and will (later)
 * initiate an upcall to fill it if needed.
 *
 * Returns 0 if the cache_head can be used; otherwise it cache_puts it
 * and returns:
 * -EAGAIN    if the upcall is pending and the request has been queued
 * -ETIMEDOUT if the upcall failed, or the request could not be queued,
 *            or the upcall completed but the item is still invalid
 *            (implying that the cache item has been replaced with a
 *            newer one).
 * -ENOENT    if the cache entry was negative
 */
int cache_check(struct cache_detail *detail,
		    struct cache_head *h, struct cache_req *rqstp)
{
	int rv;
	long refresh_age, age;

	/* First decide return status as best we can */
	rv = cache_is_valid(h);

	/* now see if we want to start an upcall */
	refresh_age = (h->expiry_time - h->last_refresh);
	age = seconds_since_boot() - h->last_refresh;

	if (rqstp == NULL) {
		if (rv == -EAGAIN)
			rv = -ENOENT;
	} else if (rv == -EAGAIN || age > refresh_age/2) {
		dprintk("RPC:       Want update, refage=%ld, age=%ld\n",
				refresh_age, age);
		if (!test_and_set_bit(CACHE_PENDING, &h->flags)) {
			switch (cache_make_upcall(detail, h)) {
			case -EINVAL:
				rv = try_to_negate_entry(detail, h);
				break;
			case -EAGAIN:
				cache_fresh_unlocked(h, detail);
				break;
			}
		}
	}

	if (rv == -EAGAIN) {
		if (!cache_defer_req(rqstp, h)) {
			/*
			 * Request was not deferred; handle it as best
			 * we can ourselves:
			 */
			rv = cache_is_valid(h);
			if (rv == -EAGAIN)
				rv = -ETIMEDOUT;
		}
	}
	if (rv)
		cache_put(h, detail);
	return rv;
}
EXPORT_SYMBOL_GPL(cache_check);
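
/*
 * A caller holds a reference on 'h' and dispatches on the return
 * value, roughly like this (illustrative sketch; the SVC_* handling
 * around it is an assumption, not part of this file):
 *
 *	switch (cache_check(cd, &item->h, &rqstp->rq_chandle)) {
 *	case 0:
 *		break;			(valid; reference retained)
 *	case -EAGAIN:
 *		return SVC_DROP;	(deferred; will be revisited)
 *	case -ENOENT:
 *		return SVC_DENIED;	(negative entry)
 *	default:
 *		return SVC_DROP;	(-ETIMEDOUT etc.)
 *	}
 */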

/*
 * caches need to be periodically cleaned.
 * For this we maintain a list of cache_detail and
 * a current pointer into that list and into the table
 * for that entry.
 *
 * Each time cache_clean is called it finds the next non-empty entry
 * in the current table and walks the list in that entry
 * looking for entries that can be removed.
 *
 * An entry gets removed if:
 * - The expiry is before current time
 * - The last_refresh time is before the flush_time for that cache
 *
 * later we might drop old entries with non-NEVER expiry if that table
 * is getting 'full' for some definition of 'full'
 *
 * The question of "how often to scan a table" is an interesting one
 * and is answered in part by the use of the "nextcheck" field in the
 * cache_detail.
 * When a scan of a table begins, the nextcheck field is set to a time
 * that is well into the future.
 * While scanning, if an expiry time is found that is earlier than the
 * current nextcheck time, nextcheck is set to that expiry time.
 * If the flush_time is ever set to a time earlier than the nextcheck
 * time, the nextcheck time is then set to that flush_time.
 *
 * A table is then only scanned if the current time is at least
 * the nextcheck time.
 *
 */
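
/*
 * For example (hypothetical numbers): when a scan starts, nextcheck is
 * set to now + 30 minutes.  If the scan then sees entries expiring at
 * t=100 and t=250 while now is 90, nextcheck ends up as 101, so the
 * table is rescanned as soon as the earliest entry is due to expire
 * rather than after the full 30 minutes.
 */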

static LIST_HEAD(cache_list);
static DEFINE_SPINLOCK(cache_list_lock);
static struct cache_detail *current_detail;
static int current_index;

static void do_cache_clean(struct work_struct *work);
static struct delayed_work cache_cleaner;

void sunrpc_init_cache_detail(struct cache_detail *cd)
{
	rwlock_init(&cd->hash_lock);
	INIT_LIST_HEAD(&cd->queue);
	spin_lock(&cache_list_lock);
	cd->nextcheck = 0;
	cd->entries = 0;
	atomic_set(&cd->readers, 0);
	cd->last_close = 0;
	cd->last_warn = -1;
	list_add(&cd->others, &cache_list);
	spin_unlock(&cache_list_lock);

	/* start the cleaning process */
	schedule_delayed_work(&cache_cleaner, 0);
}
EXPORT_SYMBOL_GPL(sunrpc_init_cache_detail);

void sunrpc_destroy_cache_detail(struct cache_detail *cd)
{
	cache_purge(cd);
	spin_lock(&cache_list_lock);
	write_lock(&cd->hash_lock);
	if (cd->entries || atomic_read(&cd->inuse)) {
		write_unlock(&cd->hash_lock);
		spin_unlock(&cache_list_lock);
		goto out;
	}
	if (current_detail == cd)
		current_detail = NULL;
	list_del_init(&cd->others);
	write_unlock(&cd->hash_lock);
	spin_unlock(&cache_list_lock);
	if (list_empty(&cache_list)) {
		/* module must be being unloaded, so it's safe to kill the worker */
		cancel_delayed_work_sync(&cache_cleaner);
	}
	return;
out:
	printk(KERN_ERR "nfsd: failed to unregister %s cache\n", cd->name);
}
EXPORT_SYMBOL_GPL(sunrpc_destroy_cache_detail);

/* clean cache tries to find something to clean
 * and cleans it.
 * It returns 1 if it cleaned something,
 *            0 if it didn't find anything this time
 *           -1 if it fell off the end of the list.
 */
static int cache_clean(void)
{
	int rv = 0;
	struct list_head *next;

	spin_lock(&cache_list_lock);

	/* find a suitable table if we don't already have one */
	while (current_detail == NULL ||
	    current_index >= current_detail->hash_size) {
		if (current_detail)
			next = current_detail->others.next;
		else
			next = cache_list.next;
		if (next == &cache_list) {
			current_detail = NULL;
			spin_unlock(&cache_list_lock);
			return -1;
		}
		current_detail = list_entry(next, struct cache_detail, others);
		if (current_detail->nextcheck > seconds_since_boot())
			current_index = current_detail->hash_size;
		else {
			current_index = 0;
			current_detail->nextcheck = seconds_since_boot()+30*60;
		}
	}

	/* find a non-empty bucket in the table */
	while (current_detail &&
	       current_index < current_detail->hash_size &&
	       current_detail->hash_table[current_index] == NULL)
		current_index++;

	/* find a cleanable entry in the bucket and clean it, or set to next bucket */

	if (current_detail && current_index < current_detail->hash_size) {
		struct cache_head *ch, **cp;
		struct cache_detail *d;

		write_lock(&current_detail->hash_lock);

		/* Ok, now to clean this strand */

		cp = &current_detail->hash_table[current_index];
		for (ch = *cp; ch; cp = &ch->next, ch = *cp) {
			if (current_detail->nextcheck > ch->expiry_time)
				current_detail->nextcheck = ch->expiry_time+1;
			if (!cache_is_expired(current_detail, ch))
				continue;

			*cp = ch->next;
			ch->next = NULL;
			current_detail->entries--;
			rv = 1;
			break;
		}

		write_unlock(&current_detail->hash_lock);
		d = current_detail;
		if (!ch)
			current_index++;
		spin_unlock(&cache_list_lock);
		if (ch) {
			set_bit(CACHE_CLEANED, &ch->flags);
			cache_fresh_unlocked(ch, d);
			cache_put(ch, d);
		}
	} else
		spin_unlock(&cache_list_lock);

	return rv;
}

/*
 * We want to regularly clean the cache, so we need to schedule some work ...
 */
static void do_cache_clean(struct work_struct *work)
{
	int delay = 5;
	if (cache_clean() == -1)
		delay = round_jiffies_relative(30*HZ);

	if (list_empty(&cache_list))
		delay = 0;

	if (delay)
		schedule_delayed_work(&cache_cleaner, delay);
}

/*
 * Clean all caches promptly.  This just calls cache_clean
 * repeatedly until we are sure that every cache has had a chance to
 * be fully cleaned
 */
void cache_flush(void)
{
	while (cache_clean() != -1)
		cond_resched();
	while (cache_clean() != -1)
		cond_resched();
}
EXPORT_SYMBOL_GPL(cache_flush);

void cache_purge(struct cache_detail *detail)
{
	detail->flush_time = LONG_MAX;
	detail->nextcheck = seconds_since_boot();
	cache_flush();
	detail->flush_time = 1;
}
EXPORT_SYMBOL_GPL(cache_purge);

/*
 * Deferral and Revisiting of Requests.
 *
 * If a cache lookup finds a pending entry, we
 * need to defer the request and revisit it later.
 * All deferred requests are stored in a hash table,
 * indexed by "struct cache_head *".
 * As it may be wasteful to store a whole request
 * structure, we allow the request to provide a
 * deferred form, which must contain a
 * 'struct cache_deferred_req'
 * This cache_deferred_req contains a method to allow
 * it to be revisited when cache info is available
 */
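
/*
 * A request provider supplies the deferred form through its ->defer
 * method (illustrative sketch only; 'struct mydeferred_req' and
 * mycache_revisit are hypothetical):
 *
 *	struct mydeferred_req {
 *		struct cache_deferred_req handle;
 *		(enough saved state to replay the request)
 *	};
 *
 *	static struct cache_deferred_req *mycache_defer(struct cache_req *req)
 *	{
 *		struct mydeferred_req *dr;
 *
 *		dr = kmalloc(sizeof(*dr), GFP_KERNEL);
 *		if (!dr)
 *			return NULL;
 *		dr->handle.revisit = mycache_revisit;
 *		return &dr->handle;
 *	}
 *
 * The ->revisit method is later called with too_many == 0 to replay
 * the request, or too_many == 1 when the deferral is being discarded.
 */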

#define DFR_HASHSIZE	(PAGE_SIZE/sizeof(struct list_head))
#define DFR_HASH(item)	((((long)item)>>4 ^ (((long)item)>>13)) % DFR_HASHSIZE)

#define DFR_MAX 300	/* ??? */

static DEFINE_SPINLOCK(cache_defer_lock);
static LIST_HEAD(cache_defer_list);
static struct hlist_head cache_defer_hash[DFR_HASHSIZE];
static int cache_defer_cnt;

static void __unhash_deferred_req(struct cache_deferred_req *dreq)
{
	hlist_del_init(&dreq->hash);
	if (!list_empty(&dreq->recent)) {
		list_del_init(&dreq->recent);
		cache_defer_cnt--;
	}
}

static void __hash_deferred_req(struct cache_deferred_req *dreq, struct cache_head *item)
{
	int hash = DFR_HASH(item);

	INIT_LIST_HEAD(&dreq->recent);
	hlist_add_head(&dreq->hash, &cache_defer_hash[hash]);
}

static void setup_deferral(struct cache_deferred_req *dreq,
			   struct cache_head *item,
			   int count_me)
{
	dreq->item = item;

	spin_lock(&cache_defer_lock);

	__hash_deferred_req(dreq, item);

	if (count_me) {
		cache_defer_cnt++;
		list_add(&dreq->recent, &cache_defer_list);
	}

	spin_unlock(&cache_defer_lock);
}

struct thread_deferred_req {
	struct cache_deferred_req handle;
	struct completion completion;
};

static void cache_restart_thread(struct cache_deferred_req *dreq, int too_many)
{
	struct thread_deferred_req *dr =
		container_of(dreq, struct thread_deferred_req, handle);
	complete(&dr->completion);
}

static void cache_wait_req(struct cache_req *req, struct cache_head *item)
{
	struct thread_deferred_req sleeper;
	struct cache_deferred_req *dreq = &sleeper.handle;

	sleeper.completion = COMPLETION_INITIALIZER_ONSTACK(sleeper.completion);
	dreq->revisit = cache_restart_thread;

	setup_deferral(dreq, item, 0);

	if (!test_bit(CACHE_PENDING, &item->flags) ||
	    wait_for_completion_interruptible_timeout(
		    &sleeper.completion, req->thread_wait) <= 0) {
		/* The completion wasn't completed, so we need
		 * to clean up
		 */
		spin_lock(&cache_defer_lock);
		if (!hlist_unhashed(&sleeper.handle.hash)) {
			__unhash_deferred_req(&sleeper.handle);
			spin_unlock(&cache_defer_lock);
		} else {
			/* cache_revisit_request already removed
			 * this from the hash table, but hasn't
			 * called ->revisit yet.  It will very soon
			 * and we need to wait for it.
			 */
			spin_unlock(&cache_defer_lock);
			wait_for_completion(&sleeper.completion);
		}
	}
}

static void cache_limit_defers(void)
{
	/* Make sure we haven't exceeded the limit of allowed deferred
	 * requests.
	 */
	struct cache_deferred_req *discard = NULL;

	if (cache_defer_cnt <= DFR_MAX)
		return;

	spin_lock(&cache_defer_lock);

	/* Consider removing either the first or the last */
	if (cache_defer_cnt > DFR_MAX) {
		if (net_random() & 1)
			discard = list_entry(cache_defer_list.next,
					     struct cache_deferred_req, recent);
		else
			discard = list_entry(cache_defer_list.prev,
					     struct cache_deferred_req, recent);
		__unhash_deferred_req(discard);
	}
	spin_unlock(&cache_defer_lock);
	if (discard)
		discard->revisit(discard, 1);
}

/* Return true if and only if a deferred request is queued. */
static bool cache_defer_req(struct cache_req *req, struct cache_head *item)
{
	struct cache_deferred_req *dreq;

	if (req->thread_wait) {
		cache_wait_req(req, item);
		if (!test_bit(CACHE_PENDING, &item->flags))
			return false;
	}
	dreq = req->defer(req);
	if (dreq == NULL)
		return false;
	setup_deferral(dreq, item, 1);
	if (!test_bit(CACHE_PENDING, &item->flags))
		/* Bit could have been cleared before we managed to
		 * set up the deferral, so need to revisit just in case
		 */
		cache_revisit_request(item);

	cache_limit_defers();
	return true;
}

static void cache_revisit_request(struct cache_head *item)
{
	struct cache_deferred_req *dreq;
	struct list_head pending;
	struct hlist_node *tmp;
	int hash = DFR_HASH(item);

	INIT_LIST_HEAD(&pending);
	spin_lock(&cache_defer_lock);

	hlist_for_each_entry_safe(dreq, tmp, &cache_defer_hash[hash], hash)
		if (dreq->item == item) {
			__unhash_deferred_req(dreq);
			list_add(&dreq->recent, &pending);
		}

	spin_unlock(&cache_defer_lock);

	while (!list_empty(&pending)) {
		dreq = list_entry(pending.next, struct cache_deferred_req, recent);
		list_del_init(&dreq->recent);
		dreq->revisit(dreq, 0);
	}
}

void cache_clean_deferred(void *owner)
{
	struct cache_deferred_req *dreq, *tmp;
	struct list_head pending;

	INIT_LIST_HEAD(&pending);
	spin_lock(&cache_defer_lock);

	list_for_each_entry_safe(dreq, tmp, &cache_defer_list, recent) {
		if (dreq->owner == owner) {
			__unhash_deferred_req(dreq);
			list_add(&dreq->recent, &pending);
		}
	}
	spin_unlock(&cache_defer_lock);

	while (!list_empty(&pending)) {
		dreq = list_entry(pending.next, struct cache_deferred_req, recent);
		list_del_init(&dreq->recent);
		dreq->revisit(dreq, 1);
	}
}

/*
 * communicate with user-space
 *
 * We have a magic /proc file - /proc/sunrpc/<cachename>/channel.
 * On read, you get a full request, or block.
 * On write, an update request is processed.
 * Poll works if anything to read, and always allows write.
 *
 * Implemented by linked list of requests.  Each open file has
 * a ->private that also exists in this list.  New requests are added
 * to the end and may wake up any preceding readers.
 * New readers are added to the head.  If, on read, an item is found with
 * CACHE_UPCALLING clear, we free it from the list.
 *
 */
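
/*
 * For example (the field contents are cache-specific; this exchange is
 * illustrative only): a daemon that has opened the channel file might
 * see
 *
 *	read()  returns "nfsd 192.0.2.1\n"
 *
 * and answer the request by writing back the same key plus an expiry
 * time and the content:
 *
 *	write() of "nfsd 192.0.2.1 1234567890 insecure\n"
 *
 * which is handed to the cache's ->cache_parse method.
 */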

static DEFINE_SPINLOCK(queue_lock);
static DEFINE_MUTEX(queue_io_mutex);

struct cache_queue {
	struct list_head	list;
	int			reader;	/* if 0, then request */
};
struct cache_request {
	struct cache_queue	q;
	struct cache_head	*item;
	char			*buf;
	int			len;
	int			readers;
};
struct cache_reader {
	struct cache_queue	q;
	int			offset;	/* if non-0, we have a refcnt on next request */
};

static int cache_request(struct cache_detail *detail,
			 struct cache_request *crq)
{
	char *bp = crq->buf;
	int len = PAGE_SIZE;

	detail->cache_request(detail, crq->item, &bp, &len);
	if (len < 0)
		return -EAGAIN;
	return PAGE_SIZE - len;
}

static ssize_t cache_read(struct file *filp, char __user *buf, size_t count,
			  loff_t *ppos, struct cache_detail *cd)
{
	struct cache_reader *rp = filp->private_data;
	struct cache_request *rq;
	struct inode *inode = file_inode(filp);
	int err;

	if (count == 0)
		return 0;

	mutex_lock(&inode->i_mutex); /* protect against multiple concurrent
				      * readers on this file */
 again:
	spin_lock(&queue_lock);
	/* need to find next request */
	while (rp->q.list.next != &cd->queue &&
	       list_entry(rp->q.list.next, struct cache_queue, list)
	       ->reader) {
		struct list_head *next = rp->q.list.next;
		list_move(&rp->q.list, next);
	}
	if (rp->q.list.next == &cd->queue) {
		spin_unlock(&queue_lock);
		mutex_unlock(&inode->i_mutex);
		WARN_ON_ONCE(rp->offset);
		return 0;
	}
	rq = container_of(rp->q.list.next, struct cache_request, q.list);
	WARN_ON_ONCE(rq->q.reader);
	if (rp->offset == 0)
		rq->readers++;
	spin_unlock(&queue_lock);

	if (rq->len == 0) {
		err = cache_request(cd, rq);
		if (err < 0)
			goto out;
		rq->len = err;
	}

	if (rp->offset == 0 && !test_bit(CACHE_PENDING, &rq->item->flags)) {
		err = -EAGAIN;
		spin_lock(&queue_lock);
		list_move(&rp->q.list, &rq->q.list);
		spin_unlock(&queue_lock);
	} else {
		if (rp->offset + count > rq->len)
			count = rq->len - rp->offset;
		err = -EFAULT;
		if (copy_to_user(buf, rq->buf + rp->offset, count))
			goto out;
		rp->offset += count;
		if (rp->offset >= rq->len) {
			rp->offset = 0;
			spin_lock(&queue_lock);
			list_move(&rp->q.list, &rq->q.list);
			spin_unlock(&queue_lock);
		}
		err = 0;
	}
 out:
	if (rp->offset == 0) {
		/* need to release rq */
		spin_lock(&queue_lock);
		rq->readers--;
		if (rq->readers == 0 &&
		    !test_bit(CACHE_PENDING, &rq->item->flags)) {
			list_del(&rq->q.list);
			spin_unlock(&queue_lock);
			cache_put(rq->item, cd);
			kfree(rq->buf);
			kfree(rq);
		} else
			spin_unlock(&queue_lock);
	}
	if (err == -EAGAIN)
		goto again;
	mutex_unlock(&inode->i_mutex);
	return err ? err : count;
}

static ssize_t cache_do_downcall(char *kaddr, const char __user *buf,
				 size_t count, struct cache_detail *cd)
{
	ssize_t ret;

	if (count == 0)
		return -EINVAL;
	if (copy_from_user(kaddr, buf, count))
		return -EFAULT;
	kaddr[count] = '\0';
	ret = cd->cache_parse(cd, kaddr, count);
	if (!ret)
		ret = count;
	return ret;
}

static ssize_t cache_slow_downcall(const char __user *buf,
				   size_t count, struct cache_detail *cd)
{
	static char write_buf[8192]; /* protected by queue_io_mutex */
	ssize_t ret = -EINVAL;

	if (count >= sizeof(write_buf))
		goto out;
	mutex_lock(&queue_io_mutex);
	ret = cache_do_downcall(write_buf, buf, count, cd);
	mutex_unlock(&queue_io_mutex);
out:
	return ret;
}

static ssize_t cache_downcall(struct address_space *mapping,
			      const char __user *buf,
			      size_t count, struct cache_detail *cd)
{
	struct page *page;
	char *kaddr;
	ssize_t ret = -ENOMEM;

	if (count >= PAGE_CACHE_SIZE)
		goto out_slow;

	page = find_or_create_page(mapping, 0, GFP_KERNEL);
	if (!page)
		goto out_slow;

	kaddr = kmap(page);
	ret = cache_do_downcall(kaddr, buf, count, cd);
	kunmap(page);
	unlock_page(page);
	page_cache_release(page);
	return ret;
out_slow:
	return cache_slow_downcall(buf, count, cd);
}

static ssize_t cache_write(struct file *filp, const char __user *buf,
			   size_t count, loff_t *ppos,
			   struct cache_detail *cd)
{
	struct address_space *mapping = filp->f_mapping;
	struct inode *inode = file_inode(filp);
	ssize_t ret = -EINVAL;

	if (!cd->cache_parse)
		goto out;

	mutex_lock(&inode->i_mutex);
	ret = cache_downcall(mapping, buf, count, cd);
	mutex_unlock(&inode->i_mutex);
out:
	return ret;
}

static DECLARE_WAIT_QUEUE_HEAD(queue_wait);

static unsigned int cache_poll(struct file *filp, poll_table *wait,
			       struct cache_detail *cd)
{
	unsigned int mask;
	struct cache_reader *rp = filp->private_data;
	struct cache_queue *cq;

	poll_wait(filp, &queue_wait, wait);

	/* always allow write */
	mask = POLLOUT | POLLWRNORM;

	if (!rp)
		return mask;

	spin_lock(&queue_lock);

	for (cq = &rp->q; &cq->list != &cd->queue;
	     cq = list_entry(cq->list.next, struct cache_queue, list))
		if (!cq->reader) {
			mask |= POLLIN | POLLRDNORM;
			break;
		}
	spin_unlock(&queue_lock);
	return mask;
}

static int cache_ioctl(struct inode *ino, struct file *filp,
		       unsigned int cmd, unsigned long arg,
		       struct cache_detail *cd)
{
	int len = 0;
	struct cache_reader *rp = filp->private_data;
	struct cache_queue *cq;

	if (cmd != FIONREAD || !rp)
		return -EINVAL;

	spin_lock(&queue_lock);

	/* only find the length remaining in current request,
	 * or the length of the next request
	 */
	for (cq = &rp->q; &cq->list != &cd->queue;
	     cq = list_entry(cq->list.next, struct cache_queue, list))
		if (!cq->reader) {
			struct cache_request *cr =
				container_of(cq, struct cache_request, q);
			len = cr->len - rp->offset;
			break;
		}
	spin_unlock(&queue_lock);

	return put_user(len, (int __user *)arg);
}

static int cache_open(struct inode *inode, struct file *filp,
		      struct cache_detail *cd)
{
	struct cache_reader *rp = NULL;

	if (!cd || !try_module_get(cd->owner))
		return -EACCES;
	nonseekable_open(inode, filp);
	if (filp->f_mode & FMODE_READ) {
		rp = kmalloc(sizeof(*rp), GFP_KERNEL);
		if (!rp) {
			module_put(cd->owner);
			return -ENOMEM;
		}
		rp->offset = 0;
		rp->q.reader = 1;
		atomic_inc(&cd->readers);
		spin_lock(&queue_lock);
		list_add(&rp->q.list, &cd->queue);
		spin_unlock(&queue_lock);
	}
	filp->private_data = rp;
	return 0;
}

static int cache_release(struct inode *inode, struct file *filp,
			 struct cache_detail *cd)
{
	struct cache_reader *rp = filp->private_data;

	if (rp) {
		spin_lock(&queue_lock);
		if (rp->offset) {
			struct cache_queue *cq;
			for (cq = &rp->q; &cq->list != &cd->queue;
			     cq = list_entry(cq->list.next, struct cache_queue, list))
				if (!cq->reader) {
					container_of(cq, struct cache_request, q)
						->readers--;
					break;
				}
			rp->offset = 0;
		}
		list_del(&rp->q.list);
		spin_unlock(&queue_lock);

		filp->private_data = NULL;
		kfree(rp);

		cd->last_close = seconds_since_boot();
		atomic_dec(&cd->readers);
	}
	module_put(cd->owner);
	return 0;
}

static void cache_dequeue(struct cache_detail *detail, struct cache_head *ch)
{
	struct cache_queue *cq, *tmp;
	struct cache_request *cr;
	struct list_head dequeued;

	INIT_LIST_HEAD(&dequeued);
	spin_lock(&queue_lock);
	list_for_each_entry_safe(cq, tmp, &detail->queue, list)
		if (!cq->reader) {
			cr = container_of(cq, struct cache_request, q);
			if (cr->item != ch)
				continue;
			if (test_bit(CACHE_PENDING, &ch->flags))
				/* Lost a race and it is pending again */
				break;
			if (cr->readers != 0)
				continue;
			list_move(&cr->q.list, &dequeued);
		}
	spin_unlock(&queue_lock);
	while (!list_empty(&dequeued)) {
		cr = list_entry(dequeued.next, struct cache_request, q.list);
		list_del(&cr->q.list);
		cache_put(cr->item, detail);
		kfree(cr->buf);
		kfree(cr);
	}
}

/*
 * Support routines for text-based upcalls.
 * Fields are separated by spaces.
 * Fields are either mangled to quote space, tab, newline and slosh
 * with slosh, or hexified with a leading \x.
 * Record is terminated with newline.
 *
 */
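
/*
 * For example, a field containing the text "host one" is emitted by
 * qword_add() as "host\040one " (the embedded space quoted as octal
 * \040, followed by the single-space field separator), and a field
 * containing the bytes 0x0a 0x2f is emitted by qword_addhex() as
 * "\x0a2f ".  The caller terminates the record with a newline.
 */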

void qword_add(char **bpp, int *lp, char *str)
{
	char *bp = *bpp;
	int len = *lp;
	char c;

	if (len < 0) return;

	while ((c = *str++) && len)
		switch (c) {
		case ' ':
		case '\t':
		case '\n':
		case '\\':
			if (len >= 4) {
				*bp++ = '\\';
				*bp++ = '0' + ((c & 0300)>>6);
				*bp++ = '0' + ((c & 0070)>>3);
				*bp++ = '0' + ((c & 0007)>>0);
			}
			len -= 4;
			break;
		default:
			*bp++ = c;
			len--;
		}
	if (c || len < 1) len = -1;
	else {
		*bp++ = ' ';
		len--;
	}
	*bpp = bp;
	*lp = len;
}
EXPORT_SYMBOL_GPL(qword_add);

void qword_addhex(char **bpp, int *lp, char *buf, int blen)
{
	char *bp = *bpp;
	int len = *lp;

	if (len < 0) return;

	if (len > 2) {
		*bp++ = '\\';
		*bp++ = 'x';
		len -= 2;
		while (blen && len >= 2) {
			unsigned char c = *buf++;
			*bp++ = '0' + ((c&0xf0)>>4) + (c>=0xa0)*('a'-'9'-1);
			*bp++ = '0' + (c&0x0f) + ((c&0x0f)>=0x0a)*('a'-'9'-1);
			len -= 2;
			blen--;
		}
	}
	if (blen || len < 1) len = -1;
	else {
		*bp++ = ' ';
		len--;
	}
	*bpp = bp;
	*lp = len;
}
EXPORT_SYMBOL_GPL(qword_addhex);

static void warn_no_listener(struct cache_detail *detail)
{
	if (detail->last_warn != detail->last_close) {
		detail->last_warn = detail->last_close;
		if (detail->warn_no_listener)
			detail->warn_no_listener(detail, detail->last_close != 0);
	}
}

static bool cache_listeners_exist(struct cache_detail *detail)
{
	if (atomic_read(&detail->readers))
		return true;
	if (detail->last_close == 0)
		/* This cache was never opened */
		return false;
	if (detail->last_close < seconds_since_boot() - 30)
		/*
		 * We allow for the possibility that someone might
		 * restart a userspace daemon without restarting the
		 * server; but after 30 seconds, we give up.
		 */
		return false;
	return true;
}

/*
 * register an upcall request to user-space and queue it up for read() by the
 * upcall daemon.
 *
 * Each request is at most one page long.
 */
int sunrpc_cache_pipe_upcall(struct cache_detail *detail, struct cache_head *h)
{
	char *buf;
	struct cache_request *crq;
	int ret = 0;

	if (!detail->cache_request)
		return -EINVAL;

	if (!cache_listeners_exist(detail)) {
		warn_no_listener(detail);
		return -EINVAL;
	}
	if (test_bit(CACHE_CLEANED, &h->flags))
		/* Too late to make an upcall */
		return -EAGAIN;

	buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!buf)
		return -EAGAIN;

	crq = kmalloc(sizeof(*crq), GFP_KERNEL);
	if (!crq) {
		kfree(buf);
		return -EAGAIN;
	}

	crq->q.reader = 0;
	crq->item = cache_get(h);
	crq->buf = buf;
	crq->len = 0;
	crq->readers = 0;
	spin_lock(&queue_lock);
	if (test_bit(CACHE_PENDING, &h->flags))
		list_add_tail(&crq->q.list, &detail->queue);
	else
		/* Lost a race, no longer PENDING, so don't enqueue */
		ret = -EAGAIN;
	spin_unlock(&queue_lock);
	wake_up(&queue_wait);
	if (ret == -EAGAIN) {
		kfree(buf);
		kfree(crq);
	}
	return ret;
}
EXPORT_SYMBOL_GPL(sunrpc_cache_pipe_upcall);

/*
 * parse a message from user-space and pass it
 * to an appropriate cache
 * Messages are, like requests, separated into fields by
 * spaces, and dequoted from \xHEXSTRING or embedded \nnn octal.
 *
 * Message is
 *   reply cachename expiry key ... content....
 *
 * key and content are both parsed by cache
 */

int qword_get(char **bpp, char *dest, int bufsize)
{
	/* return bytes copied, or -1 on error */
	char *bp = *bpp;
	int len = 0;

	while (*bp == ' ') bp++;

	if (bp[0] == '\\' && bp[1] == 'x') {
		/* HEX STRING */
		bp += 2;
		while (len < bufsize) {
			int h, l;

			h = hex_to_bin(bp[0]);
			if (h < 0)
				break;

			l = hex_to_bin(bp[1]);
			if (l < 0)
				break;

			*dest++ = (h << 4) | l;
			bp += 2;
			len++;
		}
	} else {
		/* text with \nnn octal quoting */
		while (*bp != ' ' && *bp != '\n' && *bp && len < bufsize-1) {
			if (*bp == '\\' &&
			    isodigit(bp[1]) && (bp[1] <= '3') &&
			    isodigit(bp[2]) &&
			    isodigit(bp[3])) {
				int byte = (*++bp - '0');
				bp++;
				byte = (byte << 3) | (*bp++ - '0');
				byte = (byte << 3) | (*bp++ - '0');
				*dest++ = byte;
				len++;
			} else {
				*dest++ = *bp++;
				len++;
			}
		}
	}

	if (*bp != ' ' && *bp != '\n' && *bp != '\0')
		return -1;
	while (*bp == ' ') bp++;
	*bpp = bp;
	*dest = '\0';
	return len;
}
EXPORT_SYMBOL_GPL(qword_get);
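
/*
 * A cache's ->cache_parse method typically pulls the message apart
 * with qword_get() and friends (illustrative sketch only; the field
 * layout and mycache_parse are hypothetical):
 *
 *	static int mycache_parse(struct cache_detail *cd, char *mesg, int mlen)
 *	{
 *		char name[64];
 *		time_t expiry;
 *
 *		if (qword_get(&mesg, name, sizeof(name)) <= 0)
 *			return -EINVAL;
 *		expiry = get_expiry(&mesg);
 *		if (!expiry)
 *			return -EINVAL;
 *		(then look the entry up and sunrpc_cache_update() it)
 *	}
 */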

/*
 * support /proc/sunrpc/cache/$CACHENAME/content
 * as a seqfile.
 * We call ->cache_show passing NULL for the item to
 * get a header, then pass each real item in the cache
 */
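
/*
 * The seq_file position encodes both the hash bucket and the index
 * within that bucket: *pos == 0 is the header token, and otherwise
 * *pos - 1 carries the bucket number in its high 32 bits and the
 * entry index in its low 32 bits.  For example, *pos == (3LL << 32) + 2
 * refers to the second entry in hash bucket 3.
 */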
1285
1286 struct handle {
1287         struct cache_detail *cd;
1288 };
1289
1290 static void *c_start(struct seq_file *m, loff_t *pos)
1291         __acquires(cd->hash_lock)
1292 {
1293         loff_t n = *pos;
1294         unsigned int hash, entry;
1295         struct cache_head *ch;
1296         struct cache_detail *cd = ((struct handle*)m->private)->cd;
1297
1298
1299         read_lock(&cd->hash_lock);
1300         if (!n--)
1301                 return SEQ_START_TOKEN;
1302         hash = n >> 32;
1303         entry = n & ((1LL<<32) - 1);
1304
1305         for (ch=cd->hash_table[hash]; ch; ch=ch->next)
1306                 if (!entry--)
1307                         return ch;
1308         n &= ~((1LL<<32) - 1);
1309         do {
1310                 hash++;
1311                 n += 1LL<<32;
1312         } while(hash < cd->hash_size &&
1313                 cd->hash_table[hash]==NULL);
1314         if (hash >= cd->hash_size)
1315                 return NULL;
1316         *pos = n+1;
1317         return cd->hash_table[hash];
1318 }
1319
1320 static void *c_next(struct seq_file *m, void *p, loff_t *pos)
1321 {
1322         struct cache_head *ch = p;
1323         int hash = (*pos >> 32);
1324         struct cache_detail *cd = ((struct handle*)m->private)->cd;
1325
1326         if (p == SEQ_START_TOKEN)
1327                 hash = 0;
1328         else if (ch->next == NULL) {
1329                 hash++;
1330                 *pos += 1LL<<32;
1331         } else {
1332                 ++*pos;
1333                 return ch->next;
1334         }
1335         *pos &= ~((1LL<<32) - 1);
1336         while (hash < cd->hash_size &&
1337                cd->hash_table[hash] == NULL) {
1338                 hash++;
1339                 *pos += 1LL<<32;
1340         }
1341         if (hash >= cd->hash_size)
1342                 return NULL;
1343         ++*pos;
1344         return cd->hash_table[hash];
1345 }
1346
1347 static void c_stop(struct seq_file *m, void *p)
1348         __releases(cd->hash_lock)
1349 {
1350         struct cache_detail *cd = ((struct handle*)m->private)->cd;
1351         read_unlock(&cd->hash_lock);
1352 }
1353
1354 static int c_show(struct seq_file *m, void *p)
1355 {
1356         struct cache_head *cp = p;
1357         struct cache_detail *cd = ((struct handle*)m->private)->cd;
1358
1359         if (p == SEQ_START_TOKEN)
1360                 return cd->cache_show(m, cd, NULL);
1361
1362         ifdebug(CACHE)
1363                 seq_printf(m, "# expiry=%ld refcnt=%d flags=%lx\n",
1364                            convert_to_wallclock(cp->expiry_time),
1365                            atomic_read(&cp->ref.refcount), cp->flags);
1366         cache_get(cp);
1367         if (cache_check(cd, cp, NULL))
1368                 /* cache_check does a cache_put on failure */
1369                 seq_printf(m, "# ");
1370         else {
1371                 if (cache_is_expired(cd, cp))
1372                         seq_printf(m, "# ");
1373                 cache_put(cp, cd);
1374         }
1375
1376         return cd->cache_show(m, cd, cp);
1377 }
1378
1379 static const struct seq_operations cache_content_op = {
1380         .start  = c_start,
1381         .next   = c_next,
1382         .stop   = c_stop,
1383         .show   = c_show,
1384 };
1385
1386 static int content_open(struct inode *inode, struct file *file,
1387                         struct cache_detail *cd)
1388 {
1389         struct handle *han;
1390
1391         if (!cd || !try_module_get(cd->owner))
1392                 return -EACCES;
1393         han = __seq_open_private(file, &cache_content_op, sizeof(*han));
1394         if (han == NULL) {
1395                 module_put(cd->owner);
1396                 return -ENOMEM;
1397         }
1398
1399         han->cd = cd;
1400         return 0;
1401 }
1402
1403 static int content_release(struct inode *inode, struct file *file,
1404                 struct cache_detail *cd)
1405 {
1406         int ret = seq_release_private(inode, file);
1407         module_put(cd->owner);
1408         return ret;
1409 }
1410
1411 static int open_flush(struct inode *inode, struct file *file,
1412                         struct cache_detail *cd)
1413 {
1414         if (!cd || !try_module_get(cd->owner))
1415                 return -EACCES;
1416         return nonseekable_open(inode, file);
1417 }
1418
1419 static int release_flush(struct inode *inode, struct file *file,
1420                         struct cache_detail *cd)
1421 {
1422         module_put(cd->owner);
1423         return 0;
1424 }
1425
1426 static ssize_t read_flush(struct file *file, char __user *buf,
1427                           size_t count, loff_t *ppos,
1428                           struct cache_detail *cd)
1429 {
1430         char tbuf[22];
1431         unsigned long p = *ppos;
1432         size_t len;
1433
1434         snprintf(tbuf, sizeof(tbuf), "%lu\n", convert_to_wallclock(cd->flush_time));
1435         len = strlen(tbuf);
1436         if (p >= len)
1437                 return 0;
1438         len -= p;
1439         if (len > count)
1440                 len = count;
1441         if (copy_to_user(buf, (void*)(tbuf+p), len))
1442                 return -EFAULT;
1443         *ppos += len;
1444         return len;
1445 }
1446
1447 static ssize_t write_flush(struct file *file, const char __user *buf,
1448                            size_t count, loff_t *ppos,
1449                            struct cache_detail *cd)
1450 {
1451         char tbuf[20];
1452         char *bp, *ep;
1453
1454         if (*ppos || count > sizeof(tbuf)-1)
1455                 return -EINVAL;
1456         if (copy_from_user(tbuf, buf, count))
1457                 return -EFAULT;
1458         tbuf[count] = 0;
1459         simple_strtoul(tbuf, &ep, 0);
1460         if (*ep && *ep != '\n')
1461                 return -EINVAL;
1462
1463         bp = tbuf;
1464         cd->flush_time = get_expiry(&bp);
1465         cd->nextcheck = seconds_since_boot();
1466         cache_flush();
1467
1468         *ppos += count;
1469         return count;
1470 }
1471
1472 static ssize_t cache_read_procfs(struct file *filp, char __user *buf,
1473                                  size_t count, loff_t *ppos)
1474 {
1475         struct cache_detail *cd = PDE_DATA(file_inode(filp));
1476
1477         return cache_read(filp, buf, count, ppos, cd);
1478 }
1479
1480 static ssize_t cache_write_procfs(struct file *filp, const char __user *buf,
1481                                   size_t count, loff_t *ppos)
1482 {
1483         struct cache_detail *cd = PDE_DATA(file_inode(filp));
1484
1485         return cache_write(filp, buf, count, ppos, cd);
1486 }
1487
1488 static unsigned int cache_poll_procfs(struct file *filp, poll_table *wait)
1489 {
1490         struct cache_detail *cd = PDE_DATA(file_inode(filp));
1491
1492         return cache_poll(filp, wait, cd);
1493 }
1494
1495 static long cache_ioctl_procfs(struct file *filp,
1496                                unsigned int cmd, unsigned long arg)
1497 {
1498         struct inode *inode = file_inode(filp);
1499         struct cache_detail *cd = PDE_DATA(inode);
1500
1501         return cache_ioctl(inode, filp, cmd, arg, cd);
1502 }
1503
1504 static int cache_open_procfs(struct inode *inode, struct file *filp)
1505 {
1506         struct cache_detail *cd = PDE_DATA(inode);
1507
1508         return cache_open(inode, filp, cd);
1509 }
1510
1511 static int cache_release_procfs(struct inode *inode, struct file *filp)
1512 {
1513         struct cache_detail *cd = PDE_DATA(inode);
1514
1515         return cache_release(inode, filp, cd);
1516 }
1517
1518 static const struct file_operations cache_file_operations_procfs = {
1519         .owner          = THIS_MODULE,
1520         .llseek         = no_llseek,
1521         .read           = cache_read_procfs,
1522         .write          = cache_write_procfs,
1523         .poll           = cache_poll_procfs,
1524         .unlocked_ioctl = cache_ioctl_procfs, /* for FIONREAD */
1525         .open           = cache_open_procfs,
1526         .release        = cache_release_procfs,
1527 };
1528
1529 static int content_open_procfs(struct inode *inode, struct file *filp)
1530 {
1531         struct cache_detail *cd = PDE_DATA(inode);
1532
1533         return content_open(inode, filp, cd);
1534 }
1535
1536 static int content_release_procfs(struct inode *inode, struct file *filp)
1537 {
1538         struct cache_detail *cd = PDE_DATA(inode);
1539
1540         return content_release(inode, filp, cd);
1541 }
1542
1543 static const struct file_operations content_file_operations_procfs = {
1544         .open           = content_open_procfs,
1545         .read           = seq_read,
1546         .llseek         = seq_lseek,
1547         .release        = content_release_procfs,
1548 };
1549
1550 static int open_flush_procfs(struct inode *inode, struct file *filp)
1551 {
1552         struct cache_detail *cd = PDE_DATA(inode);
1553
1554         return open_flush(inode, filp, cd);
1555 }
1556
1557 static int release_flush_procfs(struct inode *inode, struct file *filp)
1558 {
1559         struct cache_detail *cd = PDE_DATA(inode);
1560
1561         return release_flush(inode, filp, cd);
1562 }
1563
1564 static ssize_t read_flush_procfs(struct file *filp, char __user *buf,
1565                             size_t count, loff_t *ppos)
1566 {
1567         struct cache_detail *cd = PDE_DATA(file_inode(filp));
1568
1569         return read_flush(filp, buf, count, ppos, cd);
1570 }
1571
1572 static ssize_t write_flush_procfs(struct file *filp,
1573                                   const char __user *buf,
1574                                   size_t count, loff_t *ppos)
1575 {
1576         struct cache_detail *cd = PDE_DATA(file_inode(filp));
1577
1578         return write_flush(filp, buf, count, ppos, cd);
1579 }
1580
1581 static const struct file_operations cache_flush_operations_procfs = {
1582         .open           = open_flush_procfs,
1583         .read           = read_flush_procfs,
1584         .write          = write_flush_procfs,
1585         .release        = release_flush_procfs,
1586         .llseek         = no_llseek,
1587 };
1588
static void remove_cache_proc_entries(struct cache_detail *cd, struct net *net)
{
        struct sunrpc_net *sn;

        if (cd->u.procfs.proc_ent == NULL)
                return;
        if (cd->u.procfs.flush_ent)
                remove_proc_entry("flush", cd->u.procfs.proc_ent);
        if (cd->u.procfs.channel_ent)
                remove_proc_entry("channel", cd->u.procfs.proc_ent);
        if (cd->u.procfs.content_ent)
                remove_proc_entry("content", cd->u.procfs.proc_ent);
        cd->u.procfs.proc_ent = NULL;
        sn = net_generic(net, sunrpc_net_id);
        remove_proc_entry(cd->name, sn->proc_net_rpc);
}

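/*
 * Populate /proc/net/rpc/<name>/ for one cache in one net namespace:
 * "flush" is always created; "channel" only if the cache supplies an
 * upcall generator or downcall parser; "content" only if it can render
 * entries via ->cache_show().
 */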
#ifdef CONFIG_PROC_FS
static int create_cache_proc_entries(struct cache_detail *cd, struct net *net)
{
        struct proc_dir_entry *p;
        struct sunrpc_net *sn;

        sn = net_generic(net, sunrpc_net_id);
        cd->u.procfs.proc_ent = proc_mkdir(cd->name, sn->proc_net_rpc);
        if (cd->u.procfs.proc_ent == NULL)
                goto out_nomem;
        cd->u.procfs.channel_ent = NULL;
        cd->u.procfs.content_ent = NULL;

        p = proc_create_data("flush", S_IFREG|S_IRUSR|S_IWUSR,
                             cd->u.procfs.proc_ent,
                             &cache_flush_operations_procfs, cd);
        cd->u.procfs.flush_ent = p;
        if (p == NULL)
                goto out_nomem;

        if (cd->cache_request || cd->cache_parse) {
                p = proc_create_data("channel", S_IFREG|S_IRUSR|S_IWUSR,
                                     cd->u.procfs.proc_ent,
                                     &cache_file_operations_procfs, cd);
                cd->u.procfs.channel_ent = p;
                if (p == NULL)
                        goto out_nomem;
        }
        if (cd->cache_show) {
                p = proc_create_data("content", S_IFREG|S_IRUSR,
                                     cd->u.procfs.proc_ent,
                                     &content_file_operations_procfs, cd);
                cd->u.procfs.content_ent = p;
                if (p == NULL)
                        goto out_nomem;
        }
        return 0;
out_nomem:
        remove_cache_proc_entries(cd, net);
        return -ENOMEM;
}
#else /* CONFIG_PROC_FS */
static int create_cache_proc_entries(struct cache_detail *cd, struct net *net)
{
        return 0;
}
#endif

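/*
 * One-time setup: prepare the deferrable work item that periodically
 * walks the registered caches and discards expired entries.
 */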
void __init cache_initialize(void)
{
        INIT_DEFERRABLE_WORK(&cache_cleaner, do_cache_clean);
}

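/*
 * Register a cache with the generic cleaner and expose its procfs files
 * in the given net namespace.  A typical per-net caller pairs this with
 * cache_create_net()/cache_destroy_net().  Sketch only -- here
 * "my_cache_template" stands for a hypothetical, fully initialised
 * cache_detail template, and error handling is abbreviated:
 *
 *	struct cache_detail *cd;
 *	int err;
 *
 *	cd = cache_create_net(&my_cache_template, net);
 *	if (IS_ERR(cd))
 *		return PTR_ERR(cd);
 *	err = cache_register_net(cd, net);
 *	if (err) {
 *		cache_destroy_net(cd, net);
 *		return err;
 *	}
 *	...
 *	cache_unregister_net(cd, net);
 *	cache_destroy_net(cd, net);
 */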
int cache_register_net(struct cache_detail *cd, struct net *net)
{
        int ret;

        sunrpc_init_cache_detail(cd);
        ret = create_cache_proc_entries(cd, net);
        if (ret)
                sunrpc_destroy_cache_detail(cd);
        return ret;
}
EXPORT_SYMBOL_GPL(cache_register_net);

void cache_unregister_net(struct cache_detail *cd, struct net *net)
{
        remove_cache_proc_entries(cd, net);
        sunrpc_destroy_cache_detail(cd);
}
EXPORT_SYMBOL_GPL(cache_unregister_net);

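/*
 * Duplicate a template cache_detail for one net namespace and give the
 * copy its own zeroed hash table.  The caller owns the result and must
 * release it with cache_destroy_net().
 */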
struct cache_detail *cache_create_net(struct cache_detail *tmpl, struct net *net)
{
        struct cache_detail *cd;

        cd = kmemdup(tmpl, sizeof(struct cache_detail), GFP_KERNEL);
        if (cd == NULL)
                return ERR_PTR(-ENOMEM);

        /* kcalloc checks the n * size multiplication for overflow */
        cd->hash_table = kcalloc(cd->hash_size, sizeof(struct cache_head *),
                                 GFP_KERNEL);
        if (cd->hash_table == NULL) {
                kfree(cd);
                return ERR_PTR(-ENOMEM);
        }
        cd->net = net;
        return cd;
}
EXPORT_SYMBOL_GPL(cache_create_net);

void cache_destroy_net(struct cache_detail *cd, struct net *net)
{
        kfree(cd->hash_table);
        kfree(cd);
}
EXPORT_SYMBOL_GPL(cache_destroy_net);

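/*
 * The rpc_pipefs variants below mirror the procfs wrappers above; the
 * only difference is that the cache_detail is recovered from the
 * rpc_inode's private pointer rather than from the proc entry's data.
 */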
static ssize_t cache_read_pipefs(struct file *filp, char __user *buf,
                                 size_t count, loff_t *ppos)
{
        struct cache_detail *cd = RPC_I(file_inode(filp))->private;

        return cache_read(filp, buf, count, ppos, cd);
}

static ssize_t cache_write_pipefs(struct file *filp, const char __user *buf,
                                  size_t count, loff_t *ppos)
{
        struct cache_detail *cd = RPC_I(file_inode(filp))->private;

        return cache_write(filp, buf, count, ppos, cd);
}

static unsigned int cache_poll_pipefs(struct file *filp, poll_table *wait)
{
        struct cache_detail *cd = RPC_I(file_inode(filp))->private;

        return cache_poll(filp, wait, cd);
}

static long cache_ioctl_pipefs(struct file *filp,
                               unsigned int cmd, unsigned long arg)
{
        struct inode *inode = file_inode(filp);
        struct cache_detail *cd = RPC_I(inode)->private;

        return cache_ioctl(inode, filp, cmd, arg, cd);
}

static int cache_open_pipefs(struct inode *inode, struct file *filp)
{
        struct cache_detail *cd = RPC_I(inode)->private;

        return cache_open(inode, filp, cd);
}

static int cache_release_pipefs(struct inode *inode, struct file *filp)
{
        struct cache_detail *cd = RPC_I(inode)->private;

        return cache_release(inode, filp, cd);
}

const struct file_operations cache_file_operations_pipefs = {
        .owner          = THIS_MODULE,
        .llseek         = no_llseek,
        .read           = cache_read_pipefs,
        .write          = cache_write_pipefs,
        .poll           = cache_poll_pipefs,
        .unlocked_ioctl = cache_ioctl_pipefs, /* for FIONREAD */
        .open           = cache_open_pipefs,
        .release        = cache_release_pipefs,
};

static int content_open_pipefs(struct inode *inode, struct file *filp)
{
        struct cache_detail *cd = RPC_I(inode)->private;

        return content_open(inode, filp, cd);
}

static int content_release_pipefs(struct inode *inode, struct file *filp)
{
        struct cache_detail *cd = RPC_I(inode)->private;

        return content_release(inode, filp, cd);
}

const struct file_operations content_file_operations_pipefs = {
        .open           = content_open_pipefs,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = content_release_pipefs,
};

static int open_flush_pipefs(struct inode *inode, struct file *filp)
{
        struct cache_detail *cd = RPC_I(inode)->private;

        return open_flush(inode, filp, cd);
}

static int release_flush_pipefs(struct inode *inode, struct file *filp)
{
        struct cache_detail *cd = RPC_I(inode)->private;

        return release_flush(inode, filp, cd);
}

static ssize_t read_flush_pipefs(struct file *filp, char __user *buf,
                                 size_t count, loff_t *ppos)
{
        struct cache_detail *cd = RPC_I(file_inode(filp))->private;

        return read_flush(filp, buf, count, ppos, cd);
}

static ssize_t write_flush_pipefs(struct file *filp,
                                  const char __user *buf,
                                  size_t count, loff_t *ppos)
{
        struct cache_detail *cd = RPC_I(file_inode(filp))->private;

        return write_flush(filp, buf, count, ppos, cd);
}

const struct file_operations cache_flush_operations_pipefs = {
        .open           = open_flush_pipefs,
        .read           = read_flush_pipefs,
        .write          = write_flush_pipefs,
        .release        = release_flush_pipefs,
        .llseek         = no_llseek,
};

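/*
 * Create this cache's directory in rpc_pipefs under @parent.  The name
 * is wrapped in a qstr with its hash precomputed, since
 * rpc_create_cache_dir() performs a dentry lookup with it.
 */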
int sunrpc_cache_register_pipefs(struct dentry *parent,
                                 const char *name, umode_t umode,
                                 struct cache_detail *cd)
{
        struct qstr q;
        struct dentry *dir;
        int ret = 0;

        q.name = name;
        q.len = strlen(name);
        q.hash = full_name_hash(q.name, q.len);
        dir = rpc_create_cache_dir(parent, &q, umode, cd);
        if (!IS_ERR(dir))
                cd->u.pipefs.dir = dir;
        else
                ret = PTR_ERR(dir);
        return ret;
}
EXPORT_SYMBOL_GPL(sunrpc_cache_register_pipefs);

void sunrpc_cache_unregister_pipefs(struct cache_detail *cd)
{
        rpc_remove_cache_dir(cd->u.pipefs.dir);
        cd->u.pipefs.dir = NULL;
}
EXPORT_SYMBOL_GPL(sunrpc_cache_unregister_pipefs);