/*
 *  Client-side NFSv4 state management.
 *
 *  Copyright (c) 2002 The Regents of the University of Michigan.
 *  All rights reserved.
 *
 *  Kendrick Smith <kmsmith@umich.edu>
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions
 *  are met:
 *
 *  1. Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *  2. Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *  3. Neither the name of the University nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 *  THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
 *  WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 *  DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 *  FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 *  BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 *  LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 *  NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 *  SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Implementation of the NFSv4 state model. For the time being,
 * this is minimal, but will be made much more complex in a
 * subsequent patch.
 */

#include <linux/config.h>
#include <linux/slab.h>
#include <linux/smp_lock.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_idmap.h>
#include <linux/workqueue.h>
#include <linux/bitops.h>

#include "callback.h"
#include "delegation.h"

#define OPENOWNER_POOL_SIZE	8

static DEFINE_SPINLOCK(state_spinlock);

nfs4_stateid zero_stateid;

nfs4_stateid one_stateid =
	{ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };

static LIST_HEAD(nfs4_clientid_list);

static void nfs4_recover_state(void *);
extern void nfs4_renew_state(void *);

void
init_nfsv4_state(struct nfs_server *server)
{
	server->nfs4_state = NULL;
	INIT_LIST_HEAD(&server->nfs4_siblings);
}

void
destroy_nfsv4_state(struct nfs_server *server)
{
	if (server->mnt_path) {
		kfree(server->mnt_path);
		server->mnt_path = NULL;
	}
	if (server->nfs4_state) {
		nfs4_put_client(server->nfs4_state);
		server->nfs4_state = NULL;
	}
}

/*
 * nfs4_get_client(): returns an empty client structure
 * nfs4_put_client(): drops reference to client structure
 *
 * Since these are allocated/deallocated very rarely, we don't
 * bother putting them in a slab cache...
 */
static struct nfs4_client *
nfs4_alloc_client(struct in_addr *addr)
{
	struct nfs4_client *clp;

	if (nfs_callback_up() < 0)
		return NULL;
	if ((clp = kmalloc(sizeof(*clp), GFP_KERNEL)) == NULL) {
		nfs_callback_down();
		return NULL;
	}
	memset(clp, 0, sizeof(*clp));
	memcpy(&clp->cl_addr, addr, sizeof(clp->cl_addr));
	init_rwsem(&clp->cl_sem);
	INIT_LIST_HEAD(&clp->cl_delegations);
	INIT_LIST_HEAD(&clp->cl_state_owners);
	INIT_LIST_HEAD(&clp->cl_unused);
	spin_lock_init(&clp->cl_lock);
	atomic_set(&clp->cl_count, 1);
	INIT_WORK(&clp->cl_recoverd, nfs4_recover_state, clp);
	INIT_WORK(&clp->cl_renewd, nfs4_renew_state, clp);
	INIT_LIST_HEAD(&clp->cl_superblocks);
	init_waitqueue_head(&clp->cl_waitq);
	rpc_init_wait_queue(&clp->cl_rpcwaitq, "NFS4 client");
	clp->cl_boot_time = CURRENT_TIME;
	clp->cl_state = 1 << NFS4CLNT_OK;
	return clp;
}

static void
nfs4_free_client(struct nfs4_client *clp)
{
	struct nfs4_state_owner *sp;

	while (!list_empty(&clp->cl_unused)) {
		sp = list_entry(clp->cl_unused.next,
				struct nfs4_state_owner,
				so_list);
		list_del(&sp->so_list);
		kfree(sp);
	}
	BUG_ON(!list_empty(&clp->cl_state_owners));
	if (clp->cl_cred)
		put_rpccred(clp->cl_cred);
	nfs_idmap_delete(clp);
	if (clp->cl_rpcclient)
		rpc_shutdown_client(clp->cl_rpcclient);
	kfree(clp);
	nfs_callback_down();
}

static struct nfs4_client *__nfs4_find_client(struct in_addr *addr)
{
	struct nfs4_client *clp;
	list_for_each_entry(clp, &nfs4_clientid_list, cl_servers) {
		if (memcmp(&clp->cl_addr, addr, sizeof(clp->cl_addr)) == 0) {
			atomic_inc(&clp->cl_count);
			return clp;
		}
	}
	return NULL;
}

struct nfs4_client *nfs4_find_client(struct in_addr *addr)
{
	struct nfs4_client *clp;
	spin_lock(&state_spinlock);
	clp = __nfs4_find_client(addr);
	spin_unlock(&state_spinlock);
	return clp;
}

struct nfs4_client *
nfs4_get_client(struct in_addr *addr)
{
	struct nfs4_client *clp, *new = NULL;

	spin_lock(&state_spinlock);
	for (;;) {
		clp = __nfs4_find_client(addr);
		if (clp != NULL)
			break;
		clp = new;
		if (clp != NULL) {
			list_add(&clp->cl_servers, &nfs4_clientid_list);
			new = NULL;
			break;
		}
		spin_unlock(&state_spinlock);
		new = nfs4_alloc_client(addr);
		spin_lock(&state_spinlock);
		if (new == NULL)
			break;
	}
	spin_unlock(&state_spinlock);
	if (new)
		nfs4_free_client(new);
	return clp;
}

void
nfs4_put_client(struct nfs4_client *clp)
{
	if (!atomic_dec_and_lock(&clp->cl_count, &state_spinlock))
		return;
	list_del(&clp->cl_servers);
	spin_unlock(&state_spinlock);
	BUG_ON(!list_empty(&clp->cl_superblocks));
	wake_up_all(&clp->cl_waitq);
	rpc_wake_up(&clp->cl_rpcwaitq);
	nfs4_kill_renewd(clp);
	nfs4_free_client(clp);
}

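/*
 * Usage sketch (illustrative, not part of the original file): callers
 * pair nfs4_get_client() with nfs4_put_client(); the helper name
 * below is hypothetical.
 *
 *	static int nfs4_example_attach(struct nfs_server *server,
 *	                               struct in_addr *addr)
 *	{
 *		struct nfs4_client *clp = nfs4_get_client(addr);
 *
 *		if (clp == NULL)
 *			return -ENOMEM;
 *		server->nfs4_state = clp;  (released by destroy_nfsv4_state())
 *		return 0;
 *	}
 */
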
static int __nfs4_init_client(struct nfs4_client *clp)
{
	int status = nfs4_proc_setclientid(clp, NFS4_CALLBACK, nfs_callback_tcpport);
	if (status == 0)
		status = nfs4_proc_setclientid_confirm(clp);
	if (status == 0)
		nfs4_schedule_state_renewal(clp);
	return status;
}

int nfs4_init_client(struct nfs4_client *clp)
{
	return nfs4_map_errors(__nfs4_init_client(clp));
}

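/*
 * The SETCLIENTID/SETCLIENTID_CONFIRM pair above is the NFSv4
 * (RFC 3530) client-ID handshake: the first RPC proposes a clientid
 * plus callback information, the confirm commits it on the server,
 * and only then is lease renewal scheduled. A mount-time caller would
 * use the wrapper, which maps NFSv4 errors to local errnos (sketch):
 *
 *	status = nfs4_init_client(clp);
 *	if (status < 0)
 *		return status;
 */
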
static u32
nfs4_alloc_lockowner_id(struct nfs4_client *clp)
{
	return clp->cl_lockowner_id++;
}

static struct nfs4_state_owner *
nfs4_client_grab_unused(struct nfs4_client *clp, struct rpc_cred *cred)
{
	struct nfs4_state_owner *sp = NULL;

	if (!list_empty(&clp->cl_unused)) {
		sp = list_entry(clp->cl_unused.next, struct nfs4_state_owner, so_list);
		atomic_inc(&sp->so_count);
		sp->so_cred = cred;
		list_move(&sp->so_list, &clp->cl_state_owners);
		clp->cl_nunused--;
	}
	return sp;
}

static struct nfs4_state_owner *
nfs4_find_state_owner(struct nfs4_client *clp, struct rpc_cred *cred)
{
	struct nfs4_state_owner *sp, *res = NULL;

	list_for_each_entry(sp, &clp->cl_state_owners, so_list) {
		if (sp->so_cred != cred)
			continue;
		atomic_inc(&sp->so_count);
		/* Move to the head of the list */
		list_move(&sp->so_list, &clp->cl_state_owners);
		res = sp;
		break;
	}
	return res;
}

/*
 * nfs4_alloc_state_owner(): this is called on the OPEN or CREATE path to
 * create a new state_owner.
 */
static struct nfs4_state_owner *
nfs4_alloc_state_owner(void)
{
	struct nfs4_state_owner *sp;

	sp = kmalloc(sizeof(*sp), GFP_KERNEL);
	if (!sp)
		return NULL;
	init_MUTEX(&sp->so_sema);
	sp->so_seqid = 0;	/* arbitrary */
	INIT_LIST_HEAD(&sp->so_states);
	INIT_LIST_HEAD(&sp->so_delegations);
	atomic_set(&sp->so_count, 1);
	return sp;
}

static void
nfs4_drop_state_owner(struct nfs4_state_owner *sp)
{
	struct nfs4_client *clp = sp->so_client;

	spin_lock(&clp->cl_lock);
	list_del_init(&sp->so_list);
	spin_unlock(&clp->cl_lock);
}

/*
 * Note: must be called with clp->cl_sem held in order to prevent races
 *       with reboot recovery!
 */
struct nfs4_state_owner *nfs4_get_state_owner(struct nfs_server *server, struct rpc_cred *cred)
{
	struct nfs4_client *clp = server->nfs4_state;
	struct nfs4_state_owner *sp, *new;

	get_rpccred(cred);
	new = nfs4_alloc_state_owner();
	spin_lock(&clp->cl_lock);
	sp = nfs4_find_state_owner(clp, cred);
	if (sp == NULL)
		sp = nfs4_client_grab_unused(clp, cred);
	if (sp == NULL && new != NULL) {
		list_add(&new->so_list, &clp->cl_state_owners);
		new->so_client = clp;
		new->so_id = nfs4_alloc_lockowner_id(clp);
		new->so_cred = cred;
		sp = new;
		new = NULL;
	}
	spin_unlock(&clp->cl_lock);
	if (new)
		kfree(new);
	if (sp != NULL)
		return sp;
	put_rpccred(cred);
	return NULL;
}

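/*
 * Caller sketch (hypothetical): the OPEN path takes clp->cl_sem for
 * reading around the lookup so that reboot recovery, which takes it
 * for writing, cannot run concurrently:
 *
 *	down_read(&clp->cl_sem);
 *	sp = nfs4_get_state_owner(server, cred);
 *	if (sp != NULL) {
 *		... issue OPEN using sp->so_id / sp->so_seqid ...
 *		nfs4_put_state_owner(sp);
 *	}
 *	up_read(&clp->cl_sem);
 */
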
/*
 * Must be called with clp->cl_sem held in order to avoid races
 * with state recovery...
 */
void nfs4_put_state_owner(struct nfs4_state_owner *sp)
{
	struct nfs4_client *clp = sp->so_client;
	struct rpc_cred *cred = sp->so_cred;

	if (!atomic_dec_and_lock(&sp->so_count, &clp->cl_lock))
		return;
	if (clp->cl_nunused >= OPENOWNER_POOL_SIZE)
		goto out_free;
	if (list_empty(&sp->so_list))
		goto out_free;
	list_move(&sp->so_list, &clp->cl_unused);
	clp->cl_nunused++;
	spin_unlock(&clp->cl_lock);
	put_rpccred(cred);
	return;
out_free:
	list_del(&sp->so_list);
	spin_unlock(&clp->cl_lock);
	put_rpccred(cred);
	kfree(sp);
}

static struct nfs4_state *
nfs4_alloc_open_state(void)
{
	struct nfs4_state *state;

	state = kmalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return NULL;
	state->state = 0;
	state->nreaders = 0;
	state->nwriters = 0;
	state->flags = 0;
	memset(state->stateid.data, 0, sizeof(state->stateid.data));
	atomic_set(&state->count, 1);
	INIT_LIST_HEAD(&state->lock_states);
	init_MUTEX(&state->lock_sema);
	rwlock_init(&state->state_lock);
	return state;
}

static struct nfs4_state *
__nfs4_find_state(struct inode *inode, struct rpc_cred *cred, mode_t mode)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs4_state *state;

	mode &= (FMODE_READ|FMODE_WRITE);
	list_for_each_entry(state, &nfsi->open_states, inode_states) {
		if (state->owner->so_cred != cred)
			continue;
		if ((mode & FMODE_READ) != 0 && state->nreaders == 0)
			continue;
		if ((mode & FMODE_WRITE) != 0 && state->nwriters == 0)
			continue;
		if ((state->state & mode) != mode)
			continue;
		atomic_inc(&state->count);
		if (mode & FMODE_READ)
			state->nreaders++;
		if (mode & FMODE_WRITE)
			state->nwriters++;
		return state;
	}
	return NULL;
}

static struct nfs4_state *
__nfs4_find_state_byowner(struct inode *inode, struct nfs4_state_owner *owner)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs4_state *state;

	list_for_each_entry(state, &nfsi->open_states, inode_states) {
		/* Is this in the process of being freed? */
		if (state->nreaders == 0 && state->nwriters == 0)
			continue;
		if (state->owner == owner) {
			atomic_inc(&state->count);
			return state;
		}
	}
	return NULL;
}

struct nfs4_state *
nfs4_find_state(struct inode *inode, struct rpc_cred *cred, mode_t mode)
{
	struct nfs4_state *state;

	spin_lock(&inode->i_lock);
	state = __nfs4_find_state(inode, cred, mode);
	spin_unlock(&inode->i_lock);
	return state;
}

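/*
 * Example (sketch): a read path can check for already-open state for
 * this credential before deciding whether a fresh OPEN is required;
 * a successful lookup also bumps nreaders/nwriters for the caller:
 *
 *	state = nfs4_find_state(inode, cred, FMODE_READ);
 *	if (state == NULL)
 *		... send a new OPEN ...
 */
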
static void
nfs4_free_open_state(struct nfs4_state *state)
{
	kfree(state);
}

struct nfs4_state *
nfs4_get_open_state(struct inode *inode, struct nfs4_state_owner *owner)
{
	struct nfs4_state *state, *new;
	struct nfs_inode *nfsi = NFS_I(inode);

	spin_lock(&inode->i_lock);
	state = __nfs4_find_state_byowner(inode, owner);
	spin_unlock(&inode->i_lock);
	if (state)
		goto out;
	new = nfs4_alloc_open_state();
	spin_lock(&inode->i_lock);
	state = __nfs4_find_state_byowner(inode, owner);
	if (state == NULL && new != NULL) {
		state = new;
		/* Caller *must* be holding owner->so_sema */
		/* Note: The reclaim code dictates that we add stateless
		 * and read-only stateids to the end of the list */
		list_add_tail(&state->open_states, &owner->so_states);
		state->owner = owner;
		atomic_inc(&owner->so_count);
		list_add(&state->inode_states, &nfsi->open_states);
		state->inode = igrab(inode);
		spin_unlock(&inode->i_lock);
	} else {
		spin_unlock(&inode->i_lock);
		if (new)
			nfs4_free_open_state(new);
	}
out:
	return state;
}

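/*
 * Note on the pattern above: nfs4_alloc_open_state() can sleep in
 * kmalloc(), so the allocation happens outside inode->i_lock and the
 * lookup is repeated once the lock is retaken; if another thread won
 * the race, the preallocated state is simply freed. The same
 * optimistic-preallocation idiom appears in nfs4_get_client() and
 * nfs4_get_state_owner() above.
 */
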
/*
 * Beware! Caller must be holding exactly one
 * reference to clp->cl_sem and owner->so_sema!
 */
void nfs4_put_open_state(struct nfs4_state *state)
{
	struct inode *inode = state->inode;
	struct nfs4_state_owner *owner = state->owner;

	if (!atomic_dec_and_lock(&state->count, &inode->i_lock))
		return;
	if (!list_empty(&state->inode_states))
		list_del(&state->inode_states);
	spin_unlock(&inode->i_lock);
	list_del(&state->open_states);
	iput(inode);
	BUG_ON(state->state != 0);
	nfs4_free_open_state(state);
	nfs4_put_state_owner(owner);
}

/*
 * Beware! Caller must be holding no references to clp->cl_sem!
 */
void nfs4_close_state(struct nfs4_state *state, mode_t mode)
{
	struct inode *inode = state->inode;
	struct nfs4_state_owner *owner = state->owner;
	struct nfs4_client *clp = owner->so_client;
	int newstate;

	atomic_inc(&owner->so_count);
	down_read(&clp->cl_sem);
	down(&owner->so_sema);
	/* Protect against nfs4_find_state() */
	spin_lock(&inode->i_lock);
	if (mode & FMODE_READ)
		state->nreaders--;
	if (mode & FMODE_WRITE)
		state->nwriters--;
	if (state->nwriters == 0) {
		if (state->nreaders == 0)
			list_del_init(&state->inode_states);
		/* See reclaim code */
		list_move_tail(&state->open_states, &owner->so_states);
	}
	spin_unlock(&inode->i_lock);
	newstate = 0;
	if (state->state != 0) {
		if (state->nreaders)
			newstate |= FMODE_READ;
		if (state->nwriters)
			newstate |= FMODE_WRITE;
		if (state->state == newstate)
			goto out;
		if (nfs4_do_close(inode, state, newstate) == -EINPROGRESS)
			return;
	}
out:
	nfs4_put_open_state(state);
	up(&owner->so_sema);
	nfs4_put_state_owner(owner);
	up_read(&clp->cl_sem);
}

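/*
 * Usage sketch (hypothetical caller): file release drops the modes
 * taken at open time; nfs4_close_state() then sends OPEN_DOWNGRADE if
 * some mode bits remain in use, or CLOSE if none do:
 *
 *	nfs4_close_state(ctx->state, ctx->mode);
 *
 * where ctx stands for the struct nfs_open_context that was set up
 * with FMODE_READ and/or FMODE_WRITE.
 */
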
/*
 * Search the state->lock_states for an existing lock_owner
 * that is compatible with current->files
 */
static struct nfs4_lock_state *
__nfs4_find_lock_state(struct nfs4_state *state, fl_owner_t fl_owner)
{
	struct nfs4_lock_state *pos;
	list_for_each_entry(pos, &state->lock_states, ls_locks) {
		if (pos->ls_owner != fl_owner)
			continue;
		atomic_inc(&pos->ls_count);
		return pos;
	}
	return NULL;
}

struct nfs4_lock_state *
nfs4_find_lock_state(struct nfs4_state *state, fl_owner_t fl_owner)
{
	struct nfs4_lock_state *lsp;
	read_lock(&state->state_lock);
	lsp = __nfs4_find_lock_state(state, fl_owner);
	read_unlock(&state->state_lock);
	return lsp;
}

/*
 * Return a compatible lock_state. If no initialized lock_state structure
 * exists, return an uninitialized one.
 *
 * The caller must be holding state->lock_sema
 */
static struct nfs4_lock_state *nfs4_alloc_lock_state(struct nfs4_state *state, fl_owner_t fl_owner)
{
	struct nfs4_lock_state *lsp;
	struct nfs4_client *clp = state->owner->so_client;

	lsp = kmalloc(sizeof(*lsp), GFP_KERNEL);
	if (lsp == NULL)
		return NULL;
	lsp->ls_flags = 0;
	lsp->ls_seqid = 0;	/* arbitrary */
	memset(lsp->ls_stateid.data, 0, sizeof(lsp->ls_stateid.data));
	atomic_set(&lsp->ls_count, 1);
	lsp->ls_owner = fl_owner;
	INIT_LIST_HEAD(&lsp->ls_locks);
	spin_lock(&clp->cl_lock);
	lsp->ls_id = nfs4_alloc_lockowner_id(clp);
	spin_unlock(&clp->cl_lock);
	return lsp;
}

/*
 * Return a compatible lock_state. If no initialized lock_state structure
 * exists, return an uninitialized one.
 *
 * The caller must be holding state->lock_sema and clp->cl_sem
 */
struct nfs4_lock_state *nfs4_get_lock_state(struct nfs4_state *state, fl_owner_t owner)
{
	struct nfs4_lock_state *lsp;

	lsp = nfs4_find_lock_state(state, owner);
	if (lsp == NULL)
		lsp = nfs4_alloc_lock_state(state, owner);
	return lsp;
}

/*
 * Byte-range lock aware utility to initialize the stateid of read/write
 * requests.
 */
void
nfs4_copy_stateid(nfs4_stateid *dst, struct nfs4_state *state, fl_owner_t fl_owner)
{
	if (test_bit(LK_STATE_IN_USE, &state->flags)) {
		struct nfs4_lock_state *lsp;

		lsp = nfs4_find_lock_state(state, fl_owner);
		if (lsp) {
			memcpy(dst, &lsp->ls_stateid, sizeof(*dst));
			nfs4_put_lock_state(lsp);
			return;
		}
	}
	memcpy(dst, &state->stateid, sizeof(*dst));
}

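/*
 * Example (sketch): READ/WRITE setup passes the lock owner of the
 * calling process, so that a byte-range lock stateid is preferred
 * over the plain open stateid whenever one exists:
 *
 *	nfs4_copy_stateid(&args.stateid, state, current->files);
 *
 * (args here is an illustrative READ/WRITE argument structure.)
 */
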
/*
 * Called with state->lock_sema and clp->cl_sem held.
 */
void nfs4_increment_lock_seqid(int status, struct nfs4_lock_state *lsp)
{
	if (status == NFS_OK || seqid_mutating_err(-status))
		lsp->ls_seqid++;
}

/*
 * Check to see if the unlock request (lock type F_UNLCK) affects the
 * fl lock.
 *
 * fl and request must have the same posix owner
 *
 * return:
 * 0 -> fl not affected by request
 * 1 -> fl consumed by request
 */
static int
nfs4_check_unlock(struct file_lock *fl, struct file_lock *request)
{
	if (fl->fl_start >= request->fl_start && fl->fl_end <= request->fl_end)
		return 1;
	return 0;
}

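/*
 * Worked example: with fl covering bytes [100, 199] and an unlock
 * request covering [0, 299], fl_start (100) >= request->fl_start (0)
 * and fl_end (199) <= request->fl_end (299), so fl is wholly contained
 * and the function returns 1 (consumed). An unlock of [150, 299]
 * leaves [100, 149] held and returns 0; a partial overlap therefore
 * keeps the lock_state alive.
 */
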
/*
 * Post an initialized lock_state on the state->lock_states list.
 */
void nfs4_notify_setlk(struct nfs4_state *state, struct file_lock *request, struct nfs4_lock_state *lsp)
{
	if (!list_empty(&lsp->ls_locks))
		return;
	atomic_inc(&lsp->ls_count);
	write_lock(&state->state_lock);
	list_add(&lsp->ls_locks, &state->lock_states);
	set_bit(LK_STATE_IN_USE, &state->flags);
	write_unlock(&state->state_lock);
}

/*
 * To decide whether to 'reap' the lock state:
 * 1) search i_flock for file_locks whose owner matches this lock_state.
 * 2) determine if the unlock will consume every lock found;
 *    if so, reap the lock_state, otherwise leave it alone.
 */
void
nfs4_notify_unlck(struct nfs4_state *state, struct file_lock *request, struct nfs4_lock_state *lsp)
{
	struct inode *inode = state->inode;
	struct file_lock *fl;

	for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
		if (!(fl->fl_flags & FL_POSIX))
			continue;
		if (fl->fl_owner != lsp->ls_owner)
			continue;
		/* Exit if we find at least one lock which is not consumed */
		if (nfs4_check_unlock(fl, request) == 0)
			return;
	}

	write_lock(&state->state_lock);
	list_del_init(&lsp->ls_locks);
	if (list_empty(&state->lock_states))
		clear_bit(LK_STATE_IN_USE, &state->flags);
	write_unlock(&state->state_lock);
	nfs4_put_lock_state(lsp);
}

/*
 * Release reference to lock_state, and free it if we see that
 * it is no longer in use
 */
void
nfs4_put_lock_state(struct nfs4_lock_state *lsp)
{
	if (!atomic_dec_and_test(&lsp->ls_count))
		return;
	BUG_ON(!list_empty(&lsp->ls_locks));
	kfree(lsp);
}

/*
 * Called with sp->so_sema and clp->cl_sem held.
 *
 * Increment the seqid if the OPEN/OPEN_DOWNGRADE/CLOSE succeeded, or
 * failed with a seqid incrementing error -
 * see comments nfs_fs.h:seqid_mutating_err()
 */
void nfs4_increment_seqid(int status, struct nfs4_state_owner *sp)
{
	if (status == NFS_OK || seqid_mutating_err(-status))
		sp->so_seqid++;
	/* If the server returns BAD_SEQID, unhash state_owner here */
	if (status == -NFS4ERR_BAD_SEQID)
		nfs4_drop_state_owner(sp);
}

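/*
 * Example: the OPEN/CLOSE reply handlers feed the RPC status straight
 * to this helper. so_seqid advances on success and on any error the
 * server treats as seqid-mutating (see seqid_mutating_err() in
 * nfs_fs.h); non-mutating errors such as -NFS4ERR_BAD_SEQID leave the
 * counter alone, and BAD_SEQID additionally unhashes the owner above:
 *
 *	nfs4_increment_seqid(status, sp);
 */
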
static int reclaimer(void *);
struct reclaimer_args {
	struct nfs4_client *clp;
	struct completion complete;
};

/*
 * State recovery routine
 */
static void
nfs4_recover_state(void *data)
{
	struct nfs4_client *clp = (struct nfs4_client *)data;
	struct reclaimer_args args = {
		.clp = clp,
	};

	might_sleep();

	init_completion(&args.complete);

	if (kernel_thread(reclaimer, &args, CLONE_KERNEL) < 0)
		goto out_failed_clear;
	wait_for_completion(&args.complete);
	return;
out_failed_clear:
	set_bit(NFS4CLNT_OK, &clp->cl_state);
	wake_up_all(&clp->cl_waitq);
	rpc_wake_up(&clp->cl_rpcwaitq);
}

/*
 * Schedule a state recovery attempt
 */
void
nfs4_schedule_state_recovery(struct nfs4_client *clp)
{
	if (!clp)
		return;
	if (test_and_clear_bit(NFS4CLNT_OK, &clp->cl_state))
		schedule_work(&clp->cl_recoverd);
}

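/*
 * Example (sketch): an RPC error handler kicks recovery when the
 * server indicates lost state, e.g.:
 *
 *	case -NFS4ERR_STALE_CLIENTID:
 *	case -NFS4ERR_STALE_STATEID:
 *	case -NFS4ERR_EXPIRED:
 *		nfs4_schedule_state_recovery(clp);
 *		break;
 *
 * The NFS4CLNT_OK test above ensures only one recovery run is queued
 * at a time.
 */
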
static int nfs4_reclaim_locks(struct nfs4_state_recovery_ops *ops, struct nfs4_state *state)
{
	struct inode *inode = state->inode;
	struct file_lock *fl;
	int status = 0;

	for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
		if (!(fl->fl_flags & FL_POSIX))
			continue;
		if (((struct nfs_open_context *)fl->fl_file->private_data)->state != state)
			continue;
		status = ops->recover_lock(state, fl);
		if (status >= 0)
			continue;
		switch (status) {
			default:
				printk(KERN_ERR "%s: unhandled error %d. Zeroing state\n",
						__FUNCTION__, status);
			case -NFS4ERR_EXPIRED:
			case -NFS4ERR_NO_GRACE:
			case -NFS4ERR_RECLAIM_BAD:
			case -NFS4ERR_RECLAIM_CONFLICT:
				/* kill_proc(fl->fl_owner, SIGLOST, 1); */
				break;
			case -NFS4ERR_STALE_CLIENTID:
				goto out_err;
		}
	}
	return 0;
out_err:
	return status;
}

static int nfs4_reclaim_open_state(struct nfs4_state_recovery_ops *ops, struct nfs4_state_owner *sp)
{
	struct nfs4_state *state;
	struct nfs4_lock_state *lock;
	int status = 0;

	/* Note: we rely on the sp->so_states list being ordered
	 * so that we always reclaim open(O_RDWR) and/or open(O_WRITE)
	 * states first.
	 * This is needed to ensure that the server won't give us any
	 * read delegations that we have to return if, say, we are
	 * recovering after a network partition or a reboot from a
	 * server that doesn't support a grace period.
	 */
	list_for_each_entry(state, &sp->so_states, open_states) {
		if (state->state == 0)
			continue;
		status = ops->recover_open(sp, state);
		list_for_each_entry(lock, &state->lock_states, ls_locks)
			lock->ls_flags &= ~NFS_LOCK_INITIALIZED;
		if (status >= 0) {
			status = nfs4_reclaim_locks(ops, state);
			if (status < 0)
				goto out_err;
			list_for_each_entry(lock, &state->lock_states, ls_locks) {
				if (!(lock->ls_flags & NFS_LOCK_INITIALIZED))
					printk("%s: Lock reclaim failed!\n",
							__FUNCTION__);
			}
			continue;
		}
		switch (status) {
			default:
				printk(KERN_ERR "%s: unhandled error %d. Zeroing state\n",
						__FUNCTION__, status);
			case -NFS4ERR_RECLAIM_BAD:
			case -NFS4ERR_RECLAIM_CONFLICT:
				/*
				 * Open state on this file cannot be recovered
				 * All we can do is revert to using the zero stateid.
				 */
				memset(state->stateid.data, 0,
					sizeof(state->stateid.data));
				/* Mark the file as being 'closed' */
				state->state = 0;
				break;
			case -NFS4ERR_EXPIRED:
			case -NFS4ERR_NO_GRACE:
			case -NFS4ERR_STALE_CLIENTID:
				goto out_err;
		}
	}
	return 0;
out_err:
	return status;
}

static int reclaimer(void *ptr)
{
	struct reclaimer_args *args = (struct reclaimer_args *)ptr;
	struct nfs4_client *clp = args->clp;
	struct nfs4_state_owner *sp;
	struct nfs4_state_recovery_ops *ops;
	int status = 0;

	daemonize("%u.%u.%u.%u-reclaim", NIPQUAD(clp->cl_addr));
	allow_signal(SIGKILL);

	atomic_inc(&clp->cl_count);
	complete(&args->complete);

	/* Ensure exclusive access to NFSv4 state */
	lock_kernel();
	down_write(&clp->cl_sem);
	/* Are there any NFS mounts out there? */
	if (list_empty(&clp->cl_superblocks))
		goto out;
restart_loop:
	status = nfs4_proc_renew(clp);
	switch (status) {
		case 0:
		case -NFS4ERR_CB_PATH_DOWN:
			goto out;
		case -NFS4ERR_STALE_CLIENTID:
		case -NFS4ERR_LEASE_MOVED:
			ops = &nfs4_reboot_recovery_ops;
			break;
		default:
			ops = &nfs4_network_partition_recovery_ops;
	}
	status = __nfs4_init_client(clp);
	if (status)
		goto out_error;
	/* Mark all delegations for reclaim */
	nfs_delegation_mark_reclaim(clp);
	/* Note: list is protected by exclusive lock on clp->cl_sem */
	list_for_each_entry(sp, &clp->cl_state_owners, so_list) {
		status = nfs4_reclaim_open_state(ops, sp);
		if (status < 0) {
			if (status == -NFS4ERR_NO_GRACE) {
				ops = &nfs4_network_partition_recovery_ops;
				status = nfs4_reclaim_open_state(ops, sp);
			}
			if (status == -NFS4ERR_STALE_CLIENTID)
				goto restart_loop;
			if (status == -NFS4ERR_EXPIRED)
				goto restart_loop;
		}
	}
	nfs_delegation_reap_unclaimed(clp);
out:
	set_bit(NFS4CLNT_OK, &clp->cl_state);
	up_write(&clp->cl_sem);
	unlock_kernel();
	wake_up_all(&clp->cl_waitq);
	rpc_wake_up(&clp->cl_rpcwaitq);
	if (status == -NFS4ERR_CB_PATH_DOWN)
		nfs_handle_cb_pathdown(clp);
	nfs4_put_client(clp);
	return 0;
out_error:
	printk(KERN_WARNING "Error: state recovery failed on NFSv4 server %u.%u.%u.%u with error %d\n",
			NIPQUAD(clp->cl_addr.s_addr), -status);
	goto out;
}