/*
 * linux/fs/nfs/callback_proc.c
 *
 * Copyright (C) 2004 Trond Myklebust
 *
 * NFSv4 callback procedures
 */
#include <linux/nfs4.h>
#include <linux/nfs_fs.h>
#include <linux/slab.h>
#include <linux/rcupdate.h>
#include "nfs4_fs.h"
#include "callback.h"
#include "delegation.h"
#include "internal.h"
#include "pnfs.h"

#define NFSDBG_FACILITY NFSDBG_CALLBACK
__be32 nfs4_callback_getattr(struct cb_getattrargs *args,
			     struct cb_getattrres *res,
			     struct cb_process_state *cps)
{
	struct nfs_delegation *delegation;
	struct nfs_inode *nfsi;
	struct inode *inode;

	res->status = htonl(NFS4ERR_OP_NOT_IN_SESSION);
	if (!cps->clp) /* Always set for v4.0. Set in cb_sequence for v4.1 */
		goto out;

	res->bitmap[0] = res->bitmap[1] = 0;
	res->status = htonl(NFS4ERR_BADHANDLE);

	dprintk_rcu("NFS: GETATTR callback request from %s\n",
		rpc_peeraddr2str(cps->clp->cl_rpcclient, RPC_DISPLAY_ADDR));

	inode = nfs_delegation_find_inode(cps->clp, &args->fh);
	if (inode == NULL)
		goto out;
	nfsi = NFS_I(inode);
	rcu_read_lock();
	delegation = rcu_dereference(nfsi->delegation);
	if (delegation == NULL || (delegation->type & FMODE_WRITE) == 0)
		goto out_iput;
	res->size = i_size_read(inode);
	res->change_attr = delegation->change_attr;
	if (nfsi->npages != 0)
		res->change_attr++;
	res->ctime = inode->i_ctime;
	res->mtime = inode->i_mtime;
	res->bitmap[0] = (FATTR4_WORD0_CHANGE|FATTR4_WORD0_SIZE) &
		args->bitmap[0];
	res->bitmap[1] = (FATTR4_WORD1_TIME_METADATA|FATTR4_WORD1_TIME_MODIFY) &
		args->bitmap[1];
	res->status = 0;
out_iput:
	rcu_read_unlock();
	iput(inode);
out:
	dprintk("%s: exit with status = %d\n", __func__, ntohl(res->status));
	return res->status;
}

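/*
 * CB_RECALL: the server is recalling an outstanding delegation. Locate the
 * inode by filehandle and hand the actual return off to a helper thread so
 * the callback reply is not held up by the delegation return itself.
 */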
__be32 nfs4_callback_recall(struct cb_recallargs *args, void *dummy,
			    struct cb_process_state *cps)
{
	struct inode *inode;
	__be32 res;

	res = htonl(NFS4ERR_OP_NOT_IN_SESSION);
	if (!cps->clp) /* Always set for v4.0. Set in cb_sequence for v4.1 */
		goto out;

	dprintk_rcu("NFS: RECALL callback request from %s\n",
		rpc_peeraddr2str(cps->clp->cl_rpcclient, RPC_DISPLAY_ADDR));

	res = htonl(NFS4ERR_BADHANDLE);
	inode = nfs_delegation_find_inode(cps->clp, &args->fh);
	if (inode == NULL)
		goto out;
	/* Set up a helper thread to actually return the delegation */
	switch (nfs_async_inode_return_delegation(inode, &args->stateid)) {
	case 0:
		res = 0;
		break;
	case -ENOENT:
		res = htonl(NFS4ERR_BAD_STATEID);
		break;
	default:
		res = htonl(NFS4ERR_RESOURCE);
	}
	iput(inode);
out:
	dprintk("%s: exit with status = %d\n", __func__, ntohl(res));
	return res;
}

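/* Everything below this point is specific to the NFSv4.1 backchannel. */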
#if defined(CONFIG_NFS_V4_1)

/*
 * Lookup a layout by filehandle.
 *
 * Note: gets a refcount on the layout hdr and on its respective inode.
 * Caller must put the layout hdr and the inode.
 *
 * TODO: keep track of all layouts (and delegations) in a hash table
 * hashed by filehandle.
 */
static struct pnfs_layout_hdr * get_layout_by_fh_locked(struct nfs_client *clp, struct nfs_fh *fh)
{
	struct nfs_server *server;
	struct inode *ino;
	struct pnfs_layout_hdr *lo;

	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
		list_for_each_entry(lo, &server->layouts, plh_layouts) {
			if (nfs_compare_fh(fh, &NFS_I(lo->plh_inode)->fh))
				continue;
			ino = igrab(lo->plh_inode);
			if (!ino)
				continue;
			spin_lock(&ino->i_lock);
			/* Is this layout in the process of being freed? */
			if (NFS_I(ino)->layout != lo) {
				spin_unlock(&ino->i_lock);
				iput(ino);
				continue;
			}
			pnfs_get_layout_hdr(lo);
			spin_unlock(&ino->i_lock);
			return lo;
		}
	}
	return NULL;
}

static struct pnfs_layout_hdr * get_layout_by_fh(struct nfs_client *clp, struct nfs_fh *fh)
{
	struct pnfs_layout_hdr *lo;

	spin_lock(&clp->cl_lock);
	rcu_read_lock();
	lo = get_layout_by_fh_locked(clp, fh);
	rcu_read_unlock();
	spin_unlock(&clp->cl_lock);
	return lo;
}

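/*
 * CB_LAYOUTRECALL(FILE): invalidate any layout segments matching the
 * recalled range. Returns NFS4ERR_DELAY while matching segments are still
 * in use (or a bulk recall is in progress), NFS4ERR_NOMATCHING_LAYOUT
 * otherwise.
 */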
static u32 initiate_file_draining(struct nfs_client *clp,
				  struct cb_layoutrecallargs *args)
{
	struct inode *ino;
	struct pnfs_layout_hdr *lo;
	u32 rv = NFS4ERR_NOMATCHING_LAYOUT;
	LIST_HEAD(free_me_list);

	lo = get_layout_by_fh(clp, &args->cbl_fh);
	if (!lo)
		return NFS4ERR_NOMATCHING_LAYOUT;

	ino = lo->plh_inode;
	spin_lock(&ino->i_lock);
	if (test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags) ||
	    pnfs_mark_matching_lsegs_invalid(lo, &free_me_list,
					&args->cbl_range))
		rv = NFS4ERR_DELAY;
	else
		rv = NFS4ERR_NOMATCHING_LAYOUT;
	pnfs_set_layout_stateid(lo, &args->cbl_stateid, true);
	spin_unlock(&ino->i_lock);
	pnfs_free_lseg_list(&free_me_list);
	pnfs_put_layout_hdr(lo);
	iput(ino);
	return rv;
}

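/*
 * CB_LAYOUTRECALL(FSID/ALL): walk every server under this client, collect
 * the matching layout headers on a list under the client lock, then drain
 * them outside of it so segment teardown never runs with cl_lock held.
 */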
static u32 initiate_bulk_draining(struct nfs_client *clp,
				  struct cb_layoutrecallargs *args)
{
	struct nfs_server *server;
	struct pnfs_layout_hdr *lo;
	struct inode *ino;
	u32 rv = NFS4ERR_NOMATCHING_LAYOUT;
	struct pnfs_layout_hdr *tmp;
	LIST_HEAD(recall_list);
	LIST_HEAD(free_me_list);
	struct pnfs_layout_range range = {
		.iomode = IOMODE_ANY,
		.offset = 0,
		.length = NFS4_MAX_UINT64,
	};

	spin_lock(&clp->cl_lock);
	rcu_read_lock();
	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
		if ((args->cbl_recall_type == RETURN_FSID) &&
		    memcmp(&server->fsid, &args->cbl_fsid,
			   sizeof(struct nfs_fsid)))
			continue;

		list_for_each_entry(lo, &server->layouts, plh_layouts) {
			ino = igrab(lo->plh_inode);
			if (ino == NULL)
				continue;
			spin_lock(&ino->i_lock);
			/* Is this layout in the process of being freed? */
			if (NFS_I(ino)->layout != lo) {
				spin_unlock(&ino->i_lock);
				iput(ino);
				continue;
			}
			pnfs_get_layout_hdr(lo);
			spin_unlock(&ino->i_lock);
			list_add(&lo->plh_bulk_recall, &recall_list);
		}
	}
	rcu_read_unlock();
	spin_unlock(&clp->cl_lock);

	list_for_each_entry_safe(lo, tmp,
				 &recall_list, plh_bulk_recall) {
		ino = lo->plh_inode;
		spin_lock(&ino->i_lock);
		set_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags);
		if (pnfs_mark_matching_lsegs_invalid(lo, &free_me_list, &range))
			rv = NFS4ERR_DELAY;
		list_del_init(&lo->plh_bulk_recall);
		spin_unlock(&ino->i_lock);
		pnfs_free_lseg_list(&free_me_list);
		pnfs_put_layout_hdr(lo);
		iput(ino);
	}
	return rv;
}

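/* Dispatch a decoded CB_LAYOUTRECALL to the file or bulk draining path. */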
static u32 do_callback_layoutrecall(struct nfs_client *clp,
				    struct cb_layoutrecallargs *args)
{
	u32 res;

	dprintk("%s enter, type=%i\n", __func__, args->cbl_recall_type);
	if (args->cbl_recall_type == RETURN_FILE)
		res = initiate_file_draining(clp, args);
	else
		res = initiate_bulk_draining(clp, args);
	dprintk("%s returning %i\n", __func__, res);
	return res;
}

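/*
 * CB_LAYOUTRECALL entry point: cps->clp is only set once CB_SEQUENCE has
 * identified the session, so its absence means "not in a session".
 */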
__be32 nfs4_callback_layoutrecall(struct cb_layoutrecallargs *args,
				  void *dummy, struct cb_process_state *cps)
{
	u32 res;

	dprintk("%s: -->\n", __func__);
	if (cps->clp)
		res = do_callback_layoutrecall(cps->clp, args);
	else
		res = NFS4ERR_OP_NOT_IN_SESSION;
	dprintk("%s: exit with status = %d\n", __func__, res);
	return cpu_to_be32(res);
}

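/*
 * CB_RECALL_ANY helper: return every layout held from this server by
 * faking up a bulk CB_LAYOUTRECALL(ALL).
 */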
static void pnfs_recall_all_layouts(struct nfs_client *clp)
{
	struct cb_layoutrecallargs args;

	/* Pretend we got a CB_LAYOUTRECALL(ALL) */
	memset(&args, 0, sizeof(args));
	args.cbl_recall_type = RETURN_ALL;
	/* FIXME we ignore errors, what should we do? */
	do_callback_layoutrecall(clp, &args);
}

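/*
 * CB_NOTIFY_DEVICEID: we make no attempt to track device-ID changes;
 * both CHANGE and DELETE notifications simply evict the device ID from
 * the cache of the matching layout driver.
 */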
__be32 nfs4_callback_devicenotify(struct cb_devicenotifyargs *args,
				  void *dummy, struct cb_process_state *cps)
{
	int i;
	__be32 res = 0;
	struct nfs_client *clp = cps->clp;
	struct nfs_server *server = NULL;

	dprintk("%s: -->\n", __func__);

	if (!clp) {
		res = cpu_to_be32(NFS4ERR_OP_NOT_IN_SESSION);
		goto out;
	}

	for (i = 0; i < args->ndevs; i++) {
		struct cb_devicenotifyitem *dev = &args->devs[i];

		if (!server ||
		    server->pnfs_curr_ld->id != dev->cbd_layout_type) {
			rcu_read_lock();
			list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link)
				if (server->pnfs_curr_ld &&
				    server->pnfs_curr_ld->id == dev->cbd_layout_type) {
					rcu_read_unlock();
					goto found;
				}
			rcu_read_unlock();
			dprintk("%s: layout type %u not found\n",
				__func__, dev->cbd_layout_type);
			continue;
		}

	found:
		if (dev->cbd_notify_type == NOTIFY_DEVICEID4_CHANGE)
			dprintk("%s: NOTIFY_DEVICEID4_CHANGE not supported, "
				"deleting instead\n", __func__);
		nfs4_delete_deviceid(server->pnfs_curr_ld, clp, &dev->cbd_dev_id);
	}

out:
	kfree(args->devs);
	dprintk("%s: exit with status = %u\n",
		__func__, be32_to_cpu(res));
	return res;
}

/*
 * Validate the sequenceID sent by the server.
 * Return success if the sequenceID is one more than what we last saw on
 * this slot, accounting for wraparound. Increments the slot's sequence.
 *
 * We don't yet implement a duplicate request cache, instead we set the
 * back channel ca_maxresponsesize_cached to zero. This is OK for now
 * since we only currently implement idempotent callbacks anyway.
 *
 * We have a single slot backchannel at this time, so we don't bother
 * checking the used_slots bit array on the table. The lower layer guarantees
 * a single outstanding callback request at a time.
 */
static __be32
validate_seqid(struct nfs4_slot_table *tbl, struct cb_sequenceargs * args)
{
	struct nfs4_slot *slot;

	dprintk("%s enter. slotid %d seqid %d\n",
		__func__, args->csa_slotid, args->csa_sequenceid);

	if (args->csa_slotid >= NFS41_BC_MAX_CALLBACKS)
		return htonl(NFS4ERR_BADSLOT);

	slot = tbl->slots + args->csa_slotid;
	dprintk("%s slot table seqid: %d\n", __func__, slot->seq_nr);

	/* Normal */
	if (likely(args->csa_sequenceid == slot->seq_nr + 1)) {
		slot->seq_nr++;
		goto out_ok;
	}

	/* Replay */
	if (args->csa_sequenceid == slot->seq_nr) {
		dprintk("%s seqid %d is a replay\n",
			__func__, args->csa_sequenceid);
		/* Signal process_op to set this error on next op */
		if (args->csa_cachethis == 0)
			return htonl(NFS4ERR_RETRY_UNCACHED_REP);

		/* The ca_maxresponsesize_cached is 0 with no DRC */
		else if (args->csa_cachethis == 1)
			return htonl(NFS4ERR_REP_TOO_BIG_TO_CACHE);
	}

	/* Wraparound */
	if (args->csa_sequenceid == 1 && (slot->seq_nr + 1) == 0) {
		slot->seq_nr = 1;
		goto out_ok;
	}

	/* Misordered request */
	return htonl(NFS4ERR_SEQ_MISORDERED);
out_ok:
	tbl->highest_used_slotid = args->csa_slotid;
	return htonl(NFS4_OK);
}

/*
 * For each referring call triple, check the session's slot table for
 * a match. If the slot is in use and the sequence numbers match, the
 * client is still waiting for a response to the original request.
 */
static bool referring_call_exists(struct nfs_client *clp,
				  uint32_t nrclists,
				  struct referring_call_list *rclists)
{
	bool status = false;
	int i, j;
	struct nfs4_session *session;
	struct nfs4_slot_table *tbl;
	struct referring_call_list *rclist;
	struct referring_call *ref;

	/*
	 * XXX When client trunking is implemented, this becomes
	 * a session lookup from within the loop
	 */
	session = clp->cl_session;
	tbl = &session->fc_slot_table;

	for (i = 0; i < nrclists; i++) {
		rclist = &rclists[i];
		if (memcmp(session->sess_id.data,
			   rclist->rcl_sessionid.data,
			   NFS4_MAX_SESSIONID_LEN) != 0)
			continue;

		for (j = 0; j < rclist->rcl_nrefcalls; j++) {
			ref = &rclist->rcl_refcalls[j];

			dprintk("%s: sessionid %x:%x:%x:%x sequenceid %u "
				"slotid %u\n", __func__,
				((u32 *)&rclist->rcl_sessionid.data)[0],
				((u32 *)&rclist->rcl_sessionid.data)[1],
				((u32 *)&rclist->rcl_sessionid.data)[2],
				((u32 *)&rclist->rcl_sessionid.data)[3],
				ref->rc_sequenceid, ref->rc_slotid);

			spin_lock(&tbl->slot_tbl_lock);
			status = (test_bit(ref->rc_slotid, tbl->used_slots) &&
				  tbl->slots[ref->rc_slotid].seq_nr ==
					ref->rc_sequenceid);
			spin_unlock(&tbl->slot_tbl_lock);
			if (status)
				goto out;
		}
	}

out:
	return status;
}

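/*
 * CB_SEQUENCE: validate the session and slot, then fill in the response.
 * NFS4ERR_DELAY is returned while the session is draining or while a
 * referring call is still outstanding, so the server will retry.
 */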
__be32 nfs4_callback_sequence(struct cb_sequenceargs *args,
			      struct cb_sequenceres *res,
			      struct cb_process_state *cps)
{
	struct nfs4_slot_table *tbl;
	struct nfs_client *clp;
	int i;
	__be32 status = htonl(NFS4ERR_BADSESSION);

	clp = nfs4_find_client_sessionid(cps->net, args->csa_addr, &args->csa_sessionid);
	if (clp == NULL)
		goto out;

	tbl = &clp->cl_session->bc_slot_table;

	spin_lock(&tbl->slot_tbl_lock);
	/* state manager is resetting the session */
	if (test_bit(NFS4_SESSION_DRAINING, &clp->cl_session->session_state)) {
		spin_unlock(&tbl->slot_tbl_lock);
		status = htonl(NFS4ERR_DELAY);
		/* Return NFS4ERR_BADSESSION if we're draining the session
		 * in order to reset it.
		 */
		if (test_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state))
			status = htonl(NFS4ERR_BADSESSION);
		goto out;
	}

	status = validate_seqid(&clp->cl_session->bc_slot_table, args);
	spin_unlock(&tbl->slot_tbl_lock);
	if (status)
		goto out;

	cps->slotid = args->csa_slotid;

	/*
	 * Check for pending referring calls. If a match is found, a
	 * related callback was received before the response to the original
	 * call.
	 */
	if (referring_call_exists(clp, args->csa_nrclists, args->csa_rclists)) {
		status = htonl(NFS4ERR_DELAY);
		goto out;
	}

	memcpy(&res->csr_sessionid, &args->csa_sessionid,
	       sizeof(res->csr_sessionid));
	res->csr_sequenceid = args->csa_sequenceid;
	res->csr_slotid = args->csa_slotid;
	res->csr_highestslotid = NFS41_BC_MAX_CALLBACKS - 1;
	res->csr_target_highestslotid = NFS41_BC_MAX_CALLBACKS - 1;

out:
	cps->clp = clp; /* put in nfs4_callback_compound */
	for (i = 0; i < args->csa_nrclists; i++)
		kfree(args->csa_rclists[i].rcl_refcalls);
	kfree(args->csa_rclists);

	if (status == htonl(NFS4ERR_RETRY_UNCACHED_REP)) {
		cps->drc_status = status;
		status = 0;
	} else
		res->csr_status = status;

	dprintk("%s: exit with status = %d res->csr_status %d\n", __func__,
		ntohl(status), ntohl(res->csr_status));
	return status;
}

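/*
 * A CB_RECALL_ANY type mask may only set bits covered by
 * RCA4_TYPE_MASK_ALL; anything else is NFS4ERR_INVAL.
 */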
static bool
validate_bitmap_values(unsigned long mask)
{
	return (mask & ~RCA4_TYPE_MASK_ALL) == 0;
}

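/*
 * CB_RECALL_ANY: the server is asking us to return recallable objects.
 * We expire all delegations of the requested types and, if
 * RCA4_TYPE_MASK_FILE_LAYOUT is set, return all layouts as well.
 */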
__be32 nfs4_callback_recallany(struct cb_recallanyargs *args, void *dummy,
			       struct cb_process_state *cps)
{
	__be32 status;
	fmode_t flags = 0;

	status = cpu_to_be32(NFS4ERR_OP_NOT_IN_SESSION);
	if (!cps->clp) /* set in cb_sequence */
		goto out;

	dprintk_rcu("NFS: RECALL_ANY callback request from %s\n",
		rpc_peeraddr2str(cps->clp->cl_rpcclient, RPC_DISPLAY_ADDR));

	status = cpu_to_be32(NFS4ERR_INVAL);
	if (!validate_bitmap_values(args->craa_type_mask))
		goto out;

	status = cpu_to_be32(NFS4_OK);
	if (test_bit(RCA4_TYPE_MASK_RDATA_DLG, (const unsigned long *)
		     &args->craa_type_mask))
		flags = FMODE_READ;
	if (test_bit(RCA4_TYPE_MASK_WDATA_DLG, (const unsigned long *)
		     &args->craa_type_mask))
		flags |= FMODE_WRITE;
	if (test_bit(RCA4_TYPE_MASK_FILE_LAYOUT, (const unsigned long *)
		     &args->craa_type_mask))
		pnfs_recall_all_layouts(cps->clp);

	nfs_expire_all_delegation_types(cps->clp, flags);
out:
	dprintk("%s: exit with status = %d\n", __func__, ntohl(status));
	return status;
}

/* Reduce the fore channel's max_slots to the target value */
__be32 nfs4_callback_recallslot(struct cb_recallslotargs *args, void *dummy,
				struct cb_process_state *cps)
{
	struct nfs4_slot_table *fc_tbl;
	__be32 status;

	status = htonl(NFS4ERR_OP_NOT_IN_SESSION);
	if (!cps->clp) /* set in cb_sequence */
		goto out;

	dprintk_rcu("NFS: CB_RECALL_SLOT request from %s target max slots %d\n",
		rpc_peeraddr2str(cps->clp->cl_rpcclient, RPC_DISPLAY_ADDR),
		args->crsa_target_max_slots);

	fc_tbl = &cps->clp->cl_session->fc_slot_table;

	status = htonl(NFS4ERR_BAD_HIGH_SLOT);
	if (args->crsa_target_max_slots > fc_tbl->max_slots ||
	    args->crsa_target_max_slots < 1)
		goto out;

	status = htonl(NFS4_OK);
	if (args->crsa_target_max_slots == fc_tbl->max_slots)
		goto out;

	nfs41_set_target_slotid(fc_tbl, args->crsa_target_max_slots);
	nfs41_handle_recall_slot(cps->clp);
out:
	dprintk("%s: exit with status = %d\n", __func__, ntohl(status));
	return status;
}
#endif /* CONFIG_NFS_V4_1 */